arch/arm/mm/cache-l2x0.c
v3.15
   1/*
   2 * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support
   3 *
   4 * Copyright (C) 2007 ARM Limited
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License version 2 as
   8 * published by the Free Software Foundation.
   9 *
  10 * This program is distributed in the hope that it will be useful,
  11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  13 * GNU General Public License for more details.
  14 *
  15 * You should have received a copy of the GNU General Public License
  16 * along with this program; if not, write to the Free Software
  17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  18 */
  19#include <linux/err.h>
  20#include <linux/init.h>
  21#include <linux/spinlock.h>
  22#include <linux/io.h>
  23#include <linux/of.h>
  24#include <linux/of_address.h>
  25
  26#include <asm/cacheflush.h>
  27#include <asm/hardware/cache-l2x0.h>
  28#include "cache-tauros3.h"
  29#include "cache-aurora-l2.h"
  30
  31#define CACHE_LINE_SIZE		32
  32
  33static void __iomem *l2x0_base;
  34static DEFINE_RAW_SPINLOCK(l2x0_lock);
  35static u32 l2x0_way_mask;	/* Bitmask of active ways */
  36static u32 l2x0_size;
  37static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;
  38
  39/* Aurora doesn't have the cache ID register available, so we have to
  40 * pass it through the device tree */
  41static u32  cache_id_part_number_from_dt;
  42
  43struct l2x0_regs l2x0_saved_regs;
  44
  45struct l2x0_of_data {
  46	void (*setup)(const struct device_node *, u32 *, u32 *);
  47	void (*save)(void);
  48	struct outer_cache_fns outer_cache;
  49};
  50
  51static bool of_init = false;
  52
  53static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
  54{
  55	/* wait for cache operation by line or way to complete */
  56	while (readl_relaxed(reg) & mask)
  57		cpu_relax();
  58}
  59
  60#ifdef CONFIG_CACHE_PL310
  61static inline void cache_wait(void __iomem *reg, unsigned long mask)
  62{
  63	/* cache operations by line are atomic on PL310 */
  64}
  65#else
  66#define cache_wait	cache_wait_way
  67#endif
  68
  69static inline void cache_sync(void)
  70{
  71	void __iomem *base = l2x0_base;
  72
  73	writel_relaxed(0, base + sync_reg_offset);
  74	cache_wait(base + L2X0_CACHE_SYNC, 1);
  75}
  76
  77static inline void l2x0_clean_line(unsigned long addr)
  78{
  79	void __iomem *base = l2x0_base;
  80	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
  81	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
  82}
  83
  84static inline void l2x0_inv_line(unsigned long addr)
  85{
  86	void __iomem *base = l2x0_base;
  87	cache_wait(base + L2X0_INV_LINE_PA, 1);
  88	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
  89}
  90
  91#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
  92static inline void debug_writel(unsigned long val)
  93{
  94	if (outer_cache.set_debug)
  95		outer_cache.set_debug(val);
  96}
  97
  98static void pl310_set_debug(unsigned long val)
  99{
 100	writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL);
 101}
 102#else
 103/* Optimised out for non-errata case */
 104static inline void debug_writel(unsigned long val)
 105{
 106}
 107
 108#define pl310_set_debug	NULL
 109#endif
 110
 111#ifdef CONFIG_PL310_ERRATA_588369
 112static inline void l2x0_flush_line(unsigned long addr)
 113{
 114	void __iomem *base = l2x0_base;
 115
 116	/* Clean by PA followed by Invalidate by PA */
 117	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
 118	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
 119	cache_wait(base + L2X0_INV_LINE_PA, 1);
 120	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
 121}
 122#else
 123
 124static inline void l2x0_flush_line(unsigned long addr)
 125{
 126	void __iomem *base = l2x0_base;
 127	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
 128	writel_relaxed(addr, base + L2X0_CLEAN_INV_LINE_PA);
 129}
 130#endif
 131
 132static void l2x0_cache_sync(void)
 133{
 134	unsigned long flags;
 135
 136	raw_spin_lock_irqsave(&l2x0_lock, flags);
 137	cache_sync();
 138	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 139}
 140
 141static void __l2x0_flush_all(void)
 142{
 143	debug_writel(0x03);
 144	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_INV_WAY);
 145	cache_wait_way(l2x0_base + L2X0_CLEAN_INV_WAY, l2x0_way_mask);
 146	cache_sync();
 147	debug_writel(0x00);
 148}
 149
 150static void l2x0_flush_all(void)
 151{
 152	unsigned long flags;
 153
 154	/* clean all ways */
 155	raw_spin_lock_irqsave(&l2x0_lock, flags);
 156	__l2x0_flush_all();
 157	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 158}
 159
 160static void l2x0_clean_all(void)
 161{
 162	unsigned long flags;
 163
 164	/* clean all ways */
 165	raw_spin_lock_irqsave(&l2x0_lock, flags);
 166	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY);
 167	cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask);
 168	cache_sync();
 169	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 170}
 171
 172static void l2x0_inv_all(void)
 173{
 174	unsigned long flags;
 175
 176	/* invalidate all ways */
 177	raw_spin_lock_irqsave(&l2x0_lock, flags);
 178	/* Invalidating when L2 is enabled is a no-no */
 179	BUG_ON(readl(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN);
 180	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
 181	cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
 182	cache_sync();
 183	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 184}
 185
 186static void l2x0_inv_range(unsigned long start, unsigned long end)
 187{
 188	void __iomem *base = l2x0_base;
 189	unsigned long flags;
 190
 191	raw_spin_lock_irqsave(&l2x0_lock, flags);
 192	if (start & (CACHE_LINE_SIZE - 1)) {
 193		start &= ~(CACHE_LINE_SIZE - 1);
 194		debug_writel(0x03);
 195		l2x0_flush_line(start);
 196		debug_writel(0x00);
 197		start += CACHE_LINE_SIZE;
 198	}
 199
 200	if (end & (CACHE_LINE_SIZE - 1)) {
 201		end &= ~(CACHE_LINE_SIZE - 1);
 202		debug_writel(0x03);
 203		l2x0_flush_line(end);
 204		debug_writel(0x00);
 205	}
 206
 207	while (start < end) {
 208		unsigned long blk_end = start + min(end - start, 4096UL);
 209
 210		while (start < blk_end) {
 211			l2x0_inv_line(start);
 212			start += CACHE_LINE_SIZE;
 213		}
 214
 215		if (blk_end < end) {
 216			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 217			raw_spin_lock_irqsave(&l2x0_lock, flags);
 218		}
 219	}
 220	cache_wait(base + L2X0_INV_LINE_PA, 1);
 221	cache_sync();
 222	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 223}
 224
 225static void l2x0_clean_range(unsigned long start, unsigned long end)
 226{
 227	void __iomem *base = l2x0_base;
 228	unsigned long flags;
 229
 230	if ((end - start) >= l2x0_size) {
 231		l2x0_clean_all();
 232		return;
 233	}
 234
 235	raw_spin_lock_irqsave(&l2x0_lock, flags);
 236	start &= ~(CACHE_LINE_SIZE - 1);
 237	while (start < end) {
 238		unsigned long blk_end = start + min(end - start, 4096UL);
 239
 240		while (start < blk_end) {
 241			l2x0_clean_line(start);
 242			start += CACHE_LINE_SIZE;
 243		}
 244
 245		if (blk_end < end) {
 246			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 247			raw_spin_lock_irqsave(&l2x0_lock, flags);
 248		}
 249	}
 250	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
 251	cache_sync();
 252	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 253}
 254
 255static void l2x0_flush_range(unsigned long start, unsigned long end)
 256{
 257	void __iomem *base = l2x0_base;
 258	unsigned long flags;
 259
 260	if ((end - start) >= l2x0_size) {
 261		l2x0_flush_all();
 262		return;
 263	}
 264
 265	raw_spin_lock_irqsave(&l2x0_lock, flags);
 266	start &= ~(CACHE_LINE_SIZE - 1);
 267	while (start < end) {
 268		unsigned long blk_end = start + min(end - start, 4096UL);
 269
 270		debug_writel(0x03);
 271		while (start < blk_end) {
 272			l2x0_flush_line(start);
 273			start += CACHE_LINE_SIZE;
 274		}
 275		debug_writel(0x00);
 276
 277		if (blk_end < end) {
 278			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 279			raw_spin_lock_irqsave(&l2x0_lock, flags);
 280		}
 281	}
 282	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
 283	cache_sync();
 284	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 285}
 286
 287static void l2x0_disable(void)
 288{
 289	unsigned long flags;
 290
 291	raw_spin_lock_irqsave(&l2x0_lock, flags);
 292	__l2x0_flush_all();
 293	writel_relaxed(0, l2x0_base + L2X0_CTRL);
 294	dsb(st);
 295	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 296}
 297
 298static void l2x0_unlock(u32 cache_id)
 299{
 300	int lockregs;
 301	int i;
 302
 303	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
 304	case L2X0_CACHE_ID_PART_L310:
 305		lockregs = 8;
 306		break;
 307	case AURORA_CACHE_ID:
 308		lockregs = 4;
 309		break;
 310	default:
 311		/* L210 and unknown types */
 312		lockregs = 1;
 313		break;
 314	}
 315
 316	for (i = 0; i < lockregs; i++) {
 317		writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_D_BASE +
 318			       i * L2X0_LOCKDOWN_STRIDE);
 319		writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_I_BASE +
 320			       i * L2X0_LOCKDOWN_STRIDE);
 321	}
 322}
 323
 324void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
 325{
 326	u32 aux;
 327	u32 cache_id;
 328	u32 way_size = 0;
 329	int ways;
 330	int way_size_shift = L2X0_WAY_SIZE_SHIFT;
 331	const char *type;
 332
 333	l2x0_base = base;
 334	if (cache_id_part_number_from_dt)
 335		cache_id = cache_id_part_number_from_dt;
 336	else
 337		cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
 338	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
 339
 340	aux &= aux_mask;
 341	aux |= aux_val;
 342
 343	/* Determine the number of ways */
 344	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
 345	case L2X0_CACHE_ID_PART_L310:
 346		if (aux & (1 << 16))
 347			ways = 16;
 348		else
 349			ways = 8;
 350		type = "L310";
 351#ifdef CONFIG_PL310_ERRATA_753970
 352		/* Unmapped register. */
 353		sync_reg_offset = L2X0_DUMMY_REG;
 354#endif
 355		if ((cache_id & L2X0_CACHE_ID_RTL_MASK) <= L2X0_CACHE_ID_RTL_R3P0)
 356			outer_cache.set_debug = pl310_set_debug;
 357		break;
 358	case L2X0_CACHE_ID_PART_L210:
 359		ways = (aux >> 13) & 0xf;
 360		type = "L210";
 361		break;
 362
 363	case AURORA_CACHE_ID:
 364		sync_reg_offset = AURORA_SYNC_REG;
 365		ways = (aux >> 13) & 0xf;
 366		ways = 2 << ((ways + 1) >> 2);
 367		way_size_shift = AURORA_WAY_SIZE_SHIFT;
 368		type = "Aurora";
 369		break;
 370	default:
 371		/* Assume unknown chips have 8 ways */
 372		ways = 8;
 373		type = "L2x0 series";
 374		break;
 375	}
 376
 377	l2x0_way_mask = (1 << ways) - 1;
 378
 379	/*
 380	 * L2 cache Size =  Way size * Number of ways
 381	 */
 382	way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17;
 383	way_size = 1 << (way_size + way_size_shift);
 384
 385	l2x0_size = ways * way_size * SZ_1K;
 386
 387	/*
 388	 * Check if l2x0 controller is already enabled.
 389	 * If we are booting in non-secure mode,
 390	 * accessing the registers below will fault.
 391	 */
 392	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
 393		/* Make sure that I&D is not locked down when starting */
 394		l2x0_unlock(cache_id);
 395
 396		/* l2x0 controller is disabled */
 397		writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);
 398
 399		l2x0_inv_all();
 400
 401		/* enable L2X0 */
 402		writel_relaxed(L2X0_CTRL_EN, l2x0_base + L2X0_CTRL);
 403	}
 404
 405	/* Re-read it in case some bits are reserved. */
 406	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
 407
 408	/* Save the value for resuming. */
 409	l2x0_saved_regs.aux_ctrl = aux;
 410
 411	if (!of_init) {
 412		outer_cache.inv_range = l2x0_inv_range;
 413		outer_cache.clean_range = l2x0_clean_range;
 414		outer_cache.flush_range = l2x0_flush_range;
 415		outer_cache.sync = l2x0_cache_sync;
 416		outer_cache.flush_all = l2x0_flush_all;
 417		outer_cache.inv_all = l2x0_inv_all;
 418		outer_cache.disable = l2x0_disable;
 419	}
 420
 421	pr_info("%s cache controller enabled\n", type);
 422	pr_info("l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d kB\n",
 423		ways, cache_id, aux, l2x0_size >> 10);
 424}
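
The way/size arithmetic in l2x0_init() above is compact; the following standalone sketch (not part of the driver) walks one concrete case through it, assuming the usual field layout from <asm/hardware/cache-l2x0.h>: the way-size field of AUX_CTRL occupies bits [19:17] and L2X0_WAY_SIZE_SHIFT is 3.

/* Illustrative sketch only: mirrors the size calculation in l2x0_init().
 * Assumes the AUX_CTRL way-size field sits at bits [19:17] and that
 * L2X0_WAY_SIZE_SHIFT is 3, per <asm/hardware/cache-l2x0.h>. */
#include <stdio.h>

int main(void)
{
	unsigned int aux = 0x02060000;	/* example AUX_CTRL: way-size field = 3 */
	unsigned int ways = 8;		/* e.g. an L310 with the associativity bit (16) clear */
	unsigned int field = (aux >> 17) & 0x7;		/* bits [19:17] -> 3 */
	unsigned int way_size_kb = 1u << (field + 3);	/* 1 << (3 + 3) = 64 KB per way */

	/* L2 cache size = way size * number of ways: 8 * 64 KB = 512 KB */
	printf("L2 size: %u KB\n", ways * way_size_kb);
	return 0;
}
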
 425
 426#ifdef CONFIG_OF
 427static int l2_wt_override;
 428
 429/*
 430 * Note that the end addresses passed to Linux primitives are
 431 * noninclusive, while the hardware cache range operations use
 432 * inclusive start and end addresses.
 433 */
 434static unsigned long calc_range_end(unsigned long start, unsigned long end)
 435{
 436	/*
 437	 * Limit the number of cache lines processed at once,
 438	 * since cache range operations stall the CPU pipeline
 439	 * until completion.
 440	 */
 441	if (end > start + MAX_RANGE_SIZE)
 442		end = start + MAX_RANGE_SIZE;
 443
 444	/*
 445	 * Cache range operations can't straddle a page boundary.
 446	 */
 447	if (end > PAGE_ALIGN(start+1))
 448		end = PAGE_ALIGN(start+1);
 449
 450	return end;
 451}
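
To make the chunking contract concrete, here is a small user-space sketch of the walk that the Aurora range operations below build on top of calc_range_end(); the 1 KB maximum range and 4 KB page size are stand-ins for MAX_RANGE_SIZE and PAGE_SIZE (the real values come from cache-aurora-l2.h and the kernel configuration).

/* Illustrative sketch only: walks [start, end) in chunks that never exceed
 * DEMO_MAX_RANGE bytes and never straddle a page, mirroring calc_range_end()
 * above.  Both constants are assumptions for the demo. */
#include <stdio.h>

#define DEMO_MAX_RANGE	1024UL
#define DEMO_PAGE_SIZE	4096UL

static unsigned long demo_range_end(unsigned long start, unsigned long end)
{
	if (end > start + DEMO_MAX_RANGE)
		end = start + DEMO_MAX_RANGE;
	/* PAGE_ALIGN(start + 1): round start + 1 up to the next page boundary */
	if (end > ((start + DEMO_PAGE_SIZE) & ~(DEMO_PAGE_SIZE - 1)))
		end = (start + DEMO_PAGE_SIZE) & ~(DEMO_PAGE_SIZE - 1);
	return end;
}

int main(void)
{
	unsigned long start = 0x1f80, end = 0x2900;

	while (start < end) {
		unsigned long range_end = demo_range_end(start, end);
		/* prints 0x1f80-0x2000, 0x2000-0x2400, 0x2400-0x2800, 0x2800-0x2900 */
		printf("chunk: 0x%lx - 0x%lx\n", start, range_end);
		start = range_end;
	}
	return 0;
}
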
 452
 453/*
 454 * Make sure 'start' and 'end' reference the same page, as L2 is PIPT
 455 * and range operations only do a TLB lookup on the start address.
 456 */
 457static void aurora_pa_range(unsigned long start, unsigned long end,
 458			unsigned long offset)
 459{
 460	unsigned long flags;
 461
 462	raw_spin_lock_irqsave(&l2x0_lock, flags);
 463	writel_relaxed(start, l2x0_base + AURORA_RANGE_BASE_ADDR_REG);
 464	writel_relaxed(end, l2x0_base + offset);
 465	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 466
 467	cache_sync();
 468}
 469
 470static void aurora_inv_range(unsigned long start, unsigned long end)
 471{
 472	/*
 473	 * round start and end addresses to cache line boundaries
 474	 */
 475	start &= ~(CACHE_LINE_SIZE - 1);
 476	end = ALIGN(end, CACHE_LINE_SIZE);
 477
 478	/*
 479	 * Invalidate all full cache lines between 'start' and 'end'.
 480	 */
 481	while (start < end) {
 482		unsigned long range_end = calc_range_end(start, end);
 483		aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
 484				AURORA_INVAL_RANGE_REG);
 485		start = range_end;
 486	}
 487}
 488
 489static void aurora_clean_range(unsigned long start, unsigned long end)
 490{
 491	/*
 492	 * If L2 is forced to WT, the L2 will always be clean and we
 493	 * don't need to do anything here.
 494	 */
 495	if (!l2_wt_override) {
 496		start &= ~(CACHE_LINE_SIZE - 1);
 497		end = ALIGN(end, CACHE_LINE_SIZE);
 498		while (start != end) {
 499			unsigned long range_end = calc_range_end(start, end);
 500			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
 501					AURORA_CLEAN_RANGE_REG);
 502			start = range_end;
 503		}
 504	}
 505}
 506
 507static void aurora_flush_range(unsigned long start, unsigned long end)
 508{
 509	start &= ~(CACHE_LINE_SIZE - 1);
 510	end = ALIGN(end, CACHE_LINE_SIZE);
 511	while (start != end) {
 512		unsigned long range_end = calc_range_end(start, end);
 513		/*
 514		 * If L2 is forced to WT, the L2 will always be clean and we
 515		 * just need to invalidate.
 516		 */
 517		if (l2_wt_override)
 518			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
 519							AURORA_INVAL_RANGE_REG);
 520		else
 521			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
 522							AURORA_FLUSH_RANGE_REG);
 523		start = range_end;
 524	}
 525}
 526
 527/*
 528 * For certain Broadcom SoCs, depending on the address range, different offsets
 529 * need to be added to the address before passing it to L2 for
 530 * invalidation/clean/flush
 531 *
 532 * Section Address Range              Offset        EMI
 533 *   1     0x00000000 - 0x3FFFFFFF    0x80000000    VC
 534 *   2     0x40000000 - 0xBFFFFFFF    0x40000000    SYS
 535 *   3     0xC0000000 - 0xFFFFFFFF    0x80000000    VC
 536 *
 537 * When the start and end addresses fall in two different sections, we
 538 * need to break the L2 operation into two, each within its own section.
 539 * For example, to invalidate a range starting at 0xBFFF0000 and ending
 540 * at 0xC0001000, we need to invalidate 1) 0xBFFF0000 - 0xBFFFFFFF and
 541 * 2) 0xC0000000 - 0xC0001000
 542 *
 543 * Note 1:
 544 * By breaking a single L2 operation into two, we may suffer some
 545 * performance hit, but keep in mind that the cross-section case is very rare
 546 *
 547 * Note 2:
 548 * We do not need to handle the case when the start address is in
 549 * Section 1 and the end address is in Section 3, since it is not a valid use
 550 * case
 551 *
 552 * Note 3:
 553 * In practical terms, Section 1 can no longer be used on rev A2, so the
 554 * code does not need to handle Section 1 at all.
 555 *
 556 */
 557#define BCM_SYS_EMI_START_ADDR        0x40000000UL
 558#define BCM_VC_EMI_SEC3_START_ADDR    0xC0000000UL
 559
 560#define BCM_SYS_EMI_OFFSET            0x40000000UL
 561#define BCM_VC_EMI_OFFSET             0x80000000UL
 562
 563static inline int bcm_addr_is_sys_emi(unsigned long addr)
 564{
 565	return (addr >= BCM_SYS_EMI_START_ADDR) &&
 566		(addr < BCM_VC_EMI_SEC3_START_ADDR);
 567}
 568
 569static inline unsigned long bcm_l2_phys_addr(unsigned long addr)
 570{
 571	if (bcm_addr_is_sys_emi(addr))
 572		return addr + BCM_SYS_EMI_OFFSET;
 573	else
 574		return addr + BCM_VC_EMI_OFFSET;
 575}
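
A quick worked example of the remapping table above (a standalone sketch, not driver code): a section 2 address takes the SYS offset, a section 3 address takes the VC offset, and the 32-bit addition wraps for section 3.

/* Illustrative sketch of the Broadcom section remapping above. */
#include <stdint.h>
#include <stdio.h>

#define SYS_START	0x40000000u	/* BCM_SYS_EMI_START_ADDR */
#define SEC3_START	0xC0000000u	/* BCM_VC_EMI_SEC3_START_ADDR */

static uint32_t demo_phys(uint32_t addr)
{
	/* section 2 (SYS EMI) gets +0x40000000, sections 1/3 (VC) +0x80000000 */
	return addr + (addr >= SYS_START && addr < SEC3_START ?
		       0x40000000u : 0x80000000u);
}

int main(void)
{
	printf("0x50000000 -> 0x%08x\n", (unsigned)demo_phys(0x50000000u)); /* 0x90000000 */
	printf("0xC0000100 -> 0x%08x\n", (unsigned)demo_phys(0xC0000100u)); /* 0x40000100, wraps */
	return 0;
}
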
 576
 577static void bcm_inv_range(unsigned long start, unsigned long end)
 578{
 579	unsigned long new_start, new_end;
 580
 581	BUG_ON(start < BCM_SYS_EMI_START_ADDR);
 582
 583	if (unlikely(end <= start))
 584		return;
 585
 586	new_start = bcm_l2_phys_addr(start);
 587	new_end = bcm_l2_phys_addr(end);
 588
 589	/* normal case, no cross section between start and end */
 590	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
 591		l2x0_inv_range(new_start, new_end);
 592		return;
 593	}
 594
 595	/* They cross sections, so it can only be a cross from section
 596	 * 2 to section 3
 597	 */
 598	l2x0_inv_range(new_start,
 599		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
 600	l2x0_inv_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
 601		new_end);
 602}
 603
 604static void bcm_clean_range(unsigned long start, unsigned long end)
 605{
 606	unsigned long new_start, new_end;
 607
 608	BUG_ON(start < BCM_SYS_EMI_START_ADDR);
 609
 610	if (unlikely(end <= start))
 611		return;
 612
 613	if ((end - start) >= l2x0_size) {
 614		l2x0_clean_all();
 615		return;
 616	}
 617
 618	new_start = bcm_l2_phys_addr(start);
 619	new_end = bcm_l2_phys_addr(end);
 620
 621	/* normal case, no cross section between start and end */
 622	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
 623		l2x0_clean_range(new_start, new_end);
 624		return;
 625	}
 626
 627	/* They cross sections, so it can only be a cross from section
 628	 * 2 to section 3
 629	 */
 630	l2x0_clean_range(new_start,
 631		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
 632	l2x0_clean_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
 633		new_end);
 634}
 635
 636static void bcm_flush_range(unsigned long start, unsigned long end)
 637{
 638	unsigned long new_start, new_end;
 639
 640	BUG_ON(start < BCM_SYS_EMI_START_ADDR);
 641
 642	if (unlikely(end <= start))
 643		return;
 644
 645	if ((end - start) >= l2x0_size) {
 646		l2x0_flush_all();
 647		return;
 648	}
 649
 650	new_start = bcm_l2_phys_addr(start);
 651	new_end = bcm_l2_phys_addr(end);
 652
 653	/* normal case, no cross section between start and end */
 654	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
 655		l2x0_flush_range(new_start, new_end);
 656		return;
 657	}
 658
 659	/* They cross sections, so it can only be a cross from section
 660	 * 2 to section 3
 661	 */
 662	l2x0_flush_range(new_start,
 663		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
 664	l2x0_flush_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
 665		new_end);
 666}
 667
 668static void __init l2x0_of_setup(const struct device_node *np,
 669				 u32 *aux_val, u32 *aux_mask)
 670{
 671	u32 data[2] = { 0, 0 };
 672	u32 tag = 0;
 673	u32 dirty = 0;
 674	u32 val = 0, mask = 0;
 675
 676	of_property_read_u32(np, "arm,tag-latency", &tag);
 677	if (tag) {
 678		mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;
 679		val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;
 680	}
 681
 682	of_property_read_u32_array(np, "arm,data-latency",
 683				   data, ARRAY_SIZE(data));
 684	if (data[0] && data[1]) {
 685		mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
 686			L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
 687		val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
 688		       ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
 689	}
 690
 691	of_property_read_u32(np, "arm,dirty-latency", &dirty);
 692	if (dirty) {
 693		mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
 694		val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
 695	}
 696
 697	*aux_val &= ~mask;
 698	*aux_val |= val;
 699	*aux_mask &= ~mask;
 700}
 701
 702static void __init pl310_of_setup(const struct device_node *np,
 703				  u32 *aux_val, u32 *aux_mask)
 704{
 705	u32 data[3] = { 0, 0, 0 };
 706	u32 tag[3] = { 0, 0, 0 };
 707	u32 filter[2] = { 0, 0 };
 708
 709	of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
 710	if (tag[0] && tag[1] && tag[2])
 711		writel_relaxed(
 712			((tag[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
 713			((tag[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
 714			((tag[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
 715			l2x0_base + L2X0_TAG_LATENCY_CTRL);
 716
 717	of_property_read_u32_array(np, "arm,data-latency",
 718				   data, ARRAY_SIZE(data));
 719	if (data[0] && data[1] && data[2])
 720		writel_relaxed(
 721			((data[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
 722			((data[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
 723			((data[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
 724			l2x0_base + L2X0_DATA_LATENCY_CTRL);
 725
 726	of_property_read_u32_array(np, "arm,filter-ranges",
 727				   filter, ARRAY_SIZE(filter));
 728	if (filter[1]) {
 729		writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M),
 730			       l2x0_base + L2X0_ADDR_FILTER_END);
 731		writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L2X0_ADDR_FILTER_EN,
 732			       l2x0_base + L2X0_ADDR_FILTER_START);
 733	}
 734}
 735
 736static void __init pl310_save(void)
 737{
 738	u32 l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) &
 739		L2X0_CACHE_ID_RTL_MASK;
 740
 741	l2x0_saved_regs.tag_latency = readl_relaxed(l2x0_base +
 742		L2X0_TAG_LATENCY_CTRL);
 743	l2x0_saved_regs.data_latency = readl_relaxed(l2x0_base +
 744		L2X0_DATA_LATENCY_CTRL);
 745	l2x0_saved_regs.filter_end = readl_relaxed(l2x0_base +
 746		L2X0_ADDR_FILTER_END);
 747	l2x0_saved_regs.filter_start = readl_relaxed(l2x0_base +
 748		L2X0_ADDR_FILTER_START);
 749
 750	if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) {
 751		/*
 752		 * From r2p0, there is Prefetch offset/control register
 753		 */
 754		l2x0_saved_regs.prefetch_ctrl = readl_relaxed(l2x0_base +
 755			L2X0_PREFETCH_CTRL);
 756		/*
 757		 * From r3p0, there is Power control register
 758		 */
 759		if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0)
 760			l2x0_saved_regs.pwr_ctrl = readl_relaxed(l2x0_base +
 761				L2X0_POWER_CTRL);
 762	}
 763}
 764
 765static void aurora_save(void)
 766{
 767	l2x0_saved_regs.ctrl = readl_relaxed(l2x0_base + L2X0_CTRL);
 768	l2x0_saved_regs.aux_ctrl = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
 769}
 770
 771static void __init tauros3_save(void)
 772{
 773	l2x0_saved_regs.aux2_ctrl =
 774		readl_relaxed(l2x0_base + TAUROS3_AUX2_CTRL);
 775	l2x0_saved_regs.prefetch_ctrl =
 776		readl_relaxed(l2x0_base + L2X0_PREFETCH_CTRL);
 777}
 778
 779static void l2x0_resume(void)
 780{
 781	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
 782		/* restore aux ctrl and enable l2 */
 783		l2x0_unlock(readl_relaxed(l2x0_base + L2X0_CACHE_ID));
 784
 785		writel_relaxed(l2x0_saved_regs.aux_ctrl, l2x0_base +
 786			L2X0_AUX_CTRL);
 787
 788		l2x0_inv_all();
 789
 790		writel_relaxed(L2X0_CTRL_EN, l2x0_base + L2X0_CTRL);
 791	}
 792}
 793
 794static void pl310_resume(void)
 795{
 796	u32 l2x0_revision;
 797
 798	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
 799		/* restore pl310 setup */
 800		writel_relaxed(l2x0_saved_regs.tag_latency,
 801			l2x0_base + L2X0_TAG_LATENCY_CTRL);
 802		writel_relaxed(l2x0_saved_regs.data_latency,
 803			l2x0_base + L2X0_DATA_LATENCY_CTRL);
 804		writel_relaxed(l2x0_saved_regs.filter_end,
 805			l2x0_base + L2X0_ADDR_FILTER_END);
 806		writel_relaxed(l2x0_saved_regs.filter_start,
 807			l2x0_base + L2X0_ADDR_FILTER_START);
 808
 809		l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) &
 810			L2X0_CACHE_ID_RTL_MASK;
 811
 812		if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) {
 813			writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
 814				l2x0_base + L2X0_PREFETCH_CTRL);
 815			if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0)
 816				writel_relaxed(l2x0_saved_regs.pwr_ctrl,
 817					l2x0_base + L2X0_POWER_CTRL);
 818		}
 819	}
 820
 821	l2x0_resume();
 822}
 823
 824static void aurora_resume(void)
 825{
 826	if (!(readl(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
 827		writel_relaxed(l2x0_saved_regs.aux_ctrl,
 828				l2x0_base + L2X0_AUX_CTRL);
 829		writel_relaxed(l2x0_saved_regs.ctrl, l2x0_base + L2X0_CTRL);
 830	}
 831}
 832
 833static void tauros3_resume(void)
 834{
 835	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
 836		writel_relaxed(l2x0_saved_regs.aux2_ctrl,
 837			       l2x0_base + TAUROS3_AUX2_CTRL);
 838		writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
 839			       l2x0_base + L2X0_PREFETCH_CTRL);
 840	}
 841
 842	l2x0_resume();
 843}
 844
 845static void __init aurora_broadcast_l2_commands(void)
 846{
 847	__u32 u;
 848	/* Enable broadcasting of cache commands to L2 */
 849	__asm__ __volatile__("mrc p15, 1, %0, c15, c2, 0" : "=r"(u));
 850	u |= AURORA_CTRL_FW;		/* Set the FW bit */
 851	__asm__ __volatile__("mcr p15, 1, %0, c15, c2, 0\n" : : "r"(u));
 852	isb();
 853}
 854
 855static void __init aurora_of_setup(const struct device_node *np,
 856				u32 *aux_val, u32 *aux_mask)
 857{
 858	u32 val = AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU;
 859	u32 mask =  AURORA_ACR_REPLACEMENT_MASK;
 860
 861	of_property_read_u32(np, "cache-id-part",
 862			&cache_id_part_number_from_dt);
 863
 864	/* Determine and save the write policy */
 865	l2_wt_override = of_property_read_bool(np, "wt-override");
 866
 867	if (l2_wt_override) {
 868		val |= AURORA_ACR_FORCE_WRITE_THRO_POLICY;
 869		mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK;
 870	}
 871
 872	*aux_val &= ~mask;
 873	*aux_val |= val;
 874	*aux_mask &= ~mask;
 875}
 876
 877static const struct l2x0_of_data pl310_data = {
 878	.setup = pl310_of_setup,
 879	.save  = pl310_save,
 880	.outer_cache = {
 881		.resume      = pl310_resume,
 882		.inv_range   = l2x0_inv_range,
 883		.clean_range = l2x0_clean_range,
 884		.flush_range = l2x0_flush_range,
 885		.sync        = l2x0_cache_sync,
 886		.flush_all   = l2x0_flush_all,
 887		.inv_all     = l2x0_inv_all,
 888		.disable     = l2x0_disable,
 889	},
 890};
 891
 892static const struct l2x0_of_data l2x0_data = {
 893	.setup = l2x0_of_setup,
 894	.save  = NULL,
 895	.outer_cache = {
 896		.resume      = l2x0_resume,
 897		.inv_range   = l2x0_inv_range,
 898		.clean_range = l2x0_clean_range,
 899		.flush_range = l2x0_flush_range,
 900		.sync        = l2x0_cache_sync,
 901		.flush_all   = l2x0_flush_all,
 902		.inv_all     = l2x0_inv_all,
 903		.disable     = l2x0_disable,
 904	},
 905};
 906
 907static const struct l2x0_of_data aurora_with_outer_data = {
 908	.setup = aurora_of_setup,
 909	.save  = aurora_save,
 910	.outer_cache = {
 911		.resume      = aurora_resume,
 912		.inv_range   = aurora_inv_range,
 913		.clean_range = aurora_clean_range,
 914		.flush_range = aurora_flush_range,
 915		.sync        = l2x0_cache_sync,
 916		.flush_all   = l2x0_flush_all,
 917		.inv_all     = l2x0_inv_all,
 918		.disable     = l2x0_disable,
 919	},
 920};
 921
 922static const struct l2x0_of_data aurora_no_outer_data = {
 923	.setup = aurora_of_setup,
 924	.save  = aurora_save,
 925	.outer_cache = {
 926		.resume      = aurora_resume,
 927	},
 928};
 929
 930static const struct l2x0_of_data tauros3_data = {
 931	.setup = NULL,
 932	.save  = tauros3_save,
 933	/* Tauros3 broadcasts L1 cache operations to L2 */
 934	.outer_cache = {
 935		.resume      = tauros3_resume,
 936	},
 937};
 938
 939static const struct l2x0_of_data bcm_l2x0_data = {
 940	.setup = pl310_of_setup,
 941	.save  = pl310_save,
 942	.outer_cache = {
 943		.resume      = pl310_resume,
 944		.inv_range   = bcm_inv_range,
 945		.clean_range = bcm_clean_range,
 946		.flush_range = bcm_flush_range,
 947		.sync        = l2x0_cache_sync,
 948		.flush_all   = l2x0_flush_all,
 949		.inv_all     = l2x0_inv_all,
 950		.disable     = l2x0_disable,
 951	},
 952};
 953
 954static const struct of_device_id l2x0_ids[] __initconst = {
 955	{ .compatible = "arm,l210-cache", .data = (void *)&l2x0_data },
 956	{ .compatible = "arm,l220-cache", .data = (void *)&l2x0_data },
 957	{ .compatible = "arm,pl310-cache", .data = (void *)&pl310_data },
 958	{ .compatible = "bcm,bcm11351-a2-pl310-cache", /* deprecated name */
 959	  .data = (void *)&bcm_l2x0_data},
 960	{ .compatible = "brcm,bcm11351-a2-pl310-cache",
 961	  .data = (void *)&bcm_l2x0_data},
 962	{ .compatible = "marvell,aurora-outer-cache",
 963	  .data = (void *)&aurora_with_outer_data},
 964	{ .compatible = "marvell,aurora-system-cache",
 965	  .data = (void *)&aurora_no_outer_data},
 966	{ .compatible = "marvell,tauros3-cache",
 967	  .data = (void *)&tauros3_data },
 968	{}
 969};
 970
 971int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
 972{
 973	struct device_node *np;
 974	const struct l2x0_of_data *data;
 975	struct resource res;
 976
 977	np = of_find_matching_node(NULL, l2x0_ids);
 978	if (!np)
 979		return -ENODEV;
 980
 981	if (of_address_to_resource(np, 0, &res))
 982		return -ENODEV;
 983
 984	l2x0_base = ioremap(res.start, resource_size(&res));
 985	if (!l2x0_base)
 986		return -ENOMEM;
 987
 988	l2x0_saved_regs.phy_base = res.start;
 989
 990	data = of_match_node(l2x0_ids, np)->data;
 991
 992	/* L2 configuration can only be changed if the cache is disabled */
 993	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
 994		if (data->setup)
 995			data->setup(np, &aux_val, &aux_mask);
 996
 997		/* For Aurora cache in no-outer mode, select the
 998		 * correct mode using the coprocessor */
 999		if (data == &aurora_no_outer_data)
1000			aurora_broadcast_l2_commands();
1001	}
1002
1003	if (data->save)
1004		data->save();
1005
1006	of_init = true;
1007	memcpy(&outer_cache, &data->outer_cache, sizeof(outer_cache));
1008	l2x0_init(l2x0_base, aux_val, aux_mask);
1009
1010	return 0;
1011}
1012#endif
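
For context (a sketch of the common calling pattern, not text from this file): DT-based platforms of this era typically invoked the driver from their machine-init code, with an aux value/mask pair chosen to preserve the firmware-programmed AUX_CTRL bits.

/* Hypothetical platform glue illustrating how l2x0_of_init() is wired up.
 * aux_val = 0 with aux_mask = ~0 preserves every AUX_CTRL bit already
 * programmed by the boot firmware. */
#include <linux/init.h>
#include <asm/hardware/cache-l2x0.h>

static void __init example_init_machine(void)
{
	/* ... clocks, pinctrl, etc. ... */
	l2x0_of_init(0, ~0UL);	/* find and enable an l2x0-compatible DT node */
}
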
v6.8
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * arch/arm/mm/cache-l2x0.c - L210/L220/L310 cache controller support
   4 *
   5 * Copyright (C) 2007 ARM Limited
   6 */
   7#include <linux/cpu.h>
   8#include <linux/err.h>
   9#include <linux/init.h>
  10#include <linux/smp.h>
  11#include <linux/spinlock.h>
  12#include <linux/log2.h>
  13#include <linux/io.h>
  14#include <linux/of.h>
  15#include <linux/of_address.h>
  16
  17#include <asm/cacheflush.h>
  18#include <asm/cp15.h>
  19#include <asm/cputype.h>
  20#include <asm/hardware/cache-l2x0.h>
  21#include <asm/hardware/cache-aurora-l2.h>
  22#include "cache-tauros3.h"
  23
  24struct l2c_init_data {
  25	const char *type;
  26	unsigned way_size_0;
  27	unsigned num_lock;
  28	void (*of_parse)(const struct device_node *, u32 *, u32 *);
  29	void (*enable)(void __iomem *, unsigned);
  30	void (*fixup)(void __iomem *, u32, struct outer_cache_fns *);
  31	void (*save)(void __iomem *);
  32	void (*configure)(void __iomem *);
  33	void (*unlock)(void __iomem *, unsigned);
  34	struct outer_cache_fns outer_cache;
  35};
  36
  37#define CACHE_LINE_SIZE		32
  38
  39static void __iomem *l2x0_base;
  40static const struct l2c_init_data *l2x0_data;
  41static DEFINE_RAW_SPINLOCK(l2x0_lock);
  42static u32 l2x0_way_mask;	/* Bitmask of active ways */
  43static u32 l2x0_size;
  44static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;
  45
  46struct l2x0_regs l2x0_saved_regs;
  47
  48static bool l2x0_bresp_disable;
  49static bool l2x0_flz_disable;
  50
  51/*
  52 * Common code for all cache controllers.
  53 */
  54static inline void l2c_wait_mask(void __iomem *reg, unsigned long mask)
  55{
  56	/* wait for cache operation by line or way to complete */
  57	while (readl_relaxed(reg) & mask)
  58		cpu_relax();
  59}
  60
  61/*
  62 * By default, we write directly to secure registers.  Platforms must
  63 * override this if they are running non-secure.
  64 */
  65static void l2c_write_sec(unsigned long val, void __iomem *base, unsigned reg)
  66{
  67	if (val == readl_relaxed(base + reg))
  68		return;
  69	if (outer_cache.write_sec)
  70		outer_cache.write_sec(val, reg);
  71	else
  72		writel_relaxed(val, base + reg);
  73}
  74
  75/*
  76 * This should only be called when we have a requirement that the
  77 * register be written due to a work-around, as platforms running
  78 * in non-secure mode may not be able to access this register.
  79 */
  80static inline void l2c_set_debug(void __iomem *base, unsigned long val)
  81{
  82	l2c_write_sec(val, base, L2X0_DEBUG_CTRL);
  83}
  84
  85static void __l2c_op_way(void __iomem *reg)
  86{
  87	writel_relaxed(l2x0_way_mask, reg);
  88	l2c_wait_mask(reg, l2x0_way_mask);
  89}
  90
  91static inline void l2c_unlock(void __iomem *base, unsigned num)
  92{
  93	unsigned i;
  94
  95	for (i = 0; i < num; i++) {
  96		writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_D_BASE +
  97			       i * L2X0_LOCKDOWN_STRIDE);
  98		writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_I_BASE +
  99			       i * L2X0_LOCKDOWN_STRIDE);
 100	}
 101}
 102
 103static void l2c_configure(void __iomem *base)
 104{
 105	l2c_write_sec(l2x0_saved_regs.aux_ctrl, base, L2X0_AUX_CTRL);
 106}
 107
 108/*
 109 * Enable the L2 cache controller.  This function must only be
 110 * called when the cache controller is known to be disabled.
 111 */
 112static void l2c_enable(void __iomem *base, unsigned num_lock)
 113{
 114	unsigned long flags;
 115
 116	if (outer_cache.configure)
 117		outer_cache.configure(&l2x0_saved_regs);
 118	else
 119		l2x0_data->configure(base);
 120
 121	l2x0_data->unlock(base, num_lock);
 122
 123	local_irq_save(flags);
 124	__l2c_op_way(base + L2X0_INV_WAY);
 125	writel_relaxed(0, base + sync_reg_offset);
 126	l2c_wait_mask(base + sync_reg_offset, 1);
 127	local_irq_restore(flags);
 128
 129	l2c_write_sec(L2X0_CTRL_EN, base, L2X0_CTRL);
 130}
 131
 132static void l2c_disable(void)
 133{
 134	void __iomem *base = l2x0_base;
 135
 136	l2x0_pmu_suspend();
 137
 138	outer_cache.flush_all();
 139	l2c_write_sec(0, base, L2X0_CTRL);
 140	dsb(st);
 141}
 142
 143static void l2c_save(void __iomem *base)
 144{
 145	l2x0_saved_regs.aux_ctrl = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
 146}
 147
 148static void l2c_resume(void)
 149{
 150	void __iomem *base = l2x0_base;
 151
 152	/* Do not touch the controller if already enabled. */
 153	if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN))
 154		l2c_enable(base, l2x0_data->num_lock);
 155
 156	l2x0_pmu_resume();
 157}
 158
 159/*
 160 * L2C-210 specific code.
 161 *
 162 * The L2C-2x0 PA, set/way and sync operations are atomic, but we must
 163 * ensure that no background operation is running.  The way operations
 164 * are all background tasks.
 165 *
 166 * While a background operation is in progress, any new operation is
 167 * ignored (unspecified whether this causes an error.)  Thankfully, not
 168 * used on SMP.
 169 *
 170 * Never has a different sync register other than L2X0_CACHE_SYNC, but
 171 * we use sync_reg_offset here so we can share some of this with L2C-310.
 172 */
 173static void __l2c210_cache_sync(void __iomem *base)
 174{
 175	writel_relaxed(0, base + sync_reg_offset);
 176}
 177
 178static void __l2c210_op_pa_range(void __iomem *reg, unsigned long start,
 179	unsigned long end)
 180{
 181	while (start < end) {
 182		writel_relaxed(start, reg);
 183		start += CACHE_LINE_SIZE;
 184	}
 185}
 186
 187static void l2c210_inv_range(unsigned long start, unsigned long end)
 188{
 189	void __iomem *base = l2x0_base;
 190
 191	if (start & (CACHE_LINE_SIZE - 1)) {
 192		start &= ~(CACHE_LINE_SIZE - 1);
 193		writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA);
 194		start += CACHE_LINE_SIZE;
 195	}
 196
 197	if (end & (CACHE_LINE_SIZE - 1)) {
 198		end &= ~(CACHE_LINE_SIZE - 1);
 199		writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA);
 200	}
 201
 202	__l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end);
 203	__l2c210_cache_sync(base);
 204}
 205
 206static void l2c210_clean_range(unsigned long start, unsigned long end)
 207{
 208	void __iomem *base = l2x0_base;
 209
 210	start &= ~(CACHE_LINE_SIZE - 1);
 211	__l2c210_op_pa_range(base + L2X0_CLEAN_LINE_PA, start, end);
 212	__l2c210_cache_sync(base);
 213}
 214
 215static void l2c210_flush_range(unsigned long start, unsigned long end)
 216{
 217	void __iomem *base = l2x0_base;
 218
 219	start &= ~(CACHE_LINE_SIZE - 1);
 220	__l2c210_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA, start, end);
 221	__l2c210_cache_sync(base);
 222}
 223
 224static void l2c210_flush_all(void)
 225{
 226	void __iomem *base = l2x0_base;
 227
 228	BUG_ON(!irqs_disabled());
 229
 230	__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
 231	__l2c210_cache_sync(base);
 232}
 233
 234static void l2c210_sync(void)
 235{
 236	__l2c210_cache_sync(l2x0_base);
 237}
 238
 239static const struct l2c_init_data l2c210_data __initconst = {
 240	.type = "L2C-210",
 241	.way_size_0 = SZ_8K,
 242	.num_lock = 1,
 243	.enable = l2c_enable,
 244	.save = l2c_save,
 245	.configure = l2c_configure,
 246	.unlock = l2c_unlock,
 247	.outer_cache = {
 248		.inv_range = l2c210_inv_range,
 249		.clean_range = l2c210_clean_range,
 250		.flush_range = l2c210_flush_range,
 251		.flush_all = l2c210_flush_all,
 252		.disable = l2c_disable,
 253		.sync = l2c210_sync,
 254		.resume = l2c_resume,
 255	},
 256};
 257
 258/*
 259 * L2C-220 specific code.
 260 *
 261 * All operations are background operations: they have to be waited for.
 262 * Conflicting requests generate a slave error (which will cause an
 263 * imprecise abort.)  Never uses sync_reg_offset, so we hard-code the
 264 * sync register here.
 265 *
 266 * However, we can re-use the l2c210_resume call.
 267 */
 268static inline void __l2c220_cache_sync(void __iomem *base)
 269{
 270	writel_relaxed(0, base + L2X0_CACHE_SYNC);
 271	l2c_wait_mask(base + L2X0_CACHE_SYNC, 1);
 272}
 273
 274static void l2c220_op_way(void __iomem *base, unsigned reg)
 275{
 276	unsigned long flags;
 277
 278	raw_spin_lock_irqsave(&l2x0_lock, flags);
 279	__l2c_op_way(base + reg);
 280	__l2c220_cache_sync(base);
 281	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 282}
 283
 284static unsigned long l2c220_op_pa_range(void __iomem *reg, unsigned long start,
 285	unsigned long end, unsigned long flags)
 286{
 287	raw_spinlock_t *lock = &l2x0_lock;
 288
 289	while (start < end) {
 290		unsigned long blk_end = start + min(end - start, 4096UL);
 291
 292		while (start < blk_end) {
 293			l2c_wait_mask(reg, 1);
 294			writel_relaxed(start, reg);
 295			start += CACHE_LINE_SIZE;
 296		}
 297
 298		if (blk_end < end) {
 299			raw_spin_unlock_irqrestore(lock, flags);
 300			raw_spin_lock_irqsave(lock, flags);
 301		}
 302	}
 303
 304	return flags;
 305}
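
The flags value threaded through l2c220_op_pa_range() implements a latency cap: after each 4 KB block the lock is dropped and immediately re-taken so pending interrupts get a window to run. Schematically (an illustrative restatement of the pattern using the same kernel primitives; demo_line_op() is a hypothetical placeholder):

/* Illustrative sketch of the lock-break pattern above. */
static void demo_line_op(unsigned long pa);	/* placeholder per-line op */

static void demo_op_big_range(unsigned long start, unsigned long end)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			demo_line_op(start);
			start += CACHE_LINE_SIZE;
		}
		if (blk_end < end) {
			/* bound IRQ-off time: let pending interrupts run */
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
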
 306
 307static void l2c220_inv_range(unsigned long start, unsigned long end)
 308{
 309	void __iomem *base = l2x0_base;
 310	unsigned long flags;
 311
 312	raw_spin_lock_irqsave(&l2x0_lock, flags);
 313	if ((start | end) & (CACHE_LINE_SIZE - 1)) {
 314		if (start & (CACHE_LINE_SIZE - 1)) {
 315			start &= ~(CACHE_LINE_SIZE - 1);
 316			writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA);
 317			start += CACHE_LINE_SIZE;
 318		}
 319
 320		if (end & (CACHE_LINE_SIZE - 1)) {
 321			end &= ~(CACHE_LINE_SIZE - 1);
 322			l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
 323			writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA);
 324		}
 325	}
 326
 327	flags = l2c220_op_pa_range(base + L2X0_INV_LINE_PA,
 328				   start, end, flags);
 329	l2c_wait_mask(base + L2X0_INV_LINE_PA, 1);
 330	__l2c220_cache_sync(base);
 331	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 332}
 333
 334static void l2c220_clean_range(unsigned long start, unsigned long end)
 335{
 336	void __iomem *base = l2x0_base;
 337	unsigned long flags;
 338
 339	start &= ~(CACHE_LINE_SIZE - 1);
 340	if ((end - start) >= l2x0_size) {
 341		l2c220_op_way(base, L2X0_CLEAN_WAY);
 342		return;
 343	}
 344
 345	raw_spin_lock_irqsave(&l2x0_lock, flags);
 346	flags = l2c220_op_pa_range(base + L2X0_CLEAN_LINE_PA,
 347				   start, end, flags);
 348	l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
 349	__l2c220_cache_sync(base);
 350	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 351}
 352
 353static void l2c220_flush_range(unsigned long start, unsigned long end)
 354{
 355	void __iomem *base = l2x0_base;
 356	unsigned long flags;
 357
 358	start &= ~(CACHE_LINE_SIZE - 1);
 359	if ((end - start) >= l2x0_size) {
 360		l2c220_op_way(base, L2X0_CLEAN_INV_WAY);
 361		return;
 362	}
 363
 364	raw_spin_lock_irqsave(&l2x0_lock, flags);
 365	flags = l2c220_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA,
 366				   start, end, flags);
 367	l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
 368	__l2c220_cache_sync(base);
 369	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 370}
 371
 372static void l2c220_flush_all(void)
 373{
 374	l2c220_op_way(l2x0_base, L2X0_CLEAN_INV_WAY);
 375}
 376
 377static void l2c220_sync(void)
 378{
 379	unsigned long flags;
 380
 381	raw_spin_lock_irqsave(&l2x0_lock, flags);
 382	__l2c220_cache_sync(l2x0_base);
 383	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 384}
 385
 386static void l2c220_enable(void __iomem *base, unsigned num_lock)
 387{
 388	/*
 389	 * Always enable non-secure access to the lockdown registers -
 390	 * we write to them as part of the L2C enable sequence so they
 391	 * need to be accessible.
 392	 */
 393	l2x0_saved_regs.aux_ctrl |= L220_AUX_CTRL_NS_LOCKDOWN;
 394
 395	l2c_enable(base, num_lock);
 396}
 397
 398static void l2c220_unlock(void __iomem *base, unsigned num_lock)
 399{
 400	if (readl_relaxed(base + L2X0_AUX_CTRL) & L220_AUX_CTRL_NS_LOCKDOWN)
 401		l2c_unlock(base, num_lock);
 402}
 403
 404static const struct l2c_init_data l2c220_data = {
 405	.type = "L2C-220",
 406	.way_size_0 = SZ_8K,
 407	.num_lock = 1,
 408	.enable = l2c220_enable,
 409	.save = l2c_save,
 410	.configure = l2c_configure,
 411	.unlock = l2c220_unlock,
 412	.outer_cache = {
 413		.inv_range = l2c220_inv_range,
 414		.clean_range = l2c220_clean_range,
 415		.flush_range = l2c220_flush_range,
 416		.flush_all = l2c220_flush_all,
 417		.disable = l2c_disable,
 418		.sync = l2c220_sync,
 419		.resume = l2c_resume,
 420	},
 421};
 422
 423/*
 424 * L2C-310 specific code.
 425 *
 426 * Very similar to L2C-210, the PA, set/way and sync operations are atomic,
 427 * and the way operations are all background tasks.  However, issuing an
 428 * operation while a background operation is in progress results in a
 429 * SLVERR response.  We can reuse:
 430 *
 431 *  __l2c210_cache_sync (using sync_reg_offset)
 432 *  l2c210_sync
 433 *  l2c210_inv_range (if 588369 is not applicable)
 434 *  l2c210_clean_range
 435 *  l2c210_flush_range (if 588369 is not applicable)
 436 *  l2c210_flush_all (if 727915 is not applicable)
 437 *
 438 * Errata:
 439 * 588369: PL310 R0P0->R1P0, fixed R2P0.
 440 *	Affects: all clean+invalidate operations
 441 *	clean and invalidate skips the invalidate step, so we need to issue
 442 *	separate operations.  We also require the above debug workaround
 443 *	enclosing this code fragment on affected parts.  On unaffected parts,
 444 *	we must not use this workaround without the debug register writes
 445 *	to avoid exposing a problem similar to 727915.
 446 *
 447 * 727915: PL310 R2P0->R3P0, fixed R3P1.
 448 *	Affects: clean+invalidate by way
 449 *	clean and invalidate by way runs in the background, and a store can
 450 *	hit the line between the clean operation and invalidate operation,
 451 *	resulting in the store being lost.
 452 *
 453 * 752271: PL310 R3P0->R3P1-50REL0, fixed R3P2.
 454 *	Affects: 8x64-bit (double fill) line fetches
 455 *	double fill line fetches can fail to cause dirty data to be evicted
 456 *	from the cache before the new data overwrites the second line.
 457 *
 458 * 753970: PL310 R3P0, fixed R3P1.
 459 *	Affects: sync
 460 *	prevents merging writes after the sync operation, until another L2C
 461 *	operation is performed (or a number of other conditions.)
 462 *
 463 * 769419: PL310 R0P0->R3P1, fixed R3P2.
 464 *	Affects: store buffer
 465 *	store buffer is not automatically drained.
 466 */
 467static void l2c310_inv_range_erratum(unsigned long start, unsigned long end)
 468{
 469	void __iomem *base = l2x0_base;
 470
 471	if ((start | end) & (CACHE_LINE_SIZE - 1)) {
 472		unsigned long flags;
 473
 474		/* Erratum 588369 for both clean+invalidate operations */
 475		raw_spin_lock_irqsave(&l2x0_lock, flags);
 476		l2c_set_debug(base, 0x03);
 477
 478		if (start & (CACHE_LINE_SIZE - 1)) {
 479			start &= ~(CACHE_LINE_SIZE - 1);
 480			writel_relaxed(start, base + L2X0_CLEAN_LINE_PA);
 481			writel_relaxed(start, base + L2X0_INV_LINE_PA);
 482			start += CACHE_LINE_SIZE;
 483		}
 484
 485		if (end & (CACHE_LINE_SIZE - 1)) {
 486			end &= ~(CACHE_LINE_SIZE - 1);
 487			writel_relaxed(end, base + L2X0_CLEAN_LINE_PA);
 488			writel_relaxed(end, base + L2X0_INV_LINE_PA);
 489		}
 490
 491		l2c_set_debug(base, 0x00);
 492		raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 493	}
 494
 495	__l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end);
 496	__l2c210_cache_sync(base);
 497}
 498
 499static void l2c310_flush_range_erratum(unsigned long start, unsigned long end)
 500{
 501	raw_spinlock_t *lock = &l2x0_lock;
 502	unsigned long flags;
 503	void __iomem *base = l2x0_base;
 504
 505	raw_spin_lock_irqsave(lock, flags);
 506	while (start < end) {
 507		unsigned long blk_end = start + min(end - start, 4096UL);
 508
 509		l2c_set_debug(base, 0x03);
 510		while (start < blk_end) {
 511			writel_relaxed(start, base + L2X0_CLEAN_LINE_PA);
 512			writel_relaxed(start, base + L2X0_INV_LINE_PA);
 513			start += CACHE_LINE_SIZE;
 514		}
 515		l2c_set_debug(base, 0x00);
 516
 517		if (blk_end < end) {
 518			raw_spin_unlock_irqrestore(lock, flags);
 519			raw_spin_lock_irqsave(lock, flags);
 520		}
 521	}
 522	raw_spin_unlock_irqrestore(lock, flags);
 523	__l2c210_cache_sync(base);
 524}
 525
 526static void l2c310_flush_all_erratum(void)
 527{
 528	void __iomem *base = l2x0_base;
 529	unsigned long flags;
 530
 531	raw_spin_lock_irqsave(&l2x0_lock, flags);
 532	l2c_set_debug(base, 0x03);
 533	__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
 534	l2c_set_debug(base, 0x00);
 535	__l2c210_cache_sync(base);
 536	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 537}
 538
 539static void __init l2c310_save(void __iomem *base)
 540{
 541	unsigned revision;
 542
 543	l2c_save(base);
 544
 545	l2x0_saved_regs.tag_latency = readl_relaxed(base +
 546		L310_TAG_LATENCY_CTRL);
 547	l2x0_saved_regs.data_latency = readl_relaxed(base +
 548		L310_DATA_LATENCY_CTRL);
 549	l2x0_saved_regs.filter_end = readl_relaxed(base +
 550		L310_ADDR_FILTER_END);
 551	l2x0_saved_regs.filter_start = readl_relaxed(base +
 552		L310_ADDR_FILTER_START);
 553
 554	revision = readl_relaxed(base + L2X0_CACHE_ID) &
 555			L2X0_CACHE_ID_RTL_MASK;
 556
 557	/* From r2p0, there is Prefetch offset/control register */
 558	if (revision >= L310_CACHE_ID_RTL_R2P0)
 559		l2x0_saved_regs.prefetch_ctrl = readl_relaxed(base +
 560							L310_PREFETCH_CTRL);
 561
 562	/* From r3p0, there is Power control register */
 563	if (revision >= L310_CACHE_ID_RTL_R3P0)
 564		l2x0_saved_regs.pwr_ctrl = readl_relaxed(base +
 565							L310_POWER_CTRL);
 566}
 567
 568static void l2c310_configure(void __iomem *base)
 569{
 570	unsigned revision;
 571
 572	l2c_configure(base);
 573
 574	/* restore pl310 setup */
 575	l2c_write_sec(l2x0_saved_regs.tag_latency, base,
 576		      L310_TAG_LATENCY_CTRL);
 577	l2c_write_sec(l2x0_saved_regs.data_latency, base,
 578		      L310_DATA_LATENCY_CTRL);
 579	l2c_write_sec(l2x0_saved_regs.filter_end, base,
 580		      L310_ADDR_FILTER_END);
 581	l2c_write_sec(l2x0_saved_regs.filter_start, base,
 582		      L310_ADDR_FILTER_START);
 583
 584	revision = readl_relaxed(base + L2X0_CACHE_ID) &
 585				 L2X0_CACHE_ID_RTL_MASK;
 586
 587	if (revision >= L310_CACHE_ID_RTL_R2P0)
 588		l2c_write_sec(l2x0_saved_regs.prefetch_ctrl, base,
 589			      L310_PREFETCH_CTRL);
 590	if (revision >= L310_CACHE_ID_RTL_R3P0)
 591		l2c_write_sec(l2x0_saved_regs.pwr_ctrl, base,
 592			      L310_POWER_CTRL);
 593}
 594
 595static int l2c310_starting_cpu(unsigned int cpu)
 596{
 597	set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
 598	return 0;
 599}
 600
 601static int l2c310_dying_cpu(unsigned int cpu)
 602{
 603	set_auxcr(get_auxcr() & ~(BIT(3) | BIT(2) | BIT(1)));
 604	return 0;
 605}
 606
 607static void __init l2c310_enable(void __iomem *base, unsigned num_lock)
 608{
 609	unsigned rev = readl_relaxed(base + L2X0_CACHE_ID) & L2X0_CACHE_ID_RTL_MASK;
 610	bool cortex_a9 = read_cpuid_part() == ARM_CPU_PART_CORTEX_A9;
 611	u32 aux = l2x0_saved_regs.aux_ctrl;
 612
 613	if (rev >= L310_CACHE_ID_RTL_R2P0) {
 614		if (cortex_a9 && !l2x0_bresp_disable) {
 615			aux |= L310_AUX_CTRL_EARLY_BRESP;
 616			pr_info("L2C-310 enabling early BRESP for Cortex-A9\n");
 617		} else if (aux & L310_AUX_CTRL_EARLY_BRESP) {
 618			pr_warn("L2C-310 early BRESP only supported with Cortex-A9\n");
 619			aux &= ~L310_AUX_CTRL_EARLY_BRESP;
 620		}
 621	}
 622
 623	if (cortex_a9 && !l2x0_flz_disable) {
 624		u32 aux_cur = readl_relaxed(base + L2X0_AUX_CTRL);
 625		u32 acr = get_auxcr();
 626
 627		pr_debug("Cortex-A9 ACR=0x%08x\n", acr);
 628
 629		if (acr & BIT(3) && !(aux_cur & L310_AUX_CTRL_FULL_LINE_ZERO))
 630			pr_err("L2C-310: full line of zeros enabled in Cortex-A9 but not L2C-310 - invalid\n");
 631
 632		if (aux & L310_AUX_CTRL_FULL_LINE_ZERO && !(acr & BIT(3)))
 633			pr_err("L2C-310: enabling full line of zeros but not enabled in Cortex-A9\n");
 634
 635		if (!(aux & L310_AUX_CTRL_FULL_LINE_ZERO) && !outer_cache.write_sec) {
 636			aux |= L310_AUX_CTRL_FULL_LINE_ZERO;
 637			pr_info("L2C-310 full line of zeros enabled for Cortex-A9\n");
 638		}
 639	} else if (aux & (L310_AUX_CTRL_FULL_LINE_ZERO | L310_AUX_CTRL_EARLY_BRESP)) {
 640		pr_err("L2C-310: disabling Cortex-A9 specific feature bits\n");
 641		aux &= ~(L310_AUX_CTRL_FULL_LINE_ZERO | L310_AUX_CTRL_EARLY_BRESP);
 642	}
 643
 644	/*
 645	 * Always enable non-secure access to the lockdown registers -
 646	 * we write to them as part of the L2C enable sequence so they
 647	 * need to be accessible.
 648	 */
 649	l2x0_saved_regs.aux_ctrl = aux | L310_AUX_CTRL_NS_LOCKDOWN;
 650
 651	l2c_enable(base, num_lock);
 652
 653	/* Read back resulting AUX_CTRL value as it could have been altered. */
 654	aux = readl_relaxed(base + L2X0_AUX_CTRL);
 655
 656	if (aux & (L310_AUX_CTRL_DATA_PREFETCH | L310_AUX_CTRL_INSTR_PREFETCH)) {
 657		u32 prefetch = readl_relaxed(base + L310_PREFETCH_CTRL);
 658
 659		pr_info("L2C-310 %s%s prefetch enabled, offset %u lines\n",
 660			aux & L310_AUX_CTRL_INSTR_PREFETCH ? "I" : "",
 661			aux & L310_AUX_CTRL_DATA_PREFETCH ? "D" : "",
 662			1 + (prefetch & L310_PREFETCH_CTRL_OFFSET_MASK));
 663	}
 664
 665	/* r3p0 or later has power control register */
 666	if (rev >= L310_CACHE_ID_RTL_R3P0) {
 667		u32 power_ctrl;
 668
 669		power_ctrl = readl_relaxed(base + L310_POWER_CTRL);
 670		pr_info("L2C-310 dynamic clock gating %sabled, standby mode %sabled\n",
 671			power_ctrl & L310_DYNAMIC_CLK_GATING_EN ? "en" : "dis",
 672			power_ctrl & L310_STNDBY_MODE_EN ? "en" : "dis");
 673	}
 674
 675	if (aux & L310_AUX_CTRL_FULL_LINE_ZERO)
 676		cpuhp_setup_state(CPUHP_AP_ARM_L2X0_STARTING,
 677				  "arm/l2x0:starting", l2c310_starting_cpu,
 678				  l2c310_dying_cpu);
 679}
 680
 681static void __init l2c310_fixup(void __iomem *base, u32 cache_id,
 682	struct outer_cache_fns *fns)
 683{
 684	unsigned revision = cache_id & L2X0_CACHE_ID_RTL_MASK;
 685	const char *errata[8];
 686	unsigned n = 0;
 687
 688	if (IS_ENABLED(CONFIG_PL310_ERRATA_588369) &&
 689	    revision < L310_CACHE_ID_RTL_R2P0 &&
 690	    /* For bcm compatibility */
 691	    fns->inv_range == l2c210_inv_range) {
 692		fns->inv_range = l2c310_inv_range_erratum;
 693		fns->flush_range = l2c310_flush_range_erratum;
 694		errata[n++] = "588369";
 695	}
 696
 697	if (IS_ENABLED(CONFIG_PL310_ERRATA_727915) &&
 698	    revision >= L310_CACHE_ID_RTL_R2P0 &&
 699	    revision < L310_CACHE_ID_RTL_R3P1) {
 700		fns->flush_all = l2c310_flush_all_erratum;
 701		errata[n++] = "727915";
 702	}
 703
 704	if (revision >= L310_CACHE_ID_RTL_R3P0 &&
 705	    revision < L310_CACHE_ID_RTL_R3P2) {
 706		u32 val = l2x0_saved_regs.prefetch_ctrl;
 707		if (val & L310_PREFETCH_CTRL_DBL_LINEFILL) {
 708			val &= ~L310_PREFETCH_CTRL_DBL_LINEFILL;
 709			l2x0_saved_regs.prefetch_ctrl = val;
 710			errata[n++] = "752271";
 711		}
 712	}
 713
 714	if (IS_ENABLED(CONFIG_PL310_ERRATA_753970) &&
 715	    revision == L310_CACHE_ID_RTL_R3P0) {
 716		sync_reg_offset = L2X0_DUMMY_REG;
 717		errata[n++] = "753970";
 718	}
 719
 720	if (IS_ENABLED(CONFIG_PL310_ERRATA_769419))
 721		errata[n++] = "769419";
 722
 723	if (n) {
 724		unsigned i;
 725
 726		pr_info("L2C-310 errat%s", n > 1 ? "a" : "um");
 727		for (i = 0; i < n; i++)
 728			pr_cont(" %s", errata[i]);
 729		pr_cont(" enabled\n");
 730	}
 731}
 732
 733static void l2c310_disable(void)
 734{
 735	/*
 736	 * If full-line-of-zeros is enabled, we must first disable it in the
 737	 * Cortex-A9 auxiliary control register before disabling the L2 cache.
 738	 */
 739	if (l2x0_saved_regs.aux_ctrl & L310_AUX_CTRL_FULL_LINE_ZERO)
 740		set_auxcr(get_auxcr() & ~(BIT(3) | BIT(2) | BIT(1)));
 741
 742	l2c_disable();
 743}
 744
 745static void l2c310_resume(void)
 746{
 747	l2c_resume();
 748
 749	/* Re-enable full-line-of-zeros for Cortex-A9 */
 750	if (l2x0_saved_regs.aux_ctrl & L310_AUX_CTRL_FULL_LINE_ZERO)
 751		set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
 752}
 753
 754static void l2c310_unlock(void __iomem *base, unsigned num_lock)
 755{
 756	if (readl_relaxed(base + L2X0_AUX_CTRL) & L310_AUX_CTRL_NS_LOCKDOWN)
 757		l2c_unlock(base, num_lock);
 758}
 759
 760static const struct l2c_init_data l2c310_init_fns __initconst = {
 761	.type = "L2C-310",
 762	.way_size_0 = SZ_8K,
 763	.num_lock = 8,
 764	.enable = l2c310_enable,
 765	.fixup = l2c310_fixup,
 766	.save = l2c310_save,
 767	.configure = l2c310_configure,
 768	.unlock = l2c310_unlock,
 769	.outer_cache = {
 770		.inv_range = l2c210_inv_range,
 771		.clean_range = l2c210_clean_range,
 772		.flush_range = l2c210_flush_range,
 773		.flush_all = l2c210_flush_all,
 774		.disable = l2c310_disable,
 775		.sync = l2c210_sync,
 776		.resume = l2c310_resume,
 777	},
 778};
 779
 780static int __init __l2c_init(const struct l2c_init_data *data,
 781			     u32 aux_val, u32 aux_mask, u32 cache_id, bool nosync)
 782{
 783	struct outer_cache_fns fns;
 784	unsigned way_size_bits, ways;
 785	u32 aux, old_aux;
 786
 787	/*
 788	 * Save the pointer globally so that callbacks which do not receive
 789	 * context from callers can access the structure.
 790	 */
 791	l2x0_data = kmemdup(data, sizeof(*data), GFP_KERNEL);
 792	if (!l2x0_data)
 793		return -ENOMEM;
 794
 795	/*
 796	 * Sanity check the aux values.  aux_mask is the bits we preserve
 797	 * from reading the hardware register, and aux_val is the bits we
 798	 * set.
 799	 */
 800	if (aux_val & aux_mask)
 801		pr_alert("L2C: platform provided aux values permit register corruption.\n");
 802
 803	old_aux = aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
 804	aux &= aux_mask;
 805	aux |= aux_val;
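	/*
	 * Editor's worked example (illustrative values, not in the original
	 * source): with a hardware AUX_CTRL of 0x02020000, an aux_mask of
	 * 0xc200ffff and an aux_val of 0x00400000, the result is
	 * (0x02020000 & 0xc200ffff) | 0x00400000 = 0x02400000.
	 */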
 806
 807	if (old_aux != aux)
 808		pr_warn("L2C: DT/platform modifies aux control register: 0x%08x -> 0x%08x\n",
 809		        old_aux, aux);
 810
 811	/* Determine the number of ways */
 812	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
 813	case L2X0_CACHE_ID_PART_L310:
 814		if ((aux_val | ~aux_mask) & (L2C_AUX_CTRL_WAY_SIZE_MASK | L310_AUX_CTRL_ASSOCIATIVITY_16))
 815			pr_warn("L2C: DT/platform tries to modify or specify cache size\n");
 816		if (aux & (1 << 16))
 817			ways = 16;
 818		else
 819			ways = 8;
 820		break;
 821
 822	case L2X0_CACHE_ID_PART_L210:
 823	case L2X0_CACHE_ID_PART_L220:
 824		ways = (aux >> 13) & 0xf;
 825		break;
 826
 827	case AURORA_CACHE_ID:
 828		ways = (aux >> 13) & 0xf;
 829		ways = 2 << ((ways + 1) >> 2);
 830		break;
 831
 832	default:
 833		/* Assume unknown chips have 8 ways */
 834		ways = 8;
 835		break;
 836	}
 837
 838	l2x0_way_mask = (1 << ways) - 1;
 839
 840	/*
 841	 * way_size_0 is the size that a way_size value of zero would be
 842	 * given the calculation: way_size = way_size_0 << way_size_bits.
 843	 * So, if way_size_bits=0 is reserved, but way_size_bits=1 is 16k,
 844	 * then way_size_0 would be 8k.
 845	 *
 846	 * L2 cache size = number of ways * way size.
 847	 */
 848	way_size_bits = (aux & L2C_AUX_CTRL_WAY_SIZE_MASK) >>
 849			L2C_AUX_CTRL_WAY_SIZE_SHIFT;
 850	l2x0_size = ways * (data->way_size_0 << way_size_bits);
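	/*
	 * Editor's worked example (illustrative values, not in the original
	 * source): on an L2C-310 (way_size_0 = SZ_8K), a way-size field of 3
	 * gives 8K << 3 = 64K per way; with ways = 16 that makes
	 * l2x0_size = 1MB and l2x0_way_mask = 0xffff.
	 */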
 851
 852	fns = data->outer_cache;
 853	fns.write_sec = outer_cache.write_sec;
 854	fns.configure = outer_cache.configure;
 855	if (data->fixup)
 856		data->fixup(l2x0_base, cache_id, &fns);
 857	if (nosync) {
 858		pr_info("L2C: disabling outer sync\n");
 859		fns.sync = NULL;
 860	}
 861
 862	/*
 863	 * Check if the l2x0 controller is already enabled.  If we are
 864	 * booting in non-secure mode, accessing the registers below will fault.
 865	 */
 866	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
 867		l2x0_saved_regs.aux_ctrl = aux;
 868
 869		data->enable(l2x0_base, data->num_lock);
 870	}
 871
 872	outer_cache = fns;
 873
 874	/*
 875	 * It is strange to save the register state before initialisation,
 876	 * but hey, this is what the DT implementations decided to do.
 877	 */
 878	if (data->save)
 879		data->save(l2x0_base);
 880
 881	/* Re-read it in case some bits are reserved. */
 882	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
 883
 884	pr_info("%s cache controller enabled, %d ways, %d kB\n",
 885		data->type, ways, l2x0_size >> 10);
 886	pr_info("%s: CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n",
 887		data->type, cache_id, aux);
 888
 889	l2x0_pmu_register(l2x0_base, cache_id);
 890
 891	return 0;
 892}
 893
 894void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
 895{
 896	const struct l2c_init_data *data;
 897	u32 cache_id;
 898
 899	l2x0_base = base;
 900
 901	cache_id = readl_relaxed(base + L2X0_CACHE_ID);
 902
 903	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
 904	default:
 905	case L2X0_CACHE_ID_PART_L210:
 906		data = &l2c210_data;
 907		break;
 908
 909	case L2X0_CACHE_ID_PART_L220:
 910		data = &l2c220_data;
 911		break;
 912
 913	case L2X0_CACHE_ID_PART_L310:
 914		data = &l2c310_init_fns;
 915		break;
 916	}
 917
 918	/* Read back current (default) hardware configuration */
 919	if (data->save)
 920		data->save(l2x0_base);
 921
 922	__l2c_init(data, aux_val, aux_mask, cache_id, false);
 923}
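/*
 * Editor's usage sketch (hypothetical platform code, not part of this
 * file): a non-DT board would map the controller and then call the
 * function above, e.g.:
 *
 *	void __iomem *l2cc = ioremap(PLAT_L2CC_PHYS, SZ_4K);
 *
 *	if (l2cc)
 *		l2x0_init(l2cc, 0, ~0U);
 *
 * where PLAT_L2CC_PHYS is an assumed platform constant, and the
 * aux_val/aux_mask pair (0, ~0U) keeps the hardware configuration as-is.
 */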
 924
 925#ifdef CONFIG_OF
 926static int l2_wt_override;
 927
 928/* Aurora doesn't have the cache ID register available, so we have to
 929 * pass it through the device tree */
 930static u32 cache_id_part_number_from_dt;
 931
 932/**
 933 * l2x0_cache_size_of_parse() - read cache size parameters from DT
 934 * @np: the device tree node for the l2 cache
 935 * @aux_val: pointer to machine-supplied auxiliary register value, to
 936 * be augmented by the call (bits to be set to 1)
 937 * @aux_mask: pointer to machine-supplied auxiliary register mask, to
 938 * be augmented by the call (bits to be set to 0)
 939 * @associativity: variable to return the calculated associativity in
 940 * @max_way_size: the maximum size in bytes for the cache ways
 941 */
 942static int __init l2x0_cache_size_of_parse(const struct device_node *np,
 943					    u32 *aux_val, u32 *aux_mask,
 944					    u32 *associativity,
 945					    u32 max_way_size)
 946{
 947	u32 mask = 0, val = 0;
 948	u32 cache_size = 0, sets = 0;
 949	u32 way_size_bits = 1;
 950	u32 way_size = 0;
 951	u32 block_size = 0;
 952	u32 line_size = 0;
 953
 954	of_property_read_u32(np, "cache-size", &cache_size);
 955	of_property_read_u32(np, "cache-sets", &sets);
 956	of_property_read_u32(np, "cache-block-size", &block_size);
 957	of_property_read_u32(np, "cache-line-size", &line_size);
 958
 959	if (!cache_size || !sets)
 960		return -ENODEV;
 961
 962	/* On all these L2 caches, the line size equals the block size */
 963	if (!line_size) {
 964		if (block_size) {
 965			/* If linesize is not given, it is equal to blocksize */
 966			line_size = block_size;
 967		} else {
 968			/* Fall back to known size */
 969			pr_warn("L2C OF: no cache block/line size given: "
 970				"falling back to default size %d bytes\n",
 971				CACHE_LINE_SIZE);
 972			line_size = CACHE_LINE_SIZE;
 973		}
 974	}
 975
 976	if (line_size != CACHE_LINE_SIZE)
 977		pr_warn("L2C OF: DT supplied line size %d bytes does "
 978			"not match hardware line size of %d bytes\n",
 979			line_size,
 980			CACHE_LINE_SIZE);
 981
 982	/*
 983	 * Since:
 984	 * set size = cache size / sets
 985	 * ways = cache size / (sets * line size)
 986	 * way size = cache size / (cache size / (sets * line size))
 987	 * way size = sets * line size
 988	 * associativity = ways = cache size / way size
 989	 */
 990	way_size = sets * line_size;
 991	*associativity = cache_size / way_size;
 992
 993	if (way_size > max_way_size) {
 994		pr_err("L2C OF: way size %dKB is too large\n", way_size >> 10);
 995		return -EINVAL;
 996	}
 997
 998	pr_info("L2C OF: override cache size: %d bytes (%dKB)\n",
 999		cache_size, cache_size >> 10);
1000	pr_info("L2C OF: override line size: %d bytes\n", line_size);
1001	pr_info("L2C OF: override way size: %d bytes (%dKB)\n",
1002		way_size, way_size >> 10);
1003	pr_info("L2C OF: override associativity: %d\n", *associativity);
1004
1005	/*
1006	 * Calculate the way-size bits [19:17] of the auxiliary control
1007	 * register: 512KB -> 6, 256KB -> 5, ... 16KB -> 1
1008	 */
1009	way_size_bits = ilog2(way_size >> 10) - 3;
1010	if (way_size_bits < 1 || way_size_bits > 6) {
1011		pr_err("L2C OF: cache way size illegal: %dKB is not mapped\n",
1012		       way_size >> 10);
1013		return -EINVAL;
1014	}
1015
1016	mask |= L2C_AUX_CTRL_WAY_SIZE_MASK;
1017	val |= (way_size_bits << L2C_AUX_CTRL_WAY_SIZE_SHIFT);
1018
1019	*aux_val &= ~mask;
1020	*aux_val |= val;
1021	*aux_mask &= ~mask;
1022
1023	return 0;
1024}
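/*
 * Editor's sketch (not part of the kernel source): the way-size arithmetic
 * above, reduced to a self-contained userspace check.  ilog2_pow2() is a
 * hypothetical stand-in for the kernel's ilog2() on power-of-two values,
 * and the numbers mirror an illustrative DT configuration.
 */
#if 0	/* illustrative only */
#include <stdio.h>

static unsigned int ilog2_pow2(unsigned int v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned int cache_size = 512 * 1024;	/* cache-size = <0x80000> */
	unsigned int sets = 2048;		/* cache-sets = <2048> */
	unsigned int line_size = 32;		/* cache-line-size = <32> */
	unsigned int way_size = sets * line_size;	/* 64KB */
	unsigned int assoc = cache_size / way_size;	/* 8 ways */
	unsigned int bits = ilog2_pow2(way_size >> 10) - 3;	/* 3 */

	printf("way_size=%uKB assoc=%u way_size_bits=%u\n",
	       way_size >> 10, assoc, bits);
	return 0;
}
#endif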
1025
1026static void __init l2x0_of_parse(const struct device_node *np,
1027				 u32 *aux_val, u32 *aux_mask)
1028{
1029	u32 data[2] = { 0, 0 };
1030	u32 tag = 0;
1031	u32 dirty = 0;
1032	u32 val = 0, mask = 0;
1033	u32 assoc;
1034	int ret;
1035
1036	of_property_read_u32(np, "arm,tag-latency", &tag);
1037	if (tag) {
1038		mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;
1039		val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;
1040	}
1041
1042	of_property_read_u32_array(np, "arm,data-latency",
1043				   data, ARRAY_SIZE(data));
1044	if (data[0] && data[1]) {
1045		mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
1046			L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
1047		val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
1048		       ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
1049	}
1050
1051	of_property_read_u32(np, "arm,dirty-latency", &dirty);
1052	if (dirty) {
1053		mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
1054		val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
1055	}
1056
1057	if (of_property_read_bool(np, "arm,parity-enable")) {
1058		mask &= ~L2C_AUX_CTRL_PARITY_ENABLE;
1059		val |= L2C_AUX_CTRL_PARITY_ENABLE;
1060	} else if (of_property_read_bool(np, "arm,parity-disable")) {
1061		mask &= ~L2C_AUX_CTRL_PARITY_ENABLE;
1062	}
1063
1064	if (of_property_read_bool(np, "arm,shared-override")) {
1065		mask &= ~L2C_AUX_CTRL_SHARED_OVERRIDE;
1066		val |= L2C_AUX_CTRL_SHARED_OVERRIDE;
1067	}
1068
1069	ret = l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_256K);
1070	if (ret)
1071		return;
1072
1073	if (assoc > 8) {
1074		pr_err("l2x0 of: cache settings yield too high an associativity\n");
1075		pr_err("l2x0 of: %d calculated, max 8\n", assoc);
1076	} else {
1077		mask |= L2X0_AUX_CTRL_ASSOC_MASK;
1078		val |= (assoc << L2X0_AUX_CTRL_ASSOC_SHIFT);
1079	}
1080
1081	*aux_val &= ~mask;
1082	*aux_val |= val;
1083	*aux_mask &= ~mask;
1084}
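/*
 * Editor's example (hypothetical values, not part of this file): a device
 * tree node exercising the parser above.  The property names are the ones
 * read by the code; the address and numbers are illustrative only.
 *
 *	L2: cache-controller@fff12000 {
 *		compatible = "arm,l220-cache";
 *		reg = <0xfff12000 0x1000>;
 *		cache-unified;
 *		cache-level = <2>;
 *		cache-size = <0x20000>;		// 128KB
 *		cache-sets = <512>;
 *		cache-line-size = <32>;
 *		arm,tag-latency = <1>;
 *		arm,data-latency = <1 1>;
 *		arm,dirty-latency = <1>;
 *		arm,shared-override;
 *	};
 *
 * With these values l2x0_cache_size_of_parse() computes a 16KB way size
 * and an associativity of 8.
 */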
1085
1086static const struct l2c_init_data of_l2c210_data __initconst = {
1087	.type = "L2C-210",
1088	.way_size_0 = SZ_8K,
1089	.num_lock = 1,
1090	.of_parse = l2x0_of_parse,
1091	.enable = l2c_enable,
1092	.save = l2c_save,
1093	.configure = l2c_configure,
1094	.unlock = l2c_unlock,
1095	.outer_cache = {
1096		.inv_range   = l2c210_inv_range,
1097		.clean_range = l2c210_clean_range,
1098		.flush_range = l2c210_flush_range,
1099		.flush_all   = l2c210_flush_all,
1100		.disable     = l2c_disable,
1101		.sync        = l2c210_sync,
1102		.resume      = l2c_resume,
1103	},
1104};
1105
1106static const struct l2c_init_data of_l2c220_data __initconst = {
1107	.type = "L2C-220",
1108	.way_size_0 = SZ_8K,
1109	.num_lock = 1,
1110	.of_parse = l2x0_of_parse,
1111	.enable = l2c220_enable,
1112	.save = l2c_save,
1113	.configure = l2c_configure,
1114	.unlock = l2c220_unlock,
1115	.outer_cache = {
1116		.inv_range   = l2c220_inv_range,
1117		.clean_range = l2c220_clean_range,
1118		.flush_range = l2c220_flush_range,
1119		.flush_all   = l2c220_flush_all,
1120		.disable     = l2c_disable,
1121		.sync        = l2c220_sync,
1122		.resume      = l2c_resume,
1123	},
1124};
1125
1126static void __init l2c310_of_parse(const struct device_node *np,
1127	u32 *aux_val, u32 *aux_mask)
1128{
1129	u32 data[3] = { 0, 0, 0 };
1130	u32 tag[3] = { 0, 0, 0 };
1131	u32 filter[2] = { 0, 0 };
1132	u32 assoc;
1133	u32 prefetch;
1134	u32 power;
1135	u32 val;
1136	int ret;
1137
1138	of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
1139	if (tag[0] && tag[1] && tag[2])
1140		l2x0_saved_regs.tag_latency =
1141			L310_LATENCY_CTRL_RD(tag[0] - 1) |
1142			L310_LATENCY_CTRL_WR(tag[1] - 1) |
1143			L310_LATENCY_CTRL_SETUP(tag[2] - 1);
1144
1145	of_property_read_u32_array(np, "arm,data-latency",
1146				   data, ARRAY_SIZE(data));
1147	if (data[0] && data[1] && data[2])
1148		l2x0_saved_regs.data_latency =
1149			L310_LATENCY_CTRL_RD(data[0] - 1) |
1150			L310_LATENCY_CTRL_WR(data[1] - 1) |
1151			L310_LATENCY_CTRL_SETUP(data[2] - 1);
1152
1153	of_property_read_u32_array(np, "arm,filter-ranges",
1154				   filter, ARRAY_SIZE(filter));
1155	if (filter[1]) {
1156		l2x0_saved_regs.filter_end =
1157					ALIGN(filter[0] + filter[1], SZ_1M);
1158		l2x0_saved_regs.filter_start = (filter[0] & ~(SZ_1M - 1))
1159					| L310_ADDR_FILTER_EN;
1160	}
1161
1162	ret = l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_512K);
1163	if (!ret) {
1164		switch (assoc) {
1165		case 16:
1166			*aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK;
1167			*aux_val |= L310_AUX_CTRL_ASSOCIATIVITY_16;
1168			*aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK;
1169			break;
1170		case 8:
1171			*aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK;
1172			*aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK;
1173			break;
1174		default:
1175			pr_err("L2C-310 OF cache associativity %d invalid, only 8 or 16 permitted\n",
1176			       assoc);
1177			break;
1178		}
1179	}
1180
1181	if (of_property_read_bool(np, "arm,shared-override")) {
1182		*aux_val |= L2C_AUX_CTRL_SHARED_OVERRIDE;
1183		*aux_mask &= ~L2C_AUX_CTRL_SHARED_OVERRIDE;
1184	}
1185
1186	if (of_property_read_bool(np, "arm,parity-enable")) {
1187		*aux_val |= L2C_AUX_CTRL_PARITY_ENABLE;
1188		*aux_mask &= ~L2C_AUX_CTRL_PARITY_ENABLE;
1189	} else if (of_property_read_bool(np, "arm,parity-disable")) {
1190		*aux_val &= ~L2C_AUX_CTRL_PARITY_ENABLE;
1191		*aux_mask &= ~L2C_AUX_CTRL_PARITY_ENABLE;
1192	}
1193
1194	if (of_property_read_bool(np, "arm,early-bresp-disable"))
1195		l2x0_bresp_disable = true;
1196
1197	if (of_property_read_bool(np, "arm,full-line-zero-disable"))
1198		l2x0_flz_disable = true;
1199
1200	prefetch = l2x0_saved_regs.prefetch_ctrl;
1201
1202	ret = of_property_read_u32(np, "arm,double-linefill", &val);
1203	if (ret == 0) {
1204		if (val)
1205			prefetch |= L310_PREFETCH_CTRL_DBL_LINEFILL;
1206		else
1207			prefetch &= ~L310_PREFETCH_CTRL_DBL_LINEFILL;
1208	} else if (ret != -EINVAL) {
1209		pr_err("L2C-310 OF arm,double-linefill property value is missing\n");
1210	}
1211
1212	ret = of_property_read_u32(np, "arm,double-linefill-incr", &val);
1213	if (ret == 0) {
1214		if (val)
1215			prefetch |= L310_PREFETCH_CTRL_DBL_LINEFILL_INCR;
1216		else
1217			prefetch &= ~L310_PREFETCH_CTRL_DBL_LINEFILL_INCR;
1218	} else if (ret != -EINVAL) {
1219		pr_err("L2C-310 OF arm,double-linefill-incr property value is missing\n");
1220	}
1221
1222	ret = of_property_read_u32(np, "arm,double-linefill-wrap", &val);
1223	if (ret == 0) {
1224		if (!val)
1225			prefetch |= L310_PREFETCH_CTRL_DBL_LINEFILL_WRAP;
1226		else
1227			prefetch &= ~L310_PREFETCH_CTRL_DBL_LINEFILL_WRAP;
1228	} else if (ret != -EINVAL) {
1229		pr_err("L2C-310 OF arm,double-linefill-wrap property value is missing\n");
1230	}
1231
1232	ret = of_property_read_u32(np, "arm,prefetch-drop", &val);
1233	if (ret == 0) {
1234		if (val)
1235			prefetch |= L310_PREFETCH_CTRL_PREFETCH_DROP;
1236		else
1237			prefetch &= ~L310_PREFETCH_CTRL_PREFETCH_DROP;
1238	} else if (ret != -EINVAL) {
1239		pr_err("L2C-310 OF arm,prefetch-drop property value is missing\n");
1240	}
1241
1242	ret = of_property_read_u32(np, "arm,prefetch-offset", &val);
1243	if (ret == 0) {
1244		prefetch &= ~L310_PREFETCH_CTRL_OFFSET_MASK;
1245		prefetch |= val & L310_PREFETCH_CTRL_OFFSET_MASK;
1246	} else if (ret != -EINVAL) {
1247		pr_err("L2C-310 OF arm,prefetch-offset property value is missing\n");
1248	}
1249
1250	ret = of_property_read_u32(np, "prefetch-data", &val);
1251	if (ret == 0) {
1252		if (val) {
1253			prefetch |= L310_PREFETCH_CTRL_DATA_PREFETCH;
1254			*aux_val |= L310_PREFETCH_CTRL_DATA_PREFETCH;
1255		} else {
1256			prefetch &= ~L310_PREFETCH_CTRL_DATA_PREFETCH;
1257			*aux_val &= ~L310_PREFETCH_CTRL_DATA_PREFETCH;
1258		}
1259		*aux_mask &= ~L310_PREFETCH_CTRL_DATA_PREFETCH;
1260	} else if (ret != -EINVAL) {
1261		pr_err("L2C-310 OF prefetch-data property value is missing\n");
1262	}
1263
1264	ret = of_property_read_u32(np, "prefetch-instr", &val);
1265	if (ret == 0) {
1266		if (val) {
1267			prefetch |= L310_PREFETCH_CTRL_INSTR_PREFETCH;
1268			*aux_val |= L310_PREFETCH_CTRL_INSTR_PREFETCH;
1269		} else {
1270			prefetch &= ~L310_PREFETCH_CTRL_INSTR_PREFETCH;
1271			*aux_val &= ~L310_PREFETCH_CTRL_INSTR_PREFETCH;
1272		}
1273		*aux_mask &= ~L310_PREFETCH_CTRL_INSTR_PREFETCH;
1274	} else if (ret != -EINVAL) {
1275		pr_err("L2C-310 OF prefetch-instr property value is missing\n");
1276	}
1277
1278	l2x0_saved_regs.prefetch_ctrl = prefetch;
1279
1280	power = l2x0_saved_regs.pwr_ctrl |
1281		L310_DYNAMIC_CLK_GATING_EN | L310_STNDBY_MODE_EN;
1282
1283	ret = of_property_read_u32(np, "arm,dynamic-clock-gating", &val);
1284	if (!ret) {
1285		if (!val)
1286			power &= ~L310_DYNAMIC_CLK_GATING_EN;
1287	} else if (ret != -EINVAL) {
1288		pr_err("L2C-310 OF dynamic-clock-gating property value is missing or invalid\n");
1289	}
1290	ret = of_property_read_u32(np, "arm,standby-mode", &val);
1291	if (!ret) {
1292		if (!val)
1293			power &= ~L310_STNDBY_MODE_EN;
1294	} else if (ret != -EINVAL) {
1295		pr_err("L2C-310 OF standby-mode property value is missing or invalid\n");
1296	}
1297
1298	l2x0_saved_regs.pwr_ctrl = power;
1299}
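/*
 * Editor's example (hypothetical values, not part of this file): an
 * L2C-310 node exercising the extra properties parsed above.  The
 * three-cell latencies are <read write setup>, matching the
 * L310_LATENCY_CTRL_*() encoding used in the code.
 *
 *	L2: cache-controller@f8f02000 {
 *		compatible = "arm,pl310-cache";
 *		reg = <0xf8f02000 0x1000>;
 *		cache-unified;
 *		cache-level = <2>;
 *		arm,tag-latency = <1 1 1>;
 *		arm,data-latency = <2 1 1>;
 *		arm,filter-ranges = <0x00000000 0x40000000>;
 *		prefetch-data = <1>;
 *		prefetch-instr = <1>;
 *		arm,prefetch-offset = <7>;
 *	};
 */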
1300
1301static const struct l2c_init_data of_l2c310_data __initconst = {
1302	.type = "L2C-310",
1303	.way_size_0 = SZ_8K,
1304	.num_lock = 8,
1305	.of_parse = l2c310_of_parse,
1306	.enable = l2c310_enable,
1307	.fixup = l2c310_fixup,
1308	.save  = l2c310_save,
1309	.configure = l2c310_configure,
1310	.unlock = l2c310_unlock,
1311	.outer_cache = {
1312		.inv_range   = l2c210_inv_range,
1313		.clean_range = l2c210_clean_range,
1314		.flush_range = l2c210_flush_range,
1315		.flush_all   = l2c210_flush_all,
1316		.disable     = l2c310_disable,
1317		.sync        = l2c210_sync,
1318		.resume      = l2c310_resume,
1319	},
1320};
1321
1322/*
1323 * This is a variant of the of_l2c310_data with .sync set to
1324 * NULL. Outer sync operations are not needed when the system is I/O
1325 * coherent, and potentially harmful in certain situations (PCIe/PL310
1326 * deadlock on Armada 375/38x due to hardware I/O coherency). The
1327 * other operations are kept because they are infrequent (therefore do
1328 * not cause the deadlock in practice) and needed for secondary CPU
1329 * boot and other power management activities.
1330 */
1331static const struct l2c_init_data of_l2c310_coherent_data __initconst = {
1332	.type = "L2C-310 Coherent",
1333	.way_size_0 = SZ_8K,
1334	.num_lock = 8,
1335	.of_parse = l2c310_of_parse,
1336	.enable = l2c310_enable,
1337	.fixup = l2c310_fixup,
1338	.save  = l2c310_save,
1339	.configure = l2c310_configure,
1340	.unlock = l2c310_unlock,
1341	.outer_cache = {
1342		.inv_range   = l2c210_inv_range,
1343		.clean_range = l2c210_clean_range,
1344		.flush_range = l2c210_flush_range,
1345		.flush_all   = l2c210_flush_all,
1346		.disable     = l2c310_disable,
1347		.resume      = l2c310_resume,
1348	},
1349};
1350
1351/*
1352 * Note that the end addresses passed to Linux primitives are
1353 * noninclusive, while the hardware cache range operations use
1354 * inclusive start and end addresses.
1355 */
1356static unsigned long aurora_range_end(unsigned long start, unsigned long end)
1357{
1358	/*
1359	 * Limit the number of cache lines processed at once,
1360	 * since cache range operations stall the CPU pipeline
1361	 * until completion.
1362	 */
1363	if (end > start + AURORA_MAX_RANGE_SIZE)
1364		end = start + AURORA_MAX_RANGE_SIZE;
1365
1366	/*
1367	 * Cache range operations can't straddle a page boundary.
1368	 */
1369	if (end > PAGE_ALIGN(start+1))
1370		end = PAGE_ALIGN(start+1);
1371
1372	return end;
1373}
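/*
 * Editor's worked example (assuming 4KB pages and that
 * AURORA_MAX_RANGE_SIZE exceeds the remaining bytes in the page): for
 * aurora_range_end(0x10000f00, 0x10002000) the page-boundary clamp
 * applies, returning 0x10001000, so the caller below operates on
 * [0x10000f00, 0x10000fff] and then continues from 0x10001000.
 */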
1374
1375static void aurora_pa_range(unsigned long start, unsigned long end,
1376			    unsigned long offset)
1377{
1378	void __iomem *base = l2x0_base;
1379	unsigned long range_end;
1380	unsigned long flags;
1381
1382	/*
1383	 * align the start and end addresses to the cache line size
1384	 */
1385	start &= ~(CACHE_LINE_SIZE - 1);
1386	end = ALIGN(end, CACHE_LINE_SIZE);
1387
1388	/*
1389	 * perform operation on all full cache lines between 'start' and 'end'
1390	 */
1391	while (start < end) {
1392		range_end = aurora_range_end(start, end);
1393
1394		raw_spin_lock_irqsave(&l2x0_lock, flags);
1395		writel_relaxed(start, base + AURORA_RANGE_BASE_ADDR_REG);
1396		writel_relaxed(range_end - CACHE_LINE_SIZE, base + offset);
1397		raw_spin_unlock_irqrestore(&l2x0_lock, flags);
1398
1399		writel_relaxed(0, base + AURORA_SYNC_REG);
1400		start = range_end;
1401	}
1402}
1403static void aurora_inv_range(unsigned long start, unsigned long end)
1404{
1405	aurora_pa_range(start, end, AURORA_INVAL_RANGE_REG);
1406}
1407
1408static void aurora_clean_range(unsigned long start, unsigned long end)
1409{
1410	/*
1411	 * If L2 is forced to WT, the L2 will always be clean and we
1412	 * don't need to do anything here.
1413	 */
1414	if (!l2_wt_override)
1415		aurora_pa_range(start, end, AURORA_CLEAN_RANGE_REG);
1416}
1417
1418static void aurora_flush_range(unsigned long start, unsigned long end)
1419{
1420	if (l2_wt_override)
1421		aurora_pa_range(start, end, AURORA_INVAL_RANGE_REG);
1422	else
1423		aurora_pa_range(start, end, AURORA_FLUSH_RANGE_REG);
1424}
1425
1426static void aurora_flush_all(void)
1427{
1428	void __iomem *base = l2x0_base;
1429	unsigned long flags;
1430
1431	/* clean all ways */
1432	raw_spin_lock_irqsave(&l2x0_lock, flags);
1433	__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
1434	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
1435
1436	writel_relaxed(0, base + AURORA_SYNC_REG);
1437}
1438
1439static void aurora_cache_sync(void)
1440{
1441	writel_relaxed(0, l2x0_base + AURORA_SYNC_REG);
1442}
1443
1444static void aurora_disable(void)
1445{
1446	void __iomem *base = l2x0_base;
1447	unsigned long flags;
1448
1449	raw_spin_lock_irqsave(&l2x0_lock, flags);
1450	__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
1451	writel_relaxed(0, base + AURORA_SYNC_REG);
1452	l2c_write_sec(0, base, L2X0_CTRL);
1453	dsb(st);
1454	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
1455}
1456
1457static void aurora_save(void __iomem *base)
1458{
1459	l2x0_saved_regs.ctrl = readl_relaxed(base + L2X0_CTRL);
1460	l2x0_saved_regs.aux_ctrl = readl_relaxed(base + L2X0_AUX_CTRL);
1461}
1462
1463/*
1464 * For an Aurora cache in no-outer mode, enable broadcasting of L1
1465 * cache commands to L2 via the CP15 coprocessor.
1466 */
1467static void __init aurora_enable_no_outer(void __iomem *base,
1468	unsigned num_lock)
1469{
1470	u32 u;
1471
1472	asm volatile("mrc p15, 1, %0, c15, c2, 0" : "=r" (u));
1473	u |= AURORA_CTRL_FW;		/* Set the FW bit */
1474	asm volatile("mcr p15, 1, %0, c15, c2, 0" : : "r" (u));
1475
1476	isb();
1477
1478	l2c_enable(base, num_lock);
1479}
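/*
 * Editor's note: the MRC/MCR pair above accesses CP15 with opc1=1,
 * CRn=c15, CRm=c2, opc2=0; setting AURORA_CTRL_FW there is what turns on
 * the forwarding ("broadcast") of L1 cache maintenance operations to the
 * L2, so no outer_cache range callbacks are needed in this mode.
 */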
1480
1481static void __init aurora_fixup(void __iomem *base, u32 cache_id,
1482	struct outer_cache_fns *fns)
1483{
1484	sync_reg_offset = AURORA_SYNC_REG;
1485}
1486
1487static void __init aurora_of_parse(const struct device_node *np,
1488				u32 *aux_val, u32 *aux_mask)
1489{
1490	u32 val = AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU;
1491	u32 mask =  AURORA_ACR_REPLACEMENT_MASK;
1492
1493	of_property_read_u32(np, "cache-id-part",
1494			&cache_id_part_number_from_dt);
1495
1496	/* Determine and save the write policy */
1497	l2_wt_override = of_property_read_bool(np, "wt-override");
1498
1499	if (l2_wt_override) {
1500		val |= AURORA_ACR_FORCE_WRITE_THRO_POLICY;
1501		mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK;
1502	}
1503
1504	if (of_property_read_bool(np, "marvell,ecc-enable")) {
1505		mask |= AURORA_ACR_ECC_EN;
1506		val |= AURORA_ACR_ECC_EN;
1507	}
1508
1509	if (of_property_read_bool(np, "arm,parity-enable")) {
1510		mask |= AURORA_ACR_PARITY_EN;
1511		val |= AURORA_ACR_PARITY_EN;
1512	} else if (of_property_read_bool(np, "arm,parity-disable")) {
1513		mask |= AURORA_ACR_PARITY_EN;
1514	}
1515
1516	*aux_val &= ~mask;
1517	*aux_val |= val;
1518	*aux_mask &= ~mask;
1519}
1520
1521static const struct l2c_init_data of_aurora_with_outer_data __initconst = {
1522	.type = "Aurora",
1523	.way_size_0 = SZ_4K,
1524	.num_lock = 4,
1525	.of_parse = aurora_of_parse,
1526	.enable = l2c_enable,
1527	.fixup = aurora_fixup,
1528	.save  = aurora_save,
1529	.configure = l2c_configure,
1530	.unlock = l2c_unlock,
1531	.outer_cache = {
1532		.inv_range   = aurora_inv_range,
1533		.clean_range = aurora_clean_range,
1534		.flush_range = aurora_flush_range,
1535		.flush_all   = aurora_flush_all,
1536		.disable     = aurora_disable,
1537		.sync	     = aurora_cache_sync,
1538		.resume      = l2c_resume,
1539	},
1540};
1541
1542static const struct l2c_init_data of_aurora_no_outer_data __initconst = {
1543	.type = "Aurora",
1544	.way_size_0 = SZ_4K,
1545	.num_lock = 4,
1546	.of_parse = aurora_of_parse,
1547	.enable = aurora_enable_no_outer,
1548	.fixup = aurora_fixup,
1549	.save  = aurora_save,
1550	.configure = l2c_configure,
1551	.unlock = l2c_unlock,
1552	.outer_cache = {
1553		.resume      = l2c_resume,
1554	},
1555};
1556
1557/*
1558 * For certain Broadcom SoCs, depending on the address range, different offsets
1559 * need to be added to the address before passing it to L2 for
1560 * invalidation/clean/flush
1561 *
1562 * Section Address Range              Offset        EMI
1563 *   1     0x00000000 - 0x3FFFFFFF    0x80000000    VC
1564 *   2     0x40000000 - 0xBFFFFFFF    0x40000000    SYS
1565 *   3     0xC0000000 - 0xFFFFFFFF    0x80000000    VC
1566 *
1567 * When the start and end addresses fall in two different sections, we
1568 * need to break the L2 operation into two, each within its own section.
1569 * For example, to invalidate a range starting at 0xBFFF0000 and ending
1570 * at 0xC0001000, we invalidate 1) 0xBFFF0000 - 0xBFFFFFFF and
1571 * 2) 0xC0000000 - 0xC0001000.
1572 *
1573 * Note 1:
1574 * Breaking a single L2 operation into two may cost some performance,
1575 * but keep in mind that the cross-section case is very rare.
1576 *
1577 * Note 2:
1578 * We do not need to handle the case where the start address is in
1579 * Section 1 and the end address is in Section 3, since it is not a
1580 * valid use case.
1581 *
1582 * Note 3:
1583 * In practical terms, Section 1 can no longer be used on rev A2, so
1584 * the code does not need to handle it at all.
1585 *
1586 */
1587#define BCM_SYS_EMI_START_ADDR        0x40000000UL
1588#define BCM_VC_EMI_SEC3_START_ADDR    0xC0000000UL
1589
1590#define BCM_SYS_EMI_OFFSET            0x40000000UL
1591#define BCM_VC_EMI_OFFSET             0x80000000UL
1592
1593static inline int bcm_addr_is_sys_emi(unsigned long addr)
1594{
1595	return (addr >= BCM_SYS_EMI_START_ADDR) &&
1596		(addr < BCM_VC_EMI_SEC3_START_ADDR);
1597}
1598
1599static inline unsigned long bcm_l2_phys_addr(unsigned long addr)
1600{
1601	if (bcm_addr_is_sys_emi(addr))
1602		return addr + BCM_SYS_EMI_OFFSET;
1603	else
1604		return addr + BCM_VC_EMI_OFFSET;
1605}
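/*
 * Editor's worked example: 0x50000000 lies in the SYS EMI section
 * (0x40000000 - 0xBFFFFFFF), so bcm_l2_phys_addr() returns
 * 0x50000000 + BCM_SYS_EMI_OFFSET = 0x90000000; an address such as
 * 0xC0001000 falls outside that section and gets BCM_VC_EMI_OFFSET
 * instead.
 */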
1606
1607static void bcm_inv_range(unsigned long start, unsigned long end)
1608{
1609	unsigned long new_start, new_end;
1610
1611	BUG_ON(start < BCM_SYS_EMI_START_ADDR);
1612
1613	if (unlikely(end <= start))
1614		return;
1615
1616	new_start = bcm_l2_phys_addr(start);
1617	new_end = bcm_l2_phys_addr(end);
1618
1619	/* normal case, no cross section between start and end */
1620	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
1621		l2c210_inv_range(new_start, new_end);
1622		return;
1623	}
1624
1625	/* The range crosses sections, so it can only cross from section
1626	 * 2 into section 3
1627	 */
1628	l2c210_inv_range(new_start,
1629		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
1630	l2c210_inv_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
1631		new_end);
1632}
1633
1634static void bcm_clean_range(unsigned long start, unsigned long end)
1635{
1636	unsigned long new_start, new_end;
1637
1638	BUG_ON(start < BCM_SYS_EMI_START_ADDR);
1639
1640	if (unlikely(end <= start))
1641		return;
1642
1643	new_start = bcm_l2_phys_addr(start);
1644	new_end = bcm_l2_phys_addr(end);
1645
1646	/* normal case, no cross section between start and end */
1647	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
1648		l2c210_clean_range(new_start, new_end);
1649		return;
1650	}
1651
1652	/* The range crosses sections, so it can only cross from section
1653	 * 2 into section 3
1654	 */
1655	l2c210_clean_range(new_start,
1656		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
1657	l2c210_clean_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
1658		new_end);
1659}
1660
1661static void bcm_flush_range(unsigned long start, unsigned long end)
1662{
1663	unsigned long new_start, new_end;
1664
1665	BUG_ON(start < BCM_SYS_EMI_START_ADDR);
1666
1667	if (unlikely(end <= start))
1668		return;
1669
1670	if ((end - start) >= l2x0_size) {
1671		outer_cache.flush_all();
1672		return;
1673	}
1674
1675	new_start = bcm_l2_phys_addr(start);
1676	new_end = bcm_l2_phys_addr(end);
1677
1678	/* normal case, no cross section between start and end */
1679	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
1680		l2c210_flush_range(new_start, new_end);
1681		return;
1682	}
1683
1684	/* The range crosses sections, so it can only cross from section
1685	 * 2 into section 3
1686	 */
1687	l2c210_flush_range(new_start,
1688		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
1689	l2c210_flush_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
1690		new_end);
1691}
1692
1693/* Broadcom L2C-310s are based on ARM's r3p2 or later and require no fixups */
1694static const struct l2c_init_data of_bcm_l2x0_data __initconst = {
1695	.type = "BCM-L2C-310",
1696	.way_size_0 = SZ_8K,
1697	.num_lock = 8,
1698	.of_parse = l2c310_of_parse,
1699	.enable = l2c310_enable,
1700	.save  = l2c310_save,
1701	.configure = l2c310_configure,
1702	.unlock = l2c310_unlock,
1703	.outer_cache = {
1704		.inv_range   = bcm_inv_range,
1705		.clean_range = bcm_clean_range,
1706		.flush_range = bcm_flush_range,
1707		.flush_all   = l2c210_flush_all,
1708		.disable     = l2c310_disable,
1709		.sync        = l2c210_sync,
1710		.resume      = l2c310_resume,
1711	},
1712};
1713
1714static void __init tauros3_save(void __iomem *base)
1715{
1716	l2c_save(base);
1717
1718	l2x0_saved_regs.aux2_ctrl =
1719		readl_relaxed(base + TAUROS3_AUX2_CTRL);
1720	l2x0_saved_regs.prefetch_ctrl =
1721		readl_relaxed(base + L310_PREFETCH_CTRL);
1722}
1723
1724static void tauros3_configure(void __iomem *base)
1725{
1726	l2c_configure(base);
1727	writel_relaxed(l2x0_saved_regs.aux2_ctrl,
1728		       base + TAUROS3_AUX2_CTRL);
1729	writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
1730		       base + L310_PREFETCH_CTRL);
1731}
1732
1733static const struct l2c_init_data of_tauros3_data __initconst = {
1734	.type = "Tauros3",
1735	.way_size_0 = SZ_8K,
1736	.num_lock = 8,
1737	.enable = l2c_enable,
1738	.save  = tauros3_save,
1739	.configure = tauros3_configure,
1740	.unlock = l2c_unlock,
1741	/* Tauros3 broadcasts L1 cache operations to L2 */
1742	.outer_cache = {
1743		.resume      = l2c_resume,
1744	},
1745};
1746
1747#define L2C_ID(name, fns) { .compatible = name, .data = (void *)&fns }
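/*
 * Editor's note: for example, L2C_ID("arm,l210-cache", of_l2c210_data)
 * expands to { .compatible = "arm,l210-cache",
 * .data = (void *)&of_l2c210_data }, the usual of_device_id
 * match-table idiom.
 */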
1748static const struct of_device_id l2x0_ids[] __initconst = {
1749	L2C_ID("arm,l210-cache", of_l2c210_data),
1750	L2C_ID("arm,l220-cache", of_l2c220_data),
1751	L2C_ID("arm,pl310-cache", of_l2c310_data),
1752	L2C_ID("brcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
1753	L2C_ID("marvell,aurora-outer-cache", of_aurora_with_outer_data),
1754	L2C_ID("marvell,aurora-system-cache", of_aurora_no_outer_data),
1755	L2C_ID("marvell,tauros3-cache", of_tauros3_data),
1756	/* Deprecated IDs */
1757	L2C_ID("bcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
1758	{}
1759};
1760
1761int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
1762{
1763	const struct l2c_init_data *data;
1764	struct device_node *np;
1765	struct resource res;
1766	u32 cache_id, old_aux;
1767	u32 cache_level = 2;
1768	bool nosync = false;
1769
1770	np = of_find_matching_node(NULL, l2x0_ids);
1771	if (!np)
1772		return -ENODEV;
1773
1774	if (of_address_to_resource(np, 0, &res))
1775		return -ENODEV;
1776
1777	l2x0_base = ioremap(res.start, resource_size(&res));
1778	if (!l2x0_base)
1779		return -ENOMEM;
1780
1781	l2x0_saved_regs.phy_base = res.start;
1782
1783	data = of_match_node(l2x0_ids, np)->data;
1784
1785	if (of_device_is_compatible(np, "arm,pl310-cache") &&
1786	    of_property_read_bool(np, "arm,io-coherent"))
1787		data = &of_l2c310_coherent_data;
1788
1789	old_aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
1790	if (old_aux != ((old_aux & aux_mask) | aux_val)) {
1791		pr_warn("L2C: platform modifies aux control register: 0x%08x -> 0x%08x\n",
1792		        old_aux, (old_aux & aux_mask) | aux_val);
1793	} else if (aux_mask != ~0U && aux_val != 0) {
1794		pr_alert("L2C: platform provided aux values match the hardware, so have no effect.  Please remove them.\n");
1795	}
1796
1797	/* All L2 caches are unified, so this property should be specified */
1798	if (!of_property_read_bool(np, "cache-unified"))
1799		pr_err("L2C: device tree does not specify a unified cache\n");
1800
1801	if (of_property_read_u32(np, "cache-level", &cache_level))
1802		pr_err("L2C: device tree does not specify cache-level\n");
1803
1804	if (cache_level != 2)
1805		pr_err("L2C: device tree specifies invalid cache level\n");
1806
1807	nosync = of_property_read_bool(np, "arm,outer-sync-disable");
1808
1809	/* Read back current (default) hardware configuration */
1810	if (data->save)
1811		data->save(l2x0_base);
1812
1813	/* L2 configuration can only be changed if the cache is disabled */
1814	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN))
1815		if (data->of_parse)
1816			data->of_parse(np, &aux_val, &aux_mask);
1817
1818	if (cache_id_part_number_from_dt)
1819		cache_id = cache_id_part_number_from_dt;
1820	else
1821		cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
1822
1823	return __l2c_init(data, aux_val, aux_mask, cache_id, nosync);
1824}
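/*
 * Editor's usage sketch (hypothetical machine code, not part of this
 * file): a DT-based board with no platform-specific AUX_CTRL
 * requirements would typically call
 *
 *	l2x0_of_init(0, ~0U);
 *
 * from its ->init_machine() hook, keeping every hardware-configured bit
 * and setting none.
 */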
1825#endif