Linux v3.5.6
   1/*
   2 * This file is subject to the terms and conditions of the GNU General Public
   3 * License.  See the file "COPYING" in the main directory of this archive
   4 * for more details.
   5 *
   6 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
   7 * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle (ralf@gnu.org)
   8 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
   9 */
  10#include <linux/hardirq.h>
  11#include <linux/init.h>
  12#include <linux/highmem.h>
  13#include <linux/kernel.h>
  14#include <linux/linkage.h>
  15#include <linux/sched.h>
  16#include <linux/smp.h>
  17#include <linux/mm.h>
  18#include <linux/module.h>
  19#include <linux/bitops.h>
  20
  21#include <asm/bcache.h>
  22#include <asm/bootinfo.h>
  23#include <asm/cache.h>
  24#include <asm/cacheops.h>
  25#include <asm/cpu.h>
  26#include <asm/cpu-features.h>
  27#include <asm/io.h>
  28#include <asm/page.h>
  29#include <asm/pgtable.h>
  30#include <asm/r4kcache.h>
  31#include <asm/sections.h>
  32#include <asm/mmu_context.h>
  33#include <asm/war.h>
  34#include <asm/cacheflush.h> /* for run_uncached() */
  35#include <asm/traps.h>
  36
  37/*
   38 * Special variant of smp_call_function for use by cache functions:
  39 *
  40 *  o No return value
  41 *  o collapses to normal function call on UP kernels
  42 *  o collapses to normal function call on systems with a single shared
  43 *    primary cache.
  44 *  o doesn't disable interrupts on the local CPU
  45 */
  46static inline void r4k_on_each_cpu(void (*func) (void *info), void *info)
  47{
  48	preempt_disable();
  49
  50#if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
  51	smp_call_function(func, info, 1);
  52#endif
  53	func(info);
  54	preempt_enable();
  55}
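/*
 * Note that on CONFIG_MIPS_MT_SMP/SMTC kernels the TC/VPE "CPUs" share
 * their primary caches, so the smp_call_function() above is compiled out
 * and only the local func(info) call remains.
 */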
  56
  57#if defined(CONFIG_MIPS_CMP)
  58#define cpu_has_safe_index_cacheops 0
  59#else
  60#define cpu_has_safe_index_cacheops 1
  61#endif
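/*
 * Index-based cache ops are presumably unsafe under CONFIG_MIPS_CMP
 * because a line may be dirty in another core's cache, where a local
 * index op cannot reach it, while address-based (hit) ops are kept
 * coherent by the hardware; hence the range-based (hit) paths are used
 * instead on such systems.
 */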
  62
  63/*
  64 * Must die.
  65 */
  66static unsigned long icache_size __read_mostly;
  67static unsigned long dcache_size __read_mostly;
  68static unsigned long scache_size __read_mostly;
  69
  70/*
   71 * Dummy cache handling routines for machines without board caches
  72 */
  73static void cache_noop(void) {}
  74
  75static struct bcache_ops no_sc_ops = {
  76	.bc_enable = (void *)cache_noop,
  77	.bc_disable = (void *)cache_noop,
  78	.bc_wback_inv = (void *)cache_noop,
  79	.bc_inv = (void *)cache_noop
  80};
  81
  82struct bcache_ops *bcops = &no_sc_ops;
  83
  84#define cpu_is_r4600_v1_x()	((read_c0_prid() & 0xfffffff0) == 0x00002010)
  85#define cpu_is_r4600_v2_x()	((read_c0_prid() & 0xfffffff0) == 0x00002020)
  86
  87#define R4600_HIT_CACHEOP_WAR_IMPL					\
  88do {									\
  89	if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())		\
  90		*(volatile unsigned long *)CKSEG1;			\
  91	if (R4600_V1_HIT_CACHEOP_WAR)					\
  92		__asm__ __volatile__("nop;nop;nop;nop");		\
  93} while (0)
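/*
 * The dummy uncached load from CKSEG1 (R4600 v2.x) and the nop padding
 * (R4600 v1.x) above work around hit-cacheop errata on those cores; see
 * asm/war.h, included above, for the R4600_V[12]_HIT_CACHEOP_WAR
 * definitions.
 */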
  94
  95static void (*r4k_blast_dcache_page)(unsigned long addr);
  96
  97static inline void r4k_blast_dcache_page_dc32(unsigned long addr)
  98{
  99	R4600_HIT_CACHEOP_WAR_IMPL;
 100	blast_dcache32_page(addr);
 101}
 102
 103static inline void r4k_blast_dcache_page_dc64(unsigned long addr)
 104{
 105	R4600_HIT_CACHEOP_WAR_IMPL;
 106	blast_dcache64_page(addr);
 107}
 108
 109static void __cpuinit r4k_blast_dcache_page_setup(void)
 110{
 111	unsigned long  dc_lsize = cpu_dcache_line_size();
 112
 113	if (dc_lsize == 0)
 114		r4k_blast_dcache_page = (void *)cache_noop;
 115	else if (dc_lsize == 16)
 116		r4k_blast_dcache_page = blast_dcache16_page;
 117	else if (dc_lsize == 32)
 118		r4k_blast_dcache_page = r4k_blast_dcache_page_dc32;
 119	else if (dc_lsize == 64)
 120		r4k_blast_dcache_page = r4k_blast_dcache_page_dc64;
 121}
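/*
 * All of the *_setup() routines in this file follow the same pattern:
 * probe the cache line size once at boot and bind a function pointer to
 * the matching size-specialized blast routine from asm/r4kcache.h, so
 * the hot flush paths need no per-call line-size switch.
 */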
 122
 123static void (* r4k_blast_dcache_page_indexed)(unsigned long addr);
 124
 125static void __cpuinit r4k_blast_dcache_page_indexed_setup(void)
 126{
 127	unsigned long dc_lsize = cpu_dcache_line_size();
 128
 129	if (dc_lsize == 0)
 130		r4k_blast_dcache_page_indexed = (void *)cache_noop;
 131	else if (dc_lsize == 16)
 132		r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed;
 133	else if (dc_lsize == 32)
 134		r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed;
 135	else if (dc_lsize == 64)
 136		r4k_blast_dcache_page_indexed = blast_dcache64_page_indexed;
 137}
 138
 139static void (* r4k_blast_dcache)(void);
 140
 141static void __cpuinit r4k_blast_dcache_setup(void)
 142{
 143	unsigned long dc_lsize = cpu_dcache_line_size();
 144
 145	if (dc_lsize == 0)
 146		r4k_blast_dcache = (void *)cache_noop;
 147	else if (dc_lsize == 16)
 148		r4k_blast_dcache = blast_dcache16;
 149	else if (dc_lsize == 32)
 150		r4k_blast_dcache = blast_dcache32;
 151	else if (dc_lsize == 64)
 152		r4k_blast_dcache = blast_dcache64;
 153}
 154
 155/* force code alignment (used for TX49XX_ICACHE_INDEX_INV_WAR) */
 156#define JUMP_TO_ALIGN(order) \
 157	__asm__ __volatile__( \
 158		"b\t1f\n\t" \
 159		".align\t" #order "\n\t" \
 160		"1:\n\t" \
 161		)
 162#define CACHE32_UNROLL32_ALIGN	JUMP_TO_ALIGN(10) /* 32 * 32 = 1024 */
 163#define CACHE32_UNROLL32_ALIGN2	JUMP_TO_ALIGN(11)
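/*
 * cache32_unroll32() covers one 1kB chunk (32 lines x 32 bytes) per call.
 * The alignment macros pin the loops that follow into a chunk of known
 * parity so that, with TX49XX_ICACHE_INDEX_INV_WAR, the code never
 * invalidates the icache chunk it is currently executing from: blast the
 * chunks of the opposite parity, realign, then blast the rest.
 */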
 164
 165static inline void blast_r4600_v1_icache32(void)
 166{
 167	unsigned long flags;
 168
 169	local_irq_save(flags);
 170	blast_icache32();
 171	local_irq_restore(flags);
 172}
 173
 174static inline void tx49_blast_icache32(void)
 175{
 176	unsigned long start = INDEX_BASE;
 177	unsigned long end = start + current_cpu_data.icache.waysize;
 178	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
 179	unsigned long ws_end = current_cpu_data.icache.ways <<
 180	                       current_cpu_data.icache.waybit;
 181	unsigned long ws, addr;
 182
 183	CACHE32_UNROLL32_ALIGN2;
  184	/* We're in an even chunk; blast the odd chunks */
 185	for (ws = 0; ws < ws_end; ws += ws_inc)
 186		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
 187			cache32_unroll32(addr|ws, Index_Invalidate_I);
 188	CACHE32_UNROLL32_ALIGN;
  189	/* We're in an odd chunk; blast the even chunks */
 190	for (ws = 0; ws < ws_end; ws += ws_inc)
 191		for (addr = start; addr < end; addr += 0x400 * 2)
 192			cache32_unroll32(addr|ws, Index_Invalidate_I);
 193}
 194
 195static inline void blast_icache32_r4600_v1_page_indexed(unsigned long page)
 196{
 197	unsigned long flags;
 198
 199	local_irq_save(flags);
 200	blast_icache32_page_indexed(page);
 201	local_irq_restore(flags);
 202}
 203
 204static inline void tx49_blast_icache32_page_indexed(unsigned long page)
 205{
 206	unsigned long indexmask = current_cpu_data.icache.waysize - 1;
 207	unsigned long start = INDEX_BASE + (page & indexmask);
 208	unsigned long end = start + PAGE_SIZE;
 209	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
 210	unsigned long ws_end = current_cpu_data.icache.ways <<
 211	                       current_cpu_data.icache.waybit;
 212	unsigned long ws, addr;
 213
 214	CACHE32_UNROLL32_ALIGN2;
  215	/* We're in an even chunk; blast the odd chunks */
 216	for (ws = 0; ws < ws_end; ws += ws_inc)
 217		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
 218			cache32_unroll32(addr|ws, Index_Invalidate_I);
 219	CACHE32_UNROLL32_ALIGN;
  220	/* We're in an odd chunk; blast the even chunks */
 221	for (ws = 0; ws < ws_end; ws += ws_inc)
 222		for (addr = start; addr < end; addr += 0x400 * 2)
 223			cache32_unroll32(addr|ws, Index_Invalidate_I);
 224}
 225
 226static void (* r4k_blast_icache_page)(unsigned long addr);
 227
 228static void __cpuinit r4k_blast_icache_page_setup(void)
 229{
 230	unsigned long ic_lsize = cpu_icache_line_size();
 231
 232	if (ic_lsize == 0)
 233		r4k_blast_icache_page = (void *)cache_noop;
 234	else if (ic_lsize == 16)
 235		r4k_blast_icache_page = blast_icache16_page;
 236	else if (ic_lsize == 32)
 237		r4k_blast_icache_page = blast_icache32_page;
 238	else if (ic_lsize == 64)
 239		r4k_blast_icache_page = blast_icache64_page;
 240}
 241
 242
 243static void (* r4k_blast_icache_page_indexed)(unsigned long addr);
 244
 245static void __cpuinit r4k_blast_icache_page_indexed_setup(void)
 246{
 247	unsigned long ic_lsize = cpu_icache_line_size();
 248
 249	if (ic_lsize == 0)
 250		r4k_blast_icache_page_indexed = (void *)cache_noop;
 251	else if (ic_lsize == 16)
 252		r4k_blast_icache_page_indexed = blast_icache16_page_indexed;
 253	else if (ic_lsize == 32) {
 254		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
 255			r4k_blast_icache_page_indexed =
 256				blast_icache32_r4600_v1_page_indexed;
 257		else if (TX49XX_ICACHE_INDEX_INV_WAR)
 258			r4k_blast_icache_page_indexed =
 259				tx49_blast_icache32_page_indexed;
 260		else
 261			r4k_blast_icache_page_indexed =
 262				blast_icache32_page_indexed;
 263	} else if (ic_lsize == 64)
 264		r4k_blast_icache_page_indexed = blast_icache64_page_indexed;
 265}
 266
 267static void (* r4k_blast_icache)(void);
 268
 269static void __cpuinit r4k_blast_icache_setup(void)
 270{
 271	unsigned long ic_lsize = cpu_icache_line_size();
 272
 273	if (ic_lsize == 0)
 274		r4k_blast_icache = (void *)cache_noop;
 275	else if (ic_lsize == 16)
 276		r4k_blast_icache = blast_icache16;
 277	else if (ic_lsize == 32) {
 278		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
 279			r4k_blast_icache = blast_r4600_v1_icache32;
 280		else if (TX49XX_ICACHE_INDEX_INV_WAR)
 281			r4k_blast_icache = tx49_blast_icache32;
 282		else
 283			r4k_blast_icache = blast_icache32;
 284	} else if (ic_lsize == 64)
 285		r4k_blast_icache = blast_icache64;
 286}
 287
 288static void (* r4k_blast_scache_page)(unsigned long addr);
 289
 290static void __cpuinit r4k_blast_scache_page_setup(void)
 291{
 292	unsigned long sc_lsize = cpu_scache_line_size();
 293
 294	if (scache_size == 0)
 295		r4k_blast_scache_page = (void *)cache_noop;
 296	else if (sc_lsize == 16)
 297		r4k_blast_scache_page = blast_scache16_page;
 298	else if (sc_lsize == 32)
 299		r4k_blast_scache_page = blast_scache32_page;
 300	else if (sc_lsize == 64)
 301		r4k_blast_scache_page = blast_scache64_page;
 302	else if (sc_lsize == 128)
 303		r4k_blast_scache_page = blast_scache128_page;
 304}
 305
 306static void (* r4k_blast_scache_page_indexed)(unsigned long addr);
 307
 308static void __cpuinit r4k_blast_scache_page_indexed_setup(void)
 309{
 310	unsigned long sc_lsize = cpu_scache_line_size();
 311
 312	if (scache_size == 0)
 313		r4k_blast_scache_page_indexed = (void *)cache_noop;
 314	else if (sc_lsize == 16)
 315		r4k_blast_scache_page_indexed = blast_scache16_page_indexed;
 316	else if (sc_lsize == 32)
 317		r4k_blast_scache_page_indexed = blast_scache32_page_indexed;
 318	else if (sc_lsize == 64)
 319		r4k_blast_scache_page_indexed = blast_scache64_page_indexed;
 320	else if (sc_lsize == 128)
 321		r4k_blast_scache_page_indexed = blast_scache128_page_indexed;
 322}
 323
 324static void (* r4k_blast_scache)(void);
 325
 326static void __cpuinit r4k_blast_scache_setup(void)
 327{
 328	unsigned long sc_lsize = cpu_scache_line_size();
 329
 330	if (scache_size == 0)
 331		r4k_blast_scache = (void *)cache_noop;
 332	else if (sc_lsize == 16)
 333		r4k_blast_scache = blast_scache16;
 334	else if (sc_lsize == 32)
 335		r4k_blast_scache = blast_scache32;
 336	else if (sc_lsize == 64)
 337		r4k_blast_scache = blast_scache64;
 338	else if (sc_lsize == 128)
 339		r4k_blast_scache = blast_scache128;
 340}
 341
 342static inline void local_r4k___flush_cache_all(void * args)
 343{
 344#if defined(CONFIG_CPU_LOONGSON2)
 345	r4k_blast_scache();
 346	return;
 347#endif
 348	r4k_blast_dcache();
 349	r4k_blast_icache();
 350
 351	switch (current_cpu_type()) {
 352	case CPU_R4000SC:
 353	case CPU_R4000MC:
 354	case CPU_R4400SC:
 355	case CPU_R4400MC:
 356	case CPU_R10000:
 357	case CPU_R12000:
 358	case CPU_R14000:
 359		r4k_blast_scache();
 360	}
 361}
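/*
 * The switch above deliberately has no break: each listed R4000SC/R4400SC
 * and R10000-class CPU falls through to r4k_blast_scache(), so the
 * secondary cache is flushed in addition to the primaries.
 */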
 362
 363static void r4k___flush_cache_all(void)
 364{
 365	r4k_on_each_cpu(local_r4k___flush_cache_all, NULL);
 366}
 367
 368static inline int has_valid_asid(const struct mm_struct *mm)
 369{
 370#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
 371	int i;
 372
 373	for_each_online_cpu(i)
 374		if (cpu_context(i, mm))
 375			return 1;
 376
 377	return 0;
 378#else
 379	return cpu_context(smp_processor_id(), mm);
 380#endif
 381}
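/*
 * An mm that was never assigned an ASID on a CPU cannot have any of its
 * pages in that CPU's caches, which lets the flush routines below bail
 * out early.  On MT kernels every online CPU must be checked, since the
 * mm may be live on a sibling TC/VPE.
 */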
 382
 383static void r4k__flush_cache_vmap(void)
 384{
 385	r4k_blast_dcache();
 386}
 387
 388static void r4k__flush_cache_vunmap(void)
 389{
 390	r4k_blast_dcache();
 391}
 392
 393static inline void local_r4k_flush_cache_range(void * args)
 394{
 395	struct vm_area_struct *vma = args;
 396	int exec = vma->vm_flags & VM_EXEC;
 397
 398	if (!(has_valid_asid(vma->vm_mm)))
 399		return;
 400
 401	r4k_blast_dcache();
 402	if (exec)
 403		r4k_blast_icache();
 404}
 405
 406static void r4k_flush_cache_range(struct vm_area_struct *vma,
 407	unsigned long start, unsigned long end)
 408{
 409	int exec = vma->vm_flags & VM_EXEC;
 410
 411	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc))
 412		r4k_on_each_cpu(local_r4k_flush_cache_range, vma);
 413}
 414
 415static inline void local_r4k_flush_cache_mm(void * args)
 416{
 417	struct mm_struct *mm = args;
 418
 419	if (!has_valid_asid(mm))
 420		return;
 421
 422	/*
 423	 * Kludge alert.  For obscure reasons R4000SC and R4400SC go nuts if we
  424	 * only flush the primary caches, while R10000 and R12000 behave sanely ...
 425	 * R4000SC and R4400SC indexed S-cache ops also invalidate primary
 426	 * caches, so we can bail out early.
 427	 */
 428	if (current_cpu_type() == CPU_R4000SC ||
 429	    current_cpu_type() == CPU_R4000MC ||
 430	    current_cpu_type() == CPU_R4400SC ||
 431	    current_cpu_type() == CPU_R4400MC) {
 432		r4k_blast_scache();
 433		return;
 434	}
 435
 436	r4k_blast_dcache();
 437}
 438
 439static void r4k_flush_cache_mm(struct mm_struct *mm)
 440{
 441	if (!cpu_has_dc_aliases)
 442		return;
 443
 444	r4k_on_each_cpu(local_r4k_flush_cache_mm, mm);
 445}
 446
 447struct flush_cache_page_args {
 448	struct vm_area_struct *vma;
 449	unsigned long addr;
 450	unsigned long pfn;
 451};
 452
 453static inline void local_r4k_flush_cache_page(void *args)
 454{
 455	struct flush_cache_page_args *fcp_args = args;
 456	struct vm_area_struct *vma = fcp_args->vma;
 457	unsigned long addr = fcp_args->addr;
 458	struct page *page = pfn_to_page(fcp_args->pfn);
 459	int exec = vma->vm_flags & VM_EXEC;
 460	struct mm_struct *mm = vma->vm_mm;
 461	int map_coherent = 0;
 462	pgd_t *pgdp;
 463	pud_t *pudp;
 464	pmd_t *pmdp;
 465	pte_t *ptep;
 466	void *vaddr;
 467
 468	/*
  469	 * If the mm owns no valid ASID yet, it cannot possibly have
  470	 * gotten this page into the cache.
 471	 */
 472	if (!has_valid_asid(mm))
 473		return;
 474
 475	addr &= PAGE_MASK;
 476	pgdp = pgd_offset(mm, addr);
 477	pudp = pud_offset(pgdp, addr);
 478	pmdp = pmd_offset(pudp, addr);
 479	ptep = pte_offset(pmdp, addr);
 480
 481	/*
 482	 * If the page isn't marked valid, the page cannot possibly be
 483	 * in the cache.
 484	 */
 485	if (!(pte_present(*ptep)))
 486		return;
 487
 488	if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID))
 489		vaddr = NULL;
 490	else {
 491		/*
 492		 * Use kmap_coherent or kmap_atomic to do flushes for
  493		 * an ASID other than the current one.
 494		 */
 495		map_coherent = (cpu_has_dc_aliases &&
 496				page_mapped(page) && !Page_dcache_dirty(page));
 497		if (map_coherent)
 498			vaddr = kmap_coherent(page, addr);
 499		else
 500			vaddr = kmap_atomic(page);
 501		addr = (unsigned long)vaddr;
 502	}
 503
 504	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
 505		r4k_blast_dcache_page(addr);
 506		if (exec && !cpu_icache_snoops_remote_store)
 507			r4k_blast_scache_page(addr);
 508	}
 509	if (exec) {
 510		if (vaddr && cpu_has_vtag_icache && mm == current->active_mm) {
 511			int cpu = smp_processor_id();
 512
 513			if (cpu_context(cpu, mm) != 0)
 514				drop_mmu_context(mm, cpu);
 515		} else
 516			r4k_blast_icache_page(addr);
 517	}
 518
 519	if (vaddr) {
 520		if (map_coherent)
 521			kunmap_coherent();
 522		else
 523			kunmap_atomic(vaddr);
 524	}
 525}
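/*
 * Summary of the vaddr logic above: a page that is valid in the current
 * mm can be flushed through its user virtual address directly; otherwise
 * it is temporarily mapped, with kmap_coherent() picking a kernel address
 * of the same cache color when the D-cache has aliases, or with plain
 * kmap_atomic() otherwise, and flushed through that mapping.
 */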
 526
 527static void r4k_flush_cache_page(struct vm_area_struct *vma,
 528	unsigned long addr, unsigned long pfn)
 529{
 530	struct flush_cache_page_args args;
 531
 532	args.vma = vma;
 533	args.addr = addr;
 534	args.pfn = pfn;
 535
 536	r4k_on_each_cpu(local_r4k_flush_cache_page, &args);
 537}
 538
 539static inline void local_r4k_flush_data_cache_page(void * addr)
 540{
 541	r4k_blast_dcache_page((unsigned long) addr);
 542}
 543
 544static void r4k_flush_data_cache_page(unsigned long addr)
 545{
 546	if (in_atomic())
 547		local_r4k_flush_data_cache_page((void *)addr);
 548	else
 549		r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr);
 550}
 551
 552struct flush_icache_range_args {
 553	unsigned long start;
 554	unsigned long end;
 555};
 556
 557static inline void local_r4k_flush_icache_range(unsigned long start, unsigned long end)
 558{
 559	if (!cpu_has_ic_fills_f_dc) {
 560		if (end - start >= dcache_size) {
 561			r4k_blast_dcache();
 562		} else {
 563			R4600_HIT_CACHEOP_WAR_IMPL;
 564			protected_blast_dcache_range(start, end);
 565		}
 566	}
 567
 568	if (end - start > icache_size)
 569		r4k_blast_icache();
 570	else
 571		protected_blast_icache_range(start, end);
 572}
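/*
 * Size heuristic: once the range is at least as large as the cache
 * itself, an indexed whole-cache blast is cheaper than walking the range
 * line by line with the protected (fault-tolerant) hit ops.
 */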
 573
 574static inline void local_r4k_flush_icache_range_ipi(void *args)
 575{
 576	struct flush_icache_range_args *fir_args = args;
 577	unsigned long start = fir_args->start;
 578	unsigned long end = fir_args->end;
 579
 580	local_r4k_flush_icache_range(start, end);
 581}
 582
 583static void r4k_flush_icache_range(unsigned long start, unsigned long end)
 584{
 585	struct flush_icache_range_args args;
 586
 587	args.start = start;
 588	args.end = end;
 589
 590	r4k_on_each_cpu(local_r4k_flush_icache_range_ipi, &args);
 591	instruction_hazard();
 592}
 593
 594#ifdef CONFIG_DMA_NONCOHERENT
 595
 596static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
 597{
 598	/* Catch bad driver code */
 599	BUG_ON(size == 0);
 600
 601	if (cpu_has_inclusive_pcaches) {
 602		if (size >= scache_size)
 603			r4k_blast_scache();
 604		else
 605			blast_scache_range(addr, addr + size);
 606		__sync();
 607		return;
 608	}
 609
 610	/*
 611	 * Either no secondary cache or the available caches don't have the
  612	 * subset property, so we have to flush the primary caches
  613	 * explicitly.
 614	 */
 615	if (cpu_has_safe_index_cacheops && size >= dcache_size) {
 616		r4k_blast_dcache();
 617	} else {
 618		R4600_HIT_CACHEOP_WAR_IMPL;
 619		blast_dcache_range(addr, addr + size);
 620	}
 621
 622	bc_wback_inv(addr, size);
 623	__sync();
 624}
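/*
 * Writeback-invalidate for DMA: flush the buffer out of whichever cache
 * levels may hold it (the inclusive S-cache alone suffices when present),
 * push any board-level cache via bc_wback_inv(), and fence with __sync()
 * so the data is visible to the device.
 */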
 625
 626static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
 627{
 628	/* Catch bad driver code */
 629	BUG_ON(size == 0);
 630
 631	if (cpu_has_inclusive_pcaches) {
 632		if (size >= scache_size)
 633			r4k_blast_scache();
 634		else {
 635			unsigned long lsize = cpu_scache_line_size();
 636			unsigned long almask = ~(lsize - 1);
 637
 638			/*
 639			 * There is no clearly documented alignment requirement
  640			 * for the cache instruction on MIPS processors, and
  641			 * some processors, among them the QED RM5200 and
  642			 * RM7000, will throw an address error for cache hit
  643			 * ops with insufficient alignment.  Solved by
  644			 * aligning the address to the cache line size.
 645			 */
 646			cache_op(Hit_Writeback_Inv_SD, addr & almask);
 647			cache_op(Hit_Writeback_Inv_SD,
 648				 (addr + size - 1) & almask);
 649			blast_inv_scache_range(addr, addr + size);
 650		}
 651		__sync();
 652		return;
 653	}
 654
 655	if (cpu_has_safe_index_cacheops && size >= dcache_size) {
 656		r4k_blast_dcache();
 657	} else {
 658		unsigned long lsize = cpu_dcache_line_size();
 659		unsigned long almask = ~(lsize - 1);
 660
 661		R4600_HIT_CACHEOP_WAR_IMPL;
 662		cache_op(Hit_Writeback_Inv_D, addr & almask);
 663		cache_op(Hit_Writeback_Inv_D, (addr + size - 1)  & almask);
 664		blast_inv_dcache_range(addr, addr + size);
 665	}
 666
 667	bc_inv(addr, size);
 668	__sync();
 669}
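/*
 * The two explicit Hit_Writeback_Inv ops on the first and last lines
 * above protect unrelated data sharing a cache line with the edges of
 * the buffer: the partial edge lines are written back, while the
 * interior of the range is simply invalidated with blast_inv_*_range().
 */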
 670#endif /* CONFIG_DMA_NONCOHERENT */
 671
 672/*
  673 * While we're protected against bad userland addresses, we don't care
 674 * very much about what happens in that case.  Usually a segmentation
 675 * fault will dump the process later on anyway ...
 676 */
 677static void local_r4k_flush_cache_sigtramp(void * arg)
 678{
 679	unsigned long ic_lsize = cpu_icache_line_size();
 680	unsigned long dc_lsize = cpu_dcache_line_size();
 681	unsigned long sc_lsize = cpu_scache_line_size();
 682	unsigned long addr = (unsigned long) arg;
 683
 684	R4600_HIT_CACHEOP_WAR_IMPL;
 685	if (dc_lsize)
 686		protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
 687	if (!cpu_icache_snoops_remote_store && scache_size)
 688		protected_writeback_scache_line(addr & ~(sc_lsize - 1));
 689	if (ic_lsize)
 690		protected_flush_icache_line(addr & ~(ic_lsize - 1));
 691	if (MIPS4K_ICACHE_REFILL_WAR) {
 692		__asm__ __volatile__ (
 693			".set push\n\t"
 694			".set noat\n\t"
 695			".set mips3\n\t"
 696#ifdef CONFIG_32BIT
 697			"la	$at,1f\n\t"
 698#endif
 699#ifdef CONFIG_64BIT
 700			"dla	$at,1f\n\t"
 701#endif
 702			"cache	%0,($at)\n\t"
 703			"nop; nop; nop\n"
 704			"1:\n\t"
 705			".set pop"
 706			:
 707			: "i" (Hit_Invalidate_I));
 708	}
 709	if (MIPS_CACHE_SYNC_WAR)
 710		__asm__ __volatile__ ("sync");
 711}
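/*
 * The MIPS4K_ICACHE_REFILL_WAR sequence above issues Hit_Invalidate_I on
 * the address of label 1, invalidating the icache line that holds the
 * instructions following the workaround and forcing a clean refill.
 */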
 712
 713static void r4k_flush_cache_sigtramp(unsigned long addr)
 714{
 715	r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr);
 716}
 717
 718static void r4k_flush_icache_all(void)
 719{
 720	if (cpu_has_vtag_icache)
 721		r4k_blast_icache();
 722}
 723
 724struct flush_kernel_vmap_range_args {
 725	unsigned long	vaddr;
 726	int		size;
 727};
 728
 729static inline void local_r4k_flush_kernel_vmap_range(void *args)
 730{
 731	struct flush_kernel_vmap_range_args *vmra = args;
 732	unsigned long vaddr = vmra->vaddr;
 733	int size = vmra->size;
 734
 735	/*
 736	 * Aliases only affect the primary caches so don't bother with
 737	 * S-caches or T-caches.
 738	 */
 739	if (cpu_has_safe_index_cacheops && size >= dcache_size)
 740		r4k_blast_dcache();
 741	else {
 742		R4600_HIT_CACHEOP_WAR_IMPL;
 743		blast_dcache_range(vaddr, vaddr + size);
 744	}
 745}
 746
 747static void r4k_flush_kernel_vmap_range(unsigned long vaddr, int size)
 748{
 749	struct flush_kernel_vmap_range_args args;
 750
 751	args.vaddr = (unsigned long) vaddr;
 752	args.size = size;
 753
 754	r4k_on_each_cpu(local_r4k_flush_kernel_vmap_range, &args);
 755}
 756
 757static inline void rm7k_erratum31(void)
 758{
 759	const unsigned long ic_lsize = 32;
 760	unsigned long addr;
 761
 762	/* RM7000 erratum #31. The icache is screwed at startup. */
 763	write_c0_taglo(0);
 764	write_c0_taghi(0);
 765
 766	for (addr = INDEX_BASE; addr <= INDEX_BASE + 4096; addr += ic_lsize) {
 767		__asm__ __volatile__ (
 768			".set push\n\t"
 769			".set noreorder\n\t"
 770			".set mips3\n\t"
 771			"cache\t%1, 0(%0)\n\t"
 772			"cache\t%1, 0x1000(%0)\n\t"
 773			"cache\t%1, 0x2000(%0)\n\t"
 774			"cache\t%1, 0x3000(%0)\n\t"
 775			"cache\t%2, 0(%0)\n\t"
 776			"cache\t%2, 0x1000(%0)\n\t"
 777			"cache\t%2, 0x2000(%0)\n\t"
 778			"cache\t%2, 0x3000(%0)\n\t"
 779			"cache\t%1, 0(%0)\n\t"
 780			"cache\t%1, 0x1000(%0)\n\t"
 781			"cache\t%1, 0x2000(%0)\n\t"
 782			"cache\t%1, 0x3000(%0)\n\t"
 783			".set pop\n"
 784			:
 785			: "r" (addr), "i" (Index_Store_Tag_I), "i" (Fill));
 786	}
 787}
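/*
 * The loop above walks one 4kB way worth of indexes; assuming a 16kB
 * 4-way icache (4kB per way), the 0x1000/0x2000/0x3000 offsets hit the
 * same index in each of the four ways.  Storing a zero tag, forcing a
 * Fill, and storing the zero tag again leaves the icache consistently
 * invalidated after the broken power-on state.
 */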
 788
 789static char *way_string[] __cpuinitdata = { NULL, "direct mapped", "2-way",
 790	"3-way", "4-way", "5-way", "6-way", "7-way", "8-way"
 791};
 792
 793static void __cpuinit probe_pcache(void)
 794{
 795	struct cpuinfo_mips *c = &current_cpu_data;
 796	unsigned int config = read_c0_config();
 797	unsigned int prid = read_c0_prid();
 798	unsigned long config1;
 799	unsigned int lsize;
 800
 801	switch (c->cputype) {
 802	case CPU_R4600:			/* QED style two way caches? */
 803	case CPU_R4700:
 804	case CPU_R5000:
 805	case CPU_NEVADA:
 806		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
 807		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
 808		c->icache.ways = 2;
 809		c->icache.waybit = __ffs(icache_size/2);
 810
 811		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
 812		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
 813		c->dcache.ways = 2;
 814		c->dcache.waybit= __ffs(dcache_size/2);
 815
 816		c->options |= MIPS_CPU_CACHE_CDEX_P;
 817		break;
 818
 819	case CPU_R5432:
 820	case CPU_R5500:
 821		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
 822		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
 823		c->icache.ways = 2;
 824		c->icache.waybit= 0;
 825
 826		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
 827		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
 828		c->dcache.ways = 2;
 829		c->dcache.waybit = 0;
 830
 831		c->options |= MIPS_CPU_CACHE_CDEX_P | MIPS_CPU_PREFETCH;
 832		break;
 833
 834	case CPU_TX49XX:
 835		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
 836		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
 837		c->icache.ways = 4;
 838		c->icache.waybit= 0;
 839
 840		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
 841		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
 842		c->dcache.ways = 4;
 843		c->dcache.waybit = 0;
 844
 845		c->options |= MIPS_CPU_CACHE_CDEX_P;
 846		c->options |= MIPS_CPU_PREFETCH;
 847		break;
 848
 849	case CPU_R4000PC:
 850	case CPU_R4000SC:
 851	case CPU_R4000MC:
 852	case CPU_R4400PC:
 853	case CPU_R4400SC:
 854	case CPU_R4400MC:
 855	case CPU_R4300:
 856		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
 857		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
 858		c->icache.ways = 1;
 859		c->icache.waybit = 0; 	/* doesn't matter */
 860
 861		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
 862		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
 863		c->dcache.ways = 1;
 864		c->dcache.waybit = 0;	/* does not matter */
 865
 866		c->options |= MIPS_CPU_CACHE_CDEX_P;
 867		break;
 868
 869	case CPU_R10000:
 870	case CPU_R12000:
 871	case CPU_R14000:
 872		icache_size = 1 << (12 + ((config & R10K_CONF_IC) >> 29));
 873		c->icache.linesz = 64;
 874		c->icache.ways = 2;
 875		c->icache.waybit = 0;
 876
 877		dcache_size = 1 << (12 + ((config & R10K_CONF_DC) >> 26));
 878		c->dcache.linesz = 32;
 879		c->dcache.ways = 2;
 880		c->dcache.waybit = 0;
 881
 882		c->options |= MIPS_CPU_PREFETCH;
 883		break;
 884
 885	case CPU_VR4133:
 886		write_c0_config(config & ~VR41_CONF_P4K);
 887	case CPU_VR4131:
 888		/* Workaround for cache instruction bug of VR4131 */
 889		if (c->processor_id == 0x0c80U || c->processor_id == 0x0c81U ||
 890		    c->processor_id == 0x0c82U) {
 891			config |= 0x00400000U;
 892			if (c->processor_id == 0x0c80U)
 893				config |= VR41_CONF_BP;
 894			write_c0_config(config);
 895		} else
 896			c->options |= MIPS_CPU_CACHE_CDEX_P;
 897
 898		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
 899		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
 900		c->icache.ways = 2;
 901		c->icache.waybit = __ffs(icache_size/2);
 902
 903		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
 904		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
 905		c->dcache.ways = 2;
 906		c->dcache.waybit = __ffs(dcache_size/2);
 907		break;
 908
 909	case CPU_VR41XX:
 910	case CPU_VR4111:
 911	case CPU_VR4121:
 912	case CPU_VR4122:
 913	case CPU_VR4181:
 914	case CPU_VR4181A:
 915		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
 916		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
 917		c->icache.ways = 1;
 918		c->icache.waybit = 0; 	/* doesn't matter */
 919
 920		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
 921		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
 922		c->dcache.ways = 1;
 923		c->dcache.waybit = 0;	/* does not matter */
 924
 925		c->options |= MIPS_CPU_CACHE_CDEX_P;
 926		break;
 927
 928	case CPU_RM7000:
 929		rm7k_erratum31();
 930
 931	case CPU_RM9000:
 932		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
 933		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
 934		c->icache.ways = 4;
 935		c->icache.waybit = __ffs(icache_size / c->icache.ways);
 936
 937		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
 938		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
 939		c->dcache.ways = 4;
 940		c->dcache.waybit = __ffs(dcache_size / c->dcache.ways);
 941
 942#if !defined(CONFIG_SMP) || !defined(RM9000_CDEX_SMP_WAR)
 943		c->options |= MIPS_CPU_CACHE_CDEX_P;
 944#endif
 945		c->options |= MIPS_CPU_PREFETCH;
 946		break;
 947
 948	case CPU_LOONGSON2:
 949		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
 950		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
 951		if (prid & 0x3)
 952			c->icache.ways = 4;
 953		else
 954			c->icache.ways = 2;
 955		c->icache.waybit = 0;
 956
 957		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
 958		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
 959		if (prid & 0x3)
 960			c->dcache.ways = 4;
 961		else
 962			c->dcache.ways = 2;
 963		c->dcache.waybit = 0;
 964		break;
 965
 966	default:
 967		if (!(config & MIPS_CONF_M))
 968			panic("Don't know how to probe P-caches on this cpu.");
 969
 970		/*
  971		 * We seem to be a MIPS32 or MIPS64 CPU,
  972		 * so let's probe the I-cache ...
 973		 */
 974		config1 = read_c0_config1();
 975
 976		if ((lsize = ((config1 >> 19) & 7)))
 977			c->icache.linesz = 2 << lsize;
 978		else
 979			c->icache.linesz = lsize;
 980		c->icache.sets = 32 << (((config1 >> 22) + 1) & 7);
 981		c->icache.ways = 1 + ((config1 >> 16) & 7);
 982
 983		icache_size = c->icache.sets *
 984		              c->icache.ways *
 985		              c->icache.linesz;
 986		c->icache.waybit = __ffs(icache_size/c->icache.ways);
 987
 988		if (config & 0x8)		/* VI bit */
 989			c->icache.flags |= MIPS_CACHE_VTAG;
 990
 991		/*
 992		 * Now probe the MIPS32 / MIPS64 data cache.
 993		 */
 994		c->dcache.flags = 0;
 995
 996		if ((lsize = ((config1 >> 10) & 7)))
 997			c->dcache.linesz = 2 << lsize;
 998		else
 999			c->dcache.linesz= lsize;
1000		c->dcache.sets = 32 << (((config1 >> 13) + 1) & 7);
1001		c->dcache.ways = 1 + ((config1 >> 7) & 7);
1002
1003		dcache_size = c->dcache.sets *
1004		              c->dcache.ways *
1005		              c->dcache.linesz;
1006		c->dcache.waybit = __ffs(dcache_size/c->dcache.ways);
1007
1008		c->options |= MIPS_CPU_PREFETCH;
1009		break;
1010	}
1011
1012	/*
1013	 * Processor configuration sanity check for the R4000SC erratum
1014	 * #5.  With page sizes larger than 32kB there is no possibility
1015	 * to get a VCE exception anymore so we don't care about this
1016	 * misconfiguration.  The case is rather theoretical anyway;
 1017	 * presumably no vendor is shipping its hardware in the "bad"
1018	 * configuration.
1019	 */
1020	if ((prid & 0xff00) == PRID_IMP_R4000 && (prid & 0xff) < 0x40 &&
1021	    !(config & CONF_SC) && c->icache.linesz != 16 &&
1022	    PAGE_SIZE <= 0x8000)
1023		panic("Improper R4000SC processor configuration detected");
1024
1025	/* compute a couple of other cache variables */
1026	c->icache.waysize = icache_size / c->icache.ways;
1027	c->dcache.waysize = dcache_size / c->dcache.ways;
1028
1029	c->icache.sets = c->icache.linesz ?
1030		icache_size / (c->icache.linesz * c->icache.ways) : 0;
1031	c->dcache.sets = c->dcache.linesz ?
1032		dcache_size / (c->dcache.linesz * c->dcache.ways) : 0;
1033
1034	/*
1035	 * R10000 and R12000 P-caches are odd in a positive way.  They're 32kB
 1036	 * 2-way virtually indexed, so they would normally suffer from
 1037	 * aliases, but magic in the hardware deals with that for us so
 1038	 * we don't need to take care ourselves.
1039	 */
1040	switch (c->cputype) {
1041	case CPU_20KC:
1042	case CPU_25KF:
1043	case CPU_SB1:
1044	case CPU_SB1A:
1045	case CPU_XLR:
1046		c->dcache.flags |= MIPS_CACHE_PINDEX;
1047		break;
1048
1049	case CPU_R10000:
1050	case CPU_R12000:
1051	case CPU_R14000:
1052		break;
1053
1054	case CPU_M14KC:
1055	case CPU_24K:
1056	case CPU_34K:
1057	case CPU_74K:
1058	case CPU_1004K:
1059		if ((read_c0_config7() & (1 << 16))) {
1060			/* effectively physically indexed dcache,
1061			   thus no virtual aliases. */
1062			c->dcache.flags |= MIPS_CACHE_PINDEX;
1063			break;
1064		}
1065	default:
1066		if (c->dcache.waysize > PAGE_SIZE)
1067			c->dcache.flags |= MIPS_CACHE_ALIASES;
1068	}
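/*
 * The default case sets MIPS_CACHE_ALIASES whenever one D-cache way
 * spans more than a page: two virtual mappings of the same physical page
 * may then index different cache lines, so the flush routines must deal
 * with every possible cache color.
 */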
1069
1070	switch (c->cputype) {
1071	case CPU_20KC:
1072		/*
1073		 * Some older 20Kc chips doesn't have the 'VI' bit in
1074		 * the config register.
1075		 */
1076		c->icache.flags |= MIPS_CACHE_VTAG;
1077		break;
1078
1079	case CPU_ALCHEMY:
1080		c->icache.flags |= MIPS_CACHE_IC_F_DC;
1081		break;
1082	}
1083
1084#ifdef  CONFIG_CPU_LOONGSON2
1085	/*
 1086	 * LOONGSON2 has a 4-way icache, but when using an indexed cache op,
 1087	 * one op will act on all 4 ways.
1088	 */
1089	c->icache.ways = 1;
1090#endif
1091
1092	printk("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
1093	       icache_size >> 10,
1094	       c->icache.flags & MIPS_CACHE_VTAG ? "VIVT" : "VIPT",
1095	       way_string[c->icache.ways], c->icache.linesz);
1096
1097	printk("Primary data cache %ldkB, %s, %s, %s, linesize %d bytes\n",
1098	       dcache_size >> 10, way_string[c->dcache.ways],
1099	       (c->dcache.flags & MIPS_CACHE_PINDEX) ? "PIPT" : "VIPT",
1100	       (c->dcache.flags & MIPS_CACHE_ALIASES) ?
1101			"cache aliases" : "no aliases",
1102	       c->dcache.linesz);
1103}
1104
1105/*
1106 * If you even _breathe_ on this function, look at the gcc output and make sure
1107 * it does not pop things on and off the stack for the cache sizing loop that
1108 * executes in KSEG1 space or else you will crash and burn badly.  You have
1109 * been warned.
1110 */
1111static int __cpuinit probe_scache(void)
1112{
1113	unsigned long flags, addr, begin, end, pow2;
1114	unsigned int config = read_c0_config();
1115	struct cpuinfo_mips *c = &current_cpu_data;
1116
1117	if (config & CONF_SC)
1118		return 0;
1119
1120	begin = (unsigned long) &_stext;
1121	begin &= ~((4 * 1024 * 1024) - 1);
1122	end = begin + (4 * 1024 * 1024);
1123
1124	/*
1125	 * This is such a bitch, you'd think they would make it easy to do
1126	 * this.  Away you daemons of stupidity!
1127	 */
1128	local_irq_save(flags);
1129
1130	/* Fill each size-multiple cache line with a valid tag. */
1131	pow2 = (64 * 1024);
1132	for (addr = begin; addr < end; addr = (begin + pow2)) {
1133		unsigned long *p = (unsigned long *) addr;
1134		__asm__ __volatile__("nop" : : "r" (*p)); /* whee... */
1135		pow2 <<= 1;
1136	}
1137
1138	/* Load first line with zero (therefore invalid) tag. */
1139	write_c0_taglo(0);
1140	write_c0_taghi(0);
1141	__asm__ __volatile__("nop; nop; nop; nop;"); /* avoid the hazard */
1142	cache_op(Index_Store_Tag_I, begin);
1143	cache_op(Index_Store_Tag_D, begin);
1144	cache_op(Index_Store_Tag_SD, begin);
1145
1146	/* Now search for the wrap around point. */
1147	pow2 = (128 * 1024);
1148	for (addr = begin + (128 * 1024); addr < end; addr = begin + pow2) {
1149		cache_op(Index_Load_Tag_SD, addr);
1150		__asm__ __volatile__("nop; nop; nop; nop;"); /* hazard... */
1151		if (!read_c0_taglo())
1152			break;
1153		pow2 <<= 1;
1154	}
1155	local_irq_restore(flags);
1156	addr -= begin;
1157
1158	scache_size = addr;
1159	c->scache.linesz = 16 << ((config & R4K_CONF_SB) >> 22);
1160	c->scache.ways = 1;
1161	c->dcache.waybit = 0;		/* does not matter */
1162
1163	return 1;
1164}
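/*
 * The sizing trick above: plant valid tags at power-of-two strides from
 * _stext, store a zero (invalid) tag at the base index, then probe with
 * Index_Load_Tag_SD at growing powers of two.  The first address whose
 * tag reads back as zero has wrapped around onto the base line, so its
 * offset from the base is the S-cache size.
 */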
1165
1166#if defined(CONFIG_CPU_LOONGSON2)
1167static void __init loongson2_sc_init(void)
1168{
1169	struct cpuinfo_mips *c = &current_cpu_data;
1170
1171	scache_size = 512*1024;
1172	c->scache.linesz = 32;
1173	c->scache.ways = 4;
1174	c->scache.waybit = 0;
1175	c->scache.waysize = scache_size / (c->scache.ways);
1176	c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);
1177	pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
1178	       scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);
1179
1180	c->options |= MIPS_CPU_INCLUSIVE_CACHES;
1181}
1182#endif
1183
1184extern int r5k_sc_init(void);
1185extern int rm7k_sc_init(void);
1186extern int mips_sc_init(void);
1187
1188static void __cpuinit setup_scache(void)
1189{
1190	struct cpuinfo_mips *c = &current_cpu_data;
1191	unsigned int config = read_c0_config();
1192	int sc_present = 0;
1193
1194	/*
1195	 * Do the probing thing on R4000SC and R4400SC processors.  Other
1196	 * processors don't have a S-cache that would be relevant to the
1197	 * Linux memory management.
1198	 */
1199	switch (c->cputype) {
1200	case CPU_R4000SC:
1201	case CPU_R4000MC:
1202	case CPU_R4400SC:
1203	case CPU_R4400MC:
1204		sc_present = run_uncached(probe_scache);
1205		if (sc_present)
1206			c->options |= MIPS_CPU_CACHE_CDEX_S;
1207		break;
1208
1209	case CPU_R10000:
1210	case CPU_R12000:
1211	case CPU_R14000:
1212		scache_size = 0x80000 << ((config & R10K_CONF_SS) >> 16);
1213		c->scache.linesz = 64 << ((config >> 13) & 1);
1214		c->scache.ways = 2;
1215		c->scache.waybit= 0;
1216		sc_present = 1;
1217		break;
1218
1219	case CPU_R5000:
1220	case CPU_NEVADA:
1221#ifdef CONFIG_R5000_CPU_SCACHE
1222		r5k_sc_init();
1223#endif
 1224		return;
1225
1226	case CPU_RM7000:
1227	case CPU_RM9000:
1228#ifdef CONFIG_RM7000_CPU_SCACHE
1229		rm7k_sc_init();
1230#endif
1231		return;
1232
1233#if defined(CONFIG_CPU_LOONGSON2)
1234	case CPU_LOONGSON2:
1235		loongson2_sc_init();
1236		return;
1237#endif
1238	case CPU_XLP:
1239		/* don't need to worry about L2, fully coherent */
1240		return;
1241
1242	default:
1243		if (c->isa_level == MIPS_CPU_ISA_M32R1 ||
1244		    c->isa_level == MIPS_CPU_ISA_M32R2 ||
1245		    c->isa_level == MIPS_CPU_ISA_M64R1 ||
1246		    c->isa_level == MIPS_CPU_ISA_M64R2) {
1247#ifdef CONFIG_MIPS_CPU_SCACHE
1248			if (mips_sc_init ()) {
1249				scache_size = c->scache.ways * c->scache.sets * c->scache.linesz;
1250				printk("MIPS secondary cache %ldkB, %s, linesize %d bytes.\n",
1251				       scache_size >> 10,
1252				       way_string[c->scache.ways], c->scache.linesz);
1253			}
1254#else
1255			if (!(c->scache.flags & MIPS_CACHE_NOT_PRESENT))
1256				panic("Dunno how to handle MIPS32 / MIPS64 second level cache");
1257#endif
1258			return;
1259		}
1260		sc_present = 0;
1261	}
1262
1263	if (!sc_present)
1264		return;
1265
1266	/* compute a couple of other cache variables */
1267	c->scache.waysize = scache_size / c->scache.ways;
1268
1269	c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);
1270
1271	printk("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
1272	       scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);
1273
1274	c->options |= MIPS_CPU_INCLUSIVE_CACHES;
1275}
1276
1277void au1x00_fixup_config_od(void)
1278{
1279	/*
1280	 * c0_config.od (bit 19) was write only (and read as 0)
1281	 * on the early revisions of Alchemy SOCs.  It disables the bus
1282	 * transaction overlapping and needs to be set to fix various errata.
1283	 */
1284	switch (read_c0_prid()) {
1285	case 0x00030100: /* Au1000 DA */
1286	case 0x00030201: /* Au1000 HA */
1287	case 0x00030202: /* Au1000 HB */
1288	case 0x01030200: /* Au1500 AB */
1289	/*
 1290	 * The Au1100 errata are actually silent about this bit, so we set it
1291	 * just in case for those revisions that require it to be set according
1292	 * to the (now gone) cpu table.
1293	 */
1294	case 0x02030200: /* Au1100 AB */
1295	case 0x02030201: /* Au1100 BA */
1296	case 0x02030202: /* Au1100 BC */
1297		set_c0_config(1 << 19);
1298		break;
1299	}
1300}
1301
1302/* CP0 hazard avoidance. */
1303#define NXP_BARRIER()							\
1304	 __asm__ __volatile__(						\
1305	".set noreorder\n\t"						\
1306	"nop; nop; nop; nop; nop; nop;\n\t"				\
1307	".set reorder\n\t")
1308
1309static void nxp_pr4450_fixup_config(void)
1310{
1311	unsigned long config0;
1312
1313	config0 = read_c0_config();
1314
1315	/* clear all three cache coherency fields */
1316	config0 &= ~(0x7 | (7 << 25) | (7 << 28));
1317	config0 |= (((_page_cachable_default >> _CACHE_SHIFT) <<  0) |
1318		    ((_page_cachable_default >> _CACHE_SHIFT) << 25) |
1319		    ((_page_cachable_default >> _CACHE_SHIFT) << 28));
1320	write_c0_config(config0);
1321	NXP_BARRIER();
1322}
1323
1324static int __cpuinitdata cca = -1;
1325
1326static int __init cca_setup(char *str)
1327{
1328	get_option(&str, &cca);
1329
1330	return 1;
1331}
1332
1333__setup("cca=", cca_setup);
1334
1335static void __cpuinit coherency_setup(void)
1336{
1337	if (cca < 0 || cca > 7)
1338		cca = read_c0_config() & CONF_CM_CMASK;
1339	_page_cachable_default = cca << _CACHE_SHIFT;
1340
1341	pr_debug("Using cache attribute %d\n", cca);
1342	change_c0_config(CONF_CM_CMASK, cca);
1343
1344	/*
1345	 * c0_status.cu=0 specifies that updates by the sc instruction use
1346	 * the coherency mode specified by the TLB; 1 means cachable
1347	 * coherent update on write will be used.  Not all processors have
 1348	 * this bit; some wire it to zero, others, like Toshiba, had the
1349	 * silly idea of putting something else there ...
1350	 */
1351	switch (current_cpu_type()) {
1352	case CPU_R4000PC:
1353	case CPU_R4000SC:
1354	case CPU_R4000MC:
1355	case CPU_R4400PC:
1356	case CPU_R4400SC:
1357	case CPU_R4400MC:
1358		clear_c0_config(CONF_CU);
1359		break;
1360	/*
1361	 * We need to catch the early Alchemy SOCs with
1362	 * the write-only co_config.od bit and set it back to one on:
1363	 * Au1000 rev DA, HA, HB;  Au1100 AB, BA, BC, Au1500 AB
1364	 */
1365	case CPU_ALCHEMY:
1366		au1x00_fixup_config_od();
1367		break;
1368
1369	case PRID_IMP_PR4450:
1370		nxp_pr4450_fixup_config();
1371		break;
1372	}
1373}
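/*
 * The CCA (cache coherency attribute) chosen above, either from the
 * "cca=" kernel parameter or from the reset-time c0_config value,
 * becomes _page_cachable_default and thus the cacheability of all
 * regular page mappings.
 */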
1374
1375#if defined(CONFIG_DMA_NONCOHERENT)
1376
1377static int __cpuinitdata coherentio;
1378
1379static int __init setcoherentio(char *str)
1380{
1381	coherentio = 1;
1382
1383	return 1;
1384}
1385
1386__setup("coherentio", setcoherentio);
1387#endif
1388
1389static void __cpuinit r4k_cache_error_setup(void)
1390{
1391	extern char __weak except_vec2_generic;
1392	extern char __weak except_vec2_sb1;
1393	struct cpuinfo_mips *c = &current_cpu_data;
1394
1395	switch (c->cputype) {
1396	case CPU_SB1:
1397	case CPU_SB1A:
1398		set_uncached_handler(0x100, &except_vec2_sb1, 0x80);
1399		break;
1400
1401	default:
1402		set_uncached_handler(0x100, &except_vec2_generic, 0x80);
1403		break;
1404	}
1405}
1406
1407void __cpuinit r4k_cache_init(void)
1408{
1409	extern void build_clear_page(void);
1410	extern void build_copy_page(void);
1411	struct cpuinfo_mips *c = &current_cpu_data;
1412
1413	probe_pcache();
1414	setup_scache();
1415
1416	r4k_blast_dcache_page_setup();
1417	r4k_blast_dcache_page_indexed_setup();
1418	r4k_blast_dcache_setup();
1419	r4k_blast_icache_page_setup();
1420	r4k_blast_icache_page_indexed_setup();
1421	r4k_blast_icache_setup();
1422	r4k_blast_scache_page_setup();
1423	r4k_blast_scache_page_indexed_setup();
1424	r4k_blast_scache_setup();
1425
1426	/*
1427	 * Some MIPS32 and MIPS64 processors have physically indexed caches.
1428	 * This code supports virtually indexed processors and will be
1429	 * unnecessarily inefficient on physically indexed processors.
1430	 */
1431	if (c->dcache.linesz)
 1432		shm_align_mask = max_t(unsigned long,
1433					c->dcache.sets * c->dcache.linesz - 1,
1434					PAGE_SIZE - 1);
1435	else
1436		shm_align_mask = PAGE_SIZE-1;
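/*
 * shm_align_mask makes the mmap() coloring code place shared mappings at
 * virtual addresses that are congruent modulo one D-cache way, so that
 * on virtually indexed caches all mappings of a page share one cache
 * color.
 */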
1437
1438	__flush_cache_vmap	= r4k__flush_cache_vmap;
1439	__flush_cache_vunmap	= r4k__flush_cache_vunmap;
1440
1441	flush_cache_all		= cache_noop;
1442	__flush_cache_all	= r4k___flush_cache_all;
1443	flush_cache_mm		= r4k_flush_cache_mm;
1444	flush_cache_page	= r4k_flush_cache_page;
1445	flush_cache_range	= r4k_flush_cache_range;
1446
1447	__flush_kernel_vmap_range = r4k_flush_kernel_vmap_range;
1448
1449	flush_cache_sigtramp	= r4k_flush_cache_sigtramp;
1450	flush_icache_all	= r4k_flush_icache_all;
1451	local_flush_data_cache_page	= local_r4k_flush_data_cache_page;
1452	flush_data_cache_page	= r4k_flush_data_cache_page;
1453	flush_icache_range	= r4k_flush_icache_range;
1454	local_flush_icache_range	= local_r4k_flush_icache_range;
1455
1456#if defined(CONFIG_DMA_NONCOHERENT)
1457	if (coherentio) {
1458		_dma_cache_wback_inv	= (void *)cache_noop;
1459		_dma_cache_wback	= (void *)cache_noop;
1460		_dma_cache_inv		= (void *)cache_noop;
1461	} else {
1462		_dma_cache_wback_inv	= r4k_dma_cache_wback_inv;
1463		_dma_cache_wback	= r4k_dma_cache_wback_inv;
1464		_dma_cache_inv		= r4k_dma_cache_inv;
1465	}
1466#endif
1467
1468	build_clear_page();
1469	build_copy_page();
1470#if !defined(CONFIG_MIPS_CMP)
1471	local_r4k___flush_cache_all(NULL);
1472#endif
1473	coherency_setup();
1474	board_cache_error_setup = r4k_cache_error_setup;
1475}
Linux v5.9
   1/*
   2 * This file is subject to the terms and conditions of the GNU General Public
   3 * License.  See the file "COPYING" in the main directory of this archive
   4 * for more details.
   5 *
   6 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
   7 * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle (ralf@gnu.org)
   8 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
   9 */
  10#include <linux/cpu_pm.h>
  11#include <linux/hardirq.h>
  12#include <linux/init.h>
  13#include <linux/highmem.h>
  14#include <linux/kernel.h>
  15#include <linux/linkage.h>
  16#include <linux/preempt.h>
  17#include <linux/sched.h>
  18#include <linux/smp.h>
  19#include <linux/mm.h>
  20#include <linux/export.h>
  21#include <linux/bitops.h>
  22
  23#include <asm/bcache.h>
  24#include <asm/bootinfo.h>
  25#include <asm/cache.h>
  26#include <asm/cacheops.h>
  27#include <asm/cpu.h>
  28#include <asm/cpu-features.h>
  29#include <asm/cpu-type.h>
  30#include <asm/io.h>
  31#include <asm/page.h>
  32#include <asm/r4kcache.h>
  33#include <asm/sections.h>
  34#include <asm/mmu_context.h>
  35#include <asm/war.h>
  36#include <asm/cacheflush.h> /* for run_uncached() */
  37#include <asm/traps.h>
  38#include <asm/dma-coherence.h>
  39#include <asm/mips-cps.h>
  40
  41/*
  42 * Bits describing what cache ops an SMP callback function may perform.
  43 *
  44 * R4K_HIT   -	Virtual user or kernel address based cache operations. The
  45 *		active_mm must be checked before using user addresses, falling
  46 *		back to kmap.
  47 * R4K_INDEX -	Index based cache operations.
  48 */
  49
  50#define R4K_HIT		BIT(0)
  51#define R4K_INDEX	BIT(1)
  52
  53/**
  54 * r4k_op_needs_ipi() - Decide if a cache op needs to be done on every core.
  55 * @type:	Type of cache operations (R4K_HIT or R4K_INDEX).
  56 *
  57 * Decides whether a cache op needs to be performed on every core in the system.
  58 * This may change depending on the @type of cache operation, as well as the set
  59 * of online CPUs, so preemption should be disabled by the caller to prevent CPU
  60 * hotplug from changing the result.
  61 *
  62 * Returns:	1 if the cache operation @type should be done on every core in
  63 *		the system.
  64 *		0 if the cache operation @type is globalized and only needs to
   65 *		be performed on a single CPU.
  66 */
  67static inline bool r4k_op_needs_ipi(unsigned int type)
  68{
  69	/* The MIPS Coherence Manager (CM) globalizes address-based cache ops */
  70	if (type == R4K_HIT && mips_cm_present())
  71		return false;
  72
  73	/*
  74	 * Hardware doesn't globalize the required cache ops, so SMP calls may
  75	 * be needed, but only if there are foreign CPUs (non-siblings with
  76	 * separate caches).
  77	 */
  78	/* cpu_foreign_map[] undeclared when !CONFIG_SMP */
  79#ifdef CONFIG_SMP
  80	return !cpumask_empty(&cpu_foreign_map[0]);
  81#else
  82	return false;
  83#endif
  84}
  85
  86/*
   87 * Special variant of smp_call_function for use by cache functions:
  88 *
  89 *  o No return value
  90 *  o collapses to normal function call on UP kernels
  91 *  o collapses to normal function call on systems with a single shared
  92 *    primary cache.
  93 *  o doesn't disable interrupts on the local CPU
  94 */
  95static inline void r4k_on_each_cpu(unsigned int type,
  96				   void (*func)(void *info), void *info)
  97{
  98	preempt_disable();
  99	if (r4k_op_needs_ipi(type))
 100		smp_call_function_many(&cpu_foreign_map[smp_processor_id()],
 101				       func, info, 1);
 102	func(info);
 103	preempt_enable();
 104}
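/*
 * Unlike older kernels, which broadcast to every CPU, this variant only
 * IPIs the CPUs in cpu_foreign_map (those not sharing caches with the
 * caller), and only when r4k_op_needs_ipi() says the hardware does not
 * globalize the op.
 */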
 105
 106/*
 107 * Must die.
 108 */
 109static unsigned long icache_size __read_mostly;
 110static unsigned long dcache_size __read_mostly;
 111static unsigned long vcache_size __read_mostly;
 112static unsigned long scache_size __read_mostly;
 113
 114/*
  115 * Dummy cache handling routines for machines without board caches
 116 */
 117static void cache_noop(void) {}
 118
 119static struct bcache_ops no_sc_ops = {
 120	.bc_enable = (void *)cache_noop,
 121	.bc_disable = (void *)cache_noop,
 122	.bc_wback_inv = (void *)cache_noop,
 123	.bc_inv = (void *)cache_noop
 124};
 125
 126struct bcache_ops *bcops = &no_sc_ops;
 127
 128#define cpu_is_r4600_v1_x()	((read_c0_prid() & 0xfffffff0) == 0x00002010)
 129#define cpu_is_r4600_v2_x()	((read_c0_prid() & 0xfffffff0) == 0x00002020)
 130
 131#define R4600_HIT_CACHEOP_WAR_IMPL					\
 132do {									\
 133	if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())		\
 134		*(volatile unsigned long *)CKSEG1;			\
 135	if (R4600_V1_HIT_CACHEOP_WAR)					\
 136		__asm__ __volatile__("nop;nop;nop;nop");		\
 137} while (0)
 138
 139static void (*r4k_blast_dcache_page)(unsigned long addr);
 140
 141static inline void r4k_blast_dcache_page_dc32(unsigned long addr)
 142{
 143	R4600_HIT_CACHEOP_WAR_IMPL;
 144	blast_dcache32_page(addr);
 145}
 146
 147static inline void r4k_blast_dcache_page_dc64(unsigned long addr)
 148{
 149	blast_dcache64_page(addr);
 150}
 151
 152static inline void r4k_blast_dcache_page_dc128(unsigned long addr)
 153{
 154	blast_dcache128_page(addr);
 155}
 156
 157static void r4k_blast_dcache_page_setup(void)
 158{
 159	unsigned long  dc_lsize = cpu_dcache_line_size();
 160
 161	switch (dc_lsize) {
 162	case 0:
 163		r4k_blast_dcache_page = (void *)cache_noop;
 164		break;
 165	case 16:
 166		r4k_blast_dcache_page = blast_dcache16_page;
 167		break;
 168	case 32:
 169		r4k_blast_dcache_page = r4k_blast_dcache_page_dc32;
 170		break;
 171	case 64:
 172		r4k_blast_dcache_page = r4k_blast_dcache_page_dc64;
 173		break;
 174	case 128:
 175		r4k_blast_dcache_page = r4k_blast_dcache_page_dc128;
 176		break;
 177	default:
 178		break;
 179	}
 180}
 181
 182#ifndef CONFIG_EVA
 183#define r4k_blast_dcache_user_page  r4k_blast_dcache_page
 184#else
 185
 186static void (*r4k_blast_dcache_user_page)(unsigned long addr);
 187
 188static void r4k_blast_dcache_user_page_setup(void)
 189{
 190	unsigned long  dc_lsize = cpu_dcache_line_size();
 191
 192	if (dc_lsize == 0)
 193		r4k_blast_dcache_user_page = (void *)cache_noop;
 194	else if (dc_lsize == 16)
 195		r4k_blast_dcache_user_page = blast_dcache16_user_page;
 196	else if (dc_lsize == 32)
 197		r4k_blast_dcache_user_page = blast_dcache32_user_page;
 198	else if (dc_lsize == 64)
 199		r4k_blast_dcache_user_page = blast_dcache64_user_page;
 200}
 201
 202#endif
 203
 204static void (* r4k_blast_dcache_page_indexed)(unsigned long addr);
 205
 206static void r4k_blast_dcache_page_indexed_setup(void)
 207{
 208	unsigned long dc_lsize = cpu_dcache_line_size();
 209
 210	if (dc_lsize == 0)
 211		r4k_blast_dcache_page_indexed = (void *)cache_noop;
 212	else if (dc_lsize == 16)
 213		r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed;
 214	else if (dc_lsize == 32)
 215		r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed;
 216	else if (dc_lsize == 64)
 217		r4k_blast_dcache_page_indexed = blast_dcache64_page_indexed;
 218	else if (dc_lsize == 128)
 219		r4k_blast_dcache_page_indexed = blast_dcache128_page_indexed;
 220}
 221
 222void (* r4k_blast_dcache)(void);
 223EXPORT_SYMBOL(r4k_blast_dcache);
 224
 225static void r4k_blast_dcache_setup(void)
 226{
 227	unsigned long dc_lsize = cpu_dcache_line_size();
 228
 229	if (dc_lsize == 0)
 230		r4k_blast_dcache = (void *)cache_noop;
 231	else if (dc_lsize == 16)
 232		r4k_blast_dcache = blast_dcache16;
 233	else if (dc_lsize == 32)
 234		r4k_blast_dcache = blast_dcache32;
 235	else if (dc_lsize == 64)
 236		r4k_blast_dcache = blast_dcache64;
 237	else if (dc_lsize == 128)
 238		r4k_blast_dcache = blast_dcache128;
 239}
 240
 241/* force code alignment (used for TX49XX_ICACHE_INDEX_INV_WAR) */
 242#define JUMP_TO_ALIGN(order) \
 243	__asm__ __volatile__( \
 244		"b\t1f\n\t" \
 245		".align\t" #order "\n\t" \
 246		"1:\n\t" \
 247		)
 248#define CACHE32_UNROLL32_ALIGN	JUMP_TO_ALIGN(10) /* 32 * 32 = 1024 */
 249#define CACHE32_UNROLL32_ALIGN2 JUMP_TO_ALIGN(11)
 250
 251static inline void blast_r4600_v1_icache32(void)
 252{
 253	unsigned long flags;
 254
 255	local_irq_save(flags);
 256	blast_icache32();
 257	local_irq_restore(flags);
 258}
 259
 260static inline void tx49_blast_icache32(void)
 261{
 262	unsigned long start = INDEX_BASE;
 263	unsigned long end = start + current_cpu_data.icache.waysize;
 264	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
 265	unsigned long ws_end = current_cpu_data.icache.ways <<
 266			       current_cpu_data.icache.waybit;
 267	unsigned long ws, addr;
 268
 269	CACHE32_UNROLL32_ALIGN2;
  270	/* We're in an even chunk; blast the odd chunks */
 271	for (ws = 0; ws < ws_end; ws += ws_inc)
 272		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
 273			cache_unroll(32, kernel_cache, Index_Invalidate_I,
 274				     addr | ws, 32);
 275	CACHE32_UNROLL32_ALIGN;
  276	/* We're in an odd chunk; blast the even chunks */
 277	for (ws = 0; ws < ws_end; ws += ws_inc)
 278		for (addr = start; addr < end; addr += 0x400 * 2)
 279			cache_unroll(32, kernel_cache, Index_Invalidate_I,
 280				     addr | ws, 32);
 281}
 282
 283static inline void blast_icache32_r4600_v1_page_indexed(unsigned long page)
 284{
 285	unsigned long flags;
 286
 287	local_irq_save(flags);
 288	blast_icache32_page_indexed(page);
 289	local_irq_restore(flags);
 290}
 291
 292static inline void tx49_blast_icache32_page_indexed(unsigned long page)
 293{
 294	unsigned long indexmask = current_cpu_data.icache.waysize - 1;
 295	unsigned long start = INDEX_BASE + (page & indexmask);
 296	unsigned long end = start + PAGE_SIZE;
 297	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
 298	unsigned long ws_end = current_cpu_data.icache.ways <<
 299			       current_cpu_data.icache.waybit;
 300	unsigned long ws, addr;
 301
 302	CACHE32_UNROLL32_ALIGN2;
  303	/* We're in an even chunk; blast the odd chunks */
 304	for (ws = 0; ws < ws_end; ws += ws_inc)
 305		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
 306			cache_unroll(32, kernel_cache, Index_Invalidate_I,
 307				     addr | ws, 32);
 308	CACHE32_UNROLL32_ALIGN;
  309	/* We're in an odd chunk; blast the even chunks */
 310	for (ws = 0; ws < ws_end; ws += ws_inc)
 311		for (addr = start; addr < end; addr += 0x400 * 2)
 312			cache_unroll(32, kernel_cache, Index_Invalidate_I,
 313				     addr | ws, 32);
 314}
 315
 316static void (* r4k_blast_icache_page)(unsigned long addr);
 317
 318static void r4k_blast_icache_page_setup(void)
 319{
 320	unsigned long ic_lsize = cpu_icache_line_size();
 321
 322	if (ic_lsize == 0)
 323		r4k_blast_icache_page = (void *)cache_noop;
 324	else if (ic_lsize == 16)
 325		r4k_blast_icache_page = blast_icache16_page;
 326	else if (ic_lsize == 32 && current_cpu_type() == CPU_LOONGSON2EF)
 327		r4k_blast_icache_page = loongson2_blast_icache32_page;
 328	else if (ic_lsize == 32)
 329		r4k_blast_icache_page = blast_icache32_page;
 330	else if (ic_lsize == 64)
 331		r4k_blast_icache_page = blast_icache64_page;
 332	else if (ic_lsize == 128)
 333		r4k_blast_icache_page = blast_icache128_page;
 334}
 335
 336#ifndef CONFIG_EVA
 337#define r4k_blast_icache_user_page  r4k_blast_icache_page
 338#else
 339
 340static void (*r4k_blast_icache_user_page)(unsigned long addr);
 341
 342static void r4k_blast_icache_user_page_setup(void)
 343{
 344	unsigned long ic_lsize = cpu_icache_line_size();
 345
 346	if (ic_lsize == 0)
 347		r4k_blast_icache_user_page = (void *)cache_noop;
 348	else if (ic_lsize == 16)
 349		r4k_blast_icache_user_page = blast_icache16_user_page;
 350	else if (ic_lsize == 32)
 351		r4k_blast_icache_user_page = blast_icache32_user_page;
 352	else if (ic_lsize == 64)
 353		r4k_blast_icache_user_page = blast_icache64_user_page;
 354}
 355
 356#endif
 357
  358static void (*r4k_blast_icache_page_indexed)(unsigned long addr);
 359
 360static void r4k_blast_icache_page_indexed_setup(void)
 361{
 362	unsigned long ic_lsize = cpu_icache_line_size();
 363
 364	if (ic_lsize == 0)
 365		r4k_blast_icache_page_indexed = (void *)cache_noop;
 366	else if (ic_lsize == 16)
 367		r4k_blast_icache_page_indexed = blast_icache16_page_indexed;
 368	else if (ic_lsize == 32) {
 369		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
 370			r4k_blast_icache_page_indexed =
 371				blast_icache32_r4600_v1_page_indexed;
 372		else if (TX49XX_ICACHE_INDEX_INV_WAR)
 373			r4k_blast_icache_page_indexed =
 374				tx49_blast_icache32_page_indexed;
 375		else if (current_cpu_type() == CPU_LOONGSON2EF)
 376			r4k_blast_icache_page_indexed =
 377				loongson2_blast_icache32_page_indexed;
 378		else
 379			r4k_blast_icache_page_indexed =
 380				blast_icache32_page_indexed;
 381	} else if (ic_lsize == 64)
 382		r4k_blast_icache_page_indexed = blast_icache64_page_indexed;
 383}
 384
  385void (*r4k_blast_icache)(void);
 386EXPORT_SYMBOL(r4k_blast_icache);
 387
 388static void r4k_blast_icache_setup(void)
 389{
 390	unsigned long ic_lsize = cpu_icache_line_size();
 391
 392	if (ic_lsize == 0)
 393		r4k_blast_icache = (void *)cache_noop;
 394	else if (ic_lsize == 16)
 395		r4k_blast_icache = blast_icache16;
 396	else if (ic_lsize == 32) {
 397		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
 398			r4k_blast_icache = blast_r4600_v1_icache32;
 399		else if (TX49XX_ICACHE_INDEX_INV_WAR)
 400			r4k_blast_icache = tx49_blast_icache32;
 401		else if (current_cpu_type() == CPU_LOONGSON2EF)
 402			r4k_blast_icache = loongson2_blast_icache32;
 403		else
 404			r4k_blast_icache = blast_icache32;
 405	} else if (ic_lsize == 64)
 406		r4k_blast_icache = blast_icache64;
 407	else if (ic_lsize == 128)
 408		r4k_blast_icache = blast_icache128;
 409}
 410
  411static void (*r4k_blast_scache_page)(unsigned long addr);
 412
 413static void r4k_blast_scache_page_setup(void)
 414{
 415	unsigned long sc_lsize = cpu_scache_line_size();
 416
 417	if (scache_size == 0)
 418		r4k_blast_scache_page = (void *)cache_noop;
 419	else if (sc_lsize == 16)
 420		r4k_blast_scache_page = blast_scache16_page;
 421	else if (sc_lsize == 32)
 422		r4k_blast_scache_page = blast_scache32_page;
 423	else if (sc_lsize == 64)
 424		r4k_blast_scache_page = blast_scache64_page;
 425	else if (sc_lsize == 128)
 426		r4k_blast_scache_page = blast_scache128_page;
 427}
 428
  429static void (*r4k_blast_scache_page_indexed)(unsigned long addr);
 430
 431static void r4k_blast_scache_page_indexed_setup(void)
 432{
 433	unsigned long sc_lsize = cpu_scache_line_size();
 434
 435	if (scache_size == 0)
 436		r4k_blast_scache_page_indexed = (void *)cache_noop;
 437	else if (sc_lsize == 16)
 438		r4k_blast_scache_page_indexed = blast_scache16_page_indexed;
 439	else if (sc_lsize == 32)
 440		r4k_blast_scache_page_indexed = blast_scache32_page_indexed;
 441	else if (sc_lsize == 64)
 442		r4k_blast_scache_page_indexed = blast_scache64_page_indexed;
 443	else if (sc_lsize == 128)
 444		r4k_blast_scache_page_indexed = blast_scache128_page_indexed;
 445}
 446
  447static void (*r4k_blast_scache)(void);
 448
 449static void r4k_blast_scache_setup(void)
 450{
 451	unsigned long sc_lsize = cpu_scache_line_size();
 452
 453	if (scache_size == 0)
 454		r4k_blast_scache = (void *)cache_noop;
 455	else if (sc_lsize == 16)
 456		r4k_blast_scache = blast_scache16;
 457	else if (sc_lsize == 32)
 458		r4k_blast_scache = blast_scache32;
 459	else if (sc_lsize == 64)
 460		r4k_blast_scache = blast_scache64;
 461	else if (sc_lsize == 128)
 462		r4k_blast_scache = blast_scache128;
 463}
 464
 465static void (*r4k_blast_scache_node)(long node);
 466
 467static void r4k_blast_scache_node_setup(void)
 468{
 469	unsigned long sc_lsize = cpu_scache_line_size();
 470
 471	if (current_cpu_type() != CPU_LOONGSON64)
 472		r4k_blast_scache_node = (void *)cache_noop;
 473	else if (sc_lsize == 16)
 474		r4k_blast_scache_node = blast_scache16_node;
 475	else if (sc_lsize == 32)
 476		r4k_blast_scache_node = blast_scache32_node;
 477	else if (sc_lsize == 64)
 478		r4k_blast_scache_node = blast_scache64_node;
 479	else if (sc_lsize == 128)
 480		r4k_blast_scache_node = blast_scache128_node;
 481}
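/*
 * Loongson-3 has one S-cache per node, hence a node-local variant of
 * r4k_blast_scache().  Callers pass get_ebase_cpunum() >> 2 as the
 * node, i.e. they assume four cores per node, matching how Loongson-3
 * parts are packaged.
 */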
 482
  483static inline void local_r4k___flush_cache_all(void *args)
 484{
 485	switch (current_cpu_type()) {
 486	case CPU_LOONGSON2EF:
 487	case CPU_R4000SC:
 488	case CPU_R4000MC:
 489	case CPU_R4400SC:
 490	case CPU_R4400MC:
 491	case CPU_R10000:
 492	case CPU_R12000:
 493	case CPU_R14000:
 494	case CPU_R16000:
 495		/*
 496		 * These caches are inclusive caches, that is, if something
 497		 * is not cached in the S-cache, we know it also won't be
 498		 * in one of the primary caches.
 499		 */
 500		r4k_blast_scache();
 501		break;
 502
 503	case CPU_LOONGSON64:
 504		/* Use get_ebase_cpunum() for both NUMA=y/n */
 505		r4k_blast_scache_node(get_ebase_cpunum() >> 2);
 506		break;
 507
 508	case CPU_BMIPS5000:
 509		r4k_blast_scache();
 510		__sync();
 511		break;
 512
 513	default:
 514		r4k_blast_dcache();
 515		r4k_blast_icache();
 516		break;
 517	}
 518}
 519
 520static void r4k___flush_cache_all(void)
 521{
 522	r4k_on_each_cpu(R4K_INDEX, local_r4k___flush_cache_all, NULL);
 523}
 524
 525/**
 526 * has_valid_asid() - Determine if an mm already has an ASID.
 527 * @mm:		Memory map.
 528 * @type:	R4K_HIT or R4K_INDEX, type of cache op.
 529 *
 530 * Determines whether @mm already has an ASID on any of the CPUs which cache ops
 531 * of type @type within an r4k_on_each_cpu() call will affect. If
 532 * r4k_on_each_cpu() does an SMP call to a single VPE in each core, then the
 533 * scope of the operation is confined to sibling CPUs, otherwise all online CPUs
 534 * will need to be checked.
 535 *
 536 * Must be called in non-preemptive context.
 537 *
 538 * Returns:	1 if the CPUs affected by @type cache ops have an ASID for @mm.
 539 *		0 otherwise.
 540 */
 541static inline int has_valid_asid(const struct mm_struct *mm, unsigned int type)
 542{
 543	unsigned int i;
 544	const cpumask_t *mask = cpu_present_mask;
 545
 546	if (cpu_has_mmid)
 547		return cpu_context(0, mm) != 0;
 548
 549	/* cpu_sibling_map[] undeclared when !CONFIG_SMP */
 550#ifdef CONFIG_SMP
 551	/*
 552	 * If r4k_on_each_cpu does SMP calls, it does them to a single VPE in
 553	 * each foreign core, so we only need to worry about siblings.
 554	 * Otherwise we need to worry about all present CPUs.
 555	 */
 556	if (r4k_op_needs_ipi(type))
 557		mask = &cpu_sibling_map[smp_processor_id()];
 558#endif
 559	for_each_cpu(i, mask)
 560		if (cpu_context(i, mm))
 561			return 1;
 562	return 0;
 563}
 564
 565static void r4k__flush_cache_vmap(void)
 566{
 567	r4k_blast_dcache();
 568}
 569
 570static void r4k__flush_cache_vunmap(void)
 571{
 572	r4k_blast_dcache();
 573}
 574
 575/*
 576 * Note: flush_tlb_range() assumes flush_cache_range() sufficiently flushes
 577 * whole caches when vma is executable.
 578 */
  579static inline void local_r4k_flush_cache_range(void *args)
 580{
 581	struct vm_area_struct *vma = args;
 582	int exec = vma->vm_flags & VM_EXEC;
 583
 584	if (!has_valid_asid(vma->vm_mm, R4K_INDEX))
 585		return;
 586
 587	/*
 588	 * If dcache can alias, we must blast it since mapping is changing.
 589	 * If executable, we must ensure any dirty lines are written back far
 590	 * enough to be visible to icache.
 591	 */
 592	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc))
 593		r4k_blast_dcache();
 594	/* If executable, blast stale lines from icache */
 595	if (exec)
 596		r4k_blast_icache();
 597}
 598
 599static void r4k_flush_cache_range(struct vm_area_struct *vma,
 600	unsigned long start, unsigned long end)
 601{
 602	int exec = vma->vm_flags & VM_EXEC;
 603
 604	if (cpu_has_dc_aliases || exec)
 605		r4k_on_each_cpu(R4K_INDEX, local_r4k_flush_cache_range, vma);
 606}
 607
  608static inline void local_r4k_flush_cache_mm(void *args)
 609{
 610	struct mm_struct *mm = args;
 611
 612	if (!has_valid_asid(mm, R4K_INDEX))
 613		return;
 614
 615	/*
 616	 * Kludge alert.  For obscure reasons R4000SC and R4400SC go nuts if we
  617	 * flush only the primary caches, while R1x000 behaves sanely.
 618	 * R4000SC and R4400SC indexed S-cache ops also invalidate primary
 619	 * caches, so we can bail out early.
 620	 */
 621	if (current_cpu_type() == CPU_R4000SC ||
 622	    current_cpu_type() == CPU_R4000MC ||
 623	    current_cpu_type() == CPU_R4400SC ||
 624	    current_cpu_type() == CPU_R4400MC) {
 625		r4k_blast_scache();
 626		return;
 627	}
 628
 629	r4k_blast_dcache();
 630}
 631
 632static void r4k_flush_cache_mm(struct mm_struct *mm)
 633{
 634	if (!cpu_has_dc_aliases)
 635		return;
 636
 637	r4k_on_each_cpu(R4K_INDEX, local_r4k_flush_cache_mm, mm);
 638}
 639
 640struct flush_cache_page_args {
 641	struct vm_area_struct *vma;
 642	unsigned long addr;
 643	unsigned long pfn;
 644};
 645
 646static inline void local_r4k_flush_cache_page(void *args)
 647{
 648	struct flush_cache_page_args *fcp_args = args;
 649	struct vm_area_struct *vma = fcp_args->vma;
 650	unsigned long addr = fcp_args->addr;
 651	struct page *page = pfn_to_page(fcp_args->pfn);
 652	int exec = vma->vm_flags & VM_EXEC;
 653	struct mm_struct *mm = vma->vm_mm;
 654	int map_coherent = 0;
 655	pmd_t *pmdp;
 656	pte_t *ptep;
 657	void *vaddr;
 658
 659	/*
  660	 * If the mm owns no valid ASID yet, it cannot possibly have gotten
 661	 * this page into the cache.
 662	 */
 663	if (!has_valid_asid(mm, R4K_HIT))
 664		return;
 665
 666	addr &= PAGE_MASK;
 667	pmdp = pmd_off(mm, addr);
 668	ptep = pte_offset_kernel(pmdp, addr);
 669
 670	/*
 671	 * If the page isn't marked valid, the page cannot possibly be
 672	 * in the cache.
 673	 */
 674	if (!(pte_present(*ptep)))
 675		return;
 676
 677	if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID))
 678		vaddr = NULL;
 679	else {
 680		/*
 681		 * Use kmap_coherent or kmap_atomic to do flushes for
 682		 * another ASID than the current one.
 683		 */
 684		map_coherent = (cpu_has_dc_aliases &&
 685				page_mapcount(page) &&
 686				!Page_dcache_dirty(page));
 687		if (map_coherent)
 688			vaddr = kmap_coherent(page, addr);
 689		else
 690			vaddr = kmap_atomic(page);
 691		addr = (unsigned long)vaddr;
 692	}
 693
 694	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
 695		vaddr ? r4k_blast_dcache_page(addr) :
 696			r4k_blast_dcache_user_page(addr);
 697		if (exec && !cpu_icache_snoops_remote_store)
 698			r4k_blast_scache_page(addr);
 699	}
 700	if (exec) {
 701		if (vaddr && cpu_has_vtag_icache && mm == current->active_mm) {
 702			drop_mmu_context(mm);
 703		} else
 704			vaddr ? r4k_blast_icache_page(addr) :
 705				r4k_blast_icache_user_page(addr);
 706	}
 707
 708	if (vaddr) {
 709		if (map_coherent)
 710			kunmap_coherent();
 711		else
 712			kunmap_atomic(vaddr);
 713	}
 714}
 715
 716static void r4k_flush_cache_page(struct vm_area_struct *vma,
 717	unsigned long addr, unsigned long pfn)
 718{
 719	struct flush_cache_page_args args;
 720
 721	args.vma = vma;
 722	args.addr = addr;
 723	args.pfn = pfn;
 724
 725	r4k_on_each_cpu(R4K_HIT, local_r4k_flush_cache_page, &args);
 726}
 727
  728static inline void local_r4k_flush_data_cache_page(void *addr)
 729{
 730	r4k_blast_dcache_page((unsigned long) addr);
 731}
 732
 733static void r4k_flush_data_cache_page(unsigned long addr)
 734{
 735	if (in_atomic())
 736		local_r4k_flush_data_cache_page((void *)addr);
 737	else
 738		r4k_on_each_cpu(R4K_HIT, local_r4k_flush_data_cache_page,
 739				(void *) addr);
 740}
 741
 742struct flush_icache_range_args {
 743	unsigned long start;
 744	unsigned long end;
 745	unsigned int type;
 746	bool user;
 747};
 748
 749static inline void __local_r4k_flush_icache_range(unsigned long start,
 750						  unsigned long end,
 751						  unsigned int type,
 752						  bool user)
 753{
 754	if (!cpu_has_ic_fills_f_dc) {
 755		if (type == R4K_INDEX ||
 756		    (type & R4K_INDEX && end - start >= dcache_size)) {
 757			r4k_blast_dcache();
 758		} else {
 759			R4600_HIT_CACHEOP_WAR_IMPL;
 760			if (user)
 761				protected_blast_dcache_range(start, end);
 762			else
 763				blast_dcache_range(start, end);
 764		}
 765	}
 766
 767	if (type == R4K_INDEX ||
 768	    (type & R4K_INDEX && end - start > icache_size))
 769		r4k_blast_icache();
 770	else {
 771		switch (boot_cpu_type()) {
 772		case CPU_LOONGSON2EF:
 773			protected_loongson2_blast_icache_range(start, end);
 774			break;
 775
 776		default:
 777			if (user)
 778				protected_blast_icache_range(start, end);
 779			else
 780				blast_icache_range(start, end);
 781			break;
 782		}
 783	}
 784}
 785
 786static inline void local_r4k_flush_icache_range(unsigned long start,
 787						unsigned long end)
 788{
 789	__local_r4k_flush_icache_range(start, end, R4K_HIT | R4K_INDEX, false);
 790}
 791
 792static inline void local_r4k_flush_icache_user_range(unsigned long start,
 793						     unsigned long end)
 794{
 795	__local_r4k_flush_icache_range(start, end, R4K_HIT | R4K_INDEX, true);
 796}
 797
 798static inline void local_r4k_flush_icache_range_ipi(void *args)
 799{
 800	struct flush_icache_range_args *fir_args = args;
 801	unsigned long start = fir_args->start;
 802	unsigned long end = fir_args->end;
 803	unsigned int type = fir_args->type;
 804	bool user = fir_args->user;
 805
 806	__local_r4k_flush_icache_range(start, end, type, user);
 807}
 808
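/*
 * Worked example for the heuristic below, with hypothetical 32kB
 * I- and D-caches and no cpu_has_ic_fills_f_dc: flushing 16kB doubles
 * size to 32kB (the range is blasted from both caches), which is
 * <= icache_size + dcache_size = 64kB, so R4K_INDEX is dropped and
 * the flush stays on cheap address-based (R4K_HIT) ops with no IPIs
 * for indexed cache ops.
 */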
 809static void __r4k_flush_icache_range(unsigned long start, unsigned long end,
 810				     bool user)
 811{
 812	struct flush_icache_range_args args;
 813	unsigned long size, cache_size;
 814
 815	args.start = start;
 816	args.end = end;
 817	args.type = R4K_HIT | R4K_INDEX;
 818	args.user = user;
 819
 820	/*
 821	 * Indexed cache ops require an SMP call.
 822	 * Consider if that can or should be avoided.
 823	 */
 824	preempt_disable();
 825	if (r4k_op_needs_ipi(R4K_INDEX) && !r4k_op_needs_ipi(R4K_HIT)) {
 826		/*
 827		 * If address-based cache ops don't require an SMP call, then
 828		 * use them exclusively for small flushes.
 829		 */
 830		size = end - start;
 831		cache_size = icache_size;
 832		if (!cpu_has_ic_fills_f_dc) {
 833			size *= 2;
 834			cache_size += dcache_size;
 835		}
 836		if (size <= cache_size)
 837			args.type &= ~R4K_INDEX;
 838	}
 839	r4k_on_each_cpu(args.type, local_r4k_flush_icache_range_ipi, &args);
 840	preempt_enable();
 841	instruction_hazard();
 842}
 843
 844static void r4k_flush_icache_range(unsigned long start, unsigned long end)
 845{
 846	return __r4k_flush_icache_range(start, end, false);
 847}
 848
 849static void r4k_flush_icache_user_range(unsigned long start, unsigned long end)
 850{
 851	return __r4k_flush_icache_range(start, end, true);
 852}
 853
 854#ifdef CONFIG_DMA_NONCOHERENT
 855
 856static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
 857{
 858	/* Catch bad driver code */
 859	if (WARN_ON(size == 0))
 860		return;
 861
 862	preempt_disable();
 863	if (cpu_has_inclusive_pcaches) {
 864		if (size >= scache_size) {
 865			if (current_cpu_type() != CPU_LOONGSON64)
 866				r4k_blast_scache();
 867			else
 868				r4k_blast_scache_node(pa_to_nid(addr));
 869		} else {
 870			blast_scache_range(addr, addr + size);
 871		}
 872		preempt_enable();
 873		__sync();
 874		return;
 875	}
 876
 877	/*
 878	 * Either no secondary cache or the available caches don't have the
 879	 * subset property so we have to flush the primary caches
 880	 * explicitly.
 881	 * If we would need IPI to perform an INDEX-type operation, then
 882	 * we have to use the HIT-type alternative as IPI cannot be used
 883	 * here due to interrupts possibly being disabled.
 884	 */
 885	if (!r4k_op_needs_ipi(R4K_INDEX) && size >= dcache_size) {
 886		r4k_blast_dcache();
 887	} else {
 888		R4600_HIT_CACHEOP_WAR_IMPL;
 889		blast_dcache_range(addr, addr + size);
 890	}
 891	preempt_enable();
 892
 893	bc_wback_inv(addr, size);
 894	__sync();
 895}
 896
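/*
 * Used on BMIPS5000 only (see r4k_dma_cache_inv() below): before a
 * DMA invalidate, write back up to the first two and last two S-cache
 * lines of the range.  If the buffer is not line-aligned, those
 * boundary lines can hold unrelated dirty data that a plain
 * invalidate would destroy; the name suggests the core's hardware
 * prefetcher can pull such lines in as well.
 */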
 897static void prefetch_cache_inv(unsigned long addr, unsigned long size)
 898{
 899	unsigned int linesz = cpu_scache_line_size();
 900	unsigned long addr0 = addr, addr1;
 901
 902	addr0 &= ~(linesz - 1);
 903	addr1 = (addr0 + size - 1) & ~(linesz - 1);
 904
 905	protected_writeback_scache_line(addr0);
 906	if (likely(addr1 != addr0))
 907		protected_writeback_scache_line(addr1);
 908	else
 909		return;
 910
 911	addr0 += linesz;
 912	if (likely(addr1 != addr0))
 913		protected_writeback_scache_line(addr0);
 914	else
 915		return;
 916
 917	addr1 -= linesz;
 918	if (likely(addr1 > addr0))
  919			protected_writeback_scache_line(addr1);
 920}
 921
 922static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
 923{
 924	/* Catch bad driver code */
 925	if (WARN_ON(size == 0))
 926		return;
 927
 928	preempt_disable();
 929
 930	if (current_cpu_type() == CPU_BMIPS5000)
 931		prefetch_cache_inv(addr, size);
 932
 933	if (cpu_has_inclusive_pcaches) {
 934		if (size >= scache_size) {
 935			if (current_cpu_type() != CPU_LOONGSON64)
 936				r4k_blast_scache();
 937			else
 938				r4k_blast_scache_node(pa_to_nid(addr));
 939		} else {
 940			/*
 941			 * There is no clearly documented alignment requirement
  942			 * for the cache instruction on MIPS processors, and some
  943			 * of them, among others the QED RM5200 and RM7000, will
  944			 * throw an address error for cache hit ops with
  945			 * insufficient alignment.  Solved by
 946			 * aligning the address to cache line size.
 947			 */
 948			blast_inv_scache_range(addr, addr + size);
 949		}
 950		preempt_enable();
 951		__sync();
 952		return;
 953	}
 954
 955	if (!r4k_op_needs_ipi(R4K_INDEX) && size >= dcache_size) {
 956		r4k_blast_dcache();
 957	} else {
 958		R4600_HIT_CACHEOP_WAR_IMPL;
 959		blast_inv_dcache_range(addr, addr + size);
 960	}
 961	preempt_enable();
 962
 963	bc_inv(addr, size);
 964	__sync();
 965}
 966#endif /* CONFIG_DMA_NONCOHERENT */
 967
 968static void r4k_flush_icache_all(void)
 969{
 970	if (cpu_has_vtag_icache)
 971		r4k_blast_icache();
 972}
 973
 974struct flush_kernel_vmap_range_args {
 975	unsigned long	vaddr;
 976	int		size;
 977};
 978
 979static inline void local_r4k_flush_kernel_vmap_range_index(void *args)
 980{
 981	/*
 982	 * Aliases only affect the primary caches so don't bother with
 983	 * S-caches or T-caches.
 984	 */
 985	r4k_blast_dcache();
 986}
 987
 988static inline void local_r4k_flush_kernel_vmap_range(void *args)
 989{
 990	struct flush_kernel_vmap_range_args *vmra = args;
 991	unsigned long vaddr = vmra->vaddr;
 992	int size = vmra->size;
 993
 994	/*
 995	 * Aliases only affect the primary caches so don't bother with
 996	 * S-caches or T-caches.
 997	 */
 998	R4600_HIT_CACHEOP_WAR_IMPL;
 999	blast_dcache_range(vaddr, vaddr + size);
1000}
1001
1002static void r4k_flush_kernel_vmap_range(unsigned long vaddr, int size)
1003{
1004	struct flush_kernel_vmap_range_args args;
1005
1006	args.vaddr = (unsigned long) vaddr;
1007	args.size = size;
1008
1009	if (size >= dcache_size)
1010		r4k_on_each_cpu(R4K_INDEX,
1011				local_r4k_flush_kernel_vmap_range_index, NULL);
1012	else
1013		r4k_on_each_cpu(R4K_HIT, local_r4k_flush_kernel_vmap_range,
1014				&args);
1015}
1016
1017static inline void rm7k_erratum31(void)
1018{
1019	const unsigned long ic_lsize = 32;
1020	unsigned long addr;
1021
1022	/* RM7000 erratum #31. The icache is screwed at startup. */
1023	write_c0_taglo(0);
1024	write_c0_taghi(0);
1025
1026	for (addr = INDEX_BASE; addr <= INDEX_BASE + 4096; addr += ic_lsize) {
1027		__asm__ __volatile__ (
1028			".set push\n\t"
1029			".set noreorder\n\t"
1030			".set mips3\n\t"
1031			"cache\t%1, 0(%0)\n\t"
1032			"cache\t%1, 0x1000(%0)\n\t"
1033			"cache\t%1, 0x2000(%0)\n\t"
1034			"cache\t%1, 0x3000(%0)\n\t"
1035			"cache\t%2, 0(%0)\n\t"
1036			"cache\t%2, 0x1000(%0)\n\t"
1037			"cache\t%2, 0x2000(%0)\n\t"
1038			"cache\t%2, 0x3000(%0)\n\t"
1039			"cache\t%1, 0(%0)\n\t"
1040			"cache\t%1, 0x1000(%0)\n\t"
1041			"cache\t%1, 0x2000(%0)\n\t"
1042			"cache\t%1, 0x3000(%0)\n\t"
1043			".set pop\n"
1044			:
1045			: "r" (addr), "i" (Index_Store_Tag_I), "i" (Fill_I));
1046	}
1047}
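/*
 * A plausible reading of the workaround above (the erratum text is
 * not reproduced here): the 0x1000 offsets hit the same set in each
 * 4kB way of the RM7000 I-cache, so every way of each index in the
 * first page gets a cleared tag, a fill, and a cleared tag again.
 */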
1048
1049static inline int alias_74k_erratum(struct cpuinfo_mips *c)
1050{
1051	unsigned int imp = c->processor_id & PRID_IMP_MASK;
1052	unsigned int rev = c->processor_id & PRID_REV_MASK;
1053	int present = 0;
1054
1055	/*
1056	 * Early versions of the 74K do not update the cache tags on a
1057	 * vtag miss/ptag hit which can occur in the case of KSEG0/KUSEG
1058	 * aliases.  In this case it is better to treat the cache as always
1059	 * having aliases.  Also disable the synonym tag update feature
1060	 * where available.  In this case no opportunistic tag update will
1061	 * happen where a load causes a virtual address miss but a physical
1062	 * address hit during a D-cache look-up.
1063	 */
1064	switch (imp) {
1065	case PRID_IMP_74K:
1066		if (rev <= PRID_REV_ENCODE_332(2, 4, 0))
1067			present = 1;
1068		if (rev == PRID_REV_ENCODE_332(2, 4, 0))
1069			write_c0_config6(read_c0_config6() | MTI_CONF6_SYND);
1070		break;
1071	case PRID_IMP_1074K:
1072		if (rev <= PRID_REV_ENCODE_332(1, 1, 0)) {
1073			present = 1;
1074			write_c0_config6(read_c0_config6() | MTI_CONF6_SYND);
1075		}
1076		break;
1077	default:
1078		BUG();
1079	}
1080
1081	return present;
1082}
1083
1084static void b5k_instruction_hazard(void)
1085{
1086	__sync();
1087	__sync();
1088	__asm__ __volatile__(
1089	"       nop; nop; nop; nop; nop; nop; nop; nop\n"
1090	"       nop; nop; nop; nop; nop; nop; nop; nop\n"
1091	"       nop; nop; nop; nop; nop; nop; nop; nop\n"
1092	"       nop; nop; nop; nop; nop; nop; nop; nop\n"
1093	: : : "memory");
1094}
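/*
 * On BMIPS5000 the I$ fills from the D$ (see r4k_cache_init() below),
 * so most flush hooks are simply pointed at this routine: two SYNCs
 * plus a train of NOPs drain the write buffer and the pipeline, which
 * is enough to make freshly written code visible to instruction
 * fetch.
 */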
1095
1096static char *way_string[] = { NULL, "direct mapped", "2-way",
1097	"3-way", "4-way", "5-way", "6-way", "7-way", "8-way",
1098	"9-way", "10-way", "11-way", "12-way",
1099	"13-way", "14-way", "15-way", "16-way",
1100};
1101
1102static void probe_pcache(void)
1103{
1104	struct cpuinfo_mips *c = &current_cpu_data;
1105	unsigned int config = read_c0_config();
1106	unsigned int prid = read_c0_prid();
1107	int has_74k_erratum = 0;
1108	unsigned long config1;
1109	unsigned int lsize;
1110
1111	switch (current_cpu_type()) {
1112	case CPU_R4600:			/* QED style two way caches? */
1113	case CPU_R4700:
1114	case CPU_R5000:
1115	case CPU_NEVADA:
1116		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
1117		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
1118		c->icache.ways = 2;
1119		c->icache.waybit = __ffs(icache_size/2);
1120
1121		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
1122		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
1123		c->dcache.ways = 2;
  1124		c->dcache.waybit = __ffs(dcache_size/2);
1125
1126		c->options |= MIPS_CPU_CACHE_CDEX_P;
1127		break;
1128
1129	case CPU_R5500:
1130		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
1131		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
1132		c->icache.ways = 2;
  1133		c->icache.waybit = 0;
1134
1135		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
1136		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
1137		c->dcache.ways = 2;
1138		c->dcache.waybit = 0;
1139
1140		c->options |= MIPS_CPU_CACHE_CDEX_P | MIPS_CPU_PREFETCH;
1141		break;
1142
1143	case CPU_TX49XX:
1144		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
1145		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
1146		c->icache.ways = 4;
  1147		c->icache.waybit = 0;
1148
1149		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
1150		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
1151		c->dcache.ways = 4;
1152		c->dcache.waybit = 0;
1153
1154		c->options |= MIPS_CPU_CACHE_CDEX_P;
1155		c->options |= MIPS_CPU_PREFETCH;
1156		break;
1157
1158	case CPU_R4000PC:
1159	case CPU_R4000SC:
1160	case CPU_R4000MC:
1161	case CPU_R4400PC:
1162	case CPU_R4400SC:
1163	case CPU_R4400MC:
1164		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
1165		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
1166		c->icache.ways = 1;
1167		c->icache.waybit = 0;	/* doesn't matter */
1168
1169		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
1170		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
1171		c->dcache.ways = 1;
1172		c->dcache.waybit = 0;	/* does not matter */
1173
1174		c->options |= MIPS_CPU_CACHE_CDEX_P;
1175		break;
1176
1177	case CPU_R10000:
1178	case CPU_R12000:
1179	case CPU_R14000:
1180	case CPU_R16000:
1181		icache_size = 1 << (12 + ((config & R10K_CONF_IC) >> 29));
1182		c->icache.linesz = 64;
1183		c->icache.ways = 2;
1184		c->icache.waybit = 0;
1185
1186		dcache_size = 1 << (12 + ((config & R10K_CONF_DC) >> 26));
1187		c->dcache.linesz = 32;
1188		c->dcache.ways = 2;
1189		c->dcache.waybit = 0;
1190
1191		c->options |= MIPS_CPU_PREFETCH;
1192		break;
1193
1194	case CPU_VR4133:
1195		write_c0_config(config & ~VR41_CONF_P4K);
1196		fallthrough;
1197	case CPU_VR4131:
1198		/* Workaround for cache instruction bug of VR4131 */
1199		if (c->processor_id == 0x0c80U || c->processor_id == 0x0c81U ||
1200		    c->processor_id == 0x0c82U) {
1201			config |= 0x00400000U;
1202			if (c->processor_id == 0x0c80U)
1203				config |= VR41_CONF_BP;
1204			write_c0_config(config);
1205		} else
1206			c->options |= MIPS_CPU_CACHE_CDEX_P;
1207
1208		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
1209		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
1210		c->icache.ways = 2;
1211		c->icache.waybit = __ffs(icache_size/2);
1212
1213		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
1214		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
1215		c->dcache.ways = 2;
1216		c->dcache.waybit = __ffs(dcache_size/2);
1217		break;
1218
1219	case CPU_VR41XX:
1220	case CPU_VR4111:
1221	case CPU_VR4121:
1222	case CPU_VR4122:
1223	case CPU_VR4181:
1224	case CPU_VR4181A:
1225		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
1226		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
1227		c->icache.ways = 1;
1228		c->icache.waybit = 0;	/* doesn't matter */
1229
1230		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
1231		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
1232		c->dcache.ways = 1;
1233		c->dcache.waybit = 0;	/* does not matter */
1234
1235		c->options |= MIPS_CPU_CACHE_CDEX_P;
1236		break;
1237
1238	case CPU_RM7000:
1239		rm7k_erratum31();
1240
1241		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
1242		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
1243		c->icache.ways = 4;
1244		c->icache.waybit = __ffs(icache_size / c->icache.ways);
1245
1246		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
1247		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
1248		c->dcache.ways = 4;
1249		c->dcache.waybit = __ffs(dcache_size / c->dcache.ways);
1250
1251		c->options |= MIPS_CPU_CACHE_CDEX_P;
1252		c->options |= MIPS_CPU_PREFETCH;
1253		break;
1254
1255	case CPU_LOONGSON2EF:
1256		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
1257		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
1258		if (prid & 0x3)
1259			c->icache.ways = 4;
1260		else
1261			c->icache.ways = 2;
1262		c->icache.waybit = 0;
1263
1264		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
1265		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
1266		if (prid & 0x3)
1267			c->dcache.ways = 4;
1268		else
1269			c->dcache.ways = 2;
1270		c->dcache.waybit = 0;
1271		break;
1272
1273	case CPU_LOONGSON64:
1274		config1 = read_c0_config1();
1275		lsize = (config1 >> 19) & 7;
1276		if (lsize)
1277			c->icache.linesz = 2 << lsize;
1278		else
1279			c->icache.linesz = 0;
1280		c->icache.sets = 64 << ((config1 >> 22) & 7);
1281		c->icache.ways = 1 + ((config1 >> 16) & 7);
1282		icache_size = c->icache.sets *
1283					  c->icache.ways *
1284					  c->icache.linesz;
1285		c->icache.waybit = 0;
1286
1287		lsize = (config1 >> 10) & 7;
1288		if (lsize)
1289			c->dcache.linesz = 2 << lsize;
1290		else
1291			c->dcache.linesz = 0;
1292		c->dcache.sets = 64 << ((config1 >> 13) & 7);
1293		c->dcache.ways = 1 + ((config1 >> 7) & 7);
1294		dcache_size = c->dcache.sets *
1295					  c->dcache.ways *
1296					  c->dcache.linesz;
1297		c->dcache.waybit = 0;
1298		if ((c->processor_id & (PRID_IMP_MASK | PRID_REV_MASK)) >=
1299				(PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3A_R2_0) ||
1300				(c->processor_id & PRID_IMP_MASK) == PRID_IMP_LOONGSON_64R)
1301			c->options |= MIPS_CPU_PREFETCH;
1302		break;
1303
1304	case CPU_CAVIUM_OCTEON3:
1305		/* For now lie about the number of ways. */
1306		c->icache.linesz = 128;
1307		c->icache.sets = 16;
1308		c->icache.ways = 8;
1309		c->icache.flags |= MIPS_CACHE_VTAG;
1310		icache_size = c->icache.sets * c->icache.ways * c->icache.linesz;
1311
1312		c->dcache.linesz = 128;
1313		c->dcache.ways = 8;
1314		c->dcache.sets = 8;
1315		dcache_size = c->dcache.sets * c->dcache.ways * c->dcache.linesz;
1316		c->options |= MIPS_CPU_PREFETCH;
1317		break;
1318
1319	default:
1320		if (!(config & MIPS_CONF_M))
1321			panic("Don't know how to probe P-caches on this cpu.");
1322
1323		/*
  1324		 * We seem to be a MIPS32 or MIPS64 CPU, so
  1325		 * let's probe the I-cache ...
1326		 */
1327		config1 = read_c0_config1();
1328
1329		lsize = (config1 >> 19) & 7;
1330
1331		/* IL == 7 is reserved */
1332		if (lsize == 7)
1333			panic("Invalid icache line size");
1334
1335		c->icache.linesz = lsize ? 2 << lsize : 0;
1336
1337		c->icache.sets = 32 << (((config1 >> 22) + 1) & 7);
1338		c->icache.ways = 1 + ((config1 >> 16) & 7);
1339
1340		icache_size = c->icache.sets *
1341			      c->icache.ways *
1342			      c->icache.linesz;
1343		c->icache.waybit = __ffs(icache_size/c->icache.ways);
1344
1345		if (config & MIPS_CONF_VI)
1346			c->icache.flags |= MIPS_CACHE_VTAG;
1347
1348		/*
1349		 * Now probe the MIPS32 / MIPS64 data cache.
1350		 */
1351		c->dcache.flags = 0;
1352
1353		lsize = (config1 >> 10) & 7;
1354
1355		/* DL == 7 is reserved */
1356		if (lsize == 7)
1357			panic("Invalid dcache line size");
1358
1359		c->dcache.linesz = lsize ? 2 << lsize : 0;
1360
1361		c->dcache.sets = 32 << (((config1 >> 13) + 1) & 7);
1362		c->dcache.ways = 1 + ((config1 >> 7) & 7);
1363
1364		dcache_size = c->dcache.sets *
1365			      c->dcache.ways *
1366			      c->dcache.linesz;
1367		c->dcache.waybit = __ffs(dcache_size/c->dcache.ways);
1368
1369		c->options |= MIPS_CPU_PREFETCH;
1370		break;
1371	}
1372
1373	/*
1374	 * Processor configuration sanity check for the R4000SC erratum
1375	 * #5.	With page sizes larger than 32kB there is no possibility
1376	 * to get a VCE exception anymore so we don't care about this
1377	 * misconfiguration.  The case is rather theoretical anyway;
1378	 * presumably no vendor is shipping his hardware in the "bad"
1379	 * configuration.
1380	 */
1381	if ((prid & PRID_IMP_MASK) == PRID_IMP_R4000 &&
1382	    (prid & PRID_REV_MASK) < PRID_REV_R4400 &&
1383	    !(config & CONF_SC) && c->icache.linesz != 16 &&
1384	    PAGE_SIZE <= 0x8000)
1385		panic("Improper R4000SC processor configuration detected");
1386
1387	/* compute a couple of other cache variables */
1388	c->icache.waysize = icache_size / c->icache.ways;
1389	c->dcache.waysize = dcache_size / c->dcache.ways;
1390
1391	c->icache.sets = c->icache.linesz ?
1392		icache_size / (c->icache.linesz * c->icache.ways) : 0;
1393	c->dcache.sets = c->dcache.linesz ?
1394		dcache_size / (c->dcache.linesz * c->dcache.ways) : 0;
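	/*
	 * Worked example (hypothetical numbers): a 32kB 4-way I-cache
	 * with 32-byte lines yields waysize = 32k / 4 = 8kB and
	 * sets = 32k / (32 * 4) = 256; for the generic MIPS32/64 case
	 * above that also means waybit = __ffs(8192) = 13.
	 */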
1395
1396	/*
1397	 * R1x000 P-caches are odd in a positive way.  They're 32kB 2-way
1398	 * virtually indexed so normally would suffer from aliases.  So
1399	 * normally they'd suffer from aliases but magic in the hardware deals
1400	 * with that for us so we don't need to take care ourselves.
1401	 */
1402	switch (current_cpu_type()) {
1403	case CPU_20KC:
1404	case CPU_25KF:
1405	case CPU_I6400:
1406	case CPU_I6500:
1407	case CPU_SB1:
1408	case CPU_SB1A:
1409	case CPU_XLR:
1410		c->dcache.flags |= MIPS_CACHE_PINDEX;
1411		break;
1412
1413	case CPU_R10000:
1414	case CPU_R12000:
1415	case CPU_R14000:
1416	case CPU_R16000:
1417		break;
1418
1419	case CPU_74K:
1420	case CPU_1074K:
1421		has_74k_erratum = alias_74k_erratum(c);
1422		fallthrough;
1423	case CPU_M14KC:
1424	case CPU_M14KEC:
1425	case CPU_24K:
1426	case CPU_34K:
1427	case CPU_1004K:
1428	case CPU_INTERAPTIV:
1429	case CPU_P5600:
1430	case CPU_PROAPTIV:
1431	case CPU_M5150:
1432	case CPU_QEMU_GENERIC:
1433	case CPU_P6600:
1434	case CPU_M6250:
1435		if (!(read_c0_config7() & MIPS_CONF7_IAR) &&
1436		    (c->icache.waysize > PAGE_SIZE))
1437			c->icache.flags |= MIPS_CACHE_ALIASES;
1438		if (!has_74k_erratum && (read_c0_config7() & MIPS_CONF7_AR)) {
1439			/*
1440			 * Effectively physically indexed dcache,
1441			 * thus no virtual aliases.
1442			*/
1443			c->dcache.flags |= MIPS_CACHE_PINDEX;
1444			break;
1445		}
1446		fallthrough;
1447	default:
1448		if (has_74k_erratum || c->dcache.waysize > PAGE_SIZE)
1449			c->dcache.flags |= MIPS_CACHE_ALIASES;
1450	}
1451
1452	/* Physically indexed caches don't suffer from virtual aliasing */
1453	if (c->dcache.flags & MIPS_CACHE_PINDEX)
1454		c->dcache.flags &= ~MIPS_CACHE_ALIASES;
1455
1456	/*
1457	 * In systems with CM the icache fills from L2 or closer caches, and
1458	 * thus sees remote stores without needing to write them back any
1459	 * further than that.
1460	 */
1461	if (mips_cm_present())
1462		c->icache.flags |= MIPS_IC_SNOOPS_REMOTE;
1463
1464	switch (current_cpu_type()) {
1465	case CPU_20KC:
1466		/*
  1467		 * Some older 20Kc chips don't have the 'VI' bit in
1468		 * the config register.
1469		 */
1470		c->icache.flags |= MIPS_CACHE_VTAG;
1471		break;
1472
1473	case CPU_ALCHEMY:
1474	case CPU_I6400:
1475	case CPU_I6500:
1476		c->icache.flags |= MIPS_CACHE_IC_F_DC;
1477		break;
1478
1479	case CPU_BMIPS5000:
1480		c->icache.flags |= MIPS_CACHE_IC_F_DC;
1481		/* Cache aliases are handled in hardware; allow HIGHMEM */
1482		c->dcache.flags &= ~MIPS_CACHE_ALIASES;
1483		break;
1484
1485	case CPU_LOONGSON2EF:
1486		/*
1487		 * LOONGSON2 has 4 way icache, but when using indexed cache op,
1488		 * one op will act on all 4 ways
1489		 */
1490		c->icache.ways = 1;
1491	}
1492
1493	pr_info("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
1494		icache_size >> 10,
1495		c->icache.flags & MIPS_CACHE_VTAG ? "VIVT" : "VIPT",
1496		way_string[c->icache.ways], c->icache.linesz);
1497
1498	pr_info("Primary data cache %ldkB, %s, %s, %s, linesize %d bytes\n",
1499		dcache_size >> 10, way_string[c->dcache.ways],
1500		(c->dcache.flags & MIPS_CACHE_PINDEX) ? "PIPT" : "VIPT",
1501		(c->dcache.flags & MIPS_CACHE_ALIASES) ?
1502			"cache aliases" : "no aliases",
1503		c->dcache.linesz);
1504}
1505
1506static void probe_vcache(void)
1507{
1508	struct cpuinfo_mips *c = &current_cpu_data;
1509	unsigned int config2, lsize;
1510
1511	if (current_cpu_type() != CPU_LOONGSON64)
1512		return;
1513
1514	config2 = read_c0_config2();
1515	if ((lsize = ((config2 >> 20) & 15)))
1516		c->vcache.linesz = 2 << lsize;
1517	else
1518		c->vcache.linesz = lsize;
1519
1520	c->vcache.sets = 64 << ((config2 >> 24) & 15);
1521	c->vcache.ways = 1 + ((config2 >> 16) & 15);
1522
1523	vcache_size = c->vcache.sets * c->vcache.ways * c->vcache.linesz;
1524
1525	c->vcache.waybit = 0;
1526	c->vcache.waysize = vcache_size / c->vcache.ways;
1527
1528	pr_info("Unified victim cache %ldkB %s, linesize %d bytes.\n",
1529		vcache_size >> 10, way_string[c->vcache.ways], c->vcache.linesz);
1530}
1531
1532/*
1533 * If you even _breathe_ on this function, look at the gcc output and make sure
1534 * it does not pop things on and off the stack for the cache sizing loop that
1535 * executes in KSEG1 space or else you will crash and burn badly.  You have
1536 * been warned.
1537 */
1538static int probe_scache(void)
1539{
1540	unsigned long flags, addr, begin, end, pow2;
1541	unsigned int config = read_c0_config();
1542	struct cpuinfo_mips *c = &current_cpu_data;
1543
1544	if (config & CONF_SC)
1545		return 0;
1546
1547	begin = (unsigned long) &_stext;
1548	begin &= ~((4 * 1024 * 1024) - 1);
1549	end = begin + (4 * 1024 * 1024);
1550
1551	/*
1552	 * This is such a bitch, you'd think they would make it easy to do
1553	 * this.  Away you daemons of stupidity!
1554	 */
1555	local_irq_save(flags);
1556
1557	/* Fill each size-multiple cache line with a valid tag. */
1558	pow2 = (64 * 1024);
1559	for (addr = begin; addr < end; addr = (begin + pow2)) {
1560		unsigned long *p = (unsigned long *) addr;
1561		__asm__ __volatile__("nop" : : "r" (*p)); /* whee... */
1562		pow2 <<= 1;
1563	}
1564
1565	/* Load first line with zero (therefore invalid) tag. */
1566	write_c0_taglo(0);
1567	write_c0_taghi(0);
1568	__asm__ __volatile__("nop; nop; nop; nop;"); /* avoid the hazard */
1569	cache_op(Index_Store_Tag_I, begin);
1570	cache_op(Index_Store_Tag_D, begin);
1571	cache_op(Index_Store_Tag_SD, begin);
1572
1573	/* Now search for the wrap around point. */
1574	pow2 = (128 * 1024);
1575	for (addr = begin + (128 * 1024); addr < end; addr = begin + pow2) {
1576		cache_op(Index_Load_Tag_SD, addr);
1577		__asm__ __volatile__("nop; nop; nop; nop;"); /* hazard... */
1578		if (!read_c0_taglo())
1579			break;
1580		pow2 <<= 1;
1581	}
1582	local_irq_restore(flags);
1583	addr -= begin;
1584
1585	scache_size = addr;
1586	c->scache.linesz = 16 << ((config & R4K_CONF_SB) >> 22);
1587	c->scache.ways = 1;
1588	c->scache.waybit = 0;		/* does not matter */
1589
1590	return 1;
1591}
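/*
 * How the probe above works: lines are first primed with valid tags
 * at power-of-two strides, then a zero (invalid) tag is stored at
 * 'begin'.  Index_Load_Tag_SD is then issued at begin + 128k,
 * begin + 256k, ...; the first address whose tag reads back as zero
 * maps to the same S-cache index as 'begin', i.e. the address space
 * has wrapped around the cache, and that offset is the S-cache size.
 */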
1592
1593static void __init loongson2_sc_init(void)
1594{
1595	struct cpuinfo_mips *c = &current_cpu_data;
1596
1597	scache_size = 512*1024;
1598	c->scache.linesz = 32;
1599	c->scache.ways = 4;
1600	c->scache.waybit = 0;
1601	c->scache.waysize = scache_size / (c->scache.ways);
1602	c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);
1603	pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
1604	       scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);
1605
1606	c->options |= MIPS_CPU_INCLUSIVE_CACHES;
1607}
1608
1609static void __init loongson3_sc_init(void)
1610{
1611	struct cpuinfo_mips *c = &current_cpu_data;
1612	unsigned int config2, lsize;
1613
1614	config2 = read_c0_config2();
1615	lsize = (config2 >> 4) & 15;
1616	if (lsize)
1617		c->scache.linesz = 2 << lsize;
1618	else
1619		c->scache.linesz = 0;
1620	c->scache.sets = 64 << ((config2 >> 8) & 15);
1621	c->scache.ways = 1 + (config2 & 15);
1622
1623	scache_size = c->scache.sets *
1624				  c->scache.ways *
1625				  c->scache.linesz;
1626
  1627	/* Loongson-3 has 4 S-cache banks, while Loongson-2K has only 2 banks */
1628	if ((c->processor_id & PRID_IMP_MASK) == PRID_IMP_LOONGSON_64R)
1629		scache_size *= 2;
1630	else
1631		scache_size *= 4;
1632
1633	c->scache.waybit = 0;
1634	c->scache.waysize = scache_size / c->scache.ways;
1635	pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
1636	       scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);
1637	if (scache_size)
1638		c->options |= MIPS_CPU_INCLUSIVE_CACHES;
1639	return;
1640}
1641
1642extern int r5k_sc_init(void);
1643extern int rm7k_sc_init(void);
1644extern int mips_sc_init(void);
1645
1646static void setup_scache(void)
1647{
1648	struct cpuinfo_mips *c = &current_cpu_data;
1649	unsigned int config = read_c0_config();
1650	int sc_present = 0;
1651
1652	/*
1653	 * Do the probing thing on R4000SC and R4400SC processors.  Other
1654	 * processors don't have a S-cache that would be relevant to the
1655	 * Linux memory management.
1656	 */
1657	switch (current_cpu_type()) {
1658	case CPU_R4000SC:
1659	case CPU_R4000MC:
1660	case CPU_R4400SC:
1661	case CPU_R4400MC:
1662		sc_present = run_uncached(probe_scache);
1663		if (sc_present)
1664			c->options |= MIPS_CPU_CACHE_CDEX_S;
1665		break;
1666
1667	case CPU_R10000:
1668	case CPU_R12000:
1669	case CPU_R14000:
1670	case CPU_R16000:
1671		scache_size = 0x80000 << ((config & R10K_CONF_SS) >> 16);
1672		c->scache.linesz = 64 << ((config >> 13) & 1);
1673		c->scache.ways = 2;
  1674		c->scache.waybit = 0;
1675		sc_present = 1;
1676		break;
1677
1678	case CPU_R5000:
1679	case CPU_NEVADA:
1680#ifdef CONFIG_R5000_CPU_SCACHE
1681		r5k_sc_init();
1682#endif
1683		return;
1684
1685	case CPU_RM7000:
1686#ifdef CONFIG_RM7000_CPU_SCACHE
1687		rm7k_sc_init();
1688#endif
1689		return;
1690
1691	case CPU_LOONGSON2EF:
1692		loongson2_sc_init();
1693		return;
1694
1695	case CPU_LOONGSON64:
1696		loongson3_sc_init();
1697		return;
1698
1699	case CPU_CAVIUM_OCTEON3:
1700	case CPU_XLP:
1701		/* don't need to worry about L2, fully coherent */
1702		return;
1703
1704	default:
1705		if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M64R1 |
1706				    MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2 |
1707				    MIPS_CPU_ISA_M32R5 | MIPS_CPU_ISA_M64R5 |
1708				    MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6)) {
1709#ifdef CONFIG_MIPS_CPU_SCACHE
  1710			if (mips_sc_init()) {
1711				scache_size = c->scache.ways * c->scache.sets * c->scache.linesz;
1712				printk("MIPS secondary cache %ldkB, %s, linesize %d bytes.\n",
1713				       scache_size >> 10,
1714				       way_string[c->scache.ways], c->scache.linesz);
1715
1716				if (current_cpu_type() == CPU_BMIPS5000)
1717					c->options |= MIPS_CPU_INCLUSIVE_CACHES;
1718			}
1719
1720#else
1721			if (!(c->scache.flags & MIPS_CACHE_NOT_PRESENT))
1722				panic("Dunno how to handle MIPS32 / MIPS64 second level cache");
1723#endif
1724			return;
1725		}
1726		sc_present = 0;
1727	}
1728
1729	if (!sc_present)
1730		return;
1731
1732	/* compute a couple of other cache variables */
1733	c->scache.waysize = scache_size / c->scache.ways;
1734
1735	c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);
1736
1737	printk("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
1738	       scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);
1739
1740	c->options |= MIPS_CPU_INCLUSIVE_CACHES;
1741}
1742
1743void au1x00_fixup_config_od(void)
1744{
1745	/*
1746	 * c0_config.od (bit 19) was write only (and read as 0)
1747	 * on the early revisions of Alchemy SOCs.  It disables the bus
1748	 * transaction overlapping and needs to be set to fix various errata.
1749	 */
1750	switch (read_c0_prid()) {
1751	case 0x00030100: /* Au1000 DA */
1752	case 0x00030201: /* Au1000 HA */
1753	case 0x00030202: /* Au1000 HB */
1754	case 0x01030200: /* Au1500 AB */
1755	/*
  1756	 * The Au1100 errata are actually silent about this bit, so we set it
1757	 * just in case for those revisions that require it to be set according
1758	 * to the (now gone) cpu table.
1759	 */
1760	case 0x02030200: /* Au1100 AB */
1761	case 0x02030201: /* Au1100 BA */
1762	case 0x02030202: /* Au1100 BC */
1763		set_c0_config(1 << 19);
1764		break;
1765	}
1766}
1767
1768/* CP0 hazard avoidance. */
1769#define NXP_BARRIER()							\
1770	 __asm__ __volatile__(						\
1771	".set noreorder\n\t"						\
1772	"nop; nop; nop; nop; nop; nop;\n\t"				\
1773	".set reorder\n\t")
1774
1775static void nxp_pr4450_fixup_config(void)
1776{
1777	unsigned long config0;
1778
1779	config0 = read_c0_config();
1780
1781	/* clear all three cache coherency fields */
1782	config0 &= ~(0x7 | (7 << 25) | (7 << 28));
1783	config0 |= (((_page_cachable_default >> _CACHE_SHIFT) <<  0) |
1784		    ((_page_cachable_default >> _CACHE_SHIFT) << 25) |
1785		    ((_page_cachable_default >> _CACHE_SHIFT) << 28));
1786	write_c0_config(config0);
1787	NXP_BARRIER();
1788}
1789
1790static int cca = -1;
1791
1792static int __init cca_setup(char *str)
1793{
1794	get_option(&str, &cca);
1795
1796	return 0;
1797}
1798
1799early_param("cca", cca_setup);
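/*
 * Example: booting with "cca=3" forces CCA 3 (cacheable, noncoherent,
 * write-back on typical MIPS32 cores) as the default page cache
 * attribute instead of the value read back from c0_config; values
 * outside 0..7 are ignored by coherency_setup() below.
 */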
1800
1801static void coherency_setup(void)
1802{
1803	if (cca < 0 || cca > 7)
1804		cca = read_c0_config() & CONF_CM_CMASK;
1805	_page_cachable_default = cca << _CACHE_SHIFT;
1806
1807	pr_debug("Using cache attribute %d\n", cca);
1808	change_c0_config(CONF_CM_CMASK, cca);
1809
1810	/*
1811	 * c0_status.cu=0 specifies that updates by the sc instruction use
1812	 * the coherency mode specified by the TLB; 1 means cachable
1813	 * coherent update on write will be used.  Not all processors have
  1814	 * this bit; some wire it to zero, others like Toshiba had the
1815	 * silly idea of putting something else there ...
1816	 */
1817	switch (current_cpu_type()) {
1818	case CPU_R4000PC:
1819	case CPU_R4000SC:
1820	case CPU_R4000MC:
1821	case CPU_R4400PC:
1822	case CPU_R4400SC:
1823	case CPU_R4400MC:
1824		clear_c0_config(CONF_CU);
1825		break;
1826	/*
1827	 * We need to catch the early Alchemy SOCs with
  1828	 * the write-only c0_config.od bit and set it back to one on:
1829	 * Au1000 rev DA, HA, HB;  Au1100 AB, BA, BC, Au1500 AB
1830	 */
1831	case CPU_ALCHEMY:
1832		au1x00_fixup_config_od();
1833		break;
1834
1835	case PRID_IMP_PR4450:
1836		nxp_pr4450_fixup_config();
1837		break;
1838	}
1839}
1840
1841static void r4k_cache_error_setup(void)
1842{
1843	extern char __weak except_vec2_generic;
1844	extern char __weak except_vec2_sb1;
1845
1846	switch (current_cpu_type()) {
1847	case CPU_SB1:
1848	case CPU_SB1A:
1849		set_uncached_handler(0x100, &except_vec2_sb1, 0x80);
1850		break;
1851
1852	default:
1853		set_uncached_handler(0x100, &except_vec2_generic, 0x80);
1854		break;
1855	}
1856}
1857
1858void r4k_cache_init(void)
1859{
1860	extern void build_clear_page(void);
1861	extern void build_copy_page(void);
1862	struct cpuinfo_mips *c = &current_cpu_data;
1863
1864	probe_pcache();
1865	probe_vcache();
1866	setup_scache();
1867
1868	r4k_blast_dcache_page_setup();
1869	r4k_blast_dcache_page_indexed_setup();
1870	r4k_blast_dcache_setup();
1871	r4k_blast_icache_page_setup();
1872	r4k_blast_icache_page_indexed_setup();
1873	r4k_blast_icache_setup();
1874	r4k_blast_scache_page_setup();
1875	r4k_blast_scache_page_indexed_setup();
1876	r4k_blast_scache_setup();
1877	r4k_blast_scache_node_setup();
1878#ifdef CONFIG_EVA
1879	r4k_blast_dcache_user_page_setup();
1880	r4k_blast_icache_user_page_setup();
1881#endif
1882
1883	/*
1884	 * Some MIPS32 and MIPS64 processors have physically indexed caches.
1885	 * This code supports virtually indexed processors and will be
1886	 * unnecessarily inefficient on physically indexed processors.
1887	 */
1888	if (c->dcache.linesz && cpu_has_dc_aliases)
  1889		shm_align_mask = max_t(unsigned long,
1890					c->dcache.sets * c->dcache.linesz - 1,
1891					PAGE_SIZE - 1);
1892	else
  1893		shm_align_mask = PAGE_SIZE - 1;
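	/*
	 * Worked example (hypothetical numbers): a 32kB 4-way aliasing
	 * d-cache with 32-byte lines gives sets * linesz = 256 * 32 =
	 * 8kB, so shm_align_mask becomes 0x1fff and shared mappings are
	 * colour-aligned on 8kB boundaries to keep aliases apart.
	 */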
1894
1895	__flush_cache_vmap	= r4k__flush_cache_vmap;
1896	__flush_cache_vunmap	= r4k__flush_cache_vunmap;
1897
1898	flush_cache_all		= cache_noop;
1899	__flush_cache_all	= r4k___flush_cache_all;
1900	flush_cache_mm		= r4k_flush_cache_mm;
1901	flush_cache_page	= r4k_flush_cache_page;
1902	flush_cache_range	= r4k_flush_cache_range;
1903
1904	__flush_kernel_vmap_range = r4k_flush_kernel_vmap_range;
1905
1906	flush_icache_all	= r4k_flush_icache_all;
1907	local_flush_data_cache_page	= local_r4k_flush_data_cache_page;
1908	flush_data_cache_page	= r4k_flush_data_cache_page;
1909	flush_icache_range	= r4k_flush_icache_range;
1910	local_flush_icache_range	= local_r4k_flush_icache_range;
1911	__flush_icache_user_range	= r4k_flush_icache_user_range;
1912	__local_flush_icache_user_range	= local_r4k_flush_icache_user_range;
1913
1914#ifdef CONFIG_DMA_NONCOHERENT
1915#ifdef CONFIG_DMA_MAYBE_COHERENT
1916	if (coherentio == IO_COHERENCE_ENABLED ||
1917	    (coherentio == IO_COHERENCE_DEFAULT && hw_coherentio)) {
1918		_dma_cache_wback_inv	= (void *)cache_noop;
1919		_dma_cache_wback	= (void *)cache_noop;
1920		_dma_cache_inv		= (void *)cache_noop;
1921	} else
1922#endif /* CONFIG_DMA_MAYBE_COHERENT */
1923	{
1924		_dma_cache_wback_inv	= r4k_dma_cache_wback_inv;
1925		_dma_cache_wback	= r4k_dma_cache_wback_inv;
1926		_dma_cache_inv		= r4k_dma_cache_inv;
1927	}
1928#endif /* CONFIG_DMA_NONCOHERENT */
1929
1930	build_clear_page();
1931	build_copy_page();
1932
1933	/*
1934	 * We want to run CMP kernels on core with and without coherent
1935	 * caches. Therefore, do not use CONFIG_MIPS_CMP to decide whether
1936	 * or not to flush caches.
1937	 */
1938	local_r4k___flush_cache_all(NULL);
1939
1940	coherency_setup();
1941	board_cache_error_setup = r4k_cache_error_setup;
1942
1943	/*
1944	 * Per-CPU overrides
1945	 */
1946	switch (current_cpu_type()) {
1947	case CPU_BMIPS4350:
1948	case CPU_BMIPS4380:
1949		/* No IPI is needed because all CPUs share the same D$ */
1950		flush_data_cache_page = r4k_blast_dcache_page;
1951		break;
1952	case CPU_BMIPS5000:
1953		/* We lose our superpowers if L2 is disabled */
1954		if (c->scache.flags & MIPS_CACHE_NOT_PRESENT)
1955			break;
1956
1957		/* I$ fills from D$ just by emptying the write buffers */
1958		flush_cache_page = (void *)b5k_instruction_hazard;
1959		flush_cache_range = (void *)b5k_instruction_hazard;
1960		local_flush_data_cache_page = (void *)b5k_instruction_hazard;
1961		flush_data_cache_page = (void *)b5k_instruction_hazard;
1962		flush_icache_range = (void *)b5k_instruction_hazard;
1963		local_flush_icache_range = (void *)b5k_instruction_hazard;
1964
1965
1966		/* Optimization: an L2 flush implicitly flushes the L1 */
1967		current_cpu_data.options |= MIPS_CPU_INCLUSIVE_CACHES;
1968		break;
1969	case CPU_LOONGSON64:
1970		/* Loongson-3 maintains cache coherency by hardware */
1971		__flush_cache_all	= cache_noop;
1972		__flush_cache_vmap	= cache_noop;
1973		__flush_cache_vunmap	= cache_noop;
1974		__flush_kernel_vmap_range = (void *)cache_noop;
1975		flush_cache_mm		= (void *)cache_noop;
1976		flush_cache_page	= (void *)cache_noop;
1977		flush_cache_range	= (void *)cache_noop;
1978		flush_icache_all	= (void *)cache_noop;
1979		flush_data_cache_page	= (void *)cache_noop;
1980		local_flush_data_cache_page	= (void *)cache_noop;
1981		break;
1982	}
1983}
1984
1985static int r4k_cache_pm_notifier(struct notifier_block *self, unsigned long cmd,
1986			       void *v)
1987{
1988	switch (cmd) {
1989	case CPU_PM_ENTER_FAILED:
1990	case CPU_PM_EXIT:
1991		coherency_setup();
1992		break;
1993	}
1994
1995	return NOTIFY_OK;
1996}
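/*
 * c0_config may be lost across deep power states, so coherency_setup()
 * is re-run whenever a CPU leaves power management (or fails to enter
 * it) to restore the default cache coherency attribute.
 */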
1997
1998static struct notifier_block r4k_cache_pm_notifier_block = {
1999	.notifier_call = r4k_cache_pm_notifier,
2000};
2001
2002int __init r4k_cache_init_pm(void)
2003{
2004	return cpu_pm_register_notifier(&r4k_cache_pm_notifier_block);
2005}
2006arch_initcall(r4k_cache_init_pm);