   1/*
   2 * This file is subject to the terms and conditions of the GNU General Public
   3 * License.  See the file "COPYING" in the main directory of this archive
   4 * for more details.
   5 *
   6 * Synthesize TLB refill handlers at runtime.
   7 *
   8 * Copyright (C) 2004, 2005, 2006, 2008  Thiemo Seufer
   9 * Copyright (C) 2005, 2007, 2008, 2009  Maciej W. Rozycki
  10 * Copyright (C) 2006  Ralf Baechle (ralf@linux-mips.org)
  11 * Copyright (C) 2008, 2009 Cavium Networks, Inc.
  12 *
  13 * ... and the days got worse and worse and now you see
   14 * I've gone completely out of my mind.
  15 *
   16 * They're coming to take me away haha
   17 * they're coming to take me away hoho hihi haha
  18 * to the funny farm where code is beautiful all the time ...
  19 *
  20 * (Condolences to Napoleon XIV)
  21 */
  22
  23#include <linux/bug.h>
  24#include <linux/kernel.h>
  25#include <linux/types.h>
  26#include <linux/smp.h>
  27#include <linux/string.h>
  28#include <linux/init.h>
  29#include <linux/cache.h>
  30
  31#include <asm/cacheflush.h>
  32#include <asm/pgtable.h>
  33#include <asm/war.h>
  34#include <asm/uasm.h>
  35
  36/*
  37 * TLB load/store/modify handlers.
  38 *
  39 * Only the fastpath gets synthesized at runtime, the slowpath for
  40 * do_page_fault remains normal asm.
  41 */
  42extern void tlb_do_page_fault_0(void);
  43extern void tlb_do_page_fault_1(void);
  44
  45struct work_registers {
  46	int r1;
  47	int r2;
  48	int r3;
  49};
  50
  51struct tlb_reg_save {
  52	unsigned long a;
  53	unsigned long b;
  54} ____cacheline_aligned_in_smp;
  55
  56static struct tlb_reg_save handler_reg_save[NR_CPUS];
  57
  58static inline int r45k_bvahwbug(void)
  59{
  60	/* XXX: We should probe for the presence of this bug, but we don't. */
  61	return 0;
  62}
  63
  64static inline int r4k_250MHZhwbug(void)
  65{
  66	/* XXX: We should probe for the presence of this bug, but we don't. */
  67	return 0;
  68}
  69
  70static inline int __maybe_unused bcm1250_m3_war(void)
  71{
  72	return BCM1250_M3_WAR;
  73}
  74
  75static inline int __maybe_unused r10000_llsc_war(void)
  76{
  77	return R10000_LLSC_WAR;
  78}
  79
  80static int use_bbit_insns(void)
  81{
  82	switch (current_cpu_type()) {
  83	case CPU_CAVIUM_OCTEON:
  84	case CPU_CAVIUM_OCTEON_PLUS:
  85	case CPU_CAVIUM_OCTEON2:
  86		return 1;
  87	default:
  88		return 0;
  89	}
  90}
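     /*
      * bbit0/bbit1 are Cavium Octeon branch-on-bit instructions: they
      * test a single bit and branch in one instruction, avoiding the
      * usual andi + beqz/bnez pair.
      */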
  91
  92static int use_lwx_insns(void)
  93{
  94	switch (current_cpu_type()) {
  95	case CPU_CAVIUM_OCTEON2:
  96		return 1;
  97	default:
  98		return 0;
  99	}
 100}
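     /*
      * Octeon II also implements indexed loads (lwx/ldx), which fold
      * the pointer arithmetic into the load and save the explicit
      * daddu in the generated handler.
      */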
 101#if defined(CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE) && \
 102    CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
 103static bool scratchpad_available(void)
 104{
 105	return true;
 106}
 107static int scratchpad_offset(int i)
 108{
 109	/*
 110	 * CVMSEG starts at address -32768 and extends for
  111	 * CAVIUM_OCTEON_CVMSEG_SIZE 128-byte cache lines.
 112	 */
 113	i += 1; /* Kernel use starts at the top and works down. */
 114	return CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE * 128 - (8 * i) - 32768;
 115}
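     /*
      * Worked example (assuming CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE == 2):
      * scratchpad_offset(0) = 2*128 - 8*1 - 32768 = -32520, i.e. the
      * highest 8-byte slot of the 256-byte CVMSEG area; each larger i
      * moves down by 8 bytes.
      */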
 116#else
 117static bool scratchpad_available(void)
 118{
 119	return false;
 120}
 121static int scratchpad_offset(int i)
 122{
 123	BUG();
  124	/* Really unreachable, but evidently some GCC versions want this. */
 125	return 0;
 126}
 127#endif
  128/*
  129 * Found by experiment: at least some revisions of the 4kc throw a
  130 * machine check exception under some circumstances, triggered by
  131 * invalid values in the index register.  Delaying the tlbp instruction
  132 * until after the next branch, plus adding an additional nop in front
  133 * of tlbwi/tlbwr, avoids the invalid index register values. Nobody
  134 * knows why; it's not an issue caused by the core RTL.
  135 */
 137static int __cpuinit m4kc_tlbp_war(void)
 138{
 139	return (current_cpu_data.processor_id & 0xffff00) ==
 140	       (PRID_COMP_MIPS | PRID_IMP_4KC);
 141}
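     /*
      * The 0xffff00 mask keeps the company and implementation fields of
      * the PRId register and discards the revision field, so the
      * workaround applies to every 4Kc revision.
      */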
 142
 143/* Handle labels (which must be positive integers). */
 144enum label_id {
 145	label_second_part = 1,
 146	label_leave,
 147	label_vmalloc,
 148	label_vmalloc_done,
 149	label_tlbw_hazard,
 150	label_split,
 151	label_tlbl_goaround1,
 152	label_tlbl_goaround2,
 153	label_nopage_tlbl,
 154	label_nopage_tlbs,
 155	label_nopage_tlbm,
 156	label_smp_pgtable_change,
 157	label_r3000_write_probe_fail,
 158	label_large_segbits_fault,
 159#ifdef CONFIG_HUGETLB_PAGE
 160	label_tlb_huge_update,
 161#endif
 162};
 163
 164UASM_L_LA(_second_part)
 165UASM_L_LA(_leave)
 166UASM_L_LA(_vmalloc)
 167UASM_L_LA(_vmalloc_done)
 168UASM_L_LA(_tlbw_hazard)
 169UASM_L_LA(_split)
 170UASM_L_LA(_tlbl_goaround1)
 171UASM_L_LA(_tlbl_goaround2)
 172UASM_L_LA(_nopage_tlbl)
 173UASM_L_LA(_nopage_tlbs)
 174UASM_L_LA(_nopage_tlbm)
 175UASM_L_LA(_smp_pgtable_change)
 176UASM_L_LA(_r3000_write_probe_fail)
 177UASM_L_LA(_large_segbits_fault)
 178#ifdef CONFIG_HUGETLB_PAGE
 179UASM_L_LA(_tlb_huge_update)
 180#endif
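     /*
      * Each UASM_L_LA(x) above is expected (see asm/uasm.h) to expand
      * to a small helper along the lines of
      *
      *	static inline void uasm_l_leave(struct uasm_label **lab, u32 *addr)
      *	{
      *		uasm_build_label(lab, addr, label_leave);
      *	}
      *
      * which records the current output address against the label id so
      * that relocations can be resolved later.
      */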
 181
 182/*
 183 * For debug purposes.
 184 */
 185static inline void dump_handler(const u32 *handler, int count)
 186{
 187	int i;
 188
 189	pr_debug("\t.set push\n");
 190	pr_debug("\t.set noreorder\n");
 191
 192	for (i = 0; i < count; i++)
 193		pr_debug("\t%p\t.word 0x%08x\n", &handler[i], handler[i]);
 194
 195	pr_debug("\t.set pop\n");
 196}
 197
 198/* The only general purpose registers allowed in TLB handlers. */
 199#define K0		26
 200#define K1		27
 201
 202/* Some CP0 registers */
 203#define C0_INDEX	0, 0
 204#define C0_ENTRYLO0	2, 0
 205#define C0_TCBIND	2, 2
 206#define C0_ENTRYLO1	3, 0
 207#define C0_CONTEXT	4, 0
 208#define C0_PAGEMASK	5, 0
 209#define C0_BADVADDR	8, 0
 210#define C0_ENTRYHI	10, 0
 211#define C0_EPC		14, 0
 212#define C0_XCONTEXT	20, 0
 213
 214#ifdef CONFIG_64BIT
 215# define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_XCONTEXT)
 216#else
 217# define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_CONTEXT)
 218#endif
 219
 220/* The worst case length of the handler is around 18 instructions for
 221 * R3000-style TLBs and up to 63 instructions for R4000-style TLBs.
 222 * Maximum space available is 32 instructions for R3000 and 64
 223 * instructions for R4000.
 224 *
 225 * We deliberately chose a buffer size of 128, so we won't scribble
 226 * over anything important on overflow before we panic.
 227 */
 228static u32 tlb_handler[128] __cpuinitdata;
 229
 230/* simply assume worst case size for labels and relocs */
 231static struct uasm_label labels[128] __cpuinitdata;
 232static struct uasm_reloc relocs[128] __cpuinitdata;
 233
  234static int check_for_high_segbits __cpuinitdata;
 239
 240static unsigned int kscratch_used_mask __cpuinitdata;
 241
 242static int __cpuinit allocate_kscratch(void)
 243{
 244	int r;
 245	unsigned int a = cpu_data[0].kscratch_mask & ~kscratch_used_mask;
 246
 247	r = ffs(a);
 248
 249	if (r == 0)
 250		return -1;
 251
 252	r--; /* make it zero based */
 253
 254	kscratch_used_mask |= (1 << r);
 255
 256	return r;
 257}
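     /*
      * For example, a kscratch_mask of 0x0c (KScratch registers 2 and 3
      * implemented) yields r = 2 on the first call and r = 3 on the
      * second; any further call returns -1.  ffs() is 1-based, hence
      * the r--.
      */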
 258
 259static int scratch_reg __cpuinitdata;
 260static int pgd_reg __cpuinitdata;
 261enum vmalloc64_mode {not_refill, refill_scratch, refill_noscratch};
 262
 263static struct work_registers __cpuinit build_get_work_registers(u32 **p)
 264{
 265	struct work_registers r;
 266
 267	int smp_processor_id_reg;
 268	int smp_processor_id_sel;
 269	int smp_processor_id_shift;
 270
 271	if (scratch_reg > 0) {
 272		/* Save in CPU local C0_KScratch? */
 273		UASM_i_MTC0(p, 1, 31, scratch_reg);
 274		r.r1 = K0;
 275		r.r2 = K1;
 276		r.r3 = 1;
 277		return r;
 278	}
 279
 280	if (num_possible_cpus() > 1) {
 281#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
 282		smp_processor_id_shift = 51;
 283		smp_processor_id_reg = 20; /* XContext */
 284		smp_processor_id_sel = 0;
 285#else
 286# ifdef CONFIG_32BIT
 287		smp_processor_id_shift = 25;
 288		smp_processor_id_reg = 4; /* Context */
 289		smp_processor_id_sel = 0;
 290# endif
 291# ifdef CONFIG_64BIT
 292		smp_processor_id_shift = 26;
 293		smp_processor_id_reg = 4; /* Context */
 294		smp_processor_id_sel = 0;
 295# endif
 296#endif
 297		/* Get smp_processor_id */
 298		UASM_i_MFC0(p, K0, smp_processor_id_reg, smp_processor_id_sel);
 299		UASM_i_SRL_SAFE(p, K0, K0, smp_processor_id_shift);
 300
 301		/* handler_reg_save index in K0 */
 302		UASM_i_SLL(p, K0, K0, ilog2(sizeof(struct tlb_reg_save)));
 303
 304		UASM_i_LA(p, K1, (long)&handler_reg_save);
 305		UASM_i_ADDU(p, K0, K0, K1);
 306	} else {
 307		UASM_i_LA(p, K0, (long)&handler_reg_save);
 308	}
 309	/* K0 now points to save area, save $1 and $2  */
 310	UASM_i_SW(p, 1, offsetof(struct tlb_reg_save, a), K0);
 311	UASM_i_SW(p, 2, offsetof(struct tlb_reg_save, b), K0);
 312
 313	r.r1 = K1;
 314	r.r2 = 1;
 315	r.r3 = 2;
 316	return r;
 317}
 318
 319static void __cpuinit build_restore_work_registers(u32 **p)
 320{
 321	if (scratch_reg > 0) {
 322		UASM_i_MFC0(p, 1, 31, scratch_reg);
 323		return;
 324	}
 325	/* K0 already points to save area, restore $1 and $2  */
 326	UASM_i_LW(p, 1, offsetof(struct tlb_reg_save, a), K0);
 327	UASM_i_LW(p, 2, offsetof(struct tlb_reg_save, b), K0);
 328}
 329
 330#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
 331
 332/*
 333 * CONFIG_MIPS_PGD_C0_CONTEXT implies 64 bit and lack of pgd_current,
 334 * we cannot do r3000 under these circumstances.
 335 *
 336 * Declare pgd_current here instead of including mmu_context.h to avoid type
 337 * conflicts for tlbmiss_handler_setup_pgd
 338 */
 339extern unsigned long pgd_current[];
 340
 341/*
 342 * The R3000 TLB handler is simple.
 343 */
 344static void __cpuinit build_r3000_tlb_refill_handler(void)
 345{
 346	long pgdc = (long)pgd_current;
 347	u32 *p;
 348
 349	memset(tlb_handler, 0, sizeof(tlb_handler));
 350	p = tlb_handler;
 351
 352	uasm_i_mfc0(&p, K0, C0_BADVADDR);
 353	uasm_i_lui(&p, K1, uasm_rel_hi(pgdc)); /* cp0 delay */
 354	uasm_i_lw(&p, K1, uasm_rel_lo(pgdc), K1);
 355	uasm_i_srl(&p, K0, K0, 22); /* load delay */
 356	uasm_i_sll(&p, K0, K0, 2);
 357	uasm_i_addu(&p, K1, K1, K0);
 358	uasm_i_mfc0(&p, K0, C0_CONTEXT);
 359	uasm_i_lw(&p, K1, 0, K1); /* cp0 delay */
 360	uasm_i_andi(&p, K0, K0, 0xffc); /* load delay */
 361	uasm_i_addu(&p, K1, K1, K0);
 362	uasm_i_lw(&p, K0, 0, K1);
 363	uasm_i_nop(&p); /* load delay */
 364	uasm_i_mtc0(&p, K0, C0_ENTRYLO0);
 365	uasm_i_mfc0(&p, K1, C0_EPC); /* cp0 delay */
 366	uasm_i_tlbwr(&p); /* cp0 delay */
 367	uasm_i_jr(&p, K1);
 368	uasm_i_rfe(&p); /* branch delay */
 369
 370	if (p > tlb_handler + 32)
 371		panic("TLB refill handler space exceeded");
 372
 373	pr_debug("Wrote TLB refill handler (%u instructions).\n",
 374		 (unsigned int)(p - tlb_handler));
 375
 376	memcpy((void *)ebase, tlb_handler, 0x80);
 377
 378	dump_handler((u32 *)ebase, 32);
 379}
 380#endif /* CONFIG_MIPS_PGD_C0_CONTEXT */
 381
  382/*
  383 * The R4000 TLB handler is much more complicated. We have two
  384 * consecutive handler areas with space for 32 instructions each.
  385 * Since they aren't used at the same time, we can overflow into the
  386 * other one. To keep things simple, we first assume linear space,
  387 * then we relocate it to the final handler layout as needed.
  388 */
 389static u32 final_handler[64] __cpuinitdata;
 390
 391/*
 392 * Hazards
 393 *
 394 * From the IDT errata for the QED RM5230 (Nevada), processor revision 1.0:
 395 * 2. A timing hazard exists for the TLBP instruction.
 396 *
 397 *      stalling_instruction
 398 *      TLBP
 399 *
 400 * The JTLB is being read for the TLBP throughout the stall generated by the
 401 * previous instruction. This is not really correct as the stalling instruction
 402 * can modify the address used to access the JTLB.  The failure symptom is that
 403 * the TLBP instruction will use an address created for the stalling instruction
 404 * and not the address held in C0_ENHI and thus report the wrong results.
 405 *
 406 * The software work-around is to not allow the instruction preceding the TLBP
 407 * to stall - make it an NOP or some other instruction guaranteed not to stall.
 408 *
 409 * Errata 2 will not be fixed.  This errata is also on the R5000.
 410 *
 411 * As if we MIPS hackers wouldn't know how to nop pipelines happy ...
 412 */
 413static void __cpuinit __maybe_unused build_tlb_probe_entry(u32 **p)
 414{
 415	switch (current_cpu_type()) {
 416	/* Found by experiment: R4600 v2.0/R4700 needs this, too.  */
 417	case CPU_R4600:
 418	case CPU_R4700:
 419	case CPU_R5000:
 420	case CPU_R5000A:
 421	case CPU_NEVADA:
 422		uasm_i_nop(p);
 423		uasm_i_tlbp(p);
 424		break;
 425
 426	default:
 427		uasm_i_tlbp(p);
 428		break;
 429	}
 430}
 431
 432/*
 433 * Write random or indexed TLB entry, and care about the hazards from
 434 * the preceding mtc0 and for the following eret.
 435 */
 436enum tlb_write_entry { tlb_random, tlb_indexed };
 437
 438static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
 439					 struct uasm_reloc **r,
 440					 enum tlb_write_entry wmode)
 441{
 442	void(*tlbw)(u32 **) = NULL;
 443
 444	switch (wmode) {
 445	case tlb_random: tlbw = uasm_i_tlbwr; break;
 446	case tlb_indexed: tlbw = uasm_i_tlbwi; break;
 447	}
 448
 449	if (cpu_has_mips_r2) {
 450		if (cpu_has_mips_r2_exec_hazard)
 451			uasm_i_ehb(p);
 452		tlbw(p);
 453		return;
 454	}
 455
 456	switch (current_cpu_type()) {
 457	case CPU_R4000PC:
 458	case CPU_R4000SC:
 459	case CPU_R4000MC:
 460	case CPU_R4400PC:
 461	case CPU_R4400SC:
 462	case CPU_R4400MC:
 463		/*
 464		 * This branch uses up a mtc0 hazard nop slot and saves
 465		 * two nops after the tlbw instruction.
 466		 */
 467		uasm_il_bgezl(p, r, 0, label_tlbw_hazard);
 468		tlbw(p);
 469		uasm_l_tlbw_hazard(l, *p);
 470		uasm_i_nop(p);
 471		break;
 472
 473	case CPU_R4600:
 474	case CPU_R4700:
 475	case CPU_R5000:
 476	case CPU_R5000A:
 477		uasm_i_nop(p);
 478		tlbw(p);
 479		uasm_i_nop(p);
 480		break;
 481
 482	case CPU_R4300:
 483	case CPU_5KC:
 484	case CPU_TX49XX:
 485	case CPU_PR4450:
 486	case CPU_XLR:
 487		uasm_i_nop(p);
 488		tlbw(p);
 489		break;
 490
 491	case CPU_R10000:
 492	case CPU_R12000:
 493	case CPU_R14000:
 494	case CPU_4KC:
 495	case CPU_4KEC:
 496	case CPU_SB1:
 497	case CPU_SB1A:
 498	case CPU_4KSC:
 499	case CPU_20KC:
 500	case CPU_25KF:
 501	case CPU_BMIPS32:
 502	case CPU_BMIPS3300:
 503	case CPU_BMIPS4350:
 504	case CPU_BMIPS4380:
 505	case CPU_BMIPS5000:
 506	case CPU_LOONGSON2:
 507	case CPU_R5500:
 508		if (m4kc_tlbp_war())
 509			uasm_i_nop(p);
 510	case CPU_ALCHEMY:
 511		tlbw(p);
 512		break;
 513
 514	case CPU_NEVADA:
 515		uasm_i_nop(p); /* QED specifies 2 nops hazard */
 516		/*
 517		 * This branch uses up a mtc0 hazard nop slot and saves
 518		 * a nop after the tlbw instruction.
 519		 */
 520		uasm_il_bgezl(p, r, 0, label_tlbw_hazard);
 521		tlbw(p);
 522		uasm_l_tlbw_hazard(l, *p);
 523		break;
 524
 525	case CPU_RM7000:
 526		uasm_i_nop(p);
 527		uasm_i_nop(p);
 528		uasm_i_nop(p);
 529		uasm_i_nop(p);
 530		tlbw(p);
 531		break;
 532
 533	case CPU_RM9000:
 534		/*
 535		 * When the JTLB is updated by tlbwi or tlbwr, a subsequent
 536		 * use of the JTLB for instructions should not occur for 4
 537		 * cpu cycles and use for data translations should not occur
 538		 * for 3 cpu cycles.
 539		 */
 540		uasm_i_ssnop(p);
 541		uasm_i_ssnop(p);
 542		uasm_i_ssnop(p);
 543		uasm_i_ssnop(p);
 544		tlbw(p);
 545		uasm_i_ssnop(p);
 546		uasm_i_ssnop(p);
 547		uasm_i_ssnop(p);
 548		uasm_i_ssnop(p);
 549		break;
 550
 551	case CPU_VR4111:
 552	case CPU_VR4121:
 553	case CPU_VR4122:
 554	case CPU_VR4181:
 555	case CPU_VR4181A:
 556		uasm_i_nop(p);
 557		uasm_i_nop(p);
 558		tlbw(p);
 559		uasm_i_nop(p);
 560		uasm_i_nop(p);
 561		break;
 562
 563	case CPU_VR4131:
 564	case CPU_VR4133:
 565	case CPU_R5432:
 566		uasm_i_nop(p);
 567		uasm_i_nop(p);
 568		tlbw(p);
 569		break;
 570
 571	case CPU_JZRISC:
 572		tlbw(p);
 573		uasm_i_nop(p);
 574		break;
 575
 576	default:
 577		panic("No TLB refill handler yet (CPU type: %d)",
 578		      current_cpu_data.cputype);
 579		break;
 580	}
 581}
 582
 583static __cpuinit __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
 584								  unsigned int reg)
 585{
 586	if (kernel_uses_smartmips_rixi) {
 587		UASM_i_SRL(p, reg, reg, ilog2(_PAGE_NO_EXEC));
 588		UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
 589	} else {
 590#ifdef CONFIG_64BIT_PHYS_ADDR
 591		uasm_i_dsrl_safe(p, reg, reg, ilog2(_PAGE_GLOBAL));
 592#else
 593		UASM_i_SRL(p, reg, reg, ilog2(_PAGE_GLOBAL));
 594#endif
 595	}
 596}
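     /*
      * The idea of the SRL/ROTR pair in the RIXI case: the SRL drops
      * the software bits below _PAGE_NO_EXEC, and the ROTR then brings
      * _PAGE_GLOBAL down to bit 0 while the no-exec/no-read bits wrap
      * around into the top of the register, where the EntryLo RI/XI
      * fields live.
      */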
 597
 598#ifdef CONFIG_HUGETLB_PAGE
 599
 600static __cpuinit void build_restore_pagemask(u32 **p,
 601					     struct uasm_reloc **r,
 602					     unsigned int tmp,
 603					     enum label_id lid,
 604					     int restore_scratch)
 605{
 606	if (restore_scratch) {
 607		/* Reset default page size */
 608		if (PM_DEFAULT_MASK >> 16) {
 609			uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
 610			uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
 611			uasm_i_mtc0(p, tmp, C0_PAGEMASK);
 612			uasm_il_b(p, r, lid);
 613		} else if (PM_DEFAULT_MASK) {
 614			uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
 615			uasm_i_mtc0(p, tmp, C0_PAGEMASK);
 616			uasm_il_b(p, r, lid);
 617		} else {
 618			uasm_i_mtc0(p, 0, C0_PAGEMASK);
 619			uasm_il_b(p, r, lid);
 620		}
 621		if (scratch_reg > 0)
 622			UASM_i_MFC0(p, 1, 31, scratch_reg);
 623		else
 624			UASM_i_LW(p, 1, scratchpad_offset(0), 0);
 625	} else {
 626		/* Reset default page size */
 627		if (PM_DEFAULT_MASK >> 16) {
 628			uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
 629			uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
 630			uasm_il_b(p, r, lid);
 631			uasm_i_mtc0(p, tmp, C0_PAGEMASK);
 632		} else if (PM_DEFAULT_MASK) {
 633			uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
 634			uasm_il_b(p, r, lid);
 635			uasm_i_mtc0(p, tmp, C0_PAGEMASK);
 636		} else {
 637			uasm_il_b(p, r, lid);
 638			uasm_i_mtc0(p, 0, C0_PAGEMASK);
 639		}
 640	}
 641}
 642
 643static __cpuinit void build_huge_tlb_write_entry(u32 **p,
 644						 struct uasm_label **l,
 645						 struct uasm_reloc **r,
 646						 unsigned int tmp,
 647						 enum tlb_write_entry wmode,
 648						 int restore_scratch)
 649{
 650	/* Set huge page tlb entry size */
 651	uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16);
 652	uasm_i_ori(p, tmp, tmp, PM_HUGE_MASK & 0xffff);
 653	uasm_i_mtc0(p, tmp, C0_PAGEMASK);
 654
 655	build_tlb_write_entry(p, l, r, wmode);
 656
 657	build_restore_pagemask(p, r, tmp, label_leave, restore_scratch);
 658}
 659
 660/*
 661 * Check if Huge PTE is present, if so then jump to LABEL.
 662 */
 663static void __cpuinit
 664build_is_huge_pte(u32 **p, struct uasm_reloc **r, unsigned int tmp,
 665		unsigned int pmd, int lid)
 666{
 667	UASM_i_LW(p, tmp, 0, pmd);
 668	if (use_bbit_insns()) {
 669		uasm_il_bbit1(p, r, tmp, ilog2(_PAGE_HUGE), lid);
 670	} else {
 671		uasm_i_andi(p, tmp, tmp, _PAGE_HUGE);
 672		uasm_il_bnez(p, r, tmp, lid);
 673	}
 674}
 675
 676static __cpuinit void build_huge_update_entries(u32 **p,
 677						unsigned int pte,
 678						unsigned int tmp)
 679{
 680	int small_sequence;
 681
 682	/*
 683	 * A huge PTE describes an area the size of the
  684	 * configured huge page size. This is twice the size
  685	 * of the large TLB entry we intend to use.
 686	 * A TLB entry half the size of the configured
 687	 * huge page size is configured into entrylo0
 688	 * and entrylo1 to cover the contiguous huge PTE
 689	 * address space.
 690	 */
 691	small_sequence = (HPAGE_SIZE >> 7) < 0x10000;
 692
 693	/* We can clobber tmp.  It isn't used after this.*/
 694	if (!small_sequence)
 695		uasm_i_lui(p, tmp, HPAGE_SIZE >> (7 + 16));
 696
 697	build_convert_pte_to_entrylo(p, pte);
 698	UASM_i_MTC0(p, pte, C0_ENTRYLO0); /* load it */
 699	/* convert to entrylo1 */
 700	if (small_sequence)
 701		UASM_i_ADDIU(p, pte, pte, HPAGE_SIZE >> 7);
 702	else
 703		UASM_i_ADDU(p, pte, pte, tmp);
 704
 705	UASM_i_MTC0(p, pte, C0_ENTRYLO1); /* load it */
 706}
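     /*
      * Worked example (assuming 2 MB huge pages): HPAGE_SIZE >> 7 is
      * 0x4000, which fits into 16 bits, so small_sequence is true and a
      * single ADDIU advances entrylo1 half a huge page past entrylo0.
      * The >> 7 is (HPAGE_SIZE / 2) >> 12 << 6, i.e. half the huge page
      * expressed in EntryLo PFN units.
      */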
 707
 708static __cpuinit void build_huge_handler_tail(u32 **p,
 709					      struct uasm_reloc **r,
 710					      struct uasm_label **l,
 711					      unsigned int pte,
 712					      unsigned int ptr)
 713{
 714#ifdef CONFIG_SMP
 715	UASM_i_SC(p, pte, 0, ptr);
 716	uasm_il_beqz(p, r, pte, label_tlb_huge_update);
 717	UASM_i_LW(p, pte, 0, ptr); /* Needed because SC killed our PTE */
 718#else
 719	UASM_i_SW(p, pte, 0, ptr);
 720#endif
 721	build_huge_update_entries(p, pte, ptr);
 722	build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed, 0);
 723}
 724#endif /* CONFIG_HUGETLB_PAGE */
 725
 726#ifdef CONFIG_64BIT
 727/*
 728 * TMP and PTR are scratch.
 729 * TMP will be clobbered, PTR will hold the pmd entry.
 730 */
 731static void __cpuinit
 732build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 733		 unsigned int tmp, unsigned int ptr)
 734{
 735#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
 736	long pgdc = (long)pgd_current;
 737#endif
 738	/*
 739	 * The vmalloc handling is not in the hotpath.
 740	 */
 741	uasm_i_dmfc0(p, tmp, C0_BADVADDR);
 742
 743	if (check_for_high_segbits) {
 744		/*
  745		 * The kernel currently implicitly assumes that the
 746		 * MIPS SEGBITS parameter for the processor is
 747		 * (PGDIR_SHIFT+PGDIR_BITS) or less, and will never
 748		 * allocate virtual addresses outside the maximum
 749		 * range for SEGBITS = (PGDIR_SHIFT+PGDIR_BITS). But
 750		 * that doesn't prevent user code from accessing the
 751		 * higher xuseg addresses.  Here, we make sure that
 752		 * everything but the lower xuseg addresses goes down
 753		 * the module_alloc/vmalloc path.
 754		 */
 755		uasm_i_dsrl_safe(p, ptr, tmp, PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
 756		uasm_il_bnez(p, r, ptr, label_vmalloc);
 757	} else {
 758		uasm_il_bltz(p, r, tmp, label_vmalloc);
 759	}
 760	/* No uasm_i_nop needed here, since the next insn doesn't touch TMP. */
 761
 762#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
 763	if (pgd_reg != -1) {
 764		/* pgd is in pgd_reg */
 765		UASM_i_MFC0(p, ptr, 31, pgd_reg);
 766	} else {
 767		/*
 768		 * &pgd << 11 stored in CONTEXT [23..63].
 769		 */
 770		UASM_i_MFC0(p, ptr, C0_CONTEXT);
 771
 772		/* Clear lower 23 bits of context. */
 773		uasm_i_dins(p, ptr, 0, 0, 23);
 774
 775		/* 1 0  1 0 1  << 6  xkphys cached */
 776		uasm_i_ori(p, ptr, ptr, 0x540);
 777		uasm_i_drotr(p, ptr, ptr, 11);
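     		/*
     		 * 0x540 puts the pattern 10101 into bits 10..6; the rotate
     		 * by 11 then undoes the << 11 from the setup code and lands
     		 * that pattern in bits 63..59, turning the physical pgd
     		 * address into an xkphys cached pointer.
     		 */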
 778	}
 779#elif defined(CONFIG_SMP)
 780# ifdef  CONFIG_MIPS_MT_SMTC
 781	/*
 782	 * SMTC uses TCBind value as "CPU" index
 783	 */
 784	uasm_i_mfc0(p, ptr, C0_TCBIND);
 785	uasm_i_dsrl_safe(p, ptr, ptr, 19);
 786# else
 787	/*
 788	 * 64 bit SMP running in XKPHYS has smp_processor_id() << 3
 789	 * stored in CONTEXT.
 790	 */
 791	uasm_i_dmfc0(p, ptr, C0_CONTEXT);
 792	uasm_i_dsrl_safe(p, ptr, ptr, 23);
 793# endif
 794	UASM_i_LA_mostly(p, tmp, pgdc);
 795	uasm_i_daddu(p, ptr, ptr, tmp);
 796	uasm_i_dmfc0(p, tmp, C0_BADVADDR);
 797	uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
 798#else
 799	UASM_i_LA_mostly(p, ptr, pgdc);
 800	uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
 801#endif
 802
 803	uasm_l_vmalloc_done(l, *p);
 804
 805	/* get pgd offset in bytes */
 806	uasm_i_dsrl_safe(p, tmp, tmp, PGDIR_SHIFT - 3);
 807
 808	uasm_i_andi(p, tmp, tmp, (PTRS_PER_PGD - 1)<<3);
 809	uasm_i_daddu(p, ptr, ptr, tmp); /* add in pgd offset */
 810#ifndef __PAGETABLE_PMD_FOLDED
 811	uasm_i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */
 812	uasm_i_ld(p, ptr, 0, ptr); /* get pmd pointer */
 813	uasm_i_dsrl_safe(p, tmp, tmp, PMD_SHIFT-3); /* get pmd offset in bytes */
 814	uasm_i_andi(p, tmp, tmp, (PTRS_PER_PMD - 1)<<3);
 815	uasm_i_daddu(p, ptr, ptr, tmp); /* add in pmd offset */
 816#endif
 817}
 818
 819/*
 820 * BVADDR is the faulting address, PTR is scratch.
 821 * PTR will hold the pgd for vmalloc.
 822 */
 823static void __cpuinit
 824build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 825			unsigned int bvaddr, unsigned int ptr,
 826			enum vmalloc64_mode mode)
 827{
 828	long swpd = (long)swapper_pg_dir;
 829	int single_insn_swpd;
 830	int did_vmalloc_branch = 0;
 831
 832	single_insn_swpd = uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd);
 833
 834	uasm_l_vmalloc(l, *p);
 835
 836	if (mode != not_refill && check_for_high_segbits) {
 837		if (single_insn_swpd) {
 838			uasm_il_bltz(p, r, bvaddr, label_vmalloc_done);
 839			uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
 840			did_vmalloc_branch = 1;
 841			/* fall through */
 842		} else {
 843			uasm_il_bgez(p, r, bvaddr, label_large_segbits_fault);
 844		}
 845	}
 846	if (!did_vmalloc_branch) {
 847		if (uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd)) {
 848			uasm_il_b(p, r, label_vmalloc_done);
 849			uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
 850		} else {
 851			UASM_i_LA_mostly(p, ptr, swpd);
 852			uasm_il_b(p, r, label_vmalloc_done);
 853			if (uasm_in_compat_space_p(swpd))
 854				uasm_i_addiu(p, ptr, ptr, uasm_rel_lo(swpd));
 855			else
 856				uasm_i_daddiu(p, ptr, ptr, uasm_rel_lo(swpd));
 857		}
 858	}
 859	if (mode != not_refill && check_for_high_segbits) {
 860		uasm_l_large_segbits_fault(l, *p);
 861		/*
  862		 * We get here if we are an xsseg address, or if we are
  863		 * an xuseg address above the (PGDIR_SHIFT+PGDIR_BITS) boundary.
  864		 *
  865		 * Ignoring xsseg (assume it is disabled, so accesses would
  866		 * generate address errors), the only remaining possibility
 867		 * is the upper xuseg addresses.  On processors with
 868		 * TLB_SEGBITS <= PGDIR_SHIFT+PGDIR_BITS, these
 869		 * addresses would have taken an address error. We try
 870		 * to mimic that here by taking a load/istream page
 871		 * fault.
 872		 */
 873		UASM_i_LA(p, ptr, (unsigned long)tlb_do_page_fault_0);
 874		uasm_i_jr(p, ptr);
 875
 876		if (mode == refill_scratch) {
 877			if (scratch_reg > 0)
 878				UASM_i_MFC0(p, 1, 31, scratch_reg);
 879			else
 880				UASM_i_LW(p, 1, scratchpad_offset(0), 0);
 881		} else {
 882			uasm_i_nop(p);
 883		}
 884	}
 885}
 886
 887#else /* !CONFIG_64BIT */
 888
 889/*
 890 * TMP and PTR are scratch.
 891 * TMP will be clobbered, PTR will hold the pgd entry.
 892 */
 893static void __cpuinit __maybe_unused
 894build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
 895{
 896	long pgdc = (long)pgd_current;
 897
 898	/* 32 bit SMP has smp_processor_id() stored in CONTEXT. */
 899#ifdef CONFIG_SMP
 900#ifdef  CONFIG_MIPS_MT_SMTC
 901	/*
 902	 * SMTC uses TCBind value as "CPU" index
 903	 */
 904	uasm_i_mfc0(p, ptr, C0_TCBIND);
 905	UASM_i_LA_mostly(p, tmp, pgdc);
 906	uasm_i_srl(p, ptr, ptr, 19);
 907#else
 908	/*
 909	 * smp_processor_id() << 3 is stored in CONTEXT.
  910	 */
 911	uasm_i_mfc0(p, ptr, C0_CONTEXT);
 912	UASM_i_LA_mostly(p, tmp, pgdc);
 913	uasm_i_srl(p, ptr, ptr, 23);
 914#endif
 915	uasm_i_addu(p, ptr, tmp, ptr);
 916#else
 917	UASM_i_LA_mostly(p, ptr, pgdc);
 918#endif
 919	uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */
 920	uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
 921	uasm_i_srl(p, tmp, tmp, PGDIR_SHIFT); /* get pgd only bits */
 922	uasm_i_sll(p, tmp, tmp, PGD_T_LOG2);
 923	uasm_i_addu(p, ptr, ptr, tmp); /* add in pgd offset */
 924}
 925
 926#endif /* !CONFIG_64BIT */
 927
 928static void __cpuinit build_adjust_context(u32 **p, unsigned int ctx)
 929{
 930	unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12;
 931	unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1);
 932
 933	switch (current_cpu_type()) {
 934	case CPU_VR41XX:
 935	case CPU_VR4111:
 936	case CPU_VR4121:
 937	case CPU_VR4122:
 938	case CPU_VR4131:
 939	case CPU_VR4181:
 940	case CPU_VR4181A:
 941	case CPU_VR4133:
 942		shift += 2;
 943		break;
 944
 945	default:
 946		break;
 947	}
 948
 949	if (shift)
 950		UASM_i_SRL(p, ctx, ctx, shift);
 951	uasm_i_andi(p, ctx, ctx, mask);
 952}
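     /*
      * Worked example (assuming 4 KiB pages and 4-byte PTEs, i.e.
      * PTE_T_LOG2 == 2): shift = 4 - 3 + 0 = 1 and mask = 0xff8.  The
      * Context register supplies (BadVAddr >> 13) << 4, so one extra
      * right shift yields the byte offset of the even/odd PTE pair
      * within the page table page.
      */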
 953
 954static void __cpuinit build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
 955{
  956	/*
  957	 * Bug workaround for the Nevada. It seems that under certain
  958	 * circumstances the move from cp0_context might produce a
  959	 * bogus result when the mfc0 instruction and its consumer are
  960	 * in different cachelines, or when a load instruction (probably
  961	 * any memory reference) sits between them.
  962	 */
 963	switch (current_cpu_type()) {
 964	case CPU_NEVADA:
 965		UASM_i_LW(p, ptr, 0, ptr);
 966		GET_CONTEXT(p, tmp); /* get context reg */
 967		break;
 968
 969	default:
 970		GET_CONTEXT(p, tmp); /* get context reg */
 971		UASM_i_LW(p, ptr, 0, ptr);
 972		break;
 973	}
 974
 975	build_adjust_context(p, tmp);
 976	UASM_i_ADDU(p, ptr, ptr, tmp); /* add in offset */
 977}
 978
 979static void __cpuinit build_update_entries(u32 **p, unsigned int tmp,
 980					unsigned int ptep)
 981{
 982	/*
 983	 * 64bit address support (36bit on a 32bit CPU) in a 32bit
  984	 * kernel is a special case. Only a few CPUs use it.
 985	 */
 986#ifdef CONFIG_64BIT_PHYS_ADDR
 987	if (cpu_has_64bits) {
 988		uasm_i_ld(p, tmp, 0, ptep); /* get even pte */
 989		uasm_i_ld(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
 990		if (kernel_uses_smartmips_rixi) {
 991			UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_NO_EXEC));
 992			UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_NO_EXEC));
 993			UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
 994			UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
 995			UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
 996		} else {
 997			uasm_i_dsrl_safe(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */
 998			UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
 999			uasm_i_dsrl_safe(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */
1000		}
1001		UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
1002	} else {
1003		int pte_off_even = sizeof(pte_t) / 2;
1004		int pte_off_odd = pte_off_even + sizeof(pte_t);
1005
1006		/* The pte entries are pre-shifted */
1007		uasm_i_lw(p, tmp, pte_off_even, ptep); /* get even pte */
1008		UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
1009		uasm_i_lw(p, ptep, pte_off_odd, ptep); /* get odd pte */
1010		UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
1011	}
1012#else
1013	UASM_i_LW(p, tmp, 0, ptep); /* get even pte */
1014	UASM_i_LW(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
1015	if (r45k_bvahwbug())
1016		build_tlb_probe_entry(p);
1017	if (kernel_uses_smartmips_rixi) {
1018		UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_NO_EXEC));
1019		UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_NO_EXEC));
1020		UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
1021		if (r4k_250MHZhwbug())
1022			UASM_i_MTC0(p, 0, C0_ENTRYLO0);
1023		UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
1024		UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
1025	} else {
1026		UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */
1027		if (r4k_250MHZhwbug())
1028			UASM_i_MTC0(p, 0, C0_ENTRYLO0);
1029		UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
1030		UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */
1031		if (r45k_bvahwbug())
1032			uasm_i_mfc0(p, tmp, C0_INDEX);
1033	}
1034	if (r4k_250MHZhwbug())
1035		UASM_i_MTC0(p, 0, C0_ENTRYLO1);
1036	UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
1037#endif
1038}
1039
1040struct mips_huge_tlb_info {
1041	int huge_pte;
1042	int restore_scratch;
1043};
1044
1045static struct mips_huge_tlb_info __cpuinit
1046build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
1047			       struct uasm_reloc **r, unsigned int tmp,
1048			       unsigned int ptr, int c0_scratch)
1049{
1050	struct mips_huge_tlb_info rv;
1051	unsigned int even, odd;
1052	int vmalloc_branch_delay_filled = 0;
1053	const int scratch = 1; /* Our extra working register */
1054
1055	rv.huge_pte = scratch;
1056	rv.restore_scratch = 0;
1057
1058	if (check_for_high_segbits) {
1059		UASM_i_MFC0(p, tmp, C0_BADVADDR);
1060
1061		if (pgd_reg != -1)
1062			UASM_i_MFC0(p, ptr, 31, pgd_reg);
1063		else
1064			UASM_i_MFC0(p, ptr, C0_CONTEXT);
1065
1066		if (c0_scratch >= 0)
1067			UASM_i_MTC0(p, scratch, 31, c0_scratch);
1068		else
1069			UASM_i_SW(p, scratch, scratchpad_offset(0), 0);
1070
1071		uasm_i_dsrl_safe(p, scratch, tmp,
1072				 PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
1073		uasm_il_bnez(p, r, scratch, label_vmalloc);
1074
1075		if (pgd_reg == -1) {
1076			vmalloc_branch_delay_filled = 1;
1077			/* Clear lower 23 bits of context. */
1078			uasm_i_dins(p, ptr, 0, 0, 23);
1079		}
1080	} else {
1081		if (pgd_reg != -1)
1082			UASM_i_MFC0(p, ptr, 31, pgd_reg);
1083		else
1084			UASM_i_MFC0(p, ptr, C0_CONTEXT);
1085
1086		UASM_i_MFC0(p, tmp, C0_BADVADDR);
1087
1088		if (c0_scratch >= 0)
1089			UASM_i_MTC0(p, scratch, 31, c0_scratch);
1090		else
1091			UASM_i_SW(p, scratch, scratchpad_offset(0), 0);
1092
1093		if (pgd_reg == -1)
1094			/* Clear lower 23 bits of context. */
1095			uasm_i_dins(p, ptr, 0, 0, 23);
1096
1097		uasm_il_bltz(p, r, tmp, label_vmalloc);
1098	}
1099
1100	if (pgd_reg == -1) {
1101		vmalloc_branch_delay_filled = 1;
1102		/* 1 0  1 0 1  << 6  xkphys cached */
1103		uasm_i_ori(p, ptr, ptr, 0x540);
1104		uasm_i_drotr(p, ptr, ptr, 11);
1105	}
1106
1107#ifdef __PAGETABLE_PMD_FOLDED
1108#define LOC_PTEP scratch
1109#else
1110#define LOC_PTEP ptr
1111#endif
1112
1113	if (!vmalloc_branch_delay_filled)
1114		/* get pgd offset in bytes */
1115		uasm_i_dsrl_safe(p, scratch, tmp, PGDIR_SHIFT - 3);
1116
1117	uasm_l_vmalloc_done(l, *p);
1118
1119	/*
1120	 *                         tmp          ptr
1121	 * fall-through case =   badvaddr  *pgd_current
1122	 * vmalloc case      =   badvaddr  swapper_pg_dir
1123	 */
1124
1125	if (vmalloc_branch_delay_filled)
1126		/* get pgd offset in bytes */
1127		uasm_i_dsrl_safe(p, scratch, tmp, PGDIR_SHIFT - 3);
1128
1129#ifdef __PAGETABLE_PMD_FOLDED
1130	GET_CONTEXT(p, tmp); /* get context reg */
1131#endif
1132	uasm_i_andi(p, scratch, scratch, (PTRS_PER_PGD - 1) << 3);
1133
1134	if (use_lwx_insns()) {
1135		UASM_i_LWX(p, LOC_PTEP, scratch, ptr);
1136	} else {
1137		uasm_i_daddu(p, ptr, ptr, scratch); /* add in pgd offset */
1138		uasm_i_ld(p, LOC_PTEP, 0, ptr); /* get pmd pointer */
1139	}
1140
1141#ifndef __PAGETABLE_PMD_FOLDED
1142	/* get pmd offset in bytes */
1143	uasm_i_dsrl_safe(p, scratch, tmp, PMD_SHIFT - 3);
1144	uasm_i_andi(p, scratch, scratch, (PTRS_PER_PMD - 1) << 3);
1145	GET_CONTEXT(p, tmp); /* get context reg */
1146
1147	if (use_lwx_insns()) {
1148		UASM_i_LWX(p, scratch, scratch, ptr);
1149	} else {
1150		uasm_i_daddu(p, ptr, ptr, scratch); /* add in pmd offset */
1151		UASM_i_LW(p, scratch, 0, ptr);
1152	}
1153#endif
1154	/* Adjust the context during the load latency. */
1155	build_adjust_context(p, tmp);
1156
1157#ifdef CONFIG_HUGETLB_PAGE
1158	uasm_il_bbit1(p, r, scratch, ilog2(_PAGE_HUGE), label_tlb_huge_update);
1159	/*
 1160	 * In the LWX case we don't want to do the load in the
1161	 * delay slot.  It cannot issue in the same cycle and may be
1162	 * speculative and unneeded.
1163	 */
1164	if (use_lwx_insns())
1165		uasm_i_nop(p);
1166#endif /* CONFIG_HUGETLB_PAGE */
1167
1168
1169	/* build_update_entries */
1170	if (use_lwx_insns()) {
1171		even = ptr;
1172		odd = tmp;
1173		UASM_i_LWX(p, even, scratch, tmp);
1174		UASM_i_ADDIU(p, tmp, tmp, sizeof(pte_t));
1175		UASM_i_LWX(p, odd, scratch, tmp);
1176	} else {
1177		UASM_i_ADDU(p, ptr, scratch, tmp); /* add in offset */
1178		even = tmp;
1179		odd = ptr;
1180		UASM_i_LW(p, even, 0, ptr); /* get even pte */
1181		UASM_i_LW(p, odd, sizeof(pte_t), ptr); /* get odd pte */
1182	}
1183	if (kernel_uses_smartmips_rixi) {
1184		uasm_i_dsrl_safe(p, even, even, ilog2(_PAGE_NO_EXEC));
1185		uasm_i_dsrl_safe(p, odd, odd, ilog2(_PAGE_NO_EXEC));
1186		uasm_i_drotr(p, even, even,
1187			     ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
1188		UASM_i_MTC0(p, even, C0_ENTRYLO0); /* load it */
1189		uasm_i_drotr(p, odd, odd,
1190			     ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
1191	} else {
1192		uasm_i_dsrl_safe(p, even, even, ilog2(_PAGE_GLOBAL));
1193		UASM_i_MTC0(p, even, C0_ENTRYLO0); /* load it */
1194		uasm_i_dsrl_safe(p, odd, odd, ilog2(_PAGE_GLOBAL));
1195	}
1196	UASM_i_MTC0(p, odd, C0_ENTRYLO1); /* load it */
1197
1198	if (c0_scratch >= 0) {
1199		UASM_i_MFC0(p, scratch, 31, c0_scratch);
1200		build_tlb_write_entry(p, l, r, tlb_random);
1201		uasm_l_leave(l, *p);
1202		rv.restore_scratch = 1;
1203	} else if (PAGE_SHIFT == 14 || PAGE_SHIFT == 13)  {
1204		build_tlb_write_entry(p, l, r, tlb_random);
1205		uasm_l_leave(l, *p);
1206		UASM_i_LW(p, scratch, scratchpad_offset(0), 0);
1207	} else {
1208		UASM_i_LW(p, scratch, scratchpad_offset(0), 0);
1209		build_tlb_write_entry(p, l, r, tlb_random);
1210		uasm_l_leave(l, *p);
1211		rv.restore_scratch = 1;
1212	}
1213
1214	uasm_i_eret(p); /* return from trap */
1215
1216	return rv;
1217}
1218
1219/*
1220 * For a 64-bit kernel, we are using the 64-bit XTLB refill exception
1221 * because EXL == 0.  If we wrap, we can also use the 32 instruction
1222 * slots before the XTLB refill exception handler which belong to the
1223 * unused TLB refill exception.
1224 */
1225#define MIPS64_REFILL_INSNS 32
1226
1227static void __cpuinit build_r4000_tlb_refill_handler(void)
1228{
1229	u32 *p = tlb_handler;
1230	struct uasm_label *l = labels;
1231	struct uasm_reloc *r = relocs;
1232	u32 *f;
1233	unsigned int final_len;
1234	struct mips_huge_tlb_info htlb_info __maybe_unused;
1235	enum vmalloc64_mode vmalloc_mode __maybe_unused;
1236
1237	memset(tlb_handler, 0, sizeof(tlb_handler));
1238	memset(labels, 0, sizeof(labels));
1239	memset(relocs, 0, sizeof(relocs));
1240	memset(final_handler, 0, sizeof(final_handler));
1241
1242	if ((scratch_reg > 0 || scratchpad_available()) && use_bbit_insns()) {
1243		htlb_info = build_fast_tlb_refill_handler(&p, &l, &r, K0, K1,
1244							  scratch_reg);
1245		vmalloc_mode = refill_scratch;
1246	} else {
1247		htlb_info.huge_pte = K0;
1248		htlb_info.restore_scratch = 0;
1249		vmalloc_mode = refill_noscratch;
1250		/*
1251		 * create the plain linear handler
1252		 */
1253		if (bcm1250_m3_war()) {
1254			unsigned int segbits = 44;
1255
1256			uasm_i_dmfc0(&p, K0, C0_BADVADDR);
1257			uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
1258			uasm_i_xor(&p, K0, K0, K1);
1259			uasm_i_dsrl_safe(&p, K1, K0, 62);
1260			uasm_i_dsrl_safe(&p, K0, K0, 12 + 1);
1261			uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits);
1262			uasm_i_or(&p, K0, K0, K1);
1263			uasm_il_bnez(&p, &r, K0, label_leave);
1264			/* No need for uasm_i_nop */
1265		}
1266
1267#ifdef CONFIG_64BIT
1268		build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */
1269#else
1270		build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
1271#endif
1272
1273#ifdef CONFIG_HUGETLB_PAGE
1274		build_is_huge_pte(&p, &r, K0, K1, label_tlb_huge_update);
1275#endif
1276
1277		build_get_ptep(&p, K0, K1);
1278		build_update_entries(&p, K0, K1);
1279		build_tlb_write_entry(&p, &l, &r, tlb_random);
1280		uasm_l_leave(&l, p);
1281		uasm_i_eret(&p); /* return from trap */
1282	}
1283#ifdef CONFIG_HUGETLB_PAGE
1284	uasm_l_tlb_huge_update(&l, p);
1285	build_huge_update_entries(&p, htlb_info.huge_pte, K1);
1286	build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random,
1287				   htlb_info.restore_scratch);
1288#endif
1289
1290#ifdef CONFIG_64BIT
1291	build_get_pgd_vmalloc64(&p, &l, &r, K0, K1, vmalloc_mode);
1292#endif
1293
1294	/*
1295	 * Overflow check: For the 64bit handler, we need at least one
 1296	 * free instruction slot for the wrap-around branch. In the worst
1297	 * case, if the intended insertion point is a delay slot, we
1298	 * need three, with the second nop'ed and the third being
1299	 * unused.
1300	 */
 1301	/* Loongson2 ebase is different from r4k, so we have more space */
1302#if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2)
1303	if ((p - tlb_handler) > 64)
1304		panic("TLB refill handler space exceeded");
1305#else
1306	if (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 1)
1307	    || (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 3)
1308		&& uasm_insn_has_bdelay(relocs,
1309					tlb_handler + MIPS64_REFILL_INSNS - 3)))
1310		panic("TLB refill handler space exceeded");
1311#endif
1312
1313	/*
1314	 * Now fold the handler in the TLB refill handler space.
1315	 */
1316#if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2)
1317	f = final_handler;
1318	/* Simplest case, just copy the handler. */
1319	uasm_copy_handler(relocs, labels, tlb_handler, p, f);
1320	final_len = p - tlb_handler;
1321#else /* CONFIG_64BIT */
1322	f = final_handler + MIPS64_REFILL_INSNS;
1323	if ((p - tlb_handler) <= MIPS64_REFILL_INSNS) {
1324		/* Just copy the handler. */
1325		uasm_copy_handler(relocs, labels, tlb_handler, p, f);
1326		final_len = p - tlb_handler;
1327	} else {
1328#if defined(CONFIG_HUGETLB_PAGE)
1329		const enum label_id ls = label_tlb_huge_update;
1330#else
1331		const enum label_id ls = label_vmalloc;
1332#endif
1333		u32 *split;
1334		int ov = 0;
1335		int i;
1336
1337		for (i = 0; i < ARRAY_SIZE(labels) && labels[i].lab != ls; i++)
1338			;
1339		BUG_ON(i == ARRAY_SIZE(labels));
1340		split = labels[i].addr;
1341
1342		/*
1343		 * See if we have overflown one way or the other.
1344		 */
1345		if (split > tlb_handler + MIPS64_REFILL_INSNS ||
1346		    split < p - MIPS64_REFILL_INSNS)
1347			ov = 1;
1348
1349		if (ov) {
1350			/*
1351			 * Split two instructions before the end.  One
1352			 * for the branch and one for the instruction
1353			 * in the delay slot.
1354			 */
1355			split = tlb_handler + MIPS64_REFILL_INSNS - 2;
1356
1357			/*
1358			 * If the branch would fall in a delay slot,
1359			 * we must back up an additional instruction
1360			 * so that it is no longer in a delay slot.
1361			 */
1362			if (uasm_insn_has_bdelay(relocs, split - 1))
1363				split--;
1364		}
1365		/* Copy first part of the handler. */
1366		uasm_copy_handler(relocs, labels, tlb_handler, split, f);
1367		f += split - tlb_handler;
1368
1369		if (ov) {
1370			/* Insert branch. */
1371			uasm_l_split(&l, final_handler);
1372			uasm_il_b(&f, &r, label_split);
1373			if (uasm_insn_has_bdelay(relocs, split))
1374				uasm_i_nop(&f);
1375			else {
1376				uasm_copy_handler(relocs, labels,
1377						  split, split + 1, f);
1378				uasm_move_labels(labels, f, f + 1, -1);
1379				f++;
1380				split++;
1381			}
1382		}
1383
1384		/* Copy the rest of the handler. */
1385		uasm_copy_handler(relocs, labels, split, p, final_handler);
1386		final_len = (f - (final_handler + MIPS64_REFILL_INSNS)) +
1387			    (p - split);
1388	}
1389#endif /* CONFIG_64BIT */
1390
1391	uasm_resolve_relocs(relocs, labels);
1392	pr_debug("Wrote TLB refill handler (%u instructions).\n",
1393		 final_len);
1394
1395	memcpy((void *)ebase, final_handler, 0x100);
1396
1397	dump_handler((u32 *)ebase, 64);
1398}
1399
1400/*
1401 * 128 instructions for the fastpath handler is generous and should
1402 * never be exceeded.
1403 */
1404#define FASTPATH_SIZE 128
1405
1406u32 handle_tlbl[FASTPATH_SIZE] __cacheline_aligned;
1407u32 handle_tlbs[FASTPATH_SIZE] __cacheline_aligned;
1408u32 handle_tlbm[FASTPATH_SIZE] __cacheline_aligned;
1409#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
1410u32 tlbmiss_handler_setup_pgd[16] __cacheline_aligned;
1411
1412static void __cpuinit build_r4000_setup_pgd(void)
1413{
1414	const int a0 = 4;
1415	const int a1 = 5;
1416	u32 *p = tlbmiss_handler_setup_pgd;
1417	struct uasm_label *l = labels;
1418	struct uasm_reloc *r = relocs;
1419
1420	memset(tlbmiss_handler_setup_pgd, 0, sizeof(tlbmiss_handler_setup_pgd));
1421	memset(labels, 0, sizeof(labels));
1422	memset(relocs, 0, sizeof(relocs));
1423
1424	pgd_reg = allocate_kscratch();
1425
1426	if (pgd_reg == -1) {
1427		/* PGD << 11 in c0_Context */
1428		/*
1429		 * If it is a ckseg0 address, convert to a physical
1430		 * address.  Shifting right by 29 and adding 4 will
1431		 * result in zero for these addresses.
1432		 *
1433		 */
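     		/*
     		 * e.g. a0 = 0xffffffff80000000 (start of ckseg0):
     		 * (a0 >> 29) == -4 and -4 + 4 == 0, so the branch below
     		 * falls through and the dinsm clears the upper bits,
     		 * leaving the physical address.
     		 */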
1434		UASM_i_SRA(&p, a1, a0, 29);
1435		UASM_i_ADDIU(&p, a1, a1, 4);
1436		uasm_il_bnez(&p, &r, a1, label_tlbl_goaround1);
1437		uasm_i_nop(&p);
1438		uasm_i_dinsm(&p, a0, 0, 29, 64 - 29);
1439		uasm_l_tlbl_goaround1(&l, p);
1440		UASM_i_SLL(&p, a0, a0, 11);
1441		uasm_i_jr(&p, 31);
1442		UASM_i_MTC0(&p, a0, C0_CONTEXT);
1443	} else {
1444		/* PGD in c0_KScratch */
1445		uasm_i_jr(&p, 31);
1446		UASM_i_MTC0(&p, a0, 31, pgd_reg);
1447	}
1448	if (p - tlbmiss_handler_setup_pgd > ARRAY_SIZE(tlbmiss_handler_setup_pgd))
1449		panic("tlbmiss_handler_setup_pgd space exceeded");
1450	uasm_resolve_relocs(relocs, labels);
1451	pr_debug("Wrote tlbmiss_handler_setup_pgd (%u instructions).\n",
1452		 (unsigned int)(p - tlbmiss_handler_setup_pgd));
1453
1454	dump_handler(tlbmiss_handler_setup_pgd,
1455		     ARRAY_SIZE(tlbmiss_handler_setup_pgd));
1456}
1457#endif
1458
1459static void __cpuinit
1460iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr)
1461{
1462#ifdef CONFIG_SMP
1463# ifdef CONFIG_64BIT_PHYS_ADDR
1464	if (cpu_has_64bits)
1465		uasm_i_lld(p, pte, 0, ptr);
1466	else
1467# endif
1468		UASM_i_LL(p, pte, 0, ptr);
1469#else
1470# ifdef CONFIG_64BIT_PHYS_ADDR
1471	if (cpu_has_64bits)
1472		uasm_i_ld(p, pte, 0, ptr);
1473	else
1474# endif
1475		UASM_i_LW(p, pte, 0, ptr);
1476#endif
1477}
1478
1479static void __cpuinit
1480iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
1481	unsigned int mode)
1482{
1483#ifdef CONFIG_64BIT_PHYS_ADDR
1484	unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY);
1485#endif
1486
1487	uasm_i_ori(p, pte, pte, mode);
1488#ifdef CONFIG_SMP
1489# ifdef CONFIG_64BIT_PHYS_ADDR
1490	if (cpu_has_64bits)
1491		uasm_i_scd(p, pte, 0, ptr);
1492	else
1493# endif
1494		UASM_i_SC(p, pte, 0, ptr);
1495
1496	if (r10000_llsc_war())
1497		uasm_il_beqzl(p, r, pte, label_smp_pgtable_change);
1498	else
1499		uasm_il_beqz(p, r, pte, label_smp_pgtable_change);
1500
1501# ifdef CONFIG_64BIT_PHYS_ADDR
1502	if (!cpu_has_64bits) {
1503		/* no uasm_i_nop needed */
1504		uasm_i_ll(p, pte, sizeof(pte_t) / 2, ptr);
1505		uasm_i_ori(p, pte, pte, hwmode);
1506		uasm_i_sc(p, pte, sizeof(pte_t) / 2, ptr);
1507		uasm_il_beqz(p, r, pte, label_smp_pgtable_change);
1508		/* no uasm_i_nop needed */
1509		uasm_i_lw(p, pte, 0, ptr);
1510	} else
1511		uasm_i_nop(p);
1512# else
1513	uasm_i_nop(p);
1514# endif
1515#else
1516# ifdef CONFIG_64BIT_PHYS_ADDR
1517	if (cpu_has_64bits)
1518		uasm_i_sd(p, pte, 0, ptr);
1519	else
1520# endif
1521		UASM_i_SW(p, pte, 0, ptr);
1522
1523# ifdef CONFIG_64BIT_PHYS_ADDR
1524	if (!cpu_has_64bits) {
1525		uasm_i_lw(p, pte, sizeof(pte_t) / 2, ptr);
1526		uasm_i_ori(p, pte, pte, hwmode);
1527		uasm_i_sw(p, pte, sizeof(pte_t) / 2, ptr);
1528		uasm_i_lw(p, pte, 0, ptr);
1529	}
1530# endif
1531#endif
1532}
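     /*
      * On SMP the PTE is rewritten with an LL/SC sequence; if the SC
      * fails because another CPU touched the page-table line, the beqz
      * above branches back to label_smp_pgtable_change (placed by
      * build_r4000_tlbchange_handler_head) and the update is retried.
      */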
1533
1534/*
1535 * Check if PTE is present, if not then jump to LABEL. PTR points to
1536 * the page table where this PTE is located, PTE will be re-loaded
 1537 * with its original value.
1538 */
1539static void __cpuinit
1540build_pte_present(u32 **p, struct uasm_reloc **r,
1541		  int pte, int ptr, int scratch, enum label_id lid)
1542{
1543	int t = scratch >= 0 ? scratch : pte;
1544
1545	if (kernel_uses_smartmips_rixi) {
1546		if (use_bbit_insns()) {
1547			uasm_il_bbit0(p, r, pte, ilog2(_PAGE_PRESENT), lid);
1548			uasm_i_nop(p);
1549		} else {
1550			uasm_i_andi(p, t, pte, _PAGE_PRESENT);
1551			uasm_il_beqz(p, r, t, lid);
1552			if (pte == t)
1553				/* You lose the SMP race :-(*/
1554				iPTE_LW(p, pte, ptr);
1555		}
1556	} else {
1557		uasm_i_andi(p, t, pte, _PAGE_PRESENT | _PAGE_READ);
1558		uasm_i_xori(p, t, t, _PAGE_PRESENT | _PAGE_READ);
1559		uasm_il_bnez(p, r, t, lid);
1560		if (pte == t)
1561			/* You lose the SMP race :-(*/
1562			iPTE_LW(p, pte, ptr);
1563	}
1564}
1565
1566/* Make PTE valid, store result in PTR. */
1567static void __cpuinit
1568build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte,
1569		 unsigned int ptr)
1570{
1571	unsigned int mode = _PAGE_VALID | _PAGE_ACCESSED;
1572
1573	iPTE_SW(p, r, pte, ptr, mode);
1574}
1575
1576/*
1577 * Check if PTE can be written to, if not branch to LABEL. Regardless
1578 * restore PTE with value from PTR when done.
1579 */
1580static void __cpuinit
1581build_pte_writable(u32 **p, struct uasm_reloc **r,
1582		   unsigned int pte, unsigned int ptr, int scratch,
1583		   enum label_id lid)
1584{
1585	int t = scratch >= 0 ? scratch : pte;
1586
1587	uasm_i_andi(p, t, pte, _PAGE_PRESENT | _PAGE_WRITE);
1588	uasm_i_xori(p, t, t, _PAGE_PRESENT | _PAGE_WRITE);
1589	uasm_il_bnez(p, r, t, lid);
1590	if (pte == t)
1591		/* You lose the SMP race :-(*/
1592		iPTE_LW(p, pte, ptr);
1593	else
1594		uasm_i_nop(p);
1595}
1596
1597/* Make PTE writable, update software status bits as well, then store
1598 * at PTR.
1599 */
1600static void __cpuinit
1601build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte,
1602		 unsigned int ptr)
1603{
1604	unsigned int mode = (_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID
1605			     | _PAGE_DIRTY);
1606
1607	iPTE_SW(p, r, pte, ptr, mode);
1608}
1609
1610/*
1611 * Check if PTE can be modified, if not branch to LABEL. Regardless
1612 * restore PTE with value from PTR when done.
1613 */
1614static void __cpuinit
1615build_pte_modifiable(u32 **p, struct uasm_reloc **r,
1616		     unsigned int pte, unsigned int ptr, int scratch,
1617		     enum label_id lid)
1618{
1619	if (use_bbit_insns()) {
1620		uasm_il_bbit0(p, r, pte, ilog2(_PAGE_WRITE), lid);
1621		uasm_i_nop(p);
1622	} else {
1623		int t = scratch >= 0 ? scratch : pte;
1624		uasm_i_andi(p, t, pte, _PAGE_WRITE);
1625		uasm_il_beqz(p, r, t, lid);
1626		if (pte == t)
1627			/* You lose the SMP race :-(*/
1628			iPTE_LW(p, pte, ptr);
1629	}
1630}
1631
1632#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
1633
1634
1635/*
1636 * R3000 style TLB load/store/modify handlers.
1637 */
1638
1639/*
1640 * This places the pte into ENTRYLO0 and writes it with tlbwi.
1641 * Then it returns.
1642 */
1643static void __cpuinit
1644build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp)
1645{
1646	uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
1647	uasm_i_mfc0(p, tmp, C0_EPC); /* cp0 delay */
1648	uasm_i_tlbwi(p);
1649	uasm_i_jr(p, tmp);
1650	uasm_i_rfe(p); /* branch delay */
1651}
1652
1653/*
1654 * This places the pte into ENTRYLO0 and writes it with tlbwi
1655 * or tlbwr as appropriate.  This is because the index register
1656 * may have the probe fail bit set as a result of a trap on a
1657 * kseg2 access, i.e. without refill.  Then it returns.
1658 */
1659static void __cpuinit
1660build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l,
1661			     struct uasm_reloc **r, unsigned int pte,
1662			     unsigned int tmp)
1663{
1664	uasm_i_mfc0(p, tmp, C0_INDEX);
1665	uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
1666	uasm_il_bltz(p, r, tmp, label_r3000_write_probe_fail); /* cp0 delay */
1667	uasm_i_mfc0(p, tmp, C0_EPC); /* branch delay */
1668	uasm_i_tlbwi(p); /* cp0 delay */
1669	uasm_i_jr(p, tmp);
1670	uasm_i_rfe(p); /* branch delay */
1671	uasm_l_r3000_write_probe_fail(l, *p);
1672	uasm_i_tlbwr(p); /* cp0 delay */
1673	uasm_i_jr(p, tmp);
1674	uasm_i_rfe(p); /* branch delay */
1675}
1676
1677static void __cpuinit
1678build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte,
1679				   unsigned int ptr)
1680{
1681	long pgdc = (long)pgd_current;
1682
1683	uasm_i_mfc0(p, pte, C0_BADVADDR);
1684	uasm_i_lui(p, ptr, uasm_rel_hi(pgdc)); /* cp0 delay */
1685	uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
1686	uasm_i_srl(p, pte, pte, 22); /* load delay */
1687	uasm_i_sll(p, pte, pte, 2);
1688	uasm_i_addu(p, ptr, ptr, pte);
1689	uasm_i_mfc0(p, pte, C0_CONTEXT);
1690	uasm_i_lw(p, ptr, 0, ptr); /* cp0 delay */
1691	uasm_i_andi(p, pte, pte, 0xffc); /* load delay */
1692	uasm_i_addu(p, ptr, ptr, pte);
1693	uasm_i_lw(p, pte, 0, ptr);
1694	uasm_i_tlbp(p); /* load delay */
1695}
1696
1697static void __cpuinit build_r3000_tlb_load_handler(void)
1698{
1699	u32 *p = handle_tlbl;
1700	struct uasm_label *l = labels;
1701	struct uasm_reloc *r = relocs;
1702
1703	memset(handle_tlbl, 0, sizeof(handle_tlbl));
1704	memset(labels, 0, sizeof(labels));
1705	memset(relocs, 0, sizeof(relocs));
1706
1707	build_r3000_tlbchange_handler_head(&p, K0, K1);
1708	build_pte_present(&p, &r, K0, K1, -1, label_nopage_tlbl);
1709	uasm_i_nop(&p); /* load delay */
1710	build_make_valid(&p, &r, K0, K1);
1711	build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);
1712
1713	uasm_l_nopage_tlbl(&l, p);
1714	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
1715	uasm_i_nop(&p);
1716
1717	if ((p - handle_tlbl) > FASTPATH_SIZE)
1718		panic("TLB load handler fastpath space exceeded");
1719
1720	uasm_resolve_relocs(relocs, labels);
1721	pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
1722		 (unsigned int)(p - handle_tlbl));
1723
1724	dump_handler(handle_tlbl, ARRAY_SIZE(handle_tlbl));
1725}
1726
1727static void __cpuinit build_r3000_tlb_store_handler(void)
1728{
1729	u32 *p = handle_tlbs;
1730	struct uasm_label *l = labels;
1731	struct uasm_reloc *r = relocs;
1732
1733	memset(handle_tlbs, 0, sizeof(handle_tlbs));
1734	memset(labels, 0, sizeof(labels));
1735	memset(relocs, 0, sizeof(relocs));
1736
1737	build_r3000_tlbchange_handler_head(&p, K0, K1);
1738	build_pte_writable(&p, &r, K0, K1, -1, label_nopage_tlbs);
1739	uasm_i_nop(&p); /* load delay */
1740	build_make_write(&p, &r, K0, K1);
1741	build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);
1742
1743	uasm_l_nopage_tlbs(&l, p);
1744	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
1745	uasm_i_nop(&p);
1746
1747	if ((p - handle_tlbs) > FASTPATH_SIZE)
1748		panic("TLB store handler fastpath space exceeded");
1749
1750	uasm_resolve_relocs(relocs, labels);
1751	pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
1752		 (unsigned int)(p - handle_tlbs));
1753
1754	dump_handler(handle_tlbs, ARRAY_SIZE(handle_tlbs));
1755}
1756
1757static void __cpuinit build_r3000_tlb_modify_handler(void)
1758{
1759	u32 *p = handle_tlbm;
1760	struct uasm_label *l = labels;
1761	struct uasm_reloc *r = relocs;
1762
1763	memset(handle_tlbm, 0, sizeof(handle_tlbm));
1764	memset(labels, 0, sizeof(labels));
1765	memset(relocs, 0, sizeof(relocs));
1766
1767	build_r3000_tlbchange_handler_head(&p, K0, K1);
1768	build_pte_modifiable(&p, &r, K0, K1,  -1, label_nopage_tlbm);
1769	uasm_i_nop(&p); /* load delay */
1770	build_make_write(&p, &r, K0, K1);
1771	build_r3000_pte_reload_tlbwi(&p, K0, K1);
1772
1773	uasm_l_nopage_tlbm(&l, p);
1774	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
1775	uasm_i_nop(&p);
1776
1777	if ((p - handle_tlbm) > FASTPATH_SIZE)
1778		panic("TLB modify handler fastpath space exceeded");
1779
1780	uasm_resolve_relocs(relocs, labels);
1781	pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
1782		 (unsigned int)(p - handle_tlbm));
1783
1784	dump_handler(handle_tlbm, ARRAY_SIZE(handle_tlbm));
1785}
1786#endif /* CONFIG_MIPS_PGD_C0_CONTEXT */
1787
1788/*
1789 * R4000 style TLB load/store/modify handlers.
1790 */
1791static struct work_registers __cpuinit
1792build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
1793				   struct uasm_reloc **r)
1794{
1795	struct work_registers wr = build_get_work_registers(p);
1796
1797#ifdef CONFIG_64BIT
1798	build_get_pmde64(p, l, r, wr.r1, wr.r2); /* get pmd in ptr */
1799#else
1800	build_get_pgde32(p, wr.r1, wr.r2); /* get pgd in ptr */
1801#endif
1802
1803#ifdef CONFIG_HUGETLB_PAGE
1804	/*
1805	 * For huge tlb entries, pmd doesn't contain an address but
1806	 * instead contains the tlb pte. Check the PAGE_HUGE bit and
1807	 * see if we need to jump to huge tlb processing.
1808	 */
1809	build_is_huge_pte(p, r, wr.r1, wr.r2, label_tlb_huge_update);
1810#endif
1811
1812	UASM_i_MFC0(p, wr.r1, C0_BADVADDR);
1813	UASM_i_LW(p, wr.r2, 0, wr.r2);
1814	UASM_i_SRL(p, wr.r1, wr.r1, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2);
1815	uasm_i_andi(p, wr.r1, wr.r1, (PTRS_PER_PTE - 1) << PTE_T_LOG2);
1816	UASM_i_ADDU(p, wr.r2, wr.r2, wr.r1);
1817
1818#ifdef CONFIG_SMP
1819	uasm_l_smp_pgtable_change(l, *p);
1820#endif
1821	iPTE_LW(p, wr.r1, wr.r2); /* get even pte */
1822	if (!m4kc_tlbp_war())
1823		build_tlb_probe_entry(p);
1824	return wr;
1825}
1826
1827static void __cpuinit
1828build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
1829				   struct uasm_reloc **r, unsigned int tmp,
1830				   unsigned int ptr)
1831{
1832	uasm_i_ori(p, ptr, ptr, sizeof(pte_t));
1833	uasm_i_xori(p, ptr, ptr, sizeof(pte_t));
1834	build_update_entries(p, tmp, ptr);
1835	build_tlb_write_entry(p, l, r, tlb_indexed);
1836	uasm_l_leave(l, *p);
1837	build_restore_work_registers(p);
1838	uasm_i_eret(p); /* return from trap */
1839
1840#ifdef CONFIG_64BIT
1841	build_get_pgd_vmalloc64(p, l, r, tmp, ptr, not_refill);
1842#endif
1843}
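/*
 * The ori/xori pair at the start of the tail above rounds the PTE
 * pointer down to the even entry of an even/odd pair; a sketch with
 * an assumed 8-byte pte_t:
 *
 *	ptr = 0x...1008 (odd)  -> ori 8 -> 0x...1008 -> xori 8 -> 0x...1000
 *	ptr = 0x...1000 (even) -> ori 8 -> 0x...1008 -> xori 8 -> 0x...1000
 *
 * i.e. bit log2(sizeof(pte_t)) is forced to zero without needing an
 * extra register to hold a mask.
 */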
1844
1845static void __cpuinit build_r4000_tlb_load_handler(void)
1846{
1847	u32 *p = handle_tlbl;
1848	struct uasm_label *l = labels;
1849	struct uasm_reloc *r = relocs;
1850	struct work_registers wr;
1851
1852	memset(handle_tlbl, 0, sizeof(handle_tlbl));
1853	memset(labels, 0, sizeof(labels));
1854	memset(relocs, 0, sizeof(relocs));
1855
1856	if (bcm1250_m3_war()) {
1857		unsigned int segbits = 44;
1858
1859		uasm_i_dmfc0(&p, K0, C0_BADVADDR);
1860		uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
1861		uasm_i_xor(&p, K0, K0, K1);
1862		uasm_i_dsrl_safe(&p, K1, K0, 62);
1863		uasm_i_dsrl_safe(&p, K0, K0, 12 + 1);
1864		uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits);
1865		uasm_i_or(&p, K0, K0, K1);
1866		uasm_il_bnez(&p, &r, K0, label_leave);
1867		/* No need for uasm_i_nop */
1868	}
1869
1870	wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
1871	build_pte_present(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbl);
1872	if (m4kc_tlbp_war())
1873		build_tlb_probe_entry(&p);
1874
1875	if (kernel_uses_smartmips_rixi) {
1876		/*
1877		 * If the page is not _PAGE_VALID, RI or XI could not
1878		 * have triggered it.  Skip the expensive test.
1879		 */
1880		if (use_bbit_insns()) {
1881			uasm_il_bbit0(&p, &r, wr.r1, ilog2(_PAGE_VALID),
1882				      label_tlbl_goaround1);
1883		} else {
1884			uasm_i_andi(&p, wr.r3, wr.r1, _PAGE_VALID);
1885			uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround1);
1886		}
1887		uasm_i_nop(&p);
1888
1889		uasm_i_tlbr(&p);
1890		/* Examine entrylo 0 or 1 based on ptr. */
1891		if (use_bbit_insns()) {
1892			uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8);
1893		} else {
1894			uasm_i_andi(&p, wr.r3, wr.r2, sizeof(pte_t));
1895			uasm_i_beqz(&p, wr.r3, 8);
1896		}
1897		/* load it in the delay slot */
1898		UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO0);
1899		/* load it if ptr is odd */
1900		UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO1);
1901		/*
1902		 * If the entryLo (now in wr.r3) is valid (bit 1), RI or
1903		 * XI must have triggered it.
1904		 */
1905		if (use_bbit_insns()) {
1906			uasm_il_bbit1(&p, &r, wr.r3, 1, label_nopage_tlbl);
1907			uasm_i_nop(&p);
1908			uasm_l_tlbl_goaround1(&l, p);
1909		} else {
1910			uasm_i_andi(&p, wr.r3, wr.r3, 2);
1911			uasm_il_bnez(&p, &r, wr.r3, label_nopage_tlbl);
1912			uasm_i_nop(&p);
1913		}
1914		uasm_l_tlbl_goaround1(&l, p);
1915	}
1916	build_make_valid(&p, &r, wr.r1, wr.r2);
1917	build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
1918
1919#ifdef CONFIG_HUGETLB_PAGE
1920	/*
1921	 * This is the entry point when build_r4000_tlbchange_handler_head
1922	 * spots a huge page.
1923	 */
1924	uasm_l_tlb_huge_update(&l, p);
1925	iPTE_LW(&p, wr.r1, wr.r2);
1926	build_pte_present(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbl);
1927	build_tlb_probe_entry(&p);
1928
1929	if (kernel_uses_smartmips_rixi) {
1930		/*
1931		 * If the page is not _PAGE_VALID, RI or XI could not
1932		 * have triggered it.  Skip the expensive test.
1933		 */
1934		if (use_bbit_insns()) {
1935			uasm_il_bbit0(&p, &r, wr.r1, ilog2(_PAGE_VALID),
1936				      label_tlbl_goaround2);
1937		} else {
1938			uasm_i_andi(&p, wr.r3, wr.r1, _PAGE_VALID);
1939			uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround2);
1940		}
1941		uasm_i_nop(&p);
1942
1943		uasm_i_tlbr(&p);
1944		/* Examine entrylo 0 or 1 based on ptr. */
1945		if (use_bbit_insns()) {
1946			uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8);
1947		} else {
1948			uasm_i_andi(&p, wr.r3, wr.r2, sizeof(pte_t));
1949			uasm_i_beqz(&p, wr.r3, 8);
1950		}
1951		/* load it in the delay slot */
1952		UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO0);
1953		/* load it if ptr is odd */
1954		UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO1);
1955		/*
1956		 * If the entryLo (now in wr.r3) is valid (bit 1), RI or
1957		 * XI must have triggered it.
1958		 */
1959		if (use_bbit_insns()) {
1960			uasm_il_bbit0(&p, &r, wr.r3, 1, label_tlbl_goaround2);
1961		} else {
1962			uasm_i_andi(&p, wr.r3, wr.r3, 2);
1963			uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround2);
1964		}
1965		if (PM_DEFAULT_MASK == 0)
1966			uasm_i_nop(&p);
1967		/*
1968		 * We clobbered C0_PAGEMASK, restore it.  On the other branch
1969		 * it is restored in build_huge_tlb_write_entry.
1970		 */
1971		build_restore_pagemask(&p, &r, wr.r3, label_nopage_tlbl, 0);
1972
1973		uasm_l_tlbl_goaround2(&l, p);
1974	}
1975	uasm_i_ori(&p, wr.r1, wr.r1, (_PAGE_ACCESSED | _PAGE_VALID));
1976	build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
1977#endif
1978
1979	uasm_l_nopage_tlbl(&l, p);
1980	build_restore_work_registers(&p);
1981	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
1982	uasm_i_nop(&p);
1983
1984	if ((p - handle_tlbl) > FASTPATH_SIZE)
1985		panic("TLB load handler fastpath space exceeded");
1986
1987	uasm_resolve_relocs(relocs, labels);
1988	pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
1989		 (unsigned int)(p - handle_tlbl));
1990
1991	dump_handler(handle_tlbl, ARRAY_SIZE(handle_tlbl));
1992}
1993
1994static void __cpuinit build_r4000_tlb_store_handler(void)
1995{
1996	u32 *p = handle_tlbs;
1997	struct uasm_label *l = labels;
1998	struct uasm_reloc *r = relocs;
1999	struct work_registers wr;
2000
2001	memset(handle_tlbs, 0, sizeof(handle_tlbs));
2002	memset(labels, 0, sizeof(labels));
2003	memset(relocs, 0, sizeof(relocs));
2004
2005	wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
2006	build_pte_writable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbs);
2007	if (m4kc_tlbp_war())
2008		build_tlb_probe_entry(&p);
2009	build_make_write(&p, &r, wr.r1, wr.r2);
2010	build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
2011
2012#ifdef CONFIG_HUGETLB_PAGE
2013	/*
2014	 * This is the entry point when
2015	 * build_r4000_tlbchange_handler_head spots a huge page.
2016	 */
2017	uasm_l_tlb_huge_update(&l, p);
2018	iPTE_LW(&p, wr.r1, wr.r2);
2019	build_pte_writable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbs);
2020	build_tlb_probe_entry(&p);
2021	uasm_i_ori(&p, wr.r1, wr.r1,
2022		   _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
2023	build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
2024#endif
2025
2026	uasm_l_nopage_tlbs(&l, p);
2027	build_restore_work_registers(&p);
2028	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
2029	uasm_i_nop(&p);
2030
2031	if ((p - handle_tlbs) > FASTPATH_SIZE)
2032		panic("TLB store handler fastpath space exceeded");
2033
2034	uasm_resolve_relocs(relocs, labels);
2035	pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
2036		 (unsigned int)(p - handle_tlbs));
2037
2038	dump_handler(handle_tlbs, ARRAY_SIZE(handle_tlbs));
2039}
2040
2041static void __cpuinit build_r4000_tlb_modify_handler(void)
2042{
2043	u32 *p = handle_tlbm;
2044	struct uasm_label *l = labels;
2045	struct uasm_reloc *r = relocs;
2046	struct work_registers wr;
2047
2048	memset(handle_tlbm, 0, sizeof(handle_tlbm));
2049	memset(labels, 0, sizeof(labels));
2050	memset(relocs, 0, sizeof(relocs));
2051
2052	wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
2053	build_pte_modifiable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbm);
2054	if (m4kc_tlbp_war())
2055		build_tlb_probe_entry(&p);
2056	/* Present and writable bits set, set accessed and dirty bits. */
2057	build_make_write(&p, &r, wr.r1, wr.r2);
2058	build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
2059
2060#ifdef CONFIG_HUGETLB_PAGE
2061	/*
2062	 * This is the entry point when
2063	 * build_r4000_tlbchange_handler_head spots a huge page.
2064	 */
2065	uasm_l_tlb_huge_update(&l, p);
2066	iPTE_LW(&p, wr.r1, wr.r2);
2067	build_pte_modifiable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbm);
2068	build_tlb_probe_entry(&p);
2069	uasm_i_ori(&p, wr.r1, wr.r1,
2070		   _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
2071	build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
2072#endif
2073
2074	uasm_l_nopage_tlbm(&l, p);
2075	build_restore_work_registers(&p);
2076	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
2077	uasm_i_nop(&p);
2078
2079	if ((p - handle_tlbm) > FASTPATH_SIZE)
2080		panic("TLB modify handler fastpath space exceeded");
2081
2082	uasm_resolve_relocs(relocs, labels);
2083	pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
2084		 (unsigned int)(p - handle_tlbm));
2085
2086	dump_handler(handle_tlbm, ARRAY_SIZE(handle_tlbm));
2087}
2088
2089void __cpuinit build_tlb_refill_handler(void)
2090{
2091	/*
2092	 * The refill handler is generated per CPU; multi-node systems
2093	 * may have local storage for it. The other handlers are only
2094	 * needed once.
2095	 */
2096	static int run_once = 0;
2097
2098#ifdef CONFIG_64BIT
2099	check_for_high_segbits = current_cpu_data.vmbits > (PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
2100#endif
2101
2102	switch (current_cpu_type()) {
2103	case CPU_R2000:
2104	case CPU_R3000:
2105	case CPU_R3000A:
2106	case CPU_R3081E:
2107	case CPU_TX3912:
2108	case CPU_TX3922:
2109	case CPU_TX3927:
2110#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
2111		build_r3000_tlb_refill_handler();
2112		if (!run_once) {
2113			build_r3000_tlb_load_handler();
2114			build_r3000_tlb_store_handler();
2115			build_r3000_tlb_modify_handler();
2116			run_once++;
2117		}
2118#else
2119		panic("No R3000 TLB refill handler");
2120#endif
2121		break;
2122
2123	case CPU_R6000:
2124	case CPU_R6000A:
2125		panic("No R6000 TLB refill handler yet");
2126		break;
2127
2128	case CPU_R8000:
2129		panic("No R8000 TLB refill handler yet");
2130		break;
2131
2132	default:
2133		if (!run_once) {
2134			scratch_reg = allocate_kscratch();
2135#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
2136			build_r4000_setup_pgd();
2137#endif
2138			build_r4000_tlb_load_handler();
2139			build_r4000_tlb_store_handler();
2140			build_r4000_tlb_modify_handler();
2141			run_once++;
2142		}
2143		build_r4000_tlb_refill_handler();
2144	}
2145}
2146
2147void __cpuinit flush_tlb_handlers(void)
2148{
2149	local_flush_icache_range((unsigned long)handle_tlbl,
2150			   (unsigned long)handle_tlbl + sizeof(handle_tlbl));
2151	local_flush_icache_range((unsigned long)handle_tlbs,
2152			   (unsigned long)handle_tlbs + sizeof(handle_tlbs));
2153	local_flush_icache_range((unsigned long)handle_tlbm,
2154			   (unsigned long)handle_tlbm + sizeof(handle_tlbm));
2155#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
2156	local_flush_icache_range((unsigned long)tlbmiss_handler_setup_pgd,
2157			   (unsigned long)tlbmiss_handler_setup_pgd + sizeof(tlbmiss_handler_setup_pgd));
2158#endif
2159}
v4.10.11
   1/*
   2 * This file is subject to the terms and conditions of the GNU General Public
   3 * License.  See the file "COPYING" in the main directory of this archive
   4 * for more details.
   5 *
   6 * Synthesize TLB refill handlers at runtime.
   7 *
   8 * Copyright (C) 2004, 2005, 2006, 2008	 Thiemo Seufer
   9 * Copyright (C) 2005, 2007, 2008, 2009	 Maciej W. Rozycki
  10 * Copyright (C) 2006  Ralf Baechle (ralf@linux-mips.org)
  11 * Copyright (C) 2008, 2009 Cavium Networks, Inc.
  12 * Copyright (C) 2011  MIPS Technologies, Inc.
  13 *
  14 * ... and the days got worse and worse and now you see
  15 * I've gone completely out of my mind.
  16 *
  17 * They're coming to take me a away haha
  18 * they're coming to take me a away hoho hihi haha
  19 * to the funny farm where code is beautiful all the time ...
  20 *
  21 * (Condolences to Napoleon XIV)
  22 */
  23
  24#include <linux/bug.h>
  25#include <linux/kernel.h>
  26#include <linux/types.h>
  27#include <linux/smp.h>
  28#include <linux/string.h>
  29#include <linux/cache.h>
  30
  31#include <asm/cacheflush.h>
  32#include <asm/cpu-type.h>
  33#include <asm/pgtable.h>
  34#include <asm/war.h>
  35#include <asm/uasm.h>
  36#include <asm/setup.h>
  37
  38static int mips_xpa_disabled;
  39
  40static int __init xpa_disable(char *s)
  41{
  42	mips_xpa_disabled = 1;
  43
  44	return 1;
  45}
  46
  47__setup("noxpa", xpa_disable);
  48
  49/*
  50 * TLB load/store/modify handlers.
  51 *
  52 * Only the fastpath gets synthesized at runtime, the slowpath for
  53 * do_page_fault remains normal asm.
  54 */
  55extern void tlb_do_page_fault_0(void);
  56extern void tlb_do_page_fault_1(void);
  57
  58struct work_registers {
  59	int r1;
  60	int r2;
  61	int r3;
  62};
  63
  64struct tlb_reg_save {
  65	unsigned long a;
  66	unsigned long b;
  67} ____cacheline_aligned_in_smp;
  68
  69static struct tlb_reg_save handler_reg_save[NR_CPUS];
  70
  71static inline int r45k_bvahwbug(void)
  72{
  73	/* XXX: We should probe for the presence of this bug, but we don't. */
  74	return 0;
  75}
  76
  77static inline int r4k_250MHZhwbug(void)
  78{
  79	/* XXX: We should probe for the presence of this bug, but we don't. */
  80	return 0;
  81}
  82
  83static inline int __maybe_unused bcm1250_m3_war(void)
  84{
  85	return BCM1250_M3_WAR;
  86}
  87
  88static inline int __maybe_unused r10000_llsc_war(void)
  89{
  90	return R10000_LLSC_WAR;
  91}
  92
  93static int use_bbit_insns(void)
  94{
  95	switch (current_cpu_type()) {
  96	case CPU_CAVIUM_OCTEON:
  97	case CPU_CAVIUM_OCTEON_PLUS:
  98	case CPU_CAVIUM_OCTEON2:
  99	case CPU_CAVIUM_OCTEON3:
 100		return 1;
 101	default:
 102		return 0;
 103	}
 104}
 105
 106static int use_lwx_insns(void)
 107{
 108	switch (current_cpu_type()) {
 109	case CPU_CAVIUM_OCTEON2:
 110	case CPU_CAVIUM_OCTEON3:
 111		return 1;
 112	default:
 113		return 0;
 114	}
 115}
 116#if defined(CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE) && \
 117    CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
 118static bool scratchpad_available(void)
 119{
 120	return true;
 121}
 122static int scratchpad_offset(int i)
 123{
 124	/*
 125	 * CVMSEG starts at address -32768 and extends for
 126	 * CAVIUM_OCTEON_CVMSEG_SIZE 128 byte cache lines.
 127	 */
 128	i += 1; /* Kernel use starts at the top and works down. */
 129	return CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE * 128 - (8 * i) - 32768;
 130}
 131#else
 132static bool scratchpad_available(void)
 133{
 134	return false;
 135}
 136static int scratchpad_offset(int i)
 137{
 138	BUG();
 139	/* Really unreachable, but evidently some GCC want this. */
 140	return 0;
 141}
 142#endif
 143/*
 144 * Found by experiment: At least some revisions of the 4kc throw under
 145 * some circumstances a machine check exception, triggered by invalid
 146 * values in the index register.  Delaying the tlbp instruction until
 147 * after the next branch,  plus adding an additional nop in front of
 148 * tlbwi/tlbwr avoids the invalid index register values. Nobody knows
 149 * why; it's not an issue caused by the core RTL.
 150 *
 151 */
 152static int m4kc_tlbp_war(void)
 153{
 154	return (current_cpu_data.processor_id & 0xffff00) ==
 155	       (PRID_COMP_MIPS | PRID_IMP_4KC);
 156}
 157
 158/* Handle labels (which must be positive integers). */
 159enum label_id {
 160	label_second_part = 1,
 161	label_leave,
 162	label_vmalloc,
 163	label_vmalloc_done,
 164	label_tlbw_hazard_0,
 165	label_split = label_tlbw_hazard_0 + 8,
 166	label_tlbl_goaround1,
 167	label_tlbl_goaround2,
 168	label_nopage_tlbl,
 169	label_nopage_tlbs,
 170	label_nopage_tlbm,
 171	label_smp_pgtable_change,
 172	label_r3000_write_probe_fail,
 173	label_large_segbits_fault,
 174#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
 175	label_tlb_huge_update,
 176#endif
 177};
 178
 179UASM_L_LA(_second_part)
 180UASM_L_LA(_leave)
 181UASM_L_LA(_vmalloc)
 182UASM_L_LA(_vmalloc_done)
 183/* _tlbw_hazard_x is handled differently.  */
 184UASM_L_LA(_split)
 185UASM_L_LA(_tlbl_goaround1)
 186UASM_L_LA(_tlbl_goaround2)
 187UASM_L_LA(_nopage_tlbl)
 188UASM_L_LA(_nopage_tlbs)
 189UASM_L_LA(_nopage_tlbm)
 190UASM_L_LA(_smp_pgtable_change)
 191UASM_L_LA(_r3000_write_probe_fail)
 192UASM_L_LA(_large_segbits_fault)
 193#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
 194UASM_L_LA(_tlb_huge_update)
 195#endif
 196
 197static int hazard_instance;
 198
 199static void uasm_bgezl_hazard(u32 **p, struct uasm_reloc **r, int instance)
 200{
 201	switch (instance) {
 202	case 0 ... 7:
 203		uasm_il_bgezl(p, r, 0, label_tlbw_hazard_0 + instance);
 204		return;
 205	default:
 206		BUG();
 207	}
 208}
 209
 210static void uasm_bgezl_label(struct uasm_label **l, u32 **p, int instance)
 211{
 212	switch (instance) {
 213	case 0 ... 7:
 214		uasm_build_label(l, *p, label_tlbw_hazard_0 + instance);
 215		break;
 216	default:
 217		BUG();
 218	}
 219}
 220
 221/*
 222 * pgtable bits are assigned dynamically depending on processor feature
 223 * and statically based on kernel configuration.  This spits out the actual
  224 * values the kernel is using.  Required to make sense of disassembled
 225 * TLB exception handlers.
 226 */
 227static void output_pgtable_bits_defines(void)
 228{
 229#define pr_define(fmt, ...)					\
 230	pr_debug("#define " fmt, ##__VA_ARGS__)
 231
 232	pr_debug("#include <asm/asm.h>\n");
 233	pr_debug("#include <asm/regdef.h>\n");
 234	pr_debug("\n");
 235
 236	pr_define("_PAGE_PRESENT_SHIFT %d\n", _PAGE_PRESENT_SHIFT);
 237	pr_define("_PAGE_NO_READ_SHIFT %d\n", _PAGE_NO_READ_SHIFT);
 238	pr_define("_PAGE_WRITE_SHIFT %d\n", _PAGE_WRITE_SHIFT);
 239	pr_define("_PAGE_ACCESSED_SHIFT %d\n", _PAGE_ACCESSED_SHIFT);
 240	pr_define("_PAGE_MODIFIED_SHIFT %d\n", _PAGE_MODIFIED_SHIFT);
 241#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
 242	pr_define("_PAGE_HUGE_SHIFT %d\n", _PAGE_HUGE_SHIFT);
 243#endif
 244#ifdef _PAGE_NO_EXEC_SHIFT
 245	if (cpu_has_rixi)
 246		pr_define("_PAGE_NO_EXEC_SHIFT %d\n", _PAGE_NO_EXEC_SHIFT);
 247#endif
 248	pr_define("_PAGE_GLOBAL_SHIFT %d\n", _PAGE_GLOBAL_SHIFT);
 249	pr_define("_PAGE_VALID_SHIFT %d\n", _PAGE_VALID_SHIFT);
 250	pr_define("_PAGE_DIRTY_SHIFT %d\n", _PAGE_DIRTY_SHIFT);
 251	pr_define("_PFN_SHIFT %d\n", _PFN_SHIFT);
 252	pr_debug("\n");
 253}
 254
 255static inline void dump_handler(const char *symbol, const u32 *handler, int count)
 256{
 257	int i;
 258
 259	pr_debug("LEAF(%s)\n", symbol);
 260
 261	pr_debug("\t.set push\n");
 262	pr_debug("\t.set noreorder\n");
 263
 264	for (i = 0; i < count; i++)
 265		pr_debug("\t.word\t0x%08x\t\t# %p\n", handler[i], &handler[i]);
 266
 267	pr_debug("\t.set\tpop\n");
 268
 269	pr_debug("\tEND(%s)\n", symbol);
 270}
 271
 272/* The only general purpose registers allowed in TLB handlers. */
 273#define K0		26
 274#define K1		27
 275
 276/* Some CP0 registers */
 277#define C0_INDEX	0, 0
 278#define C0_ENTRYLO0	2, 0
 279#define C0_TCBIND	2, 2
 280#define C0_ENTRYLO1	3, 0
 281#define C0_CONTEXT	4, 0
 282#define C0_PAGEMASK	5, 0
 283#define C0_PWBASE	5, 5
 284#define C0_PWFIELD	5, 6
 285#define C0_PWSIZE	5, 7
 286#define C0_PWCTL	6, 6
 287#define C0_BADVADDR	8, 0
 288#define C0_PGD		9, 7
 289#define C0_ENTRYHI	10, 0
 290#define C0_EPC		14, 0
 291#define C0_XCONTEXT	20, 0
 292
 293#ifdef CONFIG_64BIT
 294# define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_XCONTEXT)
 295#else
 296# define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_CONTEXT)
 297#endif
 298
 299/* The worst case length of the handler is around 18 instructions for
 300 * R3000-style TLBs and up to 63 instructions for R4000-style TLBs.
 301 * Maximum space available is 32 instructions for R3000 and 64
 302 * instructions for R4000.
 303 *
 304 * We deliberately chose a buffer size of 128, so we won't scribble
 305 * over anything important on overflow before we panic.
 306 */
 307static u32 tlb_handler[128];
 308
 309/* simply assume worst case size for labels and relocs */
 310static struct uasm_label labels[128];
 311static struct uasm_reloc relocs[128];
 312
 313static int check_for_high_segbits;
 314static bool fill_includes_sw_bits;
 315
 316static unsigned int kscratch_used_mask;
 317
 318static inline int __maybe_unused c0_kscratch(void)
 319{
 320	switch (current_cpu_type()) {
 321	case CPU_XLP:
 322	case CPU_XLR:
 323		return 22;
 324	default:
 325		return 31;
 326	}
 327}
 328
 329static int allocate_kscratch(void)
 330{
 331	int r;
 332	unsigned int a = cpu_data[0].kscratch_mask & ~kscratch_used_mask;
 333
 334	r = ffs(a);
 335
 336	if (r == 0)
 337		return -1;
 338
 339	r--; /* make it zero based */
 340
 341	kscratch_used_mask |= (1 << r);
 342
 343	return r;
 344}
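/*
 * A worked example of the allocator above, with an assumed (not
 * universal) kscratch_mask of 0x0c reported by the CPU: the first
 * call sees a == 0x0c, ffs(a) == 3, returns KScratch register 2 and
 * marks bit 2 used; the second call returns 3; the third returns -1
 * and the caller must fall back to the memory save area.
 */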
 345
 346static int scratch_reg;
 347static int pgd_reg;
 348enum vmalloc64_mode {not_refill, refill_scratch, refill_noscratch};
 349
 350static struct work_registers build_get_work_registers(u32 **p)
 351{
 352	struct work_registers r;
 353
 354	if (scratch_reg >= 0) {
 355		/* Save in CPU local C0_KScratch? */
 356		UASM_i_MTC0(p, 1, c0_kscratch(), scratch_reg);
 357		r.r1 = K0;
 358		r.r2 = K1;
 359		r.r3 = 1;
 360		return r;
 361	}
 362
 363	if (num_possible_cpus() > 1) {
 364		/* Get smp_processor_id */
 365		UASM_i_CPUID_MFC0(p, K0, SMP_CPUID_REG);
 366		UASM_i_SRL_SAFE(p, K0, K0, SMP_CPUID_REGSHIFT);
 367
 368		/* handler_reg_save index in K0 */
 369		UASM_i_SLL(p, K0, K0, ilog2(sizeof(struct tlb_reg_save)));
 370
 371		UASM_i_LA(p, K1, (long)&handler_reg_save);
 372		UASM_i_ADDU(p, K0, K0, K1);
 373	} else {
 374		UASM_i_LA(p, K0, (long)&handler_reg_save);
 375	}
 376	/* K0 now points to save area, save $1 and $2  */
 377	UASM_i_SW(p, 1, offsetof(struct tlb_reg_save, a), K0);
 378	UASM_i_SW(p, 2, offsetof(struct tlb_reg_save, b), K0);
 379
 380	r.r1 = K1;
 381	r.r2 = 1;
 382	r.r3 = 2;
 383	return r;
 384}
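/*
 * The three strategies above, in the order they are tried:
 *  1) a free KScratch register: park $1 in CP0 and work in K0/K1/$1,
 *     with no memory traffic at all;
 *  2) SMP without KScratch: index handler_reg_save by CPU id, spill
 *     $1/$2 there and work in K1/$1/$2;
 *  3) UP without KScratch: the same, minus the CPU id arithmetic.
 */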
 385
 386static void build_restore_work_registers(u32 **p)
 387{
 388	if (scratch_reg >= 0) {
 389		UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
 390		return;
 391	}
 392	/* K0 already points to save area, restore $1 and $2  */
 393	UASM_i_LW(p, 1, offsetof(struct tlb_reg_save, a), K0);
 394	UASM_i_LW(p, 2, offsetof(struct tlb_reg_save, b), K0);
 395}
 396
 397#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
 398
 399/*
 400 * CONFIG_MIPS_PGD_C0_CONTEXT implies 64 bit and lack of pgd_current,
 401 * we cannot do r3000 under these circumstances.
 402 *
 403 * Declare pgd_current here instead of including mmu_context.h to avoid type
 404 * conflicts for tlbmiss_handler_setup_pgd
 405 */
 406extern unsigned long pgd_current[];
 407
 408/*
 409 * The R3000 TLB handler is simple.
 410 */
 411static void build_r3000_tlb_refill_handler(void)
 412{
 413	long pgdc = (long)pgd_current;
 414	u32 *p;
 415
 416	memset(tlb_handler, 0, sizeof(tlb_handler));
 417	p = tlb_handler;
 418
 419	uasm_i_mfc0(&p, K0, C0_BADVADDR);
 420	uasm_i_lui(&p, K1, uasm_rel_hi(pgdc)); /* cp0 delay */
 421	uasm_i_lw(&p, K1, uasm_rel_lo(pgdc), K1);
 422	uasm_i_srl(&p, K0, K0, 22); /* load delay */
 423	uasm_i_sll(&p, K0, K0, 2);
 424	uasm_i_addu(&p, K1, K1, K0);
 425	uasm_i_mfc0(&p, K0, C0_CONTEXT);
 426	uasm_i_lw(&p, K1, 0, K1); /* cp0 delay */
 427	uasm_i_andi(&p, K0, K0, 0xffc); /* load delay */
 428	uasm_i_addu(&p, K1, K1, K0);
 429	uasm_i_lw(&p, K0, 0, K1);
 430	uasm_i_nop(&p); /* load delay */
 431	uasm_i_mtc0(&p, K0, C0_ENTRYLO0);
 432	uasm_i_mfc0(&p, K1, C0_EPC); /* cp0 delay */
 433	uasm_i_tlbwr(&p); /* cp0 delay */
 434	uasm_i_jr(&p, K1);
 435	uasm_i_rfe(&p); /* branch delay */
 436
 437	if (p > tlb_handler + 32)
 438		panic("TLB refill handler space exceeded");
 439
 440	pr_debug("Wrote TLB refill handler (%u instructions).\n",
 441		 (unsigned int)(p - tlb_handler));
 442
 443	memcpy((void *)ebase, tlb_handler, 0x80);
 444	local_flush_icache_range(ebase, ebase + 0x80);
 445
 446	dump_handler("r3000_tlb_refill", (u32 *)ebase, 32);
 447}
 448#endif /* CONFIG_MIPS_PGD_C0_CONTEXT */
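/*
 * To follow the arithmetic above, assuming the traditional R3000
 * layout of 4K pages and 4-byte PTEs: "srl 22" extracts the PGD index
 * (each PGD entry maps 4MB), "sll 2" scales it to a byte offset, and
 * "andi 0xffc" masks out the PTE offset that the hardware pre-composed
 * from BadVPN in c0_context.  After the three loads (pgd_current, the
 * PGD entry, the PTE itself), K0 holds the PTE to feed into EntryLo0.
 */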
 449
 450/*
 451 * The R4000 TLB handler is much more complicated. We have two
 452 * consecutive handler areas with 32 instructions space each.
  453 * Since they aren't used at the same time, we can overflow into the
  454 * other one. To keep things simple, we first assume linear space,
 455 * then we relocate it to the final handler layout as needed.
 456 */
 457static u32 final_handler[64];
 458
 459/*
 460 * Hazards
 461 *
 462 * From the IDT errata for the QED RM5230 (Nevada), processor revision 1.0:
 463 * 2. A timing hazard exists for the TLBP instruction.
 464 *
 465 *	stalling_instruction
 466 *	TLBP
 467 *
 468 * The JTLB is being read for the TLBP throughout the stall generated by the
 469 * previous instruction. This is not really correct as the stalling instruction
 470 * can modify the address used to access the JTLB.  The failure symptom is that
 471 * the TLBP instruction will use an address created for the stalling instruction
 472 * and not the address held in C0_ENHI and thus report the wrong results.
 473 *
 474 * The software work-around is to not allow the instruction preceding the TLBP
 475 * to stall - make it an NOP or some other instruction guaranteed not to stall.
 476 *
  477 * Errata 2 will not be fixed.  This erratum is also present on the R5000.
 478 *
 479 * As if we MIPS hackers wouldn't know how to nop pipelines happy ...
 480 */
 481static void __maybe_unused build_tlb_probe_entry(u32 **p)
 482{
 483	switch (current_cpu_type()) {
 484	/* Found by experiment: R4600 v2.0/R4700 needs this, too.  */
 485	case CPU_R4600:
 486	case CPU_R4700:
 487	case CPU_R5000:
 488	case CPU_NEVADA:
 489		uasm_i_nop(p);
 490		uasm_i_tlbp(p);
 491		break;
 492
 493	default:
 494		uasm_i_tlbp(p);
 495		break;
 496	}
 497}
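/*
 * Per the erratum above, the generated fast path on an affected CPU
 * therefore looks like
 *
 *	nop		# guaranteed non-stalling filler
 *	tlbp
 *
 * rather than the bare tlbp emitted for everything else.
 */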
 498
 499/*
 500 * Write random or indexed TLB entry, and care about the hazards from
 501 * the preceding mtc0 and for the following eret.
 502 */
 503enum tlb_write_entry { tlb_random, tlb_indexed };
 504
 505static void build_tlb_write_entry(u32 **p, struct uasm_label **l,
 506				  struct uasm_reloc **r,
 507				  enum tlb_write_entry wmode)
 508{
 509	void(*tlbw)(u32 **) = NULL;
 510
 511	switch (wmode) {
 512	case tlb_random: tlbw = uasm_i_tlbwr; break;
 513	case tlb_indexed: tlbw = uasm_i_tlbwi; break;
 514	}
 515
 516	if (cpu_has_mips_r2_r6) {
 517		if (cpu_has_mips_r2_exec_hazard)
 518			uasm_i_ehb(p);
 519		tlbw(p);
 520		return;
 521	}
 522
 523	switch (current_cpu_type()) {
 524	case CPU_R4000PC:
 525	case CPU_R4000SC:
 526	case CPU_R4000MC:
 527	case CPU_R4400PC:
 528	case CPU_R4400SC:
 529	case CPU_R4400MC:
 530		/*
 531		 * This branch uses up a mtc0 hazard nop slot and saves
 532		 * two nops after the tlbw instruction.
 533		 */
 534		uasm_bgezl_hazard(p, r, hazard_instance);
 535		tlbw(p);
 536		uasm_bgezl_label(l, p, hazard_instance);
 537		hazard_instance++;
 538		uasm_i_nop(p);
 539		break;
 540
 541	case CPU_R4600:
 542	case CPU_R4700:
 543		uasm_i_nop(p);
 544		tlbw(p);
 545		uasm_i_nop(p);
 546		break;
 547
 548	case CPU_R5000:
 549	case CPU_NEVADA:
 550		uasm_i_nop(p); /* QED specifies 2 nops hazard */
 551		uasm_i_nop(p); /* QED specifies 2 nops hazard */
 552		tlbw(p);
 553		break;
 554
 555	case CPU_R4300:
 556	case CPU_5KC:
 557	case CPU_TX49XX:
 558	case CPU_PR4450:
 559	case CPU_XLR:
 560		uasm_i_nop(p);
 561		tlbw(p);
 562		break;
 563
 564	case CPU_R10000:
 565	case CPU_R12000:
 566	case CPU_R14000:
 567	case CPU_R16000:
 568	case CPU_4KC:
 569	case CPU_4KEC:
 570	case CPU_M14KC:
 571	case CPU_M14KEC:
 572	case CPU_SB1:
 573	case CPU_SB1A:
 574	case CPU_4KSC:
 575	case CPU_20KC:
 576	case CPU_25KF:
 577	case CPU_BMIPS32:
 578	case CPU_BMIPS3300:
 579	case CPU_BMIPS4350:
 580	case CPU_BMIPS4380:
 581	case CPU_BMIPS5000:
 582	case CPU_LOONGSON2:
 583	case CPU_LOONGSON3:
 584	case CPU_R5500:
 585		if (m4kc_tlbp_war())
 586			uasm_i_nop(p);
 587	case CPU_ALCHEMY:
 588		tlbw(p);
 589		break;
 590
 591	case CPU_RM7000:
 592		uasm_i_nop(p);
 593		uasm_i_nop(p);
 594		uasm_i_nop(p);
 595		uasm_i_nop(p);
 596		tlbw(p);
 597		break;
 598
 599	case CPU_VR4111:
 600	case CPU_VR4121:
 601	case CPU_VR4122:
 602	case CPU_VR4181:
 603	case CPU_VR4181A:
 604		uasm_i_nop(p);
 605		uasm_i_nop(p);
 606		tlbw(p);
 607		uasm_i_nop(p);
 608		uasm_i_nop(p);
 609		break;
 610
 611	case CPU_VR4131:
 612	case CPU_VR4133:
 613	case CPU_R5432:
 614		uasm_i_nop(p);
 615		uasm_i_nop(p);
 616		tlbw(p);
 617		break;
 618
 619	case CPU_JZRISC:
 620		tlbw(p);
 621		uasm_i_nop(p);
 622		break;
 623
 624	default:
 625		panic("No TLB refill handler yet (CPU type: %d)",
 626		      current_cpu_type());
 627		break;
 628	}
 629}
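/*
 * The R4000/R4400 bgezl trick above in sketch form: a branch-likely
 * on $0 is always taken, so
 *
 *	bgezl	$0, 1f
 *	tlbwi			# runs in the (always executed) delay slot
 * 1:
 *
 * fills the mtc0 hazard with the branch itself and saves the two nops
 * otherwise needed after the tlbw.
 */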
 630
 631static __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
 632							unsigned int reg)
 633{
 634	if (_PAGE_GLOBAL_SHIFT == 0) {
 635		/* pte_t is already in EntryLo format */
 636		return;
 637	}
 638
 639	if (cpu_has_rixi && _PAGE_NO_EXEC) {
 640		if (fill_includes_sw_bits) {
 641			UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL));
 642		} else {
 643			UASM_i_SRL(p, reg, reg, ilog2(_PAGE_NO_EXEC));
 644			UASM_i_ROTR(p, reg, reg,
 645				    ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
 646		}
 647	} else {
 648#ifdef CONFIG_PHYS_ADDR_T_64BIT
 649		uasm_i_dsrl_safe(p, reg, reg, ilog2(_PAGE_GLOBAL));
 650#else
 651		UASM_i_SRL(p, reg, reg, ilog2(_PAGE_GLOBAL));
 652#endif
 653	}
 654}
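/*
 * A sketch of why the ROTR above works on RIXI processors, assuming
 * the layout where the no-exec/no-read software bits sit below
 * _PAGE_GLOBAL: rotating right by ilog2(_PAGE_GLOBAL) both aligns the
 * PFN and flag bits for EntryLo and wraps those low bits around into
 * the top of the register, which is where the hardware defines XI and
 * RI.  A plain shift would simply discard them.
 */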
 655
 656#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
 657
 658static void build_restore_pagemask(u32 **p, struct uasm_reloc **r,
 659				   unsigned int tmp, enum label_id lid,
 660				   int restore_scratch)
 661{
 662	if (restore_scratch) {
 663		/* Reset default page size */
 664		if (PM_DEFAULT_MASK >> 16) {
 665			uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
 666			uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
 667			uasm_i_mtc0(p, tmp, C0_PAGEMASK);
 668			uasm_il_b(p, r, lid);
 669		} else if (PM_DEFAULT_MASK) {
 670			uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
 671			uasm_i_mtc0(p, tmp, C0_PAGEMASK);
 672			uasm_il_b(p, r, lid);
 673		} else {
 674			uasm_i_mtc0(p, 0, C0_PAGEMASK);
 675			uasm_il_b(p, r, lid);
 676		}
 677		if (scratch_reg >= 0)
 678			UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
 679		else
 680			UASM_i_LW(p, 1, scratchpad_offset(0), 0);
 681	} else {
 682		/* Reset default page size */
 683		if (PM_DEFAULT_MASK >> 16) {
 684			uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
 685			uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
 686			uasm_il_b(p, r, lid);
 687			uasm_i_mtc0(p, tmp, C0_PAGEMASK);
 688		} else if (PM_DEFAULT_MASK) {
 689			uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
 690			uasm_il_b(p, r, lid);
 691			uasm_i_mtc0(p, tmp, C0_PAGEMASK);
 692		} else {
 693			uasm_il_b(p, r, lid);
 694			uasm_i_mtc0(p, 0, C0_PAGEMASK);
 695		}
 696	}
 697}
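/*
 * The lui/ori split above is only needed when the default PageMask
 * does not fit an unsigned 16-bit immediate.  For example, assuming a
 * 64K base page size, PM_DEFAULT_MASK is 0x0001e000:
 *
 *	lui	tmp, 0x0001
 *	ori	tmp, tmp, 0xe000
 *
 * while 16K pages (mask 0x6000) need only the ori, and 4K pages
 * (mask 0) just write $0.
 */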
 698
 699static void build_huge_tlb_write_entry(u32 **p, struct uasm_label **l,
 700				       struct uasm_reloc **r,
 701				       unsigned int tmp,
 702				       enum tlb_write_entry wmode,
 703				       int restore_scratch)
 704{
 705	/* Set huge page tlb entry size */
 706	uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16);
 707	uasm_i_ori(p, tmp, tmp, PM_HUGE_MASK & 0xffff);
 708	uasm_i_mtc0(p, tmp, C0_PAGEMASK);
 709
 710	build_tlb_write_entry(p, l, r, wmode);
 711
 712	build_restore_pagemask(p, r, tmp, label_leave, restore_scratch);
 713}
 714
 715/*
 716 * Check if Huge PTE is present, if so then jump to LABEL.
 717 */
 718static void
 719build_is_huge_pte(u32 **p, struct uasm_reloc **r, unsigned int tmp,
 720		  unsigned int pmd, int lid)
 721{
 722	UASM_i_LW(p, tmp, 0, pmd);
 723	if (use_bbit_insns()) {
 724		uasm_il_bbit1(p, r, tmp, ilog2(_PAGE_HUGE), lid);
 725	} else {
 726		uasm_i_andi(p, tmp, tmp, _PAGE_HUGE);
 727		uasm_il_bnez(p, r, tmp, lid);
 728	}
 729}
 730
 731static void build_huge_update_entries(u32 **p, unsigned int pte,
 732				      unsigned int tmp)
 733{
 734	int small_sequence;
 735
 736	/*
 737	 * A huge PTE describes an area the size of the
 738	 * configured huge page size. This is twice the
  739	 * size of the large TLB entry we intend to use.
 740	 * A TLB entry half the size of the configured
 741	 * huge page size is configured into entrylo0
 742	 * and entrylo1 to cover the contiguous huge PTE
 743	 * address space.
 744	 */
 745	small_sequence = (HPAGE_SIZE >> 7) < 0x10000;
 746
 747	/* We can clobber tmp.	It isn't used after this.*/
 748	if (!small_sequence)
 749		uasm_i_lui(p, tmp, HPAGE_SIZE >> (7 + 16));
 750
 751	build_convert_pte_to_entrylo(p, pte);
 752	UASM_i_MTC0(p, pte, C0_ENTRYLO0); /* load it */
 753	/* convert to entrylo1 */
 754	if (small_sequence)
 755		UASM_i_ADDIU(p, pte, pte, HPAGE_SIZE >> 7);
 756	else
 757		UASM_i_ADDU(p, pte, pte, tmp);
 758
 759	UASM_i_MTC0(p, pte, C0_ENTRYLO1); /* load it */
 760}
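/*
 * Example of the small_sequence test above, assuming a 2MB huge page:
 * HPAGE_SIZE >> 7 == 0x4000, i.e. half the huge page expressed as an
 * EntryLo PFN increment (physical address >> 12, shifted up 6 bits),
 * so the odd EntryLo is derived with a single
 *
 *	ADDIU	pte, pte, 0x4000
 *
 * Only huge pages of 8MB and up (0x10000 << 7) need the lui/ADDU
 * fallback.
 */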
 761
 762static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r,
 763				    struct uasm_label **l,
 764				    unsigned int pte,
 765				    unsigned int ptr,
 766				    unsigned int flush)
 767{
 768#ifdef CONFIG_SMP
 769	UASM_i_SC(p, pte, 0, ptr);
 770	uasm_il_beqz(p, r, pte, label_tlb_huge_update);
 771	UASM_i_LW(p, pte, 0, ptr); /* Needed because SC killed our PTE */
 772#else
 773	UASM_i_SW(p, pte, 0, ptr);
 774#endif
 775	if (cpu_has_ftlb && flush) {
 776		BUG_ON(!cpu_has_tlbinv);
 777
 778		UASM_i_MFC0(p, ptr, C0_ENTRYHI);
 779		uasm_i_ori(p, ptr, ptr, MIPS_ENTRYHI_EHINV);
 780		UASM_i_MTC0(p, ptr, C0_ENTRYHI);
 781		build_tlb_write_entry(p, l, r, tlb_indexed);
 782
 783		uasm_i_xori(p, ptr, ptr, MIPS_ENTRYHI_EHINV);
 784		UASM_i_MTC0(p, ptr, C0_ENTRYHI);
 785		build_huge_update_entries(p, pte, ptr);
 786		build_huge_tlb_write_entry(p, l, r, pte, tlb_random, 0);
 787
 788		return;
 789	}
 790
 791	build_huge_update_entries(p, pte, ptr);
 792	build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed, 0);
 793}
 794#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
 795
 796#ifdef CONFIG_64BIT
 797/*
 798 * TMP and PTR are scratch.
 799 * TMP will be clobbered, PTR will hold the pmd entry.
 800 */
 801static void
 802build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 803		 unsigned int tmp, unsigned int ptr)
 804{
 805#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
 806	long pgdc = (long)pgd_current;
 807#endif
 808	/*
 809	 * The vmalloc handling is not in the hotpath.
 810	 */
 811	uasm_i_dmfc0(p, tmp, C0_BADVADDR);
 812
 813	if (check_for_high_segbits) {
 814		/*
  815		 * The kernel currently implicitly assumes that the
 816		 * MIPS SEGBITS parameter for the processor is
 817		 * (PGDIR_SHIFT+PGDIR_BITS) or less, and will never
 818		 * allocate virtual addresses outside the maximum
 819		 * range for SEGBITS = (PGDIR_SHIFT+PGDIR_BITS). But
 820		 * that doesn't prevent user code from accessing the
 821		 * higher xuseg addresses.  Here, we make sure that
 822		 * everything but the lower xuseg addresses goes down
 823		 * the module_alloc/vmalloc path.
 824		 */
 825		uasm_i_dsrl_safe(p, ptr, tmp, PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
 826		uasm_il_bnez(p, r, ptr, label_vmalloc);
 827	} else {
 828		uasm_il_bltz(p, r, tmp, label_vmalloc);
 829	}
 830	/* No uasm_i_nop needed here, since the next insn doesn't touch TMP. */
 831
 832	if (pgd_reg != -1) {
 833		/* pgd is in pgd_reg */
 834		if (cpu_has_ldpte)
 835			UASM_i_MFC0(p, ptr, C0_PWBASE);
 836		else
 837			UASM_i_MFC0(p, ptr, c0_kscratch(), pgd_reg);
 838	} else {
 839#if defined(CONFIG_MIPS_PGD_C0_CONTEXT)
 840		/*
 841		 * &pgd << 11 stored in CONTEXT [23..63].
 842		 */
 843		UASM_i_MFC0(p, ptr, C0_CONTEXT);
 844
 845		/* Clear lower 23 bits of context. */
 846		uasm_i_dins(p, ptr, 0, 0, 23);
 847
 848		/* 1 0	1 0 1  << 6  xkphys cached */
 849		uasm_i_ori(p, ptr, ptr, 0x540);
 850		uasm_i_drotr(p, ptr, ptr, 11);
 851#elif defined(CONFIG_SMP)
 852		UASM_i_CPUID_MFC0(p, ptr, SMP_CPUID_REG);
 853		uasm_i_dsrl_safe(p, ptr, ptr, SMP_CPUID_PTRSHIFT);
 854		UASM_i_LA_mostly(p, tmp, pgdc);
 855		uasm_i_daddu(p, ptr, ptr, tmp);
 856		uasm_i_dmfc0(p, tmp, C0_BADVADDR);
 857		uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
 858#else
 859		UASM_i_LA_mostly(p, ptr, pgdc);
 860		uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
 861#endif
 862	}
 863
 864	uasm_l_vmalloc_done(l, *p);
 865
 866	/* get pgd offset in bytes */
 867	uasm_i_dsrl_safe(p, tmp, tmp, PGDIR_SHIFT - 3);
 868
 869	uasm_i_andi(p, tmp, tmp, (PTRS_PER_PGD - 1)<<3);
 870	uasm_i_daddu(p, ptr, ptr, tmp); /* add in pgd offset */
 871#ifndef __PAGETABLE_PMD_FOLDED
 872	uasm_i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */
 873	uasm_i_ld(p, ptr, 0, ptr); /* get pmd pointer */
 874	uasm_i_dsrl_safe(p, tmp, tmp, PMD_SHIFT-3); /* get pmd offset in bytes */
 875	uasm_i_andi(p, tmp, tmp, (PTRS_PER_PMD - 1)<<3);
 876	uasm_i_daddu(p, ptr, ptr, tmp); /* add in pmd offset */
 877#endif
 878}
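/*
 * Concretely, assuming 4K pages, PGD_ORDER == 0 and the default
 * three-level layout (PGDIR_SHIFT == 30), the dsrl_safe above shifts
 * BadVAddr right by 30 + 0 + 12 - 3 = 39: any user address with bits
 * 39 and up set cannot be covered by the page tables and is sent down
 * the label_vmalloc / large_segbits fault path instead.
 */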
 879
 880/*
 881 * BVADDR is the faulting address, PTR is scratch.
 882 * PTR will hold the pgd for vmalloc.
 883 */
 884static void
 885build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 886			unsigned int bvaddr, unsigned int ptr,
 887			enum vmalloc64_mode mode)
 888{
 889	long swpd = (long)swapper_pg_dir;
 890	int single_insn_swpd;
 891	int did_vmalloc_branch = 0;
 892
 893	single_insn_swpd = uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd);
 894
 895	uasm_l_vmalloc(l, *p);
 896
 897	if (mode != not_refill && check_for_high_segbits) {
 898		if (single_insn_swpd) {
 899			uasm_il_bltz(p, r, bvaddr, label_vmalloc_done);
 900			uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
 901			did_vmalloc_branch = 1;
 902			/* fall through */
 903		} else {
 904			uasm_il_bgez(p, r, bvaddr, label_large_segbits_fault);
 905		}
 906	}
 907	if (!did_vmalloc_branch) {
 908		if (single_insn_swpd) {
 909			uasm_il_b(p, r, label_vmalloc_done);
 910			uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
 911		} else {
 912			UASM_i_LA_mostly(p, ptr, swpd);
 913			uasm_il_b(p, r, label_vmalloc_done);
 914			if (uasm_in_compat_space_p(swpd))
 915				uasm_i_addiu(p, ptr, ptr, uasm_rel_lo(swpd));
 916			else
 917				uasm_i_daddiu(p, ptr, ptr, uasm_rel_lo(swpd));
 918		}
 919	}
 920	if (mode != not_refill && check_for_high_segbits) {
 921		uasm_l_large_segbits_fault(l, *p);
 922		/*
 923		 * We get here if we are an xsseg address, or if we are
 924		 * an xuseg address above (PGDIR_SHIFT+PGDIR_BITS) boundary.
 925		 *
  926		 * Ignoring xsseg (assume disabled, so it would generate
  927		 * address errors?), the only remaining possibility
 928		 * is the upper xuseg addresses.  On processors with
 929		 * TLB_SEGBITS <= PGDIR_SHIFT+PGDIR_BITS, these
 930		 * addresses would have taken an address error. We try
 931		 * to mimic that here by taking a load/istream page
 932		 * fault.
 933		 */
 934		UASM_i_LA(p, ptr, (unsigned long)tlb_do_page_fault_0);
 935		uasm_i_jr(p, ptr);
 936
 937		if (mode == refill_scratch) {
 938			if (scratch_reg >= 0)
 939				UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
 940			else
 941				UASM_i_LW(p, 1, scratchpad_offset(0), 0);
 942		} else {
 943			uasm_i_nop(p);
 944		}
 945	}
 946}
 947
 948#else /* !CONFIG_64BIT */
 949
 950/*
 951 * TMP and PTR are scratch.
 952 * TMP will be clobbered, PTR will hold the pgd entry.
 953 */
 954static void __maybe_unused
 955build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
 956{
 957	if (pgd_reg != -1) {
 958		/* pgd is in pgd_reg */
 959		uasm_i_mfc0(p, ptr, c0_kscratch(), pgd_reg);
 960		uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */
 961	} else {
 962		long pgdc = (long)pgd_current;
 963
 964		/* 32 bit SMP has smp_processor_id() stored in CONTEXT. */
 965#ifdef CONFIG_SMP
 966		uasm_i_mfc0(p, ptr, SMP_CPUID_REG);
 967		UASM_i_LA_mostly(p, tmp, pgdc);
 968		uasm_i_srl(p, ptr, ptr, SMP_CPUID_PTRSHIFT);
 969		uasm_i_addu(p, ptr, tmp, ptr);
 970#else
 971		UASM_i_LA_mostly(p, ptr, pgdc);
 972#endif
 973		uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */
 974		uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
 975	}
 976	uasm_i_srl(p, tmp, tmp, PGDIR_SHIFT); /* get pgd only bits */
 977	uasm_i_sll(p, tmp, tmp, PGD_T_LOG2);
 978	uasm_i_addu(p, ptr, ptr, tmp); /* add in pgd offset */
 979}
 980
 981#endif /* !CONFIG_64BIT */
 982
 983static void build_adjust_context(u32 **p, unsigned int ctx)
 984{
 985	unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12;
 986	unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1);
 987
 988	switch (current_cpu_type()) {
 989	case CPU_VR41XX:
 990	case CPU_VR4111:
 991	case CPU_VR4121:
 992	case CPU_VR4122:
 993	case CPU_VR4131:
 994	case CPU_VR4181:
 995	case CPU_VR4181A:
 996	case CPU_VR4133:
 997		shift += 2;
 998		break;
 999
1000	default:
1001		break;
1002	}
1003
1004	if (shift)
1005		UASM_i_SRL(p, ctx, ctx, shift);
1006	uasm_i_andi(p, ctx, ctx, mask);
1007}
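/*
 * Worked numbers for the above, assuming 4K pages and 32-bit PTEs
 * (PTE_T_LOG2 == 2, PTRS_PER_PTE == 1024): shift = 4 - 3 + 12 - 12 = 1
 * and mask = (1024 / 2 - 1) << 3 = 0xff8, so Context becomes the byte
 * offset of an aligned even/odd PTE pair with just
 *
 *	SRL	ctx, ctx, 1
 *	andi	ctx, ctx, 0xff8
 *
 * The VR41xx cases add 2 to the shift because those cores lay out
 * BadVPN2 differently in their Context register.
 */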
1008
1009static void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
1010{
1011	/*
1012	 * Bug workaround for the Nevada. It seems as if under certain
1013	 * circumstances the move from cp0_context might produce a
1014	 * bogus result when the mfc0 instruction and its consumer are
1015	 * in a different cacheline or a load instruction, probably any
1016	 * memory reference, is between them.
1017	 */
1018	switch (current_cpu_type()) {
1019	case CPU_NEVADA:
1020		UASM_i_LW(p, ptr, 0, ptr);
1021		GET_CONTEXT(p, tmp); /* get context reg */
1022		break;
1023
1024	default:
1025		GET_CONTEXT(p, tmp); /* get context reg */
1026		UASM_i_LW(p, ptr, 0, ptr);
1027		break;
1028	}
1029
1030	build_adjust_context(p, tmp);
1031	UASM_i_ADDU(p, ptr, ptr, tmp); /* add in offset */
1032}
1033
1034static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
1035{
1036	int pte_off_even = 0;
1037	int pte_off_odd = sizeof(pte_t);
1038
1039#if defined(CONFIG_CPU_MIPS32) && defined(CONFIG_PHYS_ADDR_T_64BIT)
1040	/* The low 32 bits of EntryLo is stored in pte_high */
1041	pte_off_even += offsetof(pte_t, pte_high);
1042	pte_off_odd += offsetof(pte_t, pte_high);
1043#endif
1044
1045	if (IS_ENABLED(CONFIG_XPA)) {
1046		uasm_i_lw(p, tmp, pte_off_even, ptep); /* even pte */
1047		UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
1048		UASM_i_MTC0(p, tmp, C0_ENTRYLO0);
1049
1050		if (cpu_has_xpa && !mips_xpa_disabled) {
1051			uasm_i_lw(p, tmp, 0, ptep);
1052			uasm_i_ext(p, tmp, tmp, 0, 24);
1053			uasm_i_mthc0(p, tmp, C0_ENTRYLO0);
1054		}
1055
1056		uasm_i_lw(p, tmp, pte_off_odd, ptep); /* odd pte */
1057		UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
1058		UASM_i_MTC0(p, tmp, C0_ENTRYLO1);
1059
1060		if (cpu_has_xpa && !mips_xpa_disabled) {
1061			uasm_i_lw(p, tmp, sizeof(pte_t), ptep);
1062			uasm_i_ext(p, tmp, tmp, 0, 24);
1063			uasm_i_mthc0(p, tmp, C0_ENTRYLO1);
1064		}
1065		return;
1066	}
1067
1068	UASM_i_LW(p, tmp, pte_off_even, ptep); /* get even pte */
1069	UASM_i_LW(p, ptep, pte_off_odd, ptep); /* get odd pte */
1070	if (r45k_bvahwbug())
1071		build_tlb_probe_entry(p);
1072	build_convert_pte_to_entrylo(p, tmp);
1073	if (r4k_250MHZhwbug())
1074		UASM_i_MTC0(p, 0, C0_ENTRYLO0);
1075	UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
1076	build_convert_pte_to_entrylo(p, ptep);
1077	if (r45k_bvahwbug())
1078		uasm_i_mfc0(p, tmp, C0_INDEX);
1079	if (r4k_250MHZhwbug())
1080		UASM_i_MTC0(p, 0, C0_ENTRYLO1);
1081	UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
1082}
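/*
 * Note the shape of the 250MHz workaround above: on affected parts
 * each EntryLo would be written twice, first with $0 and then with
 * the real value, e.g.
 *
 *	mtc0	$0, c0_entrylo0
 *	mtc0	tmp, c0_entrylo0
 *
 * so a bogus intermediate value never survives into the TLB write
 * (the probe for the bug itself is not implemented, see
 * r4k_250MHZhwbug above).
 */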
1083
1084struct mips_huge_tlb_info {
1085	int huge_pte;
1086	int restore_scratch;
1087	bool need_reload_pte;
1088};
1089
1090static struct mips_huge_tlb_info
1091build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
1092			       struct uasm_reloc **r, unsigned int tmp,
1093			       unsigned int ptr, int c0_scratch_reg)
1094{
1095	struct mips_huge_tlb_info rv;
1096	unsigned int even, odd;
1097	int vmalloc_branch_delay_filled = 0;
1098	const int scratch = 1; /* Our extra working register */
1099
1100	rv.huge_pte = scratch;
1101	rv.restore_scratch = 0;
1102	rv.need_reload_pte = false;
1103
1104	if (check_for_high_segbits) {
1105		UASM_i_MFC0(p, tmp, C0_BADVADDR);
1106
1107		if (pgd_reg != -1)
1108			UASM_i_MFC0(p, ptr, c0_kscratch(), pgd_reg);
1109		else
1110			UASM_i_MFC0(p, ptr, C0_CONTEXT);
1111
1112		if (c0_scratch_reg >= 0)
1113			UASM_i_MTC0(p, scratch, c0_kscratch(), c0_scratch_reg);
1114		else
1115			UASM_i_SW(p, scratch, scratchpad_offset(0), 0);
1116
1117		uasm_i_dsrl_safe(p, scratch, tmp,
1118				 PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
1119		uasm_il_bnez(p, r, scratch, label_vmalloc);
1120
1121		if (pgd_reg == -1) {
1122			vmalloc_branch_delay_filled = 1;
1123			/* Clear lower 23 bits of context. */
1124			uasm_i_dins(p, ptr, 0, 0, 23);
1125		}
1126	} else {
1127		if (pgd_reg != -1)
1128			UASM_i_MFC0(p, ptr, c0_kscratch(), pgd_reg);
1129		else
1130			UASM_i_MFC0(p, ptr, C0_CONTEXT);
1131
1132		UASM_i_MFC0(p, tmp, C0_BADVADDR);
1133
1134		if (c0_scratch_reg >= 0)
1135			UASM_i_MTC0(p, scratch, c0_kscratch(), c0_scratch_reg);
1136		else
1137			UASM_i_SW(p, scratch, scratchpad_offset(0), 0);
1138
1139		if (pgd_reg == -1)
1140			/* Clear lower 23 bits of context. */
1141			uasm_i_dins(p, ptr, 0, 0, 23);
1142
1143		uasm_il_bltz(p, r, tmp, label_vmalloc);
1144	}
1145
1146	if (pgd_reg == -1) {
1147		vmalloc_branch_delay_filled = 1;
1148		/* 1 0	1 0 1  << 6  xkphys cached */
1149		uasm_i_ori(p, ptr, ptr, 0x540);
1150		uasm_i_drotr(p, ptr, ptr, 11);
1151	}
1152
1153#ifdef __PAGETABLE_PMD_FOLDED
1154#define LOC_PTEP scratch
1155#else
1156#define LOC_PTEP ptr
1157#endif
1158
1159	if (!vmalloc_branch_delay_filled)
1160		/* get pgd offset in bytes */
1161		uasm_i_dsrl_safe(p, scratch, tmp, PGDIR_SHIFT - 3);
1162
1163	uasm_l_vmalloc_done(l, *p);
1164
1165	/*
1166	 *			   tmp		ptr
1167	 * fall-through case =	 badvaddr  *pgd_current
1168	 * vmalloc case	     =	 badvaddr  swapper_pg_dir
1169	 */
1170
1171	if (vmalloc_branch_delay_filled)
1172		/* get pgd offset in bytes */
1173		uasm_i_dsrl_safe(p, scratch, tmp, PGDIR_SHIFT - 3);
1174
1175#ifdef __PAGETABLE_PMD_FOLDED
1176	GET_CONTEXT(p, tmp); /* get context reg */
1177#endif
1178	uasm_i_andi(p, scratch, scratch, (PTRS_PER_PGD - 1) << 3);
1179
1180	if (use_lwx_insns()) {
1181		UASM_i_LWX(p, LOC_PTEP, scratch, ptr);
1182	} else {
1183		uasm_i_daddu(p, ptr, ptr, scratch); /* add in pgd offset */
1184		uasm_i_ld(p, LOC_PTEP, 0, ptr); /* get pmd pointer */
1185	}
1186
1187#ifndef __PAGETABLE_PMD_FOLDED
1188	/* get pmd offset in bytes */
1189	uasm_i_dsrl_safe(p, scratch, tmp, PMD_SHIFT - 3);
1190	uasm_i_andi(p, scratch, scratch, (PTRS_PER_PMD - 1) << 3);
1191	GET_CONTEXT(p, tmp); /* get context reg */
1192
1193	if (use_lwx_insns()) {
1194		UASM_i_LWX(p, scratch, scratch, ptr);
1195	} else {
1196		uasm_i_daddu(p, ptr, ptr, scratch); /* add in pmd offset */
1197		UASM_i_LW(p, scratch, 0, ptr);
1198	}
1199#endif
1200	/* Adjust the context during the load latency. */
1201	build_adjust_context(p, tmp);
1202
1203#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
1204	uasm_il_bbit1(p, r, scratch, ilog2(_PAGE_HUGE), label_tlb_huge_update);
1205	/*
 1206	 * In the LWX case we don't want to do the load in the
1207	 * delay slot.	It cannot issue in the same cycle and may be
1208	 * speculative and unneeded.
1209	 */
1210	if (use_lwx_insns())
1211		uasm_i_nop(p);
1212#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
1213
1214
1215	/* build_update_entries */
1216	if (use_lwx_insns()) {
1217		even = ptr;
1218		odd = tmp;
1219		UASM_i_LWX(p, even, scratch, tmp);
1220		UASM_i_ADDIU(p, tmp, tmp, sizeof(pte_t));
1221		UASM_i_LWX(p, odd, scratch, tmp);
1222	} else {
1223		UASM_i_ADDU(p, ptr, scratch, tmp); /* add in offset */
1224		even = tmp;
1225		odd = ptr;
1226		UASM_i_LW(p, even, 0, ptr); /* get even pte */
1227		UASM_i_LW(p, odd, sizeof(pte_t), ptr); /* get odd pte */
1228	}
1229	if (cpu_has_rixi) {
1230		uasm_i_drotr(p, even, even, ilog2(_PAGE_GLOBAL));
1231		UASM_i_MTC0(p, even, C0_ENTRYLO0); /* load it */
1232		uasm_i_drotr(p, odd, odd, ilog2(_PAGE_GLOBAL));
1233	} else {
1234		uasm_i_dsrl_safe(p, even, even, ilog2(_PAGE_GLOBAL));
1235		UASM_i_MTC0(p, even, C0_ENTRYLO0); /* load it */
1236		uasm_i_dsrl_safe(p, odd, odd, ilog2(_PAGE_GLOBAL));
1237	}
1238	UASM_i_MTC0(p, odd, C0_ENTRYLO1); /* load it */
1239
1240	if (c0_scratch_reg >= 0) {
1241		UASM_i_MFC0(p, scratch, c0_kscratch(), c0_scratch_reg);
1242		build_tlb_write_entry(p, l, r, tlb_random);
1243		uasm_l_leave(l, *p);
1244		rv.restore_scratch = 1;
1245	} else if (PAGE_SHIFT == 14 || PAGE_SHIFT == 13)  {
1246		build_tlb_write_entry(p, l, r, tlb_random);
1247		uasm_l_leave(l, *p);
1248		UASM_i_LW(p, scratch, scratchpad_offset(0), 0);
1249	} else {
1250		UASM_i_LW(p, scratch, scratchpad_offset(0), 0);
1251		build_tlb_write_entry(p, l, r, tlb_random);
1252		uasm_l_leave(l, *p);
1253		rv.restore_scratch = 1;
1254	}
1255
1256	uasm_i_eret(p); /* return from trap */
1257
1258	return rv;
1259}
1260
1261/*
1262 * For a 64-bit kernel, we are using the 64-bit XTLB refill exception
1263 * because EXL == 0.  If we wrap, we can also use the 32 instruction
1264 * slots before the XTLB refill exception handler which belong to the
1265 * unused TLB refill exception.
1266 */
1267#define MIPS64_REFILL_INSNS 32
1268
1269static void build_r4000_tlb_refill_handler(void)
1270{
1271	u32 *p = tlb_handler;
1272	struct uasm_label *l = labels;
1273	struct uasm_reloc *r = relocs;
1274	u32 *f;
1275	unsigned int final_len;
1276	struct mips_huge_tlb_info htlb_info __maybe_unused;
1277	enum vmalloc64_mode vmalloc_mode __maybe_unused;
1278
1279	memset(tlb_handler, 0, sizeof(tlb_handler));
1280	memset(labels, 0, sizeof(labels));
1281	memset(relocs, 0, sizeof(relocs));
1282	memset(final_handler, 0, sizeof(final_handler));
1283
1284	if (IS_ENABLED(CONFIG_64BIT) && (scratch_reg >= 0 || scratchpad_available()) && use_bbit_insns()) {
1285		htlb_info = build_fast_tlb_refill_handler(&p, &l, &r, K0, K1,
1286							  scratch_reg);
1287		vmalloc_mode = refill_scratch;
1288	} else {
1289		htlb_info.huge_pte = K0;
1290		htlb_info.restore_scratch = 0;
1291		htlb_info.need_reload_pte = true;
1292		vmalloc_mode = refill_noscratch;
1293		/*
1294		 * create the plain linear handler
1295		 */
1296		if (bcm1250_m3_war()) {
1297			unsigned int segbits = 44;
1298
1299			uasm_i_dmfc0(&p, K0, C0_BADVADDR);
1300			uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
1301			uasm_i_xor(&p, K0, K0, K1);
1302			uasm_i_dsrl_safe(&p, K1, K0, 62);
1303			uasm_i_dsrl_safe(&p, K0, K0, 12 + 1);
1304			uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits);
1305			uasm_i_or(&p, K0, K0, K1);
1306			uasm_il_bnez(&p, &r, K0, label_leave);
1307			/* No need for uasm_i_nop */
1308		}
1309
1310#ifdef CONFIG_64BIT
1311		build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */
1312#else
1313		build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
1314#endif
1315
1316#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
1317		build_is_huge_pte(&p, &r, K0, K1, label_tlb_huge_update);
1318#endif
1319
1320		build_get_ptep(&p, K0, K1);
1321		build_update_entries(&p, K0, K1);
1322		build_tlb_write_entry(&p, &l, &r, tlb_random);
1323		uasm_l_leave(&l, p);
1324		uasm_i_eret(&p); /* return from trap */
1325	}
1326#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
1327	uasm_l_tlb_huge_update(&l, p);
1328	if (htlb_info.need_reload_pte)
1329		UASM_i_LW(&p, htlb_info.huge_pte, 0, K1);
1330	build_huge_update_entries(&p, htlb_info.huge_pte, K1);
1331	build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random,
1332				   htlb_info.restore_scratch);
1333#endif
1334
1335#ifdef CONFIG_64BIT
1336	build_get_pgd_vmalloc64(&p, &l, &r, K0, K1, vmalloc_mode);
1337#endif
1338
1339	/*
1340	 * Overflow check: For the 64bit handler, we need at least one
1341	 * free instruction slot for the wrap-around branch. In worst
1342	 * case, if the intended insertion point is a delay slot, we
1343	 * need three, with the second nop'ed and the third being
1344	 * unused.
1345	 */
1346	switch (boot_cpu_type()) {
1347	default:
1348		if (sizeof(long) == 4) {
1349	case CPU_LOONGSON2:
 1350		/* Loongson2 ebase is different from r4k, we have more space */
1351			if ((p - tlb_handler) > 64)
1352				panic("TLB refill handler space exceeded");
 1353			/*
 1354			 * Now fold the handler into the TLB refill handler space.
 1355			 */
1356			f = final_handler;
1357			/* Simplest case, just copy the handler. */
1358			uasm_copy_handler(relocs, labels, tlb_handler, p, f);
1359			final_len = p - tlb_handler;
1360			break;
1361		} else {
1362			if (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 1)
1363			    || (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 3)
1364				&& uasm_insn_has_bdelay(relocs,
1365							tlb_handler + MIPS64_REFILL_INSNS - 3)))
1366				panic("TLB refill handler space exceeded");
 1367			/*
 1368			 * Now fold the handler into the TLB refill handler space.
 1369			 */
1370			f = final_handler + MIPS64_REFILL_INSNS;
1371			if ((p - tlb_handler) <= MIPS64_REFILL_INSNS) {
1372				/* Just copy the handler. */
1373				uasm_copy_handler(relocs, labels, tlb_handler, p, f);
1374				final_len = p - tlb_handler;
1375			} else {
1376#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
1377				const enum label_id ls = label_tlb_huge_update;
1378#else
1379				const enum label_id ls = label_vmalloc;
1380#endif
1381				u32 *split;
1382				int ov = 0;
1383				int i;
1384
1385				for (i = 0; i < ARRAY_SIZE(labels) && labels[i].lab != ls; i++)
1386					;
1387				BUG_ON(i == ARRAY_SIZE(labels));
1388				split = labels[i].addr;
1389
1390				/*
1391				 * See if we have overflown one way or the other.
1392				 */
1393				if (split > tlb_handler + MIPS64_REFILL_INSNS ||
1394				    split < p - MIPS64_REFILL_INSNS)
1395					ov = 1;
1396
1397				if (ov) {
1398					/*
1399					 * Split two instructions before the end.  One
1400					 * for the branch and one for the instruction
1401					 * in the delay slot.
1402					 */
1403					split = tlb_handler + MIPS64_REFILL_INSNS - 2;
1404
1405					/*
1406					 * If the branch would fall in a delay slot,
1407					 * we must back up an additional instruction
1408					 * so that it is no longer in a delay slot.
1409					 */
1410					if (uasm_insn_has_bdelay(relocs, split - 1))
1411						split--;
1412				}
1413				/* Copy first part of the handler. */
1414				uasm_copy_handler(relocs, labels, tlb_handler, split, f);
1415				f += split - tlb_handler;
1416
1417				if (ov) {
1418					/* Insert branch. */
1419					uasm_l_split(&l, final_handler);
1420					uasm_il_b(&f, &r, label_split);
1421					if (uasm_insn_has_bdelay(relocs, split))
1422						uasm_i_nop(&f);
1423					else {
1424						uasm_copy_handler(relocs, labels,
1425								  split, split + 1, f);
1426						uasm_move_labels(labels, f, f + 1, -1);
1427						f++;
1428						split++;
1429					}
1430				}
1431
1432				/* Copy the rest of the handler. */
1433				uasm_copy_handler(relocs, labels, split, p, final_handler);
1434				final_len = (f - (final_handler + MIPS64_REFILL_INSNS)) +
1435					    (p - split);
1436			}
1437		}
1438		break;
1439	}
1440
1441	uasm_resolve_relocs(relocs, labels);
1442	pr_debug("Wrote TLB refill handler (%u instructions).\n",
1443		 final_len);
1444
1445	memcpy((void *)ebase, final_handler, 0x100);
1446	local_flush_icache_range(ebase, ebase + 0x100);
1447
1448	dump_handler("r4000_tlb_refill", (u32 *)ebase, 64);
1449}
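/*
 * Layout sketch of the fold performed above for a 64-bit kernel whose
 * handler overflowed one 32-instruction slot (illustrative only):
 *
 *	ebase + 0x000:	unused 32-bit refill slot - the remainder of
 *			the handler, from the "split" label onward
 *	ebase + 0x080:	XTLB refill vector - the first instructions,
 *			ending in a branch (plus delay slot) to "split"
 *
 * The branch and its delay slot are why the overflow check demands up
 * to three free instructions rather than one.
 */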
1450
1451static void setup_pw(void)
1452{
1453	unsigned long pgd_i, pgd_w;
1454#ifndef __PAGETABLE_PMD_FOLDED
1455	unsigned long pmd_i, pmd_w;
1456#endif
1457	unsigned long pt_i, pt_w;
1458	unsigned long pte_i, pte_w;
1459#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
1460	unsigned long psn;
1461
1462	psn = ilog2(_PAGE_HUGE);     /* bit used to indicate huge page */
1463#endif
1464	pgd_i = PGDIR_SHIFT;  /* 1st level PGD */
1465#ifndef __PAGETABLE_PMD_FOLDED
1466	pgd_w = PGDIR_SHIFT - PMD_SHIFT + PGD_ORDER;
1467
1468	pmd_i = PMD_SHIFT;    /* 2nd level PMD */
1469	pmd_w = PMD_SHIFT - PAGE_SHIFT;
1470#else
1471	pgd_w = PGDIR_SHIFT - PAGE_SHIFT + PGD_ORDER;
1472#endif
1473
1474	pt_i  = PAGE_SHIFT;    /* 3rd level PTE */
1475	pt_w  = PAGE_SHIFT - 3;
1476
1477	pte_i = ilog2(_PAGE_GLOBAL);
1478	pte_w = 0;
1479
1480#ifndef __PAGETABLE_PMD_FOLDED
1481	write_c0_pwfield(pgd_i << 24 | pmd_i << 12 | pt_i << 6 | pte_i);
1482	write_c0_pwsize(1 << 30 | pgd_w << 24 | pmd_w << 12 | pt_w << 6 | pte_w);
1483#else
1484	write_c0_pwfield(pgd_i << 24 | pt_i << 6 | pte_i);
1485	write_c0_pwsize(1 << 30 | pgd_w << 24 | pt_w << 6 | pte_w);
1486#endif
1487
1488#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
1489	write_c0_pwctl(1 << 6 | psn);
1490#endif
1491	write_c0_kpgd(swapper_pg_dir);
1492	kscratch_used_mask |= (1 << 7); /* KScratch6 is used for KPGD */
1493}
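/*
 * Rough shape of what the writes above configure, for an assumed
 * 64-bit kernel with 4K pages and a folded PMD: PWField packs the
 * starting bit of each level's index (pgd_i == PGDIR_SHIFT, pt_i ==
 * 12, pte_i == ilog2(_PAGE_GLOBAL) for the flag rotation), PWSize
 * packs the width of each index with 1 << 30 selecting 64-bit table
 * pointers, and PWCtl combines a control bit (1 << 6) with psn, the
 * number of the PTE bit that marks a huge page.
 */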

static void build_loongson3_tlb_refill_handler(void)
{
	u32 *p = tlb_handler;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));
	memset(tlb_handler, 0, sizeof(tlb_handler));

	if (check_for_high_segbits) {
		uasm_i_dmfc0(&p, K0, C0_BADVADDR);
		uasm_i_dsrl_safe(&p, K1, K0, PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
		uasm_il_beqz(&p, &r, K1, label_vmalloc);
		uasm_i_nop(&p);

		uasm_il_bgez(&p, &r, K0, label_large_segbits_fault);
		uasm_i_nop(&p);
		uasm_l_vmalloc(&l, p);
	}

	uasm_i_dmfc0(&p, K1, C0_PGD);

	uasm_i_lddir(&p, K0, K1, 3);  /* global page dir */
#ifndef __PAGETABLE_PMD_FOLDED
	uasm_i_lddir(&p, K1, K0, 1);  /* middle page dir */
#endif
	uasm_i_ldpte(&p, K1, 0);      /* even */
	uasm_i_ldpte(&p, K1, 1);      /* odd */
	uasm_i_tlbwr(&p);

	/* restore page mask */
	if (PM_DEFAULT_MASK >> 16) {
		uasm_i_lui(&p, K0, PM_DEFAULT_MASK >> 16);
		uasm_i_ori(&p, K0, K0, PM_DEFAULT_MASK & 0xffff);
		uasm_i_mtc0(&p, K0, C0_PAGEMASK);
	} else if (PM_DEFAULT_MASK) {
		uasm_i_ori(&p, K0, 0, PM_DEFAULT_MASK);
		uasm_i_mtc0(&p, K0, C0_PAGEMASK);
	} else {
		uasm_i_mtc0(&p, 0, C0_PAGEMASK);
	}

	uasm_i_eret(&p);

	if (check_for_high_segbits) {
		uasm_l_large_segbits_fault(&l, p);
		UASM_i_LA(&p, K1, (unsigned long)tlb_do_page_fault_0);
		uasm_i_jr(&p, K1);
		uasm_i_nop(&p);
	}

	uasm_resolve_relocs(relocs, labels);
	memcpy((void *)(ebase + 0x80), tlb_handler, 0x80);
	local_flush_icache_range(ebase + 0x80, ebase + 0x100);
	dump_handler("loongson3_tlb_refill", (u32 *)(ebase + 0x80), 32);
}
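
/*
 * Editorial note: the Loongson handler is copied to ebase + 0x80
 * because that is the 64-bit XTLB refill exception vector; only 0x80
 * bytes (32 instructions) fit before the next vector at ebase + 0x100,
 * hence the copy length and the dump count of 32 above.
 */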

extern u32 handle_tlbl[], handle_tlbl_end[];
extern u32 handle_tlbs[], handle_tlbs_end[];
extern u32 handle_tlbm[], handle_tlbm_end[];
extern u32 tlbmiss_handler_setup_pgd_start[], tlbmiss_handler_setup_pgd[];
extern u32 tlbmiss_handler_setup_pgd_end[];

static void build_setup_pgd(void)
{
	const int a0 = 4;
	const int __maybe_unused a1 = 5;
	const int __maybe_unused a2 = 6;
	u32 *p = tlbmiss_handler_setup_pgd_start;
	const int tlbmiss_handler_setup_pgd_size =
		tlbmiss_handler_setup_pgd_end - tlbmiss_handler_setup_pgd_start;
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
	long pgdc = (long)pgd_current;
#endif

	memset(tlbmiss_handler_setup_pgd, 0, tlbmiss_handler_setup_pgd_size *
					sizeof(tlbmiss_handler_setup_pgd[0]));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));
	pgd_reg = allocate_kscratch();
#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
	if (pgd_reg == -1) {
		struct uasm_label *l = labels;
		struct uasm_reloc *r = relocs;

		/* PGD << 11 in c0_Context */
		/*
		 * If it is a ckseg0 address, convert to a physical
		 * address.  Shifting right by 29 and adding 4 will
		 * result in zero for these addresses.
		 */
		UASM_i_SRA(&p, a1, a0, 29);
		UASM_i_ADDIU(&p, a1, a1, 4);
		uasm_il_bnez(&p, &r, a1, label_tlbl_goaround1);
		uasm_i_nop(&p);
		uasm_i_dinsm(&p, a0, 0, 29, 64 - 29);
		uasm_l_tlbl_goaround1(&l, p);
		UASM_i_SLL(&p, a0, a0, 11);
		uasm_i_jr(&p, 31);
		UASM_i_MTC0(&p, a0, C0_CONTEXT);
	} else {
		/* PGD in c0_KScratch */
		uasm_i_jr(&p, 31);
		if (cpu_has_ldpte)
			UASM_i_MTC0(&p, a0, C0_PWBASE);
		else
			UASM_i_MTC0(&p, a0, c0_kscratch(), pgd_reg);
	}
#else
#ifdef CONFIG_SMP
	/* Save PGD to pgd_current[smp_processor_id()] */
	UASM_i_CPUID_MFC0(&p, a1, SMP_CPUID_REG);
	UASM_i_SRL_SAFE(&p, a1, a1, SMP_CPUID_PTRSHIFT);
	UASM_i_LA_mostly(&p, a2, pgdc);
	UASM_i_ADDU(&p, a2, a2, a1);
	UASM_i_SW(&p, a0, uasm_rel_lo(pgdc), a2);
#else
	UASM_i_LA_mostly(&p, a2, pgdc);
	UASM_i_SW(&p, a0, uasm_rel_lo(pgdc), a2);
#endif /* SMP */
	uasm_i_jr(&p, 31);

	/* if pgd_reg is allocated, save PGD also to scratch register */
	if (pgd_reg != -1)
		UASM_i_MTC0(&p, a0, c0_kscratch(), pgd_reg);
	else
		uasm_i_nop(&p);
#endif
	if (p >= tlbmiss_handler_setup_pgd_end)
		panic("tlbmiss_handler_setup_pgd space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote tlbmiss_handler_setup_pgd (%u instructions).\n",
		 (unsigned int)(p - tlbmiss_handler_setup_pgd));

	dump_handler("tlbmiss_handler", tlbmiss_handler_setup_pgd,
					tlbmiss_handler_setup_pgd_size);
}
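
/*
 * Editorial sketch of the ckseg0 check in build_setup_pgd above: for
 * a CKSEG0 pointer such as 0xffffffff80123000, the arithmetic shift
 * right by 29 yields -4 and the addiu of 4 gives 0, so the bnez
 * falls through and the dinsm clears bits 63..29, leaving the
 * physical address 0x00123000.  Any other address sums to non-zero
 * and skips the conversion.
 */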

static void
iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr)
{
#ifdef CONFIG_SMP
# ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (cpu_has_64bits)
		uasm_i_lld(p, pte, 0, ptr);
	else
# endif
		UASM_i_LL(p, pte, 0, ptr);
#else
# ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (cpu_has_64bits)
		uasm_i_ld(p, pte, 0, ptr);
	else
# endif
		UASM_i_LW(p, pte, 0, ptr);
#endif
}
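
/*
 * Editorial note: on SMP the PTE is fetched with a load-linked so
 * that the matching store-conditional emitted by iPTE_SW below can
 * detect a racing page table update; UP kernels get away with a
 * plain load and store.
 */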

static void
iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
	unsigned int mode, unsigned int scratch)
{
	unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY);
	unsigned int swmode = mode & ~hwmode;

	if (IS_ENABLED(CONFIG_XPA) && !cpu_has_64bits) {
		uasm_i_lui(p, scratch, swmode >> 16);
		uasm_i_or(p, pte, pte, scratch);
		BUG_ON(swmode & 0xffff);
	} else {
		uasm_i_ori(p, pte, pte, mode);
	}

#ifdef CONFIG_SMP
# ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (cpu_has_64bits)
		uasm_i_scd(p, pte, 0, ptr);
	else
# endif
		UASM_i_SC(p, pte, 0, ptr);

	if (r10000_llsc_war())
		uasm_il_beqzl(p, r, pte, label_smp_pgtable_change);
	else
		uasm_il_beqz(p, r, pte, label_smp_pgtable_change);

# ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (!cpu_has_64bits) {
		/* no uasm_i_nop needed */
		uasm_i_ll(p, pte, sizeof(pte_t) / 2, ptr);
		uasm_i_ori(p, pte, pte, hwmode);
		BUG_ON(hwmode & ~0xffff);
		uasm_i_sc(p, pte, sizeof(pte_t) / 2, ptr);
		uasm_il_beqz(p, r, pte, label_smp_pgtable_change);
		/* no uasm_i_nop needed */
		uasm_i_lw(p, pte, 0, ptr);
	} else
		uasm_i_nop(p);
# else
	uasm_i_nop(p);
# endif
#else
# ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (cpu_has_64bits)
		uasm_i_sd(p, pte, 0, ptr);
	else
# endif
		UASM_i_SW(p, pte, 0, ptr);

# ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (!cpu_has_64bits) {
		uasm_i_lw(p, pte, sizeof(pte_t) / 2, ptr);
		uasm_i_ori(p, pte, pte, hwmode);
		BUG_ON(hwmode & ~0xffff);
		uasm_i_sw(p, pte, sizeof(pte_t) / 2, ptr);
		uasm_i_lw(p, pte, 0, ptr);
	}
# endif
#endif
}
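
/*
 * Editorial sketch of what the emitted SMP sequence amounts to in C
 * (the pseudo-helpers are illustrative, not real kernel APIs):
 *
 *	do {
 *		pte = load_linked(ptr);		// emitted by iPTE_LW
 *		pte |= mode;
 *	} while (!store_conditional(ptr, pte));
 *
 * The beqz/beqzl back to label_smp_pgtable_change replays the whole
 * load/modify/store sequence whenever the sc fails.
 */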

/*
 * Check if PTE is present, if not then jump to LABEL. PTR points to
 * the page table where this PTE is located, PTE will be re-loaded
 * with its original value.
 */
static void
build_pte_present(u32 **p, struct uasm_reloc **r,
		  int pte, int ptr, int scratch, enum label_id lid)
{
	int t = scratch >= 0 ? scratch : pte;
	int cur = pte;

	if (cpu_has_rixi) {
		if (use_bbit_insns()) {
			uasm_il_bbit0(p, r, pte, ilog2(_PAGE_PRESENT), lid);
			uasm_i_nop(p);
		} else {
			if (_PAGE_PRESENT_SHIFT) {
				uasm_i_srl(p, t, cur, _PAGE_PRESENT_SHIFT);
				cur = t;
			}
			uasm_i_andi(p, t, cur, 1);
			uasm_il_beqz(p, r, t, lid);
			if (pte == t)
				/* You lose the SMP race :-( */
				iPTE_LW(p, pte, ptr);
		}
	} else {
		if (_PAGE_PRESENT_SHIFT) {
			uasm_i_srl(p, t, cur, _PAGE_PRESENT_SHIFT);
			cur = t;
		}
		uasm_i_andi(p, t, cur,
			(_PAGE_PRESENT | _PAGE_NO_READ) >> _PAGE_PRESENT_SHIFT);
		uasm_i_xori(p, t, t, _PAGE_PRESENT >> _PAGE_PRESENT_SHIFT);
		uasm_il_bnez(p, r, t, lid);
		if (pte == t)
			/* You lose the SMP race :-( */
			iPTE_LW(p, pte, ptr);
	}
}
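
/*
 * Editorial sketch of the non-RIXI sequence emitted above, with
 * symbolic register names:
 *
 *	srl	t, pte, _PAGE_PRESENT_SHIFT
 *	andi	t, t, (_PAGE_PRESENT | _PAGE_NO_READ) >> _PAGE_PRESENT_SHIFT
 *	xori	t, t, _PAGE_PRESENT >> _PAGE_PRESENT_SHIFT
 *	bnez	t, lid
 *
 * i.e. the branch to the slowpath is taken unless _PAGE_PRESENT is
 * set and _PAGE_NO_READ is clear.
 */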

/* Make PTE valid, store result in PTR. */
static void
build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte,
		 unsigned int ptr, unsigned int scratch)
{
	unsigned int mode = _PAGE_VALID | _PAGE_ACCESSED;

	iPTE_SW(p, r, pte, ptr, mode, scratch);
}

/*
 * Check if PTE can be written to, if not branch to LABEL. Regardless
 * restore PTE with value from PTR when done.
 */
static void
build_pte_writable(u32 **p, struct uasm_reloc **r,
		   unsigned int pte, unsigned int ptr, int scratch,
		   enum label_id lid)
{
	int t = scratch >= 0 ? scratch : pte;
	int cur = pte;

	if (_PAGE_PRESENT_SHIFT) {
		uasm_i_srl(p, t, cur, _PAGE_PRESENT_SHIFT);
		cur = t;
	}
	uasm_i_andi(p, t, cur,
		    (_PAGE_PRESENT | _PAGE_WRITE) >> _PAGE_PRESENT_SHIFT);
	uasm_i_xori(p, t, t,
		    (_PAGE_PRESENT | _PAGE_WRITE) >> _PAGE_PRESENT_SHIFT);
	uasm_il_bnez(p, r, t, lid);
	if (pte == t)
		/* You lose the SMP race :-( */
		iPTE_LW(p, pte, ptr);
	else
		uasm_i_nop(p);
}

/*
 * Make PTE writable, update software status bits as well, then store
 * at PTR.
 */
static void
build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte,
		 unsigned int ptr, unsigned int scratch)
{
	unsigned int mode = (_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID
			     | _PAGE_DIRTY);

	iPTE_SW(p, r, pte, ptr, mode, scratch);
}

/*
 * Check if PTE can be modified, if not branch to LABEL. Regardless
 * restore PTE with value from PTR when done.
 */
static void
build_pte_modifiable(u32 **p, struct uasm_reloc **r,
		     unsigned int pte, unsigned int ptr, int scratch,
		     enum label_id lid)
{
	if (use_bbit_insns()) {
		uasm_il_bbit0(p, r, pte, ilog2(_PAGE_WRITE), lid);
		uasm_i_nop(p);
	} else {
		int t = scratch >= 0 ? scratch : pte;

		uasm_i_srl(p, t, pte, _PAGE_WRITE_SHIFT);
		uasm_i_andi(p, t, t, 1);
		uasm_il_beqz(p, r, t, lid);
		if (pte == t)
			/* You lose the SMP race :-( */
			iPTE_LW(p, pte, ptr);
	}
}
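
/*
 * Editorial note: bbit0/bbit1 used above are Cavium Octeon
 * branch-on-bit instructions; where available they test one PTE bit
 * directly and save the srl/andi/beqz triple needed on other CPUs,
 * and they leave the PTE register untouched, so there is no SMP
 * race to lose and no reload needed.
 */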

#ifndef CONFIG_MIPS_PGD_C0_CONTEXT

/*
 * R3000 style TLB load/store/modify handlers.
 */

/*
 * This places the pte into ENTRYLO0 and writes it with tlbwi.
 * Then it returns.
 */
static void
build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp)
{
	uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
	uasm_i_mfc0(p, tmp, C0_EPC); /* cp0 delay */
	uasm_i_tlbwi(p);
	uasm_i_jr(p, tmp);
	uasm_i_rfe(p); /* branch delay */
}

/*
 * This places the pte into ENTRYLO0 and writes it with tlbwi
 * or tlbwr as appropriate.  This is because the index register
 * may have the probe fail bit set as a result of a trap on a
 * kseg2 access, i.e. without refill.  Then it returns.
 */
static void
build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l,
			     struct uasm_reloc **r, unsigned int pte,
			     unsigned int tmp)
{
	uasm_i_mfc0(p, tmp, C0_INDEX);
	uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
	uasm_il_bltz(p, r, tmp, label_r3000_write_probe_fail); /* cp0 delay */
	uasm_i_mfc0(p, tmp, C0_EPC); /* branch delay */
	uasm_i_tlbwi(p); /* cp0 delay */
	uasm_i_jr(p, tmp);
	uasm_i_rfe(p); /* branch delay */
	uasm_l_r3000_write_probe_fail(l, *p);
	uasm_i_tlbwr(p); /* cp0 delay */
	uasm_i_jr(p, tmp);
	uasm_i_rfe(p); /* branch delay */
}
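
/*
 * Editorial note: after a tlbp miss the R3000 sets the probe
 * failure bit (bit 31) in c0_index, so the bltz above picks tlbwi
 * when the probe found an entry and tlbwr when it did not.
 */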

static void
build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte,
				   unsigned int ptr)
{
	long pgdc = (long)pgd_current;

	uasm_i_mfc0(p, pte, C0_BADVADDR);
	uasm_i_lui(p, ptr, uasm_rel_hi(pgdc)); /* cp0 delay */
	uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
	uasm_i_srl(p, pte, pte, 22); /* load delay */
	uasm_i_sll(p, pte, pte, 2);
	uasm_i_addu(p, ptr, ptr, pte);
	uasm_i_mfc0(p, pte, C0_CONTEXT);
	uasm_i_lw(p, ptr, 0, ptr); /* cp0 delay */
	uasm_i_andi(p, pte, pte, 0xffc); /* load delay */
	uasm_i_addu(p, ptr, ptr, pte);
	uasm_i_lw(p, pte, 0, ptr);
	uasm_i_tlbp(p); /* load delay */
}
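
/*
 * Editorial sketch of the two-level walk emitted above, as plain C
 * (variable names are illustrative only):
 *
 *	pte_page = pgd_current[badvaddr >> 22];
 *	pte = *(u32 *)((u8 *)pte_page + (c0_context & 0xffc));
 *
 * The srl/sll pair scales the top ten bits of BadVAddr into a
 * 4-byte PGD index, and c0_Context supplies the PTE byte offset in
 * its low bits.
 */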

static void build_r3000_tlb_load_handler(void)
{
	u32 *p = handle_tlbl;
	const int handle_tlbl_size = handle_tlbl_end - handle_tlbl;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(handle_tlbl, 0, handle_tlbl_size * sizeof(handle_tlbl[0]));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r3000_tlbchange_handler_head(&p, K0, K1);
	build_pte_present(&p, &r, K0, K1, -1, label_nopage_tlbl);
	uasm_i_nop(&p); /* load delay */
	build_make_valid(&p, &r, K0, K1, -1);
	build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);

	uasm_l_nopage_tlbl(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
	uasm_i_nop(&p);

	if (p >= handle_tlbl_end)
		panic("TLB load handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbl));

	dump_handler("r3000_tlb_load", handle_tlbl, handle_tlbl_size);
}

static void build_r3000_tlb_store_handler(void)
{
	u32 *p = handle_tlbs;
	const int handle_tlbs_size = handle_tlbs_end - handle_tlbs;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(handle_tlbs, 0, handle_tlbs_size * sizeof(handle_tlbs[0]));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r3000_tlbchange_handler_head(&p, K0, K1);
	build_pte_writable(&p, &r, K0, K1, -1, label_nopage_tlbs);
	uasm_i_nop(&p); /* load delay */
	build_make_write(&p, &r, K0, K1, -1);
	build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);

	uasm_l_nopage_tlbs(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	uasm_i_nop(&p);

	if (p >= handle_tlbs_end)
		panic("TLB store handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbs));

	dump_handler("r3000_tlb_store", handle_tlbs, handle_tlbs_size);
}

static void build_r3000_tlb_modify_handler(void)
{
	u32 *p = handle_tlbm;
	const int handle_tlbm_size = handle_tlbm_end - handle_tlbm;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(handle_tlbm, 0, handle_tlbm_size * sizeof(handle_tlbm[0]));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r3000_tlbchange_handler_head(&p, K0, K1);
	build_pte_modifiable(&p, &r, K0, K1, -1, label_nopage_tlbm);
	uasm_i_nop(&p); /* load delay */
	build_make_write(&p, &r, K0, K1, -1);
	build_r3000_pte_reload_tlbwi(&p, K0, K1);

	uasm_l_nopage_tlbm(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	uasm_i_nop(&p);

	if (p >= handle_tlbm_end)
		panic("TLB modify handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbm));

	dump_handler("r3000_tlb_modify", handle_tlbm, handle_tlbm_size);
}
#endif /* CONFIG_MIPS_PGD_C0_CONTEXT */

/*
 * R4000 style TLB load/store/modify handlers.
 */
static struct work_registers
build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
				   struct uasm_reloc **r)
{
	struct work_registers wr = build_get_work_registers(p);

#ifdef CONFIG_64BIT
	build_get_pmde64(p, l, r, wr.r1, wr.r2); /* get pmd in ptr */
#else
	build_get_pgde32(p, wr.r1, wr.r2); /* get pgd in ptr */
#endif

#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
	/*
	 * For huge tlb entries, pmd doesn't contain an address but
	 * instead contains the tlb pte. Check the _PAGE_HUGE bit and
	 * see if we need to jump to huge tlb processing.
	 */
	build_is_huge_pte(p, r, wr.r1, wr.r2, label_tlb_huge_update);
#endif

	UASM_i_MFC0(p, wr.r1, C0_BADVADDR);
	UASM_i_LW(p, wr.r2, 0, wr.r2);
	UASM_i_SRL(p, wr.r1, wr.r1, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2);
	uasm_i_andi(p, wr.r1, wr.r1, (PTRS_PER_PTE - 1) << PTE_T_LOG2);
	UASM_i_ADDU(p, wr.r2, wr.r2, wr.r1);

#ifdef CONFIG_SMP
	uasm_l_smp_pgtable_change(l, *p);
#endif
	iPTE_LW(p, wr.r1, wr.r2); /* get even pte */
	if (!m4kc_tlbp_war()) {
		build_tlb_probe_entry(p);
		if (cpu_has_htw) {
			/* if the probe failed, a race occurred: leave */
			uasm_i_ehb(p);
			uasm_i_mfc0(p, wr.r3, C0_INDEX);
			uasm_il_bltz(p, r, wr.r3, label_leave);
			uasm_i_nop(p);
		}
	}
	return wr;
}
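
/*
 * Editorial sketch: assuming PTE_ORDER == 0, the srl/andi pair above
 * computes the faulting PTE's byte offset within its page table page
 * much like
 *
 *	off = (badvaddr >> PAGE_SHIFT) * sizeof(pte_t);
 *	off &= (PTRS_PER_PTE - 1) * sizeof(pte_t);
 *
 * with the single shift by PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2
 * folding the divide and multiply into one instruction.
 */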

static void
build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
				   struct uasm_reloc **r, unsigned int tmp,
				   unsigned int ptr)
{
	uasm_i_ori(p, ptr, ptr, sizeof(pte_t));
	uasm_i_xori(p, ptr, ptr, sizeof(pte_t));
	build_update_entries(p, tmp, ptr);
	build_tlb_write_entry(p, l, r, tlb_indexed);
	uasm_l_leave(l, *p);
	build_restore_work_registers(p);
	uasm_i_eret(p); /* return from trap */

#ifdef CONFIG_64BIT
	build_get_pgd_vmalloc64(p, l, r, tmp, ptr, not_refill);
#endif
}
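
/*
 * Editorial note: the ori/xori pair with sizeof(pte_t) above first
 * sets and then clears that bit of ptr, unconditionally rounding it
 * down to the even PTE of the even/odd pair before both EntryLo
 * values are regenerated by build_update_entries.
 */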

static void build_r4000_tlb_load_handler(void)
{
	u32 *p = handle_tlbl;
	const int handle_tlbl_size = handle_tlbl_end - handle_tlbl;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
	struct work_registers wr;

	memset(handle_tlbl, 0, handle_tlbl_size * sizeof(handle_tlbl[0]));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	if (bcm1250_m3_war()) {
		unsigned int segbits = 44;

		uasm_i_dmfc0(&p, K0, C0_BADVADDR);
		uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
		uasm_i_xor(&p, K0, K0, K1);
		uasm_i_dsrl_safe(&p, K1, K0, 62);
		uasm_i_dsrl_safe(&p, K0, K0, 12 + 1);
		uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits);
		uasm_i_or(&p, K0, K0, K1);
		uasm_il_bnez(&p, &r, K0, label_leave);
		/* No need for uasm_i_nop */
	}

	wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
	build_pte_present(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbl);
	if (m4kc_tlbp_war())
		build_tlb_probe_entry(&p);

	if (cpu_has_rixi && !cpu_has_rixiex) {
		/*
		 * If the page is not _PAGE_VALID, RI or XI could not
		 * have triggered it.  Skip the expensive test.
		 */
		if (use_bbit_insns()) {
			uasm_il_bbit0(&p, &r, wr.r1, ilog2(_PAGE_VALID),
				      label_tlbl_goaround1);
		} else {
			uasm_i_andi(&p, wr.r3, wr.r1, _PAGE_VALID);
			uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround1);
		}
		uasm_i_nop(&p);

		uasm_i_tlbr(&p);

		switch (current_cpu_type()) {
		default:
			if (cpu_has_mips_r2_exec_hazard) {
				uasm_i_ehb(&p);

		case CPU_CAVIUM_OCTEON:
		case CPU_CAVIUM_OCTEON_PLUS:
		case CPU_CAVIUM_OCTEON2:
				break;
			}
		}

		/* Examine entrylo 0 or 1 based on ptr. */
		if (use_bbit_insns()) {
			uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8);
		} else {
			uasm_i_andi(&p, wr.r3, wr.r2, sizeof(pte_t));
			uasm_i_beqz(&p, wr.r3, 8);
		}
		/* load it in the delay slot */
		UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO0);
		/* load it if ptr is odd */
		UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO1);
		/*
		 * If the entryLo (now in wr.r3) is valid (bit 1), RI or
		 * XI must have triggered it.
		 */
		if (use_bbit_insns()) {
			uasm_il_bbit1(&p, &r, wr.r3, 1, label_nopage_tlbl);
			uasm_i_nop(&p);
			uasm_l_tlbl_goaround1(&l, p);
		} else {
			uasm_i_andi(&p, wr.r3, wr.r3, 2);
			uasm_il_bnez(&p, &r, wr.r3, label_nopage_tlbl);
			uasm_i_nop(&p);
		}
		uasm_l_tlbl_goaround1(&l, p);
	}
	build_make_valid(&p, &r, wr.r1, wr.r2, wr.r3);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);

#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
	/*
	 * This is the entry point when build_r4000_tlbchange_handler_head
	 * spots a huge page.
	 */
	uasm_l_tlb_huge_update(&l, p);
	iPTE_LW(&p, wr.r1, wr.r2);
	build_pte_present(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbl);
	build_tlb_probe_entry(&p);

	if (cpu_has_rixi && !cpu_has_rixiex) {
		/*
		 * If the page is not _PAGE_VALID, RI or XI could not
		 * have triggered it.  Skip the expensive test.
		 */
		if (use_bbit_insns()) {
			uasm_il_bbit0(&p, &r, wr.r1, ilog2(_PAGE_VALID),
				      label_tlbl_goaround2);
		} else {
			uasm_i_andi(&p, wr.r3, wr.r1, _PAGE_VALID);
			uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround2);
		}
		uasm_i_nop(&p);

		uasm_i_tlbr(&p);

		switch (current_cpu_type()) {
		default:
			if (cpu_has_mips_r2_exec_hazard) {
				uasm_i_ehb(&p);

		case CPU_CAVIUM_OCTEON:
		case CPU_CAVIUM_OCTEON_PLUS:
		case CPU_CAVIUM_OCTEON2:
				break;
			}
		}

		/* Examine entrylo 0 or 1 based on ptr. */
		if (use_bbit_insns()) {
			uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8);
		} else {
			uasm_i_andi(&p, wr.r3, wr.r2, sizeof(pte_t));
			uasm_i_beqz(&p, wr.r3, 8);
		}
		/* load it in the delay slot */
		UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO0);
		/* load it if ptr is odd */
		UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO1);
		/*
		 * If the entryLo (now in wr.r3) is valid (bit 1), RI or
		 * XI must have triggered it.
		 */
		if (use_bbit_insns()) {
			uasm_il_bbit0(&p, &r, wr.r3, 1, label_tlbl_goaround2);
		} else {
			uasm_i_andi(&p, wr.r3, wr.r3, 2);
			uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround2);
		}
		if (PM_DEFAULT_MASK == 0)
			uasm_i_nop(&p);
		/*
		 * We clobbered C0_PAGEMASK, restore it.  On the other branch
		 * it is restored in build_huge_tlb_write_entry.
		 */
		build_restore_pagemask(&p, &r, wr.r3, label_nopage_tlbl, 0);

		uasm_l_tlbl_goaround2(&l, p);
	}
	uasm_i_ori(&p, wr.r1, wr.r1, (_PAGE_ACCESSED | _PAGE_VALID));
	build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 1);
#endif

	uasm_l_nopage_tlbl(&l, p);
	build_restore_work_registers(&p);
#ifdef CONFIG_CPU_MICROMIPS
	if ((unsigned long)tlb_do_page_fault_0 & 1) {
		uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_0));
		uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_0));
		uasm_i_jr(&p, K0);
	} else
#endif
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
	uasm_i_nop(&p);

	if (p >= handle_tlbl_end)
		panic("TLB load handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbl));

	dump_handler("r4000_tlb_load", handle_tlbl, handle_tlbl_size);
}

static void build_r4000_tlb_store_handler(void)
{
	u32 *p = handle_tlbs;
	const int handle_tlbs_size = handle_tlbs_end - handle_tlbs;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
	struct work_registers wr;

	memset(handle_tlbs, 0, handle_tlbs_size * sizeof(handle_tlbs[0]));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
	build_pte_writable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbs);
	if (m4kc_tlbp_war())
		build_tlb_probe_entry(&p);
	build_make_write(&p, &r, wr.r1, wr.r2, wr.r3);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);

#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
	/*
	 * This is the entry point when
	 * build_r4000_tlbchange_handler_head spots a huge page.
	 */
	uasm_l_tlb_huge_update(&l, p);
	iPTE_LW(&p, wr.r1, wr.r2);
	build_pte_writable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbs);
	build_tlb_probe_entry(&p);
	uasm_i_ori(&p, wr.r1, wr.r1,
		   _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
	build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 1);
#endif

	uasm_l_nopage_tlbs(&l, p);
	build_restore_work_registers(&p);
#ifdef CONFIG_CPU_MICROMIPS
	if ((unsigned long)tlb_do_page_fault_1 & 1) {
		uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_1));
		uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_1));
		uasm_i_jr(&p, K0);
	} else
#endif
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	uasm_i_nop(&p);

	if (p >= handle_tlbs_end)
		panic("TLB store handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbs));

	dump_handler("r4000_tlb_store", handle_tlbs, handle_tlbs_size);
}

static void build_r4000_tlb_modify_handler(void)
{
	u32 *p = handle_tlbm;
	const int handle_tlbm_size = handle_tlbm_end - handle_tlbm;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
	struct work_registers wr;

	memset(handle_tlbm, 0, handle_tlbm_size * sizeof(handle_tlbm[0]));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
	build_pte_modifiable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbm);
	if (m4kc_tlbp_war())
		build_tlb_probe_entry(&p);
	/* Present and writable bits set, set accessed and dirty bits. */
	build_make_write(&p, &r, wr.r1, wr.r2, wr.r3);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);

#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
	/*
	 * This is the entry point when
	 * build_r4000_tlbchange_handler_head spots a huge page.
	 */
	uasm_l_tlb_huge_update(&l, p);
	iPTE_LW(&p, wr.r1, wr.r2);
	build_pte_modifiable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbm);
	build_tlb_probe_entry(&p);
	uasm_i_ori(&p, wr.r1, wr.r1,
		   _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
	build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 0);
#endif

	uasm_l_nopage_tlbm(&l, p);
	build_restore_work_registers(&p);
#ifdef CONFIG_CPU_MICROMIPS
	if ((unsigned long)tlb_do_page_fault_1 & 1) {
		uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_1));
		uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_1));
		uasm_i_jr(&p, K0);
	} else
#endif
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	uasm_i_nop(&p);

	if (p >= handle_tlbm_end)
		panic("TLB modify handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbm));

	dump_handler("r4000_tlb_modify", handle_tlbm, handle_tlbm_size);
}

static void flush_tlb_handlers(void)
{
	local_flush_icache_range((unsigned long)handle_tlbl,
			   (unsigned long)handle_tlbl_end);
	local_flush_icache_range((unsigned long)handle_tlbs,
			   (unsigned long)handle_tlbs_end);
	local_flush_icache_range((unsigned long)handle_tlbm,
			   (unsigned long)handle_tlbm_end);
	local_flush_icache_range((unsigned long)tlbmiss_handler_setup_pgd,
			   (unsigned long)tlbmiss_handler_setup_pgd_end);
}

static void print_htw_config(void)
{
	unsigned long config;
	unsigned int pwctl;
	const int field = 2 * sizeof(unsigned long);

	config = read_c0_pwfield();
	pr_debug("PWField (0x%0*lx): GDI: 0x%02lx  UDI: 0x%02lx  MDI: 0x%02lx  PTI: 0x%02lx  PTEI: 0x%02lx\n",
		field, config,
		(config & MIPS_PWFIELD_GDI_MASK) >> MIPS_PWFIELD_GDI_SHIFT,
		(config & MIPS_PWFIELD_UDI_MASK) >> MIPS_PWFIELD_UDI_SHIFT,
		(config & MIPS_PWFIELD_MDI_MASK) >> MIPS_PWFIELD_MDI_SHIFT,
		(config & MIPS_PWFIELD_PTI_MASK) >> MIPS_PWFIELD_PTI_SHIFT,
		(config & MIPS_PWFIELD_PTEI_MASK) >> MIPS_PWFIELD_PTEI_SHIFT);

	config = read_c0_pwsize();
	pr_debug("PWSize  (0x%0*lx): PS: 0x%lx  GDW: 0x%02lx  UDW: 0x%02lx  MDW: 0x%02lx  PTW: 0x%02lx  PTEW: 0x%02lx\n",
		field, config,
		(config & MIPS_PWSIZE_PS_MASK) >> MIPS_PWSIZE_PS_SHIFT,
		(config & MIPS_PWSIZE_GDW_MASK) >> MIPS_PWSIZE_GDW_SHIFT,
		(config & MIPS_PWSIZE_UDW_MASK) >> MIPS_PWSIZE_UDW_SHIFT,
		(config & MIPS_PWSIZE_MDW_MASK) >> MIPS_PWSIZE_MDW_SHIFT,
		(config & MIPS_PWSIZE_PTW_MASK) >> MIPS_PWSIZE_PTW_SHIFT,
		(config & MIPS_PWSIZE_PTEW_MASK) >> MIPS_PWSIZE_PTEW_SHIFT);

	pwctl = read_c0_pwctl();
	pr_debug("PWCtl   (0x%x): PWEn: 0x%x  XK: 0x%x  XS: 0x%x  XU: 0x%x  DPH: 0x%x  HugePg: 0x%x  Psn: 0x%x\n",
		pwctl,
		(pwctl & MIPS_PWCTL_PWEN_MASK) >> MIPS_PWCTL_PWEN_SHIFT,
		(pwctl & MIPS_PWCTL_XK_MASK) >> MIPS_PWCTL_XK_SHIFT,
		(pwctl & MIPS_PWCTL_XS_MASK) >> MIPS_PWCTL_XS_SHIFT,
		(pwctl & MIPS_PWCTL_XU_MASK) >> MIPS_PWCTL_XU_SHIFT,
		(pwctl & MIPS_PWCTL_DPH_MASK) >> MIPS_PWCTL_DPH_SHIFT,
		(pwctl & MIPS_PWCTL_HUGEPG_MASK) >> MIPS_PWCTL_HUGEPG_SHIFT,
		(pwctl & MIPS_PWCTL_PSN_MASK) >> MIPS_PWCTL_PSN_SHIFT);
}

static void config_htw_params(void)
{
	unsigned long pwfield, pwsize, ptei;
	unsigned int config;

	/*
	 * We are using 2-level page tables, so we only need to
	 * set up GDW and PTW appropriately. UDW and MDW will remain 0.
	 * The default value of GDI/UDI/MDI/PTI is 0xc. It is illegal to
	 * write values less than 0xc in these fields because the entire
	 * write will be dropped. As a result, we must preserve
	 * the original reset values and overwrite only what we really want.
	 */

	pwfield = read_c0_pwfield();
	/* re-initialize the GDI field */
	pwfield &= ~MIPS_PWFIELD_GDI_MASK;
	pwfield |= PGDIR_SHIFT << MIPS_PWFIELD_GDI_SHIFT;
	/* re-initialize the PTI field including the even/odd bit */
	pwfield &= ~MIPS_PWFIELD_PTI_MASK;
	pwfield |= PAGE_SHIFT << MIPS_PWFIELD_PTI_SHIFT;
	if (CONFIG_PGTABLE_LEVELS >= 3) {
		pwfield &= ~MIPS_PWFIELD_MDI_MASK;
		pwfield |= PMD_SHIFT << MIPS_PWFIELD_MDI_SHIFT;
	}
	/* Set the PTEI right shift */
	ptei = _PAGE_GLOBAL_SHIFT << MIPS_PWFIELD_PTEI_SHIFT;
	pwfield |= ptei;
	write_c0_pwfield(pwfield);
	/* Check whether the PTEI value is supported */
	back_to_back_c0_hazard();
	pwfield = read_c0_pwfield();
	if (((pwfield & MIPS_PWFIELD_PTEI_MASK) << MIPS_PWFIELD_PTEI_SHIFT)
		!= ptei) {
		pr_warn("Unsupported PTEI field value: 0x%lx. HTW will not be enabled\n",
			ptei);
		/*
		 * Drop option to avoid HTW being enabled via another path
		 * (e.g. htw_reset())
		 */
		current_cpu_data.options &= ~MIPS_CPU_HTW;
		return;
	}

	pwsize = ilog2(PTRS_PER_PGD) << MIPS_PWSIZE_GDW_SHIFT;
	pwsize |= ilog2(PTRS_PER_PTE) << MIPS_PWSIZE_PTW_SHIFT;
	if (CONFIG_PGTABLE_LEVELS >= 3)
		pwsize |= ilog2(PTRS_PER_PMD) << MIPS_PWSIZE_MDW_SHIFT;

	/* Set pointer size to size of directory pointers */
	if (IS_ENABLED(CONFIG_64BIT))
		pwsize |= MIPS_PWSIZE_PS_MASK;
	/* PTEs may be multiple pointers long (e.g. with XPA) */
	pwsize |= ((PTE_T_LOG2 - PGD_T_LOG2) << MIPS_PWSIZE_PTEW_SHIFT)
			& MIPS_PWSIZE_PTEW_MASK;

	write_c0_pwsize(pwsize);

	/* Make sure everything is set before we enable the HTW */
	back_to_back_c0_hazard();

	/*
	 * Enable HTW (and only for XUSeg on 64-bit), and disable the rest of
	 * the pwctl fields.
	 */
	config = 1 << MIPS_PWCTL_PWEN_SHIFT;
	if (IS_ENABLED(CONFIG_64BIT))
		config |= MIPS_PWCTL_XU_MASK;
	write_c0_pwctl(config);
	pr_info("Hardware Page Table Walker enabled\n");

	print_htw_config();
}

static void config_xpa_params(void)
{
#ifdef CONFIG_XPA
	unsigned int pagegrain;

	if (mips_xpa_disabled) {
		pr_info("Extended Physical Addressing (XPA) disabled\n");
		return;
	}

	pagegrain = read_c0_pagegrain();
	write_c0_pagegrain(pagegrain | PG_ELPA);
	back_to_back_c0_hazard();
	pagegrain = read_c0_pagegrain();

	if (pagegrain & PG_ELPA)
		pr_info("Extended Physical Addressing (XPA) enabled\n");
	else
		panic("Extended Physical Addressing (XPA) disabled");
#endif
}

static void check_pabits(void)
{
	unsigned long entry;
	unsigned pabits, fillbits;

	if (!cpu_has_rixi || !_PAGE_NO_EXEC) {
		/*
		 * We'll only be making use of the fact that we can rotate bits
		 * into the fill if the CPU supports RIXI, so don't bother
		 * probing this for CPUs which don't.
		 */
		return;
	}

	write_c0_entrylo0(~0ul);
	back_to_back_c0_hazard();
	entry = read_c0_entrylo0();

	/* clear all non-PFN bits */
	entry &= ~((1 << MIPS_ENTRYLO_PFN_SHIFT) - 1);
	entry &= ~(MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI);

	/* find a lower bound on PABITS, and upper bound on fill bits */
	pabits = fls_long(entry) + 6;
	fillbits = max_t(int, (int)BITS_PER_LONG - pabits, 0);

	/* minus the RI & XI bits */
	fillbits -= min_t(unsigned, fillbits, 2);

	if (fillbits >= ilog2(_PAGE_NO_EXEC))
		fill_includes_sw_bits = true;

	pr_debug("Entry* registers contain %u fill bits\n", fillbits);
}
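
/*
 * Editorial worked example (values assumed, not from the source): on
 * a CPU implementing 40 physical address bits, writing ~0 to EntryLo0
 * reads back a PFN whose top set bit is bit 33, since PA bit 12 lands
 * on PFN bit 6 (a fixed offset of 6); fls_long() returns 34, so
 * pabits = 34 + 6 = 40.  On a 64-bit kernel that leaves 24 fill bits,
 * 22 after reserving RI and XI, comfortably enough to rotate software
 * bits like _PAGE_NO_EXEC into the fill.
 */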

void build_tlb_refill_handler(void)
{
	/*
	 * The refill handler is generated per-CPU, multi-node systems
	 * may have local storage for it. The other handlers are only
	 * needed once.
	 */
	static int run_once;

	if (IS_ENABLED(CONFIG_XPA) && !cpu_has_rixi)
		panic("Kernels supporting XPA currently require CPUs with RIXI");

	output_pgtable_bits_defines();
	check_pabits();

#ifdef CONFIG_64BIT
	check_for_high_segbits = current_cpu_data.vmbits > (PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
#endif

	switch (current_cpu_type()) {
	case CPU_R2000:
	case CPU_R3000:
	case CPU_R3000A:
	case CPU_R3081E:
	case CPU_TX3912:
	case CPU_TX3922:
	case CPU_TX3927:
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
		if (cpu_has_local_ebase)
			build_r3000_tlb_refill_handler();
		if (!run_once) {
			if (!cpu_has_local_ebase)
				build_r3000_tlb_refill_handler();
			build_setup_pgd();
			build_r3000_tlb_load_handler();
			build_r3000_tlb_store_handler();
			build_r3000_tlb_modify_handler();
			flush_tlb_handlers();
			run_once++;
		}
#else
		panic("No R3000 TLB refill handler");
#endif
		break;

	case CPU_R6000:
	case CPU_R6000A:
		panic("No R6000 TLB refill handler yet");
		break;

	case CPU_R8000:
		panic("No R8000 TLB refill handler yet");
		break;

	default:
		if (cpu_has_ldpte)
			setup_pw();

		if (!run_once) {
			scratch_reg = allocate_kscratch();
			build_setup_pgd();
			build_r4000_tlb_load_handler();
			build_r4000_tlb_store_handler();
			build_r4000_tlb_modify_handler();
			if (cpu_has_ldpte)
				build_loongson3_tlb_refill_handler();
			else if (!cpu_has_local_ebase)
				build_r4000_tlb_refill_handler();
			flush_tlb_handlers();
			run_once++;
		}
		if (cpu_has_local_ebase)
			build_r4000_tlb_refill_handler();
		if (cpu_has_xpa)
			config_xpa_params();
		if (cpu_has_htw)
			config_htw_params();
	}
}