   1/*
   2 * This file is subject to the terms and conditions of the GNU General Public
   3 * License.  See the file "COPYING" in the main directory of this archive
   4 * for more details.
   5 *
   6 * Synthesize TLB refill handlers at runtime.
   7 *
   8 * Copyright (C) 2004, 2005, 2006, 2008	 Thiemo Seufer
   9 * Copyright (C) 2005, 2007, 2008, 2009	 Maciej W. Rozycki
  10 * Copyright (C) 2006  Ralf Baechle (ralf@linux-mips.org)
  11 * Copyright (C) 2008, 2009 Cavium Networks, Inc.
  12 * Copyright (C) 2011  MIPS Technologies, Inc.
  13 *
  14 * ... and the days got worse and worse and now you see
  15 * I've gone completely out of my mind.
  16 *
  17 * They're coming to take me away haha
  18 * they're coming to take me away hoho hihi haha
  19 * to the funny farm where code is beautiful all the time ...
  20 *
  21 * (Condolences to Napoleon XIV)
  22 */
  23
  24#include <linux/bug.h>
  25#include <linux/export.h>
  26#include <linux/kernel.h>
  27#include <linux/types.h>
  28#include <linux/smp.h>
  29#include <linux/string.h>
  30#include <linux/cache.h>
  31#include <linux/pgtable.h>
  32
  33#include <asm/cacheflush.h>
  34#include <asm/cpu-type.h>
  35#include <asm/mmu_context.h>
  36#include <asm/uasm.h>
  37#include <asm/setup.h>
  38#include <asm/tlbex.h>
  39
  40static int mips_xpa_disabled;
  41
  42static int __init xpa_disable(char *s)
  43{
  44	mips_xpa_disabled = 1;
  45
  46	return 1;
  47}
  48
  49__setup("noxpa", xpa_disable);
  50
  51/*
  52 * TLB load/store/modify handlers.
  53 *
  54 * Only the fastpath gets synthesized at runtime, the slowpath for
  55 * do_page_fault remains normal asm.
  56 */
  57extern void tlb_do_page_fault_0(void);
  58extern void tlb_do_page_fault_1(void);
  59
  60struct work_registers {
  61	int r1;
  62	int r2;
  63	int r3;
  64};
  65
  66struct tlb_reg_save {
  67	unsigned long a;
  68	unsigned long b;
  69} ____cacheline_aligned_in_smp;
  70
  71static struct tlb_reg_save handler_reg_save[NR_CPUS];
  72
  73static inline int r45k_bvahwbug(void)
  74{
  75	/* XXX: We should probe for the presence of this bug, but we don't. */
  76	return 0;
  77}
  78
  79static inline int r4k_250MHZhwbug(void)
  80{
  81	/* XXX: We should probe for the presence of this bug, but we don't. */
  82	return 0;
  83}
  84
  85extern int sb1250_m3_workaround_needed(void);
  86
  87static inline int __maybe_unused bcm1250_m3_war(void)
  88{
  89	if (IS_ENABLED(CONFIG_SB1_PASS_2_WORKAROUNDS))
  90		return sb1250_m3_workaround_needed();
  91	return 0;
  92}
  93
  94static inline int __maybe_unused r10000_llsc_war(void)
  95{
  96	return IS_ENABLED(CONFIG_WAR_R10000_LLSC);
  97}
  98
  99static int use_bbit_insns(void)
 100{
 101	switch (current_cpu_type()) {
 102	case CPU_CAVIUM_OCTEON:
 103	case CPU_CAVIUM_OCTEON_PLUS:
 104	case CPU_CAVIUM_OCTEON2:
 105	case CPU_CAVIUM_OCTEON3:
 106		return 1;
 107	default:
 108		return 0;
 109	}
 110}
 111
 112static int use_lwx_insns(void)
 113{
 114	switch (current_cpu_type()) {
 115	case CPU_CAVIUM_OCTEON2:
 116	case CPU_CAVIUM_OCTEON3:
 117		return 1;
 118	default:
 119		return 0;
 120	}
 121}
 122#if defined(CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE) && \
 123    CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
 124static bool scratchpad_available(void)
 125{
 126	return true;
 127}
 128static int scratchpad_offset(int i)
 129{
 130	/*
 131	 * CVMSEG starts at address -32768 and extends for
 132	 * CAVIUM_OCTEON_CVMSEG_SIZE 128 byte cache lines.
 133	 */
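	/*
	 * Illustrative example (config value assumed, not taken from this
	 * file): with CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE == 2 and i == 0,
	 * this returns 2 * 128 - 8 - 32768 = -32520.
	 */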
 134	i += 1; /* Kernel use starts at the top and works down. */
 135	return CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE * 128 - (8 * i) - 32768;
 136}
 137#else
 138static bool scratchpad_available(void)
 139{
 140	return false;
 141}
 142static int scratchpad_offset(int i)
 143{
 144	BUG();
 145	/* Really unreachable, but evidently some GCC versions want this. */
 146	return 0;
 147}
 148#endif
 149/*
 150 * Found by experiment: At least some revisions of the 4kc throw a
 151 * machine check exception under some circumstances, triggered by invalid
 152 * values in the index register.  Delaying the tlbp instruction until
 153 * after the next branch, plus adding an additional nop in front of
 154 * tlbwi/tlbwr avoids the invalid index register values. Nobody knows
 155 * why; it's not an issue caused by the core RTL.
 156 *
 157 */
 158static int m4kc_tlbp_war(void)
 159{
 160	return current_cpu_type() == CPU_4KC;
 161}
 162
 163/* Handle labels (which must be positive integers). */
 164enum label_id {
 165	label_second_part = 1,
 166	label_leave,
 167	label_vmalloc,
 168	label_vmalloc_done,
 169	label_tlbw_hazard_0,
 170	label_split = label_tlbw_hazard_0 + 8,
 171	label_tlbl_goaround1,
 172	label_tlbl_goaround2,
 173	label_nopage_tlbl,
 174	label_nopage_tlbs,
 175	label_nopage_tlbm,
 176	label_smp_pgtable_change,
 177	label_r3000_write_probe_fail,
 178	label_large_segbits_fault,
 179#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
 180	label_tlb_huge_update,
 181#endif
 182};
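/*
 * label_tlbw_hazard_0 reserves eight consecutive label ids (instances
 * 0..7); they are emitted by uasm_bgezl_hazard()/uasm_bgezl_label()
 * below, which is why label_split starts at label_tlbw_hazard_0 + 8.
 */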
 183
 184UASM_L_LA(_second_part)
 185UASM_L_LA(_leave)
 186UASM_L_LA(_vmalloc)
 187UASM_L_LA(_vmalloc_done)
 188/* _tlbw_hazard_x is handled differently.  */
 189UASM_L_LA(_split)
 190UASM_L_LA(_tlbl_goaround1)
 191UASM_L_LA(_tlbl_goaround2)
 192UASM_L_LA(_nopage_tlbl)
 193UASM_L_LA(_nopage_tlbs)
 194UASM_L_LA(_nopage_tlbm)
 195UASM_L_LA(_smp_pgtable_change)
 196UASM_L_LA(_r3000_write_probe_fail)
 197UASM_L_LA(_large_segbits_fault)
 198#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
 199UASM_L_LA(_tlb_huge_update)
 200#endif
 201
 202static int hazard_instance;
 203
 204static void uasm_bgezl_hazard(u32 **p, struct uasm_reloc **r, int instance)
 205{
 206	switch (instance) {
 207	case 0 ... 7:
 208		uasm_il_bgezl(p, r, 0, label_tlbw_hazard_0 + instance);
 209		return;
 210	default:
 211		BUG();
 212	}
 213}
 214
 215static void uasm_bgezl_label(struct uasm_label **l, u32 **p, int instance)
 216{
 217	switch (instance) {
 218	case 0 ... 7:
 219		uasm_build_label(l, *p, label_tlbw_hazard_0 + instance);
 220		break;
 221	default:
 222		BUG();
 223	}
 224}
 225
 226/*
 227 * pgtable bits are assigned dynamically depending on processor feature
 228 * and statically based on kernel configuration.  This spits out the actual
 229 * values the kernel is using.	Required to make sense of the disassembled
 230 * TLB exception handlers.
 231 */
 232static void output_pgtable_bits_defines(void)
 233{
 234#define pr_define(fmt, ...)					\
 235	pr_debug("#define " fmt, ##__VA_ARGS__)
 236
 237	pr_debug("#include <asm/asm.h>\n");
 238	pr_debug("#include <asm/regdef.h>\n");
 239	pr_debug("\n");
 240
 241	pr_define("_PAGE_PRESENT_SHIFT %d\n", _PAGE_PRESENT_SHIFT);
 242	pr_define("_PAGE_NO_READ_SHIFT %d\n", _PAGE_NO_READ_SHIFT);
 243	pr_define("_PAGE_WRITE_SHIFT %d\n", _PAGE_WRITE_SHIFT);
 244	pr_define("_PAGE_ACCESSED_SHIFT %d\n", _PAGE_ACCESSED_SHIFT);
 245	pr_define("_PAGE_MODIFIED_SHIFT %d\n", _PAGE_MODIFIED_SHIFT);
 246#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
 247	pr_define("_PAGE_HUGE_SHIFT %d\n", _PAGE_HUGE_SHIFT);
 248#endif
 249#ifdef _PAGE_NO_EXEC_SHIFT
 250	if (cpu_has_rixi)
 251		pr_define("_PAGE_NO_EXEC_SHIFT %d\n", _PAGE_NO_EXEC_SHIFT);
 252#endif
 253	pr_define("_PAGE_GLOBAL_SHIFT %d\n", _PAGE_GLOBAL_SHIFT);
 254	pr_define("_PAGE_VALID_SHIFT %d\n", _PAGE_VALID_SHIFT);
 255	pr_define("_PAGE_DIRTY_SHIFT %d\n", _PAGE_DIRTY_SHIFT);
 256	pr_define("_PFN_SHIFT %d\n", _PFN_SHIFT);
 257	pr_debug("\n");
 258}
 259
 260static inline void dump_handler(const char *symbol, const void *start, const void *end)
 261{
 262	unsigned int count = (end - start) / sizeof(u32);
 263	const u32 *handler = start;
 264	int i;
 265
 266	pr_debug("LEAF(%s)\n", symbol);
 267
 268	pr_debug("\t.set push\n");
 269	pr_debug("\t.set noreorder\n");
 270
 271	for (i = 0; i < count; i++)
 272		pr_debug("\t.word\t0x%08x\t\t# %p\n", handler[i], &handler[i]);
 273
 274	pr_debug("\t.set\tpop\n");
 275
 276	pr_debug("\tEND(%s)\n", symbol);
 277}
 278
 279/* The only general purpose registers allowed in TLB handlers. */
 280#define K0		26
 281#define K1		27
 282
 283/* Some CP0 registers */
 284#define C0_INDEX	0, 0
 285#define C0_ENTRYLO0	2, 0
 286#define C0_TCBIND	2, 2
 287#define C0_ENTRYLO1	3, 0
 288#define C0_CONTEXT	4, 0
 289#define C0_PAGEMASK	5, 0
 290#define C0_PWBASE	5, 5
 291#define C0_PWFIELD	5, 6
 292#define C0_PWSIZE	5, 7
 293#define C0_PWCTL	6, 6
 294#define C0_BADVADDR	8, 0
 295#define C0_PGD		9, 7
 296#define C0_ENTRYHI	10, 0
 297#define C0_EPC		14, 0
 298#define C0_XCONTEXT	20, 0
 299
 300#ifdef CONFIG_64BIT
 301# define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_XCONTEXT)
 302#else
 303# define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_CONTEXT)
 304#endif
 305
 306/* The worst case length of the handler is around 18 instructions for
 307 * R3000-style TLBs and up to 63 instructions for R4000-style TLBs.
 308 * Maximum space available is 32 instructions for R3000 and 64
 309 * instructions for R4000.
 310 *
 311 * We deliberately chose a buffer size of 128, so we won't scribble
 312 * over anything important on overflow before we panic.
 313 */
 314static u32 tlb_handler[128];
 315
 316/* simply assume worst case size for labels and relocs */
 317static struct uasm_label labels[128];
 318static struct uasm_reloc relocs[128];
 319
 320static int check_for_high_segbits;
 321static bool fill_includes_sw_bits;
 322
 323static unsigned int kscratch_used_mask;
 324
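/*
 * The KScratch registers live in CP0 register 31; the particular select
 * to use is chosen at runtime from cpu_data[0].kscratch_mask.
 */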
 325static inline int __maybe_unused c0_kscratch(void)
 326{
 327	return 31;
 328}
 329
 330static int allocate_kscratch(void)
 331{
 332	int r;
 333	unsigned int a = cpu_data[0].kscratch_mask & ~kscratch_used_mask;
 334
 335	r = ffs(a);
 336
 337	if (r == 0)
 338		return -1;
 339
 340	r--; /* make it zero based */
 341
 342	kscratch_used_mask |= (1 << r);
 343
 344	return r;
 345}
 346
 347static int scratch_reg;
 348int pgd_reg;
 349EXPORT_SYMBOL_GPL(pgd_reg);
 350enum vmalloc64_mode {not_refill, refill_scratch, refill_noscratch};
 351
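/*
 * Pick three GPRs the generated fastpath may clobber: if a KScratch
 * register is available, $1 is stashed there and k0/k1/$1 are handed
 * out; otherwise $1 and $2 are spilled to the per-CPU handler_reg_save
 * area, addressed through k0.
 */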
 352static struct work_registers build_get_work_registers(u32 **p)
 353{
 354	struct work_registers r;
 355
 356	if (scratch_reg >= 0) {
 357		/* Save in CPU local C0_KScratch? */
 358		UASM_i_MTC0(p, 1, c0_kscratch(), scratch_reg);
 359		r.r1 = K0;
 360		r.r2 = K1;
 361		r.r3 = 1;
 362		return r;
 363	}
 364
 365	if (num_possible_cpus() > 1) {
 366		/* Get smp_processor_id */
 367		UASM_i_CPUID_MFC0(p, K0, SMP_CPUID_REG);
 368		UASM_i_SRL_SAFE(p, K0, K0, SMP_CPUID_REGSHIFT);
 369
 370		/* handler_reg_save index in K0 */
 371		UASM_i_SLL(p, K0, K0, ilog2(sizeof(struct tlb_reg_save)));
 372
 373		UASM_i_LA(p, K1, (long)&handler_reg_save);
 374		UASM_i_ADDU(p, K0, K0, K1);
 375	} else {
 376		UASM_i_LA(p, K0, (long)&handler_reg_save);
 377	}
 378	/* K0 now points to save area, save $1 and $2  */
 379	UASM_i_SW(p, 1, offsetof(struct tlb_reg_save, a), K0);
 380	UASM_i_SW(p, 2, offsetof(struct tlb_reg_save, b), K0);
 381
 382	r.r1 = K1;
 383	r.r2 = 1;
 384	r.r3 = 2;
 385	return r;
 386}
 387
 388static void build_restore_work_registers(u32 **p)
 389{
 390	if (scratch_reg >= 0) {
 391		uasm_i_ehb(p);
 392		UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
 393		return;
 394	}
 395	/* K0 already points to save area, restore $1 and $2  */
 396	UASM_i_LW(p, 1, offsetof(struct tlb_reg_save, a), K0);
 397	UASM_i_LW(p, 2, offsetof(struct tlb_reg_save, b), K0);
 398}
 399
 400#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
 401
 402/*
 403 * CONFIG_MIPS_PGD_C0_CONTEXT implies 64 bit and lack of pgd_current,
 404 * so we cannot do R3000 under these circumstances.
 405 *
 406 * The R3000 TLB handler is simple.
 407 */
 408static void build_r3000_tlb_refill_handler(void)
 409{
 410	long pgdc = (long)pgd_current;
 411	u32 *p;
 412
 413	memset(tlb_handler, 0, sizeof(tlb_handler));
 414	p = tlb_handler;
 415
 416	uasm_i_mfc0(&p, K0, C0_BADVADDR);
 417	uasm_i_lui(&p, K1, uasm_rel_hi(pgdc)); /* cp0 delay */
 418	uasm_i_lw(&p, K1, uasm_rel_lo(pgdc), K1);
 419	uasm_i_srl(&p, K0, K0, 22); /* load delay */
 420	uasm_i_sll(&p, K0, K0, 2);
 421	uasm_i_addu(&p, K1, K1, K0);
 422	uasm_i_mfc0(&p, K0, C0_CONTEXT);
 423	uasm_i_lw(&p, K1, 0, K1); /* cp0 delay */
 424	uasm_i_andi(&p, K0, K0, 0xffc); /* load delay */
 425	uasm_i_addu(&p, K1, K1, K0);
 426	uasm_i_lw(&p, K0, 0, K1);
 427	uasm_i_nop(&p); /* load delay */
 428	uasm_i_mtc0(&p, K0, C0_ENTRYLO0);
 429	uasm_i_mfc0(&p, K1, C0_EPC); /* cp0 delay */
 430	uasm_i_tlbwr(&p); /* cp0 delay */
 431	uasm_i_jr(&p, K1);
 432	uasm_i_rfe(&p); /* branch delay */
 433
 434	if (p > tlb_handler + 32)
 435		panic("TLB refill handler space exceeded");
 436
 437	pr_debug("Wrote TLB refill handler (%u instructions).\n",
 438		 (unsigned int)(p - tlb_handler));
 439
 440	memcpy((void *)ebase, tlb_handler, 0x80);
 441	local_flush_icache_range(ebase, ebase + 0x80);
 442	dump_handler("r3000_tlb_refill", (u32 *)ebase, (u32 *)(ebase + 0x80));
 443}
 444#endif /* CONFIG_MIPS_PGD_C0_CONTEXT */
 445
 446/*
 447 * The R4000 TLB handler is much more complicated. We have two
 448 * consecutive handler areas with 32 instructions space each.
 450 * Since they aren't used at the same time, we can overflow into the
 451 * other one.  To keep things simple, we first assume linear space,
 451 * then we relocate it to the final handler layout as needed.
 452 */
 453static u32 final_handler[64];
 454
 455/*
 456 * Hazards
 457 *
 458 * From the IDT errata for the QED RM5230 (Nevada), processor revision 1.0:
 459 * 2. A timing hazard exists for the TLBP instruction.
 460 *
 461 *	stalling_instruction
 462 *	TLBP
 463 *
 464 * The JTLB is being read for the TLBP throughout the stall generated by the
 465 * previous instruction. This is not really correct as the stalling instruction
 466 * can modify the address used to access the JTLB.  The failure symptom is that
 467 * the TLBP instruction will use an address created for the stalling instruction
 468 * and not the address held in C0_ENHI and thus report the wrong results.
 469 *
 470 * The software work-around is to not allow the instruction preceding the TLBP
 471 * to stall - make it an NOP or some other instruction guaranteed not to stall.
 472 *
 473 * Errata 2 will not be fixed.	This errata is also on the R5000.
 474 *
 475 * As if we MIPS hackers wouldn't know how to nop pipelines happy ...
 476 */
 477static void __maybe_unused build_tlb_probe_entry(u32 **p)
 478{
 479	switch (current_cpu_type()) {
 480	/* Found by experiment: R4600 v2.0/R4700 needs this, too.  */
 481	case CPU_R4600:
 482	case CPU_R4700:
 483	case CPU_R5000:
 484	case CPU_NEVADA:
 485		uasm_i_nop(p);
 486		uasm_i_tlbp(p);
 487		break;
 488
 489	default:
 490		uasm_i_tlbp(p);
 491		break;
 492	}
 493}
 494
 495void build_tlb_write_entry(u32 **p, struct uasm_label **l,
 496			   struct uasm_reloc **r,
 497			   enum tlb_write_entry wmode)
 498{
 499	void(*tlbw)(u32 **) = NULL;
 500
 501	switch (wmode) {
 502	case tlb_random: tlbw = uasm_i_tlbwr; break;
 503	case tlb_indexed: tlbw = uasm_i_tlbwi; break;
 504	}
 505
 506	if (cpu_has_mips_r2_r6) {
 507		if (cpu_has_mips_r2_exec_hazard)
 508			uasm_i_ehb(p);
 509		tlbw(p);
 510		return;
 511	}
 512
 513	switch (current_cpu_type()) {
 514	case CPU_R4000PC:
 515	case CPU_R4000SC:
 516	case CPU_R4000MC:
 517	case CPU_R4400PC:
 518	case CPU_R4400SC:
 519	case CPU_R4400MC:
 520		/*
 521		 * This branch uses up a mtc0 hazard nop slot and saves
 522		 * two nops after the tlbw instruction.
 523		 */
 524		uasm_bgezl_hazard(p, r, hazard_instance);
 525		tlbw(p);
 526		uasm_bgezl_label(l, p, hazard_instance);
 527		hazard_instance++;
 528		uasm_i_nop(p);
 529		break;
 530
 531	case CPU_R4600:
 532	case CPU_R4700:
 533		uasm_i_nop(p);
 534		tlbw(p);
 535		uasm_i_nop(p);
 536		break;
 537
 538	case CPU_R5000:
 539	case CPU_NEVADA:
 540		uasm_i_nop(p); /* QED specifies 2 nops hazard */
 541		uasm_i_nop(p); /* QED specifies 2 nops hazard */
 542		tlbw(p);
 543		break;
 544
 545	case CPU_R4300:
 546	case CPU_5KC:
 547	case CPU_TX49XX:
 548	case CPU_PR4450:
 549		uasm_i_nop(p);
 550		tlbw(p);
 551		break;
 552
 553	case CPU_R10000:
 554	case CPU_R12000:
 555	case CPU_R14000:
 556	case CPU_R16000:
 557	case CPU_4KC:
 558	case CPU_4KEC:
 559	case CPU_M14KC:
 560	case CPU_M14KEC:
 561	case CPU_SB1:
 562	case CPU_SB1A:
 563	case CPU_4KSC:
 564	case CPU_20KC:
 565	case CPU_25KF:
 566	case CPU_BMIPS32:
 567	case CPU_BMIPS3300:
 568	case CPU_BMIPS4350:
 569	case CPU_BMIPS4380:
 570	case CPU_BMIPS5000:
 571	case CPU_LOONGSON2EF:
 572	case CPU_LOONGSON64:
 573	case CPU_R5500:
 574		if (m4kc_tlbp_war())
 575			uasm_i_nop(p);
 576		fallthrough;
 577	case CPU_ALCHEMY:
 578		tlbw(p);
 579		break;
 580
 581	case CPU_RM7000:
 582		uasm_i_nop(p);
 583		uasm_i_nop(p);
 584		uasm_i_nop(p);
 585		uasm_i_nop(p);
 586		tlbw(p);
 587		break;
 588
 589	case CPU_XBURST:
 590		tlbw(p);
 591		uasm_i_nop(p);
 592		break;
 593
 594	default:
 595		panic("No TLB refill handler yet (CPU type: %d)",
 596		      current_cpu_type());
 597		break;
 598	}
 599}
 600EXPORT_SYMBOL_GPL(build_tlb_write_entry);
 601
 602static __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
 603							unsigned int reg)
 604{
 605	if (_PAGE_GLOBAL_SHIFT == 0) {
 606		/* pte_t is already in EntryLo format */
 607		return;
 608	}
 609
 610	if (cpu_has_rixi && _PAGE_NO_EXEC != 0) {
 611		if (fill_includes_sw_bits) {
 612			UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL));
 613		} else {
 614			UASM_i_SRL(p, reg, reg, ilog2(_PAGE_NO_EXEC));
 615			UASM_i_ROTR(p, reg, reg,
 616				    ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
 617		}
 618	} else {
 619#ifdef CONFIG_PHYS_ADDR_T_64BIT
 620		uasm_i_dsrl_safe(p, reg, reg, ilog2(_PAGE_GLOBAL));
 621#else
 622		UASM_i_SRL(p, reg, reg, ilog2(_PAGE_GLOBAL));
 623#endif
 624	}
 625}
 626
 627#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
 628
 629static void build_restore_pagemask(u32 **p, struct uasm_reloc **r,
 630				   unsigned int tmp, enum label_id lid,
 631				   int restore_scratch)
 632{
 633	if (restore_scratch) {
 634		/*
 635		 * Ensure the MFC0 below observes the value written to the
 636		 * KScratch register by the prior MTC0.
 637		 */
 638		if (scratch_reg >= 0)
 639			uasm_i_ehb(p);
 640
 641		/* Reset default page size */
 642		if (PM_DEFAULT_MASK >> 16) {
 643			uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
 644			uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
 645			uasm_i_mtc0(p, tmp, C0_PAGEMASK);
 646			uasm_il_b(p, r, lid);
 647		} else if (PM_DEFAULT_MASK) {
 648			uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
 649			uasm_i_mtc0(p, tmp, C0_PAGEMASK);
 650			uasm_il_b(p, r, lid);
 651		} else {
 652			uasm_i_mtc0(p, 0, C0_PAGEMASK);
 653			uasm_il_b(p, r, lid);
 654		}
 655		if (scratch_reg >= 0)
 656			UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
 657		else
 658			UASM_i_LW(p, 1, scratchpad_offset(0), 0);
 659	} else {
 660		/* Reset default page size */
 661		if (PM_DEFAULT_MASK >> 16) {
 662			uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
 663			uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
 664			uasm_il_b(p, r, lid);
 665			uasm_i_mtc0(p, tmp, C0_PAGEMASK);
 666		} else if (PM_DEFAULT_MASK) {
 667			uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
 668			uasm_il_b(p, r, lid);
 669			uasm_i_mtc0(p, tmp, C0_PAGEMASK);
 670		} else {
 671			uasm_il_b(p, r, lid);
 672			uasm_i_mtc0(p, 0, C0_PAGEMASK);
 673		}
 674	}
 675}
 676
 677static void build_huge_tlb_write_entry(u32 **p, struct uasm_label **l,
 678				       struct uasm_reloc **r,
 679				       unsigned int tmp,
 680				       enum tlb_write_entry wmode,
 681				       int restore_scratch)
 682{
 683	/* Set huge page tlb entry size */
 684	uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16);
 685	uasm_i_ori(p, tmp, tmp, PM_HUGE_MASK & 0xffff);
 686	uasm_i_mtc0(p, tmp, C0_PAGEMASK);
 687
 688	build_tlb_write_entry(p, l, r, wmode);
 689
 690	build_restore_pagemask(p, r, tmp, label_leave, restore_scratch);
 691}
 692
 693/*
 694 * Check if a huge PTE is present; if so, jump to LABEL.
 695 */
 696static void
 697build_is_huge_pte(u32 **p, struct uasm_reloc **r, unsigned int tmp,
 698		  unsigned int pmd, int lid)
 699{
 700	UASM_i_LW(p, tmp, 0, pmd);
 701	if (use_bbit_insns()) {
 702		uasm_il_bbit1(p, r, tmp, ilog2(_PAGE_HUGE), lid);
 703	} else {
 704		uasm_i_andi(p, tmp, tmp, _PAGE_HUGE);
 705		uasm_il_bnez(p, r, tmp, lid);
 706	}
 707}
 708
 709static void build_huge_update_entries(u32 **p, unsigned int pte,
 710				      unsigned int tmp)
 711{
 712	int small_sequence;
 713
 714	/*
 715	 * A huge PTE describes an area the size of the
 716	 * configured huge page size. This is twice the
 717	 * size of the large TLB entry we intend to use.
 718	 * A TLB entry half the size of the configured
 719	 * huge page size is configured into entrylo0
 720	 * and entrylo1 to cover the contiguous huge PTE
 721	 * address space.
 722	 */
 723	small_sequence = (HPAGE_SIZE >> 7) < 0x10000;
 724
 725	/* We can clobber tmp.	It isn't used after this.*/
 726	if (!small_sequence)
 727		uasm_i_lui(p, tmp, HPAGE_SIZE >> (7 + 16));
 728
 729	build_convert_pte_to_entrylo(p, pte);
 730	UASM_i_MTC0(p, pte, C0_ENTRYLO0); /* load it */
 731	/* convert to entrylo1 */
 732	if (small_sequence)
 733		UASM_i_ADDIU(p, pte, pte, HPAGE_SIZE >> 7);
 734	else
 735		UASM_i_ADDU(p, pte, pte, tmp);
 736
 737	UASM_i_MTC0(p, pte, C0_ENTRYLO1); /* load it */
 738}
 739
 740static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r,
 741				    struct uasm_label **l,
 742				    unsigned int pte,
 743				    unsigned int ptr,
 744				    unsigned int flush)
 745{
 746#ifdef CONFIG_SMP
 747	UASM_i_SC(p, pte, 0, ptr);
 748	uasm_il_beqz(p, r, pte, label_tlb_huge_update);
 749	UASM_i_LW(p, pte, 0, ptr); /* Needed because SC killed our PTE */
 750#else
 751	UASM_i_SW(p, pte, 0, ptr);
 752#endif
 753	if (cpu_has_ftlb && flush) {
 754		BUG_ON(!cpu_has_tlbinv);
 755
 756		UASM_i_MFC0(p, ptr, C0_ENTRYHI);
 757		uasm_i_ori(p, ptr, ptr, MIPS_ENTRYHI_EHINV);
 758		UASM_i_MTC0(p, ptr, C0_ENTRYHI);
 759		build_tlb_write_entry(p, l, r, tlb_indexed);
 760
 761		uasm_i_xori(p, ptr, ptr, MIPS_ENTRYHI_EHINV);
 762		UASM_i_MTC0(p, ptr, C0_ENTRYHI);
 763		build_huge_update_entries(p, pte, ptr);
 764		build_huge_tlb_write_entry(p, l, r, pte, tlb_random, 0);
 765
 766		return;
 767	}
 768
 769	build_huge_update_entries(p, pte, ptr);
 770	build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed, 0);
 771}
 772#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
 773
 774#ifdef CONFIG_64BIT
 775/*
 776 * TMP and PTR are scratch.
 777 * TMP will be clobbered, PTR will hold the pmd entry.
 778 */
 779void build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 780		      unsigned int tmp, unsigned int ptr)
 781{
 782#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
 783	long pgdc = (long)pgd_current;
 784#endif
 785	/*
 786	 * The vmalloc handling is not in the hotpath.
 787	 */
 788	uasm_i_dmfc0(p, tmp, C0_BADVADDR);
 789
 790	if (check_for_high_segbits) {
 791		/*
 792		 * The kernel currently implicitly assumes that the
 793		 * MIPS SEGBITS parameter for the processor is
 794		 * (PGDIR_SHIFT+PGDIR_BITS) or less, and will never
 795		 * allocate virtual addresses outside the maximum
 796		 * range for SEGBITS = (PGDIR_SHIFT+PGDIR_BITS). But
 797		 * that doesn't prevent user code from accessing the
 798		 * higher xuseg addresses.  Here, we make sure that
 799		 * everything but the lower xuseg addresses goes down
 800		 * the module_alloc/vmalloc path.
 801		 */
 802		uasm_i_dsrl_safe(p, ptr, tmp, PGDIR_SHIFT + PGD_TABLE_ORDER + PAGE_SHIFT - 3);
 803		uasm_il_bnez(p, r, ptr, label_vmalloc);
 804	} else {
 805		uasm_il_bltz(p, r, tmp, label_vmalloc);
 806	}
 807	/* No uasm_i_nop needed here, since the next insn doesn't touch TMP. */
 808
 809	if (pgd_reg != -1) {
 810		/* pgd is in pgd_reg */
 811		if (cpu_has_ldpte)
 812			UASM_i_MFC0(p, ptr, C0_PWBASE);
 813		else
 814			UASM_i_MFC0(p, ptr, c0_kscratch(), pgd_reg);
 815	} else {
 816#if defined(CONFIG_MIPS_PGD_C0_CONTEXT)
 817		/*
 818		 * &pgd << 11 stored in CONTEXT [23..63].
 819		 */
 820		UASM_i_MFC0(p, ptr, C0_CONTEXT);
 821
 822		/* Clear lower 23 bits of context. */
 823		uasm_i_dins(p, ptr, 0, 0, 23);
 824
 825		/* insert bit[63:59] of CAC_BASE into bit[11:6] of ptr */
 826		uasm_i_ori(p, ptr, ptr, ((u64)(CAC_BASE) >> 53));
 827		uasm_i_drotr(p, ptr, ptr, 11);
 828#elif defined(CONFIG_SMP)
 829		UASM_i_CPUID_MFC0(p, ptr, SMP_CPUID_REG);
 830		uasm_i_dsrl_safe(p, ptr, ptr, SMP_CPUID_PTRSHIFT);
 831		UASM_i_LA_mostly(p, tmp, pgdc);
 832		uasm_i_daddu(p, ptr, ptr, tmp);
 833		uasm_i_dmfc0(p, tmp, C0_BADVADDR);
 834		uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
 835#else
 836		UASM_i_LA_mostly(p, ptr, pgdc);
 837		uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
 838#endif
 839	}
 840
 841	uasm_l_vmalloc_done(l, *p);
 842
 843	/* get pgd offset in bytes */
 844	uasm_i_dsrl_safe(p, tmp, tmp, PGDIR_SHIFT - 3);
 845
 846	uasm_i_andi(p, tmp, tmp, (PTRS_PER_PGD - 1)<<3);
 847	uasm_i_daddu(p, ptr, ptr, tmp); /* add in pgd offset */
 848#ifndef __PAGETABLE_PUD_FOLDED
 849	uasm_i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */
 850	uasm_i_ld(p, ptr, 0, ptr); /* get pud pointer */
 851	uasm_i_dsrl_safe(p, tmp, tmp, PUD_SHIFT - 3); /* get pud offset in bytes */
 852	uasm_i_andi(p, tmp, tmp, (PTRS_PER_PUD - 1) << 3);
 853	uasm_i_daddu(p, ptr, ptr, tmp); /* add in pud offset */
 854#endif
 855#ifndef __PAGETABLE_PMD_FOLDED
 856	uasm_i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */
 857	uasm_i_ld(p, ptr, 0, ptr); /* get pmd pointer */
 858	uasm_i_dsrl_safe(p, tmp, tmp, PMD_SHIFT-3); /* get pmd offset in bytes */
 859	uasm_i_andi(p, tmp, tmp, (PTRS_PER_PMD - 1)<<3);
 860	uasm_i_daddu(p, ptr, ptr, tmp); /* add in pmd offset */
 861#endif
 862}
 863EXPORT_SYMBOL_GPL(build_get_pmde64);
 864
 865/*
 866 * BVADDR is the faulting address, PTR is scratch.
 867 * PTR will hold the pgd for vmalloc.
 868 */
 869static void
 870build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 871			unsigned int bvaddr, unsigned int ptr,
 872			enum vmalloc64_mode mode)
 873{
 874	long swpd = (long)swapper_pg_dir;
 875	int single_insn_swpd;
 876	int did_vmalloc_branch = 0;
 877
 878	single_insn_swpd = uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd);
 879
 880	uasm_l_vmalloc(l, *p);
 881
 882	if (mode != not_refill && check_for_high_segbits) {
 883		if (single_insn_swpd) {
 884			uasm_il_bltz(p, r, bvaddr, label_vmalloc_done);
 885			uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
 886			did_vmalloc_branch = 1;
 887			/* fall through */
 888		} else {
 889			uasm_il_bgez(p, r, bvaddr, label_large_segbits_fault);
 890		}
 891	}
 892	if (!did_vmalloc_branch) {
 893		if (single_insn_swpd) {
 894			uasm_il_b(p, r, label_vmalloc_done);
 895			uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
 896		} else {
 897			UASM_i_LA_mostly(p, ptr, swpd);
 898			uasm_il_b(p, r, label_vmalloc_done);
 899			if (uasm_in_compat_space_p(swpd))
 900				uasm_i_addiu(p, ptr, ptr, uasm_rel_lo(swpd));
 901			else
 902				uasm_i_daddiu(p, ptr, ptr, uasm_rel_lo(swpd));
 903		}
 904	}
 905	if (mode != not_refill && check_for_high_segbits) {
 906		uasm_l_large_segbits_fault(l, *p);
 907
 908		if (mode == refill_scratch && scratch_reg >= 0)
 909			uasm_i_ehb(p);
 910
 911		/*
 912		 * We get here if we are an xsseg address, or if we are
 913		 * an xuseg address above the (PGDIR_SHIFT+PGDIR_BITS) boundary.
 914		 *
 915		 * Ignoring xsseg (assumed disabled, so it would generate
 916		 * address errors?), the only remaining possibility
 917		 * is the upper xuseg addresses.  On processors with
 918		 * TLB_SEGBITS <= PGDIR_SHIFT+PGDIR_BITS, these
 919		 * addresses would have taken an address error. We try
 920		 * to mimic that here by taking a load/istream page
 921		 * fault.
 922		 */
 923		if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS))
 924			uasm_i_sync(p, 0);
 925		UASM_i_LA(p, ptr, (unsigned long)tlb_do_page_fault_0);
 926		uasm_i_jr(p, ptr);
 927
 928		if (mode == refill_scratch) {
 929			if (scratch_reg >= 0)
 930				UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
 931			else
 932				UASM_i_LW(p, 1, scratchpad_offset(0), 0);
 933		} else {
 934			uasm_i_nop(p);
 935		}
 936	}
 937}
 938
 939#else /* !CONFIG_64BIT */
 940
 941/*
 942 * TMP and PTR are scratch.
 943 * TMP will be clobbered, PTR will hold the pgd entry.
 944 */
 945void build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
 946{
 947	if (pgd_reg != -1) {
 948		/* pgd is in pgd_reg */
 949		uasm_i_mfc0(p, ptr, c0_kscratch(), pgd_reg);
 950		uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */
 951	} else {
 952		long pgdc = (long)pgd_current;
 953
 954		/* 32 bit SMP has smp_processor_id() stored in CONTEXT. */
 955#ifdef CONFIG_SMP
 956		uasm_i_mfc0(p, ptr, SMP_CPUID_REG);
 957		UASM_i_LA_mostly(p, tmp, pgdc);
 958		uasm_i_srl(p, ptr, ptr, SMP_CPUID_PTRSHIFT);
 959		uasm_i_addu(p, ptr, tmp, ptr);
 960#else
 961		UASM_i_LA_mostly(p, ptr, pgdc);
 962#endif
 963		uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */
 964		uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
 965	}
 966	uasm_i_srl(p, tmp, tmp, PGDIR_SHIFT); /* get pgd only bits */
 967	uasm_i_sll(p, tmp, tmp, PGD_T_LOG2);
 968	uasm_i_addu(p, ptr, ptr, tmp); /* add in pgd offset */
 969}
 970EXPORT_SYMBOL_GPL(build_get_pgde32);
 971
 972#endif /* !CONFIG_64BIT */
 973
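/*
 * build_adjust_context() turns the BadVPN2 field read from
 * c0_context/c0_xcontext into the byte offset of the even/odd PTE pair
 * within the page table, so build_get_ptep() can add it straight to the
 * page table pointer.
 */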
 974static void build_adjust_context(u32 **p, unsigned int ctx)
 975{
 976	unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12;
 977	unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1);
 978
 979	if (shift)
 980		UASM_i_SRL(p, ctx, ctx, shift);
 981	uasm_i_andi(p, ctx, ctx, mask);
 982}
 983
 984void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
 985{
 986	/*
 987	 * Bug workaround for the Nevada. It seems as if under certain
 988	 * circumstances the move from cp0_context might produce a
 989	 * bogus result when the mfc0 instruction and its consumer are
 990	 * in a different cacheline or a load instruction, probably any
 991	 * memory reference, is between them.
 992	 */
 993	switch (current_cpu_type()) {
 994	case CPU_NEVADA:
 995		UASM_i_LW(p, ptr, 0, ptr);
 996		GET_CONTEXT(p, tmp); /* get context reg */
 997		break;
 998
 999	default:
1000		GET_CONTEXT(p, tmp); /* get context reg */
1001		UASM_i_LW(p, ptr, 0, ptr);
1002		break;
1003	}
1004
1005	build_adjust_context(p, tmp);
1006	UASM_i_ADDU(p, ptr, ptr, tmp); /* add in offset */
1007}
1008EXPORT_SYMBOL_GPL(build_get_ptep);
1009
1010void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
1011{
1012	int pte_off_even = 0;
1013	int pte_off_odd = sizeof(pte_t);
1014
1015#if defined(CONFIG_CPU_MIPS32) && defined(CONFIG_PHYS_ADDR_T_64BIT)
1016	/* The low 32 bits of EntryLo is stored in pte_high */
1017	pte_off_even += offsetof(pte_t, pte_high);
1018	pte_off_odd += offsetof(pte_t, pte_high);
1019#endif
1020
1021	if (IS_ENABLED(CONFIG_XPA)) {
1022		uasm_i_lw(p, tmp, pte_off_even, ptep); /* even pte */
1023		UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
1024		UASM_i_MTC0(p, tmp, C0_ENTRYLO0);
1025
1026		if (cpu_has_xpa && !mips_xpa_disabled) {
1027			uasm_i_lw(p, tmp, 0, ptep);
1028			uasm_i_ext(p, tmp, tmp, 0, 24);
1029			uasm_i_mthc0(p, tmp, C0_ENTRYLO0);
1030		}
1031
1032		uasm_i_lw(p, tmp, pte_off_odd, ptep); /* odd pte */
1033		UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
1034		UASM_i_MTC0(p, tmp, C0_ENTRYLO1);
1035
1036		if (cpu_has_xpa && !mips_xpa_disabled) {
1037			uasm_i_lw(p, tmp, sizeof(pte_t), ptep);
1038			uasm_i_ext(p, tmp, tmp, 0, 24);
1039			uasm_i_mthc0(p, tmp, C0_ENTRYLO1);
1040		}
1041		return;
1042	}
1043
1044	UASM_i_LW(p, tmp, pte_off_even, ptep); /* get even pte */
1045	UASM_i_LW(p, ptep, pte_off_odd, ptep); /* get odd pte */
1046	if (r45k_bvahwbug())
1047		build_tlb_probe_entry(p);
1048	build_convert_pte_to_entrylo(p, tmp);
1049	if (r4k_250MHZhwbug())
1050		UASM_i_MTC0(p, 0, C0_ENTRYLO0);
1051	UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
1052	build_convert_pte_to_entrylo(p, ptep);
1053	if (r45k_bvahwbug())
1054		uasm_i_mfc0(p, tmp, C0_INDEX);
1055	if (r4k_250MHZhwbug())
1056		UASM_i_MTC0(p, 0, C0_ENTRYLO1);
1057	UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
1058}
1059EXPORT_SYMBOL_GPL(build_update_entries);
1060
1061struct mips_huge_tlb_info {
1062	int huge_pte;
1063	int restore_scratch;
1064	bool need_reload_pte;
1065};
1066
1067static struct mips_huge_tlb_info
1068build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
1069			       struct uasm_reloc **r, unsigned int tmp,
1070			       unsigned int ptr, int c0_scratch_reg)
1071{
1072	struct mips_huge_tlb_info rv;
1073	unsigned int even, odd;
1074	int vmalloc_branch_delay_filled = 0;
1075	const int scratch = 1; /* Our extra working register */
1076
1077	rv.huge_pte = scratch;
1078	rv.restore_scratch = 0;
1079	rv.need_reload_pte = false;
1080
1081	if (check_for_high_segbits) {
1082		UASM_i_MFC0(p, tmp, C0_BADVADDR);
1083
1084		if (pgd_reg != -1)
1085			UASM_i_MFC0(p, ptr, c0_kscratch(), pgd_reg);
1086		else
1087			UASM_i_MFC0(p, ptr, C0_CONTEXT);
1088
1089		if (c0_scratch_reg >= 0)
1090			UASM_i_MTC0(p, scratch, c0_kscratch(), c0_scratch_reg);
1091		else
1092			UASM_i_SW(p, scratch, scratchpad_offset(0), 0);
1093
1094		uasm_i_dsrl_safe(p, scratch, tmp,
1095				 PGDIR_SHIFT + PGD_TABLE_ORDER + PAGE_SHIFT - 3);
1096		uasm_il_bnez(p, r, scratch, label_vmalloc);
1097
1098		if (pgd_reg == -1) {
1099			vmalloc_branch_delay_filled = 1;
1100			/* Clear lower 23 bits of context. */
1101			uasm_i_dins(p, ptr, 0, 0, 23);
1102		}
1103	} else {
1104		if (pgd_reg != -1)
1105			UASM_i_MFC0(p, ptr, c0_kscratch(), pgd_reg);
1106		else
1107			UASM_i_MFC0(p, ptr, C0_CONTEXT);
1108
1109		UASM_i_MFC0(p, tmp, C0_BADVADDR);
1110
1111		if (c0_scratch_reg >= 0)
1112			UASM_i_MTC0(p, scratch, c0_kscratch(), c0_scratch_reg);
1113		else
1114			UASM_i_SW(p, scratch, scratchpad_offset(0), 0);
1115
1116		if (pgd_reg == -1)
1117			/* Clear lower 23 bits of context. */
1118			uasm_i_dins(p, ptr, 0, 0, 23);
1119
1120		uasm_il_bltz(p, r, tmp, label_vmalloc);
1121	}
1122
1123	if (pgd_reg == -1) {
1124		vmalloc_branch_delay_filled = 1;
1125		/* insert bit[63:59] of CAC_BASE into bit[11:6] of ptr */
1126		uasm_i_ori(p, ptr, ptr, ((u64)(CAC_BASE) >> 53));
1127
1128		uasm_i_drotr(p, ptr, ptr, 11);
1129	}
1130
1131#ifdef __PAGETABLE_PMD_FOLDED
1132#define LOC_PTEP scratch
1133#else
1134#define LOC_PTEP ptr
1135#endif
1136
1137	if (!vmalloc_branch_delay_filled)
1138		/* get pgd offset in bytes */
1139		uasm_i_dsrl_safe(p, scratch, tmp, PGDIR_SHIFT - 3);
1140
1141	uasm_l_vmalloc_done(l, *p);
1142
1143	/*
1144	 *			   tmp		ptr
1145	 * fall-through case =	 badvaddr  *pgd_current
1146	 * vmalloc case	     =	 badvaddr  swapper_pg_dir
1147	 */
1148
1149	if (vmalloc_branch_delay_filled)
1150		/* get pgd offset in bytes */
1151		uasm_i_dsrl_safe(p, scratch, tmp, PGDIR_SHIFT - 3);
1152
1153#ifdef __PAGETABLE_PMD_FOLDED
1154	GET_CONTEXT(p, tmp); /* get context reg */
1155#endif
1156	uasm_i_andi(p, scratch, scratch, (PTRS_PER_PGD - 1) << 3);
1157
1158	if (use_lwx_insns()) {
1159		UASM_i_LWX(p, LOC_PTEP, scratch, ptr);
1160	} else {
1161		uasm_i_daddu(p, ptr, ptr, scratch); /* add in pgd offset */
1162		uasm_i_ld(p, LOC_PTEP, 0, ptr); /* get pmd pointer */
1163	}
1164
1165#ifndef __PAGETABLE_PUD_FOLDED
1166	/* get pud offset in bytes */
1167	uasm_i_dsrl_safe(p, scratch, tmp, PUD_SHIFT - 3);
1168	uasm_i_andi(p, scratch, scratch, (PTRS_PER_PUD - 1) << 3);
1169
1170	if (use_lwx_insns()) {
1171		UASM_i_LWX(p, ptr, scratch, ptr);
1172	} else {
1173		uasm_i_daddu(p, ptr, ptr, scratch); /* add in pmd offset */
1174		UASM_i_LW(p, ptr, 0, ptr);
1175	}
1176	/* ptr contains a pointer to PMD entry */
1177	/* tmp contains the address */
1178#endif
1179
1180#ifndef __PAGETABLE_PMD_FOLDED
1181	/* get pmd offset in bytes */
1182	uasm_i_dsrl_safe(p, scratch, tmp, PMD_SHIFT - 3);
1183	uasm_i_andi(p, scratch, scratch, (PTRS_PER_PMD - 1) << 3);
1184	GET_CONTEXT(p, tmp); /* get context reg */
1185
1186	if (use_lwx_insns()) {
1187		UASM_i_LWX(p, scratch, scratch, ptr);
1188	} else {
1189		uasm_i_daddu(p, ptr, ptr, scratch); /* add in pmd offset */
1190		UASM_i_LW(p, scratch, 0, ptr);
1191	}
1192#endif
1193	/* Adjust the context during the load latency. */
1194	build_adjust_context(p, tmp);
1195
1196#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
1197	uasm_il_bbit1(p, r, scratch, ilog2(_PAGE_HUGE), label_tlb_huge_update);
1198	/*
 1199	 * In the LWX case we don't want to do the load in the
1200	 * delay slot.	It cannot issue in the same cycle and may be
1201	 * speculative and unneeded.
1202	 */
1203	if (use_lwx_insns())
1204		uasm_i_nop(p);
1205#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
1206
1207
1208	/* build_update_entries */
1209	if (use_lwx_insns()) {
1210		even = ptr;
1211		odd = tmp;
1212		UASM_i_LWX(p, even, scratch, tmp);
1213		UASM_i_ADDIU(p, tmp, tmp, sizeof(pte_t));
1214		UASM_i_LWX(p, odd, scratch, tmp);
1215	} else {
1216		UASM_i_ADDU(p, ptr, scratch, tmp); /* add in offset */
1217		even = tmp;
1218		odd = ptr;
1219		UASM_i_LW(p, even, 0, ptr); /* get even pte */
1220		UASM_i_LW(p, odd, sizeof(pte_t), ptr); /* get odd pte */
1221	}
1222	if (cpu_has_rixi) {
1223		uasm_i_drotr(p, even, even, ilog2(_PAGE_GLOBAL));
1224		UASM_i_MTC0(p, even, C0_ENTRYLO0); /* load it */
1225		uasm_i_drotr(p, odd, odd, ilog2(_PAGE_GLOBAL));
1226	} else {
1227		uasm_i_dsrl_safe(p, even, even, ilog2(_PAGE_GLOBAL));
1228		UASM_i_MTC0(p, even, C0_ENTRYLO0); /* load it */
1229		uasm_i_dsrl_safe(p, odd, odd, ilog2(_PAGE_GLOBAL));
1230	}
1231	UASM_i_MTC0(p, odd, C0_ENTRYLO1); /* load it */
1232
1233	if (c0_scratch_reg >= 0) {
1234		uasm_i_ehb(p);
1235		UASM_i_MFC0(p, scratch, c0_kscratch(), c0_scratch_reg);
1236		build_tlb_write_entry(p, l, r, tlb_random);
1237		uasm_l_leave(l, *p);
1238		rv.restore_scratch = 1;
1239	} else if (PAGE_SHIFT == 14 || PAGE_SHIFT == 13)  {
1240		build_tlb_write_entry(p, l, r, tlb_random);
1241		uasm_l_leave(l, *p);
1242		UASM_i_LW(p, scratch, scratchpad_offset(0), 0);
1243	} else {
1244		UASM_i_LW(p, scratch, scratchpad_offset(0), 0);
1245		build_tlb_write_entry(p, l, r, tlb_random);
1246		uasm_l_leave(l, *p);
1247		rv.restore_scratch = 1;
1248	}
1249
1250	uasm_i_eret(p); /* return from trap */
1251
1252	return rv;
1253}
1254
1255/*
1256 * For a 64-bit kernel, we are using the 64-bit XTLB refill exception
1257 * because EXL == 0.  If we wrap, we can also use the 32 instruction
1258 * slots before the XTLB refill exception handler which belong to the
1259 * unused TLB refill exception.
1260 */
1261#define MIPS64_REFILL_INSNS 32
1262
1263static void build_r4000_tlb_refill_handler(void)
1264{
1265	u32 *p = tlb_handler;
1266	struct uasm_label *l = labels;
1267	struct uasm_reloc *r = relocs;
1268	u32 *f;
1269	unsigned int final_len;
1270	struct mips_huge_tlb_info htlb_info __maybe_unused;
1271	enum vmalloc64_mode vmalloc_mode __maybe_unused;
1272
1273	memset(tlb_handler, 0, sizeof(tlb_handler));
1274	memset(labels, 0, sizeof(labels));
1275	memset(relocs, 0, sizeof(relocs));
1276	memset(final_handler, 0, sizeof(final_handler));
1277
1278	if (IS_ENABLED(CONFIG_64BIT) && (scratch_reg >= 0 || scratchpad_available()) && use_bbit_insns()) {
1279		htlb_info = build_fast_tlb_refill_handler(&p, &l, &r, K0, K1,
1280							  scratch_reg);
1281		vmalloc_mode = refill_scratch;
1282	} else {
1283		htlb_info.huge_pte = K0;
1284		htlb_info.restore_scratch = 0;
1285		htlb_info.need_reload_pte = true;
1286		vmalloc_mode = refill_noscratch;
1287		/*
1288		 * create the plain linear handler
1289		 */
1290		if (bcm1250_m3_war()) {
1291			unsigned int segbits = 44;
1292
1293			uasm_i_dmfc0(&p, K0, C0_BADVADDR);
1294			uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
1295			uasm_i_xor(&p, K0, K0, K1);
1296			uasm_i_dsrl_safe(&p, K1, K0, 62);
1297			uasm_i_dsrl_safe(&p, K0, K0, 12 + 1);
1298			uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits);
1299			uasm_i_or(&p, K0, K0, K1);
1300			uasm_il_bnez(&p, &r, K0, label_leave);
1301			/* No need for uasm_i_nop */
1302		}
1303
1304#ifdef CONFIG_64BIT
1305		build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */
1306#else
1307		build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
1308#endif
1309
1310#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
1311		build_is_huge_pte(&p, &r, K0, K1, label_tlb_huge_update);
1312#endif
1313
1314		build_get_ptep(&p, K0, K1);
1315		build_update_entries(&p, K0, K1);
1316		build_tlb_write_entry(&p, &l, &r, tlb_random);
1317		uasm_l_leave(&l, p);
1318		uasm_i_eret(&p); /* return from trap */
1319	}
1320#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
1321	uasm_l_tlb_huge_update(&l, p);
1322	if (htlb_info.need_reload_pte)
1323		UASM_i_LW(&p, htlb_info.huge_pte, 0, K1);
1324	build_huge_update_entries(&p, htlb_info.huge_pte, K1);
1325	build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random,
1326				   htlb_info.restore_scratch);
1327#endif
1328
1329#ifdef CONFIG_64BIT
1330	build_get_pgd_vmalloc64(&p, &l, &r, K0, K1, vmalloc_mode);
1331#endif
1332
1333	/*
1334	 * Overflow check: For the 64bit handler, we need at least one
1335	 * free instruction slot for the wrap-around branch. In worst
1336	 * case, if the intended insertion point is a delay slot, we
1337	 * need three, with the second nop'ed and the third being
1338	 * unused.
1339	 */
1340	switch (boot_cpu_type()) {
1341	default:
1342		if (sizeof(long) == 4) {
1343		fallthrough;
1344	case CPU_LOONGSON2EF:
1345		/* Loongson2 ebase is different than r4k, we have more space */
1346			if ((p - tlb_handler) > 64)
1347				panic("TLB refill handler space exceeded");
1348			/*
1349			 * Now fold the handler in the TLB refill handler space.
1350			 */
1351			f = final_handler;
1352			/* Simplest case, just copy the handler. */
1353			uasm_copy_handler(relocs, labels, tlb_handler, p, f);
1354			final_len = p - tlb_handler;
1355			break;
1356		} else {
1357			if (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 1)
1358			    || (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 3)
1359				&& uasm_insn_has_bdelay(relocs,
1360							tlb_handler + MIPS64_REFILL_INSNS - 3)))
1361				panic("TLB refill handler space exceeded");
1362			/*
1363			 * Now fold the handler in the TLB refill handler space.
1364			 */
1365			f = final_handler + MIPS64_REFILL_INSNS;
1366			if ((p - tlb_handler) <= MIPS64_REFILL_INSNS) {
1367				/* Just copy the handler. */
1368				uasm_copy_handler(relocs, labels, tlb_handler, p, f);
1369				final_len = p - tlb_handler;
1370			} else {
1371#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
1372				const enum label_id ls = label_tlb_huge_update;
1373#else
1374				const enum label_id ls = label_vmalloc;
1375#endif
1376				u32 *split;
1377				int ov = 0;
1378				int i;
1379
1380				for (i = 0; i < ARRAY_SIZE(labels) && labels[i].lab != ls; i++)
1381					;
1382				BUG_ON(i == ARRAY_SIZE(labels));
1383				split = labels[i].addr;
1384
1385				/*
1386				 * See if we have overflown one way or the other.
1387				 */
1388				if (split > tlb_handler + MIPS64_REFILL_INSNS ||
1389				    split < p - MIPS64_REFILL_INSNS)
1390					ov = 1;
1391
1392				if (ov) {
1393					/*
1394					 * Split two instructions before the end.  One
1395					 * for the branch and one for the instruction
1396					 * in the delay slot.
1397					 */
1398					split = tlb_handler + MIPS64_REFILL_INSNS - 2;
1399
1400					/*
1401					 * If the branch would fall in a delay slot,
1402					 * we must back up an additional instruction
1403					 * so that it is no longer in a delay slot.
1404					 */
1405					if (uasm_insn_has_bdelay(relocs, split - 1))
1406						split--;
1407				}
1408				/* Copy first part of the handler. */
1409				uasm_copy_handler(relocs, labels, tlb_handler, split, f);
1410				f += split - tlb_handler;
1411
1412				if (ov) {
1413					/* Insert branch. */
1414					uasm_l_split(&l, final_handler);
1415					uasm_il_b(&f, &r, label_split);
1416					if (uasm_insn_has_bdelay(relocs, split))
1417						uasm_i_nop(&f);
1418					else {
1419						uasm_copy_handler(relocs, labels,
1420								  split, split + 1, f);
1421						uasm_move_labels(labels, f, f + 1, -1);
1422						f++;
1423						split++;
1424					}
1425				}
1426
1427				/* Copy the rest of the handler. */
1428				uasm_copy_handler(relocs, labels, split, p, final_handler);
1429				final_len = (f - (final_handler + MIPS64_REFILL_INSNS)) +
1430					    (p - split);
1431			}
1432		}
1433		break;
1434	}
1435
1436	uasm_resolve_relocs(relocs, labels);
1437	pr_debug("Wrote TLB refill handler (%u instructions).\n",
1438		 final_len);
1439
1440	memcpy((void *)ebase, final_handler, 0x100);
1441	local_flush_icache_range(ebase, ebase + 0x100);
1442	dump_handler("r4000_tlb_refill", (u32 *)ebase, (u32 *)(ebase + 0x100));
1443}
1444
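/*
 * Program the hardware page table walker registers (PWField, PWSize,
 * PWCtl and KPGD) with the kernel's page table geometry; the lddir/ldpte
 * based Loongson-3 refill handler below relies on this setup.
 */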
1445static void setup_pw(void)
1446{
1447	unsigned int pwctl;
1448	unsigned long pgd_i, pgd_w;
1449#ifndef __PAGETABLE_PMD_FOLDED
1450	unsigned long pmd_i, pmd_w;
1451#endif
1452	unsigned long pt_i, pt_w;
1453	unsigned long pte_i, pte_w;
1454#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
1455	unsigned long psn;
1456
1457	psn = ilog2(_PAGE_HUGE);     /* bit used to indicate huge page */
1458#endif
1459	pgd_i = PGDIR_SHIFT;  /* 1st level PGD */
1460#ifndef __PAGETABLE_PMD_FOLDED
1461	pgd_w = PGDIR_SHIFT - PMD_SHIFT + PGD_TABLE_ORDER;
1462
1463	pmd_i = PMD_SHIFT;    /* 2nd level PMD */
1464	pmd_w = PMD_SHIFT - PAGE_SHIFT;
1465#else
1466	pgd_w = PGDIR_SHIFT - PAGE_SHIFT + PGD_TABLE_ORDER;
1467#endif
1468
1469	pt_i  = PAGE_SHIFT;    /* 3rd level PTE */
1470	pt_w  = PAGE_SHIFT - 3;
1471
1472	pte_i = ilog2(_PAGE_GLOBAL);
1473	pte_w = 0;
1474	pwctl = 1 << 30; /* Set PWDirExt */
1475
1476#ifndef __PAGETABLE_PMD_FOLDED
1477	write_c0_pwfield(pgd_i << 24 | pmd_i << 12 | pt_i << 6 | pte_i);
1478	write_c0_pwsize(1 << 30 | pgd_w << 24 | pmd_w << 12 | pt_w << 6 | pte_w);
1479#else
1480	write_c0_pwfield(pgd_i << 24 | pt_i << 6 | pte_i);
1481	write_c0_pwsize(1 << 30 | pgd_w << 24 | pt_w << 6 | pte_w);
1482#endif
1483
1484#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
1485	pwctl |= (1 << 6 | psn);
1486#endif
1487	write_c0_pwctl(pwctl);
1488	write_c0_kpgd((long)swapper_pg_dir);
1489	kscratch_used_mask |= (1 << 7); /* KScratch6 is used for KPGD */
1490}
1491
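/*
 * Loongson-3 style refill handler: lddir walks the directory levels and
 * ldpte fetches the even/odd PTE pair into EntryLo0/EntryLo1 using the
 * walker state configured in setup_pw().
 */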
1492static void build_loongson3_tlb_refill_handler(void)
1493{
1494	u32 *p = tlb_handler;
1495	struct uasm_label *l = labels;
1496	struct uasm_reloc *r = relocs;
1497
1498	memset(labels, 0, sizeof(labels));
1499	memset(relocs, 0, sizeof(relocs));
1500	memset(tlb_handler, 0, sizeof(tlb_handler));
1501
1502	if (check_for_high_segbits) {
1503		uasm_i_dmfc0(&p, K0, C0_BADVADDR);
1504		uasm_i_dsrl_safe(&p, K1, K0, PGDIR_SHIFT + PGD_TABLE_ORDER + PAGE_SHIFT - 3);
 1505		uasm_il_beqz(&p, &r, K1, label_vmalloc);
 1506		uasm_i_nop(&p);
1507
1508		uasm_il_bgez(&p, &r, K0, label_large_segbits_fault);
1509		uasm_i_nop(&p);
1510		uasm_l_vmalloc(&l, p);
1511	}
1512
1513	uasm_i_dmfc0(&p, K1, C0_PGD);
1514
1515	uasm_i_lddir(&p, K0, K1, 3);  /* global page dir */
1516#ifndef __PAGETABLE_PMD_FOLDED
1517	uasm_i_lddir(&p, K1, K0, 1);  /* middle page dir */
1518#endif
1519	uasm_i_ldpte(&p, K1, 0);      /* even */
1520	uasm_i_ldpte(&p, K1, 1);      /* odd */
1521	uasm_i_tlbwr(&p);
1522
1523	/* restore page mask */
1524	if (PM_DEFAULT_MASK >> 16) {
1525		uasm_i_lui(&p, K0, PM_DEFAULT_MASK >> 16);
1526		uasm_i_ori(&p, K0, K0, PM_DEFAULT_MASK & 0xffff);
1527		uasm_i_mtc0(&p, K0, C0_PAGEMASK);
1528	} else if (PM_DEFAULT_MASK) {
1529		uasm_i_ori(&p, K0, 0, PM_DEFAULT_MASK);
1530		uasm_i_mtc0(&p, K0, C0_PAGEMASK);
1531	} else {
1532		uasm_i_mtc0(&p, 0, C0_PAGEMASK);
1533	}
1534
1535	uasm_i_eret(&p);
1536
1537	if (check_for_high_segbits) {
1538		uasm_l_large_segbits_fault(&l, p);
1539		UASM_i_LA(&p, K1, (unsigned long)tlb_do_page_fault_0);
1540		uasm_i_jr(&p, K1);
1541		uasm_i_nop(&p);
1542	}
1543
1544	uasm_resolve_relocs(relocs, labels);
1545	memcpy((void *)(ebase + 0x80), tlb_handler, 0x80);
1546	local_flush_icache_range(ebase + 0x80, ebase + 0x100);
1547	dump_handler("loongson3_tlb_refill",
1548		     (u32 *)(ebase + 0x80), (u32 *)(ebase + 0x100));
1549}
1550
1551static void build_setup_pgd(void)
1552{
1553	const int a0 = 4;
1554	const int __maybe_unused a1 = 5;
1555	const int __maybe_unused a2 = 6;
1556	u32 *p = (u32 *)msk_isa16_mode((ulong)tlbmiss_handler_setup_pgd);
1557#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
1558	long pgdc = (long)pgd_current;
1559#endif
1560
1561	memset(p, 0, tlbmiss_handler_setup_pgd_end - (char *)p);
1562	memset(labels, 0, sizeof(labels));
1563	memset(relocs, 0, sizeof(relocs));
1564	pgd_reg = allocate_kscratch();
1565#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
1566	if (pgd_reg == -1) {
1567		struct uasm_label *l = labels;
1568		struct uasm_reloc *r = relocs;
1569
1570		/* PGD << 11 in c0_Context */
1571		/*
1572		 * If it is a ckseg0 address, convert to a physical
1573		 * address.  Shifting right by 29 and adding 4 will
1574		 * result in zero for these addresses.
1575		 *
1576		 */
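		/*
		 * Worked example: for a0 == 0xffffffff80000000 (CKSEG0 base)
		 * the arithmetic shift by 29 gives -4, adding 4 gives 0, so
		 * the bnez falls through and dinsm clears bits 63..29 to
		 * form the physical address.
		 */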
1577		UASM_i_SRA(&p, a1, a0, 29);
1578		UASM_i_ADDIU(&p, a1, a1, 4);
1579		uasm_il_bnez(&p, &r, a1, label_tlbl_goaround1);
1580		uasm_i_nop(&p);
1581		uasm_i_dinsm(&p, a0, 0, 29, 64 - 29);
1582		uasm_l_tlbl_goaround1(&l, p);
1583		UASM_i_SLL(&p, a0, a0, 11);
1584		UASM_i_MTC0(&p, a0, C0_CONTEXT);
1585		uasm_i_jr(&p, 31);
1586		uasm_i_ehb(&p);
1587	} else {
1588		/* PGD in c0_KScratch */
1589		if (cpu_has_ldpte)
1590			UASM_i_MTC0(&p, a0, C0_PWBASE);
1591		else
1592			UASM_i_MTC0(&p, a0, c0_kscratch(), pgd_reg);
1593		uasm_i_jr(&p, 31);
1594		uasm_i_ehb(&p);
1595	}
1596#else
1597#ifdef CONFIG_SMP
1598	/* Save PGD to pgd_current[smp_processor_id()] */
1599	UASM_i_CPUID_MFC0(&p, a1, SMP_CPUID_REG);
1600	UASM_i_SRL_SAFE(&p, a1, a1, SMP_CPUID_PTRSHIFT);
1601	UASM_i_LA_mostly(&p, a2, pgdc);
1602	UASM_i_ADDU(&p, a2, a2, a1);
1603	UASM_i_SW(&p, a0, uasm_rel_lo(pgdc), a2);
1604#else
1605	UASM_i_LA_mostly(&p, a2, pgdc);
1606	UASM_i_SW(&p, a0, uasm_rel_lo(pgdc), a2);
1607#endif /* SMP */
1608
1609	/* if pgd_reg is allocated, save PGD also to scratch register */
1610	if (pgd_reg != -1) {
1611		UASM_i_MTC0(&p, a0, c0_kscratch(), pgd_reg);
1612		uasm_i_jr(&p, 31);
1613		uasm_i_ehb(&p);
1614	} else {
1615		uasm_i_jr(&p, 31);
1616		uasm_i_nop(&p);
1617	}
1618#endif
1619	if (p >= (u32 *)tlbmiss_handler_setup_pgd_end)
1620		panic("tlbmiss_handler_setup_pgd space exceeded");
1621
1622	uasm_resolve_relocs(relocs, labels);
1623	pr_debug("Wrote tlbmiss_handler_setup_pgd (%u instructions).\n",
1624		 (unsigned int)(p - (u32 *)tlbmiss_handler_setup_pgd));
1625
1626	dump_handler("tlbmiss_handler", tlbmiss_handler_setup_pgd,
1627					tlbmiss_handler_setup_pgd_end);
1628}
1629
1630static void
1631iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr)
1632{
1633#ifdef CONFIG_SMP
1634	if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS))
1635		uasm_i_sync(p, 0);
1636# ifdef CONFIG_PHYS_ADDR_T_64BIT
1637	if (cpu_has_64bits)
1638		uasm_i_lld(p, pte, 0, ptr);
1639	else
1640# endif
1641		UASM_i_LL(p, pte, 0, ptr);
1642#else
1643# ifdef CONFIG_PHYS_ADDR_T_64BIT
1644	if (cpu_has_64bits)
1645		uasm_i_ld(p, pte, 0, ptr);
1646	else
1647# endif
1648		UASM_i_LW(p, pte, 0, ptr);
1649#endif
1650}
1651
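/*
 * OR the requested mode bits into the PTE and store it back (atomically
 * on SMP).  With 32-bit XPA the software-maintained bits sit above bit
 * 15, so they are applied with lui+or, while the hardware VALID/DIRTY
 * bits are applied to the other half of the pte_t further down.
 */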
1652static void
1653iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
1654	unsigned int mode, unsigned int scratch)
1655{
1656	unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY);
1657	unsigned int swmode = mode & ~hwmode;
1658
1659	if (IS_ENABLED(CONFIG_XPA) && !cpu_has_64bits) {
1660		uasm_i_lui(p, scratch, swmode >> 16);
1661		uasm_i_or(p, pte, pte, scratch);
1662		BUG_ON(swmode & 0xffff);
1663	} else {
1664		uasm_i_ori(p, pte, pte, mode);
1665	}
1666
1667#ifdef CONFIG_SMP
1668# ifdef CONFIG_PHYS_ADDR_T_64BIT
1669	if (cpu_has_64bits)
1670		uasm_i_scd(p, pte, 0, ptr);
1671	else
1672# endif
1673		UASM_i_SC(p, pte, 0, ptr);
1674
1675	if (r10000_llsc_war())
1676		uasm_il_beqzl(p, r, pte, label_smp_pgtable_change);
1677	else
1678		uasm_il_beqz(p, r, pte, label_smp_pgtable_change);
1679
1680# ifdef CONFIG_PHYS_ADDR_T_64BIT
1681	if (!cpu_has_64bits) {
1682		/* no uasm_i_nop needed */
1683		uasm_i_ll(p, pte, sizeof(pte_t) / 2, ptr);
1684		uasm_i_ori(p, pte, pte, hwmode);
1685		BUG_ON(hwmode & ~0xffff);
1686		uasm_i_sc(p, pte, sizeof(pte_t) / 2, ptr);
1687		uasm_il_beqz(p, r, pte, label_smp_pgtable_change);
1688		/* no uasm_i_nop needed */
1689		uasm_i_lw(p, pte, 0, ptr);
1690	} else
1691		uasm_i_nop(p);
1692# else
1693	uasm_i_nop(p);
1694# endif
1695#else
1696# ifdef CONFIG_PHYS_ADDR_T_64BIT
1697	if (cpu_has_64bits)
1698		uasm_i_sd(p, pte, 0, ptr);
1699	else
1700# endif
1701		UASM_i_SW(p, pte, 0, ptr);
1702
1703# ifdef CONFIG_PHYS_ADDR_T_64BIT
1704	if (!cpu_has_64bits) {
1705		uasm_i_lw(p, pte, sizeof(pte_t) / 2, ptr);
1706		uasm_i_ori(p, pte, pte, hwmode);
1707		BUG_ON(hwmode & ~0xffff);
1708		uasm_i_sw(p, pte, sizeof(pte_t) / 2, ptr);
1709		uasm_i_lw(p, pte, 0, ptr);
1710	}
1711# endif
1712#endif
1713}
1714
1715/*
 1716 * Check if PTE is present; if not, jump to LABEL. PTR points to
 1717 * the page table where this PTE is located; PTE will be re-loaded
 1718 * with its original value.
1719 */
1720static void
1721build_pte_present(u32 **p, struct uasm_reloc **r,
1722		  int pte, int ptr, int scratch, enum label_id lid)
1723{
1724	int t = scratch >= 0 ? scratch : pte;
1725	int cur = pte;
1726
1727	if (cpu_has_rixi) {
1728		if (use_bbit_insns()) {
1729			uasm_il_bbit0(p, r, pte, ilog2(_PAGE_PRESENT), lid);
1730			uasm_i_nop(p);
1731		} else {
1732			if (_PAGE_PRESENT_SHIFT) {
1733				uasm_i_srl(p, t, cur, _PAGE_PRESENT_SHIFT);
1734				cur = t;
1735			}
1736			uasm_i_andi(p, t, cur, 1);
1737			uasm_il_beqz(p, r, t, lid);
1738			if (pte == t)
1739				/* You lose the SMP race :-(*/
1740				iPTE_LW(p, pte, ptr);
1741		}
1742	} else {
1743		if (_PAGE_PRESENT_SHIFT) {
1744			uasm_i_srl(p, t, cur, _PAGE_PRESENT_SHIFT);
1745			cur = t;
1746		}
1747		uasm_i_andi(p, t, cur,
1748			(_PAGE_PRESENT | _PAGE_NO_READ) >> _PAGE_PRESENT_SHIFT);
1749		uasm_i_xori(p, t, t, _PAGE_PRESENT >> _PAGE_PRESENT_SHIFT);
1750		uasm_il_bnez(p, r, t, lid);
1751		if (pte == t)
1752			/* You lose the SMP race :-(*/
1753			iPTE_LW(p, pte, ptr);
1754	}
1755}
1756
1757/* Make PTE valid, store result in PTR. */
1758static void
1759build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte,
1760		 unsigned int ptr, unsigned int scratch)
1761{
1762	unsigned int mode = _PAGE_VALID | _PAGE_ACCESSED;
1763
1764	iPTE_SW(p, r, pte, ptr, mode, scratch);
1765}
1766
1767/*
 1768 * Check if PTE can be written to; if not, branch to LABEL. Regardless,
 1769 * restore PTE with the value from PTR when done.
1770 */
1771static void
1772build_pte_writable(u32 **p, struct uasm_reloc **r,
1773		   unsigned int pte, unsigned int ptr, int scratch,
1774		   enum label_id lid)
1775{
1776	int t = scratch >= 0 ? scratch : pte;
1777	int cur = pte;
1778
1779	if (_PAGE_PRESENT_SHIFT) {
1780		uasm_i_srl(p, t, cur, _PAGE_PRESENT_SHIFT);
1781		cur = t;
1782	}
1783	uasm_i_andi(p, t, cur,
1784		    (_PAGE_PRESENT | _PAGE_WRITE) >> _PAGE_PRESENT_SHIFT);
1785	uasm_i_xori(p, t, t,
1786		    (_PAGE_PRESENT | _PAGE_WRITE) >> _PAGE_PRESENT_SHIFT);
1787	uasm_il_bnez(p, r, t, lid);
1788	if (pte == t)
1789		/* You lose the SMP race :-(*/
1790		iPTE_LW(p, pte, ptr);
1791	else
1792		uasm_i_nop(p);
1793}
1794
1795/* Make PTE writable, update software status bits as well, then store
1796 * at PTR.
1797 */
1798static void
1799build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte,
1800		 unsigned int ptr, unsigned int scratch)
1801{
1802	unsigned int mode = (_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID
1803			     | _PAGE_DIRTY);
1804
1805	iPTE_SW(p, r, pte, ptr, mode, scratch);
1806}
1807
1808/*
 1809 * Check if PTE can be modified; if not, branch to LABEL. Regardless,
 1810 * restore PTE with the value from PTR when done.
1811 */
1812static void
1813build_pte_modifiable(u32 **p, struct uasm_reloc **r,
1814		     unsigned int pte, unsigned int ptr, int scratch,
1815		     enum label_id lid)
1816{
1817	if (use_bbit_insns()) {
1818		uasm_il_bbit0(p, r, pte, ilog2(_PAGE_WRITE), lid);
1819		uasm_i_nop(p);
1820	} else {
1821		int t = scratch >= 0 ? scratch : pte;
1822		uasm_i_srl(p, t, pte, _PAGE_WRITE_SHIFT);
1823		uasm_i_andi(p, t, t, 1);
1824		uasm_il_beqz(p, r, t, lid);
1825		if (pte == t)
1826			/* You lose the SMP race :-(*/
1827			iPTE_LW(p, pte, ptr);
1828	}
1829}
1830
1831#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
1832
1833
1834/*
1835 * R3000 style TLB load/store/modify handlers.
1836 */
1837
1838/*
1839 * This places the pte into ENTRYLO0 and writes it with tlbwi.
1840 * Then it returns.
1841 */
1842static void
1843build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp)
1844{
1845	uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
1846	uasm_i_mfc0(p, tmp, C0_EPC); /* cp0 delay */
1847	uasm_i_tlbwi(p);
1848	uasm_i_jr(p, tmp);
1849	uasm_i_rfe(p); /* branch delay */
1850}
1851
1852/*
1853 * This places the pte into ENTRYLO0 and writes it with tlbwi
1854 * or tlbwr as appropriate.  This is because the index register
1855 * may have the probe fail bit set as a result of a trap on a
1856 * kseg2 access, i.e. without refill.  Then it returns.
1857 */
1858static void
1859build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l,
1860			     struct uasm_reloc **r, unsigned int pte,
1861			     unsigned int tmp)
1862{
1863	uasm_i_mfc0(p, tmp, C0_INDEX);
1864	uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
1865	uasm_il_bltz(p, r, tmp, label_r3000_write_probe_fail); /* cp0 delay */
1866	uasm_i_mfc0(p, tmp, C0_EPC); /* branch delay */
1867	uasm_i_tlbwi(p); /* cp0 delay */
1868	uasm_i_jr(p, tmp);
1869	uasm_i_rfe(p); /* branch delay */
1870	uasm_l_r3000_write_probe_fail(l, *p);
1871	uasm_i_tlbwr(p); /* cp0 delay */
1872	uasm_i_jr(p, tmp);
1873	uasm_i_rfe(p); /* branch delay */
1874}
1875
1876static void
1877build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte,
1878				   unsigned int ptr)
1879{
1880	long pgdc = (long)pgd_current;
1881
1882	uasm_i_mfc0(p, pte, C0_BADVADDR);
1883	uasm_i_lui(p, ptr, uasm_rel_hi(pgdc)); /* cp0 delay */
1884	uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
1885	uasm_i_srl(p, pte, pte, 22); /* load delay */
1886	uasm_i_sll(p, pte, pte, 2);
1887	uasm_i_addu(p, ptr, ptr, pte);
1888	uasm_i_mfc0(p, pte, C0_CONTEXT);
1889	uasm_i_lw(p, ptr, 0, ptr); /* cp0 delay */
1890	uasm_i_andi(p, pte, pte, 0xffc); /* load delay */
1891	uasm_i_addu(p, ptr, ptr, pte);
1892	uasm_i_lw(p, pte, 0, ptr);
1893	uasm_i_tlbp(p); /* load delay */
1894}
1895
1896static void build_r3000_tlb_load_handler(void)
1897{
1898	u32 *p = (u32 *)handle_tlbl;
1899	struct uasm_label *l = labels;
1900	struct uasm_reloc *r = relocs;
1901
1902	memset(p, 0, handle_tlbl_end - (char *)p);
1903	memset(labels, 0, sizeof(labels));
1904	memset(relocs, 0, sizeof(relocs));
1905
1906	build_r3000_tlbchange_handler_head(&p, K0, K1);
1907	build_pte_present(&p, &r, K0, K1, -1, label_nopage_tlbl);
1908	uasm_i_nop(&p); /* load delay */
1909	build_make_valid(&p, &r, K0, K1, -1);
1910	build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);
1911
1912	uasm_l_nopage_tlbl(&l, p);
1913	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
1914	uasm_i_nop(&p);
1915
1916	if (p >= (u32 *)handle_tlbl_end)
1917		panic("TLB load handler fastpath space exceeded");
1918
1919	uasm_resolve_relocs(relocs, labels);
1920	pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
1921		 (unsigned int)(p - (u32 *)handle_tlbl));
1922
1923	dump_handler("r3000_tlb_load", handle_tlbl, handle_tlbl_end);
1924}
1925
1926static void build_r3000_tlb_store_handler(void)
1927{
1928	u32 *p = (u32 *)handle_tlbs;
1929	struct uasm_label *l = labels;
1930	struct uasm_reloc *r = relocs;
1931
1932	memset(p, 0, handle_tlbs_end - (char *)p);
1933	memset(labels, 0, sizeof(labels));
1934	memset(relocs, 0, sizeof(relocs));
1935
1936	build_r3000_tlbchange_handler_head(&p, K0, K1);
1937	build_pte_writable(&p, &r, K0, K1, -1, label_nopage_tlbs);
1938	uasm_i_nop(&p); /* load delay */
1939	build_make_write(&p, &r, K0, K1, -1);
1940	build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);
1941
1942	uasm_l_nopage_tlbs(&l, p);
1943	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
1944	uasm_i_nop(&p);
1945
1946	if (p >= (u32 *)handle_tlbs_end)
1947		panic("TLB store handler fastpath space exceeded");
1948
1949	uasm_resolve_relocs(relocs, labels);
1950	pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
1951		 (unsigned int)(p - (u32 *)handle_tlbs));
1952
1953	dump_handler("r3000_tlb_store", handle_tlbs, handle_tlbs_end);
1954}
1955
1956static void build_r3000_tlb_modify_handler(void)
1957{
1958	u32 *p = (u32 *)handle_tlbm;
1959	struct uasm_label *l = labels;
1960	struct uasm_reloc *r = relocs;
1961
1962	memset(p, 0, handle_tlbm_end - (char *)p);
1963	memset(labels, 0, sizeof(labels));
1964	memset(relocs, 0, sizeof(relocs));
1965
1966	build_r3000_tlbchange_handler_head(&p, K0, K1);
1967	build_pte_modifiable(&p, &r, K0, K1,  -1, label_nopage_tlbm);
1968	uasm_i_nop(&p); /* load delay */
1969	build_make_write(&p, &r, K0, K1, -1);
1970	build_r3000_pte_reload_tlbwi(&p, K0, K1);
1971
1972	uasm_l_nopage_tlbm(&l, p);
1973	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
1974	uasm_i_nop(&p);
1975
1976	if (p >= (u32 *)handle_tlbm_end)
1977		panic("TLB modify handler fastpath space exceeded");
1978
1979	uasm_resolve_relocs(relocs, labels);
1980	pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
1981		 (unsigned int)(p - (u32 *)handle_tlbm));
1982
1983	dump_handler("r3000_tlb_modify", handle_tlbm, handle_tlbm_end);
1984}
1985#endif /* CONFIG_MIPS_PGD_C0_CONTEXT */
1986
1987static bool cpu_has_tlbex_tlbp_race(void)
1988{
1989	/*
1990	 * When a Hardware Table Walker is running it can replace TLB entries
1991	 * at any time, leading to a race between it & the CPU.
1992	 */
1993	if (cpu_has_htw)
1994		return true;
1995
1996	/*
1997	 * If the CPU shares FTLB RAM with its siblings then our entry may be
1998	 * replaced at any time by a sibling performing a write to the FTLB.
1999	 */
2000	if (cpu_has_shared_ftlb_ram)
2001		return true;
2002
2003	/* In all other cases there ought to be no race condition to handle */
2004	return false;
2005}
2006
2007/*
2008 * R4000 style TLB load/store/modify handlers.
2009 */
2010static struct work_registers
2011build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
2012				   struct uasm_reloc **r)
2013{
2014	struct work_registers wr = build_get_work_registers(p);
2015
2016#ifdef CONFIG_64BIT
2017	build_get_pmde64(p, l, r, wr.r1, wr.r2); /* get pmd in ptr */
2018#else
2019	build_get_pgde32(p, wr.r1, wr.r2); /* get pgd in ptr */
2020#endif
2021
2022#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
2023	/*
2024	 * For huge tlb entries, pmd doesn't contain an address but
2025	 * instead contains the tlb pte. Check the PAGE_HUGE bit and
2026	 * see if we need to jump to huge tlb processing.
2027	 */
2028	build_is_huge_pte(p, r, wr.r1, wr.r2, label_tlb_huge_update);
2029#endif
2030
2031	UASM_i_MFC0(p, wr.r1, C0_BADVADDR);
2032	UASM_i_LW(p, wr.r2, 0, wr.r2);
2033	UASM_i_SRL(p, wr.r1, wr.r1, PAGE_SHIFT - PTE_T_LOG2);
2034	uasm_i_andi(p, wr.r1, wr.r1, (PTRS_PER_PTE - 1) << PTE_T_LOG2);
2035	UASM_i_ADDU(p, wr.r2, wr.r2, wr.r1);
2036
2037#ifdef CONFIG_SMP
2038	uasm_l_smp_pgtable_change(l, *p);
2039#endif
2040	iPTE_LW(p, wr.r1, wr.r2); /* get even pte */
2041	if (!m4kc_tlbp_war()) {
2042		build_tlb_probe_entry(p);
2043		if (cpu_has_tlbex_tlbp_race()) {
2044			/* If the entry was replaced under us (probe failed), just leave */
2045			uasm_i_ehb(p);
2046			uasm_i_mfc0(p, wr.r3, C0_INDEX);
2047			uasm_il_bltz(p, r, wr.r3, label_leave);
2048			uasm_i_nop(p);
2049		}
2050	}
2051	return wr;
2052}
2053
2054static void
2055build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
2056				   struct uasm_reloc **r, unsigned int tmp,
2057				   unsigned int ptr)
2058{
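	/*
	 * The ori/xori pair below first sets and then clears the
	 * sizeof(pte_t) bit in ptr, i.e. it rounds ptr down to the even
	 * PTE of the even/odd pair regardless of which half faulted,
	 * without needing an extra mask register.
	 */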
2059	uasm_i_ori(p, ptr, ptr, sizeof(pte_t));
2060	uasm_i_xori(p, ptr, ptr, sizeof(pte_t));
2061	build_update_entries(p, tmp, ptr);
2062	build_tlb_write_entry(p, l, r, tlb_indexed);
2063	uasm_l_leave(l, *p);
2064	build_restore_work_registers(p);
2065	uasm_i_eret(p); /* return from trap */
2066
2067#ifdef CONFIG_64BIT
2068	build_get_pgd_vmalloc64(p, l, r, tmp, ptr, not_refill);
2069#endif
2070}
2071
2072static void build_r4000_tlb_load_handler(void)
2073{
2074	u32 *p = (u32 *)msk_isa16_mode((ulong)handle_tlbl);
2075	struct uasm_label *l = labels;
2076	struct uasm_reloc *r = relocs;
2077	struct work_registers wr;
2078
2079	memset(p, 0, handle_tlbl_end - (char *)p);
2080	memset(labels, 0, sizeof(labels));
2081	memset(relocs, 0, sizeof(relocs));
2082
2083	if (bcm1250_m3_war()) {
2084		unsigned int segbits = 44;
2085
2086		uasm_i_dmfc0(&p, K0, C0_BADVADDR);
2087		uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
2088		uasm_i_xor(&p, K0, K0, K1);
2089		uasm_i_dsrl_safe(&p, K1, K0, 62);
2090		uasm_i_dsrl_safe(&p, K0, K0, 12 + 1);
2091		uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits);
2092		uasm_i_or(&p, K0, K0, K1);
2093		uasm_il_bnez(&p, &r, K0, label_leave);
2094		/* No need for uasm_i_nop */
2095	}
2096
2097	wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
2098	build_pte_present(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbl);
2099	if (m4kc_tlbp_war())
2100		build_tlb_probe_entry(&p);
2101
2102	if (cpu_has_rixi && !cpu_has_rixiex) {
2103		/*
2104		 * If the page is not _PAGE_VALID, RI or XI could not
2105		 * have triggered it.  Skip the expensive test.
2106		 */
2107		if (use_bbit_insns()) {
2108			uasm_il_bbit0(&p, &r, wr.r1, ilog2(_PAGE_VALID),
2109				      label_tlbl_goaround1);
2110		} else {
2111			uasm_i_andi(&p, wr.r3, wr.r1, _PAGE_VALID);
2112			uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround1);
2113		}
2114		uasm_i_nop(&p);
2115
2116		/*
2117		 * Warn if something may race with us & replace the TLB entry
2118		 * before we read it here. Everything with such races should
2119		 * also have dedicated RiXi exception handlers, so this
2120		 * shouldn't be hit.
2121		 */
2122		WARN(cpu_has_tlbex_tlbp_race(), "Unhandled race in RiXi path");
2123
2124		uasm_i_tlbr(&p);
2125
2126		switch (current_cpu_type()) {
2127		case CPU_CAVIUM_OCTEON:
2128		case CPU_CAVIUM_OCTEON_PLUS:
2129		case CPU_CAVIUM_OCTEON2:
2130			break;
2131		default:
2132			if (cpu_has_mips_r2_exec_hazard)
2133				uasm_i_ehb(&p);
2134			break;
2135		}
2136
2137		/* Examine  entrylo 0 or 1 based on ptr. */
2138		if (use_bbit_insns()) {
2139			uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8);
2140		} else {
2141			uasm_i_andi(&p, wr.r3, wr.r2, sizeof(pte_t));
2142			uasm_i_beqz(&p, wr.r3, 8);
2143		}
2144		/* load it in the delay slot */
2145		UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO0);
2146		/* load it if ptr is odd */
2147		UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO1);
2148		/*
2149		 * If the entryLo (now in wr.r3) is valid (bit 1), RI or
2150		 * XI must have triggered it.
2151		 */
2152		if (use_bbit_insns()) {
2153			uasm_il_bbit1(&p, &r, wr.r3, 1, label_nopage_tlbl);
2154			uasm_i_nop(&p);
2155			uasm_l_tlbl_goaround1(&l, p);
2156		} else {
2157			uasm_i_andi(&p, wr.r3, wr.r3, 2);
2158			uasm_il_bnez(&p, &r, wr.r3, label_nopage_tlbl);
2159			uasm_i_nop(&p);
2160		}
2161		uasm_l_tlbl_goaround1(&l, p);
2162	}
2163	build_make_valid(&p, &r, wr.r1, wr.r2, wr.r3);
2164	build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
2165
2166#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
2167	/*
2168	 * This is the entry point when build_r4000_tlbchange_handler_head
2169	 * spots a huge page.
2170	 */
2171	uasm_l_tlb_huge_update(&l, p);
2172	iPTE_LW(&p, wr.r1, wr.r2);
2173	build_pte_present(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbl);
2174	build_tlb_probe_entry(&p);
2175
2176	if (cpu_has_rixi && !cpu_has_rixiex) {
2177		/*
2178		 * If the page is not _PAGE_VALID, RI or XI could not
2179		 * have triggered it.  Skip the expensive test.
2180		 */
2181		if (use_bbit_insns()) {
2182			uasm_il_bbit0(&p, &r, wr.r1, ilog2(_PAGE_VALID),
2183				      label_tlbl_goaround2);
2184		} else {
2185			uasm_i_andi(&p, wr.r3, wr.r1, _PAGE_VALID);
2186			uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround2);
2187		}
2188		uasm_i_nop(&p);
2189
2190		/*
2191		 * Warn if something may race with us & replace the TLB entry
2192		 * before we read it here. Everything with such races should
2193		 * also have dedicated RiXi exception handlers, so this
2194		 * shouldn't be hit.
2195		 */
2196		WARN(cpu_has_tlbex_tlbp_race(), "Unhandled race in RiXi path");
2197
2198		uasm_i_tlbr(&p);
2199
2200		switch (current_cpu_type()) {
2201		case CPU_CAVIUM_OCTEON:
2202		case CPU_CAVIUM_OCTEON_PLUS:
2203		case CPU_CAVIUM_OCTEON2:
2204			break;
2205		default:
2206			if (cpu_has_mips_r2_exec_hazard)
2207				uasm_i_ehb(&p);
2208			break;
2209		}
2210
2211		/* Examine  entrylo 0 or 1 based on ptr. */
2212		if (use_bbit_insns()) {
2213			uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8);
2214		} else {
2215			uasm_i_andi(&p, wr.r3, wr.r2, sizeof(pte_t));
2216			uasm_i_beqz(&p, wr.r3, 8);
2217		}
2218		/* load it in the delay slot */
2219		UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO0);
2220		/* load it if ptr is odd */
2221		UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO1);
2222		/*
2223		 * If the entryLo (now in wr.r3) is valid (bit 1), RI or
2224		 * XI must have triggered it.
2225		 */
2226		if (use_bbit_insns()) {
2227			uasm_il_bbit0(&p, &r, wr.r3, 1, label_tlbl_goaround2);
2228		} else {
2229			uasm_i_andi(&p, wr.r3, wr.r3, 2);
2230			uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround2);
2231		}
2232		if (PM_DEFAULT_MASK == 0)
2233			uasm_i_nop(&p);
2234		/*
2235		 * We clobbered C0_PAGEMASK, restore it.  On the other branch
2236		 * it is restored in build_huge_tlb_write_entry.
2237		 */
2238		build_restore_pagemask(&p, &r, wr.r3, label_nopage_tlbl, 0);
2239
2240		uasm_l_tlbl_goaround2(&l, p);
2241	}
2242	uasm_i_ori(&p, wr.r1, wr.r1, (_PAGE_ACCESSED | _PAGE_VALID));
2243	build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 1);
2244#endif
2245
2246	uasm_l_nopage_tlbl(&l, p);
2247	if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS))
2248		uasm_i_sync(&p, 0);
2249	build_restore_work_registers(&p);
2250#ifdef CONFIG_CPU_MICROMIPS
2251	if ((unsigned long)tlb_do_page_fault_0 & 1) {
2252		uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_0));
2253		uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_0));
2254		uasm_i_jr(&p, K0);
2255	} else
2256#endif
2257	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
2258	uasm_i_nop(&p);
2259
2260	if (p >= (u32 *)handle_tlbl_end)
2261		panic("TLB load handler fastpath space exceeded");
2262
2263	uasm_resolve_relocs(relocs, labels);
2264	pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
2265		 (unsigned int)(p - (u32 *)handle_tlbl));
2266
2267	dump_handler("r4000_tlb_load", handle_tlbl, handle_tlbl_end);
2268}
2269
2270static void build_r4000_tlb_store_handler(void)
2271{
2272	u32 *p = (u32 *)msk_isa16_mode((ulong)handle_tlbs);
2273	struct uasm_label *l = labels;
2274	struct uasm_reloc *r = relocs;
2275	struct work_registers wr;
2276
2277	memset(p, 0, handle_tlbs_end - (char *)p);
2278	memset(labels, 0, sizeof(labels));
2279	memset(relocs, 0, sizeof(relocs));
2280
2281	wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
2282	build_pte_writable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbs);
2283	if (m4kc_tlbp_war())
2284		build_tlb_probe_entry(&p);
2285	build_make_write(&p, &r, wr.r1, wr.r2, wr.r3);
2286	build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
2287
2288#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
2289	/*
2290	 * This is the entry point when
2291	 * build_r4000_tlbchange_handler_head spots a huge page.
2292	 */
2293	uasm_l_tlb_huge_update(&l, p);
2294	iPTE_LW(&p, wr.r1, wr.r2);
2295	build_pte_writable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbs);
2296	build_tlb_probe_entry(&p);
2297	uasm_i_ori(&p, wr.r1, wr.r1,
2298		   _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
2299	build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 1);
2300#endif
2301
2302	uasm_l_nopage_tlbs(&l, p);
2303	if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS))
2304		uasm_i_sync(&p, 0);
2305	build_restore_work_registers(&p);
2306#ifdef CONFIG_CPU_MICROMIPS
2307	if ((unsigned long)tlb_do_page_fault_1 & 1) {
2308		uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_1));
2309		uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_1));
2310		uasm_i_jr(&p, K0);
2311	} else
2312#endif
2313	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
2314	uasm_i_nop(&p);
2315
2316	if (p >= (u32 *)handle_tlbs_end)
2317		panic("TLB store handler fastpath space exceeded");
2318
2319	uasm_resolve_relocs(relocs, labels);
2320	pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
2321		 (unsigned int)(p - (u32 *)handle_tlbs));
2322
2323	dump_handler("r4000_tlb_store", handle_tlbs, handle_tlbs_end);
2324}
2325
2326static void build_r4000_tlb_modify_handler(void)
2327{
2328	u32 *p = (u32 *)msk_isa16_mode((ulong)handle_tlbm);
2329	struct uasm_label *l = labels;
2330	struct uasm_reloc *r = relocs;
2331	struct work_registers wr;
2332
2333	memset(p, 0, handle_tlbm_end - (char *)p);
2334	memset(labels, 0, sizeof(labels));
2335	memset(relocs, 0, sizeof(relocs));
2336
2337	wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
2338	build_pte_modifiable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbm);
2339	if (m4kc_tlbp_war())
2340		build_tlb_probe_entry(&p);
2341	/* Present and writable bits set, set accessed and dirty bits. */
2342	build_make_write(&p, &r, wr.r1, wr.r2, wr.r3);
2343	build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
2344
2345#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
2346	/*
2347	 * This is the entry point when
2348	 * build_r4000_tlbchange_handler_head spots a huge page.
2349	 */
2350	uasm_l_tlb_huge_update(&l, p);
2351	iPTE_LW(&p, wr.r1, wr.r2);
2352	build_pte_modifiable(&p, &r, wr.r1, wr.r2,  wr.r3, label_nopage_tlbm);
2353	build_tlb_probe_entry(&p);
2354	uasm_i_ori(&p, wr.r1, wr.r1,
2355		   _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
2356	build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 0);
2357#endif
2358
2359	uasm_l_nopage_tlbm(&l, p);
2360	if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS))
2361		uasm_i_sync(&p, 0);
2362	build_restore_work_registers(&p);
2363#ifdef CONFIG_CPU_MICROMIPS
2364	if ((unsigned long)tlb_do_page_fault_1 & 1) {
2365		uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_1));
2366		uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_1));
2367		uasm_i_jr(&p, K0);
2368	} else
2369#endif
2370	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
2371	uasm_i_nop(&p);
2372
2373	if (p >= (u32 *)handle_tlbm_end)
2374		panic("TLB modify handler fastpath space exceeded");
2375
2376	uasm_resolve_relocs(relocs, labels);
2377	pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
2378		 (unsigned int)(p - (u32 *)handle_tlbm));
2379
2380	dump_handler("r4000_tlb_modify", handle_tlbm, handle_tlbm_end);
2381}
2382
2383static void flush_tlb_handlers(void)
2384{
2385	local_flush_icache_range((unsigned long)handle_tlbl,
2386			   (unsigned long)handle_tlbl_end);
2387	local_flush_icache_range((unsigned long)handle_tlbs,
2388			   (unsigned long)handle_tlbs_end);
2389	local_flush_icache_range((unsigned long)handle_tlbm,
2390			   (unsigned long)handle_tlbm_end);
2391	local_flush_icache_range((unsigned long)tlbmiss_handler_setup_pgd,
2392			   (unsigned long)tlbmiss_handler_setup_pgd_end);
2393}
2394
2395static void print_htw_config(void)
2396{
2397	unsigned long config;
2398	unsigned int pwctl;
2399	const int field = 2 * sizeof(unsigned long);
2400
2401	config = read_c0_pwfield();
2402	pr_debug("PWField (0x%0*lx): GDI: 0x%02lx  UDI: 0x%02lx  MDI: 0x%02lx  PTI: 0x%02lx  PTEI: 0x%02lx\n",
2403		field, config,
2404		(config & MIPS_PWFIELD_GDI_MASK) >> MIPS_PWFIELD_GDI_SHIFT,
2405		(config & MIPS_PWFIELD_UDI_MASK) >> MIPS_PWFIELD_UDI_SHIFT,
2406		(config & MIPS_PWFIELD_MDI_MASK) >> MIPS_PWFIELD_MDI_SHIFT,
2407		(config & MIPS_PWFIELD_PTI_MASK) >> MIPS_PWFIELD_PTI_SHIFT,
2408		(config & MIPS_PWFIELD_PTEI_MASK) >> MIPS_PWFIELD_PTEI_SHIFT);
2409
2410	config = read_c0_pwsize();
2411	pr_debug("PWSize  (0x%0*lx): PS: 0x%lx  GDW: 0x%02lx  UDW: 0x%02lx  MDW: 0x%02lx  PTW: 0x%02lx  PTEW: 0x%02lx\n",
2412		field, config,
2413		(config & MIPS_PWSIZE_PS_MASK) >> MIPS_PWSIZE_PS_SHIFT,
2414		(config & MIPS_PWSIZE_GDW_MASK) >> MIPS_PWSIZE_GDW_SHIFT,
2415		(config & MIPS_PWSIZE_UDW_MASK) >> MIPS_PWSIZE_UDW_SHIFT,
2416		(config & MIPS_PWSIZE_MDW_MASK) >> MIPS_PWSIZE_MDW_SHIFT,
2417		(config & MIPS_PWSIZE_PTW_MASK) >> MIPS_PWSIZE_PTW_SHIFT,
2418		(config & MIPS_PWSIZE_PTEW_MASK) >> MIPS_PWSIZE_PTEW_SHIFT);
2419
2420	pwctl = read_c0_pwctl();
2421	pr_debug("PWCtl   (0x%x): PWEn: 0x%x  XK: 0x%x  XS: 0x%x  XU: 0x%x  DPH: 0x%x  HugePg: 0x%x  Psn: 0x%x\n",
2422		pwctl,
2423		(pwctl & MIPS_PWCTL_PWEN_MASK) >> MIPS_PWCTL_PWEN_SHIFT,
2424		(pwctl & MIPS_PWCTL_XK_MASK) >> MIPS_PWCTL_XK_SHIFT,
2425		(pwctl & MIPS_PWCTL_XS_MASK) >> MIPS_PWCTL_XS_SHIFT,
2426		(pwctl & MIPS_PWCTL_XU_MASK) >> MIPS_PWCTL_XU_SHIFT,
2427		(pwctl & MIPS_PWCTL_DPH_MASK) >> MIPS_PWCTL_DPH_SHIFT,
2428		(pwctl & MIPS_PWCTL_HUGEPG_MASK) >> MIPS_PWCTL_HUGEPG_SHIFT,
2429		(pwctl & MIPS_PWCTL_PSN_MASK) >> MIPS_PWCTL_PSN_SHIFT);
2430}
2431
2432static void config_htw_params(void)
2433{
2434	unsigned long pwfield, pwsize, ptei;
2435	unsigned int config;
2436
2437	/*
2438	 * We are using 2-level page tables, so we only need to
2439	 * setup GDW and PTW appropriately. UDW and MDW will remain 0.
2440	 * The default value of GDI/UDI/MDI/PTI is 0xc. It is illegal to
2441	 * write values less than 0xc in these fields because the entire
2442	 * write will be dropped. As a result, we must preserve
2443	 * the original reset values and overwrite only what we really want.
2444	 */
2445
2446	pwfield = read_c0_pwfield();
2447	/* re-initialize the GDI field */
2448	pwfield &= ~MIPS_PWFIELD_GDI_MASK;
2449	pwfield |= PGDIR_SHIFT << MIPS_PWFIELD_GDI_SHIFT;
2450	/* re-initialize the PTI field including the even/odd bit */
2451	pwfield &= ~MIPS_PWFIELD_PTI_MASK;
2452	pwfield |= PAGE_SHIFT << MIPS_PWFIELD_PTI_SHIFT;
2453	if (CONFIG_PGTABLE_LEVELS >= 3) {
2454		pwfield &= ~MIPS_PWFIELD_MDI_MASK;
2455		pwfield |= PMD_SHIFT << MIPS_PWFIELD_MDI_SHIFT;
2456	}
2457	/* Set the PTEI right shift */
2458	ptei = _PAGE_GLOBAL_SHIFT << MIPS_PWFIELD_PTEI_SHIFT;
2459	pwfield |= ptei;
2460	write_c0_pwfield(pwfield);
2461	/* Check whether the PTEI value is supported */
2462	back_to_back_c0_hazard();
2463	pwfield = read_c0_pwfield();
2464	if (((pwfield & MIPS_PWFIELD_PTEI_MASK) << MIPS_PWFIELD_PTEI_SHIFT)
2465		!= ptei) {
2466		pr_warn("Unsupported PTEI field value: 0x%lx. HTW will not be enabled",
2467			ptei);
2468		/*
2469		 * Drop option to avoid HTW being enabled via another path
2470		 * (eg htw_reset())
2471		 */
2472		current_cpu_data.options &= ~MIPS_CPU_HTW;
2473		return;
2474	}
2475
2476	pwsize = ilog2(PTRS_PER_PGD) << MIPS_PWSIZE_GDW_SHIFT;
2477	pwsize |= ilog2(PTRS_PER_PTE) << MIPS_PWSIZE_PTW_SHIFT;
2478	if (CONFIG_PGTABLE_LEVELS >= 3)
2479		pwsize |= ilog2(PTRS_PER_PMD) << MIPS_PWSIZE_MDW_SHIFT;
2480
2481	/* Set pointer size to size of directory pointers */
2482	if (IS_ENABLED(CONFIG_64BIT))
2483		pwsize |= MIPS_PWSIZE_PS_MASK;
2484	/* PTEs may be multiple pointers long (e.g. with XPA) */
2485	pwsize |= ((PTE_T_LOG2 - PGD_T_LOG2) << MIPS_PWSIZE_PTEW_SHIFT)
2486			& MIPS_PWSIZE_PTEW_MASK;
2487
2488	write_c0_pwsize(pwsize);
2489
2490	/* Make sure everything is set before we enable the HTW */
2491	back_to_back_c0_hazard();
2492
2493	/*
2494	 * Enable HTW (and only for XUSeg on 64-bit), and disable the rest of
2495	 * the pwctl fields.
2496	 */
2497	config = 1 << MIPS_PWCTL_PWEN_SHIFT;
2498	if (IS_ENABLED(CONFIG_64BIT))
2499		config |= MIPS_PWCTL_XU_MASK;
2500	write_c0_pwctl(config);
2501	pr_info("Hardware Page Table Walker enabled\n");
2502
2503	print_htw_config();
2504}
2505
2506static void config_xpa_params(void)
2507{
2508#ifdef CONFIG_XPA
2509	unsigned int pagegrain;
2510
2511	if (mips_xpa_disabled) {
2512		pr_info("Extended Physical Addressing (XPA) disabled\n");
2513		return;
2514	}
2515
2516	pagegrain = read_c0_pagegrain();
2517	write_c0_pagegrain(pagegrain | PG_ELPA);
2518	back_to_back_c0_hazard();
2519	pagegrain = read_c0_pagegrain();
2520
2521	if (pagegrain & PG_ELPA)
2522		pr_info("Extended Physical Addressing (XPA) enabled\n");
2523	else
2524		panic("Extended Physical Addressing (XPA) disabled");
2525#endif
2526}
2527
2528static void check_pabits(void)
2529{
2530	unsigned long entry;
2531	unsigned pabits, fillbits;
2532
2533	if (!cpu_has_rixi || _PAGE_NO_EXEC == 0) {
2534		/*
2535		 * We'll only be making use of the fact that we can rotate bits
2536		 * into the fill if the CPU supports RIXI, so don't bother
2537		 * probing this for CPUs which don't.
2538		 */
2539		return;
2540	}
2541
2542	write_c0_entrylo0(~0ul);
2543	back_to_back_c0_hazard();
2544	entry = read_c0_entrylo0();
2545
2546	/* clear all non-PFN bits */
2547	entry &= ~((1 << MIPS_ENTRYLO_PFN_SHIFT) - 1);
2548	entry &= ~(MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI);
2549
2550	/* find a lower bound on PABITS, and upper bound on fill bits */
2551	pabits = fls_long(entry) + 6;
2552	fillbits = max_t(int, (int)BITS_PER_LONG - pabits, 0);
2553
2554	/* minus the RI & XI bits */
2555	fillbits -= min_t(unsigned, fillbits, 2);
2556
2557	if (fillbits >= ilog2(_PAGE_NO_EXEC))
2558		fill_includes_sw_bits = true;
2559
2560	pr_debug("Entry* registers contain %u fill bits\n", fillbits);
2561}
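
/*
 * Worked example for check_pabits() above (figures illustrative): on a
 * 64-bit CPU whose writable PFN bits read back up to bit 41,
 * pabits = 42 + 6 = 48 and fillbits = 64 - 48 - 2 = 14.  If the
 * software-only PTE bits below _PAGE_NO_EXEC all fit in those fill
 * bits (fillbits >= ilog2(_PAGE_NO_EXEC)), fill_includes_sw_bits is
 * set and build_convert_pte_to_entrylo() can use the single-ROTR form.
 */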
2562
2563void build_tlb_refill_handler(void)
2564{
2565	/*
2566	 * The refill handler is generated per-CPU; multi-node systems
2567	 * may have local storage for it. The other handlers are only
2568	 * needed once.
2569	 */
2570	static int run_once = 0;
2571
2572	if (IS_ENABLED(CONFIG_XPA) && !cpu_has_rixi)
2573		panic("Kernels supporting XPA currently require CPUs with RIXI");
2574
2575	output_pgtable_bits_defines();
2576	check_pabits();
2577
2578#ifdef CONFIG_64BIT
2579	check_for_high_segbits = current_cpu_data.vmbits > (PGDIR_SHIFT + PGD_TABLE_ORDER + PAGE_SHIFT - 3);
2580#endif
2581
2582	if (cpu_has_3kex) {
2583#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
2584		if (!run_once) {
2585			build_setup_pgd();
2586			build_r3000_tlb_refill_handler();
2587			build_r3000_tlb_load_handler();
2588			build_r3000_tlb_store_handler();
2589			build_r3000_tlb_modify_handler();
2590			flush_tlb_handlers();
2591			run_once++;
2592		}
2593#else
2594		panic("No R3000 TLB refill handler");
2595#endif
2596		return;
2597	}
2598
2599	if (cpu_has_ldpte)
2600		setup_pw();
2601
2602	if (!run_once) {
2603		scratch_reg = allocate_kscratch();
2604		build_setup_pgd();
2605		build_r4000_tlb_load_handler();
2606		build_r4000_tlb_store_handler();
2607		build_r4000_tlb_modify_handler();
2608		if (cpu_has_ldpte)
2609			build_loongson3_tlb_refill_handler();
2610		else
2611			build_r4000_tlb_refill_handler();
2612		flush_tlb_handlers();
2613		run_once++;
2614	}
2615	if (cpu_has_xpa)
2616		config_xpa_params();
2617	if (cpu_has_htw)
2618		config_htw_params();
2619}
v6.9.4
   1/*
   2 * This file is subject to the terms and conditions of the GNU General Public
   3 * License.  See the file "COPYING" in the main directory of this archive
   4 * for more details.
   5 *
   6 * Synthesize TLB refill handlers at runtime.
   7 *
   8 * Copyright (C) 2004, 2005, 2006, 2008	 Thiemo Seufer
   9 * Copyright (C) 2005, 2007, 2008, 2009	 Maciej W. Rozycki
  10 * Copyright (C) 2006  Ralf Baechle (ralf@linux-mips.org)
  11 * Copyright (C) 2008, 2009 Cavium Networks, Inc.
  12 * Copyright (C) 2011  MIPS Technologies, Inc.
  13 *
  14 * ... and the days got worse and worse and now you see
  15 * I've gone completely out of my mind.
  16 *
  17 * They're coming to take me a away haha
  18 * they're coming to take me a away hoho hihi haha
  19 * to the funny farm where code is beautiful all the time ...
  20 *
  21 * (Condolences to Napoleon XIV)
  22 */
  23
  24#include <linux/bug.h>
  25#include <linux/export.h>
  26#include <linux/kernel.h>
  27#include <linux/types.h>
  28#include <linux/smp.h>
  29#include <linux/string.h>
  30#include <linux/cache.h>
  31#include <linux/pgtable.h>
  32
  33#include <asm/cacheflush.h>
  34#include <asm/cpu-type.h>
  35#include <asm/mipsregs.h>
  36#include <asm/mmu_context.h>
  37#include <asm/regdef.h>
  38#include <asm/uasm.h>
  39#include <asm/setup.h>
  40#include <asm/tlbex.h>
  41
  42static int mips_xpa_disabled;
  43
  44static int __init xpa_disable(char *s)
  45{
  46	mips_xpa_disabled = 1;
  47
  48	return 1;
  49}
  50
  51__setup("noxpa", xpa_disable);
  52
  53/*
  54 * TLB load/store/modify handlers.
  55 *
  56 * Only the fastpath gets synthesized at runtime, the slowpath for
  57 * do_page_fault remains normal asm.
  58 */
  59extern void tlb_do_page_fault_0(void);
  60extern void tlb_do_page_fault_1(void);
  61
  62struct work_registers {
  63	int r1;
  64	int r2;
  65	int r3;
  66};
  67
  68struct tlb_reg_save {
  69	unsigned long a;
  70	unsigned long b;
  71} ____cacheline_aligned_in_smp;
  72
  73static struct tlb_reg_save handler_reg_save[NR_CPUS];
  74
  75static inline int r45k_bvahwbug(void)
  76{
  77	/* XXX: We should probe for the presence of this bug, but we don't. */
  78	return 0;
  79}
  80
  81static inline int r4k_250MHZhwbug(void)
  82{
  83	/* XXX: We should probe for the presence of this bug, but we don't. */
  84	return 0;
  85}
  86
  87extern int sb1250_m3_workaround_needed(void);
  88
  89static inline int __maybe_unused bcm1250_m3_war(void)
  90{
  91	if (IS_ENABLED(CONFIG_SB1_PASS_2_WORKAROUNDS))
  92		return sb1250_m3_workaround_needed();
  93	return 0;
  94}
  95
  96static inline int __maybe_unused r10000_llsc_war(void)
  97{
  98	return IS_ENABLED(CONFIG_WAR_R10000_LLSC);
  99}
 100
 101static int use_bbit_insns(void)
 102{
 103	switch (current_cpu_type()) {
 104	case CPU_CAVIUM_OCTEON:
 105	case CPU_CAVIUM_OCTEON_PLUS:
 106	case CPU_CAVIUM_OCTEON2:
 107	case CPU_CAVIUM_OCTEON3:
 108		return 1;
 109	default:
 110		return 0;
 111	}
 112}
 113
 114static int use_lwx_insns(void)
 115{
 116	switch (current_cpu_type()) {
 117	case CPU_CAVIUM_OCTEON2:
 118	case CPU_CAVIUM_OCTEON3:
 119		return 1;
 120	default:
 121		return 0;
 122	}
 123}
 124#if defined(CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE) && \
 125    CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
 126static bool scratchpad_available(void)
 127{
 128	return true;
 129}
 130static int scratchpad_offset(int i)
 131{
 132	/*
 133	 * CVMSEG starts at address -32768 and extends for
 134	 * CAVIUM_OCTEON_CVMSEG_SIZE 128 byte cache lines.
 135	 */
 136	i += 1; /* Kernel use starts at the top and works down. */
 137	return CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE * 128 - (8 * i) - 32768;
 138}
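
/*
 * For example (illustrative figures only): with
 * CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE == 2 the CVMSEG region spans
 * [-32768, -32512), and scratchpad_offset(0) evaluates to
 * 2 * 128 - 8 - 32768 == -32520, i.e. the topmost 8-byte slot of the
 * region, with successive indices handed out downwards.
 */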
 139#else
 140static bool scratchpad_available(void)
 141{
 142	return false;
 143}
 144static int scratchpad_offset(int i)
 145{
 146	BUG();
 147	/* Really unreachable, but evidently some GCC versions want this. */
 148	return 0;
 149}
 150#endif
 151/*
 152 * Found by experiment: At least some revisions of the 4kc throw under
 153 * some circumstances a machine check exception, triggered by invalid
 154 * values in the index register.  Delaying the tlbp instruction until
 155 * after the next branch,  plus adding an additional nop in front of
 156 * tlbwi/tlbwr avoids the invalid index register values. Nobody knows
 157 * why; it's not an issue caused by the core RTL.
 158 *
 159 */
 160static int m4kc_tlbp_war(void)
 161{
 162	return current_cpu_type() == CPU_4KC;
 163}
 164
 165/* Handle labels (which must be positive integers). */
 166enum label_id {
 167	label_second_part = 1,
 168	label_leave,
 169	label_vmalloc,
 170	label_vmalloc_done,
 171	label_tlbw_hazard_0,
 172	label_split = label_tlbw_hazard_0 + 8,
 173	label_tlbl_goaround1,
 174	label_tlbl_goaround2,
 175	label_nopage_tlbl,
 176	label_nopage_tlbs,
 177	label_nopage_tlbm,
 178	label_smp_pgtable_change,
 179	label_r3000_write_probe_fail,
 180	label_large_segbits_fault,
 181#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
 182	label_tlb_huge_update,
 183#endif
 184};
 185
 186UASM_L_LA(_second_part)
 187UASM_L_LA(_leave)
 188UASM_L_LA(_vmalloc)
 189UASM_L_LA(_vmalloc_done)
 190/* _tlbw_hazard_x is handled differently.  */
 191UASM_L_LA(_split)
 192UASM_L_LA(_tlbl_goaround1)
 193UASM_L_LA(_tlbl_goaround2)
 194UASM_L_LA(_nopage_tlbl)
 195UASM_L_LA(_nopage_tlbs)
 196UASM_L_LA(_nopage_tlbm)
 197UASM_L_LA(_smp_pgtable_change)
 198UASM_L_LA(_r3000_write_probe_fail)
 199UASM_L_LA(_large_segbits_fault)
 200#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
 201UASM_L_LA(_tlb_huge_update)
 202#endif
 203
 204static int hazard_instance;
 205
 206static void uasm_bgezl_hazard(u32 **p, struct uasm_reloc **r, int instance)
 207{
 208	switch (instance) {
 209	case 0 ... 7:
 210		uasm_il_bgezl(p, r, 0, label_tlbw_hazard_0 + instance);
 211		return;
 212	default:
 213		BUG();
 214	}
 215}
 216
 217static void uasm_bgezl_label(struct uasm_label **l, u32 **p, int instance)
 218{
 219	switch (instance) {
 220	case 0 ... 7:
 221		uasm_build_label(l, *p, label_tlbw_hazard_0 + instance);
 222		break;
 223	default:
 224		BUG();
 225	}
 226}
 227
 228/*
 229 * pgtable bits are assigned dynamically depending on processor feature
 230 * and statically based on kernel configuration.  This spits out the actual
 231 * values the kernel is using.  Required to make sense of disassembled
 232 * TLB exception handlers.
 233 */
 234static void output_pgtable_bits_defines(void)
 235{
 236#define pr_define(fmt, ...)					\
 237	pr_debug("#define " fmt, ##__VA_ARGS__)
 238
 239	pr_debug("#include <asm/asm.h>\n");
 240	pr_debug("#include <asm/regdef.h>\n");
 241	pr_debug("\n");
 242
 243	pr_define("_PAGE_PRESENT_SHIFT %d\n", _PAGE_PRESENT_SHIFT);
 244	pr_define("_PAGE_NO_READ_SHIFT %d\n", _PAGE_NO_READ_SHIFT);
 245	pr_define("_PAGE_WRITE_SHIFT %d\n", _PAGE_WRITE_SHIFT);
 246	pr_define("_PAGE_ACCESSED_SHIFT %d\n", _PAGE_ACCESSED_SHIFT);
 247	pr_define("_PAGE_MODIFIED_SHIFT %d\n", _PAGE_MODIFIED_SHIFT);
 248#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
 249	pr_define("_PAGE_HUGE_SHIFT %d\n", _PAGE_HUGE_SHIFT);
 250#endif
 251#ifdef _PAGE_NO_EXEC_SHIFT
 252	if (cpu_has_rixi)
 253		pr_define("_PAGE_NO_EXEC_SHIFT %d\n", _PAGE_NO_EXEC_SHIFT);
 254#endif
 255	pr_define("_PAGE_GLOBAL_SHIFT %d\n", _PAGE_GLOBAL_SHIFT);
 256	pr_define("_PAGE_VALID_SHIFT %d\n", _PAGE_VALID_SHIFT);
 257	pr_define("_PAGE_DIRTY_SHIFT %d\n", _PAGE_DIRTY_SHIFT);
 258	pr_define("PFN_PTE_SHIFT %d\n", PFN_PTE_SHIFT);
 259	pr_debug("\n");
 260}
 261
 262static inline void dump_handler(const char *symbol, const void *start, const void *end)
 263{
 264	unsigned int count = (end - start) / sizeof(u32);
 265	const u32 *handler = start;
 266	int i;
 267
 268	pr_debug("LEAF(%s)\n", symbol);
 269
 270	pr_debug("\t.set push\n");
 271	pr_debug("\t.set noreorder\n");
 272
 273	for (i = 0; i < count; i++)
 274		pr_debug("\t.word\t0x%08x\t\t# %p\n", handler[i], &handler[i]);
 275
 276	pr_debug("\t.set\tpop\n");
 277
 278	pr_debug("\tEND(%s)\n", symbol);
 279}
 280
 281#ifdef CONFIG_64BIT
 282# define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_XCONTEXT)
 283#else
 284# define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_CONTEXT)
 285#endif
 286
 287/* The worst case length of the handler is around 18 instructions for
 288 * R3000-style TLBs and up to 63 instructions for R4000-style TLBs.
 289 * Maximum space available is 32 instructions for R3000 and 64
 290 * instructions for R4000.
 291 *
 292 * We deliberately chose a buffer size of 128, so we won't scribble
 293 * over anything important on overflow before we panic.
 294 */
 295static u32 tlb_handler[128];
 296
 297/* simply assume worst case size for labels and relocs */
 298static struct uasm_label labels[128];
 299static struct uasm_reloc relocs[128];
 300
 301static int check_for_high_segbits;
 302static bool fill_includes_sw_bits;
 303
 304static unsigned int kscratch_used_mask;
 305
 306static inline int __maybe_unused c0_kscratch(void)
 307{
 308	return 31;
 309}
 310
 311static int allocate_kscratch(void)
 312{
 313	int r;
 314	unsigned int a = cpu_data[0].kscratch_mask & ~kscratch_used_mask;
 315
 316	r = ffs(a);
 317
 318	if (r == 0)
 319		return -1;
 320
 321	r--; /* make it zero based */
 322
 323	kscratch_used_mask |= (1 << r);
 324
 325	return r;
 326}
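
/*
 * For example, if cpu_data[0].kscratch_mask reads 0x0c (KScratch
 * selects 2 and 3 implemented) and nothing has been handed out yet,
 * the first call returns 2 -- i.e. CP0 register 31 (c0_kscratch()),
 * select 2 -- and marks it used, so a second call would return 3.
 */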
 327
 328static int scratch_reg;
 329int pgd_reg;
 330EXPORT_SYMBOL_GPL(pgd_reg);
 331enum vmalloc64_mode {not_refill, refill_scratch, refill_noscratch};
 332
 333static struct work_registers build_get_work_registers(u32 **p)
 334{
 335	struct work_registers r;
 336
 337	if (scratch_reg >= 0) {
 338		/* Save in CPU local C0_KScratch? */
 339		UASM_i_MTC0(p, 1, c0_kscratch(), scratch_reg);
 340		r.r1 = GPR_K0;
 341		r.r2 = GPR_K1;
 342		r.r3 = GPR_AT;
 343		return r;
 344	}
 345
 346	if (num_possible_cpus() > 1) {
 347		/* Get smp_processor_id */
 348		UASM_i_CPUID_MFC0(p, GPR_K0, SMP_CPUID_REG);
 349		UASM_i_SRL_SAFE(p, GPR_K0, GPR_K0, SMP_CPUID_REGSHIFT);
 350
 351		/* handler_reg_save index in GPR_K0 */
 352		UASM_i_SLL(p, GPR_K0, GPR_K0, ilog2(sizeof(struct tlb_reg_save)));
 353
 354		UASM_i_LA(p, GPR_K1, (long)&handler_reg_save);
 355		UASM_i_ADDU(p, GPR_K0, GPR_K0, GPR_K1);
 356	} else {
 357		UASM_i_LA(p, GPR_K0, (long)&handler_reg_save);
 358	}
 359	/* GPR_K0 now points to save area, save $1 and $2  */
 360	UASM_i_SW(p, 1, offsetof(struct tlb_reg_save, a), GPR_K0);
 361	UASM_i_SW(p, 2, offsetof(struct tlb_reg_save, b), GPR_K0);
 362
 363	r.r1 = GPR_K1;
 364	r.r2 = 1;
 365	r.r3 = 2;
 366	return r;
 367}
 368
 369static void build_restore_work_registers(u32 **p)
 370{
 371	if (scratch_reg >= 0) {
 372		uasm_i_ehb(p);
 373		UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
 374		return;
 375	}
 376	/* GPR_K0 already points to save area, restore $1 and $2  */
 377	UASM_i_LW(p, 1, offsetof(struct tlb_reg_save, a), GPR_K0);
 378	UASM_i_LW(p, 2, offsetof(struct tlb_reg_save, b), GPR_K0);
 379}
 380
 381#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
 382
 383/*
 384 * CONFIG_MIPS_PGD_C0_CONTEXT implies 64 bit and lack of pgd_current,
 385 * so we cannot do r3000 under these circumstances.
 386 *
 387 * The R3000 TLB handler is simple.
 388 */
 389static void build_r3000_tlb_refill_handler(void)
 390{
 391	long pgdc = (long)pgd_current;
 392	u32 *p;
 393
 394	memset(tlb_handler, 0, sizeof(tlb_handler));
 395	p = tlb_handler;
 396
 397	uasm_i_mfc0(&p, GPR_K0, C0_BADVADDR);
 398	uasm_i_lui(&p, GPR_K1, uasm_rel_hi(pgdc)); /* cp0 delay */
 399	uasm_i_lw(&p, GPR_K1, uasm_rel_lo(pgdc), GPR_K1);
 400	uasm_i_srl(&p, GPR_K0, GPR_K0, 22); /* load delay */
 401	uasm_i_sll(&p, GPR_K0, GPR_K0, 2);
 402	uasm_i_addu(&p, GPR_K1, GPR_K1, GPR_K0);
 403	uasm_i_mfc0(&p, GPR_K0, C0_CONTEXT);
 404	uasm_i_lw(&p, GPR_K1, 0, GPR_K1); /* cp0 delay */
 405	uasm_i_andi(&p, GPR_K0, GPR_K0, 0xffc); /* load delay */
 406	uasm_i_addu(&p, GPR_K1, GPR_K1, GPR_K0);
 407	uasm_i_lw(&p, GPR_K0, 0, GPR_K1);
 408	uasm_i_nop(&p); /* load delay */
 409	uasm_i_mtc0(&p, GPR_K0, C0_ENTRYLO0);
 410	uasm_i_mfc0(&p, GPR_K1, C0_EPC); /* cp0 delay */
 411	uasm_i_tlbwr(&p); /* cp0 delay */
 412	uasm_i_jr(&p, GPR_K1);
 413	uasm_i_rfe(&p); /* branch delay */
 414
 415	if (p > tlb_handler + 32)
 416		panic("TLB refill handler space exceeded");
 417
 418	pr_debug("Wrote TLB refill handler (%u instructions).\n",
 419		 (unsigned int)(p - tlb_handler));
 420
 421	memcpy((void *)ebase, tlb_handler, 0x80);
 422	local_flush_icache_range(ebase, ebase + 0x80);
 423	dump_handler("r3000_tlb_refill", (u32 *)ebase, (u32 *)(ebase + 0x80));
 424}
 425#endif /* CONFIG_MIPS_PGD_C0_CONTEXT */
 426
 427/*
 428 * The R4000 TLB handler is much more complicated. We have two
 429 * consecutive handler areas with space for 32 instructions each.
 430 * Since they aren't used at the same time, one may overflow into
 431 * the other. To keep things simple, we first assume linear space,
 432 * then we relocate it to the final handler layout as needed.
 433 */
 434static u32 final_handler[64];
 435
 436/*
 437 * Hazards
 438 *
 439 * From the IDT errata for the QED RM5230 (Nevada), processor revision 1.0:
 440 * 2. A timing hazard exists for the TLBP instruction.
 441 *
 442 *	stalling_instruction
 443 *	TLBP
 444 *
 445 * The JTLB is being read for the TLBP throughout the stall generated by the
 446 * previous instruction. This is not really correct as the stalling instruction
 447 * can modify the address used to access the JTLB.  The failure symptom is that
 448 * the TLBP instruction will use an address created for the stalling instruction
 449 * and not the address held in C0_ENHI and thus report the wrong results.
 450 *
 451 * The software work-around is to not allow the instruction preceding the TLBP
 452 * to stall - make it an NOP or some other instruction guaranteed not to stall.
 453 *
 454 * Errata 2 will not be fixed.	This errata is also on the R5000.
 455 *
 456 * As if we MIPS hackers wouldn't know how to nop pipelines happy ...
 457 */
 458static void __maybe_unused build_tlb_probe_entry(u32 **p)
 459{
 460	switch (current_cpu_type()) {
 461	/* Found by experiment: R4600 v2.0/R4700 needs this, too.  */
 462	case CPU_R4600:
 463	case CPU_R4700:
 464	case CPU_R5000:
 465	case CPU_NEVADA:
 466		uasm_i_nop(p);
 467		uasm_i_tlbp(p);
 468		break;
 469
 470	default:
 471		uasm_i_tlbp(p);
 472		break;
 473	}
 474}
 475
 476void build_tlb_write_entry(u32 **p, struct uasm_label **l,
 477			   struct uasm_reloc **r,
 478			   enum tlb_write_entry wmode)
 479{
 480	void(*tlbw)(u32 **) = NULL;
 481
 482	switch (wmode) {
 483	case tlb_random: tlbw = uasm_i_tlbwr; break;
 484	case tlb_indexed: tlbw = uasm_i_tlbwi; break;
 485	}
 486
 487	if (cpu_has_mips_r2_r6) {
 488		if (cpu_has_mips_r2_exec_hazard)
 489			uasm_i_ehb(p);
 490		tlbw(p);
 491		return;
 492	}
 493
 494	switch (current_cpu_type()) {
 495	case CPU_R4000PC:
 496	case CPU_R4000SC:
 497	case CPU_R4000MC:
 498	case CPU_R4400PC:
 499	case CPU_R4400SC:
 500	case CPU_R4400MC:
 501		/*
 502		 * This branch uses up a mtc0 hazard nop slot and saves
 503		 * two nops after the tlbw instruction.
 504		 */
 505		uasm_bgezl_hazard(p, r, hazard_instance);
 506		tlbw(p);
 507		uasm_bgezl_label(l, p, hazard_instance);
 508		hazard_instance++;
 509		uasm_i_nop(p);
 510		break;
 511
 512	case CPU_R4600:
 513	case CPU_R4700:
 514		uasm_i_nop(p);
 515		tlbw(p);
 516		uasm_i_nop(p);
 517		break;
 518
 519	case CPU_R5000:
 520	case CPU_NEVADA:
 521		uasm_i_nop(p); /* QED specifies 2 nops hazard */
 522		uasm_i_nop(p); /* QED specifies 2 nops hazard */
 523		tlbw(p);
 524		break;
 525
 526	case CPU_R4300:
 527	case CPU_5KC:
 528	case CPU_TX49XX:
 529	case CPU_PR4450:
 530		uasm_i_nop(p);
 531		tlbw(p);
 532		break;
 533
 534	case CPU_R10000:
 535	case CPU_R12000:
 536	case CPU_R14000:
 537	case CPU_R16000:
 538	case CPU_4KC:
 539	case CPU_4KEC:
 540	case CPU_M14KC:
 541	case CPU_M14KEC:
 542	case CPU_SB1:
 543	case CPU_SB1A:
 544	case CPU_4KSC:
 545	case CPU_20KC:
 546	case CPU_25KF:
 547	case CPU_BMIPS32:
 548	case CPU_BMIPS3300:
 549	case CPU_BMIPS4350:
 550	case CPU_BMIPS4380:
 551	case CPU_BMIPS5000:
 552	case CPU_LOONGSON2EF:
 553	case CPU_LOONGSON64:
 554	case CPU_R5500:
 555		if (m4kc_tlbp_war())
 556			uasm_i_nop(p);
 557		fallthrough;
 558	case CPU_ALCHEMY:
 559		tlbw(p);
 560		break;
 561
 562	case CPU_RM7000:
 563		uasm_i_nop(p);
 564		uasm_i_nop(p);
 565		uasm_i_nop(p);
 566		uasm_i_nop(p);
 567		tlbw(p);
 568		break;
 569
 570	case CPU_XBURST:
 571		tlbw(p);
 572		uasm_i_nop(p);
 573		break;
 574
 575	default:
 576		panic("No TLB refill handler yet (CPU type: %d)",
 577		      current_cpu_type());
 578		break;
 579	}
 580}
 581EXPORT_SYMBOL_GPL(build_tlb_write_entry);
 582
 583static __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
 584							unsigned int reg)
 585{
 586	if (_PAGE_GLOBAL_SHIFT == 0) {
 587		/* pte_t is already in EntryLo format */
 588		return;
 589	}
 590
 591	if (cpu_has_rixi && _PAGE_NO_EXEC != 0) {
 592		if (fill_includes_sw_bits) {
 593			UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL));
 594		} else {
 595			UASM_i_SRL(p, reg, reg, ilog2(_PAGE_NO_EXEC));
 596			UASM_i_ROTR(p, reg, reg,
 597				    ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
 598		}
 599	} else {
 600#ifdef CONFIG_PHYS_ADDR_T_64BIT
 601		uasm_i_dsrl_safe(p, reg, reg, ilog2(_PAGE_GLOBAL));
 602#else
 603		UASM_i_SRL(p, reg, reg, ilog2(_PAGE_GLOBAL));
 604#endif
 605	}
 606}
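
/*
 * The rotate trick above: with RIXI and the usual (non-XPA) bit
 * layout, when the software-only PTE bits fit into the EntryLo fill
 * field (fill_includes_sw_bits), a single ROTR by ilog2(_PAGE_GLOBAL)
 * drops G onto EntryLo bit 0 (with V and D following) while the low
 * software bits wrap around to the top of the register; in particular
 * _PAGE_NO_READ and _PAGE_NO_EXEC, which sit just below _PAGE_GLOBAL,
 * land on the hardware RI and XI bits.
 */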
 607
 608#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
 609
 610static void build_restore_pagemask(u32 **p, struct uasm_reloc **r,
 611				   unsigned int tmp, enum label_id lid,
 612				   int restore_scratch)
 613{
 614	if (restore_scratch) {
 615		/*
 616		 * Ensure the MFC0 below observes the value written to the
 617		 * KScratch register by the prior MTC0.
 618		 */
 619		if (scratch_reg >= 0)
 620			uasm_i_ehb(p);
 621
 622		/* Reset default page size */
 623		if (PM_DEFAULT_MASK >> 16) {
 624			uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
 625			uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
 626			uasm_i_mtc0(p, tmp, C0_PAGEMASK);
 627			uasm_il_b(p, r, lid);
 628		} else if (PM_DEFAULT_MASK) {
 629			uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
 630			uasm_i_mtc0(p, tmp, C0_PAGEMASK);
 631			uasm_il_b(p, r, lid);
 632		} else {
 633			uasm_i_mtc0(p, 0, C0_PAGEMASK);
 634			uasm_il_b(p, r, lid);
 635		}
 636		if (scratch_reg >= 0)
 637			UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
 638		else
 639			UASM_i_LW(p, 1, scratchpad_offset(0), 0);
 640	} else {
 641		/* Reset default page size */
 642		if (PM_DEFAULT_MASK >> 16) {
 643			uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
 644			uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
 645			uasm_il_b(p, r, lid);
 646			uasm_i_mtc0(p, tmp, C0_PAGEMASK);
 647		} else if (PM_DEFAULT_MASK) {
 648			uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
 649			uasm_il_b(p, r, lid);
 650			uasm_i_mtc0(p, tmp, C0_PAGEMASK);
 651		} else {
 652			uasm_il_b(p, r, lid);
 653			uasm_i_mtc0(p, 0, C0_PAGEMASK);
 654		}
 655	}
 656}
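
/*
 * In the sequences above the final instruction is deliberately placed
 * in the delay slot of the branch to LID: either the C0_PageMask write
 * itself, or (in the restore_scratch case) the reload of $1 from
 * KScratch or the CVMSEG scratchpad.
 */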
 657
 658static void build_huge_tlb_write_entry(u32 **p, struct uasm_label **l,
 659				       struct uasm_reloc **r,
 660				       unsigned int tmp,
 661				       enum tlb_write_entry wmode,
 662				       int restore_scratch)
 663{
 664	/* Set huge page tlb entry size */
 665	uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16);
 666	uasm_i_ori(p, tmp, tmp, PM_HUGE_MASK & 0xffff);
 667	uasm_i_mtc0(p, tmp, C0_PAGEMASK);
 668
 669	build_tlb_write_entry(p, l, r, wmode);
 670
 671	build_restore_pagemask(p, r, tmp, label_leave, restore_scratch);
 672}
 673
 674/*
 675 * Check if a huge PTE is present; if so, jump to LABEL.
 676 */
 677static void
 678build_is_huge_pte(u32 **p, struct uasm_reloc **r, unsigned int tmp,
 679		  unsigned int pmd, int lid)
 680{
 681	UASM_i_LW(p, tmp, 0, pmd);
 682	if (use_bbit_insns()) {
 683		uasm_il_bbit1(p, r, tmp, ilog2(_PAGE_HUGE), lid);
 684	} else {
 685		uasm_i_andi(p, tmp, tmp, _PAGE_HUGE);
 686		uasm_il_bnez(p, r, tmp, lid);
 687	}
 688}
 689
 690static void build_huge_update_entries(u32 **p, unsigned int pte,
 691				      unsigned int tmp)
 692{
 693	int small_sequence;
 694
 695	/*
 696	 * A huge PTE describes an area the size of the
 697 * configured huge page size. This is twice the size
 698 * of the large TLB entry we intend to use.
 699	 * A TLB entry half the size of the configured
 700	 * huge page size is configured into entrylo0
 701	 * and entrylo1 to cover the contiguous huge PTE
 702	 * address space.
 703	 */
 704	small_sequence = (HPAGE_SIZE >> 7) < 0x10000;
 705
 706	/* We can clobber tmp.	It isn't used after this. */
 707	if (!small_sequence)
 708		uasm_i_lui(p, tmp, HPAGE_SIZE >> (7 + 16));
 709
 710	build_convert_pte_to_entrylo(p, pte);
 711	UASM_i_MTC0(p, pte, C0_ENTRYLO0); /* load it */
 712	/* convert to entrylo1 */
 713	if (small_sequence)
 714		UASM_i_ADDIU(p, pte, pte, HPAGE_SIZE >> 7);
 715	else
 716		UASM_i_ADDU(p, pte, pte, tmp);
 717
 718	UASM_i_MTC0(p, pte, C0_ENTRYLO1); /* load it */
 719}
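
/*
 * The HPAGE_SIZE >> 7 constant above is the EntryLo distance between
 * the two halves of a huge page: EntryLo holds PA >> 12 in the PFN
 * field at bit 6, so half a huge page is ((HPAGE_SIZE / 2) >> 12) << 6
 * == HPAGE_SIZE >> 7.  E.g. with 4 KiB base pages (2 MiB huge pages)
 * that is 0x4000, small enough for a single ADDIU (small_sequence).
 */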
 720
 721static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r,
 722				    struct uasm_label **l,
 723				    unsigned int pte,
 724				    unsigned int ptr,
 725				    unsigned int flush)
 726{
 727#ifdef CONFIG_SMP
 728	UASM_i_SC(p, pte, 0, ptr);
 729	uasm_il_beqz(p, r, pte, label_tlb_huge_update);
 730	UASM_i_LW(p, pte, 0, ptr); /* Needed because SC killed our PTE */
 731#else
 732	UASM_i_SW(p, pte, 0, ptr);
 733#endif
 734	if (cpu_has_ftlb && flush) {
 735		BUG_ON(!cpu_has_tlbinv);
 736
 737		UASM_i_MFC0(p, ptr, C0_ENTRYHI);
 738		uasm_i_ori(p, ptr, ptr, MIPS_ENTRYHI_EHINV);
 739		UASM_i_MTC0(p, ptr, C0_ENTRYHI);
 740		build_tlb_write_entry(p, l, r, tlb_indexed);
 741
 742		uasm_i_xori(p, ptr, ptr, MIPS_ENTRYHI_EHINV);
 743		UASM_i_MTC0(p, ptr, C0_ENTRYHI);
 744		build_huge_update_entries(p, pte, ptr);
 745		build_huge_tlb_write_entry(p, l, r, pte, tlb_random, 0);
 746
 747		return;
 748	}
 749
 750	build_huge_update_entries(p, pte, ptr);
 751	build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed, 0);
 752}
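
/*
 * The EHINV sequence above is for CPUs with an FTLB: a huge page
 * cannot live in the FTLB, so when the indexed entry found for a
 * load/store fault may be an FTLB entry it is first invalidated in
 * place (EntryHi.EHINV plus an indexed write) and the huge mapping is
 * then written with a random (VTLB) write instead of reusing that
 * index.
 */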
 753#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
 754
 755#ifdef CONFIG_64BIT
 756/*
 757 * TMP and PTR are scratch.
 758 * TMP will be clobbered, PTR will hold the pmd entry.
 759 */
 760void build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 761		      unsigned int tmp, unsigned int ptr)
 762{
 763#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
 764	long pgdc = (long)pgd_current;
 765#endif
 766	/*
 767	 * The vmalloc handling is not in the hotpath.
 768	 */
 769	uasm_i_dmfc0(p, tmp, C0_BADVADDR);
 770
 771	if (check_for_high_segbits) {
 772		/*
 773		 * The kernel currently implicitly assumes that the
 774		 * MIPS SEGBITS parameter for the processor is
 775		 * (PGDIR_SHIFT+PGDIR_BITS) or less, and will never
 776		 * allocate virtual addresses outside the maximum
 777		 * range for SEGBITS = (PGDIR_SHIFT+PGDIR_BITS). But
 778		 * that doesn't prevent user code from accessing the
 779		 * higher xuseg addresses.  Here, we make sure that
 780		 * everything but the lower xuseg addresses goes down
 781		 * the module_alloc/vmalloc path.
 782		 */
 783		uasm_i_dsrl_safe(p, ptr, tmp, PGDIR_SHIFT + PGD_TABLE_ORDER + PAGE_SHIFT - 3);
 784		uasm_il_bnez(p, r, ptr, label_vmalloc);
 785	} else {
 786		uasm_il_bltz(p, r, tmp, label_vmalloc);
 787	}
 788	/* No uasm_i_nop needed here, since the next insn doesn't touch TMP. */
 789
 790	if (pgd_reg != -1) {
 791		/* pgd is in pgd_reg */
 792		if (cpu_has_ldpte)
 793			UASM_i_MFC0(p, ptr, C0_PWBASE);
 794		else
 795			UASM_i_MFC0(p, ptr, c0_kscratch(), pgd_reg);
 796	} else {
 797#if defined(CONFIG_MIPS_PGD_C0_CONTEXT)
 798		/*
 799		 * &pgd << 11 stored in CONTEXT [23..63].
 800		 */
 801		UASM_i_MFC0(p, ptr, C0_CONTEXT);
 802
 803		/* Clear lower 23 bits of context. */
 804		uasm_i_dins(p, ptr, 0, 0, 23);
 805
 806		/* insert bit[63:59] of CAC_BASE into bit[11:6] of ptr */
 807		uasm_i_ori(p, ptr, ptr, ((u64)(CAC_BASE) >> 53));
 808		uasm_i_drotr(p, ptr, ptr, 11);
 809#elif defined(CONFIG_SMP)
 810		UASM_i_CPUID_MFC0(p, ptr, SMP_CPUID_REG);
 811		uasm_i_dsrl_safe(p, ptr, ptr, SMP_CPUID_PTRSHIFT);
 812		UASM_i_LA_mostly(p, tmp, pgdc);
 813		uasm_i_daddu(p, ptr, ptr, tmp);
 814		uasm_i_dmfc0(p, tmp, C0_BADVADDR);
 815		uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
 816#else
 817		UASM_i_LA_mostly(p, ptr, pgdc);
 818		uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
 819#endif
 820	}
 821
 822	uasm_l_vmalloc_done(l, *p);
 823
 824	/* get pgd offset in bytes */
 825	uasm_i_dsrl_safe(p, tmp, tmp, PGDIR_SHIFT - 3);
 826
 827	uasm_i_andi(p, tmp, tmp, (PTRS_PER_PGD - 1)<<3);
 828	uasm_i_daddu(p, ptr, ptr, tmp); /* add in pgd offset */
 829#ifndef __PAGETABLE_PUD_FOLDED
 830	uasm_i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */
 831	uasm_i_ld(p, ptr, 0, ptr); /* get pud pointer */
 832	uasm_i_dsrl_safe(p, tmp, tmp, PUD_SHIFT - 3); /* get pud offset in bytes */
 833	uasm_i_andi(p, tmp, tmp, (PTRS_PER_PUD - 1) << 3);
 834	uasm_i_daddu(p, ptr, ptr, tmp); /* add in pud offset */
 835#endif
 836#ifndef __PAGETABLE_PMD_FOLDED
 837	uasm_i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */
 838	uasm_i_ld(p, ptr, 0, ptr); /* get pmd pointer */
 839	uasm_i_dsrl_safe(p, tmp, tmp, PMD_SHIFT-3); /* get pmd offset in bytes */
 840	uasm_i_andi(p, tmp, tmp, (PTRS_PER_PMD - 1)<<3);
 841	uasm_i_daddu(p, ptr, ptr, tmp); /* add in pmd offset */
 842#endif
 843}
 844EXPORT_SYMBOL_GPL(build_get_pmde64);
 845
 846/*
 847 * BVADDR is the faulting address, PTR is scratch.
 848 * PTR will hold the pgd for vmalloc.
 849 */
 850static void
 851build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 852			unsigned int bvaddr, unsigned int ptr,
 853			enum vmalloc64_mode mode)
 854{
 855	long swpd = (long)swapper_pg_dir;
 856	int single_insn_swpd;
 857	int did_vmalloc_branch = 0;
 858
 859	single_insn_swpd = uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd);
 860
 861	uasm_l_vmalloc(l, *p);
 862
 863	if (mode != not_refill && check_for_high_segbits) {
 864		if (single_insn_swpd) {
 865			uasm_il_bltz(p, r, bvaddr, label_vmalloc_done);
 866			uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
 867			did_vmalloc_branch = 1;
 868			/* fall through */
 869		} else {
 870			uasm_il_bgez(p, r, bvaddr, label_large_segbits_fault);
 871		}
 872	}
 873	if (!did_vmalloc_branch) {
 874		if (single_insn_swpd) {
 875			uasm_il_b(p, r, label_vmalloc_done);
 876			uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
 877		} else {
 878			UASM_i_LA_mostly(p, ptr, swpd);
 879			uasm_il_b(p, r, label_vmalloc_done);
 880			if (uasm_in_compat_space_p(swpd))
 881				uasm_i_addiu(p, ptr, ptr, uasm_rel_lo(swpd));
 882			else
 883				uasm_i_daddiu(p, ptr, ptr, uasm_rel_lo(swpd));
 884		}
 885	}
 886	if (mode != not_refill && check_for_high_segbits) {
 887		uasm_l_large_segbits_fault(l, *p);
 888
 889		if (mode == refill_scratch && scratch_reg >= 0)
 890			uasm_i_ehb(p);
 891
 892		/*
 893		 * We get here if we are an xsseg address, or if we are
 894		 * an xuseg address above (PGDIR_SHIFT+PGDIR_BITS) boundary.
 895		 *
 896		 * Ignoring xsseg (assumed disabled, so it would generate
 897		 * address errors), the only remaining possibility
 898		 * is the upper xuseg addresses.  On processors with
 899		 * TLB_SEGBITS <= PGDIR_SHIFT+PGDIR_BITS, these
 900		 * addresses would have taken an address error. We try
 901		 * to mimic that here by taking a load/istream page
 902		 * fault.
 903		 */
 904		if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS))
 905			uasm_i_sync(p, 0);
 906		UASM_i_LA(p, ptr, (unsigned long)tlb_do_page_fault_0);
 907		uasm_i_jr(p, ptr);
 908
 909		if (mode == refill_scratch) {
 910			if (scratch_reg >= 0)
 911				UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
 912			else
 913				UASM_i_LW(p, 1, scratchpad_offset(0), 0);
 914		} else {
 915			uasm_i_nop(p);
 916		}
 917	}
 918}
 919
 920#else /* !CONFIG_64BIT */
 921
 922/*
 923 * TMP and PTR are scratch.
 924 * TMP will be clobbered, PTR will hold the pgd entry.
 925 */
 926void build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
 927{
 928	if (pgd_reg != -1) {
 929		/* pgd is in pgd_reg */
 930		uasm_i_mfc0(p, ptr, c0_kscratch(), pgd_reg);
 931		uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */
 932	} else {
 933		long pgdc = (long)pgd_current;
 934
 935		/* 32 bit SMP has smp_processor_id() stored in CONTEXT. */
 936#ifdef CONFIG_SMP
 937		uasm_i_mfc0(p, ptr, SMP_CPUID_REG);
 938		UASM_i_LA_mostly(p, tmp, pgdc);
 939		uasm_i_srl(p, ptr, ptr, SMP_CPUID_PTRSHIFT);
 940		uasm_i_addu(p, ptr, tmp, ptr);
 941#else
 942		UASM_i_LA_mostly(p, ptr, pgdc);
 943#endif
 944		uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */
 945		uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
 946	}
 947	uasm_i_srl(p, tmp, tmp, PGDIR_SHIFT); /* get pgd only bits */
 948	uasm_i_sll(p, tmp, tmp, PGD_T_LOG2);
 949	uasm_i_addu(p, ptr, ptr, tmp); /* add in pgd offset */
 950}
 951EXPORT_SYMBOL_GPL(build_get_pgde32);
 952
 953#endif /* !CONFIG_64BIT */
 954
 955static void build_adjust_context(u32 **p, unsigned int ctx)
 956{
 957	unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12;
 958	unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1);
 959
 960	if (shift)
 961		UASM_i_SRL(p, ctx, ctx, shift);
 962	uasm_i_andi(p, ctx, ctx, mask);
 963}
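
/*
 * Context/XContext hold BadVPN2 (VA >> 13) starting at bit 4, and we
 * want a byte offset into the page table scaled by one PTE pair
 * (2 * sizeof(pte_t)); the shift above is the difference between those
 * two positions.  For example, with 4 KiB pages and 32-bit PTEs this
 * comes to a right shift by 1 and a mask of 0xff8.
 */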
 964
 965void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
 966{
 967	/*
 968	 * Bug workaround for the Nevada. It seems as if under certain
 969	 * circumstances the move from cp0_context might produce a
 970	 * bogus result when the mfc0 instruction and its consumer are
 971	 * in different cachelines, or when a load instruction (probably
 972	 * any memory reference) comes between them.
 973	 */
 974	switch (current_cpu_type()) {
 975	case CPU_NEVADA:
 976		UASM_i_LW(p, ptr, 0, ptr);
 977		GET_CONTEXT(p, tmp); /* get context reg */
 978		break;
 979
 980	default:
 981		GET_CONTEXT(p, tmp); /* get context reg */
 982		UASM_i_LW(p, ptr, 0, ptr);
 983		break;
 984	}
 985
 986	build_adjust_context(p, tmp);
 987	UASM_i_ADDU(p, ptr, ptr, tmp); /* add in offset */
 988}
 989EXPORT_SYMBOL_GPL(build_get_ptep);
 990
 991void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
 992{
 993	int pte_off_even = 0;
 994	int pte_off_odd = sizeof(pte_t);
 995
 996#if defined(CONFIG_CPU_MIPS32) && defined(CONFIG_PHYS_ADDR_T_64BIT)
 997	/* The low 32 bits of EntryLo are stored in pte_high */
 998	pte_off_even += offsetof(pte_t, pte_high);
 999	pte_off_odd += offsetof(pte_t, pte_high);
1000#endif
1001
1002	if (IS_ENABLED(CONFIG_XPA)) {
1003		uasm_i_lw(p, tmp, pte_off_even, ptep); /* even pte */
1004		UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
1005		UASM_i_MTC0(p, tmp, C0_ENTRYLO0);
1006
1007		if (cpu_has_xpa && !mips_xpa_disabled) {
1008			uasm_i_lw(p, tmp, 0, ptep);
1009			uasm_i_ext(p, tmp, tmp, 0, 24);
1010			uasm_i_mthc0(p, tmp, C0_ENTRYLO0);
1011		}
1012
1013		uasm_i_lw(p, tmp, pte_off_odd, ptep); /* odd pte */
1014		UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
1015		UASM_i_MTC0(p, tmp, C0_ENTRYLO1);
1016
1017		if (cpu_has_xpa && !mips_xpa_disabled) {
1018			uasm_i_lw(p, tmp, sizeof(pte_t), ptep);
1019			uasm_i_ext(p, tmp, tmp, 0, 24);
1020			uasm_i_mthc0(p, tmp, C0_ENTRYLO1);
1021		}
1022		return;
1023	}
1024
1025	UASM_i_LW(p, tmp, pte_off_even, ptep); /* get even pte */
1026	UASM_i_LW(p, ptep, pte_off_odd, ptep); /* get odd pte */
1027	if (r45k_bvahwbug())
1028		build_tlb_probe_entry(p);
1029	build_convert_pte_to_entrylo(p, tmp);
1030	if (r4k_250MHZhwbug())
1031		UASM_i_MTC0(p, 0, C0_ENTRYLO0);
1032	UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
1033	build_convert_pte_to_entrylo(p, ptep);
1034	if (r45k_bvahwbug())
1035		uasm_i_mfc0(p, tmp, C0_INDEX);
1036	if (r4k_250MHZhwbug())
1037		UASM_i_MTC0(p, 0, C0_ENTRYLO1);
1038	UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
1039}
1040EXPORT_SYMBOL_GPL(build_update_entries);
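/*
 * Each R4k-style TLB entry maps a pair of pages, hence the even/odd
 * PTE loads above.  The rotate/shift by ilog2(_PAGE_GLOBAL) in
 * build_convert_pte_to_entrylo() moves the software bits out of the
 * way (into the EntryLo fill bits when RIXI-style rotation is used)
 * so that the global bit ends up in EntryLo bit 0.
 */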
1041
1042struct mips_huge_tlb_info {
1043	int huge_pte;
1044	int restore_scratch;
1045	bool need_reload_pte;
1046};
1047
1048static struct mips_huge_tlb_info
1049build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
1050			       struct uasm_reloc **r, unsigned int tmp,
1051			       unsigned int ptr, int c0_scratch_reg)
1052{
1053	struct mips_huge_tlb_info rv;
1054	unsigned int even, odd;
1055	int vmalloc_branch_delay_filled = 0;
1056	const int scratch = 1; /* Our extra working register */
1057
1058	rv.huge_pte = scratch;
1059	rv.restore_scratch = 0;
1060	rv.need_reload_pte = false;
1061
1062	if (check_for_high_segbits) {
1063		UASM_i_MFC0(p, tmp, C0_BADVADDR);
1064
1065		if (pgd_reg != -1)
1066			UASM_i_MFC0(p, ptr, c0_kscratch(), pgd_reg);
1067		else
1068			UASM_i_MFC0(p, ptr, C0_CONTEXT);
1069
1070		if (c0_scratch_reg >= 0)
1071			UASM_i_MTC0(p, scratch, c0_kscratch(), c0_scratch_reg);
1072		else
1073			UASM_i_SW(p, scratch, scratchpad_offset(0), 0);
1074
1075		uasm_i_dsrl_safe(p, scratch, tmp,
1076				 PGDIR_SHIFT + PGD_TABLE_ORDER + PAGE_SHIFT - 3);
1077		uasm_il_bnez(p, r, scratch, label_vmalloc);
1078
1079		if (pgd_reg == -1) {
1080			vmalloc_branch_delay_filled = 1;
1081			/* Clear lower 23 bits of context. */
1082			uasm_i_dins(p, ptr, 0, 0, 23);
1083		}
1084	} else {
1085		if (pgd_reg != -1)
1086			UASM_i_MFC0(p, ptr, c0_kscratch(), pgd_reg);
1087		else
1088			UASM_i_MFC0(p, ptr, C0_CONTEXT);
1089
1090		UASM_i_MFC0(p, tmp, C0_BADVADDR);
1091
1092		if (c0_scratch_reg >= 0)
1093			UASM_i_MTC0(p, scratch, c0_kscratch(), c0_scratch_reg);
1094		else
1095			UASM_i_SW(p, scratch, scratchpad_offset(0), 0);
1096
1097		if (pgd_reg == -1)
1098			/* Clear lower 23 bits of context. */
1099			uasm_i_dins(p, ptr, 0, 0, 23);
1100
1101		uasm_il_bltz(p, r, tmp, label_vmalloc);
1102	}
1103
1104	if (pgd_reg == -1) {
1105		vmalloc_branch_delay_filled = 1;
1106		/* insert bit[63:59] of CAC_BASE into bit[11:6] of ptr */
1107		uasm_i_ori(p, ptr, ptr, ((u64)(CAC_BASE) >> 53));
1108
1109		uasm_i_drotr(p, ptr, ptr, 11);
1110	}
1111
1112#ifdef __PAGETABLE_PMD_FOLDED
1113#define LOC_PTEP scratch
1114#else
1115#define LOC_PTEP ptr
1116#endif
1117
1118	if (!vmalloc_branch_delay_filled)
1119		/* get pgd offset in bytes */
1120		uasm_i_dsrl_safe(p, scratch, tmp, PGDIR_SHIFT - 3);
1121
1122	uasm_l_vmalloc_done(l, *p);
1123
1124	/*
1125	 *			   tmp		ptr
1126	 * fall-through case =	 badvaddr  *pgd_current
1127	 * vmalloc case	     =	 badvaddr  swapper_pg_dir
1128	 */
1129
1130	if (vmalloc_branch_delay_filled)
1131		/* get pgd offset in bytes */
1132		uasm_i_dsrl_safe(p, scratch, tmp, PGDIR_SHIFT - 3);
1133
1134#ifdef __PAGETABLE_PMD_FOLDED
1135	GET_CONTEXT(p, tmp); /* get context reg */
1136#endif
1137	uasm_i_andi(p, scratch, scratch, (PTRS_PER_PGD - 1) << 3);
1138
1139	if (use_lwx_insns()) {
1140		UASM_i_LWX(p, LOC_PTEP, scratch, ptr);
1141	} else {
1142		uasm_i_daddu(p, ptr, ptr, scratch); /* add in pgd offset */
1143		uasm_i_ld(p, LOC_PTEP, 0, ptr); /* get pmd pointer */
1144	}
1145
1146#ifndef __PAGETABLE_PUD_FOLDED
1147	/* get pud offset in bytes */
1148	uasm_i_dsrl_safe(p, scratch, tmp, PUD_SHIFT - 3);
1149	uasm_i_andi(p, scratch, scratch, (PTRS_PER_PUD - 1) << 3);
1150
1151	if (use_lwx_insns()) {
1152		UASM_i_LWX(p, ptr, scratch, ptr);
1153	} else {
1154		uasm_i_daddu(p, ptr, ptr, scratch); /* add in pmd offset */
1155		UASM_i_LW(p, ptr, 0, ptr);
1156	}
1157	/* ptr contains a pointer to PMD entry */
1158	/* tmp contains the address */
1159#endif
1160
1161#ifndef __PAGETABLE_PMD_FOLDED
1162	/* get pmd offset in bytes */
1163	uasm_i_dsrl_safe(p, scratch, tmp, PMD_SHIFT - 3);
1164	uasm_i_andi(p, scratch, scratch, (PTRS_PER_PMD - 1) << 3);
1165	GET_CONTEXT(p, tmp); /* get context reg */
1166
1167	if (use_lwx_insns()) {
1168		UASM_i_LWX(p, scratch, scratch, ptr);
1169	} else {
1170		uasm_i_daddu(p, ptr, ptr, scratch); /* add in pmd offset */
1171		UASM_i_LW(p, scratch, 0, ptr);
1172	}
1173#endif
1174	/* Adjust the context during the load latency. */
1175	build_adjust_context(p, tmp);
1176
1177#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
1178	uasm_il_bbit1(p, r, scratch, ilog2(_PAGE_HUGE), label_tlb_huge_update);
1179	/*
 1180	 * In the LWX case we don't want to do the load in the
1181	 * delay slot.	It cannot issue in the same cycle and may be
1182	 * speculative and unneeded.
1183	 */
1184	if (use_lwx_insns())
1185		uasm_i_nop(p);
1186#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
1187
1188
1189	/* build_update_entries */
1190	if (use_lwx_insns()) {
1191		even = ptr;
1192		odd = tmp;
1193		UASM_i_LWX(p, even, scratch, tmp);
1194		UASM_i_ADDIU(p, tmp, tmp, sizeof(pte_t));
1195		UASM_i_LWX(p, odd, scratch, tmp);
1196	} else {
1197		UASM_i_ADDU(p, ptr, scratch, tmp); /* add in offset */
1198		even = tmp;
1199		odd = ptr;
1200		UASM_i_LW(p, even, 0, ptr); /* get even pte */
1201		UASM_i_LW(p, odd, sizeof(pte_t), ptr); /* get odd pte */
1202	}
1203	if (cpu_has_rixi) {
1204		uasm_i_drotr(p, even, even, ilog2(_PAGE_GLOBAL));
1205		UASM_i_MTC0(p, even, C0_ENTRYLO0); /* load it */
1206		uasm_i_drotr(p, odd, odd, ilog2(_PAGE_GLOBAL));
1207	} else {
1208		uasm_i_dsrl_safe(p, even, even, ilog2(_PAGE_GLOBAL));
1209		UASM_i_MTC0(p, even, C0_ENTRYLO0); /* load it */
1210		uasm_i_dsrl_safe(p, odd, odd, ilog2(_PAGE_GLOBAL));
1211	}
1212	UASM_i_MTC0(p, odd, C0_ENTRYLO1); /* load it */
1213
1214	if (c0_scratch_reg >= 0) {
1215		uasm_i_ehb(p);
1216		UASM_i_MFC0(p, scratch, c0_kscratch(), c0_scratch_reg);
1217		build_tlb_write_entry(p, l, r, tlb_random);
1218		uasm_l_leave(l, *p);
1219		rv.restore_scratch = 1;
1220	} else if (PAGE_SHIFT == 14 || PAGE_SHIFT == 13)  {
1221		build_tlb_write_entry(p, l, r, tlb_random);
1222		uasm_l_leave(l, *p);
1223		UASM_i_LW(p, scratch, scratchpad_offset(0), 0);
1224	} else {
1225		UASM_i_LW(p, scratch, scratchpad_offset(0), 0);
1226		build_tlb_write_entry(p, l, r, tlb_random);
1227		uasm_l_leave(l, *p);
1228		rv.restore_scratch = 1;
1229	}
1230
1231	uasm_i_eret(p); /* return from trap */
1232
1233	return rv;
1234}
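/*
 * Summary of the fast refill path built above: $1 is pressed into
 * service as a third working register (saved to a KScratch register
 * or to the CVMSEG scratchpad), the configured page-table levels are
 * walked with the loads overlapped against the BadVAddr/Context
 * arithmetic, and the PTE pair is converted and written with tlbwr
 * before eret.
 */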
1235
1236/*
1237 * For a 64-bit kernel, we are using the 64-bit XTLB refill exception
1238 * because EXL == 0.  If we wrap, we can also use the 32 instruction
1239 * slots before the XTLB refill exception handler which belong to the
1240 * unused TLB refill exception.
1241 */
1242#define MIPS64_REFILL_INSNS 32
1243
1244static void build_r4000_tlb_refill_handler(void)
1245{
1246	u32 *p = tlb_handler;
1247	struct uasm_label *l = labels;
1248	struct uasm_reloc *r = relocs;
1249	u32 *f;
1250	unsigned int final_len;
1251	struct mips_huge_tlb_info htlb_info __maybe_unused;
1252	enum vmalloc64_mode vmalloc_mode __maybe_unused;
1253
1254	memset(tlb_handler, 0, sizeof(tlb_handler));
1255	memset(labels, 0, sizeof(labels));
1256	memset(relocs, 0, sizeof(relocs));
1257	memset(final_handler, 0, sizeof(final_handler));
1258
1259	if (IS_ENABLED(CONFIG_64BIT) && (scratch_reg >= 0 || scratchpad_available()) && use_bbit_insns()) {
1260		htlb_info = build_fast_tlb_refill_handler(&p, &l, &r, GPR_K0, GPR_K1,
1261							  scratch_reg);
1262		vmalloc_mode = refill_scratch;
1263	} else {
1264		htlb_info.huge_pte = GPR_K0;
1265		htlb_info.restore_scratch = 0;
1266		htlb_info.need_reload_pte = true;
1267		vmalloc_mode = refill_noscratch;
1268		/*
1269		 * create the plain linear handler
1270		 */
1271		if (bcm1250_m3_war()) {
1272			unsigned int segbits = 44;
1273
1274			uasm_i_dmfc0(&p, GPR_K0, C0_BADVADDR);
1275			uasm_i_dmfc0(&p, GPR_K1, C0_ENTRYHI);
1276			uasm_i_xor(&p, GPR_K0, GPR_K0, GPR_K1);
1277			uasm_i_dsrl_safe(&p, GPR_K1, GPR_K0, 62);
1278			uasm_i_dsrl_safe(&p, GPR_K0, GPR_K0, 12 + 1);
1279			uasm_i_dsll_safe(&p, GPR_K0, GPR_K0, 64 + 12 + 1 - segbits);
1280			uasm_i_or(&p, GPR_K0, GPR_K0, GPR_K1);
1281			uasm_il_bnez(&p, &r, GPR_K0, label_leave);
1282			/* No need for uasm_i_nop */
1283		}
1284
1285#ifdef CONFIG_64BIT
1286		build_get_pmde64(&p, &l, &r, GPR_K0, GPR_K1); /* get pmd in GPR_K1 */
1287#else
1288		build_get_pgde32(&p, GPR_K0, GPR_K1); /* get pgd in GPR_K1 */
1289#endif
1290
1291#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
1292		build_is_huge_pte(&p, &r, GPR_K0, GPR_K1, label_tlb_huge_update);
1293#endif
1294
1295		build_get_ptep(&p, GPR_K0, GPR_K1);
1296		build_update_entries(&p, GPR_K0, GPR_K1);
1297		build_tlb_write_entry(&p, &l, &r, tlb_random);
1298		uasm_l_leave(&l, p);
1299		uasm_i_eret(&p); /* return from trap */
1300	}
1301#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
1302	uasm_l_tlb_huge_update(&l, p);
1303	if (htlb_info.need_reload_pte)
1304		UASM_i_LW(&p, htlb_info.huge_pte, 0, GPR_K1);
1305	build_huge_update_entries(&p, htlb_info.huge_pte, GPR_K1);
1306	build_huge_tlb_write_entry(&p, &l, &r, GPR_K0, tlb_random,
1307				   htlb_info.restore_scratch);
1308#endif
1309
1310#ifdef CONFIG_64BIT
1311	build_get_pgd_vmalloc64(&p, &l, &r, GPR_K0, GPR_K1, vmalloc_mode);
1312#endif
1313
1314	/*
1315	 * Overflow check: For the 64bit handler, we need at least one
1316	 * free instruction slot for the wrap-around branch. In worst
1317	 * case, if the intended insertion point is a delay slot, we
1318	 * need three, with the second nop'ed and the third being
1319	 * unused.
1320	 */
1321	switch (boot_cpu_type()) {
1322	default:
1323		if (sizeof(long) == 4) {
1324		fallthrough;
1325	case CPU_LOONGSON2EF:
1326		/* Loongson2 ebase is different than r4k, we have more space */
1327			if ((p - tlb_handler) > 64)
1328				panic("TLB refill handler space exceeded");
1329			/*
1330			 * Now fold the handler in the TLB refill handler space.
1331			 */
1332			f = final_handler;
1333			/* Simplest case, just copy the handler. */
1334			uasm_copy_handler(relocs, labels, tlb_handler, p, f);
1335			final_len = p - tlb_handler;
1336			break;
1337		} else {
1338			if (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 1)
1339			    || (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 3)
1340				&& uasm_insn_has_bdelay(relocs,
1341							tlb_handler + MIPS64_REFILL_INSNS - 3)))
1342				panic("TLB refill handler space exceeded");
1343			/*
1344			 * Now fold the handler in the TLB refill handler space.
1345			 */
1346			f = final_handler + MIPS64_REFILL_INSNS;
1347			if ((p - tlb_handler) <= MIPS64_REFILL_INSNS) {
1348				/* Just copy the handler. */
1349				uasm_copy_handler(relocs, labels, tlb_handler, p, f);
1350				final_len = p - tlb_handler;
1351			} else {
1352#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
1353				const enum label_id ls = label_tlb_huge_update;
1354#else
1355				const enum label_id ls = label_vmalloc;
1356#endif
1357				u32 *split;
1358				int ov = 0;
1359				int i;
1360
1361				for (i = 0; i < ARRAY_SIZE(labels) && labels[i].lab != ls; i++)
1362					;
1363				BUG_ON(i == ARRAY_SIZE(labels));
1364				split = labels[i].addr;
1365
1366				/*
1367				 * See if we have overflown one way or the other.
1368				 */
1369				if (split > tlb_handler + MIPS64_REFILL_INSNS ||
1370				    split < p - MIPS64_REFILL_INSNS)
1371					ov = 1;
1372
1373				if (ov) {
1374					/*
1375					 * Split two instructions before the end.  One
1376					 * for the branch and one for the instruction
1377					 * in the delay slot.
1378					 */
1379					split = tlb_handler + MIPS64_REFILL_INSNS - 2;
1380
1381					/*
1382					 * If the branch would fall in a delay slot,
1383					 * we must back up an additional instruction
1384					 * so that it is no longer in a delay slot.
1385					 */
1386					if (uasm_insn_has_bdelay(relocs, split - 1))
1387						split--;
1388				}
1389				/* Copy first part of the handler. */
1390				uasm_copy_handler(relocs, labels, tlb_handler, split, f);
1391				f += split - tlb_handler;
1392
1393				if (ov) {
1394					/* Insert branch. */
1395					uasm_l_split(&l, final_handler);
1396					uasm_il_b(&f, &r, label_split);
1397					if (uasm_insn_has_bdelay(relocs, split))
1398						uasm_i_nop(&f);
1399					else {
1400						uasm_copy_handler(relocs, labels,
1401								  split, split + 1, f);
1402						uasm_move_labels(labels, f, f + 1, -1);
1403						f++;
1404						split++;
1405					}
1406				}
1407
1408				/* Copy the rest of the handler. */
1409				uasm_copy_handler(relocs, labels, split, p, final_handler);
1410				final_len = (f - (final_handler + MIPS64_REFILL_INSNS)) +
1411					    (p - split);
1412			}
1413		}
1414		break;
1415	}
1416
1417	uasm_resolve_relocs(relocs, labels);
1418	pr_debug("Wrote TLB refill handler (%u instructions).\n",
1419		 final_len);
1420
1421	memcpy((void *)ebase, final_handler, 0x100);
1422	local_flush_icache_range(ebase, ebase + 0x100);
1423	dump_handler("r4000_tlb_refill", (u32 *)ebase, (u32 *)(ebase + 0x100));
1424}
1425
1426static void setup_pw(void)
1427{
1428	unsigned int pwctl;
1429	unsigned long pgd_i, pgd_w;
1430#ifndef __PAGETABLE_PMD_FOLDED
1431	unsigned long pmd_i, pmd_w;
1432#endif
1433	unsigned long pt_i, pt_w;
1434	unsigned long pte_i, pte_w;
1435#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
1436	unsigned long psn;
1437
1438	psn = ilog2(_PAGE_HUGE);     /* bit used to indicate huge page */
1439#endif
1440	pgd_i = PGDIR_SHIFT;  /* 1st level PGD */
1441#ifndef __PAGETABLE_PMD_FOLDED
1442	pgd_w = PGDIR_SHIFT - PMD_SHIFT + PGD_TABLE_ORDER;
1443
1444	pmd_i = PMD_SHIFT;    /* 2nd level PMD */
1445	pmd_w = PMD_SHIFT - PAGE_SHIFT;
1446#else
1447	pgd_w = PGDIR_SHIFT - PAGE_SHIFT + PGD_TABLE_ORDER;
1448#endif
1449
1450	pt_i  = PAGE_SHIFT;    /* 3rd level PTE */
1451	pt_w  = PAGE_SHIFT - 3;
1452
1453	pte_i = ilog2(_PAGE_GLOBAL);
1454	pte_w = 0;
1455	pwctl = 1 << 30; /* Set PWDirExt */
1456
1457#ifndef __PAGETABLE_PMD_FOLDED
1458	write_c0_pwfield(pgd_i << 24 | pmd_i << 12 | pt_i << 6 | pte_i);
1459	write_c0_pwsize(1 << 30 | pgd_w << 24 | pmd_w << 12 | pt_w << 6 | pte_w);
1460#else
1461	write_c0_pwfield(pgd_i << 24 | pt_i << 6 | pte_i);
1462	write_c0_pwsize(1 << 30 | pgd_w << 24 | pt_w << 6 | pte_w);
1463#endif
1464
1465#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
1466	pwctl |= (1 << 6 | psn);
1467#endif
1468	write_c0_pwctl(pwctl);
1469	write_c0_kpgd((long)swapper_pg_dir);
1470	kscratch_used_mask |= (1 << 7); /* KScratch6 is used for KPGD */
1471}
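/*
 * Roughly, the PWField/PWSize programming above describes the page
 * table layout to the hardware walker: each *_i value is the bit
 * position in the virtual address where that level's index begins,
 * each *_w value is the width of that index, and PTEI is the right
 * shift applied to a loaded PTE so that _PAGE_GLOBAL lines up with
 * the EntryLo G bit.
 */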
1472
1473static void build_loongson3_tlb_refill_handler(void)
1474{
1475	u32 *p = tlb_handler;
1476	struct uasm_label *l = labels;
1477	struct uasm_reloc *r = relocs;
1478
1479	memset(labels, 0, sizeof(labels));
1480	memset(relocs, 0, sizeof(relocs));
1481	memset(tlb_handler, 0, sizeof(tlb_handler));
1482
1483	if (check_for_high_segbits) {
1484		uasm_i_dmfc0(&p, GPR_K0, C0_BADVADDR);
1485		uasm_i_dsrl_safe(&p, GPR_K1, GPR_K0,
1486				PGDIR_SHIFT + PGD_TABLE_ORDER + PAGE_SHIFT - 3);
1487		uasm_il_beqz(&p, &r, GPR_K1, label_vmalloc);
1488		uasm_i_nop(&p);
1489
1490		uasm_il_bgez(&p, &r, GPR_K0, label_large_segbits_fault);
1491		uasm_i_nop(&p);
1492		uasm_l_vmalloc(&l, p);
1493	}
1494
1495	uasm_i_dmfc0(&p, GPR_K1, C0_PGD);
1496
1497	uasm_i_lddir(&p, GPR_K0, GPR_K1, 3);  /* global page dir */
1498#ifndef __PAGETABLE_PMD_FOLDED
1499	uasm_i_lddir(&p, GPR_K1, GPR_K0, 1);  /* middle page dir */
1500#endif
1501	uasm_i_ldpte(&p, GPR_K1, 0);      /* even */
1502	uasm_i_ldpte(&p, GPR_K1, 1);      /* odd */
1503	uasm_i_tlbwr(&p);
1504
1505	/* restore page mask */
1506	if (PM_DEFAULT_MASK >> 16) {
1507		uasm_i_lui(&p, GPR_K0, PM_DEFAULT_MASK >> 16);
1508		uasm_i_ori(&p, GPR_K0, GPR_K0, PM_DEFAULT_MASK & 0xffff);
1509		uasm_i_mtc0(&p, GPR_K0, C0_PAGEMASK);
1510	} else if (PM_DEFAULT_MASK) {
1511		uasm_i_ori(&p, GPR_K0, 0, PM_DEFAULT_MASK);
1512		uasm_i_mtc0(&p, GPR_K0, C0_PAGEMASK);
1513	} else {
1514		uasm_i_mtc0(&p, 0, C0_PAGEMASK);
1515	}
1516
1517	uasm_i_eret(&p);
1518
1519	if (check_for_high_segbits) {
1520		uasm_l_large_segbits_fault(&l, p);
1521		UASM_i_LA(&p, GPR_K1, (unsigned long)tlb_do_page_fault_0);
1522		uasm_i_jr(&p, GPR_K1);
1523		uasm_i_nop(&p);
1524	}
1525
1526	uasm_resolve_relocs(relocs, labels);
1527	memcpy((void *)(ebase + 0x80), tlb_handler, 0x80);
1528	local_flush_icache_range(ebase + 0x80, ebase + 0x100);
1529	dump_handler("loongson3_tlb_refill",
1530		     (u32 *)(ebase + 0x80), (u32 *)(ebase + 0x100));
1531}
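/*
 * Loongson-3's lddir/ldpte instructions walk one directory level and
 * load one PTE into the EntryLo registers respectively, so the refill
 * fastpath above needs no explicit pointer arithmetic.  The handler is
 * copied to ebase + 0x80, i.e. the XTLB refill vector used by 64-bit
 * kernels.
 */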
1532
1533static void build_setup_pgd(void)
1534{
1535	const int a0 = 4;
1536	const int __maybe_unused a1 = 5;
1537	const int __maybe_unused a2 = 6;
1538	u32 *p = (u32 *)msk_isa16_mode((ulong)tlbmiss_handler_setup_pgd);
1539#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
1540	long pgdc = (long)pgd_current;
1541#endif
1542
1543	memset(p, 0, tlbmiss_handler_setup_pgd_end - (char *)p);
1544	memset(labels, 0, sizeof(labels));
1545	memset(relocs, 0, sizeof(relocs));
1546	pgd_reg = allocate_kscratch();
1547#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
1548	if (pgd_reg == -1) {
1549		struct uasm_label *l = labels;
1550		struct uasm_reloc *r = relocs;
1551
1552		/* PGD << 11 in c0_Context */
1553		/*
1554		 * If it is a ckseg0 address, convert to a physical
1555		 * address.  Shifting right by 29 and adding 4 will
1556		 * result in zero for these addresses.
1558		 */
1559		UASM_i_SRA(&p, a1, a0, 29);
1560		UASM_i_ADDIU(&p, a1, a1, 4);
1561		uasm_il_bnez(&p, &r, a1, label_tlbl_goaround1);
1562		uasm_i_nop(&p);
1563		uasm_i_dinsm(&p, a0, 0, 29, 64 - 29);
1564		uasm_l_tlbl_goaround1(&l, p);
1565		UASM_i_SLL(&p, a0, a0, 11);
1566		UASM_i_MTC0(&p, a0, C0_CONTEXT);
1567		uasm_i_jr(&p, 31);
1568		uasm_i_ehb(&p);
1569	} else {
1570		/* PGD in c0_KScratch */
1571		if (cpu_has_ldpte)
1572			UASM_i_MTC0(&p, a0, C0_PWBASE);
1573		else
1574			UASM_i_MTC0(&p, a0, c0_kscratch(), pgd_reg);
1575		uasm_i_jr(&p, 31);
1576		uasm_i_ehb(&p);
1577	}
1578#else
1579#ifdef CONFIG_SMP
1580	/* Save PGD to pgd_current[smp_processor_id()] */
1581	UASM_i_CPUID_MFC0(&p, a1, SMP_CPUID_REG);
1582	UASM_i_SRL_SAFE(&p, a1, a1, SMP_CPUID_PTRSHIFT);
1583	UASM_i_LA_mostly(&p, a2, pgdc);
1584	UASM_i_ADDU(&p, a2, a2, a1);
1585	UASM_i_SW(&p, a0, uasm_rel_lo(pgdc), a2);
1586#else
1587	UASM_i_LA_mostly(&p, a2, pgdc);
1588	UASM_i_SW(&p, a0, uasm_rel_lo(pgdc), a2);
1589#endif /* SMP */
1590
1591	/* if pgd_reg is allocated, save PGD also to scratch register */
1592	if (pgd_reg != -1) {
1593		UASM_i_MTC0(&p, a0, c0_kscratch(), pgd_reg);
1594		uasm_i_jr(&p, 31);
1595		uasm_i_ehb(&p);
1596	} else {
1597		uasm_i_jr(&p, 31);
1598		uasm_i_nop(&p);
1599	}
1600#endif
1601	if (p >= (u32 *)tlbmiss_handler_setup_pgd_end)
1602		panic("tlbmiss_handler_setup_pgd space exceeded");
1603
1604	uasm_resolve_relocs(relocs, labels);
1605	pr_debug("Wrote tlbmiss_handler_setup_pgd (%u instructions).\n",
1606		 (unsigned int)(p - (u32 *)tlbmiss_handler_setup_pgd));
1607
1608	dump_handler("tlbmiss_handler", tlbmiss_handler_setup_pgd,
1609					tlbmiss_handler_setup_pgd_end);
1610}
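/*
 * tlbmiss_handler_setup_pgd() is called whenever the kernel installs
 * a new PGD (e.g. on an mm switch); it stashes the PGD pointer
 * wherever the refill handlers expect to find it: CP0 Context
 * (shifted left by 11), a KScratch register, PWBase on ldpte-capable
 * CPUs, or the pgd_current[] array.
 */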
1611
1612static void
1613iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr)
1614{
1615#ifdef CONFIG_SMP
1616	if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS))
1617		uasm_i_sync(p, 0);
1618# ifdef CONFIG_PHYS_ADDR_T_64BIT
1619	if (cpu_has_64bits)
1620		uasm_i_lld(p, pte, 0, ptr);
1621	else
1622# endif
1623		UASM_i_LL(p, pte, 0, ptr);
1624#else
1625# ifdef CONFIG_PHYS_ADDR_T_64BIT
1626	if (cpu_has_64bits)
1627		uasm_i_ld(p, pte, 0, ptr);
1628	else
1629# endif
1630		UASM_i_LW(p, pte, 0, ptr);
1631#endif
1632}
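/*
 * On SMP the PTE is fetched with a load-linked so that the store-
 * conditional emitted by iPTE_SW() below fails if another CPU updated
 * the entry in the meantime, forcing the sequence to be retried.
 */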
1633
1634static void
1635iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
1636	unsigned int mode, unsigned int scratch)
1637{
1638	unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY);
1639	unsigned int swmode = mode & ~hwmode;
1640
1641	if (IS_ENABLED(CONFIG_XPA) && !cpu_has_64bits) {
1642		uasm_i_lui(p, scratch, swmode >> 16);
1643		uasm_i_or(p, pte, pte, scratch);
1644		BUG_ON(swmode & 0xffff);
1645	} else {
1646		uasm_i_ori(p, pte, pte, mode);
1647	}
1648
1649#ifdef CONFIG_SMP
1650# ifdef CONFIG_PHYS_ADDR_T_64BIT
1651	if (cpu_has_64bits)
1652		uasm_i_scd(p, pte, 0, ptr);
1653	else
1654# endif
1655		UASM_i_SC(p, pte, 0, ptr);
1656
1657	if (r10000_llsc_war())
1658		uasm_il_beqzl(p, r, pte, label_smp_pgtable_change);
1659	else
1660		uasm_il_beqz(p, r, pte, label_smp_pgtable_change);
1661
1662# ifdef CONFIG_PHYS_ADDR_T_64BIT
1663	if (!cpu_has_64bits) {
1664		/* no uasm_i_nop needed */
1665		uasm_i_ll(p, pte, sizeof(pte_t) / 2, ptr);
1666		uasm_i_ori(p, pte, pte, hwmode);
1667		BUG_ON(hwmode & ~0xffff);
1668		uasm_i_sc(p, pte, sizeof(pte_t) / 2, ptr);
1669		uasm_il_beqz(p, r, pte, label_smp_pgtable_change);
1670		/* no uasm_i_nop needed */
1671		uasm_i_lw(p, pte, 0, ptr);
1672	} else
1673		uasm_i_nop(p);
1674# else
1675	uasm_i_nop(p);
1676# endif
1677#else
1678# ifdef CONFIG_PHYS_ADDR_T_64BIT
1679	if (cpu_has_64bits)
1680		uasm_i_sd(p, pte, 0, ptr);
1681	else
1682# endif
1683		UASM_i_SW(p, pte, 0, ptr);
1684
1685# ifdef CONFIG_PHYS_ADDR_T_64BIT
1686	if (!cpu_has_64bits) {
1687		uasm_i_lw(p, pte, sizeof(pte_t) / 2, ptr);
1688		uasm_i_ori(p, pte, pte, hwmode);
1689		BUG_ON(hwmode & ~0xffff);
1690		uasm_i_sw(p, pte, sizeof(pte_t) / 2, ptr);
1691		uasm_i_lw(p, pte, 0, ptr);
1692	}
1693# endif
1694#endif
1695}
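/*
 * The branch back to label_smp_pgtable_change retries the ll/sc
 * sequence when the store-conditional fails.  With XPA (64-bit PTEs
 * on a 32-bit CPU) the PTE is split across two words, so the software
 * bits and the hardware _PAGE_VALID/_PAGE_DIRTY bits are updated by
 * separate load/modify/store sequences.
 */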
1696
1697/*
1698 * Check if PTE is present, if not then jump to LABEL. PTR points to
1699 * the page table where this PTE is located, PTE will be re-loaded
1700 * with its original value.
1701 */
1702static void
1703build_pte_present(u32 **p, struct uasm_reloc **r,
1704		  int pte, int ptr, int scratch, enum label_id lid)
1705{
1706	int t = scratch >= 0 ? scratch : pte;
1707	int cur = pte;
1708
1709	if (cpu_has_rixi) {
1710		if (use_bbit_insns()) {
1711			uasm_il_bbit0(p, r, pte, ilog2(_PAGE_PRESENT), lid);
1712			uasm_i_nop(p);
1713		} else {
1714			if (_PAGE_PRESENT_SHIFT) {
1715				uasm_i_srl(p, t, cur, _PAGE_PRESENT_SHIFT);
1716				cur = t;
1717			}
1718			uasm_i_andi(p, t, cur, 1);
1719			uasm_il_beqz(p, r, t, lid);
1720			if (pte == t)
1721				/* You lose the SMP race :-(*/
1722				iPTE_LW(p, pte, ptr);
1723		}
1724	} else {
1725		if (_PAGE_PRESENT_SHIFT) {
1726			uasm_i_srl(p, t, cur, _PAGE_PRESENT_SHIFT);
1727			cur = t;
1728		}
1729		uasm_i_andi(p, t, cur,
1730			(_PAGE_PRESENT | _PAGE_NO_READ) >> _PAGE_PRESENT_SHIFT);
1731		uasm_i_xori(p, t, t, _PAGE_PRESENT >> _PAGE_PRESENT_SHIFT);
1732		uasm_il_bnez(p, r, t, lid);
1733		if (pte == t)
1734			/* You lose the SMP race :-(*/
1735			iPTE_LW(p, pte, ptr);
1736	}
1737}
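/*
 * In the non-RIXI case above the andi/xori pair leaves a non-zero
 * value, and hence branches to the fault label, unless _PAGE_PRESENT
 * is set and _PAGE_NO_READ is clear, which is exactly the readable
 * case.
 */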
1738
1739/* Make PTE valid, store result in PTR. */
1740static void
1741build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte,
1742		 unsigned int ptr, unsigned int scratch)
1743{
1744	unsigned int mode = _PAGE_VALID | _PAGE_ACCESSED;
1745
1746	iPTE_SW(p, r, pte, ptr, mode, scratch);
1747}
1748
1749/*
1750 * Check if PTE can be written to, if not branch to LABEL. Regardless
1751 * restore PTE with value from PTR when done.
1752 */
1753static void
1754build_pte_writable(u32 **p, struct uasm_reloc **r,
1755		   unsigned int pte, unsigned int ptr, int scratch,
1756		   enum label_id lid)
1757{
1758	int t = scratch >= 0 ? scratch : pte;
1759	int cur = pte;
1760
1761	if (_PAGE_PRESENT_SHIFT) {
1762		uasm_i_srl(p, t, cur, _PAGE_PRESENT_SHIFT);
1763		cur = t;
1764	}
1765	uasm_i_andi(p, t, cur,
1766		    (_PAGE_PRESENT | _PAGE_WRITE) >> _PAGE_PRESENT_SHIFT);
1767	uasm_i_xori(p, t, t,
1768		    (_PAGE_PRESENT | _PAGE_WRITE) >> _PAGE_PRESENT_SHIFT);
1769	uasm_il_bnez(p, r, t, lid);
1770	if (pte == t)
1771		/* You lose the SMP race :-(*/
1772		iPTE_LW(p, pte, ptr);
1773	else
1774		uasm_i_nop(p);
1775}
1776
1777/* Make PTE writable, update software status bits as well, then store
1778 * at PTR.
1779 */
1780static void
1781build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte,
1782		 unsigned int ptr, unsigned int scratch)
1783{
1784	unsigned int mode = (_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID
1785			     | _PAGE_DIRTY);
1786
1787	iPTE_SW(p, r, pte, ptr, mode, scratch);
1788}
1789
1790/*
1791 * Check if PTE can be modified, if not branch to LABEL. Regardless
1792 * restore PTE with value from PTR when done.
1793 */
1794static void
1795build_pte_modifiable(u32 **p, struct uasm_reloc **r,
1796		     unsigned int pte, unsigned int ptr, int scratch,
1797		     enum label_id lid)
1798{
1799	if (use_bbit_insns()) {
1800		uasm_il_bbit0(p, r, pte, ilog2(_PAGE_WRITE), lid);
1801		uasm_i_nop(p);
1802	} else {
1803		int t = scratch >= 0 ? scratch : pte;
1804		uasm_i_srl(p, t, pte, _PAGE_WRITE_SHIFT);
1805		uasm_i_andi(p, t, t, 1);
1806		uasm_il_beqz(p, r, t, lid);
1807		if (pte == t)
1808			/* You lose the SMP race :-(*/
1809			iPTE_LW(p, pte, ptr);
1810	}
1811}
1812
1813#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
1814
1815
1816/*
1817 * R3000 style TLB load/store/modify handlers.
1818 */
1819
1820/*
1821 * This places the pte into ENTRYLO0 and writes it with tlbwi.
1822 * Then it returns.
1823 */
1824static void
1825build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp)
1826{
1827	uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
1828	uasm_i_mfc0(p, tmp, C0_EPC); /* cp0 delay */
1829	uasm_i_tlbwi(p);
1830	uasm_i_jr(p, tmp);
1831	uasm_i_rfe(p); /* branch delay */
1832}
1833
1834/*
1835 * This places the pte into ENTRYLO0 and writes it with tlbwi
1836 * or tlbwr as appropriate.  This is because the index register
1837 * may have the probe fail bit set as a result of a trap on a
1838 * kseg2 access, i.e. without refill.  Then it returns.
1839 */
1840static void
1841build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l,
1842			     struct uasm_reloc **r, unsigned int pte,
1843			     unsigned int tmp)
1844{
1845	uasm_i_mfc0(p, tmp, C0_INDEX);
1846	uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
1847	uasm_il_bltz(p, r, tmp, label_r3000_write_probe_fail); /* cp0 delay */
1848	uasm_i_mfc0(p, tmp, C0_EPC); /* branch delay */
1849	uasm_i_tlbwi(p); /* cp0 delay */
1850	uasm_i_jr(p, tmp);
1851	uasm_i_rfe(p); /* branch delay */
1852	uasm_l_r3000_write_probe_fail(l, *p);
1853	uasm_i_tlbwr(p); /* cp0 delay */
1854	uasm_i_jr(p, tmp);
1855	uasm_i_rfe(p); /* branch delay */
1856}
1857
1858static void
1859build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte,
1860				   unsigned int ptr)
1861{
1862	long pgdc = (long)pgd_current;
1863
1864	uasm_i_mfc0(p, pte, C0_BADVADDR);
1865	uasm_i_lui(p, ptr, uasm_rel_hi(pgdc)); /* cp0 delay */
1866	uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
1867	uasm_i_srl(p, pte, pte, 22); /* load delay */
1868	uasm_i_sll(p, pte, pte, 2);
1869	uasm_i_addu(p, ptr, ptr, pte);
1870	uasm_i_mfc0(p, pte, C0_CONTEXT);
1871	uasm_i_lw(p, ptr, 0, ptr); /* cp0 delay */
1872	uasm_i_andi(p, pte, pte, 0xffc); /* load delay */
1873	uasm_i_addu(p, ptr, ptr, pte);
1874	uasm_i_lw(p, pte, 0, ptr);
1875	uasm_i_tlbp(p); /* load delay */
1876}
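/*
 * The R3000 head above is the classic two-level software walk:
 * BadVAddr >> 22 indexes the pgd from pgd_current, the low bits of
 * CP0 Context (masked with 0xffc) give the PTE offset within the
 * page table, and the final tlbp locates the TLB entry that the
 * caller will overwrite.
 */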
1877
1878static void build_r3000_tlb_load_handler(void)
1879{
1880	u32 *p = (u32 *)handle_tlbl;
1881	struct uasm_label *l = labels;
1882	struct uasm_reloc *r = relocs;
1883
1884	memset(p, 0, handle_tlbl_end - (char *)p);
1885	memset(labels, 0, sizeof(labels));
1886	memset(relocs, 0, sizeof(relocs));
1887
1888	build_r3000_tlbchange_handler_head(&p, GPR_K0, GPR_K1);
1889	build_pte_present(&p, &r, GPR_K0, GPR_K1, -1, label_nopage_tlbl);
1890	uasm_i_nop(&p); /* load delay */
1891	build_make_valid(&p, &r, GPR_K0, GPR_K1, -1);
1892	build_r3000_tlb_reload_write(&p, &l, &r, GPR_K0, GPR_K1);
1893
1894	uasm_l_nopage_tlbl(&l, p);
1895	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
1896	uasm_i_nop(&p);
1897
1898	if (p >= (u32 *)handle_tlbl_end)
1899		panic("TLB load handler fastpath space exceeded");
1900
1901	uasm_resolve_relocs(relocs, labels);
1902	pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
1903		 (unsigned int)(p - (u32 *)handle_tlbl));
1904
1905	dump_handler("r3000_tlb_load", handle_tlbl, handle_tlbl_end);
1906}
1907
1908static void build_r3000_tlb_store_handler(void)
1909{
1910	u32 *p = (u32 *)handle_tlbs;
1911	struct uasm_label *l = labels;
1912	struct uasm_reloc *r = relocs;
1913
1914	memset(p, 0, handle_tlbs_end - (char *)p);
1915	memset(labels, 0, sizeof(labels));
1916	memset(relocs, 0, sizeof(relocs));
1917
1918	build_r3000_tlbchange_handler_head(&p, GPR_K0, GPR_K1);
1919	build_pte_writable(&p, &r, GPR_K0, GPR_K1, -1, label_nopage_tlbs);
1920	uasm_i_nop(&p); /* load delay */
1921	build_make_write(&p, &r, GPR_K0, GPR_K1, -1);
1922	build_r3000_tlb_reload_write(&p, &l, &r, GPR_K0, GPR_K1);
1923
1924	uasm_l_nopage_tlbs(&l, p);
1925	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
1926	uasm_i_nop(&p);
1927
1928	if (p >= (u32 *)handle_tlbs_end)
1929		panic("TLB store handler fastpath space exceeded");
1930
1931	uasm_resolve_relocs(relocs, labels);
1932	pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
1933		 (unsigned int)(p - (u32 *)handle_tlbs));
1934
1935	dump_handler("r3000_tlb_store", handle_tlbs, handle_tlbs_end);
1936}
1937
1938static void build_r3000_tlb_modify_handler(void)
1939{
1940	u32 *p = (u32 *)handle_tlbm;
1941	struct uasm_label *l = labels;
1942	struct uasm_reloc *r = relocs;
1943
1944	memset(p, 0, handle_tlbm_end - (char *)p);
1945	memset(labels, 0, sizeof(labels));
1946	memset(relocs, 0, sizeof(relocs));
1947
1948	build_r3000_tlbchange_handler_head(&p, GPR_K0, GPR_K1);
1949	build_pte_modifiable(&p, &r, GPR_K0, GPR_K1,  -1, label_nopage_tlbm);
1950	uasm_i_nop(&p); /* load delay */
1951	build_make_write(&p, &r, GPR_K0, GPR_K1, -1);
1952	build_r3000_pte_reload_tlbwi(&p, GPR_K0, GPR_K1);
1953
1954	uasm_l_nopage_tlbm(&l, p);
1955	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
1956	uasm_i_nop(&p);
1957
1958	if (p >= (u32 *)handle_tlbm_end)
1959		panic("TLB modify handler fastpath space exceeded");
1960
1961	uasm_resolve_relocs(relocs, labels);
1962	pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
1963		 (unsigned int)(p - (u32 *)handle_tlbm));
1964
1965	dump_handler("r3000_tlb_modify", handle_tlbm, handle_tlbm_end);
1966}
1967#endif /* CONFIG_MIPS_PGD_C0_CONTEXT */
1968
1969static bool cpu_has_tlbex_tlbp_race(void)
1970{
1971	/*
1972	 * When a Hardware Table Walker is running it can replace TLB entries
1973	 * at any time, leading to a race between it & the CPU.
1974	 */
1975	if (cpu_has_htw)
1976		return true;
1977
1978	/*
1979	 * If the CPU shares FTLB RAM with its siblings then our entry may be
1980	 * replaced at any time by a sibling performing a write to the FTLB.
1981	 */
1982	if (cpu_has_shared_ftlb_ram)
1983		return true;
1984
1985	/* In all other cases there ought to be no race condition to handle */
1986	return false;
1987}
1988
1989/*
1990 * R4000 style TLB load/store/modify handlers.
1991 */
1992static struct work_registers
1993build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
1994				   struct uasm_reloc **r)
1995{
1996	struct work_registers wr = build_get_work_registers(p);
1997
1998#ifdef CONFIG_64BIT
1999	build_get_pmde64(p, l, r, wr.r1, wr.r2); /* get pmd in ptr */
2000#else
2001	build_get_pgde32(p, wr.r1, wr.r2); /* get pgd in ptr */
2002#endif
2003
2004#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
2005	/*
2006	 * For huge tlb entries, pmd doesn't contain an address but
2007	 * instead contains the tlb pte. Check the PAGE_HUGE bit and
2008	 * see if we need to jump to huge tlb processing.
2009	 */
2010	build_is_huge_pte(p, r, wr.r1, wr.r2, label_tlb_huge_update);
2011#endif
2012
2013	UASM_i_MFC0(p, wr.r1, C0_BADVADDR);
2014	UASM_i_LW(p, wr.r2, 0, wr.r2);
2015	UASM_i_SRL(p, wr.r1, wr.r1, PAGE_SHIFT - PTE_T_LOG2);
2016	uasm_i_andi(p, wr.r1, wr.r1, (PTRS_PER_PTE - 1) << PTE_T_LOG2);
2017	UASM_i_ADDU(p, wr.r2, wr.r2, wr.r1);
2018
2019#ifdef CONFIG_SMP
2020	uasm_l_smp_pgtable_change(l, *p);
2021#endif
2022	iPTE_LW(p, wr.r1, wr.r2); /* get even pte */
2023	if (!m4kc_tlbp_war()) {
2024		build_tlb_probe_entry(p);
2025		if (cpu_has_tlbex_tlbp_race()) {
2026			/* race condition happens, leaving */
2027			uasm_i_ehb(p);
2028			uasm_i_mfc0(p, wr.r3, C0_INDEX);
2029			uasm_il_bltz(p, r, wr.r3, label_leave);
2030			uasm_i_nop(p);
2031		}
2032	}
2033	return wr;
2034}
2035
2036static void
2037build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
2038				   struct uasm_reloc **r, unsigned int tmp,
2039				   unsigned int ptr)
2040{
2041	uasm_i_ori(p, ptr, ptr, sizeof(pte_t));
2042	uasm_i_xori(p, ptr, ptr, sizeof(pte_t));
2043	build_update_entries(p, tmp, ptr);
2044	build_tlb_write_entry(p, l, r, tlb_indexed);
2045	uasm_l_leave(l, *p);
2046	build_restore_work_registers(p);
2047	uasm_i_eret(p); /* return from trap */
2048
2049#ifdef CONFIG_64BIT
2050	build_get_pgd_vmalloc64(p, l, r, tmp, ptr, not_refill);
2051#endif
2052}
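/*
 * The ori/xori pair with sizeof(pte_t) above clears that bit in ptr
 * whatever its previous value, rounding ptr down to the even PTE of
 * the pair before both EntryLo registers are reloaded.
 */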
2053
2054static void build_r4000_tlb_load_handler(void)
2055{
2056	u32 *p = (u32 *)msk_isa16_mode((ulong)handle_tlbl);
2057	struct uasm_label *l = labels;
2058	struct uasm_reloc *r = relocs;
2059	struct work_registers wr;
2060
2061	memset(p, 0, handle_tlbl_end - (char *)p);
2062	memset(labels, 0, sizeof(labels));
2063	memset(relocs, 0, sizeof(relocs));
2064
2065	if (bcm1250_m3_war()) {
2066		unsigned int segbits = 44;
2067
2068		uasm_i_dmfc0(&p, GPR_K0, C0_BADVADDR);
2069		uasm_i_dmfc0(&p, GPR_K1, C0_ENTRYHI);
2070		uasm_i_xor(&p, GPR_K0, GPR_K0, GPR_K1);
2071		uasm_i_dsrl_safe(&p, GPR_K1, GPR_K0, 62);
2072		uasm_i_dsrl_safe(&p, GPR_K0, GPR_K0, 12 + 1);
2073		uasm_i_dsll_safe(&p, GPR_K0, GPR_K0, 64 + 12 + 1 - segbits);
2074		uasm_i_or(&p, GPR_K0, GPR_K0, GPR_K1);
2075		uasm_il_bnez(&p, &r, GPR_K0, label_leave);
2076		/* No need for uasm_i_nop */
2077	}
2078
2079	wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
2080	build_pte_present(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbl);
2081	if (m4kc_tlbp_war())
2082		build_tlb_probe_entry(&p);
2083
2084	if (cpu_has_rixi && !cpu_has_rixiex) {
2085		/*
2086		 * If the page is not _PAGE_VALID, RI or XI could not
 2087		 * have triggered it.  Skip the expensive test.
2088		 */
2089		if (use_bbit_insns()) {
2090			uasm_il_bbit0(&p, &r, wr.r1, ilog2(_PAGE_VALID),
2091				      label_tlbl_goaround1);
2092		} else {
2093			uasm_i_andi(&p, wr.r3, wr.r1, _PAGE_VALID);
2094			uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround1);
2095		}
2096		uasm_i_nop(&p);
2097
2098		/*
2099		 * Warn if something may race with us & replace the TLB entry
2100		 * before we read it here. Everything with such races should
2101		 * also have dedicated RiXi exception handlers, so this
2102		 * shouldn't be hit.
2103		 */
2104		WARN(cpu_has_tlbex_tlbp_race(), "Unhandled race in RiXi path");
2105
2106		uasm_i_tlbr(&p);
2107
2108		if (cpu_has_mips_r2_exec_hazard)
2109			uasm_i_ehb(&p);
2110
2111		/* Examine  entrylo 0 or 1 based on ptr. */
2112		if (use_bbit_insns()) {
2113			uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8);
2114		} else {
2115			uasm_i_andi(&p, wr.r3, wr.r2, sizeof(pte_t));
2116			uasm_i_beqz(&p, wr.r3, 8);
2117		}
2118		/* load it in the delay slot*/
2119		UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO0);
2120		/* load it if ptr is odd */
2121		UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO1);
2122		/*
2123		 * If the entryLo (now in wr.r3) is valid (bit 1), RI or
2124		 * XI must have triggered it.
2125		 */
2126		if (use_bbit_insns()) {
2127			uasm_il_bbit1(&p, &r, wr.r3, 1, label_nopage_tlbl);
2128			uasm_i_nop(&p);
2129			uasm_l_tlbl_goaround1(&l, p);
2130		} else {
2131			uasm_i_andi(&p, wr.r3, wr.r3, 2);
2132			uasm_il_bnez(&p, &r, wr.r3, label_nopage_tlbl);
2133			uasm_i_nop(&p);
2134		}
2135		uasm_l_tlbl_goaround1(&l, p);
2136	}
2137	build_make_valid(&p, &r, wr.r1, wr.r2, wr.r3);
2138	build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
2139
2140#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
2141	/*
2142	 * This is the entry point when build_r4000_tlbchange_handler_head
2143	 * spots a huge page.
2144	 */
2145	uasm_l_tlb_huge_update(&l, p);
2146	iPTE_LW(&p, wr.r1, wr.r2);
2147	build_pte_present(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbl);
2148	build_tlb_probe_entry(&p);
2149
2150	if (cpu_has_rixi && !cpu_has_rixiex) {
2151		/*
2152		 * If the page is not _PAGE_VALID, RI or XI could not
 2153		 * have triggered it.  Skip the expensive test.
2154		 */
2155		if (use_bbit_insns()) {
2156			uasm_il_bbit0(&p, &r, wr.r1, ilog2(_PAGE_VALID),
2157				      label_tlbl_goaround2);
2158		} else {
2159			uasm_i_andi(&p, wr.r3, wr.r1, _PAGE_VALID);
2160			uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround2);
2161		}
2162		uasm_i_nop(&p);
2163
2164		/*
2165		 * Warn if something may race with us & replace the TLB entry
2166		 * before we read it here. Everything with such races should
2167		 * also have dedicated RiXi exception handlers, so this
2168		 * shouldn't be hit.
2169		 */
2170		WARN(cpu_has_tlbex_tlbp_race(), "Unhandled race in RiXi path");
2171
2172		uasm_i_tlbr(&p);
2173
2174		if (cpu_has_mips_r2_exec_hazard)
2175			uasm_i_ehb(&p);
2176
2177		/* Examine  entrylo 0 or 1 based on ptr. */
2178		if (use_bbit_insns()) {
2179			uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8);
2180		} else {
2181			uasm_i_andi(&p, wr.r3, wr.r2, sizeof(pte_t));
2182			uasm_i_beqz(&p, wr.r3, 8);
2183		}
2184		/* load it in the delay slot*/
2185		UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO0);
2186		/* load it if ptr is odd */
2187		UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO1);
2188		/*
2189		 * If the entryLo (now in wr.r3) is valid (bit 1), RI or
2190		 * XI must have triggered it.
2191		 */
2192		if (use_bbit_insns()) {
2193			uasm_il_bbit0(&p, &r, wr.r3, 1, label_tlbl_goaround2);
2194		} else {
2195			uasm_i_andi(&p, wr.r3, wr.r3, 2);
2196			uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround2);
2197		}
2198		if (PM_DEFAULT_MASK == 0)
2199			uasm_i_nop(&p);
2200		/*
2201		 * We clobbered C0_PAGEMASK, restore it.  On the other branch
2202		 * it is restored in build_huge_tlb_write_entry.
2203		 */
2204		build_restore_pagemask(&p, &r, wr.r3, label_nopage_tlbl, 0);
2205
2206		uasm_l_tlbl_goaround2(&l, p);
2207	}
2208	uasm_i_ori(&p, wr.r1, wr.r1, (_PAGE_ACCESSED | _PAGE_VALID));
2209	build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 1);
2210#endif
2211
2212	uasm_l_nopage_tlbl(&l, p);
2213	if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS))
2214		uasm_i_sync(&p, 0);
2215	build_restore_work_registers(&p);
2216#ifdef CONFIG_CPU_MICROMIPS
2217	if ((unsigned long)tlb_do_page_fault_0 & 1) {
2218		uasm_i_lui(&p, GPR_K0, uasm_rel_hi((long)tlb_do_page_fault_0));
2219		uasm_i_addiu(&p, GPR_K0, GPR_K0, uasm_rel_lo((long)tlb_do_page_fault_0));
2220		uasm_i_jr(&p, GPR_K0);
2221	} else
2222#endif
2223	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
2224	uasm_i_nop(&p);
2225
2226	if (p >= (u32 *)handle_tlbl_end)
2227		panic("TLB load handler fastpath space exceeded");
2228
2229	uasm_resolve_relocs(relocs, labels);
2230	pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
2231		 (unsigned int)(p - (u32 *)handle_tlbl));
2232
2233	dump_handler("r4000_tlb_load", handle_tlbl, handle_tlbl_end);
2234}
2235
2236static void build_r4000_tlb_store_handler(void)
2237{
2238	u32 *p = (u32 *)msk_isa16_mode((ulong)handle_tlbs);
2239	struct uasm_label *l = labels;
2240	struct uasm_reloc *r = relocs;
2241	struct work_registers wr;
2242
2243	memset(p, 0, handle_tlbs_end - (char *)p);
2244	memset(labels, 0, sizeof(labels));
2245	memset(relocs, 0, sizeof(relocs));
2246
2247	wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
2248	build_pte_writable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbs);
2249	if (m4kc_tlbp_war())
2250		build_tlb_probe_entry(&p);
2251	build_make_write(&p, &r, wr.r1, wr.r2, wr.r3);
2252	build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
2253
2254#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
2255	/*
2256	 * This is the entry point when
2257	 * build_r4000_tlbchange_handler_head spots a huge page.
2258	 */
2259	uasm_l_tlb_huge_update(&l, p);
2260	iPTE_LW(&p, wr.r1, wr.r2);
2261	build_pte_writable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbs);
2262	build_tlb_probe_entry(&p);
2263	uasm_i_ori(&p, wr.r1, wr.r1,
2264		   _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
2265	build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 1);
2266#endif
2267
2268	uasm_l_nopage_tlbs(&l, p);
2269	if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS))
2270		uasm_i_sync(&p, 0);
2271	build_restore_work_registers(&p);
2272#ifdef CONFIG_CPU_MICROMIPS
2273	if ((unsigned long)tlb_do_page_fault_1 & 1) {
2274		uasm_i_lui(&p, GPR_K0, uasm_rel_hi((long)tlb_do_page_fault_1));
2275		uasm_i_addiu(&p, GPR_K0, GPR_K0, uasm_rel_lo((long)tlb_do_page_fault_1));
2276		uasm_i_jr(&p, GPR_K0);
2277	} else
2278#endif
2279	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
2280	uasm_i_nop(&p);
2281
2282	if (p >= (u32 *)handle_tlbs_end)
2283		panic("TLB store handler fastpath space exceeded");
2284
2285	uasm_resolve_relocs(relocs, labels);
2286	pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
2287		 (unsigned int)(p - (u32 *)handle_tlbs));
2288
2289	dump_handler("r4000_tlb_store", handle_tlbs, handle_tlbs_end);
2290}
2291
2292static void build_r4000_tlb_modify_handler(void)
2293{
2294	u32 *p = (u32 *)msk_isa16_mode((ulong)handle_tlbm);
2295	struct uasm_label *l = labels;
2296	struct uasm_reloc *r = relocs;
2297	struct work_registers wr;
2298
2299	memset(p, 0, handle_tlbm_end - (char *)p);
2300	memset(labels, 0, sizeof(labels));
2301	memset(relocs, 0, sizeof(relocs));
2302
2303	wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
2304	build_pte_modifiable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbm);
2305	if (m4kc_tlbp_war())
2306		build_tlb_probe_entry(&p);
2307	/* Present and writable bits set, set accessed and dirty bits. */
2308	build_make_write(&p, &r, wr.r1, wr.r2, wr.r3);
2309	build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
2310
2311#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
2312	/*
2313	 * This is the entry point when
2314	 * build_r4000_tlbchange_handler_head spots a huge page.
2315	 */
2316	uasm_l_tlb_huge_update(&l, p);
2317	iPTE_LW(&p, wr.r1, wr.r2);
2318	build_pte_modifiable(&p, &r, wr.r1, wr.r2,  wr.r3, label_nopage_tlbm);
2319	build_tlb_probe_entry(&p);
2320	uasm_i_ori(&p, wr.r1, wr.r1,
2321		   _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
2322	build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 0);
2323#endif
2324
2325	uasm_l_nopage_tlbm(&l, p);
2326	if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS))
2327		uasm_i_sync(&p, 0);
2328	build_restore_work_registers(&p);
2329#ifdef CONFIG_CPU_MICROMIPS
2330	if ((unsigned long)tlb_do_page_fault_1 & 1) {
2331		uasm_i_lui(&p, GPR_K0, uasm_rel_hi((long)tlb_do_page_fault_1));
2332		uasm_i_addiu(&p, GPR_K0, GPR_K0, uasm_rel_lo((long)tlb_do_page_fault_1));
2333		uasm_i_jr(&p, GPR_K0);
2334	} else
2335#endif
2336	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
2337	uasm_i_nop(&p);
2338
2339	if (p >= (u32 *)handle_tlbm_end)
2340		panic("TLB modify handler fastpath space exceeded");
2341
2342	uasm_resolve_relocs(relocs, labels);
2343	pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
2344		 (unsigned int)(p - (u32 *)handle_tlbm));
2345
2346	dump_handler("r4000_tlb_modify", handle_tlbm, handle_tlbm_end);
2347}
2348
2349static void flush_tlb_handlers(void)
2350{
2351	local_flush_icache_range((unsigned long)handle_tlbl,
2352			   (unsigned long)handle_tlbl_end);
2353	local_flush_icache_range((unsigned long)handle_tlbs,
2354			   (unsigned long)handle_tlbs_end);
2355	local_flush_icache_range((unsigned long)handle_tlbm,
2356			   (unsigned long)handle_tlbm_end);
2357	local_flush_icache_range((unsigned long)tlbmiss_handler_setup_pgd,
2358			   (unsigned long)tlbmiss_handler_setup_pgd_end);
2359}
2360
2361static void print_htw_config(void)
2362{
2363	unsigned long config;
2364	unsigned int pwctl;
2365	const int field = 2 * sizeof(unsigned long);
2366
2367	config = read_c0_pwfield();
2368	pr_debug("PWField (0x%0*lx): GDI: 0x%02lx  UDI: 0x%02lx  MDI: 0x%02lx  PTI: 0x%02lx  PTEI: 0x%02lx\n",
2369		field, config,
2370		(config & MIPS_PWFIELD_GDI_MASK) >> MIPS_PWFIELD_GDI_SHIFT,
2371		(config & MIPS_PWFIELD_UDI_MASK) >> MIPS_PWFIELD_UDI_SHIFT,
2372		(config & MIPS_PWFIELD_MDI_MASK) >> MIPS_PWFIELD_MDI_SHIFT,
2373		(config & MIPS_PWFIELD_PTI_MASK) >> MIPS_PWFIELD_PTI_SHIFT,
2374		(config & MIPS_PWFIELD_PTEI_MASK) >> MIPS_PWFIELD_PTEI_SHIFT);
2375
2376	config = read_c0_pwsize();
2377	pr_debug("PWSize  (0x%0*lx): PS: 0x%lx  GDW: 0x%02lx  UDW: 0x%02lx  MDW: 0x%02lx  PTW: 0x%02lx  PTEW: 0x%02lx\n",
2378		field, config,
2379		(config & MIPS_PWSIZE_PS_MASK) >> MIPS_PWSIZE_PS_SHIFT,
2380		(config & MIPS_PWSIZE_GDW_MASK) >> MIPS_PWSIZE_GDW_SHIFT,
2381		(config & MIPS_PWSIZE_UDW_MASK) >> MIPS_PWSIZE_UDW_SHIFT,
2382		(config & MIPS_PWSIZE_MDW_MASK) >> MIPS_PWSIZE_MDW_SHIFT,
2383		(config & MIPS_PWSIZE_PTW_MASK) >> MIPS_PWSIZE_PTW_SHIFT,
2384		(config & MIPS_PWSIZE_PTEW_MASK) >> MIPS_PWSIZE_PTEW_SHIFT);
2385
2386	pwctl = read_c0_pwctl();
2387	pr_debug("PWCtl   (0x%x): PWEn: 0x%x  XK: 0x%x  XS: 0x%x  XU: 0x%x  DPH: 0x%x  HugePg: 0x%x  Psn: 0x%x\n",
2388		pwctl,
2389		(pwctl & MIPS_PWCTL_PWEN_MASK) >> MIPS_PWCTL_PWEN_SHIFT,
2390		(pwctl & MIPS_PWCTL_XK_MASK) >> MIPS_PWCTL_XK_SHIFT,
2391		(pwctl & MIPS_PWCTL_XS_MASK) >> MIPS_PWCTL_XS_SHIFT,
2392		(pwctl & MIPS_PWCTL_XU_MASK) >> MIPS_PWCTL_XU_SHIFT,
2393		(pwctl & MIPS_PWCTL_DPH_MASK) >> MIPS_PWCTL_DPH_SHIFT,
2394		(pwctl & MIPS_PWCTL_HUGEPG_MASK) >> MIPS_PWCTL_HUGEPG_SHIFT,
2395		(pwctl & MIPS_PWCTL_PSN_MASK) >> MIPS_PWCTL_PSN_SHIFT);
2396}
2397
2398static void config_htw_params(void)
2399{
2400	unsigned long pwfield, pwsize, ptei;
2401	unsigned int config;
2402
2403	/*
2404	 * We are using 2-level page tables, so we only need to
2405	 * setup GDW and PTW appropriately. UDW and MDW will remain 0.
2406	 * The default value of GDI/UDI/MDI/PTI is 0xc. It is illegal to
2407	 * write values less than 0xc in these fields because the entire
2408	 * write will be dropped. As a result of which, we must preserve
2409	 * the original reset values and overwrite only what we really want.
2410	 */
2411
2412	pwfield = read_c0_pwfield();
2413	/* re-initialize the GDI field */
2414	pwfield &= ~MIPS_PWFIELD_GDI_MASK;
2415	pwfield |= PGDIR_SHIFT << MIPS_PWFIELD_GDI_SHIFT;
2416	/* re-initialize the PTI field including the even/odd bit */
2417	pwfield &= ~MIPS_PWFIELD_PTI_MASK;
2418	pwfield |= PAGE_SHIFT << MIPS_PWFIELD_PTI_SHIFT;
2419	if (CONFIG_PGTABLE_LEVELS >= 3) {
2420		pwfield &= ~MIPS_PWFIELD_MDI_MASK;
2421		pwfield |= PMD_SHIFT << MIPS_PWFIELD_MDI_SHIFT;
2422	}
2423	/* Set the PTEI right shift */
2424	ptei = _PAGE_GLOBAL_SHIFT << MIPS_PWFIELD_PTEI_SHIFT;
2425	pwfield |= ptei;
2426	write_c0_pwfield(pwfield);
2427	/* Check whether the PTEI value is supported */
2428	back_to_back_c0_hazard();
2429	pwfield = read_c0_pwfield();
2430	if (((pwfield & MIPS_PWFIELD_PTEI_MASK) << MIPS_PWFIELD_PTEI_SHIFT)
2431		!= ptei) {
2432		pr_warn("Unsupported PTEI field value: 0x%lx. HTW will not be enabled",
2433			ptei);
2434		/*
2435		 * Drop option to avoid HTW being enabled via another path
2436		 * (eg htw_reset())
2437		 */
2438		current_cpu_data.options &= ~MIPS_CPU_HTW;
2439		return;
2440	}
2441
2442	pwsize = ilog2(PTRS_PER_PGD) << MIPS_PWSIZE_GDW_SHIFT;
2443	pwsize |= ilog2(PTRS_PER_PTE) << MIPS_PWSIZE_PTW_SHIFT;
2444	if (CONFIG_PGTABLE_LEVELS >= 3)
2445		pwsize |= ilog2(PTRS_PER_PMD) << MIPS_PWSIZE_MDW_SHIFT;
2446
2447	/* Set pointer size to size of directory pointers */
2448	if (IS_ENABLED(CONFIG_64BIT))
2449		pwsize |= MIPS_PWSIZE_PS_MASK;
2450	/* PTEs may be multiple pointers long (e.g. with XPA) */
2451	pwsize |= ((PTE_T_LOG2 - PGD_T_LOG2) << MIPS_PWSIZE_PTEW_SHIFT)
2452			& MIPS_PWSIZE_PTEW_MASK;
2453
2454	write_c0_pwsize(pwsize);
2455
2456	/* Make sure everything is set before we enable the HTW */
2457	back_to_back_c0_hazard();
2458
2459	/*
2460	 * Enable HTW (and only for XUSeg on 64-bit), and disable the rest of
2461	 * the pwctl fields.
2462	 */
2463	config = 1 << MIPS_PWCTL_PWEN_SHIFT;
2464	if (IS_ENABLED(CONFIG_64BIT))
2465		config |= MIPS_PWCTL_XU_MASK;
2466	write_c0_pwctl(config);
2467	pr_info("Hardware Page Table Walker enabled\n");
2468
2469	print_htw_config();
2470}
2471
2472static void config_xpa_params(void)
2473{
2474#ifdef CONFIG_XPA
2475	unsigned int pagegrain;
2476
2477	if (mips_xpa_disabled) {
2478		pr_info("Extended Physical Addressing (XPA) disabled\n");
2479		return;
2480	}
2481
2482	pagegrain = read_c0_pagegrain();
2483	write_c0_pagegrain(pagegrain | PG_ELPA);
2484	back_to_back_c0_hazard();
2485	pagegrain = read_c0_pagegrain();
2486
2487	if (pagegrain & PG_ELPA)
2488		pr_info("Extended Physical Addressing (XPA) enabled\n");
2489	else
2490		panic("Extended Physical Addressing (XPA) disabled");
2491#endif
2492}
2493
2494static void check_pabits(void)
2495{
2496	unsigned long entry;
2497	unsigned pabits, fillbits;
2498
2499	if (!cpu_has_rixi || _PAGE_NO_EXEC == 0) {
2500		/*
2501		 * We'll only be making use of the fact that we can rotate bits
2502		 * into the fill if the CPU supports RIXI, so don't bother
2503		 * probing this for CPUs which don't.
2504		 */
2505		return;
2506	}
2507
2508	write_c0_entrylo0(~0ul);
2509	back_to_back_c0_hazard();
2510	entry = read_c0_entrylo0();
2511
2512	/* clear all non-PFN bits */
2513	entry &= ~((1 << MIPS_ENTRYLO_PFN_SHIFT) - 1);
2514	entry &= ~(MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI);
2515
2516	/* find a lower bound on PABITS, and upper bound on fill bits */
2517	pabits = fls_long(entry) + 6;
2518	fillbits = max_t(int, (int)BITS_PER_LONG - pabits, 0);
2519
2520	/* minus the RI & XI bits */
2521	fillbits -= min_t(unsigned, fillbits, 2);
2522
2523	if (fillbits >= ilog2(_PAGE_NO_EXEC))
2524		fill_includes_sw_bits = true;
2525
2526	pr_debug("Entry* registers contain %u fill bits\n", fillbits);
2527}
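/*
 * The probe above writes all-ones to EntryLo0 and reads it back: the
 * highest bit that sticks bounds PABITS from below, and everything
 * above it is fill.  If the fill is wide enough to hold _PAGE_NO_EXEC
 * after the RIXI rotate, the software bits can simply ride along in
 * the fill instead of needing to be masked off.
 */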
2528
2529void build_tlb_refill_handler(void)
2530{
2531	/*
2532	 * The refill handler is generated per-CPU, multi-node systems
2533	 * may have local storage for it. The other handlers are only
2534	 * needed once.
2535	 */
2536	static int run_once = 0;
2537
2538	if (IS_ENABLED(CONFIG_XPA) && !cpu_has_rixi)
2539		panic("Kernels supporting XPA currently require CPUs with RIXI");
2540
2541	output_pgtable_bits_defines();
2542	check_pabits();
2543
2544#ifdef CONFIG_64BIT
2545	check_for_high_segbits = current_cpu_data.vmbits > (PGDIR_SHIFT + PGD_TABLE_ORDER + PAGE_SHIFT - 3);
2546#endif
2547
2548	if (cpu_has_3kex) {
2549#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
2550		if (!run_once) {
2551			build_setup_pgd();
2552			build_r3000_tlb_refill_handler();
2553			build_r3000_tlb_load_handler();
2554			build_r3000_tlb_store_handler();
2555			build_r3000_tlb_modify_handler();
2556			flush_tlb_handlers();
2557			run_once++;
2558		}
2559#else
2560		panic("No R3000 TLB refill handler");
2561#endif
2562		return;
2563	}
2564
2565	if (cpu_has_ldpte)
2566		setup_pw();
2567
2568	if (!run_once) {
2569		scratch_reg = allocate_kscratch();
2570		build_setup_pgd();
2571		build_r4000_tlb_load_handler();
2572		build_r4000_tlb_store_handler();
2573		build_r4000_tlb_modify_handler();
2574		if (cpu_has_ldpte)
2575			build_loongson3_tlb_refill_handler();
2576		else
2577			build_r4000_tlb_refill_handler();
2578		flush_tlb_handlers();
2579		run_once++;
2580	}
2581	if (cpu_has_xpa)
2582		config_xpa_params();
2583	if (cpu_has_htw)
2584		config_htw_params();
2585}