Loading...
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Synthesize TLB refill handlers at runtime.
7 *
8 * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
9 * Copyright (C) 2005, 2007, 2008, 2009 Maciej W. Rozycki
10 * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
11 * Copyright (C) 2008, 2009 Cavium Networks, Inc.
12 * Copyright (C) 2011 MIPS Technologies, Inc.
13 *
14 * ... and the days got worse and worse and now you see
15 * I've gone completely out of my mind.
16 *
17 * They're coming to take me a away haha
18 * they're coming to take me a away hoho hihi haha
19 * to the funny farm where code is beautiful all the time ...
20 *
21 * (Condolences to Napoleon XIV)
22 */
23
24#include <linux/bug.h>
25#include <linux/export.h>
26#include <linux/kernel.h>
27#include <linux/types.h>
28#include <linux/smp.h>
29#include <linux/string.h>
30#include <linux/cache.h>
31#include <linux/pgtable.h>
32
33#include <asm/cacheflush.h>
34#include <asm/cpu-type.h>
35#include <asm/mmu_context.h>
36#include <asm/uasm.h>
37#include <asm/setup.h>
38#include <asm/tlbex.h>
39
40static int mips_xpa_disabled;
41
42static int __init xpa_disable(char *s)
43{
44 mips_xpa_disabled = 1;
45
46 return 1;
47}
48
49__setup("noxpa", xpa_disable);
50
51/*
52 * TLB load/store/modify handlers.
53 *
54 * Only the fastpath gets synthesized at runtime, the slowpath for
55 * do_page_fault remains normal asm.
56 */
57extern void tlb_do_page_fault_0(void);
58extern void tlb_do_page_fault_1(void);
59
60struct work_registers {
61 int r1;
62 int r2;
63 int r3;
64};
65
66struct tlb_reg_save {
67 unsigned long a;
68 unsigned long b;
69} ____cacheline_aligned_in_smp;
70
71static struct tlb_reg_save handler_reg_save[NR_CPUS];
72
73static inline int r45k_bvahwbug(void)
74{
75 /* XXX: We should probe for the presence of this bug, but we don't. */
76 return 0;
77}
78
79static inline int r4k_250MHZhwbug(void)
80{
81 /* XXX: We should probe for the presence of this bug, but we don't. */
82 return 0;
83}
84
85extern int sb1250_m3_workaround_needed(void);
86
87static inline int __maybe_unused bcm1250_m3_war(void)
88{
89 if (IS_ENABLED(CONFIG_SB1_PASS_2_WORKAROUNDS))
90 return sb1250_m3_workaround_needed();
91 return 0;
92}
93
94static inline int __maybe_unused r10000_llsc_war(void)
95{
96 return IS_ENABLED(CONFIG_WAR_R10000_LLSC);
97}
98
99static int use_bbit_insns(void)
100{
101 switch (current_cpu_type()) {
102 case CPU_CAVIUM_OCTEON:
103 case CPU_CAVIUM_OCTEON_PLUS:
104 case CPU_CAVIUM_OCTEON2:
105 case CPU_CAVIUM_OCTEON3:
106 return 1;
107 default:
108 return 0;
109 }
110}
111
112static int use_lwx_insns(void)
113{
114 switch (current_cpu_type()) {
115 case CPU_CAVIUM_OCTEON2:
116 case CPU_CAVIUM_OCTEON3:
117 return 1;
118 default:
119 return 0;
120 }
121}
122#if defined(CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE) && \
123 CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
124static bool scratchpad_available(void)
125{
126 return true;
127}
128static int scratchpad_offset(int i)
129{
130 /*
131 * CVMSEG starts at address -32768 and extends for
132 * CAVIUM_OCTEON_CVMSEG_SIZE 128 byte cache lines.
133 */
134 i += 1; /* Kernel use starts at the top and works down. */
135 return CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE * 128 - (8 * i) - 32768;
136}
137#else
138static bool scratchpad_available(void)
139{
140 return false;
141}
142static int scratchpad_offset(int i)
143{
144 BUG();
145 /* Really unreachable, but evidently some GCC want this. */
146 return 0;
147}
148#endif
149/*
150 * Found by experiment: At least some revisions of the 4kc throw under
151 * some circumstances a machine check exception, triggered by invalid
152 * values in the index register. Delaying the tlbp instruction until
153 * after the next branch, plus adding an additional nop in front of
154 * tlbwi/tlbwr avoids the invalid index register values. Nobody knows
155 * why; it's not an issue caused by the core RTL.
156 *
157 */
158static int m4kc_tlbp_war(void)
159{
160 return current_cpu_type() == CPU_4KC;
161}
162
163/* Handle labels (which must be positive integers). */
164enum label_id {
165 label_second_part = 1,
166 label_leave,
167 label_vmalloc,
168 label_vmalloc_done,
169 label_tlbw_hazard_0,
170 label_split = label_tlbw_hazard_0 + 8,
171 label_tlbl_goaround1,
172 label_tlbl_goaround2,
173 label_nopage_tlbl,
174 label_nopage_tlbs,
175 label_nopage_tlbm,
176 label_smp_pgtable_change,
177 label_r3000_write_probe_fail,
178 label_large_segbits_fault,
179#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
180 label_tlb_huge_update,
181#endif
182};
183
184UASM_L_LA(_second_part)
185UASM_L_LA(_leave)
186UASM_L_LA(_vmalloc)
187UASM_L_LA(_vmalloc_done)
188/* _tlbw_hazard_x is handled differently. */
189UASM_L_LA(_split)
190UASM_L_LA(_tlbl_goaround1)
191UASM_L_LA(_tlbl_goaround2)
192UASM_L_LA(_nopage_tlbl)
193UASM_L_LA(_nopage_tlbs)
194UASM_L_LA(_nopage_tlbm)
195UASM_L_LA(_smp_pgtable_change)
196UASM_L_LA(_r3000_write_probe_fail)
197UASM_L_LA(_large_segbits_fault)
198#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
199UASM_L_LA(_tlb_huge_update)
200#endif
201
202static int hazard_instance;
203
204static void uasm_bgezl_hazard(u32 **p, struct uasm_reloc **r, int instance)
205{
206 switch (instance) {
207 case 0 ... 7:
208 uasm_il_bgezl(p, r, 0, label_tlbw_hazard_0 + instance);
209 return;
210 default:
211 BUG();
212 }
213}
214
215static void uasm_bgezl_label(struct uasm_label **l, u32 **p, int instance)
216{
217 switch (instance) {
218 case 0 ... 7:
219 uasm_build_label(l, *p, label_tlbw_hazard_0 + instance);
220 break;
221 default:
222 BUG();
223 }
224}
225
226/*
227 * pgtable bits are assigned dynamically depending on processor feature
228 * and statically based on kernel configuration. This spits out the actual
229 * values the kernel is using. Required to make sense from disassembled
230 * TLB exception handlers.
231 */
232static void output_pgtable_bits_defines(void)
233{
234#define pr_define(fmt, ...) \
235 pr_debug("#define " fmt, ##__VA_ARGS__)
236
237 pr_debug("#include <asm/asm.h>\n");
238 pr_debug("#include <asm/regdef.h>\n");
239 pr_debug("\n");
240
241 pr_define("_PAGE_PRESENT_SHIFT %d\n", _PAGE_PRESENT_SHIFT);
242 pr_define("_PAGE_NO_READ_SHIFT %d\n", _PAGE_NO_READ_SHIFT);
243 pr_define("_PAGE_WRITE_SHIFT %d\n", _PAGE_WRITE_SHIFT);
244 pr_define("_PAGE_ACCESSED_SHIFT %d\n", _PAGE_ACCESSED_SHIFT);
245 pr_define("_PAGE_MODIFIED_SHIFT %d\n", _PAGE_MODIFIED_SHIFT);
246#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
247 pr_define("_PAGE_HUGE_SHIFT %d\n", _PAGE_HUGE_SHIFT);
248#endif
249#ifdef _PAGE_NO_EXEC_SHIFT
250 if (cpu_has_rixi)
251 pr_define("_PAGE_NO_EXEC_SHIFT %d\n", _PAGE_NO_EXEC_SHIFT);
252#endif
253 pr_define("_PAGE_GLOBAL_SHIFT %d\n", _PAGE_GLOBAL_SHIFT);
254 pr_define("_PAGE_VALID_SHIFT %d\n", _PAGE_VALID_SHIFT);
255 pr_define("_PAGE_DIRTY_SHIFT %d\n", _PAGE_DIRTY_SHIFT);
256 pr_define("PFN_PTE_SHIFT %d\n", PFN_PTE_SHIFT);
257 pr_debug("\n");
258}
259
260static inline void dump_handler(const char *symbol, const void *start, const void *end)
261{
262 unsigned int count = (end - start) / sizeof(u32);
263 const u32 *handler = start;
264 int i;
265
266 pr_debug("LEAF(%s)\n", symbol);
267
268 pr_debug("\t.set push\n");
269 pr_debug("\t.set noreorder\n");
270
271 for (i = 0; i < count; i++)
272 pr_debug("\t.word\t0x%08x\t\t# %p\n", handler[i], &handler[i]);
273
274 pr_debug("\t.set\tpop\n");
275
276 pr_debug("\tEND(%s)\n", symbol);
277}
278
279/* The only general purpose registers allowed in TLB handlers. */
280#define K0 26
281#define K1 27
282
283/* Some CP0 registers */
284#define C0_INDEX 0, 0
285#define C0_ENTRYLO0 2, 0
286#define C0_TCBIND 2, 2
287#define C0_ENTRYLO1 3, 0
288#define C0_CONTEXT 4, 0
289#define C0_PAGEMASK 5, 0
290#define C0_PWBASE 5, 5
291#define C0_PWFIELD 5, 6
292#define C0_PWSIZE 5, 7
293#define C0_PWCTL 6, 6
294#define C0_BADVADDR 8, 0
295#define C0_PGD 9, 7
296#define C0_ENTRYHI 10, 0
297#define C0_EPC 14, 0
298#define C0_XCONTEXT 20, 0
299
300#ifdef CONFIG_64BIT
301# define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_XCONTEXT)
302#else
303# define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_CONTEXT)
304#endif
305
306/* The worst case length of the handler is around 18 instructions for
307 * R3000-style TLBs and up to 63 instructions for R4000-style TLBs.
308 * Maximum space available is 32 instructions for R3000 and 64
309 * instructions for R4000.
310 *
311 * We deliberately chose a buffer size of 128, so we won't scribble
312 * over anything important on overflow before we panic.
313 */
314static u32 tlb_handler[128];
315
316/* simply assume worst case size for labels and relocs */
317static struct uasm_label labels[128];
318static struct uasm_reloc relocs[128];
319
320static int check_for_high_segbits;
321static bool fill_includes_sw_bits;
322
323static unsigned int kscratch_used_mask;
324
325static inline int __maybe_unused c0_kscratch(void)
326{
327 return 31;
328}
329
330static int allocate_kscratch(void)
331{
332 int r;
333 unsigned int a = cpu_data[0].kscratch_mask & ~kscratch_used_mask;
334
335 r = ffs(a);
336
337 if (r == 0)
338 return -1;
339
340 r--; /* make it zero based */
341
342 kscratch_used_mask |= (1 << r);
343
344 return r;
345}
346
347static int scratch_reg;
348int pgd_reg;
349EXPORT_SYMBOL_GPL(pgd_reg);
350enum vmalloc64_mode {not_refill, refill_scratch, refill_noscratch};
351
352static struct work_registers build_get_work_registers(u32 **p)
353{
354 struct work_registers r;
355
356 if (scratch_reg >= 0) {
357 /* Save in CPU local C0_KScratch? */
358 UASM_i_MTC0(p, 1, c0_kscratch(), scratch_reg);
359 r.r1 = K0;
360 r.r2 = K1;
361 r.r3 = 1;
362 return r;
363 }
364
365 if (num_possible_cpus() > 1) {
366 /* Get smp_processor_id */
367 UASM_i_CPUID_MFC0(p, K0, SMP_CPUID_REG);
368 UASM_i_SRL_SAFE(p, K0, K0, SMP_CPUID_REGSHIFT);
369
370 /* handler_reg_save index in K0 */
371 UASM_i_SLL(p, K0, K0, ilog2(sizeof(struct tlb_reg_save)));
372
373 UASM_i_LA(p, K1, (long)&handler_reg_save);
374 UASM_i_ADDU(p, K0, K0, K1);
375 } else {
376 UASM_i_LA(p, K0, (long)&handler_reg_save);
377 }
378 /* K0 now points to save area, save $1 and $2 */
379 UASM_i_SW(p, 1, offsetof(struct tlb_reg_save, a), K0);
380 UASM_i_SW(p, 2, offsetof(struct tlb_reg_save, b), K0);
381
382 r.r1 = K1;
383 r.r2 = 1;
384 r.r3 = 2;
385 return r;
386}
387
388static void build_restore_work_registers(u32 **p)
389{
390 if (scratch_reg >= 0) {
391 uasm_i_ehb(p);
392 UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
393 return;
394 }
395 /* K0 already points to save area, restore $1 and $2 */
396 UASM_i_LW(p, 1, offsetof(struct tlb_reg_save, a), K0);
397 UASM_i_LW(p, 2, offsetof(struct tlb_reg_save, b), K0);
398}
399
400#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
401
402/*
403 * CONFIG_MIPS_PGD_C0_CONTEXT implies 64 bit and lack of pgd_current,
404 * we cannot do r3000 under these circumstances.
405 *
406 * The R3000 TLB handler is simple.
407 */
408static void build_r3000_tlb_refill_handler(void)
409{
410 long pgdc = (long)pgd_current;
411 u32 *p;
412
413 memset(tlb_handler, 0, sizeof(tlb_handler));
414 p = tlb_handler;
415
416 uasm_i_mfc0(&p, K0, C0_BADVADDR);
417 uasm_i_lui(&p, K1, uasm_rel_hi(pgdc)); /* cp0 delay */
418 uasm_i_lw(&p, K1, uasm_rel_lo(pgdc), K1);
419 uasm_i_srl(&p, K0, K0, 22); /* load delay */
420 uasm_i_sll(&p, K0, K0, 2);
421 uasm_i_addu(&p, K1, K1, K0);
422 uasm_i_mfc0(&p, K0, C0_CONTEXT);
423 uasm_i_lw(&p, K1, 0, K1); /* cp0 delay */
424 uasm_i_andi(&p, K0, K0, 0xffc); /* load delay */
425 uasm_i_addu(&p, K1, K1, K0);
426 uasm_i_lw(&p, K0, 0, K1);
427 uasm_i_nop(&p); /* load delay */
428 uasm_i_mtc0(&p, K0, C0_ENTRYLO0);
429 uasm_i_mfc0(&p, K1, C0_EPC); /* cp0 delay */
430 uasm_i_tlbwr(&p); /* cp0 delay */
431 uasm_i_jr(&p, K1);
432 uasm_i_rfe(&p); /* branch delay */
433
434 if (p > tlb_handler + 32)
435 panic("TLB refill handler space exceeded");
436
437 pr_debug("Wrote TLB refill handler (%u instructions).\n",
438 (unsigned int)(p - tlb_handler));
439
440 memcpy((void *)ebase, tlb_handler, 0x80);
441 local_flush_icache_range(ebase, ebase + 0x80);
442 dump_handler("r3000_tlb_refill", (u32 *)ebase, (u32 *)(ebase + 0x80));
443}
444#endif /* CONFIG_MIPS_PGD_C0_CONTEXT */
445
446/*
447 * The R4000 TLB handler is much more complicated. We have two
448 * consecutive handler areas with 32 instructions space each.
449 * Since they aren't used at the same time, we can overflow in the
450 * other one.To keep things simple, we first assume linear space,
451 * then we relocate it to the final handler layout as needed.
452 */
453static u32 final_handler[64];
454
455/*
456 * Hazards
457 *
458 * From the IDT errata for the QED RM5230 (Nevada), processor revision 1.0:
459 * 2. A timing hazard exists for the TLBP instruction.
460 *
461 * stalling_instruction
462 * TLBP
463 *
464 * The JTLB is being read for the TLBP throughout the stall generated by the
465 * previous instruction. This is not really correct as the stalling instruction
466 * can modify the address used to access the JTLB. The failure symptom is that
467 * the TLBP instruction will use an address created for the stalling instruction
468 * and not the address held in C0_ENHI and thus report the wrong results.
469 *
470 * The software work-around is to not allow the instruction preceding the TLBP
471 * to stall - make it an NOP or some other instruction guaranteed not to stall.
472 *
473 * Errata 2 will not be fixed. This errata is also on the R5000.
474 *
475 * As if we MIPS hackers wouldn't know how to nop pipelines happy ...
476 */
477static void __maybe_unused build_tlb_probe_entry(u32 **p)
478{
479 switch (current_cpu_type()) {
480 /* Found by experiment: R4600 v2.0/R4700 needs this, too. */
481 case CPU_R4600:
482 case CPU_R4700:
483 case CPU_R5000:
484 case CPU_NEVADA:
485 uasm_i_nop(p);
486 uasm_i_tlbp(p);
487 break;
488
489 default:
490 uasm_i_tlbp(p);
491 break;
492 }
493}
494
495void build_tlb_write_entry(u32 **p, struct uasm_label **l,
496 struct uasm_reloc **r,
497 enum tlb_write_entry wmode)
498{
499 void(*tlbw)(u32 **) = NULL;
500
501 switch (wmode) {
502 case tlb_random: tlbw = uasm_i_tlbwr; break;
503 case tlb_indexed: tlbw = uasm_i_tlbwi; break;
504 }
505
506 if (cpu_has_mips_r2_r6) {
507 if (cpu_has_mips_r2_exec_hazard)
508 uasm_i_ehb(p);
509 tlbw(p);
510 return;
511 }
512
513 switch (current_cpu_type()) {
514 case CPU_R4000PC:
515 case CPU_R4000SC:
516 case CPU_R4000MC:
517 case CPU_R4400PC:
518 case CPU_R4400SC:
519 case CPU_R4400MC:
520 /*
521 * This branch uses up a mtc0 hazard nop slot and saves
522 * two nops after the tlbw instruction.
523 */
524 uasm_bgezl_hazard(p, r, hazard_instance);
525 tlbw(p);
526 uasm_bgezl_label(l, p, hazard_instance);
527 hazard_instance++;
528 uasm_i_nop(p);
529 break;
530
531 case CPU_R4600:
532 case CPU_R4700:
533 uasm_i_nop(p);
534 tlbw(p);
535 uasm_i_nop(p);
536 break;
537
538 case CPU_R5000:
539 case CPU_NEVADA:
540 uasm_i_nop(p); /* QED specifies 2 nops hazard */
541 uasm_i_nop(p); /* QED specifies 2 nops hazard */
542 tlbw(p);
543 break;
544
545 case CPU_R4300:
546 case CPU_5KC:
547 case CPU_TX49XX:
548 case CPU_PR4450:
549 uasm_i_nop(p);
550 tlbw(p);
551 break;
552
553 case CPU_R10000:
554 case CPU_R12000:
555 case CPU_R14000:
556 case CPU_R16000:
557 case CPU_4KC:
558 case CPU_4KEC:
559 case CPU_M14KC:
560 case CPU_M14KEC:
561 case CPU_SB1:
562 case CPU_SB1A:
563 case CPU_4KSC:
564 case CPU_20KC:
565 case CPU_25KF:
566 case CPU_BMIPS32:
567 case CPU_BMIPS3300:
568 case CPU_BMIPS4350:
569 case CPU_BMIPS4380:
570 case CPU_BMIPS5000:
571 case CPU_LOONGSON2EF:
572 case CPU_LOONGSON64:
573 case CPU_R5500:
574 if (m4kc_tlbp_war())
575 uasm_i_nop(p);
576 fallthrough;
577 case CPU_ALCHEMY:
578 tlbw(p);
579 break;
580
581 case CPU_RM7000:
582 uasm_i_nop(p);
583 uasm_i_nop(p);
584 uasm_i_nop(p);
585 uasm_i_nop(p);
586 tlbw(p);
587 break;
588
589 case CPU_XBURST:
590 tlbw(p);
591 uasm_i_nop(p);
592 break;
593
594 default:
595 panic("No TLB refill handler yet (CPU type: %d)",
596 current_cpu_type());
597 break;
598 }
599}
600EXPORT_SYMBOL_GPL(build_tlb_write_entry);
601
602static __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
603 unsigned int reg)
604{
605 if (_PAGE_GLOBAL_SHIFT == 0) {
606 /* pte_t is already in EntryLo format */
607 return;
608 }
609
610 if (cpu_has_rixi && _PAGE_NO_EXEC != 0) {
611 if (fill_includes_sw_bits) {
612 UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL));
613 } else {
614 UASM_i_SRL(p, reg, reg, ilog2(_PAGE_NO_EXEC));
615 UASM_i_ROTR(p, reg, reg,
616 ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
617 }
618 } else {
619#ifdef CONFIG_PHYS_ADDR_T_64BIT
620 uasm_i_dsrl_safe(p, reg, reg, ilog2(_PAGE_GLOBAL));
621#else
622 UASM_i_SRL(p, reg, reg, ilog2(_PAGE_GLOBAL));
623#endif
624 }
625}
626
627#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
628
629static void build_restore_pagemask(u32 **p, struct uasm_reloc **r,
630 unsigned int tmp, enum label_id lid,
631 int restore_scratch)
632{
633 if (restore_scratch) {
634 /*
635 * Ensure the MFC0 below observes the value written to the
636 * KScratch register by the prior MTC0.
637 */
638 if (scratch_reg >= 0)
639 uasm_i_ehb(p);
640
641 /* Reset default page size */
642 if (PM_DEFAULT_MASK >> 16) {
643 uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
644 uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
645 uasm_i_mtc0(p, tmp, C0_PAGEMASK);
646 uasm_il_b(p, r, lid);
647 } else if (PM_DEFAULT_MASK) {
648 uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
649 uasm_i_mtc0(p, tmp, C0_PAGEMASK);
650 uasm_il_b(p, r, lid);
651 } else {
652 uasm_i_mtc0(p, 0, C0_PAGEMASK);
653 uasm_il_b(p, r, lid);
654 }
655 if (scratch_reg >= 0)
656 UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
657 else
658 UASM_i_LW(p, 1, scratchpad_offset(0), 0);
659 } else {
660 /* Reset default page size */
661 if (PM_DEFAULT_MASK >> 16) {
662 uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
663 uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
664 uasm_il_b(p, r, lid);
665 uasm_i_mtc0(p, tmp, C0_PAGEMASK);
666 } else if (PM_DEFAULT_MASK) {
667 uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
668 uasm_il_b(p, r, lid);
669 uasm_i_mtc0(p, tmp, C0_PAGEMASK);
670 } else {
671 uasm_il_b(p, r, lid);
672 uasm_i_mtc0(p, 0, C0_PAGEMASK);
673 }
674 }
675}
676
677static void build_huge_tlb_write_entry(u32 **p, struct uasm_label **l,
678 struct uasm_reloc **r,
679 unsigned int tmp,
680 enum tlb_write_entry wmode,
681 int restore_scratch)
682{
683 /* Set huge page tlb entry size */
684 uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16);
685 uasm_i_ori(p, tmp, tmp, PM_HUGE_MASK & 0xffff);
686 uasm_i_mtc0(p, tmp, C0_PAGEMASK);
687
688 build_tlb_write_entry(p, l, r, wmode);
689
690 build_restore_pagemask(p, r, tmp, label_leave, restore_scratch);
691}
692
693/*
694 * Check if Huge PTE is present, if so then jump to LABEL.
695 */
696static void
697build_is_huge_pte(u32 **p, struct uasm_reloc **r, unsigned int tmp,
698 unsigned int pmd, int lid)
699{
700 UASM_i_LW(p, tmp, 0, pmd);
701 if (use_bbit_insns()) {
702 uasm_il_bbit1(p, r, tmp, ilog2(_PAGE_HUGE), lid);
703 } else {
704 uasm_i_andi(p, tmp, tmp, _PAGE_HUGE);
705 uasm_il_bnez(p, r, tmp, lid);
706 }
707}
708
709static void build_huge_update_entries(u32 **p, unsigned int pte,
710 unsigned int tmp)
711{
712 int small_sequence;
713
714 /*
715 * A huge PTE describes an area the size of the
716 * configured huge page size. This is twice the
717 * of the large TLB entry size we intend to use.
718 * A TLB entry half the size of the configured
719 * huge page size is configured into entrylo0
720 * and entrylo1 to cover the contiguous huge PTE
721 * address space.
722 */
723 small_sequence = (HPAGE_SIZE >> 7) < 0x10000;
724
725 /* We can clobber tmp. It isn't used after this.*/
726 if (!small_sequence)
727 uasm_i_lui(p, tmp, HPAGE_SIZE >> (7 + 16));
728
729 build_convert_pte_to_entrylo(p, pte);
730 UASM_i_MTC0(p, pte, C0_ENTRYLO0); /* load it */
731 /* convert to entrylo1 */
732 if (small_sequence)
733 UASM_i_ADDIU(p, pte, pte, HPAGE_SIZE >> 7);
734 else
735 UASM_i_ADDU(p, pte, pte, tmp);
736
737 UASM_i_MTC0(p, pte, C0_ENTRYLO1); /* load it */
738}
739
740static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r,
741 struct uasm_label **l,
742 unsigned int pte,
743 unsigned int ptr,
744 unsigned int flush)
745{
746#ifdef CONFIG_SMP
747 UASM_i_SC(p, pte, 0, ptr);
748 uasm_il_beqz(p, r, pte, label_tlb_huge_update);
749 UASM_i_LW(p, pte, 0, ptr); /* Needed because SC killed our PTE */
750#else
751 UASM_i_SW(p, pte, 0, ptr);
752#endif
753 if (cpu_has_ftlb && flush) {
754 BUG_ON(!cpu_has_tlbinv);
755
756 UASM_i_MFC0(p, ptr, C0_ENTRYHI);
757 uasm_i_ori(p, ptr, ptr, MIPS_ENTRYHI_EHINV);
758 UASM_i_MTC0(p, ptr, C0_ENTRYHI);
759 build_tlb_write_entry(p, l, r, tlb_indexed);
760
761 uasm_i_xori(p, ptr, ptr, MIPS_ENTRYHI_EHINV);
762 UASM_i_MTC0(p, ptr, C0_ENTRYHI);
763 build_huge_update_entries(p, pte, ptr);
764 build_huge_tlb_write_entry(p, l, r, pte, tlb_random, 0);
765
766 return;
767 }
768
769 build_huge_update_entries(p, pte, ptr);
770 build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed, 0);
771}
772#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
773
774#ifdef CONFIG_64BIT
775/*
776 * TMP and PTR are scratch.
777 * TMP will be clobbered, PTR will hold the pmd entry.
778 */
779void build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
780 unsigned int tmp, unsigned int ptr)
781{
782#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
783 long pgdc = (long)pgd_current;
784#endif
785 /*
786 * The vmalloc handling is not in the hotpath.
787 */
788 uasm_i_dmfc0(p, tmp, C0_BADVADDR);
789
790 if (check_for_high_segbits) {
791 /*
792 * The kernel currently implicitly assumes that the
793 * MIPS SEGBITS parameter for the processor is
794 * (PGDIR_SHIFT+PGDIR_BITS) or less, and will never
795 * allocate virtual addresses outside the maximum
796 * range for SEGBITS = (PGDIR_SHIFT+PGDIR_BITS). But
797 * that doesn't prevent user code from accessing the
798 * higher xuseg addresses. Here, we make sure that
799 * everything but the lower xuseg addresses goes down
800 * the module_alloc/vmalloc path.
801 */
802 uasm_i_dsrl_safe(p, ptr, tmp, PGDIR_SHIFT + PGD_TABLE_ORDER + PAGE_SHIFT - 3);
803 uasm_il_bnez(p, r, ptr, label_vmalloc);
804 } else {
805 uasm_il_bltz(p, r, tmp, label_vmalloc);
806 }
807 /* No uasm_i_nop needed here, since the next insn doesn't touch TMP. */
808
809 if (pgd_reg != -1) {
810 /* pgd is in pgd_reg */
811 if (cpu_has_ldpte)
812 UASM_i_MFC0(p, ptr, C0_PWBASE);
813 else
814 UASM_i_MFC0(p, ptr, c0_kscratch(), pgd_reg);
815 } else {
816#if defined(CONFIG_MIPS_PGD_C0_CONTEXT)
817 /*
818 * &pgd << 11 stored in CONTEXT [23..63].
819 */
820 UASM_i_MFC0(p, ptr, C0_CONTEXT);
821
822 /* Clear lower 23 bits of context. */
823 uasm_i_dins(p, ptr, 0, 0, 23);
824
825 /* insert bit[63:59] of CAC_BASE into bit[11:6] of ptr */
826 uasm_i_ori(p, ptr, ptr, ((u64)(CAC_BASE) >> 53));
827 uasm_i_drotr(p, ptr, ptr, 11);
828#elif defined(CONFIG_SMP)
829 UASM_i_CPUID_MFC0(p, ptr, SMP_CPUID_REG);
830 uasm_i_dsrl_safe(p, ptr, ptr, SMP_CPUID_PTRSHIFT);
831 UASM_i_LA_mostly(p, tmp, pgdc);
832 uasm_i_daddu(p, ptr, ptr, tmp);
833 uasm_i_dmfc0(p, tmp, C0_BADVADDR);
834 uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
835#else
836 UASM_i_LA_mostly(p, ptr, pgdc);
837 uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
838#endif
839 }
840
841 uasm_l_vmalloc_done(l, *p);
842
843 /* get pgd offset in bytes */
844 uasm_i_dsrl_safe(p, tmp, tmp, PGDIR_SHIFT - 3);
845
846 uasm_i_andi(p, tmp, tmp, (PTRS_PER_PGD - 1)<<3);
847 uasm_i_daddu(p, ptr, ptr, tmp); /* add in pgd offset */
848#ifndef __PAGETABLE_PUD_FOLDED
849 uasm_i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */
850 uasm_i_ld(p, ptr, 0, ptr); /* get pud pointer */
851 uasm_i_dsrl_safe(p, tmp, tmp, PUD_SHIFT - 3); /* get pud offset in bytes */
852 uasm_i_andi(p, tmp, tmp, (PTRS_PER_PUD - 1) << 3);
853 uasm_i_daddu(p, ptr, ptr, tmp); /* add in pud offset */
854#endif
855#ifndef __PAGETABLE_PMD_FOLDED
856 uasm_i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */
857 uasm_i_ld(p, ptr, 0, ptr); /* get pmd pointer */
858 uasm_i_dsrl_safe(p, tmp, tmp, PMD_SHIFT-3); /* get pmd offset in bytes */
859 uasm_i_andi(p, tmp, tmp, (PTRS_PER_PMD - 1)<<3);
860 uasm_i_daddu(p, ptr, ptr, tmp); /* add in pmd offset */
861#endif
862}
863EXPORT_SYMBOL_GPL(build_get_pmde64);
864
865/*
866 * BVADDR is the faulting address, PTR is scratch.
867 * PTR will hold the pgd for vmalloc.
868 */
869static void
870build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
871 unsigned int bvaddr, unsigned int ptr,
872 enum vmalloc64_mode mode)
873{
874 long swpd = (long)swapper_pg_dir;
875 int single_insn_swpd;
876 int did_vmalloc_branch = 0;
877
878 single_insn_swpd = uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd);
879
880 uasm_l_vmalloc(l, *p);
881
882 if (mode != not_refill && check_for_high_segbits) {
883 if (single_insn_swpd) {
884 uasm_il_bltz(p, r, bvaddr, label_vmalloc_done);
885 uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
886 did_vmalloc_branch = 1;
887 /* fall through */
888 } else {
889 uasm_il_bgez(p, r, bvaddr, label_large_segbits_fault);
890 }
891 }
892 if (!did_vmalloc_branch) {
893 if (single_insn_swpd) {
894 uasm_il_b(p, r, label_vmalloc_done);
895 uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
896 } else {
897 UASM_i_LA_mostly(p, ptr, swpd);
898 uasm_il_b(p, r, label_vmalloc_done);
899 if (uasm_in_compat_space_p(swpd))
900 uasm_i_addiu(p, ptr, ptr, uasm_rel_lo(swpd));
901 else
902 uasm_i_daddiu(p, ptr, ptr, uasm_rel_lo(swpd));
903 }
904 }
905 if (mode != not_refill && check_for_high_segbits) {
906 uasm_l_large_segbits_fault(l, *p);
907
908 if (mode == refill_scratch && scratch_reg >= 0)
909 uasm_i_ehb(p);
910
911 /*
912 * We get here if we are an xsseg address, or if we are
913 * an xuseg address above (PGDIR_SHIFT+PGDIR_BITS) boundary.
914 *
915 * Ignoring xsseg (assume disabled so would generate
916 * (address errors?), the only remaining possibility
917 * is the upper xuseg addresses. On processors with
918 * TLB_SEGBITS <= PGDIR_SHIFT+PGDIR_BITS, these
919 * addresses would have taken an address error. We try
920 * to mimic that here by taking a load/istream page
921 * fault.
922 */
923 if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS))
924 uasm_i_sync(p, 0);
925 UASM_i_LA(p, ptr, (unsigned long)tlb_do_page_fault_0);
926 uasm_i_jr(p, ptr);
927
928 if (mode == refill_scratch) {
929 if (scratch_reg >= 0)
930 UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
931 else
932 UASM_i_LW(p, 1, scratchpad_offset(0), 0);
933 } else {
934 uasm_i_nop(p);
935 }
936 }
937}
938
939#else /* !CONFIG_64BIT */
940
941/*
942 * TMP and PTR are scratch.
943 * TMP will be clobbered, PTR will hold the pgd entry.
944 */
945void build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
946{
947 if (pgd_reg != -1) {
948 /* pgd is in pgd_reg */
949 uasm_i_mfc0(p, ptr, c0_kscratch(), pgd_reg);
950 uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */
951 } else {
952 long pgdc = (long)pgd_current;
953
954 /* 32 bit SMP has smp_processor_id() stored in CONTEXT. */
955#ifdef CONFIG_SMP
956 uasm_i_mfc0(p, ptr, SMP_CPUID_REG);
957 UASM_i_LA_mostly(p, tmp, pgdc);
958 uasm_i_srl(p, ptr, ptr, SMP_CPUID_PTRSHIFT);
959 uasm_i_addu(p, ptr, tmp, ptr);
960#else
961 UASM_i_LA_mostly(p, ptr, pgdc);
962#endif
963 uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */
964 uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
965 }
966 uasm_i_srl(p, tmp, tmp, PGDIR_SHIFT); /* get pgd only bits */
967 uasm_i_sll(p, tmp, tmp, PGD_T_LOG2);
968 uasm_i_addu(p, ptr, ptr, tmp); /* add in pgd offset */
969}
970EXPORT_SYMBOL_GPL(build_get_pgde32);
971
972#endif /* !CONFIG_64BIT */
973
974static void build_adjust_context(u32 **p, unsigned int ctx)
975{
976 unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12;
977 unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1);
978
979 if (shift)
980 UASM_i_SRL(p, ctx, ctx, shift);
981 uasm_i_andi(p, ctx, ctx, mask);
982}
983
984void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
985{
986 /*
987 * Bug workaround for the Nevada. It seems as if under certain
988 * circumstances the move from cp0_context might produce a
989 * bogus result when the mfc0 instruction and its consumer are
990 * in a different cacheline or a load instruction, probably any
991 * memory reference, is between them.
992 */
993 switch (current_cpu_type()) {
994 case CPU_NEVADA:
995 UASM_i_LW(p, ptr, 0, ptr);
996 GET_CONTEXT(p, tmp); /* get context reg */
997 break;
998
999 default:
1000 GET_CONTEXT(p, tmp); /* get context reg */
1001 UASM_i_LW(p, ptr, 0, ptr);
1002 break;
1003 }
1004
1005 build_adjust_context(p, tmp);
1006 UASM_i_ADDU(p, ptr, ptr, tmp); /* add in offset */
1007}
1008EXPORT_SYMBOL_GPL(build_get_ptep);
1009
1010void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
1011{
1012 int pte_off_even = 0;
1013 int pte_off_odd = sizeof(pte_t);
1014
1015#if defined(CONFIG_CPU_MIPS32) && defined(CONFIG_PHYS_ADDR_T_64BIT)
1016 /* The low 32 bits of EntryLo is stored in pte_high */
1017 pte_off_even += offsetof(pte_t, pte_high);
1018 pte_off_odd += offsetof(pte_t, pte_high);
1019#endif
1020
1021 if (IS_ENABLED(CONFIG_XPA)) {
1022 uasm_i_lw(p, tmp, pte_off_even, ptep); /* even pte */
1023 UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
1024 UASM_i_MTC0(p, tmp, C0_ENTRYLO0);
1025
1026 if (cpu_has_xpa && !mips_xpa_disabled) {
1027 uasm_i_lw(p, tmp, 0, ptep);
1028 uasm_i_ext(p, tmp, tmp, 0, 24);
1029 uasm_i_mthc0(p, tmp, C0_ENTRYLO0);
1030 }
1031
1032 uasm_i_lw(p, tmp, pte_off_odd, ptep); /* odd pte */
1033 UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
1034 UASM_i_MTC0(p, tmp, C0_ENTRYLO1);
1035
1036 if (cpu_has_xpa && !mips_xpa_disabled) {
1037 uasm_i_lw(p, tmp, sizeof(pte_t), ptep);
1038 uasm_i_ext(p, tmp, tmp, 0, 24);
1039 uasm_i_mthc0(p, tmp, C0_ENTRYLO1);
1040 }
1041 return;
1042 }
1043
1044 UASM_i_LW(p, tmp, pte_off_even, ptep); /* get even pte */
1045 UASM_i_LW(p, ptep, pte_off_odd, ptep); /* get odd pte */
1046 if (r45k_bvahwbug())
1047 build_tlb_probe_entry(p);
1048 build_convert_pte_to_entrylo(p, tmp);
1049 if (r4k_250MHZhwbug())
1050 UASM_i_MTC0(p, 0, C0_ENTRYLO0);
1051 UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
1052 build_convert_pte_to_entrylo(p, ptep);
1053 if (r45k_bvahwbug())
1054 uasm_i_mfc0(p, tmp, C0_INDEX);
1055 if (r4k_250MHZhwbug())
1056 UASM_i_MTC0(p, 0, C0_ENTRYLO1);
1057 UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
1058}
1059EXPORT_SYMBOL_GPL(build_update_entries);
1060
1061struct mips_huge_tlb_info {
1062 int huge_pte;
1063 int restore_scratch;
1064 bool need_reload_pte;
1065};
1066
1067static struct mips_huge_tlb_info
1068build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
1069 struct uasm_reloc **r, unsigned int tmp,
1070 unsigned int ptr, int c0_scratch_reg)
1071{
1072 struct mips_huge_tlb_info rv;
1073 unsigned int even, odd;
1074 int vmalloc_branch_delay_filled = 0;
1075 const int scratch = 1; /* Our extra working register */
1076
1077 rv.huge_pte = scratch;
1078 rv.restore_scratch = 0;
1079 rv.need_reload_pte = false;
1080
1081 if (check_for_high_segbits) {
1082 UASM_i_MFC0(p, tmp, C0_BADVADDR);
1083
1084 if (pgd_reg != -1)
1085 UASM_i_MFC0(p, ptr, c0_kscratch(), pgd_reg);
1086 else
1087 UASM_i_MFC0(p, ptr, C0_CONTEXT);
1088
1089 if (c0_scratch_reg >= 0)
1090 UASM_i_MTC0(p, scratch, c0_kscratch(), c0_scratch_reg);
1091 else
1092 UASM_i_SW(p, scratch, scratchpad_offset(0), 0);
1093
1094 uasm_i_dsrl_safe(p, scratch, tmp,
1095 PGDIR_SHIFT + PGD_TABLE_ORDER + PAGE_SHIFT - 3);
1096 uasm_il_bnez(p, r, scratch, label_vmalloc);
1097
1098 if (pgd_reg == -1) {
1099 vmalloc_branch_delay_filled = 1;
1100 /* Clear lower 23 bits of context. */
1101 uasm_i_dins(p, ptr, 0, 0, 23);
1102 }
1103 } else {
1104 if (pgd_reg != -1)
1105 UASM_i_MFC0(p, ptr, c0_kscratch(), pgd_reg);
1106 else
1107 UASM_i_MFC0(p, ptr, C0_CONTEXT);
1108
1109 UASM_i_MFC0(p, tmp, C0_BADVADDR);
1110
1111 if (c0_scratch_reg >= 0)
1112 UASM_i_MTC0(p, scratch, c0_kscratch(), c0_scratch_reg);
1113 else
1114 UASM_i_SW(p, scratch, scratchpad_offset(0), 0);
1115
1116 if (pgd_reg == -1)
1117 /* Clear lower 23 bits of context. */
1118 uasm_i_dins(p, ptr, 0, 0, 23);
1119
1120 uasm_il_bltz(p, r, tmp, label_vmalloc);
1121 }
1122
1123 if (pgd_reg == -1) {
1124 vmalloc_branch_delay_filled = 1;
1125 /* insert bit[63:59] of CAC_BASE into bit[11:6] of ptr */
1126 uasm_i_ori(p, ptr, ptr, ((u64)(CAC_BASE) >> 53));
1127
1128 uasm_i_drotr(p, ptr, ptr, 11);
1129 }
1130
1131#ifdef __PAGETABLE_PMD_FOLDED
1132#define LOC_PTEP scratch
1133#else
1134#define LOC_PTEP ptr
1135#endif
1136
1137 if (!vmalloc_branch_delay_filled)
1138 /* get pgd offset in bytes */
1139 uasm_i_dsrl_safe(p, scratch, tmp, PGDIR_SHIFT - 3);
1140
1141 uasm_l_vmalloc_done(l, *p);
1142
1143 /*
1144 * tmp ptr
1145 * fall-through case = badvaddr *pgd_current
1146 * vmalloc case = badvaddr swapper_pg_dir
1147 */
1148
1149 if (vmalloc_branch_delay_filled)
1150 /* get pgd offset in bytes */
1151 uasm_i_dsrl_safe(p, scratch, tmp, PGDIR_SHIFT - 3);
1152
1153#ifdef __PAGETABLE_PMD_FOLDED
1154 GET_CONTEXT(p, tmp); /* get context reg */
1155#endif
1156 uasm_i_andi(p, scratch, scratch, (PTRS_PER_PGD - 1) << 3);
1157
1158 if (use_lwx_insns()) {
1159 UASM_i_LWX(p, LOC_PTEP, scratch, ptr);
1160 } else {
1161 uasm_i_daddu(p, ptr, ptr, scratch); /* add in pgd offset */
1162 uasm_i_ld(p, LOC_PTEP, 0, ptr); /* get pmd pointer */
1163 }
1164
1165#ifndef __PAGETABLE_PUD_FOLDED
1166 /* get pud offset in bytes */
1167 uasm_i_dsrl_safe(p, scratch, tmp, PUD_SHIFT - 3);
1168 uasm_i_andi(p, scratch, scratch, (PTRS_PER_PUD - 1) << 3);
1169
1170 if (use_lwx_insns()) {
1171 UASM_i_LWX(p, ptr, scratch, ptr);
1172 } else {
1173 uasm_i_daddu(p, ptr, ptr, scratch); /* add in pmd offset */
1174 UASM_i_LW(p, ptr, 0, ptr);
1175 }
1176 /* ptr contains a pointer to PMD entry */
1177 /* tmp contains the address */
1178#endif
1179
1180#ifndef __PAGETABLE_PMD_FOLDED
1181 /* get pmd offset in bytes */
1182 uasm_i_dsrl_safe(p, scratch, tmp, PMD_SHIFT - 3);
1183 uasm_i_andi(p, scratch, scratch, (PTRS_PER_PMD - 1) << 3);
1184 GET_CONTEXT(p, tmp); /* get context reg */
1185
1186 if (use_lwx_insns()) {
1187 UASM_i_LWX(p, scratch, scratch, ptr);
1188 } else {
1189 uasm_i_daddu(p, ptr, ptr, scratch); /* add in pmd offset */
1190 UASM_i_LW(p, scratch, 0, ptr);
1191 }
1192#endif
1193 /* Adjust the context during the load latency. */
1194 build_adjust_context(p, tmp);
1195
1196#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
1197 uasm_il_bbit1(p, r, scratch, ilog2(_PAGE_HUGE), label_tlb_huge_update);
1198 /*
1199 * The in the LWX case we don't want to do the load in the
1200 * delay slot. It cannot issue in the same cycle and may be
1201 * speculative and unneeded.
1202 */
1203 if (use_lwx_insns())
1204 uasm_i_nop(p);
1205#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
1206
1207
1208 /* build_update_entries */
1209 if (use_lwx_insns()) {
1210 even = ptr;
1211 odd = tmp;
1212 UASM_i_LWX(p, even, scratch, tmp);
1213 UASM_i_ADDIU(p, tmp, tmp, sizeof(pte_t));
1214 UASM_i_LWX(p, odd, scratch, tmp);
1215 } else {
1216 UASM_i_ADDU(p, ptr, scratch, tmp); /* add in offset */
1217 even = tmp;
1218 odd = ptr;
1219 UASM_i_LW(p, even, 0, ptr); /* get even pte */
1220 UASM_i_LW(p, odd, sizeof(pte_t), ptr); /* get odd pte */
1221 }
1222 if (cpu_has_rixi) {
1223 uasm_i_drotr(p, even, even, ilog2(_PAGE_GLOBAL));
1224 UASM_i_MTC0(p, even, C0_ENTRYLO0); /* load it */
1225 uasm_i_drotr(p, odd, odd, ilog2(_PAGE_GLOBAL));
1226 } else {
1227 uasm_i_dsrl_safe(p, even, even, ilog2(_PAGE_GLOBAL));
1228 UASM_i_MTC0(p, even, C0_ENTRYLO0); /* load it */
1229 uasm_i_dsrl_safe(p, odd, odd, ilog2(_PAGE_GLOBAL));
1230 }
1231 UASM_i_MTC0(p, odd, C0_ENTRYLO1); /* load it */
1232
1233 if (c0_scratch_reg >= 0) {
1234 uasm_i_ehb(p);
1235 UASM_i_MFC0(p, scratch, c0_kscratch(), c0_scratch_reg);
1236 build_tlb_write_entry(p, l, r, tlb_random);
1237 uasm_l_leave(l, *p);
1238 rv.restore_scratch = 1;
1239 } else if (PAGE_SHIFT == 14 || PAGE_SHIFT == 13) {
1240 build_tlb_write_entry(p, l, r, tlb_random);
1241 uasm_l_leave(l, *p);
1242 UASM_i_LW(p, scratch, scratchpad_offset(0), 0);
1243 } else {
1244 UASM_i_LW(p, scratch, scratchpad_offset(0), 0);
1245 build_tlb_write_entry(p, l, r, tlb_random);
1246 uasm_l_leave(l, *p);
1247 rv.restore_scratch = 1;
1248 }
1249
1250 uasm_i_eret(p); /* return from trap */
1251
1252 return rv;
1253}
1254
1255/*
1256 * For a 64-bit kernel, we are using the 64-bit XTLB refill exception
1257 * because EXL == 0. If we wrap, we can also use the 32 instruction
1258 * slots before the XTLB refill exception handler which belong to the
1259 * unused TLB refill exception.
1260 */
1261#define MIPS64_REFILL_INSNS 32
1262
1263static void build_r4000_tlb_refill_handler(void)
1264{
1265 u32 *p = tlb_handler;
1266 struct uasm_label *l = labels;
1267 struct uasm_reloc *r = relocs;
1268 u32 *f;
1269 unsigned int final_len;
1270 struct mips_huge_tlb_info htlb_info __maybe_unused;
1271 enum vmalloc64_mode vmalloc_mode __maybe_unused;
1272
1273 memset(tlb_handler, 0, sizeof(tlb_handler));
1274 memset(labels, 0, sizeof(labels));
1275 memset(relocs, 0, sizeof(relocs));
1276 memset(final_handler, 0, sizeof(final_handler));
1277
1278 if (IS_ENABLED(CONFIG_64BIT) && (scratch_reg >= 0 || scratchpad_available()) && use_bbit_insns()) {
1279 htlb_info = build_fast_tlb_refill_handler(&p, &l, &r, K0, K1,
1280 scratch_reg);
1281 vmalloc_mode = refill_scratch;
1282 } else {
1283 htlb_info.huge_pte = K0;
1284 htlb_info.restore_scratch = 0;
1285 htlb_info.need_reload_pte = true;
1286 vmalloc_mode = refill_noscratch;
1287 /*
1288 * create the plain linear handler
1289 */
1290 if (bcm1250_m3_war()) {
1291 unsigned int segbits = 44;
1292
1293 uasm_i_dmfc0(&p, K0, C0_BADVADDR);
1294 uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
1295 uasm_i_xor(&p, K0, K0, K1);
1296 uasm_i_dsrl_safe(&p, K1, K0, 62);
1297 uasm_i_dsrl_safe(&p, K0, K0, 12 + 1);
1298 uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits);
1299 uasm_i_or(&p, K0, K0, K1);
1300 uasm_il_bnez(&p, &r, K0, label_leave);
1301 /* No need for uasm_i_nop */
1302 }
1303
1304#ifdef CONFIG_64BIT
1305 build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */
1306#else
1307 build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
1308#endif
1309
1310#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
1311 build_is_huge_pte(&p, &r, K0, K1, label_tlb_huge_update);
1312#endif
1313
1314 build_get_ptep(&p, K0, K1);
1315 build_update_entries(&p, K0, K1);
1316 build_tlb_write_entry(&p, &l, &r, tlb_random);
1317 uasm_l_leave(&l, p);
1318 uasm_i_eret(&p); /* return from trap */
1319 }
1320#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
1321 uasm_l_tlb_huge_update(&l, p);
1322 if (htlb_info.need_reload_pte)
1323 UASM_i_LW(&p, htlb_info.huge_pte, 0, K1);
1324 build_huge_update_entries(&p, htlb_info.huge_pte, K1);
1325 build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random,
1326 htlb_info.restore_scratch);
1327#endif
1328
1329#ifdef CONFIG_64BIT
1330 build_get_pgd_vmalloc64(&p, &l, &r, K0, K1, vmalloc_mode);
1331#endif
1332
1333 /*
1334 * Overflow check: For the 64bit handler, we need at least one
1335 * free instruction slot for the wrap-around branch. In worst
1336 * case, if the intended insertion point is a delay slot, we
1337 * need three, with the second nop'ed and the third being
1338 * unused.
1339 */
1340 switch (boot_cpu_type()) {
1341 default:
1342 if (sizeof(long) == 4) {
1343 fallthrough;
1344 case CPU_LOONGSON2EF:
1345 /* Loongson2 ebase is different than r4k, we have more space */
1346 if ((p - tlb_handler) > 64)
1347 panic("TLB refill handler space exceeded");
1348 /*
1349 * Now fold the handler in the TLB refill handler space.
1350 */
1351 f = final_handler;
1352 /* Simplest case, just copy the handler. */
1353 uasm_copy_handler(relocs, labels, tlb_handler, p, f);
1354 final_len = p - tlb_handler;
1355 break;
1356 } else {
1357 if (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 1)
1358 || (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 3)
1359 && uasm_insn_has_bdelay(relocs,
1360 tlb_handler + MIPS64_REFILL_INSNS - 3)))
1361 panic("TLB refill handler space exceeded");
1362 /*
1363 * Now fold the handler in the TLB refill handler space.
1364 */
1365 f = final_handler + MIPS64_REFILL_INSNS;
1366 if ((p - tlb_handler) <= MIPS64_REFILL_INSNS) {
1367 /* Just copy the handler. */
1368 uasm_copy_handler(relocs, labels, tlb_handler, p, f);
1369 final_len = p - tlb_handler;
1370 } else {
1371#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
1372 const enum label_id ls = label_tlb_huge_update;
1373#else
1374 const enum label_id ls = label_vmalloc;
1375#endif
1376 u32 *split;
1377 int ov = 0;
1378 int i;
1379
1380 for (i = 0; i < ARRAY_SIZE(labels) && labels[i].lab != ls; i++)
1381 ;
1382 BUG_ON(i == ARRAY_SIZE(labels));
1383 split = labels[i].addr;
1384
1385 /*
1386 * See if we have overflown one way or the other.
1387 */
1388 if (split > tlb_handler + MIPS64_REFILL_INSNS ||
1389 split < p - MIPS64_REFILL_INSNS)
1390 ov = 1;
1391
1392 if (ov) {
1393 /*
1394 * Split two instructions before the end. One
1395 * for the branch and one for the instruction
1396 * in the delay slot.
1397 */
1398 split = tlb_handler + MIPS64_REFILL_INSNS - 2;
1399
1400 /*
1401 * If the branch would fall in a delay slot,
1402 * we must back up an additional instruction
1403 * so that it is no longer in a delay slot.
1404 */
1405 if (uasm_insn_has_bdelay(relocs, split - 1))
1406 split--;
1407 }
1408 /* Copy first part of the handler. */
1409 uasm_copy_handler(relocs, labels, tlb_handler, split, f);
1410 f += split - tlb_handler;
1411
1412 if (ov) {
1413 /* Insert branch. */
1414 uasm_l_split(&l, final_handler);
1415 uasm_il_b(&f, &r, label_split);
1416 if (uasm_insn_has_bdelay(relocs, split))
1417 uasm_i_nop(&f);
1418 else {
1419 uasm_copy_handler(relocs, labels,
1420 split, split + 1, f);
1421 uasm_move_labels(labels, f, f + 1, -1);
1422 f++;
1423 split++;
1424 }
1425 }
1426
1427 /* Copy the rest of the handler. */
1428 uasm_copy_handler(relocs, labels, split, p, final_handler);
1429 final_len = (f - (final_handler + MIPS64_REFILL_INSNS)) +
1430 (p - split);
1431 }
1432 }
1433 break;
1434 }
1435
1436 uasm_resolve_relocs(relocs, labels);
1437 pr_debug("Wrote TLB refill handler (%u instructions).\n",
1438 final_len);
1439
1440 memcpy((void *)ebase, final_handler, 0x100);
1441 local_flush_icache_range(ebase, ebase + 0x100);
1442 dump_handler("r4000_tlb_refill", (u32 *)ebase, (u32 *)(ebase + 0x100));
1443}
1444
1445static void setup_pw(void)
1446{
1447 unsigned int pwctl;
1448 unsigned long pgd_i, pgd_w;
1449#ifndef __PAGETABLE_PMD_FOLDED
1450 unsigned long pmd_i, pmd_w;
1451#endif
1452 unsigned long pt_i, pt_w;
1453 unsigned long pte_i, pte_w;
1454#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
1455 unsigned long psn;
1456
1457 psn = ilog2(_PAGE_HUGE); /* bit used to indicate huge page */
1458#endif
1459 pgd_i = PGDIR_SHIFT; /* 1st level PGD */
1460#ifndef __PAGETABLE_PMD_FOLDED
1461 pgd_w = PGDIR_SHIFT - PMD_SHIFT + PGD_TABLE_ORDER;
1462
1463 pmd_i = PMD_SHIFT; /* 2nd level PMD */
1464 pmd_w = PMD_SHIFT - PAGE_SHIFT;
1465#else
1466 pgd_w = PGDIR_SHIFT - PAGE_SHIFT + PGD_TABLE_ORDER;
1467#endif
1468
1469 pt_i = PAGE_SHIFT; /* 3rd level PTE */
1470 pt_w = PAGE_SHIFT - 3;
1471
1472 pte_i = ilog2(_PAGE_GLOBAL);
1473 pte_w = 0;
1474 pwctl = 1 << 30; /* Set PWDirExt */
1475
1476#ifndef __PAGETABLE_PMD_FOLDED
1477 write_c0_pwfield(pgd_i << 24 | pmd_i << 12 | pt_i << 6 | pte_i);
1478 write_c0_pwsize(1 << 30 | pgd_w << 24 | pmd_w << 12 | pt_w << 6 | pte_w);
1479#else
1480 write_c0_pwfield(pgd_i << 24 | pt_i << 6 | pte_i);
1481 write_c0_pwsize(1 << 30 | pgd_w << 24 | pt_w << 6 | pte_w);
1482#endif
1483
1484#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
1485 pwctl |= (1 << 6 | psn);
1486#endif
1487 write_c0_pwctl(pwctl);
1488 write_c0_kpgd((long)swapper_pg_dir);
1489 kscratch_used_mask |= (1 << 7); /* KScratch6 is used for KPGD */
1490}
1491
1492static void build_loongson3_tlb_refill_handler(void)
1493{
1494 u32 *p = tlb_handler;
1495 struct uasm_label *l = labels;
1496 struct uasm_reloc *r = relocs;
1497
1498 memset(labels, 0, sizeof(labels));
1499 memset(relocs, 0, sizeof(relocs));
1500 memset(tlb_handler, 0, sizeof(tlb_handler));
1501
1502 if (check_for_high_segbits) {
1503 uasm_i_dmfc0(&p, K0, C0_BADVADDR);
1504 uasm_i_dsrl_safe(&p, K1, K0, PGDIR_SHIFT + PGD_TABLE_ORDER + PAGE_SHIFT - 3);
1505 uasm_il_beqz(&p, &r, K1, label_vmalloc);
1506 uasm_i_nop(&p);
1507
1508 uasm_il_bgez(&p, &r, K0, label_large_segbits_fault);
1509 uasm_i_nop(&p);
1510 uasm_l_vmalloc(&l, p);
1511 }
1512
1513 uasm_i_dmfc0(&p, K1, C0_PGD);
1514
1515 uasm_i_lddir(&p, K0, K1, 3); /* global page dir */
1516#ifndef __PAGETABLE_PMD_FOLDED
1517 uasm_i_lddir(&p, K1, K0, 1); /* middle page dir */
1518#endif
1519 uasm_i_ldpte(&p, K1, 0); /* even */
1520 uasm_i_ldpte(&p, K1, 1); /* odd */
1521 uasm_i_tlbwr(&p);
1522
1523 /* restore page mask */
1524 if (PM_DEFAULT_MASK >> 16) {
1525 uasm_i_lui(&p, K0, PM_DEFAULT_MASK >> 16);
1526 uasm_i_ori(&p, K0, K0, PM_DEFAULT_MASK & 0xffff);
1527 uasm_i_mtc0(&p, K0, C0_PAGEMASK);
1528 } else if (PM_DEFAULT_MASK) {
1529 uasm_i_ori(&p, K0, 0, PM_DEFAULT_MASK);
1530 uasm_i_mtc0(&p, K0, C0_PAGEMASK);
1531 } else {
1532 uasm_i_mtc0(&p, 0, C0_PAGEMASK);
1533 }
1534
1535 uasm_i_eret(&p);
1536
1537 if (check_for_high_segbits) {
1538 uasm_l_large_segbits_fault(&l, p);
1539 UASM_i_LA(&p, K1, (unsigned long)tlb_do_page_fault_0);
1540 uasm_i_jr(&p, K1);
1541 uasm_i_nop(&p);
1542 }
1543
1544 uasm_resolve_relocs(relocs, labels);
1545 memcpy((void *)(ebase + 0x80), tlb_handler, 0x80);
1546 local_flush_icache_range(ebase + 0x80, ebase + 0x100);
1547 dump_handler("loongson3_tlb_refill",
1548 (u32 *)(ebase + 0x80), (u32 *)(ebase + 0x100));
1549}
1550
1551static void build_setup_pgd(void)
1552{
1553 const int a0 = 4;
1554 const int __maybe_unused a1 = 5;
1555 const int __maybe_unused a2 = 6;
1556 u32 *p = (u32 *)msk_isa16_mode((ulong)tlbmiss_handler_setup_pgd);
1557#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
1558 long pgdc = (long)pgd_current;
1559#endif
1560
1561 memset(p, 0, tlbmiss_handler_setup_pgd_end - (char *)p);
1562 memset(labels, 0, sizeof(labels));
1563 memset(relocs, 0, sizeof(relocs));
1564 pgd_reg = allocate_kscratch();
1565#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
1566 if (pgd_reg == -1) {
1567 struct uasm_label *l = labels;
1568 struct uasm_reloc *r = relocs;
1569
1570 /* PGD << 11 in c0_Context */
1571 /*
1572 * If it is a ckseg0 address, convert to a physical
1573 * address. Shifting right by 29 and adding 4 will
1574 * result in zero for these addresses.
1575 *
1576 */
1577 UASM_i_SRA(&p, a1, a0, 29);
1578 UASM_i_ADDIU(&p, a1, a1, 4);
1579 uasm_il_bnez(&p, &r, a1, label_tlbl_goaround1);
1580 uasm_i_nop(&p);
1581 uasm_i_dinsm(&p, a0, 0, 29, 64 - 29);
1582 uasm_l_tlbl_goaround1(&l, p);
1583 UASM_i_SLL(&p, a0, a0, 11);
1584 UASM_i_MTC0(&p, a0, C0_CONTEXT);
1585 uasm_i_jr(&p, 31);
1586 uasm_i_ehb(&p);
1587 } else {
1588 /* PGD in c0_KScratch */
1589 if (cpu_has_ldpte)
1590 UASM_i_MTC0(&p, a0, C0_PWBASE);
1591 else
1592 UASM_i_MTC0(&p, a0, c0_kscratch(), pgd_reg);
1593 uasm_i_jr(&p, 31);
1594 uasm_i_ehb(&p);
1595 }
1596#else
1597#ifdef CONFIG_SMP
1598 /* Save PGD to pgd_current[smp_processor_id()] */
1599 UASM_i_CPUID_MFC0(&p, a1, SMP_CPUID_REG);
1600 UASM_i_SRL_SAFE(&p, a1, a1, SMP_CPUID_PTRSHIFT);
1601 UASM_i_LA_mostly(&p, a2, pgdc);
1602 UASM_i_ADDU(&p, a2, a2, a1);
1603 UASM_i_SW(&p, a0, uasm_rel_lo(pgdc), a2);
1604#else
1605 UASM_i_LA_mostly(&p, a2, pgdc);
1606 UASM_i_SW(&p, a0, uasm_rel_lo(pgdc), a2);
1607#endif /* SMP */
1608
1609 /* if pgd_reg is allocated, save PGD also to scratch register */
1610 if (pgd_reg != -1) {
1611 UASM_i_MTC0(&p, a0, c0_kscratch(), pgd_reg);
1612 uasm_i_jr(&p, 31);
1613 uasm_i_ehb(&p);
1614 } else {
1615 uasm_i_jr(&p, 31);
1616 uasm_i_nop(&p);
1617 }
1618#endif
1619 if (p >= (u32 *)tlbmiss_handler_setup_pgd_end)
1620 panic("tlbmiss_handler_setup_pgd space exceeded");
1621
1622 uasm_resolve_relocs(relocs, labels);
1623 pr_debug("Wrote tlbmiss_handler_setup_pgd (%u instructions).\n",
1624 (unsigned int)(p - (u32 *)tlbmiss_handler_setup_pgd));
1625
1626 dump_handler("tlbmiss_handler", tlbmiss_handler_setup_pgd,
1627 tlbmiss_handler_setup_pgd_end);
1628}
1629
1630static void
1631iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr)
1632{
1633#ifdef CONFIG_SMP
1634 if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS))
1635 uasm_i_sync(p, 0);
1636# ifdef CONFIG_PHYS_ADDR_T_64BIT
1637 if (cpu_has_64bits)
1638 uasm_i_lld(p, pte, 0, ptr);
1639 else
1640# endif
1641 UASM_i_LL(p, pte, 0, ptr);
1642#else
1643# ifdef CONFIG_PHYS_ADDR_T_64BIT
1644 if (cpu_has_64bits)
1645 uasm_i_ld(p, pte, 0, ptr);
1646 else
1647# endif
1648 UASM_i_LW(p, pte, 0, ptr);
1649#endif
1650}
1651
1652static void
1653iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
1654 unsigned int mode, unsigned int scratch)
1655{
1656 unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY);
1657 unsigned int swmode = mode & ~hwmode;
1658
1659 if (IS_ENABLED(CONFIG_XPA) && !cpu_has_64bits) {
1660 uasm_i_lui(p, scratch, swmode >> 16);
1661 uasm_i_or(p, pte, pte, scratch);
1662 BUG_ON(swmode & 0xffff);
1663 } else {
1664 uasm_i_ori(p, pte, pte, mode);
1665 }
1666
1667#ifdef CONFIG_SMP
1668# ifdef CONFIG_PHYS_ADDR_T_64BIT
1669 if (cpu_has_64bits)
1670 uasm_i_scd(p, pte, 0, ptr);
1671 else
1672# endif
1673 UASM_i_SC(p, pte, 0, ptr);
1674
1675 if (r10000_llsc_war())
1676 uasm_il_beqzl(p, r, pte, label_smp_pgtable_change);
1677 else
1678 uasm_il_beqz(p, r, pte, label_smp_pgtable_change);
1679
1680# ifdef CONFIG_PHYS_ADDR_T_64BIT
1681 if (!cpu_has_64bits) {
1682 /* no uasm_i_nop needed */
1683 uasm_i_ll(p, pte, sizeof(pte_t) / 2, ptr);
1684 uasm_i_ori(p, pte, pte, hwmode);
1685 BUG_ON(hwmode & ~0xffff);
1686 uasm_i_sc(p, pte, sizeof(pte_t) / 2, ptr);
1687 uasm_il_beqz(p, r, pte, label_smp_pgtable_change);
1688 /* no uasm_i_nop needed */
1689 uasm_i_lw(p, pte, 0, ptr);
1690 } else
1691 uasm_i_nop(p);
1692# else
1693 uasm_i_nop(p);
1694# endif
1695#else
1696# ifdef CONFIG_PHYS_ADDR_T_64BIT
1697 if (cpu_has_64bits)
1698 uasm_i_sd(p, pte, 0, ptr);
1699 else
1700# endif
1701 UASM_i_SW(p, pte, 0, ptr);
1702
1703# ifdef CONFIG_PHYS_ADDR_T_64BIT
1704 if (!cpu_has_64bits) {
1705 uasm_i_lw(p, pte, sizeof(pte_t) / 2, ptr);
1706 uasm_i_ori(p, pte, pte, hwmode);
1707 BUG_ON(hwmode & ~0xffff);
1708 uasm_i_sw(p, pte, sizeof(pte_t) / 2, ptr);
1709 uasm_i_lw(p, pte, 0, ptr);
1710 }
1711# endif
1712#endif
1713}
1714
1715/*
1716 * Check if PTE is present, if not then jump to LABEL. PTR points to
1717 * the page table where this PTE is located, PTE will be re-loaded
1718 * with its original value.
1719 */
1720static void
1721build_pte_present(u32 **p, struct uasm_reloc **r,
1722 int pte, int ptr, int scratch, enum label_id lid)
1723{
1724 int t = scratch >= 0 ? scratch : pte;
1725 int cur = pte;
1726
1727 if (cpu_has_rixi) {
1728 if (use_bbit_insns()) {
1729 uasm_il_bbit0(p, r, pte, ilog2(_PAGE_PRESENT), lid);
1730 uasm_i_nop(p);
1731 } else {
1732 if (_PAGE_PRESENT_SHIFT) {
1733 uasm_i_srl(p, t, cur, _PAGE_PRESENT_SHIFT);
1734 cur = t;
1735 }
1736 uasm_i_andi(p, t, cur, 1);
1737 uasm_il_beqz(p, r, t, lid);
1738 if (pte == t)
1739 /* You lose the SMP race :-(*/
1740 iPTE_LW(p, pte, ptr);
1741 }
1742 } else {
1743 if (_PAGE_PRESENT_SHIFT) {
1744 uasm_i_srl(p, t, cur, _PAGE_PRESENT_SHIFT);
1745 cur = t;
1746 }
1747 uasm_i_andi(p, t, cur,
1748 (_PAGE_PRESENT | _PAGE_NO_READ) >> _PAGE_PRESENT_SHIFT);
1749 uasm_i_xori(p, t, t, _PAGE_PRESENT >> _PAGE_PRESENT_SHIFT);
1750 uasm_il_bnez(p, r, t, lid);
1751 if (pte == t)
1752 /* You lose the SMP race :-(*/
1753 iPTE_LW(p, pte, ptr);
1754 }
1755}
1756
1757/* Make PTE valid, store result in PTR. */
1758static void
1759build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte,
1760 unsigned int ptr, unsigned int scratch)
1761{
1762 unsigned int mode = _PAGE_VALID | _PAGE_ACCESSED;
1763
1764 iPTE_SW(p, r, pte, ptr, mode, scratch);
1765}
1766
1767/*
1768 * Check if PTE can be written to, if not branch to LABEL. Regardless
1769 * restore PTE with value from PTR when done.
1770 */
1771static void
1772build_pte_writable(u32 **p, struct uasm_reloc **r,
1773 unsigned int pte, unsigned int ptr, int scratch,
1774 enum label_id lid)
1775{
1776 int t = scratch >= 0 ? scratch : pte;
1777 int cur = pte;
1778
1779 if (_PAGE_PRESENT_SHIFT) {
1780 uasm_i_srl(p, t, cur, _PAGE_PRESENT_SHIFT);
1781 cur = t;
1782 }
1783 uasm_i_andi(p, t, cur,
1784 (_PAGE_PRESENT | _PAGE_WRITE) >> _PAGE_PRESENT_SHIFT);
1785 uasm_i_xori(p, t, t,
1786 (_PAGE_PRESENT | _PAGE_WRITE) >> _PAGE_PRESENT_SHIFT);
1787 uasm_il_bnez(p, r, t, lid);
1788 if (pte == t)
1789 /* You lose the SMP race :-(*/
1790 iPTE_LW(p, pte, ptr);
1791 else
1792 uasm_i_nop(p);
1793}
1794
1795/* Make PTE writable, update software status bits as well, then store
1796 * at PTR.
1797 */
1798static void
1799build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte,
1800 unsigned int ptr, unsigned int scratch)
1801{
1802 unsigned int mode = (_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID
1803 | _PAGE_DIRTY);
1804
1805 iPTE_SW(p, r, pte, ptr, mode, scratch);
1806}
1807
1808/*
1809 * Check if PTE can be modified, if not branch to LABEL. Regardless
1810 * restore PTE with value from PTR when done.
1811 */
1812static void
1813build_pte_modifiable(u32 **p, struct uasm_reloc **r,
1814 unsigned int pte, unsigned int ptr, int scratch,
1815 enum label_id lid)
1816{
1817 if (use_bbit_insns()) {
1818 uasm_il_bbit0(p, r, pte, ilog2(_PAGE_WRITE), lid);
1819 uasm_i_nop(p);
1820 } else {
1821 int t = scratch >= 0 ? scratch : pte;
1822 uasm_i_srl(p, t, pte, _PAGE_WRITE_SHIFT);
1823 uasm_i_andi(p, t, t, 1);
1824 uasm_il_beqz(p, r, t, lid);
1825 if (pte == t)
1826 /* You lose the SMP race :-(*/
1827 iPTE_LW(p, pte, ptr);
1828 }
1829}
1830
1831#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
1832
1833
1834/*
1835 * R3000 style TLB load/store/modify handlers.
1836 */
1837
1838/*
1839 * This places the pte into ENTRYLO0 and writes it with tlbwi.
1840 * Then it returns.
1841 */
1842static void
1843build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp)
1844{
1845 uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
1846 uasm_i_mfc0(p, tmp, C0_EPC); /* cp0 delay */
1847 uasm_i_tlbwi(p);
1848 uasm_i_jr(p, tmp);
1849 uasm_i_rfe(p); /* branch delay */
1850}
1851
1852/*
1853 * This places the pte into ENTRYLO0 and writes it with tlbwi
1854 * or tlbwr as appropriate. This is because the index register
1855 * may have the probe fail bit set as a result of a trap on a
1856 * kseg2 access, i.e. without refill. Then it returns.
1857 */
1858static void
1859build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l,
1860 struct uasm_reloc **r, unsigned int pte,
1861 unsigned int tmp)
1862{
1863 uasm_i_mfc0(p, tmp, C0_INDEX);
1864 uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
1865 uasm_il_bltz(p, r, tmp, label_r3000_write_probe_fail); /* cp0 delay */
1866 uasm_i_mfc0(p, tmp, C0_EPC); /* branch delay */
1867 uasm_i_tlbwi(p); /* cp0 delay */
1868 uasm_i_jr(p, tmp);
1869 uasm_i_rfe(p); /* branch delay */
1870 uasm_l_r3000_write_probe_fail(l, *p);
1871 uasm_i_tlbwr(p); /* cp0 delay */
1872 uasm_i_jr(p, tmp);
1873 uasm_i_rfe(p); /* branch delay */
1874}
1875
1876static void
1877build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte,
1878 unsigned int ptr)
1879{
1880 long pgdc = (long)pgd_current;
1881
1882 uasm_i_mfc0(p, pte, C0_BADVADDR);
1883 uasm_i_lui(p, ptr, uasm_rel_hi(pgdc)); /* cp0 delay */
1884 uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
1885 uasm_i_srl(p, pte, pte, 22); /* load delay */
1886 uasm_i_sll(p, pte, pte, 2);
1887 uasm_i_addu(p, ptr, ptr, pte);
1888 uasm_i_mfc0(p, pte, C0_CONTEXT);
1889 uasm_i_lw(p, ptr, 0, ptr); /* cp0 delay */
1890 uasm_i_andi(p, pte, pte, 0xffc); /* load delay */
1891 uasm_i_addu(p, ptr, ptr, pte);
1892 uasm_i_lw(p, pte, 0, ptr);
1893 uasm_i_tlbp(p); /* load delay */
1894}
1895
1896static void build_r3000_tlb_load_handler(void)
1897{
1898 u32 *p = (u32 *)handle_tlbl;
1899 struct uasm_label *l = labels;
1900 struct uasm_reloc *r = relocs;
1901
1902 memset(p, 0, handle_tlbl_end - (char *)p);
1903 memset(labels, 0, sizeof(labels));
1904 memset(relocs, 0, sizeof(relocs));
1905
1906 build_r3000_tlbchange_handler_head(&p, K0, K1);
1907 build_pte_present(&p, &r, K0, K1, -1, label_nopage_tlbl);
1908 uasm_i_nop(&p); /* load delay */
1909 build_make_valid(&p, &r, K0, K1, -1);
1910 build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);
1911
1912 uasm_l_nopage_tlbl(&l, p);
1913 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
1914 uasm_i_nop(&p);
1915
1916 if (p >= (u32 *)handle_tlbl_end)
1917 panic("TLB load handler fastpath space exceeded");
1918
1919 uasm_resolve_relocs(relocs, labels);
1920 pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
1921 (unsigned int)(p - (u32 *)handle_tlbl));
1922
1923 dump_handler("r3000_tlb_load", handle_tlbl, handle_tlbl_end);
1924}
1925
1926static void build_r3000_tlb_store_handler(void)
1927{
1928 u32 *p = (u32 *)handle_tlbs;
1929 struct uasm_label *l = labels;
1930 struct uasm_reloc *r = relocs;
1931
1932 memset(p, 0, handle_tlbs_end - (char *)p);
1933 memset(labels, 0, sizeof(labels));
1934 memset(relocs, 0, sizeof(relocs));
1935
1936 build_r3000_tlbchange_handler_head(&p, K0, K1);
1937 build_pte_writable(&p, &r, K0, K1, -1, label_nopage_tlbs);
1938 uasm_i_nop(&p); /* load delay */
1939 build_make_write(&p, &r, K0, K1, -1);
1940 build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);
1941
1942 uasm_l_nopage_tlbs(&l, p);
1943 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
1944 uasm_i_nop(&p);
1945
1946 if (p >= (u32 *)handle_tlbs_end)
1947 panic("TLB store handler fastpath space exceeded");
1948
1949 uasm_resolve_relocs(relocs, labels);
1950 pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
1951 (unsigned int)(p - (u32 *)handle_tlbs));
1952
1953 dump_handler("r3000_tlb_store", handle_tlbs, handle_tlbs_end);
1954}
1955
1956static void build_r3000_tlb_modify_handler(void)
1957{
1958 u32 *p = (u32 *)handle_tlbm;
1959 struct uasm_label *l = labels;
1960 struct uasm_reloc *r = relocs;
1961
1962 memset(p, 0, handle_tlbm_end - (char *)p);
1963 memset(labels, 0, sizeof(labels));
1964 memset(relocs, 0, sizeof(relocs));
1965
1966 build_r3000_tlbchange_handler_head(&p, K0, K1);
1967 build_pte_modifiable(&p, &r, K0, K1, -1, label_nopage_tlbm);
1968 uasm_i_nop(&p); /* load delay */
1969 build_make_write(&p, &r, K0, K1, -1);
1970 build_r3000_pte_reload_tlbwi(&p, K0, K1);
1971
1972 uasm_l_nopage_tlbm(&l, p);
1973 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
1974 uasm_i_nop(&p);
1975
1976 if (p >= (u32 *)handle_tlbm_end)
1977 panic("TLB modify handler fastpath space exceeded");
1978
1979 uasm_resolve_relocs(relocs, labels);
1980 pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
1981 (unsigned int)(p - (u32 *)handle_tlbm));
1982
1983 dump_handler("r3000_tlb_modify", handle_tlbm, handle_tlbm_end);
1984}
1985#endif /* CONFIG_MIPS_PGD_C0_CONTEXT */
1986
1987static bool cpu_has_tlbex_tlbp_race(void)
1988{
1989 /*
1990 * When a Hardware Table Walker is running it can replace TLB entries
1991 * at any time, leading to a race between it & the CPU.
1992 */
1993 if (cpu_has_htw)
1994 return true;
1995
1996 /*
1997 * If the CPU shares FTLB RAM with its siblings then our entry may be
1998 * replaced at any time by a sibling performing a write to the FTLB.
1999 */
2000 if (cpu_has_shared_ftlb_ram)
2001 return true;
2002
2003 /* In all other cases there ought to be no race condition to handle */
2004 return false;
2005}
2006
2007/*
2008 * R4000 style TLB load/store/modify handlers.
2009 */
2010static struct work_registers
2011build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
2012 struct uasm_reloc **r)
2013{
2014 struct work_registers wr = build_get_work_registers(p);
2015
2016#ifdef CONFIG_64BIT
2017 build_get_pmde64(p, l, r, wr.r1, wr.r2); /* get pmd in ptr */
2018#else
2019 build_get_pgde32(p, wr.r1, wr.r2); /* get pgd in ptr */
2020#endif
2021
2022#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
2023 /*
2024 * For huge tlb entries, pmd doesn't contain an address but
2025 * instead contains the tlb pte. Check the PAGE_HUGE bit and
2026 * see if we need to jump to huge tlb processing.
2027 */
2028 build_is_huge_pte(p, r, wr.r1, wr.r2, label_tlb_huge_update);
2029#endif
2030
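 /*
  * Not a huge page: wr.r2 points at the pmd slot, so load the pte page
  * base from it and index it with the page-number bits of BadVAddr,
  * masked to stay within the pte page. The result is the address of
  * the faulting pte.
  */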
2031 UASM_i_MFC0(p, wr.r1, C0_BADVADDR);
2032 UASM_i_LW(p, wr.r2, 0, wr.r2);
2033 UASM_i_SRL(p, wr.r1, wr.r1, PAGE_SHIFT - PTE_T_LOG2);
2034 uasm_i_andi(p, wr.r1, wr.r1, (PTRS_PER_PTE - 1) << PTE_T_LOG2);
2035 UASM_i_ADDU(p, wr.r2, wr.r2, wr.r1);
2036
2037#ifdef CONFIG_SMP
2038 uasm_l_smp_pgtable_change(l, *p);
2039#endif
2040 iPTE_LW(p, wr.r1, wr.r2); /* get even pte */
2041 if (!m4kc_tlbp_war()) {
2042 build_tlb_probe_entry(p);
2043 if (cpu_has_tlbex_tlbp_race()) {
2044 /* If the probe missed, the entry was replaced under us; just leave */
2045 uasm_i_ehb(p);
2046 uasm_i_mfc0(p, wr.r3, C0_INDEX);
2047 uasm_il_bltz(p, r, wr.r3, label_leave);
2048 uasm_i_nop(p);
2049 }
2050 }
2051 return wr;
2052}
2053
2054static void
2055build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
2056 struct uasm_reloc **r, unsigned int tmp,
2057 unsigned int ptr)
2058{
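 /*
  * The ori/xori pair below first sets and then clears the
  * sizeof(pte_t) bit of ptr, i.e. it rounds ptr down to the even pte
  * of the even/odd pair before both EntryLo registers are rebuilt:
  *
  *	ori	ptr, ptr, sizeof(pte_t)
  *	xori	ptr, ptr, sizeof(pte_t)
  */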
2059 uasm_i_ori(p, ptr, ptr, sizeof(pte_t));
2060 uasm_i_xori(p, ptr, ptr, sizeof(pte_t));
2061 build_update_entries(p, tmp, ptr);
2062 build_tlb_write_entry(p, l, r, tlb_indexed);
2063 uasm_l_leave(l, *p);
2064 build_restore_work_registers(p);
2065 uasm_i_eret(p); /* return from trap */
2066
2067#ifdef CONFIG_64BIT
2068 build_get_pgd_vmalloc64(p, l, r, tmp, ptr, not_refill);
2069#endif
2070}
2071
2072static void build_r4000_tlb_load_handler(void)
2073{
2074 u32 *p = (u32 *)msk_isa16_mode((ulong)handle_tlbl);
2075 struct uasm_label *l = labels;
2076 struct uasm_reloc *r = relocs;
2077 struct work_registers wr;
2078
2079 memset(p, 0, handle_tlbl_end - (char *)p);
2080 memset(labels, 0, sizeof(labels));
2081 memset(relocs, 0, sizeof(relocs));
2082
2083 if (bcm1250_m3_war()) {
2084 unsigned int segbits = 44;
2085
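 /*
  * BCM1250 pass 2 "M3" workaround: the erratum can raise spurious TLB
  * exceptions, so compare the implemented region and VPN bits of
  * BadVAddr and EntryHI; if they disagree the exception is bogus and
  * we simply branch to label_leave and eret.
  */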
2086 uasm_i_dmfc0(&p, K0, C0_BADVADDR);
2087 uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
2088 uasm_i_xor(&p, K0, K0, K1);
2089 uasm_i_dsrl_safe(&p, K1, K0, 62);
2090 uasm_i_dsrl_safe(&p, K0, K0, 12 + 1);
2091 uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits);
2092 uasm_i_or(&p, K0, K0, K1);
2093 uasm_il_bnez(&p, &r, K0, label_leave);
2094 /* No need for uasm_i_nop */
2095 }
2096
2097 wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
2098 build_pte_present(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbl);
2099 if (m4kc_tlbp_war())
2100 build_tlb_probe_entry(&p);
2101
2102 if (cpu_has_rixi && !cpu_has_rixiex) {
2103 /*
2104 * If the page is not _PAGE_VALID, RI or XI could not
2105 * have triggered it. Skip the expensive test.
2106 */
2107 if (use_bbit_insns()) {
2108 uasm_il_bbit0(&p, &r, wr.r1, ilog2(_PAGE_VALID),
2109 label_tlbl_goaround1);
2110 } else {
2111 uasm_i_andi(&p, wr.r3, wr.r1, _PAGE_VALID);
2112 uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround1);
2113 }
2114 uasm_i_nop(&p);
2115
2116 /*
2117 * Warn if something may race with us & replace the TLB entry
2118 * before we read it here. Everything with such races should
2119 * also have dedicated RiXi exception handlers, so this
2120 * shouldn't be hit.
2121 */
2122 WARN(cpu_has_tlbex_tlbp_race(), "Unhandled race in RiXi path");
2123
2124 uasm_i_tlbr(&p);
2125
2126 if (cpu_has_mips_r2_exec_hazard)
2127 uasm_i_ehb(&p);
2128
2129 /* Examine entrylo 0 or 1 based on ptr. */
2130 if (use_bbit_insns()) {
2131 uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8);
2132 } else {
2133 uasm_i_andi(&p, wr.r3, wr.r2, sizeof(pte_t));
2134 uasm_i_beqz(&p, wr.r3, 8);
2135 }
2136 /* load it in the delay slot */
2137 UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO0);
2138 /* load it if ptr is odd */
2139 UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO1);
2140 /*
2141 * If the entryLo (now in wr.r3) is valid (bit 1), RI or
2142 * XI must have triggered it.
2143 */
2144 if (use_bbit_insns()) {
2145 uasm_il_bbit1(&p, &r, wr.r3, 1, label_nopage_tlbl);
2146 uasm_i_nop(&p);
2147 uasm_l_tlbl_goaround1(&l, p);
2148 } else {
2149 uasm_i_andi(&p, wr.r3, wr.r3, 2);
2150 uasm_il_bnez(&p, &r, wr.r3, label_nopage_tlbl);
2151 uasm_i_nop(&p);
2152 }
2153 uasm_l_tlbl_goaround1(&l, p);
2154 }
2155 build_make_valid(&p, &r, wr.r1, wr.r2, wr.r3);
2156 build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
2157
2158#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
2159 /*
2160 * This is the entry point when build_r4000_tlbchange_handler_head
2161 * spots a huge page.
2162 */
2163 uasm_l_tlb_huge_update(&l, p);
2164 iPTE_LW(&p, wr.r1, wr.r2);
2165 build_pte_present(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbl);
2166 build_tlb_probe_entry(&p);
2167
2168 if (cpu_has_rixi && !cpu_has_rixiex) {
2169 /*
2170 * If the page is not _PAGE_VALID, RI or XI could not
2171 * have triggered it. Skip the expensive test.
2172 */
2173 if (use_bbit_insns()) {
2174 uasm_il_bbit0(&p, &r, wr.r1, ilog2(_PAGE_VALID),
2175 label_tlbl_goaround2);
2176 } else {
2177 uasm_i_andi(&p, wr.r3, wr.r1, _PAGE_VALID);
2178 uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround2);
2179 }
2180 uasm_i_nop(&p);
2181
2182 /*
2183 * Warn if something may race with us & replace the TLB entry
2184 * before we read it here. Everything with such races should
2185 * also have dedicated RiXi exception handlers, so this
2186 * shouldn't be hit.
2187 */
2188 WARN(cpu_has_tlbex_tlbp_race(), "Unhandled race in RiXi path");
2189
2190 uasm_i_tlbr(&p);
2191
2192 if (cpu_has_mips_r2_exec_hazard)
2193 uasm_i_ehb(&p);
2194
2195 /* Examine entrylo 0 or 1 based on ptr. */
2196 if (use_bbit_insns()) {
2197 uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8);
2198 } else {
2199 uasm_i_andi(&p, wr.r3, wr.r2, sizeof(pte_t));
2200 uasm_i_beqz(&p, wr.r3, 8);
2201 }
2202 /* load it in the delay slot */
2203 UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO0);
2204 /* load it if ptr is odd */
2205 UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO1);
2206 /*
2207 * If the entryLo (now in wr.r3) is valid (bit 1), RI or
2208 * XI must have triggered it.
2209 */
2210 if (use_bbit_insns()) {
2211 uasm_il_bbit0(&p, &r, wr.r3, 1, label_tlbl_goaround2);
2212 } else {
2213 uasm_i_andi(&p, wr.r3, wr.r3, 2);
2214 uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround2);
2215 }
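 /*
  * When PM_DEFAULT_MASK is zero, build_restore_pagemask() below starts
  * with a branch, which must not end up in the delay slot of the
  * branch just emitted; pad with a nop in that case.
  */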
2216 if (PM_DEFAULT_MASK == 0)
2217 uasm_i_nop(&p);
2218 /*
2219 * We clobbered C0_PAGEMASK, restore it. On the other branch
2220 * it is restored in build_huge_tlb_write_entry.
2221 */
2222 build_restore_pagemask(&p, &r, wr.r3, label_nopage_tlbl, 0);
2223
2224 uasm_l_tlbl_goaround2(&l, p);
2225 }
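 /*
  * Mark the huge pte accessed and valid, then rewrite the paired TLB
  * entry via the huge-page tail.
  */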
2226 uasm_i_ori(&p, wr.r1, wr.r1, (_PAGE_ACCESSED | _PAGE_VALID));
2227 build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 1);
2228#endif
2229
2230 uasm_l_nopage_tlbl(&l, p);
2231 if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS))
2232 uasm_i_sync(&p, 0);
2233 build_restore_work_registers(&p);
2234#ifdef CONFIG_CPU_MICROMIPS
2235 if ((unsigned long)tlb_do_page_fault_0 & 1) {
2236 uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_0));
2237 uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_0));
2238 uasm_i_jr(&p, K0);
2239 } else
2240#endif
2241 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
2242 uasm_i_nop(&p);
2243
2244 if (p >= (u32 *)handle_tlbl_end)
2245 panic("TLB load handler fastpath space exceeded");
2246
2247 uasm_resolve_relocs(relocs, labels);
2248 pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
2249 (unsigned int)(p - (u32 *)handle_tlbl));
2250
2251 dump_handler("r4000_tlb_load", handle_tlbl, handle_tlbl_end);
2252}
2253
2254static void build_r4000_tlb_store_handler(void)
2255{
2256 u32 *p = (u32 *)msk_isa16_mode((ulong)handle_tlbs);
2257 struct uasm_label *l = labels;
2258 struct uasm_reloc *r = relocs;
2259 struct work_registers wr;
2260
2261 memset(p, 0, handle_tlbs_end - (char *)p);
2262 memset(labels, 0, sizeof(labels));
2263 memset(relocs, 0, sizeof(relocs));
2264
2265 wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
2266 build_pte_writable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbs);
2267 if (m4kc_tlbp_war())
2268 build_tlb_probe_entry(&p);
2269 build_make_write(&p, &r, wr.r1, wr.r2, wr.r3);
2270 build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
2271
2272#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
2273 /*
2274 * This is the entry point when
2275 * build_r4000_tlbchange_handler_head spots a huge page.
2276 */
2277 uasm_l_tlb_huge_update(&l, p);
2278 iPTE_LW(&p, wr.r1, wr.r2);
2279 build_pte_writable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbs);
2280 build_tlb_probe_entry(&p);
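 /*
  * Mark the huge pte accessed, modified (dirty) and valid before the
  * huge-page tail rewrites the paired TLB entry.
  */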
2281 uasm_i_ori(&p, wr.r1, wr.r1,
2282 _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
2283 build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 1);
2284#endif
2285
2286 uasm_l_nopage_tlbs(&l, p);
2287 if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS))
2288 uasm_i_sync(&p, 0);
2289 build_restore_work_registers(&p);
2290#ifdef CONFIG_CPU_MICROMIPS
2291 if ((unsigned long)tlb_do_page_fault_1 & 1) {
2292 uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_1));
2293 uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_1));
2294 uasm_i_jr(&p, K0);
2295 } else
2296#endif
2297 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
2298 uasm_i_nop(&p);
2299
2300 if (p >= (u32 *)handle_tlbs_end)
2301 panic("TLB store handler fastpath space exceeded");
2302
2303 uasm_resolve_relocs(relocs, labels);
2304 pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
2305 (unsigned int)(p - (u32 *)handle_tlbs));
2306
2307 dump_handler("r4000_tlb_store", handle_tlbs, handle_tlbs_end);
2308}
2309
2310static void build_r4000_tlb_modify_handler(void)
2311{
2312 u32 *p = (u32 *)msk_isa16_mode((ulong)handle_tlbm);
2313 struct uasm_label *l = labels;
2314 struct uasm_reloc *r = relocs;
2315 struct work_registers wr;
2316
2317 memset(p, 0, handle_tlbm_end - (char *)p);
2318 memset(labels, 0, sizeof(labels));
2319 memset(relocs, 0, sizeof(relocs));
2320
2321 wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
2322 build_pte_modifiable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbm);
2323 if (m4kc_tlbp_war())
2324 build_tlb_probe_entry(&p);
2325 /* Present and writable bits set, set accessed and dirty bits. */
2326 build_make_write(&p, &r, wr.r1, wr.r2, wr.r3);
2327 build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
2328
2329#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
2330 /*
2331 * This is the entry point when
2332 * build_r4000_tlbchange_handler_head spots a huge page.
2333 */
2334 uasm_l_tlb_huge_update(&l, p);
2335 iPTE_LW(&p, wr.r1, wr.r2);
2336 build_pte_modifiable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbm);
2337 build_tlb_probe_entry(&p);
2338 uasm_i_ori(&p, wr.r1, wr.r1,
2339 _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
2340 build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 0);
2341#endif
2342
2343 uasm_l_nopage_tlbm(&l, p);
2344 if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS))
2345 uasm_i_sync(&p, 0);
2346 build_restore_work_registers(&p);
2347#ifdef CONFIG_CPU_MICROMIPS
2348 if ((unsigned long)tlb_do_page_fault_1 & 1) {
2349 uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_1));
2350 uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_1));
2351 uasm_i_jr(&p, K0);
2352 } else
2353#endif
2354 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
2355 uasm_i_nop(&p);
2356
2357 if (p >= (u32 *)handle_tlbm_end)
2358 panic("TLB modify handler fastpath space exceeded");
2359
2360 uasm_resolve_relocs(relocs, labels);
2361 pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
2362 (unsigned int)(p - (u32 *)handle_tlbm));
2363
2364 dump_handler("r4000_tlb_modify", handle_tlbm, handle_tlbm_end);
2365}
2366
2367static void flush_tlb_handlers(void)
2368{
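 /*
  * The handlers were emitted through the data cache; make them visible
  * to instruction fetch before the first exception runs them.
  */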
2369 local_flush_icache_range((unsigned long)handle_tlbl,
2370 (unsigned long)handle_tlbl_end);
2371 local_flush_icache_range((unsigned long)handle_tlbs,
2372 (unsigned long)handle_tlbs_end);
2373 local_flush_icache_range((unsigned long)handle_tlbm,
2374 (unsigned long)handle_tlbm_end);
2375 local_flush_icache_range((unsigned long)tlbmiss_handler_setup_pgd,
2376 (unsigned long)tlbmiss_handler_setup_pgd_end);
2377}
2378
2379static void print_htw_config(void)
2380{
2381 unsigned long config;
2382 unsigned int pwctl;
2383 const int field = 2 * sizeof(unsigned long);
2384
2385 config = read_c0_pwfield();
2386 pr_debug("PWField (0x%0*lx): GDI: 0x%02lx UDI: 0x%02lx MDI: 0x%02lx PTI: 0x%02lx PTEI: 0x%02lx\n",
2387 field, config,
2388 (config & MIPS_PWFIELD_GDI_MASK) >> MIPS_PWFIELD_GDI_SHIFT,
2389 (config & MIPS_PWFIELD_UDI_MASK) >> MIPS_PWFIELD_UDI_SHIFT,
2390 (config & MIPS_PWFIELD_MDI_MASK) >> MIPS_PWFIELD_MDI_SHIFT,
2391 (config & MIPS_PWFIELD_PTI_MASK) >> MIPS_PWFIELD_PTI_SHIFT,
2392 (config & MIPS_PWFIELD_PTEI_MASK) >> MIPS_PWFIELD_PTEI_SHIFT);
2393
2394 config = read_c0_pwsize();
2395 pr_debug("PWSize (0x%0*lx): PS: 0x%lx GDW: 0x%02lx UDW: 0x%02lx MDW: 0x%02lx PTW: 0x%02lx PTEW: 0x%02lx\n",
2396 field, config,
2397 (config & MIPS_PWSIZE_PS_MASK) >> MIPS_PWSIZE_PS_SHIFT,
2398 (config & MIPS_PWSIZE_GDW_MASK) >> MIPS_PWSIZE_GDW_SHIFT,
2399 (config & MIPS_PWSIZE_UDW_MASK) >> MIPS_PWSIZE_UDW_SHIFT,
2400 (config & MIPS_PWSIZE_MDW_MASK) >> MIPS_PWSIZE_MDW_SHIFT,
2401 (config & MIPS_PWSIZE_PTW_MASK) >> MIPS_PWSIZE_PTW_SHIFT,
2402 (config & MIPS_PWSIZE_PTEW_MASK) >> MIPS_PWSIZE_PTEW_SHIFT);
2403
2404 pwctl = read_c0_pwctl();
2405 pr_debug("PWCtl (0x%x): PWEn: 0x%x XK: 0x%x XS: 0x%x XU: 0x%x DPH: 0x%x HugePg: 0x%x Psn: 0x%x\n",
2406 pwctl,
2407 (pwctl & MIPS_PWCTL_PWEN_MASK) >> MIPS_PWCTL_PWEN_SHIFT,
2408 (pwctl & MIPS_PWCTL_XK_MASK) >> MIPS_PWCTL_XK_SHIFT,
2409 (pwctl & MIPS_PWCTL_XS_MASK) >> MIPS_PWCTL_XS_SHIFT,
2410 (pwctl & MIPS_PWCTL_XU_MASK) >> MIPS_PWCTL_XU_SHIFT,
2411 (pwctl & MIPS_PWCTL_DPH_MASK) >> MIPS_PWCTL_DPH_SHIFT,
2412 (pwctl & MIPS_PWCTL_HUGEPG_MASK) >> MIPS_PWCTL_HUGEPG_SHIFT,
2413 (pwctl & MIPS_PWCTL_PSN_MASK) >> MIPS_PWCTL_PSN_SHIFT);
2414}
2415
2416static void config_htw_params(void)
2417{
2418 unsigned long pwfield, pwsize, ptei;
2419 unsigned int config;
2420
2421 /*
2422 * We are using 2-level page tables, so we only need to
2423 * setup GDW and PTW appropriately. UDW and MDW will remain 0.
2424 * The default value of GDI/UDI/MDI/PTI is 0xc. It is illegal to
2425 * write values less than 0xc in these fields because the entire
2426 * write will be dropped. As a result, we must preserve
2427 * the original reset values and overwrite only what we really want.
2428 */
2429
2430 pwfield = read_c0_pwfield();
2431 /* re-initialize the GDI field */
2432 pwfield &= ~MIPS_PWFIELD_GDI_MASK;
2433 pwfield |= PGDIR_SHIFT << MIPS_PWFIELD_GDI_SHIFT;
2434 /* re-initialize the PTI field including the even/odd bit */
2435 pwfield &= ~MIPS_PWFIELD_PTI_MASK;
2436 pwfield |= PAGE_SHIFT << MIPS_PWFIELD_PTI_SHIFT;
2437 if (CONFIG_PGTABLE_LEVELS >= 3) {
2438 pwfield &= ~MIPS_PWFIELD_MDI_MASK;
2439 pwfield |= PMD_SHIFT << MIPS_PWFIELD_MDI_SHIFT;
2440 }
2441 /* Set the PTEI right shift */
2442 ptei = _PAGE_GLOBAL_SHIFT << MIPS_PWFIELD_PTEI_SHIFT;
2443 pwfield |= ptei;
2444 write_c0_pwfield(pwfield);
2445 /* Check whether the PTEI value is supported */
2446 back_to_back_c0_hazard();
2447 pwfield = read_c0_pwfield();
2448 if (((pwfield & MIPS_PWFIELD_PTEI_MASK) << MIPS_PWFIELD_PTEI_SHIFT)
2449 != ptei) {
2450 pr_warn("Unsupported PTEI field value: 0x%lx. HTW will not be enabled",
2451 ptei);
2452 /*
2453 * Drop option to avoid HTW being enabled via another path
2454 * (eg htw_reset())
2455 */
2456 current_cpu_data.options &= ~MIPS_CPU_HTW;
2457 return;
2458 }
2459
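 /*
  * Program the directory sizes: log2 of the number of entries in the
  * pgd (plus the pmd for 3-level tables) and in the pte page.
  */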
2460 pwsize = ilog2(PTRS_PER_PGD) << MIPS_PWSIZE_GDW_SHIFT;
2461 pwsize |= ilog2(PTRS_PER_PTE) << MIPS_PWSIZE_PTW_SHIFT;
2462 if (CONFIG_PGTABLE_LEVELS >= 3)
2463 pwsize |= ilog2(PTRS_PER_PMD) << MIPS_PWSIZE_MDW_SHIFT;
2464
2465 /* Set pointer size to size of directory pointers */
2466 if (IS_ENABLED(CONFIG_64BIT))
2467 pwsize |= MIPS_PWSIZE_PS_MASK;
2468 /* PTEs may be multiple pointers long (e.g. with XPA) */
2469 pwsize |= ((PTE_T_LOG2 - PGD_T_LOG2) << MIPS_PWSIZE_PTEW_SHIFT)
2470 & MIPS_PWSIZE_PTEW_MASK;
2471
2472 write_c0_pwsize(pwsize);
2473
2474 /* Make sure everything is set before we enable the HTW */
2475 back_to_back_c0_hazard();
2476
2477 /*
2478 * Enable HTW (and only for XUSeg on 64-bit), and disable the rest of
2479 * the pwctl fields.
2480 */
2481 config = 1 << MIPS_PWCTL_PWEN_SHIFT;
2482 if (IS_ENABLED(CONFIG_64BIT))
2483 config |= MIPS_PWCTL_XU_MASK;
2484 write_c0_pwctl(config);
2485 pr_info("Hardware Page Table Walker enabled\n");
2486
2487 print_htw_config();
2488}
2489
2490static void config_xpa_params(void)
2491{
2492#ifdef CONFIG_XPA
2493 unsigned int pagegrain;
2494
2495 if (mips_xpa_disabled) {
2496 pr_info("Extended Physical Addressing (XPA) disabled\n");
2497 return;
2498 }
2499
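 /*
  * Try to switch on large physical addresses (PG_ELPA) and read
  * PageGrain back; if the bit did not stick, the core has no ELPA
  * support and an XPA kernel cannot run on it.
  */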
2500 pagegrain = read_c0_pagegrain();
2501 write_c0_pagegrain(pagegrain | PG_ELPA);
2502 back_to_back_c0_hazard();
2503 pagegrain = read_c0_pagegrain();
2504
2505 if (pagegrain & PG_ELPA)
2506 pr_info("Extended Physical Addressing (XPA) enabled\n");
2507 else
2508 panic("Extended Physical Addressing (XPA) disabled");
2509#endif
2510}
2511
2512static void check_pabits(void)
2513{
2514 unsigned long entry;
2515 unsigned pabits, fillbits;
2516
2517 if (!cpu_has_rixi || _PAGE_NO_EXEC == 0) {
2518 /*
2519 * We'll only be making use of the fact that we can rotate bits
2520 * into the fill if the CPU supports RIXI, so don't bother
2521 * probing this for CPUs which don't.
2522 */
2523 return;
2524 }
2525
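 /*
  * Probe how many EntryLo bits are implemented: write all ones and
  * read the register back. Bits above the implemented PFN read back as
  * zero and form the fill field into which RI/XI (and possibly
  * software bits) get rotated.
  */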
2526 write_c0_entrylo0(~0ul);
2527 back_to_back_c0_hazard();
2528 entry = read_c0_entrylo0();
2529
2530 /* clear all non-PFN bits */
2531 entry &= ~((1 << MIPS_ENTRYLO_PFN_SHIFT) - 1);
2532 entry &= ~(MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI);
2533
2534 /* find a lower bound on PABITS, and upper bound on fill bits */
2535 pabits = fls_long(entry) + 6;
2536 fillbits = max_t(int, (int)BITS_PER_LONG - pabits, 0);
2537
2538 /* minus the RI & XI bits */
2539 fillbits -= min_t(unsigned, fillbits, 2);
2540
2541 if (fillbits >= ilog2(_PAGE_NO_EXEC))
2542 fill_includes_sw_bits = true;
2543
2544 pr_debug("Entry* registers contain %u fill bits\n", fillbits);
2545}
2546
2547void build_tlb_refill_handler(void)
2548{
2549 /*
2550 * The refill handler is generated per-CPU; multi-node systems
2551 * may have local storage for it. The other handlers are only
2552 * needed once.
2553 */
2554 static int run_once = 0;
2555
2556 if (IS_ENABLED(CONFIG_XPA) && !cpu_has_rixi)
2557 panic("Kernels supporting XPA currently require CPUs with RIXI");
2558
2559 output_pgtable_bits_defines();
2560 check_pabits();
2561
2562#ifdef CONFIG_64BIT
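 /*
  * Check whether the CPU implements more virtual address bits than a
  * single pgd can map; if so, the synthesized handlers must explicitly
  * send the upper xuseg range down the vmalloc/fault path.
  */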
2563 check_for_high_segbits = current_cpu_data.vmbits > (PGDIR_SHIFT + PGD_TABLE_ORDER + PAGE_SHIFT - 3);
2564#endif
2565
2566 if (cpu_has_3kex) {
2567#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
2568 if (!run_once) {
2569 build_setup_pgd();
2570 build_r3000_tlb_refill_handler();
2571 build_r3000_tlb_load_handler();
2572 build_r3000_tlb_store_handler();
2573 build_r3000_tlb_modify_handler();
2574 flush_tlb_handlers();
2575 run_once++;
2576 }
2577#else
2578 panic("No R3000 TLB refill handler");
2579#endif
2580 return;
2581 }
2582
2583 if (cpu_has_ldpte)
2584 setup_pw();
2585
2586 if (!run_once) {
2587 scratch_reg = allocate_kscratch();
2588 build_setup_pgd();
2589 build_r4000_tlb_load_handler();
2590 build_r4000_tlb_store_handler();
2591 build_r4000_tlb_modify_handler();
2592 if (cpu_has_ldpte)
2593 build_loongson3_tlb_refill_handler();
2594 else
2595 build_r4000_tlb_refill_handler();
2596 flush_tlb_handlers();
2597 run_once++;
2598 }
2599 if (cpu_has_xpa)
2600 config_xpa_params();
2601 if (cpu_has_htw)
2602 config_htw_params();
2603}
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Synthesize TLB refill handlers at runtime.
7 *
8 * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
9 * Copyright (C) 2005, 2007, 2008, 2009 Maciej W. Rozycki
10 * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
11 * Copyright (C) 2008, 2009 Cavium Networks, Inc.
12 * Copyright (C) 2011 MIPS Technologies, Inc.
13 *
14 * ... and the days got worse and worse and now you see
15 * I've gone completly out of my mind.
16 *
17 * They're coming to take me a away haha
18 * they're coming to take me a away hoho hihi haha
19 * to the funny farm where code is beautiful all the time ...
20 *
21 * (Condolences to Napoleon XIV)
22 */
23
24#include <linux/bug.h>
25#include <linux/kernel.h>
26#include <linux/types.h>
27#include <linux/smp.h>
28#include <linux/string.h>
29#include <linux/cache.h>
30
31#include <asm/cacheflush.h>
32#include <asm/cpu-type.h>
33#include <asm/pgtable.h>
34#include <asm/war.h>
35#include <asm/uasm.h>
36#include <asm/setup.h>
37
38/*
39 * TLB load/store/modify handlers.
40 *
41 * Only the fastpath gets synthesized at runtime, the slowpath for
42 * do_page_fault remains normal asm.
43 */
44extern void tlb_do_page_fault_0(void);
45extern void tlb_do_page_fault_1(void);
46
47struct work_registers {
48 int r1;
49 int r2;
50 int r3;
51};
52
53struct tlb_reg_save {
54 unsigned long a;
55 unsigned long b;
56} ____cacheline_aligned_in_smp;
57
58static struct tlb_reg_save handler_reg_save[NR_CPUS];
59
60static inline int r45k_bvahwbug(void)
61{
62 /* XXX: We should probe for the presence of this bug, but we don't. */
63 return 0;
64}
65
66static inline int r4k_250MHZhwbug(void)
67{
68 /* XXX: We should probe for the presence of this bug, but we don't. */
69 return 0;
70}
71
72static inline int __maybe_unused bcm1250_m3_war(void)
73{
74 return BCM1250_M3_WAR;
75}
76
77static inline int __maybe_unused r10000_llsc_war(void)
78{
79 return R10000_LLSC_WAR;
80}
81
82static int use_bbit_insns(void)
83{
84 switch (current_cpu_type()) {
85 case CPU_CAVIUM_OCTEON:
86 case CPU_CAVIUM_OCTEON_PLUS:
87 case CPU_CAVIUM_OCTEON2:
88 case CPU_CAVIUM_OCTEON3:
89 return 1;
90 default:
91 return 0;
92 }
93}
94
95static int use_lwx_insns(void)
96{
97 switch (current_cpu_type()) {
98 case CPU_CAVIUM_OCTEON2:
99 case CPU_CAVIUM_OCTEON3:
100 return 1;
101 default:
102 return 0;
103 }
104}
105#if defined(CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE) && \
106 CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
107static bool scratchpad_available(void)
108{
109 return true;
110}
111static int scratchpad_offset(int i)
112{
113 /*
114 * CVMSEG starts at address -32768 and extends for
115 * CAVIUM_OCTEON_CVMSEG_SIZE 128 byte cache lines.
116 */
117 i += 1; /* Kernel use starts at the top and works down. */
118 return CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE * 128 - (8 * i) - 32768;
119}
120#else
121static bool scratchpad_available(void)
122{
123 return false;
124}
125static int scratchpad_offset(int i)
126{
127 BUG();
128 /* Really unreachable, but evidently some GCC want this. */
129 return 0;
130}
131#endif
132/*
133 * Found by experiment: At least some revisions of the 4kc throw under
134 * some circumstances a machine check exception, triggered by invalid
135 * values in the index register. Delaying the tlbp instruction until
136 * after the next branch, plus adding an additional nop in front of
137 * tlbwi/tlbwr avoids the invalid index register values. Nobody knows
138 * why; it's not an issue caused by the core RTL.
139 *
140 */
141static int m4kc_tlbp_war(void)
142{
143 return (current_cpu_data.processor_id & 0xffff00) ==
144 (PRID_COMP_MIPS | PRID_IMP_4KC);
145}
146
147/* Handle labels (which must be positive integers). */
148enum label_id {
149 label_second_part = 1,
150 label_leave,
151 label_vmalloc,
152 label_vmalloc_done,
153 label_tlbw_hazard_0,
154 label_split = label_tlbw_hazard_0 + 8,
155 label_tlbl_goaround1,
156 label_tlbl_goaround2,
157 label_nopage_tlbl,
158 label_nopage_tlbs,
159 label_nopage_tlbm,
160 label_smp_pgtable_change,
161 label_r3000_write_probe_fail,
162 label_large_segbits_fault,
163#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
164 label_tlb_huge_update,
165#endif
166};
167
168UASM_L_LA(_second_part)
169UASM_L_LA(_leave)
170UASM_L_LA(_vmalloc)
171UASM_L_LA(_vmalloc_done)
172/* _tlbw_hazard_x is handled differently. */
173UASM_L_LA(_split)
174UASM_L_LA(_tlbl_goaround1)
175UASM_L_LA(_tlbl_goaround2)
176UASM_L_LA(_nopage_tlbl)
177UASM_L_LA(_nopage_tlbs)
178UASM_L_LA(_nopage_tlbm)
179UASM_L_LA(_smp_pgtable_change)
180UASM_L_LA(_r3000_write_probe_fail)
181UASM_L_LA(_large_segbits_fault)
182#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
183UASM_L_LA(_tlb_huge_update)
184#endif
185
186static int hazard_instance;
187
188static void uasm_bgezl_hazard(u32 **p, struct uasm_reloc **r, int instance)
189{
190 switch (instance) {
191 case 0 ... 7:
192 uasm_il_bgezl(p, r, 0, label_tlbw_hazard_0 + instance);
193 return;
194 default:
195 BUG();
196 }
197}
198
199static void uasm_bgezl_label(struct uasm_label **l, u32 **p, int instance)
200{
201 switch (instance) {
202 case 0 ... 7:
203 uasm_build_label(l, *p, label_tlbw_hazard_0 + instance);
204 break;
205 default:
206 BUG();
207 }
208}
209
210/*
211 * pgtable bits are assigned dynamically depending on processor feature
212 * and statically based on kernel configuration. This spits out the actual
213 * values the kernel is using. Required to make sense from disassembled
214 * TLB exception handlers.
215 */
216static void output_pgtable_bits_defines(void)
217{
218#define pr_define(fmt, ...) \
219 pr_debug("#define " fmt, ##__VA_ARGS__)
220
221 pr_debug("#include <asm/asm.h>\n");
222 pr_debug("#include <asm/regdef.h>\n");
223 pr_debug("\n");
224
225 pr_define("_PAGE_PRESENT_SHIFT %d\n", _PAGE_PRESENT_SHIFT);
226 pr_define("_PAGE_READ_SHIFT %d\n", _PAGE_READ_SHIFT);
227 pr_define("_PAGE_WRITE_SHIFT %d\n", _PAGE_WRITE_SHIFT);
228 pr_define("_PAGE_ACCESSED_SHIFT %d\n", _PAGE_ACCESSED_SHIFT);
229 pr_define("_PAGE_MODIFIED_SHIFT %d\n", _PAGE_MODIFIED_SHIFT);
230#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
231 pr_define("_PAGE_HUGE_SHIFT %d\n", _PAGE_HUGE_SHIFT);
232 pr_define("_PAGE_SPLITTING_SHIFT %d\n", _PAGE_SPLITTING_SHIFT);
233#endif
234 if (cpu_has_rixi) {
235#ifdef _PAGE_NO_EXEC_SHIFT
236 pr_define("_PAGE_NO_EXEC_SHIFT %d\n", _PAGE_NO_EXEC_SHIFT);
237#endif
238#ifdef _PAGE_NO_READ_SHIFT
239 pr_define("_PAGE_NO_READ_SHIFT %d\n", _PAGE_NO_READ_SHIFT);
240#endif
241 }
242 pr_define("_PAGE_GLOBAL_SHIFT %d\n", _PAGE_GLOBAL_SHIFT);
243 pr_define("_PAGE_VALID_SHIFT %d\n", _PAGE_VALID_SHIFT);
244 pr_define("_PAGE_DIRTY_SHIFT %d\n", _PAGE_DIRTY_SHIFT);
245 pr_define("_PFN_SHIFT %d\n", _PFN_SHIFT);
246 pr_debug("\n");
247}
248
249static inline void dump_handler(const char *symbol, const u32 *handler, int count)
250{
251 int i;
252
253 pr_debug("LEAF(%s)\n", symbol);
254
255 pr_debug("\t.set push\n");
256 pr_debug("\t.set noreorder\n");
257
258 for (i = 0; i < count; i++)
259 pr_debug("\t.word\t0x%08x\t\t# %p\n", handler[i], &handler[i]);
260
261 pr_debug("\t.set\tpop\n");
262
263 pr_debug("\tEND(%s)\n", symbol);
264}
265
266/* The only general purpose registers allowed in TLB handlers. */
267#define K0 26
268#define K1 27
269
270/* Some CP0 registers */
271#define C0_INDEX 0, 0
272#define C0_ENTRYLO0 2, 0
273#define C0_TCBIND 2, 2
274#define C0_ENTRYLO1 3, 0
275#define C0_CONTEXT 4, 0
276#define C0_PAGEMASK 5, 0
277#define C0_BADVADDR 8, 0
278#define C0_ENTRYHI 10, 0
279#define C0_EPC 14, 0
280#define C0_XCONTEXT 20, 0
281
282#ifdef CONFIG_64BIT
283# define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_XCONTEXT)
284#else
285# define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_CONTEXT)
286#endif
287
288/* The worst case length of the handler is around 18 instructions for
289 * R3000-style TLBs and up to 63 instructions for R4000-style TLBs.
290 * Maximum space available is 32 instructions for R3000 and 64
291 * instructions for R4000.
292 *
293 * We deliberately chose a buffer size of 128, so we won't scribble
294 * over anything important on overflow before we panic.
295 */
296static u32 tlb_handler[128];
297
298/* simply assume worst case size for labels and relocs */
299static struct uasm_label labels[128];
300static struct uasm_reloc relocs[128];
301
302static int check_for_high_segbits;
303
304static unsigned int kscratch_used_mask;
305
306static inline int __maybe_unused c0_kscratch(void)
307{
308 switch (current_cpu_type()) {
309 case CPU_XLP:
310 case CPU_XLR:
311 return 22;
312 default:
313 return 31;
314 }
315}
316
317static int allocate_kscratch(void)
318{
319 int r;
320 unsigned int a = cpu_data[0].kscratch_mask & ~kscratch_used_mask;
321
322 r = ffs(a);
323
324 if (r == 0)
325 return -1;
326
327 r--; /* make it zero based */
328
329 kscratch_used_mask |= (1 << r);
330
331 return r;
332}
333
334static int scratch_reg;
335static int pgd_reg;
336enum vmalloc64_mode {not_refill, refill_scratch, refill_noscratch};
337
338static struct work_registers build_get_work_registers(u32 **p)
339{
340 struct work_registers r;
341
342 if (scratch_reg >= 0) {
343 /* Save in CPU local C0_KScratch? */
344 UASM_i_MTC0(p, 1, c0_kscratch(), scratch_reg);
345 r.r1 = K0;
346 r.r2 = K1;
347 r.r3 = 1;
348 return r;
349 }
350
351 if (num_possible_cpus() > 1) {
352 /* Get smp_processor_id */
353 UASM_i_CPUID_MFC0(p, K0, SMP_CPUID_REG);
354 UASM_i_SRL_SAFE(p, K0, K0, SMP_CPUID_REGSHIFT);
355
356 /* handler_reg_save index in K0 */
357 UASM_i_SLL(p, K0, K0, ilog2(sizeof(struct tlb_reg_save)));
358
359 UASM_i_LA(p, K1, (long)&handler_reg_save);
360 UASM_i_ADDU(p, K0, K0, K1);
361 } else {
362 UASM_i_LA(p, K0, (long)&handler_reg_save);
363 }
364 /* K0 now points to save area, save $1 and $2 */
365 UASM_i_SW(p, 1, offsetof(struct tlb_reg_save, a), K0);
366 UASM_i_SW(p, 2, offsetof(struct tlb_reg_save, b), K0);
367
368 r.r1 = K1;
369 r.r2 = 1;
370 r.r3 = 2;
371 return r;
372}
373
374static void build_restore_work_registers(u32 **p)
375{
376 if (scratch_reg >= 0) {
377 UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
378 return;
379 }
380 /* K0 already points to save area, restore $1 and $2 */
381 UASM_i_LW(p, 1, offsetof(struct tlb_reg_save, a), K0);
382 UASM_i_LW(p, 2, offsetof(struct tlb_reg_save, b), K0);
383}
384
385#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
386
387/*
388 * CONFIG_MIPS_PGD_C0_CONTEXT implies 64 bit and lack of pgd_current,
389 * we cannot do r3000 under these circumstances.
390 *
391 * Declare pgd_current here instead of including mmu_context.h to avoid type
392 * conflicts for tlbmiss_handler_setup_pgd
393 */
394extern unsigned long pgd_current[];
395
396/*
397 * The R3000 TLB handler is simple.
398 */
399static void build_r3000_tlb_refill_handler(void)
400{
401 long pgdc = (long)pgd_current;
402 u32 *p;
403
404 memset(tlb_handler, 0, sizeof(tlb_handler));
405 p = tlb_handler;
406
407 uasm_i_mfc0(&p, K0, C0_BADVADDR);
408 uasm_i_lui(&p, K1, uasm_rel_hi(pgdc)); /* cp0 delay */
409 uasm_i_lw(&p, K1, uasm_rel_lo(pgdc), K1);
410 uasm_i_srl(&p, K0, K0, 22); /* load delay */
411 uasm_i_sll(&p, K0, K0, 2);
412 uasm_i_addu(&p, K1, K1, K0);
413 uasm_i_mfc0(&p, K0, C0_CONTEXT);
414 uasm_i_lw(&p, K1, 0, K1); /* cp0 delay */
415 uasm_i_andi(&p, K0, K0, 0xffc); /* load delay */
416 uasm_i_addu(&p, K1, K1, K0);
417 uasm_i_lw(&p, K0, 0, K1);
418 uasm_i_nop(&p); /* load delay */
419 uasm_i_mtc0(&p, K0, C0_ENTRYLO0);
420 uasm_i_mfc0(&p, K1, C0_EPC); /* cp0 delay */
421 uasm_i_tlbwr(&p); /* cp0 delay */
422 uasm_i_jr(&p, K1);
423 uasm_i_rfe(&p); /* branch delay */
424
425 if (p > tlb_handler + 32)
426 panic("TLB refill handler space exceeded");
427
428 pr_debug("Wrote TLB refill handler (%u instructions).\n",
429 (unsigned int)(p - tlb_handler));
430
431 memcpy((void *)ebase, tlb_handler, 0x80);
432
433 dump_handler("r3000_tlb_refill", (u32 *)ebase, 32);
434}
435#endif /* CONFIG_MIPS_PGD_C0_CONTEXT */
436
437/*
438 * The R4000 TLB handler is much more complicated. We have two
439 * consecutive handler areas with 32 instructions space each.
440 * Since they aren't used at the same time, we can overflow in the
441 * other one.To keep things simple, we first assume linear space,
442 * then we relocate it to the final handler layout as needed.
443 */
444static u32 final_handler[64];
445
446/*
447 * Hazards
448 *
449 * From the IDT errata for the QED RM5230 (Nevada), processor revision 1.0:
450 * 2. A timing hazard exists for the TLBP instruction.
451 *
452 * stalling_instruction
453 * TLBP
454 *
455 * The JTLB is being read for the TLBP throughout the stall generated by the
456 * previous instruction. This is not really correct as the stalling instruction
457 * can modify the address used to access the JTLB. The failure symptom is that
458 * the TLBP instruction will use an address created for the stalling instruction
459 * and not the address held in C0_ENHI and thus report the wrong results.
460 *
461 * The software work-around is to not allow the instruction preceding the TLBP
462 * to stall - make it an NOP or some other instruction guaranteed not to stall.
463 *
464 * Errata 2 will not be fixed. This errata is also on the R5000.
465 *
466 * As if we MIPS hackers wouldn't know how to nop pipelines happy ...
467 */
468static void __maybe_unused build_tlb_probe_entry(u32 **p)
469{
470 switch (current_cpu_type()) {
471 /* Found by experiment: R4600 v2.0/R4700 needs this, too. */
472 case CPU_R4600:
473 case CPU_R4700:
474 case CPU_R5000:
475 case CPU_NEVADA:
476 uasm_i_nop(p);
477 uasm_i_tlbp(p);
478 break;
479
480 default:
481 uasm_i_tlbp(p);
482 break;
483 }
484}
485
486/*
487 * Write random or indexed TLB entry, and care about the hazards from
488 * the preceding mtc0 and for the following eret.
489 */
490enum tlb_write_entry { tlb_random, tlb_indexed };
491
492static void build_tlb_write_entry(u32 **p, struct uasm_label **l,
493 struct uasm_reloc **r,
494 enum tlb_write_entry wmode)
495{
496 void(*tlbw)(u32 **) = NULL;
497
498 switch (wmode) {
499 case tlb_random: tlbw = uasm_i_tlbwr; break;
500 case tlb_indexed: tlbw = uasm_i_tlbwi; break;
501 }
502
503 if (cpu_has_mips_r2) {
504 /*
505 * The architecture spec says an ehb is required here,
506 * but a number of cores do not have the hazard and
507 * using an ehb causes an expensive pipeline stall.
508 */
509 switch (current_cpu_type()) {
510 case CPU_M14KC:
511 case CPU_74K:
512 case CPU_1074K:
513 case CPU_PROAPTIV:
514 case CPU_P5600:
515 case CPU_M5150:
516 break;
517
518 default:
519 uasm_i_ehb(p);
520 break;
521 }
522 tlbw(p);
523 return;
524 }
525
526 switch (current_cpu_type()) {
527 case CPU_R4000PC:
528 case CPU_R4000SC:
529 case CPU_R4000MC:
530 case CPU_R4400PC:
531 case CPU_R4400SC:
532 case CPU_R4400MC:
533 /*
534 * This branch uses up a mtc0 hazard nop slot and saves
535 * two nops after the tlbw instruction.
536 */
537 uasm_bgezl_hazard(p, r, hazard_instance);
538 tlbw(p);
539 uasm_bgezl_label(l, p, hazard_instance);
540 hazard_instance++;
541 uasm_i_nop(p);
542 break;
543
544 case CPU_R4600:
545 case CPU_R4700:
546 uasm_i_nop(p);
547 tlbw(p);
548 uasm_i_nop(p);
549 break;
550
551 case CPU_R5000:
552 case CPU_NEVADA:
553 uasm_i_nop(p); /* QED specifies 2 nops hazard */
554 uasm_i_nop(p); /* QED specifies 2 nops hazard */
555 tlbw(p);
556 break;
557
558 case CPU_R4300:
559 case CPU_5KC:
560 case CPU_TX49XX:
561 case CPU_PR4450:
562 case CPU_XLR:
563 uasm_i_nop(p);
564 tlbw(p);
565 break;
566
567 case CPU_R10000:
568 case CPU_R12000:
569 case CPU_R14000:
570 case CPU_4KC:
571 case CPU_4KEC:
572 case CPU_M14KC:
573 case CPU_M14KEC:
574 case CPU_SB1:
575 case CPU_SB1A:
576 case CPU_4KSC:
577 case CPU_20KC:
578 case CPU_25KF:
579 case CPU_BMIPS32:
580 case CPU_BMIPS3300:
581 case CPU_BMIPS4350:
582 case CPU_BMIPS4380:
583 case CPU_BMIPS5000:
584 case CPU_LOONGSON2:
585 case CPU_LOONGSON3:
586 case CPU_R5500:
587 if (m4kc_tlbp_war())
588 uasm_i_nop(p);
589 case CPU_ALCHEMY:
590 tlbw(p);
591 break;
592
593 case CPU_RM7000:
594 uasm_i_nop(p);
595 uasm_i_nop(p);
596 uasm_i_nop(p);
597 uasm_i_nop(p);
598 tlbw(p);
599 break;
600
601 case CPU_VR4111:
602 case CPU_VR4121:
603 case CPU_VR4122:
604 case CPU_VR4181:
605 case CPU_VR4181A:
606 uasm_i_nop(p);
607 uasm_i_nop(p);
608 tlbw(p);
609 uasm_i_nop(p);
610 uasm_i_nop(p);
611 break;
612
613 case CPU_VR4131:
614 case CPU_VR4133:
615 case CPU_R5432:
616 uasm_i_nop(p);
617 uasm_i_nop(p);
618 tlbw(p);
619 break;
620
621 case CPU_JZRISC:
622 tlbw(p);
623 uasm_i_nop(p);
624 break;
625
626 default:
627 panic("No TLB refill handler yet (CPU type: %d)",
628 current_cpu_type());
629 break;
630 }
631}
632
633static __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
634 unsigned int reg)
635{
636 if (cpu_has_rixi) {
637 UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL));
638 } else {
639#ifdef CONFIG_64BIT_PHYS_ADDR
640 uasm_i_dsrl_safe(p, reg, reg, ilog2(_PAGE_GLOBAL));
641#else
642 UASM_i_SRL(p, reg, reg, ilog2(_PAGE_GLOBAL));
643#endif
644 }
645}
646
647#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
648
649static void build_restore_pagemask(u32 **p, struct uasm_reloc **r,
650 unsigned int tmp, enum label_id lid,
651 int restore_scratch)
652{
653 if (restore_scratch) {
654 /* Reset default page size */
655 if (PM_DEFAULT_MASK >> 16) {
656 uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
657 uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
658 uasm_i_mtc0(p, tmp, C0_PAGEMASK);
659 uasm_il_b(p, r, lid);
660 } else if (PM_DEFAULT_MASK) {
661 uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
662 uasm_i_mtc0(p, tmp, C0_PAGEMASK);
663 uasm_il_b(p, r, lid);
664 } else {
665 uasm_i_mtc0(p, 0, C0_PAGEMASK);
666 uasm_il_b(p, r, lid);
667 }
668 if (scratch_reg >= 0)
669 UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
670 else
671 UASM_i_LW(p, 1, scratchpad_offset(0), 0);
672 } else {
673 /* Reset default page size */
674 if (PM_DEFAULT_MASK >> 16) {
675 uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
676 uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
677 uasm_il_b(p, r, lid);
678 uasm_i_mtc0(p, tmp, C0_PAGEMASK);
679 } else if (PM_DEFAULT_MASK) {
680 uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
681 uasm_il_b(p, r, lid);
682 uasm_i_mtc0(p, tmp, C0_PAGEMASK);
683 } else {
684 uasm_il_b(p, r, lid);
685 uasm_i_mtc0(p, 0, C0_PAGEMASK);
686 }
687 }
688}
689
690static void build_huge_tlb_write_entry(u32 **p, struct uasm_label **l,
691 struct uasm_reloc **r,
692 unsigned int tmp,
693 enum tlb_write_entry wmode,
694 int restore_scratch)
695{
696 /* Set huge page tlb entry size */
697 uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16);
698 uasm_i_ori(p, tmp, tmp, PM_HUGE_MASK & 0xffff);
699 uasm_i_mtc0(p, tmp, C0_PAGEMASK);
700
701 build_tlb_write_entry(p, l, r, wmode);
702
703 build_restore_pagemask(p, r, tmp, label_leave, restore_scratch);
704}
705
706/*
707 * Check if Huge PTE is present, if so then jump to LABEL.
708 */
709static void
710build_is_huge_pte(u32 **p, struct uasm_reloc **r, unsigned int tmp,
711 unsigned int pmd, int lid)
712{
713 UASM_i_LW(p, tmp, 0, pmd);
714 if (use_bbit_insns()) {
715 uasm_il_bbit1(p, r, tmp, ilog2(_PAGE_HUGE), lid);
716 } else {
717 uasm_i_andi(p, tmp, tmp, _PAGE_HUGE);
718 uasm_il_bnez(p, r, tmp, lid);
719 }
720}
721
722static void build_huge_update_entries(u32 **p, unsigned int pte,
723 unsigned int tmp)
724{
725 int small_sequence;
726
727 /*
728 * A huge PTE describes an area the size of the
729 * configured huge page size. This is twice the
730 * of the large TLB entry size we intend to use.
731 * A TLB entry half the size of the configured
732 * huge page size is configured into entrylo0
733 * and entrylo1 to cover the contiguous huge PTE
734 * address space.
735 */
736 small_sequence = (HPAGE_SIZE >> 7) < 0x10000;
737
738 /* We can clobber tmp. It isn't used after this.*/
739 if (!small_sequence)
740 uasm_i_lui(p, tmp, HPAGE_SIZE >> (7 + 16));
741
742 build_convert_pte_to_entrylo(p, pte);
743 UASM_i_MTC0(p, pte, C0_ENTRYLO0); /* load it */
744 /* convert to entrylo1 */
745 if (small_sequence)
746 UASM_i_ADDIU(p, pte, pte, HPAGE_SIZE >> 7);
747 else
748 UASM_i_ADDU(p, pte, pte, tmp);
749
750 UASM_i_MTC0(p, pte, C0_ENTRYLO1); /* load it */
751}
752
753static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r,
754 struct uasm_label **l,
755 unsigned int pte,
756 unsigned int ptr)
757{
758#ifdef CONFIG_SMP
759 UASM_i_SC(p, pte, 0, ptr);
760 uasm_il_beqz(p, r, pte, label_tlb_huge_update);
761 UASM_i_LW(p, pte, 0, ptr); /* Needed because SC killed our PTE */
762#else
763 UASM_i_SW(p, pte, 0, ptr);
764#endif
765 build_huge_update_entries(p, pte, ptr);
766 build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed, 0);
767}
768#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
769
770#ifdef CONFIG_64BIT
771/*
772 * TMP and PTR are scratch.
773 * TMP will be clobbered, PTR will hold the pmd entry.
774 */
775static void
776build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
777 unsigned int tmp, unsigned int ptr)
778{
779#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
780 long pgdc = (long)pgd_current;
781#endif
782 /*
783 * The vmalloc handling is not in the hotpath.
784 */
785 uasm_i_dmfc0(p, tmp, C0_BADVADDR);
786
787 if (check_for_high_segbits) {
788 /*
789 * The kernel currently implicitely assumes that the
790 * MIPS SEGBITS parameter for the processor is
791 * (PGDIR_SHIFT+PGDIR_BITS) or less, and will never
792 * allocate virtual addresses outside the maximum
793 * range for SEGBITS = (PGDIR_SHIFT+PGDIR_BITS). But
794 * that doesn't prevent user code from accessing the
795 * higher xuseg addresses. Here, we make sure that
796 * everything but the lower xuseg addresses goes down
797 * the module_alloc/vmalloc path.
798 */
799 uasm_i_dsrl_safe(p, ptr, tmp, PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
800 uasm_il_bnez(p, r, ptr, label_vmalloc);
801 } else {
802 uasm_il_bltz(p, r, tmp, label_vmalloc);
803 }
804 /* No uasm_i_nop needed here, since the next insn doesn't touch TMP. */
805
806 if (pgd_reg != -1) {
807 /* pgd is in pgd_reg */
808 UASM_i_MFC0(p, ptr, c0_kscratch(), pgd_reg);
809 } else {
810#if defined(CONFIG_MIPS_PGD_C0_CONTEXT)
811 /*
812 * &pgd << 11 stored in CONTEXT [23..63].
813 */
814 UASM_i_MFC0(p, ptr, C0_CONTEXT);
815
816 /* Clear lower 23 bits of context. */
817 uasm_i_dins(p, ptr, 0, 0, 23);
818
819 /* 1 0 1 0 1 << 6 xkphys cached */
820 uasm_i_ori(p, ptr, ptr, 0x540);
821 uasm_i_drotr(p, ptr, ptr, 11);
822#elif defined(CONFIG_SMP)
823 UASM_i_CPUID_MFC0(p, ptr, SMP_CPUID_REG);
824 uasm_i_dsrl_safe(p, ptr, ptr, SMP_CPUID_PTRSHIFT);
825 UASM_i_LA_mostly(p, tmp, pgdc);
826 uasm_i_daddu(p, ptr, ptr, tmp);
827 uasm_i_dmfc0(p, tmp, C0_BADVADDR);
828 uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
829#else
830 UASM_i_LA_mostly(p, ptr, pgdc);
831 uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
832#endif
833 }
834
835 uasm_l_vmalloc_done(l, *p);
836
837 /* get pgd offset in bytes */
838 uasm_i_dsrl_safe(p, tmp, tmp, PGDIR_SHIFT - 3);
839
840 uasm_i_andi(p, tmp, tmp, (PTRS_PER_PGD - 1)<<3);
841 uasm_i_daddu(p, ptr, ptr, tmp); /* add in pgd offset */
842#ifndef __PAGETABLE_PMD_FOLDED
843 uasm_i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */
844 uasm_i_ld(p, ptr, 0, ptr); /* get pmd pointer */
845 uasm_i_dsrl_safe(p, tmp, tmp, PMD_SHIFT-3); /* get pmd offset in bytes */
846 uasm_i_andi(p, tmp, tmp, (PTRS_PER_PMD - 1)<<3);
847 uasm_i_daddu(p, ptr, ptr, tmp); /* add in pmd offset */
848#endif
849}
850
851/*
852 * BVADDR is the faulting address, PTR is scratch.
853 * PTR will hold the pgd for vmalloc.
854 */
855static void
856build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
857 unsigned int bvaddr, unsigned int ptr,
858 enum vmalloc64_mode mode)
859{
860 long swpd = (long)swapper_pg_dir;
861 int single_insn_swpd;
862 int did_vmalloc_branch = 0;
863
864 single_insn_swpd = uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd);
865
866 uasm_l_vmalloc(l, *p);
867
868 if (mode != not_refill && check_for_high_segbits) {
869 if (single_insn_swpd) {
870 uasm_il_bltz(p, r, bvaddr, label_vmalloc_done);
871 uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
872 did_vmalloc_branch = 1;
873 /* fall through */
874 } else {
875 uasm_il_bgez(p, r, bvaddr, label_large_segbits_fault);
876 }
877 }
878 if (!did_vmalloc_branch) {
879 if (uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd)) {
880 uasm_il_b(p, r, label_vmalloc_done);
881 uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
882 } else {
883 UASM_i_LA_mostly(p, ptr, swpd);
884 uasm_il_b(p, r, label_vmalloc_done);
885 if (uasm_in_compat_space_p(swpd))
886 uasm_i_addiu(p, ptr, ptr, uasm_rel_lo(swpd));
887 else
888 uasm_i_daddiu(p, ptr, ptr, uasm_rel_lo(swpd));
889 }
890 }
891 if (mode != not_refill && check_for_high_segbits) {
892 uasm_l_large_segbits_fault(l, *p);
893 /*
894 * We get here if we are an xsseg address, or if we are
895 * an xuseg address above (PGDIR_SHIFT+PGDIR_BITS) boundary.
896 *
897 * Ignoring xsseg (assume disabled so would generate
898 * (address errors?), the only remaining possibility
899 * is the upper xuseg addresses. On processors with
900 * TLB_SEGBITS <= PGDIR_SHIFT+PGDIR_BITS, these
901 * addresses would have taken an address error. We try
902 * to mimic that here by taking a load/istream page
903 * fault.
904 */
905 UASM_i_LA(p, ptr, (unsigned long)tlb_do_page_fault_0);
906 uasm_i_jr(p, ptr);
907
908 if (mode == refill_scratch) {
909 if (scratch_reg >= 0)
910 UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
911 else
912 UASM_i_LW(p, 1, scratchpad_offset(0), 0);
913 } else {
914 uasm_i_nop(p);
915 }
916 }
917}
918
919#else /* !CONFIG_64BIT */
920
921/*
922 * TMP and PTR are scratch.
923 * TMP will be clobbered, PTR will hold the pgd entry.
924 */
925static void __maybe_unused
926build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
927{
928 if (pgd_reg != -1) {
929 /* pgd is in pgd_reg */
930 uasm_i_mfc0(p, ptr, c0_kscratch(), pgd_reg);
931 uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */
932 } else {
933 long pgdc = (long)pgd_current;
934
935 /* 32 bit SMP has smp_processor_id() stored in CONTEXT. */
936#ifdef CONFIG_SMP
937 uasm_i_mfc0(p, ptr, SMP_CPUID_REG);
938 UASM_i_LA_mostly(p, tmp, pgdc);
939 uasm_i_srl(p, ptr, ptr, SMP_CPUID_PTRSHIFT);
940 uasm_i_addu(p, ptr, tmp, ptr);
941#else
942 UASM_i_LA_mostly(p, ptr, pgdc);
943#endif
944 uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */
945 uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
946 }
947 uasm_i_srl(p, tmp, tmp, PGDIR_SHIFT); /* get pgd only bits */
948 uasm_i_sll(p, tmp, tmp, PGD_T_LOG2);
949 uasm_i_addu(p, ptr, ptr, tmp); /* add in pgd offset */
950}
951
952#endif /* !CONFIG_64BIT */
953
954static void build_adjust_context(u32 **p, unsigned int ctx)
955{
956 unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12;
957 unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1);
958
959 switch (current_cpu_type()) {
960 case CPU_VR41XX:
961 case CPU_VR4111:
962 case CPU_VR4121:
963 case CPU_VR4122:
964 case CPU_VR4131:
965 case CPU_VR4181:
966 case CPU_VR4181A:
967 case CPU_VR4133:
968 shift += 2;
969 break;
970
971 default:
972 break;
973 }
974
975 if (shift)
976 UASM_i_SRL(p, ctx, ctx, shift);
977 uasm_i_andi(p, ctx, ctx, mask);
978}
979
980static void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
981{
982 /*
983 * Bug workaround for the Nevada. It seems as if under certain
984 * circumstances the move from cp0_context might produce a
985 * bogus result when the mfc0 instruction and its consumer are
986 * in a different cacheline or a load instruction, probably any
987 * memory reference, is between them.
988 */
989 switch (current_cpu_type()) {
990 case CPU_NEVADA:
991 UASM_i_LW(p, ptr, 0, ptr);
992 GET_CONTEXT(p, tmp); /* get context reg */
993 break;
994
995 default:
996 GET_CONTEXT(p, tmp); /* get context reg */
997 UASM_i_LW(p, ptr, 0, ptr);
998 break;
999 }
1000
1001 build_adjust_context(p, tmp);
1002 UASM_i_ADDU(p, ptr, ptr, tmp); /* add in offset */
1003}
1004
1005static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
1006{
1007 /*
1008 * 64bit address support (36bit on a 32bit CPU) in a 32bit
1009 * Kernel is a special case. Only a few CPUs use it.
1010 */
1011#ifdef CONFIG_64BIT_PHYS_ADDR
1012 if (cpu_has_64bits) {
1013 uasm_i_ld(p, tmp, 0, ptep); /* get even pte */
1014 uasm_i_ld(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
1015 if (cpu_has_rixi) {
1016 UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
1017 UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
1018 UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL));
1019 } else {
1020 uasm_i_dsrl_safe(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */
1021 UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
1022 uasm_i_dsrl_safe(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */
1023 }
1024 UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
1025 } else {
1026 int pte_off_even = sizeof(pte_t) / 2;
1027 int pte_off_odd = pte_off_even + sizeof(pte_t);
1028
1029 /* The pte entries are pre-shifted */
1030 uasm_i_lw(p, tmp, pte_off_even, ptep); /* get even pte */
1031 UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
1032 uasm_i_lw(p, ptep, pte_off_odd, ptep); /* get odd pte */
1033 UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
1034 }
1035#else
1036 UASM_i_LW(p, tmp, 0, ptep); /* get even pte */
1037 UASM_i_LW(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
1038 if (r45k_bvahwbug())
1039 build_tlb_probe_entry(p);
1040 if (cpu_has_rixi) {
1041 UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
1042 if (r4k_250MHZhwbug())
1043 UASM_i_MTC0(p, 0, C0_ENTRYLO0);
1044 UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
1045 UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL));
1046 } else {
1047 UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */
1048 if (r4k_250MHZhwbug())
1049 UASM_i_MTC0(p, 0, C0_ENTRYLO0);
1050 UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
1051 UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */
1052 if (r45k_bvahwbug())
1053 uasm_i_mfc0(p, tmp, C0_INDEX);
1054 }
1055 if (r4k_250MHZhwbug())
1056 UASM_i_MTC0(p, 0, C0_ENTRYLO1);
1057 UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
1058#endif
1059}
1060
1061struct mips_huge_tlb_info {
1062 int huge_pte;
1063 int restore_scratch;
1064};
1065
1066static struct mips_huge_tlb_info
1067build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
1068 struct uasm_reloc **r, unsigned int tmp,
1069 unsigned int ptr, int c0_scratch_reg)
1070{
1071 struct mips_huge_tlb_info rv;
1072 unsigned int even, odd;
1073 int vmalloc_branch_delay_filled = 0;
1074 const int scratch = 1; /* Our extra working register */
1075
1076 rv.huge_pte = scratch;
1077 rv.restore_scratch = 0;
1078
1079 if (check_for_high_segbits) {
1080 UASM_i_MFC0(p, tmp, C0_BADVADDR);
1081
1082 if (pgd_reg != -1)
1083 UASM_i_MFC0(p, ptr, c0_kscratch(), pgd_reg);
1084 else
1085 UASM_i_MFC0(p, ptr, C0_CONTEXT);
1086
1087 if (c0_scratch_reg >= 0)
1088 UASM_i_MTC0(p, scratch, c0_kscratch(), c0_scratch_reg);
1089 else
1090 UASM_i_SW(p, scratch, scratchpad_offset(0), 0);
1091
1092 uasm_i_dsrl_safe(p, scratch, tmp,
1093 PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
1094 uasm_il_bnez(p, r, scratch, label_vmalloc);
1095
1096 if (pgd_reg == -1) {
1097 vmalloc_branch_delay_filled = 1;
1098 /* Clear lower 23 bits of context. */
1099 uasm_i_dins(p, ptr, 0, 0, 23);
1100 }
1101 } else {
1102 if (pgd_reg != -1)
1103 UASM_i_MFC0(p, ptr, c0_kscratch(), pgd_reg);
1104 else
1105 UASM_i_MFC0(p, ptr, C0_CONTEXT);
1106
1107 UASM_i_MFC0(p, tmp, C0_BADVADDR);
1108
1109 if (c0_scratch_reg >= 0)
1110 UASM_i_MTC0(p, scratch, c0_kscratch(), c0_scratch_reg);
1111 else
1112 UASM_i_SW(p, scratch, scratchpad_offset(0), 0);
1113
1114 if (pgd_reg == -1)
1115 /* Clear lower 23 bits of context. */
1116 uasm_i_dins(p, ptr, 0, 0, 23);
1117
1118 uasm_il_bltz(p, r, tmp, label_vmalloc);
1119 }
1120
1121 if (pgd_reg == -1) {
1122 vmalloc_branch_delay_filled = 1;
1123 /* 1 0 1 0 1 << 6 xkphys cached */
1124 uasm_i_ori(p, ptr, ptr, 0x540);
1125 uasm_i_drotr(p, ptr, ptr, 11);
1126 }
1127
1128#ifdef __PAGETABLE_PMD_FOLDED
1129#define LOC_PTEP scratch
1130#else
1131#define LOC_PTEP ptr
1132#endif
1133
1134 if (!vmalloc_branch_delay_filled)
1135 /* get pgd offset in bytes */
1136 uasm_i_dsrl_safe(p, scratch, tmp, PGDIR_SHIFT - 3);
1137
1138 uasm_l_vmalloc_done(l, *p);
1139
1140 /*
1141 * tmp ptr
1142 * fall-through case = badvaddr *pgd_current
1143 * vmalloc case = badvaddr swapper_pg_dir
1144 */
1145
1146 if (vmalloc_branch_delay_filled)
1147 /* get pgd offset in bytes */
1148 uasm_i_dsrl_safe(p, scratch, tmp, PGDIR_SHIFT - 3);
1149
1150#ifdef __PAGETABLE_PMD_FOLDED
1151 GET_CONTEXT(p, tmp); /* get context reg */
1152#endif
1153 uasm_i_andi(p, scratch, scratch, (PTRS_PER_PGD - 1) << 3);
1154
1155 if (use_lwx_insns()) {
1156 UASM_i_LWX(p, LOC_PTEP, scratch, ptr);
1157 } else {
1158 uasm_i_daddu(p, ptr, ptr, scratch); /* add in pgd offset */
1159 uasm_i_ld(p, LOC_PTEP, 0, ptr); /* get pmd pointer */
1160 }
1161
1162#ifndef __PAGETABLE_PMD_FOLDED
1163 /* get pmd offset in bytes */
1164 uasm_i_dsrl_safe(p, scratch, tmp, PMD_SHIFT - 3);
1165 uasm_i_andi(p, scratch, scratch, (PTRS_PER_PMD - 1) << 3);
1166 GET_CONTEXT(p, tmp); /* get context reg */
1167
1168 if (use_lwx_insns()) {
1169 UASM_i_LWX(p, scratch, scratch, ptr);
1170 } else {
1171 uasm_i_daddu(p, ptr, ptr, scratch); /* add in pmd offset */
1172 UASM_i_LW(p, scratch, 0, ptr);
1173 }
1174#endif
1175 /* Adjust the context during the load latency. */
1176 build_adjust_context(p, tmp);
1177
1178#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
1179 uasm_il_bbit1(p, r, scratch, ilog2(_PAGE_HUGE), label_tlb_huge_update);
1180 /*
1181 * The in the LWX case we don't want to do the load in the
1182 * delay slot. It cannot issue in the same cycle and may be
1183 * speculative and unneeded.
1184 */
1185 if (use_lwx_insns())
1186 uasm_i_nop(p);
1187#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
1188
1189
1190 /* build_update_entries */
1191 if (use_lwx_insns()) {
1192 even = ptr;
1193 odd = tmp;
1194 UASM_i_LWX(p, even, scratch, tmp);
1195 UASM_i_ADDIU(p, tmp, tmp, sizeof(pte_t));
1196 UASM_i_LWX(p, odd, scratch, tmp);
1197 } else {
1198 UASM_i_ADDU(p, ptr, scratch, tmp); /* add in offset */
1199 even = tmp;
1200 odd = ptr;
1201 UASM_i_LW(p, even, 0, ptr); /* get even pte */
1202 UASM_i_LW(p, odd, sizeof(pte_t), ptr); /* get odd pte */
1203 }
1204 if (cpu_has_rixi) {
1205 uasm_i_drotr(p, even, even, ilog2(_PAGE_GLOBAL));
1206 UASM_i_MTC0(p, even, C0_ENTRYLO0); /* load it */
1207 uasm_i_drotr(p, odd, odd, ilog2(_PAGE_GLOBAL));
1208 } else {
1209 uasm_i_dsrl_safe(p, even, even, ilog2(_PAGE_GLOBAL));
1210 UASM_i_MTC0(p, even, C0_ENTRYLO0); /* load it */
1211 uasm_i_dsrl_safe(p, odd, odd, ilog2(_PAGE_GLOBAL));
1212 }
1213 UASM_i_MTC0(p, odd, C0_ENTRYLO1); /* load it */
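	/*
	 * Why rotate vs. shift above (a hedged explanation): shifting the
	 * software PTE right by ilog2(_PAGE_GLOBAL) lines the hardware bits
	 * up with the EntryLo layout (G at bit 0, then V, D, C and the PFN).
	 * With RIXI, a rotate is used instead so that _PAGE_NO_READ and
	 * _PAGE_NO_EXEC, assumed here to sit just below _PAGE_GLOBAL in the
	 * software PTE, wrap around into the RI/XI bits at the top of the
	 * 64-bit EntryLo value.
	 */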
1214
1215 if (c0_scratch_reg >= 0) {
1216 UASM_i_MFC0(p, scratch, c0_kscratch(), c0_scratch_reg);
1217 build_tlb_write_entry(p, l, r, tlb_random);
1218 uasm_l_leave(l, *p);
1219 rv.restore_scratch = 1;
1220 } else if (PAGE_SHIFT == 14 || PAGE_SHIFT == 13) {
1221 build_tlb_write_entry(p, l, r, tlb_random);
1222 uasm_l_leave(l, *p);
1223 UASM_i_LW(p, scratch, scratchpad_offset(0), 0);
1224 } else {
1225 UASM_i_LW(p, scratch, scratchpad_offset(0), 0);
1226 build_tlb_write_entry(p, l, r, tlb_random);
1227 uasm_l_leave(l, *p);
1228 rv.restore_scratch = 1;
1229 }
1230
1231 uasm_i_eret(p); /* return from trap */
1232
1233 return rv;
1234}
1235
1236/*
1237 * For a 64-bit kernel, we are using the 64-bit XTLB refill exception
1238 * because EXL == 0. If we wrap, we can also use the 32 instruction
1239 * slots before the XTLB refill exception handler, which belong to the
1240 * unused TLB refill exception.
1241 */
1242#define MIPS64_REFILL_INSNS 32
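
/*
 * Rough picture of the vector layout this relies on (illustrative only):
 *
 *	ebase + 0x000:	TLB refill	- 32 slots, unused by a 64-bit kernel,
 *					  available as spill space
 *	ebase + 0x080:	XTLB refill	- 32 slots, where the handler starts
 *
 * final_handler[] mirrors this: the handler proper is assembled at
 * final_handler + MIPS64_REFILL_INSNS, and the memcpy() below copies 0x100
 * bytes to ebase so that both slots are written in one go.
 */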
1243
1244static void build_r4000_tlb_refill_handler(void)
1245{
1246 u32 *p = tlb_handler;
1247 struct uasm_label *l = labels;
1248 struct uasm_reloc *r = relocs;
1249 u32 *f;
1250 unsigned int final_len;
1251 struct mips_huge_tlb_info htlb_info __maybe_unused;
1252 enum vmalloc64_mode vmalloc_mode __maybe_unused;
1253
1254 memset(tlb_handler, 0, sizeof(tlb_handler));
1255 memset(labels, 0, sizeof(labels));
1256 memset(relocs, 0, sizeof(relocs));
1257 memset(final_handler, 0, sizeof(final_handler));
1258
1259 if ((scratch_reg >= 0 || scratchpad_available()) && use_bbit_insns()) {
1260 htlb_info = build_fast_tlb_refill_handler(&p, &l, &r, K0, K1,
1261 scratch_reg);
1262 vmalloc_mode = refill_scratch;
1263 } else {
1264 htlb_info.huge_pte = K0;
1265 htlb_info.restore_scratch = 0;
1266 vmalloc_mode = refill_noscratch;
1267 /*
1268 * create the plain linear handler
1269 */
1270 if (bcm1250_m3_war()) {
1271 unsigned int segbits = 44;
1272
1273 uasm_i_dmfc0(&p, K0, C0_BADVADDR);
1274 uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
1275 uasm_i_xor(&p, K0, K0, K1);
1276 uasm_i_dsrl_safe(&p, K1, K0, 62);
1277 uasm_i_dsrl_safe(&p, K0, K0, 12 + 1);
1278 uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits);
1279 uasm_i_or(&p, K0, K0, K1);
1280 uasm_il_bnez(&p, &r, K0, label_leave);
1281 /* No need for uasm_i_nop */
1282 }
1283
1284#ifdef CONFIG_64BIT
1285 build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */
1286#else
1287 build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
1288#endif
1289
1290#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
1291 build_is_huge_pte(&p, &r, K0, K1, label_tlb_huge_update);
1292#endif
1293
1294 build_get_ptep(&p, K0, K1);
1295 build_update_entries(&p, K0, K1);
1296 build_tlb_write_entry(&p, &l, &r, tlb_random);
1297 uasm_l_leave(&l, p);
1298 uasm_i_eret(&p); /* return from trap */
1299 }
1300#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
1301 uasm_l_tlb_huge_update(&l, p);
1302 build_huge_update_entries(&p, htlb_info.huge_pte, K1);
1303 build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random,
1304 htlb_info.restore_scratch);
1305#endif
1306
1307#ifdef CONFIG_64BIT
1308 build_get_pgd_vmalloc64(&p, &l, &r, K0, K1, vmalloc_mode);
1309#endif
1310
1311 /*
1312 * Overflow check: For the 64-bit handler, we need at least one
1313 * free instruction slot for the wrap-around branch. In the worst
1314 * case, if the intended insertion point is a delay slot, we
1315 * need three, with the second nop'ed and the third being
1316 * unused.
1317 */
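	/*
	 * A rough sketch of the folding done below: the handler is copied to
	 * f = final_handler + MIPS64_REFILL_INSNS (the XTLB slot).  If it
	 * does not fit in 32 instructions it is split - preferably at the
	 * huge-page/vmalloc label, otherwise two instructions before the end
	 * of the slot - a branch to label_split is appended, and the
	 * remainder is placed at final_handler[0], i.e. the unused TLB
	 * refill slot.
	 */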
1318 switch (boot_cpu_type()) {
1319 default:
1320 if (sizeof(long) == 4) {
1321 case CPU_LOONGSON2:
1322 /* Loongson2 ebase is different from r4k, so we have more space */
1323 if ((p - tlb_handler) > 64)
1324 panic("TLB refill handler space exceeded");
1325 /*
1326 * Now fold the handler in the TLB refill handler space.
1327 */
1328 f = final_handler;
1329 /* Simplest case, just copy the handler. */
1330 uasm_copy_handler(relocs, labels, tlb_handler, p, f);
1331 final_len = p - tlb_handler;
1332 break;
1333 } else {
1334 if (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 1)
1335 || (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 3)
1336 && uasm_insn_has_bdelay(relocs,
1337 tlb_handler + MIPS64_REFILL_INSNS - 3)))
1338 panic("TLB refill handler space exceeded");
1339 /*
1340 * Now fold the handler in the TLB refill handler space.
1341 */
1342 f = final_handler + MIPS64_REFILL_INSNS;
1343 if ((p - tlb_handler) <= MIPS64_REFILL_INSNS) {
1344 /* Just copy the handler. */
1345 uasm_copy_handler(relocs, labels, tlb_handler, p, f);
1346 final_len = p - tlb_handler;
1347 } else {
1348#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
1349 const enum label_id ls = label_tlb_huge_update;
1350#else
1351 const enum label_id ls = label_vmalloc;
1352#endif
1353 u32 *split;
1354 int ov = 0;
1355 int i;
1356
1357 for (i = 0; i < ARRAY_SIZE(labels) && labels[i].lab != ls; i++)
1358 ;
1359 BUG_ON(i == ARRAY_SIZE(labels));
1360 split = labels[i].addr;
1361
1362 /*
1363 * See if we have overflown one way or the other.
1364 */
1365 if (split > tlb_handler + MIPS64_REFILL_INSNS ||
1366 split < p - MIPS64_REFILL_INSNS)
1367 ov = 1;
1368
1369 if (ov) {
1370 /*
1371 * Split two instructions before the end. One
1372 * for the branch and one for the instruction
1373 * in the delay slot.
1374 */
1375 split = tlb_handler + MIPS64_REFILL_INSNS - 2;
1376
1377 /*
1378 * If the branch would fall in a delay slot,
1379 * we must back up an additional instruction
1380 * so that it is no longer in a delay slot.
1381 */
1382 if (uasm_insn_has_bdelay(relocs, split - 1))
1383 split--;
1384 }
1385 /* Copy first part of the handler. */
1386 uasm_copy_handler(relocs, labels, tlb_handler, split, f);
1387 f += split - tlb_handler;
1388
1389 if (ov) {
1390 /* Insert branch. */
1391 uasm_l_split(&l, final_handler);
1392 uasm_il_b(&f, &r, label_split);
1393 if (uasm_insn_has_bdelay(relocs, split))
1394 uasm_i_nop(&f);
1395 else {
1396 uasm_copy_handler(relocs, labels,
1397 split, split + 1, f);
1398 uasm_move_labels(labels, f, f + 1, -1);
1399 f++;
1400 split++;
1401 }
1402 }
1403
1404 /* Copy the rest of the handler. */
1405 uasm_copy_handler(relocs, labels, split, p, final_handler);
1406 final_len = (f - (final_handler + MIPS64_REFILL_INSNS)) +
1407 (p - split);
1408 }
1409 }
1410 break;
1411 }
1412
1413 uasm_resolve_relocs(relocs, labels);
1414 pr_debug("Wrote TLB refill handler (%u instructions).\n",
1415 final_len);
1416
1417 memcpy((void *)ebase, final_handler, 0x100);
1418
1419 dump_handler("r4000_tlb_refill", (u32 *)ebase, 64);
1420}
1421
1422extern u32 handle_tlbl[], handle_tlbl_end[];
1423extern u32 handle_tlbs[], handle_tlbs_end[];
1424extern u32 handle_tlbm[], handle_tlbm_end[];
1425extern u32 tlbmiss_handler_setup_pgd_start[], tlbmiss_handler_setup_pgd[];
1426extern u32 tlbmiss_handler_setup_pgd_end[];
1427
1428static void build_setup_pgd(void)
1429{
1430 const int a0 = 4;
1431 const int __maybe_unused a1 = 5;
1432 const int __maybe_unused a2 = 6;
1433 u32 *p = tlbmiss_handler_setup_pgd_start;
1434 const int tlbmiss_handler_setup_pgd_size =
1435 tlbmiss_handler_setup_pgd_end - tlbmiss_handler_setup_pgd_start;
1436#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
1437 long pgdc = (long)pgd_current;
1438#endif
1439
1440 memset(tlbmiss_handler_setup_pgd, 0, tlbmiss_handler_setup_pgd_size *
1441 sizeof(tlbmiss_handler_setup_pgd[0]));
1442 memset(labels, 0, sizeof(labels));
1443 memset(relocs, 0, sizeof(relocs));
1444 pgd_reg = allocate_kscratch();
1445#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
1446 if (pgd_reg == -1) {
1447 struct uasm_label *l = labels;
1448 struct uasm_reloc *r = relocs;
1449
1450 /* PGD << 11 in c0_Context */
1451 /*
1452 * If it is a ckseg0 address, convert to a physical
1453 * address. Shifting right by 29 and adding 4 will
1454 * result in zero for these addresses.
1455 *
1456 */
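		/*
		 * Illustrative arithmetic, assuming a hypothetical CKSEG0
		 * pointer 0xffffffff80400000: sra by 29 gives -4, adding 4
		 * gives 0, so the branch falls through and dinsm clears bits
		 * 63..29, leaving the physical address 0x00400000.  Any
		 * non-CKSEG0 pointer yields a non-zero result and is left
		 * untouched.
		 */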
1457 UASM_i_SRA(&p, a1, a0, 29);
1458 UASM_i_ADDIU(&p, a1, a1, 4);
1459 uasm_il_bnez(&p, &r, a1, label_tlbl_goaround1);
1460 uasm_i_nop(&p);
1461 uasm_i_dinsm(&p, a0, 0, 29, 64 - 29);
1462 uasm_l_tlbl_goaround1(&l, p);
1463 UASM_i_SLL(&p, a0, a0, 11);
1464 uasm_i_jr(&p, 31);
1465 UASM_i_MTC0(&p, a0, C0_CONTEXT);
1466 } else {
1467 /* PGD in c0_KScratch */
1468 uasm_i_jr(&p, 31);
1469 UASM_i_MTC0(&p, a0, c0_kscratch(), pgd_reg);
1470 }
1471#else
1472#ifdef CONFIG_SMP
1473 /* Save PGD to pgd_current[smp_processor_id()] */
1474 UASM_i_CPUID_MFC0(&p, a1, SMP_CPUID_REG);
1475 UASM_i_SRL_SAFE(&p, a1, a1, SMP_CPUID_PTRSHIFT);
1476 UASM_i_LA_mostly(&p, a2, pgdc);
1477 UASM_i_ADDU(&p, a2, a2, a1);
1478 UASM_i_SW(&p, a0, uasm_rel_lo(pgdc), a2);
1479#else
1480 UASM_i_LA_mostly(&p, a2, pgdc);
1481 UASM_i_SW(&p, a0, uasm_rel_lo(pgdc), a2);
1482#endif /* SMP */
1483 uasm_i_jr(&p, 31);
1484
1485 /* if pgd_reg is allocated, save PGD also to scratch register */
1486 if (pgd_reg != -1)
1487 UASM_i_MTC0(&p, a0, c0_kscratch(), pgd_reg);
1488 else
1489 uasm_i_nop(&p);
1490#endif
1491 if (p >= tlbmiss_handler_setup_pgd_end)
1492 panic("tlbmiss_handler_setup_pgd space exceeded");
1493
1494 uasm_resolve_relocs(relocs, labels);
1495 pr_debug("Wrote tlbmiss_handler_setup_pgd (%u instructions).\n",
1496 (unsigned int)(p - tlbmiss_handler_setup_pgd));
1497
1498 dump_handler("tlbmiss_handler", tlbmiss_handler_setup_pgd,
1499 tlbmiss_handler_setup_pgd_size);
1500}
1501
1502static void
1503iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr)
1504{
1505#ifdef CONFIG_SMP
1506# ifdef CONFIG_64BIT_PHYS_ADDR
1507 if (cpu_has_64bits)
1508 uasm_i_lld(p, pte, 0, ptr);
1509 else
1510# endif
1511 UASM_i_LL(p, pte, 0, ptr);
1512#else
1513# ifdef CONFIG_64BIT_PHYS_ADDR
1514 if (cpu_has_64bits)
1515 uasm_i_ld(p, pte, 0, ptr);
1516 else
1517# endif
1518 UASM_i_LW(p, pte, 0, ptr);
1519#endif
1520}
1521
1522static void
1523iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
1524 unsigned int mode)
1525{
1526#ifdef CONFIG_64BIT_PHYS_ADDR
1527 unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY);
1528#endif
1529
1530 uasm_i_ori(p, pte, pte, mode);
1531#ifdef CONFIG_SMP
1532# ifdef CONFIG_64BIT_PHYS_ADDR
1533 if (cpu_has_64bits)
1534 uasm_i_scd(p, pte, 0, ptr);
1535 else
1536# endif
1537 UASM_i_SC(p, pte, 0, ptr);
1538
1539 if (r10000_llsc_war())
1540 uasm_il_beqzl(p, r, pte, label_smp_pgtable_change);
1541 else
1542 uasm_il_beqz(p, r, pte, label_smp_pgtable_change);
1543
1544# ifdef CONFIG_64BIT_PHYS_ADDR
1545 if (!cpu_has_64bits) {
1546 /* no uasm_i_nop needed */
1547 uasm_i_ll(p, pte, sizeof(pte_t) / 2, ptr);
1548 uasm_i_ori(p, pte, pte, hwmode);
1549 uasm_i_sc(p, pte, sizeof(pte_t) / 2, ptr);
1550 uasm_il_beqz(p, r, pte, label_smp_pgtable_change);
1551 /* no uasm_i_nop needed */
1552 uasm_i_lw(p, pte, 0, ptr);
1553 } else
1554 uasm_i_nop(p);
1555# else
1556 uasm_i_nop(p);
1557# endif
1558#else
1559# ifdef CONFIG_64BIT_PHYS_ADDR
1560 if (cpu_has_64bits)
1561 uasm_i_sd(p, pte, 0, ptr);
1562 else
1563# endif
1564 UASM_i_SW(p, pte, 0, ptr);
1565
1566# ifdef CONFIG_64BIT_PHYS_ADDR
1567 if (!cpu_has_64bits) {
1568 uasm_i_lw(p, pte, sizeof(pte_t) / 2, ptr);
1569 uasm_i_ori(p, pte, pte, hwmode);
1570 uasm_i_sw(p, pte, sizeof(pte_t) / 2, ptr);
1571 uasm_i_lw(p, pte, 0, ptr);
1572 }
1573# endif
1574#endif
1575}
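
/*
 * For the SMP case the two helpers above cooperate to form a classic
 * ll/sc retry loop; roughly (a sketch, 32-bit physical addresses assumed):
 *
 *	smp_pgtable_change:
 *		ll	pte, 0(ptr)		# iPTE_LW
 *		...				# presence/permission checks
 *		ori	pte, pte, mode		# iPTE_SW: set the new bits
 *		sc	pte, 0(ptr)
 *		beqz	pte, smp_pgtable_change	# retry if sc failed
 *		 nop
 */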
1576
1577/*
1578 * Check if the PTE is present; if not, jump to LABEL. PTR points to
1579 * the page table where this PTE is located; the PTE will be reloaded
1580 * with its original value.
1581 */
1582static void
1583build_pte_present(u32 **p, struct uasm_reloc **r,
1584 int pte, int ptr, int scratch, enum label_id lid)
1585{
1586 int t = scratch >= 0 ? scratch : pte;
1587
1588 if (cpu_has_rixi) {
1589 if (use_bbit_insns()) {
1590 uasm_il_bbit0(p, r, pte, ilog2(_PAGE_PRESENT), lid);
1591 uasm_i_nop(p);
1592 } else {
1593 uasm_i_andi(p, t, pte, _PAGE_PRESENT);
1594 uasm_il_beqz(p, r, t, lid);
1595 if (pte == t)
1596 /* You lose the SMP race :-( */
1597 iPTE_LW(p, pte, ptr);
1598 }
1599 } else {
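		/*
		 * The andi/xori pair below leaves t zero only if both
		 * _PAGE_PRESENT and _PAGE_READ are set, so bnez branches to
		 * the slow path for a missing or unreadable page.
		 */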
1600 uasm_i_andi(p, t, pte, _PAGE_PRESENT | _PAGE_READ);
1601 uasm_i_xori(p, t, t, _PAGE_PRESENT | _PAGE_READ);
1602 uasm_il_bnez(p, r, t, lid);
1603 if (pte == t)
1604 /* You lose the SMP race :-( */
1605 iPTE_LW(p, pte, ptr);
1606 }
1607}
1608
1609/* Make PTE valid, store result in PTR. */
1610static void
1611build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte,
1612 unsigned int ptr)
1613{
1614 unsigned int mode = _PAGE_VALID | _PAGE_ACCESSED;
1615
1616 iPTE_SW(p, r, pte, ptr, mode);
1617}
1618
1619/*
1620 * Check if PTE can be written to, if not branch to LABEL. Regardless
1621 * restore PTE with value from PTR when done.
1622 */
1623static void
1624build_pte_writable(u32 **p, struct uasm_reloc **r,
1625 unsigned int pte, unsigned int ptr, int scratch,
1626 enum label_id lid)
1627{
1628 int t = scratch >= 0 ? scratch : pte;
1629
1630 uasm_i_andi(p, t, pte, _PAGE_PRESENT | _PAGE_WRITE);
1631 uasm_i_xori(p, t, t, _PAGE_PRESENT | _PAGE_WRITE);
1632 uasm_il_bnez(p, r, t, lid);
1633 if (pte == t)
1634 /* You lose the SMP race :-( */
1635 iPTE_LW(p, pte, ptr);
1636 else
1637 uasm_i_nop(p);
1638}
1639
1640/* Make PTE writable, update software status bits as well, then store
1641 * at PTR.
1642 */
1643static void
1644build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte,
1645 unsigned int ptr)
1646{
1647 unsigned int mode = (_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID
1648 | _PAGE_DIRTY);
1649
1650 iPTE_SW(p, r, pte, ptr, mode);
1651}
1652
1653/*
1654 * Check if PTE can be modified, if not branch to LABEL. Regardless
1655 * restore PTE with value from PTR when done.
1656 */
1657static void
1658build_pte_modifiable(u32 **p, struct uasm_reloc **r,
1659 unsigned int pte, unsigned int ptr, int scratch,
1660 enum label_id lid)
1661{
1662 if (use_bbit_insns()) {
1663 uasm_il_bbit0(p, r, pte, ilog2(_PAGE_WRITE), lid);
1664 uasm_i_nop(p);
1665 } else {
1666 int t = scratch >= 0 ? scratch : pte;
1667 uasm_i_andi(p, t, pte, _PAGE_WRITE);
1668 uasm_il_beqz(p, r, t, lid);
1669 if (pte == t)
1670 /* You lose the SMP race :-( */
1671 iPTE_LW(p, pte, ptr);
1672 }
1673}
1674
1675#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
1676
1677
1678/*
1679 * R3000 style TLB load/store/modify handlers.
1680 */
1681
1682/*
1683 * This places the pte into ENTRYLO0 and writes it with tlbwi.
1684 * Then it returns.
1685 */
1686static void
1687build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp)
1688{
1689 uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
1690 uasm_i_mfc0(p, tmp, C0_EPC); /* cp0 delay */
1691 uasm_i_tlbwi(p);
1692 uasm_i_jr(p, tmp);
1693 uasm_i_rfe(p); /* branch delay */
1694}
1695
1696/*
1697 * This places the pte into ENTRYLO0 and writes it with tlbwi
1698 * or tlbwr as appropriate. This is because the index register
1699 * may have the probe fail bit set as a result of a trap on a
1700 * kseg2 access, i.e. without refill. Then it returns.
1701 */
1702static void
1703build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l,
1704 struct uasm_reloc **r, unsigned int pte,
1705 unsigned int tmp)
1706{
1707 uasm_i_mfc0(p, tmp, C0_INDEX);
1708 uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
1709 uasm_il_bltz(p, r, tmp, label_r3000_write_probe_fail); /* cp0 delay */
1710 uasm_i_mfc0(p, tmp, C0_EPC); /* branch delay */
1711 uasm_i_tlbwi(p); /* cp0 delay */
1712 uasm_i_jr(p, tmp);
1713 uasm_i_rfe(p); /* branch delay */
1714 uasm_l_r3000_write_probe_fail(l, *p);
1715 uasm_i_tlbwr(p); /* cp0 delay */
1716 uasm_i_jr(p, tmp);
1717 uasm_i_rfe(p); /* branch delay */
1718}
1719
1720static void
1721build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte,
1722 unsigned int ptr)
1723{
1724 long pgdc = (long)pgd_current;
1725
1726 uasm_i_mfc0(p, pte, C0_BADVADDR);
1727 uasm_i_lui(p, ptr, uasm_rel_hi(pgdc)); /* cp0 delay */
1728 uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
1729 uasm_i_srl(p, pte, pte, 22); /* load delay */
1730 uasm_i_sll(p, pte, pte, 2);
1731 uasm_i_addu(p, ptr, ptr, pte);
1732 uasm_i_mfc0(p, pte, C0_CONTEXT);
1733 uasm_i_lw(p, ptr, 0, ptr); /* cp0 delay */
1734 uasm_i_andi(p, pte, pte, 0xffc); /* load delay */
1735 uasm_i_addu(p, ptr, ptr, pte);
1736 uasm_i_lw(p, pte, 0, ptr);
1737 uasm_i_tlbp(p); /* load delay */
1738}
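
/*
 * Roughly, assuming the classic R3000 setup (4 kB pages, 1024-entry page
 * tables, one PGD entry per 4 MB), the head above computes:
 *
 *	ptr = pgd_current;			# current task's PGD base
 *	ptr = *(ptr + ((badvaddr >> 22) << 2));	# PGD entry -> page table
 *	pte = *(ptr + (c0_context & 0xffc));	# BadVPN bits select the PTE
 *	tlbp					# probe for the matching index
 *
 * The hardware places BadVAddr bits 21..12 into c0_Context bits 11..2, so
 * the andi with 0xffc is the PTE's byte offset within its page table.
 */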
1739
1740static void build_r3000_tlb_load_handler(void)
1741{
1742 u32 *p = handle_tlbl;
1743 const int handle_tlbl_size = handle_tlbl_end - handle_tlbl;
1744 struct uasm_label *l = labels;
1745 struct uasm_reloc *r = relocs;
1746
1747 memset(handle_tlbl, 0, handle_tlbl_size * sizeof(handle_tlbl[0]));
1748 memset(labels, 0, sizeof(labels));
1749 memset(relocs, 0, sizeof(relocs));
1750
1751 build_r3000_tlbchange_handler_head(&p, K0, K1);
1752 build_pte_present(&p, &r, K0, K1, -1, label_nopage_tlbl);
1753 uasm_i_nop(&p); /* load delay */
1754 build_make_valid(&p, &r, K0, K1);
1755 build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);
1756
1757 uasm_l_nopage_tlbl(&l, p);
1758 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
1759 uasm_i_nop(&p);
1760
1761 if (p >= handle_tlbl_end)
1762 panic("TLB load handler fastpath space exceeded");
1763
1764 uasm_resolve_relocs(relocs, labels);
1765 pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
1766 (unsigned int)(p - handle_tlbl));
1767
1768 dump_handler("r3000_tlb_load", handle_tlbl, handle_tlbl_size);
1769}
1770
1771static void build_r3000_tlb_store_handler(void)
1772{
1773 u32 *p = handle_tlbs;
1774 const int handle_tlbs_size = handle_tlbs_end - handle_tlbs;
1775 struct uasm_label *l = labels;
1776 struct uasm_reloc *r = relocs;
1777
1778 memset(handle_tlbs, 0, handle_tlbs_size * sizeof(handle_tlbs[0]));
1779 memset(labels, 0, sizeof(labels));
1780 memset(relocs, 0, sizeof(relocs));
1781
1782 build_r3000_tlbchange_handler_head(&p, K0, K1);
1783 build_pte_writable(&p, &r, K0, K1, -1, label_nopage_tlbs);
1784 uasm_i_nop(&p); /* load delay */
1785 build_make_write(&p, &r, K0, K1);
1786 build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);
1787
1788 uasm_l_nopage_tlbs(&l, p);
1789 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
1790 uasm_i_nop(&p);
1791
1792 if (p >= handle_tlbs_end)
1793 panic("TLB store handler fastpath space exceeded");
1794
1795 uasm_resolve_relocs(relocs, labels);
1796 pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
1797 (unsigned int)(p - handle_tlbs));
1798
1799 dump_handler("r3000_tlb_store", handle_tlbs, handle_tlbs_size);
1800}
1801
1802static void build_r3000_tlb_modify_handler(void)
1803{
1804 u32 *p = handle_tlbm;
1805 const int handle_tlbm_size = handle_tlbm_end - handle_tlbm;
1806 struct uasm_label *l = labels;
1807 struct uasm_reloc *r = relocs;
1808
1809 memset(handle_tlbm, 0, handle_tlbm_size * sizeof(handle_tlbm[0]));
1810 memset(labels, 0, sizeof(labels));
1811 memset(relocs, 0, sizeof(relocs));
1812
1813 build_r3000_tlbchange_handler_head(&p, K0, K1);
1814 build_pte_modifiable(&p, &r, K0, K1, -1, label_nopage_tlbm);
1815 uasm_i_nop(&p); /* load delay */
1816 build_make_write(&p, &r, K0, K1);
1817 build_r3000_pte_reload_tlbwi(&p, K0, K1);
1818
1819 uasm_l_nopage_tlbm(&l, p);
1820 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
1821 uasm_i_nop(&p);
1822
1823 if (p >= handle_tlbm_end)
1824 panic("TLB modify handler fastpath space exceeded");
1825
1826 uasm_resolve_relocs(relocs, labels);
1827 pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
1828 (unsigned int)(p - handle_tlbm));
1829
1830 dump_handler("r3000_tlb_modify", handle_tlbm, handle_tlbm_size);
1831}
1832#endif /* CONFIG_MIPS_PGD_C0_CONTEXT */
1833
1834/*
1835 * R4000 style TLB load/store/modify handlers.
1836 */
1837static struct work_registers
1838build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
1839 struct uasm_reloc **r)
1840{
1841 struct work_registers wr = build_get_work_registers(p);
1842
1843#ifdef CONFIG_64BIT
1844 build_get_pmde64(p, l, r, wr.r1, wr.r2); /* get pmd in ptr */
1845#else
1846 build_get_pgde32(p, wr.r1, wr.r2); /* get pgd in ptr */
1847#endif
1848
1849#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
1850 /*
1851 * For huge TLB entries, the pmd doesn't contain an address but
1852 * instead contains the TLB pte. Check the _PAGE_HUGE bit to
1853 * see if we need to jump to huge TLB processing.
1854 */
1855 build_is_huge_pte(p, r, wr.r1, wr.r2, label_tlb_huge_update);
1856#endif
1857
1858 UASM_i_MFC0(p, wr.r1, C0_BADVADDR);
1859 UASM_i_LW(p, wr.r2, 0, wr.r2);
1860 UASM_i_SRL(p, wr.r1, wr.r1, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2);
1861 uasm_i_andi(p, wr.r1, wr.r1, (PTRS_PER_PTE - 1) << PTE_T_LOG2);
1862 UASM_i_ADDU(p, wr.r2, wr.r2, wr.r1);
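	/*
	 * Worked example (illustrative, assuming a 64-bit kernel with 4 kB
	 * pages and 8-byte PTEs): for BadVAddr 0x00402abc the shift by
	 * PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2 = 9 gives 0x2015, the andi
	 * with 0xff8 masks it to 0x10, i.e. the byte offset of PTE #2, which
	 * is then added to the page table pointer loaded above.
	 */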
1863
1864#ifdef CONFIG_SMP
1865 uasm_l_smp_pgtable_change(l, *p);
1866#endif
1867 iPTE_LW(p, wr.r1, wr.r2); /* get even pte */
1868 if (!m4kc_tlbp_war())
1869 build_tlb_probe_entry(p);
1870 return wr;
1871}
1872
1873static void
1874build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
1875 struct uasm_reloc **r, unsigned int tmp,
1876 unsigned int ptr)
1877{
1878 uasm_i_ori(p, ptr, ptr, sizeof(pte_t));
1879 uasm_i_xori(p, ptr, ptr, sizeof(pte_t));
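	/*
	 * The ori/xori pair above rounds ptr down to the even PTE of the
	 * pair: ori sets the sizeof(pte_t) bit, xori then clears it, so the
	 * same tail works whether the even or the odd page faulted.
	 */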
1880 build_update_entries(p, tmp, ptr);
1881 build_tlb_write_entry(p, l, r, tlb_indexed);
1882 uasm_l_leave(l, *p);
1883 build_restore_work_registers(p);
1884 uasm_i_eret(p); /* return from trap */
1885
1886#ifdef CONFIG_64BIT
1887 build_get_pgd_vmalloc64(p, l, r, tmp, ptr, not_refill);
1888#endif
1889}
1890
1891static void build_r4000_tlb_load_handler(void)
1892{
1893 u32 *p = handle_tlbl;
1894 const int handle_tlbl_size = handle_tlbl_end - handle_tlbl;
1895 struct uasm_label *l = labels;
1896 struct uasm_reloc *r = relocs;
1897 struct work_registers wr;
1898
1899 memset(handle_tlbl, 0, handle_tlbl_size * sizeof(handle_tlbl[0]));
1900 memset(labels, 0, sizeof(labels));
1901 memset(relocs, 0, sizeof(relocs));
1902
1903 if (bcm1250_m3_war()) {
1904 unsigned int segbits = 44;
1905
1906 uasm_i_dmfc0(&p, K0, C0_BADVADDR);
1907 uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
1908 uasm_i_xor(&p, K0, K0, K1);
1909 uasm_i_dsrl_safe(&p, K1, K0, 62);
1910 uasm_i_dsrl_safe(&p, K0, K0, 12 + 1);
1911 uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits);
1912 uasm_i_or(&p, K0, K0, K1);
1913 uasm_il_bnez(&p, &r, K0, label_leave);
1914 /* No need for uasm_i_nop */
1915 }
1916
1917 wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
1918 build_pte_present(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbl);
1919 if (m4kc_tlbp_war())
1920 build_tlb_probe_entry(&p);
1921
1922 if (cpu_has_rixi) {
1923 /*
1924 * If the page is not _PAGE_VALID, RI or XI could not
1925 * have triggered it. Skip the expensive test.
1926 */
1927 if (use_bbit_insns()) {
1928 uasm_il_bbit0(&p, &r, wr.r1, ilog2(_PAGE_VALID),
1929 label_tlbl_goaround1);
1930 } else {
1931 uasm_i_andi(&p, wr.r3, wr.r1, _PAGE_VALID);
1932 uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround1);
1933 }
1934 uasm_i_nop(&p);
1935
1936 uasm_i_tlbr(&p);
1937
1938 switch (current_cpu_type()) {
1939 default:
1940 if (cpu_has_mips_r2) {
1941 uasm_i_ehb(&p);
1942
1943 case CPU_CAVIUM_OCTEON:
1944 case CPU_CAVIUM_OCTEON_PLUS:
1945 case CPU_CAVIUM_OCTEON2:
1946 break;
1947 }
1948 }
1949
1950 /* Examine entrylo 0 or 1 based on ptr. */
1951 if (use_bbit_insns()) {
1952 uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8);
1953 } else {
1954 uasm_i_andi(&p, wr.r3, wr.r2, sizeof(pte_t));
1955 uasm_i_beqz(&p, wr.r3, 8);
1956 }
1957 /* load it in the delay slot */
1958 UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO0);
1959 /* load it if ptr is odd */
1960 UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO1);
1961 /*
1962 * If the entryLo (now in wr.r3) is valid (bit 1), RI or
1963 * XI must have triggered it.
1964 */
1965 if (use_bbit_insns()) {
1966 uasm_il_bbit1(&p, &r, wr.r3, 1, label_nopage_tlbl);
1967 uasm_i_nop(&p);
1968 uasm_l_tlbl_goaround1(&l, p);
1969 } else {
1970 uasm_i_andi(&p, wr.r3, wr.r3, 2);
1971 uasm_il_bnez(&p, &r, wr.r3, label_nopage_tlbl);
1972 uasm_i_nop(&p);
1973 }
1974 uasm_l_tlbl_goaround1(&l, p);
1975 }
1976 build_make_valid(&p, &r, wr.r1, wr.r2);
1977 build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
1978
1979#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
1980 /*
1981 * This is the entry point when build_r4000_tlbchange_handler_head
1982 * spots a huge page.
1983 */
1984 uasm_l_tlb_huge_update(&l, p);
1985 iPTE_LW(&p, wr.r1, wr.r2);
1986 build_pte_present(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbl);
1987 build_tlb_probe_entry(&p);
1988
1989 if (cpu_has_rixi) {
1990 /*
1991 * If the page is not _PAGE_VALID, RI or XI could not
1992 * have triggered it. Skip the expensive test.
1993 */
1994 if (use_bbit_insns()) {
1995 uasm_il_bbit0(&p, &r, wr.r1, ilog2(_PAGE_VALID),
1996 label_tlbl_goaround2);
1997 } else {
1998 uasm_i_andi(&p, wr.r3, wr.r1, _PAGE_VALID);
1999 uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround2);
2000 }
2001 uasm_i_nop(&p);
2002
2003 uasm_i_tlbr(&p);
2004
2005 switch (current_cpu_type()) {
2006 default:
2007 if (cpu_has_mips_r2) {
2008 uasm_i_ehb(&p);
2009
2010 case CPU_CAVIUM_OCTEON:
2011 case CPU_CAVIUM_OCTEON_PLUS:
2012 case CPU_CAVIUM_OCTEON2:
2013 break;
2014 }
2015 }
2016
2017 /* Examine entrylo 0 or 1 based on ptr. */
2018 if (use_bbit_insns()) {
2019 uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8);
2020 } else {
2021 uasm_i_andi(&p, wr.r3, wr.r2, sizeof(pte_t));
2022 uasm_i_beqz(&p, wr.r3, 8);
2023 }
2024 /* load it in the delay slot */
2025 UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO0);
2026 /* load it if ptr is odd */
2027 UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO1);
2028 /*
2029 * If the entryLo (now in wr.r3) is valid (bit 1), RI or
2030 * XI must have triggered it.
2031 */
2032 if (use_bbit_insns()) {
2033 uasm_il_bbit0(&p, &r, wr.r3, 1, label_tlbl_goaround2);
2034 } else {
2035 uasm_i_andi(&p, wr.r3, wr.r3, 2);
2036 uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround2);
2037 }
2038 if (PM_DEFAULT_MASK == 0)
2039 uasm_i_nop(&p);
2040 /*
2041 * We clobbered C0_PAGEMASK; restore it. On the other branch
2042 * it is restored in build_huge_tlb_write_entry.
2043 */
2044 build_restore_pagemask(&p, &r, wr.r3, label_nopage_tlbl, 0);
2045
2046 uasm_l_tlbl_goaround2(&l, p);
2047 }
2048 uasm_i_ori(&p, wr.r1, wr.r1, (_PAGE_ACCESSED | _PAGE_VALID));
2049 build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
2050#endif
2051
2052 uasm_l_nopage_tlbl(&l, p);
2053 build_restore_work_registers(&p);
2054#ifdef CONFIG_CPU_MICROMIPS
2055 if ((unsigned long)tlb_do_page_fault_0 & 1) {
2056 uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_0));
2057 uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_0));
2058 uasm_i_jr(&p, K0);
2059 } else
2060#endif
2061 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
2062 uasm_i_nop(&p);
2063
2064 if (p >= handle_tlbl_end)
2065 panic("TLB load handler fastpath space exceeded");
2066
2067 uasm_resolve_relocs(relocs, labels);
2068 pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
2069 (unsigned int)(p - handle_tlbl));
2070
2071 dump_handler("r4000_tlb_load", handle_tlbl, handle_tlbl_size);
2072}
2073
2074static void build_r4000_tlb_store_handler(void)
2075{
2076 u32 *p = handle_tlbs;
2077 const int handle_tlbs_size = handle_tlbs_end - handle_tlbs;
2078 struct uasm_label *l = labels;
2079 struct uasm_reloc *r = relocs;
2080 struct work_registers wr;
2081
2082 memset(handle_tlbs, 0, handle_tlbs_size * sizeof(handle_tlbs[0]));
2083 memset(labels, 0, sizeof(labels));
2084 memset(relocs, 0, sizeof(relocs));
2085
2086 wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
2087 build_pte_writable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbs);
2088 if (m4kc_tlbp_war())
2089 build_tlb_probe_entry(&p);
2090 build_make_write(&p, &r, wr.r1, wr.r2);
2091 build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
2092
2093#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
2094 /*
2095 * This is the entry point when
2096 * build_r4000_tlbchange_handler_head spots a huge page.
2097 */
2098 uasm_l_tlb_huge_update(&l, p);
2099 iPTE_LW(&p, wr.r1, wr.r2);
2100 build_pte_writable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbs);
2101 build_tlb_probe_entry(&p);
2102 uasm_i_ori(&p, wr.r1, wr.r1,
2103 _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
2104 build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
2105#endif
2106
2107 uasm_l_nopage_tlbs(&l, p);
2108 build_restore_work_registers(&p);
2109#ifdef CONFIG_CPU_MICROMIPS
2110 if ((unsigned long)tlb_do_page_fault_1 & 1) {
2111 uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_1));
2112 uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_1));
2113 uasm_i_jr(&p, K0);
2114 } else
2115#endif
2116 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
2117 uasm_i_nop(&p);
2118
2119 if (p >= handle_tlbs_end)
2120 panic("TLB store handler fastpath space exceeded");
2121
2122 uasm_resolve_relocs(relocs, labels);
2123 pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
2124 (unsigned int)(p - handle_tlbs));
2125
2126 dump_handler("r4000_tlb_store", handle_tlbs, handle_tlbs_size);
2127}
2128
2129static void build_r4000_tlb_modify_handler(void)
2130{
2131 u32 *p = handle_tlbm;
2132 const int handle_tlbm_size = handle_tlbm_end - handle_tlbm;
2133 struct uasm_label *l = labels;
2134 struct uasm_reloc *r = relocs;
2135 struct work_registers wr;
2136
2137 memset(handle_tlbm, 0, handle_tlbm_size * sizeof(handle_tlbm[0]));
2138 memset(labels, 0, sizeof(labels));
2139 memset(relocs, 0, sizeof(relocs));
2140
2141 wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
2142 build_pte_modifiable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbm);
2143 if (m4kc_tlbp_war())
2144 build_tlb_probe_entry(&p);
2145 /* Present and writable bits set, set accessed and dirty bits. */
2146 build_make_write(&p, &r, wr.r1, wr.r2);
2147 build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
2148
2149#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
2150 /*
2151 * This is the entry point when
2152 * build_r4000_tlbchange_handler_head spots a huge page.
2153 */
2154 uasm_l_tlb_huge_update(&l, p);
2155 iPTE_LW(&p, wr.r1, wr.r2);
2156 build_pte_modifiable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbm);
2157 build_tlb_probe_entry(&p);
2158 uasm_i_ori(&p, wr.r1, wr.r1,
2159 _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
2160 build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
2161#endif
2162
2163 uasm_l_nopage_tlbm(&l, p);
2164 build_restore_work_registers(&p);
2165#ifdef CONFIG_CPU_MICROMIPS
2166 if ((unsigned long)tlb_do_page_fault_1 & 1) {
2167 uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_1));
2168 uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_1));
2169 uasm_i_jr(&p, K0);
2170 } else
2171#endif
2172 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
2173 uasm_i_nop(&p);
2174
2175 if (p >= handle_tlbm_end)
2176 panic("TLB modify handler fastpath space exceeded");
2177
2178 uasm_resolve_relocs(relocs, labels);
2179 pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
2180 (unsigned int)(p - handle_tlbm));
2181
2182 dump_handler("r4000_tlb_modify", handle_tlbm, handle_tlbm_size);
2183}
2184
2185static void flush_tlb_handlers(void)
2186{
2187 local_flush_icache_range((unsigned long)handle_tlbl,
2188 (unsigned long)handle_tlbl_end);
2189 local_flush_icache_range((unsigned long)handle_tlbs,
2190 (unsigned long)handle_tlbs_end);
2191 local_flush_icache_range((unsigned long)handle_tlbm,
2192 (unsigned long)handle_tlbm_end);
2193 local_flush_icache_range((unsigned long)tlbmiss_handler_setup_pgd,
2194 (unsigned long)tlbmiss_handler_setup_pgd_end);
2195}
2196
2197void build_tlb_refill_handler(void)
2198{
2199 /*
2200 * The refill handler is generated per-CPU; multi-node systems
2201 * may have local storage for it. The other handlers are only
2202 * needed once.
2203 */
2204 static int run_once = 0;
2205
2206 output_pgtable_bits_defines();
2207
2208#ifdef CONFIG_64BIT
2209 check_for_high_segbits = current_cpu_data.vmbits > (PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
2210#endif
2211
2212 switch (current_cpu_type()) {
2213 case CPU_R2000:
2214 case CPU_R3000:
2215 case CPU_R3000A:
2216 case CPU_R3081E:
2217 case CPU_TX3912:
2218 case CPU_TX3922:
2219 case CPU_TX3927:
2220#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
2221 if (cpu_has_local_ebase)
2222 build_r3000_tlb_refill_handler();
2223 if (!run_once) {
2224 if (!cpu_has_local_ebase)
2225 build_r3000_tlb_refill_handler();
2226 build_setup_pgd();
2227 build_r3000_tlb_load_handler();
2228 build_r3000_tlb_store_handler();
2229 build_r3000_tlb_modify_handler();
2230 flush_tlb_handlers();
2231 run_once++;
2232 }
2233#else
2234 panic("No R3000 TLB refill handler");
2235#endif
2236 break;
2237
2238 case CPU_R6000:
2239 case CPU_R6000A:
2240 panic("No R6000 TLB refill handler yet");
2241 break;
2242
2243 case CPU_R8000:
2244 panic("No R8000 TLB refill handler yet");
2245 break;
2246
2247 default:
2248 if (!run_once) {
2249 scratch_reg = allocate_kscratch();
2250 build_setup_pgd();
2251 build_r4000_tlb_load_handler();
2252 build_r4000_tlb_store_handler();
2253 build_r4000_tlb_modify_handler();
2254 if (!cpu_has_local_ebase)
2255 build_r4000_tlb_refill_handler();
2256 flush_tlb_handlers();
2257 run_once++;
2258 }
2259 if (cpu_has_local_ebase)
2260 build_r4000_tlb_refill_handler();
2261 }
2262}