Linux v5.4: arch/mips/kvm/entry.c
  1/*
  2 * This file is subject to the terms and conditions of the GNU General Public
  3 * License.  See the file "COPYING" in the main directory of this archive
  4 * for more details.
  5 *
  6 * Generation of main entry point for the guest, exception handling.
  7 *
  8 * Copyright (C) 2012  MIPS Technologies, Inc.
  9 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 10 *
 11 * Copyright (C) 2016 Imagination Technologies Ltd.
 12 */
 13
 14#include <linux/kvm_host.h>
 15#include <linux/log2.h>
 16#include <asm/mmu_context.h>
 17#include <asm/msa.h>
 18#include <asm/setup.h>
 19#include <asm/tlbex.h>
 20#include <asm/uasm.h>
 21
 22/* Register names */
 23#define ZERO		0
 24#define AT		1
 25#define V0		2
 26#define V1		3
 27#define A0		4
 28#define A1		5
 29
 30#if _MIPS_SIM == _MIPS_SIM_ABI32
 31#define T0		8
 32#define T1		9
 33#define T2		10
 34#define T3		11
 35#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
 36
 37#if _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32
 38#define T0		12
 39#define T1		13
 40#define T2		14
 41#define T3		15
 42#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32 */
 43
 44#define S0		16
 45#define S1		17
 46#define T9		25
 47#define K0		26
 48#define K1		27
 49#define GP		28
 50#define SP		29
 51#define RA		31
 52
 53/* Some CP0 registers */
 54#define C0_PWBASE	5, 5
 55#define C0_HWRENA	7, 0
 56#define C0_BADVADDR	8, 0
 57#define C0_BADINSTR	8, 1
 58#define C0_BADINSTRP	8, 2
 59#define C0_ENTRYHI	10, 0
 60#define C0_GUESTCTL1	10, 4
 61#define C0_STATUS	12, 0
 62#define C0_GUESTCTL0	12, 6
 63#define C0_CAUSE	13, 0
 64#define C0_EPC		14, 0
 65#define C0_EBASE	15, 1
 66#define C0_CONFIG5	16, 5
 67#define C0_DDATA_LO	28, 3
 68#define C0_ERROREPC	30, 0
 69
 70#define CALLFRAME_SIZ   32
 71
 72#ifdef CONFIG_64BIT
 73#define ST0_KX_IF_64	ST0_KX
 74#else
 75#define ST0_KX_IF_64	0
 76#endif
 77
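/*
 * Each scratch_*[2] array below holds a { CP0 register, select } pair: the
 * C0_* macros above expand to two comma-separated values (C0_DDATA_LO is
 * "28, 3"), so the single-entry initializers fill both elements.
 */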
 78static unsigned int scratch_vcpu[2] = { C0_DDATA_LO };
 79static unsigned int scratch_tmp[2] = { C0_ERROREPC };
 80
 81enum label_id {
 82	label_fpu_1 = 1,
 83	label_msa_1,
 84	label_return_to_host,
 85	label_kernel_asid,
 86	label_exit_common,
 87};
 88
 89UASM_L_LA(_fpu_1)
 90UASM_L_LA(_msa_1)
 91UASM_L_LA(_return_to_host)
 92UASM_L_LA(_kernel_asid)
 93UASM_L_LA(_exit_common)
 94
 95static void *kvm_mips_build_enter_guest(void *addr);
 96static void *kvm_mips_build_ret_from_exit(void *addr);
 97static void *kvm_mips_build_ret_to_guest(void *addr);
 98static void *kvm_mips_build_ret_to_host(void *addr);
 99
100/*
101 * The version of this function in tlbex.c uses current_cpu_type(), but for KVM
102 * we assume symmetry.
103 */
104static int c0_kscratch(void)
105{
106	switch (boot_cpu_type()) {
107	case CPU_XLP:
108	case CPU_XLR:
109		return 22;
110	default:
111		return 31;
112	}
113}
114
115/**
116 * kvm_mips_entry_setup() - Perform global setup for entry code.
117 *
118 * Perform global setup for entry code, such as choosing a scratch register.
119 *
120 * Returns:	0 on success.
121 *		-errno on failure.
122 */
123int kvm_mips_entry_setup(void)
124{
125	/*
126	 * We prefer to use KScratchN registers if they are available over the
127	 * defaults above, which may not work on all cores.
128	 */
129	unsigned int kscratch_mask = cpu_data[0].kscratch_mask;
130
131	if (pgd_reg != -1)
132		kscratch_mask &= ~BIT(pgd_reg);
133
134	/* Pick a scratch register for storing VCPU */
135	if (kscratch_mask) {
136		scratch_vcpu[0] = c0_kscratch();
137		scratch_vcpu[1] = ffs(kscratch_mask) - 1;
138		kscratch_mask &= ~BIT(scratch_vcpu[1]);
139	}
140
141	/* Pick a scratch register to use as a temp for saving state */
142	if (kscratch_mask) {
143		scratch_tmp[0] = c0_kscratch();
144		scratch_tmp[1] = ffs(kscratch_mask) - 1;
145		kscratch_mask &= ~BIT(scratch_tmp[1]);
146	}
147
148	return 0;
149}
150
151static void kvm_mips_build_save_scratch(u32 **p, unsigned int tmp,
152					unsigned int frame)
153{
154	/* Save the VCPU scratch register value in cp0_epc of the stack frame */
155	UASM_i_MFC0(p, tmp, scratch_vcpu[0], scratch_vcpu[1]);
156	UASM_i_SW(p, tmp, offsetof(struct pt_regs, cp0_epc), frame);
157
158	/* Save the temp scratch register value in cp0_cause of stack frame */
159	if (scratch_tmp[0] == c0_kscratch()) {
160		UASM_i_MFC0(p, tmp, scratch_tmp[0], scratch_tmp[1]);
161		UASM_i_SW(p, tmp, offsetof(struct pt_regs, cp0_cause), frame);
162	}
163}
164
165static void kvm_mips_build_restore_scratch(u32 **p, unsigned int tmp,
166					   unsigned int frame)
167{
168	/*
169	 * Restore host scratch register values saved by
170	 * kvm_mips_build_save_scratch().
171	 */
172	UASM_i_LW(p, tmp, offsetof(struct pt_regs, cp0_epc), frame);
173	UASM_i_MTC0(p, tmp, scratch_vcpu[0], scratch_vcpu[1]);
174
175	if (scratch_tmp[0] == c0_kscratch()) {
176		UASM_i_LW(p, tmp, offsetof(struct pt_regs, cp0_cause), frame);
177		UASM_i_MTC0(p, tmp, scratch_tmp[0], scratch_tmp[1]);
178	}
179}
180
181/**
182 * build_set_exc_base() - Assemble code to write exception base address.
183 * @p:		Code buffer pointer.
184 * @reg:	Source register (generated code may set WG bit in @reg).
185 *
186 * Assemble code to modify the exception base address in the EBase register,
187 * using the appropriately sized access and setting the WG bit if necessary.
188 */
189static inline void build_set_exc_base(u32 **p, unsigned int reg)
190{
191	if (cpu_has_ebase_wg) {
192		/* Set WG so that all the bits get written */
193		uasm_i_ori(p, reg, reg, MIPS_EBASE_WG);
194		UASM_i_MTC0(p, reg, C0_EBASE);
195	} else {
196		uasm_i_mtc0(p, reg, C0_EBASE);
197	}
198}
199
200/**
201 * kvm_mips_build_vcpu_run() - Assemble function to start running a guest VCPU.
202 * @addr:	Address to start writing code.
203 *
204 * Assemble the start of the vcpu_run function to run a guest VCPU. The function
205 * conforms to the following prototype:
206 *
207 * int vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu);
208 *
209 * The exit from the guest and return to the caller is handled by the code
210 * generated by kvm_mips_build_ret_to_host().
211 *
212 * Returns:	Next address after end of written function.
213 */
214void *kvm_mips_build_vcpu_run(void *addr)
215{
216	u32 *p = addr;
217	unsigned int i;
218
219	/*
220	 * A0: run
221	 * A1: vcpu
222	 */
223
224	/* k0/k1 not being used in host kernel context */
225	UASM_i_ADDIU(&p, K1, SP, -(int)sizeof(struct pt_regs));
226	for (i = 16; i < 32; ++i) {
227		if (i == 24)
228			i = 28;
229		UASM_i_SW(&p, i, offsetof(struct pt_regs, regs[i]), K1);
230	}
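	/* i.e. save s0-s7 (regs 16-23) and gp/sp/fp/ra (28-31); t8/t9/k0/k1 (24-27) are skipped */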
231
232	/* Save host status */
233	uasm_i_mfc0(&p, V0, C0_STATUS);
234	UASM_i_SW(&p, V0, offsetof(struct pt_regs, cp0_status), K1);
235
236	/* Save scratch registers, will be used to store pointer to vcpu etc */
237	kvm_mips_build_save_scratch(&p, V1, K1);
238
239	/* VCPU scratch register has pointer to vcpu */
240	UASM_i_MTC0(&p, A1, scratch_vcpu[0], scratch_vcpu[1]);
241
242	/* Offset into vcpu->arch */
243	UASM_i_ADDIU(&p, K1, A1, offsetof(struct kvm_vcpu, arch));
244
245	/*
246	 * Save the host stack to VCPU, used for exception processing
247	 * when we exit from the Guest
248	 */
249	UASM_i_SW(&p, SP, offsetof(struct kvm_vcpu_arch, host_stack), K1);
250
251	/* Save the kernel gp as well */
252	UASM_i_SW(&p, GP, offsetof(struct kvm_vcpu_arch, host_gp), K1);
253
254	/*
255	 * Setup status register for running the guest in UM, interrupts
256	 * are disabled
257	 */
258	UASM_i_LA(&p, K0, ST0_EXL | KSU_USER | ST0_BEV | ST0_KX_IF_64);
259	uasm_i_mtc0(&p, K0, C0_STATUS);
260	uasm_i_ehb(&p);
261
262	/* load up the new EBASE */
263	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, guest_ebase), K1);
264	build_set_exc_base(&p, K0);
265
266	/*
267	 * Now that the new EBASE has been loaded, unset BEV, set
268	 * interrupt mask as it was but make sure that timer interrupts
269	 * are enabled
270	 */
271	uasm_i_addiu(&p, K0, ZERO, ST0_EXL | KSU_USER | ST0_IE | ST0_KX_IF_64);
272	uasm_i_andi(&p, V0, V0, ST0_IM);
273	uasm_i_or(&p, K0, K0, V0);
274	uasm_i_mtc0(&p, K0, C0_STATUS);
275	uasm_i_ehb(&p);
276
277	p = kvm_mips_build_enter_guest(p);
278
279	return p;
280}
281
282/**
283 * kvm_mips_build_enter_guest() - Assemble code to resume guest execution.
284 * @addr:	Address to start writing code.
285 *
286 * Assemble the code to resume guest execution. This code is common between the
287 * initial entry into the guest from the host, and returning from the exit
288 * handler back to the guest.
289 *
290 * Returns:	Next address after end of written function.
291 */
292static void *kvm_mips_build_enter_guest(void *addr)
293{
294	u32 *p = addr;
295	unsigned int i;
296	struct uasm_label labels[2];
297	struct uasm_reloc relocs[2];
298	struct uasm_label __maybe_unused *l = labels;
299	struct uasm_reloc __maybe_unused *r = relocs;
300
301	memset(labels, 0, sizeof(labels));
302	memset(relocs, 0, sizeof(relocs));
303
304	/* Set Guest EPC */
305	UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, pc), K1);
306	UASM_i_MTC0(&p, T0, C0_EPC);
307
308#ifdef CONFIG_KVM_MIPS_VZ
309	/* Save normal linux process pgd (VZ guarantees pgd_reg is set) */
310	UASM_i_MFC0(&p, K0, c0_kscratch(), pgd_reg);
311	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_pgd), K1);
312
313	/*
314	 * Set up KVM GPA pgd.
315	 * This does roughly the same as TLBMISS_HANDLER_SETUP_PGD():
316	 * - call tlbmiss_handler_setup_pgd(mm->pgd)
317	 * - write mm->pgd into CP0_PWBase
318	 *
319	 * We keep S0 pointing at struct kvm so we can load the ASID below.
320	 */
321	UASM_i_LW(&p, S0, (int)offsetof(struct kvm_vcpu, kvm) -
322			  (int)offsetof(struct kvm_vcpu, arch), K1);
323	UASM_i_LW(&p, A0, offsetof(struct kvm, arch.gpa_mm.pgd), S0);
324	UASM_i_LA(&p, T9, (unsigned long)tlbmiss_handler_setup_pgd);
325	uasm_i_jalr(&p, RA, T9);
326	/* delay slot */
327	if (cpu_has_htw)
328		UASM_i_MTC0(&p, A0, C0_PWBASE);
329	else
330		uasm_i_nop(&p);
331
332	/* Set GM bit to setup eret to VZ guest context */
333	uasm_i_addiu(&p, V1, ZERO, 1);
334	uasm_i_mfc0(&p, K0, C0_GUESTCTL0);
335	uasm_i_ins(&p, K0, V1, MIPS_GCTL0_GM_SHIFT, 1);
336	uasm_i_mtc0(&p, K0, C0_GUESTCTL0);
337
338	if (cpu_has_guestid) {
339		/*
340		 * Set root mode GuestID, so that root TLB refill handler can
341		 * use the correct GuestID in the root TLB.
342		 */
343
344		/* Get current GuestID */
345		uasm_i_mfc0(&p, T0, C0_GUESTCTL1);
346		/* Set GuestCtl1.RID = GuestCtl1.ID */
347		uasm_i_ext(&p, T1, T0, MIPS_GCTL1_ID_SHIFT,
348			   MIPS_GCTL1_ID_WIDTH);
349		uasm_i_ins(&p, T0, T1, MIPS_GCTL1_RID_SHIFT,
350			   MIPS_GCTL1_RID_WIDTH);
351		uasm_i_mtc0(&p, T0, C0_GUESTCTL1);
352
353		/* GuestID handles dealiasing so we don't need to touch ASID */
354		goto skip_asid_restore;
355	}
356
357	/* Root ASID Dealias (RAD) */
358
359	/* Save host ASID */
360	UASM_i_MFC0(&p, K0, C0_ENTRYHI);
361	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_entryhi),
362		  K1);
363
364	/* Set the root ASID for the Guest */
365	UASM_i_ADDIU(&p, T1, S0,
366		     offsetof(struct kvm, arch.gpa_mm.context.asid));
367#else
368	/* Set the ASID for the Guest Kernel or User */
369	UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, cop0), K1);
370	UASM_i_LW(&p, T0, offsetof(struct mips_coproc, reg[MIPS_CP0_STATUS][0]),
371		  T0);
372	uasm_i_andi(&p, T0, T0, KSU_USER | ST0_ERL | ST0_EXL);
373	uasm_i_xori(&p, T0, T0, KSU_USER);
374	uasm_il_bnez(&p, &r, T0, label_kernel_asid);
375	 UASM_i_ADDIU(&p, T1, K1, offsetof(struct kvm_vcpu_arch,
376					   guest_kernel_mm.context.asid));
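	/* note: throughout this file, an instruction indented by one extra space sits in the preceding branch's delay slot */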
377	/* else user */
378	UASM_i_ADDIU(&p, T1, K1, offsetof(struct kvm_vcpu_arch,
379					  guest_user_mm.context.asid));
380	uasm_l_kernel_asid(&l, p);
381#endif
382
383	/* t1: contains the base of the ASID array, need to get the cpu id  */
384	/* smp_processor_id */
385	uasm_i_lw(&p, T2, offsetof(struct thread_info, cpu), GP);
386	/* index the ASID array */
387	uasm_i_sll(&p, T2, T2, ilog2(sizeof(long)));
388	UASM_i_ADDU(&p, T3, T1, T2);
389	UASM_i_LW(&p, K0, 0, T3);
390#ifdef CONFIG_MIPS_ASID_BITS_VARIABLE
391	/*
392	 * reuse ASID array offset
393	 * cpuinfo_mips is a multiple of sizeof(long)
394	 */
395	uasm_i_addiu(&p, T3, ZERO, sizeof(struct cpuinfo_mips)/sizeof(long));
396	uasm_i_mul(&p, T2, T2, T3);
397
398	UASM_i_LA_mostly(&p, AT, (long)&cpu_data[0].asid_mask);
399	UASM_i_ADDU(&p, AT, AT, T2);
400	UASM_i_LW(&p, T2, uasm_rel_lo((long)&cpu_data[0].asid_mask), AT);
401	uasm_i_and(&p, K0, K0, T2);
402#else
403	uasm_i_andi(&p, K0, K0, MIPS_ENTRYHI_ASID);
404#endif
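	/* k0 now holds this CPU's guest ASID, masked ready for the EntryHi write below */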
405
406#ifndef CONFIG_KVM_MIPS_VZ
407	/*
408	 * Set up KVM T&E GVA pgd.
409	 * This does roughly the same as TLBMISS_HANDLER_SETUP_PGD():
410	 * - call tlbmiss_handler_setup_pgd(mm->pgd)
411	 * - but skips write into CP0_PWBase for now
412	 */
413	UASM_i_LW(&p, A0, (int)offsetof(struct mm_struct, pgd) -
414			  (int)offsetof(struct mm_struct, context.asid), T1);
415
416	UASM_i_LA(&p, T9, (unsigned long)tlbmiss_handler_setup_pgd);
417	uasm_i_jalr(&p, RA, T9);
418	 uasm_i_mtc0(&p, K0, C0_ENTRYHI);
419#else
420	/* Set up KVM VZ root ASID (!guestid) */
421	uasm_i_mtc0(&p, K0, C0_ENTRYHI);
422skip_asid_restore:
423#endif
424	uasm_i_ehb(&p);
425
426	/* Disable RDHWR access */
427	uasm_i_mtc0(&p, ZERO, C0_HWRENA);
428
429	/* load the guest context from VCPU and return */
430	for (i = 1; i < 32; ++i) {
431		/* Guest k0/k1 loaded later */
432		if (i == K0 || i == K1)
433			continue;
434		UASM_i_LW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), K1);
435	}
436
437#ifndef CONFIG_CPU_MIPSR6
438	/* Restore hi/lo */
439	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, hi), K1);
440	uasm_i_mthi(&p, K0);
441
442	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, lo), K1);
443	uasm_i_mtlo(&p, K0);
444#endif
445
446	/* Restore the guest's k0/k1 registers */
447	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, gprs[K0]), K1);
448	UASM_i_LW(&p, K1, offsetof(struct kvm_vcpu_arch, gprs[K1]), K1);
449
450	/* Jump to guest */
451	uasm_i_eret(&p);
452
453	uasm_resolve_relocs(relocs, labels);
454
455	return p;
456}
457
458/**
459 * kvm_mips_build_tlb_refill_exception() - Assemble TLB refill handler.
460 * @addr:	Address to start writing code.
461 * @handler:	Address of common handler (within range of @addr).
462 *
463 * Assemble TLB refill exception fast path handler for guest execution.
464 *
465 * Returns:	Next address after end of written function.
466 */
467void *kvm_mips_build_tlb_refill_exception(void *addr, void *handler)
468{
469	u32 *p = addr;
470	struct uasm_label labels[2];
471	struct uasm_reloc relocs[2];
472	struct uasm_label *l = labels;
473	struct uasm_reloc *r = relocs;
474
475	memset(labels, 0, sizeof(labels));
476	memset(relocs, 0, sizeof(relocs));
477
478	/* Save guest k1 into scratch register */
479	UASM_i_MTC0(&p, K1, scratch_tmp[0], scratch_tmp[1]);
480
481	/* Get the VCPU pointer from the VCPU scratch register */
482	UASM_i_MFC0(&p, K1, scratch_vcpu[0], scratch_vcpu[1]);
483
484	/* Save guest k0 into VCPU structure */
485	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu, arch.gprs[K0]), K1);
486
487	/*
488	 * Some of the common tlbex code uses current_cpu_type(). For KVM we
489	 * assume symmetry and just disable preemption to silence the warning.
490	 */
491	preempt_disable();
492
493	/*
494	 * Now for the actual refill bit. A lot of this can be common with the
495	 * Linux TLB refill handler, however we don't need to handle so many
496	 * cases. We only need to handle user mode refills, and user mode runs
497	 * with 32-bit addressing.
498	 *
499	 * Therefore the branch to label_vmalloc generated by build_get_pmde64()
500	 * that isn't resolved should never actually get taken and is harmless
501	 * to leave in place for now.
502	 */
503
504#ifdef CONFIG_64BIT
505	build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */
506#else
507	build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
508#endif
509
510	/* we don't support huge pages yet */
511
512	build_get_ptep(&p, K0, K1);
513	build_update_entries(&p, K0, K1);
514	build_tlb_write_entry(&p, &l, &r, tlb_random);
515
516	preempt_enable();
517
518	/* Get the VCPU pointer from the VCPU scratch register again */
519	UASM_i_MFC0(&p, K1, scratch_vcpu[0], scratch_vcpu[1]);
520
521	/* Restore the guest's k0/k1 registers */
522	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu, arch.gprs[K0]), K1);
523	uasm_i_ehb(&p);
524	UASM_i_MFC0(&p, K1, scratch_tmp[0], scratch_tmp[1]);
525
526	/* Jump to guest */
527	uasm_i_eret(&p);
528
529	return p;
530}
531
532/**
533 * kvm_mips_build_exception() - Assemble first level guest exception handler.
534 * @addr:	Address to start writing code.
535 * @handler:	Address of common handler (within range of @addr).
536 *
537 * Assemble exception vector code for guest execution. The generated vector will
538 * branch to the common exception handler generated by kvm_mips_build_exit().
539 *
540 * Returns:	Next address after end of written function.
541 */
542void *kvm_mips_build_exception(void *addr, void *handler)
543{
544	u32 *p = addr;
545	struct uasm_label labels[2];
546	struct uasm_reloc relocs[2];
547	struct uasm_label *l = labels;
548	struct uasm_reloc *r = relocs;
549
550	memset(labels, 0, sizeof(labels));
551	memset(relocs, 0, sizeof(relocs));
552
553	/* Save guest k1 into scratch register */
554	UASM_i_MTC0(&p, K1, scratch_tmp[0], scratch_tmp[1]);
555
556	/* Get the VCPU pointer from the VCPU scratch register */
557	UASM_i_MFC0(&p, K1, scratch_vcpu[0], scratch_vcpu[1]);
558	UASM_i_ADDIU(&p, K1, K1, offsetof(struct kvm_vcpu, arch));
559
560	/* Save guest k0 into VCPU structure */
561	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, gprs[K0]), K1);
562
563	/* Branch to the common handler */
564	uasm_il_b(&p, &r, label_exit_common);
565	 uasm_i_nop(&p);
566
567	uasm_l_exit_common(&l, handler);
568	uasm_resolve_relocs(relocs, labels);
569
570	return p;
571}
572
573/**
574 * kvm_mips_build_exit() - Assemble common guest exit handler.
575 * @addr:	Address to start writing code.
576 *
577 * Assemble the generic guest exit handling code. This is called by the
578 * exception vectors (generated by kvm_mips_build_exception()), and calls
579 * kvm_mips_handle_exit(), then either resumes the guest or returns to the host
580 * depending on the return value.
581 *
582 * Returns:	Next address after end of written function.
583 */
584void *kvm_mips_build_exit(void *addr)
585{
586	u32 *p = addr;
587	unsigned int i;
588	struct uasm_label labels[3];
589	struct uasm_reloc relocs[3];
590	struct uasm_label *l = labels;
591	struct uasm_reloc *r = relocs;
592
593	memset(labels, 0, sizeof(labels));
594	memset(relocs, 0, sizeof(relocs));
595
596	/*
597	 * Generic Guest exception handler. We end up here when the guest
598	 * does something that causes a trap to kernel mode.
599	 *
600	 * Both k0/k1 registers will have already been saved (k0 into the vcpu
601	 * structure, and k1 into the scratch_tmp register).
602	 *
603	 * The k1 register will already contain the kvm_vcpu_arch pointer.
604	 */
605
606	/* Start saving Guest context to VCPU */
607	for (i = 0; i < 32; ++i) {
608		/* Guest k0/k1 saved later */
609		if (i == K0 || i == K1)
610			continue;
611		UASM_i_SW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), K1);
612	}
613
614#ifndef CONFIG_CPU_MIPSR6
615	/* We need to save hi/lo and restore them on the way out */
616	uasm_i_mfhi(&p, T0);
617	UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, hi), K1);
618
619	uasm_i_mflo(&p, T0);
620	UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, lo), K1);
621#endif
622
623	/* Finally save guest k1 to VCPU */
624	uasm_i_ehb(&p);
625	UASM_i_MFC0(&p, T0, scratch_tmp[0], scratch_tmp[1]);
626	UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, gprs[K1]), K1);
627
628	/* Now that context has been saved, we can use other registers */
629
630	/* Restore vcpu */
631	UASM_i_MFC0(&p, S1, scratch_vcpu[0], scratch_vcpu[1]);
632
633	/* Restore run (vcpu->run) */
634	UASM_i_LW(&p, S0, offsetof(struct kvm_vcpu, run), S1);
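	/* run/vcpu stay in callee-saved s0/s1 so they survive the call to kvm_mips_handle_exit() below */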
635
636	/*
637	 * Save Host level EPC, BadVaddr and Cause to VCPU, useful to process
638	 * the exception
639	 */
640	UASM_i_MFC0(&p, K0, C0_EPC);
641	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, pc), K1);
642
643	UASM_i_MFC0(&p, K0, C0_BADVADDR);
644	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_cp0_badvaddr),
645		  K1);
646
647	uasm_i_mfc0(&p, K0, C0_CAUSE);
648	uasm_i_sw(&p, K0, offsetof(struct kvm_vcpu_arch, host_cp0_cause), K1);
649
650	if (cpu_has_badinstr) {
651		uasm_i_mfc0(&p, K0, C0_BADINSTR);
652		uasm_i_sw(&p, K0, offsetof(struct kvm_vcpu_arch,
653					   host_cp0_badinstr), K1);
654	}
655
656	if (cpu_has_badinstrp) {
657		uasm_i_mfc0(&p, K0, C0_BADINSTRP);
658		uasm_i_sw(&p, K0, offsetof(struct kvm_vcpu_arch,
659					   host_cp0_badinstrp), K1);
660	}
661
662	/* Now restore the host state just enough to run the handlers */
663
664	/* Switch EBASE to the one used by Linux */
665	/* load up the host EBASE */
666	uasm_i_mfc0(&p, V0, C0_STATUS);
667
668	uasm_i_lui(&p, AT, ST0_BEV >> 16);
669	uasm_i_or(&p, K0, V0, AT);
670
671	uasm_i_mtc0(&p, K0, C0_STATUS);
672	uasm_i_ehb(&p);
673
674	UASM_i_LA_mostly(&p, K0, (long)&ebase);
675	UASM_i_LW(&p, K0, uasm_rel_lo((long)&ebase), K0);
676	build_set_exc_base(&p, K0);
677
678	if (raw_cpu_has_fpu) {
679		/*
680		 * If FPU is enabled, save FCR31 and clear it so that later
681		 * ctc1's don't trigger FPE for pending exceptions.
682		 */
683		uasm_i_lui(&p, AT, ST0_CU1 >> 16);
684		uasm_i_and(&p, V1, V0, AT);
685		uasm_il_beqz(&p, &r, V1, label_fpu_1);
686		 uasm_i_nop(&p);
687		uasm_i_cfc1(&p, T0, 31);
688		uasm_i_sw(&p, T0, offsetof(struct kvm_vcpu_arch, fpu.fcr31),
689			  K1);
690		uasm_i_ctc1(&p, ZERO, 31);
691		uasm_l_fpu_1(&l, p);
692	}
693
694	if (cpu_has_msa) {
695		/*
696		 * If MSA is enabled, save MSACSR and clear it so that later
697		 * instructions don't trigger MSAFPE for pending exceptions.
698		 */
699		uasm_i_mfc0(&p, T0, C0_CONFIG5);
700		uasm_i_ext(&p, T0, T0, 27, 1); /* MIPS_CONF5_MSAEN */
701		uasm_il_beqz(&p, &r, T0, label_msa_1);
702		 uasm_i_nop(&p);
703		uasm_i_cfcmsa(&p, T0, MSA_CSR);
704		uasm_i_sw(&p, T0, offsetof(struct kvm_vcpu_arch, fpu.msacsr),
705			  K1);
706		uasm_i_ctcmsa(&p, MSA_CSR, ZERO);
707		uasm_l_msa_1(&l, p);
708	}
709
710#ifdef CONFIG_KVM_MIPS_VZ
711	/* Restore host ASID */
712	if (!cpu_has_guestid) {
713		UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, host_entryhi),
714			  K1);
715		UASM_i_MTC0(&p, K0, C0_ENTRYHI);
716	}
717
718	/*
719	 * Set up normal Linux process pgd.
720	 * This does roughly the same as TLBMISS_HANDLER_SETUP_PGD():
721	 * - call tlbmiss_handler_setup_pgd(mm->pgd)
722	 * - write mm->pgd into CP0_PWBase
723	 */
724	UASM_i_LW(&p, A0,
725		  offsetof(struct kvm_vcpu_arch, host_pgd), K1);
726	UASM_i_LA(&p, T9, (unsigned long)tlbmiss_handler_setup_pgd);
727	uasm_i_jalr(&p, RA, T9);
728	/* delay slot */
729	if (cpu_has_htw)
730		UASM_i_MTC0(&p, A0, C0_PWBASE);
731	else
732		uasm_i_nop(&p);
733
734	/* Clear GM bit so we don't enter guest mode when EXL is cleared */
735	uasm_i_mfc0(&p, K0, C0_GUESTCTL0);
736	uasm_i_ins(&p, K0, ZERO, MIPS_GCTL0_GM_SHIFT, 1);
737	uasm_i_mtc0(&p, K0, C0_GUESTCTL0);
738
739	/* Save GuestCtl0 so we can access GExcCode after CPU migration */
740	uasm_i_sw(&p, K0,
741		  offsetof(struct kvm_vcpu_arch, host_cp0_guestctl0), K1);
742
743	if (cpu_has_guestid) {
744		/*
745		 * Clear root mode GuestID, so that root TLB operations use the
746		 * root GuestID in the root TLB.
747		 */
748		uasm_i_mfc0(&p, T0, C0_GUESTCTL1);
749		/* Set GuestCtl1.RID = MIPS_GCTL1_ROOT_GUESTID (i.e. 0) */
750		uasm_i_ins(&p, T0, ZERO, MIPS_GCTL1_RID_SHIFT,
751			   MIPS_GCTL1_RID_WIDTH);
752		uasm_i_mtc0(&p, T0, C0_GUESTCTL1);
753	}
754#endif
755
756	/* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
757	uasm_i_addiu(&p, AT, ZERO, ~(ST0_EXL | KSU_USER | ST0_IE));
758	uasm_i_and(&p, V0, V0, AT);
759	uasm_i_lui(&p, AT, ST0_CU0 >> 16);
760	uasm_i_or(&p, V0, V0, AT);
761#ifdef CONFIG_64BIT
762	uasm_i_ori(&p, V0, V0, ST0_SX | ST0_UX);
763#endif
764	uasm_i_mtc0(&p, V0, C0_STATUS);
765	uasm_i_ehb(&p);
766
767	/* Load up host GP */
768	UASM_i_LW(&p, GP, offsetof(struct kvm_vcpu_arch, host_gp), K1);
769
770	/* Need a stack before we can jump to "C" */
771	UASM_i_LW(&p, SP, offsetof(struct kvm_vcpu_arch, host_stack), K1);
772
773	/* Saved host state */
774	UASM_i_ADDIU(&p, SP, SP, -(int)sizeof(struct pt_regs));
775
776	/*
777	 * XXXKYMA do we need to load the host ASID, maybe not because the
778	 * kernel entries are marked GLOBAL, need to verify
779	 */
780
781	/* Restore host scratch registers, as we'll have clobbered them */
782	kvm_mips_build_restore_scratch(&p, K0, SP);
783
784	/* Restore RDHWR access */
785	UASM_i_LA_mostly(&p, K0, (long)&hwrena);
786	uasm_i_lw(&p, K0, uasm_rel_lo((long)&hwrena), K0);
787	uasm_i_mtc0(&p, K0, C0_HWRENA);
788
789	/* Jump to handler */
790	/*
791	 * XXXKYMA: not sure if this is safe, how large is the stack??
792	 * Now jump to the kvm_mips_handle_exit() to see if we can deal
793	 * with this in the kernel
794	 */
795	uasm_i_move(&p, A0, S0);
796	uasm_i_move(&p, A1, S1);
797	UASM_i_LA(&p, T9, (unsigned long)kvm_mips_handle_exit);
798	uasm_i_jalr(&p, RA, T9);
799	 UASM_i_ADDIU(&p, SP, SP, -CALLFRAME_SIZ);
800
801	uasm_resolve_relocs(relocs, labels);
802
803	p = kvm_mips_build_ret_from_exit(p);
804
805	return p;
806}
807
808/**
809 * kvm_mips_build_ret_from_exit() - Assemble guest exit return handler.
810 * @addr:	Address to start writing code.
811 *
812 * Assemble the code to handle the return from kvm_mips_handle_exit(), either
813 * resuming the guest or returning to the host depending on the return value.
814 *
815 * Returns:	Next address after end of written function.
816 */
817static void *kvm_mips_build_ret_from_exit(void *addr)
818{
819	u32 *p = addr;
820	struct uasm_label labels[2];
821	struct uasm_reloc relocs[2];
822	struct uasm_label *l = labels;
823	struct uasm_reloc *r = relocs;
824
825	memset(labels, 0, sizeof(labels));
826	memset(relocs, 0, sizeof(relocs));
827
 828	/* Return from handler. Make sure interrupts are disabled */
829	uasm_i_di(&p, ZERO);
830	uasm_i_ehb(&p);
831
832	/*
833	 * XXXKYMA: k0/k1 could have been blown away if we processed
834	 * an exception while we were handling the exception from the
835	 * guest, reload k1
836	 */
837
838	uasm_i_move(&p, K1, S1);
839	UASM_i_ADDIU(&p, K1, K1, offsetof(struct kvm_vcpu, arch));
840
841	/*
842	 * Check return value, should tell us if we are returning to the
 843	 * host (handle I/O etc.) or resuming the guest
844	 */
845	uasm_i_andi(&p, T0, V0, RESUME_HOST);
846	uasm_il_bnez(&p, &r, T0, label_return_to_host);
847	 uasm_i_nop(&p);
848
849	p = kvm_mips_build_ret_to_guest(p);
850
851	uasm_l_return_to_host(&l, p);
852	p = kvm_mips_build_ret_to_host(p);
853
854	uasm_resolve_relocs(relocs, labels);
855
856	return p;
857}
858
859/**
860 * kvm_mips_build_ret_to_guest() - Assemble code to return to the guest.
861 * @addr:	Address to start writing code.
862 *
863 * Assemble the code to handle return from the guest exit handler
864 * (kvm_mips_handle_exit()) back to the guest.
865 *
866 * Returns:	Next address after end of written function.
867 */
868static void *kvm_mips_build_ret_to_guest(void *addr)
869{
870	u32 *p = addr;
871
872	/* Put the saved pointer to vcpu (s1) back into the scratch register */
873	UASM_i_MTC0(&p, S1, scratch_vcpu[0], scratch_vcpu[1]);
874
875	/* Load up the Guest EBASE to minimize the window where BEV is set */
876	UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, guest_ebase), K1);
877
878	/* Switch EBASE back to the one used by KVM */
879	uasm_i_mfc0(&p, V1, C0_STATUS);
880	uasm_i_lui(&p, AT, ST0_BEV >> 16);
881	uasm_i_or(&p, K0, V1, AT);
882	uasm_i_mtc0(&p, K0, C0_STATUS);
883	uasm_i_ehb(&p);
884	build_set_exc_base(&p, T0);
885
886	/* Setup status register for running guest in UM */
887	uasm_i_ori(&p, V1, V1, ST0_EXL | KSU_USER | ST0_IE);
888	UASM_i_LA(&p, AT, ~(ST0_CU0 | ST0_MX | ST0_SX | ST0_UX));
889	uasm_i_and(&p, V1, V1, AT);
890	uasm_i_mtc0(&p, V1, C0_STATUS);
891	uasm_i_ehb(&p);
892
893	p = kvm_mips_build_enter_guest(p);
894
895	return p;
896}
897
898/**
899 * kvm_mips_build_ret_to_host() - Assemble code to return to the host.
900 * @addr:	Address to start writing code.
901 *
902 * Assemble the code to handle return from the guest exit handler
903 * (kvm_mips_handle_exit()) back to the host, i.e. to the caller of the vcpu_run
904 * function generated by kvm_mips_build_vcpu_run().
905 *
906 * Returns:	Next address after end of written function.
907 */
908static void *kvm_mips_build_ret_to_host(void *addr)
909{
910	u32 *p = addr;
911	unsigned int i;
912
913	/* EBASE is already pointing to Linux */
914	UASM_i_LW(&p, K1, offsetof(struct kvm_vcpu_arch, host_stack), K1);
915	UASM_i_ADDIU(&p, K1, K1, -(int)sizeof(struct pt_regs));
916
917	/*
918	 * r2/v0 is the return code, shift it down by 2 (arithmetic)
919	 * to recover the err code
920	 */
921	uasm_i_sra(&p, K0, V0, 2);
922	uasm_i_move(&p, V0, K0);
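	/* (the low two bits of v0 carried the RESUME_* flags tested in kvm_mips_build_ret_from_exit()) */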
923
924	/* Load context saved on the host stack */
925	for (i = 16; i < 31; ++i) {
926		if (i == 24)
927			i = 28;
928		UASM_i_LW(&p, i, offsetof(struct pt_regs, regs[i]), K1);
929	}
930
931	/* Restore RDHWR access */
932	UASM_i_LA_mostly(&p, K0, (long)&hwrena);
933	uasm_i_lw(&p, K0, uasm_rel_lo((long)&hwrena), K0);
934	uasm_i_mtc0(&p, K0, C0_HWRENA);
935
936	/* Restore RA, which is the address we will return to */
937	UASM_i_LW(&p, RA, offsetof(struct pt_regs, regs[RA]), K1);
938	uasm_i_jr(&p, RA);
939	 uasm_i_nop(&p);
940
941	return p;
942}
943
Linux v6.9.4: arch/mips/kvm/entry.c
  1/*
  2 * This file is subject to the terms and conditions of the GNU General Public
  3 * License.  See the file "COPYING" in the main directory of this archive
  4 * for more details.
  5 *
  6 * Generation of main entry point for the guest, exception handling.
  7 *
  8 * Copyright (C) 2012  MIPS Technologies, Inc.
  9 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 10 *
 11 * Copyright (C) 2016 Imagination Technologies Ltd.
 12 */
 13
 14#include <linux/kvm_host.h>
 15#include <linux/log2.h>
 16#include <asm/mipsregs.h>
 17#include <asm/mmu_context.h>
 18#include <asm/msa.h>
 19#include <asm/regdef.h>
 20#include <asm/setup.h>
 21#include <asm/tlbex.h>
 22#include <asm/uasm.h>
 23
 24#define CALLFRAME_SIZ   32
 25
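/*
 * As in v5.4, these are { CP0 register, select } pairs; C0_DDATALO now
 * comes from <asm/mipsregs.h> and likewise expands to two values.
 */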
 26static unsigned int scratch_vcpu[2] = { C0_DDATALO };
 27static unsigned int scratch_tmp[2] = { C0_ERROREPC };
 28
 29enum label_id {
 30	label_fpu_1 = 1,
 31	label_msa_1,
 32	label_return_to_host,
 33	label_kernel_asid,
 34	label_exit_common,
 35};
 36
 37UASM_L_LA(_fpu_1)
 38UASM_L_LA(_msa_1)
 39UASM_L_LA(_return_to_host)
 40UASM_L_LA(_kernel_asid)
 41UASM_L_LA(_exit_common)
 42
 43static void *kvm_mips_build_enter_guest(void *addr);
 44static void *kvm_mips_build_ret_from_exit(void *addr);
 45static void *kvm_mips_build_ret_to_guest(void *addr);
 46static void *kvm_mips_build_ret_to_host(void *addr);
 47
 48/*
 49 * The version of this function in tlbex.c uses current_cpu_type(), but for KVM
 50 * we assume symmetry.
 51 */
 52static int c0_kscratch(void)
 53{
 54	return 31;
 55}
 56
 57/**
 58 * kvm_mips_entry_setup() - Perform global setup for entry code.
 59 *
 60 * Perform global setup for entry code, such as choosing a scratch register.
 61 *
 62 * Returns:	0 on success.
 63 *		-errno on failure.
 64 */
 65int kvm_mips_entry_setup(void)
 66{
 67	/*
 68	 * We prefer to use KScratchN registers if they are available over the
 69	 * defaults above, which may not work on all cores.
 70	 */
 71	unsigned int kscratch_mask = cpu_data[0].kscratch_mask;
 72
 73	if (pgd_reg != -1)
 74		kscratch_mask &= ~BIT(pgd_reg);
 75
 76	/* Pick a scratch register for storing VCPU */
 77	if (kscratch_mask) {
 78		scratch_vcpu[0] = c0_kscratch();
 79		scratch_vcpu[1] = ffs(kscratch_mask) - 1;
 80		kscratch_mask &= ~BIT(scratch_vcpu[1]);
 81	}
 82
 83	/* Pick a scratch register to use as a temp for saving state */
 84	if (kscratch_mask) {
 85		scratch_tmp[0] = c0_kscratch();
 86		scratch_tmp[1] = ffs(kscratch_mask) - 1;
 87		kscratch_mask &= ~BIT(scratch_tmp[1]);
 88	}
 89
 90	return 0;
 91}
 92
 93static void kvm_mips_build_save_scratch(u32 **p, unsigned int tmp,
 94					unsigned int frame)
 95{
 96	/* Save the VCPU scratch register value in cp0_epc of the stack frame */
 97	UASM_i_MFC0(p, tmp, scratch_vcpu[0], scratch_vcpu[1]);
 98	UASM_i_SW(p, tmp, offsetof(struct pt_regs, cp0_epc), frame);
 99
100	/* Save the temp scratch register value in cp0_cause of stack frame */
101	if (scratch_tmp[0] == c0_kscratch()) {
102		UASM_i_MFC0(p, tmp, scratch_tmp[0], scratch_tmp[1]);
103		UASM_i_SW(p, tmp, offsetof(struct pt_regs, cp0_cause), frame);
104	}
105}
106
107static void kvm_mips_build_restore_scratch(u32 **p, unsigned int tmp,
108					   unsigned int frame)
109{
110	/*
111	 * Restore host scratch register values saved by
112	 * kvm_mips_build_save_scratch().
113	 */
114	UASM_i_LW(p, tmp, offsetof(struct pt_regs, cp0_epc), frame);
115	UASM_i_MTC0(p, tmp, scratch_vcpu[0], scratch_vcpu[1]);
116
117	if (scratch_tmp[0] == c0_kscratch()) {
118		UASM_i_LW(p, tmp, offsetof(struct pt_regs, cp0_cause), frame);
119		UASM_i_MTC0(p, tmp, scratch_tmp[0], scratch_tmp[1]);
120	}
121}
122
123/**
124 * build_set_exc_base() - Assemble code to write exception base address.
125 * @p:		Code buffer pointer.
126 * @reg:	Source register (generated code may set WG bit in @reg).
127 *
128 * Assemble code to modify the exception base address in the EBase register,
129 * using the appropriately sized access and setting the WG bit if necessary.
130 */
131static inline void build_set_exc_base(u32 **p, unsigned int reg)
132{
133	if (cpu_has_ebase_wg) {
134		/* Set WG so that all the bits get written */
135		uasm_i_ori(p, reg, reg, MIPS_EBASE_WG);
136		UASM_i_MTC0(p, reg, C0_EBASE);
137	} else {
138		uasm_i_mtc0(p, reg, C0_EBASE);
139	}
140}
141
142/**
143 * kvm_mips_build_vcpu_run() - Assemble function to start running a guest VCPU.
144 * @addr:	Address to start writing code.
145 *
146 * Assemble the start of the vcpu_run function to run a guest VCPU. The function
147 * conforms to the following prototype:
148 *
149 * int vcpu_run(struct kvm_vcpu *vcpu);
150 *
151 * The exit from the guest and return to the caller is handled by the code
152 * generated by kvm_mips_build_ret_to_host().
153 *
154 * Returns:	Next address after end of written function.
155 */
156void *kvm_mips_build_vcpu_run(void *addr)
157{
158	u32 *p = addr;
159	unsigned int i;
160
161	/*
162	 * GPR_A0: vcpu
163	 */
164
165	/* k0/k1 not being used in host kernel context */
166	UASM_i_ADDIU(&p, GPR_K1, GPR_SP, -(int)sizeof(struct pt_regs));
167	for (i = 16; i < 32; ++i) {
168		if (i == 24)
169			i = 28;
170		UASM_i_SW(&p, i, offsetof(struct pt_regs, regs[i]), GPR_K1);
171	}
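	/* as in v5.4: save s0-s7 and gp/sp/fp/ra, skipping t8/t9/k0/k1 (regs 24-27) */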
172
173	/* Save host status */
174	uasm_i_mfc0(&p, GPR_V0, C0_STATUS);
175	UASM_i_SW(&p, GPR_V0, offsetof(struct pt_regs, cp0_status), GPR_K1);
176
177	/* Save scratch registers, will be used to store pointer to vcpu etc */
178	kvm_mips_build_save_scratch(&p, GPR_V1, GPR_K1);
179
180	/* VCPU scratch register has pointer to vcpu */
181	UASM_i_MTC0(&p, GPR_A0, scratch_vcpu[0], scratch_vcpu[1]);
182
183	/* Offset into vcpu->arch */
184	UASM_i_ADDIU(&p, GPR_K1, GPR_A0, offsetof(struct kvm_vcpu, arch));
185
186	/*
187	 * Save the host stack to VCPU, used for exception processing
188	 * when we exit from the Guest
189	 */
190	UASM_i_SW(&p, GPR_SP, offsetof(struct kvm_vcpu_arch, host_stack), GPR_K1);
191
192	/* Save the kernel gp as well */
193	UASM_i_SW(&p, GPR_GP, offsetof(struct kvm_vcpu_arch, host_gp), GPR_K1);
194
195	/*
196	 * Setup status register for running the guest in UM, interrupts
197	 * are disabled
198	 */
199	UASM_i_LA(&p, GPR_K0, ST0_EXL | KSU_USER | ST0_BEV | ST0_KX_IF_64);
200	uasm_i_mtc0(&p, GPR_K0, C0_STATUS);
201	uasm_i_ehb(&p);
202
203	/* load up the new EBASE */
204	UASM_i_LW(&p, GPR_K0, offsetof(struct kvm_vcpu_arch, guest_ebase), GPR_K1);
205	build_set_exc_base(&p, GPR_K0);
206
207	/*
208	 * Now that the new EBASE has been loaded, unset BEV, set
209	 * interrupt mask as it was but make sure that timer interrupts
210	 * are enabled
211	 */
212	uasm_i_addiu(&p, GPR_K0, GPR_ZERO, ST0_EXL | KSU_USER | ST0_IE | ST0_KX_IF_64);
213	uasm_i_andi(&p, GPR_V0, GPR_V0, ST0_IM);
214	uasm_i_or(&p, GPR_K0, GPR_K0, GPR_V0);
215	uasm_i_mtc0(&p, GPR_K0, C0_STATUS);
216	uasm_i_ehb(&p);
217
218	p = kvm_mips_build_enter_guest(p);
219
220	return p;
221}
222
223/**
224 * kvm_mips_build_enter_guest() - Assemble code to resume guest execution.
225 * @addr:	Address to start writing code.
226 *
227 * Assemble the code to resume guest execution. This code is common between the
228 * initial entry into the guest from the host, and returning from the exit
229 * handler back to the guest.
230 *
231 * Returns:	Next address after end of written function.
232 */
233static void *kvm_mips_build_enter_guest(void *addr)
234{
235	u32 *p = addr;
236	unsigned int i;
237	struct uasm_label labels[2];
238	struct uasm_reloc relocs[2];
239	struct uasm_label __maybe_unused *l = labels;
240	struct uasm_reloc __maybe_unused *r = relocs;
241
242	memset(labels, 0, sizeof(labels));
243	memset(relocs, 0, sizeof(relocs));
244
245	/* Set Guest EPC */
246	UASM_i_LW(&p, GPR_T0, offsetof(struct kvm_vcpu_arch, pc), GPR_K1);
247	UASM_i_MTC0(&p, GPR_T0, C0_EPC);
248
249	/* Save normal linux process pgd (VZ guarantees pgd_reg is set) */
250	if (cpu_has_ldpte)
251		UASM_i_MFC0(&p, GPR_K0, C0_PWBASE);
252	else
253		UASM_i_MFC0(&p, GPR_K0, c0_kscratch(), pgd_reg);
254	UASM_i_SW(&p, GPR_K0, offsetof(struct kvm_vcpu_arch, host_pgd), GPR_K1);
255
256	/*
257	 * Set up KVM GPA pgd.
258	 * This does roughly the same as TLBMISS_HANDLER_SETUP_PGD():
259	 * - call tlbmiss_handler_setup_pgd(mm->pgd)
260	 * - write mm->pgd into CP0_PWBase
261	 *
262	 * We keep GPR_S0 pointing at struct kvm so we can load the ASID below.
263	 */
264	UASM_i_LW(&p, GPR_S0, (int)offsetof(struct kvm_vcpu, kvm) -
265			  (int)offsetof(struct kvm_vcpu, arch), GPR_K1);
266	UASM_i_LW(&p, GPR_A0, offsetof(struct kvm, arch.gpa_mm.pgd), GPR_S0);
267	UASM_i_LA(&p, GPR_T9, (unsigned long)tlbmiss_handler_setup_pgd);
268	uasm_i_jalr(&p, GPR_RA, GPR_T9);
269	/* delay slot */
270	if (cpu_has_htw)
271		UASM_i_MTC0(&p, GPR_A0, C0_PWBASE);
272	else
273		uasm_i_nop(&p);
274
275	/* Set GM bit to setup eret to VZ guest context */
276	uasm_i_addiu(&p, GPR_V1, GPR_ZERO, 1);
277	uasm_i_mfc0(&p, GPR_K0, C0_GUESTCTL0);
278	uasm_i_ins(&p, GPR_K0, GPR_V1, MIPS_GCTL0_GM_SHIFT, 1);
279	uasm_i_mtc0(&p, GPR_K0, C0_GUESTCTL0);
280
281	if (cpu_has_guestid) {
282		/*
283		 * Set root mode GuestID, so that root TLB refill handler can
284		 * use the correct GuestID in the root TLB.
285		 */
286
287		/* Get current GuestID */
288		uasm_i_mfc0(&p, GPR_T0, C0_GUESTCTL1);
289		/* Set GuestCtl1.RID = GuestCtl1.ID */
290		uasm_i_ext(&p, GPR_T1, GPR_T0, MIPS_GCTL1_ID_SHIFT,
291			   MIPS_GCTL1_ID_WIDTH);
292		uasm_i_ins(&p, GPR_T0, GPR_T1, MIPS_GCTL1_RID_SHIFT,
293			   MIPS_GCTL1_RID_WIDTH);
294		uasm_i_mtc0(&p, GPR_T0, C0_GUESTCTL1);
295
296		/* GuestID handles dealiasing so we don't need to touch ASID */
297		goto skip_asid_restore;
298	}
299
300	/* Root ASID Dealias (RAD) */
301
302	/* Save host ASID */
303	UASM_i_MFC0(&p, GPR_K0, C0_ENTRYHI);
304	UASM_i_SW(&p, GPR_K0, offsetof(struct kvm_vcpu_arch, host_entryhi),
305		  GPR_K1);
306
307	/* Set the root ASID for the Guest */
308	UASM_i_ADDIU(&p, GPR_T1, GPR_S0,
309		     offsetof(struct kvm, arch.gpa_mm.context.asid));
310
311	/* t1: contains the base of the ASID array, need to get the cpu id  */
312	/* smp_processor_id */
313	uasm_i_lw(&p, GPR_T2, offsetof(struct thread_info, cpu), GPR_GP);
314	/* index the ASID array */
315	uasm_i_sll(&p, GPR_T2, GPR_T2, ilog2(sizeof(long)));
316	UASM_i_ADDU(&p, GPR_T3, GPR_T1, GPR_T2);
317	UASM_i_LW(&p, GPR_K0, 0, GPR_T3);
318#ifdef CONFIG_MIPS_ASID_BITS_VARIABLE
319	/*
320	 * reuse ASID array offset
321	 * cpuinfo_mips is a multiple of sizeof(long)
322	 */
323	uasm_i_addiu(&p, GPR_T3, GPR_ZERO, sizeof(struct cpuinfo_mips)/sizeof(long));
324	uasm_i_mul(&p, GPR_T2, GPR_T2, GPR_T3);
325
326	UASM_i_LA_mostly(&p, GPR_AT, (long)&cpu_data[0].asid_mask);
327	UASM_i_ADDU(&p, GPR_AT, GPR_AT, GPR_T2);
328	UASM_i_LW(&p, GPR_T2, uasm_rel_lo((long)&cpu_data[0].asid_mask), GPR_AT);
329	uasm_i_and(&p, GPR_K0, GPR_K0, GPR_T2);
330#else
331	uasm_i_andi(&p, GPR_K0, GPR_K0, MIPS_ENTRYHI_ASID);
332#endif
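	/* k0 now holds this CPU's guest ASID, ready to be written to EntryHi */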
333
334	/* Set up KVM VZ root ASID (!guestid) */
335	uasm_i_mtc0(&p, GPR_K0, C0_ENTRYHI);
336skip_asid_restore:
337	uasm_i_ehb(&p);
338
339	/* Disable RDHWR access */
340	uasm_i_mtc0(&p, GPR_ZERO, C0_HWRENA);
341
342	/* load the guest context from VCPU and return */
343	for (i = 1; i < 32; ++i) {
344		/* Guest k0/k1 loaded later */
345		if (i == GPR_K0 || i == GPR_K1)
346			continue;
347		UASM_i_LW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), GPR_K1);
348	}
349
350#ifndef CONFIG_CPU_MIPSR6
351	/* Restore hi/lo */
352	UASM_i_LW(&p, GPR_K0, offsetof(struct kvm_vcpu_arch, hi), GPR_K1);
353	uasm_i_mthi(&p, GPR_K0);
354
355	UASM_i_LW(&p, GPR_K0, offsetof(struct kvm_vcpu_arch, lo), GPR_K1);
356	uasm_i_mtlo(&p, GPR_K0);
357#endif
358
359	/* Restore the guest's k0/k1 registers */
360	UASM_i_LW(&p, GPR_K0, offsetof(struct kvm_vcpu_arch, gprs[GPR_K0]), GPR_K1);
361	UASM_i_LW(&p, GPR_K1, offsetof(struct kvm_vcpu_arch, gprs[GPR_K1]), GPR_K1);
362
363	/* Jump to guest */
364	uasm_i_eret(&p);
365
366	uasm_resolve_relocs(relocs, labels);
367
368	return p;
369}
370
371/**
372 * kvm_mips_build_tlb_refill_exception() - Assemble TLB refill handler.
373 * @addr:	Address to start writing code.
374 * @handler:	Address of common handler (within range of @addr).
375 *
376 * Assemble TLB refill exception fast path handler for guest execution.
377 *
378 * Returns:	Next address after end of written function.
379 */
380void *kvm_mips_build_tlb_refill_exception(void *addr, void *handler)
381{
382	u32 *p = addr;
383	struct uasm_label labels[2];
384	struct uasm_reloc relocs[2];
385#ifndef CONFIG_CPU_LOONGSON64
386	struct uasm_label *l = labels;
387	struct uasm_reloc *r = relocs;
388#endif
389
390	memset(labels, 0, sizeof(labels));
391	memset(relocs, 0, sizeof(relocs));
392
393	/* Save guest k1 into scratch register */
394	UASM_i_MTC0(&p, GPR_K1, scratch_tmp[0], scratch_tmp[1]);
395
396	/* Get the VCPU pointer from the VCPU scratch register */
397	UASM_i_MFC0(&p, GPR_K1, scratch_vcpu[0], scratch_vcpu[1]);
398
399	/* Save guest k0 into VCPU structure */
400	UASM_i_SW(&p, GPR_K0, offsetof(struct kvm_vcpu, arch.gprs[GPR_K0]), GPR_K1);
401
402	/*
403	 * Some of the common tlbex code uses current_cpu_type(). For KVM we
404	 * assume symmetry and just disable preemption to silence the warning.
405	 */
406	preempt_disable();
407
408#ifdef CONFIG_CPU_LOONGSON64
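	/*
	 * Loongson64 can walk the page tables largely in hardware: lddir
	 * fetches a directory entry for the faulting address and ldpte loads
	 * the even/odd PTEs for the tlbwr below.
	 */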
409	UASM_i_MFC0(&p, GPR_K1, C0_PGD);
410	uasm_i_lddir(&p, GPR_K0, GPR_K1, 3);  /* global page dir */
411#ifndef __PAGETABLE_PMD_FOLDED
412	uasm_i_lddir(&p, GPR_K1, GPR_K0, 1);  /* middle page dir */
413#endif
414	uasm_i_ldpte(&p, GPR_K1, 0);      /* even */
415	uasm_i_ldpte(&p, GPR_K1, 1);      /* odd */
416	uasm_i_tlbwr(&p);
417#else
418	/*
419	 * Now for the actual refill bit. A lot of this can be common with the
420	 * Linux TLB refill handler, however we don't need to handle so many
421	 * cases. We only need to handle user mode refills, and user mode runs
422	 * with 32-bit addressing.
423	 *
424	 * Therefore the branch to label_vmalloc generated by build_get_pmde64()
425	 * that isn't resolved should never actually get taken and is harmless
426	 * to leave in place for now.
427	 */
428
429#ifdef CONFIG_64BIT
430	build_get_pmde64(&p, &l, &r, GPR_K0, GPR_K1); /* get pmd in GPR_K1 */
431#else
432	build_get_pgde32(&p, GPR_K0, GPR_K1); /* get pgd in GPR_K1 */
433#endif
434
435	/* we don't support huge pages yet */
436
437	build_get_ptep(&p, GPR_K0, GPR_K1);
438	build_update_entries(&p, GPR_K0, GPR_K1);
439	build_tlb_write_entry(&p, &l, &r, tlb_random);
440#endif
441
442	preempt_enable();
443
444	/* Get the VCPU pointer from the VCPU scratch register again */
445	UASM_i_MFC0(&p, GPR_K1, scratch_vcpu[0], scratch_vcpu[1]);
446
447	/* Restore the guest's k0/k1 registers */
448	UASM_i_LW(&p, GPR_K0, offsetof(struct kvm_vcpu, arch.gprs[GPR_K0]), GPR_K1);
449	uasm_i_ehb(&p);
450	UASM_i_MFC0(&p, GPR_K1, scratch_tmp[0], scratch_tmp[1]);
451
452	/* Jump to guest */
453	uasm_i_eret(&p);
454
455	return p;
456}
457
458/**
459 * kvm_mips_build_exception() - Assemble first level guest exception handler.
460 * @addr:	Address to start writing code.
461 * @handler:	Address of common handler (within range of @addr).
462 *
463 * Assemble exception vector code for guest execution. The generated vector will
464 * branch to the common exception handler generated by kvm_mips_build_exit().
465 *
466 * Returns:	Next address after end of written function.
467 */
468void *kvm_mips_build_exception(void *addr, void *handler)
469{
470	u32 *p = addr;
471	struct uasm_label labels[2];
472	struct uasm_reloc relocs[2];
473	struct uasm_label *l = labels;
474	struct uasm_reloc *r = relocs;
475
476	memset(labels, 0, sizeof(labels));
477	memset(relocs, 0, sizeof(relocs));
478
479	/* Save guest k1 into scratch register */
480	UASM_i_MTC0(&p, GPR_K1, scratch_tmp[0], scratch_tmp[1]);
481
482	/* Get the VCPU pointer from the VCPU scratch register */
483	UASM_i_MFC0(&p, GPR_K1, scratch_vcpu[0], scratch_vcpu[1]);
484	UASM_i_ADDIU(&p, GPR_K1, GPR_K1, offsetof(struct kvm_vcpu, arch));
485
486	/* Save guest k0 into VCPU structure */
487	UASM_i_SW(&p, GPR_K0, offsetof(struct kvm_vcpu_arch, gprs[GPR_K0]), GPR_K1);
488
489	/* Branch to the common handler */
490	uasm_il_b(&p, &r, label_exit_common);
491	 uasm_i_nop(&p);
492
493	uasm_l_exit_common(&l, handler);
494	uasm_resolve_relocs(relocs, labels);
495
496	return p;
497}
498
499/**
500 * kvm_mips_build_exit() - Assemble common guest exit handler.
501 * @addr:	Address to start writing code.
502 *
503 * Assemble the generic guest exit handling code. This is called by the
504 * exception vectors (generated by kvm_mips_build_exception()), and calls
505 * kvm_mips_handle_exit(), then either resumes the guest or returns to the host
506 * depending on the return value.
507 *
508 * Returns:	Next address after end of written function.
509 */
510void *kvm_mips_build_exit(void *addr)
511{
512	u32 *p = addr;
513	unsigned int i;
514	struct uasm_label labels[3];
515	struct uasm_reloc relocs[3];
516	struct uasm_label *l = labels;
517	struct uasm_reloc *r = relocs;
518
519	memset(labels, 0, sizeof(labels));
520	memset(relocs, 0, sizeof(relocs));
521
522	/*
523	 * Generic Guest exception handler. We end up here when the guest
524	 * does something that causes a trap to kernel mode.
525	 *
526	 * Both k0/k1 registers will have already been saved (k0 into the vcpu
527	 * structure, and k1 into the scratch_tmp register).
528	 *
529	 * The k1 register will already contain the kvm_vcpu_arch pointer.
530	 */
531
532	/* Start saving Guest context to VCPU */
533	for (i = 0; i < 32; ++i) {
534		/* Guest k0/k1 saved later */
535		if (i == GPR_K0 || i == GPR_K1)
536			continue;
537		UASM_i_SW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), GPR_K1);
538	}
539
540#ifndef CONFIG_CPU_MIPSR6
541	/* We need to save hi/lo and restore them on the way out */
542	uasm_i_mfhi(&p, GPR_T0);
543	UASM_i_SW(&p, GPR_T0, offsetof(struct kvm_vcpu_arch, hi), GPR_K1);
544
545	uasm_i_mflo(&p, GPR_T0);
546	UASM_i_SW(&p, GPR_T0, offsetof(struct kvm_vcpu_arch, lo), GPR_K1);
547#endif
548
549	/* Finally save guest k1 to VCPU */
550	uasm_i_ehb(&p);
551	UASM_i_MFC0(&p, GPR_T0, scratch_tmp[0], scratch_tmp[1]);
552	UASM_i_SW(&p, GPR_T0, offsetof(struct kvm_vcpu_arch, gprs[GPR_K1]), GPR_K1);
553
554	/* Now that context has been saved, we can use other registers */
555
556	/* Restore vcpu */
557	UASM_i_MFC0(&p, GPR_S0, scratch_vcpu[0], scratch_vcpu[1]);
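	/* vcpu stays in callee-saved s0 across the call to kvm_mips_handle_exit(); unlike v5.4 there is no separate run pointer, as vcpu_run() now takes only vcpu */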
558
559	/*
560	 * Save Host level EPC, BadVaddr and Cause to VCPU, useful to process
561	 * the exception
562	 */
563	UASM_i_MFC0(&p, GPR_K0, C0_EPC);
564	UASM_i_SW(&p, GPR_K0, offsetof(struct kvm_vcpu_arch, pc), GPR_K1);
565
566	UASM_i_MFC0(&p, GPR_K0, C0_BADVADDR);
567	UASM_i_SW(&p, GPR_K0, offsetof(struct kvm_vcpu_arch, host_cp0_badvaddr),
568		  GPR_K1);
569
570	uasm_i_mfc0(&p, GPR_K0, C0_CAUSE);
571	uasm_i_sw(&p, GPR_K0, offsetof(struct kvm_vcpu_arch, host_cp0_cause), GPR_K1);
572
573	if (cpu_has_badinstr) {
574		uasm_i_mfc0(&p, GPR_K0, C0_BADINSTR);
575		uasm_i_sw(&p, GPR_K0, offsetof(struct kvm_vcpu_arch,
576					   host_cp0_badinstr), GPR_K1);
577	}
578
579	if (cpu_has_badinstrp) {
580		uasm_i_mfc0(&p, GPR_K0, C0_BADINSTRP);
581		uasm_i_sw(&p, GPR_K0, offsetof(struct kvm_vcpu_arch,
582					   host_cp0_badinstrp), GPR_K1);
583	}
584
585	/* Now restore the host state just enough to run the handlers */
586
587	/* Switch EBASE to the one used by Linux */
588	/* load up the host EBASE */
589	uasm_i_mfc0(&p, GPR_V0, C0_STATUS);
590
591	uasm_i_lui(&p, GPR_AT, ST0_BEV >> 16);
592	uasm_i_or(&p, GPR_K0, GPR_V0, GPR_AT);
593
594	uasm_i_mtc0(&p, GPR_K0, C0_STATUS);
595	uasm_i_ehb(&p);
596
597	UASM_i_LA_mostly(&p, GPR_K0, (long)&ebase);
598	UASM_i_LW(&p, GPR_K0, uasm_rel_lo((long)&ebase), GPR_K0);
599	build_set_exc_base(&p, GPR_K0);
600
601	if (raw_cpu_has_fpu) {
602		/*
603		 * If FPU is enabled, save FCR31 and clear it so that later
604		 * ctc1's don't trigger FPE for pending exceptions.
605		 */
606		uasm_i_lui(&p, GPR_AT, ST0_CU1 >> 16);
607		uasm_i_and(&p, GPR_V1, GPR_V0, GPR_AT);
608		uasm_il_beqz(&p, &r, GPR_V1, label_fpu_1);
609		 uasm_i_nop(&p);
610		uasm_i_cfc1(&p, GPR_T0, 31);
611		uasm_i_sw(&p, GPR_T0, offsetof(struct kvm_vcpu_arch, fpu.fcr31),
612			  GPR_K1);
613		uasm_i_ctc1(&p, GPR_ZERO, 31);
614		uasm_l_fpu_1(&l, p);
615	}
616
617	if (cpu_has_msa) {
618		/*
619		 * If MSA is enabled, save MSACSR and clear it so that later
620		 * instructions don't trigger MSAFPE for pending exceptions.
621		 */
622		uasm_i_mfc0(&p, GPR_T0, C0_CONFIG5);
623		uasm_i_ext(&p, GPR_T0, GPR_T0, 27, 1); /* MIPS_CONF5_MSAEN */
624		uasm_il_beqz(&p, &r, GPR_T0, label_msa_1);
625		 uasm_i_nop(&p);
626		uasm_i_cfcmsa(&p, GPR_T0, MSA_CSR);
627		uasm_i_sw(&p, GPR_T0, offsetof(struct kvm_vcpu_arch, fpu.msacsr),
628			  GPR_K1);
629		uasm_i_ctcmsa(&p, MSA_CSR, GPR_ZERO);
630		uasm_l_msa_1(&l, p);
631	}
632
633	/* Restore host ASID */
634	if (!cpu_has_guestid) {
635		UASM_i_LW(&p, GPR_K0, offsetof(struct kvm_vcpu_arch, host_entryhi),
636			  GPR_K1);
637		UASM_i_MTC0(&p, GPR_K0, C0_ENTRYHI);
638	}
639
640	/*
641	 * Set up normal Linux process pgd.
642	 * This does roughly the same as TLBMISS_HANDLER_SETUP_PGD():
643	 * - call tlbmiss_handler_setup_pgd(mm->pgd)
644	 * - write mm->pgd into CP0_PWBase
645	 */
646	UASM_i_LW(&p, GPR_A0,
647		  offsetof(struct kvm_vcpu_arch, host_pgd), GPR_K1);
648	UASM_i_LA(&p, GPR_T9, (unsigned long)tlbmiss_handler_setup_pgd);
649	uasm_i_jalr(&p, GPR_RA, GPR_T9);
650	/* delay slot */
651	if (cpu_has_htw)
652		UASM_i_MTC0(&p, GPR_A0, C0_PWBASE);
653	else
654		uasm_i_nop(&p);
655
656	/* Clear GM bit so we don't enter guest mode when EXL is cleared */
657	uasm_i_mfc0(&p, GPR_K0, C0_GUESTCTL0);
658	uasm_i_ins(&p, GPR_K0, GPR_ZERO, MIPS_GCTL0_GM_SHIFT, 1);
659	uasm_i_mtc0(&p, GPR_K0, C0_GUESTCTL0);
660
661	/* Save GuestCtl0 so we can access GExcCode after CPU migration */
662	uasm_i_sw(&p, GPR_K0,
663		  offsetof(struct kvm_vcpu_arch, host_cp0_guestctl0), GPR_K1);
664
665	if (cpu_has_guestid) {
666		/*
667		 * Clear root mode GuestID, so that root TLB operations use the
668		 * root GuestID in the root TLB.
669		 */
670		uasm_i_mfc0(&p, GPR_T0, C0_GUESTCTL1);
671		/* Set GuestCtl1.RID = MIPS_GCTL1_ROOT_GUESTID (i.e. 0) */
672		uasm_i_ins(&p, GPR_T0, GPR_ZERO, MIPS_GCTL1_RID_SHIFT,
673			   MIPS_GCTL1_RID_WIDTH);
674		uasm_i_mtc0(&p, GPR_T0, C0_GUESTCTL1);
675	}
676
677	/* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
678	uasm_i_addiu(&p, GPR_AT, GPR_ZERO, ~(ST0_EXL | KSU_USER | ST0_IE));
679	uasm_i_and(&p, GPR_V0, GPR_V0, GPR_AT);
680	uasm_i_lui(&p, GPR_AT, ST0_CU0 >> 16);
681	uasm_i_or(&p, GPR_V0, GPR_V0, GPR_AT);
682#ifdef CONFIG_64BIT
683	uasm_i_ori(&p, GPR_V0, GPR_V0, ST0_SX | ST0_UX);
684#endif
685	uasm_i_mtc0(&p, GPR_V0, C0_STATUS);
686	uasm_i_ehb(&p);
687
688	/* Load up host GPR_GP */
689	UASM_i_LW(&p, GPR_GP, offsetof(struct kvm_vcpu_arch, host_gp), GPR_K1);
690
691	/* Need a stack before we can jump to "C" */
692	UASM_i_LW(&p, GPR_SP, offsetof(struct kvm_vcpu_arch, host_stack), GPR_K1);
693
694	/* Saved host state */
695	UASM_i_ADDIU(&p, GPR_SP, GPR_SP, -(int)sizeof(struct pt_regs));
696
697	/*
698	 * XXXKYMA do we need to load the host ASID, maybe not because the
699	 * kernel entries are marked GLOBAL, need to verify
700	 */
701
702	/* Restore host scratch registers, as we'll have clobbered them */
703	kvm_mips_build_restore_scratch(&p, GPR_K0, GPR_SP);
704
705	/* Restore RDHWR access */
706	UASM_i_LA_mostly(&p, GPR_K0, (long)&hwrena);
707	uasm_i_lw(&p, GPR_K0, uasm_rel_lo((long)&hwrena), GPR_K0);
708	uasm_i_mtc0(&p, GPR_K0, C0_HWRENA);
709
710	/* Jump to handler */
711	/*
712	 * XXXKYMA: not sure if this is safe, how large is the stack??
713	 * Now jump to the kvm_mips_handle_exit() to see if we can deal
714	 * with this in the kernel
715	 */
716	uasm_i_move(&p, GPR_A0, GPR_S0);
717	UASM_i_LA(&p, GPR_T9, (unsigned long)kvm_mips_handle_exit);
718	uasm_i_jalr(&p, GPR_RA, GPR_T9);
719	 UASM_i_ADDIU(&p, GPR_SP, GPR_SP, -CALLFRAME_SIZ);
720
721	uasm_resolve_relocs(relocs, labels);
722
723	p = kvm_mips_build_ret_from_exit(p);
724
725	return p;
726}
727
728/**
729 * kvm_mips_build_ret_from_exit() - Assemble guest exit return handler.
730 * @addr:	Address to start writing code.
731 *
732 * Assemble the code to handle the return from kvm_mips_handle_exit(), either
733 * resuming the guest or returning to the host depending on the return value.
734 *
735 * Returns:	Next address after end of written function.
736 */
737static void *kvm_mips_build_ret_from_exit(void *addr)
738{
739	u32 *p = addr;
740	struct uasm_label labels[2];
741	struct uasm_reloc relocs[2];
742	struct uasm_label *l = labels;
743	struct uasm_reloc *r = relocs;
744
745	memset(labels, 0, sizeof(labels));
746	memset(relocs, 0, sizeof(relocs));
747
 748	/* Return from handler. Make sure interrupts are disabled */
749	uasm_i_di(&p, GPR_ZERO);
750	uasm_i_ehb(&p);
751
752	/*
753	 * XXXKYMA: k0/k1 could have been blown away if we processed
754	 * an exception while we were handling the exception from the
755	 * guest, reload k1
756	 */
757
758	uasm_i_move(&p, GPR_K1, GPR_S0);
759	UASM_i_ADDIU(&p, GPR_K1, GPR_K1, offsetof(struct kvm_vcpu, arch));
760
761	/*
762	 * Check return value, should tell us if we are returning to the
 763	 * host (handle I/O etc.) or resuming the guest
764	 */
765	uasm_i_andi(&p, GPR_T0, GPR_V0, RESUME_HOST);
766	uasm_il_bnez(&p, &r, GPR_T0, label_return_to_host);
767	 uasm_i_nop(&p);
768
769	p = kvm_mips_build_ret_to_guest(p);
770
771	uasm_l_return_to_host(&l, p);
772	p = kvm_mips_build_ret_to_host(p);
773
774	uasm_resolve_relocs(relocs, labels);
775
776	return p;
777}
778
779/**
780 * kvm_mips_build_ret_to_guest() - Assemble code to return to the guest.
781 * @addr:	Address to start writing code.
782 *
783 * Assemble the code to handle return from the guest exit handler
784 * (kvm_mips_handle_exit()) back to the guest.
785 *
786 * Returns:	Next address after end of written function.
787 */
788static void *kvm_mips_build_ret_to_guest(void *addr)
789{
790	u32 *p = addr;
791
792	/* Put the saved pointer to vcpu (s0) back into the scratch register */
793	UASM_i_MTC0(&p, GPR_S0, scratch_vcpu[0], scratch_vcpu[1]);
794
795	/* Load up the Guest EBASE to minimize the window where BEV is set */
796	UASM_i_LW(&p, GPR_T0, offsetof(struct kvm_vcpu_arch, guest_ebase), GPR_K1);
797
798	/* Switch EBASE back to the one used by KVM */
799	uasm_i_mfc0(&p, GPR_V1, C0_STATUS);
800	uasm_i_lui(&p, GPR_AT, ST0_BEV >> 16);
801	uasm_i_or(&p, GPR_K0, GPR_V1, GPR_AT);
802	uasm_i_mtc0(&p, GPR_K0, C0_STATUS);
803	uasm_i_ehb(&p);
804	build_set_exc_base(&p, GPR_T0);
805
806	/* Setup status register for running guest in UM */
807	uasm_i_ori(&p, GPR_V1, GPR_V1, ST0_EXL | KSU_USER | ST0_IE);
808	UASM_i_LA(&p, GPR_AT, ~(ST0_CU0 | ST0_MX | ST0_SX | ST0_UX));
809	uasm_i_and(&p, GPR_V1, GPR_V1, GPR_AT);
810	uasm_i_mtc0(&p, GPR_V1, C0_STATUS);
811	uasm_i_ehb(&p);
812
813	p = kvm_mips_build_enter_guest(p);
814
815	return p;
816}
817
818/**
819 * kvm_mips_build_ret_to_host() - Assemble code to return to the host.
820 * @addr:	Address to start writing code.
821 *
822 * Assemble the code to handle return from the guest exit handler
823 * (kvm_mips_handle_exit()) back to the host, i.e. to the caller of the vcpu_run
824 * function generated by kvm_mips_build_vcpu_run().
825 *
826 * Returns:	Next address after end of written function.
827 */
828static void *kvm_mips_build_ret_to_host(void *addr)
829{
830	u32 *p = addr;
831	unsigned int i;
832
833	/* EBASE is already pointing to Linux */
834	UASM_i_LW(&p, GPR_K1, offsetof(struct kvm_vcpu_arch, host_stack), GPR_K1);
835	UASM_i_ADDIU(&p, GPR_K1, GPR_K1, -(int)sizeof(struct pt_regs));
836
837	/*
838	 * r2/v0 is the return code, shift it down by 2 (arithmetic)
839	 * to recover the err code
840	 */
841	uasm_i_sra(&p, GPR_K0, GPR_V0, 2);
842	uasm_i_move(&p, GPR_V0, GPR_K0);
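	/* bits 1:0 of v0 were the RESUME_* flags; only the error code proper is returned to the caller */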
843
844	/* Load context saved on the host stack */
845	for (i = 16; i < 31; ++i) {
846		if (i == 24)
847			i = 28;
848		UASM_i_LW(&p, i, offsetof(struct pt_regs, regs[i]), GPR_K1);
849	}
850
851	/* Restore RDHWR access */
852	UASM_i_LA_mostly(&p, GPR_K0, (long)&hwrena);
853	uasm_i_lw(&p, GPR_K0, uasm_rel_lo((long)&hwrena), GPR_K0);
854	uasm_i_mtc0(&p, GPR_K0, C0_HWRENA);
855
856	/* Restore GPR_RA, which is the address we will return to */
857	UASM_i_LW(&p, GPR_RA, offsetof(struct pt_regs, regs[GPR_RA]), GPR_K1);
858	uasm_i_jr(&p, GPR_RA);
859	 uasm_i_nop(&p);
860
861	return p;
862}
863
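
For context, these builders are consumed when a VCPU is created. The sketch below is a rough outline of that flow, loosely based on kvm_arch_vcpu_create() in arch/mips/kvm/mips.c; the buffer sizing, error handling, exact vector offsets and the VECTORSPACING constant are assumptions drawn from that file and are simplified here, not a verbatim excerpt.

	/* sketch: laying out the per-VCPU guest exception base area (gebase) */
	gebase = kzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL);

	refill_start = gebase;			/* TLB refill vector */
	handler = gebase + 0x2000;		/* common handler, out of vector range */

	kvm_mips_build_tlb_refill_exception(refill_start, handler);
	kvm_mips_build_exception(gebase + 0x180, handler);	/* general exception vector */
	for (i = 0; i < 8; i++)			/* vectored interrupt slots */
		kvm_mips_build_exception(gebase + 0x200 + i * VECTORSPACING, handler);

	p = kvm_mips_build_exit(handler);	/* common exit handler body */
	vcpu->arch.vcpu_run = p;		/* entry point called to run the guest */
	p = kvm_mips_build_vcpu_run(p);

	/* the generated code is executed directly, so synchronize the I-cache */
	flush_icache_range((unsigned long)gebase, (unsigned long)gebase + size);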