v4.17
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS TLB handling, this file is part of the Linux host kernel so that
 * TLB handlers run from KSEG0
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kvm_host.h>
#include <linux/srcu.h>

#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>
#include <asm/tlbdebug.h>

#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT

#define KVM_GUEST_PC_TLB    0
#define KVM_GUEST_SP_TLB    1

#ifdef CONFIG_KVM_MIPS_VZ
unsigned long GUESTID_MASK;
EXPORT_SYMBOL_GPL(GUESTID_MASK);
unsigned long GUESTID_FIRST_VERSION;
EXPORT_SYMBOL_GPL(GUESTID_FIRST_VERSION);
unsigned long GUESTID_VERSION_MASK;
EXPORT_SYMBOL_GPL(GUESTID_VERSION_MASK);

static u32 kvm_mips_get_root_asid(struct kvm_vcpu *vcpu)
{
        struct mm_struct *gpa_mm = &vcpu->kvm->arch.gpa_mm;

        if (cpu_has_guestid)
                return 0;
        else
                return cpu_asid(smp_processor_id(), gpa_mm);
}
#endif

static u32 kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
{
        struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
        int cpu = smp_processor_id();

        return cpu_asid(cpu, kern_mm);
}

static u32 kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
{
        struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
        int cpu = smp_processor_id();

        return cpu_asid(cpu, user_mm);
}

/* Structure defining a TLB entry data set. */

void kvm_mips_dump_host_tlbs(void)
{
        unsigned long flags;

        local_irq_save(flags);

        kvm_info("HOST TLBs:\n");
        dump_tlb_regs();
        pr_info("\n");
        dump_tlb_all();

        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_mips_dump_host_tlbs);

void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        struct kvm_mips_tlb tlb;
        int i;

        kvm_info("Guest TLBs:\n");
        kvm_info("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0));

        for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
                tlb = vcpu->arch.guest_tlb[i];
                kvm_info("TLB%c%3d Hi 0x%08lx ",
                         (tlb.tlb_lo[0] | tlb.tlb_lo[1]) & ENTRYLO_V
                                                        ? ' ' : '*',
                         i, tlb.tlb_hi);
                kvm_info("Lo0=0x%09llx %c%c attr %lx ",
                         (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo[0]),
                         (tlb.tlb_lo[0] & ENTRYLO_D) ? 'D' : ' ',
                         (tlb.tlb_lo[0] & ENTRYLO_G) ? 'G' : ' ',
                         (tlb.tlb_lo[0] & ENTRYLO_C) >> ENTRYLO_C_SHIFT);
                kvm_info("Lo1=0x%09llx %c%c attr %lx sz=%lx\n",
                         (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo[1]),
                         (tlb.tlb_lo[1] & ENTRYLO_D) ? 'D' : ' ',
                         (tlb.tlb_lo[1] & ENTRYLO_G) ? 'G' : ' ',
                         (tlb.tlb_lo[1] & ENTRYLO_C) >> ENTRYLO_C_SHIFT,
                         tlb.tlb_mask);
        }
}
EXPORT_SYMBOL_GPL(kvm_mips_dump_guest_tlbs);

int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
{
        int i;
        int index = -1;
        struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb;

        for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
                if (TLB_HI_VPN2_HIT(tlb[i], entryhi) &&
                    TLB_HI_ASID_HIT(tlb[i], entryhi)) {
                        index = i;
                        break;
                }
        }

        kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
                  __func__, entryhi, index, tlb[i].tlb_lo[0], tlb[i].tlb_lo[1]);

        return index;
}
EXPORT_SYMBOL_GPL(kvm_mips_guest_tlb_lookup);
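
/*
 * Illustrative sketch (not from the original file): how an emulation path
 * might resolve a guest virtual address against the software-managed guest
 * TLB above. The helper name is hypothetical; kvm_read_c0_guest_entryhi()
 * and KVM_ENTRYHI_ASID are assumed to be available from asm/kvm_host.h.
 */
static int example_probe_guest_tlb(struct kvm_vcpu *vcpu, unsigned long gva)
{
        unsigned long asid = kvm_read_c0_guest_entryhi(vcpu->arch.cop0) &
                             KVM_ENTRYHI_ASID;

        /*
         * EntryHi to match: VPN2 of the address combined with the guest
         * ASID; >= 0 is the matching guest TLB index, -1 means no match.
         */
        return kvm_mips_guest_tlb_lookup(vcpu, (gva & VPN2_MASK) | asid);
}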

static int _kvm_mips_host_tlb_inv(unsigned long entryhi)
{
        int idx;

        write_c0_entryhi(entryhi);
        mtc0_tlbw_hazard();

        tlb_probe();
        tlb_probe_hazard();
        idx = read_c0_index();

        if (idx >= current_cpu_data.tlbsize)
                BUG();

        if (idx >= 0) {
                write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                write_c0_entrylo0(0);
                write_c0_entrylo1(0);
                mtc0_tlbw_hazard();

                tlb_write_indexed();
                tlbw_use_hazard();
        }

        return idx;
}

int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va,
                          bool user, bool kernel)
{
        /*
         * Initialize idx_user and idx_kernel to workaround bogus
         * maybe-initialized warning when using GCC 6.
         */
        int idx_user = 0, idx_kernel = 0;
        unsigned long flags, old_entryhi;

        local_irq_save(flags);

        old_entryhi = read_c0_entryhi();

        if (user)
                idx_user = _kvm_mips_host_tlb_inv((va & VPN2_MASK) |
                                                  kvm_mips_get_user_asid(vcpu));
        if (kernel)
                idx_kernel = _kvm_mips_host_tlb_inv((va & VPN2_MASK) |
                                                kvm_mips_get_kernel_asid(vcpu));

        write_c0_entryhi(old_entryhi);
        mtc0_tlbw_hazard();

        local_irq_restore(flags);

        /*
         * We don't want to get reserved instruction exceptions for missing tlb
         * entries.
         */
        if (cpu_has_vtag_icache)
                flush_icache_all();

        if (user && idx_user >= 0)
                kvm_debug("%s: Invalidated guest user entryhi %#lx @ idx %d\n",
                          __func__, (va & VPN2_MASK) |
                                    kvm_mips_get_user_asid(vcpu), idx_user);
        if (kernel && idx_kernel >= 0)
                kvm_debug("%s: Invalidated guest kernel entryhi %#lx @ idx %d\n",
                          __func__, (va & VPN2_MASK) |
                                    kvm_mips_get_kernel_asid(vcpu), idx_kernel);

        return 0;
}
EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_inv);
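
/*
 * Illustrative sketch (not from the original file): dropping a stale GVA
 * mapping for both the guest-kernel and guest-user host ASIDs at once, the
 * way a caller of the helper above typically would. The wrapper name is
 * hypothetical.
 */
static void example_drop_gva_mapping(struct kvm_vcpu *vcpu, unsigned long gva)
{
        /* user = true, kernel = true: invalidate under both guest ASIDs */
        kvm_mips_host_tlb_inv(vcpu, gva, true, true);
}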

#ifdef CONFIG_KVM_MIPS_VZ

/* GuestID management */

/**
 * clear_root_gid() - Set GuestCtl1.RID for normal root operation.
 */
static inline void clear_root_gid(void)
{
        if (cpu_has_guestid) {
                clear_c0_guestctl1(MIPS_GCTL1_RID);
                mtc0_tlbw_hazard();
        }
}

/**
 * set_root_gid_to_guest_gid() - Set GuestCtl1.RID to match GuestCtl1.ID.
 *
 * Sets the root GuestID to match the current guest GuestID, for TLB operation
 * on the GPA->RPA mappings in the root TLB.
 *
 * The caller must be sure to disable HTW while the root GID is set, and
 * possibly longer if TLB registers are modified.
 */
static inline void set_root_gid_to_guest_gid(void)
{
        unsigned int guestctl1;

        if (cpu_has_guestid) {
                back_to_back_c0_hazard();
                guestctl1 = read_c0_guestctl1();
                guestctl1 = (guestctl1 & ~MIPS_GCTL1_RID) |
                        ((guestctl1 & MIPS_GCTL1_ID) >> MIPS_GCTL1_ID_SHIFT)
                                             << MIPS_GCTL1_RID_SHIFT;
                write_c0_guestctl1(guestctl1);
                mtc0_tlbw_hazard();
        }
}

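/*
 * Worked example (illustrative, based on the GuestCtl1 layout in
 * asm/mipsregs.h, which this file does not define): with GuestCtl1.ID in
 * bits 7:0 and GuestCtl1.RID in bits 23:16, a GuestCtl1 value of 0x0000002a
 * (guest GuestID 0x2a) reads back as 0x002a002a after
 * set_root_gid_to_guest_gid(), so subsequent root TLB probes and writes
 * operate on that guest's GPA->RPA entries.
 */
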
int kvm_vz_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
{
        int idx;
        unsigned long flags, old_entryhi;

        local_irq_save(flags);
        htw_stop();

        /* Set root GuestID for root probe and write of guest TLB entry */
        set_root_gid_to_guest_gid();

        old_entryhi = read_c0_entryhi();

        idx = _kvm_mips_host_tlb_inv((va & VPN2_MASK) |
                                     kvm_mips_get_root_asid(vcpu));

        write_c0_entryhi(old_entryhi);
        clear_root_gid();
        mtc0_tlbw_hazard();

        htw_start();
        local_irq_restore(flags);

        /*
         * We don't want to get reserved instruction exceptions for missing tlb
         * entries.
         */
        if (cpu_has_vtag_icache)
                flush_icache_all();

        if (idx > 0)
                kvm_debug("%s: Invalidated root entryhi %#lx @ idx %d\n",
                          __func__, (va & VPN2_MASK) |
                                    kvm_mips_get_root_asid(vcpu), idx);

        return 0;
}
EXPORT_SYMBOL_GPL(kvm_vz_host_tlb_inv);

/**
 * kvm_vz_guest_tlb_lookup() - Lookup a guest VZ TLB mapping.
 * @vcpu:	KVM VCPU pointer.
 * @gva:	Guest virtual address in a TLB mapped guest segment.
 * @gpa:	Pointer to output guest physical address it maps to.
 *
 * Converts a guest virtual address in a guest TLB mapped segment to a guest
 * physical address, by probing the guest TLB.
 *
 * Returns:	0 if guest TLB mapping exists for @gva. *@gpa will have been
 *		written.
 *		-EFAULT if no guest TLB mapping exists for @gva. *@gpa may not
 *		have been written.
 */
int kvm_vz_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long gva,
                            unsigned long *gpa)
{
        unsigned long o_entryhi, o_entrylo[2], o_pagemask;
        unsigned int o_index;
        unsigned long entrylo[2], pagemask, pagemaskbit, pa;
        unsigned long flags;
        int index;

        /* Probe the guest TLB for a mapping */
        local_irq_save(flags);
        /* Set root GuestID for root probe of guest TLB entry */
        htw_stop();
        set_root_gid_to_guest_gid();

        o_entryhi = read_gc0_entryhi();
        o_index = read_gc0_index();

        write_gc0_entryhi((o_entryhi & 0x3ff) | (gva & ~0xfffl));
        mtc0_tlbw_hazard();
        guest_tlb_probe();
        tlb_probe_hazard();

        index = read_gc0_index();
        if (index < 0) {
                /* No match, fail */
                write_gc0_entryhi(o_entryhi);
                write_gc0_index(o_index);

                clear_root_gid();
                htw_start();
                local_irq_restore(flags);
                return -EFAULT;
        }

        /* Match! read the TLB entry */
        o_entrylo[0] = read_gc0_entrylo0();
        o_entrylo[1] = read_gc0_entrylo1();
        o_pagemask = read_gc0_pagemask();

        mtc0_tlbr_hazard();
        guest_tlb_read();
        tlb_read_hazard();

        entrylo[0] = read_gc0_entrylo0();
        entrylo[1] = read_gc0_entrylo1();
        pagemask = ~read_gc0_pagemask() & ~0x1fffl;

        write_gc0_entryhi(o_entryhi);
        write_gc0_index(o_index);
        write_gc0_entrylo0(o_entrylo[0]);
        write_gc0_entrylo1(o_entrylo[1]);
        write_gc0_pagemask(o_pagemask);

        clear_root_gid();
        htw_start();
        local_irq_restore(flags);

        /* Select one of the EntryLo values and interpret the GPA */
        pagemaskbit = (pagemask ^ (pagemask & (pagemask - 1))) >> 1;
        pa = entrylo[!!(gva & pagemaskbit)];

        /*
         * TLB entry may have become invalid since TLB probe if physical FTLB
         * entries are shared between threads (e.g. I6400).
         */
        if (!(pa & ENTRYLO_V))
                return -EFAULT;

        /*
         * Note, this doesn't take guest MIPS32 XPA into account, where PFN is
         * split with XI/RI in the middle.
         */
        pa = (pa << 6) & ~0xfffl;
        pa |= gva & ~(pagemask | pagemaskbit);

        *gpa = pa;
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_vz_guest_tlb_lookup);
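
/*
 * Illustrative sketch (not from the original file): converting a faulting
 * guest virtual address to a guest physical address before handling the
 * fault, roughly how a VZ exit handler would use the helper above. The
 * wrapper name is hypothetical.
 */
static int example_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long badvaddr,
                              unsigned long *gpa)
{
        int err = kvm_vz_guest_tlb_lookup(vcpu, badvaddr, gpa);

        if (err)
                return err;	/* -EFAULT: no valid guest TLB mapping */

        /* *gpa now holds the guest physical address to fault in */
        return 0;
}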

/**
 * kvm_vz_local_flush_roottlb_all_guests() - Flush all root TLB entries for
 * guests.
 *
 * Invalidate all entries in root tlb which are GPA mappings.
 */
void kvm_vz_local_flush_roottlb_all_guests(void)
{
        unsigned long flags;
        unsigned long old_entryhi, old_pagemask, old_guestctl1;
        int entry;

        if (WARN_ON(!cpu_has_guestid))
                return;

        local_irq_save(flags);
        htw_stop();

        /* TLBR may clobber EntryHi.ASID, PageMask, and GuestCtl1.RID */
        old_entryhi = read_c0_entryhi();
        old_pagemask = read_c0_pagemask();
        old_guestctl1 = read_c0_guestctl1();

        /*
         * Invalidate guest entries in root TLB while leaving root entries
         * intact when possible.
         */
        for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
                write_c0_index(entry);
                mtc0_tlbw_hazard();
                tlb_read();
                tlb_read_hazard();

                /* Don't invalidate non-guest (RVA) mappings in the root TLB */
                if (!(read_c0_guestctl1() & MIPS_GCTL1_RID))
                        continue;

                /* Make sure all entries differ. */
                write_c0_entryhi(UNIQUE_ENTRYHI(entry));
                write_c0_entrylo0(0);
                write_c0_entrylo1(0);
                write_c0_guestctl1(0);
                mtc0_tlbw_hazard();
                tlb_write_indexed();
        }

        write_c0_entryhi(old_entryhi);
        write_c0_pagemask(old_pagemask);
        write_c0_guestctl1(old_guestctl1);
        tlbw_use_hazard();

        htw_start();
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_vz_local_flush_roottlb_all_guests);
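
/*
 * Illustrative sketch (not from the original file): the flush above only
 * affects the local CPU, so flushing every CPU's root TLB would need an
 * IPI broadcast, e.g. via on_each_cpu(). Both helper names here are
 * hypothetical; only the flush function itself is real.
 */
static void example_flush_one(void *info)
{
        kvm_vz_local_flush_roottlb_all_guests();
}

static void example_flush_everywhere(void)
{
        on_each_cpu(example_flush_one, NULL, 1);	/* 1: wait for all CPUs */
}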

/**
 * kvm_vz_local_flush_guesttlb_all() - Flush all guest TLB entries.
 *
 * Invalidate all entries in guest tlb irrespective of guestid.
 */
void kvm_vz_local_flush_guesttlb_all(void)
{
        unsigned long flags;
        unsigned long old_index;
        unsigned long old_entryhi;
        unsigned long old_entrylo[2];
        unsigned long old_pagemask;
        int entry;
        u64 cvmmemctl2 = 0;

        local_irq_save(flags);

        /* Preserve all clobbered guest registers */
        old_index = read_gc0_index();
        old_entryhi = read_gc0_entryhi();
        old_entrylo[0] = read_gc0_entrylo0();
        old_entrylo[1] = read_gc0_entrylo1();
        old_pagemask = read_gc0_pagemask();

        switch (current_cpu_type()) {
        case CPU_CAVIUM_OCTEON3:
                /* Inhibit machine check due to multiple matching TLB entries */
                cvmmemctl2 = read_c0_cvmmemctl2();
                cvmmemctl2 |= CVMMEMCTL2_INHIBITTS;
                write_c0_cvmmemctl2(cvmmemctl2);
                break;
        }

        /* Invalidate guest entries in guest TLB */
        write_gc0_entrylo0(0);
        write_gc0_entrylo1(0);
        write_gc0_pagemask(0);
        for (entry = 0; entry < current_cpu_data.guest.tlbsize; entry++) {
                /* Make sure all entries differ. */
                write_gc0_index(entry);
                write_gc0_entryhi(UNIQUE_GUEST_ENTRYHI(entry));
                mtc0_tlbw_hazard();
                guest_tlb_write_indexed();
        }

        if (cvmmemctl2) {
                cvmmemctl2 &= ~CVMMEMCTL2_INHIBITTS;
                write_c0_cvmmemctl2(cvmmemctl2);
        }

        write_gc0_index(old_index);
        write_gc0_entryhi(old_entryhi);
        write_gc0_entrylo0(old_entrylo[0]);
        write_gc0_entrylo1(old_entrylo[1]);
        write_gc0_pagemask(old_pagemask);
        tlbw_use_hazard();

        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_vz_local_flush_guesttlb_all);

/**
 * kvm_vz_save_guesttlb() - Save a range of guest TLB entries.
 * @buf:	Buffer to write TLB entries into.
 * @index:	Start index.
 * @count:	Number of entries to save.
 *
 * Save a range of guest TLB entries. The caller must ensure interrupts are
 * disabled.
 */
void kvm_vz_save_guesttlb(struct kvm_mips_tlb *buf, unsigned int index,
                          unsigned int count)
{
        unsigned int end = index + count;
        unsigned long old_entryhi, old_entrylo0, old_entrylo1, old_pagemask;
        unsigned int guestctl1 = 0;
        int old_index, i;

        /* Save registers we're about to clobber */
        old_index = read_gc0_index();
        old_entryhi = read_gc0_entryhi();
        old_entrylo0 = read_gc0_entrylo0();
        old_entrylo1 = read_gc0_entrylo1();
        old_pagemask = read_gc0_pagemask();

        /* Set root GuestID for root probe */
        htw_stop();
        set_root_gid_to_guest_gid();
        if (cpu_has_guestid)
                guestctl1 = read_c0_guestctl1();

        /* Read each entry from guest TLB */
        for (i = index; i < end; ++i, ++buf) {
                write_gc0_index(i);

                mtc0_tlbr_hazard();
                guest_tlb_read();
                tlb_read_hazard();

                if (cpu_has_guestid &&
                    (read_c0_guestctl1() ^ guestctl1) & MIPS_GCTL1_RID) {
                        /* Entry invalid or belongs to another guest */
                        buf->tlb_hi = UNIQUE_GUEST_ENTRYHI(i);
                        buf->tlb_lo[0] = 0;
                        buf->tlb_lo[1] = 0;
                        buf->tlb_mask = 0;
                } else {
                        /* Entry belongs to the right guest */
                        buf->tlb_hi = read_gc0_entryhi();
                        buf->tlb_lo[0] = read_gc0_entrylo0();
                        buf->tlb_lo[1] = read_gc0_entrylo1();
                        buf->tlb_mask = read_gc0_pagemask();
                }
        }

        /* Clear root GuestID again */
        clear_root_gid();
        htw_start();

        /* Restore clobbered registers */
        write_gc0_index(old_index);
        write_gc0_entryhi(old_entryhi);
        write_gc0_entrylo0(old_entrylo0);
        write_gc0_entrylo1(old_entrylo1);
        write_gc0_pagemask(old_pagemask);

        tlbw_use_hazard();
}
EXPORT_SYMBOL_GPL(kvm_vz_save_guesttlb);
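
/*
 * Illustrative sketch (not from the original file): snapshotting and later
 * restoring the whole guest TLB with the save/load pair, with interrupts
 * disabled as the comments above and below require. The wrapper is
 * hypothetical; kvm_vz_load_guesttlb() is declared in asm/kvm_host.h, so
 * using it before its definition below is fine.
 */
static void example_guesttlb_roundtrip(struct kvm_mips_tlb *buf)
{
        unsigned int size = current_cpu_data.guest.tlbsize;
        unsigned long flags;

        local_irq_save(flags);
        kvm_vz_save_guesttlb(buf, 0, size);	/* snapshot every entry */
        /* ... run another VCPU, service the host, etc ... */
        kvm_vz_load_guesttlb(buf, 0, size);	/* write the snapshot back */
        local_irq_restore(flags);
}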

/**
 * kvm_vz_load_guesttlb() - Load a range of guest TLB entries.
 * @buf:	Buffer to read TLB entries from.
 * @index:	Start index.
 * @count:	Number of entries to load.
 *
 * Load a range of guest TLB entries. The caller must ensure interrupts are
 * disabled.
 */
void kvm_vz_load_guesttlb(const struct kvm_mips_tlb *buf, unsigned int index,
                          unsigned int count)
{
        unsigned int end = index + count;
        unsigned long old_entryhi, old_entrylo0, old_entrylo1, old_pagemask;
        int old_index, i;

        /* Save registers we're about to clobber */
        old_index = read_gc0_index();
        old_entryhi = read_gc0_entryhi();
        old_entrylo0 = read_gc0_entrylo0();
        old_entrylo1 = read_gc0_entrylo1();
        old_pagemask = read_gc0_pagemask();

        /* Set root GuestID for root probe */
        htw_stop();
        set_root_gid_to_guest_gid();

        /* Write each entry to guest TLB */
        for (i = index; i < end; ++i, ++buf) {
                write_gc0_index(i);
                write_gc0_entryhi(buf->tlb_hi);
                write_gc0_entrylo0(buf->tlb_lo[0]);
                write_gc0_entrylo1(buf->tlb_lo[1]);
                write_gc0_pagemask(buf->tlb_mask);

                mtc0_tlbw_hazard();
                guest_tlb_write_indexed();
        }

        /* Clear root GuestID again */
        clear_root_gid();
        htw_start();

        /* Restore clobbered registers */
        write_gc0_index(old_index);
        write_gc0_entryhi(old_entryhi);
        write_gc0_entrylo0(old_entrylo0);
        write_gc0_entrylo1(old_entrylo1);
        write_gc0_pagemask(old_pagemask);

        tlbw_use_hazard();
}
EXPORT_SYMBOL_GPL(kvm_vz_load_guesttlb);

#endif

/**
 * kvm_mips_suspend_mm() - Suspend the active mm.
 * @cpu:	The CPU we're running on.
 *
 * Suspend the active_mm, ready for a switch to a KVM guest virtual address
 * space. This is left active for the duration of guest context, including time
 * with interrupts enabled, so we need to be careful not to confuse e.g. cache
 * management IPIs.
 *
 * kvm_mips_resume_mm() should be called before context switching to a different
 * process so we don't need to worry about reference counting.
 *
 * This needs to be in static kernel code to avoid exporting init_mm.
 */
void kvm_mips_suspend_mm(int cpu)
{
        cpumask_clear_cpu(cpu, mm_cpumask(current->active_mm));
        current->active_mm = &init_mm;
}
EXPORT_SYMBOL_GPL(kvm_mips_suspend_mm);

/**
 * kvm_mips_resume_mm() - Resume the current process mm.
 * @cpu:	The CPU we're running on.
 *
 * Resume the mm of the current process, after a switch back from a KVM guest
 * virtual address space (see kvm_mips_suspend_mm()).
 */
void kvm_mips_resume_mm(int cpu)
{
        cpumask_set_cpu(cpu, mm_cpumask(current->mm));
        current->active_mm = current->mm;
}
EXPORT_SYMBOL_GPL(kvm_mips_resume_mm);
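
/*
 * Illustrative sketch (not from the original file): the intended pairing of
 * the two helpers above around guest execution, on the same CPU and in the
 * same process context. The wrapper name is hypothetical.
 */
static void example_enter_and_leave_guest(int cpu)
{
        kvm_mips_suspend_mm(cpu);	/* park active_mm on init_mm */
        /* ... switch to the guest address space and run the VCPU ... */
        kvm_mips_resume_mm(cpu);	/* make current->mm active again */
}
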
v4.6
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS TLB handling, this file is part of the Linux host kernel so that
 * TLB handlers run from KSEG0
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kvm_host.h>
#include <linux/srcu.h>

#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>

#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT

#define KVM_GUEST_PC_TLB    0
#define KVM_GUEST_SP_TLB    1

#define PRIx64 "llx"

atomic_t kvm_mips_instance;
EXPORT_SYMBOL_GPL(kvm_mips_instance);

/* These function pointers are initialized once the KVM module is loaded */
kvm_pfn_t (*kvm_mips_gfn_to_pfn)(struct kvm *kvm, gfn_t gfn);
EXPORT_SYMBOL_GPL(kvm_mips_gfn_to_pfn);

void (*kvm_mips_release_pfn_clean)(kvm_pfn_t pfn);
EXPORT_SYMBOL_GPL(kvm_mips_release_pfn_clean);

bool (*kvm_mips_is_error_pfn)(kvm_pfn_t pfn);
EXPORT_SYMBOL_GPL(kvm_mips_is_error_pfn);

uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.guest_kernel_asid[smp_processor_id()] & ASID_MASK;
}

uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.guest_user_asid[smp_processor_id()] & ASID_MASK;
}

inline uint32_t kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu)
{
        return vcpu->kvm->arch.commpage_tlb;
}

/* Structure defining a TLB entry data set. */

void kvm_mips_dump_host_tlbs(void)
{
        unsigned long old_entryhi;
        unsigned long old_pagemask;
        struct kvm_mips_tlb tlb;
        unsigned long flags;
        int i;

        local_irq_save(flags);

        old_entryhi = read_c0_entryhi();
        old_pagemask = read_c0_pagemask();

        kvm_info("HOST TLBs:\n");
        kvm_info("ASID: %#lx\n", read_c0_entryhi() & ASID_MASK);

        for (i = 0; i < current_cpu_data.tlbsize; i++) {
                write_c0_index(i);
                mtc0_tlbw_hazard();

                tlb_read();
                tlbw_use_hazard();

                tlb.tlb_hi = read_c0_entryhi();
                tlb.tlb_lo0 = read_c0_entrylo0();
                tlb.tlb_lo1 = read_c0_entrylo1();
                tlb.tlb_mask = read_c0_pagemask();

                kvm_info("TLB%c%3d Hi 0x%08lx ",
                         (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
                         i, tlb.tlb_hi);
                kvm_info("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
                         (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
                         (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
                         (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
                         (tlb.tlb_lo0 >> 3) & 7);
                kvm_info("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
                         (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
                         (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
                         (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
                         (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
        }
        write_c0_entryhi(old_entryhi);
        write_c0_pagemask(old_pagemask);
        mtc0_tlbw_hazard();
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_mips_dump_host_tlbs);

void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        struct kvm_mips_tlb tlb;
        int i;

        kvm_info("Guest TLBs:\n");
        kvm_info("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0));

        for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
                tlb = vcpu->arch.guest_tlb[i];
                kvm_info("TLB%c%3d Hi 0x%08lx ",
                         (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
                         i, tlb.tlb_hi);
                kvm_info("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
                         (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
                         (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
                         (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
                         (tlb.tlb_lo0 >> 3) & 7);
                kvm_info("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
                         (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
                         (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
                         (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
                         (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
        }
}
EXPORT_SYMBOL_GPL(kvm_mips_dump_guest_tlbs);

static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
{
        int srcu_idx, err = 0;
        kvm_pfn_t pfn;

        if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
                return 0;

        srcu_idx = srcu_read_lock(&kvm->srcu);
        pfn = kvm_mips_gfn_to_pfn(kvm, gfn);

        if (kvm_mips_is_error_pfn(pfn)) {
                kvm_err("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn);
                err = -EFAULT;
                goto out;
        }

        kvm->arch.guest_pmap[gfn] = pfn;
out:
        srcu_read_unlock(&kvm->srcu, srcu_idx);
        return err;
}

/* Translate guest KSEG0 addresses to Host PA */
unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
                                                    unsigned long gva)
{
        gfn_t gfn;
        uint32_t offset = gva & ~PAGE_MASK;
        struct kvm *kvm = vcpu->kvm;

        if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) {
                kvm_err("%s/%p: Invalid gva: %#lx\n", __func__,
                        __builtin_return_address(0), gva);
                return KVM_INVALID_PAGE;
        }

        gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT);

        if (gfn >= kvm->arch.guest_pmap_npages) {
                kvm_err("%s: Invalid gfn: %#llx, GVA: %#lx\n", __func__, gfn,
                        gva);
                return KVM_INVALID_PAGE;
        }

        if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
                return KVM_INVALID_ADDR;

        return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
}
EXPORT_SYMBOL_GPL(kvm_mips_translate_guest_kseg0_to_hpa);
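
/*
 * Illustrative sketch (not from the original file): reading a 32-bit word
 * of guest KSEG0 memory through the translation helper above, in the style
 * of kvm_get_inst() at the end of this file. The helper name and the 0
 * error value are hypothetical; CKSEG0ADDR() is the kernel's macro for
 * forming an unmapped cached virtual address from a physical one.
 */
static uint32_t example_read_guest_kseg0(struct kvm_vcpu *vcpu,
                                         unsigned long gva)
{
        unsigned long hpa = kvm_mips_translate_guest_kseg0_to_hpa(vcpu, gva);

        if (hpa == KVM_INVALID_PAGE || hpa == KVM_INVALID_ADDR)
                return 0;	/* caller must treat 0 as "failed to read" */
        return *(uint32_t *) CKSEG0ADDR(hpa);
}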

/* XXXKYMA: Must be called with interrupts disabled */
/* set flush_dcache_mask == 0 if no dcache flush required */
int kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
                            unsigned long entrylo0, unsigned long entrylo1,
                            int flush_dcache_mask)
{
        unsigned long flags;
        unsigned long old_entryhi;
        int idx;

        local_irq_save(flags);

        old_entryhi = read_c0_entryhi();
        write_c0_entryhi(entryhi);
        mtc0_tlbw_hazard();

        tlb_probe();
        tlb_probe_hazard();
        idx = read_c0_index();

        if (idx > current_cpu_data.tlbsize) {
                kvm_err("%s: Invalid Index: %d\n", __func__, idx);
                kvm_mips_dump_host_tlbs();
                local_irq_restore(flags);
                return -1;
        }

        write_c0_entrylo0(entrylo0);
        write_c0_entrylo1(entrylo1);
        mtc0_tlbw_hazard();

        if (idx < 0)
                tlb_write_random();
        else
                tlb_write_indexed();
        tlbw_use_hazard();

        kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
                  vcpu->arch.pc, idx, read_c0_entryhi(),
                  read_c0_entrylo0(), read_c0_entrylo1());

        /* Flush D-cache */
        if (flush_dcache_mask) {
                if (entrylo0 & MIPS3_PG_V) {
                        ++vcpu->stat.flush_dcache_exits;
                        flush_data_cache_page((entryhi & VPN2_MASK) &
                                              ~flush_dcache_mask);
                }
                if (entrylo1 & MIPS3_PG_V) {
                        ++vcpu->stat.flush_dcache_exits;
                        flush_data_cache_page(((entryhi & VPN2_MASK) &
                                               ~flush_dcache_mask) |
                                              (0x1 << PAGE_SHIFT));
                }
        }

        /* Restore old ASID */
        write_c0_entryhi(old_entryhi);
        mtc0_tlbw_hazard();
        tlbw_use_hazard();
        local_irq_restore(flags);
        return 0;
}

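/*
 * Illustrative sketch (not from the original file): composing the
 * EntryHi/EntryLo triple for kvm_mips_host_tlb_write() the way the fault
 * handlers below do, mapping an even/odd page pair. The wrapper name is
 * hypothetical; the (0x3 << 3) | (1 << 2) | (0x1 << 1) attribute pattern
 * (cache coherency, dirty, valid) is the one used throughout this file.
 */
static int example_write_page_pair(struct kvm_vcpu *vcpu, unsigned long vaddr,
                                   kvm_pfn_t pfn0, kvm_pfn_t pfn1)
{
        unsigned long entryhi = (vaddr & (PAGE_MASK << 1)) |
                                kvm_mips_get_kernel_asid(vcpu);
        unsigned long entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) |
                                 (0x3 << 3) | (1 << 2) | (0x1 << 1);
        unsigned long entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) |
                                 (0x3 << 3) | (1 << 2) | (0x1 << 1);

        /* flush_dcache_mask == 0: no D-cache flush needed for this example */
        return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1, 0);
}
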
/* XXXKYMA: Must be called with interrupts disabled */
int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
                                    struct kvm_vcpu *vcpu)
{
        gfn_t gfn;
        kvm_pfn_t pfn0, pfn1;
        unsigned long vaddr = 0;
        unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
        int even;
        struct kvm *kvm = vcpu->kvm;
        const int flush_dcache_mask = 0;

        if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
                kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
                kvm_mips_dump_host_tlbs();
                return -1;
        }

        gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
        if (gfn >= kvm->arch.guest_pmap_npages) {
                kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
                        gfn, badvaddr);
                kvm_mips_dump_host_tlbs();
                return -1;
        }
        even = !(gfn & 0x1);
        vaddr = badvaddr & (PAGE_MASK << 1);

        if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
                return -1;

        if (kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1) < 0)
                return -1;

        if (even) {
                pfn0 = kvm->arch.guest_pmap[gfn];
                pfn1 = kvm->arch.guest_pmap[gfn ^ 0x1];
        } else {
                pfn0 = kvm->arch.guest_pmap[gfn ^ 0x1];
                pfn1 = kvm->arch.guest_pmap[gfn];
        }

        entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
        entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
                   (1 << 2) | (0x1 << 1);
        entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
                   (1 << 2) | (0x1 << 1);

        return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
                                       flush_dcache_mask);
}
EXPORT_SYMBOL_GPL(kvm_mips_handle_kseg0_tlb_fault);

int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
        struct kvm_vcpu *vcpu)
{
        kvm_pfn_t pfn0, pfn1;
        unsigned long flags, old_entryhi = 0, vaddr = 0;
        unsigned long entrylo0 = 0, entrylo1 = 0;

        pfn0 = CPHYSADDR(vcpu->arch.kseg0_commpage) >> PAGE_SHIFT;
        pfn1 = 0;
        entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
                   (1 << 2) | (0x1 << 1);
        entrylo1 = 0;

        local_irq_save(flags);

        old_entryhi = read_c0_entryhi();
        vaddr = badvaddr & (PAGE_MASK << 1);
        write_c0_entryhi(vaddr | kvm_mips_get_kernel_asid(vcpu));
        mtc0_tlbw_hazard();
        write_c0_entrylo0(entrylo0);
        mtc0_tlbw_hazard();
        write_c0_entrylo1(entrylo1);
        mtc0_tlbw_hazard();
        write_c0_index(kvm_mips_get_commpage_asid(vcpu));
        mtc0_tlbw_hazard();
        tlb_write_indexed();
        mtc0_tlbw_hazard();
        tlbw_use_hazard();

        kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0 (R): 0x%08lx, entrylo1(R): 0x%08lx\n",
                  vcpu->arch.pc, read_c0_index(), read_c0_entryhi(),
                  read_c0_entrylo0(), read_c0_entrylo1());

        /* Restore old ASID */
        write_c0_entryhi(old_entryhi);
        mtc0_tlbw_hazard();
        tlbw_use_hazard();
        local_irq_restore(flags);

        return 0;
}
EXPORT_SYMBOL_GPL(kvm_mips_handle_commpage_tlb_fault);

int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
                                         struct kvm_mips_tlb *tlb,
                                         unsigned long *hpa0,
                                         unsigned long *hpa1)
{
        unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
        struct kvm *kvm = vcpu->kvm;
        kvm_pfn_t pfn0, pfn1;

        if ((tlb->tlb_hi & VPN2_MASK) == 0) {
                pfn0 = 0;
                pfn1 = 0;
        } else {
                if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
                                           >> PAGE_SHIFT) < 0)
                        return -1;

                if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
                                           >> PAGE_SHIFT) < 0)
                        return -1;

                pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
                                            >> PAGE_SHIFT];
                pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
                                            >> PAGE_SHIFT];
        }

        if (hpa0)
                *hpa0 = pfn0 << PAGE_SHIFT;

        if (hpa1)
                *hpa1 = pfn1 << PAGE_SHIFT;

        /* Get attributes from the Guest TLB */
        entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
                                               kvm_mips_get_kernel_asid(vcpu) :
                                               kvm_mips_get_user_asid(vcpu));
        entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
                   (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V);
        entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
                   (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V);

        kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
                  tlb->tlb_lo0, tlb->tlb_lo1);

        return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
                                       tlb->tlb_mask);
}
EXPORT_SYMBOL_GPL(kvm_mips_handle_mapped_seg_tlb_fault);

int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
{
        int i;
        int index = -1;
        struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb;

        for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
                if (TLB_HI_VPN2_HIT(tlb[i], entryhi) &&
                    TLB_HI_ASID_HIT(tlb[i], entryhi)) {
                        index = i;
                        break;
                }
        }

        kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
                  __func__, entryhi, index, tlb[i].tlb_lo0, tlb[i].tlb_lo1);

        return index;
}
EXPORT_SYMBOL_GPL(kvm_mips_guest_tlb_lookup);

int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr)
{
        unsigned long old_entryhi, flags;
        int idx;

        local_irq_save(flags);

        old_entryhi = read_c0_entryhi();

        if (KVM_GUEST_KERNEL_MODE(vcpu))
                write_c0_entryhi((vaddr & VPN2_MASK) |
                                 kvm_mips_get_kernel_asid(vcpu));
        else {
                write_c0_entryhi((vaddr & VPN2_MASK) |
                                 kvm_mips_get_user_asid(vcpu));
        }

        mtc0_tlbw_hazard();

        tlb_probe();
        tlb_probe_hazard();
        idx = read_c0_index();

        /* Restore old ASID */
        write_c0_entryhi(old_entryhi);
        mtc0_tlbw_hazard();
        tlbw_use_hazard();

        local_irq_restore(flags);

        kvm_debug("Host TLB lookup, %#lx, idx: %2d\n", vaddr, idx);

        return idx;
}
EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_lookup);

int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
{
        int idx;
        unsigned long flags, old_entryhi;

        local_irq_save(flags);

        old_entryhi = read_c0_entryhi();

        write_c0_entryhi((va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu));
        mtc0_tlbw_hazard();

        tlb_probe();
        tlb_probe_hazard();
        idx = read_c0_index();

        if (idx >= current_cpu_data.tlbsize)
                BUG();

        if (idx > 0) {
                write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                mtc0_tlbw_hazard();

                write_c0_entrylo0(0);
                mtc0_tlbw_hazard();

                write_c0_entrylo1(0);
                mtc0_tlbw_hazard();

                tlb_write_indexed();
                mtc0_tlbw_hazard();
        }

        write_c0_entryhi(old_entryhi);
        mtc0_tlbw_hazard();
        tlbw_use_hazard();

        local_irq_restore(flags);

        if (idx > 0)
                kvm_debug("%s: Invalidated entryhi %#lx @ idx %d\n", __func__,
                          (va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu), idx);

        return 0;
}
EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_inv);

void kvm_mips_flush_host_tlb(int skip_kseg0)
{
        unsigned long flags;
        unsigned long old_entryhi, entryhi;
        unsigned long old_pagemask;
        int entry = 0;
        int maxentry = current_cpu_data.tlbsize;

        local_irq_save(flags);

        old_entryhi = read_c0_entryhi();
        old_pagemask = read_c0_pagemask();

        /* Blast 'em all away. */
        for (entry = 0; entry < maxentry; entry++) {
                write_c0_index(entry);
                mtc0_tlbw_hazard();

                if (skip_kseg0) {
                        tlb_read();
                        tlbw_use_hazard();

                        entryhi = read_c0_entryhi();

                        /* Don't blow away guest kernel entries */
                        if (KVM_GUEST_KSEGX(entryhi) == KVM_GUEST_KSEG0)
                                continue;
                }

                /* Make sure all entries differ. */
                write_c0_entryhi(UNIQUE_ENTRYHI(entry));
                mtc0_tlbw_hazard();
                write_c0_entrylo0(0);
                mtc0_tlbw_hazard();
                write_c0_entrylo1(0);
                mtc0_tlbw_hazard();

                tlb_write_indexed();
                mtc0_tlbw_hazard();
        }

        tlbw_use_hazard();

        write_c0_entryhi(old_entryhi);
        write_c0_pagemask(old_pagemask);
        mtc0_tlbw_hazard();
        tlbw_use_hazard();

        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_mips_flush_host_tlb);

void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
                             struct kvm_vcpu *vcpu)
{
        unsigned long asid = asid_cache(cpu);

        asid += ASID_INC;
        if (!(asid & ASID_MASK)) {
                if (cpu_has_vtag_icache)
                        flush_icache_all();

                kvm_local_flush_tlb_all();      /* start new asid cycle */

                if (!asid)      /* fix version if needed */
                        asid = ASID_FIRST_VERSION;
        }

        cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}

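/*
 * Worked example (illustrative, assuming the common R4000-style layout
 * where ASID_MASK is 0xff, ASID_INC is 1 and ASID_FIRST_VERSION is 0x100;
 * none of these are defined in this file): if asid_cache(cpu) is 0x1ff,
 * kvm_get_new_mmu_context() computes asid = 0x200. The low 8 bits wrapped
 * to zero, so the TLB is flushed and a new ASID "version" (the high bits)
 * begins; only if the whole counter wraps to 0 is it bumped back to
 * ASID_FIRST_VERSION.
 */
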
void kvm_local_flush_tlb_all(void)
{
        unsigned long flags;
        unsigned long old_ctx;
        int entry = 0;

        local_irq_save(flags);
        /* Save old context and create impossible VPN2 value */
        old_ctx = read_c0_entryhi();
        write_c0_entrylo0(0);
        write_c0_entrylo1(0);

        /* Blast 'em all away. */
        while (entry < current_cpu_data.tlbsize) {
                /* Make sure all entries differ. */
                write_c0_entryhi(UNIQUE_ENTRYHI(entry));
                write_c0_index(entry);
                mtc0_tlbw_hazard();
                tlb_write_indexed();
                entry++;
        }
        tlbw_use_hazard();
        write_c0_entryhi(old_ctx);
        mtc0_tlbw_hazard();

        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_local_flush_tlb_all);

/**
 * kvm_mips_migrate_count() - Migrate timer.
 * @vcpu:	Virtual CPU.
 *
 * Migrate CP0_Count hrtimer to the current CPU by cancelling and restarting it
 * if it was running prior to being cancelled.
 *
 * Must be called when the VCPU is migrated to a different CPU to ensure that
 * timer expiry during guest execution interrupts the guest and causes the
 * interrupt to be delivered in a timely manner.
 */
static void kvm_mips_migrate_count(struct kvm_vcpu *vcpu)
{
        if (hrtimer_cancel(&vcpu->arch.comparecount_timer))
                hrtimer_restart(&vcpu->arch.comparecount_timer);
}

/* Restore ASID once we are scheduled back after preemption */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        unsigned long flags;
        int newasid = 0;

        kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);

        /* Allocate new kernel and user ASIDs if needed */

        local_irq_save(flags);

        if ((vcpu->arch.guest_kernel_asid[cpu] ^ asid_cache(cpu)) &
                                                        ASID_VERSION_MASK) {
                kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
                vcpu->arch.guest_kernel_asid[cpu] =
                    vcpu->arch.guest_kernel_mm.context.asid[cpu];
                kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
                vcpu->arch.guest_user_asid[cpu] =
                    vcpu->arch.guest_user_mm.context.asid[cpu];
                newasid++;

                kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
                          cpu_context(cpu, current->mm));
                kvm_debug("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
                          cpu, vcpu->arch.guest_kernel_asid[cpu]);
                kvm_debug("[%d]: Allocated new ASID for Guest User: %#x\n", cpu,
                          vcpu->arch.guest_user_asid[cpu]);
        }

        if (vcpu->arch.last_sched_cpu != cpu) {
                kvm_debug("[%d->%d]KVM VCPU[%d] switch\n",
                          vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
                /*
                 * Migrate the timer interrupt to the current CPU so that it
                 * always interrupts the guest and synchronously triggers a
                 * guest timer interrupt.
                 */
                kvm_mips_migrate_count(vcpu);
        }

        if (!newasid) {
                /*
                 * If we preempted while the guest was executing, then reload
                 * the pre-empted ASID
                 */
                if (current->flags & PF_VCPU) {
                        write_c0_entryhi(vcpu->arch.
                                         preempt_entryhi & ASID_MASK);
                        ehb();
                }
        } else {
                /* New ASIDs were allocated for the VM */

                /*
                 * Were we in guest context? If so then the pre-empted ASID is
                 * no longer valid, we need to set it to what it should be based
                 * on the mode of the Guest (Kernel/User)
                 */
                if (current->flags & PF_VCPU) {
                        if (KVM_GUEST_KERNEL_MODE(vcpu))
                                write_c0_entryhi(vcpu->arch.
                                                 guest_kernel_asid[cpu] &
                                                 ASID_MASK);
                        else
                                write_c0_entryhi(vcpu->arch.
                                                 guest_user_asid[cpu] &
                                                 ASID_MASK);
                        ehb();
                }
        }

        /* restore guest state to registers */
        kvm_mips_callbacks->vcpu_set_regs(vcpu);

        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_arch_vcpu_load);

/* ASID can change if another task is scheduled during preemption */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        unsigned long flags;
        uint32_t cpu;

        local_irq_save(flags);

        cpu = smp_processor_id();

        vcpu->arch.preempt_entryhi = read_c0_entryhi();
        vcpu->arch.last_sched_cpu = cpu;

        /* save guest state in registers */
        kvm_mips_callbacks->vcpu_get_regs(vcpu);

        if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
             ASID_VERSION_MASK)) {
                kvm_debug("%s: Dropping MMU Context:  %#lx\n", __func__,
                          cpu_context(cpu, current->mm));
                drop_mmu_context(current->mm, cpu);
        }
        write_c0_entryhi(cpu_asid(cpu, current->mm));
        ehb();

        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_arch_vcpu_put);

uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        unsigned long paddr, flags, vpn2, asid;
        uint32_t inst;
        int index;

        if (KVM_GUEST_KSEGX((unsigned long) opc) < KVM_GUEST_KSEG0 ||
            KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
                local_irq_save(flags);
                index = kvm_mips_host_tlb_lookup(vcpu, (unsigned long) opc);
                if (index >= 0) {
                        inst = *(opc);
                } else {
                        vpn2 = (unsigned long) opc & VPN2_MASK;
                        asid = kvm_read_c0_guest_entryhi(cop0) & ASID_MASK;
                        index = kvm_mips_guest_tlb_lookup(vcpu, vpn2 | asid);
                        if (index < 0) {
                                kvm_err("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
                                        __func__, opc, vcpu, read_c0_entryhi());
                                kvm_mips_dump_host_tlbs();
                                local_irq_restore(flags);
                                return KVM_INVALID_INST;
                        }
                        kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
                                                             &vcpu->arch.
                                                             guest_tlb[index],
                                                             NULL, NULL);
                        inst = *(opc);
                }
                local_irq_restore(flags);
        } else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG0) {
                paddr =
                    kvm_mips_translate_guest_kseg0_to_hpa(vcpu,
                                                          (unsigned long) opc);
                inst = *(uint32_t *) CKSEG0ADDR(paddr);
        } else {
                kvm_err("%s: illegal address: %p\n", __func__, opc);
                return KVM_INVALID_INST;
        }

        return inst;
}
EXPORT_SYMBOL_GPL(kvm_get_inst);
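
/*
 * Illustrative sketch (not from the original file): fetching the faulting
 * instruction for emulation with kvm_get_inst(), as the trap handlers do.
 * The caller name and the decode step are hypothetical.
 */
static int example_emulate_at(uint32_t *opc, struct kvm_vcpu *vcpu)
{
        uint32_t inst = kvm_get_inst(opc, vcpu);

        if (inst == KVM_INVALID_INST)
                return -EFAULT;	/* could not read the guest instruction */

        /* ... decode and emulate 'inst' here ... */
        return 0;
}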