/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS TLB handling, this file is part of the Linux host kernel so that
 * TLB handlers run from KSEG0
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kvm_host.h>
#include <linux/srcu.h>

#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/mipsregs.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>
#include <asm/tlbdebug.h>

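/*
 * KVM/MIPS does not support the MT ASE; temporarily undefining CONFIG_MIPS_MT
 * here makes r4kcache.h provide the plain (non-MT) cache op variants.
 */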
#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT

unsigned long GUESTID_MASK;
EXPORT_SYMBOL_GPL(GUESTID_MASK);
unsigned long GUESTID_FIRST_VERSION;
EXPORT_SYMBOL_GPL(GUESTID_FIRST_VERSION);
unsigned long GUESTID_VERSION_MASK;
EXPORT_SYMBOL_GPL(GUESTID_VERSION_MASK);

static u32 kvm_mips_get_root_asid(struct kvm_vcpu *vcpu)
{
	struct mm_struct *gpa_mm = &vcpu->kvm->arch.gpa_mm;

	if (cpu_has_guestid)
		return 0;
	else
		return cpu_asid(smp_processor_id(), gpa_mm);
}

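/*
 * Probe the host TLB for @entryhi and, on a hit, invalidate the matching
 * entry by rewriting it with a unique non-matching EntryHi (UNIQUE_ENTRYHI)
 * and zeroed EntryLo values. Returns the probed index, negative if there was
 * no match. Called with interrupts disabled and EntryHi saved by the caller.
 */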
static int _kvm_mips_host_tlb_inv(unsigned long entryhi)
{
	int idx;

	write_c0_entryhi(entryhi);
	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	BUG_ON(idx >= current_cpu_data.tlbsize);

	if (idx >= 0) {
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		mtc0_tlbw_hazard();

		tlb_write_indexed();
		tlbw_use_hazard();
	}

	return idx;
}

/* GuestID management */

/**
 * clear_root_gid() - Set GuestCtl1.RID for normal root operation.
 */
static inline void clear_root_gid(void)
{
	if (cpu_has_guestid) {
		clear_c0_guestctl1(MIPS_GCTL1_RID);
		mtc0_tlbw_hazard();
	}
}

/**
 * set_root_gid_to_guest_gid() - Set GuestCtl1.RID to match GuestCtl1.ID.
 *
 * Sets the root GuestID to match the current guest GuestID, for TLB operation
 * on the GPA->RPA mappings in the root TLB.
 *
 * The caller must be sure to disable HTW while the root GID is set, and
 * possibly longer if TLB registers are modified.
 */
static inline void set_root_gid_to_guest_gid(void)
{
	unsigned int guestctl1;

	if (cpu_has_guestid) {
		back_to_back_c0_hazard();
		guestctl1 = read_c0_guestctl1();
		guestctl1 = (guestctl1 & ~MIPS_GCTL1_RID) |
			((guestctl1 & MIPS_GCTL1_ID) >> MIPS_GCTL1_ID_SHIFT)
						<< MIPS_GCTL1_RID_SHIFT;
		write_c0_guestctl1(guestctl1);
		mtc0_tlbw_hazard();
	}
}
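
/*
 * Typical usage (as in kvm_vz_host_tlb_inv() below) brackets the root TLB
 * operation:
 *
 *	local_irq_save(flags);
 *	htw_stop();
 *	set_root_gid_to_guest_gid();
 *	... probe/read/write root TLB ...
 *	clear_root_gid();
 *	htw_start();
 *	local_irq_restore(flags);
 */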

int kvm_vz_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
{
	int idx;
	unsigned long flags, old_entryhi;

	local_irq_save(flags);
	htw_stop();

	/* Set root GuestID for root probe and write of guest TLB entry */
	set_root_gid_to_guest_gid();

	old_entryhi = read_c0_entryhi();

	idx = _kvm_mips_host_tlb_inv((va & VPN2_MASK) |
				     kvm_mips_get_root_asid(vcpu));

	write_c0_entryhi(old_entryhi);
	clear_root_gid();
	mtc0_tlbw_hazard();

	htw_start();
	local_irq_restore(flags);

	/*
	 * We don't want to get reserved instruction exceptions for missing tlb
	 * entries.
	 */
	if (cpu_has_vtag_icache)
		flush_icache_all();

	if (idx >= 0)
		kvm_debug("%s: Invalidated root entryhi %#lx @ idx %d\n",
			  __func__, (va & VPN2_MASK) |
				    kvm_mips_get_root_asid(vcpu), idx);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_vz_host_tlb_inv);

/**
 * kvm_vz_guest_tlb_lookup() - Lookup a guest VZ TLB mapping.
 * @vcpu: KVM VCPU pointer.
 * @gva: Guest virtual address in a TLB mapped guest segment.
 * @gpa: Pointer to output guest physical address it maps to.
 *
 * Converts a guest virtual address in a guest TLB mapped segment to a guest
 * physical address, by probing the guest TLB.
 *
 * Returns: 0 if guest TLB mapping exists for @gva. *@gpa will have been
 *		written.
 *		-EFAULT if no guest TLB mapping exists for @gva. *@gpa may not
 *		have been written.
 */
int kvm_vz_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long gva,
			    unsigned long *gpa)
{
	unsigned long o_entryhi, o_entrylo[2], o_pagemask;
	unsigned int o_index;
	unsigned long entrylo[2], pagemask, pagemaskbit, pa;
	unsigned long flags;
	int index;

	/* Probe the guest TLB for a mapping */
	local_irq_save(flags);
	/* Set root GuestID for root probe of guest TLB entry */
	htw_stop();
	set_root_gid_to_guest_gid();

	o_entryhi = read_gc0_entryhi();
	o_index = read_gc0_index();

	write_gc0_entryhi((o_entryhi & 0x3ff) | (gva & ~0xfffl));
	mtc0_tlbw_hazard();
	guest_tlb_probe();
	tlb_probe_hazard();

	index = read_gc0_index();
	if (index < 0) {
		/* No match, fail */
		write_gc0_entryhi(o_entryhi);
		write_gc0_index(o_index);

		clear_root_gid();
		htw_start();
		local_irq_restore(flags);
		return -EFAULT;
	}

	/* Match! read the TLB entry */
	o_entrylo[0] = read_gc0_entrylo0();
	o_entrylo[1] = read_gc0_entrylo1();
	o_pagemask = read_gc0_pagemask();

	mtc0_tlbr_hazard();
	guest_tlb_read();
	tlb_read_hazard();

	entrylo[0] = read_gc0_entrylo0();
	entrylo[1] = read_gc0_entrylo1();
	pagemask = ~read_gc0_pagemask() & ~0x1fffl;

	write_gc0_entryhi(o_entryhi);
	write_gc0_index(o_index);
	write_gc0_entrylo0(o_entrylo[0]);
	write_gc0_entrylo1(o_entrylo[1]);
	write_gc0_pagemask(o_pagemask);

	clear_root_gid();
	htw_start();
	local_irq_restore(flags);

	/* Select one of the EntryLo values and interpret the GPA */
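	/*
	 * pagemask & (pagemask - 1) clears the lowest set bit of pagemask,
	 * so the XOR isolates that bit; shifting it right by one yields the
	 * GVA bit that selects between the even (EntryLo0) and odd
	 * (EntryLo1) page of the pair.
	 */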
	pagemaskbit = (pagemask ^ (pagemask & (pagemask - 1))) >> 1;
	pa = entrylo[!!(gva & pagemaskbit)];

	/*
	 * TLB entry may have become invalid since TLB probe if physical FTLB
	 * entries are shared between threads (e.g. I6400).
	 */
	if (!(pa & ENTRYLO_V))
		return -EFAULT;

	/*
	 * Note, this doesn't take guest MIPS32 XPA into account, where PFN is
	 * split with XI/RI in the middle.
	 */
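	/*
	 * EntryLo.PFN sits at bit 6 and holds PA >> 12, so shifting left by
	 * 6 rebuilds the physical page address; the flag bits shifted into
	 * the low 12 bits are masked off and the page offset is taken from
	 * the GVA below.
	 */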
	pa = (pa << 6) & ~0xfffl;
	pa |= gva & ~(pagemask | pagemaskbit);

	*gpa = pa;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_vz_guest_tlb_lookup);

/**
 * kvm_vz_local_flush_roottlb_all_guests() - Flush all root TLB entries for
 * guests.
 *
 * Invalidate all entries in root tlb which are GPA mappings.
 */
void kvm_vz_local_flush_roottlb_all_guests(void)
{
	unsigned long flags;
	unsigned long old_entryhi, old_pagemask, old_guestctl1;
	int entry;

	if (WARN_ON(!cpu_has_guestid))
		return;

	local_irq_save(flags);
	htw_stop();

	/* TLBR may clobber EntryHi.ASID, PageMask, and GuestCtl1.RID */
	old_entryhi = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();
	old_guestctl1 = read_c0_guestctl1();

	/*
	 * Invalidate guest entries in root TLB while leaving root entries
	 * intact when possible.
	 */
	for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
		write_c0_index(entry);
		mtc0_tlbw_hazard();
		tlb_read();
		tlb_read_hazard();

		/* Don't invalidate non-guest (RVA) mappings in the root TLB */
		if (!(read_c0_guestctl1() & MIPS_GCTL1_RID))
			continue;

		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		write_c0_guestctl1(0);
		mtc0_tlbw_hazard();
		tlb_write_indexed();
	}

	write_c0_entryhi(old_entryhi);
	write_c0_pagemask(old_pagemask);
	write_c0_guestctl1(old_guestctl1);
	tlbw_use_hazard();

	htw_start();
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_vz_local_flush_roottlb_all_guests);

/**
 * kvm_vz_local_flush_guesttlb_all() - Flush all guest TLB entries.
 *
 * Invalidate all entries in guest tlb irrespective of guestid.
 */
void kvm_vz_local_flush_guesttlb_all(void)
{
	unsigned long flags;
	unsigned long old_index;
	unsigned long old_entryhi;
	unsigned long old_entrylo[2];
	unsigned long old_pagemask;
	int entry;
	u64 cvmmemctl2 = 0;

	local_irq_save(flags);

	/* Preserve all clobbered guest registers */
	old_index = read_gc0_index();
	old_entryhi = read_gc0_entryhi();
	old_entrylo[0] = read_gc0_entrylo0();
	old_entrylo[1] = read_gc0_entrylo1();
	old_pagemask = read_gc0_pagemask();

	switch (current_cpu_type()) {
	case CPU_CAVIUM_OCTEON3:
		/* Inhibit machine check due to multiple matching TLB entries */
		cvmmemctl2 = read_c0_cvmmemctl2();
		cvmmemctl2 |= CVMMEMCTL2_INHIBITTS;
		write_c0_cvmmemctl2(cvmmemctl2);
		break;
	}

	/* Invalidate guest entries in guest TLB */
	write_gc0_entrylo0(0);
	write_gc0_entrylo1(0);
	write_gc0_pagemask(0);
	for (entry = 0; entry < current_cpu_data.guest.tlbsize; entry++) {
		/* Make sure all entries differ. */
		write_gc0_index(entry);
		write_gc0_entryhi(UNIQUE_GUEST_ENTRYHI(entry));
		mtc0_tlbw_hazard();
		guest_tlb_write_indexed();
	}

	if (cvmmemctl2) {
		cvmmemctl2 &= ~CVMMEMCTL2_INHIBITTS;
		write_c0_cvmmemctl2(cvmmemctl2);
	}

	write_gc0_index(old_index);
	write_gc0_entryhi(old_entryhi);
	write_gc0_entrylo0(old_entrylo[0]);
	write_gc0_entrylo1(old_entrylo[1]);
	write_gc0_pagemask(old_pagemask);
	tlbw_use_hazard();

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_vz_local_flush_guesttlb_all);

/**
 * kvm_vz_save_guesttlb() - Save a range of guest TLB entries.
 * @buf: Buffer to write TLB entries into.
 * @index: Start index.
 * @count: Number of entries to save.
 *
 * Save a range of guest TLB entries. The caller must ensure interrupts are
 * disabled.
 */
void kvm_vz_save_guesttlb(struct kvm_mips_tlb *buf, unsigned int index,
			  unsigned int count)
{
	unsigned int end = index + count;
	unsigned long old_entryhi, old_entrylo0, old_entrylo1, old_pagemask;
	unsigned int guestctl1 = 0;
	int old_index, i;

	/* Save registers we're about to clobber */
	old_index = read_gc0_index();
	old_entryhi = read_gc0_entryhi();
	old_entrylo0 = read_gc0_entrylo0();
	old_entrylo1 = read_gc0_entrylo1();
	old_pagemask = read_gc0_pagemask();

	/* Set root GuestID for root probe */
	htw_stop();
	set_root_gid_to_guest_gid();
	if (cpu_has_guestid)
		guestctl1 = read_c0_guestctl1();

	/* Read each entry from guest TLB */
	for (i = index; i < end; ++i, ++buf) {
		write_gc0_index(i);

		mtc0_tlbr_hazard();
		guest_tlb_read();
		tlb_read_hazard();

		if (cpu_has_guestid &&
		    (read_c0_guestctl1() ^ guestctl1) & MIPS_GCTL1_RID) {
			/* Entry invalid or belongs to another guest */
			buf->tlb_hi = UNIQUE_GUEST_ENTRYHI(i);
			buf->tlb_lo[0] = 0;
			buf->tlb_lo[1] = 0;
			buf->tlb_mask = 0;
		} else {
			/* Entry belongs to the right guest */
			buf->tlb_hi = read_gc0_entryhi();
			buf->tlb_lo[0] = read_gc0_entrylo0();
			buf->tlb_lo[1] = read_gc0_entrylo1();
			buf->tlb_mask = read_gc0_pagemask();
		}
	}

	/* Clear root GuestID again */
	clear_root_gid();
	htw_start();

	/* Restore clobbered registers */
	write_gc0_index(old_index);
	write_gc0_entryhi(old_entryhi);
	write_gc0_entrylo0(old_entrylo0);
	write_gc0_entrylo1(old_entrylo1);
	write_gc0_pagemask(old_pagemask);

	tlbw_use_hazard();
}
EXPORT_SYMBOL_GPL(kvm_vz_save_guesttlb);

/**
 * kvm_vz_load_guesttlb() - Load a range of guest TLB entries.
 * @buf: Buffer to read TLB entries from.
 * @index: Start index.
 * @count: Number of entries to load.
 *
 * Load a range of guest TLB entries. The caller must ensure interrupts are
 * disabled.
 */
void kvm_vz_load_guesttlb(const struct kvm_mips_tlb *buf, unsigned int index,
			  unsigned int count)
{
	unsigned int end = index + count;
	unsigned long old_entryhi, old_entrylo0, old_entrylo1, old_pagemask;
	int old_index, i;

	/* Save registers we're about to clobber */
	old_index = read_gc0_index();
	old_entryhi = read_gc0_entryhi();
	old_entrylo0 = read_gc0_entrylo0();
	old_entrylo1 = read_gc0_entrylo1();
	old_pagemask = read_gc0_pagemask();

	/* Set root GuestID for root probe */
	htw_stop();
	set_root_gid_to_guest_gid();

	/* Write each entry to guest TLB */
	for (i = index; i < end; ++i, ++buf) {
		write_gc0_index(i);
		write_gc0_entryhi(buf->tlb_hi);
		write_gc0_entrylo0(buf->tlb_lo[0]);
		write_gc0_entrylo1(buf->tlb_lo[1]);
		write_gc0_pagemask(buf->tlb_mask);

		mtc0_tlbw_hazard();
		guest_tlb_write_indexed();
	}

	/* Clear root GuestID again */
	clear_root_gid();
	htw_start();

	/* Restore clobbered registers */
	write_gc0_index(old_index);
	write_gc0_entryhi(old_entryhi);
	write_gc0_entrylo0(old_entrylo0);
	write_gc0_entrylo1(old_entrylo1);
	write_gc0_pagemask(old_pagemask);

	tlbw_use_hazard();
}
EXPORT_SYMBOL_GPL(kvm_vz_load_guesttlb);

#ifdef CONFIG_CPU_LOONGSON64
void kvm_loongson_clear_guest_vtlb(void)
{
	int idx = read_gc0_index();

	/* Set root GuestID for root probe and write of guest TLB entry */
	set_root_gid_to_guest_gid();

	write_gc0_index(0);
	guest_tlbinvf();
	write_gc0_index(idx);

	clear_root_gid();
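	/* Flush the Loongson ITLB/DTLB micro-TLBs via the Diag register */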
	set_c0_diag(LOONGSON_DIAG_ITLB | LOONGSON_DIAG_DTLB);
}
EXPORT_SYMBOL_GPL(kvm_loongson_clear_guest_vtlb);

void kvm_loongson_clear_guest_ftlb(void)
{
	int i;
	int idx = read_gc0_index();

	/* Set root GuestID for root probe and write of guest TLB entry */
	set_root_gid_to_guest_gid();

	for (i = current_cpu_data.tlbsizevtlb;
	     i < (current_cpu_data.tlbsizevtlb +
		  current_cpu_data.tlbsizeftlbsets);
	     i++) {
		write_gc0_index(i);
		guest_tlbinvf();
	}
	write_gc0_index(idx);

	clear_root_gid();
	set_c0_diag(LOONGSON_DIAG_ITLB | LOONGSON_DIAG_DTLB);
}
EXPORT_SYMBOL_GPL(kvm_loongson_clear_guest_ftlb);
#endif
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS TLB handling, this file is part of the Linux host kernel so that
 * TLB handlers run from KSEG0
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kvm_host.h>
#include <linux/srcu.h>

#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>

#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT

#define KVM_GUEST_PC_TLB    0
#define KVM_GUEST_SP_TLB    1

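/* The kernel has no <inttypes.h>; define a local PRIx64 for format strings */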
#define PRIx64 "llx"

atomic_t kvm_mips_instance;
EXPORT_SYMBOL_GPL(kvm_mips_instance);

/* These function pointers are initialized once the KVM module is loaded */
kvm_pfn_t (*kvm_mips_gfn_to_pfn)(struct kvm *kvm, gfn_t gfn);
EXPORT_SYMBOL_GPL(kvm_mips_gfn_to_pfn);

void (*kvm_mips_release_pfn_clean)(kvm_pfn_t pfn);
EXPORT_SYMBOL_GPL(kvm_mips_release_pfn_clean);

bool (*kvm_mips_is_error_pfn)(kvm_pfn_t pfn);
EXPORT_SYMBOL_GPL(kvm_mips_is_error_pfn);

uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.guest_kernel_asid[smp_processor_id()] & ASID_MASK;
}

uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.guest_user_asid[smp_processor_id()] & ASID_MASK;
}

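/*
 * Despite the name this returns the fixed TLB index reserved for the
 * commpage; it is used as the write index in
 * kvm_mips_handle_commpage_tlb_fault() below.
 */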
inline uint32_t kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.commpage_tlb;
}

/* Structure defining a TLB entry data set. */

void kvm_mips_dump_host_tlbs(void)
{
	unsigned long old_entryhi;
	unsigned long old_pagemask;
	struct kvm_mips_tlb tlb;
	unsigned long flags;
	int i;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();

	kvm_info("HOST TLBs:\n");
	kvm_info("ASID: %#lx\n", read_c0_entryhi() & ASID_MASK);

	for (i = 0; i < current_cpu_data.tlbsize; i++) {
		write_c0_index(i);
		mtc0_tlbw_hazard();

		tlb_read();
		tlbw_use_hazard();

		tlb.tlb_hi = read_c0_entryhi();
		tlb.tlb_lo0 = read_c0_entrylo0();
		tlb.tlb_lo1 = read_c0_entrylo1();
		tlb.tlb_mask = read_c0_pagemask();

		kvm_info("TLB%c%3d Hi 0x%08lx ",
			 (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
			 i, tlb.tlb_hi);
		kvm_info("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
			 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
			 (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
			 (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
			 (tlb.tlb_lo0 >> 3) & 7);
		kvm_info("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
			 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
			 (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
			 (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
			 (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
	}
	write_c0_entryhi(old_entryhi);
	write_c0_pagemask(old_pagemask);
	mtc0_tlbw_hazard();
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_mips_dump_host_tlbs);

void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_mips_tlb tlb;
	int i;

	kvm_info("Guest TLBs:\n");
	kvm_info("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0));

	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
		tlb = vcpu->arch.guest_tlb[i];
		kvm_info("TLB%c%3d Hi 0x%08lx ",
			 (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
			 i, tlb.tlb_hi);
		kvm_info("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
			 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
			 (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
			 (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
			 (tlb.tlb_lo0 >> 3) & 7);
		kvm_info("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
			 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
			 (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
			 (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
			 (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
	}
}
EXPORT_SYMBOL_GPL(kvm_mips_dump_guest_tlbs);

static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
{
	int srcu_idx, err = 0;
	kvm_pfn_t pfn;

	if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
		return 0;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	pfn = kvm_mips_gfn_to_pfn(kvm, gfn);

	if (kvm_mips_is_error_pfn(pfn)) {
		kvm_err("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn);
		err = -EFAULT;
		goto out;
	}

	kvm->arch.guest_pmap[gfn] = pfn;
out:
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return err;
}

/* Translate guest KSEG0 addresses to Host PA */
unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
						    unsigned long gva)
{
	gfn_t gfn;
	uint32_t offset = gva & ~PAGE_MASK;
	struct kvm *kvm = vcpu->kvm;

	if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) {
		kvm_err("%s/%p: Invalid gva: %#lx\n", __func__,
			__builtin_return_address(0), gva);
		return KVM_INVALID_PAGE;
	}

	gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT);

	if (gfn >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: %#llx, GVA: %#lx\n", __func__, gfn,
			gva);
		return KVM_INVALID_PAGE;
	}

	if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
		return KVM_INVALID_ADDR;

	return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
}
EXPORT_SYMBOL_GPL(kvm_mips_translate_guest_kseg0_to_hpa);

/* XXXKYMA: Must be called with interrupts disabled */
/* set flush_dcache_mask == 0 if no dcache flush required */
int kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
			    unsigned long entrylo0, unsigned long entrylo1,
			    int flush_dcache_mask)
{
	unsigned long flags;
	unsigned long old_entryhi;
	int idx;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	write_c0_entryhi(entryhi);
	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	if (idx >= current_cpu_data.tlbsize) {
		kvm_err("%s: Invalid Index: %d\n", __func__, idx);
		kvm_mips_dump_host_tlbs();
		local_irq_restore(flags);
		return -1;
	}

	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();

	if (idx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();
	tlbw_use_hazard();

	kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
		  vcpu->arch.pc, idx, read_c0_entryhi(),
		  read_c0_entrylo0(), read_c0_entrylo1());

	/* Flush D-cache */
	if (flush_dcache_mask) {
		if (entrylo0 & MIPS3_PG_V) {
			++vcpu->stat.flush_dcache_exits;
			flush_data_cache_page((entryhi & VPN2_MASK) &
					      ~flush_dcache_mask);
		}
		if (entrylo1 & MIPS3_PG_V) {
			++vcpu->stat.flush_dcache_exits;
			flush_data_cache_page(((entryhi & VPN2_MASK) &
					       ~flush_dcache_mask) |
					      (0x1 << PAGE_SHIFT));
		}
	}

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();
	local_irq_restore(flags);
	return 0;
}

/* XXXKYMA: Must be called with interrupts disabled */
int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
				    struct kvm_vcpu *vcpu)
{
	gfn_t gfn;
	kvm_pfn_t pfn0, pfn1;
	unsigned long vaddr = 0;
	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
	int even;
	struct kvm *kvm = vcpu->kvm;
	const int flush_dcache_mask = 0;

	if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
		kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
		kvm_mips_dump_host_tlbs();
		return -1;
	}

	gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
	if (gfn >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
			gfn, badvaddr);
		kvm_mips_dump_host_tlbs();
		return -1;
	}
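	/*
	 * Each TLB entry maps a pair of adjacent virtual pages, so map both
	 * gfn and its buddy (gfn ^ 1) and order the two PFNs by parity.
	 */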
	even = !(gfn & 0x1);
	vaddr = badvaddr & (PAGE_MASK << 1);

	if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
		return -1;

	if (kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1) < 0)
		return -1;

	if (even) {
		pfn0 = kvm->arch.guest_pmap[gfn];
		pfn1 = kvm->arch.guest_pmap[gfn ^ 0x1];
	} else {
		pfn0 = kvm->arch.guest_pmap[gfn ^ 0x1];
		pfn1 = kvm->arch.guest_pmap[gfn];
	}

	entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
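	/* EntryLo: C = 3 (cacheable, noncoherent), D = 1, V = 1 */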
	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
		   (1 << 2) | (0x1 << 1);
	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
		   (1 << 2) | (0x1 << 1);

	return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
				       flush_dcache_mask);
}
EXPORT_SYMBOL_GPL(kvm_mips_handle_kseg0_tlb_fault);

int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
				       struct kvm_vcpu *vcpu)
{
	kvm_pfn_t pfn0, pfn1;
	unsigned long flags, old_entryhi = 0, vaddr = 0;
	unsigned long entrylo0 = 0, entrylo1 = 0;

	pfn0 = CPHYSADDR(vcpu->arch.kseg0_commpage) >> PAGE_SHIFT;
	pfn1 = 0;
	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
		   (1 << 2) | (0x1 << 1);
	entrylo1 = 0;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	vaddr = badvaddr & (PAGE_MASK << 1);
	write_c0_entryhi(vaddr | kvm_mips_get_kernel_asid(vcpu));
	mtc0_tlbw_hazard();
	write_c0_entrylo0(entrylo0);
	mtc0_tlbw_hazard();
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	write_c0_index(kvm_mips_get_commpage_asid(vcpu));
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
		  vcpu->arch.pc, read_c0_index(), read_c0_entryhi(),
		  read_c0_entrylo0(), read_c0_entrylo1());

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();
	local_irq_restore(flags);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_mips_handle_commpage_tlb_fault);

int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
					 struct kvm_mips_tlb *tlb,
					 unsigned long *hpa0,
					 unsigned long *hpa1)
{
	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
	struct kvm *kvm = vcpu->kvm;
	kvm_pfn_t pfn0, pfn1;

	if ((tlb->tlb_hi & VPN2_MASK) == 0) {
		pfn0 = 0;
		pfn1 = 0;
	} else {
		if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
				      >> PAGE_SHIFT) < 0)
			return -1;

		if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
				      >> PAGE_SHIFT) < 0)
			return -1;

		pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
					    >> PAGE_SHIFT];
		pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
					    >> PAGE_SHIFT];
	}

	if (hpa0)
		*hpa0 = pfn0 << PAGE_SHIFT;

	if (hpa1)
		*hpa1 = pfn1 << PAGE_SHIFT;

	/* Get attributes from the Guest TLB */
	entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
					       kvm_mips_get_kernel_asid(vcpu) :
					       kvm_mips_get_user_asid(vcpu));
	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
		   (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V);
	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
		   (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V);

	kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
		  tlb->tlb_lo0, tlb->tlb_lo1);

	return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
				       tlb->tlb_mask);
}
EXPORT_SYMBOL_GPL(kvm_mips_handle_mapped_seg_tlb_fault);

int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
{
	int i;
	int index = -1;
	struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb;

	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
		if (TLB_HI_VPN2_HIT(tlb[i], entryhi) &&
		    TLB_HI_ASID_HIT(tlb[i], entryhi)) {
			index = i;
			break;
		}
	}

	if (index >= 0)
		kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
			  __func__, entryhi, index, tlb[index].tlb_lo0,
			  tlb[index].tlb_lo1);

	return index;
}
EXPORT_SYMBOL_GPL(kvm_mips_guest_tlb_lookup);

int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr)
{
	unsigned long old_entryhi, flags;
	int idx;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();

	if (KVM_GUEST_KERNEL_MODE(vcpu))
		write_c0_entryhi((vaddr & VPN2_MASK) |
				 kvm_mips_get_kernel_asid(vcpu));
	else {
		write_c0_entryhi((vaddr & VPN2_MASK) |
				 kvm_mips_get_user_asid(vcpu));
	}

	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);

	kvm_debug("Host TLB lookup, %#lx, idx: %2d\n", vaddr, idx);

	return idx;
}
EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_lookup);

int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
{
	int idx;
	unsigned long flags, old_entryhi;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();

	write_c0_entryhi((va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu));
	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	if (idx >= current_cpu_data.tlbsize)
		BUG();

	if (idx >= 0) {
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();

		write_c0_entrylo0(0);
		mtc0_tlbw_hazard();

		write_c0_entrylo1(0);
		mtc0_tlbw_hazard();

		tlb_write_indexed();
		mtc0_tlbw_hazard();
	}

	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);

	if (idx >= 0)
		kvm_debug("%s: Invalidated entryhi %#lx @ idx %d\n", __func__,
			  (va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu), idx);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_inv);

void kvm_mips_flush_host_tlb(int skip_kseg0)
{
	unsigned long flags;
	unsigned long old_entryhi, entryhi;
	unsigned long old_pagemask;
	int entry = 0;
	int maxentry = current_cpu_data.tlbsize;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();

	/* Blast 'em all away. */
	for (entry = 0; entry < maxentry; entry++) {
		write_c0_index(entry);
		mtc0_tlbw_hazard();

		if (skip_kseg0) {
			tlb_read();
			tlbw_use_hazard();

			entryhi = read_c0_entryhi();

			/* Don't blow away guest kernel entries */
			if (KVM_GUEST_KSEGX(entryhi) == KVM_GUEST_KSEG0)
				continue;
		}

		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		mtc0_tlbw_hazard();
		write_c0_entrylo0(0);
		mtc0_tlbw_hazard();
		write_c0_entrylo1(0);
		mtc0_tlbw_hazard();

		tlb_write_indexed();
		mtc0_tlbw_hazard();
	}

	tlbw_use_hazard();

	write_c0_entryhi(old_entryhi);
	write_c0_pagemask(old_pagemask);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_mips_flush_host_tlb);

void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
			     struct kvm_vcpu *vcpu)
{
	unsigned long asid = asid_cache(cpu);

	asid += ASID_INC;
	if (!(asid & ASID_MASK)) {
		if (cpu_has_vtag_icache)
			flush_icache_all();

		kvm_local_flush_tlb_all();      /* start new asid cycle */

		if (!asid)      /* fix version if needed */
			asid = ASID_FIRST_VERSION;
	}

	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}

void kvm_local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry = 0;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	/* Blast 'em all away. */
	while (entry < current_cpu_data.tlbsize) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		write_c0_index(entry);
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		entry++;
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	mtc0_tlbw_hazard();

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_local_flush_tlb_all);

/**
 * kvm_mips_migrate_count() - Migrate timer.
 * @vcpu: Virtual CPU.
 *
 * Migrate CP0_Count hrtimer to the current CPU by cancelling and restarting it
 * if it was running prior to being cancelled.
 *
 * Must be called when the VCPU is migrated to a different CPU to ensure that
 * timer expiry during guest execution interrupts the guest and causes the
 * interrupt to be delivered in a timely manner.
 */
static void kvm_mips_migrate_count(struct kvm_vcpu *vcpu)
{
	if (hrtimer_cancel(&vcpu->arch.comparecount_timer))
		hrtimer_restart(&vcpu->arch.comparecount_timer);
}

/* Restore ASID once we are scheduled back after preemption */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	unsigned long flags;
	int newasid = 0;

	kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);

	/* Allocate new kernel and user ASIDs if needed */

	local_irq_save(flags);

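	/*
	 * The upper bits of an ASID hold a generation (version) number; if
	 * they no longer match this cpu's asid_cache the cached ASIDs are
	 * stale and fresh kernel/user ASIDs must be allocated.
	 */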
	if ((vcpu->arch.guest_kernel_asid[cpu] ^ asid_cache(cpu)) &
	    ASID_VERSION_MASK) {
		kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
		vcpu->arch.guest_kernel_asid[cpu] =
			vcpu->arch.guest_kernel_mm.context.asid[cpu];
		kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
		vcpu->arch.guest_user_asid[cpu] =
			vcpu->arch.guest_user_mm.context.asid[cpu];
		newasid++;

		kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
			  cpu_context(cpu, current->mm));
		kvm_debug("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
			  cpu, vcpu->arch.guest_kernel_asid[cpu]);
		kvm_debug("[%d]: Allocated new ASID for Guest User: %#x\n", cpu,
			  vcpu->arch.guest_user_asid[cpu]);
	}

	if (vcpu->arch.last_sched_cpu != cpu) {
		kvm_debug("[%d->%d]KVM VCPU[%d] switch\n",
			  vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
		/*
		 * Migrate the timer interrupt to the current CPU so that it
		 * always interrupts the guest and synchronously triggers a
		 * guest timer interrupt.
		 */
		kvm_mips_migrate_count(vcpu);
	}

	if (!newasid) {
		/*
		 * If we preempted while the guest was executing, then reload
		 * the pre-empted ASID
		 */
		if (current->flags & PF_VCPU) {
			write_c0_entryhi(vcpu->arch.preempt_entryhi &
					 ASID_MASK);
			ehb();
		}
	} else {
		/* New ASIDs were allocated for the VM */

		/*
		 * Were we in guest context? If so then the pre-empted ASID is
		 * no longer valid, we need to set it to what it should be based
		 * on the mode of the Guest (Kernel/User)
		 */
		if (current->flags & PF_VCPU) {
			if (KVM_GUEST_KERNEL_MODE(vcpu))
				write_c0_entryhi(vcpu->arch.guest_kernel_asid[cpu] &
						 ASID_MASK);
			else
				write_c0_entryhi(vcpu->arch.guest_user_asid[cpu] &
						 ASID_MASK);
			ehb();
		}
	}

	/* restore guest state to registers */
	kvm_mips_callbacks->vcpu_set_regs(vcpu);

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_arch_vcpu_load);

/* ASID can change if another task is scheduled during preemption */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	unsigned long flags;
	uint32_t cpu;

	local_irq_save(flags);

	cpu = smp_processor_id();

	vcpu->arch.preempt_entryhi = read_c0_entryhi();
	vcpu->arch.last_sched_cpu = cpu;

	/* save guest state in registers */
	kvm_mips_callbacks->vcpu_get_regs(vcpu);

	if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
	     ASID_VERSION_MASK)) {
		kvm_debug("%s: Dropping MMU Context: %#lx\n", __func__,
			  cpu_context(cpu, current->mm));
		drop_mmu_context(current->mm, cpu);
	}
	write_c0_entryhi(cpu_asid(cpu, current->mm));
	ehb();

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_arch_vcpu_put);

uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long paddr, flags, vpn2, asid;
	uint32_t inst;
	int index;

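	/*
	 * For TLB mapped guest segments, make sure the host TLB maps opc
	 * (faulting the mapping in from the guest TLB if needed) and fetch
	 * through the virtual address; for guest KSEG0, translate to a host
	 * physical address and fetch through CKSEG0.
	 */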
	if (KVM_GUEST_KSEGX((unsigned long) opc) < KVM_GUEST_KSEG0 ||
	    KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
		local_irq_save(flags);
		index = kvm_mips_host_tlb_lookup(vcpu, (unsigned long) opc);
		if (index >= 0) {
			inst = *(opc);
		} else {
			vpn2 = (unsigned long) opc & VPN2_MASK;
			asid = kvm_read_c0_guest_entryhi(cop0) & ASID_MASK;
			index = kvm_mips_guest_tlb_lookup(vcpu, vpn2 | asid);
			if (index < 0) {
				kvm_err("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
					__func__, opc, vcpu, read_c0_entryhi());
				kvm_mips_dump_host_tlbs();
				local_irq_restore(flags);
				return KVM_INVALID_INST;
			}
			kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
							     &vcpu->arch.guest_tlb[index],
							     NULL, NULL);
			inst = *(opc);
		}
		local_irq_restore(flags);
	} else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG0) {
		paddr = kvm_mips_translate_guest_kseg0_to_hpa(vcpu,
							      (unsigned long) opc);
		inst = *(uint32_t *) CKSEG0ADDR(paddr);
	} else {
		kvm_err("%s: illegal address: %p\n", __func__, opc);
		return KVM_INVALID_INST;
	}

	return inst;
}
EXPORT_SYMBOL_GPL(kvm_get_inst);