1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
4 *
5 * Author: Yu Liu, <yu.liu@freescale.com>
6 *
7 * Description:
8 * This file is derived from arch/powerpc/kvm/44x_emulate.c,
9 * by Hollis Blanchard <hollisb@us.ibm.com>.
10 */
11
12#include <asm/kvm_ppc.h>
13#include <asm/disassemble.h>
14#include <asm/dbell.h>
15#include <asm/reg_booke.h>
16
17#include "booke.h"
18#include "e500.h"
19
20#define XOP_DCBTLS 166
21#define XOP_MSGSND 206
22#define XOP_MSGCLR 238
23#define XOP_MFTMR 366
24#define XOP_TLBIVAX 786
25#define XOP_TLBSX 914
26#define XOP_TLBRE 946
27#define XOP_TLBWE 978
28#define XOP_TLBILX 18
29#define XOP_EHPRIV 270
30
31#ifdef CONFIG_KVM_E500MC
32static int dbell2prio(ulong param)
33{
34 int msg = param & PPC_DBELL_TYPE_MASK;
35 int prio = -1;
36
37 switch (msg) {
38 case PPC_DBELL_TYPE(PPC_DBELL):
39 prio = BOOKE_IRQPRIO_DBELL;
40 break;
41 case PPC_DBELL_TYPE(PPC_DBELL_CRIT):
42 prio = BOOKE_IRQPRIO_DBELL_CRIT;
43 break;
44 default:
45 break;
46 }
47
48 return prio;
49}
50
51static int kvmppc_e500_emul_msgclr(struct kvm_vcpu *vcpu, int rb)
52{
53 ulong param = vcpu->arch.regs.gpr[rb];
54 int prio = dbell2prio(param);
55
56 if (prio < 0)
57 return EMULATE_FAIL;
58
59 clear_bit(prio, &vcpu->arch.pending_exceptions);
60 return EMULATE_DONE;
61}
62
63static int kvmppc_e500_emul_msgsnd(struct kvm_vcpu *vcpu, int rb)
64{
65 ulong param = vcpu->arch.regs.gpr[rb];
66 int prio = dbell2prio(rb);
67 int pir = param & PPC_DBELL_PIR_MASK;
68 unsigned long i;
69 struct kvm_vcpu *cvcpu;
70
71 if (prio < 0)
72 return EMULATE_FAIL;
73
74 kvm_for_each_vcpu(i, cvcpu, vcpu->kvm) {
75 int cpir = cvcpu->arch.shared->pir;
76 if ((param & PPC_DBELL_MSG_BRDCAST) || (cpir == pir)) {
77 set_bit(prio, &cvcpu->arch.pending_exceptions);
78 kvm_vcpu_kick(cvcpu);
79 }
80 }
81
82 return EMULATE_DONE;
83}
84#endif
85
86static int kvmppc_e500_emul_ehpriv(struct kvm_vcpu *vcpu,
87 unsigned int inst, int *advance)
88{
89 int emulated = EMULATE_DONE;
90
91 switch (get_oc(inst)) {
92 case EHPRIV_OC_DEBUG:
93 vcpu->run->exit_reason = KVM_EXIT_DEBUG;
94 vcpu->run->debug.arch.address = vcpu->arch.regs.nip;
95 vcpu->run->debug.arch.status = 0;
96 kvmppc_account_exit(vcpu, DEBUG_EXITS);
97 emulated = EMULATE_EXIT_USER;
98 *advance = 0;
99 break;
100 default:
101 emulated = EMULATE_FAIL;
102 }
103 return emulated;
104}
105
106static int kvmppc_e500_emul_dcbtls(struct kvm_vcpu *vcpu)
107{
108 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
109
110 /* Always fail to lock the cache */
111 vcpu_e500->l1csr0 |= L1CSR0_CUL;
112 return EMULATE_DONE;
113}
114
115static int kvmppc_e500_emul_mftmr(struct kvm_vcpu *vcpu, unsigned int inst,
116 int rt)
117{
118 /* Expose one thread per vcpu */
119 if (get_tmrn(inst) == TMRN_TMCFG0) {
120 kvmppc_set_gpr(vcpu, rt,
121 1 | (1 << TMRN_TMCFG0_NATHRD_SHIFT));
122 return EMULATE_DONE;
123 }
124
125 return EMULATE_FAIL;
126}
127
/*
 * Emulate a privileged instruction trapped from an e500 guest.
 *
 * Decodes @inst and dispatches to the per-instruction helpers above.
 * Helpers that must not advance the guest PC (e.g. ehpriv, which exits
 * to userspace) clear *@advance.  Anything not recognised here falls
 * back to the generic booke emulator.
 */
int kvmppc_core_emulate_op_e500(struct kvm_vcpu *vcpu,
				unsigned int inst, int *advance)
{
	int emulated = EMULATE_DONE;
	int ra = get_ra(inst);
	int rb = get_rb(inst);
	int rt = get_rt(inst);
	gva_t ea;

	switch (get_op(inst)) {
	case 31:
		switch (get_xop(inst)) {

		case XOP_DCBTLS:
			emulated = kvmppc_e500_emul_dcbtls(vcpu);
			break;

#ifdef CONFIG_KVM_E500MC
		case XOP_MSGSND:
			emulated = kvmppc_e500_emul_msgsnd(vcpu, rb);
			break;

		case XOP_MSGCLR:
			emulated = kvmppc_e500_emul_msgclr(vcpu, rb);
			break;
#endif

		case XOP_TLBRE:
			emulated = kvmppc_e500_emul_tlbre(vcpu);
			break;

		case XOP_TLBWE:
			emulated = kvmppc_e500_emul_tlbwe(vcpu);
			break;

		case XOP_TLBSX:
			ea = kvmppc_get_ea_indexed(vcpu, ra, rb);
			emulated = kvmppc_e500_emul_tlbsx(vcpu, ea);
			break;

		case XOP_TLBILX: {
			/* Low two bits of the rt field select the tlbilx flavour. */
			int type = rt & 0x3;
			ea = kvmppc_get_ea_indexed(vcpu, ra, rb);
			emulated = kvmppc_e500_emul_tlbilx(vcpu, type, ea);
			break;
		}

		case XOP_TLBIVAX:
			ea = kvmppc_get_ea_indexed(vcpu, ra, rb);
			emulated = kvmppc_e500_emul_tlbivax(vcpu, ea);
			break;

		case XOP_MFTMR:
			emulated = kvmppc_e500_emul_mftmr(vcpu, inst, rt);
			break;

		case XOP_EHPRIV:
			emulated = kvmppc_e500_emul_ehpriv(vcpu, inst, advance);
			break;

		default:
			emulated = EMULATE_FAIL;
		}

		break;

	default:
		emulated = EMULATE_FAIL;
	}

	/* Let the generic booke emulator try anything we didn't handle. */
	if (emulated == EMULATE_FAIL)
		emulated = kvmppc_booke_emulate_op(vcpu, inst, advance);

	return emulated;
}
203
/*
 * Emulate mtspr for e500-specific SPRs.  SPRs not handled here are
 * passed on to the generic booke mtspr emulation.
 */
int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int emulated = EMULATE_DONE;

	switch (sprn) {
#ifndef CONFIG_KVM_BOOKE_HV
	case SPRN_PID:
		kvmppc_set_pid(vcpu, spr_val);
		break;
	case SPRN_PID1:
		/* Only a zero PID1/PID2 is supported by this emulation. */
		if (spr_val != 0)
			return EMULATE_FAIL;
		vcpu_e500->pid[1] = spr_val;
		break;
	case SPRN_PID2:
		if (spr_val != 0)
			return EMULATE_FAIL;
		vcpu_e500->pid[2] = spr_val;
		break;
	case SPRN_MAS0:
		vcpu->arch.shared->mas0 = spr_val;
		break;
	case SPRN_MAS1:
		vcpu->arch.shared->mas1 = spr_val;
		break;
	case SPRN_MAS2:
		vcpu->arch.shared->mas2 = spr_val;
		break;
	case SPRN_MAS3:
		/* MAS3 is kept as the low 32 bits of the combined mas7_3. */
		vcpu->arch.shared->mas7_3 &= ~(u64)0xffffffff;
		vcpu->arch.shared->mas7_3 |= spr_val;
		break;
	case SPRN_MAS4:
		vcpu->arch.shared->mas4 = spr_val;
		break;
	case SPRN_MAS6:
		vcpu->arch.shared->mas6 = spr_val;
		break;
	case SPRN_MAS7:
		/* MAS7 is kept as the high 32 bits of the combined mas7_3. */
		vcpu->arch.shared->mas7_3 &= (u64)0xffffffff;
		vcpu->arch.shared->mas7_3 |= (u64)spr_val << 32;
		break;
#endif
	case SPRN_L1CSR0:
		vcpu_e500->l1csr0 = spr_val;
		/* Flash-invalidate / lock-clear bits self-clear on real HW. */
		vcpu_e500->l1csr0 &= ~(L1CSR0_DCFI | L1CSR0_CLFC);
		break;
	case SPRN_L1CSR1:
		vcpu_e500->l1csr1 = spr_val;
		/* Likewise for the icache invalidate / lock-flash bits. */
		vcpu_e500->l1csr1 &= ~(L1CSR1_ICFI | L1CSR1_ICLFR);
		break;
	case SPRN_HID0:
		vcpu_e500->hid0 = spr_val;
		break;
	case SPRN_HID1:
		vcpu_e500->hid1 = spr_val;
		break;

	case SPRN_MMUCSR0:
		emulated = kvmppc_e500_emul_mt_mmucsr0(vcpu_e500,
				spr_val);
		break;

	case SPRN_PWRMGTCR0:
		/*
		 * Guest relies on host power management configurations
		 * Treat the request as a general store
		 */
		vcpu->arch.pwrmgtcr0 = spr_val;
		break;

	case SPRN_BUCSR:
		/*
		 * If we are here, it means that we have already flushed the
		 * branch predictor, so just return to guest.
		 */
		break;

	/* extra exceptions */
#ifdef CONFIG_SPE_POSSIBLE
	case SPRN_IVOR32:
		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL] = spr_val;
		break;
	case SPRN_IVOR33:
		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA] = spr_val;
		break;
	case SPRN_IVOR34:
		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND] = spr_val;
		break;
#endif
#ifdef CONFIG_ALTIVEC
	/* IVOR32/33 mean AltiVec, not SPE, when SPE is not possible. */
	case SPRN_IVOR32:
		vcpu->arch.ivor[BOOKE_IRQPRIO_ALTIVEC_UNAVAIL] = spr_val;
		break;
	case SPRN_IVOR33:
		vcpu->arch.ivor[BOOKE_IRQPRIO_ALTIVEC_ASSIST] = spr_val;
		break;
#endif
	case SPRN_IVOR35:
		vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] = spr_val;
		break;
#ifdef CONFIG_KVM_BOOKE_HV
	case SPRN_IVOR36:
		vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL] = spr_val;
		break;
	case SPRN_IVOR37:
		vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT] = spr_val;
		break;
#endif
	default:
		emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, spr_val);
	}

	return emulated;
}
320
/*
 * Emulate mfspr for e500-specific SPRs, writing the value to *spr_val.
 * SPRs not handled here are passed on to the generic booke emulation.
 */
int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int emulated = EMULATE_DONE;

	switch (sprn) {
#ifndef CONFIG_KVM_BOOKE_HV
	case SPRN_PID:
		*spr_val = vcpu_e500->pid[0];
		break;
	case SPRN_PID1:
		*spr_val = vcpu_e500->pid[1];
		break;
	case SPRN_PID2:
		*spr_val = vcpu_e500->pid[2];
		break;
	case SPRN_MAS0:
		*spr_val = vcpu->arch.shared->mas0;
		break;
	case SPRN_MAS1:
		*spr_val = vcpu->arch.shared->mas1;
		break;
	case SPRN_MAS2:
		*spr_val = vcpu->arch.shared->mas2;
		break;
	case SPRN_MAS3:
		/* MAS3 is the low half of the combined mas7_3 value. */
		*spr_val = (u32)vcpu->arch.shared->mas7_3;
		break;
	case SPRN_MAS4:
		*spr_val = vcpu->arch.shared->mas4;
		break;
	case SPRN_MAS6:
		*spr_val = vcpu->arch.shared->mas6;
		break;
	case SPRN_MAS7:
		/* MAS7 is the high half of the combined mas7_3 value. */
		*spr_val = vcpu->arch.shared->mas7_3 >> 32;
		break;
#endif
	case SPRN_DECAR:
		*spr_val = vcpu->arch.decar;
		break;
	case SPRN_TLB0CFG:
		*spr_val = vcpu->arch.tlbcfg[0];
		break;
	case SPRN_TLB1CFG:
		*spr_val = vcpu->arch.tlbcfg[1];
		break;
	case SPRN_TLB0PS:
		/* TLBnPS only exist on MMU v2 parts. */
		if (!has_feature(vcpu, VCPU_FTR_MMU_V2))
			return EMULATE_FAIL;
		*spr_val = vcpu->arch.tlbps[0];
		break;
	case SPRN_TLB1PS:
		if (!has_feature(vcpu, VCPU_FTR_MMU_V2))
			return EMULATE_FAIL;
		*spr_val = vcpu->arch.tlbps[1];
		break;
	case SPRN_L1CSR0:
		*spr_val = vcpu_e500->l1csr0;
		break;
	case SPRN_L1CSR1:
		*spr_val = vcpu_e500->l1csr1;
		break;
	case SPRN_HID0:
		*spr_val = vcpu_e500->hid0;
		break;
	case SPRN_HID1:
		*spr_val = vcpu_e500->hid1;
		break;
	case SPRN_SVR:
		*spr_val = vcpu_e500->svr;
		break;

	case SPRN_MMUCSR0:
		/* TLB invalidations always complete immediately: read as 0. */
		*spr_val = 0;
		break;

	case SPRN_MMUCFG:
		*spr_val = vcpu->arch.mmucfg;
		break;
	case SPRN_EPTCFG:
		if (!has_feature(vcpu, VCPU_FTR_MMU_V2))
			return EMULATE_FAIL;
		/*
		 * Legacy Linux guests access EPTCFG register even if the E.PT
		 * category is disabled in the VM. Give them a chance to live.
		 */
		*spr_val = vcpu->arch.eptcfg;
		break;

	case SPRN_PWRMGTCR0:
		*spr_val = vcpu->arch.pwrmgtcr0;
		break;

	/* extra exceptions */
#ifdef CONFIG_SPE_POSSIBLE
	case SPRN_IVOR32:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL];
		break;
	case SPRN_IVOR33:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA];
		break;
	case SPRN_IVOR34:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND];
		break;
#endif
#ifdef CONFIG_ALTIVEC
	/* IVOR32/33 mean AltiVec, not SPE, when SPE is not possible. */
	case SPRN_IVOR32:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_ALTIVEC_UNAVAIL];
		break;
	case SPRN_IVOR33:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_ALTIVEC_ASSIST];
		break;
#endif
	case SPRN_IVOR35:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR];
		break;
#ifdef CONFIG_KVM_BOOKE_HV
	case SPRN_IVOR36:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL];
		break;
	case SPRN_IVOR37:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT];
		break;
#endif
	default:
		emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, spr_val);
	}

	return emulated;
}
452
1/*
2 * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
3 *
4 * Author: Yu Liu, <yu.liu@freescale.com>
5 *
6 * Description:
7 * This file is derived from arch/powerpc/kvm/44x_emulate.c,
8 * by Hollis Blanchard <hollisb@us.ibm.com>.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License, version 2, as
12 * published by the Free Software Foundation.
13 */
14
15#include <asm/kvm_ppc.h>
16#include <asm/disassemble.h>
17#include <asm/dbell.h>
18
19#include "booke.h"
20#include "e500.h"
21
22#define XOP_MSGSND 206
23#define XOP_MSGCLR 238
24#define XOP_TLBIVAX 786
25#define XOP_TLBSX 914
26#define XOP_TLBRE 946
27#define XOP_TLBWE 978
28#define XOP_TLBILX 18
29#define XOP_EHPRIV 270
30
31#ifdef CONFIG_KVM_E500MC
32static int dbell2prio(ulong param)
33{
34 int msg = param & PPC_DBELL_TYPE_MASK;
35 int prio = -1;
36
37 switch (msg) {
38 case PPC_DBELL_TYPE(PPC_DBELL):
39 prio = BOOKE_IRQPRIO_DBELL;
40 break;
41 case PPC_DBELL_TYPE(PPC_DBELL_CRIT):
42 prio = BOOKE_IRQPRIO_DBELL_CRIT;
43 break;
44 default:
45 break;
46 }
47
48 return prio;
49}
50
51static int kvmppc_e500_emul_msgclr(struct kvm_vcpu *vcpu, int rb)
52{
53 ulong param = vcpu->arch.gpr[rb];
54 int prio = dbell2prio(param);
55
56 if (prio < 0)
57 return EMULATE_FAIL;
58
59 clear_bit(prio, &vcpu->arch.pending_exceptions);
60 return EMULATE_DONE;
61}
62
63static int kvmppc_e500_emul_msgsnd(struct kvm_vcpu *vcpu, int rb)
64{
65 ulong param = vcpu->arch.gpr[rb];
66 int prio = dbell2prio(rb);
67 int pir = param & PPC_DBELL_PIR_MASK;
68 int i;
69 struct kvm_vcpu *cvcpu;
70
71 if (prio < 0)
72 return EMULATE_FAIL;
73
74 kvm_for_each_vcpu(i, cvcpu, vcpu->kvm) {
75 int cpir = cvcpu->arch.shared->pir;
76 if ((param & PPC_DBELL_MSG_BRDCAST) || (cpir == pir)) {
77 set_bit(prio, &cvcpu->arch.pending_exceptions);
78 kvm_vcpu_kick(cvcpu);
79 }
80 }
81
82 return EMULATE_DONE;
83}
84#endif
85
86static int kvmppc_e500_emul_ehpriv(struct kvm_run *run, struct kvm_vcpu *vcpu,
87 unsigned int inst, int *advance)
88{
89 int emulated = EMULATE_DONE;
90
91 switch (get_oc(inst)) {
92 case EHPRIV_OC_DEBUG:
93 run->exit_reason = KVM_EXIT_DEBUG;
94 run->debug.arch.address = vcpu->arch.pc;
95 run->debug.arch.status = 0;
96 kvmppc_account_exit(vcpu, DEBUG_EXITS);
97 emulated = EMULATE_EXIT_USER;
98 *advance = 0;
99 break;
100 default:
101 emulated = EMULATE_FAIL;
102 }
103 return emulated;
104}
105
/*
 * Emulate a privileged instruction trapped from an e500 guest.
 *
 * Decodes @inst and dispatches to the per-instruction helpers above.
 * Helpers that must not advance the guest PC (e.g. ehpriv, which exits
 * to userspace) clear *@advance.  Anything not recognised here falls
 * back to the generic booke emulator.
 */
int kvmppc_core_emulate_op_e500(struct kvm_run *run, struct kvm_vcpu *vcpu,
				unsigned int inst, int *advance)
{
	int emulated = EMULATE_DONE;
	int ra = get_ra(inst);
	int rb = get_rb(inst);
	int rt = get_rt(inst);
	gva_t ea;

	switch (get_op(inst)) {
	case 31:
		switch (get_xop(inst)) {

#ifdef CONFIG_KVM_E500MC
		case XOP_MSGSND:
			emulated = kvmppc_e500_emul_msgsnd(vcpu, rb);
			break;

		case XOP_MSGCLR:
			emulated = kvmppc_e500_emul_msgclr(vcpu, rb);
			break;
#endif

		case XOP_TLBRE:
			emulated = kvmppc_e500_emul_tlbre(vcpu);
			break;

		case XOP_TLBWE:
			emulated = kvmppc_e500_emul_tlbwe(vcpu);
			break;

		case XOP_TLBSX:
			ea = kvmppc_get_ea_indexed(vcpu, ra, rb);
			emulated = kvmppc_e500_emul_tlbsx(vcpu, ea);
			break;

		case XOP_TLBILX: {
			/* Low two bits of the rt field select the tlbilx flavour. */
			int type = rt & 0x3;
			ea = kvmppc_get_ea_indexed(vcpu, ra, rb);
			emulated = kvmppc_e500_emul_tlbilx(vcpu, type, ea);
			break;
		}

		case XOP_TLBIVAX:
			ea = kvmppc_get_ea_indexed(vcpu, ra, rb);
			emulated = kvmppc_e500_emul_tlbivax(vcpu, ea);
			break;

		case XOP_EHPRIV:
			emulated = kvmppc_e500_emul_ehpriv(run, vcpu, inst,
							   advance);
			break;

		default:
			emulated = EMULATE_FAIL;
		}

		break;

	default:
		emulated = EMULATE_FAIL;
	}

	/* Let the generic booke emulator try anything we didn't handle. */
	if (emulated == EMULATE_FAIL)
		emulated = kvmppc_booke_emulate_op(run, vcpu, inst, advance);

	return emulated;
}
174
/*
 * Emulate mtspr for e500-specific SPRs.  SPRs not handled here are
 * passed on to the generic booke mtspr emulation.
 */
int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int emulated = EMULATE_DONE;

	switch (sprn) {
#ifndef CONFIG_KVM_BOOKE_HV
	case SPRN_PID:
		kvmppc_set_pid(vcpu, spr_val);
		break;
	case SPRN_PID1:
		/* Only a zero PID1/PID2 is supported by this emulation. */
		if (spr_val != 0)
			return EMULATE_FAIL;
		vcpu_e500->pid[1] = spr_val;
		break;
	case SPRN_PID2:
		if (spr_val != 0)
			return EMULATE_FAIL;
		vcpu_e500->pid[2] = spr_val;
		break;
	case SPRN_MAS0:
		vcpu->arch.shared->mas0 = spr_val;
		break;
	case SPRN_MAS1:
		vcpu->arch.shared->mas1 = spr_val;
		break;
	case SPRN_MAS2:
		vcpu->arch.shared->mas2 = spr_val;
		break;
	case SPRN_MAS3:
		/* MAS3 is kept as the low 32 bits of the combined mas7_3. */
		vcpu->arch.shared->mas7_3 &= ~(u64)0xffffffff;
		vcpu->arch.shared->mas7_3 |= spr_val;
		break;
	case SPRN_MAS4:
		vcpu->arch.shared->mas4 = spr_val;
		break;
	case SPRN_MAS6:
		vcpu->arch.shared->mas6 = spr_val;
		break;
	case SPRN_MAS7:
		/* MAS7 is kept as the high 32 bits of the combined mas7_3. */
		vcpu->arch.shared->mas7_3 &= (u64)0xffffffff;
		vcpu->arch.shared->mas7_3 |= (u64)spr_val << 32;
		break;
#endif
	case SPRN_L1CSR0:
		vcpu_e500->l1csr0 = spr_val;
		/* Flash-invalidate / lock-clear bits self-clear on real HW. */
		vcpu_e500->l1csr0 &= ~(L1CSR0_DCFI | L1CSR0_CLFC);
		break;
	case SPRN_L1CSR1:
		vcpu_e500->l1csr1 = spr_val;
		break;
	case SPRN_HID0:
		vcpu_e500->hid0 = spr_val;
		break;
	case SPRN_HID1:
		vcpu_e500->hid1 = spr_val;
		break;

	case SPRN_MMUCSR0:
		emulated = kvmppc_e500_emul_mt_mmucsr0(vcpu_e500,
				spr_val);
		break;

	/* extra exceptions */
	case SPRN_IVOR32:
		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL] = spr_val;
		break;
	case SPRN_IVOR33:
		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA] = spr_val;
		break;
	case SPRN_IVOR34:
		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND] = spr_val;
		break;
	case SPRN_IVOR35:
		vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] = spr_val;
		break;
#ifdef CONFIG_KVM_BOOKE_HV
	case SPRN_IVOR36:
		vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL] = spr_val;
		break;
	case SPRN_IVOR37:
		vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT] = spr_val;
		break;
#endif
	default:
		emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, spr_val);
	}

	return emulated;
}
265
/*
 * Emulate mfspr for e500-specific SPRs, writing the value to *spr_val.
 * SPRs not handled here are passed on to the generic booke emulation.
 */
int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int emulated = EMULATE_DONE;

	switch (sprn) {
#ifndef CONFIG_KVM_BOOKE_HV
	case SPRN_PID:
		*spr_val = vcpu_e500->pid[0];
		break;
	case SPRN_PID1:
		*spr_val = vcpu_e500->pid[1];
		break;
	case SPRN_PID2:
		*spr_val = vcpu_e500->pid[2];
		break;
	case SPRN_MAS0:
		*spr_val = vcpu->arch.shared->mas0;
		break;
	case SPRN_MAS1:
		*spr_val = vcpu->arch.shared->mas1;
		break;
	case SPRN_MAS2:
		*spr_val = vcpu->arch.shared->mas2;
		break;
	case SPRN_MAS3:
		/* MAS3 is the low half of the combined mas7_3 value. */
		*spr_val = (u32)vcpu->arch.shared->mas7_3;
		break;
	case SPRN_MAS4:
		*spr_val = vcpu->arch.shared->mas4;
		break;
	case SPRN_MAS6:
		*spr_val = vcpu->arch.shared->mas6;
		break;
	case SPRN_MAS7:
		/* MAS7 is the high half of the combined mas7_3 value. */
		*spr_val = vcpu->arch.shared->mas7_3 >> 32;
		break;
#endif
	case SPRN_DECAR:
		*spr_val = vcpu->arch.decar;
		break;
	case SPRN_TLB0CFG:
		*spr_val = vcpu->arch.tlbcfg[0];
		break;
	case SPRN_TLB1CFG:
		*spr_val = vcpu->arch.tlbcfg[1];
		break;
	case SPRN_TLB0PS:
		/* TLBnPS only exist on MMU v2 parts. */
		if (!has_feature(vcpu, VCPU_FTR_MMU_V2))
			return EMULATE_FAIL;
		*spr_val = vcpu->arch.tlbps[0];
		break;
	case SPRN_TLB1PS:
		if (!has_feature(vcpu, VCPU_FTR_MMU_V2))
			return EMULATE_FAIL;
		*spr_val = vcpu->arch.tlbps[1];
		break;
	case SPRN_L1CSR0:
		*spr_val = vcpu_e500->l1csr0;
		break;
	case SPRN_L1CSR1:
		*spr_val = vcpu_e500->l1csr1;
		break;
	case SPRN_HID0:
		*spr_val = vcpu_e500->hid0;
		break;
	case SPRN_HID1:
		*spr_val = vcpu_e500->hid1;
		break;
	case SPRN_SVR:
		*spr_val = vcpu_e500->svr;
		break;

	case SPRN_MMUCSR0:
		/* TLB invalidations always complete immediately: read as 0. */
		*spr_val = 0;
		break;

	case SPRN_MMUCFG:
		*spr_val = vcpu->arch.mmucfg;
		break;
	case SPRN_EPTCFG:
		if (!has_feature(vcpu, VCPU_FTR_MMU_V2))
			return EMULATE_FAIL;
		/*
		 * Legacy Linux guests access EPTCFG register even if the E.PT
		 * category is disabled in the VM. Give them a chance to live.
		 */
		*spr_val = vcpu->arch.eptcfg;
		break;

	/* extra exceptions */
	case SPRN_IVOR32:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL];
		break;
	case SPRN_IVOR33:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA];
		break;
	case SPRN_IVOR34:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND];
		break;
	case SPRN_IVOR35:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR];
		break;
#ifdef CONFIG_KVM_BOOKE_HV
	case SPRN_IVOR36:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL];
		break;
	case SPRN_IVOR37:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT];
		break;
#endif
	default:
		emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, spr_val);
	}

	return emulated;
}
383