// SPDX-License-Identifier: GPL-2.0-only
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 *
4 * Copyright IBM Corp. 2007
5 * Copyright 2011 Freescale Semiconductor, Inc.
6 *
7 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
8 */
9
10#include <linux/jiffies.h>
11#include <linux/hrtimer.h>
12#include <linux/types.h>
13#include <linux/string.h>
14#include <linux/kvm_host.h>
15#include <linux/clockchips.h>
16
17#include <asm/reg.h>
18#include <asm/time.h>
19#include <asm/byteorder.h>
20#include <asm/kvm_ppc.h>
21#include <asm/disassemble.h>
22#include <asm/ppc-opcode.h>
23#include "timing.h"
24#include "trace.h"
25
/*
 * Emulate a guest write to the DEC (decrementer) SPR.
 *
 * The caller has already stored the new value in vcpu->arch.dec; this
 * function (re)programs the per-vcpu hrtimer so it fires when the guest
 * decrementer would reach zero.
 */
void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
{
	unsigned long dec_nsec;
	unsigned long long dec_time;

	pr_debug("mtDEC: %lx\n", vcpu->arch.dec);
	/* Drop any timer still armed for the previous DEC value. */
	hrtimer_try_to_cancel(&vcpu->arch.dec_timer);

#ifdef CONFIG_PPC_BOOK3S
	/* mtdec lowers the interrupt line when positive. */
	kvmppc_core_dequeue_dec(vcpu);
#endif

#ifdef CONFIG_BOOKE
	/* On BOOKE, DEC = 0 is as good as decrementer not enabled */
	if (vcpu->arch.dec == 0)
		return;
#endif

	/*
	 * The decrementer ticks at the same rate as the timebase, so
	 * that's how we convert the guest DEC value to the number of
	 * host ticks.
	 */

	dec_time = vcpu->arch.dec;
	/*
	 * Guest timebase ticks at the same frequency as host timebase.
	 * So use the host timebase calculations for decrementer emulation.
	 */
	dec_time = tb_to_ns(dec_time);
	/*
	 * do_div() divides dec_time in place (leaving whole seconds) and
	 * returns the remainder in nanoseconds, exactly what ktime_set()
	 * wants.
	 */
	dec_nsec = do_div(dec_time, NSEC_PER_SEC);
	hrtimer_start(&vcpu->arch.dec_timer,
		      ktime_set(dec_time, dec_nsec), HRTIMER_MODE_REL);
	/* Record when DEC was loaded; kvmppc_get_dec() counts down from this. */
	vcpu->arch.dec_jiffies = get_tb();
}
62
63u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb)
64{
65 u64 jd = tb - vcpu->arch.dec_jiffies;
66
67#ifdef CONFIG_BOOKE
68 if (vcpu->arch.dec < jd)
69 return 0;
70#endif
71
72 return vcpu->arch.dec - jd;
73}
74
75static int kvmppc_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
76{
77 enum emulation_result emulated = EMULATE_DONE;
78 ulong spr_val = kvmppc_get_gpr(vcpu, rs);
79
80 switch (sprn) {
81 case SPRN_SRR0:
82 kvmppc_set_srr0(vcpu, spr_val);
83 break;
84 case SPRN_SRR1:
85 kvmppc_set_srr1(vcpu, spr_val);
86 break;
87
88 /* XXX We need to context-switch the timebase for
89 * watchdog and FIT. */
90 case SPRN_TBWL: break;
91 case SPRN_TBWU: break;
92
93 case SPRN_DEC:
94 vcpu->arch.dec = (u32) spr_val;
95 kvmppc_emulate_dec(vcpu);
96 break;
97
98 case SPRN_SPRG0:
99 kvmppc_set_sprg0(vcpu, spr_val);
100 break;
101 case SPRN_SPRG1:
102 kvmppc_set_sprg1(vcpu, spr_val);
103 break;
104 case SPRN_SPRG2:
105 kvmppc_set_sprg2(vcpu, spr_val);
106 break;
107 case SPRN_SPRG3:
108 kvmppc_set_sprg3(vcpu, spr_val);
109 break;
110
111 /* PIR can legally be written, but we ignore it */
112 case SPRN_PIR: break;
113
114 default:
115 emulated = vcpu->kvm->arch.kvm_ops->emulate_mtspr(vcpu, sprn,
116 spr_val);
117 if (emulated == EMULATE_FAIL)
118 printk(KERN_INFO "mtspr: unknown spr "
119 "0x%x\n", sprn);
120 break;
121 }
122
123 kvmppc_set_exit_type(vcpu, EMULATED_MTSPR_EXITS);
124
125 return emulated;
126}
127
128static int kvmppc_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
129{
130 enum emulation_result emulated = EMULATE_DONE;
131 ulong spr_val = 0;
132
133 switch (sprn) {
134 case SPRN_SRR0:
135 spr_val = kvmppc_get_srr0(vcpu);
136 break;
137 case SPRN_SRR1:
138 spr_val = kvmppc_get_srr1(vcpu);
139 break;
140 case SPRN_PVR:
141 spr_val = vcpu->arch.pvr;
142 break;
143 case SPRN_PIR:
144 spr_val = vcpu->vcpu_id;
145 break;
146
147 /* Note: mftb and TBRL/TBWL are user-accessible, so
148 * the guest can always access the real TB anyways.
149 * In fact, we probably will never see these traps. */
150 case SPRN_TBWL:
151 spr_val = get_tb() >> 32;
152 break;
153 case SPRN_TBWU:
154 spr_val = get_tb();
155 break;
156
157 case SPRN_SPRG0:
158 spr_val = kvmppc_get_sprg0(vcpu);
159 break;
160 case SPRN_SPRG1:
161 spr_val = kvmppc_get_sprg1(vcpu);
162 break;
163 case SPRN_SPRG2:
164 spr_val = kvmppc_get_sprg2(vcpu);
165 break;
166 case SPRN_SPRG3:
167 spr_val = kvmppc_get_sprg3(vcpu);
168 break;
169 /* Note: SPRG4-7 are user-readable, so we don't get
170 * a trap. */
171
172 case SPRN_DEC:
173 spr_val = kvmppc_get_dec(vcpu, get_tb());
174 break;
175 default:
176 emulated = vcpu->kvm->arch.kvm_ops->emulate_mfspr(vcpu, sprn,
177 &spr_val);
178 if (unlikely(emulated == EMULATE_FAIL)) {
179 printk(KERN_INFO "mfspr: unknown spr "
180 "0x%x\n", sprn);
181 }
182 break;
183 }
184
185 if (emulated == EMULATE_DONE)
186 kvmppc_set_gpr(vcpu, rt, spr_val);
187 kvmppc_set_exit_type(vcpu, EMULATED_MFSPR_EXITS);
188
189 return emulated;
190}
191
192/* XXX Should probably auto-generate instruction decoding for a particular core
193 * from opcode tables in the future. */
/*
 * Top-level entry point for emulating a trapped guest instruction.
 *
 * Fetches the last-executed instruction, decodes the primary (and, for
 * opcode 31, extended) opcode, and either handles it inline, defers to
 * the mfspr/mtspr helpers, or falls back to the core-specific backend
 * via kvm_ops->emulate_op().  On success the guest PC is stepped past
 * the instruction unless a handler cleared 'advance' (e.g. a queued
 * exception must re-deliver at the trapping PC).
 *
 * Returns the emulation result (EMULATE_DONE, EMULATE_FAIL,
 * EMULATE_EXIT_USER, ...).
 */
int kvmppc_emulate_instruction(struct kvm_vcpu *vcpu)
{
	u32 inst;
	ppc_inst_t pinst;
	int rs, rt, sprn;
	enum emulation_result emulated;
	int advance = 1;

	/* this default type might be overwritten by subcategories */
	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

	emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &pinst);
	inst = ppc_inst_val(pinst);
	if (emulated != EMULATE_DONE)
		return emulated;

	pr_debug("Emulating opcode %d / %d\n", get_op(inst), get_xop(inst));

	rs = get_rs(inst);
	rt = get_rt(inst);
	sprn = get_sprn(inst);

	switch (get_op(inst)) {
	case OP_TRAP:
#ifdef CONFIG_PPC_BOOK3S
	case OP_TRAP_64:
		kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
#else
		kvmppc_core_queue_program(vcpu,
					  vcpu->arch.shared->esr | ESR_PTR);
#endif
		/* Exception queued; leave the PC on the trapping insn. */
		advance = 0;
		break;

	case 31:
		switch (get_xop(inst)) {

		case OP_31_XOP_TRAP:
#ifdef CONFIG_64BIT
		case OP_31_XOP_TRAP_64:
#endif
#ifdef CONFIG_PPC_BOOK3S
			kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
#else
			kvmppc_core_queue_program(vcpu,
					vcpu->arch.shared->esr | ESR_PTR);
#endif
			advance = 0;
			break;

		case OP_31_XOP_MFSPR:
			emulated = kvmppc_emulate_mfspr(vcpu, sprn, rt);
			/* AGAIN means "retry this insn": done, but don't advance. */
			if (emulated == EMULATE_AGAIN) {
				emulated = EMULATE_DONE;
				advance = 0;
			}
			break;

		case OP_31_XOP_MTSPR:
			emulated = kvmppc_emulate_mtspr(vcpu, sprn, rs);
			if (emulated == EMULATE_AGAIN) {
				emulated = EMULATE_DONE;
				advance = 0;
			}
			break;

		case OP_31_XOP_TLBSYNC:
			/* No-op: TLB operations are fully emulated. */
			break;

		default:
			/* Attempt core-specific emulation below. */
			emulated = EMULATE_FAIL;
		}
		break;

	case 0:
		/*
		 * Instruction with primary opcode 0. Based on PowerISA
		 * these are illegal instructions.
		 */
		if (inst == KVMPPC_INST_SW_BREAKPOINT) {
			/* Software breakpoint: hand control to userspace. */
			vcpu->run->exit_reason = KVM_EXIT_DEBUG;
			vcpu->run->debug.arch.status = 0;
			vcpu->run->debug.arch.address = kvmppc_get_pc(vcpu);
			emulated = EMULATE_EXIT_USER;
			advance = 0;
		} else
			emulated = EMULATE_FAIL;

		break;

	default:
		emulated = EMULATE_FAIL;
	}

	if (emulated == EMULATE_FAIL) {
		/* Last chance: let the core-specific backend try. */
		emulated = vcpu->kvm->arch.kvm_ops->emulate_op(vcpu, inst,
							       &advance);
		if (emulated == EMULATE_AGAIN) {
			advance = 0;
		} else if (emulated == EMULATE_FAIL) {
			advance = 0;
			printk(KERN_ERR "Couldn't emulate instruction 0x%08x "
			       "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst));
		}
	}

	trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);

	/* Advance past emulated instruction. */
	/*
	 * If this ever handles prefixed instructions, the 4
	 * will need to become ppc_inst_len(pinst) instead.
	 */
	if (advance)
		kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);

	return emulated;
}
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright IBM Corp. 2007
16 * Copyright 2011 Freescale Semiconductor, Inc.
17 *
18 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
19 */
20
21#include <linux/jiffies.h>
22#include <linux/hrtimer.h>
23#include <linux/types.h>
24#include <linux/string.h>
25#include <linux/kvm_host.h>
26#include <linux/clockchips.h>
27
28#include <asm/reg.h>
29#include <asm/time.h>
30#include <asm/byteorder.h>
31#include <asm/kvm_ppc.h>
32#include <asm/disassemble.h>
33#include "timing.h"
34#include "trace.h"
35
36#define OP_TRAP 3
37#define OP_TRAP_64 2
38
39#define OP_31_XOP_TRAP 4
40#define OP_31_XOP_LWZX 23
41#define OP_31_XOP_TRAP_64 68
42#define OP_31_XOP_LBZX 87
43#define OP_31_XOP_STWX 151
44#define OP_31_XOP_STBX 215
45#define OP_31_XOP_LBZUX 119
46#define OP_31_XOP_STBUX 247
47#define OP_31_XOP_LHZX 279
48#define OP_31_XOP_LHZUX 311
49#define OP_31_XOP_MFSPR 339
50#define OP_31_XOP_LHAX 343
51#define OP_31_XOP_STHX 407
52#define OP_31_XOP_STHUX 439
53#define OP_31_XOP_MTSPR 467
54#define OP_31_XOP_DCBI 470
55#define OP_31_XOP_LWBRX 534
56#define OP_31_XOP_TLBSYNC 566
57#define OP_31_XOP_STWBRX 662
58#define OP_31_XOP_LHBRX 790
59#define OP_31_XOP_STHBRX 918
60
61#define OP_LWZ 32
62#define OP_LWZU 33
63#define OP_LBZ 34
64#define OP_LBZU 35
65#define OP_STW 36
66#define OP_STWU 37
67#define OP_STB 38
68#define OP_STBU 39
69#define OP_LHZ 40
70#define OP_LHZU 41
71#define OP_LHA 42
72#define OP_LHAU 43
73#define OP_STH 44
74#define OP_STHU 45
75
/*
 * Emulate a guest write to the DEC (decrementer) SPR.
 *
 * The caller has already stored the new value in vcpu->arch.dec; this
 * function (re)programs the per-vcpu hrtimer so it fires when the guest
 * decrementer would reach zero.
 */
void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
{
	unsigned long dec_nsec;
	unsigned long long dec_time;

	pr_debug("mtDEC: %x\n", vcpu->arch.dec);
	/* Drop any timer still armed for the previous DEC value. */
	hrtimer_try_to_cancel(&vcpu->arch.dec_timer);

#ifdef CONFIG_PPC_BOOK3S
	/* mtdec lowers the interrupt line when positive. */
	kvmppc_core_dequeue_dec(vcpu);

	/* POWER4+ triggers a dec interrupt if the value is < 0 */
	if (vcpu->arch.dec & 0x80000000) {
		kvmppc_core_queue_dec(vcpu);
		return;
	}
#endif

#ifdef CONFIG_BOOKE
	/* On BOOKE, DEC = 0 is as good as decrementer not enabled */
	if (vcpu->arch.dec == 0)
		return;
#endif

	/*
	 * The decrementer ticks at the same rate as the timebase, so
	 * that's how we convert the guest DEC value to the number of
	 * host ticks.
	 */

	dec_time = vcpu->arch.dec;
	/*
	 * Guest timebase ticks at the same frequency as host decrementer.
	 * So use the host decrementer calculations for decrementer emulation.
	 */
	/*
	 * Invert the clockevent's mult/shift scaling to convert timebase
	 * ticks to nanoseconds: ns = (ticks << shift) / mult.
	 * NOTE(review): dec_time << shift could overflow for large DEC
	 * values — later kernels use tb_to_ns() here; confirm the range.
	 */
	dec_time = dec_time << decrementer_clockevent.shift;
	do_div(dec_time, decrementer_clockevent.mult);
	/*
	 * do_div() divides dec_time in place (leaving whole seconds) and
	 * returns the remainder in nanoseconds for ktime_set().
	 */
	dec_nsec = do_div(dec_time, NSEC_PER_SEC);
	hrtimer_start(&vcpu->arch.dec_timer,
		      ktime_set(dec_time, dec_nsec), HRTIMER_MODE_REL);
	/* Record when DEC was loaded; kvmppc_get_dec() counts down from this. */
	vcpu->arch.dec_jiffies = get_tb();
}
119
120u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb)
121{
122 u64 jd = tb - vcpu->arch.dec_jiffies;
123
124#ifdef CONFIG_BOOKE
125 if (vcpu->arch.dec < jd)
126 return 0;
127#endif
128
129 return vcpu->arch.dec - jd;
130}
131
132/* XXX to do:
133 * lhax
134 * lhaux
135 * lswx
136 * lswi
137 * stswx
138 * stswi
139 * lha
140 * lhau
141 * lmw
142 * stmw
143 *
144 * XXX is_bigendian should depend on MMU mapping or MSR[LE]
145 */
146/* XXX Should probably auto-generate instruction decoding for a particular core
147 * from opcode tables in the future. */
/*
 * Top-level entry point for emulating a trapped guest instruction
 * (older, pre-kvm_ops variant: loads/stores are emulated inline here).
 *
 * Decodes the primary (and, for opcode 31, extended) opcode and either
 * handles it inline, emulates the MMIO access via kvmppc_handle_load/
 * kvmppc_handle_store, or falls back to kvmppc_core_emulate_op().  On
 * success the guest PC is stepped past the instruction unless a handler
 * cleared 'advance' (e.g. a queued exception must re-deliver at the
 * trapping PC).
 *
 * The trailing flag passed to the load/store helpers is 1 for the normal
 * forms and 0 for the byte-reversed (lwbrx/sthbrx/...) forms; the
 * "update" forms additionally write the effective address back to ra.
 */
int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	u32 inst = kvmppc_get_last_inst(vcpu);
	int ra = get_ra(inst);
	int rs = get_rs(inst);
	int rt = get_rt(inst);
	int sprn = get_sprn(inst);
	enum emulation_result emulated = EMULATE_DONE;
	int advance = 1;
	ulong spr_val = 0;

	/* this default type might be overwritten by subcategories */
	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

	pr_debug("Emulating opcode %d / %d\n", get_op(inst), get_xop(inst));

	switch (get_op(inst)) {
	case OP_TRAP:
#ifdef CONFIG_PPC_BOOK3S
	case OP_TRAP_64:
		kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
#else
		kvmppc_core_queue_program(vcpu,
					  vcpu->arch.shared->esr | ESR_PTR);
#endif
		/* Exception queued; leave the PC on the trapping insn. */
		advance = 0;
		break;

	case 31:
		switch (get_xop(inst)) {

		case OP_31_XOP_TRAP:
#ifdef CONFIG_64BIT
		case OP_31_XOP_TRAP_64:
#endif
#ifdef CONFIG_PPC_BOOK3S
			kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
#else
			kvmppc_core_queue_program(vcpu,
					vcpu->arch.shared->esr | ESR_PTR);
#endif
			advance = 0;
			break;
		case OP_31_XOP_LWZX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
			break;

		case OP_31_XOP_LBZX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
			break;

		case OP_31_XOP_LBZUX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
			/* Update form: write the accessed EA back to ra. */
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_STWX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
						       4, 1);
			break;

		case OP_31_XOP_STBX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
						       1, 1);
			break;

		case OP_31_XOP_STBUX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
						       1, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_LHAX:
			/* kvmppc_handle_loads() is the sign-extending variant. */
			emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
			break;

		case OP_31_XOP_LHZX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
			break;

		case OP_31_XOP_LHZUX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_MFSPR:
			switch (sprn) {
			case SPRN_SRR0:
				spr_val = vcpu->arch.shared->srr0;
				break;
			case SPRN_SRR1:
				spr_val = vcpu->arch.shared->srr1;
				break;
			case SPRN_PVR:
				spr_val = vcpu->arch.pvr;
				break;
			case SPRN_PIR:
				spr_val = vcpu->vcpu_id;
				break;
			case SPRN_MSSSR0:
				spr_val = 0;
				break;

			/* Note: mftb and TBRL/TBWL are user-accessible, so
			 * the guest can always access the real TB anyways.
			 * In fact, we probably will never see these traps. */
			case SPRN_TBWL:
				spr_val = get_tb() >> 32;
				break;
			case SPRN_TBWU:
				spr_val = get_tb();
				break;

			case SPRN_SPRG0:
				spr_val = vcpu->arch.shared->sprg0;
				break;
			case SPRN_SPRG1:
				spr_val = vcpu->arch.shared->sprg1;
				break;
			case SPRN_SPRG2:
				spr_val = vcpu->arch.shared->sprg2;
				break;
			case SPRN_SPRG3:
				spr_val = vcpu->arch.shared->sprg3;
				break;
			/* Note: SPRG4-7 are user-readable, so we don't get
			 * a trap. */

			case SPRN_DEC:
				spr_val = kvmppc_get_dec(vcpu, get_tb());
				break;
			default:
				emulated = kvmppc_core_emulate_mfspr(vcpu, sprn,
								     &spr_val);
				if (unlikely(emulated == EMULATE_FAIL)) {
					printk(KERN_INFO "mfspr: unknown spr "
						"0x%x\n", sprn);
				}
				break;
			}
			/*
			 * NOTE(review): unlike later versions, rt is written
			 * even when the backend failed; spr_val is then still
			 * its initial 0.
			 */
			kvmppc_set_gpr(vcpu, rt, spr_val);
			kvmppc_set_exit_type(vcpu, EMULATED_MFSPR_EXITS);
			break;

		case OP_31_XOP_STHX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
						       2, 1);
			break;

		case OP_31_XOP_STHUX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
						       2, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_MTSPR:
			spr_val = kvmppc_get_gpr(vcpu, rs);
			switch (sprn) {
			case SPRN_SRR0:
				vcpu->arch.shared->srr0 = spr_val;
				break;
			case SPRN_SRR1:
				vcpu->arch.shared->srr1 = spr_val;
				break;

			/* XXX We need to context-switch the timebase for
			 * watchdog and FIT. */
			case SPRN_TBWL: break;
			case SPRN_TBWU: break;

			case SPRN_MSSSR0: break;

			case SPRN_DEC:
				vcpu->arch.dec = spr_val;
				kvmppc_emulate_dec(vcpu);
				break;

			case SPRN_SPRG0:
				vcpu->arch.shared->sprg0 = spr_val;
				break;
			case SPRN_SPRG1:
				vcpu->arch.shared->sprg1 = spr_val;
				break;
			case SPRN_SPRG2:
				vcpu->arch.shared->sprg2 = spr_val;
				break;
			case SPRN_SPRG3:
				vcpu->arch.shared->sprg3 = spr_val;
				break;

			default:
				emulated = kvmppc_core_emulate_mtspr(vcpu, sprn,
								     spr_val);
				if (emulated == EMULATE_FAIL)
					printk(KERN_INFO "mtspr: unknown spr "
						"0x%x\n", sprn);
				break;
			}
			kvmppc_set_exit_type(vcpu, EMULATED_MTSPR_EXITS);
			break;

		case OP_31_XOP_DCBI:
			/* Do nothing. The guest is performing dcbi because
			 * hardware DMA is not snooped by the dcache, but
			 * emulated DMA either goes through the dcache as
			 * normal writes, or the host kernel has handled dcache
			 * coherence. */
			break;

		case OP_31_XOP_LWBRX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
			break;

		case OP_31_XOP_TLBSYNC:
			/* No-op: TLB operations are fully emulated. */
			break;

		case OP_31_XOP_STWBRX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
						       4, 0);
			break;

		case OP_31_XOP_LHBRX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
			break;

		case OP_31_XOP_STHBRX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
						       2, 0);
			break;

		default:
			/* Attempt core-specific emulation below. */
			emulated = EMULATE_FAIL;
		}
		break;

	case OP_LWZ:
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		break;

	case OP_LWZU:
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_LBZ:
		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
		break;

	case OP_LBZU:
		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_STW:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
					       4, 1);
		break;

	case OP_STWU:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
					       4, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_STB:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
					       1, 1);
		break;

	case OP_STBU:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
					       1, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_LHZ:
		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
		break;

	case OP_LHZU:
		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_LHA:
		emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
		break;

	case OP_LHAU:
		emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_STH:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
					       2, 1);
		break;

	case OP_STHU:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
					       2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	default:
		emulated = EMULATE_FAIL;
	}

	if (emulated == EMULATE_FAIL) {
		/* Last chance: let the core-specific backend try. */
		emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance);
		if (emulated == EMULATE_AGAIN) {
			advance = 0;
		} else if (emulated == EMULATE_FAIL) {
			advance = 0;
			printk(KERN_ERR "Couldn't emulate instruction 0x%08x "
			       "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst));
			/* Reflect the unknown instruction back to the guest. */
			kvmppc_core_queue_program(vcpu, 0);
		}
	}

	trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);

	/* Advance past emulated instruction. */
	if (advance)
		kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);

	return emulated;
}