v6.8
// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright IBM Corp. 2007
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <linux/jiffies.h>
#include <linux/hrtimer.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm_host.h>
#include <linux/clockchips.h>

#include <asm/reg.h>
#include <asm/time.h>
#include <asm/byteorder.h>
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/ppc-opcode.h>
#include <asm/sstep.h>
#include "timing.h"
#include "trace.h"

#ifdef CONFIG_PPC_FPU
static bool kvmppc_check_fp_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_FP)) {
		kvmppc_core_queue_fpunavail(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
		return true;
	}

	return false;
}
#endif /* CONFIG_PPC_FPU */

#ifdef CONFIG_VSX
static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_VSX)) {
		kvmppc_core_queue_vsx_unavail(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
		return true;
	}

	return false;
}
#endif /* CONFIG_VSX */

#ifdef CONFIG_ALTIVEC
static bool kvmppc_check_altivec_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_VEC)) {
		kvmppc_core_queue_vec_unavail(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
		return true;
	}

	return false;
}
#endif /* CONFIG_ALTIVEC */

/*
 * XXX to do:
 * lfiwax, lfiwzx
 * vector loads and stores
 *
 * Instructions that trap when used on cache-inhibited mappings
 * are not emulated here: multiple and string instructions,
 * lq/stq, and the load-reserve/store-conditional instructions.
 */
int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
{
	ppc_inst_t inst;
	enum emulation_result emulated = EMULATE_FAIL;
	struct instruction_op op;

	/* this default type might be overwritten by subcategories */
	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

	emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);
	if (emulated != EMULATE_DONE)
		return emulated;

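	/*
	 * Reset the MMIO emulation state used by the handlers below before
	 * decoding this access.
	 */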
	vcpu->arch.mmio_vsx_copy_nums = 0;
	vcpu->arch.mmio_vsx_offset = 0;
	vcpu->arch.mmio_copy_type = KVMPPC_VSX_COPY_NONE;
	vcpu->arch.mmio_sp64_extend = 0;
	vcpu->arch.mmio_sign_extend = 0;
	vcpu->arch.mmio_vmx_copy_nums = 0;
	vcpu->arch.mmio_vmx_offset = 0;
	vcpu->arch.mmio_host_swabbed = 0;

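	/*
	 * Decode the instruction with the common instruction analyser; a zero
	 * return from analyse_instr() means the access still has to be
	 * carried out, with op describing its type, size and flags.
	 */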
	emulated = EMULATE_FAIL;
	vcpu->arch.regs.msr = kvmppc_get_msr(vcpu);
	if (analyse_instr(&op, &vcpu->arch.regs, inst) == 0) {
		int type = op.type & INSTR_TYPE_MASK;
		int size = GETSIZE(op.type);

		vcpu->mmio_is_write = OP_IS_STORE(type);

		switch (type) {
		case LOAD:  {
			int instr_byte_swap = op.type & BYTEREV;

			if (op.type & SIGNEXT)
				emulated = kvmppc_handle_loads(vcpu,
						op.reg, size, !instr_byte_swap);
			else
				emulated = kvmppc_handle_load(vcpu,
						op.reg, size, !instr_byte_swap);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, vcpu->arch.vaddr_accessed);

			break;
		}
#ifdef CONFIG_PPC_FPU
		case LOAD_FP:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;

			if (op.type & FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			if (op.type & SIGNEXT)
				emulated = kvmppc_handle_loads(vcpu,
					     KVM_MMIO_REG_FPR|op.reg, size, 1);
			else
				emulated = kvmppc_handle_load(vcpu,
					     KVM_MMIO_REG_FPR|op.reg, size, 1);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, vcpu->arch.vaddr_accessed);

			break;
#endif
#ifdef CONFIG_ALTIVEC
		case LOAD_VMX:
			if (kvmppc_check_altivec_disabled(vcpu))
				return EMULATE_DONE;

			/* Hardware enforces alignment of VMX accesses */
			vcpu->arch.vaddr_accessed &= ~((unsigned long)size - 1);
			vcpu->arch.paddr_accessed &= ~((unsigned long)size - 1);

			if (size == 16) { /* lvx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_DWORD;
			} else if (size == 4) { /* lvewx  */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_WORD;
			} else if (size == 2) { /* lvehx  */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_HWORD;
			} else if (size == 1) { /* lvebx  */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_BYTE;
			} else
				break;

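			/* Element offset within the 16-byte VMX register. */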
			vcpu->arch.mmio_vmx_offset =
				(vcpu->arch.vaddr_accessed & 0xf)/size;

			if (size == 16) {
				vcpu->arch.mmio_vmx_copy_nums = 2;
				emulated = kvmppc_handle_vmx_load(vcpu,
						KVM_MMIO_REG_VMX|op.reg,
						8, 1);
			} else {
				vcpu->arch.mmio_vmx_copy_nums = 1;
				emulated = kvmppc_handle_vmx_load(vcpu,
						KVM_MMIO_REG_VMX|op.reg,
						size, 1);
			}
			break;
#endif
#ifdef CONFIG_VSX
		case LOAD_VSX: {
			int io_size_each;

			if (op.vsx_flags & VSX_CHECK_VEC) {
				if (kvmppc_check_altivec_disabled(vcpu))
					return EMULATE_DONE;
			} else {
				if (kvmppc_check_vsx_disabled(vcpu))
					return EMULATE_DONE;
			}

			if (op.vsx_flags & VSX_FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

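			/*
			 * Choose how the VSX register is filled: per-doubleword
			 * or per-word elements, optionally splatted.
			 */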
			if (op.element_size == 8)  {
				if (op.vsx_flags & VSX_SPLAT)
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_DWORD_LOAD_DUMP;
				else
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_DWORD;
			} else if (op.element_size == 4) {
				if (op.vsx_flags & VSX_SPLAT)
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_WORD_LOAD_DUMP;
				else
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_WORD;
			} else
				break;

			if (size < op.element_size) {
				/* precision convert case: lxsspx, etc */
				vcpu->arch.mmio_vsx_copy_nums = 1;
				io_size_each = size;
			} else { /* lxvw4x, lxvd2x, etc */
				vcpu->arch.mmio_vsx_copy_nums =
					size/op.element_size;
				io_size_each = op.element_size;
			}

			emulated = kvmppc_handle_vsx_load(vcpu,
					KVM_MMIO_REG_VSX|op.reg, io_size_each,
					1, op.type & SIGNEXT);
			break;
		}
#endif
		case STORE: {
			int instr_byte_swap = op.type & BYTEREV;

			emulated = kvmppc_handle_store(vcpu, kvmppc_get_gpr(vcpu, op.reg),
						       size, !instr_byte_swap);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, vcpu->arch.vaddr_accessed);

			break;
		}
#ifdef CONFIG_PPC_FPU
		case STORE_FP:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;

			/* The FP registers need to be flushed so that
			 * kvmppc_handle_store() can read actual FP vals
			 * from vcpu->arch.
			 */
			if (vcpu->kvm->arch.kvm_ops->giveup_ext)
				vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
						MSR_FP);

			if (op.type & FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			emulated = kvmppc_handle_store(vcpu,
					kvmppc_get_fpr(vcpu, op.reg), size, 1);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, vcpu->arch.vaddr_accessed);

			break;
#endif
#ifdef CONFIG_ALTIVEC
		case STORE_VMX:
			if (kvmppc_check_altivec_disabled(vcpu))
				return EMULATE_DONE;

			/* Hardware enforces alignment of VMX accesses. */
			vcpu->arch.vaddr_accessed &= ~((unsigned long)size - 1);
			vcpu->arch.paddr_accessed &= ~((unsigned long)size - 1);

			if (vcpu->kvm->arch.kvm_ops->giveup_ext)
				vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
						MSR_VEC);
			if (size == 16) { /* stvx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_DWORD;
			} else if (size == 4) { /* stvewx  */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_WORD;
			} else if (size == 2) { /* stvehx  */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_HWORD;
			} else if (size == 1) { /* stvebx  */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_BYTE;
			} else
				break;

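			/* Element offset within the 16-byte VMX register. */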
			vcpu->arch.mmio_vmx_offset =
				(vcpu->arch.vaddr_accessed & 0xf)/size;

			if (size == 16) {
				vcpu->arch.mmio_vmx_copy_nums = 2;
				emulated = kvmppc_handle_vmx_store(vcpu,
						op.reg, 8, 1);
			} else {
				vcpu->arch.mmio_vmx_copy_nums = 1;
				emulated = kvmppc_handle_vmx_store(vcpu,
						op.reg, size, 1);
			}

			break;
#endif
#ifdef CONFIG_VSX
		case STORE_VSX: {
			int io_size_each;

			if (op.vsx_flags & VSX_CHECK_VEC) {
				if (kvmppc_check_altivec_disabled(vcpu))
					return EMULATE_DONE;
			} else {
				if (kvmppc_check_vsx_disabled(vcpu))
					return EMULATE_DONE;
			}

			if (vcpu->kvm->arch.kvm_ops->giveup_ext)
				vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
						MSR_VSX);

			if (op.vsx_flags & VSX_FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			if (op.element_size == 8)
				vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_DWORD;
			else if (op.element_size == 4)
				vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_WORD;
			else
				break;

			if (size < op.element_size) {
				/* precise conversion case, like stxsspx */
				vcpu->arch.mmio_vsx_copy_nums = 1;
				io_size_each = size;
			} else { /* stxvw4x, stxvd2x, etc */
				vcpu->arch.mmio_vsx_copy_nums =
						size/op.element_size;
				io_size_each = op.element_size;
			}

			emulated = kvmppc_handle_vsx_store(vcpu,
					op.reg, io_size_each, 1);
			break;
		}
#endif
		case CACHEOP:
			/* Do nothing. The guest is performing dcbi because
			 * hardware DMA is not snooped by the dcache, but
			 * emulated DMA either goes through the dcache as
			 * normal writes, or the host kernel has handled dcache
			 * coherence.
			 */
			emulated = EMULATE_DONE;
			break;
		default:
			break;
		}
	}

	trace_kvm_ppc_instr(ppc_inst_val(inst), kvmppc_get_pc(vcpu), emulated);

	/* Advance past emulated instruction. */
	if (emulated != EMULATE_FAIL)
		kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + ppc_inst_len(inst));

	return emulated;
}
v4.6
 
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <linux/jiffies.h>
#include <linux/hrtimer.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm_host.h>
#include <linux/clockchips.h>

#include <asm/reg.h>
#include <asm/time.h>
#include <asm/byteorder.h>
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/ppc-opcode.h>
#include "timing.h"
#include "trace.h"

/* XXX to do:
 * lhax
 * lhaux
 * lswx
 * lswi
 * stswx
 * stswi
 * lha
 * lhau
 * lmw
 * stmw
 *
 */
int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 inst;
	int ra, rs, rt;
	enum emulation_result emulated;
	int advance = 1;

	/* this default type might be overwritten by subcategories */
	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

	emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);
	if (emulated != EMULATE_DONE)
		return emulated;

	ra = get_ra(inst);
	rs = get_rs(inst);
	rt = get_rt(inst);

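	/*
	 * Dispatch on the primary opcode; opcode 31 instructions are decoded
	 * further by their extended opcode field.
	 */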
	switch (get_op(inst)) {
	case 31:
		switch (get_xop(inst)) {
		case OP_31_XOP_LWZX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
			break;

		case OP_31_XOP_LBZX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
			break;

		case OP_31_XOP_LBZUX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_STWX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
			                               4, 1);
			break;

		case OP_31_XOP_STBX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
			                               1, 1);
			break;

		case OP_31_XOP_STBUX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
			                               1, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_LHAX:
			emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
			break;

		case OP_31_XOP_LHZX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
			break;

		case OP_31_XOP_LHZUX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_STHX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
			                               2, 1);
			break;

		case OP_31_XOP_STHUX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
			                               2, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_DCBST:
		case OP_31_XOP_DCBF:
		case OP_31_XOP_DCBI:
			/* Do nothing. The guest is performing dcbi because
			 * hardware DMA is not snooped by the dcache, but
			 * emulated DMA either goes through the dcache as
			 * normal writes, or the host kernel has handled dcache
			 * coherence. */
			break;

		case OP_31_XOP_LWBRX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
			break;

		case OP_31_XOP_STWBRX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
			                               4, 0);
			break;

		case OP_31_XOP_LHBRX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
			break;

		case OP_31_XOP_STHBRX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
			                               2, 0);
			break;

		default:
			emulated = EMULATE_FAIL;
			break;
		}
		break;

	case OP_LWZ:
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		break;

	/* TBD: Add support for other 64 bit load variants like ldu, ldux, ldx etc. */
	case OP_LD:
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
		break;

	case OP_LWZU:
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_LBZ:
		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
		break;

	case OP_LBZU:
		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_STW:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
		                               4, 1);
		break;

	/* TBD: Add support for other 64 bit store variants like stdu, stdux, stdx etc. */
	case OP_STD:
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
		                               8, 1);
		break;

	case OP_STWU:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
		                               4, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_STB:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
		                               1, 1);
		break;

	case OP_STBU:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
		                               1, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_LHZ:
		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
		break;

	case OP_LHZU:
		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_LHA:
		emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
		break;

	case OP_LHAU:
		emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_STH:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
		                               2, 1);
		break;

	case OP_STHU:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
		                               2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	default:
		emulated = EMULATE_FAIL;
		break;
	}

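	/*
	 * Nothing above could emulate the access: queue a program interrupt
	 * for the guest and do not advance past the instruction.
	 */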
	if (emulated == EMULATE_FAIL) {
		advance = 0;
		kvmppc_core_queue_program(vcpu, 0);
	}

	trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);

	/* Advance past emulated instruction. */
	if (advance)
		kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);

	return emulated;
}