/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright IBM Corp. 2008
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#ifndef __POWERPC_KVM_PPC_H__
#define __POWERPC_KVM_PPC_H__

/* This file exists just so we can dereference kvm_vcpu, avoiding nested header
 * dependencies. */

#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bug.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/kvm_book3s.h>
#else
#include <asm/kvm_booke.h>
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#include <asm/paca.h>
#include <asm/xive.h>
#include <asm/cpu_has_feature.h>
#endif
#include <asm/inst.h>

/*
 * KVMPPC_INST_SW_BREAKPOINT is the debug instruction used to
 * implement software breakpoints.
 */
#define KVMPPC_INST_SW_BREAKPOINT	0x00dddd00

enum emulation_result {
	EMULATE_DONE,         /* no further processing */
	EMULATE_DO_MMIO,      /* kvm_run filled with MMIO request */
	EMULATE_FAIL,         /* can't emulate this instruction */
	EMULATE_AGAIN,        /* something went wrong. go again */
	EMULATE_EXIT_USER,    /* emulation requires exit to user-space */
};

enum instruction_fetch_type {
	INST_GENERIC,
	INST_SC,		/* system call */
};

enum xlate_instdata {
	XLATE_INST,		/* translate instruction address */
	XLATE_DATA		/* translate data address */
};

enum xlate_readwrite {
	XLATE_READ,		/* check for read permissions */
	XLATE_WRITE		/* check for write permissions */
};

extern int kvmppc_vcpu_run(struct kvm_vcpu *vcpu);
extern int __kvmppc_vcpu_run(struct kvm_vcpu *vcpu);
extern void kvmppc_handler_highmem(void);

extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_handle_load(struct kvm_vcpu *vcpu,
			      unsigned int rt, unsigned int bytes,
			      int is_default_endian);
extern int kvmppc_handle_loads(struct kvm_vcpu *vcpu,
			       unsigned int rt, unsigned int bytes,
			       int is_default_endian);
extern int kvmppc_handle_vsx_load(struct kvm_vcpu *vcpu,
				  unsigned int rt, unsigned int bytes,
				  int is_default_endian, int mmio_sign_extend);
extern int kvmppc_handle_vmx_load(struct kvm_vcpu *vcpu,
				  unsigned int rt, unsigned int bytes, int is_default_endian);
extern int kvmppc_handle_vmx_store(struct kvm_vcpu *vcpu,
				   unsigned int rs, unsigned int bytes, int is_default_endian);
extern int kvmppc_handle_store(struct kvm_vcpu *vcpu,
			       u64 val, unsigned int bytes,
			       int is_default_endian);
extern int kvmppc_handle_vsx_store(struct kvm_vcpu *vcpu,
				   int rs, unsigned int bytes,
				   int is_default_endian);

extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
				 enum instruction_fetch_type type,
				 unsigned long *inst);

extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
		     bool data);
extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
		     bool data);
extern int kvmppc_emulate_instruction(struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_mmio(struct kvm_vcpu *vcpu);
extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb);
extern void kvmppc_decrementer_func(struct kvm_vcpu *vcpu);
extern int kvmppc_sanity_check(struct kvm_vcpu *vcpu);
extern int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu);
extern void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu);

/* Core-specific hooks */

extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
			   unsigned int gtlb_idx);
extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid);
extern int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
			      gva_t eaddr);
extern void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu);
extern int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr,
			enum xlate_instdata xlid, enum xlate_readwrite xlrw,
			struct kvmppc_pte *pte);

extern int kvmppc_core_vcpu_create(struct kvm_vcpu *vcpu);
extern void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu);
extern int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu);
extern int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
				      struct kvm_translation *tr);

extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);

extern int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);

extern void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu,
					    ulong srr1_flags);
extern void kvmppc_core_queue_syscall(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu,
				      ulong srr1_flags);
extern void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu,
					ulong srr1_flags);
extern void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu,
					  ulong srr1_flags);
extern void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu,
					  ulong srr1_flags);
extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				       struct kvm_interrupt *irq);
extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu,
					ulong dear_flags,
					ulong esr_flags);
extern void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
					   ulong srr1_flags,
					   ulong dar,
					   ulong dsisr);
extern void kvmppc_core_queue_itlb_miss(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
					   ulong srr1_flags);

extern void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu);
extern int kvmppc_core_check_requests(struct kvm_vcpu *vcpu);

extern int kvmppc_booke_init(void);
extern void kvmppc_booke_exit(void);

extern int kvmppc_kvm_pv(struct kvm_vcpu *vcpu);
extern void kvmppc_map_magic(struct kvm_vcpu *vcpu);

extern int kvmppc_allocate_hpt(struct kvm_hpt_info *info, u32 order);
extern void kvmppc_set_hpt(struct kvm *kvm, struct kvm_hpt_info *info);
extern int kvmppc_alloc_reset_hpt(struct kvm *kvm, int order);
extern void kvmppc_free_hpt(struct kvm_hpt_info *info);
extern void kvmppc_rmap_reset(struct kvm *kvm);
extern void kvmppc_map_vrma(struct kvm_vcpu *vcpu,
			    struct kvm_memory_slot *memslot, unsigned long porder);
extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu);
extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
				struct iommu_group *grp);
extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
				struct iommu_group *grp);
extern int kvmppc_switch_mmu_to_hpt(struct kvm *kvm);
extern int kvmppc_switch_mmu_to_radix(struct kvm *kvm);
extern void kvmppc_setup_partition_table(struct kvm *kvm);

extern int kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
				struct kvm_create_spapr_tce_64 *args);
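
/*
 * Validate a TCE request's I/O bus address and page count against the
 * window of a TCE table; evaluates to H_SUCCESS or H_PARAMETER, as
 * expected by the H_PUT_TCE family of hypercalls declared below.
 */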
#define kvmppc_ioba_validate(stt, ioba, npages)                         \
		(iommu_tce_check_ioba((stt)->page_shift, (stt)->offset, \
				(stt)->size, (ioba), (npages)) ?        \
				H_PARAMETER : H_SUCCESS)
extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			     unsigned long ioba, unsigned long tce);
extern long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_list, unsigned long npages);
extern long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_value, unsigned long npages);
extern long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			     unsigned long ioba);
extern struct page *kvm_alloc_hpt_cma(unsigned long nr_pages);
extern void kvm_free_hpt_cma(struct page *page, unsigned long nr_pages);
extern int kvmppc_core_init_vm(struct kvm *kvm);
extern void kvmppc_core_destroy_vm(struct kvm *kvm);
extern void kvmppc_core_free_memslot(struct kvm *kvm,
				     struct kvm_memory_slot *slot);
extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				const struct kvm_memory_slot *old,
				struct kvm_memory_slot *new,
				enum kvm_mr_change change);
extern void kvmppc_core_commit_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new,
				enum kvm_mr_change change);
extern int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm,
				      struct kvm_ppc_smmu_info *info);
extern void kvmppc_core_flush_memslot(struct kvm *kvm,
				      struct kvm_memory_slot *memslot);

extern int kvmppc_bookehv_init(void);
extern void kvmppc_bookehv_exit(void);

extern int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu);

extern int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *);
extern int kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
					   struct kvm_ppc_resize_hpt *rhpt);
extern int kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm,
					  struct kvm_ppc_resize_hpt *rhpt);

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq);

extern int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp);
extern int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu);
extern void kvmppc_rtas_tokens_free(struct kvm *kvm);

extern int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server,
				u32 priority);
extern int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server,
				u32 *priority);
extern int kvmppc_xics_int_on(struct kvm *kvm, u32 irq);
extern int kvmppc_xics_int_off(struct kvm *kvm, u32 irq);

void kvmppc_core_dequeue_debug(struct kvm_vcpu *vcpu);
void kvmppc_core_queue_debug(struct kvm_vcpu *vcpu);

union kvmppc_one_reg {
	u32	wval;
	u64	dval;
	vector128 vval;
	u64	vsxval[2];
	u32	vsx32val[4];
	u16	vsx16val[8];
	u8	vsx8val[16];
	struct {
		u64	addr;
		u64	length;
	}	vpaval;
	u64	xive_timaval[2];
};

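/*
 * Backend operations, implemented separately by HV and PR KVM and
 * selected per VM via kvm->arch.kvm_ops (see kvmppc_hv_ops and
 * kvmppc_pr_ops below).
 */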
struct kvmppc_ops {
	struct module *owner;
	int (*get_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
	int (*set_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
	int (*get_one_reg)(struct kvm_vcpu *vcpu, u64 id,
			   union kvmppc_one_reg *val);
	int (*set_one_reg)(struct kvm_vcpu *vcpu, u64 id,
			   union kvmppc_one_reg *val);
	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	void (*vcpu_put)(struct kvm_vcpu *vcpu);
	void (*inject_interrupt)(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags);
	void (*set_msr)(struct kvm_vcpu *vcpu, u64 msr);
	int (*vcpu_run)(struct kvm_vcpu *vcpu);
	int (*vcpu_create)(struct kvm_vcpu *vcpu);
	void (*vcpu_free)(struct kvm_vcpu *vcpu);
	int (*check_requests)(struct kvm_vcpu *vcpu);
	int (*get_dirty_log)(struct kvm *kvm, struct kvm_dirty_log *log);
	void (*flush_memslot)(struct kvm *kvm, struct kvm_memory_slot *memslot);
	int (*prepare_memory_region)(struct kvm *kvm,
				     const struct kvm_memory_slot *old,
				     struct kvm_memory_slot *new,
				     enum kvm_mr_change change);
	void (*commit_memory_region)(struct kvm *kvm,
				     struct kvm_memory_slot *old,
				     const struct kvm_memory_slot *new,
				     enum kvm_mr_change change);
	bool (*unmap_gfn_range)(struct kvm *kvm, struct kvm_gfn_range *range);
	bool (*age_gfn)(struct kvm *kvm, struct kvm_gfn_range *range);
	bool (*test_age_gfn)(struct kvm *kvm, struct kvm_gfn_range *range);
	bool (*set_spte_gfn)(struct kvm *kvm, struct kvm_gfn_range *range);
	void (*free_memslot)(struct kvm_memory_slot *slot);
	int (*init_vm)(struct kvm *kvm);
	void (*destroy_vm)(struct kvm *kvm);
	int (*get_smmu_info)(struct kvm *kvm, struct kvm_ppc_smmu_info *info);
	int (*emulate_op)(struct kvm_vcpu *vcpu,
			  unsigned int inst, int *advance);
	int (*emulate_mtspr)(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
	int (*emulate_mfspr)(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
	void (*fast_vcpu_kick)(struct kvm_vcpu *vcpu);
	int (*arch_vm_ioctl)(struct file *filp, unsigned int ioctl,
			     unsigned long arg);
	int (*hcall_implemented)(unsigned long hcall);
	int (*irq_bypass_add_producer)(struct irq_bypass_consumer *,
				       struct irq_bypass_producer *);
	void (*irq_bypass_del_producer)(struct irq_bypass_consumer *,
					struct irq_bypass_producer *);
	int (*configure_mmu)(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg);
	int (*get_rmmu_info)(struct kvm *kvm, struct kvm_ppc_rmmu_info *info);
	int (*set_smt_mode)(struct kvm *kvm, unsigned long mode,
			    unsigned long flags);
	void (*giveup_ext)(struct kvm_vcpu *vcpu, ulong msr);
	int (*enable_nested)(struct kvm *kvm);
	int (*load_from_eaddr)(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
			       int size);
	int (*store_to_eaddr)(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
			      int size);
	int (*enable_svm)(struct kvm *kvm);
	int (*svm_off)(struct kvm *kvm);
	int (*enable_dawr1)(struct kvm *kvm);
	bool (*hash_v3_possible)(void);
	int (*create_vm_debugfs)(struct kvm *kvm);
	int (*create_vcpu_debugfs)(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry);
};

extern struct kvmppc_ops *kvmppc_hv_ops;
extern struct kvmppc_ops *kvmppc_pr_ops;

static inline int kvmppc_get_last_inst(struct kvm_vcpu *vcpu,
				enum instruction_fetch_type type, ppc_inst_t *inst)
{
	int ret = EMULATE_DONE;
	u32 fetched_inst;

	/* Load the instruction manually if it failed to do so in the
	 * exit path */
	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
		ret = kvmppc_load_last_inst(vcpu, type, &vcpu->arch.last_inst);

	/* Write fetch_failed unswapped if the fetch failed */
	if (ret != EMULATE_DONE) {
		*inst = ppc_inst(KVM_INST_FETCH_FAILED);
		return ret;
	}

#ifdef CONFIG_PPC64
	/* Is this a prefixed instruction? */
	if ((vcpu->arch.last_inst >> 32) != 0) {
		u32 prefix = vcpu->arch.last_inst >> 32;
		u32 suffix = vcpu->arch.last_inst;
		if (kvmppc_need_byteswap(vcpu)) {
			prefix = swab32(prefix);
			suffix = swab32(suffix);
		}
		*inst = ppc_inst_prefix(prefix, suffix);
		return EMULATE_DONE;
	}
#endif

	fetched_inst = kvmppc_need_byteswap(vcpu) ?
		swab32(vcpu->arch.last_inst) :
		vcpu->arch.last_inst;
	*inst = ppc_inst(fetched_inst);
	return EMULATE_DONE;
}
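
/*
 * Illustrative use of kvmppc_get_last_inst() from an emulation path
 * (sketch only):
 *
 *	ppc_inst_t inst;
 *
 *	if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst) != EMULATE_DONE)
 *		return EMULATE_AGAIN;
 *	... emulate 'inst' ...
 */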

static inline bool is_kvmppc_hv_enabled(struct kvm *kvm)
{
	return kvm->arch.kvm_ops == kvmppc_hv_ops;
}

extern int kvmppc_hwrng_present(void);

/*
 * Cuts inst bits out using IBM bit ordering, as in the Power ISA:
 * bit 0 is the leftmost (most significant) bit of the 64-bit value.
 * Both the msb and lsb bits given are included in the result.
 */
static inline u32 kvmppc_get_field(u64 inst, int msb, int lsb)
{
	u32 r;
	u32 mask;

	BUG_ON(msb > lsb);

	mask = (1 << (lsb - msb + 1)) - 1;
	r = (inst >> (63 - lsb)) & mask;

	return r;
}
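
/*
 * Worked example (illustrative): for a 32-bit instruction held in the
 * low word of 'inst', the primary opcode occupies IBM bits 32..37 of
 * the doubleword, so kvmppc_get_field(inst, 32, 37) computes
 * (inst >> 26) & 0x3f, i.e. the top 6 bits of the instruction word.
 */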

/*
 * Replaces inst bits, using the same IBM bit ordering as above.
 */
static inline u32 kvmppc_set_field(u64 inst, int msb, int lsb, int value)
{
	u32 r;
	u32 mask;

	BUG_ON(msb > lsb);

	mask = ((1 << (lsb - msb + 1)) - 1) << (63 - lsb);
	r = (inst & ~mask) | ((value << (63 - lsb)) & mask);

	return r;
}
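
/*
 * Worked example (illustrative): kvmppc_set_field(inst, 38, 42, rt)
 * rewrites IBM bits 38..42 of the doubleword, i.e. the RT field
 * (bits 6..10) of a 32-bit instruction held in the low word:
 * mask = 0x1f << 21, and 'rt' is shifted into place by 21.
 */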

#define one_reg_size(id)	\
	(1ul << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))

#define get_reg_val(id, reg)	({		\
	union kvmppc_one_reg __u;		\
	switch (one_reg_size(id)) {		\
	case 4: __u.wval = (reg); break;	\
	case 8: __u.dval = (reg); break;	\
	default: BUG();				\
	}					\
	__u;					\
})


#define set_reg_val(id, val)	({		\
	u64 __v;				\
	switch (one_reg_size(id)) {		\
	case 4: __v = (val).wval; break;	\
	case 8: __v = (val).dval; break;	\
	default: BUG();				\
	}					\
	__v;					\
})
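
/*
 * Illustrative use from a one_reg handler ('some_reg' is a hypothetical
 * field, not a real vcpu member):
 *
 *	union kvmppc_one_reg val;
 *
 *	val = get_reg_val(id, vcpu->arch.some_reg);	(wrap for copy-out)
 *	vcpu->arch.some_reg = set_reg_val(id, val);	(unwrap copied-in val)
 *
 * one_reg_size() decodes the 4- or 8-byte size from the KVM_REG id.
 */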

int kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);
int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);

void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid);

struct openpic;

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
extern void kvm_cma_reserve(void) __init;
static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{
	paca_ptrs[cpu]->kvm_hstate.xics_phys = (void __iomem *)addr;
}

static inline void kvmppc_set_xive_tima(int cpu,
					unsigned long phys_addr,
					void __iomem *virt_addr)
{
	paca_ptrs[cpu]->kvm_hstate.xive_tima_phys = (void __iomem *)phys_addr;
	paca_ptrs[cpu]->kvm_hstate.xive_tima_virt = virt_addr;
}

static inline u32 kvmppc_get_xics_latch(void)
{
	u32 xirr;

	xirr = get_paca()->kvm_hstate.saved_xirr;
	get_paca()->kvm_hstate.saved_xirr = 0;
	return xirr;
}

/*
 * To avoid the need to unnecessarily exit fully to the host kernel, an IPI to
 * a CPU thread that's running/napping inside of a guest is by default regarded
 * as a request to wake the CPU (if needed) and continue execution within the
 * guest, potentially to process new state like externally-generated
 * interrupts or IPIs sent from within the guest itself (e.g. H_PROD/H_IPI).
 *
 * To force an exit to the host kernel, kvmppc_set_host_ipi() must be called
 * prior to issuing the IPI to set the corresponding 'host_ipi' flag in the
 * target CPU's PACA. To avoid unnecessary exits to the host, this flag should
 * be immediately cleared via kvmppc_clear_host_ipi() by the IPI handler on
 * the receiving side prior to processing the IPI work.
 *
 * NOTE:
 *
 * We currently issue an smp_mb() at the beginning of kvmppc_set_host_ipi().
 * This is to guard against sequences such as the following:
 *
 *      CPU
 *        X: smp_muxed_ipi_set_message():
 *        X:   smp_mb()
 *        X:   message[RESCHEDULE] = 1
 *        X: doorbell_global_ipi(42):
 *        X:   kvmppc_set_host_ipi(42)
 *        X:   ppc_msgsnd_sync()/smp_mb()
 *        X:   ppc_msgsnd() -> 42
 *       42: doorbell_exception(): // from CPU X
 *       42:   ppc_msgsync()
 *      105: smp_muxed_ipi_set_message():
 *      105:   smp_mb()
 *           // STORE DEFERRED DUE TO RE-ORDERING
 *    --105:   message[CALL_FUNCTION] = 1
 *    | 105: doorbell_global_ipi(42):
 *    | 105:   kvmppc_set_host_ipi(42)
 *    |  42:   kvmppc_clear_host_ipi(42)
 *    |  42: smp_ipi_demux_relaxed()
 *    |  42: // returns to executing guest
 *    |      // RE-ORDERED STORE COMPLETES
 *    ->105:   message[CALL_FUNCTION] = 1
 *      105:   ppc_msgsnd_sync()/smp_mb()
 *      105:   ppc_msgsnd() -> 42
 *       42: local_paca->kvm_hstate.host_ipi == 0 // IPI ignored
 *      105: // hangs waiting on 42 to process messages/call_single_queue
 *
 * We also issue an smp_mb() at the end of kvmppc_clear_host_ipi(). This is
 * to guard against sequences such as the following (as well as to create
 * a read-side pairing with the barrier in kvmppc_set_host_ipi()):
 *
 *      CPU
 *        X: smp_muxed_ipi_set_message():
 *        X:   smp_mb()
 *        X:   message[RESCHEDULE] = 1
 *        X: doorbell_global_ipi(42):
 *        X:   kvmppc_set_host_ipi(42)
 *        X:   ppc_msgsnd_sync()/smp_mb()
 *        X:   ppc_msgsnd() -> 42
 *       42: doorbell_exception(): // from CPU X
 *       42:   ppc_msgsync()
 *           // STORE DEFERRED DUE TO RE-ORDERING
 *    -- 42:   kvmppc_clear_host_ipi(42)
 *    |  42: smp_ipi_demux_relaxed()
 *    | 105: smp_muxed_ipi_set_message():
 *    | 105:   smp_mb()
 *    | 105:   message[CALL_FUNCTION] = 1
 *    | 105: doorbell_global_ipi(42):
 *    | 105:   kvmppc_set_host_ipi(42)
 *    |      // RE-ORDERED STORE COMPLETES
 *    -> 42:   kvmppc_clear_host_ipi(42)
 *       42: // returns to executing guest
 *      105:   ppc_msgsnd_sync()/smp_mb()
 *      105:   ppc_msgsnd() -> 42
 *       42: local_paca->kvm_hstate.host_ipi == 0 // IPI ignored
 *      105: // hangs waiting on 42 to process messages/call_single_queue
 */
static inline void kvmppc_set_host_ipi(int cpu)
{
	/*
	 * order stores of IPI messages vs. setting of host_ipi flag
	 *
	 * pairs with the barrier in kvmppc_clear_host_ipi()
	 */
	smp_mb();
	WRITE_ONCE(paca_ptrs[cpu]->kvm_hstate.host_ipi, 1);
}

static inline void kvmppc_clear_host_ipi(int cpu)
{
	WRITE_ONCE(paca_ptrs[cpu]->kvm_hstate.host_ipi, 0);
	/*
	 * order clearing of host_ipi flag vs. processing of IPI messages
	 *
	 * pairs with the barrier in kvmppc_set_host_ipi()
	 */
	smp_mb();
}

static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->fast_vcpu_kick(vcpu);
}

extern void kvm_hv_vm_activated(void);
extern void kvm_hv_vm_deactivated(void);
extern bool kvm_hv_mode_active(void);

extern void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu);

#else
static inline void __init kvm_cma_reserve(void)
{}

static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{}

static inline void kvmppc_set_xive_tima(int cpu,
					unsigned long phys_addr,
					void __iomem *virt_addr)
{}

static inline u32 kvmppc_get_xics_latch(void)
{
	return 0;
}

static inline void kvmppc_set_host_ipi(int cpu)
{}

static inline void kvmppc_clear_host_ipi(int cpu)
{}

static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
	kvm_vcpu_kick(vcpu);
}

static inline bool kvm_hv_mode_active(void) { return false; }

#endif

#ifdef CONFIG_PPC_PSERIES
static inline bool kvmhv_on_pseries(void)
{
	return !cpu_has_feature(CPU_FTR_HVMODE);
}
#else
static inline bool kvmhv_on_pseries(void)
{
	return false;
}
#endif

#ifndef CONFIG_PPC_BOOK3S

static inline bool kvmhv_is_nestedv2(void)
{
	return false;
}

static inline bool kvmhv_is_nestedv1(void)
{
	return false;
}

static inline int kvmhv_nestedv2_reload_ptregs(struct kvm_vcpu *vcpu,
					       struct pt_regs *regs)
{
	return 0;
}
static inline int kvmhv_nestedv2_mark_dirty_ptregs(struct kvm_vcpu *vcpu,
						   struct pt_regs *regs)
{
	return 0;
}

static inline int kvmhv_nestedv2_mark_dirty(struct kvm_vcpu *vcpu, u16 iden)
{
	return 0;
}

static inline int kvmhv_nestedv2_cached_reload(struct kvm_vcpu *vcpu, u16 iden)
{
	return 0;
}

#endif

#ifdef CONFIG_KVM_XICS
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.irq_type == KVMPPC_IRQ_XICS;
}

static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
				struct kvm *kvm)
{
	if (kvm && kvm_irq_bypass)
		return kvm->arch.pimap;
	return NULL;
}

extern void kvmppc_alloc_host_rm_ops(void);
extern void kvmppc_free_host_rm_ops(void);
extern void kvmppc_free_pimap(struct kvm *kvm);
extern int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall);
extern void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd);
extern int kvmppc_xive_xics_hcall(struct kvm_vcpu *vcpu, u32 req);
extern u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
extern int kvmppc_xics_connect_vcpu(struct kvm_device *dev,
				    struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xics_ipi_action(void);
extern void kvmppc_xics_set_mapped(struct kvm *kvm, unsigned long guest_irq,
				   unsigned long host_irq);
extern void kvmppc_xics_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
				   unsigned long host_irq);
extern long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu, __be32 xirr,
					struct kvmppc_irq_map *irq_map,
					struct kvmppc_passthru_irqmap *pimap,
					bool *again);

extern int kvmppc_xics_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
			       int level, bool line_status);

extern int h_ipi_redirect;
#else
static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
				struct kvm *kvm)
	{ return NULL; }
static inline void kvmppc_alloc_host_rm_ops(void) {}
static inline void kvmppc_free_host_rm_ops(void) {}
static inline void kvmppc_free_pimap(struct kvm *kvm) {}
static inline int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
	{ return 0; }
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
	{ return 0; }
static inline void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
	{ return 0; }
static inline int kvmppc_xive_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
	{ return 0; }
#endif

#ifdef CONFIG_KVM_XIVE
/*
 * Below, the first "xive" is the "eXternal Interrupt Virtualization Engine",
 * ie. the new P9 interrupt controller, while the second "xive" is the legacy
 * "eXternal Interrupt Vector Entry", which is the configuration of an
 * interrupt on the "xics" interrupt controller on P8 and earlier. These
 * two functions consume or produce a legacy "XIVE" state from the
 * new "XIVE" interrupt controller.
 */
extern int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
				u32 priority);
extern int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
				u32 *priority);
extern int kvmppc_xive_int_on(struct kvm *kvm, u32 irq);
extern int kvmppc_xive_int_off(struct kvm *kvm, u32 irq);

extern int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
				    struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
				  unsigned long host_irq);
extern int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
				  unsigned long host_irq);
extern u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval);

extern int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
			       int level, bool line_status);
extern void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu);
extern void kvmppc_xive_pull_vcpu(struct kvm_vcpu *vcpu);
extern bool kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu);

static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.irq_type == KVMPPC_IRQ_XIVE;
}

extern int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
					   struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu,
				     union kvmppc_one_reg *val);
extern int kvmppc_xive_native_set_vp(struct kvm_vcpu *vcpu,
				     union kvmppc_one_reg *val);
extern bool kvmppc_xive_native_supported(void);

#else
static inline int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
				       u32 priority) { return -1; }
static inline int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
				       u32 *priority) { return -1; }
static inline int kvmppc_xive_int_on(struct kvm *kvm, u32 irq) { return -1; }
static inline int kvmppc_xive_int_off(struct kvm *kvm, u32 irq) { return -1; }

static inline int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
					   struct kvm_vcpu *vcpu, u32 cpu) { return -EBUSY; }
static inline void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
					 unsigned long host_irq) { return -ENODEV; }
static inline int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
					 unsigned long host_irq) { return -ENODEV; }
static inline u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu) { return 0; }
static inline int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval) { return -ENOENT; }

static inline int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
				      int level, bool line_status) { return -ENODEV; }
static inline void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu) { }
static inline void kvmppc_xive_pull_vcpu(struct kvm_vcpu *vcpu) { }
static inline bool kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu) { return true; }

static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu)
	{ return 0; }
static inline int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
						  struct kvm_vcpu *vcpu, u32 cpu) { return -EBUSY; }
static inline void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu,
					    union kvmppc_one_reg *val)
{ return 0; }
static inline int kvmppc_xive_native_set_vp(struct kvm_vcpu *vcpu,
					    union kvmppc_one_reg *val)
{ return -ENOENT; }

#endif /* CONFIG_KVM_XIVE */

#if defined(CONFIG_PPC_POWERNV) && defined(CONFIG_KVM_BOOK3S_64_HANDLER)
static inline bool xics_on_xive(void)
{
	return xive_enabled() && cpu_has_feature(CPU_FTR_HVMODE);
}
#else
static inline bool xics_on_xive(void)
{
	return false;
}
#endif

/*
 * Prototypes for functions called only from assembler code.
 * Having prototypes reduces sparse errors.
 */
long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			 unsigned long ioba, unsigned long tce);
long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
				  unsigned long liobn, unsigned long ioba,
				  unsigned long tce_list, unsigned long npages);
long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
			   unsigned long liobn, unsigned long ioba,
			   unsigned long tce_value, unsigned long npages);
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
			    unsigned int yield_count);
long kvmppc_rm_h_random(struct kvm_vcpu *vcpu);
void kvmhv_commence_exit(int trap);
void kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu);
void kvmppc_subcore_enter_guest(void);
void kvmppc_subcore_exit_guest(void);
long kvmppc_realmode_hmi_handler(void);
long kvmppc_p9_realmode_hmi_handler(struct kvm_vcpu *vcpu);
long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
		    long pte_index, unsigned long pteh, unsigned long ptel);
long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
		     unsigned long pte_index, unsigned long avpn);
long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu);
long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
		      unsigned long pte_index, unsigned long avpn);
long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
		   unsigned long pte_index);
long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
			unsigned long pte_index);
long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
			unsigned long pte_index);
long kvmppc_rm_h_page_init(struct kvm_vcpu *vcpu, unsigned long flags,
			   unsigned long dest, unsigned long src);
long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
			  unsigned long slb_v, unsigned int status, bool data);
void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu);

/*
 * Host-side operations we want to set up while running in real
 * mode in the guest, operating on the XICS.
 * Currently only VCPU wakeup is supported.
 */

union kvmppc_rm_state {
	unsigned long raw;
	struct {
		u32 in_host;
		u32 rm_action;
	};
};

struct kvmppc_host_rm_core {
	union kvmppc_rm_state rm_state;
	void *rm_data;
	char pad[112];
};

struct kvmppc_host_rm_ops {
	struct kvmppc_host_rm_core	*rm_core;
	void		(*vcpu_kick)(struct kvm_vcpu *vcpu);
};

extern struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;

static inline unsigned long kvmppc_get_epr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
	return mfspr(SPRN_GEPR);
#elif defined(CONFIG_BOOKE)
	return vcpu->arch.epr;
#else
	return 0;
#endif
}

static inline void kvmppc_set_epr(struct kvm_vcpu *vcpu, u32 epr)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GEPR, epr);
#elif defined(CONFIG_BOOKE)
	vcpu->arch.epr = epr;
#endif
}

#ifdef CONFIG_KVM_MPIC

void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu);
int kvmppc_mpic_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
			     u32 cpu);
void kvmppc_mpic_disconnect_vcpu(struct openpic *opp, struct kvm_vcpu *vcpu);

#else

static inline void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu)
{
}

static inline int kvmppc_mpic_connect_vcpu(struct kvm_device *dev,
					   struct kvm_vcpu *vcpu, u32 cpu)
{
	return -EINVAL;
}

static inline void kvmppc_mpic_disconnect_vcpu(struct openpic *opp,
					       struct kvm_vcpu *vcpu)
{
}

#endif /* CONFIG_KVM_MPIC */

int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
			      struct kvm_config_tlb *cfg);
int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
			     struct kvm_dirty_tlb *cfg);

long kvmppc_alloc_lpid(void);
void kvmppc_free_lpid(long lpid);
void kvmppc_init_lpid(unsigned long nr_lpids);

static inline void kvmppc_mmu_flush_icache(kvm_pfn_t pfn)
{
	struct folio *folio;
	/*
	 * We can only access pages that the kernel maps
	 * as memory. Bail out for unmapped ones.
	 */
	if (!pfn_valid(pfn))
		return;

	/* Clear i-cache for new pages */
	folio = page_folio(pfn_to_page(pfn));
	if (!test_bit(PG_dcache_clean, &folio->flags)) {
		flush_dcache_icache_folio(folio);
		set_bit(PG_dcache_clean, &folio->flags);
	}
}

/*
 * Shared struct helpers. The shared struct can be little or big endian,
 * depending on the guest endianness. So expose helpers to all of them.
 */
static inline bool kvmppc_shared_big_endian(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
	/* Only Book3S_64 PR supports bi-endian for now */
	return vcpu->arch.shared_big_endian;
#elif defined(CONFIG_PPC_BOOK3S_64) && defined(__LITTLE_ENDIAN__)
	/* Book3s_64 HV on little endian is always little endian */
	return false;
#else
	return true;
#endif
}

#define KVMPPC_BOOKE_HV_SPRNG_ACCESSOR_GET(reg, bookehv_spr)		\
static inline ulong kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
{									\
	return mfspr(bookehv_spr);					\
}									\

#define KVMPPC_BOOKE_HV_SPRNG_ACCESSOR_SET(reg, bookehv_spr)		\
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, ulong val)	\
{									\
	mtspr(bookehv_spr, val);					\
}									\

#define KVMPPC_VCPU_SHARED_REGS_ACCESSOR_GET(reg, size, iden)		\
static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
{									\
	if (iden)							\
		WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, iden) < 0);	\
	if (kvmppc_shared_big_endian(vcpu))				\
		return be##size##_to_cpu((__be##size __force)vcpu->arch.shared->reg); \
	else								\
		return le##size##_to_cpu((__le##size __force)vcpu->arch.shared->reg); \
}									\

#define KVMPPC_VCPU_SHARED_REGS_ACCESSOR_SET(reg, size, iden)		\
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val)	\
{									\
	if (kvmppc_shared_big_endian(vcpu))				\
		vcpu->arch.shared->reg = (u##size __force)cpu_to_be##size(val); \
	else								\
		vcpu->arch.shared->reg = (u##size __force)cpu_to_le##size(val); \
									\
	if (iden)							\
		kvmhv_nestedv2_mark_dirty(vcpu, iden);			\
}									\

#define KVMPPC_VCPU_SHARED_REGS_ACCESSOR(reg, size, iden)		\
	KVMPPC_VCPU_SHARED_REGS_ACCESSOR_GET(reg, size, iden)		\
	KVMPPC_VCPU_SHARED_REGS_ACCESSOR_SET(reg, size, iden)		\

#define KVMPPC_BOOKE_HV_SPRNG_ACCESSOR(reg, bookehv_spr)		\
	KVMPPC_BOOKE_HV_SPRNG_ACCESSOR_GET(reg, bookehv_spr)		\
	KVMPPC_BOOKE_HV_SPRNG_ACCESSOR_SET(reg, bookehv_spr)		\

#ifdef CONFIG_KVM_BOOKE_HV

#define KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(reg, size, bookehv_spr, iden) \
	KVMPPC_BOOKE_HV_SPRNG_ACCESSOR(reg, bookehv_spr)		\

#else

#define KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(reg, size, bookehv_spr, iden) \
	KVMPPC_VCPU_SHARED_REGS_ACCESSOR(reg, size, iden)		\

#endif
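
/*
 * Each invocation below expands to a kvmppc_get_<reg>()/kvmppc_set_<reg>()
 * pair. For example, KVMPPC_VCPU_SHARED_REGS_ACCESSOR(sprg4, 64, 0)
 * defines kvmppc_get_sprg4() and kvmppc_set_sprg4(), which access
 * vcpu->arch.shared->sprg4 with the guest's endianness; on BOOKE_HV the
 * register lives in a guest SPR instead and the accessors expand to
 * mfspr()/mtspr(). A non-zero 'iden' hooks the accessor into the
 * nested-v2 guest state buffer reload/dirty tracking.
 */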
1020
1021KVMPPC_VCPU_SHARED_REGS_ACCESSOR(critical, 64, 0)
1022KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(sprg0, 64, SPRN_GSPRG0, KVMPPC_GSID_SPRG0)
1023KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(sprg1, 64, SPRN_GSPRG1, KVMPPC_GSID_SPRG1)
1024KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(sprg2, 64, SPRN_GSPRG2, KVMPPC_GSID_SPRG2)
1025KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(sprg3, 64, SPRN_GSPRG3, KVMPPC_GSID_SPRG3)
1026KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(srr0, 64, SPRN_GSRR0, KVMPPC_GSID_SRR0)
1027KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(srr1, 64, SPRN_GSRR1, KVMPPC_GSID_SRR1)
1028KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(dar, 64, SPRN_GDEAR, KVMPPC_GSID_DAR)
1029KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(esr, 64, SPRN_GESR, 0)
1030KVMPPC_VCPU_SHARED_REGS_ACCESSOR_GET(msr, 64, KVMPPC_GSID_MSR)
1031static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val)
1032{
1033 if (kvmppc_shared_big_endian(vcpu))
1034 vcpu->arch.shared->msr = cpu_to_be64(val);
1035 else
1036 vcpu->arch.shared->msr = cpu_to_le64(val);
1037 kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_MSR);
1038}
1039KVMPPC_VCPU_SHARED_REGS_ACCESSOR(dsisr, 32, KVMPPC_GSID_DSISR)
1040KVMPPC_VCPU_SHARED_REGS_ACCESSOR(int_pending, 32, 0)
1041KVMPPC_VCPU_SHARED_REGS_ACCESSOR(sprg4, 64, 0)
1042KVMPPC_VCPU_SHARED_REGS_ACCESSOR(sprg5, 64, 0)
1043KVMPPC_VCPU_SHARED_REGS_ACCESSOR(sprg6, 64, 0)
1044KVMPPC_VCPU_SHARED_REGS_ACCESSOR(sprg7, 64, 0)
1045
1046static inline u32 kvmppc_get_sr(struct kvm_vcpu *vcpu, int nr)
1047{
1048 if (kvmppc_shared_big_endian(vcpu))
1049 return be32_to_cpu(vcpu->arch.shared->sr[nr]);
1050 else
1051 return le32_to_cpu(vcpu->arch.shared->sr[nr]);
1052}
1053
1054static inline void kvmppc_set_sr(struct kvm_vcpu *vcpu, int nr, u32 val)
1055{
1056 if (kvmppc_shared_big_endian(vcpu))
1057 vcpu->arch.shared->sr[nr] = cpu_to_be32(val);
1058 else
1059 vcpu->arch.shared->sr[nr] = cpu_to_le32(val);
1060}
1061
1062/*
1063 * Please call after prepare_to_enter. This function puts the lazy ee and irq
1064 * disabled tracking state back to normal mode, without actually enabling
1065 * interrupts.
1066 */
1067static inline void kvmppc_fix_ee_before_entry(void)
1068{
1069 trace_hardirqs_on();
1070
1071#ifdef CONFIG_PPC64
1072 /*
1073 * To avoid races, the caller must have gone directly from having
1074 * interrupts fully-enabled to hard-disabled.
1075 */
1076 WARN_ON(local_paca->irq_happened != PACA_IRQ_HARD_DIS);
1077
1078 /* Only need to enable IRQs by hard enabling them after this */
1079 local_paca->irq_happened = 0;
1080 irq_soft_mask_set(IRQS_ENABLED);
1081#endif
1082}
1083
1084static inline void kvmppc_fix_ee_after_exit(void)
1085{
1086#ifdef CONFIG_PPC64
1087 /* Only need to enable IRQs by hard enabling them after this */
1088 local_paca->irq_happened = PACA_IRQ_HARD_DIS;
1089 irq_soft_mask_set(IRQS_ALL_DISABLED);
1090#endif
1091
1092 trace_hardirqs_off();
1093}
1094
1095
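/*
 * Compute the effective address for an indexed-form instruction:
 * EA = (RA ? GPR[RA] : 0) + GPR[RB], truncated to 32 bits when the
 * guest MSR indicates 32-bit mode (MSR_SF on Book3S-64, MSR_CM on
 * Book3E-64).
 */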
static inline ulong kvmppc_get_ea_indexed(struct kvm_vcpu *vcpu, int ra, int rb)
{
	ulong ea;
	ulong msr_64bit = 0;

	ea = kvmppc_get_gpr(vcpu, rb);
	if (ra)
		ea += kvmppc_get_gpr(vcpu, ra);

#if defined(CONFIG_PPC_BOOK3E_64)
	msr_64bit = MSR_CM;
#elif defined(CONFIG_PPC_BOOK3S_64)
	msr_64bit = MSR_SF;
#endif

	if (!(kvmppc_get_msr(vcpu) & msr_64bit))
		ea = (uint32_t)ea;

	return ea;
}

extern void xics_wake_cpu(int cpu);

#endif /* __POWERPC_KVM_PPC_H__ */
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright IBM Corp. 2008
16 *
17 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
18 */
19
20#ifndef __POWERPC_KVM_PPC_H__
21#define __POWERPC_KVM_PPC_H__
22
23/* This file exists just so we can dereference kvm_vcpu, avoiding nested header
24 * dependencies. */
25
26#include <linux/mutex.h>
27#include <linux/timer.h>
28#include <linux/types.h>
29#include <linux/kvm_types.h>
30#include <linux/kvm_host.h>
31#include <linux/bug.h>
32#ifdef CONFIG_PPC_BOOK3S
33#include <asm/kvm_book3s.h>
34#else
35#include <asm/kvm_booke.h>
36#endif
37#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
38#include <asm/paca.h>
39#endif
40
41/*
42 * KVMPPC_INST_SW_BREAKPOINT is debug Instruction
43 * for supporting software breakpoint.
44 */
45#define KVMPPC_INST_SW_BREAKPOINT 0x00dddd00
46
47enum emulation_result {
48 EMULATE_DONE, /* no further processing */
49 EMULATE_DO_MMIO, /* kvm_run filled with MMIO request */
50 EMULATE_FAIL, /* can't emulate this instruction */
51 EMULATE_AGAIN, /* something went wrong. go again */
52 EMULATE_EXIT_USER, /* emulation requires exit to user-space */
53};
54
55enum instruction_type {
56 INST_GENERIC,
57 INST_SC, /* system call */
58};
59
60enum xlate_instdata {
61 XLATE_INST, /* translate instruction address */
62 XLATE_DATA /* translate data address */
63};
64
65enum xlate_readwrite {
66 XLATE_READ, /* check for read permissions */
67 XLATE_WRITE /* check for write permissions */
68};
69
70extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
71extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
72extern void kvmppc_handler_highmem(void);
73
74extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
75extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
76 unsigned int rt, unsigned int bytes,
77 int is_default_endian);
78extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
79 unsigned int rt, unsigned int bytes,
80 int is_default_endian);
81extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
82 u64 val, unsigned int bytes,
83 int is_default_endian);
84
85extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
86 enum instruction_type type, u32 *inst);
87
88extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
89 bool data);
90extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
91 bool data);
92extern int kvmppc_emulate_instruction(struct kvm_run *run,
93 struct kvm_vcpu *vcpu);
94extern int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu);
95extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu);
96extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
97extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb);
98extern void kvmppc_decrementer_func(struct kvm_vcpu *vcpu);
99extern int kvmppc_sanity_check(struct kvm_vcpu *vcpu);
100extern int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu);
101extern void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu);
102
103/* Core-specific hooks */
104
105extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
106 unsigned int gtlb_idx);
107extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode);
108extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid);
109extern void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu);
110extern int kvmppc_mmu_init(struct kvm_vcpu *vcpu);
111extern int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
112extern int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
113extern gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
114 gva_t eaddr);
115extern void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu);
116extern void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu);
117extern int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr,
118 enum xlate_instdata xlid, enum xlate_readwrite xlrw,
119 struct kvmppc_pte *pte);
120
121extern struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm,
122 unsigned int id);
123extern void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu);
124extern int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu);
125extern int kvmppc_core_check_processor_compat(void);
126extern int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
127 struct kvm_translation *tr);
128
129extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
130extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);
131
132extern int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
133extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
134extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags);
135extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
136extern void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu);
137extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
138 struct kvm_interrupt *irq);
139extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu);
140extern void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu, ulong dear_flags,
141 ulong esr_flags);
142extern void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
143 ulong dear_flags,
144 ulong esr_flags);
145extern void kvmppc_core_queue_itlb_miss(struct kvm_vcpu *vcpu);
146extern void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
147 ulong esr_flags);
148extern void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu);
149extern int kvmppc_core_check_requests(struct kvm_vcpu *vcpu);
150
151extern int kvmppc_booke_init(void);
152extern void kvmppc_booke_exit(void);
153
154extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
155extern int kvmppc_kvm_pv(struct kvm_vcpu *vcpu);
156extern void kvmppc_map_magic(struct kvm_vcpu *vcpu);
157
158extern long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp);
159extern long kvmppc_alloc_reset_hpt(struct kvm *kvm, u32 *htab_orderp);
160extern void kvmppc_free_hpt(struct kvm *kvm);
161extern long kvmppc_prepare_vrma(struct kvm *kvm,
162 struct kvm_userspace_memory_region *mem);
163extern void kvmppc_map_vrma(struct kvm_vcpu *vcpu,
164 struct kvm_memory_slot *memslot, unsigned long porder);
165extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu);
166
167extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
168 struct kvm_create_spapr_tce_64 *args);
169extern struct kvmppc_spapr_tce_table *kvmppc_find_table(
170 struct kvm_vcpu *vcpu, unsigned long liobn);
171extern long kvmppc_ioba_validate(struct kvmppc_spapr_tce_table *stt,
172 unsigned long ioba, unsigned long npages);
173extern long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *tt,
174 unsigned long tce);
175extern long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa,
176 unsigned long *ua, unsigned long **prmap);
177extern void kvmppc_tce_put(struct kvmppc_spapr_tce_table *tt,
178 unsigned long idx, unsigned long tce);
179extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
180 unsigned long ioba, unsigned long tce);
181extern long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
182 unsigned long liobn, unsigned long ioba,
183 unsigned long tce_list, unsigned long npages);
184extern long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
185 unsigned long liobn, unsigned long ioba,
186 unsigned long tce_value, unsigned long npages);
187extern long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
188 unsigned long ioba);
189extern struct page *kvm_alloc_hpt(unsigned long nr_pages);
190extern void kvm_release_hpt(struct page *page, unsigned long nr_pages);
191extern int kvmppc_core_init_vm(struct kvm *kvm);
192extern void kvmppc_core_destroy_vm(struct kvm *kvm);
193extern void kvmppc_core_free_memslot(struct kvm *kvm,
194 struct kvm_memory_slot *free,
195 struct kvm_memory_slot *dont);
196extern int kvmppc_core_create_memslot(struct kvm *kvm,
197 struct kvm_memory_slot *slot,
198 unsigned long npages);
199extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
200 struct kvm_memory_slot *memslot,
201 const struct kvm_userspace_memory_region *mem);
202extern void kvmppc_core_commit_memory_region(struct kvm *kvm,
203 const struct kvm_userspace_memory_region *mem,
204 const struct kvm_memory_slot *old,
205 const struct kvm_memory_slot *new);
206extern int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm,
207 struct kvm_ppc_smmu_info *info);
208extern void kvmppc_core_flush_memslot(struct kvm *kvm,
209 struct kvm_memory_slot *memslot);
210
211extern int kvmppc_bookehv_init(void);
212extern void kvmppc_bookehv_exit(void);
213
214extern int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu);
215
216extern int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *);
217
218int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq);
219
220extern int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp);
221extern int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu);
222extern void kvmppc_rtas_tokens_free(struct kvm *kvm);
223extern int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server,
224 u32 priority);
225extern int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server,
226 u32 *priority);
227extern int kvmppc_xics_int_on(struct kvm *kvm, u32 irq);
228extern int kvmppc_xics_int_off(struct kvm *kvm, u32 irq);
229
230void kvmppc_core_dequeue_debug(struct kvm_vcpu *vcpu);
231void kvmppc_core_queue_debug(struct kvm_vcpu *vcpu);
232
233union kvmppc_one_reg {
234 u32 wval;
235 u64 dval;
236 vector128 vval;
237 u64 vsxval[2];
238 struct {
239 u64 addr;
240 u64 length;
241 } vpaval;
242};
243
244struct kvmppc_ops {
245 struct module *owner;
246 int (*get_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
247 int (*set_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
248 int (*get_one_reg)(struct kvm_vcpu *vcpu, u64 id,
249 union kvmppc_one_reg *val);
250 int (*set_one_reg)(struct kvm_vcpu *vcpu, u64 id,
251 union kvmppc_one_reg *val);
252 void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
253 void (*vcpu_put)(struct kvm_vcpu *vcpu);
254 void (*set_msr)(struct kvm_vcpu *vcpu, u64 msr);
255 int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
256 struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned int id);
257 void (*vcpu_free)(struct kvm_vcpu *vcpu);
258 int (*check_requests)(struct kvm_vcpu *vcpu);
259 int (*get_dirty_log)(struct kvm *kvm, struct kvm_dirty_log *log);
260 void (*flush_memslot)(struct kvm *kvm, struct kvm_memory_slot *memslot);
261 int (*prepare_memory_region)(struct kvm *kvm,
262 struct kvm_memory_slot *memslot,
263 const struct kvm_userspace_memory_region *mem);
264 void (*commit_memory_region)(struct kvm *kvm,
265 const struct kvm_userspace_memory_region *mem,
266 const struct kvm_memory_slot *old,
267 const struct kvm_memory_slot *new);
268 int (*unmap_hva)(struct kvm *kvm, unsigned long hva);
269 int (*unmap_hva_range)(struct kvm *kvm, unsigned long start,
270 unsigned long end);
271 int (*age_hva)(struct kvm *kvm, unsigned long start, unsigned long end);
272 int (*test_age_hva)(struct kvm *kvm, unsigned long hva);
273 void (*set_spte_hva)(struct kvm *kvm, unsigned long hva, pte_t pte);
274 void (*mmu_destroy)(struct kvm_vcpu *vcpu);
275 void (*free_memslot)(struct kvm_memory_slot *free,
276 struct kvm_memory_slot *dont);
277 int (*create_memslot)(struct kvm_memory_slot *slot,
278 unsigned long npages);
279 int (*init_vm)(struct kvm *kvm);
280 void (*destroy_vm)(struct kvm *kvm);
281 int (*get_smmu_info)(struct kvm *kvm, struct kvm_ppc_smmu_info *info);
282 int (*emulate_op)(struct kvm_run *run, struct kvm_vcpu *vcpu,
283 unsigned int inst, int *advance);
284 int (*emulate_mtspr)(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
285 int (*emulate_mfspr)(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
286 void (*fast_vcpu_kick)(struct kvm_vcpu *vcpu);
287 long (*arch_vm_ioctl)(struct file *filp, unsigned int ioctl,
288 unsigned long arg);
289 int (*hcall_implemented)(unsigned long hcall);
290};
291
292extern struct kvmppc_ops *kvmppc_hv_ops;
293extern struct kvmppc_ops *kvmppc_pr_ops;
294
295static inline int kvmppc_get_last_inst(struct kvm_vcpu *vcpu,
296 enum instruction_type type, u32 *inst)
297{
298 int ret = EMULATE_DONE;
299 u32 fetched_inst;
300
301 /* Load the instruction manually if it failed to do so in the
302 * exit path */
303 if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
304 ret = kvmppc_load_last_inst(vcpu, type, &vcpu->arch.last_inst);
305
306 /* Write fetch_failed unswapped if the fetch failed */
307 if (ret == EMULATE_DONE)
308 fetched_inst = kvmppc_need_byteswap(vcpu) ?
309 swab32(vcpu->arch.last_inst) :
310 vcpu->arch.last_inst;
311 else
312 fetched_inst = vcpu->arch.last_inst;
313
314 *inst = fetched_inst;
315 return ret;
316}
317
318static inline bool is_kvmppc_hv_enabled(struct kvm *kvm)
319{
320 return kvm->arch.kvm_ops == kvmppc_hv_ops;
321}
322
323extern int kvmppc_hwrng_present(void);
324
325/*
326 * Cuts out inst bits with ordering according to spec.
327 * That means the leftmost bit is zero. All given bits are included.
328 */
329static inline u32 kvmppc_get_field(u64 inst, int msb, int lsb)
330{
331 u32 r;
332 u32 mask;
333
334 BUG_ON(msb > lsb);
335
336 mask = (1 << (lsb - msb + 1)) - 1;
337 r = (inst >> (63 - lsb)) & mask;
338
339 return r;
340}
341
342/*
343 * Replaces inst bits with ordering according to spec.
344 */
345static inline u32 kvmppc_set_field(u64 inst, int msb, int lsb, int value)
346{
347 u32 r;
348 u32 mask;
349
350 BUG_ON(msb > lsb);
351
352 mask = ((1 << (lsb - msb + 1)) - 1) << (63 - lsb);
353 r = (inst & ~mask) | ((value << (63 - lsb)) & mask);
354
355 return r;
356}
357
358#define one_reg_size(id) \
359 (1ul << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
360
361#define get_reg_val(id, reg) ({ \
362 union kvmppc_one_reg __u; \
363 switch (one_reg_size(id)) { \
364 case 4: __u.wval = (reg); break; \
365 case 8: __u.dval = (reg); break; \
366 default: BUG(); \
367 } \
368 __u; \
369})
370
371
372#define set_reg_val(id, val) ({ \
373 u64 __v; \
374 switch (one_reg_size(id)) { \
375 case 4: __v = (val).wval; break; \
376 case 8: __v = (val).dval; break; \
377 default: BUG(); \
378 } \
379 __v; \
380})
381
382int kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
383int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
384
385int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
386int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
387
388int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
389int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
390int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);
391int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);
392
393void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid);
394
395struct openpic;
396
397#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
398extern void kvm_cma_reserve(void) __init;
399static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
400{
401 paca[cpu].kvm_hstate.xics_phys = addr;
402}
403
404static inline u32 kvmppc_get_xics_latch(void)
405{
406 u32 xirr;
407
408 xirr = get_paca()->kvm_hstate.saved_xirr;
409 get_paca()->kvm_hstate.saved_xirr = 0;
410 return xirr;
411}
412
413static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
414{
415 paca[cpu].kvm_hstate.host_ipi = host_ipi;
416}
417
418static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
419{
420 vcpu->kvm->arch.kvm_ops->fast_vcpu_kick(vcpu);
421}
422
423extern void kvm_hv_vm_activated(void);
424extern void kvm_hv_vm_deactivated(void);
425extern bool kvm_hv_mode_active(void);

#else
static inline void __init kvm_cma_reserve(void)
{}

static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{}

static inline u32 kvmppc_get_xics_latch(void)
{
	return 0;
}

static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
{}

static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
	kvm_vcpu_kick(vcpu);
}

static inline bool kvm_hv_mode_active(void) { return false; }

#endif

#ifdef CONFIG_KVM_XICS
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.irq_type == KVMPPC_IRQ_XICS;
}
extern void kvmppc_alloc_host_rm_ops(void);
extern void kvmppc_free_host_rm_ops(void);
extern void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu, unsigned long server);
extern int kvm_vm_ioctl_xics_irq(struct kvm *kvm, struct kvm_irq_level *args);
extern int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd);
extern u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
extern int kvmppc_xics_connect_vcpu(struct kvm_device *dev,
				    struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xics_ipi_action(void);
extern int h_ipi_redirect;
#else
static inline void kvmppc_alloc_host_rm_ops(void) {}
static inline void kvmppc_free_host_rm_ops(void) {}
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
	{ return 0; }
static inline void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu,
					 unsigned long server)
	{ return -EINVAL; }
static inline int kvm_vm_ioctl_xics_irq(struct kvm *kvm,
					struct kvm_irq_level *args)
	{ return -ENOTTY; }
static inline int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
	{ return 0; }
#endif

/*
 * Host-side operations that we want to perform while the guest is in
 * real mode and operating on the XICS. Currently only VCPU wakeup is
 * supported.
 */

union kvmppc_rm_state {
	unsigned long raw;
	struct {
		u32 in_host;
		u32 rm_action;
	};
};
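
/*
 * Note: overlaying both fields with 'raw' lets callers snapshot or
 * update in_host and rm_action together with a single aligned load,
 * store or cmpxchg on the containing word.
 */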

struct kvmppc_host_rm_core {
	union kvmppc_rm_state rm_state;
	void *rm_data;
	char pad[112];
};

struct kvmppc_host_rm_ops {
	struct kvmppc_host_rm_core *rm_core;
	void (*vcpu_kick)(struct kvm_vcpu *vcpu);
};

extern struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;

static inline unsigned long kvmppc_get_epr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
	return mfspr(SPRN_GEPR);
#elif defined(CONFIG_BOOKE)
	return vcpu->arch.epr;
#else
	return 0;
#endif
}

static inline void kvmppc_set_epr(struct kvm_vcpu *vcpu, u32 epr)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GEPR, epr);
#elif defined(CONFIG_BOOKE)
	vcpu->arch.epr = epr;
#endif
}

#ifdef CONFIG_KVM_MPIC

void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu);
int kvmppc_mpic_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
			     u32 cpu);
void kvmppc_mpic_disconnect_vcpu(struct openpic *opp, struct kvm_vcpu *vcpu);

#else

static inline void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu)
{
}

static inline int kvmppc_mpic_connect_vcpu(struct kvm_device *dev,
					   struct kvm_vcpu *vcpu, u32 cpu)
{
	return -EINVAL;
}

static inline void kvmppc_mpic_disconnect_vcpu(struct openpic *opp,
					       struct kvm_vcpu *vcpu)
{
}

#endif /* CONFIG_KVM_MPIC */

int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
			      struct kvm_config_tlb *cfg);
int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
			     struct kvm_dirty_tlb *cfg);

long kvmppc_alloc_lpid(void);
void kvmppc_claim_lpid(long lpid);
void kvmppc_free_lpid(long lpid);
void kvmppc_init_lpid(unsigned long nr_lpids);

static inline void kvmppc_mmu_flush_icache(kvm_pfn_t pfn)
{
	struct page *page;
	/*
	 * We can only access pages that the kernel maps
	 * as memory. Bail out for unmapped ones.
	 */
	if (!pfn_valid(pfn))
		return;

	/*
	 * Flush the i-cache for pages not yet marked coherent;
	 * PG_arch_1 records that the flush has already been done.
	 */
	page = pfn_to_page(pfn);
	if (!test_bit(PG_arch_1, &page->flags)) {
		flush_dcache_icache_page(page);
		set_bit(PG_arch_1, &page->flags);
	}
}

/*
 * Shared struct helpers. The shared struct can be little or big endian,
 * depending on guest endianness, so always access it through these
 * endian-aware helpers.
 */
static inline bool kvmppc_shared_big_endian(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
	/* Only Book3S_64 PR supports bi-endian for now */
	return vcpu->arch.shared_big_endian;
#elif defined(CONFIG_PPC_BOOK3S_64) && defined(__LITTLE_ENDIAN__)
	/* Book3S_64 HV on little endian is always little endian */
	return false;
#else
	return true;
#endif
}

#define SPRNG_WRAPPER_GET(reg, bookehv_spr)				\
static inline ulong kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
{									\
	return mfspr(bookehv_spr);					\
}									\

#define SPRNG_WRAPPER_SET(reg, bookehv_spr)				\
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, ulong val)	\
{									\
	mtspr(bookehv_spr, val);					\
}									\

#define SHARED_WRAPPER_GET(reg, size)					\
static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
{									\
	if (kvmppc_shared_big_endian(vcpu))				\
		return be##size##_to_cpu(vcpu->arch.shared->reg);	\
	else								\
		return le##size##_to_cpu(vcpu->arch.shared->reg);	\
}									\

#define SHARED_WRAPPER_SET(reg, size)					\
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val) \
{									\
	if (kvmppc_shared_big_endian(vcpu))				\
		vcpu->arch.shared->reg = cpu_to_be##size(val);		\
	else								\
		vcpu->arch.shared->reg = cpu_to_le##size(val);		\
}									\

#define SHARED_WRAPPER(reg, size)					\
	SHARED_WRAPPER_GET(reg, size)					\
	SHARED_WRAPPER_SET(reg, size)					\

#define SPRNG_WRAPPER(reg, bookehv_spr)					\
	SPRNG_WRAPPER_GET(reg, bookehv_spr)				\
	SPRNG_WRAPPER_SET(reg, bookehv_spr)				\

#ifdef CONFIG_KVM_BOOKE_HV

#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)			\
	SPRNG_WRAPPER(reg, bookehv_spr)					\

#else

#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)			\
	SHARED_WRAPPER(reg, size)					\

#endif
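
/*
 * Illustrative expansion: SHARED_SPRNG_WRAPPER(srr0, 64, SPRN_GSRR0)
 * below generates kvmppc_get_srr0()/kvmppc_set_srr0(). With
 * CONFIG_KVM_BOOKE_HV these access the SPRN_GSRR0 register directly;
 * otherwise they go through the endian-aware shared-page accessors.
 */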

SHARED_WRAPPER(critical, 64)
SHARED_SPRNG_WRAPPER(sprg0, 64, SPRN_GSPRG0)
SHARED_SPRNG_WRAPPER(sprg1, 64, SPRN_GSPRG1)
SHARED_SPRNG_WRAPPER(sprg2, 64, SPRN_GSPRG2)
SHARED_SPRNG_WRAPPER(sprg3, 64, SPRN_GSPRG3)
SHARED_SPRNG_WRAPPER(srr0, 64, SPRN_GSRR0)
SHARED_SPRNG_WRAPPER(srr1, 64, SPRN_GSRR1)
SHARED_SPRNG_WRAPPER(dar, 64, SPRN_GDEAR)
SHARED_SPRNG_WRAPPER(esr, 64, SPRN_GESR)
SHARED_WRAPPER_GET(msr, 64)
static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val)
{
	if (kvmppc_shared_big_endian(vcpu))
		vcpu->arch.shared->msr = cpu_to_be64(val);
	else
		vcpu->arch.shared->msr = cpu_to_le64(val);
}
SHARED_WRAPPER(dsisr, 32)
SHARED_WRAPPER(int_pending, 32)
SHARED_WRAPPER(sprg4, 64)
SHARED_WRAPPER(sprg5, 64)
SHARED_WRAPPER(sprg6, 64)
SHARED_WRAPPER(sprg7, 64)

static inline u32 kvmppc_get_sr(struct kvm_vcpu *vcpu, int nr)
{
	if (kvmppc_shared_big_endian(vcpu))
		return be32_to_cpu(vcpu->arch.shared->sr[nr]);
	else
		return le32_to_cpu(vcpu->arch.shared->sr[nr]);
}

static inline void kvmppc_set_sr(struct kvm_vcpu *vcpu, int nr, u32 val)
{
	if (kvmppc_shared_big_endian(vcpu))
		vcpu->arch.shared->sr[nr] = cpu_to_be32(val);
	else
		vcpu->arch.shared->sr[nr] = cpu_to_le32(val);
}

/*
 * Call this after kvmppc_prepare_to_enter(). It restores the lazy-EE and
 * IRQ-disabled tracking state to normal without actually enabling
 * interrupts.
 */
static inline void kvmppc_fix_ee_before_entry(void)
{
	trace_hardirqs_on();

#ifdef CONFIG_PPC64
	/*
	 * To avoid races, the caller must have gone directly from having
	 * interrupts fully-enabled to hard-disabled.
	 */
	WARN_ON(local_paca->irq_happened != PACA_IRQ_HARD_DIS);

	/* Only need to enable IRQs by hard enabling them after this */
	local_paca->irq_happened = 0;
	local_paca->soft_enabled = 1;
#endif
}
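
/*
 * Rough usage sketch (illustrative; modelled on the guest-entry paths),
 * where kvmppc_prepare_to_enter() itself hard-disables interrupts:
 *
 *	s = kvmppc_prepare_to_enter(vcpu);
 *	if (s <= 0)
 *		return s;
 *	kvmppc_fix_ee_before_entry();
 *	... enter the guest ...
 */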

static inline ulong kvmppc_get_ea_indexed(struct kvm_vcpu *vcpu, int ra, int rb)
{
	ulong ea;
	ulong msr_64bit = 0;

	ea = kvmppc_get_gpr(vcpu, rb);
	if (ra)
		ea += kvmppc_get_gpr(vcpu, ra);

#if defined(CONFIG_PPC_BOOK3E_64)
	msr_64bit = MSR_CM;
#elif defined(CONFIG_PPC_BOOK3S_64)
	msr_64bit = MSR_SF;
#endif

	if (!(kvmppc_get_msr(vcpu) & msr_64bit))
		ea = (uint32_t)ea;

	return ea;
}
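
/*
 * Illustrative example: for an indexed form such as lwzx rt, ra, rb this
 * computes (ra|0) + (rb), i.e. GPR[ra] + GPR[rb], or just GPR[rb] when
 * ra is 0. When the guest MSR indicates 32-bit mode, the effective
 * address is truncated to 32 bits, matching the architected EA
 * calculation.
 */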

extern void xics_wake_cpu(int cpu);

#endif /* __POWERPC_KVM_PPC_H__ */