/*
 * process.c: handle interruption injection for guests.
 * Copyright (c) 2005, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 *	Shaofan Li (Susie Li) <susie.li@intel.com>
 *	Xiaoyan Feng (Fleming Feng) <fleming.feng@intel.com>
 *	Xuefei Xu (Anthony Xu) <Anthony.xu@intel.com>
 *	Xiantao Zhang <xiantao.zhang@intel.com>
 */
#include "vcpu.h"

#include <asm/pal.h>
#include <asm/sal.h>
#include <asm/fpswa.h>
#include <asm/kregs.h>
#include <asm/tlb.h>

fpswa_interface_t *vmm_fpswa_interface;

#define IA64_VHPT_TRANS_VECTOR			0x0000
#define IA64_INST_TLB_VECTOR			0x0400
#define IA64_DATA_TLB_VECTOR			0x0800
#define IA64_ALT_INST_TLB_VECTOR		0x0c00
#define IA64_ALT_DATA_TLB_VECTOR		0x1000
#define IA64_DATA_NESTED_TLB_VECTOR		0x1400
#define IA64_INST_KEY_MISS_VECTOR		0x1800
#define IA64_DATA_KEY_MISS_VECTOR		0x1c00
#define IA64_DIRTY_BIT_VECTOR			0x2000
#define IA64_INST_ACCESS_BIT_VECTOR		0x2400
#define IA64_DATA_ACCESS_BIT_VECTOR		0x2800
#define IA64_BREAK_VECTOR			0x2c00
#define IA64_EXTINT_VECTOR			0x3000
#define IA64_PAGE_NOT_PRESENT_VECTOR		0x5000
#define IA64_KEY_PERMISSION_VECTOR		0x5100
#define IA64_INST_ACCESS_RIGHTS_VECTOR		0x5200
#define IA64_DATA_ACCESS_RIGHTS_VECTOR		0x5300
#define IA64_GENEX_VECTOR			0x5400
#define IA64_DISABLED_FPREG_VECTOR		0x5500
#define IA64_NAT_CONSUMPTION_VECTOR		0x5600
#define IA64_SPECULATION_VECTOR			0x5700 /* UNUSED */
#define IA64_DEBUG_VECTOR			0x5900
#define IA64_UNALIGNED_REF_VECTOR		0x5a00
#define IA64_UNSUPPORTED_DATA_REF_VECTOR	0x5b00
#define IA64_FP_FAULT_VECTOR			0x5c00
#define IA64_FP_TRAP_VECTOR			0x5d00
#define IA64_LOWERPRIV_TRANSFER_TRAP_VECTOR	0x5e00
#define IA64_TAKEN_BRANCH_TRAP_VECTOR		0x5f00
#define IA64_SINGLE_STEP_TRAP_VECTOR		0x6000

/* SDM vol2 5.5 - IVA based interruption handling */
#define INITIAL_PSR_VALUE_AT_INTERRUPTION (IA64_PSR_UP | IA64_PSR_MFL |\
			IA64_PSR_MFH | IA64_PSR_PK | IA64_PSR_DT |	\
			IA64_PSR_RT | IA64_PSR_MC | IA64_PSR_IT)

#define DOMN_PAL_REQUEST    0x110000
#define DOMN_SAL_REQUEST    0x110001

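/*
 * Map interruption vector numbers (0-67) to offsets within the guest
 * IVT: the first 20 vectors are 0x400 apart, the remainder 0x100 apart.
 */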
static u64 vec2off[68] = {0x0, 0x400, 0x800, 0xc00, 0x1000, 0x1400, 0x1800,
	0x1c00, 0x2000, 0x2400, 0x2800, 0x2c00, 0x3000, 0x3400, 0x3800, 0x3c00,
	0x4000, 0x4400, 0x4800, 0x4c00, 0x5000, 0x5100, 0x5200, 0x5300, 0x5400,
	0x5500, 0x5600, 0x5700, 0x5800, 0x5900, 0x5a00, 0x5b00, 0x5c00, 0x5d00,
	0x5e00, 0x5f00, 0x6000, 0x6100, 0x6200, 0x6300, 0x6400, 0x6500, 0x6600,
	0x6700, 0x6800, 0x6900, 0x6a00, 0x6b00, 0x6c00, 0x6d00, 0x6e00, 0x6f00,
	0x7000, 0x7100, 0x7200, 0x7300, 0x7400, 0x7500, 0x7600, 0x7700, 0x7800,
	0x7900, 0x7a00, 0x7b00, 0x7c00, 0x7d00, 0x7e00, 0x7f00
};

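/*
 * Prepare the vcpu for interruption delivery: switch to register bank 0
 * and, when vpsr.ic is set, save IPSR/IIP/IFS/IIPA into the guest's
 * virtual control registers, then compute the PSR value the guest
 * observes at interruption (SDM Vol2 5.5).
 */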
static void collect_interruption(struct kvm_vcpu *vcpu)
{
	u64 ipsr;
	u64 vdcr;
	u64 vifs;
	unsigned long vpsr;
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);

	vpsr = vcpu_get_psr(vcpu);
	vcpu_bsw0(vcpu);
	if (vpsr & IA64_PSR_IC) {
		/* Sync mpsr id/da/dd/ss/ed bits to vipsr, since after
		 * the guest does rfi we still want these bits set in
		 * mpsr.
		 */
		ipsr = regs->cr_ipsr;
		vpsr = vpsr | (ipsr & (IA64_PSR_ID | IA64_PSR_DA
					| IA64_PSR_DD | IA64_PSR_SS
					| IA64_PSR_ED));
		vcpu_set_ipsr(vcpu, vpsr);

		/* Currently, for a trap, we do not advance IIP to the
		 * next instruction, because we assume the caller has
		 * already set up IIP correctly.
		 */
		vcpu_set_iip(vcpu, regs->cr_iip);

		/* set vifs.v to zero */
		vifs = VCPU(vcpu, ifs);
		vifs &= ~IA64_IFS_V;
		vcpu_set_ifs(vcpu, vifs);

		vcpu_set_iipa(vcpu, VMX(vcpu, cr_iipa));
	}

	vdcr = VCPU(vcpu, dcr);

	/* Set guest psr:
	 * up/mfl/mfh/pk/dt/rt/mc/it remain unchanged
	 * be: set to the value of dcr.be
	 * pp: set to the value of dcr.pp
	 */
	vpsr &= INITIAL_PSR_VALUE_AT_INTERRUPTION;
	vpsr |= (vdcr & IA64_DCR_BE);

	/* The VDCR pp bit position differs from the VPSR pp bit */
	if (vdcr & IA64_DCR_PP)
		vpsr |= IA64_PSR_PP;
	else
		vpsr &= ~IA64_PSR_PP;

	vcpu_set_psr(vcpu, vpsr);
}

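/*
 * Deliver an interruption to the guest: collect the interruption state
 * and transfer control to the guest IVT entry at vIVA + vec.
 */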
void inject_guest_interruption(struct kvm_vcpu *vcpu, u64 vec)
{
	u64 viva;
	struct kvm_pt_regs *regs;
	union ia64_isr pt_isr;

	regs = vcpu_regs(vcpu);

	/* clear cr.isr.ir (incomplete register frame) */
	pt_isr.val = VMX(vcpu, cr_isr);
	pt_isr.ir = 0;
	VMX(vcpu, cr_isr) = pt_isr.val;

	collect_interruption(vcpu);

	viva = vcpu_get_iva(vcpu);
	regs->cr_iip = viva + vec;
}

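/*
 * Build the ITIR value for a fault: the page size and RID come from the
 * region register covering the faulting address; all other fields are
 * zero.
 */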
static u64 vcpu_get_itir_on_fault(struct kvm_vcpu *vcpu, u64 ifa)
{
	union ia64_rr rr, rr1;

	rr.val = vcpu_get_rr(vcpu, ifa);
	rr1.val = 0;
	rr1.ps = rr.ps;
	rr1.rid = rr.rid;
	return rr1.val;
}

/*
 * Set vIFA & vITIR & vIHA, when vPSR.ic = 1
 * Parameters:
 *  set_ifa: if true, set vIFA
 *  set_itir: if true, set vITIR
 *  set_iha: if true, set vIHA
 */
void set_ifa_itir_iha(struct kvm_vcpu *vcpu, u64 vadr,
		int set_ifa, int set_itir, int set_iha)
{
	long vpsr;
	u64 value;

	vpsr = VCPU(vcpu, vpsr);
	/* Vol2, Table 8-1 */
	if (vpsr & IA64_PSR_IC) {
		if (set_ifa)
			vcpu_set_ifa(vcpu, vadr);
		if (set_itir) {
			value = vcpu_get_itir_on_fault(vcpu, vadr);
			vcpu_set_itir(vcpu, value);
		}

		if (set_iha) {
			value = vcpu_thash(vcpu, vadr);
			vcpu_set_iha(vcpu, value);
		}
	}
}

/*
 * Data TLB Fault
 *  @ Data TLB vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
void dtlb_fault(struct kvm_vcpu *vcpu, u64 vadr)
{
	/* If vPSR.ic, IFA, ITIR, IHA */
	set_ifa_itir_iha(vcpu, vadr, 1, 1, 1);
	inject_guest_interruption(vcpu, IA64_DATA_TLB_VECTOR);
}

/*
 * Instruction TLB Fault
 *  @ Instruction TLB vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
void itlb_fault(struct kvm_vcpu *vcpu, u64 vadr)
{
	/* If vPSR.ic, IFA, ITIR, IHA */
	set_ifa_itir_iha(vcpu, vadr, 1, 1, 1);
	inject_guest_interruption(vcpu, IA64_INST_TLB_VECTOR);
}

/*
 * Data Nested TLB Fault
 *  @ Data Nested TLB Vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
void nested_dtlb(struct kvm_vcpu *vcpu)
{
	inject_guest_interruption(vcpu, IA64_DATA_NESTED_TLB_VECTOR);
}

/*
 * Alternate Data TLB Fault
 *  @ Alternate Data TLB vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
void alt_dtlb(struct kvm_vcpu *vcpu, u64 vadr)
{
	set_ifa_itir_iha(vcpu, vadr, 1, 1, 0);
	inject_guest_interruption(vcpu, IA64_ALT_DATA_TLB_VECTOR);
}

/*
 * Alternate Instruction TLB Fault
 *  @ Alternate Instruction TLB vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
void alt_itlb(struct kvm_vcpu *vcpu, u64 vadr)
{
	set_ifa_itir_iha(vcpu, vadr, 1, 1, 0);
	inject_guest_interruption(vcpu, IA64_ALT_INST_TLB_VECTOR);
}

/* Deal with:
 *  VHPT Translation Vector
 */
static void _vhpt_fault(struct kvm_vcpu *vcpu, u64 vadr)
{
	/* If vPSR.ic, IFA, ITIR, IHA */
	set_ifa_itir_iha(vcpu, vadr, 1, 1, 1);
	inject_guest_interruption(vcpu, IA64_VHPT_TRANS_VECTOR);
}

/*
 * VHPT Instruction Fault
 *  @ VHPT Translation vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
void ivhpt_fault(struct kvm_vcpu *vcpu, u64 vadr)
{
	_vhpt_fault(vcpu, vadr);
}

/*
 * VHPT Data Fault
 *  @ VHPT Translation vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
void dvhpt_fault(struct kvm_vcpu *vcpu, u64 vadr)
{
	_vhpt_fault(vcpu, vadr);
}

/*
 * Deal with:
 *  General Exception vector
 */
void _general_exception(struct kvm_vcpu *vcpu)
{
	inject_guest_interruption(vcpu, IA64_GENEX_VECTOR);
}

/*
 * Illegal Operation Fault
 *  @ General Exception Vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
void illegal_op(struct kvm_vcpu *vcpu)
{
	_general_exception(vcpu);
}

/*
 * Illegal Dependency Fault
 *  @ General Exception Vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
void illegal_dep(struct kvm_vcpu *vcpu)
{
	_general_exception(vcpu);
}

/*
 * Reserved Register/Field Fault
 *  @ General Exception Vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
void rsv_reg_field(struct kvm_vcpu *vcpu)
{
	_general_exception(vcpu);
}

/*
 * Privileged Operation Fault
 *  @ General Exception Vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
void privilege_op(struct kvm_vcpu *vcpu)
{
	_general_exception(vcpu);
}

/*
 * Unimplemented Data Address Fault
 *  @ General Exception Vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
void unimpl_daddr(struct kvm_vcpu *vcpu)
{
	_general_exception(vcpu);
}

/*
 * Privileged Register Fault
 *  @ General Exception Vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
void privilege_reg(struct kvm_vcpu *vcpu)
{
	_general_exception(vcpu);
}

/* Deal with
 *  NaT consumption vector
 * Parameter:
 *  vadr: used only when t is DATA or INSTRUCTION
 */
static void _nat_consumption_fault(struct kvm_vcpu *vcpu, u64 vadr,
						enum tlb_miss_type t)
{
	/* If vPSR.ic && t == DATA/INST, IFA */
	if (t == DATA || t == INSTRUCTION) {
		/* IFA */
		set_ifa_itir_iha(vcpu, vadr, 1, 0, 0);
	}

	inject_guest_interruption(vcpu, IA64_NAT_CONSUMPTION_VECTOR);
}

/*
 * Instruction NaT Page Consumption Fault
 *  @ NaT Consumption Vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
void inat_page_consumption(struct kvm_vcpu *vcpu, u64 vadr)
{
	_nat_consumption_fault(vcpu, vadr, INSTRUCTION);
}

/*
 * Register NaT Consumption Fault
 *  @ NaT Consumption Vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
void rnat_consumption(struct kvm_vcpu *vcpu)
{
	_nat_consumption_fault(vcpu, 0, REGISTER);
}

/*
 * Data NaT Page Consumption Fault
 *  @ NaT Consumption Vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
void dnat_page_consumption(struct kvm_vcpu *vcpu, u64 vadr)
{
	_nat_consumption_fault(vcpu, vadr, DATA);
}

/* Deal with
 *  Page not present vector
 */
static void __page_not_present(struct kvm_vcpu *vcpu, u64 vadr)
{
	/* If vPSR.ic, IFA, ITIR */
	set_ifa_itir_iha(vcpu, vadr, 1, 1, 0);
	inject_guest_interruption(vcpu, IA64_PAGE_NOT_PRESENT_VECTOR);
}

void data_page_not_present(struct kvm_vcpu *vcpu, u64 vadr)
{
	__page_not_present(vcpu, vadr);
}

void inst_page_not_present(struct kvm_vcpu *vcpu, u64 vadr)
{
	__page_not_present(vcpu, vadr);
}

/* Deal with
 *  Data access rights vector
 */
void data_access_rights(struct kvm_vcpu *vcpu, u64 vadr)
{
	/* If vPSR.ic, IFA, ITIR */
	set_ifa_itir_iha(vcpu, vadr, 1, 1, 0);
	inject_guest_interruption(vcpu, IA64_DATA_ACCESS_RIGHTS_VECTOR);
}

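/*
 * Run the host FP software-assist (FPSWA) handler on behalf of the
 * guest.  Only f6-f11 are live in the vmm, so only those are passed in
 * fp_state; rr7 is temporarily switched to the host mapping so the
 * FPSWA entry point can be called directly.
 */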
fpswa_ret_t vmm_fp_emulate(int fp_fault, void *bundle, unsigned long *ipsr,
		unsigned long *fpsr, unsigned long *isr, unsigned long *pr,
		unsigned long *ifs, struct kvm_pt_regs *regs)
{
	fp_state_t fp_state;
	fpswa_ret_t ret;
	struct kvm_vcpu *vcpu = current_vcpu;

	uint64_t old_rr7 = ia64_get_rr(7UL << 61);

	if (!vmm_fpswa_interface)
		return (fpswa_ret_t) {-1, 0, 0, 0};

	memset(&fp_state, 0, sizeof(fp_state_t));

	/*
	 * Compute fp_state.  Only FP registers f6 - f11 are used by the
	 * vmm, so set those bits in the mask and set the low volatile
	 * pointer to point to these registers.
	 */
	fp_state.bitmask_low64 = 0xfc0;  /* bit6..bit11 */

	fp_state.fp_state_low_volatile = (fp_state_low_volatile_t *) &regs->f6;

	/*
	 * unsigned long (*EFI_FPSWA) (
	 *      unsigned long    trap_type,
	 *      void             *Bundle,
	 *      unsigned long    *pipsr,
	 *      unsigned long    *pfsr,
	 *      unsigned long    *pisr,
	 *      unsigned long    *ppreds,
	 *      unsigned long    *pifs,
	 *      void             *fp_state);
	 */
	/* Call the host fpswa interface directly to virtualize the
	 * guest fpswa request.
	 */
	ia64_set_rr(7UL << 61, vcpu->arch.host.rr[7]);
	ia64_srlz_d();

	ret = (*vmm_fpswa_interface->fpswa) (fp_fault, bundle,
			ipsr, fpsr, isr, pr, ifs, &fp_state);
	ia64_set_rr(7UL << 61, old_rr7);
	ia64_srlz_d();
	return ret;
}

/*
 * Handle floating-point assist faults and traps for the domain.
 */
unsigned long vmm_handle_fpu_swa(int fp_fault, struct kvm_pt_regs *regs,
					unsigned long isr)
{
	struct kvm_vcpu *v = current_vcpu;
	IA64_BUNDLE bundle;
	unsigned long fault_ip;
	fpswa_ret_t ret;

	fault_ip = regs->cr_iip;
	/*
	 * When an FP trap occurs, the trapping instruction has already
	 * completed.  If ipsr.ri == 0, the trapping instruction is in
	 * the previous bundle.
	 */
	if (!fp_fault && (ia64_psr(regs)->ri == 0))
		fault_ip -= 16;

	if (fetch_code(v, fault_ip, &bundle))
		return -EAGAIN;

	if (!bundle.i64[0] && !bundle.i64[1])
		return -EACCES;

	ret = vmm_fp_emulate(fp_fault, &bundle, &regs->cr_ipsr, &regs->ar_fpsr,
			&isr, &regs->pr, &regs->cr_ifs, regs);
	return ret.status;
}

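/*
 * Reflect a fault or trap into the guest.  FP faults and traps are
 * first offered to the host FPSWA handler; everything else is
 * delivered through the guest IVT after ISR/IIPA and IIM (or
 * IFA/ITIR/IHA) have been set up.
 */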
void reflect_interruption(u64 ifa, u64 isr, u64 iim,
		u64 vec, struct kvm_pt_regs *regs)
{
	u64 vector;
	int status;
	struct kvm_vcpu *vcpu = current_vcpu;
	u64 vpsr = VCPU(vcpu, vpsr);

	vector = vec2off[vec];

	if (!(vpsr & IA64_PSR_IC) && (vector != IA64_DATA_NESTED_TLB_VECTOR)) {
		panic_vm(vcpu, "Interruption with vector: 0x%lx occurs "
						"with psr.ic = 0\n", vector);
		return;
	}

	switch (vec) {
	case 32:	/* IA64_FP_FAULT_VECTOR */
		status = vmm_handle_fpu_swa(1, regs, isr);
		if (!status) {
			vcpu_increment_iip(vcpu);
			return;
		} else if (status == -EAGAIN)
			return;
		break;
	case 33:	/* IA64_FP_TRAP_VECTOR */
		status = vmm_handle_fpu_swa(0, regs, isr);
		if (!status)
			return;
		break;
	}

	VCPU(vcpu, isr) = isr;
	VCPU(vcpu, iipa) = regs->cr_iip;
	if (vector == IA64_BREAK_VECTOR || vector == IA64_SPECULATION_VECTOR)
		VCPU(vcpu, iim) = iim;
	else
		set_ifa_itir_iha(vcpu, ifa, 1, 1, 1);

	inject_guest_interruption(vcpu, vector);
}

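/*
 * Translate a guest address passed as a PAL argument into a host
 * virtual address.  In virtual mode the address is first resolved to a
 * guest physical address through the vTLB or the VHPT.
 */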
static unsigned long kvm_trans_pal_call_args(struct kvm_vcpu *vcpu,
						unsigned long arg)
{
	struct thash_data *data;
	unsigned long gpa, poff;

	if (!is_physical_mode(vcpu)) {
		/* Depends on the caller to provide the DTR or DTC mapping. */
		data = vtlb_lookup(vcpu, arg, D_TLB);
		if (data) {
			gpa = data->page_flags & _PAGE_PPN_MASK;
		} else {
			data = vhpt_lookup(arg);
			if (!data)
				return 0;
			gpa = data->gpaddr & _PAGE_PPN_MASK;
		}

		poff = arg & (PSIZE(data->ps) - 1);
		arg = PAGEALIGN(gpa, data->ps) | poff;
	}
	arg = kvm_gpa_to_mpa(arg << 1 >> 1);

	return (unsigned long)__va(arg);
}

static void set_pal_call_data(struct kvm_vcpu *vcpu)
{
	struct exit_ctl_data *p = &vcpu->arch.exit_data;
	unsigned long gr28 = vcpu_get_gr(vcpu, 28);
	unsigned long gr29 = vcpu_get_gr(vcpu, 29);
	unsigned long gr30 = vcpu_get_gr(vcpu, 30);

	/* FIXME: For the static and stacked conventions, firmware has
	 * put the parameters in gr28-gr31 before breaking to the vmm!
	 */
	switch (gr28) {
	case PAL_PERF_MON_INFO:
	case PAL_HALT_INFO:
		p->u.pal_data.gr29 = kvm_trans_pal_call_args(vcpu, gr29);
		p->u.pal_data.gr30 = gr30;
		break;
	case PAL_BRAND_INFO:
		p->u.pal_data.gr29 = gr29;
		p->u.pal_data.gr30 = kvm_trans_pal_call_args(vcpu, gr30);
		break;
	default:
		p->u.pal_data.gr29 = gr29;
		p->u.pal_data.gr30 = gr30;
	}
	p->u.pal_data.gr28 = gr28;
	p->u.pal_data.gr31 = vcpu_get_gr(vcpu, 31);

	p->exit_reason = EXIT_REASON_PAL_CALL;
}

static void get_pal_call_result(struct kvm_vcpu *vcpu)
{
	struct exit_ctl_data *p = &vcpu->arch.exit_data;

	if (p->exit_reason == EXIT_REASON_PAL_CALL) {
		vcpu_set_gr(vcpu, 8, p->u.pal_data.ret.status, 0);
		vcpu_set_gr(vcpu, 9, p->u.pal_data.ret.v0, 0);
		vcpu_set_gr(vcpu, 10, p->u.pal_data.ret.v1, 0);
		vcpu_set_gr(vcpu, 11, p->u.pal_data.ret.v2, 0);
	} else
		panic_vm(vcpu, "Mis-set exit reason!\n");
}

static void set_sal_call_data(struct kvm_vcpu *vcpu)
{
	struct exit_ctl_data *p = &vcpu->arch.exit_data;

	p->u.sal_data.in0 = vcpu_get_gr(vcpu, 32);
	p->u.sal_data.in1 = vcpu_get_gr(vcpu, 33);
	p->u.sal_data.in2 = vcpu_get_gr(vcpu, 34);
	p->u.sal_data.in3 = vcpu_get_gr(vcpu, 35);
	p->u.sal_data.in4 = vcpu_get_gr(vcpu, 36);
	p->u.sal_data.in5 = vcpu_get_gr(vcpu, 37);
	p->u.sal_data.in6 = vcpu_get_gr(vcpu, 38);
	p->u.sal_data.in7 = vcpu_get_gr(vcpu, 39);
	p->exit_reason = EXIT_REASON_SAL_CALL;
}

static void get_sal_call_result(struct kvm_vcpu *vcpu)
{
	struct exit_ctl_data *p = &vcpu->arch.exit_data;

	if (p->exit_reason == EXIT_REASON_SAL_CALL) {
		vcpu_set_gr(vcpu, 8, p->u.sal_data.ret.r8, 0);
		vcpu_set_gr(vcpu, 9, p->u.sal_data.ret.r9, 0);
		vcpu_set_gr(vcpu, 10, p->u.sal_data.ret.r10, 0);
		vcpu_set_gr(vcpu, 11, p->u.sal_data.ret.r11, 0);
	} else
		panic_vm(vcpu, "Mis-set exit reason!\n");
}

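/*
 * Handle a break fault.  Breaks carrying the PAL/SAL request
 * immediates from privilege level 0 are treated as firmware hypercalls
 * and forwarded to the host; anything else is reflected into the guest
 * as a break fault (vector 11).
 */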
void kvm_ia64_handle_break(unsigned long ifa, struct kvm_pt_regs *regs,
		unsigned long isr, unsigned long iim)
{
	struct kvm_vcpu *v = current_vcpu;
	long psr;

	if (ia64_psr(regs)->cpl == 0) {
		/* Allow hypercalls only when cpl = 0.  */
		if (iim == DOMN_PAL_REQUEST) {
			local_irq_save(psr);
			set_pal_call_data(v);
			vmm_transition(v);
			get_pal_call_result(v);
			vcpu_increment_iip(v);
			local_irq_restore(psr);
			return;
		} else if (iim == DOMN_SAL_REQUEST) {
			local_irq_save(psr);
			set_sal_call_data(v);
			vmm_transition(v);
			get_sal_call_result(v);
			vcpu_increment_iip(v);
			local_irq_restore(psr);
			return;
		}
	}
	reflect_interruption(ifa, isr, iim, 11, regs);
}

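/*
 * Check for a deliverable external interrupt: inject the highest
 * pending vector if it is unmasked, otherwise keep VHPI up to date for
 * later delivery.
 */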
void check_pending_irq(struct kvm_vcpu *vcpu)
{
	int mask, h_pending, h_inservice;
	u64 isr;
	unsigned long vpsr;
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);

	h_pending = highest_pending_irq(vcpu);
	if (h_pending == NULL_VECTOR) {
		update_vhpi(vcpu, NULL_VECTOR);
		return;
	}
	h_inservice = highest_inservice_irq(vcpu);

	vpsr = VCPU(vcpu, vpsr);
	mask = irq_masked(vcpu, h_pending, h_inservice);
	if ((vpsr & IA64_PSR_I) && mask == IRQ_NO_MASKED) {
		isr = vpsr & IA64_PSR_RI;
		update_vhpi(vcpu, h_pending);
		reflect_interruption(0, isr, 0, 12, regs); /* EXT IRQ */
	} else if (mask == IRQ_MASKED_BY_INSVC) {
		if (VCPU(vcpu, vhpi))
			update_vhpi(vcpu, NULL_VECTOR);
	} else {
		/* masked by vpsr.i or vtpr. */
		update_vhpi(vcpu, h_pending);
	}
}

static void generate_exirq(struct kvm_vcpu *vcpu)
{
	unsigned long vpsr;
	uint64_t isr;

	struct kvm_pt_regs *regs = vcpu_regs(vcpu);

	vpsr = VCPU(vcpu, vpsr);
	isr = vpsr & IA64_PSR_RI;
	if (!(vpsr & IA64_PSR_IC))
		panic_vm(vcpu, "Trying to inject an IRQ with psr.ic = 0\n");
	reflect_interruption(0, isr, 0, 12, regs); /* EXT IRQ */
}

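/*
 * Compare VHPI against the masking implied by vpsr.i and vtpr.mmi/mic,
 * and inject an external interrupt when a higher-priority one is
 * pending.
 */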
void vhpi_detection(struct kvm_vcpu *vcpu)
{
	uint64_t threshold, vhpi;
	union ia64_tpr vtpr;
	struct ia64_psr vpsr;

	vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
	vtpr.val = VCPU(vcpu, tpr);

	threshold = ((!vpsr.i) << 5) | (vtpr.mmi << 4) | vtpr.mic;
	vhpi = VCPU(vcpu, vhpi);
	if (vhpi > threshold) {
		/* interrupt activated */
		generate_exirq(vcpu);
	}
}

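/*
 * Last work before returning to the guest: fire the virtual interval
 * timer if it has expired, then deliver any newly pending interrupt or
 * re-run VHPI detection.
 */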
void leave_hypervisor_tail(void)
{
	struct kvm_vcpu *v = current_vcpu;

	if (VMX(v, timer_check)) {
		VMX(v, timer_check) = 0;
		if (VMX(v, itc_check)) {
			if (vcpu_get_itc(v) > VCPU(v, itm)) {
				if (!(VCPU(v, itv) & (1 << 16))) {
					vcpu_pend_interrupt(v, VCPU(v, itv)
							& 0xff);
					VMX(v, itc_check) = 0;
				} else {
					v->arch.timer_pending = 1;
				}
				VMX(v, last_itc) = VCPU(v, itm) + 1;
			}
		}
	}

	rmb();
	if (v->arch.irq_new_pending) {
		v->arch.irq_new_pending = 0;
		VMX(v, irq_check) = 0;
		check_pending_irq(v);
		return;
	}
	if (VMX(v, irq_check)) {
		VMX(v, irq_check) = 0;
		vhpi_detection(v);
	}
}

static inline void handle_lds(struct kvm_pt_regs *regs)
{
	regs->cr_ipsr |= IA64_PSR_ED;
}

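/*
 * TLB miss while the guest is in physical mode: install an identity
 * mapping with the write-back memory attribute for the faulting
 * address.
 */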
void physical_tlb_miss(struct kvm_vcpu *vcpu, unsigned long vadr, int type)
{
	unsigned long pte;
	union ia64_rr rr;

	rr.val = ia64_get_rr(vadr);
	pte = (vadr & _PAGE_PPN_MASK) | PHY_PAGE_WB;
	thash_vhpt_insert(vcpu, pte, (u64)(rr.ps << 2), vadr, type);
}

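/*
 * Common TLB-miss handler.  Resolve the miss against the vTLB and the
 * guest VHPT, then either install the translation into the machine
 * VHPT, emulate the access (memory-mapped I/O), or reflect the
 * appropriate TLB fault into the guest.
 */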
void kvm_page_fault(u64 vadr, u64 vec, struct kvm_pt_regs *regs)
{
	unsigned long vpsr;
	int type;

	u64 vhpt_adr, gppa, pteval, rr, itir;
	union ia64_isr misr;
	union ia64_pta vpta;
	struct thash_data *data;
	struct kvm_vcpu *v = current_vcpu;

	vpsr = VCPU(v, vpsr);
	misr.val = VMX(v, cr_isr);

	type = vec;

	if (is_physical_mode(v) && (!(vadr << 1 >> 62))) {
		if (vec == 2) {
			if (__gpfn_is_io((vadr << 1) >> (PAGE_SHIFT + 1))) {
				emulate_io_inst(v, ((vadr << 1) >> 1), 4);
				return;
			}
		}
		physical_tlb_miss(v, vadr, type);
		return;
	}
	data = vtlb_lookup(v, vadr, type);
	if (data) {
		if (type == D_TLB) {
			gppa = (vadr & ((1UL << data->ps) - 1))
				+ (data->ppn >> (data->ps - 12) << data->ps);
			if (__gpfn_is_io(gppa >> PAGE_SHIFT)) {
				if (data->pl >= ((regs->cr_ipsr >>
						IA64_PSR_CPL0_BIT) & 3)) {
					emulate_io_inst(v, gppa, data->ma);
				} else {
					vcpu_set_isr(v, misr.val);
					data_access_rights(v, vadr);
				}
				return;
			}
		}
		thash_vhpt_insert(v, data->page_flags, data->itir, vadr, type);
	} else if (type == D_TLB) {
		if (misr.sp) {
			handle_lds(regs);
			return;
		}

		rr = vcpu_get_rr(v, vadr);
		itir = rr & (RR_RID_MASK | RR_PS_MASK);

		if (!vhpt_enabled(v, vadr, misr.rs ? RSE_REF : DATA_REF)) {
			if (vpsr & IA64_PSR_IC) {
				vcpu_set_isr(v, misr.val);
				alt_dtlb(v, vadr);
			} else {
				nested_dtlb(v);
			}
			return;
		}

		vpta.val = vcpu_get_pta(v);
		/* avoid recursively walking (short format) VHPT */

		vhpt_adr = vcpu_thash(v, vadr);
		if (!guest_vhpt_lookup(vhpt_adr, &pteval)) {
			/* VHPT successfully read.  */
			if (!(pteval & _PAGE_P)) {
				if (vpsr & IA64_PSR_IC) {
					vcpu_set_isr(v, misr.val);
					dtlb_fault(v, vadr);
				} else {
					nested_dtlb(v);
				}
			} else if ((pteval & _PAGE_MA_MASK) != _PAGE_MA_ST) {
				thash_purge_and_insert(v, pteval, itir,
								vadr, D_TLB);
			} else if (vpsr & IA64_PSR_IC) {
				vcpu_set_isr(v, misr.val);
				dtlb_fault(v, vadr);
			} else {
				nested_dtlb(v);
			}
		} else {
			/* Can't read VHPT.  */
			if (vpsr & IA64_PSR_IC) {
				vcpu_set_isr(v, misr.val);
				dvhpt_fault(v, vadr);
			} else {
				nested_dtlb(v);
			}
		}
	} else if (type == I_TLB) {
		if (!(vpsr & IA64_PSR_IC))
			misr.ni = 1;
		if (!vhpt_enabled(v, vadr, INST_REF)) {
			vcpu_set_isr(v, misr.val);
			alt_itlb(v, vadr);
			return;
		}

		vpta.val = vcpu_get_pta(v);

		vhpt_adr = vcpu_thash(v, vadr);
		if (!guest_vhpt_lookup(vhpt_adr, &pteval)) {
			/* VHPT successfully read.  */
			if (pteval & _PAGE_P) {
				if ((pteval & _PAGE_MA_MASK) == _PAGE_MA_ST) {
					vcpu_set_isr(v, misr.val);
					itlb_fault(v, vadr);
					return;
				}
				rr = vcpu_get_rr(v, vadr);
				itir = rr & (RR_RID_MASK | RR_PS_MASK);
				thash_purge_and_insert(v, pteval, itir,
							vadr, I_TLB);
			} else {
				vcpu_set_isr(v, misr.val);
				inst_page_not_present(v, vadr);
			}
		} else {
			vcpu_set_isr(v, misr.val);
			ivhpt_fault(v, vadr);
		}
	}
}

void kvm_vexirq(struct kvm_vcpu *vcpu)
{
	u64 vpsr, isr;
	struct kvm_pt_regs *regs;

	regs = vcpu_regs(vcpu);
	vpsr = VCPU(vcpu, vpsr);
	isr = vpsr & IA64_PSR_RI;
	reflect_interruption(0, isr, 0, 12, regs); /* EXT IRQ */
}

void kvm_ia64_handle_irq(struct kvm_vcpu *v)
{
	struct exit_ctl_data *p = &v->arch.exit_data;
	long psr;

	local_irq_save(psr);
	p->exit_reason = EXIT_REASON_EXTERNAL_INTERRUPT;
	vmm_transition(v);
	local_irq_restore(psr);

	VMX(v, timer_check) = 1;
}

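/*
 * Execute one remote ptc.ga request: temporarily install the
 * requester's RR value in region 0, purge the matching translations,
 * then restore the previous region state.
 */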
static void ptc_ga_remote_func(struct kvm_vcpu *v, int pos)
{
	u64 oldrid, moldrid, oldpsbits, vaddr;
	struct kvm_ptc_g *p = &v->arch.ptc_g_data[pos];

	vaddr = p->vaddr;

	oldrid = VMX(v, vrr[0]);
	VMX(v, vrr[0]) = p->rr;
	oldpsbits = VMX(v, psbits[0]);
	VMX(v, psbits[0]) = VMX(v, psbits[REGION_NUMBER(vaddr)]);
	moldrid = ia64_get_rr(0x0);
	ia64_set_rr(0x0, vrrtomrr(p->rr));
	ia64_srlz_d();

	vaddr = PAGEALIGN(vaddr, p->ps);
	thash_purge_entries_remote(v, vaddr, p->ps);

	VMX(v, vrr[0]) = oldrid;
	VMX(v, psbits[0]) = oldpsbits;
	ia64_set_rr(0x0, moldrid);
	ia64_dv_serialize_data();
}

static void vcpu_do_resume(struct kvm_vcpu *vcpu)
{
	/* Re-initialize the VHPT and VTLB on resume */
	vcpu->arch.vhpt.num = VHPT_NUM_ENTRIES;
	thash_init(&vcpu->arch.vhpt, VHPT_SHIFT);
	vcpu->arch.vtlb.num = VTLB_NUM_ENTRIES;
	thash_init(&vcpu->arch.vtlb, VTLB_SHIFT);

	ia64_set_pta(vcpu->arch.vhpt.pta.val);
}

static void vmm_sanity_check(struct kvm_vcpu *vcpu)
{
	struct exit_ctl_data *p = &vcpu->arch.exit_data;

	if (!vmm_sanity && p->exit_reason != EXIT_REASON_DEBUG) {
		panic_vm(vcpu, "Failed vmm sanity check, "
			"possibly caused by a crashed vmm!\n");
	}
}

static void kvm_do_resume_op(struct kvm_vcpu *vcpu)
{
	vmm_sanity_check(vcpu); /* Guarantee the vcpu is running on a healthy vmm */

	if (test_and_clear_bit(KVM_REQ_RESUME, &vcpu->requests)) {
		vcpu_do_resume(vcpu);
		return;
	}

	if (unlikely(test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))) {
		thash_purge_all(vcpu);
		return;
	}

	if (test_and_clear_bit(KVM_REQ_PTC_G, &vcpu->requests)) {
		while (vcpu->arch.ptc_g_count > 0)
			ptc_ga_remote_func(vcpu, --vcpu->arch.ptc_g_count);
	}
}

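/*
 * Switch from the vmm to the host side of kvm and back: save the vpd
 * state via PAL_VPS_SAVE, trampoline into the host context, restore it
 * with PAL_VPS_RESTORE on return, then process any host-requested
 * resume operations.
 */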
void vmm_transition(struct kvm_vcpu *vcpu)
{
	ia64_call_vsa(PAL_VPS_SAVE, (unsigned long)vcpu->arch.vpd,
			1, 0, 0, 0, 0, 0);
	vmm_trampoline(&vcpu->arch.guest, &vcpu->arch.host);
	ia64_call_vsa(PAL_VPS_RESTORE, (unsigned long)vcpu->arch.vpd,
						1, 0, 0, 0, 0, 0);
	kvm_do_resume_op(vcpu);
}

void vmm_panic_handler(u64 vec)
{
	struct kvm_vcpu *vcpu = current_vcpu;

	vmm_sanity = 0;
	panic_vm(vcpu, "Unexpected interruption occurs in VMM, vector: 0x%lx\n",
			vec2off[vec]);
}