/*
 * arch/ia64/kvm/optvfault.S
 * optimize virtualization fault handler
 *
 * Copyright (C) 2006 Intel Co
 *	Xuefei Xu (Anthony Xu) <anthony.xu@intel.com>
 * Copyright (C) 2008 Intel Co
 *	Add the support for Tukwila processors.
 *	Xiantao Zhang <xiantao.zhang@intel.com>
 */

#include <asm/asmmacro.h>
#include <asm/processor.h>
#include <asm/kvm_host.h>

#include "vti.h"
#include "asm-offsets.h"

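/*
 * Each ACCE_* switch below enables the corresponding hand-optimized
 * fast path.  If a switch is left undefined, the stub at the top of
 * the matching handler branches back to kvm_virtualization_fault_back
 * so the fault is emulated in C instead.
 */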
#define ACCE_MOV_FROM_AR
#define ACCE_MOV_FROM_RR
#define ACCE_MOV_TO_RR
#define ACCE_RSM
#define ACCE_SSM
#define ACCE_MOV_TO_PSR
#define ACCE_THASH

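/*
 * VMX_VPS_SYNC_READ: call the PAL_VPS_SYNC_READ service to sync the
 * VPD from the processor.  b0, r24, r25 and r31 are preserved in
 * r17-r20 across the call; r24 is pointed at the bundle following the
 * brace groups (ip + 0x20) so the service returns to the restore
 * sequence.
 */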
#define VMX_VPS_SYNC_READ			\
	add r16=VMM_VPD_BASE_OFFSET,r21;	\
	mov r17 = b0;				\
	mov r18 = r24;				\
	mov r19 = r25;				\
	mov r20 = r31;				\
	;;					\
{.mii;						\
	ld8 r16 = [r16];			\
	nop 0x0;				\
	mov r24 = ip;				\
	;;					\
};						\
{.mmb;						\
	add r24 = 0x20, r24;			\
	mov r25 = r16;				\
	br.sptk.many kvm_vps_sync_read;		\
};						\
	mov b0 = r17;				\
	mov r24 = r18;				\
	mov r25 = r19;				\
	mov r31 = r20

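/*
 * kvm_vps_entry: common trampoline into the PAL VPS services.  r30
 * carries the byte offset of the requested service, which is added to
 * the per-vcpu vsa_base before branching.
 */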
ENTRY(kvm_vps_entry)
	adds r29 = VMM_VCPU_VSA_BASE_OFFSET,r21
	;;
	ld8 r29 = [r29]
	;;
	add r29 = r29, r30
	;;
	mov b0 = r29
	br.sptk.many b0
END(kvm_vps_entry)

/*
 *	Inputs:
 *	r24 : return address
 *	r25 : vpd
 *	r29 : scratch
 *
 */
GLOBAL_ENTRY(kvm_vps_sync_read)
	movl r30 = PAL_VPS_SYNC_READ
	;;
	br.sptk.many kvm_vps_entry
END(kvm_vps_sync_read)

/*
 *	Inputs:
 *	r24 : return address
 *	r25 : vpd
 *	r29 : scratch
 *
 */
GLOBAL_ENTRY(kvm_vps_sync_write)
	movl r30 = PAL_VPS_SYNC_WRITE
	;;
	br.sptk.many kvm_vps_entry
END(kvm_vps_sync_write)

/*
 *	Inputs:
 *	r23 : pr
 *	r24 : guest b0
 *	r25 : vpd
 *
 */
GLOBAL_ENTRY(kvm_vps_resume_normal)
	movl r30 = PAL_VPS_RESUME_NORMAL
	;;
	mov pr=r23,-2
	br.sptk.many kvm_vps_entry
END(kvm_vps_resume_normal)

/*
 *	Inputs:
 *	r23 : pr
 *	r24 : guest b0
 *	r25 : vpd
 *	r17 : isr
 */
GLOBAL_ENTRY(kvm_vps_resume_handler)
	movl r30 = PAL_VPS_RESUME_HANDLER
	;;
	ld8 r26=[r25]
	shr r17=r17,IA64_ISR_IR_BIT
	;;
	dep r26=r17,r26,63,1	// bit 63 of r26 indicates whether CFLE is enabled
	mov pr=r23,-2
	br.sptk.many kvm_vps_entry
END(kvm_vps_resume_handler)

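/*
 * Fast path for reading ar.itc: the guest ITC is emulated as host
 * ar.itc plus a per-vcpu offset, and the value handed to the guest is
 * also stored in last_itc so subsequent reads stay monotonic.
 */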
//mov r1=ar3
GLOBAL_ENTRY(kvm_asm_mov_from_ar)
#ifndef ACCE_MOV_FROM_AR
	br.many kvm_virtualization_fault_back
#endif
	add r18=VMM_VCPU_ITC_OFS_OFFSET, r21
	add r16=VMM_VCPU_LAST_ITC_OFFSET,r21
	extr.u r17=r25,6,7
	;;
	ld8 r18=[r18]
	mov r19=ar.itc
	mov r24=b0
	;;
	add r19=r19,r18
	addl r20=@gprel(asm_mov_to_reg),gp
	;;
	st8 [r16] = r19
	adds r30=kvm_resume_to_guest-asm_mov_to_reg,r20
	shladd r17=r17,4,r20
	;;
	mov b0=r17
	br.sptk.few b0
	;;
END(kvm_asm_mov_from_ar)

/*
 * Special SGI SN2 optimized version of mov_from_ar using the SN2 RTC
 * clock as its source for emulating the ITC.  This version is copied
 * on top of the original version if the host is determined to be an
 * SN2.
 */
GLOBAL_ENTRY(kvm_asm_mov_from_ar_sn2)
	add r18=VMM_VCPU_ITC_OFS_OFFSET, r21
	movl r19 = (KVM_VMM_BASE+(1<<KVM_VMM_SHIFT))

	add r16=VMM_VCPU_LAST_ITC_OFFSET,r21
	extr.u r17=r25,6,7
	mov r24=b0
	;;
	ld8 r18=[r18]
	ld8 r19=[r19]
	addl r20=@gprel(asm_mov_to_reg),gp
	;;
	add r19=r19,r18
	shladd r17=r17,4,r20
	;;
	adds r30=kvm_resume_to_guest-asm_mov_to_reg,r20
	st8 [r16] = r19
	mov b0=r17
	br.sptk.few b0
	;;
END(kvm_asm_mov_from_ar_sn2)


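/*
 * Fast path for mov r1=rr[r3]: region registers are shadowed in
 * vcpu->arch.vrr[].  The source GR is fetched through the
 * asm_mov_from_reg table, the region number is taken from its top
 * three bits, and vrr[vrn] is written back via asm_mov_to_reg.
 */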
// mov r1=rr[r3]
GLOBAL_ENTRY(kvm_asm_mov_from_rr)
#ifndef ACCE_MOV_FROM_RR
	br.many kvm_virtualization_fault_back
#endif
	extr.u r16=r25,20,7
	extr.u r17=r25,6,7
	addl r20=@gprel(asm_mov_from_reg),gp
	;;
	adds r30=kvm_asm_mov_from_rr_back_1-asm_mov_from_reg,r20
	shladd r16=r16,4,r20
	mov r24=b0
	;;
	add r27=VMM_VCPU_VRR0_OFFSET,r21
	mov b0=r16
	br.many b0
	;;
kvm_asm_mov_from_rr_back_1:
	adds r30=kvm_resume_to_guest-asm_mov_from_reg,r20
	adds r22=asm_mov_to_reg-asm_mov_from_reg,r20
	shr.u r26=r19,61
	;;
	shladd r17=r17,4,r22
	shladd r27=r26,3,r27
	;;
	ld8 r19=[r27]
	mov b0=r17
	br.many b0
END(kvm_asm_mov_from_rr)


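/*
 * Fast path for mov rr[r3]=r2: the new value is stored in the vrr[]
 * shadow, then rewritten for the hardware with rid<<4|0xe in the rid
 * field, ve forced to 1 and ps clamped to 14.  For regions 0 and 4
 * the value is also saved in META_SAVED_RR0, and the hardware write
 * is skipped while the vcpu is in metaphysical mode.  Writes to rr6
 * fall back to the C handler, since region 6 belongs to the VMM.
 */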
// mov rr[r3]=r2
GLOBAL_ENTRY(kvm_asm_mov_to_rr)
#ifndef ACCE_MOV_TO_RR
	br.many kvm_virtualization_fault_back
#endif
	extr.u r16=r25,20,7
	extr.u r17=r25,13,7
	addl r20=@gprel(asm_mov_from_reg),gp
	;;
	adds r30=kvm_asm_mov_to_rr_back_1-asm_mov_from_reg,r20
	shladd r16=r16,4,r20
	mov r22=b0
	;;
	add r27=VMM_VCPU_VRR0_OFFSET,r21
	mov b0=r16
	br.many b0
	;;
kvm_asm_mov_to_rr_back_1:
	adds r30=kvm_asm_mov_to_rr_back_2-asm_mov_from_reg,r20
	shr.u r23=r19,61
	shladd r17=r17,4,r20
	;;
	// if the target is rr6, fall back to the C handler
	cmp.eq p6,p0=6,r23
	mov b0=r22
	(p6) br.cond.dpnt.many kvm_virtualization_fault_back
	;;
	mov r28=r19
	mov b0=r17
	br.many b0
kvm_asm_mov_to_rr_back_2:
	adds r30=kvm_resume_to_guest-asm_mov_from_reg,r20
	shladd r27=r23,3,r27
	;;	// vrr.rid<<4 | 0xe
	st8 [r27]=r19
	mov b0=r30
	;;
	extr.u r16=r19,8,26
	extr.u r18=r19,2,6
	mov r17=0xe
	;;
	shladd r16 = r16, 4, r17
	extr.u r19=r19,0,8
	;;
	shl r16 = r16,8
	;;
	add r19 = r19, r16
	;;	// set ve = 1
	dep r19=-1,r19,0,1
	cmp.lt p6,p0=14,r18
	;;
	(p6) mov r18=14
	;;
	(p6) dep r19=r18,r19,2,6
	;;
	cmp.eq p6,p0=0,r23
	;;
	cmp.eq.or p6,p0=4,r23
	;;
	adds r16=VMM_VCPU_MODE_FLAGS_OFFSET,r21
	(p6) adds r17=VMM_VCPU_META_SAVED_RR0_OFFSET,r21
	;;
	ld4 r16=[r16]
	cmp.eq p7,p0=r0,r0
	(p6) shladd r17=r23,1,r17
	;;
	(p6) st8 [r17]=r19
	(p6) tbit.nz p6,p7=r16,0
	;;
	(p7) mov rr[r28]=r19
	mov r24=r22
	br.many b0
END(kvm_asm_mov_to_rr)


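/*
 * Fast path for rsm imm24: clears the requested bits in the virtual
 * PSR and in the host cr.ipsr image (ic/i/dt/si are never cleared
 * from the host psr).  If the guest clears psr.dt while in virtual
 * mode, rr0/rr4 are switched to the metaphysical mapping (META_RR0)
 * and the vcpu is flagged as being in physical mode.
 */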
//rsm
GLOBAL_ENTRY(kvm_asm_rsm)
#ifndef ACCE_RSM
	br.many kvm_virtualization_fault_back
#endif
	VMX_VPS_SYNC_READ
	;;
	extr.u r26=r25,6,21
	extr.u r27=r25,31,2
	;;
	extr.u r28=r25,36,1
	dep r26=r27,r26,21,2
	;;
	add r17=VPD_VPSR_START_OFFSET,r16
	add r22=VMM_VCPU_MODE_FLAGS_OFFSET,r21
	//r26 is imm24
	dep r26=r28,r26,23,1
	;;
	ld8 r18=[r17]
	movl r28=IA64_PSR_IC+IA64_PSR_I+IA64_PSR_DT+IA64_PSR_SI
	ld4 r23=[r22]
	sub r27=-1,r26
	mov r24=b0
	;;
	mov r20=cr.ipsr
	or r28=r27,r28
	and r19=r18,r27
	;;
	st8 [r17]=r19
	and r20=r20,r28
	/* Commented out for lack of FP lazy algorithm support:
	adds r27=IA64_VCPU_FP_PSR_OFFSET,r21
	;;
	ld8 r27=[r27]
	;;
	tbit.nz p8,p0= r27,IA64_PSR_DFH_BIT
	;;
	(p8) dep r20=-1,r20,IA64_PSR_DFH_BIT,1
	*/
	;;
	mov cr.ipsr=r20
	tbit.nz p6,p0=r23,0
	;;
	tbit.z.or p6,p0=r26,IA64_PSR_DT_BIT
	(p6) br.dptk kvm_resume_to_guest_with_sync
	;;
	add r26=VMM_VCPU_META_RR0_OFFSET,r21
	add r27=VMM_VCPU_META_RR0_OFFSET+8,r21
	dep r23=-1,r23,0,1
	;;
	ld8 r26=[r26]
	ld8 r27=[r27]
	st4 [r22]=r23
	dep.z r28=4,61,3
	;;
	mov rr[r0]=r26
	;;
	mov rr[r28]=r27
	;;
	srlz.d
	br.many kvm_resume_to_guest_with_sync
END(kvm_asm_rsm)


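/*
 * Fast path for ssm imm24: sets the requested bits in the virtual PSR
 * and in the host cr.ipsr image.  If the guest sets psr.dt/rt/it
 * while in physical mode, the saved virtual-mode rr0/rr4 are restored
 * from META_SAVED_RR0.  If vpsr.i has just been enabled and vhpi
 * exceeds the level masked by vtpr, the pending virtual external
 * interrupt is dispatched.
 */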
//ssm
GLOBAL_ENTRY(kvm_asm_ssm)
#ifndef ACCE_SSM
	br.many kvm_virtualization_fault_back
#endif
	VMX_VPS_SYNC_READ
	;;
	extr.u r26=r25,6,21
	extr.u r27=r25,31,2
	;;
	extr.u r28=r25,36,1
	dep r26=r27,r26,21,2
	;;	// r26 is imm24
	add r27=VPD_VPSR_START_OFFSET,r16
	dep r26=r28,r26,23,1
	;;	// load vpsr
	ld8 r29=[r27]
	mov r24=b0
	;;
	add r22=VMM_VCPU_MODE_FLAGS_OFFSET,r21
	mov r20=cr.ipsr
	or r19=r29,r26
	;;
	ld4 r23=[r22]
	st8 [r27]=r19
	or r20=r20,r26
	;;
	mov cr.ipsr=r20
	movl r28=IA64_PSR_DT+IA64_PSR_RT+IA64_PSR_IT
	;;
	and r19=r28,r19
	tbit.z p6,p0=r23,0
	;;
	cmp.ne.or p6,p0=r28,r19
	(p6) br.dptk kvm_asm_ssm_1
	;;
	add r26=VMM_VCPU_META_SAVED_RR0_OFFSET,r21
	add r27=VMM_VCPU_META_SAVED_RR0_OFFSET+8,r21
	dep r23=0,r23,0,1
	;;
	ld8 r26=[r26]
	ld8 r27=[r27]
	st4 [r22]=r23
	dep.z r28=4,61,3
	;;
	mov rr[r0]=r26
	;;
	mov rr[r28]=r27
	;;
	srlz.d
	;;
kvm_asm_ssm_1:
	tbit.nz p6,p0=r29,IA64_PSR_I_BIT
	;;
	tbit.z.or p6,p0=r19,IA64_PSR_I_BIT
	(p6) br.dptk kvm_resume_to_guest_with_sync
	;;
	add r29=VPD_VTPR_START_OFFSET,r16
	add r30=VPD_VHPI_START_OFFSET,r16
	;;
	ld8 r29=[r29]
	ld8 r30=[r30]
	;;
	extr.u r17=r29,4,4
	extr.u r18=r29,16,1
	;;
	dep r17=r18,r17,4,1
	;;
	cmp.gt p6,p0=r30,r17
	(p6) br.dpnt.few kvm_asm_dispatch_vexirq
	br.many kvm_resume_to_guest_with_sync
END(kvm_asm_ssm)


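/*
 * Fast path for mov psr.l=r2: replaces the low 32 bits of the virtual
 * PSR and of the host cr.ipsr image, performing the metaphysical mode
 * switch in either direction when the dt/rt/it bits change, then
 * checks whether a pending virtual interrupt became deliverable.
 */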
//mov psr.l=r2
GLOBAL_ENTRY(kvm_asm_mov_to_psr)
#ifndef ACCE_MOV_TO_PSR
	br.many kvm_virtualization_fault_back
#endif
	VMX_VPS_SYNC_READ
	;;
	extr.u r26=r25,13,7	// r2
	addl r20=@gprel(asm_mov_from_reg),gp
	;;
	adds r30=kvm_asm_mov_to_psr_back-asm_mov_from_reg,r20
	shladd r26=r26,4,r20
	mov r24=b0
	;;
	add r27=VPD_VPSR_START_OFFSET,r16
	mov b0=r26
	br.many b0
	;;
kvm_asm_mov_to_psr_back:
	ld8 r17=[r27]
	add r22=VMM_VCPU_MODE_FLAGS_OFFSET,r21
	dep r19=0,r19,32,32
	;;
	ld4 r23=[r22]
	dep r18=0,r17,0,32
	;;
	add r30=r18,r19
	movl r28=IA64_PSR_DT+IA64_PSR_RT+IA64_PSR_IT
	;;
	st8 [r27]=r30
	and r27=r28,r30
	and r29=r28,r17
	;;
	cmp.eq p5,p0=r29,r27
	cmp.eq p6,p7=r28,r27
	(p5) br.many kvm_asm_mov_to_psr_1
	;;
	// virtual to physical
	(p7) add r26=VMM_VCPU_META_RR0_OFFSET,r21
	(p7) add r27=VMM_VCPU_META_RR0_OFFSET+8,r21
	(p7) dep r23=-1,r23,0,1
	;;
	// physical to virtual
	(p6) add r26=VMM_VCPU_META_SAVED_RR0_OFFSET,r21
	(p6) add r27=VMM_VCPU_META_SAVED_RR0_OFFSET+8,r21
	(p6) dep r23=0,r23,0,1
	;;
	ld8 r26=[r26]
	ld8 r27=[r27]
	st4 [r22]=r23
	dep.z r28=4,61,3
	;;
	mov rr[r0]=r26
	;;
	mov rr[r28]=r27
	;;
	srlz.d
	;;
kvm_asm_mov_to_psr_1:
	mov r20=cr.ipsr
	movl r28=IA64_PSR_IC+IA64_PSR_I+IA64_PSR_DT+IA64_PSR_SI+IA64_PSR_RT
	;;
	or r19=r19,r28
	dep r20=0,r20,0,32
	;;
	add r20=r19,r20
	mov b0=r24
	;;
	/* Commented out for lack of FP lazy algorithm support:
	adds r27=IA64_VCPU_FP_PSR_OFFSET,r21
	;;
	ld8 r27=[r27]
	;;
	tbit.nz p8,p0=r27,IA64_PSR_DFH_BIT
	;;
	(p8) dep r20=-1,r20,IA64_PSR_DFH_BIT,1
	;;
	*/
	mov cr.ipsr=r20
	cmp.ne p6,p0=r0,r0
	;;
	tbit.nz.or p6,p0=r17,IA64_PSR_I_BIT
	tbit.z.or p6,p0=r30,IA64_PSR_I_BIT
	(p6) br.dpnt.few kvm_resume_to_guest_with_sync
	;;
	add r29=VPD_VTPR_START_OFFSET,r16
	add r30=VPD_VHPI_START_OFFSET,r16
	;;
	ld8 r29=[r29]
	ld8 r30=[r30]
	;;
	extr.u r17=r29,4,4
	extr.u r18=r29,16,1
	;;
	dep r17=r18,r17,4,1
	;;
	cmp.gt p6,p0=r30,r17
	(p6) br.dpnt.few kvm_asm_dispatch_vexirq
	br.many kvm_resume_to_guest_with_sync
END(kvm_asm_mov_to_psr)


ENTRY(kvm_asm_dispatch_vexirq)
// increment iip
	mov r17 = b0
	mov r18 = r31
{.mii
	add r25=VMM_VPD_BASE_OFFSET,r21
	nop 0x0
	mov r24 = ip
	;;
}
{.mmb
	add r24 = 0x20, r24
	ld8 r25 = [r25]
	br.sptk.many kvm_vps_sync_write
}
	mov b0 = r17
	mov r16=cr.ipsr
	mov r31 = r18
	mov r19 = 37
	;;
	extr.u r17=r16,IA64_PSR_RI_BIT,2
	tbit.nz p6,p7=r16,IA64_PSR_RI_BIT+1
	;;
	(p6) mov r18=cr.iip
	(p6) mov r17=r0
	(p7) add r17=1,r17
	;;
	(p6) add r18=0x10,r18
	dep r16=r17,r16,IA64_PSR_RI_BIT,2
	;;
	(p6) mov cr.iip=r18
	mov cr.ipsr=r16
	mov r30 = 1
	br.many kvm_dispatch_vexirq
END(kvm_asm_dispatch_vexirq)

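/*
 * thash emulation computes the long-format VHPT entry address for the
 * virtual address in GR[r3].  Roughly, in C (a sketch matching the
 * field extraction done below):
 *
 *	pval = (vadr & VRN_MASK)
 *	     | (((pta << 3) >> (pta.size + 3)) << pta.size)
 *	     | (((vadr >> rr.ps) << 3) & ((1UL << pta.size) - 1));
 */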
// thash
// TODO: add support when pta.vf = 1
GLOBAL_ENTRY(kvm_asm_thash)
#ifndef ACCE_THASH
	br.many kvm_virtualization_fault_back
#endif
	extr.u r17=r25,20,7		// get r3 from opcode in r25
	extr.u r18=r25,6,7		// get r1 from opcode in r25
	addl r20=@gprel(asm_mov_from_reg),gp
	;;
	adds r30=kvm_asm_thash_back1-asm_mov_from_reg,r20
	shladd r17=r17,4,r20	// get addr of MOVE_FROM_REG(r17)
	adds r16=VMM_VPD_BASE_OFFSET,r21	// get vcpu.arch.priveregs
	;;
	mov r24=b0
	;;
	ld8 r16=[r16]		// get VPD addr
	mov b0=r17
	br.many b0			// r19 return value
	;;
kvm_asm_thash_back1:
	shr.u r23=r19,61		// get RR number
	adds r28=VMM_VCPU_VRR0_OFFSET,r21	// get vcpu->arch.vrr[0]'s addr
	adds r16=VMM_VPD_VPTA_OFFSET,r16	// get vpta
	;;
	shladd r27=r23,3,r28	// get vcpu->arch.vrr[r23]'s addr
	ld8 r17=[r16]		// get PTA
	mov r26=1
	;;
	extr.u r29=r17,2,6	// get pta.size
	ld8 r28=[r27]		// get vcpu->arch.vrr[r23]'s value
	;;
	mov b0=r24
	// fall back to C if pta.vf is set
	tbit.nz p6,p0=r17, 8
	;;
	(p6) mov r24=EVENT_THASH
	(p6) br.cond.dpnt.many kvm_virtualization_fault_back
	extr.u r28=r28,2,6	// get rr.ps
	shl r22=r26,r29		// 1UL << pta.size
	;;
	shr.u r23=r19,r28	// vaddr >> rr.ps
	adds r26=3,r29		// pta.size + 3
	shl r27=r17,3		// pta << 3
	;;
	shl r23=r23,3		// (vaddr >> rr.ps) << 3
	shr.u r27=r27,r26	// (pta << 3) >> (pta.size+3)
	movl r16=7<<61
	;;
	adds r22=-1,r22		// (1UL << pta.size) - 1
	shl r27=r27,r29		// ((pta<<3)>>(pta.size+3))<<pta.size
	and r19=r19,r16		// vaddr & VRN_MASK
	;;
	and r22=r22,r23		// vhpt_offset
	or r19=r19,r27	// (vadr&VRN_MASK)|(((pta<<3)>>(pta.size+3))<<pta.size)
	adds r26=asm_mov_to_reg-asm_mov_from_reg,r20
	;;
	or r19=r19,r22		// calc pval
	shladd r17=r18,4,r26
	adds r30=kvm_resume_to_guest-asm_mov_from_reg,r20
	;;
	mov b0=r17
	br.many b0
END(kvm_asm_thash)

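/*
 * The register mover tables below are arrays of 16-byte bundles
 * indexed by register number, which is why callers compute entry
 * addresses with shladd ...,4,...  Registers r16-r31 are banked, so
 * their entries branch to thunks that flip register banks with
 * bsw.1/bsw.0 around the access.
 */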
#define MOV_TO_REG0	\
{;			\
	nop.b 0x0;		\
	nop.b 0x0;		\
	nop.b 0x0;		\
	;;			\
};


#define MOV_TO_REG(n)	\
{;			\
	mov r##n##=r19;	\
	mov b0=r30;	\
	br.sptk.many b0;	\
	;;			\
};


#define MOV_FROM_REG(n)	\
{;				\
	mov r19=r##n##;		\
	mov b0=r30;		\
	br.sptk.many b0;		\
	;;				\
};


#define MOV_TO_BANK0_REG(n)			\
ENTRY_MIN_ALIGN(asm_mov_to_bank0_reg##n##);	\
{;						\
	mov r26=r2;				\
	mov r2=r19;				\
	bsw.1;					\
	;;						\
};						\
{;						\
	mov r##n##=r2;				\
	nop.b 0x0;					\
	bsw.0;					\
	;;						\
};						\
{;						\
	mov r2=r26;				\
	mov b0=r30;				\
	br.sptk.many b0;				\
	;;						\
};						\
END(asm_mov_to_bank0_reg##n##)


#define MOV_FROM_BANK0_REG(n)			\
ENTRY_MIN_ALIGN(asm_mov_from_bank0_reg##n##);	\
{;						\
	mov r26=r2;				\
	nop.b 0x0;					\
	bsw.1;					\
	;;						\
};						\
{;						\
	mov r2=r##n##;				\
	nop.b 0x0;					\
	bsw.0;					\
	;;						\
};						\
{;						\
	mov r19=r2;				\
	mov r2=r26;				\
	mov b0=r30;				\
};						\
{;						\
	nop.b 0x0;					\
	nop.b 0x0;					\
	br.sptk.many b0;				\
	;;						\
};						\
END(asm_mov_from_bank0_reg##n##)


#define JMP_TO_MOV_TO_BANK0_REG(n)		\
{;						\
	nop.b 0x0;					\
	nop.b 0x0;					\
	br.sptk.many asm_mov_to_bank0_reg##n##;	\
	;;						\
}


#define JMP_TO_MOV_FROM_BANK0_REG(n)		\
{;						\
	nop.b 0x0;					\
	nop.b 0x0;					\
	br.sptk.many asm_mov_from_bank0_reg##n##;	\
	;;						\
}


MOV_FROM_BANK0_REG(16)
MOV_FROM_BANK0_REG(17)
MOV_FROM_BANK0_REG(18)
MOV_FROM_BANK0_REG(19)
MOV_FROM_BANK0_REG(20)
MOV_FROM_BANK0_REG(21)
MOV_FROM_BANK0_REG(22)
MOV_FROM_BANK0_REG(23)
MOV_FROM_BANK0_REG(24)
MOV_FROM_BANK0_REG(25)
MOV_FROM_BANK0_REG(26)
MOV_FROM_BANK0_REG(27)
MOV_FROM_BANK0_REG(28)
MOV_FROM_BANK0_REG(29)
MOV_FROM_BANK0_REG(30)
MOV_FROM_BANK0_REG(31)


// mov from reg table
ENTRY(asm_mov_from_reg)
	MOV_FROM_REG(0)
	MOV_FROM_REG(1)
	MOV_FROM_REG(2)
	MOV_FROM_REG(3)
	MOV_FROM_REG(4)
	MOV_FROM_REG(5)
	MOV_FROM_REG(6)
	MOV_FROM_REG(7)
	MOV_FROM_REG(8)
	MOV_FROM_REG(9)
	MOV_FROM_REG(10)
	MOV_FROM_REG(11)
	MOV_FROM_REG(12)
	MOV_FROM_REG(13)
	MOV_FROM_REG(14)
	MOV_FROM_REG(15)
	JMP_TO_MOV_FROM_BANK0_REG(16)
	JMP_TO_MOV_FROM_BANK0_REG(17)
	JMP_TO_MOV_FROM_BANK0_REG(18)
	JMP_TO_MOV_FROM_BANK0_REG(19)
	JMP_TO_MOV_FROM_BANK0_REG(20)
	JMP_TO_MOV_FROM_BANK0_REG(21)
	JMP_TO_MOV_FROM_BANK0_REG(22)
	JMP_TO_MOV_FROM_BANK0_REG(23)
	JMP_TO_MOV_FROM_BANK0_REG(24)
	JMP_TO_MOV_FROM_BANK0_REG(25)
	JMP_TO_MOV_FROM_BANK0_REG(26)
	JMP_TO_MOV_FROM_BANK0_REG(27)
	JMP_TO_MOV_FROM_BANK0_REG(28)
	JMP_TO_MOV_FROM_BANK0_REG(29)
	JMP_TO_MOV_FROM_BANK0_REG(30)
	JMP_TO_MOV_FROM_BANK0_REG(31)
	MOV_FROM_REG(32)
	MOV_FROM_REG(33)
	MOV_FROM_REG(34)
	MOV_FROM_REG(35)
	MOV_FROM_REG(36)
	MOV_FROM_REG(37)
	MOV_FROM_REG(38)
	MOV_FROM_REG(39)
	MOV_FROM_REG(40)
	MOV_FROM_REG(41)
	MOV_FROM_REG(42)
	MOV_FROM_REG(43)
	MOV_FROM_REG(44)
	MOV_FROM_REG(45)
	MOV_FROM_REG(46)
	MOV_FROM_REG(47)
	MOV_FROM_REG(48)
	MOV_FROM_REG(49)
	MOV_FROM_REG(50)
	MOV_FROM_REG(51)
	MOV_FROM_REG(52)
	MOV_FROM_REG(53)
	MOV_FROM_REG(54)
	MOV_FROM_REG(55)
	MOV_FROM_REG(56)
	MOV_FROM_REG(57)
	MOV_FROM_REG(58)
	MOV_FROM_REG(59)
	MOV_FROM_REG(60)
	MOV_FROM_REG(61)
	MOV_FROM_REG(62)
	MOV_FROM_REG(63)
	MOV_FROM_REG(64)
	MOV_FROM_REG(65)
	MOV_FROM_REG(66)
	MOV_FROM_REG(67)
	MOV_FROM_REG(68)
	MOV_FROM_REG(69)
	MOV_FROM_REG(70)
	MOV_FROM_REG(71)
	MOV_FROM_REG(72)
	MOV_FROM_REG(73)
	MOV_FROM_REG(74)
	MOV_FROM_REG(75)
	MOV_FROM_REG(76)
	MOV_FROM_REG(77)
	MOV_FROM_REG(78)
	MOV_FROM_REG(79)
	MOV_FROM_REG(80)
	MOV_FROM_REG(81)
	MOV_FROM_REG(82)
	MOV_FROM_REG(83)
	MOV_FROM_REG(84)
	MOV_FROM_REG(85)
	MOV_FROM_REG(86)
	MOV_FROM_REG(87)
	MOV_FROM_REG(88)
	MOV_FROM_REG(89)
	MOV_FROM_REG(90)
	MOV_FROM_REG(91)
	MOV_FROM_REG(92)
	MOV_FROM_REG(93)
	MOV_FROM_REG(94)
	MOV_FROM_REG(95)
	MOV_FROM_REG(96)
	MOV_FROM_REG(97)
	MOV_FROM_REG(98)
	MOV_FROM_REG(99)
	MOV_FROM_REG(100)
	MOV_FROM_REG(101)
	MOV_FROM_REG(102)
	MOV_FROM_REG(103)
	MOV_FROM_REG(104)
	MOV_FROM_REG(105)
	MOV_FROM_REG(106)
	MOV_FROM_REG(107)
	MOV_FROM_REG(108)
	MOV_FROM_REG(109)
	MOV_FROM_REG(110)
	MOV_FROM_REG(111)
	MOV_FROM_REG(112)
	MOV_FROM_REG(113)
	MOV_FROM_REG(114)
	MOV_FROM_REG(115)
	MOV_FROM_REG(116)
	MOV_FROM_REG(117)
	MOV_FROM_REG(118)
	MOV_FROM_REG(119)
	MOV_FROM_REG(120)
	MOV_FROM_REG(121)
	MOV_FROM_REG(122)
	MOV_FROM_REG(123)
	MOV_FROM_REG(124)
	MOV_FROM_REG(125)
	MOV_FROM_REG(126)
	MOV_FROM_REG(127)
END(asm_mov_from_reg)


/* must be in bank 0
 * parameters:
 * r31: pr
 * r24: b0
 */
ENTRY(kvm_resume_to_guest_with_sync)
	adds r19=VMM_VPD_BASE_OFFSET,r21
	mov r16 = r31
	mov r17 = r24
	;;
{.mii
	ld8 r25 =[r19]
	nop 0x0
	mov r24 = ip
	;;
}
{.mmb
	add r24 = 0x20, r24
	nop 0x0
	br.sptk.many kvm_vps_sync_write
}

	mov r31 = r16
	mov r24 = r17
	;;
	br.sptk.many kvm_resume_to_guest
END(kvm_resume_to_guest_with_sync)

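/*
 * kvm_resume_to_guest: advance cr.iip/ipsr.ri past the emulated
 * instruction slot, then re-enter the guest via PAL_VPS_RESUME_NORMAL
 * when vpsr.ic is set, or via PAL_VPS_RESUME_HANDLER (with cr.isr.ir
 * copied into bit 63 of r26 to control CFLE) when it is not.
 */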
ENTRY(kvm_resume_to_guest)
	adds r16 = VMM_VCPU_SAVED_GP_OFFSET,r21
	;;
	ld8 r1 =[r16]
	adds r20 = VMM_VCPU_VSA_BASE_OFFSET,r21
	;;
	mov r16=cr.ipsr
	;;
	ld8 r20 = [r20]
	adds r19=VMM_VPD_BASE_OFFSET,r21
	;;
	ld8 r25=[r19]
	extr.u r17=r16,IA64_PSR_RI_BIT,2
	tbit.nz p6,p7=r16,IA64_PSR_RI_BIT+1
	;;
	(p6) mov r18=cr.iip
	(p6) mov r17=r0
	;;
	(p6) add r18=0x10,r18
	(p7) add r17=1,r17
	;;
	(p6) mov cr.iip=r18
	dep r16=r17,r16,IA64_PSR_RI_BIT,2
	;;
	mov cr.ipsr=r16
	adds r19= VPD_VPSR_START_OFFSET,r25
	add r28=PAL_VPS_RESUME_NORMAL,r20
	add r29=PAL_VPS_RESUME_HANDLER,r20
	;;
	ld8 r19=[r19]
	mov b0=r29
	mov r27=cr.isr
	;;
	tbit.z p6,p7 = r19,IA64_PSR_IC_BIT		// p7=vpsr.ic
	shr r27=r27,IA64_ISR_IR_BIT
	;;
	(p6) ld8 r26=[r25]
	(p7) mov b0=r28
	;;
	(p6) dep r26=r27,r26,63,1
	mov pr=r31,-2
	br.sptk.many b0		// call the PAL service
	;;
END(kvm_resume_to_guest)


MOV_TO_BANK0_REG(16)
MOV_TO_BANK0_REG(17)
MOV_TO_BANK0_REG(18)
MOV_TO_BANK0_REG(19)
MOV_TO_BANK0_REG(20)
MOV_TO_BANK0_REG(21)
MOV_TO_BANK0_REG(22)
MOV_TO_BANK0_REG(23)
MOV_TO_BANK0_REG(24)
MOV_TO_BANK0_REG(25)
MOV_TO_BANK0_REG(26)
MOV_TO_BANK0_REG(27)
MOV_TO_BANK0_REG(28)
MOV_TO_BANK0_REG(29)
MOV_TO_BANK0_REG(30)
MOV_TO_BANK0_REG(31)


// mov to reg table
ENTRY(asm_mov_to_reg)
	MOV_TO_REG0
	MOV_TO_REG(1)
	MOV_TO_REG(2)
	MOV_TO_REG(3)
	MOV_TO_REG(4)
	MOV_TO_REG(5)
	MOV_TO_REG(6)
	MOV_TO_REG(7)
	MOV_TO_REG(8)
	MOV_TO_REG(9)
	MOV_TO_REG(10)
	MOV_TO_REG(11)
	MOV_TO_REG(12)
	MOV_TO_REG(13)
	MOV_TO_REG(14)
	MOV_TO_REG(15)
	JMP_TO_MOV_TO_BANK0_REG(16)
	JMP_TO_MOV_TO_BANK0_REG(17)
	JMP_TO_MOV_TO_BANK0_REG(18)
	JMP_TO_MOV_TO_BANK0_REG(19)
	JMP_TO_MOV_TO_BANK0_REG(20)
	JMP_TO_MOV_TO_BANK0_REG(21)
	JMP_TO_MOV_TO_BANK0_REG(22)
	JMP_TO_MOV_TO_BANK0_REG(23)
	JMP_TO_MOV_TO_BANK0_REG(24)
	JMP_TO_MOV_TO_BANK0_REG(25)
	JMP_TO_MOV_TO_BANK0_REG(26)
	JMP_TO_MOV_TO_BANK0_REG(27)
	JMP_TO_MOV_TO_BANK0_REG(28)
	JMP_TO_MOV_TO_BANK0_REG(29)
	JMP_TO_MOV_TO_BANK0_REG(30)
	JMP_TO_MOV_TO_BANK0_REG(31)
	MOV_TO_REG(32)
	MOV_TO_REG(33)
	MOV_TO_REG(34)
	MOV_TO_REG(35)
	MOV_TO_REG(36)
	MOV_TO_REG(37)
	MOV_TO_REG(38)
	MOV_TO_REG(39)
	MOV_TO_REG(40)
	MOV_TO_REG(41)
	MOV_TO_REG(42)
	MOV_TO_REG(43)
	MOV_TO_REG(44)
	MOV_TO_REG(45)
	MOV_TO_REG(46)
	MOV_TO_REG(47)
	MOV_TO_REG(48)
	MOV_TO_REG(49)
	MOV_TO_REG(50)
	MOV_TO_REG(51)
	MOV_TO_REG(52)
	MOV_TO_REG(53)
	MOV_TO_REG(54)
	MOV_TO_REG(55)
	MOV_TO_REG(56)
	MOV_TO_REG(57)
	MOV_TO_REG(58)
	MOV_TO_REG(59)
	MOV_TO_REG(60)
	MOV_TO_REG(61)
	MOV_TO_REG(62)
	MOV_TO_REG(63)
	MOV_TO_REG(64)
	MOV_TO_REG(65)
	MOV_TO_REG(66)
	MOV_TO_REG(67)
	MOV_TO_REG(68)
	MOV_TO_REG(69)
	MOV_TO_REG(70)
	MOV_TO_REG(71)
	MOV_TO_REG(72)
	MOV_TO_REG(73)
	MOV_TO_REG(74)
	MOV_TO_REG(75)
	MOV_TO_REG(76)
	MOV_TO_REG(77)
	MOV_TO_REG(78)
	MOV_TO_REG(79)
	MOV_TO_REG(80)
	MOV_TO_REG(81)
	MOV_TO_REG(82)
	MOV_TO_REG(83)
	MOV_TO_REG(84)
	MOV_TO_REG(85)
	MOV_TO_REG(86)
	MOV_TO_REG(87)
	MOV_TO_REG(88)
	MOV_TO_REG(89)
	MOV_TO_REG(90)
	MOV_TO_REG(91)
	MOV_TO_REG(92)
	MOV_TO_REG(93)
	MOV_TO_REG(94)
	MOV_TO_REG(95)
	MOV_TO_REG(96)
	MOV_TO_REG(97)
	MOV_TO_REG(98)
	MOV_TO_REG(99)
	MOV_TO_REG(100)
	MOV_TO_REG(101)
	MOV_TO_REG(102)
	MOV_TO_REG(103)
	MOV_TO_REG(104)
	MOV_TO_REG(105)
	MOV_TO_REG(106)
	MOV_TO_REG(107)
	MOV_TO_REG(108)
	MOV_TO_REG(109)
	MOV_TO_REG(110)
	MOV_TO_REG(111)
	MOV_TO_REG(112)
	MOV_TO_REG(113)
	MOV_TO_REG(114)
	MOV_TO_REG(115)
	MOV_TO_REG(116)
	MOV_TO_REG(117)
	MOV_TO_REG(118)
	MOV_TO_REG(119)
	MOV_TO_REG(120)
	MOV_TO_REG(121)
	MOV_TO_REG(122)
	MOV_TO_REG(123)
	MOV_TO_REG(124)
	MOV_TO_REG(125)
	MOV_TO_REG(126)
	MOV_TO_REG(127)
END(asm_mov_to_reg)