/*
* arch/ia64/kvm/optvfault.S
* Optimized virtualization fault handlers
*
* Copyright (C) 2006 Intel Co
* Xuefei Xu (Anthony Xu) <anthony.xu@intel.com>
* Copyright (C) 2008 Intel Co
* Added support for Tukwila processors.
* Xiantao Zhang <xiantao.zhang@intel.com>
*/
#include <asm/asmmacro.h>
#include <asm/processor.h>
#include "vti.h"
#include "asm-offsets.h"
#define ACCE_MOV_FROM_AR
#define ACCE_MOV_FROM_RR
#define ACCE_MOV_TO_RR
#define ACCE_RSM
#define ACCE_SSM
#define ACCE_MOV_TO_PSR
#define ACCE_THASH
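
/*
 * VMX_VPS_SYNC_READ: save b0, r24, r25 and r31 in r17-r20, load this
 * vcpu's VPD base (from r21) into r25, set the return address in r24
 * to the bundle after the branch (ip + 0x20), and call
 * kvm_vps_sync_read; the trailing moves restore the saved registers
 * once the service returns.
 */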
#define VMX_VPS_SYNC_READ \
add r16=VMM_VPD_BASE_OFFSET,r21; \
mov r17 = b0; \
mov r18 = r24; \
mov r19 = r25; \
mov r20 = r31; \
;; \
{.mii; \
ld8 r16 = [r16]; \
nop 0x0; \
mov r24 = ip; \
;; \
}; \
{.mmb; \
add r24 = 0x20, r24; \
mov r25 = r16; \
br.sptk.many kvm_vps_sync_read; \
}; \
mov b0 = r17; \
mov r24 = r18; \
mov r25 = r19; \
mov r31 = r20
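
/*
 * kvm_vps_entry: common trampoline into the PAL virtualization
 * services. Expects the vcpu pointer in r21 and a PAL_VPS_* service
 * offset in r30, and branches to vsa_base + offset.
 */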
ENTRY(kvm_vps_entry)
adds r29 = VMM_VCPU_VSA_BASE_OFFSET,r21
;;
ld8 r29 = [r29]
;;
add r29 = r29, r30
;;
mov b0 = r29
br.sptk.many b0
END(kvm_vps_entry)
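
/*
 * The kvm_vps_* wrappers below simply load the matching PAL_VPS_*
 * service offset into r30 and enter through kvm_vps_entry.
 */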
/*
* Inputs:
* r24 : return address
* r25 : vpd
* r29 : scratch
*
*/
GLOBAL_ENTRY(kvm_vps_sync_read)
movl r30 = PAL_VPS_SYNC_READ
;;
br.sptk.many kvm_vps_entry
END(kvm_vps_sync_read)

/*
* Inputs:
* r24 : return address
* r25 : vpd
* r29 : scratch
*
*/
GLOBAL_ENTRY(kvm_vps_sync_write)
movl r30 = PAL_VPS_SYNC_WRITE
;;
br.sptk.many kvm_vps_entry
END(kvm_vps_sync_write)
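
/*
 * Both resume paths restore the guest predicate registers from r23
 * (the -2 mask in "mov pr=r23,-2" skips only the hardwired p0)
 * before entering the PAL resume service.
 */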
/*
* Inputs:
* r23 : pr
* r24 : guest b0
* r25 : vpd
*
*/
GLOBAL_ENTRY(kvm_vps_resume_normal)
movl r30 = PAL_VPS_RESUME_NORMAL
;;
mov pr=r23,-2
br.sptk.many kvm_vps_entry
END(kvm_vps_resume_normal)

/*
* Inputs:
* r23 : pr
* r24 : guest b0
* r25 : vpd
* r17 : isr
*/
GLOBAL_ENTRY(kvm_vps_resume_handler)
movl r30 = PAL_VPS_RESUME_HANDLER
;;
ld8 r26=[r25]
shr r17=r17,IA64_ISR_IR_BIT
;;
dep r26=r17,r26,63,1 // bit 63 of r26 indicates whether CFLE is enabled
mov pr=r23,-2
br.sptk.many kvm_vps_entry
END(kvm_vps_resume_handler)
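
/*
 * Fast path for "mov r1=ar.itc": compute the guest ITC as host
 * ar.itc plus the vcpu's ITC offset, cache the result in last_itc,
 * then jump into the asm_mov_to_reg stub table (16-byte entries,
 * indexed by the target GR number in bits 6-12 of the instruction
 * image in r25) with r30 holding kvm_resume_to_guest as the stub's
 * continuation.
 */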
// mov r1=ar3 (only ar.itc is accelerated here)
GLOBAL_ENTRY(kvm_asm_mov_from_ar)
#ifndef ACCE_MOV_FROM_AR
br.many kvm_virtualization_fault_back
#endif
add r18=VMM_VCPU_ITC_OFS_OFFSET, r21
add r16=VMM_VCPU_LAST_ITC_OFFSET,r21
extr.u r17=r25,6,7
;;
ld8 r18=[r18]
mov r19=ar.itc
mov r24=b0
;;
add r19=r19,r18
addl r20=@gprel(asm_mov_to_reg),gp
;;
st8 [r16] = r19
adds r30=kvm_resume_to_guest-asm_mov_to_reg,r20
shladd r17=r17,4,r20
;;
mov b0=r17
br.sptk.few b0
;;
END(kvm_asm_mov_from_ar)
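
/*
 * Fast path for reading a region register; the source and target GR
 * fields are decoded from the instruction image in r25.
 */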
// mov r1=rr[r3]
GLOBAL_ENTRY(kvm_asm_mov_from_rr)
#ifndef ACCE_MOV_FROM_RR
br.many kvm_virtualization_fault_back
#endif
extr.u r16=r25,