Diffstat (limited to 'arch/ia64/kvm/vcpu.c')
-rw-r--r--	arch/ia64/kvm/vcpu.c	80
1 file changed, 32 insertions(+), 48 deletions(-)
diff --git a/arch/ia64/kvm/vcpu.c b/arch/ia64/kvm/vcpu.c
index ecd526b5532..958815c9787 100644
--- a/arch/ia64/kvm/vcpu.c
+++ b/arch/ia64/kvm/vcpu.c
@@ -112,7 +112,6 @@ void switch_to_physical_rid(struct kvm_vcpu *vcpu)
 	return;
 }
 
-
 void switch_to_virtual_rid(struct kvm_vcpu *vcpu)
 {
 	unsigned long psr;
@@ -166,8 +165,6 @@ void switch_mm_mode(struct kvm_vcpu *vcpu, struct ia64_psr old_psr,
 	return;
 }
 
-
-
 /*
  * In physical mode, insert tc/tr for region 0 and 4 uses
  * RID[0] and RID[4] which is for physical mode emulation.
@@ -269,7 +266,6 @@ static inline unsigned long fph_index(struct kvm_pt_regs *regs,
 	return rotate_reg(96, rrb_fr, (regnum - IA64_FIRST_ROTATING_FR));
 }
 
-
 /*
  * The inverse of the above: given bspstore and the number of
  * registers, calculate ar.bsp.
@@ -390,7 +386,7 @@ void set_rse_reg(struct kvm_pt_regs *regs, unsigned long r1,
 		else
 			*rnat_addr = (*rnat_addr) & (~nat_mask);
 
-		ia64_setreg(_IA64_REG_AR_BSPSTORE, bspstore);
+		ia64_setreg(_IA64_REG_AR_BSPSTORE, (unsigned long)bspstore);
 		ia64_setreg(_IA64_REG_AR_RNAT, rnat);
 	}
 	local_irq_restore(psr);
@@ -410,7 +406,7 @@ void getreg(unsigned long regnum, unsigned long *val,
 	 * Now look at registers in [0-31] range and init correct UNAT
 	 */
 	addr = (unsigned long)regs;
-	unat = &regs->eml_unat;;
+	unat = &regs->eml_unat;
 
 	addr += gr_info[regnum];
 
@@ -465,7 +461,7 @@ void setreg(unsigned long regnum, unsigned long val,
 u64 vcpu_get_gr(struct kvm_vcpu *vcpu, unsigned long reg)
 {
 	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
-	u64 val;
+	unsigned long val;
 
 	if (!reg)
 		return 0;
@@ -473,7 +469,7 @@ u64 vcpu_get_gr(struct kvm_vcpu *vcpu, unsigned long reg)
 	return val;
 }
 
-void vcpu_set_gr(struct kvm_vcpu *vcpu, u64 reg, u64 value, int nat)
+void vcpu_set_gr(struct kvm_vcpu *vcpu, unsigned long reg, u64 value, int nat)
 {
 	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
 	long sof = (regs->cr_ifs) & 0x7f;
@@ -792,13 +788,29 @@ void vcpu_set_fpreg(struct kvm_vcpu *vcpu, unsigned long reg,
 	setfpreg(reg, val, regs);	/* FIXME: handle NATs later*/
 }
 
+/*
+ * The Altix RTC is mapped specially here for the vmm module
+ */
+#define SN_RTC_BASE	(u64 *)(KVM_VMM_BASE+(1UL<<KVM_VMM_SHIFT))
+static long kvm_get_itc(struct kvm_vcpu *vcpu)
+{
+#if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC)
+	struct kvm *kvm = (struct kvm *)KVM_VM_BASE;
+
+	if (kvm->arch.is_sn2)
+		return (*SN_RTC_BASE);
+	else
+#endif
+		return ia64_getreg(_IA64_REG_AR_ITC);
+}
+
 /************************************************************************
  * lsapic timer
 ***********************************************************************/
 u64 vcpu_get_itc(struct kvm_vcpu *vcpu)
 {
 	unsigned long guest_itc;
-	guest_itc = VMX(vcpu, itc_offset) + ia64_getreg(_IA64_REG_AR_ITC);
+	guest_itc = VMX(vcpu, itc_offset) + kvm_get_itc(vcpu);
 
 	if (guest_itc >= VMX(vcpu, last_itc)) {
 		VMX(vcpu, last_itc) = guest_itc;
@@ -811,12 +823,15 @@ static inline void vcpu_set_itm(struct kvm_vcpu *vcpu, u64 val);
 static void vcpu_set_itc(struct kvm_vcpu *vcpu, u64 val)
 {
 	struct kvm_vcpu *v;
+	struct kvm *kvm;
 	int i;
-	long itc_offset = val - ia64_getreg(_IA64_REG_AR_ITC);
+	long itc_offset = val - kvm_get_itc(vcpu);
 	unsigned long vitv = VCPU(vcpu, itv);
 
-	if (vcpu->vcpu_id == 0) {
-		for (i = 0; i < KVM_MAX_VCPUS; i++) {
+	kvm = (struct kvm *)KVM_VM_BASE;
+
+	if (kvm_vcpu_is_bsp(vcpu)) {
+		for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) {
 			v = (struct kvm_vcpu *)((char *)vcpu +
 					sizeof(struct kvm_vcpu_data) * i);
 			VMX(v, itc_offset) = itc_offset;
@@ -1039,8 +1054,6 @@ u64 vcpu_tak(struct kvm_vcpu *vcpu, u64 vadr)
 	return key;
 }
 
-
-
 void kvm_thash(struct kvm_vcpu *vcpu, INST64 inst)
 {
 	unsigned long thash, vadr;
@@ -1050,7 +1063,6 @@ void kvm_thash(struct kvm_vcpu *vcpu, INST64 inst)
 	vcpu_set_gr(vcpu, inst.M46.r1, thash, 0);
 }
 
-
 void kvm_ttag(struct kvm_vcpu *vcpu, INST64 inst)
 {
 	unsigned long tag, vadr;
@@ -1060,7 +1072,7 @@ void kvm_ttag(struct kvm_vcpu *vcpu, INST64 inst)
 	vcpu_set_gr(vcpu, inst.M46.r1, tag, 0);
 }
 
-int vcpu_tpa(struct kvm_vcpu *vcpu, u64 vadr, u64 *padr)
+int vcpu_tpa(struct kvm_vcpu *vcpu, u64 vadr, unsigned long *padr)
 {
 	struct thash_data *data;
 	union ia64_isr visr, pt_isr;
@@ -1131,7 +1143,6 @@ int vcpu_tpa(struct kvm_vcpu *vcpu, u64 vadr, u64 *padr)
 	return IA64_NO_FAULT;
 }
 
-
 int kvm_tpa(struct kvm_vcpu *vcpu, INST64 inst)
 {
 	unsigned long r1, r3;
@@ -1154,7 +1165,6 @@ void kvm_tak(struct kvm_vcpu *vcpu, INST64 inst)
 	vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
 }
 
-
 /************************************
  * Insert/Purge translation register/cache
  ************************************/
@@ -1385,7 +1395,6 @@ void kvm_mov_to_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
 	vcpu_set_itc(vcpu, r2);
 }
 
-
 void kvm_mov_from_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
 {
 	unsigned long r1;
@@ -1393,8 +1402,9 @@ void kvm_mov_from_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
 	r1 = vcpu_get_itc(vcpu);
 	vcpu_set_gr(vcpu, inst.M31.r1, r1, 0);
 }
+
 /**************************************************************************
-  struct kvm_vcpu*protection key register access routines
+  struct kvm_vcpu protection key register access routines
 **************************************************************************/
 
 unsigned long vcpu_get_pkr(struct kvm_vcpu *vcpu, unsigned long reg)
@@ -1407,20 +1417,6 @@ void vcpu_set_pkr(struct kvm_vcpu *vcpu, unsigned long reg, unsigned long val)
 	ia64_set_pkr(reg, val);
 }
 
-
-unsigned long vcpu_get_itir_on_fault(struct kvm_vcpu *vcpu, unsigned long ifa)
-{
-	union ia64_rr rr, rr1;
-
-	rr.val = vcpu_get_rr(vcpu, ifa);
-	rr1.val = 0;
-	rr1.ps = rr.ps;
-	rr1.rid = rr.rid;
-	return (rr1.val);
-}
-
-
-
 /********************************
  * Moves to privileged registers
 ********************************/
@@ -1464,8 +1460,6 @@ unsigned long vcpu_set_rr(struct kvm_vcpu *vcpu, unsigned long reg,
 	return (IA64_NO_FAULT);
 }
 
-
-
 void kvm_mov_to_rr(struct kvm_vcpu *vcpu, INST64 inst)
 {
 	unsigned long r3, r2;
@@ -1510,8 +1504,6 @@ void kvm_mov_to_pkr(struct kvm_vcpu *vcpu, INST64 inst)
 	vcpu_set_pkr(vcpu, r3, r2);
 }
 
-
-
 void kvm_mov_from_rr(struct kvm_vcpu *vcpu, INST64 inst)
 {
 	unsigned long r3, r1;
@@ -1557,7 +1549,6 @@ void kvm_mov_from_pmc(struct kvm_vcpu *vcpu, INST64 inst)
 	vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
 }
 
-
 unsigned long vcpu_get_cpuid(struct kvm_vcpu *vcpu, unsigned long reg)
 {
 	/* FIXME: This could get called as a result of a rsvd-reg fault */
@@ -1609,7 +1600,6 @@ unsigned long kvm_mov_to_cr(struct kvm_vcpu *vcpu, INST64 inst)
 	return 0;
 }
 
-
 unsigned long kvm_mov_from_cr(struct kvm_vcpu *vcpu, INST64 inst)
 {
 	unsigned long tgt = inst.M33.r1;
@@ -1633,8 +1623,6 @@ unsigned long kvm_mov_from_cr(struct kvm_vcpu *vcpu, INST64 inst)
 	return 0;
 }
 
-
-
 void vcpu_set_psr(struct kvm_vcpu *vcpu, unsigned long val)
 {
 
@@ -1651,8 +1639,8 @@ void vcpu_set_psr(struct kvm_vcpu *vcpu, unsigned long val)
 	 * Otherwise panic
 	 */
 	if (val & (IA64_PSR_PK | IA64_PSR_IS | IA64_PSR_VM))
-		panic_vm(vcpu, "Only support guests with vpsr.pk =0 \
-				& vpsr.is=0\n");
+		panic_vm(vcpu, "Only support guests with vpsr.pk =0 "
+				"& vpsr.is=0\n");
 
 	/*
 	 * For those IA64_PSR bits: id/da/dd/ss/ed/ia
@@ -1776,9 +1764,6 @@ void vcpu_bsw1(struct kvm_vcpu *vcpu)
 	}
 }
 
-
-
-
 void vcpu_rfi(struct kvm_vcpu *vcpu)
 {
 	unsigned long ifs, psr;
@@ -1796,7 +1781,6 @@ void vcpu_rfi(struct kvm_vcpu *vcpu)
 	regs->cr_iip = VCPU(vcpu, iip);
 }
 
-
 /*
    VPSR can't keep track of below bits of guest PSR
    This function gets guest PSR
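
The functional core of this patch is the new kvm_get_itc() helper: every read of the guest's interval time counter (ITC) now funnels through it, so that on SGI SN2 (Altix) systems the guest time base is read from the memory-mapped RTC instead of the AR.ITC register. To illustrate the offset arithmetic that vcpu_get_itc() and vcpu_set_itc() layer on top of that clock source, here is a minimal standalone C model; struct vmm_vcpu, host_clock(), and the model_* functions are stand-ins invented for this sketch (replacing the kernel's VMX()/VCPU() accessor macros), not kernel API.

/*
 * Standalone model of the guest ITC handling above.
 * host_clock() stands in for kvm_get_itc(); struct vmm_vcpu
 * replaces the VMX() accessor macros. Both are sketch-only names.
 */
#include <stdio.h>

struct vmm_vcpu {
	long itc_offset;		/* guest ITC = host clock + offset */
	unsigned long last_itc;		/* last value handed to the guest */
};

static unsigned long fake_host_clock = 1000;	/* stand-in for AR.ITC / SN2 RTC */

static unsigned long host_clock(void)
{
	return fake_host_clock;
}

/* Mirrors vcpu_get_itc(): apply the offset, never let guest time go backwards. */
static unsigned long model_get_itc(struct vmm_vcpu *v)
{
	unsigned long guest_itc = v->itc_offset + host_clock();

	if (guest_itc >= v->last_itc)
		v->last_itc = guest_itc;
	else
		guest_itc = v->last_itc;
	return guest_itc;
}

/* Mirrors the core of vcpu_set_itc(): remember the delta from the host clock. */
static void model_set_itc(struct vmm_vcpu *v, unsigned long val)
{
	v->itc_offset = val - host_clock();
}

int main(void)
{
	struct vmm_vcpu v = { 0, 0 };

	model_set_itc(&v, 5000);	/* guest writes AR.ITC = 5000 */
	fake_host_clock += 100;		/* host clock advances */
	printf("guest ITC: %lu\n", model_get_itc(&v));	/* prints 5100 */
	return 0;
}

In the patch itself, vcpu_set_itc() broadcasts the computed offset to every vCPU, which is why its loop bound changes from KVM_MAX_VCPUS to atomic_read(&kvm->online_vcpus).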
