author		Marc Zyngier <marc.zyngier@arm.com>	2013-02-06 11:29:35 +0000
committer	Marc Zyngier <marc.zyngier@arm.com>	2013-06-12 16:42:18 +0100
commit		e82e030556e42e823e174e0c3bd97988d1a09d1f (patch)
tree		4c394a68bfe1a1f4164f120fcf7b09f289bd7da2 /arch/arm64
parent		b4afad06c19e3489767532f86ff453a1d1e28b8c (diff)
arm64: KVM: 32bit guest fault injection
Add fault injection capability for 32bit guests.
Reviewed-by: Christopher Covington <cov@codeaurora.org>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Diffstat (limited to 'arch/arm64')
-rw-r--r--	arch/arm64/kvm/inject_fault.c	79
1 file changed, 78 insertions, 1 deletion
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
index 54f65627126..81a02a8762b 100644
--- a/arch/arm64/kvm/inject_fault.c
+++ b/arch/arm64/kvm/inject_fault.c
@@ -1,5 +1,5 @@
 /*
- * Fault injection for 64bit guests.
+ * Fault injection for both 32 and 64bit guests.
  *
  * Copyright (C) 2012,2013 - ARM Ltd
  * Author: Marc Zyngier <marc.zyngier@arm.com>
@@ -29,6 +29,74 @@
 				 PSR_I_BIT | PSR_D_BIT)
 #define EL1_EXCEPT_SYNC_OFFSET	0x200
 
+static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
+{
+	unsigned long cpsr;
+	unsigned long new_spsr_value = *vcpu_cpsr(vcpu);
+	bool is_thumb = (new_spsr_value & COMPAT_PSR_T_BIT);
+	u32 return_offset = (is_thumb) ? 4 : 0;
+	u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);
+
+	cpsr = mode | COMPAT_PSR_I_BIT;
+
+	if (sctlr & (1 << 30))
+		cpsr |= COMPAT_PSR_T_BIT;
+	if (sctlr & (1 << 25))
+		cpsr |= COMPAT_PSR_E_BIT;
+
+	*vcpu_cpsr(vcpu) = cpsr;
+
+	/* Note: These now point to the banked copies */
+	*vcpu_spsr(vcpu) = new_spsr_value;
+	*vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
+
+	/* Branch to exception vector */
+	if (sctlr & (1 << 13))
+		vect_offset += 0xffff0000;
+	else /* always have security exceptions */
+		vect_offset += vcpu_cp15(vcpu, c12_VBAR);
+
+	*vcpu_pc(vcpu) = vect_offset;
+}
+
+static void inject_undef32(struct kvm_vcpu *vcpu)
+{
+	prepare_fault32(vcpu, COMPAT_PSR_MODE_UND, 4);
+}
+
+/*
+ * Modelled after TakeDataAbortException() and TakePrefetchAbortException
+ * pseudocode.
+ */
+static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
+			 unsigned long addr)
+{
+	u32 vect_offset;
+	u32 *far, *fsr;
+	bool is_lpae;
+
+	if (is_pabt) {
+		vect_offset = 12;
+		far = &vcpu_cp15(vcpu, c6_IFAR);
+		fsr = &vcpu_cp15(vcpu, c5_IFSR);
+	} else { /* !iabt */
+		vect_offset = 16;
+		far = &vcpu_cp15(vcpu, c6_DFAR);
+		fsr = &vcpu_cp15(vcpu, c5_DFSR);
+	}
+
+	prepare_fault32(vcpu, COMPAT_PSR_MODE_ABT | COMPAT_PSR_A_BIT, vect_offset);
+
+	*far = addr;
+
+	/* Give the guest an IMPLEMENTATION DEFINED exception */
+	is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31);
+	if (is_lpae)
+		*fsr = 1 << 9 | 0x34;
+	else
+		*fsr = 0x14;
+}
+
 static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
 {
 	unsigned long cpsr = *vcpu_cpsr(vcpu);
@@ -98,6 +166,9 @@ static void inject_undef64(struct kvm_vcpu *vcpu)
  */
 void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
 {
+	if (!(vcpu->arch.hcr_el2 & HCR_RW))
+		inject_abt32(vcpu, false, addr);
+
 	inject_abt64(vcpu, false, addr);
 }
 
@@ -111,6 +182,9 @@ void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
  */
 void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
 {
+	if (!(vcpu->arch.hcr_el2 & HCR_RW))
+		inject_abt32(vcpu, true, addr);
+
 	inject_abt64(vcpu, true, addr);
 }
 
@@ -122,5 +196,8 @@ void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
  */
 void kvm_inject_undefined(struct kvm_vcpu *vcpu)
 {
+	if (!(vcpu->arch.hcr_el2 & HCR_RW))
+		inject_undef32(vcpu);
+
 	inject_undef64(vcpu);
 }
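Editor's note: the 32bit paths condense a few architectural rules worth spelling out. SCTLR.TE (bit 30) selects Thumb exception entry, SCTLR.EE (bit 25) big-endian entry, SCTLR.V (bit 13) the legacy high-vectors base at 0xffff0000 (otherwise vectors are VBAR-relative), and TTBCR.EAE (bit 31) the LPAE long-descriptor FSR format; the vector offsets 4, 12, and 16 are the undefined-instruction, prefetch-abort, and data-abort slots of the AArch32 vector table. The standalone C sketch below reproduces just the FSR encoding and vector-address computation under those rules; it is illustrative only, and the helper names are mine, not from the patch or the kernel:

	#include <stdint.h>
	#include <stdio.h>

	/* Architectural bit positions referenced by the patch (ARMv7-A). */
	#define SCTLR_V		(1u << 13)	/* 1: legacy high vectors at 0xffff0000 */
	#define SCTLR_EE	(1u << 25)	/* 1: big-endian exception entry */
	#define SCTLR_TE	(1u << 30)	/* 1: Thumb exception entry */
	#define TTBCR_EAE	(1u << 31)	/* 1: LPAE long-descriptor translation */

	/* Hypothetical helper mirroring the *fsr assignment in inject_abt32():
	 * report an IMPLEMENTATION DEFINED fault in whichever FSR format the
	 * guest's TTBCR says it is using. */
	static uint32_t guest_fsr(uint32_t ttbcr)
	{
		if (ttbcr & TTBCR_EAE)
			return (1u << 9) | 0x34;  /* FSR[9]: LPAE format; FSC 0x34 */
		return 0x14;                      /* short-descriptor value from the patch */
	}

	/* Hypothetical helper mirroring the "Branch to exception vector" logic
	 * in prepare_fault32(): offset is 4 (undef), 12 (pabt), or 16 (dabt). */
	static uint32_t guest_vector(uint32_t sctlr, uint32_t vbar, uint32_t offset)
	{
		if (sctlr & SCTLR_V)
			return 0xffff0000u + offset;  /* SCTLR.V selects high vectors */
		return vbar + offset;                 /* otherwise VBAR-relative */
	}

	int main(void)
	{
		/* Data abort for an LPAE guest with low vectors at VBAR = 0x8000. */
		printf("FSR    = 0x%08x\n", (unsigned)guest_fsr(TTBCR_EAE));
		printf("vector = 0x%08x\n", (unsigned)guest_vector(0, 0x8000, 16));
		return 0;
	}

Both FSR values report an IMPLEMENTATION DEFINED fault status, matching the comment in inject_abt32(): the guest only needs to observe a plausible abort, not a precisely classified one.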