Diffstat (limited to 'arch/powerpc/kvm/emulate.c')
| -rw-r--r-- | arch/powerpc/kvm/emulate.c | 503 |
1 file changed, 258 insertions(+), 245 deletions(-)
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index 4a9ac6640fa..da86d9ba347 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -13,6 +13,7 @@
  * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
  *
  * Copyright IBM Corp. 2007
+ * Copyright 2011 Freescale Semiconductor, Inc.
  *
  * Authors: Hollis Blanchard <hollisb@us.ibm.com>
  */
@@ -22,89 +23,188 @@
 #include <linux/types.h>
 #include <linux/string.h>
 #include <linux/kvm_host.h>
+#include <linux/clockchips.h>
 
 #include <asm/reg.h>
 #include <asm/time.h>
 #include <asm/byteorder.h>
 #include <asm/kvm_ppc.h>
 #include <asm/disassemble.h>
+#include <asm/ppc-opcode.h>
 #include "timing.h"
 #include "trace.h"
 
-#define OP_TRAP 3
-#define OP_TRAP_64 2
-
-#define OP_31_XOP_LWZX 23
-#define OP_31_XOP_LBZX 87
-#define OP_31_XOP_STWX 151
-#define OP_31_XOP_STBX 215
-#define OP_31_XOP_STBUX 247
-#define OP_31_XOP_LHZX 279
-#define OP_31_XOP_LHZUX 311
-#define OP_31_XOP_MFSPR 339
-#define OP_31_XOP_STHX 407
-#define OP_31_XOP_STHUX 439
-#define OP_31_XOP_MTSPR 467
-#define OP_31_XOP_DCBI 470
-#define OP_31_XOP_LWBRX 534
-#define OP_31_XOP_TLBSYNC 566
-#define OP_31_XOP_STWBRX 662
-#define OP_31_XOP_LHBRX 790
-#define OP_31_XOP_STHBRX 918
-
-#define OP_LWZ 32
-#define OP_LWZU 33
-#define OP_LBZ 34
-#define OP_LBZU 35
-#define OP_STW 36
-#define OP_STWU 37
-#define OP_STB 38
-#define OP_STBU 39
-#define OP_LHZ 40
-#define OP_LHZU 41
-#define OP_STH 44
-#define OP_STHU 45
-
-#ifdef CONFIG_PPC64
-static int kvmppc_dec_enabled(struct kvm_vcpu *vcpu)
-{
-	return 1;
-}
-#else
-static int kvmppc_dec_enabled(struct kvm_vcpu *vcpu)
-{
-	return vcpu->arch.tcr & TCR_DIE;
-}
-#endif
-
 void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
 {
 	unsigned long dec_nsec;
+	unsigned long long dec_time;
 
 	pr_debug("mtDEC: %x\n", vcpu->arch.dec);
-#ifdef CONFIG_PPC64
+	hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
+
+#ifdef CONFIG_PPC_BOOK3S
+	/* mtdec lowers the interrupt line when positive. */
+	kvmppc_core_dequeue_dec(vcpu);
+
 	/* POWER4+ triggers a dec interrupt if the value is < 0 */
 	if (vcpu->arch.dec & 0x80000000) {
-		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
 		kvmppc_core_queue_dec(vcpu);
 		return;
 	}
 #endif
-	if (kvmppc_dec_enabled(vcpu)) {
-		/* The decrementer ticks at the same rate as the timebase, so
-		 * that's how we convert the guest DEC value to the number of
-		 * host ticks. */
 
-		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
-		dec_nsec = vcpu->arch.dec;
-		dec_nsec *= 1000;
-		dec_nsec /= tb_ticks_per_usec;
-		hrtimer_start(&vcpu->arch.dec_timer, ktime_set(0, dec_nsec),
-			      HRTIMER_MODE_REL);
-		vcpu->arch.dec_jiffies = get_tb();
-	} else {
-		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
+#ifdef CONFIG_BOOKE
+	/* On BOOKE, DEC = 0 is as good as decrementer not enabled */
+	if (vcpu->arch.dec == 0)
+		return;
+#endif
+
+	/*
+	 * The decrementer ticks at the same rate as the timebase, so
+	 * that's how we convert the guest DEC value to the number of
+	 * host ticks.
+	 */
+	dec_time = vcpu->arch.dec;
+	/*
+	 * Guest timebase ticks at the same frequency as host decrementer.
+	 * So use the host decrementer calculations for decrementer emulation.
+	 */
+	dec_time = dec_time << decrementer_clockevent.shift;
+	do_div(dec_time, decrementer_clockevent.mult);
+	dec_nsec = do_div(dec_time, NSEC_PER_SEC);
+	hrtimer_start(&vcpu->arch.dec_timer,
+		      ktime_set(dec_time, dec_nsec), HRTIMER_MODE_REL);
+	vcpu->arch.dec_jiffies = get_tb();
+}
+
+u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb)
+{
+	u64 jd = tb - vcpu->arch.dec_jiffies;
+
+#ifdef CONFIG_BOOKE
+	if (vcpu->arch.dec < jd)
+		return 0;
+#endif
+
+	return vcpu->arch.dec - jd;
+}
+
+static int kvmppc_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
+{
+	enum emulation_result emulated = EMULATE_DONE;
+	ulong spr_val = kvmppc_get_gpr(vcpu, rs);
+
+	switch (sprn) {
+	case SPRN_SRR0:
+		kvmppc_set_srr0(vcpu, spr_val);
+		break;
+	case SPRN_SRR1:
+		kvmppc_set_srr1(vcpu, spr_val);
+		break;
+
+	/* XXX We need to context-switch the timebase for
+	 * watchdog and FIT. */
+	case SPRN_TBWL: break;
+	case SPRN_TBWU: break;
+
+	case SPRN_DEC:
+		vcpu->arch.dec = spr_val;
+		kvmppc_emulate_dec(vcpu);
+		break;
+
+	case SPRN_SPRG0:
+		kvmppc_set_sprg0(vcpu, spr_val);
+		break;
+	case SPRN_SPRG1:
+		kvmppc_set_sprg1(vcpu, spr_val);
+		break;
+	case SPRN_SPRG2:
+		kvmppc_set_sprg2(vcpu, spr_val);
+		break;
+	case SPRN_SPRG3:
+		kvmppc_set_sprg3(vcpu, spr_val);
+		break;
+
+	/* PIR can legally be written, but we ignore it */
+	case SPRN_PIR: break;
+
+	default:
+		emulated = vcpu->kvm->arch.kvm_ops->emulate_mtspr(vcpu, sprn,
+								  spr_val);
+		if (emulated == EMULATE_FAIL)
+			printk(KERN_INFO "mtspr: unknown spr "
+				"0x%x\n", sprn);
+		break;
 	}
+
+	kvmppc_set_exit_type(vcpu, EMULATED_MTSPR_EXITS);
+
+	return emulated;
+}
+
+static int kvmppc_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
+{
+	enum emulation_result emulated = EMULATE_DONE;
+	ulong spr_val = 0;
+
+	switch (sprn) {
+	case SPRN_SRR0:
+		spr_val = kvmppc_get_srr0(vcpu);
+		break;
+	case SPRN_SRR1:
+		spr_val = kvmppc_get_srr1(vcpu);
+		break;
+	case SPRN_PVR:
+		spr_val = vcpu->arch.pvr;
+		break;
+	case SPRN_PIR:
+		spr_val = vcpu->vcpu_id;
+		break;
+
+	/* Note: mftb and TBRL/TBWL are user-accessible, so
+	 * the guest can always access the real TB anyways.
+	 * In fact, we probably will never see these traps. */
+	case SPRN_TBWL:
+		spr_val = get_tb() >> 32;
+		break;
+	case SPRN_TBWU:
+		spr_val = get_tb();
+		break;
+
+	case SPRN_SPRG0:
+		spr_val = kvmppc_get_sprg0(vcpu);
+		break;
+	case SPRN_SPRG1:
+		spr_val = kvmppc_get_sprg1(vcpu);
+		break;
+	case SPRN_SPRG2:
+		spr_val = kvmppc_get_sprg2(vcpu);
+		break;
+	case SPRN_SPRG3:
+		spr_val = kvmppc_get_sprg3(vcpu);
+		break;
+	/* Note: SPRG4-7 are user-readable, so we don't get
+	 * a trap. */
+
+	case SPRN_DEC:
+		spr_val = kvmppc_get_dec(vcpu, get_tb());
+		break;
+	default:
+		emulated = vcpu->kvm->arch.kvm_ops->emulate_mfspr(vcpu, sprn,
+								  &spr_val);
+		if (unlikely(emulated == EMULATE_FAIL)) {
+			printk(KERN_INFO "mfspr: unknown spr "
+				"0x%x\n", sprn);
+		}
+		break;
+	}
+
+	if (emulated == EMULATE_DONE)
+		kvmppc_set_gpr(vcpu, rt, spr_val);
+	kvmppc_set_exit_type(vcpu, EMULATED_MFSPR_EXITS);
+
+	return emulated;
 }
 
 /* XXX to do:
@@ -119,213 +219,119 @@ void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
  * lmw
  * stmw
  *
- * XXX is_bigendian should depend on MMU mapping or MSR[LE]
  */
 /* XXX Should probably auto-generate instruction decoding for a particular core
  * from opcode tables in the future. */
 int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 {
-	u32 inst = vcpu->arch.last_inst;
-	u32 ea;
-	int ra;
-	int rb;
-	int rs;
-	int rt;
-	int sprn;
+	u32 inst = kvmppc_get_last_inst(vcpu);
+	int ra = get_ra(inst);
+	int rs = get_rs(inst);
+	int rt = get_rt(inst);
+	int sprn = get_sprn(inst);
 	enum emulation_result emulated = EMULATE_DONE;
 	int advance = 1;
 
 	/* this default type might be overwritten by subcategories */
 	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);
 
-	pr_debug(KERN_INFO "Emulating opcode %d / %d\n", get_op(inst), get_xop(inst));
+	pr_debug("Emulating opcode %d / %d\n", get_op(inst), get_xop(inst));
 
 	switch (get_op(inst)) {
 	case OP_TRAP:
-#ifdef CONFIG_PPC64
+#ifdef CONFIG_PPC_BOOK3S
 	case OP_TRAP_64:
+		kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
 #else
-		vcpu->arch.esr |= ESR_PTR;
+		kvmppc_core_queue_program(vcpu,
+					  vcpu->arch.shared->esr | ESR_PTR);
 #endif
-		kvmppc_core_queue_program(vcpu);
 		advance = 0;
 		break;
 
 	case 31:
 		switch (get_xop(inst)) {
+		case OP_31_XOP_TRAP:
+#ifdef CONFIG_64BIT
+		case OP_31_XOP_TRAP_64:
+#endif
+#ifdef CONFIG_PPC_BOOK3S
+			kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
+#else
+			kvmppc_core_queue_program(vcpu,
+					vcpu->arch.shared->esr | ESR_PTR);
+#endif
+			advance = 0;
+			break;
 		case OP_31_XOP_LWZX:
-			rt = get_rt(inst);
 			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
 			break;
 
 		case OP_31_XOP_LBZX:
-			rt = get_rt(inst);
 			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
 			break;
 
+		case OP_31_XOP_LBZUX:
+			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
+			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
+			break;
+
 		case OP_31_XOP_STWX:
-			rs = get_rs(inst);
 			emulated = kvmppc_handle_store(run, vcpu,
-						       vcpu->arch.gpr[rs],
+						       kvmppc_get_gpr(vcpu, rs),
 						       4, 1);
 			break;
 
 		case OP_31_XOP_STBX:
-			rs = get_rs(inst);
 			emulated = kvmppc_handle_store(run, vcpu,
-						       vcpu->arch.gpr[rs],
+						       kvmppc_get_gpr(vcpu, rs),
 						       1, 1);
 			break;
 
 		case OP_31_XOP_STBUX:
-			rs = get_rs(inst);
-			ra = get_ra(inst);
-			rb = get_rb(inst);
-
-			ea = vcpu->arch.gpr[rb];
-			if (ra)
-				ea += vcpu->arch.gpr[ra];
-
 			emulated = kvmppc_handle_store(run, vcpu,
-						       vcpu->arch.gpr[rs],
+						       kvmppc_get_gpr(vcpu, rs),
 						       1, 1);
-			vcpu->arch.gpr[rs] = ea;
+			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
+			break;
+
+		case OP_31_XOP_LHAX:
+			emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
 			break;
 
 		case OP_31_XOP_LHZX:
-			rt = get_rt(inst);
 			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
 			break;
 
 		case OP_31_XOP_LHZUX:
-			rt = get_rt(inst);
-			ra = get_ra(inst);
-			rb = get_rb(inst);
-
-			ea = vcpu->arch.gpr[rb];
-			if (ra)
-				ea += vcpu->arch.gpr[ra];
-
 			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
-			vcpu->arch.gpr[ra] = ea;
+			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
 			break;
 
 		case OP_31_XOP_MFSPR:
-			sprn = get_sprn(inst);
-			rt = get_rt(inst);
-
-			switch (sprn) {
-			case SPRN_SRR0:
-				vcpu->arch.gpr[rt] = vcpu->arch.srr0; break;
-			case SPRN_SRR1:
-				vcpu->arch.gpr[rt] = vcpu->arch.srr1; break;
-			case SPRN_PVR:
-				vcpu->arch.gpr[rt] = vcpu->arch.pvr; break;
-			case SPRN_PIR:
-				vcpu->arch.gpr[rt] = vcpu->vcpu_id; break;
-			case SPRN_MSSSR0:
-				vcpu->arch.gpr[rt] = 0; break;
-
-			/* Note: mftb and TBRL/TBWL are user-accessible, so
-			 * the guest can always access the real TB anyways.
-			 * In fact, we probably will never see these traps. */
-			case SPRN_TBWL:
-				vcpu->arch.gpr[rt] = get_tb() >> 32; break;
-			case SPRN_TBWU:
-				vcpu->arch.gpr[rt] = get_tb(); break;
-
-			case SPRN_SPRG0:
-				vcpu->arch.gpr[rt] = vcpu->arch.sprg0; break;
-			case SPRN_SPRG1:
-				vcpu->arch.gpr[rt] = vcpu->arch.sprg1; break;
-			case SPRN_SPRG2:
-				vcpu->arch.gpr[rt] = vcpu->arch.sprg2; break;
-			case SPRN_SPRG3:
-				vcpu->arch.gpr[rt] = vcpu->arch.sprg3; break;
-			/* Note: SPRG4-7 are user-readable, so we don't get
-			 * a trap. */
-
-			case SPRN_DEC:
-			{
-				u64 jd = get_tb() - vcpu->arch.dec_jiffies;
-				vcpu->arch.gpr[rt] = vcpu->arch.dec - jd;
-				pr_debug(KERN_INFO "mfDEC: %x - %llx = %lx\n", vcpu->arch.dec, jd, vcpu->arch.gpr[rt]);
-				break;
-			}
-			default:
-				emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, rt);
-				if (emulated == EMULATE_FAIL) {
-					printk("mfspr: unknown spr %x\n", sprn);
-					vcpu->arch.gpr[rt] = 0;
-				}
-				break;
-			}
+			emulated = kvmppc_emulate_mfspr(vcpu, sprn, rt);
 			break;
 
 		case OP_31_XOP_STHX:
-			rs = get_rs(inst);
-			ra = get_ra(inst);
-			rb = get_rb(inst);
-
 			emulated = kvmppc_handle_store(run, vcpu,
-						       vcpu->arch.gpr[rs],
+						       kvmppc_get_gpr(vcpu, rs),
 						       2, 1);
 			break;
 
 		case OP_31_XOP_STHUX:
-			rs = get_rs(inst);
-			ra = get_ra(inst);
-			rb = get_rb(inst);
-
-			ea = vcpu->arch.gpr[rb];
-			if (ra)
-				ea += vcpu->arch.gpr[ra];
-
 			emulated = kvmppc_handle_store(run, vcpu,
-						       vcpu->arch.gpr[rs],
+						       kvmppc_get_gpr(vcpu, rs),
 						       2, 1);
-			vcpu->arch.gpr[ra] = ea;
+			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
 			break;
 
 		case OP_31_XOP_MTSPR:
-			sprn = get_sprn(inst);
-			rs = get_rs(inst);
-			switch (sprn) {
-			case SPRN_SRR0:
-				vcpu->arch.srr0 = vcpu->arch.gpr[rs]; break;
-			case SPRN_SRR1:
-				vcpu->arch.srr1 = vcpu->arch.gpr[rs]; break;
-
-			/* XXX We need to context-switch the timebase for
-			 * watchdog and FIT. */
-			case SPRN_TBWL: break;
-			case SPRN_TBWU: break;
-
-			case SPRN_MSSSR0: break;
-
-			case SPRN_DEC:
-				vcpu->arch.dec = vcpu->arch.gpr[rs];
-				kvmppc_emulate_dec(vcpu);
-				break;
-
-			case SPRN_SPRG0:
-				vcpu->arch.sprg0 = vcpu->arch.gpr[rs]; break;
-			case SPRN_SPRG1:
-				vcpu->arch.sprg1 = vcpu->arch.gpr[rs]; break;
-			case SPRN_SPRG2:
-				vcpu->arch.sprg2 = vcpu->arch.gpr[rs]; break;
-			case SPRN_SPRG3:
-				vcpu->arch.sprg3 = vcpu->arch.gpr[rs]; break;
-
-			default:
-				emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, rs);
-				if (emulated == EMULATE_FAIL)
-					printk("mtspr: unknown spr %x\n", sprn);
-				break;
-			}
+			emulated = kvmppc_emulate_mtspr(vcpu, sprn, rs);
 			break;
 
+		case OP_31_XOP_DCBST:
+		case OP_31_XOP_DCBF:
 		case OP_31_XOP_DCBI:
 			/* Do nothing. The guest is performing dcbi because
 			 * hardware DMA is not snooped by the dcache, but
@@ -335,7 +341,6 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 			break;
 
 		case OP_31_XOP_LWBRX:
-			rt = get_rt(inst);
 			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
 			break;
 
@@ -343,27 +348,18 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 			break;
 
 		case OP_31_XOP_STWBRX:
-			rs = get_rs(inst);
-			ra = get_ra(inst);
-			rb = get_rb(inst);
-
 			emulated = kvmppc_handle_store(run, vcpu,
-						       vcpu->arch.gpr[rs],
+						       kvmppc_get_gpr(vcpu, rs),
 						       4, 0);
 			break;
 
 		case OP_31_XOP_LHBRX:
-			rt = get_rt(inst);
 			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
 			break;
 
 		case OP_31_XOP_STHBRX:
-			rs = get_rs(inst);
-			ra = get_ra(inst);
-			rb = get_rb(inst);
-
 			emulated = kvmppc_handle_store(run, vcpu,
-						       vcpu->arch.gpr[rs],
+						       kvmppc_get_gpr(vcpu, rs),
 						       2, 0);
 			break;
 
@@ -374,81 +370,92 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		break;
 
 	case OP_LWZ:
-		rt = get_rt(inst);
 		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
 		break;
 
-	case OP_LWZU:
-		ra = get_ra(inst);
+	/* TBD: Add support for other 64 bit load variants like ldu, ldux, ldx etc. */
+	case OP_LD:
 		rt = get_rt(inst);
+		emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
+		break;
+
+	case OP_LWZU:
 		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
-		vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
+		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
 		break;
 
 	case OP_LBZ:
-		rt = get_rt(inst);
 		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
 		break;
 
 	case OP_LBZU:
-		ra = get_ra(inst);
-		rt = get_rt(inst);
 		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
-		vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
+		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
 		break;
 
 	case OP_STW:
-		rs = get_rs(inst);
-		emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
+		emulated = kvmppc_handle_store(run, vcpu,
+					       kvmppc_get_gpr(vcpu, rs),
 					       4, 1);
 		break;
 
-	case OP_STWU:
-		ra = get_ra(inst);
+	/* TBD: Add support for other 64 bit store variants like stdu, stdux, stdx etc. */
+	case OP_STD:
 		rs = get_rs(inst);
-		emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
+		emulated = kvmppc_handle_store(run, vcpu,
+					       kvmppc_get_gpr(vcpu, rs),
+					       8, 1);
+		break;
+
+	case OP_STWU:
+		emulated = kvmppc_handle_store(run, vcpu,
+					       kvmppc_get_gpr(vcpu, rs),
 					       4, 1);
-		vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
+		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
 		break;
 
 	case OP_STB:
-		rs = get_rs(inst);
-		emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
+		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
 					       1, 1);
 		break;
 
 	case OP_STBU:
-		ra = get_ra(inst);
-		rs = get_rs(inst);
-		emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
+		emulated = kvmppc_handle_store(run, vcpu,
+					       kvmppc_get_gpr(vcpu, rs),
 					       1, 1);
-		vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
+		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
 		break;
 
 	case OP_LHZ:
-		rt = get_rt(inst);
 		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
 		break;
 
 	case OP_LHZU:
-		ra = get_ra(inst);
-		rt = get_rt(inst);
 		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
-		vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
+		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
+		break;
+
+	case OP_LHA:
+		emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
+		break;
+
+	case OP_LHAU:
+		emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
+		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
 		break;
 
 	case OP_STH:
-		rs = get_rs(inst);
-		emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
+		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
 					       2, 1);
 		break;
 
 	case OP_STHU:
-		ra = get_ra(inst);
-		rs = get_rs(inst);
-		emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
+		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
 					       2, 1);
-		vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
+		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
 		break;
 
 	default:
@@ -456,18 +463,24 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	}
 
 	if (emulated == EMULATE_FAIL) {
-		emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance);
-		if (emulated == EMULATE_FAIL) {
+		emulated = vcpu->kvm->arch.kvm_ops->emulate_op(run, vcpu, inst,
+							       &advance);
+		if (emulated == EMULATE_AGAIN) {
+			advance = 0;
+		} else if (emulated == EMULATE_FAIL) {
 			advance = 0;
 			printk(KERN_ERR "Couldn't emulate instruction 0x%08x "
 			       "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst));
+			kvmppc_core_queue_program(vcpu, 0);
 		}
 	}
 
-	trace_kvm_ppc_instr(inst, vcpu->arch.pc, emulated);
+	trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);
 
+	/* Advance past emulated instruction. */
 	if (advance)
-		vcpu->arch.pc += 4; /* Advance past emulated instruction. */
+		kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);
 
 	return emulated;
 }
+EXPORT_SYMBOL_GPL(kvmppc_emulate_instruction);
