diff options
Diffstat (limited to 'arch/powerpc/kvm/book3s.c')
| -rw-r--r-- | arch/powerpc/kvm/book3s.c | 399 | 
1 file changed, 349 insertions, 50 deletions
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index 700df6f1d32..c254c27f240 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c @@ -18,6 +18,8 @@  #include <linux/err.h>  #include <linux/export.h>  #include <linux/slab.h> +#include <linux/module.h> +#include <linux/miscdevice.h>  #include <asm/reg.h>  #include <asm/cputable.h> @@ -34,6 +36,7 @@  #include <linux/vmalloc.h>  #include <linux/highmem.h> +#include "book3s.h"  #include "trace.h"  #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU @@ -69,10 +72,54 @@ void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)  {  } +static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu) +{ +	if (!is_kvmppc_hv_enabled(vcpu->kvm)) +		return to_book3s(vcpu)->hior; +	return 0; +} + +static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu, +			unsigned long pending_now, unsigned long old_pending) +{ +	if (is_kvmppc_hv_enabled(vcpu->kvm)) +		return; +	if (pending_now) +		kvmppc_set_int_pending(vcpu, 1); +	else if (old_pending) +		kvmppc_set_int_pending(vcpu, 0); +} + +static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu) +{ +	ulong crit_raw; +	ulong crit_r1; +	bool crit; + +	if (is_kvmppc_hv_enabled(vcpu->kvm)) +		return false; + +	crit_raw = kvmppc_get_critical(vcpu); +	crit_r1 = kvmppc_get_gpr(vcpu, 1); + +	/* Truncate crit indicators in 32 bit mode */ +	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) { +		crit_raw &= 0xffffffff; +		crit_r1 &= 0xffffffff; +	} + +	/* Critical section when crit == r1 */ +	crit = (crit_raw == crit_r1); +	/* ... 
and we're in supervisor mode */ +	crit = crit && !(kvmppc_get_msr(vcpu) & MSR_PR); + +	return crit; +} +  void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)  { -	vcpu->arch.shared->srr0 = kvmppc_get_pc(vcpu); -	vcpu->arch.shared->srr1 = vcpu->arch.shared->msr | flags; +	kvmppc_set_srr0(vcpu, kvmppc_get_pc(vcpu)); +	kvmppc_set_srr1(vcpu, kvmppc_get_msr(vcpu) | flags);  	kvmppc_set_pc(vcpu, kvmppc_interrupt_offset(vcpu) + vec);  	vcpu->arch.mmu.reset_msr(vcpu);  } @@ -98,6 +145,7 @@ static int kvmppc_book3s_vec2irqprio(unsigned int vec)  	case 0xd00: prio = BOOK3S_IRQPRIO_DEBUG;		break;  	case 0xf20: prio = BOOK3S_IRQPRIO_ALTIVEC;		break;  	case 0xf40: prio = BOOK3S_IRQPRIO_VSX;			break; +	case 0xf60: prio = BOOK3S_IRQPRIO_FAC_UNAVAIL;		break;  	default:    prio = BOOK3S_IRQPRIO_MAX;			break;  	} @@ -126,28 +174,32 @@ void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)  	printk(KERN_INFO "Queueing interrupt %x\n", vec);  #endif  } - +EXPORT_SYMBOL_GPL(kvmppc_book3s_queue_irqprio);  void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)  {  	/* might as well deliver this straight away */  	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_PROGRAM, flags);  } +EXPORT_SYMBOL_GPL(kvmppc_core_queue_program);  void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)  {  	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);  } +EXPORT_SYMBOL_GPL(kvmppc_core_queue_dec);  int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)  {  	return test_bit(BOOK3S_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);  } +EXPORT_SYMBOL_GPL(kvmppc_core_pending_dec);  void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)  {  	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);  } +EXPORT_SYMBOL_GPL(kvmppc_core_dequeue_dec);  void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,                                  struct kvm_interrupt *irq) @@ -174,12 +226,12 @@ int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned 
int priority)  	switch (priority) {  	case BOOK3S_IRQPRIO_DECREMENTER: -		deliver = (vcpu->arch.shared->msr & MSR_EE) && !crit; +		deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;  		vec = BOOK3S_INTERRUPT_DECREMENTER;  		break;  	case BOOK3S_IRQPRIO_EXTERNAL:  	case BOOK3S_IRQPRIO_EXTERNAL_LEVEL: -		deliver = (vcpu->arch.shared->msr & MSR_EE) && !crit; +		deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;  		vec = BOOK3S_INTERRUPT_EXTERNAL;  		break;  	case BOOK3S_IRQPRIO_SYSTEM_RESET: @@ -224,6 +276,9 @@ int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)  	case BOOK3S_IRQPRIO_PERFORMANCE_MONITOR:  		vec = BOOK3S_INTERRUPT_PERFMON;  		break; +	case BOOK3S_IRQPRIO_FAC_UNAVAIL: +		vec = BOOK3S_INTERRUPT_FAC_UNAVAIL; +		break;  	default:  		deliver = 0;  		printk(KERN_ERR "KVM: Unknown interrupt: 0x%x\n", priority); @@ -285,12 +340,14 @@ int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)  	return 0;  } +EXPORT_SYMBOL_GPL(kvmppc_core_prepare_to_enter); -pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn) +pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, bool writing, +			bool *writable)  {  	ulong mp_pa = vcpu->arch.magic_page_pa; -	if (!(vcpu->arch.shared->msr & MSR_SF)) +	if (!(kvmppc_get_msr(vcpu) & MSR_SF))  		mp_pa = (uint32_t)mp_pa;  	/* Magic page override */ @@ -302,20 +359,23 @@ pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)  		pfn = (pfn_t)virt_to_phys((void*)shared_page) >> PAGE_SHIFT;  		get_page(pfn_to_page(pfn)); +		if (writable) +			*writable = true;  		return pfn;  	} -	return gfn_to_pfn(vcpu->kvm, gfn); +	return gfn_to_pfn_prot(vcpu->kvm, gfn, writing, writable);  } +EXPORT_SYMBOL_GPL(kvmppc_gfn_to_pfn);  static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data, -			 struct kvmppc_pte *pte) +			bool iswrite, struct kvmppc_pte *pte)  { -	int relocated = (vcpu->arch.shared->msr & (data ? MSR_DR : MSR_IR)); +	int relocated = (kvmppc_get_msr(vcpu) & (data ? 
MSR_DR : MSR_IR));  	int r;  	if (relocated) { -		r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data); +		r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data, iswrite);  	} else {  		pte->eaddr = eaddr;  		pte->raddr = eaddr & KVM_PAM; @@ -361,7 +421,7 @@ int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,  	vcpu->stat.st++; -	if (kvmppc_xlate(vcpu, *eaddr, data, &pte)) +	if (kvmppc_xlate(vcpu, *eaddr, data, true, &pte))  		return -ENOENT;  	*eaddr = pte.raddr; @@ -374,6 +434,7 @@ int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,  	return EMULATE_DONE;  } +EXPORT_SYMBOL_GPL(kvmppc_st);  int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,  		      bool data) @@ -383,7 +444,7 @@ int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,  	vcpu->stat.ld++; -	if (kvmppc_xlate(vcpu, *eaddr, data, &pte)) +	if (kvmppc_xlate(vcpu, *eaddr, data, false, &pte))  		goto nopte;  	*eaddr = pte.raddr; @@ -404,6 +465,7 @@ nopte:  mmio:  	return EMULATE_DO_MMIO;  } +EXPORT_SYMBOL_GPL(kvmppc_ld);  int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)  { @@ -419,6 +481,18 @@ void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)  {  } +int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, +				  struct kvm_sregs *sregs) +{ +	return vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs); +} + +int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, +				  struct kvm_sregs *sregs) +{ +	return vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs); +} +  int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)  {  	int i; @@ -428,18 +502,18 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)  	regs->ctr = kvmppc_get_ctr(vcpu);  	regs->lr = kvmppc_get_lr(vcpu);  	regs->xer = kvmppc_get_xer(vcpu); -	regs->msr = vcpu->arch.shared->msr; -	regs->srr0 = vcpu->arch.shared->srr0; -	regs->srr1 = vcpu->arch.shared->srr1; +	regs->msr = kvmppc_get_msr(vcpu); +	regs->srr0 = kvmppc_get_srr0(vcpu); +	
regs->srr1 = kvmppc_get_srr1(vcpu);  	regs->pid = vcpu->arch.pid; -	regs->sprg0 = vcpu->arch.shared->sprg0; -	regs->sprg1 = vcpu->arch.shared->sprg1; -	regs->sprg2 = vcpu->arch.shared->sprg2; -	regs->sprg3 = vcpu->arch.shared->sprg3; -	regs->sprg4 = vcpu->arch.shared->sprg4; -	regs->sprg5 = vcpu->arch.shared->sprg5; -	regs->sprg6 = vcpu->arch.shared->sprg6; -	regs->sprg7 = vcpu->arch.shared->sprg7; +	regs->sprg0 = kvmppc_get_sprg0(vcpu); +	regs->sprg1 = kvmppc_get_sprg1(vcpu); +	regs->sprg2 = kvmppc_get_sprg2(vcpu); +	regs->sprg3 = kvmppc_get_sprg3(vcpu); +	regs->sprg4 = kvmppc_get_sprg4(vcpu); +	regs->sprg5 = kvmppc_get_sprg5(vcpu); +	regs->sprg6 = kvmppc_get_sprg6(vcpu); +	regs->sprg7 = kvmppc_get_sprg7(vcpu);  	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)  		regs->gpr[i] = kvmppc_get_gpr(vcpu, i); @@ -457,16 +531,16 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)  	kvmppc_set_lr(vcpu, regs->lr);  	kvmppc_set_xer(vcpu, regs->xer);  	kvmppc_set_msr(vcpu, regs->msr); -	vcpu->arch.shared->srr0 = regs->srr0; -	vcpu->arch.shared->srr1 = regs->srr1; -	vcpu->arch.shared->sprg0 = regs->sprg0; -	vcpu->arch.shared->sprg1 = regs->sprg1; -	vcpu->arch.shared->sprg2 = regs->sprg2; -	vcpu->arch.shared->sprg3 = regs->sprg3; -	vcpu->arch.shared->sprg4 = regs->sprg4; -	vcpu->arch.shared->sprg5 = regs->sprg5; -	vcpu->arch.shared->sprg6 = regs->sprg6; -	vcpu->arch.shared->sprg7 = regs->sprg7; +	kvmppc_set_srr0(vcpu, regs->srr0); +	kvmppc_set_srr1(vcpu, regs->srr1); +	kvmppc_set_sprg0(vcpu, regs->sprg0); +	kvmppc_set_sprg1(vcpu, regs->sprg1); +	kvmppc_set_sprg2(vcpu, regs->sprg2); +	kvmppc_set_sprg3(vcpu, regs->sprg3); +	kvmppc_set_sprg4(vcpu, regs->sprg4); +	kvmppc_set_sprg5(vcpu, regs->sprg5); +	kvmppc_set_sprg6(vcpu, regs->sprg6); +	kvmppc_set_sprg7(vcpu, regs->sprg7);  	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)  		kvmppc_set_gpr(vcpu, i, regs->gpr[i]); @@ -495,23 +569,22 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg 
*reg)  	if (size > sizeof(val))  		return -EINVAL; -	r = kvmppc_get_one_reg(vcpu, reg->id, &val); - +	r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, reg->id, &val);  	if (r == -EINVAL) {  		r = 0;  		switch (reg->id) {  		case KVM_REG_PPC_DAR: -			val = get_reg_val(reg->id, vcpu->arch.shared->dar); +			val = get_reg_val(reg->id, kvmppc_get_dar(vcpu));  			break;  		case KVM_REG_PPC_DSISR: -			val = get_reg_val(reg->id, vcpu->arch.shared->dsisr); +			val = get_reg_val(reg->id, kvmppc_get_dsisr(vcpu));  			break;  		case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:  			i = reg->id - KVM_REG_PPC_FPR0; -			val = get_reg_val(reg->id, vcpu->arch.fpr[i]); +			val = get_reg_val(reg->id, VCPU_FPR(vcpu, i));  			break;  		case KVM_REG_PPC_FPSCR: -			val = get_reg_val(reg->id, vcpu->arch.fpscr); +			val = get_reg_val(reg->id, vcpu->arch.fp.fpscr);  			break;  #ifdef CONFIG_ALTIVEC  		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31: @@ -519,16 +592,30 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)  				r = -ENXIO;  				break;  			} -			val.vval = vcpu->arch.vr[reg->id - KVM_REG_PPC_VR0]; +			val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];  			break;  		case KVM_REG_PPC_VSCR:  			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {  				r = -ENXIO;  				break;  			} -			val = get_reg_val(reg->id, vcpu->arch.vscr.u[3]); +			val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]); +			break; +		case KVM_REG_PPC_VRSAVE: +			val = get_reg_val(reg->id, vcpu->arch.vrsave);  			break;  #endif /* CONFIG_ALTIVEC */ +#ifdef CONFIG_VSX +		case KVM_REG_PPC_VSR0 ... 
KVM_REG_PPC_VSR31: +			if (cpu_has_feature(CPU_FTR_VSX)) { +				long int i = reg->id - KVM_REG_PPC_VSR0; +				val.vsxval[0] = vcpu->arch.fp.fpr[i][0]; +				val.vsxval[1] = vcpu->arch.fp.fpr[i][1]; +			} else { +				r = -ENXIO; +			} +			break; +#endif /* CONFIG_VSX */  		case KVM_REG_PPC_DEBUG_INST: {  			u32 opcode = INS_TW;  			r = copy_to_user((u32 __user *)(long)reg->addr, @@ -544,6 +631,21 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)  			val = get_reg_val(reg->id, kvmppc_xics_get_icp(vcpu));  			break;  #endif /* CONFIG_KVM_XICS */ +		case KVM_REG_PPC_FSCR: +			val = get_reg_val(reg->id, vcpu->arch.fscr); +			break; +		case KVM_REG_PPC_TAR: +			val = get_reg_val(reg->id, vcpu->arch.tar); +			break; +		case KVM_REG_PPC_EBBHR: +			val = get_reg_val(reg->id, vcpu->arch.ebbhr); +			break; +		case KVM_REG_PPC_EBBRR: +			val = get_reg_val(reg->id, vcpu->arch.ebbrr); +			break; +		case KVM_REG_PPC_BESCR: +			val = get_reg_val(reg->id, vcpu->arch.bescr); +			break;  		default:  			r = -EINVAL;  			break; @@ -572,23 +674,22 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)  	if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))  		return -EFAULT; -	r = kvmppc_set_one_reg(vcpu, reg->id, &val); - +	r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, reg->id, &val);  	if (r == -EINVAL) {  		r = 0;  		switch (reg->id) {  		case KVM_REG_PPC_DAR: -			vcpu->arch.shared->dar = set_reg_val(reg->id, val); +			kvmppc_set_dar(vcpu, set_reg_val(reg->id, val));  			break;  		case KVM_REG_PPC_DSISR: -			vcpu->arch.shared->dsisr = set_reg_val(reg->id, val); +			kvmppc_set_dsisr(vcpu, set_reg_val(reg->id, val));  			break;  		case KVM_REG_PPC_FPR0 ... 
KVM_REG_PPC_FPR31:  			i = reg->id - KVM_REG_PPC_FPR0; -			vcpu->arch.fpr[i] = set_reg_val(reg->id, val); +			VCPU_FPR(vcpu, i) = set_reg_val(reg->id, val);  			break;  		case KVM_REG_PPC_FPSCR: -			vcpu->arch.fpscr = set_reg_val(reg->id, val); +			vcpu->arch.fp.fpscr = set_reg_val(reg->id, val);  			break;  #ifdef CONFIG_ALTIVEC  		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31: @@ -596,16 +697,34 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)  				r = -ENXIO;  				break;  			} -			vcpu->arch.vr[reg->id - KVM_REG_PPC_VR0] = val.vval; +			vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;  			break;  		case KVM_REG_PPC_VSCR:  			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {  				r = -ENXIO;  				break;  			} -			vcpu->arch.vscr.u[3] = set_reg_val(reg->id, val); +			vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val); +			break; +		case KVM_REG_PPC_VRSAVE: +			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) { +				r = -ENXIO; +				break; +			} +			vcpu->arch.vrsave = set_reg_val(reg->id, val);  			break;  #endif /* CONFIG_ALTIVEC */ +#ifdef CONFIG_VSX +		case KVM_REG_PPC_VSR0 ... 
KVM_REG_PPC_VSR31: +			if (cpu_has_feature(CPU_FTR_VSX)) { +				long int i = reg->id - KVM_REG_PPC_VSR0; +				vcpu->arch.fp.fpr[i][0] = val.vsxval[0]; +				vcpu->arch.fp.fpr[i][1] = val.vsxval[1]; +			} else { +				r = -ENXIO; +			} +			break; +#endif /* CONFIG_VSX */  #ifdef CONFIG_KVM_XICS  		case KVM_REG_PPC_ICP_STATE:  			if (!vcpu->arch.icp) { @@ -616,6 +735,21 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)  						set_reg_val(reg->id, val));  			break;  #endif /* CONFIG_KVM_XICS */ +		case KVM_REG_PPC_FSCR: +			vcpu->arch.fscr = set_reg_val(reg->id, val); +			break; +		case KVM_REG_PPC_TAR: +			vcpu->arch.tar = set_reg_val(reg->id, val); +			break; +		case KVM_REG_PPC_EBBHR: +			vcpu->arch.ebbhr = set_reg_val(reg->id, val); +			break; +		case KVM_REG_PPC_EBBRR: +			vcpu->arch.ebbrr = set_reg_val(reg->id, val); +			break; +		case KVM_REG_PPC_BESCR: +			vcpu->arch.bescr = set_reg_val(reg->id, val); +			break;  		default:  			r = -EINVAL;  			break; @@ -625,6 +759,27 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)  	return r;  } +void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) +{ +	vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu); +} + +void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu) +{ +	vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu); +} + +void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr) +{ +	vcpu->kvm->arch.kvm_ops->set_msr(vcpu, msr); +} +EXPORT_SYMBOL_GPL(kvmppc_set_msr); + +int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) +{ +	return vcpu->kvm->arch.kvm_ops->vcpu_run(kvm_run, vcpu); +} +  int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,                                    struct kvm_translation *tr)  { @@ -644,3 +799,147 @@ void kvmppc_decrementer_func(unsigned long data)  	kvmppc_core_queue_dec(vcpu);  	kvm_vcpu_kick(vcpu);  } + +struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) +{ +	return kvm->arch.kvm_ops->vcpu_create(kvm, id); +} 
+ +void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu) +{ +	vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu); +} + +int kvmppc_core_check_requests(struct kvm_vcpu *vcpu) +{ +	return vcpu->kvm->arch.kvm_ops->check_requests(vcpu); +} + +int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) +{ +	return kvm->arch.kvm_ops->get_dirty_log(kvm, log); +} + +void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, +			      struct kvm_memory_slot *dont) +{ +	kvm->arch.kvm_ops->free_memslot(free, dont); +} + +int kvmppc_core_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, +			       unsigned long npages) +{ +	return kvm->arch.kvm_ops->create_memslot(slot, npages); +} + +void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot) +{ +	kvm->arch.kvm_ops->flush_memslot(kvm, memslot); +} + +int kvmppc_core_prepare_memory_region(struct kvm *kvm, +				struct kvm_memory_slot *memslot, +				struct kvm_userspace_memory_region *mem) +{ +	return kvm->arch.kvm_ops->prepare_memory_region(kvm, memslot, mem); +} + +void kvmppc_core_commit_memory_region(struct kvm *kvm, +				struct kvm_userspace_memory_region *mem, +				const struct kvm_memory_slot *old) +{ +	kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old); +} + +int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) +{ +	return kvm->arch.kvm_ops->unmap_hva(kvm, hva); +} +EXPORT_SYMBOL_GPL(kvm_unmap_hva); + +int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end) +{ +	return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end); +} + +int kvm_age_hva(struct kvm *kvm, unsigned long hva) +{ +	return kvm->arch.kvm_ops->age_hva(kvm, hva); +} + +int kvm_test_age_hva(struct kvm *kvm, unsigned long hva) +{ +	return kvm->arch.kvm_ops->test_age_hva(kvm, hva); +} + +void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) +{ +	kvm->arch.kvm_ops->set_spte_hva(kvm, hva, pte); +} + +void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu) +{ +	
vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu); +} + +int kvmppc_core_init_vm(struct kvm *kvm) +{ + +#ifdef CONFIG_PPC64 +	INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables); +	INIT_LIST_HEAD(&kvm->arch.rtas_tokens); +#endif + +	return kvm->arch.kvm_ops->init_vm(kvm); +} + +void kvmppc_core_destroy_vm(struct kvm *kvm) +{ +	kvm->arch.kvm_ops->destroy_vm(kvm); + +#ifdef CONFIG_PPC64 +	kvmppc_rtas_tokens_free(kvm); +	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables)); +#endif +} + +int kvmppc_core_check_processor_compat(void) +{ +	/* +	 * We always return 0 for book3s. We check +	 * for compatability while loading the HV +	 * or PR module +	 */ +	return 0; +} + +static int kvmppc_book3s_init(void) +{ +	int r; + +	r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE); +	if (r) +		return r; +#ifdef CONFIG_KVM_BOOK3S_32_HANDLER +	r = kvmppc_book3s_init_pr(); +#endif +	return r; + +} + +static void kvmppc_book3s_exit(void) +{ +#ifdef CONFIG_KVM_BOOK3S_32_HANDLER +	kvmppc_book3s_exit_pr(); +#endif +	kvm_exit(); +} + +module_init(kvmppc_book3s_init); +module_exit(kvmppc_book3s_exit); + +/* On 32bit this is our one and only kernel module */ +#ifdef CONFIG_KVM_BOOK3S_32_HANDLER +MODULE_ALIAS_MISCDEV(KVM_MINOR); +MODULE_ALIAS("devname:kvm"); +#endif  | 
