Diffstat (limited to 'arch/powerpc/kvm/powerpc.c')
-rw-r--r--	arch/powerpc/kvm/powerpc.c | 293
1 file changed, 183 insertions(+), 110 deletions(-)
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 07c0106fab7..61c738ab128 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -26,6 +26,7 @@
 #include <linux/fs.h>
 #include <linux/slab.h>
 #include <linux/file.h>
+#include <linux/module.h>
 #include <asm/cputable.h>
 #include <asm/uaccess.h>
 #include <asm/kvm_ppc.h>
@@ -39,6 +40,12 @@
 #define CREATE_TRACE_POINTS
 #include "trace.h"
 
+struct kvmppc_ops *kvmppc_hv_ops;
+EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
+struct kvmppc_ops *kvmppc_pr_ops;
+EXPORT_SYMBOL_GPL(kvmppc_pr_ops);
+
+
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
 {
 	return !!(v->arch.pending_exceptions) ||
@@ -50,7 +57,6 @@ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
 	return 1;
 }
 
-#ifndef CONFIG_KVM_BOOK3S_64_HV
 /*
  * Common checks before entering the guest world.  Call with interrupts
  * disabled.
@@ -62,14 +68,16 @@ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
  */
 int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
 {
-	int r = 1;
+	int r;
+
+	WARN_ON(irqs_disabled());
+	hard_irq_disable();
 
-	WARN_ON_ONCE(!irqs_disabled());
 	while (true) {
 		if (need_resched()) {
 			local_irq_enable();
 			cond_resched();
-			local_irq_disable();
+			hard_irq_disable();
 			continue;
 		}
 
@@ -95,7 +103,7 @@ int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
 			local_irq_enable();
 			trace_kvm_check_requests(vcpu);
 			r = kvmppc_core_check_requests(vcpu);
-			local_irq_disable();
+			hard_irq_disable();
 			if (r > 0)
 				continue;
 			break;
@@ -107,25 +115,36 @@ int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
 			continue;
 		}
 
-#ifdef CONFIG_PPC64
-		/* lazy EE magic */
-		hard_irq_disable();
-		if (lazy_irq_pending()) {
-			/* Got an interrupt in between, try again */
-			local_irq_enable();
-			local_irq_disable();
-			kvm_guest_exit();
-			continue;
-		}
-#endif
-
 		kvm_guest_enter();
-		break;
+		return 1;
 	}
 
+	/* return to host */
+	local_irq_enable();
 	return r;
 }
-#endif /* CONFIG_KVM_BOOK3S_64_HV */
+EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);
+
+#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
+static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
+{
+	struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
+	int i;
+
+	shared->sprg0 = swab64(shared->sprg0);
+	shared->sprg1 = swab64(shared->sprg1);
+	shared->sprg2 = swab64(shared->sprg2);
+	shared->sprg3 = swab64(shared->sprg3);
+	shared->srr0 = swab64(shared->srr0);
+	shared->srr1 = swab64(shared->srr1);
+	shared->dar = swab64(shared->dar);
+	shared->msr = swab64(shared->msr);
+	shared->dsisr = swab32(shared->dsisr);
+	shared->int_pending = swab32(shared->int_pending);
+	for (i = 0; i < ARRAY_SIZE(shared->sr); i++)
+		shared->sr[i] = swab32(shared->sr[i]);
+}
+#endif
 
 int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
 {
@@ -137,7 +156,7 @@ int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
 	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
 	unsigned long r2 = 0;
 
-	if (!(vcpu->arch.shared->msr & MSR_SF)) {
+	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
 		/* 32 bit mode */
 		param1 &= 0xffffffff;
 		param2 &= 0xffffffff;
@@ -148,8 +167,28 @@ int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
 	switch (nr) {
 	case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
 	{
-		vcpu->arch.magic_page_pa = param1;
-		vcpu->arch.magic_page_ea = param2;
+#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
+		/* Book3S can be little endian, find it out here */
+		int shared_big_endian = true;
+		if (vcpu->arch.intr_msr & MSR_LE)
+			shared_big_endian = false;
+		if (shared_big_endian != vcpu->arch.shared_big_endian)
+			kvmppc_swab_shared(vcpu);
+		vcpu->arch.shared_big_endian = shared_big_endian;
+#endif
+
+		if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) {
+			/*
+			 * Older versions of the Linux magic page code had
+			 * a bug where they would map their trampoline code
+			 * NX. If that's the case, remove !PR NX capability.
+			 */
+			vcpu->arch.disable_kernel_nx = true;
+			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+		}
+
+		vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
+		vcpu->arch.magic_page_ea = param2 & ~0xfffULL;
 
 		r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;
@@ -179,6 +218,7 @@ int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
 
 	return r;
 }
+EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);
 
 int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
 {
@@ -192,11 +232,9 @@ int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
 	if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
 		goto out;
 
-#ifdef CONFIG_KVM_BOOK3S_64_HV
 	/* HV KVM can only do PAPR mode for now */
-	if (!vcpu->arch.papr_enabled)
+	if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
 		goto out;
-#endif
 
 #ifdef CONFIG_KVM_BOOKE_HV
 	if (!cpu_has_feature(CPU_FTR_EMB_HV))
@@ -209,6 +247,7 @@ out:
 	vcpu->arch.sane = r;
 	return r ? 0 : -EINVAL;
 }
+EXPORT_SYMBOL_GPL(kvmppc_sanity_check);
 
 int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
 {
@@ -243,6 +282,7 @@ int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
 
 	return r;
 }
+EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);
 
 int kvm_arch_hardware_enable(void *garbage)
 {
@@ -269,10 +309,35 @@ void kvm_arch_check_processor_compat(void *rtn)
 
 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 {
-	if (type)
-		return -EINVAL;
-
+	struct kvmppc_ops *kvm_ops = NULL;
+	/*
+	 * if we have both HV and PR enabled, default is HV
+	 */
+	if (type == 0) {
+		if (kvmppc_hv_ops)
+			kvm_ops = kvmppc_hv_ops;
+		else
+			kvm_ops = kvmppc_pr_ops;
+		if (!kvm_ops)
+			goto err_out;
+	} else if (type == KVM_VM_PPC_HV) {
+		if (!kvmppc_hv_ops)
+			goto err_out;
+		kvm_ops = kvmppc_hv_ops;
+	} else if (type == KVM_VM_PPC_PR) {
+		if (!kvmppc_pr_ops)
+			goto err_out;
+		kvm_ops = kvmppc_pr_ops;
+	} else
+		goto err_out;
+
+	if (kvm_ops->owner && !try_module_get(kvm_ops->owner))
+		return -ENOENT;
+
+	kvm->arch.kvm_ops = kvm_ops;
 	return kvmppc_core_init_vm(kvm);
+err_out:
+	return -EINVAL;
 }
 
 void kvm_arch_destroy_vm(struct kvm *kvm)
@@ -292,6 +357,9 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 
 	kvmppc_core_destroy_vm(kvm);
 	mutex_unlock(&kvm->lock);
+
+	/* drop the module reference */
+	module_put(kvm->arch.kvm_ops->owner);
 }
 
 void kvm_arch_sync_events(struct kvm *kvm)
@@ -301,6 +369,10 @@ void kvm_arch_sync_events(struct kvm *kvm)
 int kvm_dev_ioctl_check_extension(long ext)
 {
 	int r;
+	/* FIXME!!
+	 * Should some of this be vm ioctl ? is it possible now ?
+	 */
+	int hv_enabled = kvmppc_hv_ops ? 1 : 0;
 
 	switch (ext) {
 #ifdef CONFIG_BOOKE
@@ -320,58 +392,68 @@ int kvm_dev_ioctl_check_extension(long ext)
 	case KVM_CAP_DEVICE_CTRL:
 		r = 1;
 		break;
-#ifndef CONFIG_KVM_BOOK3S_64_HV
 	case KVM_CAP_PPC_PAIRED_SINGLES:
 	case KVM_CAP_PPC_OSI:
 	case KVM_CAP_PPC_GET_PVINFO:
 #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
 	case KVM_CAP_SW_TLB:
 #endif
-#ifdef CONFIG_KVM_MPIC
-	case KVM_CAP_IRQ_MPIC:
-#endif
-		r = 1;
+		/* We support this only for PR */
+		r = !hv_enabled;
 		break;
+#ifdef CONFIG_KVM_MMIO
 	case KVM_CAP_COALESCED_MMIO:
 		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
 		break;
 #endif
+#ifdef CONFIG_KVM_MPIC
+	case KVM_CAP_IRQ_MPIC:
+		r = 1;
+		break;
+#endif
+
 #ifdef CONFIG_PPC_BOOK3S_64
 	case KVM_CAP_SPAPR_TCE:
 	case KVM_CAP_PPC_ALLOC_HTAB:
 	case KVM_CAP_PPC_RTAS:
+	case KVM_CAP_PPC_FIXUP_HCALL:
 #ifdef CONFIG_KVM_XICS
 	case KVM_CAP_IRQ_XICS:
 #endif
 		r = 1;
 		break;
 #endif /* CONFIG_PPC_BOOK3S_64 */
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	case KVM_CAP_PPC_SMT:
-		r = threads_per_core;
+		if (hv_enabled)
+			r = threads_per_subcore;
+		else
+			r = 0;
 		break;
 	case KVM_CAP_PPC_RMA:
-		r = 1;
+		r = hv_enabled;
 		/* PPC970 requires an RMA */
-		if (cpu_has_feature(CPU_FTR_ARCH_201))
+		if (r && cpu_has_feature(CPU_FTR_ARCH_201))
 			r = 2;
 		break;
 #endif
 	case KVM_CAP_SYNC_MMU:
-#ifdef CONFIG_KVM_BOOK3S_64_HV
-		r = cpu_has_feature(CPU_FTR_ARCH_206) ? 1 : 0;
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+		if (hv_enabled)
+			r = cpu_has_feature(CPU_FTR_ARCH_206) ? 1 : 0;
+		else
+			r = 0;
 #elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
 		r = 1;
 #else
 		r = 0;
-		break;
 #endif
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+		break;
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	case KVM_CAP_PPC_HTAB_FD:
-		r = 1;
+		r = hv_enabled;
 		break;
 #endif
-		break;
 	case KVM_CAP_NR_VCPUS:
 		/*
 		 * Recommending a number of CPUs is somewhat arbitrary; we
@@ -379,11 +461,10 @@ int kvm_dev_ioctl_check_extension(long ext)
 		 * will have secondary threads "offline"), and for other KVM
 		 * implementations just count online CPUs.
 		 */
-#ifdef CONFIG_KVM_BOOK3S_64_HV
-		r = num_present_cpus();
-#else
-		r = num_online_cpus();
-#endif
+		if (hv_enabled)
+			r = num_present_cpus();
+		else
+			r = num_online_cpus();
 		break;
 	case KVM_CAP_MAX_VCPUS:
 		r = KVM_MAX_VCPUS;
@@ -407,15 +488,16 @@ long kvm_arch_dev_ioctl(struct file *filp,
 	return -EINVAL;
 }
 
-void kvm_arch_free_memslot(struct kvm_memory_slot *free,
+void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
 			   struct kvm_memory_slot *dont)
 {
-	kvmppc_core_free_memslot(free, dont);
+	kvmppc_core_free_memslot(kvm, free, dont);
 }
 
-int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
+int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
+			    unsigned long npages)
 {
-	return kvmppc_core_create_memslot(slot, npages);
+	return kvmppc_core_create_memslot(kvm, slot, npages);
 }
 
 void kvm_arch_memslots_updated(struct kvm *kvm)
@@ -608,14 +690,14 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
 		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
 		break;
 	case KVM_MMIO_REG_FPR:
-		vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
+		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
 		break;
 #ifdef CONFIG_PPC_BOOK3S
 	case KVM_MMIO_REG_QPR:
 		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
 		break;
 	case KVM_MMIO_REG_FQPR:
-		vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
+		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
 		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
 		break;
 #endif
@@ -625,9 +707,19 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
 }
 
 int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
-                       unsigned int rt, unsigned int bytes, int is_bigendian)
+		       unsigned int rt, unsigned int bytes,
+		       int is_default_endian)
 {
 	int idx, ret;
+	int is_bigendian;
+
+	if (kvmppc_need_byteswap(vcpu)) {
+		/* Default endianness is "little endian". */
+		is_bigendian = !is_default_endian;
+	} else {
+		/* Default endianness is "big endian". */
+		is_bigendian = is_default_endian;
+	}
 
 	if (bytes > sizeof(run->mmio.data)) {
 		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
@@ -659,24 +751,35 @@ int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
 	return EMULATE_DO_MMIO;
 }
+EXPORT_SYMBOL_GPL(kvmppc_handle_load);
 
 /* Same as above, but sign extends */
 int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
-                        unsigned int rt, unsigned int bytes, int is_bigendian)
+			unsigned int rt, unsigned int bytes,
+			int is_default_endian)
 {
 	int r;
 
 	vcpu->arch.mmio_sign_extend = 1;
-	r = kvmppc_handle_load(run, vcpu, rt, bytes, is_bigendian);
+	r = kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian);
 
 	return r;
 }
 
 int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
-                        u64 val, unsigned int bytes, int is_bigendian)
+			u64 val, unsigned int bytes, int is_default_endian)
 {
 	void *data = run->mmio.data;
 	int idx, ret;
+	int is_bigendian;
+
+	if (kvmppc_need_byteswap(vcpu)) {
+		/* Default endianness is "little endian". */
+		is_bigendian = !is_default_endian;
+	} else {
+		/* Default endianness is "big endian". */
+		is_bigendian = is_default_endian;
+	}
 
 	if (bytes > sizeof(run->mmio.data)) {
 		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
@@ -720,6 +823,7 @@ int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
 	return EMULATE_DO_MMIO;
 }
+EXPORT_SYMBOL_GPL(kvmppc_handle_store);
 
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
@@ -953,10 +1057,10 @@ static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
 	u32 inst_nop = 0x60000000;
 #ifdef CONFIG_KVM_BOOKE_HV
 	u32 inst_sc1 = 0x44000022;
-	pvinfo->hcall[0] = inst_sc1;
-	pvinfo->hcall[1] = inst_nop;
-	pvinfo->hcall[2] = inst_nop;
-	pvinfo->hcall[3] = inst_nop;
+	pvinfo->hcall[0] = cpu_to_be32(inst_sc1);
+	pvinfo->hcall[1] = cpu_to_be32(inst_nop);
+	pvinfo->hcall[2] = cpu_to_be32(inst_nop);
+	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
 #else
 	u32 inst_lis = 0x3c000000;
 	u32 inst_ori = 0x60000000;
@@ -972,10 +1076,10 @@ static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
 	 *    sc
 	 *    nop
 	 */
-	pvinfo->hcall[0] = inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask);
-	pvinfo->hcall[1] = inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask);
-	pvinfo->hcall[2] = inst_sc;
-	pvinfo->hcall[3] = inst_nop;
+	pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask));
+	pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask));
+	pvinfo->hcall[2] = cpu_to_be32(inst_sc);
+	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#endif
 
 	pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;
@@ -1024,52 +1128,12 @@ long kvm_arch_vm_ioctl(struct file *filp,
 		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce);
 		goto out;
 	}
-#endif /* CONFIG_PPC_BOOK3S_64 */
-
-#ifdef CONFIG_KVM_BOOK3S_64_HV
-	case KVM_ALLOCATE_RMA: {
-		struct kvm_allocate_rma rma;
-		struct kvm *kvm = filp->private_data;
-
-		r = kvm_vm_ioctl_allocate_rma(kvm, &rma);
-		if (r >= 0 && copy_to_user(argp, &rma, sizeof(rma)))
-			r = -EFAULT;
-		break;
-	}
-
-	case KVM_PPC_ALLOCATE_HTAB: {
-		u32 htab_order;
-
-		r = -EFAULT;
-		if (get_user(htab_order, (u32 __user *)argp))
-			break;
-		r = kvmppc_alloc_reset_hpt(kvm, &htab_order);
-		if (r)
-			break;
-		r = -EFAULT;
-		if (put_user(htab_order, (u32 __user *)argp))
-			break;
-		r = 0;
-		break;
-	}
-
-	case KVM_PPC_GET_HTAB_FD: {
-		struct kvm_get_htab_fd ghf;
-
-		r = -EFAULT;
-		if (copy_from_user(&ghf, argp, sizeof(ghf)))
-			break;
-		r = kvm_vm_ioctl_get_htab_fd(kvm, &ghf);
-		break;
-	}
-#endif /* CONFIG_KVM_BOOK3S_64_HV */
-
-#ifdef CONFIG_PPC_BOOK3S_64
 	case KVM_PPC_GET_SMMU_INFO: {
 		struct kvm_ppc_smmu_info info;
+		struct kvm *kvm = filp->private_data;
 
 		memset(&info, 0, sizeof(info));
-		r = kvm_vm_ioctl_get_smmu_info(kvm, &info);
+		r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
 		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
 			r = -EFAULT;
 		break;
@@ -1080,11 +1144,15 @@ long kvm_arch_vm_ioctl(struct file *filp,
 		r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
 		break;
 	}
-#endif /* CONFIG_PPC_BOOK3S_64 */
+	default: {
+		struct kvm *kvm = filp->private_data;
+		r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
+	}
+#else /* CONFIG_PPC_BOOK3S_64 */
 	default:
 		r = -ENOTTY;
+#endif
 	}
-
 out:
 	return r;
 }
@@ -1106,22 +1174,26 @@ long kvmppc_alloc_lpid(void)
 
 	return lpid;
 }
+EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);
 
 void kvmppc_claim_lpid(long lpid)
 {
 	set_bit(lpid, lpid_inuse);
 }
+EXPORT_SYMBOL_GPL(kvmppc_claim_lpid);
 
 void kvmppc_free_lpid(long lpid)
 {
 	clear_bit(lpid, lpid_inuse);
 }
+EXPORT_SYMBOL_GPL(kvmppc_free_lpid);
 
 void kvmppc_init_lpid(unsigned long nr_lpids_param)
 {
 	nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
 	memset(lpid_inuse, 0, sizeof(lpid_inuse));
 }
+EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
 
 int kvm_arch_init(void *opaque)
 {
@@ -1130,4 +1202,5 @@ int kvm_arch_init(void *opaque)
 }
 
 void kvm_arch_exit(void)
 {
+
 }
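
Note: with the kvmppc_ops split above, userspace can ask for a specific KVM flavour at VM creation time by passing KVM_VM_PPC_HV or KVM_VM_PPC_PR as the machine type to KVM_CREATE_VM; type 0 keeps the old behaviour, preferring HV when both backends are registered. A minimal userspace sketch, assuming uapi headers from a kernel with this change (which define the two KVM_VM_PPC_* constants); not the kernel's own test code:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>        /* KVM_CREATE_VM, KVM_VM_PPC_PR */

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	if (kvm < 0) {
		perror("/dev/kvm");
		return 1;
	}
	/* Request the PR flavour explicitly; kvm_arch_init_vm() above
	 * returns -EINVAL when kvmppc_pr_ops is not registered. */
	int vm = ioctl(kvm, KVM_CREATE_VM, KVM_VM_PPC_PR);
	if (vm < 0)
		perror("KVM_CREATE_VM");
	else
		printf("PR-flavour VM created, fd %d\n", vm);
	return 0;
}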
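
Note: the is_default_endian handling added to kvmppc_handle_load()/kvmppc_handle_store() reduces to an XOR of two flags: the MMIO access is big-endian exactly when the instruction's default byte order agrees with the guest's current one. A standalone truth-table sketch; mmio_is_bigendian() and need_byteswap are illustrative stand-ins for the in-kernel logic and kvmppc_need_byteswap(vcpu), not kernel APIs:

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the decision in kvmppc_handle_load/store: when the guest
 * byteswaps (MSR_LE), the "default" endianness is little endian, so a
 * default-endian access is big-endian only if the guest does not swap. */
static bool mmio_is_bigendian(bool need_byteswap, bool is_default_endian)
{
	return need_byteswap ? !is_default_endian : is_default_endian;
}

int main(void)
{
	for (int swap = 0; swap <= 1; swap++)
		for (int def = 0; def <= 1; def++)
			printf("need_byteswap=%d is_default_endian=%d -> big-endian=%d\n",
			       swap, def, (int)mmio_is_bigendian(swap, def));
	return 0;
}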
