Diffstat (limited to 'arch/s390/kvm/kvm-s390.c')
-rw-r--r--  arch/s390/kvm/kvm-s390.c | 863
1 file changed, 730 insertions(+), 133 deletions(-)
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 776dafe918d..2f3e14fe91a 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -11,6 +11,7 @@   *               Christian Borntraeger <borntraeger@de.ibm.com>   *               Heiko Carstens <heiko.carstens@de.ibm.com>   *               Christian Ehrhardt <ehrhardt@de.ibm.com> + *               Jason J. Herne <jjherne@us.ibm.com>   */  #include <linux/compiler.h> @@ -51,6 +52,8 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {  	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },  	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },  	{ "instruction_lctl", VCPU_STAT(instruction_lctl) }, +	{ "instruction_stctl", VCPU_STAT(instruction_stctl) }, +	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },  	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },  	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },  	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) }, @@ -66,8 +69,10 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {  	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },  	{ "instruction_stap", VCPU_STAT(instruction_stap) },  	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) }, +	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },  	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },  	{ "instruction_chsc", VCPU_STAT(instruction_chsc) }, +	{ "instruction_essa", VCPU_STAT(instruction_essa) },  	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },  	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },  	{ "instruction_tprot", VCPU_STAT(instruction_tprot) }, @@ -89,7 +94,7 @@ unsigned long *vfacilities;  static struct gmap_notifier gmap_notifier;  /* test availability of vfacility */ -static inline int test_vfacility(unsigned long nr) +int test_vfacility(unsigned long nr)  {  	return __test_facility(nr, (void *) vfacilities);  } @@ -152,11 +157,16 @@ int kvm_dev_ioctl_check_extension(long ext)  #ifdef CONFIG_KVM_S390_UCONTROL  	case KVM_CAP_S390_UCONTROL:  #endif +	case KVM_CAP_ASYNC_PF:  	case KVM_CAP_SYNC_REGS:  	case KVM_CAP_ONE_REG:  	case KVM_CAP_ENABLE_CAP:  	case KVM_CAP_S390_CSS_SUPPORT: +	case KVM_CAP_IRQFD:  	case KVM_CAP_IOEVENTFD: +	case KVM_CAP_DEVICE_CTRL: +	case KVM_CAP_ENABLE_CAP_VM: +	case KVM_CAP_VM_ATTRIBUTES:  		r = 1;  		break;  	case KVM_CAP_NR_VCPUS: @@ -175,6 +185,25 @@ int kvm_dev_ioctl_check_extension(long ext)  	return r;  } +static void kvm_s390_sync_dirty_log(struct kvm *kvm, +					struct kvm_memory_slot *memslot) +{ +	gfn_t cur_gfn, last_gfn; +	unsigned long address; +	struct gmap *gmap = kvm->arch.gmap; + +	down_read(&gmap->mm->mmap_sem); +	/* Loop over all guest pages */ +	last_gfn = memslot->base_gfn + memslot->npages; +	for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) { +		address = gfn_to_hva_memslot(memslot, cur_gfn); + +		if (gmap_test_and_clear_dirty(address, gmap)) +			mark_page_dirty(kvm, cur_gfn); +	} +	up_read(&gmap->mm->mmap_sem); +} +  /* Section: vm related */  /*   * Get (and clear) the dirty memory log for a memory slot. 
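Illustration (not part of the patch): the kvm_s390_sync_dirty_log() helper added above, together with the kvm_vm_ioctl_get_dirty_log() implementation in the next hunk, is what makes the generic KVM_GET_DIRTY_LOG ioctl useful on s390. A minimal userspace sketch of consuming it; "vm_fd", "slot" and "memory_size" are hypothetical inputs supplied by the VMM:

/* Illustrative only: fetch (and thereby clear) the dirty bitmap for one
 * memslot via KVM_GET_DIRTY_LOG.
 */
#include <linux/kvm.h>
#include <stdlib.h>
#include <sys/ioctl.h>

static unsigned long *fetch_dirty_bitmap(int vm_fd, __u32 slot,
					  unsigned long memory_size)
{
	unsigned long npages = memory_size / 4096;	/* 4K guest pages */
	size_t len = ((npages + 63) / 64) * sizeof(unsigned long);
	unsigned long *bitmap = calloc(1, len);	/* one bit per guest page */
	struct kvm_dirty_log log = {
		.slot = slot,
		.dirty_bitmap = bitmap,
	};

	if (!bitmap)
		return NULL;
	/* With this patch the s390 side first syncs the gmap dirty bits into
	 * the memslot bitmap, copies the bitmap out and then clears it. */
	if (ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log) < 0) {
		free(bitmap);
		return NULL;
	}
	return bitmap;
}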
@@ -182,7 +211,129 @@ int kvm_dev_ioctl_check_extension(long ext)  int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,  			       struct kvm_dirty_log *log)  { -	return 0; +	int r; +	unsigned long n; +	struct kvm_memory_slot *memslot; +	int is_dirty = 0; + +	mutex_lock(&kvm->slots_lock); + +	r = -EINVAL; +	if (log->slot >= KVM_USER_MEM_SLOTS) +		goto out; + +	memslot = id_to_memslot(kvm->memslots, log->slot); +	r = -ENOENT; +	if (!memslot->dirty_bitmap) +		goto out; + +	kvm_s390_sync_dirty_log(kvm, memslot); +	r = kvm_get_dirty_log(kvm, log, &is_dirty); +	if (r) +		goto out; + +	/* Clear the dirty log */ +	if (is_dirty) { +		n = kvm_dirty_bitmap_bytes(memslot); +		memset(memslot->dirty_bitmap, 0, n); +	} +	r = 0; +out: +	mutex_unlock(&kvm->slots_lock); +	return r; +} + +static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap) +{ +	int r; + +	if (cap->flags) +		return -EINVAL; + +	switch (cap->cap) { +	case KVM_CAP_S390_IRQCHIP: +		kvm->arch.use_irqchip = 1; +		r = 0; +		break; +	default: +		r = -EINVAL; +		break; +	} +	return r; +} + +static int kvm_s390_mem_control(struct kvm *kvm, struct kvm_device_attr *attr) +{ +	int ret; +	unsigned int idx; +	switch (attr->attr) { +	case KVM_S390_VM_MEM_ENABLE_CMMA: +		ret = -EBUSY; +		mutex_lock(&kvm->lock); +		if (atomic_read(&kvm->online_vcpus) == 0) { +			kvm->arch.use_cmma = 1; +			ret = 0; +		} +		mutex_unlock(&kvm->lock); +		break; +	case KVM_S390_VM_MEM_CLR_CMMA: +		mutex_lock(&kvm->lock); +		idx = srcu_read_lock(&kvm->srcu); +		page_table_reset_pgste(kvm->arch.gmap->mm, 0, TASK_SIZE, false); +		srcu_read_unlock(&kvm->srcu, idx); +		mutex_unlock(&kvm->lock); +		ret = 0; +		break; +	default: +		ret = -ENXIO; +		break; +	} +	return ret; +} + +static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr) +{ +	int ret; + +	switch (attr->group) { +	case KVM_S390_VM_MEM_CTRL: +		ret = kvm_s390_mem_control(kvm, attr); +		break; +	default: +		ret = -ENXIO; +		break; +	} + +	return ret; +} + +static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr) +{ +	return -ENXIO; +} + +static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr) +{ +	int ret; + +	switch (attr->group) { +	case KVM_S390_VM_MEM_CTRL: +		switch (attr->attr) { +		case KVM_S390_VM_MEM_ENABLE_CMMA: +		case KVM_S390_VM_MEM_CLR_CMMA: +			ret = 0; +			break; +		default: +			ret = -ENXIO; +			break; +		} +		break; +	default: +		ret = -ENXIO; +		break; +	} + +	return ret;  }  long kvm_arch_vm_ioctl(struct file *filp, @@ -190,6 +341,7 @@ long kvm_arch_vm_ioctl(struct file *filp,  {  	struct kvm *kvm = filp->private_data;  	void __user *argp = (void __user *)arg; +	struct kvm_device_attr attr;  	int r;  	switch (ioctl) { @@ -202,6 +354,47 @@ long kvm_arch_vm_ioctl(struct file *filp,  		r = kvm_s390_inject_vm(kvm, &s390int);  		break;  	} +	case KVM_ENABLE_CAP: { +		struct kvm_enable_cap cap; +		r = -EFAULT; +		if (copy_from_user(&cap, argp, sizeof(cap))) +			break; +		r = kvm_vm_ioctl_enable_cap(kvm, &cap); +		break; +	} +	case KVM_CREATE_IRQCHIP: { +		struct kvm_irq_routing_entry routing; + +		r = -EINVAL; +		if (kvm->arch.use_irqchip) { +			/* Set up dummy routing. 
*/ +			memset(&routing, 0, sizeof(routing)); +			kvm_set_irq_routing(kvm, &routing, 0, 0); +			r = 0; +		} +		break; +	} +	case KVM_SET_DEVICE_ATTR: { +		r = -EFAULT; +		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr))) +			break; +		r = kvm_s390_vm_set_attr(kvm, &attr); +		break; +	} +	case KVM_GET_DEVICE_ATTR: { +		r = -EFAULT; +		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr))) +			break; +		r = kvm_s390_vm_get_attr(kvm, &attr); +		break; +	} +	case KVM_HAS_DEVICE_ATTR: { +		r = -EFAULT; +		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr))) +			break; +		r = kvm_s390_vm_has_attr(kvm, &attr); +		break; +	}  	default:  		r = -ENOTTY;  	} @@ -213,6 +406,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)  {  	int rc;  	char debug_name[16]; +	static unsigned long sca_offset;  	rc = -EINVAL;  #ifdef CONFIG_KVM_S390_UCONTROL @@ -234,6 +428,10 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)  	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);  	if (!kvm->arch.sca)  		goto out_err; +	spin_lock(&kvm_lock); +	sca_offset = (sca_offset + 16) & 0x7f0; +	kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset); +	spin_unlock(&kvm_lock);  	sprintf(debug_name, "kvm-%u", current->pid); @@ -243,6 +441,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)  	spin_lock_init(&kvm->arch.float_int.lock);  	INIT_LIST_HEAD(&kvm->arch.float_int.list); +	init_waitqueue_head(&kvm->arch.ipte_wq);  	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);  	VM_EVENT(kvm, 3, "%s", "vm created"); @@ -254,9 +453,13 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)  		if (!kvm->arch.gmap)  			goto out_nogmap;  		kvm->arch.gmap->private = kvm; +		kvm->arch.gmap->pfault_enabled = 0;  	}  	kvm->arch.css_support = 0; +	kvm->arch.use_irqchip = 0; + +	spin_lock_init(&kvm->arch.start_stop_lock);  	return 0;  out_nogmap: @@ -271,6 +474,8 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)  {  	VCPU_EVENT(vcpu, 3, "%s", "free cpu");  	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id); +	kvm_s390_clear_local_irqs(vcpu); +	kvm_clear_async_pf_completion_queue(vcpu);  	if (!kvm_is_ucontrol(vcpu->kvm)) {  		clear_bit(63 - vcpu->vcpu_id,  			  (unsigned long *) &vcpu->kvm->arch.sca->mcn); @@ -283,7 +488,10 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)  	if (kvm_is_ucontrol(vcpu->kvm))  		gmap_free(vcpu->arch.gmap); +	if (kvm_s390_cmma_enabled(vcpu->kvm)) +		kvm_s390_vcpu_unsetup_cmma(vcpu);  	free_page((unsigned long)(vcpu->arch.sie_block)); +  	kvm_vcpu_uninit(vcpu);  	kmem_cache_free(kvm_vcpu_cache, vcpu);  } @@ -315,11 +523,15 @@ void kvm_arch_destroy_vm(struct kvm *kvm)  	debug_unregister(kvm->arch.dbf);  	if (!kvm_is_ucontrol(kvm))  		gmap_free(kvm->arch.gmap); +	kvm_s390_destroy_adapters(kvm); +	kvm_s390_clear_float_irqs(kvm);  }  /* Section: vcpu related */  int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)  { +	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID; +	kvm_clear_async_pf_completion_queue(vcpu);  	if (kvm_is_ucontrol(vcpu->kvm)) {  		vcpu->arch.gmap = gmap_alloc(current->mm);  		if (!vcpu->arch.gmap) @@ -343,10 +555,11 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)  void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)  { -	save_fp_regs(&vcpu->arch.host_fpregs); +	save_fp_ctl(&vcpu->arch.host_fpregs.fpc); +	save_fp_regs(vcpu->arch.host_fpregs.fprs);  	save_access_regs(vcpu->arch.host_acrs); -	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK; -	restore_fp_regs(&vcpu->arch.guest_fpregs); +	
restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc); +	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);  	restore_access_regs(vcpu->run->s.regs.acrs);  	gmap_enable(vcpu->arch.gmap);  	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); @@ -356,9 +569,11 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)  {  	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);  	gmap_disable(vcpu->arch.gmap); -	save_fp_regs(&vcpu->arch.guest_fpregs); +	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc); +	save_fp_regs(vcpu->arch.guest_fpregs.fprs);  	save_access_regs(vcpu->run->s.regs.acrs); -	restore_fp_regs(&vcpu->arch.host_fpregs); +	restore_fp_ctl(&vcpu->arch.host_fpregs.fpc); +	restore_fp_regs(vcpu->arch.host_fpregs.fprs);  	restore_access_regs(vcpu->arch.host_acrs);  } @@ -377,7 +592,11 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)  	vcpu->arch.guest_fpregs.fpc = 0;  	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));  	vcpu->arch.sie_block->gbea = 1; -	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); +	vcpu->arch.sie_block->pp = 0; +	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID; +	kvm_clear_async_pf_completion_queue(vcpu); +	kvm_s390_vcpu_stop(vcpu); +	kvm_s390_clear_local_irqs(vcpu);  }  int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) @@ -385,29 +604,62 @@ int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)  	return 0;  } +void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu) +{ +	free_page(vcpu->arch.sie_block->cbrlo); +	vcpu->arch.sie_block->cbrlo = 0; +} + +int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu) +{ +	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL); +	if (!vcpu->arch.sie_block->cbrlo) +		return -ENOMEM; + +	vcpu->arch.sie_block->ecb2 |= 0x80; +	vcpu->arch.sie_block->ecb2 &= ~0x08; +	return 0; +} +  int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)  { +	int rc = 0; +  	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |  						    CPUSTAT_SM |  						    CPUSTAT_STOPPED |  						    CPUSTAT_GED);  	vcpu->arch.sie_block->ecb   = 6; +	if (test_vfacility(50) && test_vfacility(73)) +		vcpu->arch.sie_block->ecb |= 0x10; +  	vcpu->arch.sie_block->ecb2  = 8; -	vcpu->arch.sie_block->eca   = 0xC1002001U; +	vcpu->arch.sie_block->eca   = 0xD1002000U; +	if (sclp_has_siif()) +		vcpu->arch.sie_block->eca |= 1;  	vcpu->arch.sie_block->fac   = (int) (long) vfacilities; +	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE | +				      ICTL_TPROT; + +	if (kvm_s390_cmma_enabled(vcpu->kvm)) { +		rc = kvm_s390_vcpu_setup_cmma(vcpu); +		if (rc) +			return rc; +	}  	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);  	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,  		     (unsigned long) vcpu);  	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;  	get_cpu_id(&vcpu->arch.cpu_id);  	vcpu->arch.cpu_id.version = 0xff; -	return 0; +	return rc;  }  struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,  				      unsigned int id)  {  	struct kvm_vcpu *vcpu; +	struct sie_page *sie_page;  	int rc = -EINVAL;  	if (id >= KVM_MAX_VCPUS) @@ -419,12 +671,13 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,  	if (!vcpu)  		goto out; -	vcpu->arch.sie_block = (struct kvm_s390_sie_block *) -					get_zeroed_page(GFP_KERNEL); - -	if (!vcpu->arch.sie_block) +	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL); +	if (!sie_page)  		goto out_free_cpu; +	vcpu->arch.sie_block = &sie_page->sie_block; +	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb; +  	
vcpu->arch.sie_block->icpua = id;  	if (!kvm_is_ucontrol(kvm)) {  		if (!kvm->arch.sca) { @@ -443,11 +696,8 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,  	spin_lock_init(&vcpu->arch.local_int.lock);  	INIT_LIST_HEAD(&vcpu->arch.local_int.list);  	vcpu->arch.local_int.float_int = &kvm->arch.float_int; -	spin_lock(&kvm->arch.float_int.lock); -	kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;  	vcpu->arch.local_int.wq = &vcpu->wq;  	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags; -	spin_unlock(&kvm->arch.float_int.lock);  	rc = kvm_vcpu_init(vcpu, kvm, id);  	if (rc) @@ -467,9 +717,7 @@ out:  int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)  { -	/* kvm common code refers to this, but never calls it */ -	BUG(); -	return 0; +	return kvm_cpu_has_interrupt(vcpu);  }  void s390_vcpu_block(struct kvm_vcpu *vcpu) @@ -508,7 +756,7 @@ static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)  	kvm_for_each_vcpu(i, vcpu, kvm) {  		/* match against both prefix pages */ -		if (vcpu->arch.sie_block->prefix == (address & ~0x1000UL)) { +		if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {  			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);  			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);  			exit_sie_sync(vcpu); @@ -545,6 +793,26 @@ static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,  		r = put_user(vcpu->arch.sie_block->ckc,  			     (u64 __user *)reg->addr);  		break; +	case KVM_REG_S390_PFTOKEN: +		r = put_user(vcpu->arch.pfault_token, +			     (u64 __user *)reg->addr); +		break; +	case KVM_REG_S390_PFCOMPARE: +		r = put_user(vcpu->arch.pfault_compare, +			     (u64 __user *)reg->addr); +		break; +	case KVM_REG_S390_PFSELECT: +		r = put_user(vcpu->arch.pfault_select, +			     (u64 __user *)reg->addr); +		break; +	case KVM_REG_S390_PP: +		r = put_user(vcpu->arch.sie_block->pp, +			     (u64 __user *)reg->addr); +		break; +	case KVM_REG_S390_GBEA: +		r = put_user(vcpu->arch.sie_block->gbea, +			     (u64 __user *)reg->addr); +		break;  	default:  		break;  	} @@ -574,6 +842,26 @@ static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,  		r = get_user(vcpu->arch.sie_block->ckc,  			     (u64 __user *)reg->addr);  		break; +	case KVM_REG_S390_PFTOKEN: +		r = get_user(vcpu->arch.pfault_token, +			     (u64 __user *)reg->addr); +		break; +	case KVM_REG_S390_PFCOMPARE: +		r = get_user(vcpu->arch.pfault_compare, +			     (u64 __user *)reg->addr); +		break; +	case KVM_REG_S390_PFSELECT: +		r = get_user(vcpu->arch.pfault_select, +			     (u64 __user *)reg->addr); +		break; +	case KVM_REG_S390_PP: +		r = get_user(vcpu->arch.sie_block->pp, +			     (u64 __user *)reg->addr); +		break; +	case KVM_REG_S390_GBEA: +		r = get_user(vcpu->arch.sie_block->gbea, +			     (u64 __user *)reg->addr); +		break;  	default:  		break;  	} @@ -618,9 +906,12 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,  int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)  { +	if (test_fp_ctl(fpu->fpc)) +		return -EINVAL;  	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs)); -	vcpu->arch.guest_fpregs.fpc = fpu->fpc & FPC_VALID_MASK; -	restore_fp_regs(&vcpu->arch.guest_fpregs); +	vcpu->arch.guest_fpregs.fpc = fpu->fpc; +	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc); +	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);  	return 0;  } @@ -650,10 +941,40 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,  	return -EINVAL; /* not implemented yet */  } +#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | 
\ +			      KVM_GUESTDBG_USE_HW_BP | \ +			      KVM_GUESTDBG_ENABLE) +  int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,  					struct kvm_guest_debug *dbg)  { -	return -EINVAL; /* not implemented yet */ +	int rc = 0; + +	vcpu->guest_debug = 0; +	kvm_s390_clear_bp_data(vcpu); + +	if (dbg->control & ~VALID_GUESTDBG_FLAGS) +		return -EINVAL; + +	if (dbg->control & KVM_GUESTDBG_ENABLE) { +		vcpu->guest_debug = dbg->control; +		/* enforce guest PER */ +		atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags); + +		if (dbg->control & KVM_GUESTDBG_USE_HW_BP) +			rc = kvm_s390_import_bp_data(vcpu, dbg); +	} else { +		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags); +		vcpu->arch.guestdbg.last_bp = 0; +	} + +	if (rc) { +		vcpu->guest_debug = 0; +		kvm_s390_clear_bp_data(vcpu); +		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags); +	} + +	return rc;  }  int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, @@ -668,8 +989,27 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,  	return -EINVAL; /* not implemented yet */  } +bool kvm_s390_cmma_enabled(struct kvm *kvm) +{ +	if (!MACHINE_IS_LPAR) +		return false; +	/* only enable for z10 and later */ +	if (!MACHINE_HAS_EDAT1) +		return false; +	if (!kvm->arch.use_cmma) +		return false; +	return true; +} + +static bool ibs_enabled(struct kvm_vcpu *vcpu) +{ +	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS; +} +  static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)  { +retry: +	s390_vcpu_unblock(vcpu);  	/*  	 * We use MMU_RELOAD just to re-arm the ipte notifier for the  	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock. @@ -677,28 +1017,153 @@ static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)  	 * already finished. We might race against a second unmapper that  	 * wants to set the blocking bit. Lets just retry the request loop.  	 */ -	while (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) { +	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {  		int rc;  		rc = gmap_ipte_notify(vcpu->arch.gmap, -				      vcpu->arch.sie_block->prefix, +				      kvm_s390_get_prefix(vcpu),  				      PAGE_SIZE * 2);  		if (rc)  			return rc; -		s390_vcpu_unblock(vcpu); +		goto retry;  	} + +	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) { +		if (!ibs_enabled(vcpu)) { +			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1); +			atomic_set_mask(CPUSTAT_IBS, +					&vcpu->arch.sie_block->cpuflags); +		} +		goto retry; +	} + +	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) { +		if (ibs_enabled(vcpu)) { +			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0); +			atomic_clear_mask(CPUSTAT_IBS, +					  &vcpu->arch.sie_block->cpuflags); +		} +		goto retry; +	} +  	return 0;  } -static int __vcpu_run(struct kvm_vcpu *vcpu) +/** + * kvm_arch_fault_in_page - fault-in guest page if necessary + * @vcpu: The corresponding virtual cpu + * @gpa: Guest physical address + * @writable: Whether the page should be writable or not + * + * Make sure that a guest page has been faulted-in on the host. + * + * Return: Zero on success, negative error code otherwise. + */ +long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable) +{ +	struct mm_struct *mm = current->mm; +	hva_t hva; +	long rc; + +	hva = gmap_fault(gpa, vcpu->arch.gmap); +	if (IS_ERR_VALUE(hva)) +		return (long)hva; +	down_read(&mm->mmap_sem); +	rc = get_user_pages(current, mm, hva, 1, writable, 0, NULL, NULL); +	up_read(&mm->mmap_sem); + +	return rc < 0 ? 
rc : 0; +} + +static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token, +				      unsigned long token) +{ +	struct kvm_s390_interrupt inti; +	inti.parm64 = token; + +	if (start_token) { +		inti.type = KVM_S390_INT_PFAULT_INIT; +		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &inti)); +	} else { +		inti.type = KVM_S390_INT_PFAULT_DONE; +		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti)); +	} +} + +void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu, +				     struct kvm_async_pf *work) +{ +	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token); +	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token); +} + +void kvm_arch_async_page_present(struct kvm_vcpu *vcpu, +				 struct kvm_async_pf *work) +{ +	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token); +	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token); +} + +void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, +			       struct kvm_async_pf *work) +{ +	/* s390 will always inject the page directly */ +} + +bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu) +{ +	/* +	 * s390 will always inject the page directly, +	 * but we still want check_async_completion to cleanup +	 */ +	return true; +} + +static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)  { +	hva_t hva; +	struct kvm_arch_async_pf arch;  	int rc; +	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) +		return 0; +	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) != +	    vcpu->arch.pfault_compare) +		return 0; +	if (psw_extint_disabled(vcpu)) +		return 0; +	if (kvm_cpu_has_interrupt(vcpu)) +		return 0; +	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul)) +		return 0; +	if (!vcpu->arch.gmap->pfault_enabled) +		return 0; + +	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr)); +	hva += current->thread.gmap_addr & ~PAGE_MASK; +	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8)) +		return 0; + +	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch); +	return rc; +} + +static int vcpu_pre_run(struct kvm_vcpu *vcpu) +{ +	int rc, cpuflags; + +	/* +	 * On s390 notifications for arriving pages will be delivered directly +	 * to the guest but the house keeping for completed pfaults is +	 * handled outside the worker. +	 */ +	kvm_check_async_pf_completion(vcpu); +  	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);  	if (need_resched())  		schedule(); -	if (test_thread_flag(TIF_MCCK_PENDING)) +	if (test_cpu_flag(CIF_MCCK_PENDING))  		s390_handle_mcck();  	if (!kvm_is_ucontrol(vcpu->kvm)) @@ -708,39 +1173,101 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)  	if (rc)  		return rc; +	if (guestdbg_enabled(vcpu)) { +		kvm_s390_backup_guest_per_regs(vcpu); +		kvm_s390_patch_guest_per_regs(vcpu); +	} +  	vcpu->arch.sie_block->icptcode = 0; -	VCPU_EVENT(vcpu, 6, "entering sie flags %x", -		   atomic_read(&vcpu->arch.sie_block->cpuflags)); -	trace_kvm_s390_sie_enter(vcpu, -				 atomic_read(&vcpu->arch.sie_block->cpuflags)); +	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags); +	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags); +	trace_kvm_s390_sie_enter(vcpu, cpuflags); -	/* -	 * As PF_VCPU will be used in fault handler, between guest_enter -	 * and guest_exit should be no uaccess. 
-	 */ -	preempt_disable(); -	kvm_guest_enter(); -	preempt_enable(); -	rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs); -	kvm_guest_exit(); +	return 0; +} + +static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason) +{ +	int rc = -1;  	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",  		   vcpu->arch.sie_block->icptcode);  	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode); -	if (rc > 0) +	if (guestdbg_enabled(vcpu)) +		kvm_s390_restore_guest_per_regs(vcpu); + +	if (exit_reason >= 0) {  		rc = 0; -	if (rc < 0) { -		if (kvm_is_ucontrol(vcpu->kvm)) { -			rc = SIE_INTERCEPT_UCONTROL; +	} else if (kvm_is_ucontrol(vcpu->kvm)) { +		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL; +		vcpu->run->s390_ucontrol.trans_exc_code = +						current->thread.gmap_addr; +		vcpu->run->s390_ucontrol.pgm_code = 0x10; +		rc = -EREMOTE; + +	} else if (current->thread.gmap_pfault) { +		trace_kvm_s390_major_guest_pfault(vcpu); +		current->thread.gmap_pfault = 0; +		if (kvm_arch_setup_async_pf(vcpu)) { +			rc = 0;  		} else { -			VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction"); -			trace_kvm_s390_sie_fault(vcpu); -			rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); +			gpa_t gpa = current->thread.gmap_addr; +			rc = kvm_arch_fault_in_page(vcpu, gpa, 1);  		}  	} +	if (rc == -1) { +		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction"); +		trace_kvm_s390_sie_fault(vcpu); +		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); +	} +  	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16); + +	if (rc == 0) { +		if (kvm_is_ucontrol(vcpu->kvm)) +			/* Don't exit for host interrupts. */ +			rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0; +		else +			rc = kvm_handle_sie_intercept(vcpu); +	} + +	return rc; +} + +static int __vcpu_run(struct kvm_vcpu *vcpu) +{ +	int rc, exit_reason; + +	/* +	 * We try to hold kvm->srcu during most of vcpu_run (except when run- +	 * ning the guest), so that memslots (and other stuff) are protected +	 */ +	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); + +	do { +		rc = vcpu_pre_run(vcpu); +		if (rc) +			break; + +		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); +		/* +		 * As PF_VCPU will be used in fault handler, between +		 * guest_enter and guest_exit should be no uaccess. 
+		 */ +		preempt_disable(); +		kvm_guest_enter(); +		preempt_enable(); +		exit_reason = sie64a(vcpu->arch.sie_block, +				     vcpu->run->s.regs.gprs); +		kvm_guest_exit(); +		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); + +		rc = vcpu_post_run(vcpu, exit_reason); +	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc); + +	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);  	return rc;  } @@ -749,13 +1276,15 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)  	int rc;  	sigset_t sigsaved; -rerun_vcpu: +	if (guestdbg_exit_pending(vcpu)) { +		kvm_s390_prepare_debug_exit(vcpu); +		return 0; +	} +  	if (vcpu->sigset_active)  		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); -	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); - -	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL); +	kvm_s390_vcpu_start(vcpu);  	switch (kvm_run->exit_reason) {  	case KVM_EXIT_S390_SIEIC: @@ -764,6 +1293,7 @@ rerun_vcpu:  	case KVM_EXIT_S390_RESET:  	case KVM_EXIT_S390_UCONTROL:  	case KVM_EXIT_S390_TSCH: +	case KVM_EXIT_DEBUG:  		break;  	default:  		BUG(); @@ -782,34 +1312,17 @@ rerun_vcpu:  	}  	might_fault(); - -	do { -		rc = __vcpu_run(vcpu); -		if (rc) -			break; -		if (kvm_is_ucontrol(vcpu->kvm)) -			rc = -EOPNOTSUPP; -		else -			rc = kvm_handle_sie_intercept(vcpu); -	} while (!signal_pending(current) && !rc); - -	if (rc == SIE_INTERCEPT_RERUNVCPU) -		goto rerun_vcpu; +	rc = __vcpu_run(vcpu);  	if (signal_pending(current) && !rc) {  		kvm_run->exit_reason = KVM_EXIT_INTR;  		rc = -EINTR;  	} -#ifdef CONFIG_KVM_S390_UCONTROL -	if (rc == SIE_INTERCEPT_UCONTROL) { -		kvm_run->exit_reason = KVM_EXIT_S390_UCONTROL; -		kvm_run->s390_ucontrol.trans_exc_code = -			current->thread.gmap_addr; -		kvm_run->s390_ucontrol.pgm_code = 0x10; +	if (guestdbg_exit_pending(vcpu) && !rc)  { +		kvm_s390_prepare_debug_exit(vcpu);  		rc = 0;  	} -#endif  	if (rc == -EOPNOTSUPP) {  		/* intercept cannot be handled in-kernel, prepare kvm-run */ @@ -828,7 +1341,7 @@ rerun_vcpu:  	kvm_run->psw_mask     = vcpu->arch.sie_block->gpsw.mask;  	kvm_run->psw_addr     = vcpu->arch.sie_block->gpsw.addr; -	kvm_run->s.regs.prefix = vcpu->arch.sie_block->prefix; +	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);  	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);  	if (vcpu->sigset_active) @@ -838,89 +1351,169 @@ rerun_vcpu:  	return rc;  } -static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from, -		       unsigned long n, int prefix) -{ -	if (prefix) -		return copy_to_guest(vcpu, guestdest, from, n); -	else -		return copy_to_guest_absolute(vcpu, guestdest, from, n); -} -  /*   * store status at address   * we use have two special cases:   * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit   * KVM_S390_STORE_STATUS_PREFIXED: -> prefix   */ -int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr) +int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)  {  	unsigned char archmode = 1; -	int prefix; +	unsigned int px; +	u64 clkcomp; +	int rc; -	if (addr == KVM_S390_STORE_STATUS_NOADDR) { -		if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1)) +	if (gpa == KVM_S390_STORE_STATUS_NOADDR) { +		if (write_guest_abs(vcpu, 163, &archmode, 1))  			return -EFAULT; -		addr = SAVE_AREA_BASE; -		prefix = 0; -	} else if (addr == KVM_S390_STORE_STATUS_PREFIXED) { -		if (copy_to_guest(vcpu, 163ul, &archmode, 1)) +		gpa = SAVE_AREA_BASE; +	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) { +		if 
(write_guest_real(vcpu, 163, &archmode, 1))  			return -EFAULT; -		addr = SAVE_AREA_BASE; -		prefix = 1; -	} else -		prefix = 0; +		gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE); +	} +	rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs), +			     vcpu->arch.guest_fpregs.fprs, 128); +	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs), +			      vcpu->run->s.regs.gprs, 128); +	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw), +			      &vcpu->arch.sie_block->gpsw, 16); +	px = kvm_s390_get_prefix(vcpu); +	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg), +			      &px, 4); +	rc |= write_guest_abs(vcpu, +			      gpa + offsetof(struct save_area, fp_ctrl_reg), +			      &vcpu->arch.guest_fpregs.fpc, 4); +	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg), +			      &vcpu->arch.sie_block->todpr, 4); +	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer), +			      &vcpu->arch.sie_block->cputm, 8); +	clkcomp = vcpu->arch.sie_block->ckc >> 8; +	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp), +			      &clkcomp, 8); +	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs), +			      &vcpu->run->s.regs.acrs, 64); +	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs), +			      &vcpu->arch.sie_block->gcr, 128); +	return rc ? -EFAULT : 0; +} +int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr) +{  	/*  	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy  	 * copying in vcpu load/put. Lets update our copies before we save  	 * it into the save area  	 */ -	save_fp_regs(&vcpu->arch.guest_fpregs); +	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc); +	save_fp_regs(vcpu->arch.guest_fpregs.fprs);  	save_access_regs(vcpu->run->s.regs.acrs); -	if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs), -			vcpu->arch.guest_fpregs.fprs, 128, prefix)) -		return -EFAULT; +	return kvm_s390_store_status_unloaded(vcpu, addr); +} + +static inline int is_vcpu_stopped(struct kvm_vcpu *vcpu) +{ +	return atomic_read(&(vcpu)->arch.sie_block->cpuflags) & CPUSTAT_STOPPED; +} -	if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs), -			vcpu->run->s.regs.gprs, 128, prefix)) -		return -EFAULT; +static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu) +{ +	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu); +	kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu); +	exit_sie_sync(vcpu); +} -	if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw), -			&vcpu->arch.sie_block->gpsw, 16, prefix)) -		return -EFAULT; +static void __disable_ibs_on_all_vcpus(struct kvm *kvm) +{ +	unsigned int i; +	struct kvm_vcpu *vcpu; -	if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg), -			&vcpu->arch.sie_block->prefix, 4, prefix)) -		return -EFAULT; +	kvm_for_each_vcpu(i, vcpu, kvm) { +		__disable_ibs_on_vcpu(vcpu); +	} +} -	if (__guestcopy(vcpu, -			addr + offsetof(struct save_area, fp_ctrl_reg), -			&vcpu->arch.guest_fpregs.fpc, 4, prefix)) -		return -EFAULT; +static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu) +{ +	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu); +	kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu); +	exit_sie_sync(vcpu); +} -	if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg), -			&vcpu->arch.sie_block->todpr, 4, prefix)) -		return -EFAULT; +void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu) +{ +	int i, online_vcpus, started_vcpus = 0; -	if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer), 
-			&vcpu->arch.sie_block->cputm, 8, prefix)) -		return -EFAULT; +	if (!is_vcpu_stopped(vcpu)) +		return; -	if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp), -			&vcpu->arch.sie_block->ckc, 8, prefix)) -		return -EFAULT; +	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1); +	/* Only one cpu at a time may enter/leave the STOPPED state. */ +	spin_lock_bh(&vcpu->kvm->arch.start_stop_lock); +	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus); -	if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs), -			&vcpu->run->s.regs.acrs, 64, prefix)) -		return -EFAULT; +	for (i = 0; i < online_vcpus; i++) { +		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) +			started_vcpus++; +	} -	if (__guestcopy(vcpu, -			addr + offsetof(struct save_area, ctrl_regs), -			&vcpu->arch.sie_block->gcr, 128, prefix)) -		return -EFAULT; -	return 0; +	if (started_vcpus == 0) { +		/* we're the only active VCPU -> speed it up */ +		__enable_ibs_on_vcpu(vcpu); +	} else if (started_vcpus == 1) { +		/* +		 * As we are starting a second VCPU, we have to disable +		 * the IBS facility on all VCPUs to remove potentially +		 * oustanding ENABLE requests. +		 */ +		__disable_ibs_on_all_vcpus(vcpu->kvm); +	} + +	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); +	/* +	 * Another VCPU might have used IBS while we were offline. +	 * Let's play safe and flush the VCPU at startup. +	 */ +	vcpu->arch.sie_block->ihcpu  = 0xffff; +	spin_unlock_bh(&vcpu->kvm->arch.start_stop_lock); +	return; +} + +void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu) +{ +	int i, online_vcpus, started_vcpus = 0; +	struct kvm_vcpu *started_vcpu = NULL; + +	if (is_vcpu_stopped(vcpu)) +		return; + +	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0); +	/* Only one cpu at a time may enter/leave the STOPPED state. */ +	spin_lock_bh(&vcpu->kvm->arch.start_stop_lock); +	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus); + +	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); +	__disable_ibs_on_vcpu(vcpu); + +	for (i = 0; i < online_vcpus; i++) { +		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) { +			started_vcpus++; +			started_vcpu = vcpu->kvm->vcpus[i]; +		} +	} + +	if (started_vcpus == 1) { +		/* +		 * As we only have one VCPU left, we want to enable the +		 * IBS facility for that VCPU to speed it up. 
+		 */ +		__enable_ibs_on_vcpu(started_vcpu); +	} + +	spin_unlock_bh(&vcpu->kvm->arch.start_stop_lock); +	return;  }  static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, @@ -951,6 +1544,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,  {  	struct kvm_vcpu *vcpu = filp->private_data;  	void __user *argp = (void __user *)arg; +	int idx;  	long r;  	switch (ioctl) { @@ -964,7 +1558,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,  		break;  	}  	case KVM_S390_STORE_STATUS: +		idx = srcu_read_lock(&vcpu->kvm->srcu);  		r = kvm_s390_vcpu_store_status(vcpu, arg); +		srcu_read_unlock(&vcpu->kvm->srcu, idx);  		break;  	case KVM_S390_SET_INITIAL_PSW: {  		psw_t psw; @@ -1060,12 +1656,13 @@ int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)  	return VM_FAULT_SIGBUS;  } -void kvm_arch_free_memslot(struct kvm_memory_slot *free, +void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,  			   struct kvm_memory_slot *dont)  {  } -int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages) +int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, +			    unsigned long npages)  {  	return 0;  } @@ -1146,8 +1743,8 @@ static int __init kvm_s390_init(void)  		return -ENOMEM;  	}  	memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16); -	vfacilities[0] &= 0xff82fff3f47c0000UL; -	vfacilities[1] &= 0x001c000000000000UL; +	vfacilities[0] &= 0xff82fff3f4fc2000UL; +	vfacilities[1] &= 0x005c000000000000UL;  	return 0;  }  | 
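Illustration (not part of the patch): the KVM_HAS_DEVICE_ATTR/KVM_SET_DEVICE_ATTR handling added above (kvm_s390_vm_has_attr / kvm_s390_vm_set_attr) gives userspace a per-VM knob for CMMA. A hedged sketch of how a VMM might drive it, assuming an open VM file descriptor "vm_fd" and UAPI headers that carry the new KVM_S390_VM_MEM_* constants:

/* Illustrative only: enable CMMA through the new VM attribute interface.
 * Must run before the first VCPU is created, otherwise the kernel side
 * (kvm_s390_mem_control) rejects the request with -EBUSY.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int enable_cmma(int vm_fd)
{
	struct kvm_device_attr attr = {
		.group = KVM_S390_VM_MEM_CTRL,
		.attr  = KVM_S390_VM_MEM_ENABLE_CMMA,
	};

	/* 0 means the attribute is known and settable on this kernel. */
	if (ioctl(vm_fd, KVM_HAS_DEVICE_ATTR, &attr))
		return -1;

	return ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
}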

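Illustration (not part of the patch): the new one-regs added above (KVM_REG_S390_PFTOKEN, PFCOMPARE, PFSELECT, PP, GBEA) exist so that async-pfault and program-parameter state can be saved and restored across migration. A hedged sketch of reading one of them through the generic ONE_REG interface; "vcpu_fd" is assumed to be an open VCPU file descriptor:

/* Illustrative only: read the pfault token register exposed by this patch
 * via KVM_GET_ONE_REG.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int get_pfault_token(int vcpu_fd, __u64 *token)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_S390_PFTOKEN,
		.addr = (__u64)(unsigned long)token,	/* kernel writes here */
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}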