Diffstat (limited to 'arch/s390/kvm/sigp.c')
-rw-r--r-- | arch/s390/kvm/sigp.c | 457
1 file changed, 313 insertions, 144 deletions
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index 702276f5e2f..43079a48cc9 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -1,7 +1,7 @@
/*
- * sigp.c - handlinge interprocessor communication
+ * handling interprocessor communication
*
- * Copyright IBM Corp. 2008,2009
+ * Copyright IBM Corp. 2008, 2013
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License (version 2 only)
@@ -15,58 +15,38 @@
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/slab.h>
+#include <asm/sigp.h>
#include "gaccess.h"
#include "kvm-s390.h"
-
-/* sigp order codes */
-#define SIGP_SENSE 0x01
-#define SIGP_EXTERNAL_CALL 0x02
-#define SIGP_EMERGENCY 0x03
-#define SIGP_START 0x04
-#define SIGP_STOP 0x05
-#define SIGP_RESTART 0x06
-#define SIGP_STOP_STORE_STATUS 0x09
-#define SIGP_INITIAL_CPU_RESET 0x0b
-#define SIGP_CPU_RESET 0x0c
-#define SIGP_SET_PREFIX 0x0d
-#define SIGP_STORE_STATUS_ADDR 0x0e
-#define SIGP_SET_ARCH 0x12
-
-/* cpu status bits */
-#define SIGP_STAT_EQUIPMENT_CHECK 0x80000000UL
-#define SIGP_STAT_INCORRECT_STATE 0x00000200UL
-#define SIGP_STAT_INVALID_PARAMETER 0x00000100UL
-#define SIGP_STAT_EXT_CALL_PENDING 0x00000080UL
-#define SIGP_STAT_STOPPED 0x00000040UL
-#define SIGP_STAT_OPERATOR_INTERV 0x00000020UL
-#define SIGP_STAT_CHECK_STOP 0x00000010UL
-#define SIGP_STAT_INOPERATIVE 0x00000004UL
-#define SIGP_STAT_INVALID_ORDER 0x00000002UL
-#define SIGP_STAT_RECEIVER_CHECK 0x00000001UL
-
+#include "trace.h"
static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
- unsigned long *reg)
+ u64 *reg)
{
- struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
+ struct kvm_s390_local_interrupt *li;
+ struct kvm_vcpu *dst_vcpu = NULL;
+ int cpuflags;
int rc;
if (cpu_addr >= KVM_MAX_VCPUS)
- return 3; /* not operational */
+ return SIGP_CC_NOT_OPERATIONAL;
- spin_lock(&fi->lock);
- if (fi->local_int[cpu_addr] == NULL)
- rc = 3; /* not operational */
- else if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
- & CPUSTAT_RUNNING) {
- *reg &= 0xffffffff00000000UL;
- rc = 1; /* status stored */
- } else {
+ dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
+ if (!dst_vcpu)
+ return SIGP_CC_NOT_OPERATIONAL;
+ li = &dst_vcpu->arch.local_int;
+
+ cpuflags = atomic_read(li->cpuflags);
+ if (!(cpuflags & (CPUSTAT_ECALL_PEND | CPUSTAT_STOPPED)))
+ rc = SIGP_CC_ORDER_CODE_ACCEPTED;
+ else {
*reg &= 0xffffffff00000000UL;
- *reg |= SIGP_STAT_STOPPED;
- rc = 1; /* status stored */
+ if (cpuflags & CPUSTAT_ECALL_PEND)
+ *reg |= SIGP_STATUS_EXT_CALL_PENDING;
+ if (cpuflags & CPUSTAT_STOPPED)
+ *reg |= SIGP_STATUS_STOPPED;
+ rc = SIGP_CC_STATUS_STORED;
}
- spin_unlock(&fi->lock);
VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", cpu_addr, rc);
return rc;
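
For reference, the numeric values behind the SIGP_CC_* names used throughout this patch are presumably the condition codes that the removed inline comments spelled out; a sketch (the authoritative definitions live in the newly included asm/sigp.h):

    /* SIGP condition codes, as suggested by the removed comments
     * ("order accepted", "status stored", "busy", "not operational").
     * Sketch only -- see asm/sigp.h for the real definitions. */
    #define SIGP_CC_ORDER_CODE_ACCEPTED 0
    #define SIGP_CC_STATUS_STORED       1
    #define SIGP_CC_BUSY                2
    #define SIGP_CC_NOT_OPERATIONAL     3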
@@ -74,44 +54,81 @@ static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
{
- struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
- struct kvm_s390_local_interrupt *li;
- struct kvm_s390_interrupt_info *inti;
- int rc;
+ struct kvm_s390_interrupt s390int = {
+ .type = KVM_S390_INT_EMERGENCY,
+ .parm = vcpu->vcpu_id,
+ };
+ struct kvm_vcpu *dst_vcpu = NULL;
+ int rc = 0;
- if (cpu_addr >= KVM_MAX_VCPUS)
- return 3; /* not operational */
+ if (cpu_addr < KVM_MAX_VCPUS)
+ dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
+ if (!dst_vcpu)
+ return SIGP_CC_NOT_OPERATIONAL;
- inti = kzalloc(sizeof(*inti), GFP_KERNEL);
- if (!inti)
- return -ENOMEM;
+ rc = kvm_s390_inject_vcpu(dst_vcpu, &s390int);
+ if (!rc)
+ VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr);
- inti->type = KVM_S390_INT_EMERGENCY;
+ return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
+}
- spin_lock(&fi->lock);
- li = fi->local_int[cpu_addr];
- if (li == NULL) {
- rc = 3; /* not operational */
- kfree(inti);
- goto unlock;
+static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr,
+ u16 asn, u64 *reg)
+{
+ struct kvm_vcpu *dst_vcpu = NULL;
+ const u64 psw_int_mask = PSW_MASK_IO | PSW_MASK_EXT;
+ u16 p_asn, s_asn;
+ psw_t *psw;
+ u32 flags;
+
+ if (cpu_addr < KVM_MAX_VCPUS)
+ dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
+ if (!dst_vcpu)
+ return SIGP_CC_NOT_OPERATIONAL;
+ flags = atomic_read(&dst_vcpu->arch.sie_block->cpuflags);
+ psw = &dst_vcpu->arch.sie_block->gpsw;
+ p_asn = dst_vcpu->arch.sie_block->gcr[4] & 0xffff; /* Primary ASN */
+ s_asn = dst_vcpu->arch.sie_block->gcr[3] & 0xffff; /* Secondary ASN */
+
+ /* Deliver the emergency signal? */
+ if (!(flags & CPUSTAT_STOPPED)
+ || (psw->mask & psw_int_mask) != psw_int_mask
+ || ((flags & CPUSTAT_WAIT) && psw->addr != 0)
+ || (!(flags & CPUSTAT_WAIT) && (asn == p_asn || asn == s_asn))) {
+ return __sigp_emergency(vcpu, cpu_addr);
+ } else {
+ *reg &= 0xffffffff00000000UL;
+ *reg |= SIGP_STATUS_INCORRECT_STATE;
+ return SIGP_CC_STATUS_STORED;
}
- spin_lock_bh(&li->lock);
- list_add_tail(&inti->list, &li->list);
- atomic_set(&li->active, 1);
- atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
- if (waitqueue_active(&li->wq))
- wake_up_interruptible(&li->wq);
- spin_unlock_bh(&li->lock);
- rc = 0; /* order accepted */
-unlock:
- spin_unlock(&fi->lock);
- VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr);
- return rc;
+}
+
+static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr)
+{
+ struct kvm_s390_interrupt s390int = {
+ .type = KVM_S390_INT_EXTERNAL_CALL,
+ .parm = vcpu->vcpu_id,
+ };
+ struct kvm_vcpu *dst_vcpu = NULL;
+ int rc;
+
+ if (cpu_addr < KVM_MAX_VCPUS)
+ dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
+ if (!dst_vcpu)
+ return SIGP_CC_NOT_OPERATIONAL;
+
+ rc = kvm_s390_inject_vcpu(dst_vcpu, &s390int);
+ if (!rc)
+ VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x", cpu_addr);
+
+ return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
}
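
The stop path below keys off two action bits, ACTION_STOP_ON_STOP and ACTION_STORE_ON_STOP. A sketch of what they presumably look like (the values here are an assumption; the real flags are defined in kvm-s390.h):

    /* Assumed flag values -- see kvm-s390.h for the real definitions. */
    #define ACTION_STORE_ON_STOP (1 << 0) /* store status once the cpu stops */
    #define ACTION_STOP_ON_STOP  (1 << 1) /* deliver the stop interrupt itself */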
static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
{
struct kvm_s390_interrupt_info *inti;
+ int rc = SIGP_CC_ORDER_CODE_ACCEPTED;
inti = kzalloc(sizeof(*inti), GFP_ATOMIC);
if (!inti)
@@ -119,58 +136,71 @@ static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
inti->type = KVM_S390_SIGP_STOP;
spin_lock_bh(&li->lock);
+ if ((atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
+ kfree(inti);
+ if ((action & ACTION_STORE_ON_STOP) != 0)
+ rc = -ESHUTDOWN;
+ goto out;
+ }
list_add_tail(&inti->list, &li->list);
atomic_set(&li->active, 1);
atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
li->action_bits |= action;
- if (waitqueue_active(&li->wq))
- wake_up_interruptible(&li->wq);
+ if (waitqueue_active(li->wq))
+ wake_up_interruptible(li->wq);
+out:
spin_unlock_bh(&li->lock);
- return 0; /* order accepted */
+ return rc;
}
static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action)
{
- struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
struct kvm_s390_local_interrupt *li;
+ struct kvm_vcpu *dst_vcpu = NULL;
int rc;
if (cpu_addr >= KVM_MAX_VCPUS)
- return 3; /* not operational */
+ return SIGP_CC_NOT_OPERATIONAL;
- spin_lock(&fi->lock);
- li = fi->local_int[cpu_addr];
- if (li == NULL) {
- rc = 3; /* not operational */
- goto unlock;
- }
+ dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
+ if (!dst_vcpu)
+ return SIGP_CC_NOT_OPERATIONAL;
+ li = &dst_vcpu->arch.local_int;
rc = __inject_sigp_stop(li, action);
-unlock:
- spin_unlock(&fi->lock);
VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", cpu_addr);
- return rc;
-}
-int kvm_s390_inject_sigp_stop(struct kvm_vcpu *vcpu, int action)
-{
- struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
- return __inject_sigp_stop(li, action);
+ if ((action & ACTION_STORE_ON_STOP) != 0 && rc == -ESHUTDOWN) {
+ /* If the CPU has already been stopped, we still have
+ * to save the status when doing stop-and-store. This
+ * has to be done after unlocking all spinlocks. */
+ rc = kvm_s390_store_status_unloaded(dst_vcpu,
+ KVM_S390_STORE_STATUS_NOADDR);
+ }
+
+ return rc;
}
static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
{
int rc;
+ unsigned int i;
+ struct kvm_vcpu *v;
switch (parameter & 0xff) {
case 0:
- rc = 3; /* not operational */
+ rc = SIGP_CC_NOT_OPERATIONAL;
break;
case 1:
case 2:
- rc = 0; /* order accepted */
+ kvm_for_each_vcpu(i, v, vcpu->kvm) {
+ v->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
+ kvm_clear_async_pf_completion_queue(v);
+ }
+
+ rc = SIGP_CC_ORDER_CODE_ACCEPTED;
break;
default:
rc = -EOPNOTSUPP;
@@ -179,44 +209,41 @@ static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
}
static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
- unsigned long *reg)
+ u64 *reg)
{
- struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
- struct kvm_s390_local_interrupt *li = NULL;
+ struct kvm_s390_local_interrupt *li;
+ struct kvm_vcpu *dst_vcpu = NULL;
struct kvm_s390_interrupt_info *inti;
int rc;
- u8 tmp;
-
- /* make sure that the new value is valid memory */
- address = address & 0x7fffe000u;
- if ((copy_from_user(&tmp, (void __user *)
- (address + vcpu->arch.sie_block->gmsor) , 1)) ||
- (copy_from_user(&tmp, (void __user *)(address +
- vcpu->arch.sie_block->gmsor + PAGE_SIZE), 1))) {
- *reg |= SIGP_STAT_INVALID_PARAMETER;
- return 1; /* invalid parameter */
+
+ if (cpu_addr < KVM_MAX_VCPUS)
+ dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
+ if (!dst_vcpu)
+ return SIGP_CC_NOT_OPERATIONAL;
+ li = &dst_vcpu->arch.local_int;
+
+ /*
+ * Make sure the new value is valid memory. We only need to check the
+ * first page, since address is 8k aligned and memory pieces are always
+ * at least 1MB aligned and have at least a size of 1MB.
+ */
+ address &= 0x7fffe000u;
+ if (kvm_is_error_gpa(vcpu->kvm, address)) {
+ *reg &= 0xffffffff00000000UL;
+ *reg |= SIGP_STATUS_INVALID_PARAMETER;
+ return SIGP_CC_STATUS_STORED;
}
inti = kzalloc(sizeof(*inti), GFP_KERNEL);
if (!inti)
- return 2; /* busy */
-
- spin_lock(&fi->lock);
- if (cpu_addr < KVM_MAX_VCPUS)
- li = fi->local_int[cpu_addr];
-
- if (li == NULL) {
- rc = 1; /* incorrect state */
- *reg &= SIGP_STAT_INCORRECT_STATE;
- kfree(inti);
- goto out_fi;
- }
+ return SIGP_CC_BUSY;
spin_lock_bh(&li->lock);
/* cpu must be in stopped state */
- if (atomic_read(li->cpuflags) & CPUSTAT_RUNNING) {
- rc = 1; /* incorrect state */
- *reg &= SIGP_STAT_INCORRECT_STATE;
+ if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
+ *reg &= 0xffffffff00000000UL;
+ *reg |= SIGP_STATUS_INCORRECT_STATE;
+ rc = SIGP_CC_STATUS_STORED;
kfree(inti);
goto out_li;
}
@@ -226,15 +253,96 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
list_add_tail(&inti->list, &li->list);
atomic_set(&li->active, 1);
- if (waitqueue_active(&li->wq))
- wake_up_interruptible(&li->wq);
- rc = 0; /* order accepted */
+ if (waitqueue_active(li->wq))
+ wake_up_interruptible(li->wq);
+ rc = SIGP_CC_ORDER_CODE_ACCEPTED;
VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address);
out_li:
spin_unlock_bh(&li->lock);
-out_fi:
- spin_unlock(&fi->lock);
+ return rc;
+}
+
+static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu, u16 cpu_id,
+ u32 addr, u64 *reg)
+{
+ struct kvm_vcpu *dst_vcpu = NULL;
+ int flags;
+ int rc;
+
+ if (cpu_id < KVM_MAX_VCPUS)
+ dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_id);
+ if (!dst_vcpu)
+ return SIGP_CC_NOT_OPERATIONAL;
+
+ spin_lock_bh(&dst_vcpu->arch.local_int.lock);
+ flags = atomic_read(dst_vcpu->arch.local_int.cpuflags);
+ spin_unlock_bh(&dst_vcpu->arch.local_int.lock);
+ if (!(flags & CPUSTAT_STOPPED)) {
+ *reg &= 0xffffffff00000000UL;
+ *reg |= SIGP_STATUS_INCORRECT_STATE;
+ return SIGP_CC_STATUS_STORED;
+ }
+
+ addr &= 0x7ffffe00;
+ rc = kvm_s390_store_status_unloaded(dst_vcpu, addr);
+ if (rc == -EFAULT) {
+ *reg &= 0xffffffff00000000UL;
+ *reg |= SIGP_STATUS_INVALID_PARAMETER;
+ rc = SIGP_CC_STATUS_STORED;
+ }
+ return rc;
+}
+
+static int __sigp_sense_running(struct kvm_vcpu *vcpu, u16 cpu_addr,
+ u64 *reg)
+{
+ struct kvm_s390_local_interrupt *li;
+ struct kvm_vcpu *dst_vcpu = NULL;
+ int rc;
+
+ if (cpu_addr >= KVM_MAX_VCPUS)
+ return SIGP_CC_NOT_OPERATIONAL;
+
+ dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
+ if (!dst_vcpu)
+ return SIGP_CC_NOT_OPERATIONAL;
+ li = &dst_vcpu->arch.local_int;
+ if (atomic_read(li->cpuflags) & CPUSTAT_RUNNING) {
+ /* running */
+ rc = SIGP_CC_ORDER_CODE_ACCEPTED;
+ } else {
+ /* not running */
+ *reg &= 0xffffffff00000000UL;
+ *reg |= SIGP_STATUS_NOT_RUNNING;
+ rc = SIGP_CC_STATUS_STORED;
+ }
+
+ VCPU_EVENT(vcpu, 4, "sensed running status of cpu %x rc %x", cpu_addr,
+ rc);
+
+ return rc;
+}
+
+/* Test whether the destination CPU is available and not busy */
+static int sigp_check_callable(struct kvm_vcpu *vcpu, u16 cpu_addr)
+{
+ struct kvm_s390_local_interrupt *li;
+ int rc = SIGP_CC_ORDER_CODE_ACCEPTED;
+ struct kvm_vcpu *dst_vcpu = NULL;
+
+ if (cpu_addr >= KVM_MAX_VCPUS)
+ return SIGP_CC_NOT_OPERATIONAL;
+
+ dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
+ if (!dst_vcpu)
+ return SIGP_CC_NOT_OPERATIONAL;
+ li = &dst_vcpu->arch.local_int;
+ spin_lock_bh(&li->lock);
+ if (li->action_bits & ACTION_STOP_ON_STOP)
+ rc = SIGP_CC_BUSY;
+ spin_unlock_bh(&li->lock);
+
return rc;
}
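
The emergency-signal and external-call helpers above now build a struct kvm_s390_interrupt and hand it to kvm_s390_inject_vcpu(), the same payload user space supplies through the KVM_S390_INTERRUPT vcpu ioctl. A minimal user-space sketch, assuming an already-open vcpu file descriptor:

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    /* Sketch: inject an external call into a guest vcpu from user space.
     * vcpu_fd and src_cpu_addr are assumed to be supplied by the caller. */
    static int inject_external_call(int vcpu_fd, unsigned short src_cpu_addr)
    {
            struct kvm_s390_interrupt irq = {
                    .type = KVM_S390_INT_EXTERNAL_CALL,
                    .parm = src_cpu_addr, /* CPU address of the signalling vcpu */
            };

            return ioctl(vcpu_fd, KVM_S390_INTERRUPT, &irq);
    }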
@@ -242,34 +350,34 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
{
int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
int r3 = vcpu->arch.sie_block->ipa & 0x000f;
- int base2 = vcpu->arch.sie_block->ipb >> 28;
- int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
u32 parameter;
- u16 cpu_addr = vcpu->arch.guest_gprs[r3];
+ u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
u8 order_code;
int rc;
/* sigp in userspace can exit */
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
- return kvm_s390_inject_program_int(vcpu,
- PGM_PRIVILEGED_OPERATION);
+ return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
- order_code = disp2;
- if (base2)
- order_code += vcpu->arch.guest_gprs[base2];
+ order_code = kvm_s390_get_base_disp_rs(vcpu);
if (r1 % 2)
- parameter = vcpu->arch.guest_gprs[r1];
+ parameter = vcpu->run->s.regs.gprs[r1];
else
- parameter = vcpu->arch.guest_gprs[r1 + 1];
+ parameter = vcpu->run->s.regs.gprs[r1 + 1];
+ trace_kvm_s390_handle_sigp(vcpu, order_code, cpu_addr, parameter);
switch (order_code) {
case SIGP_SENSE:
vcpu->stat.instruction_sigp_sense++;
rc = __sigp_sense(vcpu, cpu_addr,
- &vcpu->arch.guest_gprs[r1]);
+ &vcpu->run->s.regs.gprs[r1]);
break;
- case SIGP_EMERGENCY:
+ case SIGP_EXTERNAL_CALL:
+ vcpu->stat.instruction_sigp_external_call++;
+ rc = __sigp_external_call(vcpu, cpu_addr);
+ break;
+ case SIGP_EMERGENCY_SIGNAL:
vcpu->stat.instruction_sigp_emergency++;
rc = __sigp_emergency(vcpu, cpu_addr);
break;
@@ -277,22 +385,49 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
vcpu->stat.instruction_sigp_stop++;
rc = __sigp_stop(vcpu, cpu_addr, ACTION_STOP_ON_STOP);
break;
- case SIGP_STOP_STORE_STATUS:
+ case SIGP_STOP_AND_STORE_STATUS:
vcpu->stat.instruction_sigp_stop++;
- rc = __sigp_stop(vcpu, cpu_addr, ACTION_STORE_ON_STOP);
+ rc = __sigp_stop(vcpu, cpu_addr, ACTION_STORE_ON_STOP |
+ ACTION_STOP_ON_STOP);
+ break;
+ case SIGP_STORE_STATUS_AT_ADDRESS:
+ rc = __sigp_store_status_at_addr(vcpu, cpu_addr, parameter,
+ &vcpu->run->s.regs.gprs[r1]);
break;
- case SIGP_SET_ARCH:
+ case SIGP_SET_ARCHITECTURE:
vcpu->stat.instruction_sigp_arch++;
rc = __sigp_set_arch(vcpu, parameter);
break;
case SIGP_SET_PREFIX:
vcpu->stat.instruction_sigp_prefix++;
rc = __sigp_set_prefix(vcpu, cpu_addr, parameter,
- &vcpu->arch.guest_gprs[r1]);
+ &vcpu->run->s.regs.gprs[r1]);
+ break;
+ case SIGP_COND_EMERGENCY_SIGNAL:
+ rc = __sigp_conditional_emergency(vcpu, cpu_addr, parameter,
+ &vcpu->run->s.regs.gprs[r1]);
+ break;
+ case SIGP_SENSE_RUNNING:
+ vcpu->stat.instruction_sigp_sense_running++;
+ rc = __sigp_sense_running(vcpu, cpu_addr,
+ &vcpu->run->s.regs.gprs[r1]);
+ break;
+ case SIGP_START:
+ rc = sigp_check_callable(vcpu, cpu_addr);
+ if (rc == SIGP_CC_ORDER_CODE_ACCEPTED)
+ rc = -EOPNOTSUPP; /* Handle START in user space */
break;
case SIGP_RESTART:
vcpu->stat.instruction_sigp_restart++;
- /* user space must know about restart */
+ rc = sigp_check_callable(vcpu, cpu_addr);
+ if (rc == SIGP_CC_ORDER_CODE_ACCEPTED) {
+ VCPU_EVENT(vcpu, 4,
+ "sigp restart %x to handle userspace",
+ cpu_addr);
+ /* user space must know about restart */
+ rc = -EOPNOTSUPP;
+ }
+ break;
default:
return -EOPNOTSUPP;
}
@@ -300,7 +435,41 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
if (rc < 0)
return rc;
- vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
- vcpu->arch.sie_block->gpsw.mask |= (rc & 3ul) << 44;
+ kvm_s390_set_psw_cc(vcpu, rc);
return 0;
}
+
+/*
+ * Handle SIGP partial execution interception.
+ *
+ * This interception will occur at the source cpu when a source cpu sends an
+ * external call to a target cpu and the target cpu has the WAIT bit set in
+ * its cpuflags. Interception will occur after the interrupt indicator bits at
+ * the target cpu have been set. All error cases will lead to instruction
+ * interception, therefore nothing is to be checked or prepared.
+ */
+int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu)
+{
+ int r3 = vcpu->arch.sie_block->ipa & 0x000f;
+ u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
+ struct kvm_vcpu *dest_vcpu;
+ u8 order_code = kvm_s390_get_base_disp_rs(vcpu);
+
+ trace_kvm_s390_handle_sigp_pei(vcpu, order_code, cpu_addr);
+
+ if (order_code == SIGP_EXTERNAL_CALL) {
+ dest_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
+ BUG_ON(dest_vcpu == NULL);
+
+ spin_lock_bh(&dest_vcpu->arch.local_int.lock);
+ if (waitqueue_active(&dest_vcpu->wq))
+ wake_up_interruptible(&dest_vcpu->wq);
+ dest_vcpu->preempted = true;
+ spin_unlock_bh(&dest_vcpu->arch.local_int.lock);
+
+ kvm_s390_set_psw_cc(vcpu, SIGP_CC_ORDER_CODE_ACCEPTED);
+ return 0;
+ }
+
+ return -EOPNOTSUPP;
+}
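
The open-coded PSW update that this patch removes from kvm_s390_handle_sigp() shows what the new kvm_s390_set_psw_cc() helper must do: place the SIGP condition code in the two PSW-mask bits at position 44. A sketch reconstructed from those removed lines (the real helper lives elsewhere in kvm-s390):

    /* Sketch reconstructed from the removed lines in kvm_s390_handle_sigp(). */
    static inline void kvm_s390_set_psw_cc(struct kvm_vcpu *vcpu, unsigned long cc)
    {
            vcpu->arch.sie_block->gpsw.mask &= ~(3UL << 44);
            vcpu->arch.sie_block->gpsw.mask |= (cc & 3UL) << 44;
    }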