Diffstat (limited to 'arch/x86/xen/smp.c')
-rw-r--r-- | arch/x86/xen/smp.c | 15 |
1 files changed, 11 insertions, 4 deletions
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 4fa33c27ccb..c1b131bcdcb 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -356,6 +356,7 @@ static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
 	 */
 	irq_enter();
 	(*func)(info);
+	__get_cpu_var(irq_stat).irq_call_count++;
 	irq_exit();
 
 	if (wait) {
@@ -370,7 +371,8 @@ int xen_smp_call_function_mask(cpumask_t mask, void (*func)(void *),
 			       void *info, int wait)
 {
 	struct call_data_struct data;
-	int cpus;
+	int cpus, cpu;
+	bool yield;
 
 	/* Holding any lock stops cpus from going down. */
 	spin_lock(&call_lock);
@@ -399,9 +401,14 @@ int xen_smp_call_function_mask(cpumask_t mask, void (*func)(void *),
 	/* Send a message to other CPUs and wait for them to respond */
 	xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);
 
-	/* Make sure other vcpus get a chance to run.
-	   XXX too severe?  Maybe we should check the other CPU's states? */
-	HYPERVISOR_sched_op(SCHEDOP_yield, 0);
+	/* Make sure other vcpus get a chance to run if they need to. */
+	yield = false;
+	for_each_cpu_mask(cpu, mask)
+		if (xen_vcpu_stolen(cpu))
+			yield = true;
+
+	if (yield)
+		HYPERVISOR_sched_op(SCHEDOP_yield, 0);
 
 	/* Wait for response */
 	while (atomic_read(&data.started) != cpus ||
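
For context on the new yield condition: xen_vcpu_stolen() (implemented in the Xen time code, not part of this diff) reports whether a VCPU has had physical CPU time "stolen" by the hypervisor, i.e. it is runnable but not currently scheduled. Below is a minimal sketch of that check, assuming the per-VCPU runstate bookkeeping Xen exposes through struct vcpu_runstate_info; the per-CPU variable name is illustrative, not necessarily the one used in the tree.

	#include <linux/types.h>
	#include <linux/percpu.h>
	#include <xen/interface/vcpu.h>	/* struct vcpu_runstate_info, RUNSTATE_runnable */

	/* Illustrative per-CPU copy of the runstate info Xen keeps updated for each VCPU. */
	static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate);

	bool xen_vcpu_stolen(int vcpu)
	{
		/* Runnable but not running: the VCPU is waiting for a physical CPU. */
		return per_cpu(xen_runstate, vcpu).state == RUNSTATE_runnable;
	}

With that check in place, xen_smp_call_function_mask() only pays for SCHEDOP_yield when at least one target VCPU is actually waiting for CPU time; previously it gave up the physical CPU unconditionally after sending the IPI, which the removed XXX comment already flagged as too severe.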