Diffstat (limited to 'arch/x86/xen/irq.c')
-rw-r--r--	arch/x86/xen/irq.c	49
1 file changed, 26 insertions(+), 23 deletions(-)
diff --git a/arch/x86/xen/irq.c b/arch/x86/xen/irq.c
index 6a6fe893964..a1207cb6472 100644
--- a/arch/x86/xen/irq.c
+++ b/arch/x86/xen/irq.c
@@ -5,6 +5,8 @@
#include <xen/interface/xen.h>
#include <xen/interface/sched.h>
#include <xen/interface/vcpu.h>
+#include <xen/features.h>
+#include <xen/events.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
@@ -21,12 +23,12 @@ void xen_force_evtchn_callback(void)
(void)HYPERVISOR_xen_version(0, NULL);
}
-static unsigned long xen_save_fl(void)
+asmlinkage __visible unsigned long xen_save_fl(void)
{
struct vcpu_info *vcpu;
unsigned long flags;
- vcpu = percpu_read(xen_vcpu);
+ vcpu = this_cpu_read(xen_vcpu);
/* flag has opposite sense of mask */
flags = !vcpu->evtchn_upcall_mask;
@@ -39,54 +41,51 @@ static unsigned long xen_save_fl(void)
}
PV_CALLEE_SAVE_REGS_THUNK(xen_save_fl);
-static void xen_restore_fl(unsigned long flags)
+__visible void xen_restore_fl(unsigned long flags)
{
struct vcpu_info *vcpu;
/* convert from IF type flag */
flags = !(flags & X86_EFLAGS_IF);
- /* There's a one instruction preempt window here. We need to
- make sure we're don't switch CPUs between getting the vcpu
- pointer and updating the mask. */
+ /* See xen_irq_enable() for why preemption must be disabled. */
preempt_disable();
- vcpu = percpu_read(xen_vcpu);
+ vcpu = this_cpu_read(xen_vcpu);
vcpu->evtchn_upcall_mask = flags;
- preempt_enable_no_resched();
-
- /* Doesn't matter if we get preempted here, because any
- pending event will get dealt with anyway. */
if (flags == 0) {
- preempt_check_resched();
barrier(); /* unmask then check (avoid races) */
if (unlikely(vcpu->evtchn_upcall_pending))
xen_force_evtchn_callback();
- }
+ preempt_enable();
+ } else
+ preempt_enable_no_resched();
}
PV_CALLEE_SAVE_REGS_THUNK(xen_restore_fl);
-static void xen_irq_disable(void)
+asmlinkage __visible void xen_irq_disable(void)
{
/* There's a one instruction preempt window here. We need to
make sure we're don't switch CPUs between getting the vcpu
pointer and updating the mask. */
preempt_disable();
- percpu_read(xen_vcpu)->evtchn_upcall_mask = 1;
+ this_cpu_read(xen_vcpu)->evtchn_upcall_mask = 1;
preempt_enable_no_resched();
}
PV_CALLEE_SAVE_REGS_THUNK(xen_irq_disable);
-static void xen_irq_enable(void)
+asmlinkage __visible void xen_irq_enable(void)
{
struct vcpu_info *vcpu;
- /* We don't need to worry about being preempted here, since
- either a) interrupts are disabled, so no preemption, or b)
- the caller is confused and is trying to re-enable interrupts
- on an indeterminate processor. */
+ /*
+ * We may be preempted as soon as vcpu->evtchn_upcall_mask is
+ * cleared, so disable preemption to ensure we check for
+ * events on the VCPU we are still running on.
+ */
+ preempt_disable();
- vcpu = percpu_read(xen_vcpu);
+ vcpu = this_cpu_read(xen_vcpu);
vcpu->evtchn_upcall_mask = 0;
/* Doesn't matter if we get preempted here, because any
@@ -95,6 +94,8 @@ static void xen_irq_enable(void)
barrier(); /* unmask then check (avoid races) */
if (unlikely(vcpu->evtchn_upcall_pending))
xen_force_evtchn_callback();
+
+ preempt_enable();
}
PV_CALLEE_SAVE_REGS_THUNK(xen_irq_enable);
@@ -113,7 +114,7 @@ static void xen_halt(void)
xen_safe_halt();
}
-static const struct pv_irq_ops xen_irq_ops __initdata = {
+static const struct pv_irq_ops xen_irq_ops __initconst = {
.save_fl = PV_CALLEE_SAVE(xen_save_fl),
.restore_fl = PV_CALLEE_SAVE(xen_restore_fl),
.irq_disable = PV_CALLEE_SAVE(xen_irq_disable),
@@ -128,6 +129,8 @@ static const struct pv_irq_ops xen_irq_ops __initdata = {
void __init xen_init_irq_ops(void)
{
- pv_irq_ops = xen_irq_ops;
+ /* For PVH we use default pv_irq_ops settings. */
+ if (!xen_feature(XENFEAT_hvm_callback_vector))
+ pv_irq_ops = xen_irq_ops;
x86_init.irqs.intr_init = xen_init_IRQ;
}
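For readers unfamiliar with the conventions the patch leans on, here is a minimal stand-alone sketch in plain C of the inverted-sense mask and the "unmask then check" pattern that xen_save_fl()/xen_restore_fl() implement above. It is a user-space model only: the struct vcpu_info fields and X86_EFLAGS_IF mirror the kernel's meaning, while model_save_fl(), model_restore_fl() and main() are hypothetical names invented for this illustration.

/*
 * Hypothetical user-space model of the mask/flags conventions above.
 * Not kernel code; for illustration only.
 */
#include <stdio.h>

#define X86_EFLAGS_IF 0x200	/* the interrupt-enable bit in EFLAGS */

struct vcpu_info {
	unsigned char evtchn_upcall_mask;	/* 1 = event delivery masked */
	unsigned char evtchn_upcall_pending;	/* 1 = event arrived while masked */
};

/* Like xen_save_fl(): the mask has the opposite sense of EFLAGS.IF. */
static unsigned long model_save_fl(const struct vcpu_info *vcpu)
{
	unsigned long flags = !vcpu->evtchn_upcall_mask;	/* 1 when enabled */

	return flags << 9;	/* bit 9 of EFLAGS is IF */
}

/* Like xen_restore_fl(): convert IF back into a mask, then "unmask then
 * check" so an event that arrived while masked is not left pending. */
static void model_restore_fl(struct vcpu_info *vcpu, unsigned long flags)
{
	vcpu->evtchn_upcall_mask = !(flags & X86_EFLAGS_IF);

	if (vcpu->evtchn_upcall_mask == 0 && vcpu->evtchn_upcall_pending)
		printf("would call xen_force_evtchn_callback() here\n");
}

int main(void)
{
	struct vcpu_info v = { .evtchn_upcall_mask = 1,
			       .evtchn_upcall_pending = 1 };

	printf("flags while masked: %#lx\n", model_save_fl(&v));	/* 0 */
	model_restore_fl(&v, X86_EFLAGS_IF);	/* unmask; pending event noticed */
	printf("flags after restore: %#lx\n", model_save_fl(&v));	/* 0x200 */
	return 0;
}

The sketch leaves out what the patch is actually fixing: in the kernel, the vcpu_info pointer is per-CPU, so the real xen_restore_fl()/xen_irq_enable() must hold preemption off across reading xen_vcpu, clearing the mask and checking evtchn_upcall_pending, otherwise the check could run on a different VCPU than the one that was unmasked.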