Diffstat (limited to 'kernel/time/tick-broadcast.c')
-rw-r--r--  kernel/time/tick-broadcast.c | 544
1 file changed, 446 insertions(+), 98 deletions(-)
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 48b2761b566..64c5990fd50 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -18,7 +18,8 @@
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
-#include <linux/tick.h>
+#include <linux/smp.h>
+#include <linux/module.h>
#include "tick-internal.h"
@@ -28,9 +29,9 @@
*/
static struct tick_device tick_broadcast_device;
-/* FIXME: Use cpumask_var_t. */
-static DECLARE_BITMAP(tick_broadcast_mask, NR_CPUS);
-static DECLARE_BITMAP(tmpmask, NR_CPUS);
+static cpumask_var_t tick_broadcast_mask;
+static cpumask_var_t tick_broadcast_on;
+static cpumask_var_t tmpmask;
static DEFINE_RAW_SPINLOCK(tick_broadcast_lock);
static int tick_broadcast_force;
@@ -50,7 +51,7 @@ struct tick_device *tick_get_broadcast_device(void)
struct cpumask *tick_get_broadcast_mask(void)
{
- return to_cpumask(tick_broadcast_mask);
+ return tick_broadcast_mask;
}
/*
@@ -65,18 +66,50 @@ static void tick_broadcast_start_periodic(struct clock_event_device *bc)
/*
* Check if the device can be utilized as a broadcast device:
*/
-int tick_check_broadcast_device(struct clock_event_device *dev)
+static bool tick_check_broadcast_device(struct clock_event_device *curdev,
+ struct clock_event_device *newdev)
{
- if ((tick_broadcast_device.evtdev &&
- tick_broadcast_device.evtdev->rating >= dev->rating) ||
- (dev->features & CLOCK_EVT_FEAT_C3STOP))
- return 0;
+ if ((newdev->features & CLOCK_EVT_FEAT_DUMMY) ||
+ (newdev->features & CLOCK_EVT_FEAT_PERCPU) ||
+ (newdev->features & CLOCK_EVT_FEAT_C3STOP))
+ return false;
+
+ if (tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT &&
+ !(newdev->features & CLOCK_EVT_FEAT_ONESHOT))
+ return false;
+
+ return !curdev || newdev->rating > curdev->rating;
+}
+
+/*
+ * Conditionally install/replace broadcast device
+ */
+void tick_install_broadcast_device(struct clock_event_device *dev)
+{
+ struct clock_event_device *cur = tick_broadcast_device.evtdev;
+
+ if (!tick_check_broadcast_device(cur, dev))
+ return;
- clockevents_exchange_device(NULL, dev);
+ if (!try_module_get(dev->owner))
+ return;
+
+ clockevents_exchange_device(cur, dev);
+ if (cur)
+ cur->event_handler = clockevents_handle_noop;
tick_broadcast_device.evtdev = dev;
- if (!cpumask_empty(tick_get_broadcast_mask()))
+ if (!cpumask_empty(tick_broadcast_mask))
tick_broadcast_start_periodic(dev);
- return 1;
+ /*
+ * Inform all cpus about this. We might be in a situation
+ * where we did not switch to oneshot mode because the per cpu
+ * devices are affected by CLOCK_EVT_FEAT_C3STOP and the lack
+ * of a oneshot capable broadcast device. Without that
+ * notification the system stays stuck in periodic mode
+ * forever.
+ */
+ if (dev->features & CLOCK_EVT_FEAT_ONESHOT)
+ tick_clock_notify();
}
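
For orientation, the kind of device that passes tick_check_broadcast_device() is a system-wide timer that keeps running in deep idle: not a dummy, not per-cpu, no CLOCK_EVT_FEAT_C3STOP. A hypothetical declaration sketch follows (name, rating and the elided callbacks are made up, not from this patch); the clockevents core is expected to route such a device here via its registration path:

static struct clock_event_device example_global_timer = {
	.name		= "example-global-timer",
	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.rating		= 250,	/* may be lower than the per-cpu timers */
	/* no CLOCK_EVT_FEAT_C3STOP: keeps ticking in deep C-states */
	/* .set_mode / .set_next_event etc. elided in this sketch */
};
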
/*
@@ -87,14 +120,44 @@ int tick_is_broadcast_device(struct clock_event_device *dev)
return (dev && tick_broadcast_device.evtdev == dev);
}
+int tick_broadcast_update_freq(struct clock_event_device *dev, u32 freq)
+{
+ int ret = -ENODEV;
+
+ if (tick_is_broadcast_device(dev)) {
+ raw_spin_lock(&tick_broadcast_lock);
+ ret = __clockevents_update_freq(dev, freq);
+ raw_spin_unlock(&tick_broadcast_lock);
+ }
+ return ret;
+}
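
A minimal sketch of how a caller might reach this helper (the dispatch shape is an assumption, not the actual clockevents core): try the broadcast path first and fall back to the plain per-device update when -ENODEV indicates the device is not the broadcast device:

static int example_update_freq(struct clock_event_device *dev, u32 freq)
{
	int ret;

	/* Takes tick_broadcast_lock if dev is the broadcast device. */
	ret = tick_broadcast_update_freq(dev, freq);
	if (ret == -ENODEV)
		ret = __clockevents_update_freq(dev, freq);
	return ret;
}
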
+
+
+static void err_broadcast(const struct cpumask *mask)
+{
+ pr_crit_once("Failed to broadcast timer tick. Some CPUs may be unresponsive.\n");
+}
+
+static void tick_device_setup_broadcast_func(struct clock_event_device *dev)
+{
+ if (!dev->broadcast)
+ dev->broadcast = tick_broadcast;
+ if (!dev->broadcast) {
+ pr_warn_once("%s depends on broadcast, but no broadcast function available\n",
+ dev->name);
+ dev->broadcast = err_broadcast;
+ }
+}
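
For context, a per-cpu tick device that depends on this setup typically advertises CLOCK_EVT_FEAT_C3STOP and can leave ->broadcast unset, letting the core install tick_broadcast() (or err_broadcast() as a last resort). A hypothetical declaration, purely illustrative:

static struct clock_event_device example_local_timer = {
	.name		= "example-local-timer",
	.features	= CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP,
	.rating		= 300,
	/* .broadcast left NULL: tick_device_setup_broadcast_func() fills it in */
};
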
+
/*
* Check if the device is dysfunctional and a placeholder which
* needs to be handled by the broadcast device.
*/
int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
{
+ struct clock_event_device *bc = tick_broadcast_device.evtdev;
unsigned long flags;
- int ret = 0;
+ int ret;
raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
@@ -106,26 +169,87 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
*/
if (!tick_device_is_functional(dev)) {
dev->event_handler = tick_handle_periodic;
- cpumask_set_cpu(cpu, tick_get_broadcast_mask());
- tick_broadcast_start_periodic(tick_broadcast_device.evtdev);
+ tick_device_setup_broadcast_func(dev);
+ cpumask_set_cpu(cpu, tick_broadcast_mask);
+ if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
+ tick_broadcast_start_periodic(bc);
+ else
+ tick_broadcast_setup_oneshot(bc);
ret = 1;
} else {
/*
- * When the new device is not affected by the stop
- * feature and the cpu is marked in the broadcast mask
- * then clear the broadcast bit.
+ * Clear the broadcast bit for this cpu if the
+ * device is not power state affected.
*/
- if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
- int cpu = smp_processor_id();
+ if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
+ cpumask_clear_cpu(cpu, tick_broadcast_mask);
+ else
+ tick_device_setup_broadcast_func(dev);
- cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
+ /*
+ * Clear the broadcast bit if the CPU is not in
+ * periodic broadcast on state.
+ */
+ if (!cpumask_test_cpu(cpu, tick_broadcast_on))
+ cpumask_clear_cpu(cpu, tick_broadcast_mask);
+
+ switch (tick_broadcast_device.mode) {
+ case TICKDEV_MODE_ONESHOT:
+ /*
+ * If the system is in oneshot mode we can
+ * unconditionally clear the oneshot mask bit,
+ * because the CPU is running and therefore
+ * not in an idle state which causes the power
+ * state affected device to stop. Let the
+ * caller initialize the device.
+ */
tick_broadcast_clear_oneshot(cpu);
+ ret = 0;
+ break;
+
+ case TICKDEV_MODE_PERIODIC:
+ /*
+ * If the system is in periodic mode, check
+ * whether the broadcast device can be
+ * switched off now.
+ */
+ if (cpumask_empty(tick_broadcast_mask) && bc)
+ clockevents_shutdown(bc);
+ /*
+ * If we kept the cpu in the broadcast mask,
+ * tell the caller to leave the per cpu device
+ * in shutdown state. The periodic interrupt
+ * is delivered by the broadcast device.
+ */
+ ret = cpumask_test_cpu(cpu, tick_broadcast_mask);
+ break;
+ default:
+ /* Nothing to do */
+ ret = 0;
+ break;
}
}
raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
return ret;
}
+#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
+int tick_receive_broadcast(void)
+{
+ struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
+ struct clock_event_device *evt = td->evtdev;
+
+ if (!evt)
+ return -ENODEV;
+
+ if (!evt->event_handler)
+ return -EINVAL;
+
+ evt->event_handler(evt);
+ return 0;
+}
+#endif
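
tick_receive_broadcast() is meant to be called from an architecture's timer-broadcast IPI handler. A hypothetical sketch of such a handler (the IPI plumbing and the function name are assumptions, not part of this patch):

static void example_handle_timer_broadcast_ipi(void)
{
	/*
	 * Run the local clock event handler on behalf of the broadcast
	 * device; a non-zero return means no usable local device yet.
	 */
	if (tick_receive_broadcast())
		pr_warn_once("timer broadcast IPI without a local tick device\n");
}
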
+
/*
* Broadcast the event to the cpus, which are set in the mask (mangled).
*/
@@ -161,13 +285,8 @@ static void tick_do_broadcast(struct cpumask *mask)
*/
static void tick_do_periodic_broadcast(void)
{
- raw_spin_lock(&tick_broadcast_lock);
-
- cpumask_and(to_cpumask(tmpmask),
- cpu_online_mask, tick_get_broadcast_mask());
- tick_do_broadcast(to_cpumask(tmpmask));
-
- raw_spin_unlock(&tick_broadcast_lock);
+ cpumask_and(tmpmask, cpu_online_mask, tick_broadcast_mask);
+ tick_do_broadcast(tmpmask);
}
/*
@@ -177,13 +296,15 @@ static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
{
ktime_t next;
+ raw_spin_lock(&tick_broadcast_lock);
+
tick_do_periodic_broadcast();
/*
* The device is in periodic mode. No reprogramming necessary:
*/
if (dev->mode == CLOCK_EVT_MODE_PERIODIC)
- return;
+ goto unlock;
/*
* Setup the next period for devices, which do not have
@@ -195,10 +316,12 @@ static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
for (next = dev->next_event; ;) {
next = ktime_add(next, tick_period);
- if (!clockevents_program_event(dev, next, ktime_get()))
- return;
+ if (!clockevents_program_event(dev, next, false))
+ goto unlock;
tick_do_periodic_broadcast();
}
+unlock:
+ raw_spin_unlock(&tick_broadcast_lock);
}
/*
@@ -228,13 +351,13 @@ static void tick_do_broadcast_on_off(unsigned long *reason)
if (!tick_device_is_functional(dev))
goto out;
- bc_stopped = cpumask_empty(tick_get_broadcast_mask());
+ bc_stopped = cpumask_empty(tick_broadcast_mask);
switch (*reason) {
case CLOCK_EVT_NOTIFY_BROADCAST_ON:
case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
- if (!cpumask_test_cpu(cpu, tick_get_broadcast_mask())) {
- cpumask_set_cpu(cpu, tick_get_broadcast_mask());
+ cpumask_set_cpu(cpu, tick_broadcast_on);
+ if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_mask)) {
if (tick_broadcast_device.mode ==
TICKDEV_MODE_PERIODIC)
clockevents_shutdown(dev);
@@ -243,9 +366,12 @@ static void tick_do_broadcast_on_off(unsigned long *reason)
tick_broadcast_force = 1;
break;
case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
- if (!tick_broadcast_force &&
- cpumask_test_cpu(cpu, tick_get_broadcast_mask())) {
- cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
+ if (tick_broadcast_force)
+ break;
+ cpumask_clear_cpu(cpu, tick_broadcast_on);
+ if (!tick_device_is_functional(dev))
+ break;
+ if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_mask)) {
if (tick_broadcast_device.mode ==
TICKDEV_MODE_PERIODIC)
tick_setup_periodic(dev, 0);
@@ -253,7 +379,7 @@ static void tick_do_broadcast_on_off(unsigned long *reason)
break;
}
- if (cpumask_empty(tick_get_broadcast_mask())) {
+ if (cpumask_empty(tick_broadcast_mask)) {
if (!bc_stopped)
clockevents_shutdown(bc);
} else if (bc_stopped) {
@@ -302,10 +428,11 @@ void tick_shutdown_broadcast(unsigned int *cpup)
raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
bc = tick_broadcast_device.evtdev;
- cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
+ cpumask_clear_cpu(cpu, tick_broadcast_mask);
+ cpumask_clear_cpu(cpu, tick_broadcast_on);
if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
- if (bc && cpumask_empty(tick_get_broadcast_mask()))
+ if (bc && cpumask_empty(tick_broadcast_mask))
clockevents_shutdown(bc);
}
@@ -341,13 +468,14 @@ int tick_resume_broadcast(void)
switch (tick_broadcast_device.mode) {
case TICKDEV_MODE_PERIODIC:
- if (!cpumask_empty(tick_get_broadcast_mask()))
+ if (!cpumask_empty(tick_broadcast_mask))
tick_broadcast_start_periodic(bc);
broadcast = cpumask_test_cpu(smp_processor_id(),
- tick_get_broadcast_mask());
+ tick_broadcast_mask);
break;
case TICKDEV_MODE_ONESHOT:
- broadcast = tick_resume_broadcast_oneshot(bc);
+ if (!cpumask_empty(tick_broadcast_mask))
+ broadcast = tick_resume_broadcast_oneshot(bc);
break;
}
}
@@ -359,22 +487,58 @@ int tick_resume_broadcast(void)
#ifdef CONFIG_TICK_ONESHOT
-/* FIXME: use cpumask_var_t. */
-static DECLARE_BITMAP(tick_broadcast_oneshot_mask, NR_CPUS);
+static cpumask_var_t tick_broadcast_oneshot_mask;
+static cpumask_var_t tick_broadcast_pending_mask;
+static cpumask_var_t tick_broadcast_force_mask;
/*
* Exposed for debugging: see timer_list.c
*/
struct cpumask *tick_get_broadcast_oneshot_mask(void)
{
- return to_cpumask(tick_broadcast_oneshot_mask);
+ return tick_broadcast_oneshot_mask;
}
-static int tick_broadcast_set_event(ktime_t expires, int force)
+/*
+ * Called before going idle with interrupts disabled. Checks whether a
+ * broadcast event from the other core is about to happen. We detected
+ * that in tick_broadcast_oneshot_control(). The callsite can use this
+ * to avoid a deep idle transition as we are about to get the
+ * broadcast IPI right away.
+ */
+int tick_check_broadcast_expired(void)
{
- struct clock_event_device *bc = tick_broadcast_device.evtdev;
+ return cpumask_test_cpu(smp_processor_id(), tick_broadcast_force_mask);
+}
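
The intended consumer is the idle entry path: when a broadcast wakeup is already on its way to this CPU, entering a deep C-state is pointless. A minimal sketch of that check, where cpu_idle_poll()/arch_cpu_idle() stand in for whatever the idle loop of the tree provides:

static void example_idle_enter(void)
{
	if (tick_check_broadcast_expired())
		cpu_idle_poll();	/* wakeup IPI is imminent, stay shallow */
	else
		arch_cpu_idle();	/* safe to go deeper */
}
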
+
+/*
+ * Set broadcast interrupt affinity
+ */
+static void tick_broadcast_set_affinity(struct clock_event_device *bc,
+ const struct cpumask *cpumask)
+{
+ if (!(bc->features & CLOCK_EVT_FEAT_DYNIRQ))
+ return;
+
+ if (cpumask_equal(bc->cpumask, cpumask))
+ return;
+
+ bc->cpumask = cpumask;
+ irq_set_affinity(bc->irq, bc->cpumask);
+}
+
+static int tick_broadcast_set_event(struct clock_event_device *bc, int cpu,
+ ktime_t expires, int force)
+{
+ int ret;
+
+ if (bc->mode != CLOCK_EVT_MODE_ONESHOT)
+ clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
- return tick_dev_program_event(bc, expires, force);
+ ret = clockevents_program_event(bc, expires, force);
+ if (!ret)
+ tick_broadcast_set_affinity(bc, cpumask_of(cpu));
+ return ret;
}
int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
@@ -387,12 +551,20 @@ int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
* Called from irq_enter() when idle was interrupted to reenable the
* per cpu device.
*/
-void tick_check_oneshot_broadcast(int cpu)
+void tick_check_oneshot_broadcast_this_cpu(void)
{
- if (cpumask_test_cpu(cpu, to_cpumask(tick_broadcast_oneshot_mask))) {
- struct tick_device *td = &per_cpu(tick_cpu_device, cpu);
+ if (cpumask_test_cpu(smp_processor_id(), tick_broadcast_oneshot_mask)) {
+ struct tick_device *td = &__get_cpu_var(tick_cpu_device);
- clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_ONESHOT);
+ /*
+ * We might be in the middle of switching over from
+ * periodic to oneshot. If the CPU has not yet
+ * switched over, leave the device alone.
+ */
+ if (td->mode == TICKDEV_MODE_ONESHOT) {
+ clockevents_set_mode(td->evtdev,
+ CLOCK_EVT_MODE_ONESHOT);
+ }
}
}
@@ -403,27 +575,52 @@ static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
{
struct tick_device *td;
ktime_t now, next_event;
- int cpu;
+ int cpu, next_cpu = 0;
raw_spin_lock(&tick_broadcast_lock);
again:
dev->next_event.tv64 = KTIME_MAX;
next_event.tv64 = KTIME_MAX;
- cpumask_clear(to_cpumask(tmpmask));
+ cpumask_clear(tmpmask);
now = ktime_get();
/* Find all expired events */
- for_each_cpu(cpu, tick_get_broadcast_oneshot_mask()) {
+ for_each_cpu(cpu, tick_broadcast_oneshot_mask) {
td = &per_cpu(tick_cpu_device, cpu);
- if (td->evtdev->next_event.tv64 <= now.tv64)
- cpumask_set_cpu(cpu, to_cpumask(tmpmask));
- else if (td->evtdev->next_event.tv64 < next_event.tv64)
+ if (td->evtdev->next_event.tv64 <= now.tv64) {
+ cpumask_set_cpu(cpu, tmpmask);
+ /*
+ * Mark the remote cpu in the pending mask, so
+ * it can avoid reprogramming the cpu local
+ * timer in tick_broadcast_oneshot_control().
+ */
+ cpumask_set_cpu(cpu, tick_broadcast_pending_mask);
+ } else if (td->evtdev->next_event.tv64 < next_event.tv64) {
next_event.tv64 = td->evtdev->next_event.tv64;
+ next_cpu = cpu;
+ }
}
/*
+ * Remove the current cpu from the pending mask. The event is
+ * delivered immediately in tick_do_broadcast() !
+ */
+ cpumask_clear_cpu(smp_processor_id(), tick_broadcast_pending_mask);
+
+ /* Take care of enforced broadcast requests */
+ cpumask_or(tmpmask, tmpmask, tick_broadcast_force_mask);
+ cpumask_clear(tick_broadcast_force_mask);
+
+ /*
+ * Sanity check. Catch the case where we try to broadcast to
+ * offline cpus.
+ */
+ if (WARN_ON_ONCE(!cpumask_subset(tmpmask, cpu_online_mask)))
+ cpumask_and(tmpmask, tmpmask, cpu_online_mask);
+
+ /*
* Wakeup the cpus which have an expired event.
*/
- tick_do_broadcast(to_cpumask(tmpmask));
+ tick_do_broadcast(tmpmask);
/*
* Two reasons for reprogram:
@@ -440,59 +637,176 @@ again:
* Rearm the broadcast device. If event expired,
* repeat the above
*/
- if (tick_broadcast_set_event(next_event, 0))
+ if (tick_broadcast_set_event(dev, next_cpu, next_event, 0))
goto again;
}
raw_spin_unlock(&tick_broadcast_lock);
}
+static int broadcast_needs_cpu(struct clock_event_device *bc, int cpu)
+{
+ if (!(bc->features & CLOCK_EVT_FEAT_HRTIMER))
+ return 0;
+ if (bc->next_event.tv64 == KTIME_MAX)
+ return 0;
+ return bc->bound_on == cpu ? -EBUSY : 0;
+}
+
+static void broadcast_shutdown_local(struct clock_event_device *bc,
+ struct clock_event_device *dev)
+{
+ /*
+ * For hrtimer based broadcasting we cannot shutdown the cpu
+ * local device if our own event is the first one to expire or
+ * if we own the broadcast timer.
+ */
+ if (bc->features & CLOCK_EVT_FEAT_HRTIMER) {
+ if (broadcast_needs_cpu(bc, smp_processor_id()))
+ return;
+ if (dev->next_event.tv64 < bc->next_event.tv64)
+ return;
+ }
+ clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
+}
+
+static void broadcast_move_bc(int deadcpu)
+{
+ struct clock_event_device *bc = tick_broadcast_device.evtdev;
+
+ if (!bc || !broadcast_needs_cpu(bc, deadcpu))
+ return;
+ /* This moves the broadcast assignment to this cpu */
+ clockevents_program_event(bc, bc->next_event, 1);
+}
+
/*
* Powerstate information: The system enters/leaves a state where
* affected devices might stop
+ * Returns 0 on success, -EBUSY if the cpu is used to broadcast wakeups.
*/
-void tick_broadcast_oneshot_control(unsigned long reason)
+int tick_broadcast_oneshot_control(unsigned long reason)
{
struct clock_event_device *bc, *dev;
struct tick_device *td;
unsigned long flags;
- int cpu;
-
- raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
+ ktime_t now;
+ int cpu, ret = 0;
/*
* Periodic mode does not care about the enter/exit of power
* states
*/
if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
- goto out;
+ return 0;
- bc = tick_broadcast_device.evtdev;
+ /*
+ * We are called with preemption disabled from the depth of the
+ * idle code, so we can't be moved away.
+ */
cpu = smp_processor_id();
td = &per_cpu(tick_cpu_device, cpu);
dev = td->evtdev;
if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
- goto out;
+ return 0;
+ bc = tick_broadcast_device.evtdev;
+
+ raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) {
- if (!cpumask_test_cpu(cpu, tick_get_broadcast_oneshot_mask())) {
- cpumask_set_cpu(cpu, tick_get_broadcast_oneshot_mask());
- clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
- if (dev->next_event.tv64 < bc->next_event.tv64)
- tick_broadcast_set_event(dev->next_event, 1);
+ if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_oneshot_mask)) {
+ WARN_ON_ONCE(cpumask_test_cpu(cpu, tick_broadcast_pending_mask));
+ broadcast_shutdown_local(bc, dev);
+ /*
+ * We only reprogram the broadcast timer if we
+ * did not mark ourself in the force mask and
+ * if the cpu local event is earlier than the
+ * broadcast event. If the current CPU is in
+ * the force mask, then we are going to be
+ * woken by the IPI right away.
+ */
+ if (!cpumask_test_cpu(cpu, tick_broadcast_force_mask) &&
+ dev->next_event.tv64 < bc->next_event.tv64)
+ tick_broadcast_set_event(bc, cpu, dev->next_event, 1);
}
+ /*
+ * If the current CPU owns the hrtimer broadcast
+ * mechanism, it cannot go deep idle and we remove the
+ * CPU from the broadcast mask. We don't have to go
+ * through the EXIT path as the local timer is not
+ * shutdown.
+ */
+ ret = broadcast_needs_cpu(bc, cpu);
+ if (ret)
+ cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
} else {
- if (cpumask_test_cpu(cpu, tick_get_broadcast_oneshot_mask())) {
- cpumask_clear_cpu(cpu,
- tick_get_broadcast_oneshot_mask());
+ if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_oneshot_mask)) {
clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
- if (dev->next_event.tv64 != KTIME_MAX)
- tick_program_event(dev->next_event, 1);
+ /*
+ * The cpu which was handling the broadcast
+ * timer marked this cpu in the broadcast
+ * pending mask and fired the broadcast
+ * IPI. So we are going to handle the expired
+ * event anyway via the broadcast IPI
+ * handler. No need to reprogram the timer
+ * with an already expired event.
+ */
+ if (cpumask_test_and_clear_cpu(cpu,
+ tick_broadcast_pending_mask))
+ goto out;
+
+ /*
+ * Bail out if there is no next event.
+ */
+ if (dev->next_event.tv64 == KTIME_MAX)
+ goto out;
+ /*
+ * If the pending bit is not set, then we are
+ * either the CPU handling the broadcast
+ * interrupt or we got woken by something else.
+ *
+ * We are no longer in the broadcast mask, so
+ * if the cpu local expiry time is already
+ * reached, we would reprogram the cpu local
+ * timer with an already expired event.
+ *
+ * This can lead to a ping-pong when we return
+ * to idle and therefore rearm the broadcast
+ * timer before the cpu local timer was able
+ * to fire. This happens because the forced
+ * reprogramming makes sure that the event
+ * will happen in the future and depending on
+ * the min_delta setting this might be far
+ * enough out that the ping-pong starts.
+ *
+ * If the cpu local next_event has expired
+ * then we know that the broadcast timer
+ * next_event has expired as well and
+ * broadcast is about to be handled. So we
+ * avoid reprogramming and enforce that the
+ * broadcast handler, which did not run yet,
+ * will invoke the cpu local handler.
+ *
+ * We cannot call the handler directly from
+ * here, because we might be in a NOHZ phase
+ * and we did not go through the irq_enter()
+ * nohz fixups.
+ */
+ now = ktime_get();
+ if (dev->next_event.tv64 <= now.tv64) {
+ cpumask_set_cpu(cpu, tick_broadcast_force_mask);
+ goto out;
+ }
+ /*
+ * We got woken by something else. Reprogram
+ * the cpu local timer device.
+ */
+ tick_program_event(dev->next_event, 1);
}
}
-
out:
raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
+ return ret;
}
/*
@@ -502,7 +816,8 @@ out:
*/
static void tick_broadcast_clear_oneshot(int cpu)
{
- cpumask_clear_cpu(cpu, tick_get_broadcast_oneshot_mask());
+ cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
+ cpumask_clear_cpu(cpu, tick_broadcast_pending_mask);
}
static void tick_broadcast_init_next_event(struct cpumask *mask,
@@ -523,16 +838,13 @@ static void tick_broadcast_init_next_event(struct cpumask *mask,
*/
void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
{
+ int cpu = smp_processor_id();
+
/* Set it up only once ! */
if (bc->event_handler != tick_handle_oneshot_broadcast) {
int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC;
- int cpu = smp_processor_id();
bc->event_handler = tick_handle_oneshot_broadcast;
- clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
-
- /* Take the do_timer update */
- tick_do_timer_cpu = cpu;
/*
* We must be careful here. There might be other CPUs
@@ -540,18 +852,27 @@ void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
* oneshot_mask bits for those and program the
* broadcast device to fire.
*/
- cpumask_copy(to_cpumask(tmpmask), tick_get_broadcast_mask());
- cpumask_clear_cpu(cpu, to_cpumask(tmpmask));
- cpumask_or(tick_get_broadcast_oneshot_mask(),
- tick_get_broadcast_oneshot_mask(),
- to_cpumask(tmpmask));
-
- if (was_periodic && !cpumask_empty(to_cpumask(tmpmask))) {
- tick_broadcast_init_next_event(to_cpumask(tmpmask),
+ cpumask_copy(tmpmask, tick_broadcast_mask);
+ cpumask_clear_cpu(cpu, tmpmask);
+ cpumask_or(tick_broadcast_oneshot_mask,
+ tick_broadcast_oneshot_mask, tmpmask);
+
+ if (was_periodic && !cpumask_empty(tmpmask)) {
+ clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
+ tick_broadcast_init_next_event(tmpmask,
tick_next_period);
- tick_broadcast_set_event(tick_next_period, 1);
+ tick_broadcast_set_event(bc, cpu, tick_next_period, 1);
} else
bc->next_event.tv64 = KTIME_MAX;
+ } else {
+ /*
+ * The first cpu which switches to oneshot mode sets
+ * the bit for all other cpus which are in the general
+ * (periodic) broadcast mask. So the bit is set and
+ * would prevent the first broadcast enter after this
+ * from programming the bc device.
+ */
+ tick_broadcast_clear_oneshot(cpu);
}
}
@@ -569,6 +890,7 @@ void tick_broadcast_switch_to_oneshot(void)
bc = tick_broadcast_device.evtdev;
if (bc)
tick_broadcast_setup_oneshot(bc);
+
raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
@@ -584,10 +906,14 @@ void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
/*
- * Clear the broadcast mask flag for the dead cpu, but do not
- * stop the broadcast device!
+ * Clear the broadcast masks for the dead cpu, but do not stop
+ * the broadcast device!
*/
- cpumask_clear_cpu(cpu, tick_get_broadcast_oneshot_mask());
+ cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
+ cpumask_clear_cpu(cpu, tick_broadcast_pending_mask);
+ cpumask_clear_cpu(cpu, tick_broadcast_force_mask);
+
+ broadcast_move_bc(cpu);
raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
@@ -600,4 +926,26 @@ int tick_broadcast_oneshot_active(void)
return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT;
}
+/*
+ * Check whether the broadcast device supports oneshot.
+ */
+bool tick_broadcast_oneshot_available(void)
+{
+ struct clock_event_device *bc = tick_broadcast_device.evtdev;
+
+ return bc ? bc->features & CLOCK_EVT_FEAT_ONESHOT : false;
+}
+
#endif
+
+void __init tick_broadcast_init(void)
+{
+ zalloc_cpumask_var(&tick_broadcast_mask, GFP_NOWAIT);
+ zalloc_cpumask_var(&tick_broadcast_on, GFP_NOWAIT);
+ zalloc_cpumask_var(&tmpmask, GFP_NOWAIT);
+#ifdef CONFIG_TICK_ONESHOT
+ zalloc_cpumask_var(&tick_broadcast_oneshot_mask, GFP_NOWAIT);
+ zalloc_cpumask_var(&tick_broadcast_pending_mask, GFP_NOWAIT);
+ zalloc_cpumask_var(&tick_broadcast_force_mask, GFP_NOWAIT);
+#endif
+}
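
tick_broadcast_init() must run before any of the masks above are used; in this series it is expected to be invoked from tick_init() during early boot, roughly as follows (sketch, assuming tick-common.c wires it up this way):

/* kernel/time/tick-common.c (sketch) */
void __init tick_init(void)
{
	tick_broadcast_init();
}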