Diffstat (limited to 'arch/powerpc/platforms/cell/spufs/sched.c')
 arch/powerpc/platforms/cell/spufs/sched.c | 144 +++++++++++++++++---------
 1 file changed, 108 insertions(+), 36 deletions(-)
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 00528ef84ad..4a0a64fe25d 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -22,11 +22,12 @@
#undef DEBUG
-#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
+#include <linux/sched/rt.h>
#include <linux/kernel.h>
#include <linux/mm.h>
+#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>
@@ -39,7 +40,6 @@
#include <linux/pid_namespace.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
-#include <linux/marker.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
@@ -47,6 +47,8 @@
#include <asm/spu_csa.h>
#include <asm/spu_priv1.h>
#include "spufs.h"
+#define CREATE_TRACE_POINTS
+#include "sputrace.h"
struct spu_prio_array {
DECLARE_BITMAP(bitmap, MAX_PRIO);
@@ -81,7 +83,6 @@ static struct timer_list spuloadavg_timer;
#define MIN_SPU_TIMESLICE max(5 * HZ / (1000 * SPUSCHED_TICK), 1)
#define DEF_SPU_TIMESLICE (100 * HZ / (1000 * SPUSCHED_TICK))
-#define MAX_USER_PRIO (MAX_PRIO - MAX_RT_PRIO)
#define SCALE_PRIO(x, prio) \
max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_SPU_TIMESLICE)
@@ -139,7 +140,10 @@ void __spu_update_sched_info(struct spu_context *ctx)
* runqueue. The context will be rescheduled on the proper node
* if it is timesliced or preempted.
*/
- ctx->cpus_allowed = current->cpus_allowed;
+ cpumask_copy(&ctx->cpus_allowed, tsk_cpus_allowed(current));
+
+ /* Save the current cpu id for spu interrupt routing. */
+ ctx->last_ran = raw_smp_processor_id();
}
void spu_update_sched_info(struct spu_context *ctx)
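The saved last_ran cpu id lets later code route SPU interrupts near where the context last executed. A minimal sketch of one possible consumer, assuming the hypothetical helper name spu_ctx_home_node(), which is not part of this patch:

static int spu_ctx_home_node(struct spu_context *ctx)
{
	/* cpu_to_node() maps the saved cpu id back to its NUMA node */
	return cpu_to_node(ctx->last_ran);
}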
@@ -163,9 +167,9 @@ void spu_update_sched_info(struct spu_context *ctx)
static int __node_allowed(struct spu_context *ctx, int node)
{
if (nr_cpus_node(node)) {
- cpumask_t mask = node_to_cpumask(node);
+ const struct cpumask *mask = cpumask_of_node(node);
- if (cpus_intersects(mask, ctx->cpus_allowed))
+ if (cpumask_intersects(mask, &ctx->cpus_allowed))
return 1;
}
@@ -227,22 +231,26 @@ static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
ctx->stats.slb_flt_base = spu->stats.slb_flt;
ctx->stats.class2_intr_base = spu->stats.class2_intr;
+ spu_associate_mm(spu, ctx->owner);
+
+ spin_lock_irq(&spu->register_lock);
spu->ctx = ctx;
spu->flags = 0;
ctx->spu = spu;
ctx->ops = &spu_hw_ops;
spu->pid = current->pid;
spu->tgid = current->tgid;
- spu_associate_mm(spu, ctx->owner);
spu->ibox_callback = spufs_ibox_callback;
spu->wbox_callback = spufs_wbox_callback;
spu->stop_callback = spufs_stop_callback;
spu->mfc_callback = spufs_mfc_callback;
- mb();
+ spin_unlock_irq(&spu->register_lock);
+
spu_unmap_mappings(ctx);
+
+ spu_switch_log_notify(spu, ctx, SWITCH_LOG_START, 0);
spu_restore(&ctx->csa, spu);
spu->timestamp = jiffies;
- spu_cpu_affinity_set(spu, raw_smp_processor_id());
spu_switch_notify(spu, ctx);
ctx->state = SPU_STATE_RUNNABLE;
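Taking spu->register_lock around the ownership and callback assignments does more than the bare mb() it replaces: rather than merely ordering the stores, it excludes any interrupt path that inspects spu->ctx and the callback pointers under the same lock. A sketch of such a reader, with the handler name and body assumed for illustration:

static irqreturn_t spu_irq_reader_sketch(int irq, void *data)
{
	struct spu *spu = data;

	spin_lock(&spu->register_lock);
	/* Either all bind-time fields are visible here, or none are. */
	if (spu->ctx && spu->ibox_callback)
		spu->ibox_callback(spu);
	spin_unlock(&spu->register_lock);

	return IRQ_HANDLED;
}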
@@ -305,11 +313,35 @@ static struct spu *aff_ref_location(struct spu_context *ctx, int mem_aff,
*/
node = cpu_to_node(raw_smp_processor_id());
for (n = 0; n < MAX_NUMNODES; n++, node++) {
+ /*
+ * "available_spus" counts how many spus are not potentially
+ * going to be used by other affinity gangs whose reference
+ * context is already in place. Although this code seeks to
+ * avoid having affinity gangs with a total number of
+ * contexts bigger than the number of spus in the node,
+ * this may happen sporadically. In this case, available_spus
+ * becomes negative, which is harmless.
+ */
+ int available_spus;
+
node = (node < MAX_NUMNODES) ? node : 0;
if (!node_allowed(ctx, node))
continue;
+
+ available_spus = 0;
mutex_lock(&cbe_spu_info[node].list_mutex);
list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
+ if (spu->ctx && spu->ctx->gang && !spu->ctx->aff_offset
+ && spu->ctx->gang->aff_ref_spu)
+ available_spus -= spu->ctx->gang->contexts;
+ available_spus++;
+ }
+ if (available_spus < ctx->gang->contexts) {
+ mutex_unlock(&cbe_spu_info[node].list_mutex);
+ continue;
+ }
+
+ list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
if ((!mem_aff || spu->has_mem_affinity) &&
sched_spu(spu)) {
mutex_unlock(&cbe_spu_info[node].list_mutex);
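The accounting above can be exercised standalone. In the userspace sketch below (names assumed, not spufs code), each slot is either free or holds the reference context of a gang of gang_size[i] contexts; oversubscription drives the sum negative, and the node is then skipped:

#include <stdio.h>

static int available_spus(const int *gang_size, int nr_spus)
{
	int i, avail = 0;

	for (i = 0; i < nr_spus; i++) {
		if (gang_size[i])	/* reference ctx of a placed gang */
			avail -= gang_size[i];
		avail++;		/* the spu itself */
	}
	return avail;
}

int main(void)
{
	/* two gangs of 4 and 6 contexts competing for 8 spus */
	int sizes[8] = { 4, 0, 0, 0, 6, 0, 0, 0 };

	printf("available_spus = %d\n", available_spus(sizes, 8)); /* -2 */
	return 0;
}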
@@ -382,6 +414,9 @@ static int has_affinity(struct spu_context *ctx)
if (list_empty(&ctx->aff_list))
return 0;
+ if (atomic_read(&ctx->gang->aff_sched_count) == 0)
+ ctx->gang->aff_ref_spu = NULL;
+
if (!gang->aff_ref_spu) {
if (!(gang->aff_flags & AFF_MERGED))
aff_merge_remaining_ctxs(gang);
@@ -400,6 +435,8 @@ static int has_affinity(struct spu_context *ctx)
*/
static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
{
+ u32 status;
+
spu_context_trace(spu_unbind_context__enter, ctx, spu);
spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
@@ -407,30 +444,34 @@ static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
if (spu->ctx->flags & SPU_CREATE_NOSCHED)
atomic_dec(&cbe_spu_info[spu->node].reserved_spus);
- if (ctx->gang){
- mutex_lock(&ctx->gang->aff_mutex);
- if (has_affinity(ctx)) {
- if (atomic_dec_and_test(&ctx->gang->aff_sched_count))
- ctx->gang->aff_ref_spu = NULL;
- }
- mutex_unlock(&ctx->gang->aff_mutex);
- }
+ if (ctx->gang)
+ /*
+ * If ctx->gang->aff_sched_count is positive, SPU affinity is
+ * being considered in this gang. Using atomic_dec_if_positive
+ * allows us to skip an explicit check for affinity in this gang.
+ */
+ atomic_dec_if_positive(&ctx->gang->aff_sched_count);
spu_switch_notify(spu, NULL);
spu_unmap_mappings(ctx);
spu_save(&ctx->csa, spu);
+ spu_switch_log_notify(spu, ctx, SWITCH_LOG_STOP, 0);
+
+ spin_lock_irq(&spu->register_lock);
spu->timestamp = jiffies;
ctx->state = SPU_STATE_SAVED;
spu->ibox_callback = NULL;
spu->wbox_callback = NULL;
spu->stop_callback = NULL;
spu->mfc_callback = NULL;
- spu_associate_mm(spu, NULL);
spu->pid = 0;
spu->tgid = 0;
ctx->ops = &spu_backing_ops;
spu->flags = 0;
spu->ctx = NULL;
+ spin_unlock_irq(&spu->register_lock);
+
+ spu_associate_mm(spu, NULL);
ctx->stats.slb_flt +=
(spu->stats.slb_flt - ctx->stats.slb_flt_base);
@@ -440,6 +481,9 @@ static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
/* This maps the underlying spu state to idle */
spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
ctx->spu = NULL;
+
+ if (spu_stopped(ctx, &status))
+ wake_up_all(&ctx->stop_wq);
}
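atomic_dec_if_positive() decrements only when the result stays non-negative and returns the would-be new value, so a gang whose count is already 0 is left untouched. A runnable userspace model of those semantics using C11 atomics:

#include <stdatomic.h>
#include <stdio.h>

static int dec_if_positive(atomic_int *v)
{
	int old = atomic_load(v);

	/* a failed CAS reloads old; retry while a decrement is still legal */
	while (old > 0 && !atomic_compare_exchange_weak(v, &old, old - 1))
		;
	return old - 1;
}

int main(void)
{
	atomic_int count = 1;

	printf("%d\n", dec_if_positive(&count));	/* 0: decremented */
	printf("%d\n", dec_if_positive(&count));	/* -1: left at 0 */
	return 0;
}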
/**
@@ -465,7 +509,7 @@ static void __spu_add_to_rq(struct spu_context *ctx)
list_add_tail(&ctx->rq, &spu_prio->runq[ctx->prio]);
set_bit(ctx->prio, spu_prio->bitmap);
if (!spu_prio->nr_waiting++)
- __mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
+ mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
}
}
@@ -545,10 +589,7 @@ static struct spu *spu_get_idle(struct spu_context *ctx)
goto found;
mutex_unlock(&cbe_spu_info[node].list_mutex);
- mutex_lock(&ctx->gang->aff_mutex);
- if (atomic_dec_and_test(&ctx->gang->aff_sched_count))
- ctx->gang->aff_ref_spu = NULL;
- mutex_unlock(&ctx->gang->aff_mutex);
+ atomic_dec(&ctx->gang->aff_sched_count);
goto not_found;
}
mutex_unlock(&ctx->gang->aff_mutex);
@@ -591,7 +632,7 @@ static struct spu *find_victim(struct spu_context *ctx)
struct spu *spu;
int node, n;
- spu_context_nospu_trace(spu_find_vitim__enter, ctx);
+ spu_context_nospu_trace(spu_find_victim__enter, ctx);
/*
* Look for a possible preemption candidate on the local node first.
@@ -613,9 +654,12 @@ static struct spu *find_victim(struct spu_context *ctx)
if (tmp && tmp->prio > ctx->prio &&
!(tmp->flags & SPU_CREATE_NOSCHED) &&
- (!victim || tmp->prio > victim->prio))
+ (!victim || tmp->prio > victim->prio)) {
victim = spu->ctx;
+ }
}
+ if (victim)
+ get_spu_context(victim);
mutex_unlock(&cbe_spu_info[node].list_mutex);
if (victim) {
@@ -630,6 +674,7 @@ static struct spu *find_victim(struct spu_context *ctx)
* look at another context or give up after X retries.
*/
if (!mutex_trylock(&victim->state_mutex)) {
+ put_spu_context(victim);
victim = NULL;
goto restart;
}
@@ -642,6 +687,7 @@ static struct spu *find_victim(struct spu_context *ctx)
* restart the search.
*/
mutex_unlock(&victim->state_mutex);
+ put_spu_context(victim);
victim = NULL;
goto restart;
}
@@ -655,9 +701,11 @@ static struct spu *find_victim(struct spu_context *ctx)
victim->stats.invol_ctx_switch++;
spu->stats.invol_ctx_switch++;
- spu_add_to_rq(victim);
+ if (test_bit(SPU_SCHED_SPU_RUN, &victim->sched_flags))
+ spu_add_to_rq(victim);
mutex_unlock(&victim->state_mutex);
+ put_spu_context(victim);
return spu;
}
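The get_spu_context()/put_spu_context() pair pins the victim once cbe_spu_info[node].list_mutex is dropped, so the later mutex_trylock() can never touch a freed context. A sketch fragment of this generic pin-then-lock pattern using kref (the names are generic, not spufs APIs):

	mutex_lock(&list_lock);
	victim = pick_candidate(head);		/* assumed helper */
	if (victim)
		kref_get(&victim->ref);		/* pin before dropping the list lock */
	mutex_unlock(&list_lock);

	if (victim && !mutex_trylock(&victim->lock)) {
		kref_put(&victim->ref, victim_release);	/* unpin on every bail-out path */
		victim = NULL;
	}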
@@ -693,17 +741,33 @@ static void spu_schedule(struct spu *spu, struct spu_context *ctx)
/* not a candidate for interruptible because it's called either
from the scheduler thread or from spu_deactivate */
mutex_lock(&ctx->state_mutex);
- __spu_schedule(spu, ctx);
+ if (ctx->state == SPU_STATE_SAVED)
+ __spu_schedule(spu, ctx);
spu_release(ctx);
}
-static void spu_unschedule(struct spu *spu, struct spu_context *ctx)
+/**
+ * spu_unschedule - remove a context from a spu, and possibly free the spu.
+ * @spu: The SPU to unschedule from
+ * @ctx: The context currently scheduled on the SPU
+ * @free_spu: Whether to free the SPU for other contexts
+ *
+ * Unbinds the context @ctx from the SPU @spu. If @free_spu is non-zero, the
+ * SPU is made available for other contexts (i.e., may be returned by
+ * spu_get_idle). If this is zero, the caller is expected to schedule another
+ * context to this spu.
+ *
+ * Should be called with ctx->state_mutex held.
+ */
+static void spu_unschedule(struct spu *spu, struct spu_context *ctx,
+ int free_spu)
{
int node = spu->node;
mutex_lock(&cbe_spu_info[node].list_mutex);
cbe_spu_info[node].nr_active--;
- spu->alloc_state = SPU_FREE;
+ if (free_spu)
+ spu->alloc_state = SPU_FREE;
spu_unbind_context(spu, ctx);
ctx->stats.invol_ctx_switch++;
spu->stats.invol_ctx_switch++;
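The two call sites in the hunks below show how the new flag is used: the spu is freed only when no successor context is about to take it.

	spu_unschedule(spu, ctx, new == NULL);	/* __spu_deactivate: free only without a successor */
	spu_unschedule(spu, ctx, 0);		/* spusched_tick: a new context is always scheduled */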
@@ -781,7 +845,7 @@ static struct spu_context *grab_runnable_context(int prio, int node)
struct list_head *rq = &spu_prio->runq[best];
list_for_each_entry(ctx, rq, rq) {
- /* XXX(hch): check for affinity here aswell */
+ /* XXX(hch): check for affinity here as well */
if (__node_allowed(ctx, node)) {
__spu_del_from_rq(ctx);
goto found;
@@ -803,7 +867,7 @@ static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
if (spu) {
new = grab_runnable_context(max_prio, spu->node);
if (new || force) {
- spu_unschedule(spu, ctx);
+ spu_unschedule(spu, ctx, new == NULL);
if (new) {
if (new->flags & SPU_CREATE_NOSCHED)
wake_up(&new->stop_wq);
@@ -876,12 +940,13 @@ static noinline void spusched_tick(struct spu_context *ctx)
new = grab_runnable_context(ctx->prio + 1, spu->node);
if (new) {
- spu_unschedule(spu, ctx);
+ spu_unschedule(spu, ctx, 0);
if (test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags))
spu_add_to_rq(ctx);
} else {
spu_context_nospu_trace(spusched_tick__newslice, ctx);
- ctx->time_slice++;
+ if (!ctx->time_slice)
+ ctx->time_slice++;
}
out:
spu_release(ctx);
@@ -955,9 +1020,11 @@ static int spusched_thread(void *unused)
struct spu_context *ctx = spu->ctx;
if (ctx) {
+ get_spu_context(ctx);
mutex_unlock(mtx);
spusched_tick(ctx);
mutex_lock(mtx);
+ put_spu_context(ctx);
}
}
mutex_unlock(mtx);
@@ -975,6 +1042,7 @@ void spuctx_switch_state(struct spu_context *ctx,
struct timespec ts;
struct spu *spu;
enum spu_utilization_state old_state;
+ int node;
ktime_get_ts(&ts);
curtime = timespec_to_ns(&ts);
@@ -996,6 +1064,11 @@ void spuctx_switch_state(struct spu_context *ctx,
spu->stats.times[old_state] += delta;
spu->stats.util_state = new_state;
spu->stats.tstamp = curtime;
+ node = spu->node;
+ if (old_state == SPU_UTIL_USER)
+ atomic_dec(&cbe_spu_info[node].busy_spus);
+ if (new_state == SPU_UTIL_USER)
+ atomic_inc(&cbe_spu_info[node].busy_spus);
}
}
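The new busy_spus counter changes only on transitions into and out of SPU_UTIL_USER, so it can be sampled locklessly. A sketch of a reader, with the function name an assumption for illustration:

static int spu_node_busy_spus(int node)
{
	/* atomic_read() needs no lock; a slightly stale value is fine for stats */
	return atomic_read(&cbe_spu_info[node].busy_spus);
}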
@@ -1021,7 +1094,7 @@ static int show_spu_loadavg(struct seq_file *s, void *private)
LOAD_INT(c), LOAD_FRAC(c),
count_active_contexts(),
atomic_read(&nr_spu_contexts),
- current->nsproxy->pid_ns->last_pid);
+ task_active_pid_ns(current)->last_pid);
return 0;
}
@@ -1063,10 +1136,9 @@ int __init spu_sched_init(void)
mod_timer(&spuloadavg_timer, 0);
- entry = create_proc_entry("spu_loadavg", 0, NULL);
+ entry = proc_create("spu_loadavg", 0, NULL, &spu_loadavg_fops);
if (!entry)
goto out_stop_kthread;
- entry->proc_fops = &spu_loadavg_fops;
pr_debug("spusched: tick: %d, min ticks: %d, default ticks: %d\n",
SPUSCHED_TICK, MIN_SPU_TIMESLICE, DEF_SPU_TIMESLICE);
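proc_create() registers the entry together with its file_operations in one step, removing the window in which the create_proc_entry() result existed without ->proc_fops set. The general shape of the conversion, as a sketch with an assumed seq_file open helper:

static const struct file_operations example_fops = {
	.owner		= THIS_MODULE,
	.open		= example_open,		/* assumed single_open() wrapper */
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

	entry = proc_create("example", 0, NULL, &example_fops);
	if (!entry)
		return -ENOMEM;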