author     Jeremy Kerr <jk@ozlabs.org>   2008-09-04 15:02:47 +1000
committer  Jeremy Kerr <jk@ozlabs.org>   2008-09-05 10:52:03 +1000
commit     b65fe0356b5b732d7e1e0224c6a1cf2eb5255984 (patch)
tree       a5b1e1e16695c6335cdcb51aa2a4a0d47b2b690a /arch/powerpc/platforms/cell/spufs/sched.c
parent     9f43e3914dceb0f8191875b3cdf4325b48d0d70a (diff)
powerpc/spufs: Fix race for a free SPU
We currently have a race for a free SPE. With one thread doing a
spu_yield(), and another doing a spu_activate():
thread 1                        thread 2

spu_yield(oldctx)               spu_activate(ctx)
 __spu_deactivate(oldctx)
 spu_unschedule(oldctx, spu)
 spu->alloc_state = SPU_FREE
                                spu = spu_get_idle(ctx)
                                 - searches for a SPE in
                                   state SPU_FREE, gets
                                   the context just
                                   freed by thread 1
                                spu_schedule(ctx, spu)
                                 spu->alloc_state = SPU_USED
spu_schedule(newctx, spu)
 - assumes spu is still free
 - tries to schedule context on
   already-used spu
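To make the window concrete, below is a small userspace model of the race. This is an illustrative sketch only: the names mirror the spufs scheduler functions, but the bodies are simplified stand-ins, not the kernel code. It builds with "cc -pthread race.c"; because the outcome depends on thread timing, the BUG line fires only on some runs.

/* Minimal model: one SPU, one lock, and the two threads from the trace
 * above.  spu_unschedule() publishes the SPU as SPU_FREE before its
 * caller has decided whether to reuse it, which opens the window. */
#include <pthread.h>
#include <stdio.h>
#include <stddef.h>

enum spu_alloc_state { SPU_USED, SPU_FREE };

struct spu {
	pthread_mutex_t list_mutex;   /* stands in for cbe_spu_info[node].list_mutex */
	enum spu_alloc_state alloc_state;
	const char *ctx;              /* name of the context currently scheduled */
};

static struct spu the_spu = {
	.list_mutex  = PTHREAD_MUTEX_INITIALIZER,
	.alloc_state = SPU_USED,
	.ctx         = "oldctx",
};

/* Pre-patch behaviour: unconditionally mark the SPU free. */
static void spu_unschedule(struct spu *spu, const char *ctx)
{
	(void)ctx;                    /* context unbinding elided in this model */
	pthread_mutex_lock(&spu->list_mutex);
	spu->alloc_state = SPU_FREE;  /* visible to spu_get_idle() from here on */
	spu->ctx = NULL;
	pthread_mutex_unlock(&spu->list_mutex);
}

static struct spu *spu_get_idle(void)
{
	struct spu *spu = NULL;

	pthread_mutex_lock(&the_spu.list_mutex);
	if (the_spu.alloc_state == SPU_FREE) {
		the_spu.alloc_state = SPU_USED;
		spu = &the_spu;
	}
	pthread_mutex_unlock(&the_spu.list_mutex);
	return spu;
}

static void spu_schedule(struct spu *spu, const char *ctx)
{
	pthread_mutex_lock(&spu->list_mutex);
	if (spu->ctx)                 /* scheduling onto an already-used SPU */
		printf("BUG: %s scheduled over %s\n", ctx, spu->ctx);
	spu->ctx = ctx;
	pthread_mutex_unlock(&spu->list_mutex);
}

static void *thread1(void *arg)       /* the spu_yield(oldctx) side */
{
	(void)arg;
	spu_unschedule(&the_spu, "oldctx");
	/* window: thread 2's spu_get_idle() can claim the SPU right here */
	spu_schedule(&the_spu, "newctx"); /* assumes the SPU is still free */
	return NULL;
}

static void *thread2(void *arg)       /* the spu_activate(ctx) side */
{
	struct spu *spu;

	(void)arg;
	spu = spu_get_idle();
	if (spu)
		spu_schedule(spu, "ctx");
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, thread1, NULL);
	pthread_create(&t2, NULL, thread2, NULL);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	return 0;
}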
This change introduces a 'free_spu' flag to spu_unschedule(), indicating
whether the function should free the SPU after descheduling the
context. We set this flag only when we're not going to re-schedule
another context on this SPU.
Add a comment to document this behaviour.
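The fixed behaviour is easy to see in the same model. A sketch of the changed pieces follows (again illustrative, not the kernel code): spu_unschedule() grows a free_spu argument, and the yield side passes 0 when it already has a context to put on the SPU.

/* Post-patch spu_unschedule() in the model: the caller decides whether
 * the SPU may be offered to other contexts. */
static void spu_unschedule(struct spu *spu, const char *ctx, int free_spu)
{
	(void)ctx;
	pthread_mutex_lock(&spu->list_mutex);
	if (free_spu)
		spu->alloc_state = SPU_FREE;  /* only when we won't reuse it */
	spu->ctx = NULL;
	pthread_mutex_unlock(&spu->list_mutex);
}

static void *thread1(void *arg)       /* spu_yield() side, fixed */
{
	(void)arg;
	/* We already hold newctx to run next, so pass free_spu = 0: the SPU
	 * never becomes visible to spu_get_idle(), closing the window. */
	spu_unschedule(&the_spu, "oldctx", 0);
	spu_schedule(&the_spu, "newctx");
	return NULL;
}

In the patch below the same decision appears at the two call sites: __spu_deactivate() passes new == NULL, freeing the SPU only when there is no replacement context to run on it, and spusched_tick() passes 0, since that call is only reached once a new context has already been chosen for the SPU.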
Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
Diffstat (limited to 'arch/powerpc/platforms/cell/spufs/sched.c')
-rw-r--r--  arch/powerpc/platforms/cell/spufs/sched.c  23
1 file changed, 19 insertions(+), 4 deletions(-)
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 9bb45c6b839..897c7406116 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -732,13 +732,28 @@ static void spu_schedule(struct spu *spu, struct spu_context *ctx)
 	spu_release(ctx);
 }
 
-static void spu_unschedule(struct spu *spu, struct spu_context *ctx)
+/**
+ * spu_unschedule - remove a context from a spu, and possibly release it.
+ * @spu:	The SPU to unschedule from
+ * @ctx:	The context currently scheduled on the SPU
+ * @free_spu	Whether to free the SPU for other contexts
+ *
+ * Unbinds the context @ctx from the SPU @spu. If @free_spu is non-zero, the
+ * SPU is made available for other contexts (ie, may be returned by
+ * spu_get_idle). If this is zero, the caller is expected to schedule another
+ * context to this spu.
+ *
+ * Should be called with ctx->state_mutex held.
+ */
+static void spu_unschedule(struct spu *spu, struct spu_context *ctx,
+		int free_spu)
 {
 	int node = spu->node;
 
 	mutex_lock(&cbe_spu_info[node].list_mutex);
 	cbe_spu_info[node].nr_active--;
-	spu->alloc_state = SPU_FREE;
+	if (free_spu)
+		spu->alloc_state = SPU_FREE;
 	spu_unbind_context(spu, ctx);
 	ctx->stats.invol_ctx_switch++;
 	spu->stats.invol_ctx_switch++;
@@ -838,7 +853,7 @@ static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
 	if (spu) {
 		new = grab_runnable_context(max_prio, spu->node);
 		if (new || force) {
-			spu_unschedule(spu, ctx);
+			spu_unschedule(spu, ctx, new == NULL);
 			if (new) {
 				if (new->flags & SPU_CREATE_NOSCHED)
 					wake_up(&new->stop_wq);
@@ -911,7 +926,7 @@ static noinline void spusched_tick(struct spu_context *ctx)
 
 	new = grab_runnable_context(ctx->prio + 1, spu->node);
 	if (new) {
-		spu_unschedule(spu, ctx);
+		spu_unschedule(spu, ctx, 0);
 		if (test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags))
 			spu_add_to_rq(ctx);
 	} else {