[POWERPC] spufs: reorganize spu_run_init
author Luke Browning <lukebr@linux.vnet.ibm.com>
Thu, 20 Dec 2007 07:39:59 +0000 (16:39 +0900)
committer Paul Mackerras <paulus@samba.org>
Fri, 21 Dec 2007 08:46:20 +0000 (19:46 +1100)
This cleans up spu_run_init so that it does all of the spu
initialization for spufs_run_spu.  It initializes the spu context as
much as possible before it activates the spu and writes the runcntl
register.

Signed-off-by: Luke Browning <lukebr@linux.vnet.ibm.com>
Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
arch/powerpc/platforms/cell/spufs/run.c
arch/powerpc/platforms/cell/spufs/sched.c
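
Before the hunks, a condensed sketch of spu_run_init() as it reads after this patch. This is a paraphrase, not the literal source: the privcntl selection and the runcntl default in the non-isolated path are reconstructed from context lines the hunks do not show, and the two calls that end each branch in the patched source are hoisted into a common tail here. The shape makes the point of the reorganization visible: both paths activate the context and set its timeslice before the runcntl register is written, so nothing starts the SPU until initialization is complete.

/*
 * Condensed sketch of spu_run_init() after this patch, paraphrased
 * from the hunks below.  The runcntl default and the privcntl
 * selection are assumptions (they sit in context lines the diff does
 * not show); in the patched source the final two calls appear at the
 * end of each branch rather than in a common tail.
 */
static int spu_run_init(struct spu_context *ctx, u32 *npc)
{
	unsigned long runcntl = SPU_RUNCNTL_RUNNABLE;	/* assumed default */
	int ret;

	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

	if (ctx->flags & SPU_CREATE_ISOLATE) {
		/*
		 * Activate first: isolated state assumes the special
		 * loader context is loaded and running on the spu.
		 */
		if (ctx->state == SPU_STATE_SAVED) {
			spu_set_timeslice(ctx);
			ret = spu_activate(ctx, 0);
			if (ret)
				return ret;
		}

		if (!(ctx->ops->status_read(ctx) & SPU_STATUS_ISOLATED_STATE)) {
			ret = spu_setup_isolated(ctx);
			if (ret)
				return ret;
		}

		/* keep a runcntl value userspace may already have set */
		runcntl = ctx->ops->runcntl_read(ctx) &
			(SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);
		if (runcntl == 0)
			runcntl = SPU_RUNCNTL_RUNNABLE;
	} else {
		unsigned long privcntl;

		/* assumed selection: single-step mode for debuggers */
		if (test_thread_flag(TIF_SINGLESTEP))
			privcntl = SPU_PRIVCNTL_MODE_SINGLE_STEP;
		else
			privcntl = SPU_PRIVCNTL_MODE_NORMAL;

		ctx->ops->npc_write(ctx, *npc);
		ctx->ops->privcntl_write(ctx, privcntl);

		if (ctx->state == SPU_STATE_SAVED) {
			spu_set_timeslice(ctx);
			ret = spu_activate(ctx, 0);
			if (ret)
				return ret;
		}
	}

	/* common tail: account the context to the user, start the spu */
	spuctx_switch_state(ctx, SPU_UTIL_USER);
	ctx->ops->runcntl_write(ctx, runcntl);

	return 0;
}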

diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
index 3b3de6c7ee5b6116e769bf93261eb1b163d52155..652ae1366dc8cc1eb29075099ac5ff756715ff07 100644
--- a/arch/powerpc/platforms/cell/spufs/run.c
+++ b/arch/powerpc/platforms/cell/spufs/run.c
@@ -152,23 +152,41 @@ out:
 static int spu_run_init(struct spu_context *ctx, u32 *npc)
 {
        unsigned long runcntl;
+       int ret;
 
        spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
 
        if (ctx->flags & SPU_CREATE_ISOLATE) {
+               /*
+                * Force activation of spu.  Isolated state assumes that
+                * special loader context is loaded and running on spu.
+                */
+               if (ctx->state == SPU_STATE_SAVED) {
+                       spu_set_timeslice(ctx);
+
+                       ret = spu_activate(ctx, 0);
+                       if (ret)
+                               return ret;
+               }
 
                if (!(ctx->ops->status_read(ctx) & SPU_STATUS_ISOLATED_STATE)) {
-                       int ret = spu_setup_isolated(ctx);
+                       ret = spu_setup_isolated(ctx);
                        if (ret)
                                return ret;
                }
 
-               /* if userspace has set the runcntrl register (eg, to issue an
-                * isolated exit), we need to re-set it here */
+               /*
+                * If userspace has set the runcntrl register (eg, to
+                * issue an isolated exit), we need to re-set it here
+                */
                runcntl = ctx->ops->runcntl_read(ctx) &
                        (SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);
                if (runcntl == 0)
                        runcntl = SPU_RUNCNTL_RUNNABLE;
+
+               spuctx_switch_state(ctx, SPU_UTIL_USER);
+               ctx->ops->runcntl_write(ctx, runcntl);
+
        } else {
                unsigned long privcntl;
 
@@ -180,11 +198,17 @@ static int spu_run_init(struct spu_context *ctx, u32 *npc)
 
                ctx->ops->npc_write(ctx, *npc);
                ctx->ops->privcntl_write(ctx, privcntl);
-       }
 
-       ctx->ops->runcntl_write(ctx, runcntl);
+               if (ctx->state == SPU_STATE_SAVED) {
+                       spu_set_timeslice(ctx);
+                       ret = spu_activate(ctx, 0);
+                       if (ret)
+                               return ret;
+               }
 
-       spuctx_switch_state(ctx, SPU_UTIL_USER);
+               spuctx_switch_state(ctx, SPU_UTIL_USER);
+               ctx->ops->runcntl_write(ctx, runcntl);
+       }
 
        return 0;
 }
@@ -323,25 +347,8 @@ long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
        ctx->event_return = 0;
 
        spu_acquire(ctx);
-       if (ctx->state == SPU_STATE_SAVED) {
-               __spu_update_sched_info(ctx);
-               spu_set_timeslice(ctx);
 
-               ret = spu_activate(ctx, 0);
-               if (ret) {
-                       spu_release(ctx);
-                       goto out;
-               }
-       } else {
-               /*
-                * We have to update the scheduling priority under active_mutex
-                * to protect against find_victim().
-                *
-                * No need to update the timeslice ASAP, it will get updated
-                * once the current one has expired.
-                */
-               spu_update_sched_info(ctx);
-       }
+       spu_update_sched_info(ctx);
 
        ret = spu_run_init(ctx, npc);
        if (ret) {
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 82ea576c53a30b1e929207e8c09ce80f9d4568d7..ef0e5e230fbb2f0f35bb5df5bf010bbd93872871 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -104,6 +104,12 @@ void spu_set_timeslice(struct spu_context *ctx)
  */
 void __spu_update_sched_info(struct spu_context *ctx)
 {
+       /*
+        * assert that the context is not on the runqueue, so it is safe
+        * to change its scheduling parameters.
+        */
+       BUG_ON(!list_empty(&ctx->rq));
+
        /*
         * 32-Bit assignments are atomic on powerpc, and we don't care about
         * memory ordering here because retrieving the controlling thread is
@@ -124,23 +130,28 @@ void __spu_update_sched_info(struct spu_context *ctx)
        ctx->policy = current->policy;
 
        /*
-        * A lot of places that don't hold list_mutex poke into
-        * cpus_allowed, including grab_runnable_context which
-        * already holds the runq_lock.  So abuse runq_lock
-        * to protect this field as well.
+        * TO DO: the context may be loaded, so we may need to activate
+        * it again on a different node. But it shouldn't hurt anything
+        * to update its parameters, because we know that the scheduler
+        * is not actively looking at this field, since it is not on the
+        * runqueue. The context will be rescheduled on the proper node
+        * if it is timesliced or preempted.
         */
-       spin_lock(&spu_prio->runq_lock);
        ctx->cpus_allowed = current->cpus_allowed;
-       spin_unlock(&spu_prio->runq_lock);
 }
 
 void spu_update_sched_info(struct spu_context *ctx)
 {
-       int node = ctx->spu->node;
+       int node;
 
-       mutex_lock(&cbe_spu_info[node].list_mutex);
-       __spu_update_sched_info(ctx);
-       mutex_unlock(&cbe_spu_info[node].list_mutex);
+       if (ctx->state == SPU_STATE_RUNNABLE) {
+               node = ctx->spu->node;
+               mutex_lock(&cbe_spu_info[node].list_mutex);
+               __spu_update_sched_info(ctx);
+               mutex_unlock(&cbe_spu_info[node].list_mutex);
+       } else {
+               __spu_update_sched_info(ctx);
+       }
 }
 
 static int __node_allowed(struct spu_context *ctx, int node)
@@ -604,6 +615,10 @@ static struct spu *find_victim(struct spu_context *ctx)
                         * higher priority contexts before lower priority
                         * ones, so this is safe until we introduce
                         * priority inheritance schemes.
+                        *
+                        * XXX if the highest priority context is locked,
+                        * this can loop a long time.  Might be better to
+                        * look at another context or give up after X retries.
                         */
                        if (!mutex_trylock(&victim->state_mutex)) {
                                victim = NULL;