[POWERPC] spu sched: forced preemption at execution
author Christoph Hellwig <hch@lst.de>
Tue, 13 Feb 2007 20:54:27 +0000 (21:54 +0100)
committer Arnd Bergmann <arnd@klappe.arndb.de>
Tue, 13 Feb 2007 20:55:42 +0000 (21:55 +0100)
If we start a spu context with realtime priority, we want it to run
immediately and not wait until some other lower priority thread has
finished.  Try to find a suitable victim and use its spu in this
case.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>
arch/powerpc/platforms/cell/spufs/context.c
arch/powerpc/platforms/cell/spufs/sched.c
arch/powerpc/platforms/cell/spufs/spufs.h
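
To hit the new preemption path, the creating task must already be scheduled
with a realtime policy, since alloc_spu_context() copies current->rt_priority
into the context.  A minimal user-space sketch (illustrative only; the
priority value and the later context creation through spufs/libspe are
assumptions, not part of this patch):

#include <sched.h>
#include <stdio.h>

int main(void)
{
	struct sched_param param = { .sched_priority = 50 };

	/* Needs CAP_SYS_NICE; pid 0 means the calling process. */
	if (sched_setscheduler(0, SCHED_FIFO, &param)) {
		perror("sched_setscheduler");
		return 1;
	}

	/*
	 * Any spu context created from here on (e.g. via spu_create()
	 * through libspe) inherits rt_priority == 50, so spu_activate()
	 * may preempt a lower priority context for it.
	 */
	return 0;
}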

diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c
index 056a8ad0238530c8b7aee3e257679a00ed1d4c34..d581f4ec99baa93e76a162efb6cb93e1aeb56aeb 100644
--- a/arch/powerpc/platforms/cell/spufs/context.c
+++ b/arch/powerpc/platforms/cell/spufs/context.c
@@ -53,6 +53,7 @@ struct spu_context *alloc_spu_context(struct spu_gang *gang)
        ctx->owner = get_task_mm(current);
        if (gang)
                spu_gang_add_ctx(gang, ctx);
+       ctx->rt_priority = current->rt_priority;
        ctx->prio = current->prio;
        goto out;
 out_free:
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index eb06a030ca094eed737f9e2de17e6fbfd7b45a16..814f65e025f1c22799ca3c09feae61318c9790a1 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -281,6 +281,74 @@ static struct spu *spu_get_idle(struct spu_context *ctx)
        return spu;
 }
 
+/**
+ * find_victim - find a lower priority context to preempt
+ * @ctx:       candidate context for running
+ *
+ * Returns the freed physical spu to run the new context on.
+ */
+static struct spu *find_victim(struct spu_context *ctx)
+{
+       struct spu_context *victim = NULL;
+       struct spu *spu;
+       int node, n;
+
+       /*
+        * Look for a possible preemption candidate on the local node first.
+        * If there is no candidate look at the other nodes.  This isn't
+        * exactly fair, but so far the whole spu scheduler tries to keep
+        * a strong node affinity.  We might want to fine-tune this in
+        * the future.
+        */
+ restart:
+       node = cpu_to_node(raw_smp_processor_id());
+       for (n = 0; n < MAX_NUMNODES; n++, node++) {
+               node = (node < MAX_NUMNODES) ? node : 0;
+               if (!node_allowed(node))
+                       continue;
+
+               mutex_lock(&spu_prio->active_mutex[node]);
+               list_for_each_entry(spu, &spu_prio->active_list[node], list) {
+                       struct spu_context *tmp = spu->ctx;
+
+                       if (tmp->rt_priority < ctx->rt_priority &&
+                           (!victim || tmp->rt_priority < victim->rt_priority))
+                               victim = spu->ctx;
+               }
+               mutex_unlock(&spu_prio->active_mutex[node]);
+
+               if (victim) {
+                       /*
+                        * This nests ctx->state_mutex, but we always lock
+                        * higher priority contexts before lower priority
+                        * ones, so this is safe until we introduce
+                        * priority inheritance schemes.
+                        */
+                       if (!mutex_trylock(&victim->state_mutex)) {
+                               victim = NULL;
+                               goto restart;
+                       }
+
+                       spu = victim->spu;
+                       if (!spu) {
+                               /*
+                                * This race can happen because we've dropped
+                                * the active list mutex.  Not a problem, just
+                                * restart the search.
+                                */
+                               mutex_unlock(&victim->state_mutex);
+                               victim = NULL;
+                               goto restart;
+                       }
+                       spu_unbind_context(spu, victim);
+                       mutex_unlock(&victim->state_mutex);
+                       return spu;
+               }
+       }
+
+       return NULL;
+}
+
 /**
  * spu_activate - find a free spu for a context and execute it
  * @ctx:       spu context to schedule
@@ -300,6 +368,12 @@ int spu_activate(struct spu_context *ctx, unsigned long flags)
                struct spu *spu;
 
                spu = spu_get_idle(ctx);
+               /*
+                * If this is a realtime thread we try to get it running by
+                * preempting a lower priority thread.
+                */
+               if (!spu && ctx->rt_priority)
+                       spu = find_victim(ctx);
                if (spu) {
                        spu_bind_context(spu, ctx);
                        return 0;
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index 421f59167c559d94b97412c4f94bb80dac177a98..85b182d1646408a30487396019ebee0414289da2 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -83,6 +83,7 @@ struct spu_context {
        /* scheduler fields */
        struct list_head rq;
        unsigned long sched_flags;
+       unsigned long rt_priority;
        int prio;
 };
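
For reference, the victim-selection rule applied in find_victim()'s
active-list walk, restated as a standalone sketch without the kernel's
locking and node-scan machinery (the struct and helper names below are
hypothetical, chosen only for illustration):

#include <stddef.h>

struct ctx { unsigned long rt_priority; };

/*
 * A running context qualifies as a (better) victim only if its realtime
 * priority is strictly lower than the incoming context's, and lower than
 * that of any victim found so far -- mirroring the test in find_victim().
 */
static int better_victim(const struct ctx *newctx, const struct ctx *cand,
			 const struct ctx *victim)
{
	if (cand->rt_priority >= newctx->rt_priority)
		return 0;
	return victim == NULL || cand->rt_priority < victim->rt_priority;
}

int main(void)
{
	struct ctx newctx = { .rt_priority = 50 };
	struct ctx running[] = { { 10 }, { 60 }, { 5 } };
	const struct ctx *victim = NULL;
	size_t i;

	for (i = 0; i < sizeof(running) / sizeof(running[0]); i++)
		if (better_victim(&newctx, &running[i], victim))
			victim = &running[i];

	/* victim now points at the rt_priority == 5 context. */
	return 0;
}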