[POWERPC] spufs: Use separate timer for /proc/spu_loadavg calculation
author Aegis Lin <aegislin@gmail.com>
Thu, 20 Dec 2007 07:39:59 +0000 (16:39 +0900)
committer Paul Mackerras <paulus@samba.org>
Fri, 21 Dec 2007 08:46:21 +0000 (19:46 +1100)
The original spusched_timer was designed to take effect only when
a context is waiting in the runqueue.

This change adds a lower-frequency timer purely to handle the
spu_loadavg updates. The new timer is triggered every LOAD_FREQ
ticks.

Signed-off-by: Aegis Lin <aegislin@gmail.com>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
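
Condensed from the hunks below (not a verbatim quote of the file), the
new timer follows the kernel's usual self-rearming pattern:

    static struct timer_list spuloadavg_timer;

    /* Timer callback: rearm for the next LOAD_FREQ period, then
     * fold the current active-context count into the load averages. */
    static void spuloadavg_wake(unsigned long data)
    {
            mod_timer(&spuloadavg_timer, jiffies + LOAD_FREQ);
            spu_calc_load();
    }

    /* In spu_sched_init(): register the callback, then arm the timer
     * with expires = 0 (already in the past) so that it fires on the
     * next timer tick. */
    setup_timer(&spuloadavg_timer, spuloadavg_wake, 0);
    mod_timer(&spuloadavg_timer, 0);

    /* In spu_sched_exit(): stop the timer, waiting out any callback
     * that is still running. */
    del_timer_sync(&spuloadavg_timer);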
arch/powerpc/platforms/cell/spufs/sched.c

index eee7cef28f1a916029d0167f3fb1266c57117786..8c8af11b35b4f0e99fb18504de8ea3389cc0c04e 100644 (file)
@@ -58,6 +58,7 @@ static unsigned long spu_avenrun[3];
 static struct spu_prio_array *spu_prio;
 static struct task_struct *spusched_task;
 static struct timer_list spusched_timer;
+static struct timer_list spuloadavg_timer;
 
 /*
  * Priority of a normal, non-rt, non-niced'd process (aka nice level 0).
@@ -922,35 +923,31 @@ static unsigned long count_active_contexts(void)
 }
 
 /**
- * spu_calc_load - given tick count, update the avenrun load estimates.
- * @tick:      tick count
+ * spu_calc_load - update the avenrun load estimates.
  *
  * No locking against reading these values from userspace, as for
  * the CPU loadavg code.
  */
-static void spu_calc_load(unsigned long ticks)
+static void spu_calc_load(void)
 {
        unsigned long active_tasks; /* fixed-point */
-       static int count = LOAD_FREQ;
-
-       count -= ticks;
-
-       if (unlikely(count < 0)) {
-               active_tasks = count_active_contexts() * FIXED_1;
-               do {
-                       CALC_LOAD(spu_avenrun[0], EXP_1, active_tasks);
-                       CALC_LOAD(spu_avenrun[1], EXP_5, active_tasks);
-                       CALC_LOAD(spu_avenrun[2], EXP_15, active_tasks);
-                       count += LOAD_FREQ;
-               } while (count < 0);
-       }
+
+       active_tasks = count_active_contexts() * FIXED_1;
+       CALC_LOAD(spu_avenrun[0], EXP_1, active_tasks);
+       CALC_LOAD(spu_avenrun[1], EXP_5, active_tasks);
+       CALC_LOAD(spu_avenrun[2], EXP_15, active_tasks);
 }
 
 static void spusched_wake(unsigned long data)
 {
        mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
        wake_up_process(spusched_task);
-       spu_calc_load(SPUSCHED_TICK);
+}
+
+static void spuloadavg_wake(unsigned long data)
+{
+       mod_timer(&spuloadavg_timer, jiffies + LOAD_FREQ);
+       spu_calc_load();
 }
 
 static int spusched_thread(void *unused)
@@ -1068,6 +1065,7 @@ int __init spu_sched_init(void)
        spin_lock_init(&spu_prio->runq_lock);
 
        setup_timer(&spusched_timer, spusched_wake, 0);
+       setup_timer(&spuloadavg_timer, spuloadavg_wake, 0);
 
        spusched_task = kthread_run(spusched_thread, NULL, "spusched");
        if (IS_ERR(spusched_task)) {
@@ -1075,6 +1073,8 @@ int __init spu_sched_init(void)
                goto out_free_spu_prio;
        }
 
+       mod_timer(&spuloadavg_timer, 0);
+
        entry = create_proc_entry("spu_loadavg", 0, NULL);
        if (!entry)
                goto out_stop_kthread;
@@ -1100,6 +1100,7 @@ void spu_sched_exit(void)
        remove_proc_entry("spu_loadavg", NULL);
 
        del_timer_sync(&spusched_timer);
+       del_timer_sync(&spuloadavg_timer);
        kthread_stop(spusched_task);
 
        for (node = 0; node < MAX_NUMNODES; node++) {
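
For reference, the CALC_LOAD() macro used by spu_calc_load() comes from
include/linux/sched.h of this era and maintains a fixed-point,
exponentially-damped moving average, exactly as the CPU loadavg code does:

    #define FSHIFT          11              /* nr of bits of precision */
    #define FIXED_1         (1<<FSHIFT)     /* 1.0 as fixed-point */
    #define LOAD_FREQ       (5*HZ)          /* 5 sec intervals */
    #define EXP_1           1884            /* 1/exp(5sec/1min) as fixed-point */
    #define EXP_5           2014            /* 1/exp(5sec/5min) */
    #define EXP_15          2037            /* 1/exp(5sec/15min) */

    #define CALC_LOAD(load,exp,n) \
            load *= exp; \
            load += n*(FIXED_1-exp); \
            load >>= FSHIFT;

Each invocation is exactly one averaging step. The old code drove
spu_calc_load() from the SPUSCHED_TICK-period spusched_wake(), keeping a
countdown and looping to catch up whenever more than one LOAD_FREQ period
had elapsed; with a dedicated timer that fires once per LOAD_FREQ, every
callback is one step, so the countdown and catch-up loop can be dropped.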