sched: remove 'now' use from assignments
author     Ingo Molnar <mingo@elte.hu>    Thu, 9 Aug 2007 09:16:47 +0000 (11:16 +0200)
committer  Ingo Molnar <mingo@elte.hu>    Thu, 9 Aug 2007 09:16:47 +0000 (11:16 +0200)
Change all 'now' timestamp uses in assignments to rq->clock.

( this is an identity transformation that causes no functionality change:
  every such new rq->clock use is necessarily preceded by an
  update_rq_clock() call. )

Signed-off-by: Ingo Molnar <mingo@elte.hu>
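
For illustration, a minimal sketch of the transformation outside the kernel
tree. The u64 typedef, the reduced struct rq, and the demo_*() helpers are
hypothetical stand-ins; only the rq->clock field and the rule that
update_rq_clock() runs before any such read mirror the patch below.

    typedef unsigned long long u64;

    struct rq {
            u64 clock;              /* per-runqueue timestamp */
    };

    /* stand-in for the kernel's update_rq_clock(): refresh rq->clock */
    static void update_rq_clock(struct rq *rq)
    {
            rq->clock += 1;         /* a real clock source would go here */
    }

    /* before: callers computed 'now' once and threaded it through */
    static void demo_stamp_old(struct rq *rq, u64 *stamp, u64 now)
    {
            *stamp = now;
    }

    /* after: the callee reads rq->clock directly; safe because every
     * such path has already called update_rq_clock(rq) */
    static void demo_stamp_new(struct rq *rq, u64 *stamp)
    {
            *stamp = rq->clock;
    }

    int main(void)
    {
            struct rq rq = { .clock = 0 };
            u64 a, b;

            update_rq_clock(&rq);
            demo_stamp_old(&rq, &a, rq.clock);
            demo_stamp_new(&rq, &b);
            return a == b ? 0 : 1;  /* identical: an identity transformation */
    }

Because update_rq_clock() always runs first on these paths, both variants
store the same value, which is what makes the patch a pure identity
transformation.
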
kernel/sched.c
kernel/sched_fair.c
kernel/sched_rt.c

diff --git a/kernel/sched.c b/kernel/sched.c
index 65eb484dc268d151c3921930e129cc972f348a9e..49a5fb0cdea08e15139855e8eb853dce51ba05f3 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -788,8 +788,8 @@ static void update_curr_load(struct rq *rq, u64 now)
        u64 start;
 
        start = ls->load_update_start;
-       ls->load_update_start = now;
-       ls->delta_stat += now - start;
+       ls->load_update_start = rq->clock;
+       ls->delta_stat += rq->clock - start;
        /*
         * Stagger updates to ls->delta_fair. Very frequent updates
         * can be expensive.
@@ -1979,8 +1979,8 @@ static void update_cpu_load(struct rq *this_rq)
        exec_delta64 = ls->delta_exec + 1;
        ls->delta_exec = 0;
 
-       sample_interval64 = now - ls->load_update_last;
-       ls->load_update_last = now;
+       sample_interval64 = this_rq->clock - ls->load_update_last;
+       ls->load_update_last = this_rq->clock;
 
        if ((s64)sample_interval64 < (s64)TICK_NSEC)
                sample_interval64 = TICK_NSEC;
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index bd20fad3deffa3baa08050122dacbf59b3106745..bcf5fc59e8e95f960a17caa03957c50ae5864f80 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -333,7 +333,7 @@ static void update_curr(struct cfs_rq *cfs_rq, u64 now)
         * since the last time we changed load (this cannot
         * overflow on 32 bits):
         */
-       delta_exec = (unsigned long)(now - curr->exec_start);
+       delta_exec = (unsigned long)(rq_of(cfs_rq)->clock - curr->exec_start);
 
        curr->delta_exec += delta_exec;
 
@@ -341,14 +341,14 @@ static void update_curr(struct cfs_rq *cfs_rq, u64 now)
                __update_curr(cfs_rq, curr, now);
                curr->delta_exec = 0;
        }
-       curr->exec_start = now;
+       curr->exec_start = rq_of(cfs_rq)->clock;
 }
 
 static inline void
 update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now)
 {
        se->wait_start_fair = cfs_rq->fair_clock;
-       schedstat_set(se->wait_start, now);
+       schedstat_set(se->wait_start, rq_of(cfs_rq)->clock);
 }
 
 /*
@@ -421,7 +421,8 @@ __update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now)
 {
        unsigned long delta_fair = se->delta_fair_run;
 
-       schedstat_set(se->wait_max, max(se->wait_max, now - se->wait_start));
+       schedstat_set(se->wait_max, max(se->wait_max,
+                       rq_of(cfs_rq)->clock - se->wait_start));
 
        if (unlikely(se->load.weight != NICE_0_LOAD))
                delta_fair = calc_weighted(delta_fair, se->load.weight,
@@ -470,7 +471,7 @@ update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now)
        /*
         * We are starting a new run period:
         */
-       se->exec_start = now;
+       se->exec_start = rq_of(cfs_rq)->clock;
 }
 
 /*
@@ -545,7 +546,7 @@ enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now)
 
 #ifdef CONFIG_SCHEDSTATS
        if (se->sleep_start) {
-               u64 delta = now - se->sleep_start;
+               u64 delta = rq_of(cfs_rq)->clock - se->sleep_start;
 
                if ((s64)delta < 0)
                        delta = 0;
@@ -557,7 +558,7 @@ enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now)
                se->sum_sleep_runtime += delta;
        }
        if (se->block_start) {
-               u64 delta = now - se->block_start;
+               u64 delta = rq_of(cfs_rq)->clock - se->block_start;
 
                if ((s64)delta < 0)
                        delta = 0;
@@ -599,9 +600,9 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
                        struct task_struct *tsk = task_of(se);
 
                        if (tsk->state & TASK_INTERRUPTIBLE)
-                               se->sleep_start = now;
+                               se->sleep_start = rq_of(cfs_rq)->clock;
                        if (tsk->state & TASK_UNINTERRUPTIBLE)
-                               se->block_start = now;
+                               se->block_start = rq_of(cfs_rq)->clock;
                }
                cfs_rq->wait_runtime -= se->wait_runtime;
 #endif
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 5b559e8c8aa6559a40c276dd268a5dcd35975b15..5fbd87ad0f561a9f020f9846fd56214adef6e37a 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -15,14 +15,14 @@ static inline void update_curr_rt(struct rq *rq, u64 now)
        if (!task_has_rt_policy(curr))
                return;
 
-       delta_exec = now - curr->se.exec_start;
+       delta_exec = rq->clock - curr->se.exec_start;
        if (unlikely((s64)delta_exec < 0))
                delta_exec = 0;
 
        schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));
 
        curr->se.sum_exec_runtime += delta_exec;
-       curr->se.exec_start = now;
+       curr->se.exec_start = rq->clock;
 }
 
 static void
@@ -89,7 +89,7 @@ static struct task_struct *pick_next_task_rt(struct rq *rq, u64 now)
        queue = array->queue + idx;
        next = list_entry(queue->next, struct task_struct, run_list);
 
-       next->se.exec_start = now;
+       next->se.exec_start = rq->clock;
 
        return next;
 }