sched: fix rq->clock overflows detection with CONFIG_NO_HZ
author     Guillaume Chazarain <guichaz@yahoo.fr>
           Sat, 19 Apr 2008 17:44:57 +0000 (19:44 +0200)
committer  Ingo Molnar <mingo@elte.hu>
           Sat, 19 Apr 2008 17:44:57 +0000 (19:44 +0200)
When using CONFIG_NO_HZ, rq->tick_timestamp is not updated every TICK_NSEC.
We check that the number of skipped ticks matches the clock jump seen in
__update_rq_clock().

Signed-off-by: Guillaume Chazarain <guichaz@yahoo.fr>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
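
As a rough, self-contained illustration of the changed bound (a userspace sketch assuming HZ=1000, not the kernel code itself): __update_rq_clock() used to clamp any forward jump of rq->clock to one TICK_NSEC past rq->tick_timestamp; with this patch, on a CPU where NO_HZ is active the bound grows with the number of jiffies elapsed since the last scheduler tick seen, plus two ticks of slack.

    #include <stdio.h>
    #include <stdint.h>

    #define TICK_NSEC 1000000ULL            /* assumption: HZ=1000, so one tick = 1 ms */

    /* Mirrors max_skipped_ticks(): jiffies elapsed since last_tick_seen, plus 2 of slack. */
    static uint64_t max_jump(uint64_t jiffies, uint64_t last_tick_seen, int nohz_on)
    {
            uint64_t ticks = nohz_on ? jiffies - last_tick_seen + 2 : 1;

            return ticks * TICK_NSEC;
    }

    int main(void)
    {
            uint64_t tick_timestamp = 0;    /* stand-in for rq->tick_timestamp */

            /* Old behaviour: forward jumps clamped to one tick past the last timestamp. */
            printf("old bound: %llu ns\n",
                   (unsigned long long)(tick_timestamp + 1 * TICK_NSEC));

            /* New behaviour with 4 ticks skipped while idle in NO_HZ mode. */
            printf("new bound: %llu ns\n",
                   (unsigned long long)(tick_timestamp + max_jump(104, 100, 1)));
            return 0;
    }

Only the "+2" slack and the jiffies-based scaling come from the patch; the HZ value, the helper name max_jump(), and the printed numbers are illustrative.
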
kernel/sched.c

diff --git a/kernel/sched.c b/kernel/sched.c
index 7fe334ead4f945b30a86986b0ff4563b0253f25f..d8456a9ac9af8f1c9ca9ef8747242d21f46a93d2 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -397,6 +397,7 @@ struct rq {
        unsigned long cpu_load[CPU_LOAD_IDX_MAX];
        unsigned char idle_at_tick;
 #ifdef CONFIG_NO_HZ
+       unsigned long last_tick_seen;
        unsigned char in_nohz_recently;
 #endif
        /* capture load from *all* tasks on this cpu: */
@@ -500,6 +501,32 @@ static inline int cpu_of(struct rq *rq)
 #endif
 }
 
+#ifdef CONFIG_NO_HZ
+static inline bool nohz_on(int cpu)
+{
+       return tick_get_tick_sched(cpu)->nohz_mode != NOHZ_MODE_INACTIVE;
+}
+
+static inline u64 max_skipped_ticks(struct rq *rq)
+{
+       return nohz_on(cpu_of(rq)) ? jiffies - rq->last_tick_seen + 2 : 1;
+}
+
+static inline void update_last_tick_seen(struct rq *rq)
+{
+       rq->last_tick_seen = jiffies;
+}
+#else
+static inline u64 max_skipped_ticks(struct rq *rq)
+{
+       return 1;
+}
+
+static inline void update_last_tick_seen(struct rq *rq)
+{
+}
+#endif
+
 /*
  * Update the per-runqueue clock, as finegrained as the platform can give
  * us, but without assuming monotonicity, etc.:
@@ -524,9 +551,12 @@ static void __update_rq_clock(struct rq *rq)
                /*
                 * Catch too large forward jumps too:
                 */
-               if (unlikely(clock + delta > rq->tick_timestamp + TICK_NSEC)) {
-                       if (clock < rq->tick_timestamp + TICK_NSEC)
-                               clock = rq->tick_timestamp + TICK_NSEC;
+               u64 max_jump = max_skipped_ticks(rq) * TICK_NSEC;
+               u64 max_time = rq->tick_timestamp + max_jump;
+
+               if (unlikely(clock + delta > max_time)) {
+                       if (clock < max_time)
+                               clock = max_time;
                        else
                                clock++;
                        rq->clock_overflows++;
@@ -3812,6 +3842,7 @@ void scheduler_tick(void)
                rq->clock_underflows++;
        }
        rq->tick_timestamp = rq->clock;
+       update_last_tick_seen(rq);
        update_cpu_load(rq);
        curr->sched_class->task_tick(rq, curr, 0);
        update_sched_rt_period(rq);
@@ -7261,6 +7292,7 @@ void __init sched_init(void)
                lockdep_set_class(&rq->lock, &rq->rq_lock_key);
                rq->nr_running = 0;
                rq->clock = 1;
+               update_last_tick_seen(rq);
                init_cfs_rq(&rq->cfs, rq);
                init_rt_rq(&rq->rt, rq);
 #ifdef CONFIG_FAIR_GROUP_SCHED