x86: round_jiffies() for i386 and x86-64 non-critical/corrected MCE polling
author		Venki Pallipadi <venkatesh.pallipadi@intel.com>
		Sat, 21 Jul 2007 15:10:44 +0000 (17:10 +0200)
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>
		Sun, 22 Jul 2007 01:37:10 +0000 (18:37 -0700)
This helps to reduce the frequency at which the CPU must be taken out of a
lower-power state.

Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Acked-by: Tim Hockin <thockin@hockin.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
arch/i386/kernel/cpu/mcheck/non-fatal.c
arch/x86_64/kernel/mce.c

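For context on what the change buys: round_jiffies_relative() rounds a relative timeout so that the absolute expiry lands on a whole-second boundary; timers rounded this way tend to fire together, so an otherwise idle CPU is woken once for the batch rather than once per timer. Below is a minimal sketch of the pattern applied in these hunks (not part of the patch itself; poll_fn, poll_work and POLL_RATE are illustrative names, with POLL_RATE chosen like non-fatal.c's MCE_RATE):

#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

#define POLL_RATE	(15 * HZ)	/* poll every 15 seconds, like MCE_RATE */

static void poll_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(poll_work, poll_fn);

static void poll_fn(struct work_struct *work)
{
	/* ... do the periodic, non-critical check here ... */

	/* round the relative delay so the expiry hits a full second */
	schedule_delayed_work(&poll_work, round_jiffies_relative(POLL_RATE));
}

static int __init poll_init(void)
{
	/* initial kick, rounded the same way as each reschedule */
	schedule_delayed_work(&poll_work, round_jiffies_relative(POLL_RATE));
	return 0;
}
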
diff --git a/arch/i386/kernel/cpu/mcheck/non-fatal.c b/arch/i386/kernel/cpu/mcheck/non-fatal.c
index 6b5d3518a1c09b3532871fc337419b9477dcbc0c..bf39409b383808897bb3d2495fa2b06988172264 100644
--- a/arch/i386/kernel/cpu/mcheck/non-fatal.c
+++ b/arch/i386/kernel/cpu/mcheck/non-fatal.c
@@ -57,7 +57,7 @@ static DECLARE_DELAYED_WORK(mce_work, mce_work_fn);
 static void mce_work_fn(struct work_struct *work)
 { 
        on_each_cpu(mce_checkregs, NULL, 1, 1);
-       schedule_delayed_work(&mce_work, MCE_RATE);
+       schedule_delayed_work(&mce_work, round_jiffies_relative(MCE_RATE));
 } 
 
 static int __init init_nonfatal_mce_checker(void)
@@ -82,7 +82,7 @@ static int __init init_nonfatal_mce_checker(void)
        /*
         * Check for non-fatal errors every MCE_RATE s
         */
-       schedule_delayed_work(&mce_work, MCE_RATE);
+       schedule_delayed_work(&mce_work, round_jiffies_relative(MCE_RATE));
        printk(KERN_INFO "Machine check exception polling timer started.\n");
        return 0;
 }
diff --git a/arch/x86_64/kernel/mce.c b/arch/x86_64/kernel/mce.c
index 7c8ab423abe362e624401d8cf43ae740954c77bd..4d8450ee3635e8ee587f5f639f7862b5a5155a74 100644
--- a/arch/x86_64/kernel/mce.c
+++ b/arch/x86_64/kernel/mce.c
@@ -375,7 +375,8 @@ static void mcheck_timer(struct work_struct *work)
        if (mce_notify_user()) {
                next_interval = max(next_interval/2, HZ/100);
        } else {
-               next_interval = min(next_interval*2, check_interval*HZ);
+               next_interval = min(next_interval*2,
+                               (int)round_jiffies_relative(check_interval*HZ));
        }
 
        schedule_delayed_work(&mcheck_work, next_interval);
@@ -428,7 +429,8 @@ static __init int periodic_mcheck_init(void)
 { 
        next_interval = check_interval * HZ;
        if (next_interval)
-               schedule_delayed_work(&mcheck_work, next_interval);
+               schedule_delayed_work(&mcheck_work,
+                                     round_jiffies_relative(next_interval));
        idle_notifier_register(&mce_idle_notifier);
        return 0;
 } 
@@ -720,7 +722,8 @@ static void mce_restart(void)
        on_each_cpu(mce_init, NULL, 1, 1);       
        next_interval = check_interval * HZ;
        if (next_interval)
-               schedule_delayed_work(&mcheck_work, next_interval);
+               schedule_delayed_work(&mcheck_work,
+                                     round_jiffies_relative(next_interval));
 }
 
 static struct sysdev_class mce_sysclass = {
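
The x86_64 hunks also touch mce.c's adaptive poll period: mcheck_timer() halves next_interval (down to a floor of HZ/100, i.e. 10 ms) whenever mce_notify_user() reports pending events, doubles it back up otherwise, and the patch rounds the upper bound of that back-off. A hedged sketch of just that arithmetic, with made-up names (poll_fn, poll_work, errors_pending) standing in for the real ones:

#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>	/* min()/max() */
#include <linux/timer.h>
#include <linux/workqueue.h>

static int check_interval = 5 * 60;	/* seconds between polls when quiet, as in mce.c */
static int next_interval;		/* current delay, in jiffies */

static void poll_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(poll_work, poll_fn);

static void poll_fn(struct work_struct *work)
{
	int errors_pending = 0;		/* stand-in for mce_notify_user() */

	if (errors_pending)
		/* errors seen: poll more often, but never faster than 10 ms */
		next_interval = max(next_interval / 2, HZ / 100);
	else
		/* quiet: back off again, capping at a second-aligned maximum */
		next_interval = min(next_interval * 2,
				    (int)round_jiffies_relative(check_interval * HZ));

	schedule_delayed_work(&poll_work, next_interval);
}

static int __init poll_init(void)
{
	next_interval = check_interval * HZ;
	schedule_delayed_work(&poll_work, round_jiffies_relative(next_interval));
	return 0;
}

Note that only the slow-path cap and the initial scheduling are rounded; the short, error-driven intervals are left untouched, so bursts of corrected errors are still polled promptly while the steady-state wakeup lands on a second boundary.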