[PATCH] vmscan: skip reclaim_mapped determination if we do not swap
author	Christoph Lameter <clameter@engr.sgi.com>
	Sun, 12 Feb 2006 01:55:55 +0000 (17:55 -0800)
committer	Linus Torvalds <torvalds@g5.osdl.org>
	Sun, 12 Feb 2006 05:41:11 +0000 (21:41 -0800)
This puts the variables and the way to get to reclaim_mapped in one block.
It also allows zone_reclaim or other things to skip the determination (maybe
this whole block of code does not belong in refill_inactive_zone()?)

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
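
For context, the block this patch relocates computes a swap_tendency score from the
zone's reclaim distress, the fraction of memory that is mapped, and vm_swappiness,
and starts reclaiming mapped pages once that score reaches 100.  Below is a minimal
user-space sketch of that heuristic; the function name should_reclaim_mapped and the
plain parameters are illustrative only (in the kernel these inputs come from struct
scan_control, the zone and the vm_swappiness sysctl):

    #include <stdio.h>

    /*
     * Illustrative sketch of the heuristic this patch moves under the
     * sc->may_swap check.  Names and parameters are made up for the
     * example, not kernel interfaces.
     */
    static int should_reclaim_mapped(int may_swap, int prev_priority,
                                     long nr_mapped, long total_memory,
                                     long vm_swappiness)
    {
            long distress, mapped_ratio, swap_tendency;

            if (!may_swap)
                    return 0;       /* skip the whole determination */

            /* 0 -> no reclaim trouble, 100 -> great trouble */
            distress = 100 >> prev_priority;

            /* percentage of total memory that is mapped */
            mapped_ratio = (nr_mapped * 100) / total_memory;

            swap_tendency = mapped_ratio / 2 + distress + vm_swappiness;

            return swap_tendency >= 100;
    }

    int main(void)
    {
            /* 40% mapped, prev_priority 3, default vm_swappiness of 60 */
            printf("%d\n", should_reclaim_mapped(1, 3, 400, 1000, 60));
            return 0;
    }

With those sample numbers the score is 40/2 + 12 + 60 = 92, so the sketch prints 0;
only under higher distress or with more mapped memory does it cross the 100 threshold.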
mm/vmscan.c

index 58ed5125b1a7f0db538d5d562d35fc5692263a8a..1838c15ca4fd8b3eb0ee70739ea63f765fb4689e 100644
@@ -1195,9 +1195,47 @@ refill_inactive_zone(struct zone *zone, struct scan_control *sc)
        struct page *page;
        struct pagevec pvec;
        int reclaim_mapped = 0;
-       long mapped_ratio;
-       long distress;
-       long swap_tendency;
+
+       if (unlikely(sc->may_swap)) {
+               long mapped_ratio;
+               long distress;
+               long swap_tendency;
+
+               /*
+                * `distress' is a measure of how much trouble we're having
+                * reclaiming pages.  0 -> no problems.  100 -> great trouble.
+                */
+               distress = 100 >> zone->prev_priority;
+
+               /*
+                * The point of this algorithm is to decide when to start
+                * reclaiming mapped memory instead of just pagecache.  Work
+                * out how much memory is mapped.
+                */
+               mapped_ratio = (sc->nr_mapped * 100) / total_memory;
+
+               /*
+                * Now decide how much we really want to unmap some pages.  The
+                * mapped ratio is downgraded - just because there's a lot of
+                * mapped memory doesn't necessarily mean that page reclaim
+                * isn't succeeding.
+                *
+                * The distress ratio is important - we don't want to start
+                * going oom.
+                *
+                * A 100% value of vm_swappiness overrides this algorithm
+                * altogether.
+                */
+               swap_tendency = mapped_ratio / 2 + distress + vm_swappiness;
+
+               /*
+                * Now use this metric to decide whether to start moving mapped
+                * memory onto the inactive list.
+                */
+               if (swap_tendency >= 100)
+                       reclaim_mapped = 1;
+       }
 
        lru_add_drain();
        spin_lock_irq(&zone->lru_lock);
@@ -1207,37 +1245,6 @@ refill_inactive_zone(struct zone *zone, struct scan_control *sc)
        zone->nr_active -= pgmoved;
        spin_unlock_irq(&zone->lru_lock);
 
-       /*
-        * `distress' is a measure of how much trouble we're having reclaiming
-        * pages.  0 -> no problems.  100 -> great trouble.
-        */
-       distress = 100 >> zone->prev_priority;
-
-       /*
-        * The point of this algorithm is to decide when to start reclaiming
-        * mapped memory instead of just pagecache.  Work out how much memory
-        * is mapped.
-        */
-       mapped_ratio = (sc->nr_mapped * 100) / total_memory;
-
-       /*
-        * Now decide how much we really want to unmap some pages.  The mapped
-        * ratio is downgraded - just because there's a lot of mapped memory
-        * doesn't necessarily mean that page reclaim isn't succeeding.
-        *
-        * The distress ratio is important - we don't want to start going oom.
-        *
-        * A 100% value of vm_swappiness overrides this algorithm altogether.
-        */
-       swap_tendency = mapped_ratio / 2 + distress + vm_swappiness;
-
-       /*
-        * Now use this metric to decide whether to start moving mapped memory
-        * onto the inactive list.
-        */
-       if (swap_tendency >= 100 && sc->may_swap)
-               reclaim_mapped = 1;
-
        while (!list_empty(&l_hold)) {
                cond_resched();
                page = lru_to_page(&l_hold);