pilppa.com Git - linux-2.6-omap-h63xx.git/commitdiff
slob: reduce list scanning
author Matt Mackall <mpm@selenic.com>
Sat, 21 Jul 2007 11:37:40 +0000 (04:37 -0700)
committer Linus Torvalds <torvalds@woody.linux-foundation.org>
Sun, 22 Jul 2007 00:49:16 +0000 (17:49 -0700)
The version of SLOB in -mm always scans its free list from the beginning,
which results in small allocations and free segments clustering at the
beginning of the list over time.  This causes the average search to scan
over a large stretch at the beginning on each allocation.

By starting each page search where the last one left off, we evenly
distribute the allocations and greatly shorten the average search.
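
As a rough, standalone illustration of the scan-length difference (not SLOB
code; struct toy_page, toy_alloc, NPAGES and PAGE_UNITS are invented names),
the sketch below fills a set of pages with small allocations and counts how
many pages each policy examines: first-fit restarts at the front of the list
on every allocation, next-fit resumes where the previous search stopped.

/*
 * Toy userspace model, for illustration only.  Each "page" just tracks how
 * many free units remain; the allocator scans for the first page with
 * enough room and counts how many pages it looked at along the way.
 */
#include <stdio.h>

#define NPAGES		64
#define PAGE_UNITS	16

struct toy_page { int units; };

static struct toy_page pages[NPAGES];
static int start;		/* next-fit: where the last search ended */
static long scanned;		/* pages examined across all allocations */

static int toy_alloc(int size, int next_fit)
{
	int i, idx;

	for (i = 0; i < NPAGES; i++) {
		idx = next_fit ? (start + i) % NPAGES : i;
		scanned++;
		if (pages[idx].units < size)
			continue;		/* not enough room on this page */
		pages[idx].units -= size;
		if (next_fit)
			start = idx;		/* resume the next search here */
		return idx;
	}
	return -1;				/* all pages full */
}

static void run(int next_fit)
{
	int i;

	for (i = 0; i < NPAGES; i++)
		pages[i].units = PAGE_UNITS;
	start = 0;
	scanned = 0;

	/* Eight two-unit allocations per page; fill every page. */
	for (i = 0; i < NPAGES * (PAGE_UNITS / 2); i++)
		toy_alloc(2, next_fit);

	printf("%s: %ld pages examined\n",
	       next_fit ? "next-fit " : "first-fit", scanned);
}

int main(void)
{
	run(0);		/* always restart the scan at the front of the list */
	run(1);		/* resume where the previous search stopped */
	return 0;
}

With uniform small allocations, first-fit re-walks every already-full page at
the front on each allocation, while next-fit examines only one or two pages.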

Without this patch, kernel compiles on a 1.5G machine spend a large amount of
system time on list scanning.  With this patch, compile times are within a few
seconds of a SLAB kernel's, with no notable change in system time.

Signed-off-by: Matt Mackall <mpm@selenic.com>
Cc: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/slob.c

index d50920ecc02bed44acc1dd3cdc2deed467b6b262..ec33fcdc852e2c72ecc95194ada98bf702d3a14a 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -293,6 +293,7 @@ static void *slob_page_alloc(struct slob_page *sp, size_t size, int align)
 static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
 {
        struct slob_page *sp;
+       struct list_head *prev;
        slob_t *b = NULL;
        unsigned long flags;
 
@@ -307,12 +308,22 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
                if (node != -1 && page_to_nid(&sp->page) != node)
                        continue;
 #endif
+               /* Enough room on this page? */
+               if (sp->units < SLOB_UNITS(size))
+                       continue;
 
-               if (sp->units >= SLOB_UNITS(size)) {
-                       b = slob_page_alloc(sp, size, align);
-                       if (b)
-                               break;
-               }
+               /* Attempt to alloc */
+               prev = sp->list.prev;
+               b = slob_page_alloc(sp, size, align);
+               if (!b)
+                       continue;
+
+               /* Improve fragment distribution and reduce our average
+                * search time by starting our next search here. (see
+                * Knuth vol 1, sec 2.5, pg 449) */
+               if (free_slob_pages.next != prev->next)
+                       list_move_tail(&free_slob_pages, prev->next);
+               break;
        }
        spin_unlock_irqrestore(&slob_lock, flags);
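
The rotation that implements the "start where we left off" behaviour is the
list_move_tail() at the end of the loop: the free_slob_pages list head itself
is unlinked and re-inserted just before prev->next, so the next
list_for_each_entry() walk begins at the page we just allocated from (or the
following page, if the allocation used it up and it was dropped from the free
list).  A toy stand-in for the circular list (struct toy_list, toy_add_tail,
toy_move_tail are invented names, not the kernel's <linux/list.h>) shows the
effect on scan order:

#include <stdio.h>

struct toy_list {
	struct toy_list *next, *prev;
	int id;				/* page number, for printing only */
};

static void toy_add_tail(struct toy_list *n, struct toy_list *head)
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

static void toy_del(struct toy_list *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

/* Same behaviour as list_move_tail(): unlink @n, re-insert it before @head. */
static void toy_move_tail(struct toy_list *n, struct toy_list *head)
{
	toy_del(n);
	toy_add_tail(n, head);
}

static void walk(const char *when, struct toy_list *head)
{
	struct toy_list *p;

	printf("%s:", when);
	for (p = head->next; p != head; p = p->next)
		printf(" %d", p->id);
	printf("\n");
}

int main(void)
{
	struct toy_list head = { &head, &head, -1 };
	struct toy_list pages[4];
	int i;

	for (i = 0; i < 4; i++) {
		pages[i].id = i;
		toy_add_tail(&pages[i], &head);
	}
	walk("before", &head);		/* scan order: 0 1 2 3 */

	/*
	 * Pretend we just allocated from page 2: prev is page 1, so
	 * prev->next is page 2.  Rotating the head there makes the next
	 * scan start at page 2, as list_move_tail(&free_slob_pages,
	 * prev->next) does in the patch.
	 */
	toy_move_tail(&head, pages[1].next);
	walk("after ", &head);		/* scan order: 2 3 0 1 */
	return 0;
}

One nice property of rotating the head rather than keeping a separate cursor
pointer is that there is no saved pointer to invalidate when a page is later
removed from the free list.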