[PATCH] slab: Remove SLAB_NO_REAP option
author     Christoph Lameter <clameter@engr.sgi.com>
           Wed, 22 Mar 2006 08:08:15 +0000 (00:08 -0800)
committer  Linus Torvalds <torvalds@g5.osdl.org>
           Wed, 22 Mar 2006 15:53:59 +0000 (07:53 -0800)
SLAB_NO_REAP is documented as an option that will cause this slab not to be
reaped under memory pressure.  However, that is not what happens.  The only
thing that SLAB_NO_REAP controls at the moment is the reclaim of the unused
slab elements that were allocated in batch in cache_reap().  cache_reap()
runs every few seconds, independently of memory pressure.
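
For illustration, here is a condensed sketch of that per-cache pass
(a hypothetical helper, simplified from the 2.6.16-era mm/slab.c loop
that the last hunk below modifies; locking and NUMA details trimmed).
Note that nothing in it consults memory pressure:

    /*
     * Sketch only: trim up to 1/5th of the slack on this cache's
     * free list, on every timer tick that reaches the cache.
     */
    static void reap_one_cache(struct kmem_cache *searchp)
    {
            struct kmem_list3 *l3 = searchp->nodelists[numa_node_id()];
            int tofree = (l3->free_limit + 5 * searchp->num - 1) /
                         (5 * searchp->num);

            while (tofree-- > 0) {
                    struct slab *slabp;

                    spin_lock_irq(&l3->list_lock);
                    if (list_empty(&l3->slabs_free)) {
                            spin_unlock_irq(&l3->list_lock);
                            break;
                    }
                    slabp = list_entry(l3->slabs_free.prev,
                                       struct slab, list);
                    list_del(&slabp->list);
                    l3->free_objects -= searchp->num;
                    spin_unlock_irq(&l3->list_lock);
                    /* hand the underlying pages back to the page allocator */
                    slab_destroy(searchp, slabp);
            }
    }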

Could we remove the whole thing?  It's only used by three slabs anyway, and
I cannot find a reason for having this option.

There is an additional problem with SLAB_NO_REAP.  If it is set, the
recovery of objects from alien caches is switched off.  Objects not freed on
the same node where they were initially allocated will only be reused if a
certain number of objects accumulates from one alien node (not very likely)
or if the cache is explicitly shrunk.  (Strangely, __cache_shrink does not
check for SLAB_NO_REAP.)
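
A sketch of the remote-free path makes that concrete (simplified from
the 2.6.16-era NUMA code in mm/slab.c; the helper name and home_node
parameter are illustrative).  An object freed away from its home node
is parked in a small per-node alien array; with the periodic reaper
skipped, the overflow below was the only automatic way it travelled home:

    /*
     * Sketch only: free objp, whose home is home_node, from another
     * node.  The object sits in the alien array until it fills up.
     */
    static void free_alien_obj(struct kmem_cache *cachep, void *objp,
                               int home_node)
    {
            struct kmem_list3 *l3 = cachep->nodelists[numa_node_id()];
            struct array_cache *alien = l3->alien[home_node];

            spin_lock(&alien->lock);
            if (unlikely(alien->avail == alien->limit))
                    /* array full: flush all entries back to home_node */
                    __drain_alien_cache(cachep, alien, home_node);
            alien->entry[alien->avail++] = objp;
            spin_unlock(&alien->lock);
    }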

Getting rid of SLAB_NO_REAP fixes the problems with alien cache freeing.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Manfred Spraul <manfred@colorfullife.com>
Cc: Mark Fasheh <mark.fasheh@oracle.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
drivers/scsi/iscsi_tcp.c
fs/ocfs2/super.c
include/linux/slab.h
mm/slab.c

diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index ff79e68b347c5c15828884991ff03a74902099fe..7b82ff090d42026cc56cb9bc04ad55b396320b82 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -3639,7 +3639,7 @@ iscsi_tcp_init(void)
 
        taskcache = kmem_cache_create("iscsi_taskcache",
                        sizeof(struct iscsi_data_task), 0,
-                       SLAB_HWCACHE_ALIGN | SLAB_NO_REAP, NULL, NULL);
+                       SLAB_HWCACHE_ALIGN, NULL, NULL);
        if (!taskcache)
                return -ENOMEM;
 
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 8dd3aafec49900145f4184954403f24fce1dab6b..09e1c57a86a08d7bd22883058ab439b923593e03 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -959,7 +959,7 @@ static int ocfs2_initialize_mem_caches(void)
        ocfs2_lock_cache = kmem_cache_create("ocfs2_lock",
                                             sizeof(struct ocfs2_journal_lock),
                                             0,
-                                            SLAB_NO_REAP|SLAB_HWCACHE_ALIGN,
+                                            SLAB_HWCACHE_ALIGN,
                                             NULL, NULL);
        if (!ocfs2_lock_cache)
                return -ENOMEM;
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 38bed95dda7aaa20a348cc83bc7a87e3c70edb3f..2b28c849d75acf5dcbd7085c76256ec1054372a2 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -38,7 +38,6 @@ typedef struct kmem_cache kmem_cache_t;
 #define        SLAB_DEBUG_INITIAL      0x00000200UL    /* Call constructor (as verifier) */
 #define        SLAB_RED_ZONE           0x00000400UL    /* Red zone objs in a cache */
 #define        SLAB_POISON             0x00000800UL    /* Poison objects */
-#define        SLAB_NO_REAP            0x00001000UL    /* never reap from the cache */
 #define        SLAB_HWCACHE_ALIGN      0x00002000UL    /* align objs on a h/w cache lines */
 #define SLAB_CACHE_DMA         0x00004000UL    /* use GFP_DMA memory */
 #define SLAB_MUST_HWCACHE_ALIGN        0x00008000UL    /* force alignment */
diff --git a/mm/slab.c b/mm/slab.c
index 5c2574989834a3580b981800bb7b3a2c428d67cc..24235506b2a0b59ad54b592d71de189ec852e2d7 100644
--- a/mm/slab.c
+++ b/mm/slab.c
 #if DEBUG
 # define CREATE_MASK   (SLAB_DEBUG_INITIAL | SLAB_RED_ZONE | \
                         SLAB_POISON | SLAB_HWCACHE_ALIGN | \
-                        SLAB_NO_REAP | SLAB_CACHE_DMA | \
+                        SLAB_CACHE_DMA | \
                         SLAB_MUST_HWCACHE_ALIGN | SLAB_STORE_USER | \
                         SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
                         SLAB_DESTROY_BY_RCU)
 #else
-# define CREATE_MASK   (SLAB_HWCACHE_ALIGN | SLAB_NO_REAP | \
+# define CREATE_MASK   (SLAB_HWCACHE_ALIGN | \
                         SLAB_CACHE_DMA | SLAB_MUST_HWCACHE_ALIGN | \
                         SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
                         SLAB_DESTROY_BY_RCU)
@@ -662,7 +662,6 @@ static struct kmem_cache cache_cache = {
        .limit = BOOT_CPUCACHE_ENTRIES,
        .shared = 1,
        .buffer_size = sizeof(struct kmem_cache),
-       .flags = SLAB_NO_REAP,
        .name = "kmem_cache",
 #if DEBUG
        .obj_size = sizeof(struct kmem_cache),
@@ -1848,9 +1847,6 @@ static void setup_cpu_cache(struct kmem_cache *cachep)
  * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
  * for buffer overruns.
  *
- * %SLAB_NO_REAP - Don't automatically reap this cache when we're under
- * memory pressure.
- *
  * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
  * cacheline.  This can be beneficial if you're counting cycles as closely
  * as davem.
@@ -3584,10 +3580,6 @@ static void cache_reap(void *unused)
                struct slab *slabp;
 
                searchp = list_entry(walk, struct kmem_cache, next);
-
-               if (searchp->flags & SLAB_NO_REAP)
-                       goto next;
-
                check_irq_on();
 
                l3 = searchp->nodelists[numa_node_id()];
@@ -3635,7 +3627,6 @@ static void cache_reap(void *unused)
                } while (--tofree > 0);
 next_unlock:
                spin_unlock_irq(&l3->list_lock);
-next:
                cond_resched();
        }
        check_irq_on();