ext4: allocate ->s_blockgroup_lock separately
author Pekka Enberg <penberg@cs.helsinki.fi>
Sun, 15 Feb 2009 23:07:52 +0000 (18:07 -0500)
committer Theodore Ts'o <tytso@mit.edu>
Sun, 15 Feb 2009 23:07:52 +0000 (18:07 -0500)
As spotted by kmemtrace, struct ext4_sb_info is 17664 bytes on 64-bit
which makes it a very bad fit for SLAB allocators.  The culprit of the
wasted memory is ->s_blockgroup_lock, which can be as big as 16 KB when
NR_CPUS >= 32.
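
For reference, the 16 KB figure comes from the layout of struct blockgroup_lock,
which pads every per-group spinlock out to its own cacheline.  Below is a minimal
userspace model of that layout, not the kernel header itself; the NR_BG_LOCKS value
of 128 (the NR_CPUS >= 32 case) and the 128-byte cacheline padding are assumptions
chosen to match the worst case quoted above:

    /*
     * Userspace model of the kernel's struct blockgroup_lock
     * (include/linux/blockgroup_lock.h), used only to illustrate where
     * the 16 KB goes.  NR_BG_LOCKS and the padding width are assumptions.
     */
    #include <stdio.h>

    #define NR_BG_LOCKS     128     /* assumed worst case: NR_CPUS >= 32 */
    #define CACHELINE_BYTES 128     /* assumed SMP cacheline padding */

    struct bgl_lock {
            int lock;               /* stand-in for spinlock_t */
    } __attribute__((aligned(CACHELINE_BYTES)));

    struct blockgroup_lock {
            struct bgl_lock locks[NR_BG_LOCKS];
    };

    int main(void)
    {
            /* 128 slots * 128 bytes = 16384 bytes = 16 KB */
            printf("blockgroup_lock: %zu bytes\n",
                   sizeof(struct blockgroup_lock));
            return 0;
    }

Embedded in struct ext4_sb_info, this one member accounts for nearly all of the
17664 bytes reported by kmemtrace.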

To fix that, allocate ->s_blockgroup_lock, which fits nicely in an order-2
page in the worst case, separately.  This shrinks struct ext4_sb_info down
enough to fit into a 2 KB slab cache, so now we allocate 16 KB + 2 KB instead
of 32 KB, saving 14 KB of memory.
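
A quick back-of-the-envelope check of those numbers, again as a hedged userspace
sketch: the power-of-two rounding below models the generic kmalloc size classes
and is an assumption about the allocator configuration, not something taken from
this patch:

    /*
     * Rough model of how kmalloc() rounds a request up to the next
     * power-of-two general cache.  The starting cache size and the
     * rounding rule are assumptions for illustration only.
     */
    #include <stdio.h>

    static size_t kmalloc_rounded(size_t size)
    {
            size_t cache = 32;      /* assumed smallest general cache */

            while (cache < size)
                    cache *= 2;
            return cache;
    }

    int main(void)
    {
            size_t sb_info = 17664; /* sizeof(struct ext4_sb_info), changelog */
            size_t bg_lock = 16384; /* worst-case struct blockgroup_lock */

            /* before: one 17664-byte object rounds up to 32 KB */
            printf("before: %zu bytes\n", kmalloc_rounded(sb_info));

            /* after: the ~1.3 KB remainder rounds to 2 KB, plus 16 KB lock */
            printf("after:  %zu bytes\n",
                   kmalloc_rounded(sb_info - bg_lock) + bg_lock);
            return 0;
    }

This prints 32768 and 18432 bytes respectively, i.e. the 32 KB before and the
16 KB + 2 KB after that the changelog refers to, for a saving of 14 KB per
mounted filesystem.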

Acked-by: Andreas Dilger <adilger@sun.com>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: <linux-ext4@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
fs/ext4/ext4_sb.h
fs/ext4/super.c

fs/ext4/ext4_sb.h
index e318f486cc2438bced6e4cb5974032979b5ba89c..4e4d9cc3f40d855a0bcb3b98edb0c4ac8963cc62 100644
@@ -62,7 +62,7 @@ struct ext4_sb_info {
        struct percpu_counter s_freeinodes_counter;
        struct percpu_counter s_dirs_counter;
        struct percpu_counter s_dirtyblocks_counter;
-       struct blockgroup_lock s_blockgroup_lock;
+       struct blockgroup_lock *s_blockgroup_lock;
        struct proc_dir_entry *s_proc;
 
        /* Journaling */
@@ -149,7 +149,7 @@ struct ext4_sb_info {
 static inline spinlock_t *
 sb_bgl_lock(struct ext4_sb_info *sbi, unsigned int block_group)
 {
-       return bgl_lock_ptr(&sbi->s_blockgroup_lock, block_group);
+       return bgl_lock_ptr(sbi->s_blockgroup_lock, block_group);
 }
 
 #endif /* _EXT4_SB */
fs/ext4/super.c
index f7371a6a923d359480108517c1a3358276a42eb7..a3768709ce05f109ef749b421a3cc1a2a727bac4 100644
@@ -615,6 +615,7 @@ static void ext4_put_super(struct super_block *sb)
                ext4_blkdev_remove(sbi);
        }
        sb->s_fs_info = NULL;
+       kfree(sbi->s_blockgroup_lock);
        kfree(sbi);
        return;
 }
@@ -2021,6 +2022,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
        sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
        if (!sbi)
                return -ENOMEM;
+
+       sbi->s_blockgroup_lock =
+               kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
+       if (!sbi->s_blockgroup_lock) {
+               kfree(sbi);
+               return -ENOMEM;
+       }
        sb->s_fs_info = sbi;
        sbi->s_mount_opt = 0;
        sbi->s_resuid = EXT4_DEF_RESUID;
@@ -2332,7 +2340,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
                                 &sbi->s_inode_readahead_blks);
 #endif
 
-       bgl_lock_init(&sbi->s_blockgroup_lock);
+       bgl_lock_init(sbi->s_blockgroup_lock);
 
        for (i = 0; i < db_count; i++) {
                block = descriptor_loc(sb, logical_sb_block, i);