[PATCH] zoned vm counters: conversion of nr_pagetables to per zone counter
author		Christoph Lameter <clameter@sgi.com>
		Fri, 30 Jun 2006 08:55:38 +0000 (01:55 -0700)
committer	Linus Torvalds <torvalds@g5.osdl.org>
		Fri, 30 Jun 2006 18:25:35 +0000 (11:25 -0700)
Conversion of nr_page_table_pages to a per zone counter

[akpm@osdl.org: bugfix]
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
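
Every call site below follows the same pattern: the global page_state field nr_page_table_pages is dropped, and the page-table page is instead accounted against its own zone through the new NR_PAGETABLE vmstat item. A minimal sketch of that idiom, using only the helpers that appear in the hunks (the wrapper account_pte_page is illustrative and not part of the patch):

#include <linux/mm.h>
#include <linux/vmstat.h>

/*
 * Illustrative only: count a page-table page against the zone of the
 * struct page that backs it, replacing the old global counter updates.
 */
static void account_pte_page(struct page *pte_page, int allocated)
{
	if (allocated)
		inc_zone_page_state(pte_page, NR_PAGETABLE);	/* was inc_page_state(nr_page_table_pages) */
	else
		dec_zone_page_state(pte_page, NR_PAGETABLE);	/* was dec_page_state(nr_page_table_pages) */
}

Readers of the counter sum the per-zone values with global_page_state(NR_PAGETABLE) (see the show_mem(), /proc/meminfo and show_free_areas() hunks) or per node with node_page_state(nid, NR_PAGETABLE) (drivers/base/node.c).
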
arch/arm/mm/mm-armv.c
arch/i386/mm/pgtable.c
arch/um/kernel/skas/mmu.c
drivers/base/node.c
fs/proc/proc_misc.c
include/linux/mmzone.h
include/linux/vmstat.h
mm/memory.c
mm/page_alloc.c
mm/vmstat.c

diff --git a/arch/arm/mm/mm-armv.c b/arch/arm/mm/mm-armv.c
index 95273de4f772515780e2eac97ed2ae892ac52a0d..931be1798122f1e9d183dfed98b15ac766c48f3c 100644
@@ -227,7 +227,7 @@ void free_pgd_slow(pgd_t *pgd)
 
        pte = pmd_page(*pmd);
        pmd_clear(pmd);
-       dec_page_state(nr_page_table_pages);
+       dec_zone_page_state(virt_to_page((unsigned long *)pgd), NR_PAGETABLE);
        pte_lock_deinit(pte);
        pte_free(pte);
        pmd_free(pmd);
diff --git a/arch/i386/mm/pgtable.c b/arch/i386/mm/pgtable.c
index 73ac3599a0eac5d207caea768be6fd1e04f4fafd..0bb1e5c134422b6e0eff6a3f4118a3ed05d5d6b9 100644
@@ -63,7 +63,8 @@ void show_mem(void)
        printk(KERN_INFO "%lu pages writeback\n", ps.nr_writeback);
        printk(KERN_INFO "%lu pages mapped\n", global_page_state(NR_FILE_MAPPED));
        printk(KERN_INFO "%lu pages slab\n", global_page_state(NR_SLAB));
-       printk(KERN_INFO "%lu pages pagetables\n", ps.nr_page_table_pages);
+       printk(KERN_INFO "%lu pages pagetables\n",
+                                       global_page_state(NR_PAGETABLE));
 }
 
 /*
diff --git a/arch/um/kernel/skas/mmu.c b/arch/um/kernel/skas/mmu.c
index c5c9885a82979f70c2a66a1753ef683a0114b932..624ca238d1fd3d753f261c0bb368128570976a96 100644
@@ -152,7 +152,7 @@ void destroy_context_skas(struct mm_struct *mm)
                free_page(mmu->id.stack);
                pte_lock_deinit(virt_to_page(mmu->last_page_table));
                pte_free_kernel((pte_t *) mmu->last_page_table);
-                dec_page_state(nr_page_table_pages);
+               dec_zone_page_state(virt_to_page(mmu->last_page_table), NR_PAGETABLE);
 #ifdef CONFIG_3_LEVEL_PGTABLES
                pmd_free((pmd_t *) mmu->last_pmd);
 #endif
diff --git a/drivers/base/node.c b/drivers/base/node.c
index db116a8791c88a90a792642b86331973dfc47e12..c22fb67ec50c04fe13975277724d143c30faa831 100644
@@ -70,6 +70,7 @@ static ssize_t node_read_meminfo(struct sys_device * dev, char * buf)
                       "Node %d FilePages:    %8lu kB\n"
                       "Node %d Mapped:       %8lu kB\n"
                       "Node %d AnonPages:    %8lu kB\n"
+                      "Node %d PageTables:   %8lu kB\n"
                       "Node %d Slab:         %8lu kB\n",
                       nid, K(i.totalram),
                       nid, K(i.freeram),
@@ -85,6 +86,7 @@ static ssize_t node_read_meminfo(struct sys_device * dev, char * buf)
                       nid, K(node_page_state(nid, NR_FILE_PAGES)),
                       nid, K(node_page_state(nid, NR_FILE_MAPPED)),
                       nid, K(node_page_state(nid, NR_ANON_PAGES)),
+                      nid, K(node_page_state(nid, NR_PAGETABLE)),
                       nid, K(node_page_state(nid, NR_SLAB)));
        n += hugetlb_report_node_meminfo(nid, buf + n);
        return n;
diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c
index 16aaf7187bb3225ff50027638847605f1ca40f02..0eae68f8421069882f2057f7d352f01f64170b6a 100644
@@ -171,9 +171,9 @@ static int meminfo_read_proc(char *page, char **start, off_t off,
                "AnonPages:    %8lu kB\n"
                "Mapped:       %8lu kB\n"
                "Slab:         %8lu kB\n"
+               "PageTables:   %8lu kB\n"
                "CommitLimit:  %8lu kB\n"
                "Committed_AS: %8lu kB\n"
-               "PageTables:   %8lu kB\n"
                "VmallocTotal: %8lu kB\n"
                "VmallocUsed:  %8lu kB\n"
                "VmallocChunk: %8lu kB\n",
@@ -195,9 +195,9 @@ static int meminfo_read_proc(char *page, char **start, off_t off,
                K(global_page_state(NR_ANON_PAGES)),
                K(global_page_state(NR_FILE_MAPPED)),
                K(global_page_state(NR_SLAB)),
+               K(global_page_state(NR_PAGETABLE)),
                K(allowed),
                K(committed),
-               K(ps.nr_page_table_pages),
                (unsigned long)VMALLOC_TOTAL >> 10,
                vmi.used >> 10,
                vmi.largest_chunk >> 10
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 67e03fc8533e6a2fc234b8802a0308e85a434be7..15adb435f24067bd141674ee084485e14e9df1d4 100644
@@ -52,6 +52,7 @@ enum zone_stat_item {
                           only modified from process context */
        NR_FILE_PAGES,
        NR_SLAB,        /* Pages used by slab allocator */
+       NR_PAGETABLE,   /* used for pagetables */
        NR_VM_ZONE_STAT_ITEMS };
 
 struct per_cpu_pages {
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 4b97381a2937aa8ee4526d5b959d3480f470794b..56220441d7c92394a68061c66394f84d71a1d7b4 100644
@@ -25,8 +25,7 @@ struct page_state {
        unsigned long nr_dirty;         /* Dirty writeable pages */
        unsigned long nr_writeback;     /* Pages under writeback */
        unsigned long nr_unstable;      /* NFS unstable pages */
-       unsigned long nr_page_table_pages;/* Pages used for pagetables */
-#define GET_PAGE_STATE_LAST nr_page_table_pages
+#define GET_PAGE_STATE_LAST nr_unstable
 
        /*
         * The below are zeroed by get_page_state().  Use get_full_page_state()
diff --git a/mm/memory.c b/mm/memory.c
index 247b5c312b9b073d1c76769a67e73d39ee8bdd39..1a78791590fad06e2297c68ecb8b5475a16e72b6 100644
@@ -126,7 +126,7 @@ static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd)
        pmd_clear(pmd);
        pte_lock_deinit(page);
        pte_free_tlb(tlb, page);
-       dec_page_state(nr_page_table_pages);
+       dec_zone_page_state(page, NR_PAGETABLE);
        tlb->mm->nr_ptes--;
 }
 
@@ -311,7 +311,7 @@ int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
                pte_free(new);
        } else {
                mm->nr_ptes++;
-               inc_page_state(nr_page_table_pages);
+               inc_zone_page_state(new, NR_PAGETABLE);
                pmd_populate(mm, pmd, new);
        }
        spin_unlock(&mm->page_table_lock);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a38a11cfb4834d5b5dc443fdc03b25fb8f897b67..ed3f2a7b407134d7995756ea651611c13ebf0b08 100644
@@ -1320,7 +1320,7 @@ void show_free_areas(void)
                nr_free_pages(),
                global_page_state(NR_SLAB),
                global_page_state(NR_FILE_MAPPED),
-               ps.nr_page_table_pages);
+               global_page_state(NR_PAGETABLE));
 
        for_each_zone(zone) {
                int i;
diff --git a/mm/vmstat.c b/mm/vmstat.c
index dc9e6920922302741baaf93a271f6634210623aa..292a35fe56c93e91345ad64c54e724c44719f4fc 100644
@@ -399,12 +399,12 @@ static char *vmstat_text[] = {
        "nr_mapped",
        "nr_file_pages",
        "nr_slab",
+       "nr_page_table_pages",
 
        /* Page state */
        "nr_dirty",
        "nr_writeback",
        "nr_unstable",
-       "nr_page_table_pages",
 
        "pgpgin",
        "pgpgout",