memory cgroup enhancements: remember "a page is charged as page cache"
author	KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
	Thu, 7 Feb 2008 08:14:17 +0000 (00:14 -0800)
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>
	Thu, 7 Feb 2008 16:42:20 +0000 (08:42 -0800)
Add a flag to page_cgroup to remember that the page was charged as
cache.  "Cache" here covers both the page cache and the swap cache.
This is useful for implementing precise accounting in the memory
cgroup.
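A consumer of the flag would test it through pc->flags; a minimal
sketch (the helper name is hypothetical and not part of this patch):

	/* hypothetical helper: report whether this page_cgroup was
	 * charged as cache (page cache or swap cache) */
	static inline int page_cgroup_is_cache(struct page_cgroup *pc)
	{
		return pc->flags & PAGE_CGROUP_FLAG_CACHE;
	}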
TODO:
  distinguish page-cache and swap-cache

Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: YAMAMOTO Takashi <yamamoto@valinux.co.jp>
Cc: Balbir Singh <balbir@linux.vnet.ibm.com>
Cc: Pavel Emelianov <xemul@openvz.org>
Cc: Paul Menage <menage@google.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Kirill Korotaev <dev@sw.ru>
Cc: Herbert Poetzl <herbert@13thfloor.at>
Cc: David Rientjes <rientjes@google.com>
Cc: Vaidyanathan Srinivasan <svaidy@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/memcontrol.c

index c867612d9c04c7baa3a619bcc458058a5758b916..975e89935d5251a713916f197e9e7825e8f0cd37 100644
@@ -83,7 +83,9 @@ struct page_cgroup {
        struct mem_cgroup *mem_cgroup;
        atomic_t ref_cnt;               /* Helpful when pages move b/w  */
                                        /* mapped and cached states     */
+       int      flags;
 };
+#define PAGE_CGROUP_FLAG_CACHE (0x1)   /* charged as cache */
 
 enum {
        MEM_CGROUP_TYPE_UNSPEC = 0,
@@ -93,6 +95,11 @@ enum {
        MEM_CGROUP_TYPE_MAX,
 };
 
+enum charge_type {
+       MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
+       MEM_CGROUP_CHARGE_TYPE_MAPPED,
+};
+
 static struct mem_cgroup init_mem_cgroup;
 
 static inline
@@ -306,8 +313,8 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
  * 0 if the charge was successful
  * < 0 if the cgroup is over its limit
  */
-int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
-                               gfp_t gfp_mask)
+static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
+                               gfp_t gfp_mask, enum charge_type ctype)
 {
        struct mem_cgroup *mem;
        struct page_cgroup *pc;
@@ -409,6 +416,9 @@ noreclaim:
        atomic_set(&pc->ref_cnt, 1);
        pc->mem_cgroup = mem;
        pc->page = page;
+       pc->flags = 0;
+       if (ctype == MEM_CGROUP_CHARGE_TYPE_CACHE)
+               pc->flags |= PAGE_CGROUP_FLAG_CACHE;
        if (page_cgroup_assign_new_page_cgroup(page, pc)) {
                /*
                 * an another charge is added to this page already.
@@ -433,6 +443,13 @@ err:
        return -ENOMEM;
 }
 
+int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
+                       gfp_t gfp_mask)
+{
+       return mem_cgroup_charge_common(page, mm, gfp_mask,
+                       MEM_CGROUP_CHARGE_TYPE_MAPPED);
+}
+
 /*
  * See if the cached pages should be charged at all?
  */
@@ -445,7 +462,8 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
 
        mem = rcu_dereference(mm->mem_cgroup);
        if (mem->control_type == MEM_CGROUP_TYPE_ALL)
-               return mem_cgroup_charge(page, mm, gfp_mask);
+               return mem_cgroup_charge_common(page, mm, gfp_mask,
+                               MEM_CGROUP_CHARGE_TYPE_CACHE);
        else
                return 0;
 }