per-zone and reclaim enhancements for memory controller: calculate mapper_ratio per...
author KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Thu, 7 Feb 2008 08:14:32 +0000 (00:14 -0800)
committer Linus Torvalds <torvalds@woody.linux-foundation.org>
Thu, 7 Feb 2008 16:42:21 +0000 (08:42 -0800)
Define function for calculating mapped_ratio in memory cgroup.

Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: Balbir Singh <balbir@linux.vnet.ibm.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Herbert Poetzl <herbert@13thfloor.at>
Cc: Kirill Korotaev <dev@sw.ru>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Paul Menage <menage@google.com>
Cc: Pavel Emelianov <xemul@openvz.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Vaidyanathan Srinivasan <svaidy@linux.vnet.ibm.com>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/memcontrol.h
mm/memcontrol.c

index 4ec712967f7cdd717b05074cdccfa20c7669cf0d..085cdcd817b03e5f0b712f78a8f98a8584767d01 100644 (file)
@@ -64,6 +64,12 @@ extern int mem_cgroup_prepare_migration(struct page *page);
 extern void mem_cgroup_end_migration(struct page *page);
 extern void mem_cgroup_page_migration(struct page *page, struct page *newpage);
 
+/*
+ * For memory reclaim.
+ */
+extern int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem);
+
+
 #else /* CONFIG_CGROUP_MEM_CONT */
 static inline void mm_init_cgroup(struct mm_struct *mm,
                                        struct task_struct *p)
@@ -135,7 +141,10 @@ mem_cgroup_page_migration(struct page *page, struct page *newpage)
 {
 }
 
-
+static inline int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem)
+{
+       return 0;
+}
 #endif /* CONFIG_CGROUP_MEM_CONT */
 
 #endif /* _LINUX_MEMCONTROL_H */
index 1637575d33390e44a48739731a094c51ed340c1e..2ef214ed5cf88c20a56cbe6d1721ebbd28c9cea2 100644 (file)
@@ -420,6 +420,23 @@ void mem_cgroup_move_lists(struct page_cgroup *pc, bool active)
        spin_unlock(&mem->lru_lock);
 }
 
+/*
+ * Calculate mapped_ratio under memory controller. This will be used in
+ * vmscan.c for determining whether we have to reclaim mapped pages.
+ */
+int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem)
+{
+       long total, rss;
+
+       /*
+        * usage is recorded in bytes. But, here, we assume the number of
+        * physical pages can be represented by "long" on any arch.
+        */
+       total = (long) (mem->res.usage >> PAGE_SHIFT) + 1L;
+       rss = (long)mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
+       return (int)((rss * 100L) / total);
+}
+
 unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
                                        struct list_head *dst,
                                        unsigned long *scanned, int order,
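
For illustration only (not part of the commit): the ratio above is simply RSS pages over total charged pages, with usage converted from bytes to pages and "+ 1" guarding against division by zero. A minimal userspace sketch of the same arithmetic, assuming 4 KiB pages (PAGE_SHIFT = 12); calc_mapped_ratio and the sample values are made up for this example:

#include <stdio.h>

#define PAGE_SHIFT 12	/* assume 4 KiB pages for the example */

/*
 * Mirrors the computation in mem_cgroup_calc_mapped_ratio():
 * usage is tracked in bytes, RSS in pages; "+ 1" avoids division by zero.
 */
static int calc_mapped_ratio(unsigned long long usage_bytes, long rss_pages)
{
	long total = (long)(usage_bytes >> PAGE_SHIFT) + 1L;

	return (int)((rss_pages * 100L) / total);
}

int main(void)
{
	unsigned long long usage = 400ULL << 20;	/* 400 MiB charged to the cgroup */
	long rss = 51200;				/* 200 MiB of mapped (RSS) pages */

	printf("mapped ratio: %d%%\n", calc_mapped_ratio(usage, rss));
	return 0;
}

With usage = 400 MiB and rss = 51200 pages, total is (400 MiB >> 12) + 1 = 102401 pages and the function returns 49, i.e. roughly half of the cgroup's charged memory is mapped; vmscan.c can compare such a per-cgroup value against a threshold when deciding whether mapped pages should be reclaimed.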