pilppa.com Git - linux-2.6-omap-h63xx.git/commitdiff
[PATCH] mm: simplify vmscan vs release refcounting
author: Nick Piggin <npiggin@suse.de>
Wed, 22 Mar 2006 08:08:03 +0000 (00:08 -0800)
committer: Linus Torvalds <torvalds@g5.osdl.org>
Wed, 22 Mar 2006 15:53:57 +0000 (07:53 -0800)
The VM has an interesting race where a page refcount can drop to zero, but it
is still on the LRU lists for a short time.  This was solved by testing a 0->1
refcount transition when picking up pages from the LRU, and dropping the
refcount in that case.

Instead, use atomic_add_unless to ensure we never pick up a 0 refcount page
from the LRU, thus a 0 refcount page will never have its refcount elevated
until it is allocated again.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
include/linux/mm.h
mm/vmscan.c

index 498ff8778fb6d6c39ef8fb8472f32d03cc33952f..b12d5c76420dc49049cc3f55e1cbc53265d05434 100644 (file)
@@ -301,17 +301,20 @@ struct page {
  * Drop a ref, return true if the logical refcount fell to zero (the page has
  * no users)
  */
-#define put_page_testzero(p)                           \
-       ({                                              \
-               BUG_ON(atomic_read(&(p)->_count) == -1);\
-               atomic_add_negative(-1, &(p)->_count);  \
-       })
+static inline int put_page_testzero(struct page *page)
+{
+       BUG_ON(atomic_read(&page->_count) == -1);
+       return atomic_add_negative(-1, &page->_count);
+}
 
 /*
- * Grab a ref, return true if the page previously had a logical refcount of
- * zero.  ie: returns true if we just grabbed an already-deemed-to-be-free page
+ * Try to grab a ref unless the page has a refcount of zero, return false if
+ * that is the case.
  */
-#define get_page_testone(p)    atomic_inc_and_test(&(p)->_count)
+static inline int get_page_unless_zero(struct page *page)
+{
+       return atomic_add_unless(&page->_count, 1, -1);
+}
 
 #define set_page_count(p,v)    atomic_set(&(p)->_count, (v) - 1)
 #define __put_page(p)          atomic_dec(&(p)->_count)
index 8e477b1a48380ec92171c0d38c1c586cab5f243e..e21bab4deda65ab3c35bb40f0fbdce2c22a36222 100644 (file)
@@ -1083,29 +1083,26 @@ static int isolate_lru_pages(int nr_to_scan, struct list_head *src,
        int scan = 0;
 
        while (scan++ < nr_to_scan && !list_empty(src)) {
+               struct list_head *target;
                page = lru_to_page(src);
                prefetchw_prev_lru_page(page, src, flags);
 
                BUG_ON(!PageLRU(page));
 
                list_del(&page->lru);
-               if (unlikely(get_page_testone(page))) {
+               target = src;
+               if (likely(get_page_unless_zero(page))) {
                        /*
-                        * It is being freed elsewhere
+                        * Be careful not to clear PageLRU until after we're
+                        * sure the page is not being freed elsewhere -- the
+                        * page release code relies on it.
                         */
-                       __put_page(page);
-                       list_add(&page->lru, src);
-                       continue;
-               }
+                       ClearPageLRU(page);
+                       target = dst;
+                       nr_taken++;
+               } /* else it is being freed elsewhere */
 
-               /*
-                * Be careful not to clear PageLRU until after we're sure
-                * the page is not being freed elsewhere -- the page release
-                * code relies on it.
-                */
-               ClearPageLRU(page);
-               list_add(&page->lru, dst);
-               nr_taken++;
+               list_add(&page->lru, target);
        }
 
        *scanned = scan;