From e6a1530d692d6a60cdf15dfbcfea07f5324d7b9f Mon Sep 17 00:00:00 2001
From: Christoph Lameter
Date: Sun, 25 Jun 2006 05:46:49 -0700
Subject: [PATCH] [PATCH] Allow migration of mlocked pages

Hugh clarified the role of VM_LOCKED.  So we can now implement page
migration for mlocked pages.

Allow the migration of mlocked pages.  This means that try_to_unmap must
unmap mlocked pages in the migration case.

Signed-off-by: Christoph Lameter
Acked-by: Hugh Dickins
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/migrate.c | 10 ++++------
 mm/rmap.c    |  9 ++++-----
 2 files changed, 8 insertions(+), 11 deletions(-)

diff --git a/mm/migrate.c b/mm/migrate.c
index 0576c053598..3f1e0c2c942 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -616,15 +616,13 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 	/*
 	 * Establish migration ptes or remove ptes
 	 */
-	if (try_to_unmap(page, 1) != SWAP_FAIL) {
-		if (!page_mapped(page))
-			rc = move_to_new_page(newpage, page);
-	} else
-		/* A vma has VM_LOCKED set -> permanent failure */
-		rc = -EPERM;
+	try_to_unmap(page, 1);
+	if (!page_mapped(page))
+		rc = move_to_new_page(newpage, page);
 
 	if (rc)
 		remove_migration_ptes(page, page);
+
 unlock:
 	unlock_page(page);
 
diff --git a/mm/rmap.c b/mm/rmap.c
index 882a85826bb..e76909e880c 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -562,9 +562,8 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 	 * If it's recently referenced (perhaps page_referenced
 	 * skipped over this mm) then we should reactivate it.
 	 */
-	if ((vma->vm_flags & VM_LOCKED) ||
-			(ptep_clear_flush_young(vma, address, pte)
-			&& !migration)) {
+	if (!migration && ((vma->vm_flags & VM_LOCKED) ||
+			(ptep_clear_flush_young(vma, address, pte)))) {
 		ret = SWAP_FAIL;
 		goto out_unmap;
 	}
@@ -771,7 +770,7 @@ static int try_to_unmap_file(struct page *page, int migration)
 
 	list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
 						shared.vm_set.list) {
-		if (vma->vm_flags & VM_LOCKED)
+		if ((vma->vm_flags & VM_LOCKED) && !migration)
 			continue;
 		cursor = (unsigned long) vma->vm_private_data;
 		if (cursor > max_nl_cursor)
@@ -805,7 +804,7 @@ static int try_to_unmap_file(struct page *page, int migration)
 	do {
 		list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
 						shared.vm_set.list) {
-			if (vma->vm_flags & VM_LOCKED)
+			if ((vma->vm_flags & VM_LOCKED) && !migration)
 				continue;
 			cursor = (unsigned long) vma->vm_private_data;
 			while ( cursor < max_nl_cursor &&
-- 
2.41.1
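
Below is a minimal userspace sketch (not kernel code) of the condition that
try_to_unmap_one() applies after this patch.  The helper should_skip_unmap()
and its boolean parameters are hypothetical stand-ins for the VM_LOCKED flag,
the ptep_clear_flush_young() result and the migration argument; only the
boolean logic mirrors the hunk above.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical model of the reworked check; not part of the kernel. */
static bool should_skip_unmap(bool vm_locked, bool recently_referenced,
			      bool migration)
{
	/*
	 * Post-patch condition: VM_LOCKED and a recently referenced pte
	 * only block the unmap when we are not unmapping for migration.
	 */
	return !migration && (vm_locked || recently_referenced);
}

int main(void)
{
	/* mlocked page, ordinary reclaim: still refused (SWAP_FAIL path) */
	printf("reclaim, mlocked:   skip=%d\n",
	       should_skip_unmap(true, false, false));

	/* mlocked page, migration: now unmapped so it can be moved */
	printf("migration, mlocked: skip=%d\n",
	       should_skip_unmap(true, false, true));

	return 0;
}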