hugetlbfs: common code update for s390
author		Gerald Schaefer <gerald.schaefer@de.ibm.com>
		Mon, 28 Apr 2008 09:13:29 +0000 (02:13 -0700)
committer	Linus Torvalds <torvalds@linux-foundation.org>
		Mon, 28 Apr 2008 15:58:25 +0000 (08:58 -0700)
Huge ptes have a special type on s390 and cannot be handled with the standard
pte functions in certain cases, e.g. because of a different location of the
invalid bit.  This patch adds some new architecture-specific functions to
hugetlb common code, as a prerequisite for the s390 large page support.

This won't affect other architectures in functionality, but I need to add some
new dummy inline functions to the headers.
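
For illustration only (this is not part of the patch and not the real s390
implementation), an architecture whose huge ptes are not ordinary ptes could
provide its own definitions instead of the generic fallbacks, roughly along
these lines; every arch_huge_* helper below is a made-up name:

	/*
	 * Hypothetical architecture override (illustration only): decode an
	 * architecture-specific huge-page entry into a pte_t so that common
	 * hugetlb code can keep working with pte_t values.
	 */
	static inline pte_t huge_ptep_get(pte_t *ptep)
	{
		pte_t pte = *ptep;

		if (!arch_huge_pte_invalid(pte))	/* hypothetical helper */
			pte = arch_huge_pte_decode(pte);/* hypothetical helper */
		return pte;
	}

	static inline int huge_pte_none(pte_t pte)
	{
		/* The invalid bit sits in a different place for huge ptes,
		 * so the generic pte_none() cannot be reused directly. */
		return arch_huge_pte_invalid(pte);	/* hypothetical helper */
	}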

Acked-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "David S. Miller" <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/asm-ia64/hugetlb.h
include/asm-powerpc/hugetlb.h
include/asm-sh/hugetlb.h
include/asm-sparc64/hugetlb.h
include/asm-x86/hugetlb.h
mm/hugetlb.c
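
Reading aid (not part of the commit): the mm/hugetlb.c hunks below all adopt
the same pattern, fetching the entry once through huge_ptep_get() and testing
it with the huge-pte-aware helpers rather than dereferencing the pointer and
calling pte_none() on it. A minimal sketch of that pattern, with a made-up
helper name:

	/* Illustrative sketch only; hugetlb_entry_absent() is a made-up name. */
	static inline int hugetlb_entry_absent(pte_t *ptep)
	{
		pte_t entry = huge_ptep_get(ptep);	/* arch-overridable read */

		return huge_pte_none(entry);		/* arch-aware emptiness test */
	}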

diff --git a/include/asm-ia64/hugetlb.h b/include/asm-ia64/hugetlb.h
index 5f543437497257e6a70df013c347d5f11f94fad2..f28a9701f1cfe94a9d9d55796513b9364c330fe1 100644
--- a/include/asm-ia64/hugetlb.h
+++ b/include/asm-ia64/hugetlb.h
@@ -39,4 +39,41 @@ static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
 {
 }
 
+static inline int huge_pte_none(pte_t pte)
+{
+       return pte_none(pte);
+}
+
+static inline pte_t huge_pte_wrprotect(pte_t pte)
+{
+       return pte_wrprotect(pte);
+}
+
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+                                          unsigned long addr, pte_t *ptep)
+{
+       ptep_set_wrprotect(mm, addr, ptep);
+}
+
+static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+                                            unsigned long addr, pte_t *ptep,
+                                            pte_t pte, int dirty)
+{
+       return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+}
+
+static inline pte_t huge_ptep_get(pte_t *ptep)
+{
+       return *ptep;
+}
+
+static inline int arch_prepare_hugepage(struct page *page)
+{
+       return 0;
+}
+
+static inline void arch_release_hugepage(struct page *page)
+{
+}
+
 #endif /* _ASM_IA64_HUGETLB_H */
diff --git a/include/asm-powerpc/hugetlb.h b/include/asm-powerpc/hugetlb.h
index bead2ff78493df8490f1488a3b321fff68d21c66..649c6c3b87b38a590be3e820c9ff415dc14f7fab 100644
--- a/include/asm-powerpc/hugetlb.h
+++ b/include/asm-powerpc/hugetlb.h
@@ -39,4 +39,41 @@ static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
 {
 }
 
+static inline int huge_pte_none(pte_t pte)
+{
+       return pte_none(pte);
+}
+
+static inline pte_t huge_pte_wrprotect(pte_t pte)
+{
+       return pte_wrprotect(pte);
+}
+
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+                                          unsigned long addr, pte_t *ptep)
+{
+       ptep_set_wrprotect(mm, addr, ptep);
+}
+
+static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+                                            unsigned long addr, pte_t *ptep,
+                                            pte_t pte, int dirty)
+{
+       return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+}
+
+static inline pte_t huge_ptep_get(pte_t *ptep)
+{
+       return *ptep;
+}
+
+static inline int arch_prepare_hugepage(struct page *page)
+{
+       return 0;
+}
+
+static inline void arch_release_hugepage(struct page *page)
+{
+}
+
 #endif /* _ASM_POWERPC_HUGETLB_H */
diff --git a/include/asm-sh/hugetlb.h b/include/asm-sh/hugetlb.h
index d1ed476467a1989a0f86f4a1abc1e19d050f558f..02402303d89b50111de303715a1560c2d6fdcea9 100644
--- a/include/asm-sh/hugetlb.h
+++ b/include/asm-sh/hugetlb.h
@@ -51,4 +51,41 @@ static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
 {
 }
 
+static inline int huge_pte_none(pte_t pte)
+{
+       return pte_none(pte);
+}
+
+static inline pte_t huge_pte_wrprotect(pte_t pte)
+{
+       return pte_wrprotect(pte);
+}
+
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+                                          unsigned long addr, pte_t *ptep)
+{
+       ptep_set_wrprotect(mm, addr, ptep);
+}
+
+static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+                                            unsigned long addr, pte_t *ptep,
+                                            pte_t pte, int dirty)
+{
+       return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+}
+
+static inline pte_t huge_ptep_get(pte_t *ptep)
+{
+       return *ptep;
+}
+
+static inline int arch_prepare_hugepage(struct page *page)
+{
+       return 0;
+}
+
+static inline void arch_release_hugepage(struct page *page)
+{
+}
+
 #endif /* _ASM_SH_HUGETLB_H */
diff --git a/include/asm-sparc64/hugetlb.h b/include/asm-sparc64/hugetlb.h
index 0b9e44c85c5da821e7c88a8cd2f04336f7bc2584..412af58926a0af752854dffdc113bc5dac04ae3a 100644
--- a/include/asm-sparc64/hugetlb.h
+++ b/include/asm-sparc64/hugetlb.h
@@ -44,4 +44,41 @@ static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
 {
 }
 
+static inline int huge_pte_none(pte_t pte)
+{
+       return pte_none(pte);
+}
+
+static inline pte_t huge_pte_wrprotect(pte_t pte)
+{
+       return pte_wrprotect(pte);
+}
+
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+                                          unsigned long addr, pte_t *ptep)
+{
+       ptep_set_wrprotect(mm, addr, ptep);
+}
+
+static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+                                            unsigned long addr, pte_t *ptep,
+                                            pte_t pte, int dirty)
+{
+       return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+}
+
+static inline pte_t huge_ptep_get(pte_t *ptep)
+{
+       return *ptep;
+}
+
+static inline int arch_prepare_hugepage(struct page *page)
+{
+       return 0;
+}
+
+static inline void arch_release_hugepage(struct page *page)
+{
+}
+
 #endif /* _ASM_SPARC64_HUGETLB_H */
diff --git a/include/asm-x86/hugetlb.h b/include/asm-x86/hugetlb.h
index f57236dfc8f4b007b921d6191435bb3345e3e7c3..14171a4924f695b1634ad0a322ae6a2813d8adc6 100644
--- a/include/asm-x86/hugetlb.h
+++ b/include/asm-x86/hugetlb.h
@@ -51,4 +51,41 @@ static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
 {
 }
 
+static inline int huge_pte_none(pte_t pte)
+{
+       return pte_none(pte);
+}
+
+static inline pte_t huge_pte_wrprotect(pte_t pte)
+{
+       return pte_wrprotect(pte);
+}
+
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+                                          unsigned long addr, pte_t *ptep)
+{
+       ptep_set_wrprotect(mm, addr, ptep);
+}
+
+static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+                                            unsigned long addr, pte_t *ptep,
+                                            pte_t pte, int dirty)
+{
+       return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+}
+
+static inline pte_t huge_ptep_get(pte_t *ptep)
+{
+       return *ptep;
+}
+
+static inline int arch_prepare_hugepage(struct page *page)
+{
+       return 0;
+}
+
+static inline void arch_release_hugepage(struct page *page)
+{
+}
+
 #endif /* _ASM_X86_HUGETLB_H */
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 262d0a93d2b6d45112975357374496bf29d3138b..df28c1773fb2320b940080e4991e223cd065f4f1 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -132,6 +132,7 @@ static void update_and_free_page(struct page *page)
        }
        set_compound_page_dtor(page, NULL);
        set_page_refcounted(page);
+       arch_release_hugepage(page);
        __free_pages(page, HUGETLB_PAGE_ORDER);
 }
 
@@ -201,6 +202,10 @@ static struct page *alloc_fresh_huge_page_node(int nid)
                htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|__GFP_NOWARN,
                HUGETLB_PAGE_ORDER);
        if (page) {
+               if (arch_prepare_hugepage(page)) {
+                       __free_pages(page, HUGETLB_PAGE_ORDER);
+                       return 0;
+               }
                set_compound_page_dtor(page, free_huge_page);
                spin_lock(&hugetlb_lock);
                nr_huge_pages++;
@@ -735,7 +740,7 @@ static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
                entry =
                    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
        } else {
-               entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
+               entry = huge_pte_wrprotect(mk_pte(page, vma->vm_page_prot));
        }
        entry = pte_mkyoung(entry);
        entry = pte_mkhuge(entry);
@@ -748,8 +753,8 @@ static void set_huge_ptep_writable(struct vm_area_struct *vma,
 {
        pte_t entry;
 
-       entry = pte_mkwrite(pte_mkdirty(*ptep));
-       if (ptep_set_access_flags(vma, address, ptep, entry, 1)) {
+       entry = pte_mkwrite(pte_mkdirty(huge_ptep_get(ptep)));
+       if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) {
                update_mmu_cache(vma, address, entry);
        }
 }
@@ -779,10 +784,10 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 
                spin_lock(&dst->page_table_lock);
                spin_lock(&src->page_table_lock);
-               if (!pte_none(*src_pte)) {
+               if (!huge_pte_none(huge_ptep_get(src_pte))) {
                        if (cow)
-                               ptep_set_wrprotect(src, addr, src_pte);
-                       entry = *src_pte;
+                               huge_ptep_set_wrprotect(src, addr, src_pte);
+                       entry = huge_ptep_get(src_pte);
                        ptepage = pte_page(entry);
                        get_page(ptepage);
                        set_huge_pte_at(dst, addr, dst_pte, entry);
@@ -826,7 +831,7 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
                        continue;
 
                pte = huge_ptep_get_and_clear(mm, address, ptep);
-               if (pte_none(pte))
+               if (huge_pte_none(pte))
                        continue;
 
                page = pte_page(pte);
@@ -890,7 +895,7 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
        spin_lock(&mm->page_table_lock);
 
        ptep = huge_pte_offset(mm, address & HPAGE_MASK);
-       if (likely(pte_same(*ptep, pte))) {
+       if (likely(pte_same(huge_ptep_get(ptep), pte))) {
                /* Break COW */
                huge_ptep_clear_flush(vma, address, ptep);
                set_huge_pte_at(mm, address, ptep,
@@ -960,7 +965,7 @@ retry:
                goto backout;
 
        ret = 0;
-       if (!pte_none(*ptep))
+       if (!huge_pte_none(huge_ptep_get(ptep)))
                goto backout;
 
        new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
@@ -1002,8 +1007,8 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
         * the same page in the page cache.
         */
        mutex_lock(&hugetlb_instantiation_mutex);
-       entry = *ptep;
-       if (pte_none(entry)) {
+       entry = huge_ptep_get(ptep);
+       if (huge_pte_none(entry)) {
                ret = hugetlb_no_page(mm, vma, address, ptep, write_access);
                mutex_unlock(&hugetlb_instantiation_mutex);
                return ret;
@@ -1013,7 +1018,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 
        spin_lock(&mm->page_table_lock);
        /* Check for a racing update before calling hugetlb_cow */
-       if (likely(pte_same(entry, *ptep)))
+       if (likely(pte_same(entry, huge_ptep_get(ptep))))
                if (write_access && !pte_write(entry))
                        ret = hugetlb_cow(mm, vma, address, ptep, entry);
        spin_unlock(&mm->page_table_lock);
@@ -1043,7 +1048,8 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                 */
                pte = huge_pte_offset(mm, vaddr & HPAGE_MASK);
 
-               if (!pte || pte_none(*pte) || (write && !pte_write(*pte))) {
+               if (!pte || huge_pte_none(huge_ptep_get(pte)) ||
+                   (write && !pte_write(huge_ptep_get(pte)))) {
                        int ret;
 
                        spin_unlock(&mm->page_table_lock);
@@ -1059,7 +1065,7 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                }
 
                pfn_offset = (vaddr & ~HPAGE_MASK) >> PAGE_SHIFT;
-               page = pte_page(*pte);
+               page = pte_page(huge_ptep_get(pte));
 same_page:
                if (pages) {
                        get_page(page);
@@ -1108,7 +1114,7 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
                        continue;
                if (huge_pmd_unshare(mm, &address, ptep))
                        continue;
-               if (!pte_none(*ptep)) {
+               if (!huge_pte_none(huge_ptep_get(ptep))) {
                        pte = huge_ptep_get_and_clear(mm, address, ptep);
                        pte = pte_mkhuge(pte_modify(pte, newprot));
                        set_huge_pte_at(mm, address, ptep, pte);