x86: PAT: remove follow_pfnmap_pte in favor of follow_phys
author venkatesh.pallipadi@intel.com <venkatesh.pallipadi@intel.com>
Fri, 19 Dec 2008 21:47:28 +0000 (13:47 -0800)
committer H. Peter Anvin <hpa@zytor.com>
Fri, 19 Dec 2008 23:40:30 +0000 (15:40 -0800)
Impact: Cleanup - removes a new function in favor of a recently modified older one.

Replace follow_pfnmap_pte in pat code with follow_phys. follow_phys also
returns the protection, eliminating the need for the pte_pgprot call. Using
follow_phys also eliminates the need for pte_pa.
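
For reference, a compressed before/after sketch of the calling pattern this change
switches to, distilled from the pat.c hunks below. The helper names
reserve_one_page_old/reserve_one_page_new and the addr argument are hypothetical,
for illustration only; the exact follow_phys prototype in this tree may spell the
physical-address argument as resource_size_t rather than u64.

    /* Before: fetch the pte, then derive address and protection by hand. */
    static int reserve_one_page_old(struct vm_area_struct *vma, unsigned long addr)
    {
            pte_t pte;

            if (follow_pfnmap_pte(vma, addr, &pte))
                    return -EINVAL;
            return reserve_pfn_range(pte_pa(pte), PAGE_SIZE, pte_pgprot(pte));
    }

    /* After: follow_phys hands back the physical address and the raw
     * protection bits in one locked lookup; wrap the bits with __pgprot()
     * where a pgprot_t is expected. */
    static int reserve_one_page_new(struct vm_area_struct *vma, unsigned long addr)
    {
            unsigned long prot;
            u64 paddr;

            if (follow_phys(vma, addr, 0, &prot, &paddr))
                    return -EINVAL;
            return reserve_pfn_range(paddr, PAGE_SIZE, __pgprot(prot));
    }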

Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
arch/x86/include/asm/pgtable.h
arch/x86/mm/pat.c
include/linux/mm.h
mm/memory.c

diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 579f8ceee9485cc5d518398f78e48d48ed60eddc..2aa792bbd7e0717db0985ec7b780be14e7f29e59 100644
@@ -230,11 +230,6 @@ static inline unsigned long pte_pfn(pte_t pte)
        return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
 }
 
-static inline u64 pte_pa(pte_t pte)
-{
-       return pte_val(pte) & PTE_PFN_MASK;
-}
-
 #define pte_page(pte)  pfn_to_page(pte_pfn(pte))
 
 static inline int pmd_large(pmd_t pte)
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index d5254bae84f4907e7ea5af706cde0e72647d63a7..541bcc944a5b8c70450cb53a8a1a2b2a3ae31ddf 100644
@@ -685,8 +685,7 @@ int track_pfn_vma_copy(struct vm_area_struct *vma)
        int retval = 0;
        unsigned long i, j;
        u64 paddr;
-       pgprot_t prot;
-       pte_t pte;
+       unsigned long prot;
        unsigned long vma_start = vma->vm_start;
        unsigned long vma_end = vma->vm_end;
        unsigned long vma_size = vma_end - vma_start;
@@ -696,26 +695,22 @@ int track_pfn_vma_copy(struct vm_area_struct *vma)
 
        if (is_linear_pfn_mapping(vma)) {
                /*
-                * reserve the whole chunk starting from vm_pgoff,
-                * But, we have to get the protection from pte.
+                * reserve the whole chunk covered by vma. We need the
+                * starting address and protection from pte.
                 */
-               if (follow_pfnmap_pte(vma, vma_start, &pte)) {
+               if (follow_phys(vma, vma_start, 0, &prot, &paddr)) {
                        WARN_ON_ONCE(1);
-                       return -1;
+                       return -EINVAL;
                }
-               prot = pte_pgprot(pte);
-               paddr = (u64)vma->vm_pgoff << PAGE_SHIFT;
-               return reserve_pfn_range(paddr, vma_size, prot);
+               return reserve_pfn_range(paddr, vma_size, __pgprot(prot));
        }
 
        /* reserve entire vma page by page, using pfn and prot from pte */
        for (i = 0; i < vma_size; i += PAGE_SIZE) {
-               if (follow_pfnmap_pte(vma, vma_start + i, &pte))
+               if (follow_phys(vma, vma_start + i, 0, &prot, &paddr))
                        continue;
 
-               paddr = pte_pa(pte);
-               prot = pte_pgprot(pte);
-               retval = reserve_pfn_range(paddr, PAGE_SIZE, prot);
+               retval = reserve_pfn_range(paddr, PAGE_SIZE, __pgprot(prot));
                if (retval)
                        goto cleanup_ret;
        }
@@ -724,10 +719,9 @@ int track_pfn_vma_copy(struct vm_area_struct *vma)
 cleanup_ret:
        /* Reserve error: Cleanup partial reservation and return error */
        for (j = 0; j < i; j += PAGE_SIZE) {
-               if (follow_pfnmap_pte(vma, vma_start + j, &pte))
+               if (follow_phys(vma, vma_start + j, 0, &prot, &paddr))
                        continue;
 
-               paddr = pte_pa(pte);
                free_pfn_range(paddr, PAGE_SIZE);
        }
 
@@ -797,6 +791,7 @@ void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
 {
        unsigned long i;
        u64 paddr;
+       unsigned long prot;
        unsigned long vma_start = vma->vm_start;
        unsigned long vma_end = vma->vm_end;
        unsigned long vma_size = vma_end - vma_start;
@@ -821,12 +816,9 @@ void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
        } else {
                /* free entire vma, page by page, using the pfn from pte */
                for (i = 0; i < vma_size; i += PAGE_SIZE) {
-                       pte_t pte;
-
-                       if (follow_pfnmap_pte(vma, vma_start + i, &pte))
+                       if (follow_phys(vma, vma_start + i, 0, &prot, &paddr))
                                continue;
 
-                       paddr = pte_pa(pte);
                        free_pfn_range(paddr, PAGE_SIZE);
                }
        }
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 2f6e2f886d4bf196bcefbaf97298681ab8949b30..36f9b3fa5e15ab58450103350abe0f02bbe4d116 100644
@@ -1239,9 +1239,6 @@ struct page *follow_page(struct vm_area_struct *, unsigned long address,
 #define FOLL_GET       0x04    /* do get_page on page */
 #define FOLL_ANON      0x08    /* give ZERO_PAGE if no pgtable */
 
-int follow_pfnmap_pte(struct vm_area_struct *vma,
-                               unsigned long address, pte_t *ret_ptep);
-
 typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
                        void *data);
 extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
diff --git a/mm/memory.c b/mm/memory.c
index 79f28e35d4fc65c9efc69a58760ac2a4f76c056e..6b29f39a5a3ed9c42035415dd6d282ebb117cf81 100644
@@ -1168,49 +1168,6 @@ no_page_table:
        return page;
 }
 
-int follow_pfnmap_pte(struct vm_area_struct *vma, unsigned long address,
-                       pte_t *ret_ptep)
-{
-       pgd_t *pgd;
-       pud_t *pud;
-       pmd_t *pmd;
-       pte_t *ptep, pte;
-       spinlock_t *ptl;
-       struct page *page;
-       struct mm_struct *mm = vma->vm_mm;
-
-       if (!is_pfn_mapping(vma))
-               goto err;
-
-       page = NULL;
-       pgd = pgd_offset(mm, address);
-       if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
-               goto err;
-
-       pud = pud_offset(pgd, address);
-       if (pud_none(*pud) || unlikely(pud_bad(*pud)))
-               goto err;
-
-       pmd = pmd_offset(pud, address);
-       if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
-               goto err;
-
-       ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
-
-       pte = *ptep;
-       if (!pte_present(pte))
-               goto err_unlock;
-
-       *ret_ptep = pte;
-       pte_unmap_unlock(ptep, ptl);
-       return 0;
-
-err_unlock:
-       pte_unmap_unlock(ptep, ptl);
-err:
-       return -EINVAL;
-}
-
 /* Can we do the FOLL_ANON optimization? */
 static inline int use_zero_page(struct vm_area_struct *vma)
 {