KVM: switch to get_user_pages_fast
author    Marcelo Tosatti <mtosatti@redhat.com>
          Tue, 16 Sep 2008 23:54:47 +0000 (20:54 -0300)
committer Avi Kivity <avi@redhat.com>
          Wed, 15 Oct 2008 12:25:06 +0000 (14:25 +0200)
Convert gfn_to_pfn to use get_user_pages_fast, which can do lockless
pagetable lookups on x86. Kernel compilation on a 4-way guest is 3.7%
faster on VMX.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
arch/powerpc/kvm/44x_tlb.c
arch/x86/kvm/mmu.c
arch/x86/kvm/paging_tmpl.h
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
virt/kvm/kvm_main.c
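
The heart of the change is in virt/kvm/kvm_main.c: gfn_to_pfn() used to call
get_user_pages() with every caller holding mmap_sem, and now does a lockless
get_user_pages_fast() lookup, taking mmap_sem itself only on the rare
VM_PFNMAP (MMIO) slow path. A condensed sketch of the new shape of the
function, with the slow path abbreviated (the full diff is below):

    pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
    {
            struct page *page[1];
            unsigned long addr;
            int npages;

            addr = gfn_to_hva(kvm, gfn);
            if (kvm_is_error_hva(addr)) {
                    get_page(bad_page);          /* bad gfn: hand back bad_page */
                    return page_to_pfn(bad_page);
            }

            /* Lockless lookup; no mmap_sem, unlike get_user_pages(). */
            npages = get_user_pages_fast(addr, 1, 1, page);
            if (likely(npages == 1))
                    return page_to_pfn(page[0]);

            /* Slow path: mmap_sem is taken here, and only here. */
            ...
    }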

diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
index 7b11fd7be5427b451382c99caacccca120b43009..2e227a412bc240f2bc889b0c7c1fe2e17a7bdd8c 100644
@@ -147,9 +147,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid,
        stlbe = &vcpu->arch.shadow_tlb[victim];
 
        /* Get reference to new page. */
-       down_read(&current->mm->mmap_sem);
        new_page = gfn_to_page(vcpu->kvm, gfn);
-       up_read(&current->mm->mmap_sem);
        if (is_error_page(new_page)) {
                printk(KERN_ERR "Couldn't get guest page for gfn %lx!\n", gfn);
                kvm_release_page_clean(new_page);
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index bce3e25ec79bee26e60abb8f883629ca590f5c30..5779a2323e230765111744a1c4bf946b2c8c51d5 100644
@@ -405,16 +405,19 @@ static int host_largepage_backed(struct kvm *kvm, gfn_t gfn)
 {
        struct vm_area_struct *vma;
        unsigned long addr;
+       int ret = 0;
 
        addr = gfn_to_hva(kvm, gfn);
        if (kvm_is_error_hva(addr))
-               return 0;
+               return ret;
 
+       down_read(&current->mm->mmap_sem);
        vma = find_vma(current->mm, addr);
        if (vma && is_vm_hugetlb_page(vma))
-               return 1;
+               ret = 1;
+       up_read(&current->mm->mmap_sem);
 
-       return 0;
+       return ret;
 }
 
 static int is_largepage_backed(struct kvm_vcpu *vcpu, gfn_t large_gfn)
@@ -1140,9 +1143,7 @@ struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
        if (gpa == UNMAPPED_GVA)
                return NULL;
 
-       down_read(&current->mm->mmap_sem);
        page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
-       up_read(&current->mm->mmap_sem);
 
        return page;
 }
@@ -1330,16 +1331,14 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
        pfn_t pfn;
        unsigned long mmu_seq;
 
-       down_read(&current->mm->mmap_sem);
        if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
                gfn &= ~(KVM_PAGES_PER_HPAGE-1);
                largepage = 1;
        }
 
        mmu_seq = vcpu->kvm->mmu_notifier_seq;
-       /* implicit mb(), we'll read before PT lock is unlocked */
+       smp_rmb();
        pfn = gfn_to_pfn(vcpu->kvm, gfn);
-       up_read(&current->mm->mmap_sem);
 
        /* mmio */
        if (is_error_pfn(pfn)) {
@@ -1488,15 +1487,13 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
        if (r)
                return r;
 
-       down_read(&current->mm->mmap_sem);
        if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
                gfn &= ~(KVM_PAGES_PER_HPAGE-1);
                largepage = 1;
        }
        mmu_seq = vcpu->kvm->mmu_notifier_seq;
-       /* implicit mb(), we'll read before PT lock is unlocked */
+       smp_rmb();
        pfn = gfn_to_pfn(vcpu->kvm, gfn);
-       up_read(&current->mm->mmap_sem);
        if (is_error_pfn(pfn)) {
                kvm_release_pfn_clean(pfn);
                return 1;
@@ -1809,15 +1806,13 @@ static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
                return;
        gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
 
-       down_read(&current->mm->mmap_sem);
        if (is_large_pte(gpte) && is_largepage_backed(vcpu, gfn)) {
                gfn &= ~(KVM_PAGES_PER_HPAGE-1);
                vcpu->arch.update_pte.largepage = 1;
        }
        vcpu->arch.update_pte.mmu_seq = vcpu->kvm->mmu_notifier_seq;
-       /* implicit mb(), we'll read before PT lock is unlocked */
+       smp_rmb();
        pfn = gfn_to_pfn(vcpu->kvm, gfn);
-       up_read(&current->mm->mmap_sem);
 
        if (is_error_pfn(pfn)) {
                kvm_release_pfn_clean(pfn);
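
The smp_rmb() additions above compensate for the lock that just went away:
down_read(&current->mm->mmap_sem) acted as an implicit barrier ordering the
mmu_notifier_seq read before the page lookup, so with mmap_sem gone the
ordering has to be spelled out. A sketch of the pattern these fault paths
rely on; mmu_notifier_retry() is the existing sequence check taken under
mmu_lock, though the retry plumbing shown here is an assumption for
illustration, not part of this diff:

        mmu_seq = vcpu->kvm->mmu_notifier_seq;  /* sample the notifier seq  */
        smp_rmb();                              /* previously implied by    */
                                                /* mmap_sem acquire/release */
        pfn = gfn_to_pfn(vcpu->kvm, gfn);       /* now a lockless lookup    */

        spin_lock(&vcpu->kvm->mmu_lock);
        if (mmu_notifier_retry(vcpu, mmu_seq))  /* an invalidate raced:     */
                goto out_unlock;                /* drop the pfn and refault */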
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index b671f61be41e61a9aa277a549907874cb5b235a5..6dd08e096e2492cb420e7f50afc7b11deb1db4d3 100644
@@ -102,14 +102,10 @@ static bool FNAME(cmpxchg_gpte)(struct kvm *kvm,
        pt_element_t *table;
        struct page *page;
 
-       down_read(&current->mm->mmap_sem);
        page = gfn_to_page(kvm, table_gfn);
-       up_read(&current->mm->mmap_sem);
 
        table = kmap_atomic(page, KM_USER0);
-
        ret = CMPXCHG(&table[index], orig_pte, new_pte);
-
        kunmap_atomic(table, KM_USER0);
 
        kvm_release_page_dirty(page);
@@ -418,7 +414,6 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
                return 0;
        }
 
-       down_read(&current->mm->mmap_sem);
        if (walker.level == PT_DIRECTORY_LEVEL) {
                gfn_t large_gfn;
                large_gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE-1);
@@ -428,9 +423,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
                }
        }
        mmu_seq = vcpu->kvm->mmu_notifier_seq;
-       /* implicit mb(), we'll read before PT lock is unlocked */
+       smp_rmb();
        pfn = gfn_to_pfn(vcpu->kvm, walker.gfn);
-       up_read(&current->mm->mmap_sem);
 
        /* mmio */
        if (is_error_pfn(pfn)) {
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 046a91b5a4baadbecec86b9761fa2f4f2540003f..025bf4011abc83ec1546011533b0d4cc8b602882 100644
@@ -2010,9 +2010,7 @@ static int alloc_apic_access_page(struct kvm *kvm)
        if (r)
                goto out;
 
-       down_read(&current->mm->mmap_sem);
        kvm->arch.apic_access_page = gfn_to_page(kvm, 0xfee00);
-       up_read(&current->mm->mmap_sem);
 out:
        up_write(&kvm->slots_lock);
        return r;
@@ -2034,10 +2032,8 @@ static int alloc_identity_pagetable(struct kvm *kvm)
        if (r)
                goto out;
 
-       down_read(&current->mm->mmap_sem);
        kvm->arch.ept_identity_pagetable = gfn_to_page(kvm,
                        VMX_EPT_IDENTITY_PAGETABLE_ADDR >> PAGE_SHIFT);
-       up_read(&current->mm->mmap_sem);
 out:
        up_write(&kvm->slots_lock);
        return r;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 61eddbeabeb40626f3331faac3ef48d5adf7a687..108f07267e87b1c5c3ead8d1d6315d105383ffcb 100644
@@ -946,10 +946,8 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
                /* ...but clean it before doing the actual write */
                vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
 
-               down_read(&current->mm->mmap_sem);
                vcpu->arch.time_page =
                                gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
-               up_read(&current->mm->mmap_sem);
 
                if (is_error_page(vcpu->arch.time_page)) {
                        kvm_release_page_clean(vcpu->arch.time_page);
@@ -2322,9 +2320,7 @@ static int emulator_cmpxchg_emulated(unsigned long addr,
 
                val = *(u64 *)new;
 
-               down_read(&current->mm->mmap_sem);
                page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
-               up_read(&current->mm->mmap_sem);
 
                kaddr = kmap_atomic(page, KM_USER0);
                set_64bit((u64 *)(kaddr + offset_in_page(gpa)), val);
@@ -3089,9 +3085,7 @@ static void vapic_enter(struct kvm_vcpu *vcpu)
        if (!apic || !apic->vapic_addr)
                return;
 
-       down_read(&current->mm->mmap_sem);
        page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
-       up_read(&current->mm->mmap_sem);
 
        vcpu->arch.apic->vapic_page = page;
 }
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 2907d05cfcc348a4dd32aeab919b5a62dcaf1ab6..cd34f73513d3633b8ef1ad772ddc1650e5bde74f 100644
@@ -723,9 +723,6 @@ unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(gfn_to_hva);
 
-/*
- * Requires current->mm->mmap_sem to be held
- */
 pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
 {
        struct page *page[1];
@@ -741,20 +738,23 @@ pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
                return page_to_pfn(bad_page);
        }
 
-       npages = get_user_pages(current, current->mm, addr, 1, 1, 0, page,
-                               NULL);
+       npages = get_user_pages_fast(addr, 1, 1, page);
 
        if (unlikely(npages != 1)) {
                struct vm_area_struct *vma;
 
+               down_read(&current->mm->mmap_sem);
                vma = find_vma(current->mm, addr);
+
                if (vma == NULL || addr < vma->vm_start ||
                    !(vma->vm_flags & VM_PFNMAP)) {
+                       up_read(&current->mm->mmap_sem);
                        get_page(bad_page);
                        return page_to_pfn(bad_page);
                }
 
                pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
+               up_read(&current->mm->mmap_sem);
                BUG_ON(!is_mmio_pfn(pfn));
        } else
                pfn = page_to_pfn(page[0]);
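
With the conversion in place, callers invoke gfn_to_pfn() with no mmap_sem
bracketing at all and keep the same failure check, as the tdp_page_fault
hunk above shows; a minimal caller sketch:

        pfn = gfn_to_pfn(vcpu->kvm, gfn);       /* no locks held by caller */
        if (is_error_pfn(pfn)) {
                kvm_release_pfn_clean(pfn);     /* drop the bad_page ref   */
                return 1;                       /* let the guest retry     */
        }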