KVM: MMU: Fix race when instantiating a shadow pte
author    Avi Kivity <avi@qumranet.com>
          Tue, 26 Feb 2008 20:12:10 +0000 (22:12 +0200)
committer Avi Kivity <avi@qumranet.com>
          Tue, 4 Mar 2008 13:19:49 +0000 (15:19 +0200)
For improved concurrency, the guest walk is performed while other vcpus are
running and may be modifying the guest page tables.  This means that we need
to revalidate the guest ptes once we have write-protected the guest page
tables, at which point they can no longer be modified.

The current code attempts to avoid this check if the shadow page table is not
new, on the assumption that if it existed before, the guest could not have
modified the pte without taking the shadow lock.  The assumption is incorrect,
however: a racing vcpu can modify the pte and then instantiate the shadow
page before our vcpu regains control:

  vcpu0        vcpu1

  fault
  walk pte

               modify pte
               fault in same pagetable
               instantiate shadow page

  lookup shadow page
  conclude it is old
  instantiate spte based on stale guest pte
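
The outcome of this interleaving can be replayed in miniature.  The snippet
below is not KVM code, just a deterministic, single-threaded re-enactment of
the steps above with hypothetical stand-ins (guest_pte for the guest pte,
shadow_exists for the shadow-page cache hit):

	#include <stdio.h>

	int main(void)
	{
		int guest_pte = 1;
		int shadow_exists = 0;
		int spte = 0;

		int walked_pte = guest_pte;	/* vcpu0: fault, walk pte */

		guest_pte = 2;			/* vcpu1: modify pte */
		shadow_exists = 1;		/* vcpu1: fault in same pagetable,
						 *        instantiate shadow page */

		if (shadow_exists)		/* vcpu0: shadow page looks old, */
			spte = walked_pte;	/*        so revalidation is skipped */

		printf("guest pte = %d, shadowed value = %d%s\n",
		       guest_pte, spte,
		       spte != guest_pte ? "  <-- stale" : "");
		return 0;
	}

The shadowed value no longer matches the guest pte, which is exactly the
stale spte the patch prevents.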

We could do something clever with generation counters, but a test run by
Marcelo suggests this is unnecessary and we can just do the revalidation
unconditionally.  The pte will be in the processor cache and the check can
be quite fast.
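
In outline, the revalidation that now runs unconditionally in FNAME(fetch)
looks like the sketch below.  Treat it as a paraphrase rather than the exact
kernel code: pte_gpa and walked_pte are stand-in names for the pte address
and value recorded during the guest walk (the real code keeps them in the
guest walker):

	shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level - 1,
				       metaphysical, access, shadow_ent);
	if (!metaphysical) {
		pt_element_t curr_pte;
		int r;

		/* kvm_mmu_get_page() write-protected the guest page
		 * table, so this read observes its final value. */
		r = kvm_read_guest_atomic(vcpu->kvm, pte_gpa,
					  &curr_pte, sizeof(curr_pte));
		if (r || curr_pte != walked_pte)
			return NULL;	/* stale walk; let the fault retry */
	}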

Signed-off-by: Avi Kivity <avi@qumranet.com>
arch/x86/kvm/mmu.c
arch/x86/kvm/paging_tmpl.h

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 194ece6974e6619953ef183a1dc207928dc6bca8..d8172aabc660de7503c43b697fea470d81f7eb9a 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -681,8 +681,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
                                             unsigned level,
                                             int metaphysical,
                                             unsigned access,
-                                            u64 *parent_pte,
-                                            bool *new_page)
+                                            u64 *parent_pte)
 {
        union kvm_mmu_page_role role;
        unsigned index;
@@ -722,8 +721,6 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
        vcpu->arch.mmu.prefetch_page(vcpu, sp);
        if (!metaphysical)
                rmap_write_protect(vcpu->kvm, gfn);
-       if (new_page)
-               *new_page = 1;
        return sp;
 }
 
@@ -1006,8 +1003,7 @@ static int __nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write,
                                >> PAGE_SHIFT;
                        new_table = kvm_mmu_get_page(vcpu, pseudo_gfn,
                                                     v, level - 1,
-                                                    1, ACC_ALL, &table[index],
-                                                    NULL);
+                                                    1, ACC_ALL, &table[index]);
                        if (!new_table) {
                                pgprintk("nonpaging_map: ENOMEM\n");
                                kvm_release_page_clean(page);
@@ -1100,7 +1096,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
 
                ASSERT(!VALID_PAGE(root));
                sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
-                                     PT64_ROOT_LEVEL, 0, ACC_ALL, NULL, NULL);
+                                     PT64_ROOT_LEVEL, 0, ACC_ALL, NULL);
                root = __pa(sp->spt);
                ++sp->root_count;
                vcpu->arch.mmu.root_hpa = root;
@@ -1121,7 +1117,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
                        root_gfn = 0;
                sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
                                      PT32_ROOT_LEVEL, !is_paging(vcpu),
-                                     ACC_ALL, NULL, NULL);
+                                     ACC_ALL, NULL);
                root = __pa(sp->spt);
                ++sp->root_count;
                vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 5588fa594b61616af2daaea2852c471f97526c90..ecc0856268c47c8c7a5c5773ac0d239d37dbc58e 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -300,7 +300,6 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
                u64 shadow_pte;
                int metaphysical;
                gfn_t table_gfn;
-               bool new_page = 0;
 
                shadow_ent = ((u64 *)__va(shadow_addr)) + index;
                if (level == PT_PAGE_TABLE_LEVEL)
@@ -322,8 +321,8 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
                }
                shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
                                               metaphysical, access,
-                                              shadow_ent, &new_page);
-               if (new_page && !metaphysical) {
+                                              shadow_ent);
+               if (!metaphysical) {
                        int r;
                        pt_element_t curr_pte;
                        r = kvm_read_guest_atomic(vcpu->kvm,