KVM: Allow browsing memslots with mmu_lock
Author:     Andrea Arcangeli <andrea@qumranet.com>
AuthorDate: Fri, 25 Jul 2008 14:32:03 +0000 (16:32 +0200)
Commit:     Avi Kivity <avi@qumranet.com>
CommitDate: Tue, 29 Jul 2008 09:33:50 +0000 (12:33 +0300)
This allows reading memslots with only mmu_lock held, so that mmu
notifiers, which run in atomic context with mmu_lock held, can browse
them safely.

Signed-off-by: Andrea Arcangeli <andrea@qumranet.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
arch/x86/kvm/x86.c
virt/kvm/kvm_main.c
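
To see what the new locking buys, here is a minimal sketch of the kind
of reader this patch enables: an mmu-notifier-style lookup that walks
the memslot array while holding only mmu_lock, never mmap_sem, so it is
safe in atomic context.  The function below is hypothetical, not code
from this tree (the patch's own comments name the real consumers
hva_to_rmmap()/kvm_hva_to_rmapp); the fields it touches (kvm->nmemslots,
userspace_addr, user_alloc, npages) are the ones the patch makes safe
to read under the lock.

	/*
	 * Hypothetical reader sketch: map a host virtual address back
	 * to the memslot covering it, under mmu_lock only.  Slots that
	 * are still being set up are published with userspace_addr == 0
	 * and user_alloc == 0, so they can be skipped safely.
	 */
	static struct kvm_memory_slot *hva_to_memslot(struct kvm *kvm,
						      unsigned long hva)
	{
		int i;

		/* caller holds kvm->mmu_lock; nothing below can sleep */
		for (i = 0; i < kvm->nmemslots; i++) {
			struct kvm_memory_slot *memslot = &kvm->memslots[i];
			unsigned long start = memslot->userspace_addr;
			unsigned long end = start +
					    (memslot->npages << PAGE_SHIFT);

			/* kernel-allocated slot not yet published */
			if (!memslot->user_alloc && !memslot->userspace_addr)
				continue;

			if (hva >= start && hva < end)
				return memslot;
		}

		return NULL;
	}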

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 9870ce4229209f8663b9dfc827899607a835ac9f..c7b01efe06469627b32d0e850a76347a5fbb9e40 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3974,16 +3974,23 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
         */
        if (!user_alloc) {
                if (npages && !old.rmap) {
+                       unsigned long userspace_addr;
+
                        down_write(&current->mm->mmap_sem);
-                       memslot->userspace_addr = do_mmap(NULL, 0,
-                                                    npages * PAGE_SIZE,
-                                                    PROT_READ | PROT_WRITE,
-                                                    MAP_SHARED | MAP_ANONYMOUS,
-                                                    0);
+                       userspace_addr = do_mmap(NULL, 0,
+                                                npages * PAGE_SIZE,
+                                                PROT_READ | PROT_WRITE,
+                                                MAP_SHARED | MAP_ANONYMOUS,
+                                                0);
                        up_write(&current->mm->mmap_sem);
 
-                       if (IS_ERR((void *)memslot->userspace_addr))
-                               return PTR_ERR((void *)memslot->userspace_addr);
+                       if (IS_ERR((void *)userspace_addr))
+                               return PTR_ERR((void *)userspace_addr);
+
+                       /* set userspace_addr atomically for kvm_hva_to_rmapp */
+                       spin_lock(&kvm->mmu_lock);
+                       memslot->userspace_addr = userspace_addr;
+                       spin_unlock(&kvm->mmu_lock);
                } else {
                        if (!old.user_alloc && old.rmap) {
                                int ret;
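
The hunk above is careful about ordering: do_mmap() can both sleep and
fail, so it runs under mmap_sem into a local variable, and its result
only reaches memslot->userspace_addr, under mmu_lock, after the
IS_ERR() check.  Previously the raw return value, possibly a negated
errno encoded in the address, sat in the slot until the check, where a
reader holding only mmu_lock could see it.  A small user-space
approximation of the kernel's error-pointer encoding (MAX_ERRNO and
IS_ERR_VALUE() as in include/linux/err.h, minus the unlikely() hint)
shows what such a transient value looks like:

	#include <stdio.h>

	#define MAX_ERRNO	4095
	#define IS_ERR_VALUE(x)	((x) >= (unsigned long)-MAX_ERRNO)

	int main(void)
	{
		/* a plausible mmap address vs. do_mmap() failing with -ENOMEM */
		unsigned long ok  = 0x7f0000000000UL;
		unsigned long bad = (unsigned long)-12;

		printf("%#lx -> %s\n", ok,  IS_ERR_VALUE(ok)  ? "errno" : "address");
		printf("%#lx -> %s\n", bad, IS_ERR_VALUE(bad) ? "errno" : "address");
		return 0;
	}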
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index a845890b680062365930b89a63ba4536c0fa6670..3735212cd3f807b7c7cb8b7b9135069f1cfe5935 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -375,7 +375,15 @@ int __kvm_set_memory_region(struct kvm *kvm,
                memset(new.rmap, 0, npages * sizeof(*new.rmap));
 
                new.user_alloc = user_alloc;
-               new.userspace_addr = mem->userspace_addr;
+               /*
+                * hva_to_rmmap() serializes with the mmu_lock and to be
+                * safe it has to ignore memslots with !user_alloc &&
+                * !userspace_addr.
+                */
+               if (user_alloc)
+                       new.userspace_addr = mem->userspace_addr;
+               else
+                       new.userspace_addr = 0;
        }
        if (npages && !new.lpage_info) {
                int largepages = npages / KVM_PAGES_PER_HPAGE;
@@ -408,17 +416,21 @@ int __kvm_set_memory_region(struct kvm *kvm,
        }
 #endif /* not defined CONFIG_S390 */
 
-       if (mem->slot >= kvm->nmemslots)
-               kvm->nmemslots = mem->slot + 1;
-
        if (!npages)
                kvm_arch_flush_shadow(kvm);
 
+       spin_lock(&kvm->mmu_lock);
+       if (mem->slot >= kvm->nmemslots)
+               kvm->nmemslots = mem->slot + 1;
+
        *memslot = new;
+       spin_unlock(&kvm->mmu_lock);
 
        r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc);
        if (r) {
+               spin_lock(&kvm->mmu_lock);
                *memslot = old;
+               spin_unlock(&kvm->mmu_lock);
                goto out_free;
        }