From: Andrea Arcangeli
Date: Fri, 25 Jul 2008 14:32:03 +0000 (+0200)
Subject: KVM: Allow browsing memslots with mmu_lock
X-Git-Tag: v2.6.27-rc2~84^2~3
X-Git-Url: http://pilppa.com/gitweb/?a=commitdiff_plain;h=604b38ac0369bd50fcbb33344aa5553c071009f7;p=linux-2.6-omap-h63xx.git

KVM: Allow browsing memslots with mmu_lock

This allows reading memslots with only the mmu_lock held, for mmu
notifiers that run in atomic context with the mmu_lock held.

Signed-off-by: Andrea Arcangeli
Signed-off-by: Avi Kivity
---

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 9870ce42292..c7b01efe064 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3974,16 +3974,23 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 	 */
 	if (!user_alloc) {
 		if (npages && !old.rmap) {
+			unsigned long userspace_addr;
+
 			down_write(&current->mm->mmap_sem);
-			memslot->userspace_addr = do_mmap(NULL, 0,
-						     npages * PAGE_SIZE,
-						     PROT_READ | PROT_WRITE,
-						     MAP_SHARED | MAP_ANONYMOUS,
-						     0);
+			userspace_addr = do_mmap(NULL, 0,
+						 npages * PAGE_SIZE,
+						 PROT_READ | PROT_WRITE,
+						 MAP_SHARED | MAP_ANONYMOUS,
+						 0);
 			up_write(&current->mm->mmap_sem);
 
-			if (IS_ERR((void *)memslot->userspace_addr))
-				return PTR_ERR((void *)memslot->userspace_addr);
+			if (IS_ERR((void *)userspace_addr))
+				return PTR_ERR((void *)userspace_addr);
+
+			/* set userspace_addr atomically for kvm_hva_to_rmapp */
+			spin_lock(&kvm->mmu_lock);
+			memslot->userspace_addr = userspace_addr;
+			spin_unlock(&kvm->mmu_lock);
 		} else {
 			if (!old.user_alloc && old.rmap) {
 				int ret;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index a845890b680..3735212cd3f 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -375,7 +375,15 @@ int __kvm_set_memory_region(struct kvm *kvm,
 		memset(new.rmap, 0, npages * sizeof(*new.rmap));
 
 		new.user_alloc = user_alloc;
-		new.userspace_addr = mem->userspace_addr;
+		/*
+		 * hva_to_rmmap() serializes with the mmu_lock and to be
+		 * safe it has to ignore memslots with !user_alloc &&
+		 * !userspace_addr.
+		 */
+		if (user_alloc)
+			new.userspace_addr = mem->userspace_addr;
+		else
+			new.userspace_addr = 0;
 	}
 	if (npages && !new.lpage_info) {
 		int largepages = npages / KVM_PAGES_PER_HPAGE;
@@ -408,17 +416,21 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	}
 #endif /* not defined CONFIG_S390 */
 
-	if (mem->slot >= kvm->nmemslots)
-		kvm->nmemslots = mem->slot + 1;
-
 	if (!npages)
 		kvm_arch_flush_shadow(kvm);
 
+	spin_lock(&kvm->mmu_lock);
+	if (mem->slot >= kvm->nmemslots)
+		kvm->nmemslots = mem->slot + 1;
+
 	*memslot = new;
+	spin_unlock(&kvm->mmu_lock);
 
 	r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc);
 	if (r) {
+		spin_lock(&kvm->mmu_lock);
 		*memslot = old;
+		spin_unlock(&kvm->mmu_lock);
 		goto out_free;
 	}
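
For context, the reader side this locking enables looks roughly like the
sketch below: an mmu notifier running in atomic context can walk the
memslot array under mmu_lock alone, because the writer paths above now
update nmemslots, *memslot and userspace_addr inside the same lock. This
is an illustrative sketch only, not part of the patch: the function name
example_hva_to_gfn is hypothetical, and it assumes the 2.6.27-era
struct kvm layout (fixed memslots[] array, nmemslots, and per-slot
userspace_addr/npages/base_gfn fields).

#include <linux/kvm_host.h>

/*
 * Illustrative only (not from this patch): resolve an hva to a gfn by
 * walking memslots under mmu_lock, the access pattern that the
 * writer-side locking above makes safe from atomic context.
 */
static gfn_t example_hva_to_gfn(struct kvm *kvm, unsigned long hva)
{
	gfn_t gfn = 0;
	int i;

	spin_lock(&kvm->mmu_lock);
	for (i = 0; i < kvm->nmemslots; i++) {
		struct kvm_memory_slot *slot = &kvm->memslots[i];
		unsigned long start = slot->userspace_addr;
		unsigned long end = start + (slot->npages << PAGE_SHIFT);

		/*
		 * Slots still being set up by !user_alloc ioctls have
		 * userspace_addr == 0 (see the kvm_main.c hunk above)
		 * and must be skipped.
		 */
		if (!slot->npages || !slot->userspace_addr)
			continue;
		if (hva >= start && hva < end) {
			gfn = slot->base_gfn + ((hva - start) >> PAGE_SHIFT);
			break;
		}
	}
	spin_unlock(&kvm->mmu_lock);
	return gfn;
}

In the mmu-notifier patches that build on this change the caller
typically holds mmu_lock already; the sketch takes the lock locally only
to stay self-contained.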