From: Avi Kivity
Date: Wed, 21 Nov 2007 00:06:21 +0000 (+0200)
Subject: KVM: MMU: Avoid unnecessary remote tlb flushes when guest updates a pte
X-Git-Tag: v2.6.25-rc1~1138^2~103
X-Git-Url: http://pilppa.com/gitweb/?a=commitdiff_plain;h=79539cec0c3c38d35a1e3e5310d2c562ae6e82b8;p=linux-2.6-omap-h63xx.git

KVM: MMU: Avoid unnecessary remote tlb flushes when guest updates a pte

If all we're doing is increasing permissions on a pte (typical for demand
paging), then there's no need to flush remote tlbs.  Worst case they'll
get a spurious page fault.

Signed-off-by: Avi Kivity
---

diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index 101cd5377a8..281dd5f9310 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -134,6 +134,8 @@ static int dbg = 1;
 #define PT32_DIR_BASE_ADDR_MASK \
 	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
 
+#define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
+			| PT64_NX_MASK)
 
 #define PFERR_PRESENT_MASK (1U << 0)
 #define PFERR_WRITE_MASK (1U << 1)
@@ -1227,7 +1229,6 @@ static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
 		}
 	}
 	set_shadow_pte(spte, shadow_trap_nonpresent_pte);
-	kvm_flush_remote_tlbs(vcpu->kvm);
 }
 
 static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
@@ -1250,6 +1251,27 @@ static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
 				  offset_in_pte);
 }
 
+static bool need_remote_flush(u64 old, u64 new)
+{
+	if (!is_shadow_present_pte(old))
+		return false;
+	if (!is_shadow_present_pte(new))
+		return true;
+	if ((old ^ new) & PT64_BASE_ADDR_MASK)
+		return true;
+	old ^= PT64_NX_MASK;
+	new ^= PT64_NX_MASK;
+	return (old & ~new & PT64_PERM_MASK) != 0;
+}
+
+static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, u64 old, u64 new)
+{
+	if (need_remote_flush(old, new))
+		kvm_flush_remote_tlbs(vcpu->kvm);
+	else
+		kvm_mmu_flush_tlb(vcpu);
+}
+
 static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
 {
 	u64 *spte = vcpu->last_pte_updated;
@@ -1265,6 +1287,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	struct hlist_node *node, *n;
 	struct hlist_head *bucket;
 	unsigned index;
+	u64 entry;
 	u64 *spte;
 	unsigned offset = offset_in_page(gpa);
 	unsigned pte_size;
@@ -1335,9 +1358,11 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	}
 	spte = &page->spt[page_offset / sizeof(*spte)];
 	while (npte--) {
+		entry = *spte;
 		mmu_pte_write_zap_pte(vcpu, page, spte);
 		mmu_pte_write_new_pte(vcpu, page, spte, new, bytes,
 				      page_offset & (pte_size - 1));
+		mmu_pte_write_flush_tlb(vcpu, entry, *spte);
 		++spte;
 	}
 }
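
To make the flush decision above concrete, here is a minimal, self-contained
sketch of need_remote_flush() that can be compiled and run on its own.  It is
not part of the patch: the mask values mirror the x86 pte bit layout used by
mmu.c, but PT64_BASE_ADDR_MASK and is_shadow_present_pte() are simplified
stand-ins for the real definitions.

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	typedef uint64_t u64;

	/* standard x86 pte permission bits; NX is bit 63 */
	#define PT_PRESENT_MASK		(1ULL << 0)
	#define PT_WRITABLE_MASK	(1ULL << 1)
	#define PT_USER_MASK		(1ULL << 2)
	#define PT64_NX_MASK		(1ULL << 63)
	/* simplified: physical frame bits 12..51 */
	#define PT64_BASE_ADDR_MASK	(((1ULL << 52) - 1) & ~0xfffULL)
	#define PT64_PERM_MASK		(PT_PRESENT_MASK | PT_WRITABLE_MASK \
					 | PT_USER_MASK | PT64_NX_MASK)

	/* simplified stand-in for mmu.c's version */
	static bool is_shadow_present_pte(u64 pte)
	{
		return pte & PT_PRESENT_MASK;
	}

	static bool need_remote_flush(u64 old, u64 new)
	{
		if (!is_shadow_present_pte(old))
			return false;	/* nothing could be cached remotely */
		if (!is_shadow_present_pte(new))
			return true;	/* translation revoked */
		if ((old ^ new) & PT64_BASE_ADDR_MASK)
			return true;	/* points at a different frame */
		/*
		 * NX has inverted sense (set = less permission), so flip it
		 * in both ptes; afterwards a 1->0 transition in any
		 * PT64_PERM_MASK bit uniformly means a permission was lost.
		 */
		old ^= PT64_NX_MASK;
		new ^= PT64_NX_MASK;
		return (old & ~new & PT64_PERM_MASK) != 0;
	}

	int main(void)
	{
		u64 base = 0x1000;

		/* read-only -> writable: permissions only grow -> 0 */
		printf("%d\n", need_remote_flush(base | PT_PRESENT_MASK,
						 base | PT_PRESENT_MASK
						      | PT_WRITABLE_MASK));

		/* writable -> read-only: stale writable entry -> 1 */
		printf("%d\n", need_remote_flush(base | PT_PRESENT_MASK
						      | PT_WRITABLE_MASK,
						 base | PT_PRESENT_MASK));

		/* NX newly set: a bit was added, but permission lost -> 1 */
		printf("%d\n", need_remote_flush(base | PT_PRESENT_MASK,
						 base | PT_PRESENT_MASK
						      | PT64_NX_MASK));
		return 0;
	}

Note that only the IPI-based remote flush is conditional; the local tlb is
still flushed unconditionally via kvm_mmu_flush_tlb().  A remote vcpu that
still holds the old, lesser-permission translation just takes a spurious
page fault on its next access, which is the trade-off the commit message
describes.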