KVM: Reduce stack usage in kvm_pv_mmu_op()
author     Dave Hansen <dave@linux.vnet.ibm.com>
           Mon, 11 Aug 2008 17:01:49 +0000 (10:01 -0700)
committer  Avi Kivity <avi@qumranet.com>
           Wed, 15 Oct 2008 08:15:18 +0000 (10:15 +0200)
We're in a hot path, so we can't kmalloc() the buffer on every call
without risking a performance hit.  Instead, we embed the buffer we
need in the kvm_vcpu_arch structure.  This path is exercised often
enough that the per-vcpu space is not really wasted.

We also have to move the buffer structure's definition to the
arch-specific x86 kvm header.
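
For readers following the pattern rather than the patch itself, here is a
minimal standalone sketch of the technique (plain C, not kernel code;
vcpu_state, scratch_buf, and do_op are illustrative names, and memcpy()
stands in for kvm_read_guest()): a large scratch buffer moves off the hot
function's stack into a long-lived per-object structure, so each call pays
neither the 512-plus-byte stack footprint nor a heap allocation.

    #include <stddef.h>
    #include <string.h>

    struct scratch_buf {
            void *ptr;
            unsigned int len;
            unsigned int processed;
            char buf[512] __attribute__((aligned(sizeof(long))));
    };

    struct vcpu_state {
            /* ... other per-vcpu fields ... */
            struct scratch_buf op_buffer;   /* lives as long as the vcpu */
    };

    int do_op(struct vcpu_state *v, const void *src, size_t bytes)
    {
            /* before the patch this was "struct scratch_buf buffer;",
             * i.e. 512+ bytes on the stack of a hot function */
            struct scratch_buf *buffer = &v->op_buffer;

            buffer->ptr = buffer->buf;
            buffer->len = bytes < sizeof(buffer->buf)
                            ? bytes : sizeof(buffer->buf);
            buffer->processed = 0;

            /* stands in for kvm_read_guest() copying from guest memory */
            memcpy(buffer->buf, src, buffer->len);
            return 0;
    }

Keeping the buffer in per-vcpu state also sidesteps locking concerns,
since kvm_pv_mmu_op() runs in the context of the vcpu that owns the
buffer.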

Signed-off-by: Dave Hansen <dave@linux.vnet.ibm.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
arch/x86/kvm/mmu.c
include/asm-x86/kvm_host.h

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index c3afbfe6b0c187245218f7f7772de4215f7877a5..171bcea1be219e3ed93be697970f30a282dc993c 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -135,13 +135,6 @@ module_param(dbg, bool, 0644);
 #define ACC_USER_MASK    PT_USER_MASK
 #define ACC_ALL          (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)
 
-struct kvm_pv_mmu_op_buffer {
-       void *ptr;
-       unsigned len;
-       unsigned processed;
-       char buf[512] __aligned(sizeof(long));
-};
-
 struct kvm_rmap_desc {
        u64 *shadow_ptes[RMAP_EXT];
        struct kvm_rmap_desc *more;
@@ -2292,18 +2285,18 @@ int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
                  gpa_t addr, unsigned long *ret)
 {
        int r;
-       struct kvm_pv_mmu_op_buffer buffer;
+       struct kvm_pv_mmu_op_buffer *buffer = &vcpu->arch.mmu_op_buffer;
 
-       buffer.ptr = buffer.buf;
-       buffer.len = min_t(unsigned long, bytes, sizeof buffer.buf);
-       buffer.processed = 0;
+       buffer->ptr = buffer->buf;
+       buffer->len = min_t(unsigned long, bytes, sizeof buffer->buf);
+       buffer->processed = 0;
 
-       r = kvm_read_guest(vcpu->kvm, addr, buffer.buf, buffer.len);
+       r = kvm_read_guest(vcpu->kvm, addr, buffer->buf, buffer->len);
        if (r)
                goto out;
 
-       while (buffer.len) {
-               r = kvm_pv_mmu_op_one(vcpu, &buffer);
+       while (buffer->len) {
+               r = kvm_pv_mmu_op_one(vcpu, buffer);
                if (r < 0)
                        goto out;
                if (r == 0)
@@ -2312,7 +2305,7 @@ int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
 
        r = 1;
 out:
-       *ret = buffer.processed;
+       *ret = buffer->processed;
        return r;
 }
 
diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
index 99dddfcecf6082413c04beb818cfee4836ec1780..9cb4b4dae5c69d36ec5009e9471f0e37315cc4f9 100644
--- a/include/asm-x86/kvm_host.h
+++ b/include/asm-x86/kvm_host.h
@@ -201,6 +201,13 @@ struct kvm_mmu_page {
        };
 };
 
+struct kvm_pv_mmu_op_buffer {
+       void *ptr;
+       unsigned len;
+       unsigned processed;
+       char buf[512] __aligned(sizeof(long));
+};
+
 /*
  * x86 supports 3 paging modes (4-level 64-bit, 3-level 64-bit, and 2-level
  * 32-bit).  The kvm_mmu structure abstracts the details of the current mmu
@@ -248,6 +255,9 @@ struct kvm_vcpu_arch {
        bool tpr_access_reporting;
 
        struct kvm_mmu mmu;
+       /* only needed in kvm_pv_mmu_op() path, but it's hot so
+        * put it here to avoid allocation */
+       struct kvm_pv_mmu_op_buffer mmu_op_buffer;
 
        struct kvm_mmu_memory_cache mmu_pte_chain_cache;
        struct kvm_mmu_memory_cache mmu_rmap_desc_cache;
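
One detail that travels with the structure: buf keeps its
__aligned(sizeof(long)) attribute because ptr walks through the raw bytes
and the op handlers reinterpret them as structures (in the kernel they are
read as struct kvm_mmu_op_header).  A standalone sketch of why that
matters; struct op_header here is a hypothetical stand-in, not the
kernel's type:

    #include <stdio.h>
    #include <string.h>

    struct op_header {      /* hypothetical stand-in for the op header */
            long op;
            long len;
    };

    struct op_buffer {
            void *ptr;
            unsigned int len;
            char buf[512] __attribute__((aligned(sizeof(long))));
    };

    int main(void)
    {
            struct op_buffer b;
            struct op_header h = { .op = 1, .len = 16 };

            b.ptr = b.buf;
            memcpy(b.buf, &h, sizeof(h));

            /* without the aligned attribute this cast could be a
             * misaligned access on strict-alignment architectures */
            printf("op=%ld\n", ((struct op_header *)b.ptr)->op);
            return 0;
    }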