KVM: VMX: Only save/restore MSR_K6_STAR if necessary
author	Avi Kivity <avi@qumranet.com>
	Thu, 19 Apr 2007 11:28:44 +0000 (14:28 +0300)
committer	Avi Kivity <avi@qumranet.com>
	Thu, 3 May 2007 07:52:30 +0000 (10:52 +0300)
Intel hosts only support syscall/sysret in long mode (and only if efer.sce
is enabled), so only reload the related MSR_K6_STAR if the guest will
actually be able to use it.

This reduces vmexit cost by about 500 cycles (6400 -> 5870) on my setup.

Signed-off-by: Avi Kivity <avi@qumranet.com>
drivers/kvm/vmx.c
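
The idea, in a minimal standalone sketch (not the kernel code itself): keep the
conditionally needed MSR at the end of the save/restore list, then shrink the
reload count unless the guest can actually execute syscall/sysret. The struct
and field names below are simplified stand-ins for the real kvm_vcpu state in
the patch that follows.

	/* Sketch only: illustrates the counting trick used by setup_msrs(). */
	#include <stdbool.h>
	#include <stdio.h>

	#define EFER_SCE (1ULL << 0)	/* syscall-enable bit in EFER */

	struct vcpu_sketch {
		int nmsrs;			/* tracked MSRs, MSR_K6_STAR kept last */
		bool long_mode;			/* guest running in 64-bit long mode?  */
		unsigned long long efer;	/* guest EFER shadow                   */
	};

	/* How many MSRs actually need to be swapped on vmentry/vmexit. */
	static int msrs_to_reload(const struct vcpu_sketch *vcpu)
	{
		int n = vcpu->nmsrs;

		/* Drop MSR_K6_STAR (the last entry) ... */
		--n;
		/* ... unless the guest can use syscall/sysret on an Intel host:
		 * that requires long mode and efer.sce. */
		if (vcpu->long_mode && (vcpu->efer & EFER_SCE))
			++n;
		return n;
	}

	int main(void)
	{
		struct vcpu_sketch legacy = { .nmsrs = 4, .long_mode = false, .efer = 0 };
		struct vcpu_sketch sysc   = { .nmsrs = 4, .long_mode = true,  .efer = EFER_SCE };

		printf("legacy guest:  reload %d MSRs\n", msrs_to_reload(&legacy));	/* 3 */
		printf("syscall guest: reload %d MSRs\n", msrs_to_reload(&sysc));	/* 4 */
		return 0;
	}

Because MSR_K6_STAR sits at the end of vmx_msr_index[], the common case is a
simple decrement of the count rather than a reshuffle of the array.
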

index b61d4dd804e34d7339a2dc787038b2af2ff54d01..37537af126d1d778ef5843b93aee84759c26a029 100644
@@ -69,6 +69,10 @@ static struct kvm_vmx_segment_field {
        VMX_SEGMENT_FIELD(LDTR),
 };
 
+/*
+ * Keep MSR_K6_STAR at the end, as setup_msrs() will try to optimize it
+ * away by decrementing the array size.
+ */
 static const u32 vmx_msr_index[] = {
 #ifdef CONFIG_X86_64
        MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR, MSR_KERNEL_GS_BASE,
@@ -323,6 +327,18 @@ static void setup_msrs(struct kvm_vcpu *vcpu)
                nr_skip = NR_64BIT_MSRS;
        nr_good_msrs = vcpu->nmsrs - nr_skip;
 
+       /*
+        * MSR_K6_STAR is only needed on long mode guests, and only
+        * if efer.sce is enabled.
+        */
+       if (find_msr_entry(vcpu, MSR_K6_STAR)) {
+               --nr_good_msrs;
+#ifdef CONFIG_X86_64
+               if (is_long_mode(vcpu) && (vcpu->shadow_efer & EFER_SCE))
+                       ++nr_good_msrs;
+#endif
+       }
+
        vmcs_writel(VM_ENTRY_MSR_LOAD_ADDR,
                    virt_to_phys(vcpu->guest_msrs + nr_skip));
        vmcs_writel(VM_EXIT_MSR_STORE_ADDR,