KVM: x86: handle missing MPX in nested virtualization
author     Paolo Bonzini <pbonzini@redhat.com>
           Wed, 5 Mar 2014 22:19:52 +0000 (23:19 +0100)
committer  Paolo Bonzini <pbonzini@redhat.com>
           Mon, 17 Mar 2014 11:21:39 +0000 (12:21 +0100)
When doing nested virtualization, we may be able to read BNDCFGS but
still not be allowed to write to GUEST_BNDCFGS in the VMCS.  Guard
writes to the field with vmx_mpx_supported(), and similarly hide the
MSR from userspace if the processor does not support the field.
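
The vmx_mpx_supported() helper named here is not shown in the diff below (only a
forward declaration is added); as introduced earlier in the MPX series it boils
down to checking that the VMCS can both clear BNDCFGS on VM-exit and load it on
VM-entry.  A rough sketch, assuming the vmcs_config global and the
VM_EXIT_CLEAR_BNDCFGS / VM_ENTRY_LOAD_BNDCFGS control bits from vmx.c:

/*
 * Sketch of the guard (not part of this patch): MPX is only usable when
 * the VMCS supports clearing BNDCFGS on VM-exit and loading it on
 * VM-entry.  vmcs_config and the two control bits are assumed from vmx.c.
 */
static bool vmx_mpx_supported(void)
{
	return (vmcs_config.vmexit_ctrl & VM_EXIT_CLEAR_BNDCFGS) &&
	       (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_BNDCFGS);
}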

We could work around this with the generic MSR save/load machinery,
but there is only a limited number of MSR save/load slots and it is
not really worthwhile to waste one for a scenario that should not
happen except in the nested virtualization case.
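
For comparison, the rejected approach would spend one of the few atomic MSR
switch slots on every vCPU.  A hypothetical sketch only: vmx_autoload_bndcfgs()
is made up for illustration, and add_atomic_switch_msr() is assumed from the
vmx.c of this era.

/*
 * Not part of this patch: the rejected alternative would let the CPU
 * save/restore BNDCFGS through the VMCS MSR-load/store areas, consuming
 * one of the limited autoload slots on every vCPU.
 */
static void vmx_autoload_bndcfgs(struct vcpu_vmx *vmx, u64 guest_val)
{
	add_atomic_switch_msr(vmx, MSR_IA32_BNDCFGS, guest_val,
			      0 /* host value restored on VM-exit */);
}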

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/cpuid.c
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c

diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 18aefb4d0927258c434baf777ac757e5baee4faa..64fae65730f3a536f8586c9d0de26395c0c283db 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -47,7 +47,7 @@ u64 kvm_supported_xcr0(void)
 {
        u64 xcr0 = KVM_SUPPORTED_XCR0 & host_xcr0;
 
-       if (!kvm_x86_ops->mpx_supported || !kvm_x86_ops->mpx_supported())
+       if (!kvm_x86_ops->mpx_supported())
                xcr0 &= ~(XSTATE_BNDREGS | XSTATE_BNDCSR);
 
        return xcr0;
@@ -259,8 +259,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
 #endif
        unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0;
        unsigned f_invpcid = kvm_x86_ops->invpcid_supported() ? F(INVPCID) : 0;
-       unsigned f_mpx = kvm_x86_ops->mpx_supported ?
-                        (kvm_x86_ops->mpx_supported() ? F(MPX) : 0) : 0;
+       unsigned f_mpx = kvm_x86_ops->mpx_supported() ? F(MPX) : 0;
 
        /* cpuid 1.edx */
        const u32 kvm_supported_word0_x86_features =
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index a449c3d76cba00890ee1fa83966f4bf14e258961..2136cb6ab1327a2ef645eb6451324b2ba8280a91 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -4089,6 +4089,11 @@ static bool svm_invpcid_supported(void)
        return false;
 }
 
+static bool svm_mpx_supported(void)
+{
+       return false;
+}
+
 static bool svm_has_wbinvd_exit(void)
 {
        return true;
@@ -4371,6 +4376,7 @@ static struct kvm_x86_ops svm_x86_ops = {
 
        .rdtscp_supported = svm_rdtscp_supported,
        .invpcid_supported = svm_invpcid_supported,
+       .mpx_supported = svm_mpx_supported,
 
        .set_supported_cpuid = svm_set_supported_cpuid,
 
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index c95bea17fc1ec78d7fd08f1d9e655ab209e18386..1320e0f8e61174d9a5c1da8d17e455f4a839cb1a 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -729,6 +729,7 @@ static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu);
 static u64 construct_eptp(unsigned long root_hpa);
 static void kvm_cpu_vmxon(u64 addr);
 static void kvm_cpu_vmxoff(void);
+static bool vmx_mpx_supported(void);
 static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr);
 static void vmx_set_segment(struct kvm_vcpu *vcpu,
                            struct kvm_segment *var, int seg);
@@ -2501,6 +2502,8 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
                data = vmcs_readl(GUEST_SYSENTER_ESP);
                break;
        case MSR_IA32_BNDCFGS:
+               if (!vmx_mpx_supported())
+                       return 1;
                data = vmcs_read64(GUEST_BNDCFGS);
                break;
        case MSR_IA32_FEATURE_CONTROL:
@@ -2572,6 +2575,8 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                vmcs_writel(GUEST_SYSENTER_ESP, data);
                break;
        case MSR_IA32_BNDCFGS:
+               if (!vmx_mpx_supported())
+                       return 1;
                vmcs_write64(GUEST_BNDCFGS, data);
                break;
        case MSR_IA32_TSC:
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 3f5fb4535f9c6f18d37daa16bbd33773be135352..aa986959f237644e80da91a2077bee1a6244591f 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3937,6 +3937,23 @@ static void kvm_init_msr_list(void)
        for (i = j = KVM_SAVE_MSRS_BEGIN; i < ARRAY_SIZE(msrs_to_save); i++) {
                if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
                        continue;
+
+               /*
+                * Even MSRs that are valid in the host may not be exposed
+                * to the guests in some cases.  We could work around this
+                * in VMX with the generic MSR save/load machinery, but it
+                * is not really worthwhile since it will really only
+                * happen with nested virtualization.
+                */
+               switch (msrs_to_save[i]) {
+               case MSR_IA32_BNDCFGS:
+                       if (!kvm_x86_ops->mpx_supported())
+                               continue;
+                       break;
+               default:
+                       break;
+               }
+
                if (j < i)
                        msrs_to_save[j] = msrs_to_save[i];
                j++;
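
The net effect of the kvm_init_msr_list() change is visible to userspace
through KVM_GET_MSR_INDEX_LIST: MSR_IA32_BNDCFGS simply disappears from the
returned list when MPX cannot be exposed to guests.  A minimal userspace probe
(a sketch, assuming only the documented KVM ioctl and the architectural MSR
index 0xd90, which is not exported by the uapi headers):

/*
 * Userspace sketch (not part of this patch): check whether KVM still
 * advertises BNDCFGS for save/restore after this change.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

#define MSR_IA32_BNDCFGS 0x00000d90	/* architectural MSR index (assumed) */

int main(void)
{
	struct kvm_msr_list probe = { .nmsrs = 0 };
	struct kvm_msr_list *list;
	unsigned int i;
	int kvm = open("/dev/kvm", O_RDWR);

	if (kvm < 0)
		return 1;
	/* First call fails with E2BIG but fills in the required count. */
	ioctl(kvm, KVM_GET_MSR_INDEX_LIST, &probe);
	list = malloc(sizeof(*list) + probe.nmsrs * sizeof(list->indices[0]));
	if (!list)
		return 1;
	list->nmsrs = probe.nmsrs;
	if (ioctl(kvm, KVM_GET_MSR_INDEX_LIST, list) < 0)
		return 1;
	for (i = 0; i < list->nmsrs; i++)
		if (list->indices[i] == MSR_IA32_BNDCFGS)
			printf("BNDCFGS is in the save/restore list\n");
	free(list);
	return 0;
}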