Signed-off-by: Paolo Bonzini <pbonz...@redhat.com>
---
 arch/x86/include/asm/kvm_host.h |  3 ++-
 arch/x86/kvm/cpuid.c            |  5 ++++-
 arch/x86/kvm/cpuid.h            |  8 ++++++++
 arch/x86/kvm/emulate.c          | 13 +++++++++++++
 arch/x86/kvm/svm.c              |  6 ++++++
 arch/x86/kvm/vmx.c              |  6 ++++++
 arch/x86/kvm/x86.c              |  3 +++
 7 files changed, 42 insertions(+), 2 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index d8204817a5fe..9e1a1db65c7b 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -86,7 +86,7 @@
                          | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR | X86_CR4_PCIDE \
                          | X86_CR4_OSXSAVE | X86_CR4_SMEP | X86_CR4_FSGSBASE \
                          | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE | X86_CR4_SMAP \
-                         | X86_CR4_PKE))
+                         | X86_CR4_PKE | X86_CR4_UMIP))
 
 #define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
 
@@ -971,6 +971,7 @@ struct kvm_x86_ops {
        void (*handle_external_intr)(struct kvm_vcpu *vcpu);
        bool (*mpx_supported)(void);
        bool (*xsaves_supported)(void);
+       bool (*umip_emulated)(void);
 
        int (*check_nested_events)(struct kvm_vcpu *vcpu, bool external_intr);
 
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 7597b42a8a88..ac80e74056c4 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -315,6 +315,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
        unsigned f_invpcid = kvm_x86_ops->invpcid_supported() ? F(INVPCID) : 0;
        unsigned f_mpx = kvm_mpx_supported() ? F(MPX) : 0;
        unsigned f_xsaves = kvm_x86_ops->xsaves_supported() ? F(XSAVES) : 0;
+       unsigned f_umip = kvm_x86_ops->umip_emulated() ? F(UMIP) : 0;
 
        /* cpuid 1.edx */
        const u32 kvm_cpuid_1_edx_x86_features =
@@ -373,7 +374,8 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
                F(XSAVEOPT) | F(XSAVEC) | F(XGETBV1) | f_xsaves;
 
        /* cpuid 7.0.ecx*/
-       const u32 kvm_cpuid_7_0_ecx_x86_features = F(PKU) | 0 /*OSPKE*/;
+       const u32 kvm_cpuid_7_0_ecx_x86_features = F(PKU) | 0 /*OSPKE*/ |
+               F(UMIP);
 
        /* all calls to cpuid_count() should be made on the same cpu */
        get_cpu();
@@ -454,6 +456,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
                        entry->ebx |= F(TSC_ADJUST);
                        entry->ecx &= kvm_cpuid_7_0_ecx_x86_features;
                        cpuid_mask(&entry->ecx, CPUID_7_ECX);
+                       entry->ecx |= f_umip;
                        /* PKU is not yet implemented for shadow paging. */
                        if (!tdp_enabled)
                                entry->ecx &= ~F(PKU);
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index e17a74b1d852..422de9db1785 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -88,6 +88,14 @@ static inline bool guest_cpuid_has_pku(struct kvm_vcpu *vcpu)
        return best && (best->ecx & bit(X86_FEATURE_PKU));
 }
 
+static inline bool guest_cpuid_has_umip(struct kvm_vcpu *vcpu)
+{
+       struct kvm_cpuid_entry2 *best;
+
+       best = kvm_find_cpuid_entry(vcpu, 7, 0);
+       return best && (best->ecx & bit(X86_FEATURE_UMIP));
+}
+
 static inline bool guest_cpuid_has_longmode(struct kvm_vcpu *vcpu)
 {
        struct kvm_cpuid_entry2 *best;
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 3e09c7461ba8..d2f568a0fc46 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -3579,6 +3579,11 @@ static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
 
 static int em_store_sreg(struct x86_emulate_ctxt *ctxt, int segment)
 {
+       if (segment > VCPU_SREG_GS &&
+           (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
+           ctxt->ops->cpl(ctxt) > 0)
+               return emulate_gp(ctxt, 0);
+
        ctxt->dst.val = get_segment_selector(ctxt, segment);
        if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
                ctxt->dst.bytes = 2;
@@ -3679,6 +3684,10 @@ static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
 {
        struct desc_ptr desc_ptr;
 
+       if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
+           ctxt->ops->cpl(ctxt) > 0)
+               return emulate_gp(ctxt, 0);
+
        if (ctxt->mode == X86EMUL_MODE_PROT64)
                ctxt->op_bytes = 8;
        get(ctxt, &desc_ptr);
@@ -3738,6 +3747,10 @@ static int em_lidt(struct x86_emulate_ctxt *ctxt)
 
 static int em_smsw(struct x86_emulate_ctxt *ctxt)
 {
+       if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
+           ctxt->ops->cpl(ctxt) > 0)
+               return emulate_gp(ctxt, 0);
+
        if (ctxt->dst.type == OP_MEM)
                ctxt->dst.bytes = 2;
        ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 5bfdbbf1ce79..227258ddab24 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -4736,6 +4736,11 @@ static bool svm_xsaves_supported(void)
        return false;
 }
 
+static bool svm_umip_emulated(void)
+{
+       return false;
+}
+
 static bool svm_has_wbinvd_exit(void)
 {
        return true;
@@ -5054,6 +5059,7 @@ static struct kvm_x86_ops svm_x86_ops = {
        .invpcid_supported = svm_invpcid_supported,
        .mpx_supported = svm_mpx_supported,
        .xsaves_supported = svm_xsaves_supported,
+       .umip_emulated = svm_umip_emulated,
 
        .set_supported_cpuid = svm_set_supported_cpuid,
 
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index a9e35b3f3ea6..b133fc3d6a7d 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -8595,6 +8595,11 @@ static bool vmx_xsaves_supported(void)
                SECONDARY_EXEC_XSAVES;
 }
 
+static bool vmx_umip_emulated(void)
+{
+       return false;
+}
+
 static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
 {
        u32 exit_intr_info;
@@ -11239,6 +11244,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
        .handle_external_intr = vmx_handle_external_intr,
        .mpx_supported = vmx_mpx_supported,
        .xsaves_supported = vmx_xsaves_supported,
+       .umip_emulated = vmx_umip_emulated,
 
        .check_nested_events = vmx_check_nested_events,
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 548f546f92db..85c7e8135342 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -746,6 +746,9 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
        if (!guest_cpuid_has_pku(vcpu) && (cr4 & X86_CR4_PKE))
                return 1;
 
+       if (!guest_cpuid_has_umip(vcpu) && (cr4 & X86_CR4_UMIP))
+               return 1;
+
        if (is_long_mode(vcpu)) {
                if (!(cr4 & X86_CR4_PAE))
                        return 1;
-- 
1.8.3.1
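
For reference, the behaviour being emulated above can be exercised from guest
user space: with CR4.UMIP set, SGDT/SIDT/SLDT/SMSW/STR executed at CPL > 0
take #GP(0), which a Linux guest delivers to the process as SIGSEGV. Below is
a minimal, hypothetical test sketch (not part of this patch; assumes a Linux
guest, GCC inline asm, and that the handler/function names are illustrative):

#include <setjmp.h>
#include <signal.h>
#include <stdio.h>

static sigjmp_buf env;

/* #GP(0) in user space is delivered as SIGSEGV; recover and report it. */
static void on_segv(int sig)
{
	siglongjmp(env, 1);
}

int main(void)
{
	unsigned char gdtr[10];	/* 2-byte limit + up to 8-byte base */

	signal(SIGSEGV, on_segv);

	if (sigsetjmp(env, 1) == 0) {
		/* SGDT is one of the instructions UMIP restricts to CPL 0. */
		asm volatile("sgdt %0" : "=m" (gdtr));
		printf("SGDT succeeded at CPL 3: UMIP not enforced\n");
	} else {
		printf("SGDT raised #GP -> SIGSEGV: UMIP enforced\n");
	}
	return 0;
}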

