This patch turns guest_cpuid_has_XYZ(vcpu) into guest_cpuid_has(vcpu,
X86_FEATURE_XYZ), which gets rid of many very similar helpers.
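
For example, the mtrr.c hunk below turns

        if (guest_cpuid_has_mtrr(vcpu))

into

        if (guest_cpuid_has(vcpu, X86_FEATURE_MTRR))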

When seeing an X86_FEATURE_*, we can know which CPUID leaf and register it
belongs to, but this information isn't in common code, so we recreate it
for KVM.
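
As an illustration of the mapping (using X86_FEATURE_MTRR, which
asm/cpufeatures.h defines as word 0, bit 12):

        unsigned x86_leaf = X86_FEATURE_MTRR / 32;        /* 0 == CPUID_1_EDX */
        struct cpuid_reg cpuid = reverse_cpuid[x86_leaf]; /* {1, 0, CPUID_EDX} */
        /* guest_cpuid_has() then tests bit(X86_FEATURE_MTRR), i.e. bit 12
         * of the guest's CPUID.01H:EDX value. */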

Add some BUILD_BUG_ONs to make sure that invalid uses are caught at compile
time.
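
A sketch of a caller that the BUILD_BUG_ONs reject (the function below is
hypothetical; the checks rely on the helpers being inlined so that
__builtin_constant_p() sees the argument):

        unsigned f = get_feature_from_cmdline(); /* not a compile-time constant */
        guest_cpuid_has(vcpu, f);                /* x86_feature_cpuid() fails to build */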

Signed-off-by: Radim Krčmář <[email protected]>
---
 arch/x86/kvm/cpuid.h | 202 +++++++++++++++++----------------------------------
 arch/x86/kvm/mmu.c   |   7 +-
 arch/x86/kvm/mtrr.c  |   2 +-
 arch/x86/kvm/svm.c   |   2 +-
 arch/x86/kvm/vmx.c   |  26 +++----
 arch/x86/kvm/x86.c   |  38 +++++-----
 6 files changed, 105 insertions(+), 172 deletions(-)

diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index da6728383052..3b17d915b608 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -3,6 +3,7 @@
 
 #include "x86.h"
 #include <asm/cpu.h>
+#include <asm/processor.h>
 
 int kvm_update_cpuid(struct kvm_vcpu *vcpu);
 bool kvm_mpx_supported(void);
@@ -29,95 +30,78 @@ static inline int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
        return vcpu->arch.maxphyaddr;
 }
 
-static inline bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
+struct cpuid_reg {
+       u32 function;
+       u32 index;
+       int reg;
+};
+
+static const struct cpuid_reg reverse_cpuid[] = {
+       [CPUID_1_EDX]         = {         1, 0, CPUID_EDX},
+       [CPUID_8000_0001_EDX] = {0x80000001, 0, CPUID_EDX},
+       [CPUID_8086_0001_EDX] = {0x80860001, 0, CPUID_EDX},
+       [CPUID_1_ECX]         = {         1, 0, CPUID_ECX},
+       [CPUID_C000_0001_EDX] = {0xc0000001, 0, CPUID_EDX},
+       [CPUID_8000_0001_ECX] = {0x80000001, 0, CPUID_ECX},
+       [CPUID_7_0_EBX]       = {         7, 0, CPUID_EBX},
+       [CPUID_D_1_EAX]       = {       0xd, 1, CPUID_EAX},
+       [CPUID_F_0_EDX]       = {       0xf, 0, CPUID_EDX},
+       [CPUID_F_1_EDX]       = {       0xf, 1, CPUID_EDX},
+       [CPUID_8000_0008_EBX] = {0x80000008, 0, CPUID_EBX},
+       [CPUID_6_EAX]         = {         6, 0, CPUID_EAX},
+       [CPUID_8000_000A_EDX] = {0x8000000a, 0, CPUID_EDX},
+       [CPUID_7_ECX]         = {         7, 0, CPUID_ECX},
+       [CPUID_8000_0007_EBX] = {0x80000007, 0, CPUID_EBX},
+};
+
+static inline struct cpuid_reg x86_feature_cpuid(unsigned x86_feature)
+{
+       unsigned x86_leaf = x86_feature / 32;
+
+       BUILD_BUG_ON(!__builtin_constant_p(x86_leaf));
+       BUILD_BUG_ON(x86_leaf >= ARRAY_SIZE(reverse_cpuid));
+       BUILD_BUG_ON(reverse_cpuid[x86_leaf].function == 0);
+
+       return reverse_cpuid[x86_leaf];
+}
+
+static inline int *guest_cpuid_get_register(struct kvm_vcpu *vcpu, unsigned x86_feature)
 {
        struct kvm_cpuid_entry2 *best;
+       struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature);
 
-       if (!static_cpu_has(X86_FEATURE_XSAVE))
+       best = kvm_find_cpuid_entry(vcpu, cpuid.function, cpuid.index);
+       if (!best)
+               return NULL;
+
+       switch (cpuid.reg) {
+       case CPUID_EAX:
+               return &best->eax;
+       case CPUID_EBX:
+               return &best->ebx;
+       case CPUID_ECX:
+               return &best->ecx;
+       case CPUID_EDX:
+               return &best->edx;
+       default:
+               BUILD_BUG();
+               return NULL;
+       }
+}
+
+static inline bool guest_cpuid_has(struct kvm_vcpu *vcpu, unsigned x86_feature)
+{
+       int *reg;
+
+       if (x86_feature == X86_FEATURE_XSAVE &&
+                       !static_cpu_has(X86_FEATURE_XSAVE))
                return false;
 
-       best = kvm_find_cpuid_entry(vcpu, 1, 0);
-       return best && (best->ecx & bit(X86_FEATURE_XSAVE));
-}
+       reg = guest_cpuid_get_register(vcpu, x86_feature);
+       if (!reg)
+               return false;
 
-static inline bool guest_cpuid_has_mtrr(struct kvm_vcpu *vcpu)
-{
-       struct kvm_cpuid_entry2 *best;
-
-       best = kvm_find_cpuid_entry(vcpu, 1, 0);
-       return best && (best->edx & bit(X86_FEATURE_MTRR));
-}
-
-static inline bool guest_cpuid_has_tsc_adjust(struct kvm_vcpu *vcpu)
-{
-       struct kvm_cpuid_entry2 *best;
-
-       best = kvm_find_cpuid_entry(vcpu, 7, 0);
-       return best && (best->ebx & bit(X86_FEATURE_TSC_ADJUST));
-}
-
-static inline bool guest_cpuid_has_smep(struct kvm_vcpu *vcpu)
-{
-       struct kvm_cpuid_entry2 *best;
-
-       best = kvm_find_cpuid_entry(vcpu, 7, 0);
-       return best && (best->ebx & bit(X86_FEATURE_SMEP));
-}
-
-static inline bool guest_cpuid_has_smap(struct kvm_vcpu *vcpu)
-{
-       struct kvm_cpuid_entry2 *best;
-
-       best = kvm_find_cpuid_entry(vcpu, 7, 0);
-       return best && (best->ebx & bit(X86_FEATURE_SMAP));
-}
-
-static inline bool guest_cpuid_has_fsgsbase(struct kvm_vcpu *vcpu)
-{
-       struct kvm_cpuid_entry2 *best;
-
-       best = kvm_find_cpuid_entry(vcpu, 7, 0);
-       return best && (best->ebx & bit(X86_FEATURE_FSGSBASE));
-}
-
-static inline bool guest_cpuid_has_pku(struct kvm_vcpu *vcpu)
-{
-       struct kvm_cpuid_entry2 *best;
-
-       best = kvm_find_cpuid_entry(vcpu, 7, 0);
-       return best && (best->ecx & bit(X86_FEATURE_PKU));
-}
-
-static inline bool guest_cpuid_has_longmode(struct kvm_vcpu *vcpu)
-{
-       struct kvm_cpuid_entry2 *best;
-
-       best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
-       return best && (best->edx & bit(X86_FEATURE_LM));
-}
-
-static inline bool guest_cpuid_has_osvw(struct kvm_vcpu *vcpu)
-{
-       struct kvm_cpuid_entry2 *best;
-
-       best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
-       return best && (best->ecx & bit(X86_FEATURE_OSVW));
-}
-
-static inline bool guest_cpuid_has_pcid(struct kvm_vcpu *vcpu)
-{
-       struct kvm_cpuid_entry2 *best;
-
-       best = kvm_find_cpuid_entry(vcpu, 1, 0);
-       return best && (best->ecx & bit(X86_FEATURE_PCID));
-}
-
-static inline bool guest_cpuid_has_x2apic(struct kvm_vcpu *vcpu)
-{
-       struct kvm_cpuid_entry2 *best;
-
-       best = kvm_find_cpuid_entry(vcpu, 1, 0);
-       return best && (best->ecx & bit(X86_FEATURE_X2APIC));
+       return *reg & bit(x86_feature);
 }
 
 static inline bool guest_cpuid_is_amd(struct kvm_vcpu *vcpu)
@@ -128,58 +112,6 @@ static inline bool guest_cpuid_is_amd(struct kvm_vcpu *vcpu)
        return best && best->ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx;
 }
 
-static inline bool guest_cpuid_has_gbpages(struct kvm_vcpu *vcpu)
-{
-       struct kvm_cpuid_entry2 *best;
-
-       best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
-       return best && (best->edx & bit(X86_FEATURE_GBPAGES));
-}
-
-static inline bool guest_cpuid_has_rtm(struct kvm_vcpu *vcpu)
-{
-       struct kvm_cpuid_entry2 *best;
-
-       best = kvm_find_cpuid_entry(vcpu, 7, 0);
-       return best && (best->ebx & bit(X86_FEATURE_RTM));
-}
-
-static inline bool guest_cpuid_has_mpx(struct kvm_vcpu *vcpu)
-{
-       struct kvm_cpuid_entry2 *best;
-
-       best = kvm_find_cpuid_entry(vcpu, 7, 0);
-       return best && (best->ebx & bit(X86_FEATURE_MPX));
-}
-
-static inline bool guest_cpuid_has_rdtscp(struct kvm_vcpu *vcpu)
-{
-       struct kvm_cpuid_entry2 *best;
-
-       best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
-       return best && (best->edx & bit(X86_FEATURE_RDTSCP));
-}
-
-/*
- * NRIPS is provided through cpuidfn 0x8000000a.edx bit 3
- */
-#define BIT_NRIPS      3
-
-static inline bool guest_cpuid_has_nrips(struct kvm_vcpu *vcpu)
-{
-       struct kvm_cpuid_entry2 *best;
-
-       best = kvm_find_cpuid_entry(vcpu, 0x8000000a, 0);
-
-       /*
-        * NRIPS is a scattered cpuid feature, so we can't use
-        * X86_FEATURE_NRIPS here (X86_FEATURE_NRIPS would be bit
-        * position 8, not 3).
-        */
-       return best && (best->edx & bit(BIT_NRIPS));
-}
-#undef BIT_NRIPS
-
 static inline int guest_cpuid_family(struct kvm_vcpu *vcpu)
 {
        struct kvm_cpuid_entry2 *best;
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 9b1dd114956a..2fac6f78c420 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -4052,7 +4052,8 @@ static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
 {
        __reset_rsvds_bits_mask(vcpu, &context->guest_rsvd_check,
                                cpuid_maxphyaddr(vcpu), context->root_level,
-                               context->nx, guest_cpuid_has_gbpages(vcpu),
+                               context->nx,
+                               guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES),
                                is_pse(vcpu), guest_cpuid_is_amd(vcpu));
 }
 
@@ -4114,8 +4115,8 @@ reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
        __reset_rsvds_bits_mask(vcpu, &context->shadow_zero_check,
                                boot_cpu_data.x86_phys_bits,
                                context->shadow_root_level, uses_nx,
-                               guest_cpuid_has_gbpages(vcpu), is_pse(vcpu),
-                               true);
+                               guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES),
+                               is_pse(vcpu), true);
 }
 EXPORT_SYMBOL_GPL(reset_shadow_zero_bits_mask);
 
diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c
index 0149ac59c273..e9ea2d45ae66 100644
--- a/arch/x86/kvm/mtrr.c
+++ b/arch/x86/kvm/mtrr.c
@@ -130,7 +130,7 @@ static u8 mtrr_disabled_type(struct kvm_vcpu *vcpu)
         * enable MTRRs and it is obviously undesirable to run the
         * guest entirely with UC memory and we use WB.
         */
-       if (guest_cpuid_has_mtrr(vcpu))
+       if (guest_cpuid_has(vcpu, X86_FEATURE_MTRR))
                return MTRR_TYPE_UNCACHABLE;
        else
                return MTRR_TYPE_WRBACK;
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 4d8141e533c3..fcdc1412792e 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -5068,7 +5068,7 @@ static void svm_cpuid_update(struct kvm_vcpu *vcpu)
        struct kvm_cpuid_entry2 *entry;
 
        /* Update nrips enabled cache */
-       svm->nrips_enabled = !!guest_cpuid_has_nrips(&svm->vcpu);
+       svm->nrips_enabled = !!guest_cpuid_has(&svm->vcpu, X86_FEATURE_NRIPS);
 
        if (!kvm_vcpu_apicv_active(vcpu))
                return;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 29fd8af5c347..8367f901d681 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2568,7 +2568,7 @@ static void setup_msrs(struct vcpu_vmx *vmx)
                if (index >= 0)
                        move_msr_up(vmx, index, save_nmsrs++);
                index = __find_msr_index(vmx, MSR_TSC_AUX);
-               if (index >= 0 && guest_cpuid_has_rdtscp(&vmx->vcpu))
+               if (index >= 0 && guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP))
                        move_msr_up(vmx, index, save_nmsrs++);
                /*
                 * MSR_STAR is only needed on long mode guests, and only
@@ -2628,12 +2628,6 @@ static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
        }
 }
 
-static bool guest_cpuid_has_vmx(struct kvm_vcpu *vcpu)
-{
-       struct kvm_cpuid_entry2 *best = kvm_find_cpuid_entry(vcpu, 1, 0);
-       return best && (best->ecx & (1 << (X86_FEATURE_VMX & 31)));
-}
-
 /*
  * nested_vmx_allowed() checks whether a guest should be allowed to use VMX
  * instructions and MSRs (i.e., nested VMX). Nested VMX is disabled for
@@ -2642,7 +2636,7 @@ static bool guest_cpuid_has_vmx(struct kvm_vcpu *vcpu)
  */
 static inline bool nested_vmx_allowed(struct kvm_vcpu *vcpu)
 {
-       return nested && guest_cpuid_has_vmx(vcpu);
+       return nested && guest_cpuid_has(vcpu, X86_FEATURE_VMX);
 }
 
 /*
@@ -3224,7 +3218,8 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                break;
        case MSR_IA32_BNDCFGS:
                if (!kvm_mpx_supported() ||
-                   (!msr_info->host_initiated && !guest_cpuid_has_mpx(vcpu)))
+                   (!msr_info->host_initiated &&
+                    !guest_cpuid_has(vcpu, X86_FEATURE_MPX)))
                        return 1;
                msr_info->data = vmcs_read64(GUEST_BNDCFGS);
                break;
@@ -3248,7 +3243,8 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                msr_info->data = vcpu->arch.ia32_xss;
                break;
        case MSR_TSC_AUX:
-               if (!guest_cpuid_has_rdtscp(vcpu) && !msr_info->host_initiated)
+               if (!msr_info->host_initiated &&
+                   !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
                        return 1;
                /* Otherwise falls through */
        default:
@@ -3307,7 +3303,8 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                break;
        case MSR_IA32_BNDCFGS:
                if (!kvm_mpx_supported() ||
-                   (!msr_info->host_initiated && !guest_cpuid_has_mpx(vcpu)))
+                   (!msr_info->host_initiated &&
+                    !guest_cpuid_has(vcpu, X86_FEATURE_MPX)))
                        return 1;
                if (is_noncanonical_address(data & PAGE_MASK) ||
                    (data & MSR_IA32_BNDCFGS_RSVD))
@@ -3370,7 +3367,8 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                        clear_atomic_switch_msr(vmx, MSR_IA32_XSS);
                break;
        case MSR_TSC_AUX:
-               if (!guest_cpuid_has_rdtscp(vcpu) && !msr_info->host_initiated)
+               if (!msr_info->host_initiated &&
+                   !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
                        return 1;
                /* Check reserved bit, higher 32 bits should be zero */
                if ((data >> 32) != 0)
@@ -9383,7 +9381,7 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
        u32 secondary_exec_ctl = vmx_secondary_exec_control(vmx);
 
        if (vmx_rdtscp_supported()) {
-               bool rdtscp_enabled = guest_cpuid_has_rdtscp(vcpu);
+               bool rdtscp_enabled = guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP);
                if (!rdtscp_enabled)
                        secondary_exec_ctl &= ~SECONDARY_EXEC_RDTSCP;
 
@@ -9401,7 +9399,7 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
        best = kvm_find_cpuid_entry(vcpu, 0x7, 0);
        if (vmx_invpcid_supported() &&
            (!best || !(best->ebx & bit(X86_FEATURE_INVPCID)) ||
-           !guest_cpuid_has_pcid(vcpu))) {
+           !guest_cpuid_has(vcpu, X86_FEATURE_PCID))) {
                secondary_exec_ctl &= ~SECONDARY_EXEC_ENABLE_INVPCID;
 
                if (best)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 82a63c59f77b..d75997ba65b9 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -310,8 +310,8 @@ int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE);
        u64 new_state = msr_info->data &
                (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE);
-       u64 reserved_bits = ((~0ULL) << cpuid_maxphyaddr(vcpu)) |
-               0x2ff | (guest_cpuid_has_x2apic(vcpu) ? 0 : X2APIC_ENABLE);
+       u64 reserved_bits = ((~0ULL) << cpuid_maxphyaddr(vcpu)) | 0x2ff |
+               (guest_cpuid_has(vcpu, X86_FEATURE_X2APIC) ? 0 : X2APIC_ENABLE);
 
        if (!msr_info->host_initiated &&
            ((msr_info->data & reserved_bits) != 0 ||
@@ -754,19 +754,19 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
        if (cr4 & CR4_RESERVED_BITS)
                return 1;
 
-       if (!guest_cpuid_has_xsave(vcpu) && (cr4 & X86_CR4_OSXSAVE))
+       if (!guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) && (cr4 & X86_CR4_OSXSAVE))
                return 1;
 
-       if (!guest_cpuid_has_smep(vcpu) && (cr4 & X86_CR4_SMEP))
+       if (!guest_cpuid_has(vcpu, X86_FEATURE_SMEP) && (cr4 & X86_CR4_SMEP))
                return 1;
 
-       if (!guest_cpuid_has_smap(vcpu) && (cr4 & X86_CR4_SMAP))
+       if (!guest_cpuid_has(vcpu, X86_FEATURE_SMAP) && (cr4 & X86_CR4_SMAP))
                return 1;
 
-       if (!guest_cpuid_has_fsgsbase(vcpu) && (cr4 & X86_CR4_FSGSBASE))
+       if (!guest_cpuid_has(vcpu, X86_FEATURE_FSGSBASE) && (cr4 & X86_CR4_FSGSBASE))
                return 1;
 
-       if (!guest_cpuid_has_pku(vcpu) && (cr4 & X86_CR4_PKE))
+       if (!guest_cpuid_has(vcpu, X86_FEATURE_PKU) && (cr4 & X86_CR4_PKE))
                return 1;
 
        if (is_long_mode(vcpu)) {
@@ -779,7 +779,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
                return 1;
 
        if ((cr4 & X86_CR4_PCIDE) && !(old_cr4 & X86_CR4_PCIDE)) {
-               if (!guest_cpuid_has_pcid(vcpu))
+               if (!guest_cpuid_has(vcpu, X86_FEATURE_PCID))
                        return 1;
 
                /* PCID can not be enabled when cr3[11:0]!=000H or EFER.LMA=0 */
@@ -883,7 +883,7 @@ static u64 kvm_dr6_fixed(struct kvm_vcpu *vcpu)
 {
        u64 fixed = DR6_FIXED_1;
 
-       if (!guest_cpuid_has_rtm(vcpu))
+       if (!guest_cpuid_has(vcpu, X86_FEATURE_RTM))
                fixed |= DR6_RTM;
        return fixed;
 }
@@ -1533,8 +1533,9 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
        vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec;
        vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write;
 
-       if (guest_cpuid_has_tsc_adjust(vcpu) && !msr->host_initiated)
+       if (!msr->host_initiated && guest_cpuid_has(vcpu, X86_FEATURE_TSC_ADJUST))
                update_ia32_tsc_adjust_msr(vcpu, offset);
+
        kvm_vcpu_write_tsc_offset(vcpu, offset);
        raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
 
@@ -2184,7 +2185,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                kvm_set_lapic_tscdeadline_msr(vcpu, data);
                break;
        case MSR_IA32_TSC_ADJUST:
-               if (guest_cpuid_has_tsc_adjust(vcpu)) {
+               if (guest_cpuid_has(vcpu, X86_FEATURE_TSC_ADJUST)) {
                        if (!msr_info->host_initiated) {
                                s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
                                adjust_tsc_offset_guest(vcpu, adj);
@@ -2306,12 +2307,12 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data 0x%llx\n", msr, data);
                break;
        case MSR_AMD64_OSVW_ID_LENGTH:
-               if (!guest_cpuid_has_osvw(vcpu))
+               if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW))
                        return 1;
                vcpu->arch.osvw.length = data;
                break;
        case MSR_AMD64_OSVW_STATUS:
-               if (!guest_cpuid_has_osvw(vcpu))
+               if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW))
                        return 1;
                vcpu->arch.osvw.status = data;
                break;
@@ -2536,12 +2537,12 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                msr_info->data = 0xbe702111;
                break;
        case MSR_AMD64_OSVW_ID_LENGTH:
-               if (!guest_cpuid_has_osvw(vcpu))
+               if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW))
                        return 1;
                msr_info->data = vcpu->arch.osvw.length;
                break;
        case MSR_AMD64_OSVW_STATUS:
-               if (!guest_cpuid_has_osvw(vcpu))
+               if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW))
                        return 1;
                msr_info->data = vcpu->arch.osvw.status;
                break;
@@ -6601,7 +6602,7 @@ static void enter_smm(struct kvm_vcpu *vcpu)
        trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, true);
        vcpu->arch.hflags |= HF_SMM_MASK;
        memset(buf, 0, 512);
-       if (guest_cpuid_has_longmode(vcpu))
+       if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
                enter_smm_save_state_64(vcpu, buf);
        else
                enter_smm_save_state_32(vcpu, buf);
@@ -6653,7 +6654,7 @@ static void enter_smm(struct kvm_vcpu *vcpu)
        kvm_set_segment(vcpu, &ds, VCPU_SREG_GS);
        kvm_set_segment(vcpu, &ds, VCPU_SREG_SS);
 
-       if (guest_cpuid_has_longmode(vcpu))
+       if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
                kvm_x86_ops->set_efer(vcpu, 0);
 
        kvm_update_cpuid(vcpu);
@@ -7419,7 +7420,8 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
        int pending_vec, max_bits, idx;
        struct desc_ptr dt;
 
-       if (!guest_cpuid_has_xsave(vcpu) && (sregs->cr4 & X86_CR4_OSXSAVE))
+       if (!guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
+                       (sregs->cr4 & X86_CR4_OSXSAVE))
                return -EINVAL;
 
        dt.size = sregs->idt.limit;
-- 
2.13.3
