Delete the boot-time rendering of advanced features.  It's entirely ad-hoc,
not everything printed here is even used by Xen, and the same information is
available in `xen-cpuid` now.
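
For illustration (all names are taken from the hunks below), the per-feature
checks move from the private svm_feature_flags bitmap onto the regular
cpufeature machinery, which is also what `xen-cpuid` decodes:

    /* Before: bit test against the ad-hoc svm_feature_flags word. */
    if ( cpu_has_svm_nrips )
        vmcb->nextrip = regs->rip + _event.insn_len;

    /* After: boot_cpu_has(X86_FEATURE_NRIPS) via asm/cpufeature.h. */
    if ( cpu_has_nrips )
        vmcb->nextrip = regs->rip + _event.insn_len;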

With (only) svm_load_segs_{,prefetch}() now declared in svm.h, only svm.c and
domain.c need the header.  Clean up all other includes of it.
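
For reference, a minimal sketch of what remains in asm/hvm/svm/svm.h after
this change.  The svm_load_segs() prototype is visible in the final hunk; the
prefetch declaration is not shown there, so its signature below is an
assumption:

    /* asm/hvm/svm/svm.h after this patch (sketch). */
    void svm_load_segs_prefetch(void);      /* assumed signature */
    bool svm_load_segs(unsigned int ldt_ents, unsigned long ldt_base,
                       unsigned long fs_base, unsigned long gs_base,
                       unsigned long gs_shadow);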

No functional change.

Signed-off-by: Andrew Cooper <andrew.coop...@citrix.com>
---
CC: Jan Beulich <jbeul...@suse.com>
CC: Roger Pau Monné <roger....@citrix.com>
CC: Stefano Stabellini <sstabell...@kernel.org>
CC: Xenia Ragiadakou <xenia.ragiada...@amd.com>
CC: Sergiy Kibrik <sergiy_kib...@epam.com>
CC: George Dunlap <george.dun...@citrix.com>
CC: Andrei Semenov <andrei.seme...@vates.fr>
CC: Vaishali Thakkar <vaishali.thak...@vates.tech>
---
 xen/arch/x86/hvm/svm/asid.c            |  5 ++-
 xen/arch/x86/hvm/svm/emulate.c         |  3 +-
 xen/arch/x86/hvm/svm/intr.c            |  1 -
 xen/arch/x86/hvm/svm/nestedsvm.c       | 14 ++++----
 xen/arch/x86/hvm/svm/svm.c             | 50 +++++++-------------------
 xen/arch/x86/hvm/svm/vmcb.c            |  1 -
 xen/arch/x86/include/asm/cpufeature.h  | 10 ++++++
 xen/arch/x86/include/asm/hvm/svm/svm.h | 36 -------------------
 8 files changed, 31 insertions(+), 89 deletions(-)

diff --git a/xen/arch/x86/hvm/svm/asid.c b/xen/arch/x86/hvm/svm/asid.c
index 7977a8e86b53..6117a362d310 100644
--- a/xen/arch/x86/hvm/svm/asid.c
+++ b/xen/arch/x86/hvm/svm/asid.c
@@ -6,7 +6,6 @@
 
 #include <asm/amd.h>
 #include <asm/hvm/nestedhvm.h>
-#include <asm/hvm/svm/svm.h>
 
 #include "svm.h"
 
@@ -39,7 +38,7 @@ void svm_asid_handle_vmrun(void)
     {
         vmcb_set_asid(vmcb, true);
         vmcb->tlb_control =
-            cpu_has_svm_flushbyasid ? TLB_CTRL_FLUSH_ASID : TLB_CTRL_FLUSH_ALL;
+            cpu_has_flush_by_asid ? TLB_CTRL_FLUSH_ASID : TLB_CTRL_FLUSH_ALL;
         return;
     }
 
@@ -48,7 +47,7 @@ void svm_asid_handle_vmrun(void)
 
     vmcb->tlb_control =
         !need_flush ? TLB_CTRL_NO_FLUSH :
-        cpu_has_svm_flushbyasid ? TLB_CTRL_FLUSH_ASID : TLB_CTRL_FLUSH_ALL;
+        cpu_has_flush_by_asid ? TLB_CTRL_FLUSH_ASID : TLB_CTRL_FLUSH_ALL;
 }
 
 /*
diff --git a/xen/arch/x86/hvm/svm/emulate.c b/xen/arch/x86/hvm/svm/emulate.c
index 93ac1d3435f9..da6e21b2e270 100644
--- a/xen/arch/x86/hvm/svm/emulate.c
+++ b/xen/arch/x86/hvm/svm/emulate.c
@@ -11,7 +11,6 @@
 #include <asm/msr.h>
 #include <asm/hvm/emulate.h>
 #include <asm/hvm/hvm.h>
-#include <asm/hvm/svm/svm.h>
 #include <asm/hvm/svm/vmcb.h>
 
 #include "svm.h"
@@ -20,7 +19,7 @@ static unsigned long svm_nextrip_insn_length(struct vcpu *v)
 {
     struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
 
-    if ( !cpu_has_svm_nrips )
+    if ( !cpu_has_nrips )
         return 0;
 
 #ifndef NDEBUG
diff --git a/xen/arch/x86/hvm/svm/intr.c b/xen/arch/x86/hvm/svm/intr.c
index 4805c5567213..facd2894a2c6 100644
--- a/xen/arch/x86/hvm/svm/intr.c
+++ b/xen/arch/x86/hvm/svm/intr.c
@@ -17,7 +17,6 @@
 #include <asm/hvm/hvm.h>
 #include <asm/hvm/io.h>
 #include <asm/hvm/vlapic.h>
-#include <asm/hvm/svm/svm.h>
 #include <asm/hvm/nestedhvm.h> /* for nestedhvm_vcpu_in_guestmode */
 #include <asm/vm_event.h>
 #include <xen/event.h>
diff --git a/xen/arch/x86/hvm/svm/nestedsvm.c b/xen/arch/x86/hvm/svm/nestedsvm.c
index 35a2cbfd7d13..255af112661f 100644
--- a/xen/arch/x86/hvm/svm/nestedsvm.c
+++ b/xen/arch/x86/hvm/svm/nestedsvm.c
@@ -6,7 +6,6 @@
  */
 
 #include <asm/hvm/support.h>
-#include <asm/hvm/svm/svm.h>
 #include <asm/hvm/svm/vmcb.h>
 #include <asm/hvm/nestedhvm.h>
 #include <asm/hvm/svm/svmdebug.h>
@@ -1620,7 +1619,7 @@ void svm_nested_features_on_efer_update(struct vcpu *v)
     {
         if ( !vmcb->virt_ext.fields.vloadsave_enable &&
              paging_mode_hap(v->domain) &&
-             cpu_has_svm_vloadsave )
+             cpu_has_v_loadsave )
         {
             vmcb->virt_ext.fields.vloadsave_enable = 1;
             general2_intercepts  = vmcb_get_general2_intercepts(vmcb);
@@ -1629,8 +1628,7 @@ void svm_nested_features_on_efer_update(struct vcpu *v)
             vmcb_set_general2_intercepts(vmcb, general2_intercepts);
         }
 
-        if ( !vmcb->_vintr.fields.vgif_enable &&
-             cpu_has_svm_vgif )
+        if ( !vmcb->_vintr.fields.vgif_enable && cpu_has_v_gif )
         {
             vintr = vmcb_get_vintr(vmcb);
             vintr.fields.vgif = svm->ns_gif;
@@ -1675,8 +1673,8 @@ void __init start_nested_svm(struct hvm_function_table *hvm_function_table)
      */
     hvm_function_table->caps.nested_virt =
         hvm_function_table->caps.hap && 
-        cpu_has_svm_lbrv &&
-        cpu_has_svm_nrips &&
-        cpu_has_svm_flushbyasid &&
-        cpu_has_svm_decode;
+        cpu_has_v_lbr &&
+        cpu_has_nrips &&
+        cpu_has_flush_by_asid &&
+        cpu_has_decode_assist;
 }
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 4719fffae589..16eb875aab94 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1287,7 +1287,7 @@ static void cf_check svm_inject_event(const struct x86_event *event)
      * that hardware doesn't perform DPL checking on injection.
      */
     if ( event->type == X86_EVENTTYPE_PRI_SW_EXCEPTION ||
-         (!cpu_has_svm_nrips && (event->type >= X86_EVENTTYPE_SW_INTERRUPT)) )
+         (!cpu_has_nrips && (event->type >= X86_EVENTTYPE_SW_INTERRUPT)) )
         svm_emul_swint_injection(&_event);
 
     switch ( _event.vector | -(_event.type == X86_EVENTTYPE_SW_INTERRUPT) )
@@ -1341,7 +1341,7 @@ static void cf_check svm_inject_event(const struct x86_event *event)
     switch ( _event.type )
     {
     case X86_EVENTTYPE_SW_INTERRUPT: /* int $n */
-        if ( cpu_has_svm_nrips )
+        if ( cpu_has_nrips )
             vmcb->nextrip = regs->rip + _event.insn_len;
         else
             regs->rip += _event.insn_len;
@@ -1355,7 +1355,7 @@ static void cf_check svm_inject_event(const struct x86_event *event)
          * semantics.
          */
         regs->rip += _event.insn_len;
-        if ( cpu_has_svm_nrips )
+        if ( cpu_has_nrips )
             vmcb->nextrip = regs->rip;
         eventinj.type = X86_EVENTTYPE_HW_EXCEPTION;
         break;
@@ -1365,7 +1365,7 @@ static void cf_check svm_inject_event(const struct x86_event *event)
          * Hardware special cases HW_EXCEPTION with vectors 3 and 4 as having
          * trap semantics, and will perform DPL checks.
          */
-        if ( cpu_has_svm_nrips )
+        if ( cpu_has_nrips )
             vmcb->nextrip = regs->rip + _event.insn_len;
         else
             regs->rip += _event.insn_len;
@@ -1982,7 +1982,7 @@ static int cf_check svm_msr_write_intercept(
 
     case MSR_IA32_DEBUGCTLMSR:
         vmcb_set_debugctlmsr(vmcb, msr_content);
-        if ( !msr_content || !cpu_has_svm_lbrv )
+        if ( !msr_content || !cpu_has_v_lbr )
             break;
         vmcb->virt_ext.fields.lbr_enable = 1;
         svm_disable_intercept_for_msr(v, MSR_IA32_DEBUGCTLMSR);
@@ -2480,8 +2480,6 @@ static struct hvm_function_table __initdata_cf_clobber svm_function_table = {
 
 const struct hvm_function_table * __init start_svm(void)
 {
-    bool printed = false;
-
     svm_host_osvw_reset();
 
     if ( _svm_cpu_up(true) )
@@ -2493,38 +2491,14 @@ const struct hvm_function_table * __init start_svm(void)
 
     setup_vmcb_dump();
 
-    if ( boot_cpu_data.extended_cpuid_level >= 0x8000000aU )
-        svm_feature_flags = cpuid_edx(0x8000000aU);
-
-    printk("SVM: Supported advanced features:\n");
-
     /* DecodeAssists fast paths assume nextrip is valid for fast rIP update. */
-    if ( !cpu_has_svm_nrips )
-        __clear_bit(SVM_FEATURE_DECODEASSISTS, &svm_feature_flags);
+    if ( !cpu_has_nrips )
+        setup_clear_cpu_cap(X86_FEATURE_DECODE_ASSIST);
 
     if ( cpu_has_tsc_ratio )
         svm_function_table.tsc_scaling.ratio_frac_bits = 32;
 
-#define P(p,s) if ( p ) { printk(" - %s\n", s); printed = 1; }
-    P(cpu_has_svm_npt, "Nested Page Tables (NPT)");
-    P(cpu_has_svm_lbrv, "Last Branch Record (LBR) Virtualisation");
-    P(cpu_has_svm_nrips, "Next-RIP Saved on #VMEXIT");
-    P(cpu_has_svm_cleanbits, "VMCB Clean Bits");
-    P(cpu_has_svm_flushbyasid, "TLB flush by ASID");
-    P(cpu_has_svm_decode, "DecodeAssists");
-    P(cpu_has_svm_vloadsave, "Virtual VMLOAD/VMSAVE");
-    P(cpu_has_svm_vgif, "Virtual GIF");
-    P(cpu_has_pause_filter, "Pause-Intercept Filter");
-    P(cpu_has_pause_thresh, "Pause-Intercept Filter Threshold");
-    P(cpu_has_tsc_ratio, "TSC Rate MSR");
-    P(cpu_has_svm_sss, "NPT Supervisor Shadow Stack");
-    P(cpu_has_svm_spec_ctrl, "MSR_SPEC_CTRL virtualisation");
-#undef P
-
-    if ( !printed )
-        printk(" - none\n");
-
-    svm_function_table.caps.hap = cpu_has_svm_npt;
+    svm_function_table.caps.hap = cpu_has_npt;
     svm_function_table.caps.hap_superpage_2mb = true;
     svm_function_table.caps.hap_superpage_1gb = cpu_has_page1gb;
 
@@ -2761,7 +2735,7 @@ void asmlinkage svm_vmexit_handler(void)
                     regs->rax, regs->rbx, regs->rcx,
                     regs->rdx, regs->rsi, regs->rdi);
 
-        if ( cpu_has_svm_decode )
+        if ( cpu_has_decode_assist )
             v->arch.hvm.svm.cached_insn_len = vmcb->guest_ins_len & 0xf;
         rc = paging_fault(va, regs);
         v->arch.hvm.svm.cached_insn_len = 0;
@@ -2906,14 +2880,14 @@ void asmlinkage svm_vmexit_handler(void)
 
     case VMEXIT_CR0_READ ... VMEXIT_CR15_READ:
     case VMEXIT_CR0_WRITE ... VMEXIT_CR15_WRITE:
-        if ( cpu_has_svm_decode && vmcb->ei.mov_cr.mov_insn )
+        if ( cpu_has_decode_assist && vmcb->ei.mov_cr.mov_insn )
             svm_vmexit_do_cr_access(vmcb, regs);
         else if ( !hvm_emulate_one_insn(x86_insn_is_cr_access, "CR access") )
             hvm_inject_hw_exception(X86_EXC_GP, 0);
         break;
 
     case VMEXIT_INVLPG:
-        if ( cpu_has_svm_decode )
+        if ( cpu_has_decode_assist )
         {
             svm_invlpg_intercept(vmcb->exitinfo1);
             __update_guest_eip(regs, vmcb->nextrip - vmcb->rip);
@@ -2994,7 +2968,7 @@ void asmlinkage svm_vmexit_handler(void)
         break;
 
     case VMEXIT_NPF:
-        if ( cpu_has_svm_decode )
+        if ( cpu_has_decode_assist )
             v->arch.hvm.svm.cached_insn_len = vmcb->guest_ins_len & 0xf;
         rc = vmcb->ei.npf.ec & PFEC_page_present
              ? p2m_pt_handle_deferred_changes(vmcb->ei.npf.gpa) : 0;
diff --git a/xen/arch/x86/hvm/svm/vmcb.c b/xen/arch/x86/hvm/svm/vmcb.c
index 4e1f61dbe038..4452ab1263d4 100644
--- a/xen/arch/x86/hvm/svm/vmcb.c
+++ b/xen/arch/x86/hvm/svm/vmcb.c
@@ -15,7 +15,6 @@
 #include <asm/hvm/svm/vmcb.h>
 #include <asm/msr-index.h>
 #include <asm/p2m.h>
-#include <asm/hvm/svm/svm.h>
 #include <asm/hvm/svm/svmdebug.h>
 #include <asm/spec_ctrl.h>
 
diff --git a/xen/arch/x86/include/asm/cpufeature.h b/xen/arch/x86/include/asm/cpufeature.h
index 77cfd900cb56..b6fb8c24423c 100644
--- a/xen/arch/x86/include/asm/cpufeature.h
+++ b/xen/arch/x86/include/asm/cpufeature.h
@@ -218,6 +218,16 @@ static inline bool boot_cpu_has(unsigned int feat)
 #define cpu_has_rfds_clear      boot_cpu_has(X86_FEATURE_RFDS_CLEAR)
 
 /* CPUID level 0x8000000a.edx */
+#define cpu_has_npt             boot_cpu_has(X86_FEATURE_NPT)
+#define cpu_has_v_lbr           boot_cpu_has(X86_FEATURE_V_LBR)
+#define cpu_has_nrips           boot_cpu_has(X86_FEATURE_NRIPS)
+#define cpu_has_tsc_ratio       boot_cpu_has(X86_FEATURE_V_TSC_RATE)
+#define cpu_has_flush_by_asid   boot_cpu_has(X86_FEATURE_FLUSH_BY_ASID)
+#define cpu_has_decode_assist   boot_cpu_has(X86_FEATURE_DECODE_ASSIST)
+#define cpu_has_pause_filter    boot_cpu_has(X86_FEATURE_PAUSE_FILTER)
+#define cpu_has_pause_thresh    boot_cpu_has(X86_FEATURE_PAUSE_THRESH)
+#define cpu_has_v_loadsave      boot_cpu_has(X86_FEATURE_V_LOADSAVE)
+#define cpu_has_v_gif           boot_cpu_has(X86_FEATURE_V_GIF)
 #define cpu_has_v_spec_ctrl     boot_cpu_has(X86_FEATURE_V_SPEC_CTRL)
 
 /* Synthesized. */
diff --git a/xen/arch/x86/include/asm/hvm/svm/svm.h b/xen/arch/x86/include/asm/hvm/svm/svm.h
index 4eeeb25da90c..06a951225e64 100644
--- a/xen/arch/x86/include/asm/hvm/svm/svm.h
+++ b/xen/arch/x86/include/asm/hvm/svm/svm.h
@@ -21,40 +21,4 @@ bool svm_load_segs(unsigned int ldt_ents, unsigned long ldt_base,
                    unsigned long fs_base, unsigned long gs_base,
                    unsigned long gs_shadow);
 
-extern u32 svm_feature_flags;
-
-#define SVM_FEATURE_NPT            0 /* Nested page table support */
-#define SVM_FEATURE_LBRV           1 /* LBR virtualization support */
-#define SVM_FEATURE_SVML           2 /* SVM locking MSR support */
-#define SVM_FEATURE_NRIPS          3 /* Next RIP save on VMEXIT support */
-#define SVM_FEATURE_TSCRATEMSR     4 /* TSC ratio MSR support */
-#define SVM_FEATURE_VMCBCLEAN      5 /* VMCB clean bits support */
-#define SVM_FEATURE_FLUSHBYASID    6 /* TLB flush by ASID support */
-#define SVM_FEATURE_DECODEASSISTS  7 /* Decode assists support */
-#define SVM_FEATURE_PAUSEFILTER   10 /* Pause intercept filter support */
-#define SVM_FEATURE_PAUSETHRESH   12 /* Pause intercept filter support */
-#define SVM_FEATURE_VLOADSAVE     15 /* virtual vmload/vmsave */
-#define SVM_FEATURE_VGIF          16 /* Virtual GIF */
-#define SVM_FEATURE_SSS           19 /* NPT Supervisor Shadow Stacks */
-#define SVM_FEATURE_SPEC_CTRL     20 /* MSR_SPEC_CTRL virtualisation */
-
-static inline bool cpu_has_svm_feature(unsigned int feat)
-{
-    return svm_feature_flags & (1u << feat);
-}
-#define cpu_has_svm_npt       cpu_has_svm_feature(SVM_FEATURE_NPT)
-#define cpu_has_svm_lbrv      cpu_has_svm_feature(SVM_FEATURE_LBRV)
-#define cpu_has_svm_svml      cpu_has_svm_feature(SVM_FEATURE_SVML)
-#define cpu_has_svm_nrips     cpu_has_svm_feature(SVM_FEATURE_NRIPS)
-#define cpu_has_svm_cleanbits cpu_has_svm_feature(SVM_FEATURE_VMCBCLEAN)
-#define cpu_has_svm_flushbyasid cpu_has_svm_feature(SVM_FEATURE_FLUSHBYASID)
-#define cpu_has_svm_decode    cpu_has_svm_feature(SVM_FEATURE_DECODEASSISTS)
-#define cpu_has_svm_vgif      cpu_has_svm_feature(SVM_FEATURE_VGIF)
-#define cpu_has_pause_filter  cpu_has_svm_feature(SVM_FEATURE_PAUSEFILTER)
-#define cpu_has_pause_thresh  cpu_has_svm_feature(SVM_FEATURE_PAUSETHRESH)
-#define cpu_has_tsc_ratio     cpu_has_svm_feature(SVM_FEATURE_TSCRATEMSR)
-#define cpu_has_svm_vloadsave cpu_has_svm_feature(SVM_FEATURE_VLOADSAVE)
-#define cpu_has_svm_sss       cpu_has_svm_feature(SVM_FEATURE_SSS)
-#define cpu_has_svm_spec_ctrl cpu_has_svm_feature(SVM_FEATURE_SPEC_CTRL)
-
 #endif /* __ASM_X86_HVM_SVM_H__ */
-- 
2.30.2

