With cross-vendor support gone, the separate 64-bit shadow copies of the guest SYSENTER MSRs are no longer needed. Keep the values solely in the VMCB, stop intercepting the SYSENTER MSRs, and move their migration handling from svm_vmcb_save()/svm_vmcb_restore() to svm_save_cpu_state()/svm_load_cpu_state().
Signed-off-by: Alejandro Vallejo <[email protected]>
---
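Note for reviewers (illustration only, not part of the commit): the shadow fields being deleted existed because the VMCB's sysenter_* fields do not preserve their upper four bytes across a VMRUN/#VMEXIT, which mattered while cross-vendor support could expose full 64-bit SYSENTER values to guests. A minimal standalone sketch of that truncation follows; fake_vmcb and vmrun_roundtrip are hypothetical stand-ins, not Xen code:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for the VMCB's sysenter_eip save slot. */
    struct fake_vmcb {
        uint64_t sysenter_eip;
    };

    /* Models the hardware clearing the upper four bytes across VMRUN. */
    static uint64_t vmrun_roundtrip(uint64_t val)
    {
        return (uint32_t)val;
    }

    int main(void)
    {
        struct fake_vmcb vmcb = { .sysenter_eip = 0xffffffff81000000ULL };

        /*
         * Under cross-vendor support, a 64-bit Intel-style SYSENTER_EIP
         * stored only in the VMCB would come back truncated, hence the
         * separate 64-bit shadow fields.  AMD-native guests only ever
         * hold 32-bit-significant values, so the VMCB slot suffices.
         */
        vmcb.sysenter_eip = vmrun_roundtrip(vmcb.sysenter_eip);
        printf("after VMRUN: %#" PRIx64 "\n", vmcb.sysenter_eip);

        return 0;
    }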
xen/arch/x86/hvm/svm/svm.c | 42 +++++++++++-------------
xen/arch/x86/hvm/svm/vmcb.c | 3 ++
xen/arch/x86/include/asm/hvm/svm-types.h | 10 ------
3 files changed, 22 insertions(+), 33 deletions(-)
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 0658ca990f..e8f19dec04 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -401,10 +401,6 @@ static int svm_vmcb_save(struct vcpu *v, struct hvm_hw_cpu *c)
{
struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
- c->sysenter_cs = v->arch.hvm.svm.guest_sysenter_cs;
- c->sysenter_esp = v->arch.hvm.svm.guest_sysenter_esp;
- c->sysenter_eip = v->arch.hvm.svm.guest_sysenter_eip;
-
if ( vmcb->event_inj.v &&
hvm_event_needs_reinjection(vmcb->event_inj.type,
vmcb->event_inj.vector) )
@@ -468,11 +464,6 @@ static int svm_vmcb_restore(struct vcpu *v, struct hvm_hw_cpu *c)
svm_update_guest_cr(v, 0, 0);
svm_update_guest_cr(v, 4, 0);
- /* Load sysenter MSRs into both VMCB save area and VCPU fields. */
- vmcb->sysenter_cs = v->arch.hvm.svm.guest_sysenter_cs = c->sysenter_cs;
- vmcb->sysenter_esp = v->arch.hvm.svm.guest_sysenter_esp = c->sysenter_esp;
- vmcb->sysenter_eip = v->arch.hvm.svm.guest_sysenter_eip = c->sysenter_eip;
-
if ( paging_mode_hap(v->domain) )
{
vmcb_set_np(vmcb, true);
@@ -501,6 +492,9 @@ static void svm_save_cpu_state(struct vcpu *v, struct hvm_hw_cpu *data)
{
struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
+ data->sysenter_cs = vmcb->sysenter_cs;
+ data->sysenter_esp = vmcb->sysenter_esp;
+ data->sysenter_eip = vmcb->sysenter_eip;
data->shadow_gs = vmcb->kerngsbase;
data->msr_lstar = vmcb->lstar;
data->msr_star = vmcb->star;
@@ -512,11 +506,14 @@ static void svm_load_cpu_state(struct vcpu *v, struct hvm_hw_cpu *data)
{
struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
- vmcb->kerngsbase = data->shadow_gs;
- vmcb->lstar = data->msr_lstar;
- vmcb->star = data->msr_star;
- vmcb->cstar = data->msr_cstar;
- vmcb->sfmask = data->msr_syscall_mask;
+ vmcb->sysenter_cs = data->sysenter_cs;
+ vmcb->sysenter_esp = data->sysenter_esp;
+ vmcb->sysenter_eip = data->sysenter_eip;
+ vmcb->kerngsbase = data->shadow_gs;
+ vmcb->lstar = data->msr_lstar;
+ vmcb->star = data->msr_star;
+ vmcb->cstar = data->msr_cstar;
+ vmcb->sfmask = data->msr_syscall_mask;
v->arch.hvm.guest_efer = data->msr_efer;
svm_update_guest_efer(v);
}
@@ -1720,12 +1717,9 @@ static int cf_check svm_msr_read_intercept(
switch ( msr )
{
- /*
- * Sync not needed while the cross-vendor logic is in unilateral effect.
case MSR_IA32_SYSENTER_CS:
case MSR_IA32_SYSENTER_ESP:
case MSR_IA32_SYSENTER_EIP:
- */
case MSR_STAR:
case MSR_LSTAR:
case MSR_CSTAR:
@@ -1740,13 +1734,15 @@ static int cf_check svm_msr_read_intercept(
switch ( msr )
{
case MSR_IA32_SYSENTER_CS:
- *msr_content = v->arch.hvm.svm.guest_sysenter_cs;
+ *msr_content = vmcb->sysenter_cs;
break;
+
case MSR_IA32_SYSENTER_ESP:
- *msr_content = v->arch.hvm.svm.guest_sysenter_esp;
+ *msr_content = vmcb->sysenter_esp;
break;
+
case MSR_IA32_SYSENTER_EIP:
- *msr_content = v->arch.hvm.svm.guest_sysenter_eip;
+ *msr_content = vmcb->sysenter_eip;
break;
case MSR_STAR:
@@ -1940,11 +1936,11 @@ static int cf_check svm_msr_write_intercept(
switch ( msr )
{
case MSR_IA32_SYSENTER_ESP:
- vmcb->sysenter_esp = v->arch.hvm.svm.guest_sysenter_esp = msr_content;
+ vmcb->sysenter_esp = msr_content;
break;
case MSR_IA32_SYSENTER_EIP:
- vmcb->sysenter_eip = v->arch.hvm.svm.guest_sysenter_eip = msr_content;
+ vmcb->sysenter_eip = msr_content;
break;
case MSR_LSTAR:
@@ -1970,7 +1966,7 @@ static int cf_check svm_msr_write_intercept(
break;
case MSR_IA32_SYSENTER_CS:
- vmcb->sysenter_cs = v->arch.hvm.svm.guest_sysenter_cs = msr_content;
+ vmcb->sysenter_cs = msr_content;
break;
case MSR_STAR:
diff --git a/xen/arch/x86/hvm/svm/vmcb.c b/xen/arch/x86/hvm/svm/vmcb.c
index e583ef8548..76fcaf15c2 100644
--- a/xen/arch/x86/hvm/svm/vmcb.c
+++ b/xen/arch/x86/hvm/svm/vmcb.c
@@ -97,6 +97,9 @@ static int construct_vmcb(struct vcpu *v)
svm_disable_intercept_for_msr(v, MSR_LSTAR);
svm_disable_intercept_for_msr(v, MSR_STAR);
svm_disable_intercept_for_msr(v, MSR_SYSCALL_MASK);
+ svm_disable_intercept_for_msr(v, MSR_IA32_SYSENTER_CS);
+ svm_disable_intercept_for_msr(v, MSR_IA32_SYSENTER_EIP);
+ svm_disable_intercept_for_msr(v, MSR_IA32_SYSENTER_ESP);
vmcb->_msrpm_base_pa = virt_to_maddr(svm->msrpm);
vmcb->_iopm_base_pa = __pa(v->domain->arch.hvm.io_bitmap);
diff --git a/xen/arch/x86/include/asm/hvm/svm-types.h b/xen/arch/x86/include/asm/hvm/svm-types.h
index 051b235d8f..aaee91b4b6 100644
--- a/xen/arch/x86/include/asm/hvm/svm-types.h
+++ b/xen/arch/x86/include/asm/hvm/svm-types.h
@@ -27,16 +27,6 @@ struct svm_vcpu {
/* VMCB has a cached instruction from #PF/#NPF Decode Assist? */
uint8_t cached_insn_len; /* Zero if no cached instruction. */
-
- /*
- * Upper four bytes are undefined in the VMCB, therefore we can't use the
- * fields in the VMCB. Write a 64bit value and then read a 64bit value is
- * fine unless there's a VMRUN/VMEXIT in between which clears the upper
- * four bytes.
- */
- uint64_t guest_sysenter_cs;
- uint64_t guest_sysenter_esp;
- uint64_t guest_sysenter_eip;
};
struct nestedsvm {
--
2.43.0