> From: Lengyel, Tamas <tamas.leng...@intel.com>
> Sent: Friday, March 11, 2022 2:45 AM
> 
> During VM fork resetting, a failed vmentry has been observed when the reset
> is performed immediately after a STI instruction is executed. This is because
> the guest interruptibility state in the VMCS is modified by STI, but the
> subsequent reset removes the IF bit from FLAGS, causing the failed vmentry.

I didn't get the rationale here. Before this patch the interruptibility state
is not saved/restored, so I suppose after reset it will be cleared and thus
aligned with RFLAGS.IF=0. Can you elaborate a bit on how exactly the above
problem is caused?
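
For reference, my understanding of the relevant VM-entry consistency check
(from the SDM's checks on guest non-register state; the names below are
illustrative, not Xen code):

    #include <stdbool.h>
    #include <stdint.h>

    #define X86_EFLAGS_IF    (1u << 9)  /* RFLAGS.IF, interrupt enable */
    #define INTR_SHADOW_STI  (1u << 0)  /* interruptibility: blocking by STI */

    /*
     * VM entry fails if the guest is marked as blocking by STI while
     * RFLAGS.IF is clear, since an STI shadow can only result from an STI
     * that set IF.  A reset that clears IF but leaves the STI bit in the
     * interruptibility state produces exactly this invalid combination.
     */
    static bool guest_interruptibility_consistent(uint64_t rflags,
                                                  uint32_t intr_shadow)
    {
        return (rflags & X86_EFLAGS_IF) || !(intr_shadow & INTR_SHADOW_STI);
    }

So the failure needs both a stale STI bit in the interruptibility state and
IF=0 in the restored RFLAGS, which is why I'm asking about the ordering above.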

> 
> Include the interruptibility state information in the public hvm_hw_cpu struct
> so that the CPU can be safely saved/restored.
> 
> Signed-off-by: Tamas K Lengyel <tamas.leng...@intel.com>
> ---
>  xen/arch/x86/hvm/hvm.c                 |  9 +++++----
>  xen/arch/x86/hvm/vmx/vmx.c             |  4 ++++
>  xen/arch/x86/include/asm/hvm/hvm.h     | 26 ++++++++++++++++++++++++++
>  xen/include/public/arch-x86/hvm/save.h |  3 ++-
>  4 files changed, 37 insertions(+), 5 deletions(-)

Why is this change only applied to vmx instead of svm?
> 
> diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
> index 709a4191ef..b239c72215 100644
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -897,6 +897,8 @@ static int cf_check hvm_save_cpu_ctxt(struct vcpu *v, hvm_domain_context_t *h)
>          ctxt.flags = XEN_X86_FPU_INITIALISED;
>      }
> 
> +    ctxt.interruptibility_info = hvm_get_interrupt_shadow(v);
> +
>      return hvm_save_entry(CPU, v->vcpu_id, h, &ctxt);
>  }
> 
> @@ -990,9 +992,6 @@ static int cf_check hvm_load_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
>      if ( hvm_load_entry_zeroextend(CPU, h, &ctxt) != 0 )
>          return -EINVAL;
> 
> -    if ( ctxt.pad0 != 0 )
> -        return -EINVAL;
> -
>      /* Sanity check some control registers. */
>      if ( (ctxt.cr0 & HVM_CR0_GUEST_RESERVED_BITS) ||
>           !(ctxt.cr0 & X86_CR0_ET) ||
> @@ -1155,6 +1154,8 @@ static int cf_check hvm_load_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
>      v->arch.dr6   = ctxt.dr6;
>      v->arch.dr7   = ctxt.dr7;
> 
> +    hvm_set_interrupt_shadow(v, ctxt.interruptibility_info);
> +
>      hvmemul_cancel(v);
> 
>      /* Auxiliary processors should be woken immediately. */
> @@ -3888,7 +3889,7 @@ enum hvm_intblk hvm_interrupt_blocked(struct vcpu *v, struct hvm_intack intack)
>           !(guest_cpu_user_regs()->eflags & X86_EFLAGS_IF) )
>          return hvm_intblk_rflags_ie;
> 
> -    intr_shadow = alternative_call(hvm_funcs.get_interrupt_shadow, v);
> +    intr_shadow = hvm_get_interrupt_shadow(v);
> 
>      if ( intr_shadow & (HVM_INTR_SHADOW_STI|HVM_INTR_SHADOW_MOV_SS) )
>          return hvm_intblk_shadow;
> diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
> index c075370f64..e13817431a 100644
> --- a/xen/arch/x86/hvm/vmx/vmx.c
> +++ b/xen/arch/x86/hvm/vmx/vmx.c
> @@ -1323,7 +1323,9 @@ static unsigned int cf_check vmx_get_interrupt_shadow(struct vcpu *v)
>  {
>      unsigned long intr_shadow;
> 
> +    vmx_vmcs_enter(v);
>      __vmread(GUEST_INTERRUPTIBILITY_INFO, &intr_shadow);
> +    vmx_vmcs_exit(v);
> 
>      return intr_shadow;
>  }
> @@ -1331,7 +1333,9 @@ static unsigned int cf_check vmx_get_interrupt_shadow(struct vcpu *v)
>  static void cf_check vmx_set_interrupt_shadow(
>      struct vcpu *v, unsigned int intr_shadow)
>  {
> +    vmx_vmcs_enter(v);
>      __vmwrite(GUEST_INTERRUPTIBILITY_INFO, intr_shadow);
> +    vmx_vmcs_exit(v);
>  }
> 
>  static void vmx_load_pdptrs(struct vcpu *v)
> diff --git a/xen/arch/x86/include/asm/hvm/hvm.h b/xen/arch/x86/include/asm/hvm/hvm.h
> index 5b7ec0cf69..2fb7865a05 100644
> --- a/xen/arch/x86/include/asm/hvm/hvm.h
> +++ b/xen/arch/x86/include/asm/hvm/hvm.h
> @@ -720,6 +720,22 @@ static inline int hvm_vmtrace_reset(struct vcpu *v)
>      return -EOPNOTSUPP;
>  }
> 
> +static inline unsigned long hvm_get_interrupt_shadow(struct vcpu *v)
> +{
> +    if ( hvm_funcs.get_interrupt_shadow )
> +        return alternative_call(hvm_funcs.get_interrupt_shadow, v);
> +
> +    return -EOPNOTSUPP;
> +}
> +
> +static inline void
> +hvm_set_interrupt_shadow(struct vcpu *v, unsigned long val)
> +{
> +    if ( hvm_funcs.set_interrupt_shadow )
> +        alternative_vcall(hvm_funcs.set_interrupt_shadow, v, val);
> +}
> +
> +
>  /*
>   * Accessors for registers which have per-guest-type or per-vendor locations
>   * (e.g. VMCS, msr load/save lists, VMCB, VMLOAD lazy, etc).
> @@ -863,6 +879,16 @@ static inline void hvm_set_reg(struct vcpu *v, unsigned int reg, uint64_t val)
>      ASSERT_UNREACHABLE();
>  }
> 
> +static inline unsigned long hvm_get_interrupt_shadow(struct vcpu *v)
> +{
> +    ASSERT_UNREACHABLE();
> +    return 0;
> +}
> +static inline void hvm_set_interrupt_shadow(struct vcpu *v, unsigned long val)
> +{
> +    ASSERT_UNREACHABLE();
> +}
> +
>  #define is_viridian_domain(d) ((void)(d), false)
>  #define is_viridian_vcpu(v) ((void)(v), false)
>  #define has_viridian_time_ref_count(d) ((void)(d), false)
> diff --git a/xen/include/public/arch-x86/hvm/save.h b/xen/include/public/arch-x86/hvm/save.h
> index 773a380bc2..e944b1756a 100644
> --- a/xen/include/public/arch-x86/hvm/save.h
> +++ b/xen/include/public/arch-x86/hvm/save.h
> @@ -165,7 +165,8 @@ struct hvm_hw_cpu {
>  #define _XEN_X86_FPU_INITIALISED        0
>  #define XEN_X86_FPU_INITIALISED         (1U<<_XEN_X86_FPU_INITIALISED)
>      uint32_t flags;
> -    uint32_t pad0;
> +
> +    uint32_t interruptibility_info;
>  };
> 
>  struct hvm_hw_cpu_compat {
> --
> 2.25.1
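
Also, regarding the save record layout: since the new field reuses the
existing pad0 slot the record size is unchanged, and
hvm_load_entry_zeroextend() already tolerates shorter records from older
Xen, which should then load with the field as 0 (i.e. no interrupt shadow).
That said, with the pad0 check gone nothing rejects garbage in the new
field. A minimal sketch of a load-time check (the mask name is mine; bits
0-3 are the STI/MOV-SS/SMI/NMI blocking bits of the VMX
interruptibility-state encoding):

    /* Hypothetical validation in hvm_load_cpu_ctxt(); mask name is mine. */
    #define INTR_SHADOW_VALID_MASK 0xfU

    if ( ctxt.interruptibility_info & ~INTR_SHADOW_VALID_MASK )
        return -EINVAL;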

