Re: [Xen-devel] [PATCH v4 02/12] x86/HVM: patch indirect calls through hvm_funcs to direct ones

2018-10-04 Thread Jan Beulich
>>> On 03.10.18 at 20:55,  wrote:
> On 02/10/18 11:12, Jan Beulich wrote:
>> This is intentionally not touching hooks used rarely (or not at all)
>> during the lifetime of a VM, like {domain,vcpu}_initialise or cpu_up,
>> as well as nested, VM event, and altp2m ones (they can all be done
>> later, if so desired). Virtual Interrupt delivery ones will be dealt
>> with in a subsequent patch.
>>
>> Signed-off-by: Jan Beulich 
>> Reviewed-by: Wei Liu 
> 
> Acked-by: Andrew Cooper 

Thanks.

> It is a shame that we don't have a variation such as cond_alt_vcall()
> which nops out the entire call when the function pointer is NULL, but I
> can't think of any sane way of trying to make that happen.

I think this could be made to work, e.g. by further utilizing special
values of the CALL insn's displacement (of the non-sensible ones we
currently use only -5; arguably using -4 ... -1 would be liable to
confuse not entirely dumb disassemblers, which may assume an
instruction boundary at the target of any CALL/JMP unless they
special-case such bogus values).
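
For illustration, a minimal sketch of the idea (plain C, not actual Xen
code; the function name, the NOP encoding and the patching details are
assumptions): a cond_alt_vcall()-style site would be retargeted to the
hook when the pointer is non-NULL, and overwritten with a 5-byte NOP
when it is NULL.

#include <stdint.h>
#include <string.h>

#define CALL_INSN_LEN 5                    /* E8 <rel32> */

/* 5-byte NOP (0F 1F 44 00 00), same length as the CALL it replaces. */
static const uint8_t nop5[CALL_INSN_LEN] = { 0x0f, 0x1f, 0x44, 0x00, 0x00 };

/* Hypothetical patch-time handling of one annotated call site. */
static void patch_call_site(uint8_t *site, const void *hook)
{
    if ( !hook )
    {
        /* cond_alt_vcall() idea: hook is NULL, drop the call entirely. */
        memcpy(site, nop5, CALL_INSN_LEN);
        return;
    }

    /* Otherwise emit a direct CALL; rel32 is relative to the next insn. */
    int32_t rel = (int32_t)((uintptr_t)hook -
                            (uintptr_t)(site + CALL_INSN_LEN));

    site[0] = 0xe8;
    memcpy(site + 1, &rel, sizeof(rel));
}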

If we thought this was a worthwhile avenue to explore, non-void
calls could be patched this way too, as long as the replacement
"return" value is a compile-time constant (i.e. we'd have a
compile-time "MOV $<const>, %eax" to patch in). We'd merely have to
sort out where to place this alternative replacement code.
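
A matching sketch for the non-void case (again illustrative only, not
existing Xen code): "mov $imm32, %eax" (opcode B8 followed by a 32-bit
immediate) is also exactly 5 bytes, so it can replace the CALL in place
when the hook is NULL and the replacement return value is known at
compile time.

#include <stdint.h>
#include <string.h>

/* Hypothetical: overwrite a 5-byte CALL with "mov $ret_val, %eax". */
static void patch_call_to_const_ret(uint8_t *site, int32_t ret_val)
{
    site[0] = 0xb8;                        /* B8 <imm32> */
    memcpy(site + 1, &ret_val, sizeof(ret_val));
}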

Jan




Re: [Xen-devel] [PATCH v4 02/12] x86/HVM: patch indirect calls through hvm_funcs to direct ones

2018-10-02 Thread Paul Durrant
> -----Original Message-----
> From: Jan Beulich [mailto:jbeul...@suse.com]
> Sent: 02 October 2018 11:13
> To: xen-devel 
> Cc: Andrew Cooper ; Paul Durrant
> ; Wei Liu 
> Subject: [PATCH v4 02/12] x86/HVM: patch indirect calls through hvm_funcs
> to direct ones
> 
> This is intentionally not touching hooks used rarely (or not at all)
> during the lifetime of a VM, like {domain,vcpu}_initialise or cpu_up,
> as well as nested, VM event, and altp2m ones (they can all be done
> later, if so desired). Virtual Interrupt delivery ones will be dealt
> with in a subsequent patch.
> 
> Signed-off-by: Jan Beulich 

Reviewed-by: Paul Durrant 

> Reviewed-by: Wei Liu 

[Xen-devel] [PATCH v4 02/12] x86/HVM: patch indirect calls through hvm_funcs to direct ones

2018-10-02 Thread Jan Beulich
This is intentionally not touching hooks used rarely (or not at all)
during the lifetime of a VM, like {domain,vcpu}_initialise or cpu_up,
as well as nested, VM event, and altp2m ones (they can all be done
later, if so desired). Virtual Interrupt delivery ones will be dealt
with in a subsequent patch.

Signed-off-by: Jan Beulich 
Reviewed-by: Wei Liu 
---
v3: Re-base.
v2: Drop open-coded numbers from macro invocations. Re-base.
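
Purely as an illustration of the calling convention the new macros
provide (a stand-alone sketch, not hypervisor code; the fallback
#defines below just perform the plain indirect call so the snippet
builds outside Xen, where the real macros additionally annotate the
site for boot-time patching into a direct CALL): alternative_call() is
used where the hook's return value is consumed, alternative_vcall()
where it is not.

#include <stdbool.h>
#include <stdio.h>

#ifndef alternative_call
#define alternative_call(hook, ...)  (hook)(__VA_ARGS__)
#define alternative_vcall(hook, ...) (hook)(__VA_ARGS__)
#endif

/* Stand-in for hvm_funcs, with one void and one value-returning hook. */
struct demo_funcs {
    void (*wbinvd_intercept)(void);
    bool (*get_guest_pat)(int vcpu, unsigned long *pat);
};

static void demo_wbinvd(void)
{
    puts("wbinvd intercept");
}

static bool demo_get_pat(int vcpu, unsigned long *pat)
{
    (void)vcpu;
    *pat = 0x0007040600070406UL;
    return true;
}

static struct demo_funcs demo_funcs = { demo_wbinvd, demo_get_pat };

int main(void)
{
    unsigned long pat;

    alternative_vcall(demo_funcs.wbinvd_intercept);            /* void hook */

    if ( alternative_call(demo_funcs.get_guest_pat, 0, &pat) ) /* result used */
        printf("PAT = %#lx\n", pat);

    return 0;
}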

--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -2104,7 +2104,7 @@ static int hvmemul_write_msr(
 static int hvmemul_wbinvd(
 struct x86_emulate_ctxt *ctxt)
 {
-hvm_funcs.wbinvd_intercept();
+alternative_vcall(hvm_funcs.wbinvd_intercept);
 return X86EMUL_OKAY;
 }
 
@@ -2122,7 +2122,7 @@ static int hvmemul_get_fpu(
 struct vcpu *curr = current;
 
 if ( !curr->fpu_dirtied )
-hvm_funcs.fpu_dirty_intercept();
+alternative_vcall(hvm_funcs.fpu_dirty_intercept);
 else if ( type == X86EMUL_FPU_fpu )
 {
 const typeof(curr->arch.xsave_area->fpu_sse) *fpu_ctxt =
@@ -2239,7 +2239,7 @@ static void hvmemul_put_fpu(
 {
 curr->fpu_dirtied = false;
 stts();
-hvm_funcs.fpu_leave(curr);
+alternative_vcall(hvm_funcs.fpu_leave, curr);
 }
 }
 }
@@ -2401,7 +2401,8 @@ static int _hvm_emulate_one(struct hvm_e
 if ( hvmemul_ctxt->intr_shadow != new_intr_shadow )
 {
 hvmemul_ctxt->intr_shadow = new_intr_shadow;
-hvm_funcs.set_interrupt_shadow(curr, new_intr_shadow);
+alternative_vcall(hvm_funcs.set_interrupt_shadow,
+  curr, new_intr_shadow);
 }
 
 if ( hvmemul_ctxt->ctxt.retire.hlt &&
@@ -2538,7 +2539,8 @@ void hvm_emulate_init_once(
 
 memset(hvmemul_ctxt, 0, sizeof(*hvmemul_ctxt));
 
-hvmemul_ctxt->intr_shadow = hvm_funcs.get_interrupt_shadow(curr);
+hvmemul_ctxt->intr_shadow =
+alternative_call(hvm_funcs.get_interrupt_shadow, curr);
 hvmemul_get_seg_reg(x86_seg_cs, hvmemul_ctxt);
 hvmemul_get_seg_reg(x86_seg_ss, hvmemul_ctxt);
 
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -272,12 +272,12 @@ void hvm_set_rdtsc_exiting(struct domain
 struct vcpu *v;
 
 for_each_vcpu ( d, v )
-hvm_funcs.set_rdtsc_exiting(v, enable);
+alternative_vcall(hvm_funcs.set_rdtsc_exiting, v, enable);
 }
 
 void hvm_get_guest_pat(struct vcpu *v, u64 *guest_pat)
 {
-if ( !hvm_funcs.get_guest_pat(v, guest_pat) )
+if ( !alternative_call(hvm_funcs.get_guest_pat, v, guest_pat) )
 *guest_pat = v->arch.hvm.pat_cr;
 }
 
@@ -302,7 +302,7 @@ int hvm_set_guest_pat(struct vcpu *v, u6
 return 0;
 }
 
-if ( !hvm_funcs.set_guest_pat(v, guest_pat) )
+if ( !alternative_call(hvm_funcs.set_guest_pat, v, guest_pat) )
 v->arch.hvm.pat_cr = guest_pat;
 
 return 1;
@@ -342,7 +342,7 @@ bool hvm_set_guest_bndcfgs(struct vcpu *
 /* nothing, best effort only */;
 }
 
-return hvm_funcs.set_guest_bndcfgs(v, val);
+return alternative_call(hvm_funcs.set_guest_bndcfgs, v, val);
 }
 
 /*
@@ -500,7 +500,8 @@ void hvm_migrate_pirqs(struct vcpu *v)
 static bool hvm_get_pending_event(struct vcpu *v, struct x86_event *info)
 {
 info->cr2 = v->arch.hvm.guest_cr[2];
-return hvm_funcs.get_pending_event(v, info);
+
+return alternative_call(hvm_funcs.get_pending_event, v, info);
 }
 
 void hvm_do_resume(struct vcpu *v)
@@ -1651,7 +1652,7 @@ void hvm_inject_event(const struct x86_e
 }
 }
 
-hvm_funcs.inject_event(event);
+alternative_vcall(hvm_funcs.inject_event, event);
 }
 
 int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
@@ -2238,7 +2239,7 @@ int hvm_set_cr0(unsigned long value, boo
  (!rangeset_is_empty(d->iomem_caps) ||
   !rangeset_is_empty(d->arch.ioport_caps) ||
   has_arch_pdevs(d)) )
-hvm_funcs.handle_cd(v, value);
+alternative_vcall(hvm_funcs.handle_cd, v, value);
 
 hvm_update_cr(v, 0, value);
 
@@ -3477,7 +3478,8 @@ int hvm_msr_read_intercept(unsigned int
 goto gp_fault;
 /* If ret == 0 then this is not an MCE MSR, see other MSRs. */
 ret = ((ret == 0)
-   ? hvm_funcs.msr_read_intercept(msr, msr_content)
+   ? alternative_call(hvm_funcs.msr_read_intercept,
+  msr, msr_content)
: X86EMUL_OKAY);
 break;
 }
@@ -3637,7 +3639,8 @@ int hvm_msr_write_intercept(unsigned int
 goto gp_fault;
 /* If ret == 0 then this is not an MCE MSR, see other MSRs. */
 ret = ((ret == 0)
-   ? hvm_funcs.msr_write_intercept(msr, msr_content)
+   ? alternative_call(hvm_funcs.msr_write_intercept,
+  msr, msr_content)
: X86EMUL_OKAY);
 break;
 }
@@ -3829,7 +3832,7 @@ void hvm_hypercall_page_initialise(struc