Re: [PATCH v2 31/50] KVM: x86: Do CPU compatibility checks in x86 code

2022-12-05 Thread Sean Christopherson
On Mon, Dec 05, 2022, Isaku Yamahata wrote:
> On Wed, Nov 30, 2022 at 11:09:15PM +0000,
> > index 66f16458aa97..3571bc968cf8 100644
> > --- a/arch/x86/kvm/x86.c
> > +++ b/arch/x86/kvm/x86.c
> > @@ -9277,10 +9277,36 @@ static inline void kvm_ops_update(struct kvm_x86_init_ops *ops)
> > kvm_pmu_ops_update(ops->pmu_ops);
> >  }
> >  
> > +struct kvm_cpu_compat_check {
> > +   struct kvm_x86_init_ops *ops;
> > +   int *ret;
> 
> minor nitpick: just int ret. I don't see the necessity of the pointer.
> Anyway overall it looks good to me.

...

> > @@ -9360,6 +9386,14 @@ static int __kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
> > if (r != 0)
> > goto out_mmu_exit;
> >  
> > +   c.ret = &r;
> > +   c.ops = ops;
> > +   for_each_online_cpu(cpu) {
> > +   smp_call_function_single(cpu, kvm_x86_check_cpu_compat, &c, 1);
> > +   if (r < 0)
> 
> Here it can be "c.ret < 0".

No, because the below goto leads to "return r", i.e. "c.ret" needs to be propagated
to "r".  That's why the code does the admittedly funky "int *ret" thing.

FWIW, this gets cleaned up in the end.  "struct kvm_cpu_compat_check" goes away and
"&r" is passed directly to kvm_x86_check_cpu_compat.

> > +   goto out_hardware_unsetup;
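
For the curious, a rough sketch of the shape that cleanup eventually takes
(illustrative only; it assumes kvm_x86_ops has already been installed before
the checks run, so the ops argument can go away too, and the error label name
is made up):

	/* SMP callback: cast the payload back to "int *" and write the result. */
	static void kvm_x86_check_cpu_compat(void *ret)
	{
		*(int *)ret = kvm_x86_check_processor_compatibility();
	}

	...

	/* "&r" is handed straight to the callback, no wrapper struct needed. */
	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu, kvm_x86_check_cpu_compat, &r, 1);
		if (r < 0)
			goto out_unwind_ops;
	}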


Re: [PATCH v2 31/50] KVM: x86: Do CPU compatibility checks in x86 code

2022-12-05 Thread Isaku Yamahata
On Wed, Nov 30, 2022 at 11:09:15PM +0000,
Sean Christopherson  wrote:

> Move the CPU compatibility checks to pure x86 code, i.e. drop x86's use
> of the common kvm_x86_check_cpu_compat() arch hook.  x86 is the only
> architecture that "needs" to do per-CPU compatibility checks, moving
> the logic to x86 will allow dropping the common code, and will also
> give x86 more control over when/how the compatibility checks are
> performed, e.g. TDX will need to enable hardware (do VMXON) in order to
> perform compatibility checks.
> 
> Signed-off-by: Sean Christopherson 
> ---
>  arch/x86/kvm/svm/svm.c |  2 +-
>  arch/x86/kvm/vmx/vmx.c |  2 +-
>  arch/x86/kvm/x86.c | 49 --
>  3 files changed, 40 insertions(+), 13 deletions(-)
> 
> diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
> index 19e81a99c58f..d7ea1c1175c2 100644
> --- a/arch/x86/kvm/svm/svm.c
> +++ b/arch/x86/kvm/svm/svm.c
> @@ -5103,7 +5103,7 @@ static int __init svm_init(void)
>* Common KVM initialization _must_ come last, after this, /dev/kvm is
>* exposed to userspace!
>*/
> - r = kvm_init(&svm_init_ops, sizeof(struct vcpu_svm),
> + r = kvm_init(NULL, sizeof(struct vcpu_svm),
>__alignof__(struct vcpu_svm), THIS_MODULE);
>   if (r)
>   goto err_kvm_init;
> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
> index 654d81f781da..8deb1bd60c10 100644
> --- a/arch/x86/kvm/vmx/vmx.c
> +++ b/arch/x86/kvm/vmx/vmx.c
> @@ -8592,7 +8592,7 @@ static int __init vmx_init(void)
>* Common KVM initialization _must_ come last, after this, /dev/kvm is
>* exposed to userspace!
>*/
> - r = kvm_init(&vmx_init_ops, sizeof(struct vcpu_vmx),
> + r = kvm_init(NULL, sizeof(struct vcpu_vmx),
>__alignof__(struct vcpu_vmx), THIS_MODULE);
>   if (r)
>   goto err_kvm_init;
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 66f16458aa97..3571bc968cf8 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -9277,10 +9277,36 @@ static inline void kvm_ops_update(struct 
> kvm_x86_init_ops *ops)
>   kvm_pmu_ops_update(ops->pmu_ops);
>  }
>  
> +struct kvm_cpu_compat_check {
> + struct kvm_x86_init_ops *ops;
> + int *ret;

minor nitpick: just int ret. I don't see the necessity of the pointer.
Anyway overall it looks good to me.

Reviewed-by: Isaku Yamahata 

> +};
> +
> > +static int kvm_x86_check_processor_compatibility(struct kvm_x86_init_ops *ops)
> +{
> > + struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
> +
> + WARN_ON(!irqs_disabled());
> +
> + if (__cr4_reserved_bits(cpu_has, c) !=
> > + __cr4_reserved_bits(cpu_has, &boot_cpu_data))
> + return -EIO;
> +
> + return ops->check_processor_compatibility();
> +}
> +
> +static void kvm_x86_check_cpu_compat(void *data)
> +{
> + struct kvm_cpu_compat_check *c = data;
> +
> + *c->ret = kvm_x86_check_processor_compatibility(c->ops);
> +}
> +
>  static int __kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
>  {
> + struct kvm_cpu_compat_check c;
>   u64 host_pat;
> - int r;
> + int r, cpu;
>  
>   if (kvm_x86_ops.hardware_enable) {
>   pr_err("kvm: already loaded vendor module '%s'\n", kvm_x86_ops.name);
> @@ -9360,6 +9386,14 @@ static int __kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
>   if (r != 0)
>   goto out_mmu_exit;
>  
> + c.ret = &r;
> + c.ops = ops;
> + for_each_online_cpu(cpu) {
> + smp_call_function_single(cpu, kvm_x86_check_cpu_compat, &c, 1);
> + if (r < 0)

Here it can be "c.ret < 0".

> + goto out_hardware_unsetup;
> + }
> +
>   /*
>* Point of no return!  DO NOT add error paths below this point unless
>* absolutely necessary, as most operations from this point forward
> @@ -9402,6 +9436,8 @@ static int __kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
>   kvm_init_msr_list();
>   return 0;
>  
> +out_hardware_unsetup:
> + ops->runtime_ops->hardware_unsetup();
>  out_mmu_exit:
>   kvm_mmu_vendor_module_exit();
>  out_free_percpu:
> @@ -12037,16 +12073,7 @@ void kvm_arch_hardware_disable(void)
>  
>  int kvm_arch_check_processor_compat(void *opaque)
>  {
> - struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
> - struct kvm_x86_init_ops *ops = opaque;
> -
> - WARN_ON(!irqs_disabled());
> -
> - if (__cr4_reserved_bits(cpu_has, c) !=
> - __cr4_reserved_bits(cpu_has, &boot_cpu_data))
> - return -EIO;
> -
> - return ops->check_processor_compatibility();
> + return 0;
>  }
>  
>  bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu)
> -- 
> 2.38.1.584.g0f3c55d4c2-goog
> 

-- 
Isaku Yamahata 


Re: [PATCH v2 31/50] KVM: x86: Do CPU compatibility checks in x86 code

2022-12-02 Thread Huang, Kai
On Wed, 2022-11-30 at 23:09 +0000, Sean Christopherson wrote:
> Move the CPU compatibility checks to pure x86 code, i.e. drop x86's use
> of the common kvm_x86_check_cpu_compat() arch hook.  x86 is the only
                ^
                kvm_arch_check_processor_compat()

> architecture that "needs" to do per-CPU compatibility checks, moving
> the logic to x86 will allow dropping the common code, and will also
> give x86 more control over when/how the compatibility checks are
> performed, e.g. TDX will need to enable hardware (do VMXON) in order to
> perform compatibility checks.
> 
> Signed-off-by: Sean Christopherson 

Reviewed-by: Kai Huang 

> ---
>  arch/x86/kvm/svm/svm.c |  2 +-
>  arch/x86/kvm/vmx/vmx.c |  2 +-
>  arch/x86/kvm/x86.c | 49 --
>  3 files changed, 40 insertions(+), 13 deletions(-)
> 
> diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
> index 19e81a99c58f..d7ea1c1175c2 100644
> --- a/arch/x86/kvm/svm/svm.c
> +++ b/arch/x86/kvm/svm/svm.c
> @@ -5103,7 +5103,7 @@ static int __init svm_init(void)
>* Common KVM initialization _must_ come last, after this, /dev/kvm is
>* exposed to userspace!
>*/
> - r = kvm_init(&svm_init_ops, sizeof(struct vcpu_svm),
> + r = kvm_init(NULL, sizeof(struct vcpu_svm),
>__alignof__(struct vcpu_svm), THIS_MODULE);
>   if (r)
>   goto err_kvm_init;
> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
> index 654d81f781da..8deb1bd60c10 100644
> --- a/arch/x86/kvm/vmx/vmx.c
> +++ b/arch/x86/kvm/vmx/vmx.c
> @@ -8592,7 +8592,7 @@ static int __init vmx_init(void)
>* Common KVM initialization _must_ come last, after this, /dev/kvm is
>* exposed to userspace!
>*/
> - r = kvm_init(&vmx_init_ops, sizeof(struct vcpu_vmx),
> + r = kvm_init(NULL, sizeof(struct vcpu_vmx),
>__alignof__(struct vcpu_vmx), THIS_MODULE);
>   if (r)
>   goto err_kvm_init;
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 66f16458aa97..3571bc968cf8 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -9277,10 +9277,36 @@ static inline void kvm_ops_update(struct kvm_x86_init_ops *ops)
>   kvm_pmu_ops_update(ops->pmu_ops);
>  }
>  
> +struct kvm_cpu_compat_check {
> + struct kvm_x86_init_ops *ops;
> + int *ret;
> +};
> +
> +static int kvm_x86_check_processor_compatibility(struct kvm_x86_init_ops *ops)
> +{
> + struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
> +
> + WARN_ON(!irqs_disabled());
> +
> + if (__cr4_reserved_bits(cpu_has, c) !=
> + __cr4_reserved_bits(cpu_has, &boot_cpu_data))
> + return -EIO;
> +
> + return ops->check_processor_compatibility();
> +}
> +
> +static void kvm_x86_check_cpu_compat(void *data)
> +{
> + struct kvm_cpu_compat_check *c = data;
> +
> + *c->ret = kvm_x86_check_processor_compatibility(c->ops);
> +}
> +
>  static int __kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
>  {
> + struct kvm_cpu_compat_check c;
>   u64 host_pat;
> - int r;
> + int r, cpu;
>  
>   if (kvm_x86_ops.hardware_enable) {
>   pr_err("kvm: already loaded vendor module '%s'\n", kvm_x86_ops.name);
> @@ -9360,6 +9386,14 @@ static int __kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
>   if (r != 0)
>   goto out_mmu_exit;
>  
> + c.ret = &r;
> + c.ops = ops;
> + for_each_online_cpu(cpu) {
> + smp_call_function_single(cpu, kvm_x86_check_cpu_compat, &c, 1);
> + if (r < 0)
> + goto out_hardware_unsetup;
> + }
> +
>   /*
>* Point of no return!  DO NOT add error paths below this point unless
>* absolutely necessary, as most operations from this point forward
> @@ -9402,6 +9436,8 @@ static int __kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
>   kvm_init_msr_list();
>   return 0;
>  
> +out_hardware_unsetup:
> + ops->runtime_ops->hardware_unsetup();
>  out_mmu_exit:
>   kvm_mmu_vendor_module_exit();
>  out_free_percpu:
> @@ -12037,16 +12073,7 @@ void kvm_arch_hardware_disable(void)
>  
>  int kvm_arch_check_processor_compat(void *opaque)
>  {
> - struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
> - struct kvm_x86_init_ops *ops = opaque;
> -
> - WARN_ON(!irqs_disabled());
> -
> - if (__cr4_reserved_bits(cpu_has, c) !=
> - __cr4_reserved_bits(cpu_has, &boot_cpu_data))
> - return -EIO;
> -
> - return ops->check_processor_compatibility();
> + return 0;
>  }
>  
>  bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu)
> -- 
> 2.38.1.584.g0f3c55d4c2-goog
> 



[PATCH v2 31/50] KVM: x86: Do CPU compatibility checks in x86 code

2022-11-30 Thread Sean Christopherson
Move the CPU compatibility checks to pure x86 code, i.e. drop x86's use
of the common kvm_x86_check_cpu_compat() arch hook.  x86 is the only
architecture that "needs" to do per-CPU compatibility checks, moving
the logic to x86 will allow dropping the common code, and will also
give x86 more control over when/how the compatibility checks are
performed, e.g. TDX will need to enable hardware (do VMXON) in order to
perform compatibility checks.

Signed-off-by: Sean Christopherson 
---
 arch/x86/kvm/svm/svm.c |  2 +-
 arch/x86/kvm/vmx/vmx.c |  2 +-
 arch/x86/kvm/x86.c | 49 --
 3 files changed, 40 insertions(+), 13 deletions(-)

diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 19e81a99c58f..d7ea1c1175c2 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -5103,7 +5103,7 @@ static int __init svm_init(void)
 * Common KVM initialization _must_ come last, after this, /dev/kvm is
 * exposed to userspace!
 */
-   r = kvm_init(&svm_init_ops, sizeof(struct vcpu_svm),
+   r = kvm_init(NULL, sizeof(struct vcpu_svm),
 __alignof__(struct vcpu_svm), THIS_MODULE);
if (r)
goto err_kvm_init;
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 654d81f781da..8deb1bd60c10 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -8592,7 +8592,7 @@ static int __init vmx_init(void)
 * Common KVM initialization _must_ come last, after this, /dev/kvm is
 * exposed to userspace!
 */
-   r = kvm_init(&vmx_init_ops, sizeof(struct vcpu_vmx),
+   r = kvm_init(NULL, sizeof(struct vcpu_vmx),
 __alignof__(struct vcpu_vmx), THIS_MODULE);
if (r)
goto err_kvm_init;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 66f16458aa97..3571bc968cf8 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -9277,10 +9277,36 @@ static inline void kvm_ops_update(struct kvm_x86_init_ops *ops)
kvm_pmu_ops_update(ops->pmu_ops);
 }
 
+struct kvm_cpu_compat_check {
+   struct kvm_x86_init_ops *ops;
+   int *ret;
+};
+
+static int kvm_x86_check_processor_compatibility(struct kvm_x86_init_ops *ops)
+{
+   struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
+
+   WARN_ON(!irqs_disabled());
+
+   if (__cr4_reserved_bits(cpu_has, c) !=
+   __cr4_reserved_bits(cpu_has, &boot_cpu_data))
+   return -EIO;
+
+   return ops->check_processor_compatibility();
+}
+
+static void kvm_x86_check_cpu_compat(void *data)
+{
+   struct kvm_cpu_compat_check *c = data;
+
+   *c->ret = kvm_x86_check_processor_compatibility(c->ops);
+}
+
 static int __kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
 {
+   struct kvm_cpu_compat_check c;
u64 host_pat;
-   int r;
+   int r, cpu;
 
if (kvm_x86_ops.hardware_enable) {
pr_err("kvm: already loaded vendor module '%s'\n", kvm_x86_ops.name);
@@ -9360,6 +9386,14 @@ static int __kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
if (r != 0)
goto out_mmu_exit;
 
+   c.ret = &r;
+   c.ops = ops;
+   for_each_online_cpu(cpu) {
+   smp_call_function_single(cpu, kvm_x86_check_cpu_compat, &c, 1);
+   if (r < 0)
+   goto out_hardware_unsetup;
+   }
+
/*
 * Point of no return!  DO NOT add error paths below this point unless
 * absolutely necessary, as most operations from this point forward
@@ -9402,6 +9436,8 @@ static int __kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
kvm_init_msr_list();
return 0;
 
+out_hardware_unsetup:
+   ops->runtime_ops->hardware_unsetup();
 out_mmu_exit:
kvm_mmu_vendor_module_exit();
 out_free_percpu:
@@ -12037,16 +12073,7 @@ void kvm_arch_hardware_disable(void)
 
 int kvm_arch_check_processor_compat(void *opaque)
 {
-   struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
-   struct kvm_x86_init_ops *ops = opaque;
-
-   WARN_ON(!irqs_disabled());
-
-   if (__cr4_reserved_bits(cpu_has, c) !=
-   __cr4_reserved_bits(cpu_has, &boot_cpu_data))
-   return -EIO;
-
-   return ops->check_processor_compatibility();
+   return 0;
 }
 
 bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu)
-- 
2.38.1.584.g0f3c55d4c2-goog
