On 1/13/21 6:37 PM, Sean Christopherson wrote:
> Move the allocation of the SEV VMCB array to sev.c to help pave the way
> toward encapsulating SEV enabling wholly within sev.c.
>
> No functional change intended.
>
> Signed-off-by: Sean Christopherson <sea...@google.com>
> ---
>  arch/x86/kvm/svm/sev.c | 13 +++++++++++++
>  arch/x86/kvm/svm/svm.c | 17 ++++++++---------
>  arch/x86/kvm/svm/svm.h |  1 +
>  3 files changed, 22 insertions(+), 9 deletions(-)
>
> diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
> index 1a143340103e..a2c3e2d42a7f 100644
> --- a/arch/x86/kvm/svm/sev.c
> +++ b/arch/x86/kvm/svm/sev.c
> @@ -1330,6 +1330,19 @@ void sev_hardware_teardown(void)
>       sev_flush_asids();
>  }
>  
> +int sev_cpu_init(struct svm_cpu_data *sd)
> +{
> +     if (!svm_sev_enabled())
> +             return 0;
> +
> +     sd->sev_vmcbs = kmalloc_array(max_sev_asid + 1, sizeof(void *),
> +                                   GFP_KERNEL | __GFP_ZERO);


I saw that Tom recommended using kzalloc() instead of __GFP_ZERO in a
previous patch. With that fixed,

Reviewed-by: Brijesh Singh <brijesh.si...@amd.com>
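
For reference, a minimal sketch of the suggested fix (my assumption: kcalloc(),
the zeroing array allocator, as the kzalloc-style replacement for
kmalloc_array() + __GFP_ZERO, since this is an array allocation):

	/* Zeroed allocation of the per-ASID SEV VMCB pointer array. */
	sd->sev_vmcbs = kcalloc(max_sev_asid + 1, sizeof(void *), GFP_KERNEL);
	if (!sd->sev_vmcbs)
		return -ENOMEM;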


> +     if (!sd->sev_vmcbs)
> +             return -ENOMEM;
> +
> +     return 0;
> +}
> +
>  /*
>   * Pages used by hardware to hold guest encrypted state must be flushed before
>   * returning them to the system.
> diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
> index bb7b99743bea..89b95fb87a0c 100644
> --- a/arch/x86/kvm/svm/svm.c
> +++ b/arch/x86/kvm/svm/svm.c
> @@ -552,23 +552,22 @@ static void svm_cpu_uninit(int cpu)
>  static int svm_cpu_init(int cpu)
>  {
>       struct svm_cpu_data *sd;
> +     int ret;
>  
>       sd = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
>       if (!sd)
>               return -ENOMEM;
>       sd->cpu = cpu;
>       sd->save_area = alloc_page(GFP_KERNEL);
> -     if (!sd->save_area)
> +     if (!sd->save_area) {
> +             ret = -ENOMEM;
>               goto free_cpu_data;
> +     }
>       clear_page(page_address(sd->save_area));
>  
> -     if (svm_sev_enabled()) {
> -             sd->sev_vmcbs = kmalloc_array(max_sev_asid + 1,
> -                                           sizeof(void *),
> -                                           GFP_KERNEL | __GFP_ZERO);
> -             if (!sd->sev_vmcbs)
> -                     goto free_save_area;
> -     }
> +     ret = sev_cpu_init(sd);
> +     if (ret)
> +             goto free_save_area;
>  
>       per_cpu(svm_data, cpu) = sd;
>  
> @@ -578,7 +577,7 @@ static int svm_cpu_init(int cpu)
>       __free_page(sd->save_area);
>  free_cpu_data:
>       kfree(sd);
> -     return -ENOMEM;
> +     return ret;
>  
>  }
>  
> diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
> index 8e169835f52a..4eb4bab0ca3e 100644
> --- a/arch/x86/kvm/svm/svm.h
> +++ b/arch/x86/kvm/svm/svm.h
> @@ -583,6 +583,7 @@ int svm_unregister_enc_region(struct kvm *kvm,
>  void pre_sev_run(struct vcpu_svm *svm, int cpu);
>  void __init sev_hardware_setup(void);
>  void sev_hardware_teardown(void);
> +int sev_cpu_init(struct svm_cpu_data *sd);
>  void sev_free_vcpu(struct kvm_vcpu *vcpu);
>  int sev_handle_vmgexit(struct vcpu_svm *svm);
>  int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in);
