Nicholas Piggin <npig...@gmail.com> writes:

> There should be no functional difference, but this makes the caller
> easier to read.
>
> Signed-off-by: Nicholas Piggin <npig...@gmail.com>

Reviewed-by: Fabiano Rosas <faro...@linux.ibm.com>

> ---
>  arch/powerpc/kvm/book3s_hv.c | 65 +++++++++++++++++++++++-------------
>  1 file changed, 41 insertions(+), 24 deletions(-)
>
> diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
> index c2c72875fca9..45211458ac05 100644
> --- a/arch/powerpc/kvm/book3s_hv.c
> +++ b/arch/powerpc/kvm/book3s_hv.c
> @@ -4062,6 +4062,44 @@ static void store_spr_state(struct kvm_vcpu *vcpu)
>       vcpu->arch.ctrl = mfspr(SPRN_CTRLF);
>  }
>
> +/* Returns true if current MSR and/or guest MSR may have changed */
> +static bool load_vcpu_state(struct kvm_vcpu *vcpu,
> +                        struct p9_host_os_sprs *host_os_sprs)
> +{
> +     bool ret = false;
> +
> +     if (cpu_has_feature(CPU_FTR_TM) ||
> +         cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) {
> +             kvmppc_restore_tm_hv(vcpu, vcpu->arch.shregs.msr, true);
> +             ret = true;
> +     }
> +
> +     load_spr_state(vcpu, host_os_sprs);
> +
> +     load_fp_state(&vcpu->arch.fp);
> +#ifdef CONFIG_ALTIVEC
> +     load_vr_state(&vcpu->arch.vr);
> +#endif
> +     mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
> +
> +     return ret;
> +}
> +
> +static void store_vcpu_state(struct kvm_vcpu *vcpu)
> +{
> +     store_spr_state(vcpu);
> +
> +     store_fp_state(&vcpu->arch.fp);
> +#ifdef CONFIG_ALTIVEC
> +     store_vr_state(&vcpu->arch.vr);
> +#endif
> +     vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
> +
> +     if (cpu_has_feature(CPU_FTR_TM) ||
> +         cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
> +             kvmppc_save_tm_hv(vcpu, vcpu->arch.shregs.msr, true);
> +}
> +
>  static void save_p9_host_os_sprs(struct p9_host_os_sprs *host_os_sprs)
>  {
>       if (!cpu_has_feature(CPU_FTR_ARCH_31))
> @@ -4169,19 +4207,8 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
>
>       vcpu_vpa_increment_dispatch(vcpu);
>
> -     if (cpu_has_feature(CPU_FTR_TM) ||
> -         cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) {
> -             kvmppc_restore_tm_hv(vcpu, vcpu->arch.shregs.msr, true);
> -             msr = mfmsr(); /* TM restore can update msr */
> -     }
> -
> -     load_spr_state(vcpu, &host_os_sprs);
> -
> -     load_fp_state(&vcpu->arch.fp);
> -#ifdef CONFIG_ALTIVEC
> -     load_vr_state(&vcpu->arch.vr);
> -#endif
> -     mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
> +     if (unlikely(load_vcpu_state(vcpu, &host_os_sprs)))
> +             msr = mfmsr(); /* MSR may have been updated */
>
>       switch_pmu_to_guest(vcpu, &host_os_sprs);
>
> @@ -4285,17 +4312,7 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
>
>       switch_pmu_to_host(vcpu, &host_os_sprs);
>
> -     store_spr_state(vcpu);
> -
> -     store_fp_state(&vcpu->arch.fp);
> -#ifdef CONFIG_ALTIVEC
> -     store_vr_state(&vcpu->arch.vr);
> -#endif
> -     vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
> -
> -     if (cpu_has_feature(CPU_FTR_TM) ||
> -         cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
> -             kvmppc_save_tm_hv(vcpu, vcpu->arch.shregs.msr, true);
> +     store_vcpu_state(vcpu);
>
>       vcpu_vpa_increment_dispatch(vcpu);
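
For anyone reading along, the resulting entry/exit pairing in
kvmhv_p9_guest_entry() then looks roughly like this (a simplified
sketch of the caller shape after this patch; the real function also
handles the time limit, LPCR and the actual guest entry/exit, which
are elided here):

	/* Restore guest SPRs, FP/VEC and (if present) TM state */
	if (unlikely(load_vcpu_state(vcpu, &host_os_sprs)))
		msr = mfmsr();	/* TM restore may have changed the MSR */

	switch_pmu_to_guest(vcpu, &host_os_sprs);

	/* ... guest entry, run, and exit handling elided ... */

	switch_pmu_to_host(vcpu, &host_os_sprs);

	store_vcpu_state(vcpu);	/* mirrors load_vcpu_state() */

The bool return keeps the mfmsr() off the common path: the caller only
re-reads the MSR when the TM restore could actually have changed it.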
