On 10/05/2016 17:43, Paolo Bonzini wrote:
> 
> 
> On 04/05/2016 21:09, Suravee Suthikulpanit wrote:
>> From: Suravee Suthikulpanit <suravee.suthikulpa...@amd.com>
>>
>> When a vcpu is loaded onto or unloaded from a physical core, we need
>> to update the host physical APIC ID information in the Physical
>> APIC-ID table accordingly.
>>
>> Also, when a vCPU is blocking/un-blocking (due to a halt instruction),
>> we need to make sure that the is-running bit is set accordingly in the
>> Physical APIC-ID table.
>>
>> Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpa...@amd.com>
>> Reviewed-by: Radim Krčmář <rkrc...@redhat.com>
>> ---
> 
> I think this is the only patch that needs a little more work, because
> there are a bunch of return values that no caller checks and that
> really should be WARN_ONs.  In addition, the load and put cases are
> different enough that they should be separate functions.
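(To make the first point concrete: this is the usual kernel idiom of
turning an error code that no caller checks into a loud warning plus an
early return.  A schematic sketch, with made-up do_thing()/MAX_ID names
just for illustration:

    /* Before: every caller ignored the -EINVAL, so failures were silent. */
    static int do_thing(int id)
    {
            if (id >= MAX_ID)
                    return -EINVAL;
            return 0;
    }

    /* After: WARN_ON() logs a stack trace and the function bails out
     * early, so the bad condition cannot be missed and there is no
     * return value left to forget.
     */
    static void do_thing(int id)
    {
            if (WARN_ON(id >= MAX_ID))
                    return;
    }
)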
> 
> Can you please test this?
> 
> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
> index f3dbf1d33a61..3168d6c8d24f 100644
> --- a/arch/x86/kvm/svm.c
> +++ b/arch/x86/kvm/svm.c
> @@ -184,7 +184,7 @@ struct vcpu_svm {
>       u32 ldr_reg;
>       struct page *avic_backing_page;
>       u64 *avic_physical_id_cache;
> -     bool avic_is_blocking;
> +     bool avic_is_running;
>  };
>  
>  #define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK (0xFF)
> @@ -1321,18 +1321,20 @@ free_avic:
>  /**
>   * This function is called during VCPU halt/unhalt.
>   */
> -static int avic_set_running(struct kvm_vcpu *vcpu, bool is_run)
> +static void avic_set_running(struct kvm_vcpu *vcpu, bool is_run)
>  {
>       u64 entry;
>       int h_physical_id = __default_cpu_present_to_apicid(vcpu->cpu);
>       struct vcpu_svm *svm = to_svm(vcpu);
>  
>       if (!kvm_vcpu_apicv_active(vcpu))
> -             return 0;
> +             return;
> +
> +     svm->avic_is_running = is_run;
>  
>       /* ID = 0xff (broadcast), ID > 0xff (reserved) */
> -     if (h_physical_id >= AVIC_MAX_PHYSICAL_ID_COUNT)
> -             return -EINVAL;
> +     if (WARN_ON(h_physical_id >= AVIC_MAX_PHYSICAL_ID_COUNT))
> +             return;
>  
>       entry = READ_ONCE(*(svm->avic_physical_id_cache));
>       WARN_ON(is_run == !!(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK));
> @@ -1341,36 +1343,45 @@ static int avic_set_running(struct kvm_vcpu *vcpu, bool is_run)
>       if (is_run)
>               entry |= AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
>       WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
> -
> -     return 0;
>  }
>  
> -static int avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu, bool is_load)
> +static void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
>  {
>       u64 entry;
> +     /* ID = 0xff (broadcast), ID > 0xff (reserved) */
>       int h_physical_id = __default_cpu_present_to_apicid(cpu);
>       struct vcpu_svm *svm = to_svm(vcpu);
>  
>       if (!kvm_vcpu_apicv_active(vcpu))
> -             return 0;
> +             return;
>  
> -     /* ID = 0xff (broadcast), ID > 0xff (reserved) */
> -     if (h_physical_id >= AVIC_MAX_PHYSICAL_ID_COUNT)
> -             return -EINVAL;
> +     if (WARN_ON(h_physical_id >= AVIC_MAX_PHYSICAL_ID_COUNT))
> +             return;
>  
>       entry = READ_ONCE(*(svm->avic_physical_id_cache));
> -     WARN_ON(is_load && (entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK));
> +     WARN_ON(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);
> +
> +     entry &= ~AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK;
> +     entry |= (h_physical_id & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK);
>  
>       entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
> -     if (is_load) {
> -             entry &= ~AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK;
> -             entry |= (h_physical_id & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK);
> -             if (!svm->avic_is_blocking)
> -                     entry |= AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
> -     }
> +     if (svm->avic_is_running)
> +             entry |= AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
> +
>       WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
> +}
>  
> -     return 0;
> +static void avic_vcpu_put(struct kvm_vcpu *vcpu)
> +{
> +     u64 entry;
> +     struct vcpu_svm *svm = to_svm(vcpu);
> +
> +     if (!kvm_vcpu_apicv_active(vcpu))
> +             return;
> +
> +     entry = READ_ONCE(*(svm->avic_physical_id_cache));
> +     entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
> +     WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
>  }
>  
>  static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
> @@ -1436,10 +1447,10 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
>                       goto free_page4;
>       }
>  
> -     /* We initialize this flag to one to make sure that the is_running
> +     /* We initialize this flag to true to make sure that the is_running
>        * bit would be set the first time the vcpu is loaded.
>        */
> -     svm->avic_is_blocking = false;
> +     svm->avic_is_running = true;
>  
>       svm->nested.hsave = page_address(hsave_page);
>  
> @@ -1518,7 +1529,7 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
>       if (static_cpu_has(X86_FEATURE_RDTSCP))
>               wrmsrl(MSR_TSC_AUX, svm->tsc_aux);
>  
> -     avic_vcpu_load(vcpu, cpu, true);
> +     avic_vcpu_load(vcpu, cpu);
>  }
>  
>  static void svm_vcpu_put(struct kvm_vcpu *vcpu)
> @@ -1526,7 +1537,7 @@ static void svm_vcpu_put(struct kvm_vcpu *vcpu)
>       struct vcpu_svm *svm = to_svm(vcpu);
>       int i;
>  
> -     avic_vcpu_load(vcpu, 0, false);
> +     avic_vcpu_put(vcpu);
>  
>       ++vcpu->stat.host_state_reload;
>       kvm_load_ldt(svm->host.ldt);
> @@ -1545,13 +1556,11 @@ static void svm_vcpu_put(struct kvm_vcpu *vcpu)
>  
>  static void svm_vcpu_blocking(struct kvm_vcpu *vcpu)
>  {
> -     to_svm(vcpu)->avic_is_blocking = true;
>       avic_set_running(vcpu, false);
>  }
>  
>  static void svm_vcpu_unblocking(struct kvm_vcpu *vcpu)
>  {
> -     to_svm(vcpu)->avic_is_blocking = false;
>       avic_set_running(vcpu, true);
>  }
>  
> 
> The two functions now have the same signature as their callers,
> svm_vcpu_load and svm_vcpu_put.
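
If it helps review, here is a minimal userspace model of the two paths,
with the mask values copied from the AVIC definitions in svm.c and plain
loads/stores standing in for READ_ONCE/WRITE_ONCE (so a sketch of the
bit manipulation, not the kernel code itself):

    #include <stdbool.h>
    #include <stdint.h>

    /* Physical APIC-ID table entry bits, as defined in svm.c. */
    #define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK   (0xFFULL)
    #define AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK         (1ULL << 62)

    /*
     * Load path: install the new host APIC ID, then set is-running only
     * if the vcpu is not blocked.  "entry" stands in for the value at
     * svm->avic_physical_id_cache.
     */
    static uint64_t model_avic_vcpu_load(uint64_t entry,
                                         uint64_t h_physical_id,
                                         bool avic_is_running)
    {
            entry &= ~AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK;
            entry |= (h_physical_id &
                      AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK);

            entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
            if (avic_is_running)
                    entry |= AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;

            return entry;
    }

    /* Put path: only the is-running bit is cleared; the host APIC ID
     * stays in place until the next load.
     */
    static uint64_t model_avic_vcpu_put(uint64_t entry)
    {
            return entry & ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
    }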

Radim, does this look sane?  I plan to include it in my pull request
(I'm running AMD autotest now and it passed the first few tests).

Thanks,

Paolo
