Re: [PATCH v3 4/9] KVM: VMX: Configure runtime hooks using vmx_x86_ops

2020-03-23 Thread Paolo Bonzini
On 23/03/20 13:27, Vitaly Kuznetsov wrote:
>> -kvm_x86_ops->check_nested_events = vmx_check_nested_events;
>> -kvm_x86_ops->get_nested_state = vmx_get_nested_state;
>> -kvm_x86_ops->set_nested_state = vmx_set_nested_state;
>> -kvm_x86_ops->get_vmcs12_pages = nested_get_vmcs12_pages;
>> -kvm_x86_ops->nested_enable_evmcs = nested_enable_evmcs;
>> -kvm_x86_ops->nested_get_evmcs_version = nested_get_evmcs_version;
>> +ops->check_nested_events = vmx_check_nested_events;
>> +ops->get_nested_state = vmx_get_nested_state;
>> +ops->set_nested_state = vmx_set_nested_state;
>> +ops->get_vmcs12_pages = nested_get_vmcs12_pages;
>> +ops->nested_enable_evmcs = nested_enable_evmcs;
>> +ops->nested_get_evmcs_version = nested_get_evmcs_version;
> 
> A lazy guy like me would appreciate 'ops' -> 'vmx_x86_ops' rename as it
> would make 'git grep vmx_x86_ops' output more complete.
> 

I would prefer even more a kvm_x86_ops.nested struct but I would be okay
with a separate patch.

Paolo

___
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm


Re: [PATCH v3 4/9] KVM: VMX: Configure runtime hooks using vmx_x86_ops

2020-03-23 Thread Sean Christopherson
On Mon, Mar 23, 2020 at 01:27:28PM +0100, Vitaly Kuznetsov wrote:
> Sean Christopherson  writes:
> 
> > Configure VMX's runtime hooks by modifying vmx_x86_ops directly instead
> > of using the global kvm_x86_ops.  This sets the stage for waiting until
> > after ->hardware_setup() to set kvm_x86_ops with the vendor's
> > implementation.
> >
> > Signed-off-by: Sean Christopherson 
> > ---
> >  arch/x86/kvm/vmx/nested.c | 15 ---
> >  arch/x86/kvm/vmx/nested.h |  3 ++-
> >  arch/x86/kvm/vmx/vmx.c| 27 ++-
> >  3 files changed, 24 insertions(+), 21 deletions(-)
> >
> > diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
> > index 4ff859c99946..87fea22c3799 100644
> > --- a/arch/x86/kvm/vmx/nested.c
> > +++ b/arch/x86/kvm/vmx/nested.c
> > @@ -6241,7 +6241,8 @@ void nested_vmx_hardware_unsetup(void)
> > }
> >  }
> >  
> > -__init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *))
> > +__init int nested_vmx_hardware_setup(struct kvm_x86_ops *ops,
> > +int (*exit_handlers[])(struct kvm_vcpu *))
> >  {
> > int i;
> >  
> > @@ -6277,12 +6278,12 @@ __init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *))
> > exit_handlers[EXIT_REASON_INVVPID]  = handle_invvpid;
> > exit_handlers[EXIT_REASON_VMFUNC]   = handle_vmfunc;
> >  
> > -   kvm_x86_ops->check_nested_events = vmx_check_nested_events;
> > -   kvm_x86_ops->get_nested_state = vmx_get_nested_state;
> > -   kvm_x86_ops->set_nested_state = vmx_set_nested_state;
> > -   kvm_x86_ops->get_vmcs12_pages = nested_get_vmcs12_pages;
> > -   kvm_x86_ops->nested_enable_evmcs = nested_enable_evmcs;
> > -   kvm_x86_ops->nested_get_evmcs_version = nested_get_evmcs_version;
> > +   ops->check_nested_events = vmx_check_nested_events;
> > +   ops->get_nested_state = vmx_get_nested_state;
> > +   ops->set_nested_state = vmx_set_nested_state;
> > +   ops->get_vmcs12_pages = nested_get_vmcs12_pages;
> > +   ops->nested_enable_evmcs = nested_enable_evmcs;
> > +   ops->nested_get_evmcs_version = nested_get_evmcs_version;
> 
> 
> A lazy guy like me would appreciate 'ops' -> 'vmx_x86_ops' rename as it
> would make 'git grep vmx_x86_ops' output more complete.

Ah, didn't think about that, obviously.
___
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm


Re: [PATCH v3 4/9] KVM: VMX: Configure runtime hooks using vmx_x86_ops

2020-03-23 Thread Vitaly Kuznetsov
Sean Christopherson  writes:

> Configure VMX's runtime hooks by modifying vmx_x86_ops directly instead
> of using the global kvm_x86_ops.  This sets the stage for waiting until
> after ->hardware_setup() to set kvm_x86_ops with the vendor's
> implementation.
>
> Signed-off-by: Sean Christopherson 
> ---
>  arch/x86/kvm/vmx/nested.c | 15 ---
>  arch/x86/kvm/vmx/nested.h |  3 ++-
>  arch/x86/kvm/vmx/vmx.c| 27 ++-
>  3 files changed, 24 insertions(+), 21 deletions(-)
>
> diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
> index 4ff859c99946..87fea22c3799 100644
> --- a/arch/x86/kvm/vmx/nested.c
> +++ b/arch/x86/kvm/vmx/nested.c
> @@ -6241,7 +6241,8 @@ void nested_vmx_hardware_unsetup(void)
>   }
>  }
>  
> -__init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *))
> +__init int nested_vmx_hardware_setup(struct kvm_x86_ops *ops,
> +  int (*exit_handlers[])(struct kvm_vcpu *))
>  {
>   int i;
>  
> @@ -6277,12 +6278,12 @@ __init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *))
>   exit_handlers[EXIT_REASON_INVVPID]  = handle_invvpid;
>   exit_handlers[EXIT_REASON_VMFUNC]   = handle_vmfunc;
>  
> - kvm_x86_ops->check_nested_events = vmx_check_nested_events;
> - kvm_x86_ops->get_nested_state = vmx_get_nested_state;
> - kvm_x86_ops->set_nested_state = vmx_set_nested_state;
> - kvm_x86_ops->get_vmcs12_pages = nested_get_vmcs12_pages;
> - kvm_x86_ops->nested_enable_evmcs = nested_enable_evmcs;
> - kvm_x86_ops->nested_get_evmcs_version = nested_get_evmcs_version;
> + ops->check_nested_events = vmx_check_nested_events;
> + ops->get_nested_state = vmx_get_nested_state;
> + ops->set_nested_state = vmx_set_nested_state;
> + ops->get_vmcs12_pages = nested_get_vmcs12_pages;
> + ops->nested_enable_evmcs = nested_enable_evmcs;
> + ops->nested_get_evmcs_version = nested_get_evmcs_version;


A lazy guy like me would appreciate 'ops' -> 'vmx_x86_ops' rename as it
would make 'git grep vmx_x86_ops' output more complete.

>  
>   return 0;
>  }
> diff --git a/arch/x86/kvm/vmx/nested.h b/arch/x86/kvm/vmx/nested.h
> index f70968b76d33..ac56aefa49e3 100644
> --- a/arch/x86/kvm/vmx/nested.h
> +++ b/arch/x86/kvm/vmx/nested.h
> @@ -19,7 +19,8 @@ enum nvmx_vmentry_status {
>  void vmx_leave_nested(struct kvm_vcpu *vcpu);
>  void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps);
>  void nested_vmx_hardware_unsetup(void);
> -__init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *));
> +__init int nested_vmx_hardware_setup(struct kvm_x86_ops *ops,
> +  int (*exit_handlers[])(struct kvm_vcpu *));
>  void nested_vmx_set_vmcs_shadowing_bitmap(void);
>  void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu);
>  enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu 
> *vcpu,
> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
> index 82dab775d520..cfa9093bdc06 100644
> --- a/arch/x86/kvm/vmx/vmx.c
> +++ b/arch/x86/kvm/vmx/vmx.c
> @@ -7860,16 +7860,16 @@ static __init int hardware_setup(void)
>* using the APIC_ACCESS_ADDR VMCS field.
>*/
>   if (!flexpriority_enabled)
> - kvm_x86_ops->set_apic_access_page_addr = NULL;
> + vmx_x86_ops.set_apic_access_page_addr = NULL;
>  
>   if (!cpu_has_vmx_tpr_shadow())
> - kvm_x86_ops->update_cr8_intercept = NULL;
> + vmx_x86_ops.update_cr8_intercept = NULL;
>  
>  #if IS_ENABLED(CONFIG_HYPERV)
>   if (ms_hyperv.nested_features & HV_X64_NESTED_GUEST_MAPPING_FLUSH
>   && enable_ept) {
> - kvm_x86_ops->tlb_remote_flush = hv_remote_flush_tlb;
> - kvm_x86_ops->tlb_remote_flush_with_range =
> + vmx_x86_ops.tlb_remote_flush = hv_remote_flush_tlb;
> + vmx_x86_ops.tlb_remote_flush_with_range =
>   hv_remote_flush_tlb_with_range;
>   }
>  #endif
> @@ -7884,7 +7884,7 @@ static __init int hardware_setup(void)
>  
>   if (!cpu_has_vmx_apicv()) {
>   enable_apicv = 0;
> - kvm_x86_ops->sync_pir_to_irr = NULL;
> + vmx_x86_ops.sync_pir_to_irr = NULL;
>   }
>  
>   if (cpu_has_vmx_tsc_scaling()) {
> @@ -7916,10 +7916,10 @@ static __init int hardware_setup(void)
>   enable_pml = 0;
>  
>   if (!enable_pml) {
> - kvm_x86_ops->slot_enable_log_dirty = NULL;
> - kvm_x86_ops->slot_disable_log_dirty = NULL;
> - kvm_x86_ops->flush_log_dirty = NULL;
> - kvm_x86_ops->enable_log_dirty_pt_masked = NULL;
> + vmx_x86_ops.slot_enable_log_dirty = NULL;
> + vmx_x86_ops.slot_disable_log_dirty = NULL;
> + vmx_x86_ops.flush_log_dirty = NULL;
> + vmx_x86_ops.enable_log_dirty_pt_masked = NULL;
>   }
>  
>   i

[PATCH v3 4/9] KVM: VMX: Configure runtime hooks using vmx_x86_ops

2020-03-21 Thread Sean Christopherson
Configure VMX's runtime hooks by modifying vmx_x86_ops directly instead
of using the global kvm_x86_ops.  This sets the stage for waiting until
after ->hardware_setup() to set kvm_x86_ops with the vendor's
implementation.

Signed-off-by: Sean Christopherson 
---
 arch/x86/kvm/vmx/nested.c | 15 ---
 arch/x86/kvm/vmx/nested.h |  3 ++-
 arch/x86/kvm/vmx/vmx.c| 27 ++-
 3 files changed, 24 insertions(+), 21 deletions(-)

diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 4ff859c99946..87fea22c3799 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -6241,7 +6241,8 @@ void nested_vmx_hardware_unsetup(void)
}
 }
 
-__init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *))
+__init int nested_vmx_hardware_setup(struct kvm_x86_ops *ops,
+int (*exit_handlers[])(struct kvm_vcpu *))
 {
int i;
 
@@ -6277,12 +6278,12 @@ __init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *))
exit_handlers[EXIT_REASON_INVVPID]  = handle_invvpid;
exit_handlers[EXIT_REASON_VMFUNC]   = handle_vmfunc;
 
-   kvm_x86_ops->check_nested_events = vmx_check_nested_events;
-   kvm_x86_ops->get_nested_state = vmx_get_nested_state;
-   kvm_x86_ops->set_nested_state = vmx_set_nested_state;
-   kvm_x86_ops->get_vmcs12_pages = nested_get_vmcs12_pages;
-   kvm_x86_ops->nested_enable_evmcs = nested_enable_evmcs;
-   kvm_x86_ops->nested_get_evmcs_version = nested_get_evmcs_version;
+   ops->check_nested_events = vmx_check_nested_events;
+   ops->get_nested_state = vmx_get_nested_state;
+   ops->set_nested_state = vmx_set_nested_state;
+   ops->get_vmcs12_pages = nested_get_vmcs12_pages;
+   ops->nested_enable_evmcs = nested_enable_evmcs;
+   ops->nested_get_evmcs_version = nested_get_evmcs_version;
 
return 0;
 }
diff --git a/arch/x86/kvm/vmx/nested.h b/arch/x86/kvm/vmx/nested.h
index f70968b76d33..ac56aefa49e3 100644
--- a/arch/x86/kvm/vmx/nested.h
+++ b/arch/x86/kvm/vmx/nested.h
@@ -19,7 +19,8 @@ enum nvmx_vmentry_status {
 void vmx_leave_nested(struct kvm_vcpu *vcpu);
 void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps);
 void nested_vmx_hardware_unsetup(void);
-__init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *));
+__init int nested_vmx_hardware_setup(struct kvm_x86_ops *ops,
+int (*exit_handlers[])(struct kvm_vcpu *));
 void nested_vmx_set_vmcs_shadowing_bitmap(void);
 void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu);
 enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 82dab775d520..cfa9093bdc06 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -7860,16 +7860,16 @@ static __init int hardware_setup(void)
 * using the APIC_ACCESS_ADDR VMCS field.
 */
if (!flexpriority_enabled)
-   kvm_x86_ops->set_apic_access_page_addr = NULL;
+   vmx_x86_ops.set_apic_access_page_addr = NULL;
 
if (!cpu_has_vmx_tpr_shadow())
-   kvm_x86_ops->update_cr8_intercept = NULL;
+   vmx_x86_ops.update_cr8_intercept = NULL;
 
 #if IS_ENABLED(CONFIG_HYPERV)
if (ms_hyperv.nested_features & HV_X64_NESTED_GUEST_MAPPING_FLUSH
&& enable_ept) {
-   kvm_x86_ops->tlb_remote_flush = hv_remote_flush_tlb;
-   kvm_x86_ops->tlb_remote_flush_with_range =
+   vmx_x86_ops.tlb_remote_flush = hv_remote_flush_tlb;
+   vmx_x86_ops.tlb_remote_flush_with_range =
hv_remote_flush_tlb_with_range;
}
 #endif
@@ -7884,7 +7884,7 @@ static __init int hardware_setup(void)
 
if (!cpu_has_vmx_apicv()) {
enable_apicv = 0;
-   kvm_x86_ops->sync_pir_to_irr = NULL;
+   vmx_x86_ops.sync_pir_to_irr = NULL;
}
 
if (cpu_has_vmx_tsc_scaling()) {
@@ -7916,10 +7916,10 @@ static __init int hardware_setup(void)
enable_pml = 0;
 
if (!enable_pml) {
-   kvm_x86_ops->slot_enable_log_dirty = NULL;
-   kvm_x86_ops->slot_disable_log_dirty = NULL;
-   kvm_x86_ops->flush_log_dirty = NULL;
-   kvm_x86_ops->enable_log_dirty_pt_masked = NULL;
+   vmx_x86_ops.slot_enable_log_dirty = NULL;
+   vmx_x86_ops.slot_disable_log_dirty = NULL;
+   vmx_x86_ops.flush_log_dirty = NULL;
+   vmx_x86_ops.enable_log_dirty_pt_masked = NULL;
}
 
if (!cpu_has_vmx_preemption_timer())
@@ -7947,9 +7947,9 @@ static __init int hardware_setup(void)
}
 
if (!enable_preemption_timer) {
-   kvm_x86_ops->set_hv_timer = NULL;
-   kvm_x86_ops->cancel_hv_timer = NULL;
-   kvm_x86_ops