On 20/09/19 23:24, Andrea Arcangeli wrote:
> diff --git a/arch/x86/kvm/svm_ops.c b/arch/x86/kvm/svm_ops.c
> new file mode 100644
> index 000000000000..2aaabda92179
> --- /dev/null
> +++ b/arch/x86/kvm/svm_ops.c
> @@ -0,0 +1,672 @@
> +// SPDX-License-Identifier: GPL-2.0-only
> +/*
> + * arch/x86/kvm/svm_ops.c
> + *
> + * Copyright 2019 Red Hat, Inc.
> + */
> +
> +int kvm_x86_ops_cpu_has_kvm_support(void)
> +{
> + return has_svm();
> +}
Can you just rename all the functions in vmx/ and svm.c, instead of
adding forwarders?
Thanks,
Paolo
> +int kvm_x86_ops_disabled_by_bios(void)
> +{
> + return is_disabled();
> +}
> +
> +int kvm_x86_ops_hardware_enable(void)
> +{
> + return svm_hardware_enable();
> +}
> +
> +void kvm_x86_ops_hardware_disable(void)
> +{
> + svm_hardware_disable();
> +}
> +
> +__init int kvm_x86_ops_check_processor_compatibility(void)
> +{
> + return svm_check_processor_compat();
> +}
> +
> +__init int kvm_x86_ops_hardware_setup(void)
> +{
> + return svm_hardware_setup();
> +}
> +
> +void kvm_x86_ops_hardware_unsetup(void)
> +{
> + svm_hardware_unsetup();
> +}
> +
> +bool kvm_x86_ops_cpu_has_accelerated_tpr(void)
> +{
> + return svm_cpu_has_accelerated_tpr();
> +}
> +
> +bool kvm_x86_ops_has_emulated_msr(int index)
> +{
> + return svm_has_emulated_msr(index);
> +}
> +
> +void kvm_x86_ops_cpuid_update(struct kvm_vcpu *vcpu)
> +{
> + svm_cpuid_update(vcpu);
> +}
> +
> +struct kvm *kvm_x86_ops_vm_alloc(void)
> +{
> + return svm_vm_alloc();
> +}
> +
> +void kvm_x86_ops_vm_free(struct kvm *kvm)
> +{
> + svm_vm_free(kvm);
> +}
> +
> +int kvm_x86_ops_vm_init(struct kvm *kvm)
> +{
> + return avic_vm_init(kvm);
> +}
> +
> +void kvm_x86_ops_vm_destroy(struct kvm *kvm)
> +{
> + svm_vm_destroy(kvm);
> +}
> +
> +struct kvm_vcpu *kvm_x86_ops_vcpu_create(struct kvm *kvm, unsigned id)
> +{
> + return svm_create_vcpu(kvm, id);
> +}
> +
> +void kvm_x86_ops_vcpu_free(struct kvm_vcpu *vcpu)
> +{
> + svm_free_vcpu(vcpu);
> +}
> +
> +void kvm_x86_ops_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
> +{
> + svm_vcpu_reset(vcpu, init_event);
> +}
> +
> +void kvm_x86_ops_prepare_guest_switch(struct kvm_vcpu *vcpu)
> +{
> + svm_prepare_guest_switch(vcpu);
> +}
> +
> +void kvm_x86_ops_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
> +{
> + svm_vcpu_load(vcpu, cpu);
> +}
> +
> +void kvm_x86_ops_vcpu_put(struct kvm_vcpu *vcpu)
> +{
> + svm_vcpu_put(vcpu);
> +}
> +
> +void kvm_x86_ops_update_bp_intercept(struct kvm_vcpu *vcpu)
> +{
> + update_bp_intercept(vcpu);
> +}
> +
> +int kvm_x86_ops_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
> +{
> + return svm_get_msr(vcpu, msr);
> +}
> +
> +int kvm_x86_ops_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
> +{
> + return svm_set_msr(vcpu, msr);
> +}
> +
> +u64 kvm_x86_ops_get_segment_base(struct kvm_vcpu *vcpu, int seg)
> +{
> + return svm_get_segment_base(vcpu, seg);
> +}
> +
> +void kvm_x86_ops_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var,
> + int seg)
> +{
> + svm_get_segment(vcpu, var, seg);
> +}
> +
> +int kvm_x86_ops_get_cpl(struct kvm_vcpu *vcpu)
> +{
> + return svm_get_cpl(vcpu);
> +}
> +
> +void kvm_x86_ops_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var,
> + int seg)
> +{
> + svm_set_segment(vcpu, var, seg);
> +}
> +
> +void kvm_x86_ops_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
> +{
> + kvm_get_cs_db_l_bits(vcpu, db, l);
> +}
> +
> +void kvm_x86_ops_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
> +{
> + svm_decache_cr0_guest_bits(vcpu);
> +}
> +
> +void kvm_x86_ops_decache_cr3(struct kvm_vcpu *vcpu)
> +{
> + svm_decache_cr3(vcpu);
> +}
> +
> +void kvm_x86_ops_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
> +{
> + svm_decache_cr4_guest_bits(vcpu);
> +}
> +
> +void kvm_x86_ops_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
> +{
> + svm_set_cr0(vcpu, cr0);
> +}
> +
> +void kvm_x86_ops_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
> +{
> + svm_set_cr3(vcpu, cr3);
> +}
> +
> +int kvm_x86_ops_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
> +{
> + return svm_set_cr4(vcpu, cr4);
> +}
> +
> +void kvm_x86_ops_set_efer(struct kvm_vcpu *vcpu, u64 efer)
> +{
> + svm_set_efer(vcpu, efer);
> +}
> +
> +void kvm_x86_ops_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
> +{
> + svm_get_idt(vcpu, dt);
> +}
> +
> +void kvm_x86_ops_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
> +{
> + svm_set_idt(vcpu, dt);
> +}
> +
> +void kvm_x86_ops_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
> +{
> + svm_get_gdt(vcpu, dt);
> +}
> +
> +void kvm_x86_ops_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
> +{
> + svm_set_gdt(vcpu, dt);
> +}
> +
> +u64 kvm_x86_ops_get_dr6(struct kvm_vcpu *vcpu)
> +{
> + return svm_get_dr6(vcpu);
> +}
> +
> +void kvm_x86_ops_set_dr6(struct kvm_vcpu *vcpu, unsigned long value)
> +{
> +