Use static calls to improve kvm_x86_ops performance. Introduce the
definitions that a subsequent patch will use to realize the savings.
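
For illustration, the call-site conversion the follow-on patch is
expected to perform looks roughly like this (a sketch, not part of
this patch; 'tlb_flush_current' is just one example hook and 'vcpu'
is the usual struct kvm_vcpu pointer):

	/* Before: retpoline-prone indirect call through the ops table. */
	kvm_x86_ops.tlb_flush_current(vcpu);

	/* After: direct call patched in at runtime via static call. */
	static_call(kvm_x86_tlb_flush_current)(vcpu);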

Note that all kvm_x86_ops are covered here except for 'pmu_ops' and
'nested_ops'. I think they can be covered by static calls in a similar
manner, but they were omitted from this series to reduce its scope and
because I don't think they have as large a performance impact.
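
For reference, the static calls are expected to be pointed at the
vendor implementations once kvm_x86_ops has been populated. A minimal
sketch, assuming the update runs from hardware setup right after the
existing ops copy (the exact call site is left to the follow-on patch):

	/* Populate kvm_x86_ops from the vendor module (VMX/SVM) ... */
	memcpy(&kvm_x86_ops, ops->runtime_ops, sizeof(kvm_x86_ops));
	/* ... then retarget every static call at the installed ops. */
	kvm_ops_static_call_update();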

Cc: Paolo Bonzini <pbonz...@redhat.com>
Cc: Thomas Gleixner <t...@linutronix.de>
Cc: Ingo Molnar <mi...@redhat.com>
Cc: Borislav Petkov <b...@alien8.de>
Cc: Peter Zijlstra <pet...@infradead.org>
Cc: Andrea Arcangeli <aarca...@redhat.com>
Signed-off-by: Jason Baron <jba...@akamai.com>
---
 arch/x86/include/asm/kvm_host.h | 65 +++++++++++++++++++++++++++++++++++++++++
 arch/x86/kvm/x86.c              |  5 ++++
 2 files changed, 70 insertions(+)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 3ab7b46..e947522 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1087,6 +1087,65 @@ static inline u16 kvm_lapic_irq_dest_mode(bool dest_mode_logical)
        return dest_mode_logical ? APIC_DEST_LOGICAL : APIC_DEST_PHYSICAL;
 }
 
+/*
+ * static calls cover all kvm_x86_ops except for functions under pmu_ops and
+ * nested_ops.
+ */
+#define FOREACH_KVM_X86_OPS(F) \
+       F(hardware_enable); F(hardware_disable); F(hardware_unsetup);          \
+       F(cpu_has_accelerated_tpr); F(has_emulated_msr);                       \
+       F(vcpu_after_set_cpuid); F(vm_init); F(vm_destroy); F(vcpu_create);    \
+       F(vcpu_free); F(vcpu_reset); F(prepare_guest_switch); F(vcpu_load);    \
+       F(vcpu_put); F(update_exception_bitmap); F(get_msr); F(set_msr);       \
+       F(get_segment_base); F(get_segment); F(get_cpl); F(set_segment);       \
+       F(get_cs_db_l_bits); F(set_cr0); F(is_valid_cr4); F(set_cr4);          \
+       F(set_efer); F(get_idt); F(set_idt); F(get_gdt); F(set_gdt);           \
+       F(sync_dirty_debug_regs); F(set_dr7); F(cache_reg); F(get_rflags);     \
+       F(set_rflags); F(tlb_flush_all); F(tlb_flush_current);                 \
+       F(tlb_remote_flush); F(tlb_remote_flush_with_range); F(tlb_flush_gva); \
+       F(tlb_flush_guest); F(run); F(handle_exit);                            \
+       F(skip_emulated_instruction); F(update_emulated_instruction);          \
+       F(set_interrupt_shadow); F(get_interrupt_shadow); F(patch_hypercall);  \
+       F(set_irq); F(set_nmi); F(queue_exception); F(cancel_injection);       \
+       F(interrupt_allowed); F(nmi_allowed); F(get_nmi_mask); F(set_nmi_mask);\
+       F(enable_nmi_window); F(enable_irq_window); F(update_cr8_intercept);   \
+       F(check_apicv_inhibit_reasons); F(pre_update_apicv_exec_ctrl);         \
+       F(refresh_apicv_exec_ctrl); F(hwapic_irr_update); F(hwapic_isr_update);\
+       F(guest_apic_has_interrupt); F(load_eoi_exitmap);                      \
+       F(set_virtual_apic_mode); F(set_apic_access_page_addr);                \
+       F(deliver_posted_interrupt); F(sync_pir_to_irr); F(set_tss_addr);      \
+       F(set_identity_map_addr); F(get_mt_mask); F(load_mmu_pgd);             \
+       F(has_wbinvd_exit); F(write_l1_tsc_offset); F(get_exit_info);          \
+       F(check_intercept); F(handle_exit_irqoff); F(request_immediate_exit);  \
+       F(sched_in); F(slot_enable_log_dirty); F(slot_disable_log_dirty);      \
+       F(flush_log_dirty); F(enable_log_dirty_pt_masked);                     \
+       F(cpu_dirty_log_size); F(pre_block); F(post_block); F(vcpu_blocking);  \
+       F(vcpu_unblocking); F(update_pi_irte); F(apicv_post_state_restore);    \
+       F(dy_apicv_has_pending_interrupt); F(set_hv_timer); F(cancel_hv_timer);\
+       F(setup_mce); F(smi_allowed); F(pre_enter_smm); F(pre_leave_smm);      \
+       F(enable_smi_window); F(mem_enc_op); F(mem_enc_reg_region);            \
+       F(mem_enc_unreg_region); F(get_msr_feature);                           \
+       F(can_emulate_instruction); F(apic_init_signal_blocked);               \
+       F(enable_direct_tlbflush); F(migrate_timers); F(msr_filter_changed);   \
+       F(complete_emulated_msr)
+
+#define DEFINE_KVM_OPS_STATIC_CALL(func)       \
+       DEFINE_STATIC_CALL_NULL(kvm_x86_##func, \
+                               *(((struct kvm_x86_ops *)0)->func))
+#define DEFINE_KVM_OPS_STATIC_CALLS() \
+       FOREACH_KVM_X86_OPS(DEFINE_KVM_OPS_STATIC_CALL)
+
+#define DECLARE_KVM_OPS_STATIC_CALL(func)      \
+       DECLARE_STATIC_CALL(kvm_x86_##func,     \
+                           *(((struct kvm_x86_ops *)0)->func))
+#define DECLARE_KVM_OPS_STATIC_CALLS()         \
+       FOREACH_KVM_X86_OPS(DECLARE_KVM_OPS_STATIC_CALL)
+
+#define KVM_OPS_STATIC_CALL_UPDATE(func)       \
+       static_call_update(kvm_x86_##func, kvm_x86_ops.func)
+#define KVM_OPS_STATIC_CALL_UPDATES()          \
+       FOREACH_KVM_X86_OPS(KVM_OPS_STATIC_CALL_UPDATE)
+
 struct kvm_x86_ops {
        int (*hardware_enable)(void);
        void (*hardware_disable)(void);
@@ -1326,6 +1385,12 @@ extern u64 __read_mostly host_efer;
 extern bool __read_mostly allow_smaller_maxphyaddr;
 extern struct kvm_x86_ops kvm_x86_ops;
 
+DECLARE_KVM_OPS_STATIC_CALLS();
+static inline void kvm_ops_static_call_update(void)
+{
+       KVM_OPS_STATIC_CALL_UPDATES();
+}
+
 #define __KVM_HAVE_ARCH_VM_ALLOC
 static inline struct kvm *kvm_arch_alloc_vm(void)
 {
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 3f7c1fc..6ae32ab 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -113,6 +113,11 @@ static int sync_regs(struct kvm_vcpu *vcpu);
 struct kvm_x86_ops kvm_x86_ops __read_mostly;
 EXPORT_SYMBOL_GPL(kvm_x86_ops);
 
+DEFINE_KVM_OPS_STATIC_CALLS();
+EXPORT_STATIC_CALL_GPL(kvm_x86_get_cs_db_l_bits);
+EXPORT_STATIC_CALL_GPL(kvm_x86_cache_reg);
+EXPORT_STATIC_CALL_GPL(kvm_x86_tlb_flush_current);
+
 static bool __read_mostly ignore_msrs = 0;
 module_param(ignore_msrs, bool, S_IRUGO | S_IWUSR);
 
-- 
2.7.4
