Gleb Natapov wrote on 2013-01-10:
> On Thu, Jan 10, 2013 at 03:26:08PM +0800, Yang Zhang wrote:
>> From: Yang Zhang <yang.z.zh...@intel.com>
>> 
>> Virtual interrupt delivery removes the need for KVM to inject vAPIC
>> interrupts manually; injection is fully taken care of by the hardware.
>> This requires some special awareness in the existing interrupt
>> injection path:
>> 
>> - For a pending interrupt, instead of injecting it directly, we may
>>   need to update architecture-specific indicators before resuming to
>>   the guest.
>> - A pending interrupt that is masked by the ISR should also be
>>   considered in the update above, since the hardware will decide when
>>   to inject it at the right time. Currently, has_interrupt and
>>   get_interrupt only return a valid vector from the injection point
>>   of view.
>> 
>> Signed-off-by: Kevin Tian <kevin.t...@intel.com>
>> Signed-off-by: Yang Zhang <yang.z.zh...@intel.com>
>> ---
>>  arch/x86/include/asm/kvm_host.h |    5 +
>>  arch/x86/include/asm/vmx.h      |   11 +++
>>  arch/x86/kvm/irq.c              |   56 +++++++++++-
>>  arch/x86/kvm/lapic.c            |   72 +++++++++------
>>  arch/x86/kvm/lapic.h            |   23 +++++
>>  arch/x86/kvm/svm.c              |   18 ++++
>>  arch/x86/kvm/vmx.c              |  191 +++++++++++++++++++++++++++++++++++++--
>>  arch/x86/kvm/x86.c              |   14 +++-
>>  include/linux/kvm_host.h        |    3 +
>>  virt/kvm/ioapic.c               |   18 ++++
>>  virt/kvm/ioapic.h               |    4 +
>>  virt/kvm/irq_comm.c             |   22 +++++
>>  virt/kvm/kvm_main.c             |    5 +
>>  13 files changed, 399 insertions(+), 43 deletions(-)
>> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
>> index 572a562..f471856 100644
>> --- a/arch/x86/include/asm/kvm_host.h
>> +++ b/arch/x86/include/asm/kvm_host.h
>> @@ -697,6 +697,10 @@ struct kvm_x86_ops {
>>      void (*enable_nmi_window)(struct kvm_vcpu *vcpu);
>>      void (*enable_irq_window)(struct kvm_vcpu *vcpu);
>>      void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr);
>> +    int (*has_virtual_interrupt_delivery)(struct kvm_vcpu *vcpu);
>> +    void (*update_apic_irq)(struct kvm_vcpu *vcpu, int max_irr);
>> +    void (*update_eoi_exitmap)(struct kvm_vcpu *vcpu);
>> +    void (*set_svi)(int isr);
>>      void (*enable_virtual_x2apic_mode)(struct kvm_vcpu *vcpu);
>>      void (*disable_virtual_x2apic_mode)(struct kvm_vcpu *vcpu);
>>      int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
>> @@ -993,6 +997,7 @@ int kvm_age_hva(struct kvm *kvm, unsigned long hva);
>>  int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
>>  void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
>>  int cpuid_maxphyaddr(struct kvm_vcpu *vcpu);
>> +int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v);
>>  int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
>>  int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
>>  int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
>> diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
>> index 0a54df0..694586c 100644
>> --- a/arch/x86/include/asm/vmx.h
>> +++ b/arch/x86/include/asm/vmx.h
>> @@ -62,6 +62,7 @@
>>  #define EXIT_REASON_MCE_DURING_VMENTRY  41
>>  #define EXIT_REASON_TPR_BELOW_THRESHOLD 43
>>  #define EXIT_REASON_APIC_ACCESS         44
>> +#define EXIT_REASON_EOI_INDUCED         45
>>  #define EXIT_REASON_EPT_VIOLATION       48
>>  #define EXIT_REASON_EPT_MISCONFIG       49
>>  #define EXIT_REASON_WBINVD              54
>> @@ -144,6 +145,7 @@
>>  #define SECONDARY_EXEC_WBINVD_EXITING           0x00000040
>>  #define SECONDARY_EXEC_UNRESTRICTED_GUEST       0x00000080
>>  #define SECONDARY_EXEC_APIC_REGISTER_VIRT       0x00000100
>> +#define SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY    0x00000200
>>  #define SECONDARY_EXEC_PAUSE_LOOP_EXITING       0x00000400
>>  #define SECONDARY_EXEC_ENABLE_INVPCID           0x00001000
>> @@ -181,6 +183,7 @@ enum vmcs_field {
>>      GUEST_GS_SELECTOR               = 0x0000080a,
>>      GUEST_LDTR_SELECTOR             = 0x0000080c,
>>      GUEST_TR_SELECTOR               = 0x0000080e,
>> +    GUEST_INTR_STATUS               = 0x00000810,
>>      HOST_ES_SELECTOR                = 0x00000c00,
>>      HOST_CS_SELECTOR                = 0x00000c02,
>>      HOST_SS_SELECTOR                = 0x00000c04,
>> @@ -208,6 +211,14 @@ enum vmcs_field {
>>      APIC_ACCESS_ADDR_HIGH           = 0x00002015,
>>      EPT_POINTER                     = 0x0000201a,
>>      EPT_POINTER_HIGH                = 0x0000201b,
>> +    EOI_EXIT_BITMAP0                = 0x0000201c,
>> +    EOI_EXIT_BITMAP0_HIGH           = 0x0000201d,
>> +    EOI_EXIT_BITMAP1                = 0x0000201e,
>> +    EOI_EXIT_BITMAP1_HIGH           = 0x0000201f,
>> +    EOI_EXIT_BITMAP2                = 0x00002020,
>> +    EOI_EXIT_BITMAP2_HIGH           = 0x00002021,
>> +    EOI_EXIT_BITMAP3                = 0x00002022,
>> +    EOI_EXIT_BITMAP3_HIGH           = 0x00002023,
>>      GUEST_PHYSICAL_ADDRESS          = 0x00002400,
>>      GUEST_PHYSICAL_ADDRESS_HIGH     = 0x00002401,
>>      VMCS_LINK_POINTER               = 0x00002800,
>> diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c
>> index b111aee..e113440 100644
>> --- a/arch/x86/kvm/irq.c
>> +++ b/arch/x86/kvm/irq.c
>> @@ -38,6 +38,38 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
>>  EXPORT_SYMBOL(kvm_cpu_has_pending_timer);
>>  
>>  /*
>> + * check if there is a pending interrupt from
>> + * a non-APIC source, without intack.
>> + */
>> +static int kvm_cpu_has_extint(struct kvm_vcpu *v)
>> +{
>> +    if (kvm_apic_accept_pic_intr(v))
>> +            return pic_irqchip(v->kvm)->output;     /* PIC */
>> +    else
>> +            return 0;
>> +}
>> +
>> +/*
>> + * check if there is an injectable interrupt:
>> + * when virtual interrupt delivery is enabled,
>> + * an interrupt from the apic will be handled by
>> + * hardware, so we don't need to check it here.
>> + */
>> +int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v)
>> +{
>> +    if (!irqchip_in_kernel(v->kvm))
>> +            return v->arch.interrupt.pending;
>> +
>> +    if (kvm_cpu_has_extint(v))
>> +            return 1;
>> +
>> +    if (kvm_apic_vid_enabled(v))
>> +            return 0;
>> +
>> +    return kvm_apic_has_interrupt(v) != -1; /* LAPIC */
>> +}
>> +
>> +/*
>>   * check if there is pending interrupt without
>>   * intack.
>>   */
>> @@ -46,27 +78,41 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *v)
>>      if (!irqchip_in_kernel(v->kvm))
>>              return v->arch.interrupt.pending;
>> -    if (kvm_apic_accept_pic_intr(v) && pic_irqchip(v->kvm)->output)
>> -            return pic_irqchip(v->kvm)->output;     /* PIC */
>> +    if (kvm_cpu_has_extint(v))
>> +            return 1;
>> 
>>      return kvm_apic_has_interrupt(v) != -1; /* LAPIC */
>>  }
>>  EXPORT_SYMBOL_GPL(kvm_cpu_has_interrupt);
>>  
>>  /*
>> + * Read pending interrupt (from non-APIC source)
>> + * vector and intack.
>> + */
>> +static int kvm_cpu_get_extint(struct kvm_vcpu *v)
>> +{
>> +    if (kvm_cpu_has_extint(v))
>> +            return kvm_pic_read_irq(v->kvm); /* PIC */
>> +    return -1;
>> +}
>> +
>> +/*
>>   * Read pending interrupt vector and intack.
>>   */
>>  int kvm_cpu_get_interrupt(struct kvm_vcpu *v)
>>  {
>> +    int vector;
>> +
>>      if (!irqchip_in_kernel(v->kvm))
>>              return v->arch.interrupt.nr;
>> -    if (kvm_apic_accept_pic_intr(v) && pic_irqchip(v->kvm)->output)
>> -            return kvm_pic_read_irq(v->kvm);        /* PIC */
>> +    vector = kvm_cpu_get_extint(v);
>> +
>> +    if (kvm_apic_vid_enabled(v) || vector != -1)
>> +            return vector;                  /* PIC */
>> 
>>      return kvm_get_apic_interrupt(v);       /* APIC */
>>  }
>> -EXPORT_SYMBOL_GPL(kvm_cpu_get_interrupt);
>> 
>>  void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu)
>>  {
>> diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
>> index ec38906..d219f41 100644
>> --- a/arch/x86/kvm/lapic.c
>> +++ b/arch/x86/kvm/lapic.c
>> @@ -150,23 +150,6 @@ static inline int kvm_apic_id(struct kvm_lapic *apic)
>>      return (kvm_apic_get_reg(apic, APIC_ID) >> 24) & 0xff;
>>  }
>> -static inline u16 apic_cluster_id(struct kvm_apic_map *map, u32 ldr)
>> -{
>> -    u16 cid;
>> -    ldr >>= 32 - map->ldr_bits;
>> -    cid = (ldr >> map->cid_shift) & map->cid_mask;
>> -
>> -    BUG_ON(cid >= ARRAY_SIZE(map->logical_map));
>> -
>> -    return cid;
>> -}
>> -
>> -static inline u16 apic_logical_id(struct kvm_apic_map *map, u32 ldr)
>> -{
>> -    ldr >>= (32 - map->ldr_bits);
>> -    return ldr & map->lid_mask;
>> -}
>> -
>>  static void recalculate_apic_map(struct kvm *kvm)
>>  {
>>      struct kvm_apic_map *new, *old = NULL;
>> @@ -236,12 +219,14 @@ static inline void kvm_apic_set_id(struct kvm_lapic *apic, u8 id)
>>  {
>>      apic_set_reg(apic, APIC_ID, id << 24);
>>      recalculate_apic_map(apic->vcpu->kvm);
>> +    ioapic_update_eoi_exitmap(apic->vcpu->kvm);
>>  }
>>  
>>  static inline void kvm_apic_set_ldr(struct kvm_lapic *apic, u32 id)
>>  {
>>      apic_set_reg(apic, APIC_LDR, id);
>>      recalculate_apic_map(apic->vcpu->kvm);
>> +    ioapic_update_eoi_exitmap(apic->vcpu->kvm);
>>  }
>>  
>>  static inline int apic_lvt_enabled(struct kvm_lapic *apic, int lvt_type)
>> @@ -345,6 +330,8 @@ static inline int apic_find_highest_irr(struct kvm_lapic *apic)
>>  {
>>      int result;
>> +    /* Note that irr_pending is just a hint. It will always be
>> +     * true with virtual interrupt delivery enabled. */
>>      if (!apic->irr_pending)
>>              return -1;
>> @@ -461,6 +448,8 @@ static void pv_eoi_clr_pending(struct kvm_vcpu *vcpu)
>>  static inline int apic_find_highest_isr(struct kvm_lapic *apic)
>>  {
>>      int result;
>> +
>> +    /* Note that isr_count is always 1 with vid enabled */
>>      if (!apic->isr_count)
>>              return -1;
>>      if (likely(apic->highest_isr_cache != -1))
>> @@ -740,6 +729,19 @@ int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
>>      return vcpu1->arch.apic_arb_prio - vcpu2->arch.apic_arb_prio;
>>  }
>> +static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector)
>> +{
>> +    if (!(kvm_apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI) &&
>> +        kvm_ioapic_handles_vector(apic->vcpu->kvm, vector)) {
>> +            int trigger_mode;
>> +            if (apic_test_vector(vector, apic->regs + APIC_TMR))
>> +                    trigger_mode = IOAPIC_LEVEL_TRIG;
>> +            else
>> +                    trigger_mode = IOAPIC_EDGE_TRIG;
>> +            kvm_ioapic_update_eoi(apic->vcpu->kvm, vector, trigger_mode);
>> +    }
>> +}
>> +
>>  static int apic_set_eoi(struct kvm_lapic *apic)
>>  {
>>      int vector = apic_find_highest_isr(apic);
>> @@ -756,19 +758,26 @@ static int apic_set_eoi(struct kvm_lapic *apic)
>>      apic_clear_isr(vector, apic);
>>      apic_update_ppr(apic);
>> -    if (!(kvm_apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI) &&
>> -        kvm_ioapic_handles_vector(apic->vcpu->kvm, vector)) {
>> -            int trigger_mode;
>> -            if (apic_test_vector(vector, apic->regs + APIC_TMR))
>> -                    trigger_mode = IOAPIC_LEVEL_TRIG;
>> -            else
>> -                    trigger_mode = IOAPIC_EDGE_TRIG;
>> -            kvm_ioapic_update_eoi(apic->vcpu->kvm, vector, trigger_mode);
>> -    }
>> +    kvm_ioapic_send_eoi(apic, vector);
>>      kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
>>      return vector;
>>  }
>> +/*
>> + * this interface assumes a trap-like exit, which has already finished
>> + * the desired side effects, including vISR and vPPR update.
>> + */
>> +void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector)
>> +{
>> +    struct kvm_lapic *apic = vcpu->arch.apic;
>> +
>> +    trace_kvm_eoi(apic, vector);
>> +
>> +    kvm_ioapic_send_eoi(apic, vector);
>> +    kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
>> +}
>> +EXPORT_SYMBOL_GPL(kvm_apic_set_eoi_accelerated);
>> +
>>  static void apic_send_ipi(struct kvm_lapic *apic)
>>  {
>>      u32 icr_low = kvm_apic_get_reg(apic, APIC_ICR);
>> @@ -1071,6 +1080,7 @@ static int apic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
>>              if (!apic_x2apic_mode(apic)) {
>>                      apic_set_reg(apic, APIC_DFR, val | 0x0FFFFFFF);
>>                      recalculate_apic_map(apic->vcpu->kvm);
>> +                    ioapic_update_eoi_exitmap(apic->vcpu->kvm);
>>              } else
>>                      ret = 1;
>>              break;
>> @@ -1318,6 +1328,7 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
>>              else
>>                      static_key_slow_inc(&apic_hw_disabled.key);
>>              recalculate_apic_map(vcpu->kvm);
>> +            ioapic_update_eoi_exitmap(apic->vcpu->kvm);
>>      }
>>  
>>      if (!kvm_vcpu_is_bsp(apic->vcpu))
>> @@ -1377,8 +1388,9 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu)
>>              apic_set_reg(apic, APIC_ISR + 0x10 * i, 0);
>>              apic_set_reg(apic, APIC_TMR + 0x10 * i, 0);
>>      }
>> -    apic->irr_pending = false;
>> -    apic->isr_count = 0;
>> +    apic->irr_pending = kvm_apic_vid_enabled(vcpu);
>> +    apic->isr_count = kvm_apic_vid_enabled(vcpu) ?
>> +                            1 : 0;
> Why not just "apic->isr_count = kvm_apic_vid_enabled(vcpu)"?
Ok. 
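I.e., the reset path becomes simply:

	apic->irr_pending = kvm_apic_vid_enabled(vcpu);
	apic->isr_count = kvm_apic_vid_enabled(vcpu);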

>>      apic->highest_isr_cache = -1;
>>      update_divide_count(apic);
>>      atomic_set(&apic->lapic_timer.pending, 0);
>> @@ -1593,8 +1605,10 @@ void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu,
>>      update_divide_count(apic);
>>      start_apic_timer(apic);
>>      apic->irr_pending = true;
>> -    apic->isr_count = count_vectors(apic->regs + APIC_ISR);
>> +    apic->isr_count = kvm_apic_vid_enabled(vcpu) ?
>> +                            1 : count_vectors(apic->regs + APIC_ISR);
>>      apic->highest_isr_cache = -1;
>> +    kvm_x86_ops->set_svi(apic_find_highest_isr(apic));
>>      kvm_make_request(KVM_REQ_EVENT, vcpu);
>>  }
>> diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
>> index 9a8ee22..fed6538 100644
>> --- a/arch/x86/kvm/lapic.h
>> +++ b/arch/x86/kvm/lapic.h
>> @@ -65,6 +65,7 @@ u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu);
>>  void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data);
>>  
>>  void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset);
>> +void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector);
>> 
>>  void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr);
>>  void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu);
>> @@ -126,4 +127,26 @@ static inline int kvm_lapic_enabled(struct kvm_vcpu *vcpu)
>>      return kvm_apic_present(vcpu) && kvm_apic_sw_enabled(vcpu->arch.apic);
>>  }
>> +static inline bool kvm_apic_vid_enabled(struct kvm_vcpu *vcpu)
>> +{
>> +    return kvm_x86_ops->has_virtual_interrupt_delivery(vcpu);
>> +}
>> +
>> +static inline u16 apic_cluster_id(struct kvm_apic_map *map, u32 ldr)
>> +{
>> +    u16 cid;
>> +    ldr >>= 32 - map->ldr_bits;
>> +    cid = (ldr >> map->cid_shift) & map->cid_mask;
>> +
>> +    BUG_ON(cid >= ARRAY_SIZE(map->logical_map));
>> +
>> +    return cid;
>> +}
>> +
>> +static inline u16 apic_logical_id(struct kvm_apic_map *map, u32 ldr)
>> +{
>> +    ldr >>= (32 - map->ldr_bits);
>> +    return ldr & map->lid_mask;
>> +}
>> +
>>  #endif
>> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
>> index 0b82cb1..0ce6543 100644
>> --- a/arch/x86/kvm/svm.c
>> +++ b/arch/x86/kvm/svm.c
>> @@ -3576,6 +3576,21 @@ static void svm_enable_virtual_x2apic_mode(struct kvm_vcpu *vcpu)
>>      return;
>>  }
>> +static int svm_has_virtual_interrupt_delivery(struct kvm_vcpu *vcpu)
>> +{
>> +    return 0;
>> +}
>> +
>> +static void svm_update_eoi_exitmap(struct kvm_vcpu *vcpu)
>> +{
>> +    return;
>> +}
>> +
>> +static void svm_set_svi(int isr)
>> +{
>> +    return;
>> +}
>> +
>>  static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
>>  {
>>      struct vcpu_svm *svm = to_svm(vcpu);
>> @@ -4296,6 +4311,9 @@ static struct kvm_x86_ops svm_x86_ops = {
>>      .enable_irq_window = enable_irq_window,
>>      .update_cr8_intercept = update_cr8_intercept,
>>      .enable_virtual_x2apic_mode = svm_enable_virtual_x2apic_mode,
>> +    .has_virtual_interrupt_delivery = svm_has_virtual_interrupt_delivery,
>> +    .update_eoi_exitmap = svm_update_eoi_exitmap,
>> +    .set_svi = svm_set_svi,
>> 
>>      .set_tss_addr = svm_set_tss_addr,
>>      .get_tdp_level = get_npt_level,
>> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
>> index b203ce7..990409a 100644
>> --- a/arch/x86/kvm/vmx.c
>> +++ b/arch/x86/kvm/vmx.c
>> @@ -434,6 +434,7 @@ struct vcpu_vmx {
>>      bool rdtscp_enabled;
>>  
>>      bool virtual_x2apic_enabled;
>> +    unsigned long eoi_exit_bitmap[4];
>> 
>>      /* Support for a guest hypervisor (nested VMX) */
>>      struct nested_vmx nested;
>> @@ -783,7 +784,8 @@ static inline bool cpu_has_vmx_apic_register_virt(void)
>> 
>>  static inline bool cpu_has_vmx_virtual_intr_delivery(void)
>>  {
>> -    return false;
>> +    return vmcs_config.cpu_based_2nd_exec_ctrl &
>> +            SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY;
>>  }
>>  
>>  static inline bool cpu_has_vmx_flexpriority(void)
>> @@ -2565,7 +2567,8 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
>>                      SECONDARY_EXEC_PAUSE_LOOP_EXITING |
>>                      SECONDARY_EXEC_RDTSCP |
>>                      SECONDARY_EXEC_ENABLE_INVPCID |
>> -                    SECONDARY_EXEC_APIC_REGISTER_VIRT;
>> +                    SECONDARY_EXEC_APIC_REGISTER_VIRT |
>> +                    SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY;
>>              if (adjust_vmx_controls(min2, opt2,
>>                                      MSR_IA32_VMX_PROCBASED_CTLS2,
>>                                      &_cpu_based_2nd_exec_control) < 0)
>> @@ -2579,7 +2582,8 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
>> 
>>      if (!(_cpu_based_exec_control & CPU_BASED_TPR_SHADOW))
>>              _cpu_based_2nd_exec_control &= ~(
>> -                            SECONDARY_EXEC_APIC_REGISTER_VIRT);
>> +                            SECONDARY_EXEC_APIC_REGISTER_VIRT |
>> +                            SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
>> 
>>      if (_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) {
>>              /* CR3 accesses and invlpg don't need to cause VM Exits when EPT
>> @@ -2778,9 +2782,15 @@ static __init int hardware_setup(void)
>>      if (!cpu_has_vmx_ple())
>>              ple_gap = 0;
>> -    if (!cpu_has_vmx_apic_register_virt())
>> +    if (!cpu_has_vmx_apic_register_virt() ||
>> +                            !cpu_has_vmx_virtual_intr_delivery())
>>              enable_apicv_reg_vid = 0;
>> +    if (enable_apicv_reg_vid)
>> +            kvm_x86_ops->update_cr8_intercept = NULL;
>> +    else
>> +            kvm_x86_ops->update_apic_irq = NULL;
>> +
>>      if (nested)
>>              nested_vmx_setup_ctls_msrs();
>> @@ -3961,7 +3971,8 @@ static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx)
>>      if (!ple_gap)
>>              exec_control &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING;
>>      if (!enable_apicv_reg_vid)
>> -            exec_control &= ~SECONDARY_EXEC_APIC_REGISTER_VIRT;
>> +            exec_control &= ~(SECONDARY_EXEC_APIC_REGISTER_VIRT |
>> +                              SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
>>      exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
>>      return exec_control;
>>  }
>> @@ -4007,6 +4018,15 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
>>                              vmx_secondary_exec_control(vmx));
>>      }
>> +    if (enable_apicv_reg_vid) {
>> +            vmcs_write64(EOI_EXIT_BITMAP0, 0);
>> +            vmcs_write64(EOI_EXIT_BITMAP1, 0);
>> +            vmcs_write64(EOI_EXIT_BITMAP2, 0);
>> +            vmcs_write64(EOI_EXIT_BITMAP3, 0);
>> +
>> +            vmcs_write16(GUEST_INTR_STATUS, 0);
>> +    }
>> +
>>      if (ple_gap) {
>>              vmcs_write32(PLE_GAP, ple_gap);
>>              vmcs_write32(PLE_WINDOW, ple_window);
>> @@ -4924,6 +4944,16 @@ static int handle_apic_access(struct kvm_vcpu *vcpu)
>>      return emulate_instruction(vcpu, 0) == EMULATE_DONE;
>>  }
>> +static int handle_apic_eoi_induced(struct kvm_vcpu *vcpu)
>> +{
>> +    unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
>> +    int vector = exit_qualification & 0xff;
>> +
>> +    /* EOI-induced VM exit is trap-like and thus no need to adjust IP */
>> +    kvm_apic_set_eoi_accelerated(vcpu, vector);
>> +    return 1;
>> +}
>> +
>>  static int handle_apic_write(struct kvm_vcpu *vcpu)
>>  {
>>      unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
>> @@ -5869,6 +5899,7 @@ static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
>>      [EXIT_REASON_TPR_BELOW_THRESHOLD]     = handle_tpr_below_threshold,
>>      [EXIT_REASON_APIC_ACCESS]             = handle_apic_access,
>>      [EXIT_REASON_APIC_WRITE]              = handle_apic_write,
>> +    [EXIT_REASON_EOI_INDUCED]             = handle_apic_eoi_induced,
>>      [EXIT_REASON_WBINVD]                  = handle_wbinvd,
>>      [EXIT_REASON_XSETBV]                  = handle_xsetbv,
>>      [EXIT_REASON_TASK_SWITCH]             = handle_task_switch,
>> @@ -6238,7 +6269,7 @@ static void vmx_enable_virtual_x2apic_mode(struct kvm_vcpu *vcpu)
>>      vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
>>      vmx->virtual_x2apic_enabled = true;
>> -    if (!cpu_has_vmx_virtual_intr_delivery())
>> +    if (!enable_apicv_reg_vid)
>>              return;
>>  
>>      for (msr = 0x800; msr <= 0x8ff; msr++)
>> @@ -6274,7 +6305,7 @@ static void vmx_disable_virtual_x2apic_mode(struct kvm_vcpu *vcpu)
>>      vmcs_write32(SECONDARY_VM_EXEC_CONTROL, second_exec_control);
>>      vmx->virtual_x2apic_enabled = false;
>> -    if (!cpu_has_vmx_virtual_intr_delivery())
>> +    if (!enable_apicv_reg_vid)
>>              return;
>>  
>>      for (msr = 0x800; msr <= 0x8ff; msr++)
>> @@ -6288,6 +6319,148 @@ static void vmx_disable_virtual_x2apic_mode(struct kvm_vcpu *vcpu)
>>      vmx_intercept_for_msr_write(0x83f, false, true);
>>  }
>> +static int vmx_has_virtual_interrupt_delivery(struct kvm_vcpu *vcpu)
>> +{
>> +    return enable_apicv_reg_vid;
>> +}
> Why does it need a vcpu parameter if it does not use it? It gets you
> into trouble later.
Right. The vcpu parameter was used in an older version of the patch; it is no longer needed here.
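For the next version I will drop it from the hook entirely, roughly:

	/* sketch for v2: vid is a global property, so the hook takes no vcpu */
	int (*has_virtual_interrupt_delivery)(void);

	static int vmx_has_virtual_interrupt_delivery(void)
	{
		return enable_apicv_reg_vid;
	}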
 
>> +
>> +static void vmx_set_svi(int isr)
>> +{
>> +    u16 status;
>> +    u8 old;
>> +
>> +    if (!enable_apicv_reg_vid)
>> +            return;
>> +
>> +    if (isr == -1)
>> +            isr = 0;
>> +
>> +    status = vmcs_read16(GUEST_INTR_STATUS);
>> +    old = status >> 8;
>> +    if (isr != old) {
>> +            status &= 0xff;
>> +            status |= isr << 8;
>> +            vmcs_write16(GUEST_INTR_STATUS, status);
>> +    }
>> +}
>> +
>> +static void vmx_set_rvi(int vector)
>> +{
>> +    u16 status;
>> +    u8 old;
>> +
>> +    status = vmcs_read16(GUEST_INTR_STATUS);
>> +    old = (u8)status & 0xff;
>> +    if ((u8)vector != old) {
>> +            status &= ~0xff;
>> +            status |= (u8)vector;
>> +            vmcs_write16(GUEST_INTR_STATUS, status);
>> +    }
>> +}
>> +
>> +static void vmx_update_apic_irq(struct kvm_vcpu *vcpu, int max_irr)
>> +{
>> +    if (max_irr == -1)
>> +            return;
>> +
>> +    vmx_set_rvi(max_irr);
>> +}
>> +
>> +static void set_eoi_exitmap_one(struct kvm_vcpu *vcpu,
>> +                            u32 vector)
>> +{
>> +    struct vcpu_vmx *vmx = to_vmx(vcpu);
>> +
>> +    if (WARN_ONCE((vector > 255),
>> +            "KVM VMX: vector (%d) out of range\n", vector))
>> +            return;
>> +
>> +    __set_bit(vector, vmx->eoi_exit_bitmap);
>> +}
>> +
>> +void vmx_check_ioapic_entry(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq)
>> +{
>> +    struct kvm_lapic **dst;
>> +    struct kvm_apic_map *map;
>> +    unsigned long bitmap = 1;
>> +    int i;
>> +
>> +    rcu_read_lock();
>> +    map = rcu_dereference(vcpu->kvm->arch.apic_map);
>> +
>> +    if (unlikely(!map)) {
>> +            set_eoi_exitmap_one(vcpu, irq->vector);
>> +            goto out;
>> +    }
>> +
>> +    if (irq->dest_mode == 0) { /* physical mode */
>> +            if (irq->delivery_mode == APIC_DM_LOWEST ||
>> +                            irq->dest_id == 0xff) {
>> +                    set_eoi_exitmap_one(vcpu, irq->vector);
>> +                    goto out;
>> +            }
>> +            dst = &map->phys_map[irq->dest_id & 0xff];
>> +    } else {
>> +            u32 mda = irq->dest_id << (32 - map->ldr_bits);
>> +
>> +            dst = map->logical_map[apic_cluster_id(map, mda)];
>> +
>> +            bitmap = apic_logical_id(map, mda);
>> +    }
>> +
>> +    for_each_set_bit(i, &bitmap, 16) {
>> +            if (!dst[i])
>> +                    continue;
>> +            if (dst[i]->vcpu == vcpu) {
>> +                    set_eoi_exitmap_one(vcpu, irq->vector);
>> +                    break;
>> +            }
>> +    }
>> +
>> +out:
>> +    rcu_read_unlock();
>> +}
>> +
>> +static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu)
>> +{
>> +    struct vcpu_vmx *vmx = to_vmx(vcpu);
>> +
>> +    vmcs_write64(EOI_EXIT_BITMAP0, vmx->eoi_exit_bitmap[0]);
>> +    vmcs_write64(EOI_EXIT_BITMAP1, vmx->eoi_exit_bitmap[1]);
>> +    vmcs_write64(EOI_EXIT_BITMAP2, vmx->eoi_exit_bitmap[2]);
>> +    vmcs_write64(EOI_EXIT_BITMAP3, vmx->eoi_exit_bitmap[3]);
>> +}
>> +
>> +static void vmx_update_eoi_exitmap(struct kvm_vcpu *vcpu)
>> +{
>> +    struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
>> +    union kvm_ioapic_redirect_entry *e;
>> +    struct kvm_lapic_irq irqe;
>> +    int index;
>> +    struct vcpu_vmx *vmx = to_vmx(vcpu);
>> +
>> +    /* clear eoi exit bitmap */
>> +    memset(vmx->eoi_exit_bitmap, 0, 32);
>> +
>> +    /* traverse ioapic entry to set eoi exit bitmap */
>> +    for (index = 0; index < IOAPIC_NUM_PINS; index++) {
>> +            e = &ioapic->redirtbl[index];
>> +            if (!e->fields.mask &&
>> +                    (e->fields.trig_mode == IOAPIC_LEVEL_TRIG ||
>> +                     kvm_irq_has_notifier(ioapic->kvm, KVM_IRQCHIP_IOAPIC,
>> +                            index))) {
>> +                    irqe.dest_id = e->fields.dest_id;
>> +                    irqe.vector = e->fields.vector;
>> +                    irqe.dest_mode = e->fields.dest_mode;
>> +                    irqe.delivery_mode = e->fields.delivery_mode << 8;
>> +                    vmx_check_ioapic_entry(vcpu, &irqe);
>> +
>> +            }
>> +    }
> This logic should sit in ioapic.c, and you cannot access the ioapic
> without holding the ioapic lock.
You are right. I have another version of this patch that used eoimap_lock to
protect access to the EOI exit bitmap. Obviously, as you mention below, I
misused eoimap_lock where ioapic->lock is required.
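Something like the following, moved into ioapic.c, should do it (rough sketch,
untested; the helper name is tentative, and a complete version would also
filter by destination the way vmx_check_ioapic_entry does):

	void kvm_ioapic_calculate_eoi_exitmap(struct kvm_vcpu *vcpu,
					      u64 *eoi_exit_bitmap)
	{
		struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
		union kvm_ioapic_redirect_entry *e;
		int index;

		spin_lock(&ioapic->lock);
		for (index = 0; index < IOAPIC_NUM_PINS; index++) {
			e = &ioapic->redirtbl[index];
			/* only vectors that can trigger an EOI exit matter:
			 * level-triggered entries or pins with ack notifiers */
			if (!e->fields.mask &&
			    (e->fields.trig_mode == IOAPIC_LEVEL_TRIG ||
			     kvm_irq_has_notifier(ioapic->kvm,
						  KVM_IRQCHIP_IOAPIC, index)))
				__set_bit(e->fields.vector,
					  (unsigned long *)eoi_exit_bitmap);
		}
		spin_unlock(&ioapic->lock);
	}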
 
>> +
>> +    vmx_load_eoi_exitmap(vcpu);
>> +}
>> +
>>  static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx)
>>  {
>>      u32 exit_intr_info;
>> @@ -7553,6 +7726,10 @@ static struct kvm_x86_ops vmx_x86_ops = {
>>      .update_cr8_intercept = update_cr8_intercept,
>>      .enable_virtual_x2apic_mode = vmx_enable_virtual_x2apic_mode,
>>      .disable_virtual_x2apic_mode = vmx_disable_virtual_x2apic_mode,
>> +    .has_virtual_interrupt_delivery = vmx_has_virtual_interrupt_delivery,
>> +    .update_apic_irq = vmx_update_apic_irq,
>> +    .update_eoi_exitmap = vmx_update_eoi_exitmap,
>> +    .set_svi = vmx_set_svi,
>> 
>>      .set_tss_addr = vmx_set_tss_addr,
>>      .get_tdp_level = get_ept_level,
>> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
>> index 1c9c834..e6d8227 100644
>> --- a/arch/x86/kvm/x86.c
>> +++ b/arch/x86/kvm/x86.c
>> @@ -5527,7 +5527,7 @@ static void inject_pending_event(struct kvm_vcpu *vcpu)
>>                      vcpu->arch.nmi_injected = true;
>>                      kvm_x86_ops->set_nmi(vcpu);
>>              }
>> -    } else if (kvm_cpu_has_interrupt(vcpu)) {
>> +    } else if (kvm_cpu_has_injectable_intr(vcpu)) {
>>              if (kvm_x86_ops->interrupt_allowed(vcpu)) {
>>                      kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu),  
>>                                    
>>  false); @@ -5648,6 +5648,11 @@ static int vcpu_enter_guest(struct
>>  kvm_vcpu *vcpu)                     kvm_handle_pmu_event(vcpu);             
>> if
>>  (kvm_check_request(KVM_REQ_PMI, vcpu))                      
>> kvm_deliver_pmi(vcpu);
>> +            if (kvm_check_request(KVM_REQ_EOIBITMAP, vcpu)) {
>> +                    mutex_lock(&vcpu->kvm->arch.vioapic->eoimap_lock);
> You need to hold the ioapic lock, not the useless eoimap_lock, which
> protects nothing. And no, do not take it here; call a function in
> ioapic.c.
Yes. eoimap_lock is useless in this patch.
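So the request handler in vcpu_enter_guest() would not take any lock itself
and just call into ioapic.c, which locks internally, e.g.:

	if (kvm_check_request(KVM_REQ_EOIBITMAP, vcpu))
		kvm_x86_ops->update_eoi_exitmap(vcpu); /* ioapic.c takes ioapic->lock */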
 
>> +                    kvm_x86_ops->update_eoi_exitmap(vcpu);
>> +                    mutex_unlock(&vcpu->kvm->arch.vioapic->eoimap_lock);
>> +            }
>>      }
>>  
>>      if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
>> @@ -5656,10 +5661,15 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
>>              /* enable NMI/IRQ window open exits if needed */
>>              if (vcpu->arch.nmi_pending)
>>                      kvm_x86_ops->enable_nmi_window(vcpu);
>> -            else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
>> +            else if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
>>                      kvm_x86_ops->enable_irq_window(vcpu);
>>  
>>              if (kvm_lapic_enabled(vcpu)) {
>> +                    /* update architecture specific hints for APIC
>> +                     * virtual interrupt delivery */
>> +                    if (kvm_x86_ops->update_apic_irq)
>> +                            kvm_x86_ops->update_apic_irq(vcpu,
>> +                                          kvm_lapic_find_highest_irr(vcpu));
>>                      update_cr8_intercept(vcpu);
>>                      kvm_lapic_sync_to_vapic(vcpu);
>>              }
>> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
>> index cbe0d68..bc0e261 100644
>> --- a/include/linux/kvm_host.h
>> +++ b/include/linux/kvm_host.h
>> @@ -122,6 +122,7 @@ static inline bool is_error_page(struct page *page)
>>  #define KVM_REQ_WATCHDOG          18
>>  #define KVM_REQ_MASTERCLOCK_UPDATE 19
>>  #define KVM_REQ_MCLOCK_INPROGRESS 20
>> +#define KVM_REQ_EOIBITMAP         21
>> 
>>  #define KVM_USERSPACE_IRQ_SOURCE_ID         0
>>  #define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID    1
>> @@ -537,6 +538,7 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
>>  void kvm_flush_remote_tlbs(struct kvm *kvm);
>>  void kvm_reload_remote_mmus(struct kvm *kvm);
>>  void kvm_make_mclock_inprogress_request(struct kvm *kvm);
>> +void kvm_make_update_eoibitmap_request(struct kvm *kvm);
>> 
>>  long kvm_arch_dev_ioctl(struct file *filp,
>>                      unsigned int ioctl, unsigned long arg);
>> @@ -690,6 +692,7 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level);
>>  int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq,
>>              int level);
>>  int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
>>              int irq_source_id, int level);
>> +bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin);
>>  void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
>>  void kvm_register_irq_ack_notifier(struct kvm *kvm,
>>                                     struct kvm_irq_ack_notifier *kian);
>> diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
>> index f3abbef..e5ccb8f 100644
>> --- a/virt/kvm/ioapic.c
>> +++ b/virt/kvm/ioapic.c
>> @@ -115,6 +115,20 @@ static void update_handled_vectors(struct kvm_ioapic *ioapic)
>>      smp_wmb();
>>  }
>> +void ioapic_update_eoi_exitmap(struct kvm *kvm)
>> +{
>> +#ifdef CONFIG_X86
> Define kvm_apic_vid_enabled() in IA64 instead.
Sure.
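E.g. a trivial stub on the IA64 side (sketch):

	/* ia64 has no vid support, so this is constant false */
	static inline bool kvm_apic_vid_enabled(void)
	{
		return false;
	}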
 
>> +    struct kvm_vcpu *vcpu = kvm->vcpus[0];
>> +    struct kvm_ioapic *ioapic = kvm->arch.vioapic;
>> +
>> +    /* If vid is enabled on one vcpu, then all other
>> +     * vcpus have enabled it as well. */
> Vid state is global for all VM instances.  kvm_apic_vid_enabled() should
> not get vcpu as a parameter.
Agree.
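With the hook made parameterless as above, this becomes roughly:

	static inline bool kvm_apic_vid_enabled(void)
	{
		return kvm_x86_ops->has_virtual_interrupt_delivery();
	}

Then ioapic_update_eoi_exitmap() no longer needs to look at kvm->vcpus[0].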
 
>> +    if (!kvm_apic_vid_enabled(vcpu) || !ioapic)
>> +            return;
>> +    kvm_make_update_eoibitmap_request(kvm);
>> +#endif
>> +}
>> +
>>  static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
>>  {
>>      unsigned index;
>> @@ -156,6 +170,7 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
>>              if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG
>>                  && ioapic->irr & (1 << index))
>>                      ioapic_service(ioapic, index);
>> +            ioapic_update_eoi_exitmap(ioapic->kvm);
>>              break;
>>      }
>>  }
>> @@ -415,6 +430,9 @@ int kvm_ioapic_init(struct kvm *kvm)
>>      ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, ioapic->base_address,
>>                                    IOAPIC_MEM_LENGTH, &ioapic->dev);
>>      mutex_unlock(&kvm->slots_lock);
>> +#ifdef CONFIG_X86
>> +    mutex_init(&ioapic->eoimap_lock);
>> +#endif
>>      if (ret < 0) {
>>              kvm->arch.vioapic = NULL;
>>              kfree(ioapic);
>> diff --git a/virt/kvm/ioapic.h b/virt/kvm/ioapic.h
>> index a30abfe..34544ce 100644
>> --- a/virt/kvm/ioapic.h
>> +++ b/virt/kvm/ioapic.h
>> @@ -47,6 +47,9 @@ struct kvm_ioapic {
>>      void (*ack_notifier)(void *opaque, int irq);
>>      spinlock_t lock;
>>      DECLARE_BITMAP(handled_vectors, 256);
>> +#ifdef CONFIG_X86
>> +    struct mutex eoimap_lock;
>> +#endif
> This lock protects nothing. Drop it.
Right. This is a misuse in this patch.
 
>>  };
>>  
>>  #ifdef DEBUG
>> @@ -82,5 +85,6 @@ int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
>>              struct kvm_lapic_irq *irq);
>>  int kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state);
>>  int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state);
>> +void ioapic_update_eoi_exitmap(struct kvm *kvm);
>> 
>>  #endif
>> diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
>> index 656fa45..64aa1ab 100644
>> --- a/virt/kvm/irq_comm.c
>> +++ b/virt/kvm/irq_comm.c
>> @@ -22,6 +22,7 @@
>> 
>>  #include <linux/kvm_host.h>
>>  #include <linux/slab.h>
>> +#include <linux/export.h>
>>  #include <trace/events/kvm.h>
>>  
>>  #include <asm/msidef.h>
>> @@ -237,6 +238,25 @@ int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level)
>>      return ret;
>>  }
>> +bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin)
>> +{
>> +    struct kvm_irq_ack_notifier *kian;
>> +    struct hlist_node *n;
>> +    int gsi;
>> +
>> +    rcu_read_lock();
>> +    gsi = rcu_dereference(kvm->irq_routing)->chip[irqchip][pin];
>> +    if (gsi != -1)
>> +            hlist_for_each_entry_rcu(kian, n, &kvm->irq_ack_notifier_list,
>> +                                     link)
>> +                    if (kian->gsi == gsi)
>> +                            return true;
>> +    rcu_read_unlock();
>> +
>> +    return false;
>> +}
>> +EXPORT_SYMBOL_GPL(kvm_irq_has_notifier);
>> +
>>  void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
>>  {
>>      struct kvm_irq_ack_notifier *kian;
>> @@ -261,6 +281,7 @@ void kvm_register_irq_ack_notifier(struct kvm *kvm,
>>      mutex_lock(&kvm->irq_lock);
>>      hlist_add_head_rcu(&kian->link, &kvm->irq_ack_notifier_list);
>>      mutex_unlock(&kvm->irq_lock);
>> +    ioapic_update_eoi_exitmap(kvm);
>>  }
>> 
>> @@ -270,6 +291,7 @@ void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
>>      hlist_del_init_rcu(&kian->link);
>>      mutex_unlock(&kvm->irq_lock);
>>      synchronize_rcu();
>> +    ioapic_update_eoi_exitmap(kvm);
>>  }
>>  
>>  int kvm_request_irq_source_id(struct kvm *kvm)
>> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
>> index e45c20c..cc465c6 100644
>> --- a/virt/kvm/kvm_main.c
>> +++ b/virt/kvm/kvm_main.c
>> @@ -217,6 +217,11 @@ void kvm_make_mclock_inprogress_request(struct kvm *kvm)
>>      make_all_cpus_request(kvm, KVM_REQ_MCLOCK_INPROGRESS);
>>  }
>> +void kvm_make_update_eoibitmap_request(struct kvm *kvm)
>> +{
>> +    make_all_cpus_request(kvm, KVM_REQ_EOIBITMAP);
>> +}
>> +
>>  int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
>>  {
>>      struct page *page;
>> --
>> 1.7.1
> 
> --
>                       Gleb.


Best regards,
Yang

