Hi Li,

Thank you for the patch! There is still something to improve:

[auto build test ERROR on kvm/linux-next]
[also build test ERROR on next-20200505]
[cannot apply to tip/auto-latest linus/master linux/master v5.7-rc4]
[if your patch is applied to the wrong git tree, please drop us a note to help
improve the system. BTW, we also suggest using the '--base' option to specify the
base tree when running git format-patch; please see https://stackoverflow.com/a/37406982]
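
For example, a hypothetical invocation (adjust the remote ref and the patch
count to your setup) that records the base tree in the generated patch:

    # record an explicit base commit:
    git format-patch --base=origin/linux-next -1

    # or let git derive it from the configured upstream branch:
    git format-patch --base=auto -1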

url:    https://github.com/0day-ci/linux/commits/Li-RongQing/KVM-X86-support-APERF-MPERF-registers/20200507-023327
base:   https://git.kernel.org/pub/scm/virt/kvm/kvm.git linux-next
config: x86_64-rhel (attached as .config)
compiler: gcc-7 (Ubuntu 7.5.0-6ubuntu2) 7.5.0
reproduce:
        # save the attached .config to the linux build tree
        make ARCH=x86_64 

If you fix the issue, kindly add the following tag as appropriate:
Reported-by: kbuild test robot <l...@intel.com>

Note: it may well be a false-positive warning. FWIW, you are at least aware of it now.
http://gcc.gnu.org/wiki/Better_Uninitialized_Warnings
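
For context, -Wmaybe-uninitialized typically fires on exactly this shape of
code: a variable written under one condition and read under what looks like
the same condition, which GCC cannot always prove equivalent. A minimal
standalone sketch of the pattern (hypothetical stubs, not kernel code;
compile with 'gcc -O2 -Wall -c repro.c', where GCC may emit the same
warning):

    /* repro.c: guarded-write/guarded-read pattern that can trip
     * -Wmaybe-uninitialized.  read_flag() is opaque to the optimizer,
     * so GCC cannot prove that both calls return the same value. */
    extern int read_flag(void);
    extern unsigned long long read_counter(void);

    unsigned long long demo(void)
    {
            unsigned long long val;        /* written only when the flag is set */

            if (read_flag())
                    val = read_counter();  /* guarded write */

            if (read_flag())               /* condition re-evaluated */
                    return val;            /* guarded read: warning points here */

            return 0;
    }

In vcpu_enter_guest() below, the guard is the local bool enable_aperfmperf,
so both uses really do agree; that is why this may well be a false positive.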

All errors (new ones prefixed by >>):

   arch/x86/kvm/x86.c: In function 'vcpu_enter_guest':
>> arch/x86/kvm/x86.c:8219:13: error: 'aperf' may be used uninitialized in this function [-Werror=maybe-uninitialized]
     u64 mperf, aperf;
                ^~~~~
>> arch/x86/kvm/x86.c:8219:6: error: 'mperf' may be used uninitialized in this function [-Werror=maybe-uninitialized]
     u64 mperf, aperf;
         ^~~~~
   cc1: all warnings being treated as errors

vim +/aperf +8219 arch/x86/kvm/x86.c

  8203  
  8204  /*
  8205   * Returns 1 to let vcpu_run() continue the guest execution loop without
  8206   * exiting to the userspace.  Otherwise, the value will be returned to the
  8207   * userspace.
  8208   */
  8209  static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
  8210  {
  8211          int r;
  8212          bool req_int_win =
  8213                  dm_request_for_irq_injection(vcpu) &&
  8214                  kvm_cpu_accept_dm_intr(vcpu);
  8215          enum exit_fastpath_completion exit_fastpath;
  8216  
  8217          bool enable_aperfmperf = guest_aperfmperf_soft(vcpu->kvm);
  8218          bool req_immediate_exit = false;
> 8219          u64 mperf, aperf;
  8220  
  8221          if (kvm_request_pending(vcpu)) {
  8222                  if (kvm_check_request(KVM_REQ_GET_VMCS12_PAGES, vcpu)) {
  8223                          if (unlikely(!kvm_x86_ops.nested_ops->get_vmcs12_pages(vcpu))) {
  8224                                  r = 0;
  8225                                  goto out;
  8226                          }
  8227                  }
  8228                  if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu))
  8229                          kvm_mmu_unload(vcpu);
  8230                  if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu))
  8231                          __kvm_migrate_timers(vcpu);
  8232                  if (kvm_check_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu))
  8233                          kvm_gen_update_masterclock(vcpu->kvm);
  8234                  if (kvm_check_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu))
  8235                          kvm_gen_kvmclock_update(vcpu);
  8236                  if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) {
  8237                          r = kvm_guest_time_update(vcpu);
  8238                          if (unlikely(r))
  8239                                  goto out;
  8240                  }
  8241                  if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu))
  8242                          kvm_mmu_sync_roots(vcpu);
  8243                  if (kvm_check_request(KVM_REQ_LOAD_MMU_PGD, vcpu))
  8244                          kvm_mmu_load_pgd(vcpu);
  8245                  if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
  8246                          kvm_vcpu_flush_tlb_all(vcpu);
  8247  
  8248                          /* Flushing all ASIDs flushes the current ASID... */
  8249                          kvm_clear_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
  8250                  }
  8251                  if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
  8252                          kvm_vcpu_flush_tlb_current(vcpu);
  8253                  if (kvm_check_request(KVM_REQ_HV_TLB_FLUSH, vcpu))
  8254                          kvm_vcpu_flush_tlb_guest(vcpu);
  8255  
  8256                  if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
  8257                          vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
  8258                          r = 0;
  8259                          goto out;
  8260                  }
  8261                  if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) {
  8262                          vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
  8263                          vcpu->mmio_needed = 0;
  8264                          r = 0;
  8265                          goto out;
  8266                  }
  8267                  if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) {
  8268                          /* Page is swapped out. Do synthetic halt */
  8269                          vcpu->arch.apf.halted = true;
  8270                          r = 1;
  8271                          goto out;
  8272                  }
  8273                  if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
  8274                          record_steal_time(vcpu);
  8275                  if (kvm_check_request(KVM_REQ_SMI, vcpu))
  8276                          process_smi(vcpu);
  8277                  if (kvm_check_request(KVM_REQ_NMI, vcpu))
  8278                          process_nmi(vcpu);
  8279                  if (kvm_check_request(KVM_REQ_PMU, vcpu))
  8280                          kvm_pmu_handle_event(vcpu);
  8281                  if (kvm_check_request(KVM_REQ_PMI, vcpu))
  8282                          kvm_pmu_deliver_pmi(vcpu);
  8283                  if (kvm_check_request(KVM_REQ_IOAPIC_EOI_EXIT, vcpu)) {
  8284                          BUG_ON(vcpu->arch.pending_ioapic_eoi > 255);
  8285                          if (test_bit(vcpu->arch.pending_ioapic_eoi,
  8286                                       vcpu->arch.ioapic_handled_vectors)) {
  8287                                  vcpu->run->exit_reason = KVM_EXIT_IOAPIC_EOI;
  8288                                  vcpu->run->eoi.vector =
  8289                                                  vcpu->arch.pending_ioapic_eoi;
  8290                                  r = 0;
  8291                                  goto out;
  8292                          }
  8293                  }
  8294                  if (kvm_check_request(KVM_REQ_SCAN_IOAPIC, vcpu))
  8295                          vcpu_scan_ioapic(vcpu);
  8296                  if (kvm_check_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu))
  8297                          vcpu_load_eoi_exitmap(vcpu);
  8298                  if (kvm_check_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu))
  8299                          kvm_vcpu_reload_apic_access_page(vcpu);
  8300                  if (kvm_check_request(KVM_REQ_HV_CRASH, vcpu)) {
  8301                          vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
  8302                          vcpu->run->system_event.type = KVM_SYSTEM_EVENT_CRASH;
  8303                          r = 0;
  8304                          goto out;
  8305                  }
  8306                  if (kvm_check_request(KVM_REQ_HV_RESET, vcpu)) {
  8307                          vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
  8308                          vcpu->run->system_event.type = KVM_SYSTEM_EVENT_RESET;
  8309                          r = 0;
  8310                          goto out;
  8311                  }
  8312                  if (kvm_check_request(KVM_REQ_HV_EXIT, vcpu)) {
  8313                          vcpu->run->exit_reason = KVM_EXIT_HYPERV;
  8314                          vcpu->run->hyperv = vcpu->arch.hyperv.exit;
  8315                          r = 0;
  8316                          goto out;
  8317                  }
  8318  
  8319                  /*
  8320                   * KVM_REQ_HV_STIMER has to be processed after
  8321                   * KVM_REQ_CLOCK_UPDATE, because Hyper-V SynIC timers
  8322                   * depend on the guest clock being up-to-date
  8323                   */
  8324                  if (kvm_check_request(KVM_REQ_HV_STIMER, vcpu))
  8325                          kvm_hv_process_stimers(vcpu);
  8326                  if (kvm_check_request(KVM_REQ_APICV_UPDATE, vcpu))
  8327                          kvm_vcpu_update_apicv(vcpu);
  8328          }
  8329  
  8330          if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
  8331                  ++vcpu->stat.req_event;
  8332                  kvm_apic_accept_events(vcpu);
  8333                  if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
  8334                          r = 1;
  8335                          goto out;
  8336                  }
  8337  
  8338                  if (inject_pending_event(vcpu) != 0)
  8339                          req_immediate_exit = true;
  8340                  else {
  8341                          /* Enable SMI/NMI/IRQ window open exits if needed.
  8342                           *
  8343                           * SMIs have three cases:
  8344                           * 1) They can be nested, and then there is nothing to
  8345                           *    do here because RSM will cause a vmexit anyway.
  8346                           * 2) There is an ISA-specific reason why SMI cannot be
  8347                           *    injected, and the moment when this changes can be
  8348                           *    intercepted.
  8349                           * 3) Or the SMI can be pending because
  8350                           *    inject_pending_event has completed the injection
  8351                           *    of an IRQ or NMI from the previous vmexit, and
  8352                           *    then we request an immediate exit to inject the
  8353                           *    SMI.
  8354                           */
  8355                          if (vcpu->arch.smi_pending && !is_smm(vcpu))
  8356                                  if (!kvm_x86_ops.enable_smi_window(vcpu))
  8357                                          req_immediate_exit = true;
  8358                          if (vcpu->arch.nmi_pending)
  8359                                  kvm_x86_ops.enable_nmi_window(vcpu);
  8360                          if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
  8361                                  kvm_x86_ops.enable_irq_window(vcpu);
  8362                          WARN_ON(vcpu->arch.exception.pending);
  8363                  }
  8364  
  8365                  if (kvm_lapic_enabled(vcpu)) {
  8366                          update_cr8_intercept(vcpu);
  8367                          kvm_lapic_sync_to_vapic(vcpu);
  8368                  }
  8369          }
  8370  
  8371          r = kvm_mmu_reload(vcpu);
  8372          if (unlikely(r)) {
  8373                  goto cancel_injection;
  8374          }
  8375  
  8376          preempt_disable();
  8377  
  8378          if (unlikely(enable_aperfmperf))
  8379                  guest_enter_aperfmperf(&mperf, &aperf);
  8380  
  8381          kvm_x86_ops.prepare_guest_switch(vcpu);
  8382  
  8383          /*
  8384           * Disable IRQs before setting IN_GUEST_MODE.  Posted interrupt
  8385           * IPI are then delayed after guest entry, which ensures that they
  8386           * result in virtual interrupt delivery.
  8387           */
  8388          local_irq_disable();
  8389          vcpu->mode = IN_GUEST_MODE;
  8390  
  8391          srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
  8392  
  8393          /*
  8394           * 1) We should set ->mode before checking ->requests.  Please see
  8395           * the comment in kvm_vcpu_exiting_guest_mode().
  8396           *
  8397           * 2) For APICv, we should set ->mode before checking PID.ON. This
  8398           * pairs with the memory barrier implicit in pi_test_and_set_on
  8399           * (see vmx_deliver_posted_interrupt).
  8400           *
  8401           * 3) This also orders the write to mode from any reads to the page
  8402           * tables done while the VCPU is running.  Please see the comment
  8403           * in kvm_flush_remote_tlbs.
  8404           */
  8405          smp_mb__after_srcu_read_unlock();
  8406  
  8407          /*
  8408           * This handles the case where a posted interrupt was
  8409           * notified with kvm_vcpu_kick.
  8410           */
  8411          if (kvm_lapic_enabled(vcpu) && vcpu->arch.apicv_active)
  8412                  kvm_x86_ops.sync_pir_to_irr(vcpu);
  8413  
  8414          if (vcpu->mode == EXITING_GUEST_MODE || kvm_request_pending(vcpu)
  8415              || need_resched() || signal_pending(current)) {
  8416                  vcpu->mode = OUTSIDE_GUEST_MODE;
  8417                  smp_wmb();
  8418                  local_irq_enable();
  8419                  preempt_enable();
  8420                  vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
  8421                  r = 1;
  8422                  goto cancel_injection;
  8423          }
  8424  
  8425          if (req_immediate_exit) {
  8426                  kvm_make_request(KVM_REQ_EVENT, vcpu);
  8427                  kvm_x86_ops.request_immediate_exit(vcpu);
  8428          }
  8429  
  8430          trace_kvm_entry(vcpu->vcpu_id);
  8431          guest_enter_irqoff();
  8432  
  8433          fpregs_assert_state_consistent();
  8434          if (test_thread_flag(TIF_NEED_FPU_LOAD))
  8435                  switch_fpu_return();
  8436  
  8437          if (unlikely(vcpu->arch.switch_db_regs)) {
  8438                  set_debugreg(0, 7);
  8439                  set_debugreg(vcpu->arch.eff_db[0], 0);
  8440                  set_debugreg(vcpu->arch.eff_db[1], 1);
  8441                  set_debugreg(vcpu->arch.eff_db[2], 2);
  8442                  set_debugreg(vcpu->arch.eff_db[3], 3);
  8443                  set_debugreg(vcpu->arch.dr6, 6);
  8444                  vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD;
  8445          }
  8446  
  8447          exit_fastpath = kvm_x86_ops.run(vcpu);
  8448  
  8449          /*
  8450           * Do this here before restoring debug registers on the host.  And
  8451           * since we do this before handling the vmexit, a DR access vmexit
  8452           * can (a) read the correct value of the debug registers, (b) set
  8453           * KVM_DEBUGREG_WONT_EXIT again.
  8454           */
  8455          if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) {
  8456                  WARN_ON(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP);
  8457                  kvm_x86_ops.sync_dirty_debug_regs(vcpu);
  8458                  kvm_update_dr0123(vcpu);
  8459                  kvm_update_dr6(vcpu);
  8460                  kvm_update_dr7(vcpu);
  8461                  vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD;
  8462          }
  8463  
  8464          /*
  8465           * If the guest has used debug registers, at least dr7
  8466           * will be disabled while returning to the host.
  8467           * If we don't have active breakpoints in the host, we don't
  8468           * care about the messed up debug address registers. But if
  8469           * we have some of them active, restore the old state.
  8470           */
  8471          if (hw_breakpoint_active())
  8472                  hw_breakpoint_restore();
  8473  
  8474          vcpu->arch.last_guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
  8475  
  8476          vcpu->mode = OUTSIDE_GUEST_MODE;
  8477          smp_wmb();
  8478  
  8479          kvm_x86_ops.handle_exit_irqoff(vcpu);
  8480  
  8481          /*
  8482           * Consume any pending interrupts, including the possible source of
  8483           * VM-Exit on SVM and any ticks that occur between VM-Exit and now.
  8484           * An instruction is required after local_irq_enable() to fully unblock
  8485           * interrupts on processors that implement an interrupt shadow, the
  8486           * stat.exits increment will do nicely.
  8487           */
  8488          kvm_before_interrupt(vcpu);
  8489          local_irq_enable();
  8490          ++vcpu->stat.exits;
  8491          local_irq_disable();
  8492          kvm_after_interrupt(vcpu);
  8493  
  8494          guest_exit_irqoff();
  8495          if (lapic_in_kernel(vcpu)) {
  8496                  s64 delta = vcpu->arch.apic->lapic_timer.advance_expire_delta;
  8497                  if (delta != S64_MIN) {
  8498                          trace_kvm_wait_lapic_expire(vcpu->vcpu_id, delta);
  8499                          vcpu->arch.apic->lapic_timer.advance_expire_delta = S64_MIN;
  8500                  }
  8501          }
  8502  
  8503          local_irq_enable();
  8504  
  8505          if (unlikely(enable_aperfmperf))
  8506                  guest_exit_aperfmperf(vcpu, mperf, aperf);
  8507  
  8508          preempt_enable();
  8509  
  8510          vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
  8511  
  8512          /*
  8513           * Profile KVM exit RIPs:
  8514           */
  8515          if (unlikely(prof_on == KVM_PROFILING)) {
  8516                  unsigned long rip = kvm_rip_read(vcpu);
  8517                  profile_hit(KVM_PROFILING, (void *)rip);
  8518          }
  8519  
  8520          if (unlikely(vcpu->arch.tsc_always_catchup))
  8521                  kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
  8522  
  8523          if (vcpu->arch.apic_attention)
  8524                  kvm_lapic_sync_from_vapic(vcpu);
  8525  
  8526          r = kvm_x86_ops.handle_exit(vcpu, exit_fastpath);
  8527          return r;
  8528  
  8529  cancel_injection:
  8530          kvm_x86_ops.cancel_injection(vcpu);
  8531          if (unlikely(vcpu->arch.apic_attention))
  8532                  kvm_lapic_sync_from_vapic(vcpu);
  8533  out:
  8534          return r;
  8535  }
  8536  
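
If this is indeed a false positive, one low-risk way to silence it, assuming
the guest_enter_aperfmperf()/guest_exit_aperfmperf() pairing quoted above, is
to initialize both counters at their declaration. An untested sketch against
the quoted code, not a definitive fix:

    -       u64 mperf, aperf;
    +       u64 mperf = 0, aperf = 0;

Since the only write (in guest_enter_aperfmperf()) and the only read (in
guest_exit_aperfmperf()) are both guarded by enable_aperfmperf, the extra
initialization is semantically a no-op; it merely lets the compiler prove
the values are always defined.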

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-...@lists.01.org

Attachment: .config.gz
