tree:   https://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git perf/core
head:   3ce5aceb5dee298b082adfa2baa0df5a447c1b0b
commit: faaeff98666c24376cebd0b106504d05a36881d1 [24/33] perf/x86/intel: Add more Icelake CPUIDs
config: x86_64-rhel-7.6 (attached as .config)
compiler: gcc-7 (Debian 7.3.0-1) 7.3.0
reproduce:
        git checkout faaeff98666c24376cebd0b106504d05a36881d1
        # save the attached .config to linux build tree
        make ARCH=x86_64 

If you fix the issue, kindly add the following tag
Reported-by: kbuild test robot <l...@intel.com>

All errors (new ones prefixed by >>):

   arch/x86//events/intel/core.c: In function 'intel_pmu_init':
   arch/x86//events/intel/core.c:4989:7: error: 'INTEL_FAM6_ICELAKE_X' undeclared (first use in this function); did you mean 'INTEL_FAM6_SKYLAKE_X'?
     case INTEL_FAM6_ICELAKE_X:
          ^~~~~~~~~~~~~~~~~~~~
          INTEL_FAM6_SKYLAKE_X
   arch/x86//events/intel/core.c:4989:7: note: each undeclared identifier is reported only once for each function it appears in
>> arch/x86//events/intel/core.c:4990:7: error: 'INTEL_FAM6_ICELAKE_XEON_D' undeclared (first use in this function); did you mean 'INTEL_FAM6_ICELAKE_X'?
     case INTEL_FAM6_ICELAKE_XEON_D:
          ^~~~~~~~~~~~~~~~~~~~~~~~~
          INTEL_FAM6_ICELAKE_X
   arch/x86//events/intel/core.c:4993:7: error: 'INTEL_FAM6_ICELAKE_DESKTOP' undeclared (first use in this function); did you mean 'INTEL_FAM6_SKYLAKE_DESKTOP'?
     case INTEL_FAM6_ICELAKE_DESKTOP:
          ^~~~~~~~~~~~~~~~~~~~~~~~~~
          INTEL_FAM6_SKYLAKE_DESKTOP

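The undeclared identifiers are CPU model defines from arch/x86/include/asm/intel_family.h; the failure suggests this branch is missing the x86/cpu change that introduces the new Icelake model numbers (INTEL_FAM6_ICELAKE_MOBILE, used by the same switch, already resolves). A minimal sketch of the missing definitions follows; the model values are assumptions taken from the eventual upstream additions and should be verified against the prerequisite commit before applying:

        /*
         * arch/x86/include/asm/intel_family.h: sketch of the missing entries.
         * Model numbers below are assumptions based on the later upstream
         * definitions; confirm against the prerequisite x86/cpu commit.
         */
        #define INTEL_FAM6_ICELAKE_X            0x6A
        #define INTEL_FAM6_ICELAKE_XEON_D       0x6C
        #define INTEL_FAM6_ICELAKE_DESKTOP      0x7D
        /* INTEL_FAM6_ICELAKE_MOBILE (0x7E) is already defined, per the log above */
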
vim +4990 arch/x86//events/intel/core.c

  4474  
  4475  __init int intel_pmu_init(void)
  4476  {
  4477          struct attribute **extra_skl_attr = &empty_attrs;
  4478          struct attribute **extra_attr = &empty_attrs;
  4479          struct attribute **td_attr    = &empty_attrs;
  4480          struct attribute **mem_attr   = &empty_attrs;
  4481          struct attribute **tsx_attr   = &empty_attrs;
  4482          union cpuid10_edx edx;
  4483          union cpuid10_eax eax;
  4484          union cpuid10_ebx ebx;
  4485          struct event_constraint *c;
  4486          unsigned int unused;
  4487          struct extra_reg *er;
  4488          bool pmem = false;
  4489          int version, i;
  4490          char *name;
  4491  
  4492          if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
  4493                  switch (boot_cpu_data.x86) {
  4494                  case 0x6:
  4495                          return p6_pmu_init();
  4496                  case 0xb:
  4497                          return knc_pmu_init();
  4498                  case 0xf:
  4499                          return p4_pmu_init();
  4500                  }
  4501                  return -ENODEV;
  4502          }
  4503  
  4504          /*
  4505           * Check whether the Architectural PerfMon supports
  4506           * Branch Misses Retired hw_event or not.
  4507           */
  4508          cpuid(10, &eax.full, &ebx.full, &unused, &edx.full);
  4509          if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT)
  4510                  return -ENODEV;
  4511  
  4512          version = eax.split.version_id;
  4513          if (version < 2)
  4514                  x86_pmu = core_pmu;
  4515          else
  4516                  x86_pmu = intel_pmu;
  4517  
  4518          x86_pmu.version                 = version;
  4519          x86_pmu.num_counters            = eax.split.num_counters;
  4520          x86_pmu.cntval_bits             = eax.split.bit_width;
  4521          x86_pmu.cntval_mask             = (1ULL << eax.split.bit_width) - 1;
  4522  
  4523          x86_pmu.events_maskl            = ebx.full;
  4524          x86_pmu.events_mask_len         = eax.split.mask_length;
  4525  
  4526          x86_pmu.max_pebs_events         = min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters);
  4527  
  4528          /*
  4529           * Quirk: v2 perfmon does not report fixed-purpose events, so
  4530           * assume at least 3 events, when not running in a hypervisor:
  4531           */
  4532          if (version > 1) {
  4533                  int assume = 3 * !boot_cpu_has(X86_FEATURE_HYPERVISOR);
  4534  
  4535                  x86_pmu.num_counters_fixed =
  4536                          max((int)edx.split.num_counters_fixed, assume);
  4537          }
  4538  
  4539          if (version >= 4)
  4540                  x86_pmu.counter_freezing = !disable_counter_freezing;
  4541  
  4542          if (boot_cpu_has(X86_FEATURE_PDCM)) {
  4543                  u64 capabilities;
  4544  
  4545                  rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
  4546                  x86_pmu.intel_cap.capabilities = capabilities;
  4547          }
  4548  
  4549          intel_ds_init();
  4550  
  4551          x86_add_quirk(intel_arch_events_quirk); /* Install first, so it runs last */
  4552  
  4553          /*
  4554           * Install the hw-cache-events table:
  4555           */
  4556          switch (boot_cpu_data.x86_model) {
  4557          case INTEL_FAM6_CORE_YONAH:
  4558                  pr_cont("Core events, ");
  4559                  name = "core";
  4560                  break;
  4561  
  4562          case INTEL_FAM6_CORE2_MEROM:
  4563                  x86_add_quirk(intel_clovertown_quirk);
  4564                  /* fall through */
  4565  
  4566          case INTEL_FAM6_CORE2_MEROM_L:
  4567          case INTEL_FAM6_CORE2_PENRYN:
  4568          case INTEL_FAM6_CORE2_DUNNINGTON:
  4569                  memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
  4570                         sizeof(hw_cache_event_ids));
  4571  
  4572                  intel_pmu_lbr_init_core();
  4573  
  4574                  x86_pmu.event_constraints = intel_core2_event_constraints;
  4575                  x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints;
  4576                  pr_cont("Core2 events, ");
  4577                  name = "core2";
  4578                  break;
  4579  
  4580          case INTEL_FAM6_NEHALEM:
  4581          case INTEL_FAM6_NEHALEM_EP:
  4582          case INTEL_FAM6_NEHALEM_EX:
  4583                  memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
  4584                         sizeof(hw_cache_event_ids));
  4585                  memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
  4586                         sizeof(hw_cache_extra_regs));
  4587  
  4588                  intel_pmu_lbr_init_nhm();
  4589  
  4590                  x86_pmu.event_constraints = intel_nehalem_event_constraints;
  4591                  x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
  4592                  x86_pmu.enable_all = intel_pmu_nhm_enable_all;
  4593                  x86_pmu.extra_regs = intel_nehalem_extra_regs;
  4594  
  4595                  mem_attr = nhm_mem_events_attrs;
  4596  
  4597                  /* UOPS_ISSUED.STALLED_CYCLES */
  4598                  intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
  4599                          X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
  4600                  /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
  4601                  intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
  4602                          X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
  4603  
  4604                  intel_pmu_pebs_data_source_nhm();
  4605                  x86_add_quirk(intel_nehalem_quirk);
  4606                  x86_pmu.pebs_no_tlb = 1;
  4607                  extra_attr = nhm_format_attr;
  4608  
  4609                  pr_cont("Nehalem events, ");
  4610                  name = "nehalem";
  4611                  break;
  4612  
  4613          case INTEL_FAM6_ATOM_BONNELL:
  4614          case INTEL_FAM6_ATOM_BONNELL_MID:
  4615          case INTEL_FAM6_ATOM_SALTWELL:
  4616          case INTEL_FAM6_ATOM_SALTWELL_MID:
  4617          case INTEL_FAM6_ATOM_SALTWELL_TABLET:
  4618                  memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
  4619                         sizeof(hw_cache_event_ids));
  4620  
  4621                  intel_pmu_lbr_init_atom();
  4622  
  4623                  x86_pmu.event_constraints = intel_gen_event_constraints;
  4624                  x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints;
  4625                  x86_pmu.pebs_aliases = intel_pebs_aliases_core2;
  4626                  pr_cont("Atom events, ");
  4627                  name = "bonnell";
  4628                  break;
  4629  
  4630          case INTEL_FAM6_ATOM_SILVERMONT:
  4631          case INTEL_FAM6_ATOM_SILVERMONT_X:
  4632          case INTEL_FAM6_ATOM_SILVERMONT_MID:
  4633          case INTEL_FAM6_ATOM_AIRMONT:
  4634          case INTEL_FAM6_ATOM_AIRMONT_MID:
  4635                  memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
  4636                          sizeof(hw_cache_event_ids));
  4637                  memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs,
  4638                         sizeof(hw_cache_extra_regs));
  4639  
  4640                  intel_pmu_lbr_init_slm();
  4641  
  4642                  x86_pmu.event_constraints = intel_slm_event_constraints;
  4643                  x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
  4644                  x86_pmu.extra_regs = intel_slm_extra_regs;
  4645                  x86_pmu.flags |= PMU_FL_HAS_RSP_1;
  4646                  td_attr = slm_events_attrs;
  4647                  extra_attr = slm_format_attr;
  4648                  pr_cont("Silvermont events, ");
  4649                  name = "silvermont";
  4650                  break;
  4651  
  4652          case INTEL_FAM6_ATOM_GOLDMONT:
  4653          case INTEL_FAM6_ATOM_GOLDMONT_X:
  4654                  x86_add_quirk(intel_counter_freezing_quirk);
  4655                  memcpy(hw_cache_event_ids, glm_hw_cache_event_ids,
  4656                         sizeof(hw_cache_event_ids));
  4657                  memcpy(hw_cache_extra_regs, glm_hw_cache_extra_regs,
  4658                         sizeof(hw_cache_extra_regs));
  4659  
  4660                  intel_pmu_lbr_init_skl();
  4661  
  4662                  x86_pmu.event_constraints = intel_slm_event_constraints;
  4663                  x86_pmu.pebs_constraints = intel_glm_pebs_event_constraints;
  4664                  x86_pmu.extra_regs = intel_glm_extra_regs;
  4665                  /*
  4666                   * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
  4667                   * for precise cycles.
  4668                   * :pp is identical to :ppp
  4669                   */
  4670                  x86_pmu.pebs_aliases = NULL;
  4671                  x86_pmu.pebs_prec_dist = true;
  4672                  x86_pmu.lbr_pt_coexist = true;
  4673                  x86_pmu.flags |= PMU_FL_HAS_RSP_1;
  4674                  td_attr = glm_events_attrs;
  4675                  extra_attr = slm_format_attr;
  4676                  pr_cont("Goldmont events, ");
  4677                  name = "goldmont";
  4678                  break;
  4679  
  4680          case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
  4681                  x86_add_quirk(intel_counter_freezing_quirk);
  4682                  memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
  4683                         sizeof(hw_cache_event_ids));
  4684                  memcpy(hw_cache_extra_regs, glp_hw_cache_extra_regs,
  4685                         sizeof(hw_cache_extra_regs));
  4686  
  4687                  intel_pmu_lbr_init_skl();
  4688  
  4689                  x86_pmu.event_constraints = intel_slm_event_constraints;
  4690                  x86_pmu.extra_regs = intel_glm_extra_regs;
  4691                  /*
  4692                   * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
  4693                   * for precise cycles.
  4694                   */
  4695                  x86_pmu.pebs_aliases = NULL;
  4696                  x86_pmu.pebs_prec_dist = true;
  4697                  x86_pmu.lbr_pt_coexist = true;
  4698                  x86_pmu.flags |= PMU_FL_HAS_RSP_1;
  4699                  x86_pmu.flags |= PMU_FL_PEBS_ALL;
  4700                  x86_pmu.get_event_constraints = glp_get_event_constraints;
  4701                  td_attr = glm_events_attrs;
  4702                  /* Goldmont Plus has 4-wide pipeline */
  4703                  event_attr_td_total_slots_scale_glm.event_str = "4";
  4704                  extra_attr = slm_format_attr;
  4705                  pr_cont("Goldmont plus events, ");
  4706                  name = "goldmont_plus";
  4707                  break;
  4708  
  4709          case INTEL_FAM6_ATOM_TREMONT_X:
  4710                  x86_pmu.late_ack = true;
  4711                  memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
  4712                         sizeof(hw_cache_event_ids));
  4713                  memcpy(hw_cache_extra_regs, tnt_hw_cache_extra_regs,
  4714                         sizeof(hw_cache_extra_regs));
  4715                  hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1;
  4716  
  4717                  intel_pmu_lbr_init_skl();
  4718  
  4719                  x86_pmu.event_constraints = intel_slm_event_constraints;
  4720                  x86_pmu.extra_regs = intel_tnt_extra_regs;
  4721                  /*
  4722                   * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
  4723                   * for precise cycles.
  4724                   */
  4725                  x86_pmu.pebs_aliases = NULL;
  4726                  x86_pmu.pebs_prec_dist = true;
  4727                  x86_pmu.lbr_pt_coexist = true;
  4728                  x86_pmu.flags |= PMU_FL_HAS_RSP_1;
  4729                  x86_pmu.get_event_constraints = tnt_get_event_constraints;
  4730                  extra_attr = slm_format_attr;
  4731                  pr_cont("Tremont events, ");
  4732                  name = "Tremont";
  4733                  break;
  4734  
  4735          case INTEL_FAM6_WESTMERE:
  4736          case INTEL_FAM6_WESTMERE_EP:
  4737          case INTEL_FAM6_WESTMERE_EX:
  4738                  memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
  4739                         sizeof(hw_cache_event_ids));
  4740                  memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
  4741                         sizeof(hw_cache_extra_regs));
  4742  
  4743                  intel_pmu_lbr_init_nhm();
  4744  
  4745                  x86_pmu.event_constraints = intel_westmere_event_constraints;
  4746                  x86_pmu.enable_all = intel_pmu_nhm_enable_all;
  4747                  x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
  4748                  x86_pmu.extra_regs = intel_westmere_extra_regs;
  4749                  x86_pmu.flags |= PMU_FL_HAS_RSP_1;
  4750  
  4751                  mem_attr = nhm_mem_events_attrs;
  4752  
  4753                  /* UOPS_ISSUED.STALLED_CYCLES */
  4754                  intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
  4755                          X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
  4756                  /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
  4757                  intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
  4758                          X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
  4759  
  4760                  intel_pmu_pebs_data_source_nhm();
  4761                  extra_attr = nhm_format_attr;
  4762                  pr_cont("Westmere events, ");
  4763                  name = "westmere";
  4764                  break;
  4765  
  4766          case INTEL_FAM6_SANDYBRIDGE:
  4767          case INTEL_FAM6_SANDYBRIDGE_X:
  4768                  x86_add_quirk(intel_sandybridge_quirk);
  4769                  x86_add_quirk(intel_ht_bug);
  4770                  memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
  4771                         sizeof(hw_cache_event_ids));
  4772                  memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
  4773                         sizeof(hw_cache_extra_regs));
  4774  
  4775                  intel_pmu_lbr_init_snb();
  4776  
  4777                  x86_pmu.event_constraints = intel_snb_event_constraints;
  4778                  x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
  4779                  x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
  4780                  if (boot_cpu_data.x86_model == INTEL_FAM6_SANDYBRIDGE_X)
  4781                          x86_pmu.extra_regs = intel_snbep_extra_regs;
  4782                  else
  4783                          x86_pmu.extra_regs = intel_snb_extra_regs;
  4784  
  4785  
  4786                  /* all extra regs are per-cpu when HT is on */
  4787                  x86_pmu.flags |= PMU_FL_HAS_RSP_1;
  4788                  x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
  4789  
  4790                  td_attr  = snb_events_attrs;
  4791                  mem_attr = snb_mem_events_attrs;
  4792  
  4793                  /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
  4794                  intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
  4795                          X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
  4796                  /* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles*/
  4797                  intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
  4798                          X86_CONFIG(.event=0xb1, .umask=0x01, .inv=1, .cmask=1);
  4799  
  4800                  extra_attr = nhm_format_attr;
  4801  
  4802                  pr_cont("SandyBridge events, ");
  4803                  name = "sandybridge";
  4804                  break;
  4805  
  4806          case INTEL_FAM6_IVYBRIDGE:
  4807          case INTEL_FAM6_IVYBRIDGE_X:
  4808                  x86_add_quirk(intel_ht_bug);
  4809                  memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
  4810                         sizeof(hw_cache_event_ids));
  4811                  /* dTLB-load-misses on IVB is different than SNB */
  4812                  hw_cache_event_ids[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = 0x8108; /* DTLB_LOAD_MISSES.DEMAND_LD_MISS_CAUSES_A_WALK */
  4813  
  4814                  memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
  4815                         sizeof(hw_cache_extra_regs));
  4816  
  4817                  intel_pmu_lbr_init_snb();
  4818  
  4819                  x86_pmu.event_constraints = intel_ivb_event_constraints;
  4820                  x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
  4821                  x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
  4822                  x86_pmu.pebs_prec_dist = true;
  4823                  if (boot_cpu_data.x86_model == INTEL_FAM6_IVYBRIDGE_X)
  4824                          x86_pmu.extra_regs = intel_snbep_extra_regs;
  4825                  else
  4826                          x86_pmu.extra_regs = intel_snb_extra_regs;
  4827                  /* all extra regs are per-cpu when HT is on */
  4828                  x86_pmu.flags |= PMU_FL_HAS_RSP_1;
  4829                  x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
  4830  
  4831                  td_attr  = snb_events_attrs;
  4832                  mem_attr = snb_mem_events_attrs;
  4833  
  4834                  /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
  4835                  intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
  4836                          X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
  4837  
  4838                  extra_attr = nhm_format_attr;
  4839  
  4840                  pr_cont("IvyBridge events, ");
  4841                  name = "ivybridge";
  4842                  break;
  4843  
  4844  
  4845          case INTEL_FAM6_HASWELL_CORE:
  4846          case INTEL_FAM6_HASWELL_X:
  4847          case INTEL_FAM6_HASWELL_ULT:
  4848          case INTEL_FAM6_HASWELL_GT3E:
  4849                  x86_add_quirk(intel_ht_bug);
  4850                  x86_add_quirk(intel_pebs_isolation_quirk);
  4851                  x86_pmu.late_ack = true;
  4852                  memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
  4853                  memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
  4854  
  4855                  intel_pmu_lbr_init_hsw();
  4856  
  4857                  x86_pmu.event_constraints = intel_hsw_event_constraints;
  4858                  x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints;
  4859                  x86_pmu.extra_regs = intel_snbep_extra_regs;
  4860                  x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
  4861                  x86_pmu.pebs_prec_dist = true;
  4862                  /* all extra regs are per-cpu when HT is on */
  4863                  x86_pmu.flags |= PMU_FL_HAS_RSP_1;
  4864                  x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
  4865  
  4866                  x86_pmu.hw_config = hsw_hw_config;
  4867                  x86_pmu.get_event_constraints = hsw_get_event_constraints;
  4868                  x86_pmu.lbr_double_abort = true;
  4869                  extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
  4870                          hsw_format_attr : nhm_format_attr;
  4871                  td_attr  = hsw_events_attrs;
  4872                  mem_attr = hsw_mem_events_attrs;
  4873                  tsx_attr = hsw_tsx_events_attrs;
  4874                  pr_cont("Haswell events, ");
  4875                  name = "haswell";
  4876                  break;
  4877  
  4878          case INTEL_FAM6_BROADWELL_CORE:
  4879          case INTEL_FAM6_BROADWELL_XEON_D:
  4880          case INTEL_FAM6_BROADWELL_GT3E:
  4881          case INTEL_FAM6_BROADWELL_X:
  4882                  x86_add_quirk(intel_pebs_isolation_quirk);
  4883                  x86_pmu.late_ack = true;
  4884                  memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
  4885                  memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
  4886  
  4887                  /* L3_MISS_LOCAL_DRAM is BIT(26) in Broadwell */
  4888                  hw_cache_extra_regs[C(LL)][C(OP_READ)][C(RESULT_MISS)] = HSW_DEMAND_READ |
                                                                                 BDW_L3_MISS|HSW_SNOOP_DRAM;
  4890                  hw_cache_extra_regs[C(LL)][C(OP_WRITE)][C(RESULT_MISS)] = HSW_DEMAND_WRITE|BDW_L3_MISS|
                                                                                  HSW_SNOOP_DRAM;
  4892                  hw_cache_extra_regs[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = HSW_DEMAND_READ|
                                                                                     BDW_L3_MISS_LOCAL|HSW_SNOOP_DRAM;
  4894                  hw_cache_extra_regs[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = HSW_DEMAND_WRITE|
                                                                                      BDW_L3_MISS_LOCAL|HSW_SNOOP_DRAM;
  4896  
  4897                  intel_pmu_lbr_init_hsw();
  4898  
  4899                  x86_pmu.event_constraints = intel_bdw_event_constraints;
  4900                  x86_pmu.pebs_constraints = intel_bdw_pebs_event_constraints;
  4901                  x86_pmu.extra_regs = intel_snbep_extra_regs;
  4902                  x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
  4903                  x86_pmu.pebs_prec_dist = true;
  4904                  /* all extra regs are per-cpu when HT is on */
  4905                  x86_pmu.flags |= PMU_FL_HAS_RSP_1;
  4906                  x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
  4907  
  4908                  x86_pmu.hw_config = hsw_hw_config;
  4909                  x86_pmu.get_event_constraints = hsw_get_event_constraints;
  4910                  x86_pmu.limit_period = bdw_limit_period;
  4911                  extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
  4912                          hsw_format_attr : nhm_format_attr;
  4913                  td_attr  = hsw_events_attrs;
  4914                  mem_attr = hsw_mem_events_attrs;
  4915                  tsx_attr = hsw_tsx_events_attrs;
  4916                  pr_cont("Broadwell events, ");
  4917                  name = "broadwell";
  4918                  break;
  4919  
  4920          case INTEL_FAM6_XEON_PHI_KNL:
  4921          case INTEL_FAM6_XEON_PHI_KNM:
  4922                  memcpy(hw_cache_event_ids,
  4923                         slm_hw_cache_event_ids, sizeof(hw_cache_event_ids));
  4924                  memcpy(hw_cache_extra_regs,
  4925                         knl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
  4926                  intel_pmu_lbr_init_knl();
  4927  
  4928                  x86_pmu.event_constraints = intel_slm_event_constraints;
  4929                  x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
  4930                  x86_pmu.extra_regs = intel_knl_extra_regs;
  4931  
  4932                  /* all extra regs are per-cpu when HT is on */
  4933                  x86_pmu.flags |= PMU_FL_HAS_RSP_1;
  4934                  x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
  4935                  extra_attr = slm_format_attr;
  4936                  pr_cont("Knights Landing/Mill events, ");
  4937                  name = "knights-landing";
  4938                  break;
  4939  
  4940          case INTEL_FAM6_SKYLAKE_X:
  4941                  pmem = true;
  4942          case INTEL_FAM6_SKYLAKE_MOBILE:
  4943          case INTEL_FAM6_SKYLAKE_DESKTOP:
  4944          case INTEL_FAM6_KABYLAKE_MOBILE:
  4945          case INTEL_FAM6_KABYLAKE_DESKTOP:
  4946                  x86_add_quirk(intel_pebs_isolation_quirk);
  4947                  x86_pmu.late_ack = true;
  4948                  memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
  4949                  memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
  4950                  intel_pmu_lbr_init_skl();
  4951  
  4952                  /* INT_MISC.RECOVERY_CYCLES has umask 1 in Skylake */
  4953                  event_attr_td_recovery_bubbles.event_str_noht =
  4954                          "event=0xd,umask=0x1,cmask=1";
  4955                  event_attr_td_recovery_bubbles.event_str_ht =
  4956                          "event=0xd,umask=0x1,cmask=1,any=1";
  4957  
  4958                  x86_pmu.event_constraints = intel_skl_event_constraints;
  4959                  x86_pmu.pebs_constraints = intel_skl_pebs_event_constraints;
  4960                  x86_pmu.extra_regs = intel_skl_extra_regs;
  4961                  x86_pmu.pebs_aliases = intel_pebs_aliases_skl;
  4962                  x86_pmu.pebs_prec_dist = true;
  4963                  /* all extra regs are per-cpu when HT is on */
  4964                  x86_pmu.flags |= PMU_FL_HAS_RSP_1;
  4965                  x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
  4966  
  4967                  x86_pmu.hw_config = hsw_hw_config;
  4968                  x86_pmu.get_event_constraints = hsw_get_event_constraints;
  4969                  extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
  4970                          hsw_format_attr : nhm_format_attr;
  4971                  extra_skl_attr = skl_format_attr;
  4972                  td_attr  = hsw_events_attrs;
  4973                  mem_attr = hsw_mem_events_attrs;
  4974                  tsx_attr = hsw_tsx_events_attrs;
  4975                  intel_pmu_pebs_data_source_skl(pmem);
  4976  
  4977                  if (boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT)) {
  4978                          x86_pmu.flags |= PMU_FL_TFA;
  4979                          x86_pmu.get_event_constraints = tfa_get_event_constraints;
  4980                          x86_pmu.enable_all = intel_tfa_pmu_enable_all;
  4981                          x86_pmu.commit_scheduling = intel_tfa_commit_scheduling;
  4982                          intel_pmu_attrs[1] = &dev_attr_allow_tsx_force_abort.attr;
  4983                  }
  4984  
  4985                  pr_cont("Skylake events, ");
  4986                  name = "skylake";
  4987                  break;
  4988  
> 4989          case INTEL_FAM6_ICELAKE_X:
> 4990          case INTEL_FAM6_ICELAKE_XEON_D:
  4991                  pmem = true;
  4992          case INTEL_FAM6_ICELAKE_MOBILE:
  4993          case INTEL_FAM6_ICELAKE_DESKTOP:
  4994                  x86_pmu.late_ack = true;
  4995                  memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
  4996                  memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
  4997                  hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1;
  4998                  intel_pmu_lbr_init_skl();
  4999  
  5000                  x86_pmu.event_constraints = intel_icl_event_constraints;
  5001                  x86_pmu.pebs_constraints = intel_icl_pebs_event_constraints;
  5002                  x86_pmu.extra_regs = intel_icl_extra_regs;
  5003                  x86_pmu.pebs_aliases = NULL;
  5004                  x86_pmu.pebs_prec_dist = true;
  5005                  x86_pmu.flags |= PMU_FL_HAS_RSP_1;
  5006                  x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
  5007  
  5008                  x86_pmu.hw_config = hsw_hw_config;
  5009                  x86_pmu.get_event_constraints = icl_get_event_constraints;
  5010                  extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
  5011                          hsw_format_attr : nhm_format_attr;
  5012                  extra_skl_attr = skl_format_attr;
  5013                  mem_attr = icl_events_attrs;
  5014                  tsx_attr = icl_tsx_events_attrs;
  5015                  x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xca, .umask=0x02);
  5016                  x86_pmu.lbr_pt_coexist = true;
  5017                  intel_pmu_pebs_data_source_skl(pmem);
  5018                  pr_cont("Icelake events, ");
  5019                  name = "icelake";
  5020                  break;
  5021  
  5022          default:
  5023                  switch (x86_pmu.version) {
  5024                  case 1:
  5025                          x86_pmu.event_constraints = intel_v1_event_constraints;
  5026                          pr_cont("generic architected perfmon v1, ");
  5027                          name = "generic_arch_v1";
  5028                          break;
  5029                  default:
  5030                          /*
  5031                           * default constraints for v2 and up
  5032                           */
  5033                          x86_pmu.event_constraints = intel_gen_event_constraints;
  5034                          pr_cont("generic architected perfmon, ");
  5035                          name = "generic_arch_v2+";
  5036                          break;
  5037                  }
  5038          }
  5039  
  5040          snprintf(pmu_name_str, sizeof(pmu_name_str), "%s", name);
  5041  
  5042  
  5043          group_events_td.attrs  = td_attr;
  5044          group_events_mem.attrs = mem_attr;
  5045          group_events_tsx.attrs = tsx_attr;
  5046          group_format_extra.attrs = extra_attr;
  5047          group_format_extra_skl.attrs = extra_skl_attr;
  5048  
  5049          x86_pmu.attr_update = attr_update;
  5050  
  5051          if (x86_pmu.num_counters > INTEL_PMC_MAX_GENERIC) {
  5052                  WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
  5053                       x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
  5054                  x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC;
  5055          }
  5056          x86_pmu.intel_ctrl = (1ULL << x86_pmu.num_counters) - 1;
  5057  
  5058          if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) {
  5059                  WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
  5060                       x86_pmu.num_counters_fixed, INTEL_PMC_MAX_FIXED);
  5061                  x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED;
  5062          }
  5063  
  5064          x86_pmu.intel_ctrl |=
  5065                  ((1LL << x86_pmu.num_counters_fixed)-1) << INTEL_PMC_IDX_FIXED;
  5066  
  5067          if (x86_pmu.event_constraints) {
  5068                  /*
  5069                   * event on fixed counter2 (REF_CYCLES) only works on this
  5070                   * counter, so do not extend mask to generic counters
  5071                   */
  5072                  for_each_event_constraint(c, x86_pmu.event_constraints) {
  5073                          if (c->cmask == FIXED_EVENT_FLAGS
  5074                              && c->idxmsk64 != INTEL_PMC_MSK_FIXED_REF_CYCLES) {
  5075                                  c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
  5076                          }
  5077                          c->idxmsk64 &=
  5078                                  ~(~0ULL << (INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed));
  5079                          c->weight = hweight64(c->idxmsk64);
  5080                  }
  5081          }
  5082  
  5083          /*
  5084           * Access LBR MSR may cause #GP under certain circumstances.
  5085           * E.g. KVM doesn't support LBR MSR
  5086           * Check all LBR MSRs here.
  5087           * Disable LBR access if any LBR MSRs can not be accessed.
  5088           */
  5089          if (x86_pmu.lbr_nr && !check_msr(x86_pmu.lbr_tos, 0x3UL))
  5090                  x86_pmu.lbr_nr = 0;
  5091          for (i = 0; i < x86_pmu.lbr_nr; i++) {
  5092                  if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) &&
  5093                        check_msr(x86_pmu.lbr_to + i, 0xffffUL)))
  5094                          x86_pmu.lbr_nr = 0;
  5095          }
  5096  
  5097          if (x86_pmu.lbr_nr)
  5098                  pr_cont("%d-deep LBR, ", x86_pmu.lbr_nr);
  5099  
  5100          /*
  5101           * Access extra MSR may cause #GP under certain circumstances.
  5102           * E.g. KVM doesn't support offcore event
  5103           * Check all extra_regs here.
  5104           */
  5105          if (x86_pmu.extra_regs) {
  5106                  for (er = x86_pmu.extra_regs; er->msr; er++) {
  5107                          er->extra_msr_access = check_msr(er->msr, 0x11UL);
  5108                          /* Disable LBR select mapping */
  5109                          if ((er->idx == EXTRA_REG_LBR) && !er->extra_msr_access)
  5110                                  x86_pmu.lbr_sel_map = NULL;
  5111                  }
  5112          }
  5113  
  5114          /* Support full width counters using alternative MSR range */
  5115          if (x86_pmu.intel_cap.full_width_write) {
  5116                  x86_pmu.max_period = x86_pmu.cntval_mask >> 1;
  5117                  x86_pmu.perfctr = MSR_IA32_PMC0;
  5118                  pr_cont("full-width counters, ");
  5119          }
  5120  
  5121          /*
  5122           * For arch perfmon 4 use counter freezing to avoid
  5123           * several MSR accesses in the PMI.
  5124           */
  5125          if (x86_pmu.counter_freezing)
  5126                  x86_pmu.handle_irq = intel_pmu_handle_irq_v4;
  5127  
  5128          return 0;
  5129  }
  5130  

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation

Attachment: .config.gz
