I made the changes below — does that work?

---

--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3627,6 +3627,15 @@ static int core_pmu_hw_config(struct per
        return intel_pmu_bts_config(event);
 }
 
+#define INTEL_TD_METRIC_AVAILABLE_MAX  (INTEL_TD_METRIC_RETIRING + \
+                                        ((x86_pmu.num_topdown_events - 1) << 8))
+
+static bool is_available_metric_event(struct perf_event *event)
+{
+       return is_metric_event(event) &&
+               event->attr.config <= INTEL_TD_METRIC_AVAILABLE_MAX;
+}
+
 static inline bool is_mem_loads_event(struct perf_event *event)
 {
        return (event->attr.config & INTEL_ARCH_EVENT_MASK) == X86_CONFIG(.event=0xcd, .umask=0x01);
@@ -3711,7 +3720,7 @@ static int intel_pmu_hw_config(struct pe
                if (event->attr.config & X86_ALL_EVENT_FLAGS)
                        return -EINVAL;
 
-               if (is_metric_event(event)) {
+               if (is_available_metric_event(event)) {
                        struct perf_event *leader = event->group_leader;
 
                        /* The metric events don't support sampling. */
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -87,7 +87,14 @@ static inline bool is_topdown_count(stru
        return event->hw.flags & PERF_X86_EVENT_TOPDOWN;
 }
 
-static inline bool is_metric_event(struct perf_event *event);
+static inline bool is_metric_event(struct perf_event *event)
+{
+       u64 config = event->attr.config;
+
+       return ((config & ARCH_PERFMON_EVENTSEL_EVENT) == 0) &&
+               ((config & INTEL_ARCH_EVENT_MASK) >= INTEL_TD_METRIC_RETIRING) &&
+               ((config & INTEL_ARCH_EVENT_MASK) <= INTEL_TD_METRIC_MAX);
+}
 
 static inline bool is_slots_event(struct perf_event *event)
 {
@@ -901,18 +908,6 @@ static struct perf_pmu_events_ht_attr ev
 struct pmu *x86_get_pmu(void);
 extern struct x86_pmu x86_pmu __read_mostly;
 
-#define INTEL_TD_METRIC_AVAILABLE_MAX  (INTEL_TD_METRIC_RETIRING + \
-                                        ((x86_pmu.num_topdown_events - 1) << 8))
-
-static inline bool is_metric_event(struct perf_event *event)
-{
-       u64 config = event->attr.config;
-
-       return ((config & ARCH_PERFMON_EVENTSEL_EVENT) == 0) &&
-               ((config & INTEL_ARCH_EVENT_MASK) >= INTEL_TD_METRIC_RETIRING) &&
-               ((config & INTEL_ARCH_EVENT_MASK) <= INTEL_TD_METRIC_AVAILABLE_MAX);
-}
-
 static __always_inline struct x86_perf_task_context_opt *task_context_opt(void *ctx)
 {
        if (static_cpu_has(X86_FEATURE_ARCH_LBR))
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -284,10 +284,12 @@ struct x86_pmu_capability {
 #define INTEL_TD_METRIC_BAD_SPEC               0x8100  /* Bad speculation metric */
 #define INTEL_TD_METRIC_FE_BOUND               0x8200  /* FE bound metric */
 #define INTEL_TD_METRIC_BE_BOUND               0x8300  /* BE bound metric */
-#define INTEL_TD_METRIC_HEAVY_OPS              0x8400  /* Heavy Operations metric */
-#define INTEL_TD_METRIC_BR_MISPREDICT          0x8500  /* Branch Mispredict metric */
-#define INTEL_TD_METRIC_FETCH_LAT              0x8600  /* Fetch Latency metric */
-#define INTEL_TD_METRIC_MEM_BOUND              0x8700  /* Memory bound metric */
+/* Level 2 metrics */
+#define INTEL_TD_METRIC_HEAVY_OPS              0x8400  /* Heavy Operations metric */
+#define INTEL_TD_METRIC_BR_MISPREDICT          0x8500  /* Branch Mispredict metric */
+#define INTEL_TD_METRIC_FETCH_LAT              0x8600  /* Fetch Latency metric */
+#define INTEL_TD_METRIC_MEM_BOUND              0x8700  /* Memory bound metric */
+
 #define INTEL_TD_METRIC_MAX                    INTEL_TD_METRIC_MEM_BOUND
 #define INTEL_TD_METRIC_NUM                    8
 
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -908,7 +908,6 @@ enum perf_event_type {
         *                      u32     var1_dw;
         *              } && PERF_SAMPLE_WEIGHT_STRUCT
         *      #endif
-        *
         *       }
         *      }
         *      { u64                   data_src; } && PERF_SAMPLE_DATA_SRC
@@ -1276,29 +1275,23 @@ struct perf_branch_entry {
                reserved:40;
 };
 
-#if defined(__LITTLE_ENDIAN_BITFIELD)
 union perf_sample_weight {
        __u64           full;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
        struct {
                __u32   var1_dw;
                __u16   var2_w;
                __u16   var3_w;
        };
-};
-
 #elif defined(__BIG_ENDIAN_BITFIELD)
-
-union perf_sample_weight {
-       __u64           full;
        struct {
                __u16   var3_w;
                __u16   var2_w;
                __u32   var1_dw;
        };
-};
-
 #else
 #error "Unknown endianness"
 #endif
+};
 
 #endif /* _UAPI_LINUX_PERF_EVENT_H */

Reply via email to