On Mon, Apr 05, 2021 at 08:10:50AM -0700, kan.li...@linux.intel.com wrote:
> diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
> index 0bd9554..d71ca69 100644
> --- a/arch/x86/events/core.c
> +++ b/arch/x86/events/core.c
> @@ -356,6 +356,7 @@ set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event)
>  {
>       struct perf_event_attr *attr = &event->attr;
>       unsigned int cache_type, cache_op, cache_result;
> +     struct x86_hybrid_pmu *pmu = is_hybrid() ? hybrid_pmu(event->pmu) : NULL;
>       u64 config, val;
>  
>       config = attr->config;
> @@ -375,7 +376,10 @@ set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event)
>               return -EINVAL;
>       cache_result = array_index_nospec(cache_result, PERF_COUNT_HW_CACHE_RESULT_MAX);
>  
> -     val = hw_cache_event_ids[cache_type][cache_op][cache_result];
> +     if (pmu)
> +             val = pmu->hw_cache_event_ids[cache_type][cache_op][cache_result];
> +     else
> +             val = hw_cache_event_ids[cache_type][cache_op][cache_result];
>  
>       if (val == 0)
>               return -ENOENT;
> @@ -384,7 +388,10 @@ set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event)
>               return -EINVAL;
>  
>       hwc->config |= val;
> -     attr->config1 = hw_cache_extra_regs[cache_type][cache_op][cache_result];
> +     if (pmu)
> +             attr->config1 = pmu->hw_cache_extra_regs[cache_type][cache_op][cache_result];
> +     else
> +             attr->config1 = hw_cache_extra_regs[cache_type][cache_op][cache_result];
>       return x86_pmu_extra_regs(val, event);
>  }

So I'm still bugged by this; you have the same pattern for
unconstrained, plus that other case where you couldn't use hybrid() at all.

How's something like this on top?

---
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -356,7 +356,6 @@ set_ext_hw_attr(struct hw_perf_event *hw
 {
        struct perf_event_attr *attr = &event->attr;
        unsigned int cache_type, cache_op, cache_result;
-       struct x86_hybrid_pmu *pmu = is_hybrid() ? hybrid_pmu(event->pmu) : NULL;
        u64 config, val;
 
        config = attr->config;
@@ -376,11 +375,7 @@ set_ext_hw_attr(struct hw_perf_event *hw
                return -EINVAL;
        cache_result = array_index_nospec(cache_result, PERF_COUNT_HW_CACHE_RESULT_MAX);
 
-       if (pmu)
-               val = pmu->hw_cache_event_ids[cache_type][cache_op][cache_result];
-       else
-               val = hw_cache_event_ids[cache_type][cache_op][cache_result];
-
+       val = hybrid_var(event->pmu, hw_cache_event_ids)[cache_type][cache_op][cache_result];
        if (val == 0)
                return -ENOENT;
 
@@ -388,10 +383,8 @@ set_ext_hw_attr(struct hw_perf_event *hw
                return -EINVAL;
 
        hwc->config |= val;
-       if (pmu)
-               attr->config1 = pmu->hw_cache_extra_regs[cache_type][cache_op][cache_result];
-       else
-               attr->config1 = hw_cache_extra_regs[cache_type][cache_op][cache_result];
+       attr->config1 = hybrid_var(event->pmu, hw_cache_extra_regs)[cache_type][cache_op][cache_result];
+
        return x86_pmu_extra_regs(val, event);
 }
 
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -660,14 +660,24 @@ static __always_inline struct x86_hybrid
 #define is_hybrid()                    (!!x86_pmu.num_hybrid_pmus)
 
 #define hybrid(_pmu, _field)                           \
-({                                                     \
-       typeof(x86_pmu._field) __F = x86_pmu._field;    \
+(*({                                                   \
+       typeof(&x86_pmu._field) __Fp = &x86_pmu._field; \
                                                        \
        if (is_hybrid() && (_pmu))                      \
-               __F = hybrid_pmu(_pmu)->_field;         \
+               __Fp = &hybrid_pmu(_pmu)->_field;       \
                                                        \
-       __F;                                            \
-})
+       __Fp;                                           \
+}))
+
+#define hybrid_var(_pmu, _var)                         \
+(*({                                                   \
+       typeof(&_var) __Fp = &_var;                     \
+                                                       \
+       if (is_hybrid() && (_pmu))                      \
+               __Fp = &hybrid_pmu(_pmu)->_var;         \
+                                                       \
+       __Fp;                                           \
+}))
 
 /*
  * struct x86_pmu - generic x86 pmu
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3147,10 +3147,7 @@ x86_get_event_constraints(struct cpu_hw_
                }
        }
 
-       if (!is_hybrid() || !cpuc->pmu)
-               return &unconstrained;
-
-       return &hybrid_pmu(cpuc->pmu)->unconstrained;
+       return &hybrid_var(cpuc->pmu, unconstrained);
 }
 
 static struct event_constraint *
@@ -3656,10 +3653,7 @@ static inline bool is_mem_loads_aux_even
 
 static inline bool intel_pmu_has_cap(struct perf_event *event, int idx)
 {
-       union perf_capabilities *intel_cap;
-
-       intel_cap = is_hybrid() ? &hybrid_pmu(event->pmu)->intel_cap :
-                                 &x86_pmu.intel_cap;
+       union perf_capabilities *intel_cap = &hybrid(event->pmu, intel_cap);
 
        return test_bit(idx, (unsigned long *)&intel_cap->capabilities);
 }
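
The trick, spelled out: the statement expression in hybrid()/hybrid_var()
now evaluates to a pointer, and the dereference outside the braces makes
the whole macro an lvalue, so it can be read, written and have its address
taken. A minimal userspace sketch of the same pattern (made-up names, not
the actual perf code; needs gcc/clang since statement expressions are a
GNU extension):

#include <stdio.h>

struct cpu_caps {
	unsigned long	flags;
};

static struct cpu_caps global_caps = { .flags = 0x1 };
static struct cpu_caps hybrid_caps = { .flags = 0x2 };
static int is_hybrid_system = 1;	/* pretend we detected a hybrid CPU */

/*
 * Same shape as hybrid_var(): the ({ }) yields a pointer, the '*' in
 * front turns it into an lvalue of the variable's type.
 */
#define caps_var(_use_hybrid)					\
(*({								\
	typeof(&global_caps) __p = &global_caps;		\
								\
	if (_use_hybrid)					\
		__p = &hybrid_caps;				\
								\
	__p;							\
}))

int main(void)
{
	/* read through the macro */
	printf("flags = %#lx\n", caps_var(is_hybrid_system).flags);

	/* write through it, which a value-returning macro could not do */
	caps_var(is_hybrid_system).flags |= 0x8;

	/* taking the address works too, like &hybrid(event->pmu, intel_cap)
	   in the patch */
	unsigned long *p = &caps_var(is_hybrid_system).flags;
	printf("flags = %#lx (hybrid_caps.flags = %#lx)\n", *p, hybrid_caps.flags);

	return 0;
}

That lvalue-ness is what lets intel_pmu_has_cap() take
&hybrid(event->pmu, intel_cap) directly, and hybrid_var() gives the same
treatment to variables like unconstrained that don't live inside
struct x86_pmu.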
