On Thu, 8 Sep 2016, Fenghua Yu wrote: > /* > + * Minimum bits required in Cache bitmask. > + */ > +unsigned int min_bitmask_len = 1;
Global variable w/o a corresponding declaration in a header file? > +/* > * Mask of CPUs for writing CBM values. We only need one CPU per-socket. > */ > static cpumask_t rdt_cpumask; > @@ -51,6 +55,42 @@ struct rdt_remote_data { > u64 val; > }; > > +/* > + * cache_alloc_hsw_probe() - Have to probe for Intel haswell server CPUs > + * as it does not have CPUID enumeration support for Cache allocation. > + * > + * Probes by writing to the high 32 bits(CLOSid) of the IA32_PQR_MSR and > + * testing if the bits stick. Max CLOSids is always 4 and max cbm length > + * is always 20 on hsw server parts. The minimum cache bitmask length > + * allowed for HSW server is always 2 bits. Hardcode all of them. > + */ > +static inline bool cache_alloc_hsw_probe(void) > +{ > + u32 l, h_old, h_new, h_tmp; > + > + if (rdmsr_safe(MSR_IA32_PQR_ASSOC, &l, &h_old)) > + return false; > + > + /* > + * Default value is always 0 if feature is present. > + */ > + h_tmp = h_old ^ 0x1U; > + if (wrmsr_safe(MSR_IA32_PQR_ASSOC, l, h_tmp) || > + rdmsr_safe(MSR_IA32_PQR_ASSOC, &l, &h_new)) > + return false; > + > + if (h_tmp != h_new) > + return false; > + > + wrmsr_safe(MSR_IA32_PQR_ASSOC, l, h_old); > + > + boot_cpu_data.x86_cache_max_closid = 4; > + boot_cpu_data.x86_cache_max_cbm_len = 20; > + min_bitmask_len = 2; So min_bitmask_len gets updated here, but it's not used anywhere. Nor is that cache_alloc_hsw_probe() function used .... > + return true; > +} > + > void __intel_rdt_sched_in(void *dummy) > { > struct intel_pqr_state *state = this_cpu_ptr(&pqr_state); > @@ -225,9 +265,6 @@ static int __init intel_rdt_late_init(void) > u32 maxid; > int err = 0, size, i; > > - if (!cpu_has(c, X86_FEATURE_CAT_L3)) > - return -ENODEV; And we now initialize the thing unconditionally no matter whether the feature is available or not. Interesting. The changelog does tell a different story than the patch .... Thanks, tglx