Try this patch with your usual config and your usual set of kernel boot options.
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
index 570fa2b..d8f6138 100644
--- a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
@@ -319,14 +319,8 @@ static unsigned int get_cur_freq(unsigne
{
unsigned l, h;
unsigned clock_freq;
- cpumask_t saved_mask;
- saved_mask = current->cpus_allowed;
- set_cpus_allowed(current, cpumask_of_cpu(cpu));
- if (smp_processor_id() != cpu)
- return 0;
-
- rdmsr(MSR_IA32_PERF_STATUS, l, h);
+ rdmsr_on_cpu(cpu, MSR_IA32_PERF_STATUS, &l, &h);
clock_freq = extract_clock(l, cpu, 0);
if (unlikely(clock_freq == 0)) {
@@ -336,11 +330,10 @@ static unsigned int get_cur_freq(unsigne
* P-state transition (like TM2). Get the last freq set
* in PERF_CTL.
*/
- rdmsr(MSR_IA32_PERF_CTL, l, h);
+ rdmsr_on_cpu(cpu, MSR_IA32_PERF_CTL, &l, &h);
clock_freq = extract_clock(l, cpu, 1);
}
- set_cpus_allowed(current, saved_mask);
return clock_freq;
}
@@ -550,15 +543,15 @@ static int centrino_cpu_init(struct cpuf
/* Check to see if Enhanced SpeedStep is enabled, and try to
enable it if not. */
- rdmsr(MSR_IA32_MISC_ENABLE, l, h);
+ rdmsr_on_cpu(policy->cpu, MSR_IA32_MISC_ENABLE, &l, &h);
if (!(l & (1<<16))) {
l |= (1<<16);
dprintk("trying to enable Enhanced SpeedStep (%x)\n", l);
- wrmsr(MSR_IA32_MISC_ENABLE, l, h);
+ wrmsr_on_cpu(policy->cpu, MSR_IA32_MISC_ENABLE, l, h);
/* check to see if it stuck */
- rdmsr(MSR_IA32_MISC_ENABLE, l, h);
+ rdmsr_on_cpu(policy->cpu, MSR_IA32_MISC_ENABLE, &l, &h);
if (!(l & (1<<16))) {
printk(KERN_INFO PFX "couldn't enable Enhanced SpeedStep\n");
return -ENODEV;
@@ -676,8 +669,7 @@ #endif
else
cpu_set(j, set_mask);
- set_cpus_allowed(current, set_mask);
- if (unlikely(!cpu_isset(smp_processor_id(), set_mask))) {
+ if (unlikely(!cpu_isset(j, set_mask))) {
dprintk("couldn't limit to CPUs in this domain\n");
retval = -EAGAIN;
if (first_cpu) {
@@ -690,7 +682,7 @@ #endif
msr = centrino_model[cpu]->op_points[newstate].index;
if (first_cpu) {
- rdmsr(MSR_IA32_PERF_CTL, oldmsr, h);
+ rdmsr_on_cpu(j, MSR_IA32_PERF_CTL, &oldmsr, &h);
if (msr == (oldmsr & 0xffff)) {
dprintk("no change needed - msr was and needs "
"to be %x\n", oldmsr);
@@ -717,7 +709,7 @@ #endif
oldmsr |= msr;
}
- wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
+ wrmsr_on_cpu(j, MSR_IA32_PERF_CTL, oldmsr, h);
if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
break;
@@ -739,8 +731,7 @@ #endif
if (!cpus_empty(covered_cpus)) {
for_each_cpu_mask(j, covered_cpus) {
- set_cpus_allowed(current, cpumask_of_cpu(j));
- wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
+ wrmsr_on_cpu(j, MSR_IA32_PERF_CTL, oldmsr, h);
}
}
@@ -755,7 +746,6 @@ #endif
}
migrate_end:
- set_cpus_allowed(current, saved_mask);
return 0;
}
--
To UNSUBSCRIBE, email to [EMAIL PROTECTED]
with a subject of "unsubscribe". Trouble? Contact [EMAIL PROTECTED]