Re: [PATCH] [15/20] x86: Move X86_FEATURE_CONSTANT_TSC into early cpu feature detection
* Andi Kleen <[EMAIL PROTECTED]> wrote: > Need this in the next patch in time_init and that happens early. > > This includes a minor fix on i386 where early_intel_workarounds() > [which is now called early_init_intel] really executes early as the > comments say. thanks, applied. I'll wait for Thomas to comment on the TSC bits though. (but as long as we carry this patch in x86.git it should make your future patching efforts easier.) Ingo -- To unsubscribe from this list: send the line "unsubscribe linux-kernel" in the body of a message to [EMAIL PROTECTED] More majordomo info at http://vger.kernel.org/majordomo-info.html Please read the FAQ at http://www.tux.org/lkml/
Re: [PATCH] [15/20] x86: Move X86_FEATURE_CONSTANT_TSC into early cpu feature detection
* Andi Kleen <[EMAIL PROTECTED]> wrote: > Need this in the next patch in time_init and that happens early. > > This includes a minor fix on i386 where early_intel_workarounds() > [which is now called early_init_intel] really executes early as the > comments say. thanks, applied. I'll wait for Thomas to comment on the TSC bits though. (but as long as we carry this patch in x86.git it should make your future patching efforts easier.) Ingo -- To unsubscribe from this list: send the line "unsubscribe linux-kernel" in the body of a message to [EMAIL PROTECTED] More majordomo info at http://vger.kernel.org/majordomo-info.html Please read the FAQ at http://www.tux.org/lkml/
[PATCH] [15/20] x86: Move X86_FEATURE_CONSTANT_TSC into early cpu feature detection
Need this in the next patch in time_init and that happens early. This includes a minor fix on i386 where early_intel_workarounds() [which is now called early_init_intel] really executes early as the comments say. Signed-off-by: Andi Kleen <[EMAIL PROTECTED]> --- arch/x86/kernel/cpu/amd.c| 17 +++-- arch/x86/kernel/cpu/common.c | 11 +-- arch/x86/kernel/cpu/cpu.h|3 ++- arch/x86/kernel/cpu/intel.c | 13 ++--- arch/x86/kernel/setup_64.c | 39 +++ 5 files changed, 59 insertions(+), 24 deletions(-) Index: linux/arch/x86/kernel/setup_64.c === --- linux.orig/arch/x86/kernel/setup_64.c +++ linux/arch/x86/kernel/setup_64.c @@ -553,9 +553,6 @@ static void __cpuinit display_cacheinfo( printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n", c->x86_cache_size, ecx & 0xFF); } - - if (n >= 0x80000007) - cpuid(0x80000007, &dummy, &dummy, &dummy, &c->x86_power); if (n >= 0x80000008) { cpuid(0x80000008, &eax, &dummy, &dummy, &dummy); c->x86_virt_bits = (eax >> 8) & 0xff; @@ -633,7 +630,7 @@ static void __init amd_detect_cmp(struct #endif } -static void __cpuinit early_init_amd(struct cpuinfo_x86 *c) +static void __cpuinit early_init_amd_mc(struct cpuinfo_x86 *c) { #ifdef CONFIG_SMP unsigned bits, ecx; @@ -691,6 +688,15 @@ static __cpuinit int amd_apic_timer_brok return 0; } +static void __cpuinit early_init_amd(struct cpuinfo_x86 *c) +{ + early_init_amd_mc(c); + + /* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */ + if (c->x86_power & (1<<8)) + set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); +} + static void __cpuinit init_amd(struct cpuinfo_x86 *c) { unsigned level; @@ -740,10 +746,6 @@ static void __cpuinit init_amd(struct cp } display_cacheinfo(c); - /* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */ - if (c->x86_power & (1<<8)) - set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); - /* Multi core CPU? 
*/ if (c->extended_cpuid_level >= 0x80000008) amd_detect_cmp(c); @@ -850,6 +852,13 @@ static void srat_detect_node(void) #endif } +static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) +{ + if ((c->x86 == 0xf && c->x86_model >= 0x03) || + (c->x86 == 0x6 && c->x86_model >= 0x0e)) + set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability); +} + static void __cpuinit init_intel(struct cpuinfo_x86 *c) { /* Cache sizes */ @@ -1061,6 +1070,20 @@ void __cpuinit identify_cpu(struct cpuin #ifdef CONFIG_NUMA numa_add_cpu(smp_processor_id()); #endif + + c->extended_cpuid_level = cpuid_eax(0x80000000); + + if (c->extended_cpuid_level >= 0x80000007) + c->x86_power = cpuid_edx(0x80000007); + + switch (c->x86_vendor) { + case X86_VENDOR_AMD: + early_init_amd(c); + break; + case X86_VENDOR_INTEL: + early_init_intel(c); + break; + } } void __cpuinit print_cpu_info(struct cpuinfo_x86 *c) Index: linux/arch/x86/kernel/cpu/amd.c === --- linux.orig/arch/x86/kernel/cpu/amd.c +++ linux/arch/x86/kernel/cpu/amd.c @@ -63,6 +63,15 @@ static __cpuinit int amd_apic_timer_brok int force_mwait __cpuinitdata; +void __cpuinit early_init_amd(struct cpuinfo_x86 *c) +{ + if (cpuid_eax(0x80000000) >= 0x80000007) { + c->x86_power = cpuid_edx(0x80000007); + if (c->x86_power & (1<<8)) + set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability); + } +} + static void __cpuinit init_amd(struct cpuinfo_x86 *c) { u32 l, h; @@ -85,6 +94,8 @@ static void __cpuinit init_amd(struct cp } #endif + early_init_amd(c); + /* * FIXME: We should handle the K5 here. 
Set up the write * range and also turn on MSR 83 bits 4 and 31 (write alloc, @@ -257,12 +268,6 @@ static void __cpuinit init_amd(struct cp c->x86_max_cores = (cpuid_ecx(0x80000008) & 0xff) + 1; } - if (cpuid_eax(0x80000000) >= 0x80000007) { - c->x86_power = cpuid_edx(0x80000007); - if (c->x86_power & (1<<8)) - set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability); - } - #ifdef CONFIG_X86_HT /* * On a AMD multi core setup the lower bits of the APIC id Index: linux/arch/x86/kernel/cpu/common.c === --- linux.orig/arch/x86/kernel/cpu/common.c +++ linux/arch/x86/kernel/cpu/common.c @@ -307,6 +307,15 @@ static void __init early_cpu_detect(void cpu_detect(c); get_cpu_vendor(c, 1); + + switch (c->x86_vendor) { + case
[PATCH] [15/20] x86: Move X86_FEATURE_CONSTANT_TSC into early cpu feature detection
Need this in the next patch in time_init and that happens early. This includes a minor fix on i386 where early_intel_workarounds() [which is now called early_init_intel] really executes early as the comments say. Signed-off-by: Andi Kleen <[EMAIL PROTECTED]> --- arch/x86/kernel/cpu/amd.c| 17 +++-- arch/x86/kernel/cpu/common.c | 11 +-- arch/x86/kernel/cpu/cpu.h|3 ++- arch/x86/kernel/cpu/intel.c | 13 ++--- arch/x86/kernel/setup_64.c | 39 +++ 5 files changed, 59 insertions(+), 24 deletions(-) Index: linux/arch/x86/kernel/setup_64.c === --- linux.orig/arch/x86/kernel/setup_64.c +++ linux/arch/x86/kernel/setup_64.c @@ -553,9 +553,6 @@ static void __cpuinit display_cacheinfo( printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n", c->x86_cache_size, ecx & 0xFF); } - - if (n >= 0x80000007) - cpuid(0x80000007, &dummy, &dummy, &dummy, &c->x86_power); if (n >= 0x80000008) { cpuid(0x80000008, &eax, &dummy, &dummy, &dummy); c->x86_virt_bits = (eax >> 8) & 0xff; @@ -633,7 +630,7 @@ static void __init amd_detect_cmp(struct #endif } -static void __cpuinit early_init_amd(struct cpuinfo_x86 *c) +static void __cpuinit early_init_amd_mc(struct cpuinfo_x86 *c) { #ifdef CONFIG_SMP unsigned bits, ecx; @@ -691,6 +688,15 @@ static __cpuinit int amd_apic_timer_brok return 0; } +static void __cpuinit early_init_amd(struct cpuinfo_x86 *c) +{ + early_init_amd_mc(c); + + /* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */ + if (c->x86_power & (1<<8)) + set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); +} + static void __cpuinit init_amd(struct cpuinfo_x86 *c) { unsigned level; @@ -740,10 +746,6 @@ static void __cpuinit init_amd(struct cp } display_cacheinfo(c); - /* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */ - if (c->x86_power & (1<<8)) - set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); - /* Multi core CPU? 
*/ if (c->extended_cpuid_level >= 0x80000008) amd_detect_cmp(c); @@ -850,6 +852,13 @@ static void srat_detect_node(void) #endif } +static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) +{ + if ((c->x86 == 0xf && c->x86_model >= 0x03) || + (c->x86 == 0x6 && c->x86_model >= 0x0e)) + set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability); +} + static void __cpuinit init_intel(struct cpuinfo_x86 *c) { /* Cache sizes */ @@ -1061,6 +1070,20 @@ void __cpuinit identify_cpu(struct cpuin #ifdef CONFIG_NUMA numa_add_cpu(smp_processor_id()); #endif + + c->extended_cpuid_level = cpuid_eax(0x80000000); + + if (c->extended_cpuid_level >= 0x80000007) + c->x86_power = cpuid_edx(0x80000007); + + switch (c->x86_vendor) { + case X86_VENDOR_AMD: + early_init_amd(c); + break; + case X86_VENDOR_INTEL: + early_init_intel(c); + break; + } } void __cpuinit print_cpu_info(struct cpuinfo_x86 *c) Index: linux/arch/x86/kernel/cpu/amd.c === --- linux.orig/arch/x86/kernel/cpu/amd.c +++ linux/arch/x86/kernel/cpu/amd.c @@ -63,6 +63,15 @@ static __cpuinit int amd_apic_timer_brok int force_mwait __cpuinitdata; +void __cpuinit early_init_amd(struct cpuinfo_x86 *c) +{ + if (cpuid_eax(0x80000000) >= 0x80000007) { + c->x86_power = cpuid_edx(0x80000007); + if (c->x86_power & (1<<8)) + set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability); + } +} + static void __cpuinit init_amd(struct cpuinfo_x86 *c) { u32 l, h; @@ -85,6 +94,8 @@ static void __cpuinit init_amd(struct cp } #endif + early_init_amd(c); + /* * FIXME: We should handle the K5 here. 
Set up the write * range and also turn on MSR 83 bits 4 and 31 (write alloc, @@ -257,12 +268,6 @@ static void __cpuinit init_amd(struct cp c->x86_max_cores = (cpuid_ecx(0x80000008) & 0xff) + 1; } - if (cpuid_eax(0x80000000) >= 0x80000007) { - c->x86_power = cpuid_edx(0x80000007); - if (c->x86_power & (1<<8)) - set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability); - } - #ifdef CONFIG_X86_HT /* * On a AMD multi core setup the lower bits of the APIC id Index: linux/arch/x86/kernel/cpu/common.c === --- linux.orig/arch/x86/kernel/cpu/common.c +++ linux/arch/x86/kernel/cpu/common.c @@ -307,6 +307,15 @@ static void __init early_cpu_detect(void cpu_detect(c); get_cpu_vendor(c, 1); + + switch (c->x86_vendor) { + case