Hi,

Le Mon, 11 Feb 2008 16:54:30 -0800,
"H. Peter Anvin" <[EMAIL PROTECTED]> a écrit :

> b) would be my first choice, and yes, it would be a good thing to
> have a generalized mechanism for this.  For the registrant, it's
> pretty easy: just add a macro that adds a pointer to a named
> section.  We then need a way to get the base address and length of
> each such section in order to be able to execute each function in
> sequence.

You'll find below a tentative patch that implements this. Tuples of
(vendor, pointer to cpu_dev structure) are stored in an
.x86cpuvendor.init section of the kernel, which is then read by the
generic CPU code in arch/x86/kernel/cpu/common.c to fill the cpu_devs[]
array.

Moreover the early_init_...() calls are integrated into that mechanism
using a new c_early_init() member of the cpu_dev structure.

The patch is for review only at the moment. Disabling compilation of
unused CPU support code will be done in a separate patch (I've taken
over Michael Opdenacker's work for the moment, with his agreement).

Thanks for your review and comments,

Thomas

---

Replace the hardcoded list of initialization functions for each CPU
vendor by a list in an ELF section, which is read at initialization in
arch/x86/kernel/cpu/cpu.c to fill the cpu_devs[] array. The ELF
section, named .x86cpuvendor.init, is reclaimed after boot, and
contains entries of type "struct cpu_vendor_dev" which associates a
vendor number with a pointer to a "struct cpu_dev" structure.

This first modification makes it possible to remove all the
VENDOR_init_cpu() functions.

This patch also removes the hardcoded calls to early_init_amd() and
early_init_intel(). Instead, we add a "c_early_init" member to the
cpu_dev structure, which, if not NULL, is then called by the generic
CPU initialization code. Unfortunately, in early_cpu_detect(), this_cpu
is not yet set, so we have to use the cpu_devs[] array directly.

This patch is part of the Linux Tiny project, and is needed for a
further patch that will make it possible to disable compilation of
unused CPU support code.

Signed-off-by: Thomas Petazzoni <[EMAIL PROTECTED]>

---
 arch/x86/kernel/cpu/amd.c        |    5 ++++-
 arch/x86/kernel/cpu/centaur.c    |    6 +-----
 arch/x86/kernel/cpu/common.c     |   33 ++++++++++-----------------------
 arch/x86/kernel/cpu/cpu.h        |   26 +++++++++++++-------------
 arch/x86/kernel/cpu/cyrix.c      |   13 ++-----------
 arch/x86/kernel/cpu/intel.c      |    9 +++------
 arch/x86/kernel/cpu/transmeta.c  |    6 +-----
 arch/x86/kernel/cpu/umc.c        |    7 ++-----
 arch/x86/kernel/vmlinux_32.lds.S |    5 +++++
 arch/x86/kernel/vmlinux_64.lds.S |    5 +++++
 10 files changed, 46 insertions(+), 69 deletions(-)

Index: linux/arch/x86/kernel/cpu/amd.c
===================================================================
--- linux.orig/arch/x86/kernel/cpu/amd.c
+++ linux/arch/x86/kernel/cpu/amd.c
@@ -63,7 +63,7 @@
 
 int force_mwait __cpuinitdata;
 
-void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
+static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
 {
        if (cpuid_eax(0x80000000) >= 0x80000007) {
                c->x86_power = cpuid_edx(0x80000007);
@@ -336,6 +336,7 @@
                  }
                },
        },
+       .c_early_init   = early_init_amd,
        .c_init         = init_amd,
        .c_size_cache   = amd_size_cache,
 };
@@ -345,3 +346,5 @@
        cpu_devs[X86_VENDOR_AMD] = &amd_cpu_dev;
        return 0;
 }
+
+cpu_vendor_dev_register(X86_VENDOR_AMD, &amd_cpu_dev);
Index: linux/arch/x86/kernel/cpu/centaur.c
===================================================================
--- linux.orig/arch/x86/kernel/cpu/centaur.c
+++ linux/arch/x86/kernel/cpu/centaur.c
@@ -464,8 +464,4 @@
        .c_size_cache   = centaur_size_cache,
 };
 
-int __init centaur_init_cpu(void)
-{
-       cpu_devs[X86_VENDOR_CENTAUR] = &centaur_cpu_dev;
-       return 0;
-}
+cpu_vendor_dev_register(X86_VENDOR_CENTAUR, &centaur_cpu_dev);
Index: linux/arch/x86/kernel/cpu/common.c
===================================================================
--- linux.orig/arch/x86/kernel/cpu/common.c
+++ linux/arch/x86/kernel/cpu/common.c
@@ -328,14 +328,9 @@
 
        get_cpu_vendor(c, 1);
 
-       switch (c->x86_vendor) {
-       case X86_VENDOR_AMD:
-               early_init_amd(c);
-               break;
-       case X86_VENDOR_INTEL:
-               early_init_intel(c);
-               break;
-       }
+       if (c->x86_vendor != X86_VENDOR_UNKNOWN &&
+           cpu_devs[c->x86_vendor]->c_early_init)
+               cpu_devs[c->x86_vendor]->c_early_init(c);
 
        early_get_cap(c);
 }
@@ -616,23 +611,15 @@
 
 cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;
 
-/* This is hacky. :)
- * We're emulating future behavior.
- * In the future, the cpu-specific init functions will be called implicitly
- * via the magic of initcalls.
- * They will insert themselves into the cpu_devs structure.
- * Then, when cpu_init() is called, we can just iterate over that array.
- */
 void __init early_cpu_init(void)
 {
-       intel_cpu_init();
-       cyrix_init_cpu();
-       nsc_init_cpu();
-       amd_init_cpu();
-       centaur_init_cpu();
-       transmeta_init_cpu();
-       nexgen_init_cpu();
-       umc_init_cpu();
+       struct cpu_vendor_dev *cvdev;
+
+       for (cvdev = __x86cpuvendor_start ;
+            cvdev < __x86cpuvendor_end   ;
+            cvdev++)
+               cpu_devs[cvdev->vendor] = cvdev->cpu_dev;
+
        early_cpu_detect();
 }
 
Index: linux/arch/x86/kernel/cpu/cpu.h
===================================================================
--- linux.orig/arch/x86/kernel/cpu/cpu.h
+++ linux/arch/x86/kernel/cpu/cpu.h
@@ -14,6 +14,7 @@
 
        struct          cpu_model_info c_models[4];
 
+       void            (*c_early_init)(struct cpuinfo_x86 *c);
        void            (*c_init)(struct cpuinfo_x86 * c);
        void            (*c_identify)(struct cpuinfo_x86 * c);
        unsigned int    (*c_size_cache)(struct cpuinfo_x86 * c, unsigned int 
size);
@@ -21,18 +22,17 @@
 
 extern struct cpu_dev * cpu_devs [X86_VENDOR_NUM];
 
-extern int get_model_name(struct cpuinfo_x86 *c);
-extern void display_cacheinfo(struct cpuinfo_x86 *c);
+struct cpu_vendor_dev {
+       int vendor;
+       struct cpu_dev *cpu_dev;
+};
 
-extern void early_init_intel(struct cpuinfo_x86 *c);
-extern void early_init_amd(struct cpuinfo_x86 *c);
+#define cpu_vendor_dev_register(cpu_vendor_id, cpu_dev) \
+       static struct cpu_vendor_dev __cpu_vendor_dev_##cpu_vendor_id __used \
+       __attribute__((__section__(".x86cpuvendor.init"))) = \
+       { cpu_vendor_id, cpu_dev }
 
-/* Specific CPU type init functions */
-int intel_cpu_init(void);
-int amd_init_cpu(void);
-int cyrix_init_cpu(void);
-int nsc_init_cpu(void);
-int centaur_init_cpu(void);
-int transmeta_init_cpu(void);
-int nexgen_init_cpu(void);
-int umc_init_cpu(void);
+extern struct cpu_vendor_dev __x86cpuvendor_start[], __x86cpuvendor_end[];
+
+extern int get_model_name(struct cpuinfo_x86 *c);
+extern void display_cacheinfo(struct cpuinfo_x86 *c);
Index: linux/arch/x86/kernel/cpu/cyrix.c
===================================================================
--- linux.orig/arch/x86/kernel/cpu/cyrix.c
+++ linux/arch/x86/kernel/cpu/cyrix.c
@@ -439,11 +439,7 @@
        .c_identify     = cyrix_identify,
 };
 
-int __init cyrix_init_cpu(void)
-{
-       cpu_devs[X86_VENDOR_CYRIX] = &cyrix_cpu_dev;
-       return 0;
-}
+cpu_vendor_dev_register(X86_VENDOR_CYRIX, &cyrix_cpu_dev);
 
 static struct cpu_dev nsc_cpu_dev __cpuinitdata = {
        .c_vendor       = "NSC",
@@ -451,9 +447,4 @@
        .c_init         = init_nsc,
 };
 
-int __init nsc_init_cpu(void)
-{
-       cpu_devs[X86_VENDOR_NSC] = &nsc_cpu_dev;
-       return 0;
-}
-
+cpu_vendor_dev_register(X86_VENDOR_NSC, &nsc_cpu_dev);
Index: linux/arch/x86/kernel/cpu/intel.c
===================================================================
--- linux.orig/arch/x86/kernel/cpu/intel.c
+++ linux/arch/x86/kernel/cpu/intel.c
@@ -30,7 +30,7 @@
 struct movsl_mask movsl_mask __read_mostly;
 #endif
 
-void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
+static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
 {
        /* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
        if (c->x86 == 15 && c->x86_cache_alignment == 64)
@@ -290,15 +290,12 @@
                  }
                },
        },
+       .c_early_init   = early_init_intel,
        .c_init         = init_intel,
        .c_size_cache   = intel_size_cache,
 };
 
-__init int intel_cpu_init(void)
-{
-       cpu_devs[X86_VENDOR_INTEL] = &intel_cpu_dev;
-       return 0;
-}
+cpu_vendor_dev_register(X86_VENDOR_INTEL, &intel_cpu_dev);
 
 #ifndef CONFIG_X86_CMPXCHG
 unsigned long cmpxchg_386_u8(volatile void *ptr, u8 old, u8 new)
Index: linux/arch/x86/kernel/cpu/transmeta.c
===================================================================
--- linux.orig/arch/x86/kernel/cpu/transmeta.c
+++ linux/arch/x86/kernel/cpu/transmeta.c
@@ -109,8 +109,4 @@
        .c_identify     = transmeta_identify,
 };
 
-int __init transmeta_init_cpu(void)
-{
-       cpu_devs[X86_VENDOR_TRANSMETA] = &transmeta_cpu_dev;
-       return 0;
-}
+cpu_vendor_dev_register(X86_VENDOR_TRANSMETA, &transmeta_cpu_dev);
Index: linux/arch/x86/kernel/cpu/umc.c
===================================================================
--- linux.orig/arch/x86/kernel/cpu/umc.c
+++ linux/arch/x86/kernel/cpu/umc.c
@@ -19,8 +19,5 @@
        },
 };
 
-int __init umc_init_cpu(void)
-{
-       cpu_devs[X86_VENDOR_UMC] = &umc_cpu_dev;
-       return 0;
-}
+cpu_vendor_dev_register(X86_VENDOR_UMC, &umc_cpu_dev);
+
Index: linux/arch/x86/kernel/vmlinux_32.lds.S
===================================================================
--- linux.orig/arch/x86/kernel/vmlinux_32.lds.S
+++ linux/arch/x86/kernel/vmlinux_32.lds.S
@@ -149,6 +149,11 @@
        *(.con_initcall.init)
        __con_initcall_end = .;
   }
+  .x86cpuvendor.init : AT(ADDR(.x86cpuvendor.init) - LOAD_OFFSET) {
+       __x86cpuvendor_start = .;
+       *(.x86cpuvendor.init)
+       __x86cpuvendor_end = .;
+  }
   SECURITY_INIT
   . = ALIGN(4);
   .altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
Index: linux/arch/x86/kernel/vmlinux_64.lds.S
===================================================================
--- linux.orig/arch/x86/kernel/vmlinux_64.lds.S
+++ linux/arch/x86/kernel/vmlinux_64.lds.S
@@ -177,6 +177,11 @@
        *(.con_initcall.init)
   }
   __con_initcall_end = .;
+  __x86cpuvendor_start = .;
+  .x86cpuvendor.init : AT(ADDR(.x86cpuvendor.init) - LOAD_OFFSET) {
+       *(.x86cpuvendor.init)
+  }
+  __x86cpuvendor_end = .;
   SECURITY_INIT
 
   . = ALIGN(8);


-- 
Thomas Petazzoni, Free Electrons
Free Embedded Linux Training Materials
on http://free-electrons.com/training
(More than 1500 pages!)

Attachment: signature.asc
Description: PGP signature

Reply via email to