The INIT_TSS macro is unnecessary.  Just define the initial TSS where
cpu_tss is defined.

While we're at it, merge the 32-bit and 64-bit definitions.  The only
syntactic change is that 32-bit kernels previously computed sp0 as a
long; they now compute it as an unsigned long.
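
For reference, the 32-bit initializer goes from

        .sp0 = sizeof(init_stack) + (long)&init_stack,

to the shared

        .sp0 = (unsigned long)&init_stack + sizeof(init_stack),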

Verified by objdump: the contents and relocations of
.data..percpu..shared_aligned are unchanged on 32-bit and 64-bit
kernels.
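
For anyone who wants to repeat the check, something along these lines
(a sketch; the exact invocation used may have differed) dumps the
section contents and the relocation entries of the object that defines
cpu_tss:

        objdump -s -j .data..percpu..shared_aligned arch/x86/kernel/process.o
        objdump -r arch/x86/kernel/process.o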

Signed-off-by: Andy Lutomirski <l...@amacapital.net>
---
 arch/x86/include/asm/processor.h | 20 --------------------
 arch/x86/kernel/process.c        | 20 +++++++++++++++++++-
 2 files changed, 19 insertions(+), 21 deletions(-)

diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 117ee65473e2..f5e3ec63767d 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -818,22 +818,6 @@ static inline void spin_lock_prefetch(const void *x)
        .io_bitmap_ptr          = NULL,                                   \
 }
 
-/*
- * Note that the .io_bitmap member must be extra-big. This is because
- * the CPU will access an additional byte beyond the end of the IO
- * permission bitmap. The extra byte must be all 1 bits, and must
- * be within the limit.
- */
-#define INIT_TSS  {                                                      \
-       .x86_tss = {                                                      \
-               .sp0            = sizeof(init_stack) + (long)&init_stack, \
-               .ss0            = __KERNEL_DS,                            \
-               .ss1            = __KERNEL_CS,                            \
-               .io_bitmap_base = INVALID_IO_BITMAP_OFFSET,               \
-        },                                                               \
-       .io_bitmap              = { [0 ... IO_BITMAP_LONGS] = ~0 },       \
-}
-
 extern unsigned long thread_saved_pc(struct task_struct *tsk);
 
 #define THREAD_SIZE_LONGS      (THREAD_SIZE/sizeof(unsigned long))
@@ -892,10 +876,6 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
        .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
 }
 
-#define INIT_TSS  { \
-       .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
-}
-
 /*
  * Return saved PC of a blocked thread.
  * What is this good for? it will be always the scheduler or ret_from_fork.
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 6f6087349231..f4c0af7fc3a0 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -37,7 +37,25 @@
  * section. Since TSS's are completely CPU-local, we want them
  * on exact cacheline boundaries, to eliminate cacheline ping-pong.
  */
-__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = INIT_TSS;
+__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
+       .x86_tss = {
+               .sp0 = (unsigned long)&init_stack + sizeof(init_stack),
+#ifdef CONFIG_X86_32
+               .ss0 = __KERNEL_DS,
+               .ss1 = __KERNEL_CS,
+               .io_bitmap_base = INVALID_IO_BITMAP_OFFSET,
+#endif
+        },
+#ifdef CONFIG_X86_32
+        /*
+         * Note that the .io_bitmap member must be extra-big. This is because
+         * the CPU will access an additional byte beyond the end of the IO
+         * permission bitmap. The extra byte must be all 1 bits, and must
+         * be within the limit.
+         */
+       .io_bitmap              = { [0 ... IO_BITMAP_LONGS] = ~0 },
+#endif
+};
 EXPORT_PER_CPU_SYMBOL_GPL(cpu_tss);
 
 #ifdef CONFIG_X86_64
-- 
2.1.0
