x86: cleanup smp.h variants

Bring the smp.h variants into sync to prepare merging and
paravirt support.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/include/asm-x86/apic.h b/include/asm-x86/apic.h
index d0a221f..18d932d 100644
--- a/include/asm-x86/apic.h
+++ b/include/asm-x86/apic.h
@@ -42,6 +42,7 @@
 
 extern int apic_runs_main_timer;
 extern int ioapic_force;
+extern int disable_apic;
 extern int disable_apic_timer;
 extern unsigned boot_cpu_id;
 
diff --git a/include/asm-x86/mpspec.h b/include/asm-x86/mpspec.h
index a2a6b2e..781ad74 100644
--- a/include/asm-x86/mpspec.h
+++ b/include/asm-x86/mpspec.h
@@ -13,8 +13,11 @@
 
 extern unsigned int def_to_bigsmp;
 extern int apic_version[MAX_APICS];
+extern u8 apicid_2_node[];
 extern int pic_mode;
 
+#define MAX_APICID 256
+
 #else
 
 #define MAX_MP_BUSSES 256
diff --git a/include/asm-x86/smp_32.h b/include/asm-x86/smp_32.h
index e10b7af..c69e960 100644
--- a/include/asm-x86/smp_32.h
+++ b/include/asm-x86/smp_32.h
@@ -1,58 +1,51 @@
 #ifndef __ASM_SMP_H
 #define __ASM_SMP_H
 
+#ifndef __ASSEMBLY__
+#include <linux/cpumask.h>
+#include <linux/init.h>
+
 /*
  * We need the APIC definitions automatically as part of 'smp.h'
  */
-#ifndef __ASSEMBLY__
-#include <linux/kernel.h>
-#include <linux/threads.h>
-#include <linux/cpumask.h>
+#ifdef CONFIG_X86_LOCAL_APIC
+# include <asm/mpspec.h>
+# include <asm/apic.h>
+# ifdef CONFIG_X86_IO_APIC
+#  include <asm/io_apic.h>
+# endif
 #endif
 
-#if defined(CONFIG_X86_LOCAL_APIC) && !defined(__ASSEMBLY__)
-#include <linux/bitops.h>
-#include <asm/mpspec.h>
-#include <asm/apic.h>
-#ifdef CONFIG_X86_IO_APIC
-#include <asm/io_apic.h>
-#endif
-#endif
+extern cpumask_t cpu_callout_map;
+extern cpumask_t cpu_callin_map;
 
-#define BAD_APICID 0xFFu
-#ifdef CONFIG_SMP
-#ifndef __ASSEMBLY__
-
-/*
- * Private routines/data
- */
- 
-extern void smp_alloc_memory(void);
-extern int pic_mode;
 extern int smp_num_siblings;
-DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
-DECLARE_PER_CPU(cpumask_t, cpu_core_map);
+extern unsigned int num_processors;
 
-extern void (*mtrr_hook) (void);
-extern void zap_low_mappings (void);
+extern void smp_alloc_memory(void);
 extern void lock_ipi_call_lock(void);
 extern void unlock_ipi_call_lock(void);
 
-#define MAX_APICID 256
+extern void (*mtrr_hook) (void);
+extern void zap_low_mappings (void);
+
 extern u8 __initdata x86_cpu_to_apicid_init[];
 extern void *x86_cpu_to_apicid_ptr;
+
+DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
+DECLARE_PER_CPU(cpumask_t, cpu_core_map);
+DECLARE_PER_CPU(u8, cpu_llc_id);
 DECLARE_PER_CPU(u8, x86_cpu_to_apicid);
 
-#define cpu_physical_id(cpu)	per_cpu(x86_cpu_to_apicid, cpu)
-
-extern void set_cpu_sibling_map(int cpu);
-
 #ifdef CONFIG_HOTPLUG_CPU
 extern void cpu_exit_clear(void);
 extern void cpu_uninit(void);
 extern void remove_siblinginfo(int cpu);
 #endif
 
+/* Globals due to paravirt */
+extern void set_cpu_sibling_map(int cpu);
+
 struct smp_ops
 {
 	void (*smp_prepare_boot_cpu)(void);
@@ -67,6 +60,7 @@
 				      int wait);
 };
 
+#ifdef CONFIG_SMP
 extern struct smp_ops smp_ops;
 
 static inline void smp_prepare_boot_cpu(void)
@@ -107,10 +101,12 @@
 void native_smp_cpus_done(unsigned int max_cpus);
 
 #ifndef CONFIG_PARAVIRT
-#define startup_ipi_hook(phys_apicid, start_eip, start_esp) 		\
-do { } while (0)
+#define startup_ipi_hook(phys_apicid, start_eip, start_esp) do { } while (0)
 #endif
 
+extern int __cpu_disable(void);
+extern void __cpu_die(unsigned int cpu);
+
 /*
  * This function is needed by all SMP systems. It must _always_ be valid
  * from the initial startup. We map APIC_BASE very early in page_setup(),
@@ -119,9 +115,11 @@
 DECLARE_PER_CPU(int, cpu_number);
 #define raw_smp_processor_id() (x86_read_percpu(cpu_number))
 
-extern cpumask_t cpu_callout_map;
-extern cpumask_t cpu_callin_map;
-extern cpumask_t cpu_possible_map;
+#define cpu_physical_id(cpu)	per_cpu(x86_cpu_to_apicid, cpu)
+
+extern int safe_smp_processor_id(void);
+
+void __cpuinit smp_store_cpu_info(int id);
 
 /* We don't mark CPUs online until __cpu_up(), so we need another measure */
 static inline int num_booting_cpus(void)
@@ -129,56 +127,39 @@
 	return cpus_weight(cpu_callout_map);
 }
 
-extern int safe_smp_processor_id(void);
-extern int __cpu_disable(void);
-extern void __cpu_die(unsigned int cpu);
-extern unsigned int num_processors;
-
-void __cpuinit smp_store_cpu_info(int id);
-
-#endif /* !__ASSEMBLY__ */
-
 #else /* CONFIG_SMP */
 
 #define safe_smp_processor_id()		0
 #define cpu_physical_id(cpu)		boot_cpu_physical_apicid
 
-#define NO_PROC_ID		0xFF		/* No processor magic marker */
-
-#endif /* CONFIG_SMP */
-
-#ifndef __ASSEMBLY__
+#endif /* !CONFIG_SMP */
 
 #ifdef CONFIG_X86_LOCAL_APIC
 
-#ifdef APIC_DEFINITION
-extern int hard_smp_processor_id(void);
-#else
-#include <mach_apicdef.h>
-static inline int hard_smp_processor_id(void)
-{
-	/* we don't want to mark this access volatile - bad code generation */
-	return GET_APIC_ID(*(unsigned long *)(APIC_BASE+APIC_ID));
-}
-#endif /* APIC_DEFINITION */
-
-#else /* CONFIG_X86_LOCAL_APIC */
-
-#ifndef CONFIG_SMP
-#define hard_smp_processor_id()		0
-#endif
-
-#endif /* CONFIG_X86_LOCAL_APIC */
-
-extern u8 apicid_2_node[];
-
-#ifdef CONFIG_X86_LOCAL_APIC
 static __inline int logical_smp_processor_id(void)
 {
 	/* we don't want to mark this access volatile - bad code generation */
-	return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR));
+	return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR));
 }
-#endif
-#endif
 
+# ifdef APIC_DEFINITION
+extern int hard_smp_processor_id(void);
+# else
+#  include <mach_apicdef.h>
+static inline int hard_smp_processor_id(void)
+{
+	/* we don't want to mark this access volatile - bad code generation */
+	return GET_APIC_ID(*(u32 *)(APIC_BASE + APIC_ID));
+}
+# endif /* APIC_DEFINITION */
+
+#else /* CONFIG_X86_LOCAL_APIC */
+
+# ifndef CONFIG_SMP
+#  define hard_smp_processor_id()	0
+# endif
+
+#endif /* CONFIG_X86_LOCAL_APIC */
+
+#endif /* !ASSEMBLY */
 #endif
diff --git a/include/asm-x86/smp_64.h b/include/asm-x86/smp_64.h
index ab612b0..2feddda 100644
--- a/include/asm-x86/smp_64.h
+++ b/include/asm-x86/smp_64.h
@@ -1,97 +1,39 @@
 #ifndef __ASM_SMP_H
 #define __ASM_SMP_H
 
+#include <linux/cpumask.h>
+#include <linux/init.h>
+
 /*
  * We need the APIC definitions automatically as part of 'smp.h'
  */
-#include <linux/threads.h>
-#include <linux/cpumask.h>
-#include <linux/bitops.h>
-#include <linux/init.h>
-extern int disable_apic;
-
-#include <asm/mpspec.h>
 #include <asm/apic.h>
 #include <asm/io_apic.h>
+#include <asm/mpspec.h>
+#include <asm/pda.h>
 #include <asm/thread_info.h>
 
-#ifdef CONFIG_SMP
-
-#include <asm/pda.h>
-
-struct pt_regs;
-
-extern cpumask_t cpu_present_mask;
-extern cpumask_t cpu_possible_map;
-extern cpumask_t cpu_online_map;
 extern cpumask_t cpu_callout_map;
 extern cpumask_t cpu_initialized;
 
-/*
- * Private routines/data
- */
- 
+extern int smp_num_siblings;
+extern unsigned int num_processors;
+
 extern void smp_alloc_memory(void);
-extern volatile unsigned long smp_invalidate_needed;
 extern void lock_ipi_call_lock(void);
 extern void unlock_ipi_call_lock(void);
-extern int smp_num_siblings;
-extern void smp_send_reschedule(int cpu);
+
 extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *),
 				  void *info, int wait);
 
-/*
- * cpu_sibling_map and cpu_core_map now live
- * in the per cpu area
- *
- * extern cpumask_t cpu_sibling_map[NR_CPUS];
- * extern cpumask_t cpu_core_map[NR_CPUS];
- */
+extern u8 __initdata x86_cpu_to_apicid_init[];
+extern void *x86_cpu_to_apicid_ptr;
+extern u8 bios_cpu_apicid[];
+
 DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
 DECLARE_PER_CPU(cpumask_t, cpu_core_map);
 DECLARE_PER_CPU(u8, cpu_llc_id);
-
-#define SMP_TRAMPOLINE_BASE 0x6000
-
-/*
- * On x86 all CPUs are mapped 1:1 to the APIC space.
- * This simplifies scheduling and IPI sending and
- * compresses data structures.
- */
-
-static inline int num_booting_cpus(void)
-{
-	return cpus_weight(cpu_callout_map);
-}
-
-#define raw_smp_processor_id() read_pda(cpunumber)
-
-extern int __cpu_disable(void);
-extern void __cpu_die(unsigned int cpu);
-extern void prefill_possible_map(void);
-extern unsigned num_processors;
-extern unsigned __cpuinitdata disabled_cpus;
-
-#define NO_PROC_ID		0xFF		/* No processor magic marker */
-
-#endif /* CONFIG_SMP */
-
-#define safe_smp_processor_id()		smp_processor_id()
-
-static inline int hard_smp_processor_id(void)
-{
-	/* we don't want to mark this access volatile - bad code generation */
-	return GET_APIC_ID(*(unsigned int *)(APIC_BASE+APIC_ID));
-}
-
-/*
- * Some lowlevel functions might want to know about
- * the real APIC ID <-> CPU # mapping.
- */
-extern u8 __initdata x86_cpu_to_apicid_init[];
-extern void *x86_cpu_to_apicid_ptr;
-DECLARE_PER_CPU(u8, x86_cpu_to_apicid);	/* physical ID */
-extern u8 bios_cpu_apicid[];
+DECLARE_PER_CPU(u8, x86_cpu_to_apicid);
 
 static inline int cpu_present_to_apicid(int mps_cpu)
 {
@@ -101,30 +43,57 @@
 		return BAD_APICID;
 }
 
-#ifndef CONFIG_SMP
-#define stack_smp_processor_id() 0
-#define cpu_logical_map(x) (x)
-#else
-#include <asm/thread_info.h>
-#define stack_smp_processor_id() \
-({ 								\
-	struct thread_info *ti;					\
+#ifdef CONFIG_SMP
+
+#define SMP_TRAMPOLINE_BASE 0x6000
+
+extern int __cpu_disable(void);
+extern void __cpu_die(unsigned int cpu);
+extern void prefill_possible_map(void);
+extern unsigned __cpuinitdata disabled_cpus;
+
+#define raw_smp_processor_id()	read_pda(cpunumber)
+#define cpu_physical_id(cpu)	per_cpu(x86_cpu_to_apicid, cpu)
+
+#define stack_smp_processor_id()					\
+	({								\
+	struct thread_info *ti;						\
 	__asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK));	\
-	ti->cpu;						\
+	ti->cpu;							\
 })
-#endif
+
+/*
+ * On x86 all CPUs are mapped 1:1 to the APIC space. This simplifies
+ * scheduling and IPI sending and compresses data structures.
+ */
+static inline int num_booting_cpus(void)
+{
+	return cpus_weight(cpu_callout_map);
+}
+
+extern void smp_send_reschedule(int cpu);
+
+#else /* CONFIG_SMP */
+
+extern unsigned int boot_cpu_id;
+#define cpu_physical_id(cpu)	boot_cpu_id
+#define stack_smp_processor_id() 0
+
+#endif /* !CONFIG_SMP */
+
+#define safe_smp_processor_id()		smp_processor_id()
 
 static __inline int logical_smp_processor_id(void)
 {
 	/* we don't want to mark this access volatile - bad code generation */
-	return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR));
+	return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR));
 }
 
-#ifdef CONFIG_SMP
-#define cpu_physical_id(cpu)		per_cpu(x86_cpu_to_apicid, cpu)
-#else
-extern unsigned int boot_cpu_id;
-#define cpu_physical_id(cpu)		boot_cpu_id
-#endif /* !CONFIG_SMP */
+static inline int hard_smp_processor_id(void)
+{
+	/* we don't want to mark this access volatile - bad code generation */
+	return GET_APIC_ID(*(u32 *)(APIC_BASE + APIC_ID));
+}
+
 #endif
 
diff --git a/include/asm-x86/topology_64.h b/include/asm-x86/topology_64.h
index a718dda..407b22d 100644
--- a/include/asm-x86/topology_64.h
+++ b/include/asm-x86/topology_64.h
@@ -7,8 +7,6 @@
 #include <asm/mpspec.h>
 #include <linux/bitops.h>
 
-extern cpumask_t cpu_online_map;
-
 extern unsigned char cpu_to_node[];
 extern cpumask_t     node_to_cpumask[];