From: Adrian Bunk <bunk@stusta.de>

This patch contains the following cleanups on several architectures:
- make some needlessly global code static
- remove the following variables, which are write-only apart from printk's:
  - cache_decay_ticks
  - smp_threads_ready
  - cacheflush_time

I've only compile-tested this on i386, but I hope any mistakes I made 
are on unimportant architectures.  ;-)
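
For illustration only (this sketch is not part of the patch, and the
foo_* names are hypothetical), a minimal sketch of both cleanup patterns:

	/*
	 * Before:
	 *	int foo_count;			- global, but only used in this file
	 *	unsigned long foo_decay_ticks;	- written below, never read anywhere
	 */
	static int foo_count;	/* now static: no longer visible to other files */

	static void foo_tune(void)
	{
		foo_count++;
		/* the write-only foo_decay_ticks assignment is simply deleted */
	}

The printk's that reported cacheflush_time/cache_decay_ticks go away
together with the variables, since that output was their only remaining
use.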

Signed-off-by: Adrian Bunk <bunk@stusta.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
---

 25-akpm/arch/alpha/kernel/smp.c              |   11 -----------
 25-akpm/arch/i386/kernel/smpboot.c           |   27 ++++++---------------------
 25-akpm/arch/i386/mach-voyager/voyager_smp.c |   12 ------------
 25-akpm/arch/ia64/kernel/smpboot.c           |   15 ---------------
 25-akpm/arch/m32r/kernel/smpboot.c           |    5 -----
 25-akpm/arch/mips/kernel/smp.c               |   20 +-------------------
 25-akpm/arch/parisc/kernel/smp.c             |    4 ----
 25-akpm/arch/ppc/kernel/smp.c                |    2 --
 25-akpm/arch/ppc64/kernel/smp.c              |    2 --
 25-akpm/arch/s390/kernel/smp.c               |    3 ---
 25-akpm/arch/sh/kernel/smp.c                 |    3 ---
 25-akpm/arch/sparc/kernel/smp.c              |    3 ---
 25-akpm/arch/sparc/kernel/sun4d_smp.c        |    1 -
 25-akpm/arch/sparc/kernel/sun4m_smp.c        |    1 -
 25-akpm/arch/sparc64/kernel/smp.c            |   27 +++------------------------
 25-akpm/arch/um/kernel/smp.c                 |    6 ------
 25-akpm/arch/x86_64/kernel/smpboot.c         |   25 ++-----------------------
 25-akpm/include/asm-alpha/timex.h            |    1 -
 25-akpm/include/asm-arm/timex.h              |    2 --
 25-akpm/include/asm-arm26/timex.h            |    2 --
 25-akpm/include/asm-i386/smp.h               |    3 ---
 25-akpm/include/asm-i386/timex.h             |    2 --
 25-akpm/include/asm-m32r/timex.h             |    2 --
 25-akpm/include/asm-mips/timex.h             |    1 -
 25-akpm/include/asm-parisc/timex.h           |    2 --
 25-akpm/include/asm-ppc/timex.h              |    2 --
 25-akpm/include/asm-s390/timex.h             |    2 --
 25-akpm/include/asm-sh/timex.h               |    2 --
 25-akpm/include/asm-sh64/timex.h             |    2 --
 25-akpm/include/asm-sparc/timex.h            |    1 -
 25-akpm/include/asm-um/timex.h               |    2 --
 25-akpm/include/asm-x86_64/timex.h           |    2 --
 25-akpm/include/linux/sched.h                |    1 -
 25-akpm/include/linux/smp.h                  |    6 ------
 25-akpm/init/main.c                          |    1 -
 35 files changed, 12 insertions(+), 191 deletions(-)

diff -puN arch/alpha/kernel/smp.c~smpbootc-cleanups arch/alpha/kernel/smp.c
--- 25/arch/alpha/kernel/smp.c~smpbootc-cleanups	2005-02-22 18:19:57.000000000 -0800
+++ 25-akpm/arch/alpha/kernel/smp.c	2005-02-22 18:19:57.000000000 -0800
@@ -78,8 +78,6 @@ static unsigned long hwrpb_cpu_present_m
 
 int smp_num_probed;		/* Internal processor count */
 int smp_num_cpus = 1;		/* Number that came online.  */
-cycles_t cacheflush_time;
-unsigned long cache_decay_ticks;
 
 extern void calibrate_delay(void);
 
@@ -217,15 +215,6 @@ smp_tune_scheduling (int cpuid)
 	}
 
 	freq = hwrpb->cycle_freq ? : est_cycle_freq;
-
-	cacheflush_time = (freq / 1000000) * (on_chip_cache << 10) / bandwidth;
-	cache_decay_ticks = cacheflush_time / (freq / 1000) * HZ / 1000;
-
-	printk("per-CPU timeslice cutoff: %ld.%02ld usecs.\n",
-	       cacheflush_time/(freq/1000000),
-	       (cacheflush_time*100/(freq/1000000)) % 100);
-	printk("task migration cache decay timeout: %ld msecs.\n",
-	       (cache_decay_ticks + 1) * 1000 / HZ);
 }
 
 /* Wait until hwrpb->txrdy is clear for cpu.  Return -1 on timeout.  */
diff -puN arch/i386/kernel/smpboot.c~smpbootc-cleanups arch/i386/kernel/smpboot.c
--- 25/arch/i386/kernel/smpboot.c~smpbootc-cleanups	2005-02-22 18:19:57.000000000 -0800
+++ 25-akpm/arch/i386/kernel/smpboot.c	2005-02-22 18:19:57.000000000 -0800
@@ -80,9 +80,6 @@ u8 x86_cpu_to_apicid[NR_CPUS] =
 			{ [0 ... NR_CPUS-1] = 0xff };
 EXPORT_SYMBOL(x86_cpu_to_apicid);
 
-/* Set when the idlers are all forked */
-int smp_threads_ready;
-
 /*
  * Trampoline 80x86 program as an array.
  */
@@ -95,6 +92,8 @@ static int trampoline_exec;
 /* State of each CPU. */
 DEFINE_PER_CPU(int, cpu_state) = { 0 };
 
+static void map_cpu_to_logical_apicid(void);
+
 /*
  * Currently trivial. Write the real->protected mode
  * bootstrap into the page concerned. The caller
@@ -325,7 +324,7 @@ extern void calibrate_delay(void);
 
 static atomic_t init_deasserted;
 
-void __init smp_callin(void)
+static void __init smp_callin(void)
 {
 	int cpuid, phys_id;
 	unsigned long timeout;
@@ -414,7 +413,7 @@ void __init smp_callin(void)
 		synchronize_tsc_ap();
 }
 
-int cpucount;
+static int cpucount;
 
 /*
  * Activate a secondary processor.
@@ -512,7 +511,7 @@ static inline void unmap_cpu_to_node(int
 
 u8 cpu_2_logical_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
 
-void map_cpu_to_logical_apicid(void)
+static void map_cpu_to_logical_apicid(void)
 {
 	int cpu = smp_processor_id();
 	int apicid = logical_smp_processor_id();
@@ -521,7 +520,7 @@ void map_cpu_to_logical_apicid(void)
 	map_cpu_to_node(cpu, apicid_to_node(apicid));
 }
 
-void unmap_cpu_to_logical_apicid(int cpu)
+static void unmap_cpu_to_logical_apicid(int cpu)
 {
 	cpu_2_logical_apicid[cpu] = BAD_APICID;
 	unmap_cpu_to_node(cpu);
@@ -853,9 +852,6 @@ static int __init do_boot_cpu(int apicid
 	return boot_error;
 }
 
-cycles_t cacheflush_time;
-unsigned long cache_decay_ticks;
-
 static void smp_tune_scheduling (void)
 {
 	unsigned long cachesize;       /* kB   */
@@ -876,7 +872,6 @@ static void smp_tune_scheduling (void)
 		 * this basically disables processor-affinity
 		 * scheduling on SMP without a TSC.
 		 */
-		cacheflush_time = 0;
 		return;
 	} else {
 		cachesize = boot_cpu_data.x86_cache_size;
@@ -884,17 +879,7 @@ static void smp_tune_scheduling (void)
 			cachesize = 16; /* Pentiums, 2x8kB cache */
 			bandwidth = 100;
 		}
-
-		cacheflush_time = (cpu_khz>>10) * (cachesize<<10) / bandwidth;
 	}
-
-	cache_decay_ticks = (long)cacheflush_time/cpu_khz + 1;
-
-	printk("per-CPU timeslice cutoff: %ld.%02ld usecs.\n",
-		(long)cacheflush_time/(cpu_khz/1000),
-		((long)cacheflush_time*100/(cpu_khz/1000)) % 100);
-	printk("task migration cache decay timeout: %ld msecs.\n",
-		cache_decay_ticks);
 }
 
 /*
diff -puN arch/i386/mach-voyager/voyager_smp.c~smpbootc-cleanups arch/i386/mach-voyager/voyager_smp.c
--- 25/arch/i386/mach-voyager/voyager_smp.c~smpbootc-cleanups	2005-02-22 18:19:57.000000000 -0800
+++ 25-akpm/arch/i386/mach-voyager/voyager_smp.c	2005-02-22 18:19:57.000000000 -0800
@@ -37,10 +37,6 @@ DEFINE_PER_CPU(struct tlb_state, cpu_tlb
 /* CPU IRQ affinity -- set to all ones initially */
 static unsigned long cpu_irq_affinity[NR_CPUS] __cacheline_aligned = { [0 ... NR_CPUS-1]  = ~0UL };
 
-/* Set when the idlers are all forked - Set in main.c but not actually
- * used by any other parts of the kernel */
-int smp_threads_ready = 0;
-
 /* per CPU data structure (for /proc/cpuinfo et al), visible externally
  * indexed physically */
 struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
@@ -81,14 +77,6 @@ cpumask_t cpu_online_map = CPU_MASK_NONE
  * by scheduler but indexed physically */
 cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
 
-/* estimate of time used to flush the SMP-local cache - used in
- * processor affinity calculations */
-cycles_t cacheflush_time = 0;
-
-/* cache decay ticks for scheduler---a fairly useless quantity for the
-   voyager system with its odd affinity and huge L3 cache */
-unsigned long cache_decay_ticks = 20;
-
 
 /* The internal functions */
 static void send_CPI(__u32 cpuset, __u8 cpi);
diff -puN arch/ia64/kernel/smpboot.c~smpbootc-cleanups arch/ia64/kernel/smpboot.c
--- 25/arch/ia64/kernel/smpboot.c~smpbootc-cleanups	2005-02-22 18:19:57.000000000 -0800
+++ 25-akpm/arch/ia64/kernel/smpboot.c	2005-02-22 18:19:57.000000000 -0800
@@ -427,26 +427,12 @@ decay (char *str)
 {
 	int ticks;
 	get_option (&str, &ticks);
-	cache_decay_ticks = ticks;
 	return 1;
 }
 
 __setup("decay=", decay);
 
 /*
- * # of ticks an idle task is considered cache-hot.  Highly application-dependent.  There
- * are apps out there which are known to suffer significantly with values >= 4.
- */
-unsigned long cache_decay_ticks = 10;	/* equal to MIN_TIMESLICE */
-
-static void
-smp_tune_scheduling (void)
-{
-	printk(KERN_INFO "task migration cache decay timeout: %ld msecs.\n",
-	       (cache_decay_ticks + 1) * 1000 / HZ);
-}
-
-/*
  * Initialize the logical CPU number to SAPICID mapping
  */
 void __init
@@ -544,7 +530,6 @@ smp_prepare_cpus (unsigned int max_cpus)
 	printk(KERN_INFO "Boot processor id 0x%x/0x%x\n", 0, boot_cpu_id);
 
 	current_thread_info()->cpu = 0;
-	smp_tune_scheduling();
 
 	/*
 	 * If SMP should be disabled, then really disable it!
diff -puN arch/m32r/kernel/smpboot.c~smpbootc-cleanups arch/m32r/kernel/smpboot.c
--- 25/arch/m32r/kernel/smpboot.c~smpbootc-cleanups	2005-02-22 18:19:57.000000000 -0800
+++ 25-akpm/arch/m32r/kernel/smpboot.c	2005-02-22 18:19:57.000000000 -0800
@@ -81,9 +81,6 @@ static cpumask_t cpu_callin_map;
 /* Per CPU bogomips and other parameters */
 struct cpuinfo_m32r cpu_data[NR_CPUS] __cacheline_aligned;
 
-/* Set when the idlers are all forked */
-int smp_threads_ready;
-
 static int cpucount;
 static cpumask_t smp_commenced_mask;
 
@@ -106,8 +103,6 @@ spinlock_t ipi_lock[NR_IPIS];
 
 static unsigned int calibration_result;
 
-unsigned long cache_decay_ticks = HZ / 100;
-
 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
 /* Function Prototypes                                                       */
 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
diff -puN arch/mips/kernel/smp.c~smpbootc-cleanups arch/mips/kernel/smp.c
--- 25/arch/mips/kernel/smp.c~smpbootc-cleanups	2005-02-22 18:19:57.000000000 -0800
+++ 25-akpm/arch/mips/kernel/smp.c	2005-02-22 18:19:57.000000000 -0800
@@ -46,9 +46,6 @@ int __cpu_logical_map[NR_CPUS];		/* Map 
 EXPORT_SYMBOL(phys_cpu_present_map);
 EXPORT_SYMBOL(cpu_online_map);
 
-cycles_t cacheflush_time;
-unsigned long cache_decay_ticks;
-
 static void smp_tune_scheduling (void)
 {
 	struct cache_desc *cd = &current_cpu_data.scache;
@@ -71,25 +68,10 @@ static void smp_tune_scheduling (void)
 	 *  L1 cache), on PIIs it's around 50-100 usecs, depending on
 	 *  the cache size)
 	 */
-	if (!cpu_khz) {
-		/*
-		 * This basically disables processor-affinity scheduling on SMP
-		 * without a cycle counter.  Currently all SMP capable MIPS
-		 * processors have a cycle counter.
-		 */
-		cacheflush_time = 0;
+	if (!cpu_khz)
 		return;
-	}
 
 	cachesize = cd->linesz * cd->sets * cd->ways;
-	cacheflush_time = (cpu_khz>>10) * (cachesize<<10) / bandwidth;
-	cache_decay_ticks = (long)cacheflush_time/cpu_khz * HZ / 1000;
-
-	printk("per-CPU timeslice cutoff: %ld.%02ld usecs.\n",
-		(long)cacheflush_time/(cpu_khz/1000),
-		((long)cacheflush_time*100/(cpu_khz/1000)) % 100);
-	printk("task migration cache decay timeout: %ld msecs.\n",
-		(cache_decay_ticks + 1) * 1000 / HZ);
 }
 
 extern void __init calibrate_delay(void);
diff -puN arch/parisc/kernel/smp.c~smpbootc-cleanups arch/parisc/kernel/smp.c
--- 25/arch/parisc/kernel/smp.c~smpbootc-cleanups	2005-02-22 18:19:57.000000000 -0800
+++ 25-akpm/arch/parisc/kernel/smp.c	2005-02-22 18:19:57.000000000 -0800
@@ -60,8 +60,6 @@ volatile struct task_struct *smp_init_cu
 
 static volatile int cpu_now_booting = 0;	/* track which CPU is booting */
 
-unsigned long cache_decay_ticks;	/* declared by include/linux/sched.h */
-
 static int parisc_max_cpus = 1;
 
 /* online cpus are ones that we've managed to bring up completely
@@ -583,8 +581,6 @@ void __devinit smp_prepare_boot_cpu(void
 
 	cpu_set(bootstrap_processor, cpu_online_map);
 	cpu_set(bootstrap_processor, cpu_present_map);
-
-	cache_decay_ticks = HZ/100;	/* FIXME very rough.  */
 }
 
 
diff -puN arch/ppc64/kernel/smp.c~smpbootc-cleanups arch/ppc64/kernel/smp.c
--- 25/arch/ppc64/kernel/smp.c~smpbootc-cleanups	2005-02-22 18:19:57.000000000 -0800
+++ 25-akpm/arch/ppc64/kernel/smp.c	2005-02-22 18:19:57.000000000 -0800
@@ -54,8 +54,6 @@
 #define DBG(fmt...)
 #endif
 
-int smp_threads_ready;
-
 cpumask_t cpu_possible_map = CPU_MASK_NONE;
 cpumask_t cpu_online_map = CPU_MASK_NONE;
 cpumask_t cpu_sibling_map[NR_CPUS] = { [0 ... NR_CPUS-1] = CPU_MASK_NONE };
diff -puN arch/ppc/kernel/smp.c~smpbootc-cleanups arch/ppc/kernel/smp.c
--- 25/arch/ppc/kernel/smp.c~smpbootc-cleanups	2005-02-22 18:19:57.000000000 -0800
+++ 25-akpm/arch/ppc/kernel/smp.c	2005-02-22 18:19:57.000000000 -0800
@@ -35,14 +35,12 @@
 #include <asm/tlbflush.h>
 #include <asm/xmon.h>
 
-int smp_threads_ready;
 volatile int smp_commenced;
 int smp_tb_synchronized;
 struct cpuinfo_PPC cpu_data[NR_CPUS];
 struct klock_info_struct klock_info = { KLOCK_CLEAR, 0 };
 atomic_t ipi_recv;
 atomic_t ipi_sent;
-unsigned long cache_decay_ticks = HZ/100;
 cpumask_t cpu_online_map;
 cpumask_t cpu_possible_map;
 int smp_hw_index[NR_CPUS];
diff -puN arch/s390/kernel/smp.c~smpbootc-cleanups arch/s390/kernel/smp.c
--- 25/arch/s390/kernel/smp.c~smpbootc-cleanups	2005-02-22 18:19:57.000000000 -0800
+++ 25-akpm/arch/s390/kernel/smp.c	2005-02-22 18:19:57.000000000 -0800
@@ -50,12 +50,9 @@ extern volatile int __cpu_logical_map[];
  */
 
 struct _lowcore *lowcore_ptr[NR_CPUS];
-cycles_t         cacheflush_time=0;
-int              smp_threads_ready=0;      /* Set when the idlers are all forked. */
 
 cpumask_t cpu_online_map;
 cpumask_t cpu_possible_map;
-unsigned long    cache_decay_ticks = 0;
 
 static struct task_struct *current_set[NR_CPUS];
 
diff -puN arch/sh/kernel/smp.c~smpbootc-cleanups arch/sh/kernel/smp.c
--- 25/arch/sh/kernel/smp.c~smpbootc-cleanups	2005-02-22 18:19:57.000000000 -0800
+++ 25-akpm/arch/sh/kernel/smp.c	2005-02-22 18:19:57.000000000 -0800
@@ -34,14 +34,12 @@
  * but is designed to be usable regardless if there's an MMU
  * present or not.
  */
-int smp_threads_ready = 0;
 struct sh_cpuinfo cpu_data[NR_CPUS];
 
 extern void per_cpu_trap_init(void);
 
 cpumask_t cpu_possible_map;
 cpumask_t cpu_online_map;
-unsigned long cache_decay_ticks = HZ / 100;
 static atomic_t cpus_booted = ATOMIC_INIT(0);
 
 /* These are defined by the board-specific code. */
@@ -129,7 +127,6 @@ int start_secondary(void *unused)
 
 void __init smp_cpus_done(unsigned int max_cpus)
 {
-	smp_threads_ready = 1;
 	smp_mb();
 }
 
diff -puN arch/sparc64/kernel/smp.c~smpbootc-cleanups arch/sparc64/kernel/smp.c
--- 25/arch/sparc64/kernel/smp.c~smpbootc-cleanups	2005-02-22 18:19:57.000000000 -0800
+++ 25-akpm/arch/sparc64/kernel/smp.c	2005-02-22 18:19:57.000000000 -0800
@@ -1055,9 +1055,6 @@ void __init smp_tick_init(void)
 	prof_counter(boot_cpu_id) = prof_multiplier(boot_cpu_id) = 1;
 }
 
-cycles_t cacheflush_time;
-unsigned long cache_decay_ticks;
-
 extern unsigned long cheetah_tune_scheduling(void);
 
 static void __init smp_tune_scheduling(void)
@@ -1078,10 +1075,8 @@ static void __init smp_tune_scheduling(v
 	 * of moving a process from one cpu to another).
 	 */
 	printk("SMP: Calibrating ecache flush... ");
-	if (tlb_type == cheetah || tlb_type == cheetah_plus) {
-		cacheflush_time = cheetah_tune_scheduling();
-		goto report;
-	}
+	if (tlb_type == cheetah || tlb_type == cheetah_plus)
+		return;
 
 	cpu_find_by_instance(0, &cpu_node, NULL);
 	ecache_size = prom_getintdefault(cpu_node,
@@ -1124,24 +1119,8 @@ static void __init smp_tune_scheduling(v
 
 		raw = (tick2 - tick1);
 
-		/* Dampen it a little, considering two processes
-		 * sharing the cache and fitting.
-		 */
-		cacheflush_time = (raw - (raw >> 2));
-
 		free_pages(orig_flush_base, order);
-	} else {
-		cacheflush_time = ((ecache_size << 2) +
-				   (ecache_size << 1));
-	}
-report:
-	/* Convert ticks/sticks to jiffies. */
-	cache_decay_ticks = cacheflush_time / timer_tick_offset;
-	if (cache_decay_ticks < 1)
-		cache_decay_ticks = 1;
-
-	printk("Using heuristic of %ld cycles, %ld ticks.\n",
-	       cacheflush_time, cache_decay_ticks);
+	}
 }
 
 /* /proc/profile writes can call this, don't __init it please. */
diff -puN arch/sparc/kernel/smp.c~smpbootc-cleanups arch/sparc/kernel/smp.c
--- 25/arch/sparc/kernel/smp.c~smpbootc-cleanups	2005-02-22 18:19:57.000000000 -0800
+++ 25-akpm/arch/sparc/kernel/smp.c	2005-02-22 18:19:57.000000000 -0800
@@ -36,15 +36,12 @@
 
 volatile int smp_processors_ready = 0;
 int smp_num_cpus = 1;
-int smp_threads_ready=0;
 volatile unsigned long cpu_callin_map[NR_CPUS] __initdata = {0,};
 unsigned char boot_cpu_id = 0;
 unsigned char boot_cpu_id4 = 0; /* boot_cpu_id << 2 */
 int smp_activated = 0;
 volatile int __cpu_number_map[NR_CPUS];
 volatile int __cpu_logical_map[NR_CPUS];
-cycles_t cacheflush_time = 0; /* XXX */
-unsigned long cache_decay_ticks = 100;
 
 cpumask_t cpu_online_map = CPU_MASK_NONE;
 cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
diff -puN arch/sparc/kernel/sun4d_smp.c~smpbootc-cleanups arch/sparc/kernel/sun4d_smp.c
--- 25/arch/sparc/kernel/sun4d_smp.c~smpbootc-cleanups	2005-02-22 18:19:57.000000000 -0800
+++ 25-akpm/arch/sparc/kernel/sun4d_smp.c	2005-02-22 18:19:57.000000000 -0800
@@ -45,7 +45,6 @@ extern void calibrate_delay(void);
 extern volatile int smp_processors_ready;
 extern int smp_num_cpus;
 static int smp_highest_cpu;
-extern int smp_threads_ready;
 extern volatile unsigned long cpu_callin_map[NR_CPUS];
 extern struct cpuinfo_sparc cpu_data[NR_CPUS];
 extern unsigned char boot_cpu_id;
diff -puN arch/sparc/kernel/sun4m_smp.c~smpbootc-cleanups arch/sparc/kernel/sun4m_smp.c
--- 25/arch/sparc/kernel/sun4m_smp.c~smpbootc-cleanups	2005-02-22 18:19:57.000000000 -0800
+++ 25-akpm/arch/sparc/kernel/sun4m_smp.c	2005-02-22 18:19:57.000000000 -0800
@@ -41,7 +41,6 @@ extern void calibrate_delay(void);
 
 extern volatile int smp_processors_ready;
 extern int smp_num_cpus;
-extern int smp_threads_ready;
 extern volatile unsigned long cpu_callin_map[NR_CPUS];
 extern unsigned char boot_cpu_id;
 extern int smp_activated;
diff -puN arch/um/kernel/smp.c~smpbootc-cleanups arch/um/kernel/smp.c
--- 25/arch/um/kernel/smp.c~smpbootc-cleanups	2005-02-22 18:19:57.000000000 -0800
+++ 25-akpm/arch/um/kernel/smp.c	2005-02-22 18:19:57.000000000 -0800
@@ -41,15 +41,9 @@ EXPORT_SYMBOL(cpu_possible_map);
  */
 struct cpuinfo_um cpu_data[NR_CPUS];
 
-/* Set when the idlers are all forked */
-int smp_threads_ready = 0;
-
 /* A statistic, can be a little off */
 int num_reschedules_sent = 0;
 
-/* Small, random number, never changed */
-unsigned long cache_decay_ticks = 5;
-
 /* Not changed after boot */
 struct task_struct *idle_threads[NR_CPUS];
 
diff -puN arch/x86_64/kernel/smpboot.c~smpbootc-cleanups arch/x86_64/kernel/smpboot.c
--- 25/arch/x86_64/kernel/smpboot.c~smpbootc-cleanups	2005-02-22 18:19:57.000000000 -0800
+++ 25-akpm/arch/x86_64/kernel/smpboot.c	2005-02-22 18:19:57.000000000 -0800
@@ -70,9 +70,6 @@ static cpumask_t smp_commenced_mask;
 /* Per CPU bogomips and other parameters */
 struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
 
-/* Set when the idlers are all forked */
-int smp_threads_ready;
-
 cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned;
 
 /*
@@ -247,7 +244,7 @@ static void __init synchronize_tsc_ap (v
 
 static atomic_t init_deasserted;
 
-void __init smp_callin(void)
+static void __init smp_callin(void)
 {
 	int cpuid, phys_id;
 	unsigned long timeout;
@@ -336,7 +333,7 @@ void __init smp_callin(void)
 		synchronize_tsc_ap();
 }
 
-int cpucount;
+static int cpucount;
 
 /*
  * Activate a secondary processor.
@@ -659,9 +656,6 @@ static void __init do_boot_cpu (int apic
 	}
 }
 
-cycles_t cacheflush_time;
-unsigned long cache_decay_ticks;
-
 static void smp_tune_scheduling (void)
 {
 	int cachesize;       /* kB   */
@@ -678,11 +672,6 @@ static void smp_tune_scheduling (void)
 	 */
 
 	if (!cpu_khz) {
-		/*
-		 * this basically disables processor-affinity
-		 * scheduling on SMP without a TSC.
-		 */
-		cacheflush_time = 0;
 		return;
 	} else {
 		cachesize = boot_cpu_data.x86_cache_size;
@@ -690,17 +679,7 @@ static void smp_tune_scheduling (void)
 			cachesize = 16; /* Pentiums, 2x8kB cache */
 			bandwidth = 100;
 		}
-
-		cacheflush_time = (cpu_khz>>10) * (cachesize<<10) / bandwidth;
 	}
-
-	cache_decay_ticks = (long)cacheflush_time/cpu_khz * HZ / 1000;
-
-	printk(KERN_INFO "per-CPU timeslice cutoff: %ld.%02ld usecs.\n",
-		(long)cacheflush_time/(cpu_khz/1000),
-		((long)cacheflush_time*100/(cpu_khz/1000)) % 100);
-	printk(KERN_INFO "task migration cache decay timeout: %ld msecs.\n",
-		(cache_decay_ticks + 1) * 1000 / HZ);
 }
 
 /*
diff -puN include/asm-alpha/timex.h~smpbootc-cleanups include/asm-alpha/timex.h
--- 25/include/asm-alpha/timex.h~smpbootc-cleanups	2005-02-22 18:19:57.000000000 -0800
+++ 25-akpm/include/asm-alpha/timex.h	2005-02-22 18:19:57.000000000 -0800
@@ -20,7 +20,6 @@
  */
 
 typedef unsigned int cycles_t;
-extern cycles_t cacheflush_time;
 
 static inline cycles_t get_cycles (void)
 {
diff -puN include/asm-arm26/timex.h~smpbootc-cleanups include/asm-arm26/timex.h
--- 25/include/asm-arm26/timex.h~smpbootc-cleanups	2005-02-22 18:19:57.000000000 -0800
+++ 25-akpm/include/asm-arm26/timex.h	2005-02-22 18:19:57.000000000 -0800
@@ -21,8 +21,6 @@
 
 typedef unsigned long cycles_t;
 
-extern cycles_t cacheflush_time;
-
 static inline cycles_t get_cycles (void)
 {
 	return 0;
diff -puN include/asm-arm/timex.h~smpbootc-cleanups include/asm-arm/timex.h
--- 25/include/asm-arm/timex.h~smpbootc-cleanups	2005-02-22 18:19:57.000000000 -0800
+++ 25-akpm/include/asm-arm/timex.h	2005-02-22 18:19:57.000000000 -0800
@@ -16,8 +16,6 @@
 
 typedef unsigned long cycles_t;
 
-extern cycles_t cacheflush_time;
-
 static inline cycles_t get_cycles (void)
 {
 	return 0;
diff -puN include/asm-i386/smp.h~smpbootc-cleanups include/asm-i386/smp.h
--- 25/include/asm-i386/smp.h~smpbootc-cleanups	2005-02-22 18:19:57.000000000 -0800
+++ 25-akpm/include/asm-i386/smp.h	2005-02-22 18:19:57.000000000 -0800
@@ -62,9 +62,6 @@ static inline int num_booting_cpus(void)
 	return cpus_weight(cpu_callout_map);
 }
 
-extern void map_cpu_to_logical_apicid(void);
-extern void unmap_cpu_to_logical_apicid(int cpu);
-
 #ifdef CONFIG_X86_LOCAL_APIC
 
 #ifdef APIC_DEFINITION
diff -puN include/asm-i386/timex.h~smpbootc-cleanups include/asm-i386/timex.h
--- 25/include/asm-i386/timex.h~smpbootc-cleanups	2005-02-22 18:19:57.000000000 -0800
+++ 25-akpm/include/asm-i386/timex.h	2005-02-22 18:19:57.000000000 -0800
@@ -32,8 +32,6 @@
  */
 typedef unsigned long long cycles_t;
 
-extern cycles_t cacheflush_time;
-
 static inline cycles_t get_cycles (void)
 {
 	unsigned long long ret=0;
diff -puN include/asm-m32r/timex.h~smpbootc-cleanups include/asm-m32r/timex.h
--- 25/include/asm-m32r/timex.h~smpbootc-cleanups	2005-02-22 18:19:57.000000000 -0800
+++ 25-akpm/include/asm-m32r/timex.h	2005-02-22 18:19:57.000000000 -0800
@@ -25,8 +25,6 @@
 
 typedef unsigned long long cycles_t;
 
-extern cycles_t cacheflush_time;
-
 static __inline__ cycles_t get_cycles (void)
 {
 	return 0;
diff -puN include/asm-mips/timex.h~smpbootc-cleanups include/asm-mips/timex.h
--- 25/include/asm-mips/timex.h~smpbootc-cleanups	2005-02-22 18:19:57.000000000 -0800
+++ 25-akpm/include/asm-mips/timex.h	2005-02-22 18:19:57.000000000 -0800
@@ -45,7 +45,6 @@
  */
 
 typedef unsigned int cycles_t;
-extern cycles_t cacheflush_time;
 
 static inline cycles_t get_cycles (void)
 {
diff -puN include/asm-parisc/timex.h~smpbootc-cleanups include/asm-parisc/timex.h
--- 25/include/asm-parisc/timex.h~smpbootc-cleanups	2005-02-22 18:19:57.000000000 -0800
+++ 25-akpm/include/asm-parisc/timex.h	2005-02-22 18:19:57.000000000 -0800
@@ -12,8 +12,6 @@
 
 typedef unsigned long cycles_t;
 
-extern cycles_t cacheflush_time;
-
 static inline cycles_t get_cycles (void)
 {
 	return mfctl(16);
diff -puN include/asm-ppc/timex.h~smpbootc-cleanups include/asm-ppc/timex.h
--- 25/include/asm-ppc/timex.h~smpbootc-cleanups	2005-02-22 18:19:57.000000000 -0800
+++ 25-akpm/include/asm-ppc/timex.h	2005-02-22 18:19:57.000000000 -0800
@@ -19,8 +19,6 @@ typedef unsigned long cycles_t;
  * Currently only used on SMP.
  */
 
-extern cycles_t cacheflush_time;
-
 static inline cycles_t get_cycles(void)
 {
 	cycles_t ret = 0;
diff -puN include/asm-s390/timex.h~smpbootc-cleanups include/asm-s390/timex.h
--- 25/include/asm-s390/timex.h~smpbootc-cleanups	2005-02-22 18:19:57.000000000 -0800
+++ 25-akpm/include/asm-s390/timex.h	2005-02-22 18:19:57.000000000 -0800
@@ -15,8 +15,6 @@
 
 typedef unsigned long long cycles_t;
 
-extern cycles_t cacheflush_time;
-
 static inline cycles_t get_cycles(void)
 {
 	cycles_t cycles;
diff -puN include/asm-sh64/timex.h~smpbootc-cleanups include/asm-sh64/timex.h
--- 25/include/asm-sh64/timex.h~smpbootc-cleanups	2005-02-22 18:19:57.000000000 -0800
+++ 25-akpm/include/asm-sh64/timex.h	2005-02-22 18:19:57.000000000 -0800
@@ -23,8 +23,6 @@
 
 typedef unsigned long cycles_t;
 
-extern cycles_t cacheflush_time;
-
 static __inline__ cycles_t get_cycles (void)
 {
 	return 0;
diff -puN include/asm-sh/timex.h~smpbootc-cleanups include/asm-sh/timex.h
--- 25/include/asm-sh/timex.h~smpbootc-cleanups	2005-02-22 18:19:57.000000000 -0800
+++ 25-akpm/include/asm-sh/timex.h	2005-02-22 18:19:57.000000000 -0800
@@ -10,8 +10,6 @@
 
 typedef unsigned long long cycles_t;
 
-extern cycles_t cacheflush_time;
-
 static __inline__ cycles_t get_cycles (void)
 {
 	return 0;
diff -puN include/asm-sparc/timex.h~smpbootc-cleanups include/asm-sparc/timex.h
--- 25/include/asm-sparc/timex.h~smpbootc-cleanups	2005-02-22 18:19:57.000000000 -0800
+++ 25-akpm/include/asm-sparc/timex.h	2005-02-22 18:19:57.000000000 -0800
@@ -10,7 +10,6 @@
 
 /* XXX Maybe do something better at some point... -DaveM */
 typedef unsigned long cycles_t;
-extern cycles_t cacheflush_time;
 #define get_cycles()	(0)
 
 #endif
diff -puN include/asm-um/timex.h~smpbootc-cleanups include/asm-um/timex.h
--- 25/include/asm-um/timex.h~smpbootc-cleanups	2005-02-22 18:19:57.000000000 -0800
+++ 25-akpm/include/asm-um/timex.h	2005-02-22 18:19:57.000000000 -0800
@@ -3,8 +3,6 @@
 
 typedef unsigned long cycles_t;
 
-#define cacheflush_time (0)
-
 static inline cycles_t get_cycles (void)
 {
 	return 0;
diff -puN include/asm-x86_64/timex.h~smpbootc-cleanups include/asm-x86_64/timex.h
--- 25/include/asm-x86_64/timex.h~smpbootc-cleanups	2005-02-22 18:19:57.000000000 -0800
+++ 25-akpm/include/asm-x86_64/timex.h	2005-02-22 18:19:57.000000000 -0800
@@ -16,8 +16,6 @@
 
 typedef unsigned long long cycles_t;
 
-extern cycles_t cacheflush_time;
-
 static inline cycles_t get_cycles (void)
 {
 	unsigned long long ret;
diff -puN include/linux/sched.h~smpbootc-cleanups include/linux/sched.h
--- 25/include/linux/sched.h~smpbootc-cleanups	2005-02-22 18:19:57.000000000 -0800
+++ 25-akpm/include/linux/sched.h	2005-02-22 18:19:57.000000000 -0800
@@ -175,7 +175,6 @@ extern void cpu_init (void);
 extern void trap_init(void);
 extern void update_process_times(int user);
 extern void scheduler_tick(void);
-extern unsigned long cache_decay_ticks;
 
 #ifdef CONFIG_DETECT_SOFTLOCKUP
 extern void softlockup_tick(struct pt_regs *regs);
diff -puN include/linux/smp.h~smpbootc-cleanups include/linux/smp.h
--- 25/include/linux/smp.h~smpbootc-cleanups	2005-02-22 18:19:57.000000000 -0800
+++ 25-akpm/include/linux/smp.h	2005-02-22 18:19:57.000000000 -0800
@@ -71,11 +71,6 @@ static inline int on_each_cpu(void (*fun
 	return ret;
 }
 
-/*
- * True once the per process idle is forked
- */
-extern int smp_threads_ready;
-
 #define MSG_ALL_BUT_SELF	0x8000	/* Assume <32768 CPU's */
 #define MSG_ALL			0x8001
 
@@ -102,7 +97,6 @@ void smp_prepare_boot_cpu(void);
 # define smp_processor_id()			0
 #endif
 #define hard_smp_processor_id()			0
-#define smp_threads_ready			1
 #define smp_call_function(func,info,retry,wait)	({ 0; })
 #define on_each_cpu(func,info,retry,wait)	({ func(info); 0; })
 static inline void smp_send_reschedule(int cpu) { }
diff -puN init/main.c~smpbootc-cleanups init/main.c
--- 25/init/main.c~smpbootc-cleanups	2005-02-22 18:19:57.000000000 -0800
+++ 25-akpm/init/main.c	2005-02-22 18:19:57.000000000 -0800
@@ -353,7 +353,6 @@ static void __init smp_init(void)
 #if 0
 	/* Get other processors into their bootup holding patterns. */
 
-	smp_threads_ready=1;
 	smp_commence();
 #endif
 }
_