From: Ingo Molnar <mingo@elte.hu>

Add a per-arch sched_cacheflush(): a write-back cache flush used by the
scheduler's migration-cost calibration code at boot time.
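
To illustrate the intended use, here is a simplified, hypothetical sketch
(measure_cost() and its buffer handling are made up for illustration; the
actual calibration code, added separately to kernel/sched.c, is more
involved):

	static unsigned long long measure_cost(void *buf, unsigned long size)
	{
		volatile char *p = buf;
		unsigned long long t0, t1;
		unsigned long i;

		/* start from a cold cache, so runs are comparable: */
		sched_cacheflush();

		t0 = sched_clock();
		for (i = 0; i < size; i++)
			p[i]++;		/* fault the working set back in */
		t1 = sched_clock();

		return t1 - t0;
	}

The only requirement on sched_cacheflush() is that it writes back and
evicts as much cached data as is cheaply possible, so that the timed pass
afterwards sees a cold cache.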

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
---
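
[ Note: most architectures below only get an empty stub plus a TODO.
  Where no cheap flush-everything instruction exists, one possible
  fallback (an untested, illustrative sketch, not part of this patch;
  the buffer size and line size are assumptions) is to stream through
  a buffer larger than the last-level cache:

	#define SCHED_FLUSH_SIZE	(4*1024*1024)	/* assumption: >= L2 size */
	static char sched_flush_buf[SCHED_FLUSH_SIZE];

	void sched_cacheflush(void)
	{
		volatile char *p = sched_flush_buf;
		unsigned long i;

		/* touch one byte per (assumed 32-byte) cache line: */
		for (i = 0; i < SCHED_FLUSH_SIZE; i += 32)
			p[i]++;
	}

  Dirty lines evicted by replacement are written back by the hardware,
  so this approximates a write-back flush. Because of the large static
  buffer this would live in arch code (like the ia64 version below),
  not in a header. Being approximate is fine: the calibration only
  needs mostly-cold caches, not an architecturally exact flush. ]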

 arch/ia64/kernel/setup.c     |    9 +++++++++
 include/asm-alpha/system.h   |   10 ++++++++++
 include/asm-arm/system.h     |   10 ++++++++++
 include/asm-arm26/system.h   |   10 ++++++++++
 include/asm-i386/system.h    |    9 +++++++++
 include/asm-ia64/system.h    |    1 +
 include/asm-m32r/system.h    |   10 ++++++++++
 include/asm-mips/system.h    |   10 ++++++++++
 include/asm-parisc/system.h  |    9 +++++++++
 include/asm-ppc/system.h     |   10 ++++++++++
 include/asm-ppc64/system.h   |    9 +++++++++
 include/asm-s390/system.h    |   10 ++++++++++
 include/asm-sh/system.h      |   10 ++++++++++
 include/asm-sparc/system.h   |   10 ++++++++++
 include/asm-sparc64/system.h |   10 ++++++++++
 include/asm-x86_64/system.h  |    9 +++++++++
 kernel/sched.c               |    0 
 17 files changed, 146 insertions(+)

diff -puN include/asm-i386/system.h~sched-add-cacheflush-asm include/asm-i386/system.h
--- devel/include/asm-i386/system.h~sched-add-cacheflush-asm	2005-09-07 20:11:01.000000000 -0700
+++ devel-akpm/include/asm-i386/system.h	2005-09-07 20:11:02.000000000 -0700
@@ -386,6 +386,15 @@ void enable_hlt(void);
 extern int es7000_plat;
 void cpu_idle_wait(void);
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible:
+ */
+static inline void sched_cacheflush(void)
+{
+	wbinvd();
+}
+
 extern unsigned long arch_align_stack(unsigned long sp);
 
 #endif
diff -puN include/asm-ia64/system.h~sched-add-cacheflush-asm include/asm-ia64/system.h
--- devel/include/asm-ia64/system.h~sched-add-cacheflush-asm	2005-09-07 20:11:01.000000000 -0700
+++ devel-akpm/include/asm-ia64/system.h	2005-09-07 20:11:02.000000000 -0700
@@ -279,6 +279,7 @@ extern void ia64_load_extra (struct task
 #define ia64_platform_is(x) (strcmp(x, platform_name) == 0)
 
 void cpu_idle_wait(void);
+void sched_cacheflush(void);
 
 #define arch_align_stack(x) (x)
 
diff -puN include/asm-ppc64/system.h~sched-add-cacheflush-asm include/asm-ppc64/system.h
--- devel/include/asm-ppc64/system.h~sched-add-cacheflush-asm	2005-09-07 20:11:01.000000000 -0700
+++ devel-akpm/include/asm-ppc64/system.h	2005-09-07 20:11:02.000000000 -0700
@@ -141,6 +141,15 @@ struct thread_struct;
 extern struct task_struct * _switch(struct thread_struct *prev,
 				    struct thread_struct *next);
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ * TODO: how do you cacheflush on ppc64?
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
 static inline int __is_processor(unsigned long pv)
 {
 	unsigned long pvr;
diff -puN include/asm-x86_64/system.h~sched-add-cacheflush-asm include/asm-x86_64/system.h
--- devel/include/asm-x86_64/system.h~sched-add-cacheflush-asm	2005-09-07 20:11:01.000000000 -0700
+++ devel-akpm/include/asm-x86_64/system.h	2005-09-07 20:11:02.000000000 -0700
@@ -180,6 +180,15 @@ static inline void write_cr4(unsigned lo
 #define wbinvd() \
 	__asm__ __volatile__ ("wbinvd": : :"memory");
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ */
+static inline void sched_cacheflush(void)
+{
+	wbinvd();
+}
+
 #endif	/* __KERNEL__ */
 
 #define nop() __asm__ __volatile__ ("nop")
diff -puN kernel/sched.c~sched-add-cacheflush-asm kernel/sched.c
diff -puN include/asm-alpha/system.h~sched-add-cacheflush-asm include/asm-alpha/system.h
--- devel/include/asm-alpha/system.h~sched-add-cacheflush-asm	2005-09-07 20:11:01.000000000 -0700
+++ devel-akpm/include/asm-alpha/system.h	2005-09-07 20:11:02.000000000 -0700
@@ -139,6 +139,16 @@ extern void halt(void) __attribute__((no
 struct task_struct;
 extern struct task_struct *alpha_switch_to(unsigned long, struct task_struct*);
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
 #define mb() \
 __asm__ __volatile__("mb": : :"memory")
 
diff -puN include/asm-arm26/system.h~sched-add-cacheflush-asm include/asm-arm26/system.h
--- devel/include/asm-arm26/system.h~sched-add-cacheflush-asm	2005-09-07 20:11:01.000000000 -0700
+++ devel-akpm/include/asm-arm26/system.h	2005-09-07 20:11:02.000000000 -0700
@@ -115,6 +115,16 @@ do {									\
 } while (0)
 
 /*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
+/*
  * Save the current interrupt enable state & disable IRQs
  */
 #define local_irq_save(x)                               \
diff -puN include/asm-arm/system.h~sched-add-cacheflush-asm include/asm-arm/system.h
--- devel/include/asm-arm/system.h~sched-add-cacheflush-asm	2005-09-07 20:11:01.000000000 -0700
+++ devel-akpm/include/asm-arm/system.h	2005-09-07 20:11:02.000000000 -0700
@@ -172,6 +172,16 @@ do {									\
 } while (0)
 
 /*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
+/*
  * CPU interrupt mask handling.
  */
 #if __LINUX_ARM_ARCH__ >= 6
diff -puN include/asm-m32r/system.h~sched-add-cacheflush-asm include/asm-m32r/system.h
--- devel/include/asm-m32r/system.h~sched-add-cacheflush-asm	2005-09-07 20:11:01.000000000 -0700
+++ devel-akpm/include/asm-m32r/system.h	2005-09-07 20:11:02.000000000 -0700
@@ -67,6 +67,16 @@
 	last = __last; \
 } while(0)
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
 /* Interrupt Control */
 #if !defined(CONFIG_CHIP_M32102)
 #define local_irq_enable() \
diff -puN include/asm-mips/system.h~sched-add-cacheflush-asm include/asm-mips/system.h
--- devel/include/asm-mips/system.h~sched-add-cacheflush-asm	2005-09-07 20:11:01.000000000 -0700
+++ devel-akpm/include/asm-mips/system.h	2005-09-07 20:11:02.000000000 -0700
@@ -159,6 +159,16 @@ do { \
 	(last) = resume(prev, next, next->thread_info); \
 } while(0)
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
 #define ROT_IN_PIECES							\
 	"	.set	noreorder	\n"				\
 	"	.set	reorder		\n"
diff -puN include/asm-parisc/system.h~sched-add-cacheflush-asm include/asm-parisc/system.h
--- devel/include/asm-parisc/system.h~sched-add-cacheflush-asm	2005-09-07 20:11:01.000000000 -0700
+++ devel-akpm/include/asm-parisc/system.h	2005-09-07 20:11:02.000000000 -0700
@@ -49,6 +49,15 @@ extern struct task_struct *_switch_to(st
 	(last) = _switch_to(prev, next);			\
 } while(0)
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
 
 
 /* interrupt control */
diff -puN include/asm-ppc/system.h~sched-add-cacheflush-asm include/asm-ppc/system.h
--- devel/include/asm-ppc/system.h~sched-add-cacheflush-asm	2005-09-07 20:11:01.000000000 -0700
+++ devel-akpm/include/asm-ppc/system.h	2005-09-07 20:11:02.000000000 -0700
@@ -100,6 +100,16 @@ extern struct task_struct *__switch_to(s
 	struct task_struct *);
 #define switch_to(prev, next, last)	((last) = __switch_to((prev), (next)))
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
 struct thread_struct;
 extern struct task_struct *_switch(struct thread_struct *prev,
 				   struct thread_struct *next);
diff -puN include/asm-s390/system.h~sched-add-cacheflush-asm include/asm-s390/system.h
--- devel/include/asm-s390/system.h~sched-add-cacheflush-asm	2005-09-07 20:11:01.000000000 -0700
+++ devel-akpm/include/asm-s390/system.h	2005-09-07 20:11:02.000000000 -0700
@@ -104,6 +104,16 @@ static inline void restore_access_regs(u
 	prev = __switch_to(prev,next);					     \
 } while (0)
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 extern void account_user_vtime(struct task_struct *);
 extern void account_system_vtime(struct task_struct *);
diff -puN include/asm-sh/system.h~sched-add-cacheflush-asm include/asm-sh/system.h
--- devel/include/asm-sh/system.h~sched-add-cacheflush-asm	2005-09-07 20:11:01.000000000 -0700
+++ devel-akpm/include/asm-sh/system.h	2005-09-07 20:11:02.000000000 -0700
@@ -57,6 +57,16 @@
 	last = __last;							\
 } while (0)
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
 #define nop() __asm__ __volatile__ ("nop")
 
 
diff -puN include/asm-sparc64/system.h~sched-add-cacheflush-asm include/asm-sparc64/system.h
--- devel/include/asm-sparc64/system.h~sched-add-cacheflush-asm	2005-09-07 20:11:01.000000000 -0700
+++ devel-akpm/include/asm-sparc64/system.h	2005-09-07 20:11:02.000000000 -0700
@@ -222,6 +222,16 @@ do {	if (test_thread_flag(TIF_PERFCTR)) 
 	}								\
 } while(0)
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
 static inline unsigned long xchg32(__volatile__ unsigned int *m, unsigned int val)
 {
 	unsigned long tmp1, tmp2;
diff -puN include/asm-sparc/system.h~sched-add-cacheflush-asm include/asm-sparc/system.h
--- devel/include/asm-sparc/system.h~sched-add-cacheflush-asm	2005-09-07 20:11:01.000000000 -0700
+++ devel-akpm/include/asm-sparc/system.h	2005-09-07 20:11:02.000000000 -0700
@@ -166,6 +166,16 @@ extern void fpsave(unsigned long *fpregs
 	} while(0)
 
 /*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
+/*
  * Changing the IRQ level on the Sparc.
  */
 extern void local_irq_restore(unsigned long);
diff -puN arch/ia64/kernel/setup.c~sched-add-cacheflush-asm arch/ia64/kernel/setup.c
--- devel/arch/ia64/kernel/setup.c~sched-add-cacheflush-asm	2005-09-07 20:11:01.000000000 -0700
+++ devel-akpm/arch/ia64/kernel/setup.c	2005-09-07 20:11:02.000000000 -0700
@@ -827,3 +827,12 @@ check_bugs (void)
 	ia64_patch_mckinley_e9((unsigned long) __start___mckinley_e9_bundles,
 			       (unsigned long) __end___mckinley_e9_bundles);
 }
+
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ */
+void sched_cacheflush(void)
+{
+	ia64_sal_cache_flush(3);	/* SAL_CACHE_FLUSH: 3 == flush both i-cache and d-cache */
+}
_