From: Rusty Russell <rusty@rustcorp.com.au>

The cpucontrol mutex is not required when no cpus can go up and down.
Andrew wrote a wrapper for it to avoid #ifdefs; this patch changes the
wrapper to be defined only for CONFIG_HOTPLUG_CPU, and uses it everywhere.

The only downside is that the cpucontrol lock was overloaded by my recent
patch to net/core/flow.c to protect flow_cache_flush() from reentrancy, so
this reintroduces the local flow_flush_sem.  This code isn't speed critical,
so taking two locks when CONFIG_HOTPLUG_CPU=y is not really an issue.



---

 include/linux/cpu.h |   14 ++++++++------
 kernel/module.c     |    6 +++---
 net/core/flow.c     |    6 ++++--
 3 files changed, 15 insertions(+), 11 deletions(-)

diff -puN include/linux/cpu.h~lock_cpu_hotplug-fixes include/linux/cpu.h
--- 25/include/linux/cpu.h~lock_cpu_hotplug-fixes	2004-01-29 22:30:27.000000000 -0800
+++ 25-akpm/include/linux/cpu.h	2004-01-29 22:30:27.000000000 -0800
@@ -38,9 +38,6 @@ extern void unregister_cpu_notifier(stru
 
 int cpu_up(unsigned int cpu);
 
-#define lock_cpu_hotplug()	down(&cpucontrol)
-#define unlock_cpu_hotplug()	up(&cpucontrol)
-
 #else
 
 static inline int register_cpu_notifier(struct notifier_block *nb)
@@ -51,12 +48,17 @@ static inline void unregister_cpu_notifi
 {
 }
 
-#define lock_cpu_hotplug()	do { } while (0)
-#define unlock_cpu_hotplug()		do { } while (0)
-
 #endif /* CONFIG_SMP */
 extern struct sysdev_class cpu_sysdev_class;
 
+#ifdef CONFIG_HOTPLUG_CPU
 /* Stop CPUs going up and down. */
 extern struct semaphore cpucontrol;
+#define lock_cpu_hotplug()	down(&cpucontrol)
+#define unlock_cpu_hotplug()	up(&cpucontrol)
+#else
+#define lock_cpu_hotplug()	do { } while (0)
+#define unlock_cpu_hotplug()	do { } while (0)
+#endif
+
 #endif /* _LINUX_CPU_H_ */
diff -puN kernel/module.c~lock_cpu_hotplug-fixes kernel/module.c
--- 25/kernel/module.c~lock_cpu_hotplug-fixes	2004-01-29 22:30:27.000000000 -0800
+++ 25-akpm/kernel/module.c	2004-01-29 22:30:27.000000000 -0800
@@ -554,7 +554,7 @@ static int stop_refcounts(void)
 	stopref_state = STOPREF_WAIT;
 
 	/* No CPUs can come up or down during this. */
-	down(&cpucontrol);
+	lock_cpu_hotplug();
 
 	for (i = 0; i < NR_CPUS; i++) {
 		if (i == cpu || !cpu_online(i))
@@ -572,7 +572,7 @@ static int stop_refcounts(void)
 	/* If some failed, kill them all. */
 	if (ret < 0) {
 		stopref_set_state(STOPREF_EXIT, 1);
-		up(&cpucontrol);
+		unlock_cpu_hotplug();
 		return ret;
 	}
 
@@ -595,7 +595,7 @@ static void restart_refcounts(void)
 	stopref_set_state(STOPREF_EXIT, 0);
 	local_irq_enable();
 	preempt_enable();
-	up(&cpucontrol);
+	unlock_cpu_hotplug();
 }
 #else /* ...!SMP */
 static inline int stop_refcounts(void)
diff -puN net/core/flow.c~lock_cpu_hotplug-fixes net/core/flow.c
--- 25/net/core/flow.c~lock_cpu_hotplug-fixes	2004-01-29 22:30:27.000000000 -0800
+++ 25-akpm/net/core/flow.c	2004-01-29 22:30:27.000000000 -0800
@@ -283,10 +283,11 @@ static void flow_cache_flush_per_cpu(voi
 void flow_cache_flush(void)
 {
 	struct flow_flush_info info;
+	static DECLARE_MUTEX(flow_flush_sem);
 
-	/* Don't want cpus going down or up during this, also protects
-	 * against multiple callers. */
+	/* Don't want cpus going down or up during this. */
 	lock_cpu_hotplug();
+	down(&flow_flush_sem);
 	atomic_set(&info.cpuleft, num_online_cpus());
 	init_completion(&info.completion);
 
@@ -296,6 +297,7 @@ void flow_cache_flush(void)
 	local_bh_enable();
 
 	wait_for_completion(&info.completion);
+	up(&flow_flush_sem);
 	unlock_cpu_hotplug();
 }
 

_