From: Zwane Mwaikambo <zwane@linuxpower.ca>

The following patch from Shaohua Li fixes a race with preempt enabled when
a module containing a pm_idle callback is unloaded.  The cached local copies
of pm_idle taken in the idle loops must be read and called inside RCU
read-side critical sections so that the synchronize_kernel() call in the
unload path waits for all processors.
The original bugzilla entry can be found at

http://bugzilla.kernel.org/show_bug.cgi?id=1716

Shaohua, I had to make a small change (a variable declaration sat after code
in a block) so that it compiles with geriatric compilers such as the ones
Andrew is attached to ;)
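
For reference, the fix boils down to the reader/updater pairing sketched
below.  This is only a condensed, out-of-tree illustration of the pattern
the diff applies to each idle loop; idle_loop_body(), my_module_exit() and
saved_pm_idle are made-up names for illustration and are not part of the
patch.

#include <linux/rcupdate.h>	/* rcu_read_lock(), synchronize_kernel() */

extern void (*pm_idle)(void);		/* global idle callback hook */
static void (*saved_pm_idle)(void);	/* previous hook, saved at module init */

/*
 * Reader side: each pass through the idle loop samples pm_idle inside an
 * RCU read-side critical section, so an unloading module has to wait for
 * the call through the cached copy to finish.
 */
static void idle_loop_body(void)
{
	void (*idle)(void);

	rcu_read_lock();
	idle = pm_idle;
	if (idle)
		idle();
	rcu_read_unlock();
}

/*
 * Updater side: restore the previous hook, then wait until every CPU has
 * left its current read-side critical section before letting the module
 * text (and the callback it contains) disappear.
 */
static void my_module_exit(void)
{
	pm_idle = saved_pm_idle;
	synchronize_kernel();
}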

Signed-off-by: Li Shaohua <shaohua.li@intel.com>
Signed-off-by: Zwane Mwaikambo <zwane@linuxpower.ca>
Signed-off-by: Andrew Morton <akpm@osdl.org>
---

 25-akpm/arch/i386/kernel/apm.c       |    9 ++++++++-
 25-akpm/arch/i386/kernel/process.c   |   10 +++++++++-
 25-akpm/arch/ia64/kernel/process.c   |   16 ++++++++++++----
 25-akpm/arch/x86_64/kernel/process.c |   17 +++++++++++++----
 25-akpm/drivers/acpi/processor.c     |    5 +++++
 5 files changed, 47 insertions(+), 10 deletions(-)

diff -puN arch/i386/kernel/apm.c~close-race-with-preempt-and-modular-pm_idle-callbacks arch/i386/kernel/apm.c
--- 25/arch/i386/kernel/apm.c~close-race-with-preempt-and-modular-pm_idle-callbacks	Thu Sep  9 15:41:44 2004
+++ 25-akpm/arch/i386/kernel/apm.c	Thu Sep  9 15:41:45 2004
@@ -2362,8 +2362,15 @@ static void __exit apm_exit(void)
 {
 	int	error;
 
-	if (set_pm_idle)
+	if (set_pm_idle) {
 		pm_idle = original_pm_idle;
+		/*
+		 * We are about to unload the current idle thread pm callback
+		 * (pm_idle). Wait for all processors to update cached/local
+		 * copies of pm_idle before proceeding.
+		 */
+		synchronize_kernel();
+	}
 	if (((apm_info.bios.flags & APM_BIOS_DISENGAGED) == 0)
 	    && (apm_info.connection_version > 0x0100)) {
 		error = apm_engage_power_management(APM_DEVICE_ALL, 0);
diff -puN arch/i386/kernel/process.c~close-race-with-preempt-and-modular-pm_idle-callbacks arch/i386/kernel/process.c
--- 25/arch/i386/kernel/process.c~close-race-with-preempt-and-modular-pm_idle-callbacks	Thu Sep  9 15:41:44 2004
+++ 25-akpm/arch/i386/kernel/process.c	Thu Sep  9 15:41:45 2004
@@ -175,7 +175,14 @@ void cpu_idle (void)
 	/* endless idle loop with no priority at all */
 	while (1) {
 		while (!need_resched()) {
-			void (*idle)(void) = pm_idle;
+			void (*idle)(void);
+			/*
+			 * Mark this as an RCU critical section so that
+			 * synchronize_kernel() in the unload path waits
+			 * for our completion.
+			 */
+			rcu_read_lock();
+			idle = pm_idle;
 
 			if (!idle)
 				idle = default_idle;
@@ -184,6 +191,7 @@ void cpu_idle (void)
 				play_dead();
 			irq_stat[smp_processor_id()].idle_timestamp = jiffies;
 			idle();
+			rcu_read_unlock();
 		}
 		schedule();
 	}
diff -puN arch/ia64/kernel/process.c~close-race-with-preempt-and-modular-pm_idle-callbacks arch/ia64/kernel/process.c
--- 25/arch/ia64/kernel/process.c~close-race-with-preempt-and-modular-pm_idle-callbacks	Thu Sep  9 15:41:45 2004
+++ 25-akpm/arch/ia64/kernel/process.c	Thu Sep  9 15:41:45 2004
@@ -228,18 +228,26 @@ cpu_idle (void *unused)
 
 	/* endless idle loop with no priority at all */
 	while (1) {
-		void (*idle)(void) = pm_idle;
-		if (!idle)
-			idle = default_idle;
-
 #ifdef CONFIG_SMP
 		if (!need_resched())
 			min_xtp();
 #endif
 		while (!need_resched()) {
+			void (*idle)(void);
+
 			if (mark_idle)
 				(*mark_idle)(1);
+			/*
+			 * Mark this as an RCU critical section so that
+			 * synchronize_kernel() in the unload path waits
+			 * for our completion.
+			 */
+			rcu_read_lock();
+			idle = pm_idle;
+			if (!idle)
+				idle = default_idle;
 			(*idle)();
+			rcu_read_unlock();
 		}
 
 		if (mark_idle)
diff -puN arch/x86_64/kernel/process.c~close-race-with-preempt-and-modular-pm_idle-callbacks arch/x86_64/kernel/process.c
--- 25/arch/x86_64/kernel/process.c~close-race-with-preempt-and-modular-pm_idle-callbacks	Thu Sep  9 15:41:45 2004
+++ 25-akpm/arch/x86_64/kernel/process.c	Thu Sep  9 15:41:45 2004
@@ -131,11 +131,20 @@ void cpu_idle (void)
 {
 	/* endless idle loop with no priority at all */
 	while (1) {
-		void (*idle)(void) = pm_idle;
-		if (!idle)
-			idle = default_idle;
-		while (!need_resched())
+		while (!need_resched()) {
+			void (*idle)(void);
+			/*
+			 * Mark this as an RCU critical section so that
+			 * synchronize_kernel() in the unload path waits
+			 * for our completion.
+			 */
+			rcu_read_lock();
+			idle = pm_idle;
+			if (!idle)
+				idle = default_idle;
 			idle();
+			rcu_read_unlock();
+		}
 		schedule();
 	}
 }
diff -puN drivers/acpi/processor.c~close-race-with-preempt-and-modular-pm_idle-callbacks drivers/acpi/processor.c
--- 25/drivers/acpi/processor.c~close-race-with-preempt-and-modular-pm_idle-callbacks	Thu Sep  9 15:41:45 2004
+++ 25-akpm/drivers/acpi/processor.c	Thu Sep  9 15:41:45 2004
@@ -2424,6 +2424,11 @@ acpi_processor_remove (
 	/* Unregister the idle handler when processor #0 is removed. */
 	if (pr->id == 0) {
 		pm_idle = pm_idle_save;
+		/*
+		 * We are about to unload the current idle thread pm callback
+		 * (pm_idle). Wait for all processors to update cached/local
+		 * copies of pm_idle before proceeding.
+		 */
 		synchronize_kernel();
 	}
 
_