From: Nick Piggin <piggin@cyberone.com.au>

__set_cpus_allowed() is called with the runqueue lock held, so don't have it
wake the migration thread directly: it now only queues the migration request.
The callers shown here (sched_migrate_task() and set_cpus_allowed()) wake the
migration thread themselves, after dropping the lock with task_rq_unlock()
and before waiting for the request to complete.  Also add a BUG_ON() so we
catch the restore-old-mask path in sched_migrate_task() ever needing the
migration thread: the current cpu is allowed by the old mask, so it never
should.

 kernel/sched.c |    5 +++--
 1 files changed, 3 insertions(+), 2 deletions(-)
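
For illustration only (this is not kernel code, and the names are invented):
a minimal userspace pthread sketch of the ordering the patch establishes,
namely queue the request while holding the lock, drop the lock, and only
then wake the worker and wait for it to complete the request.

	/* Build with: cc -pthread sketch.c */
	#include <pthread.h>
	#include <stdio.h>

	struct request {
		int done;			/* completion flag, like req->done */
		struct request *next;
	};

	static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t worker_wake = PTHREAD_COND_INITIALIZER;
	static pthread_cond_t req_done = PTHREAD_COND_INITIALIZER;
	static struct request *queue_head;

	static void *worker_main(void *unused)
	{
		(void)unused;
		pthread_mutex_lock(&queue_lock);
		while (!queue_head)		/* sleep until a request is queued */
			pthread_cond_wait(&worker_wake, &queue_lock);
		queue_head->done = 1;		/* handle and complete the request */
		queue_head = queue_head->next;
		pthread_cond_signal(&req_done);
		pthread_mutex_unlock(&queue_lock);
		return NULL;
	}

	int main(void)
	{
		pthread_t worker;
		struct request req = { 0, NULL };

		pthread_create(&worker, NULL, worker_main, NULL);

		pthread_mutex_lock(&queue_lock);
		req.next = queue_head;		/* queue the request under the lock */
		queue_head = &req;
		pthread_mutex_unlock(&queue_lock);

		/* wake the worker only after the lock has been dropped... */
		pthread_cond_signal(&worker_wake);

		/* ...then wait for the request to complete */
		pthread_mutex_lock(&queue_lock);
		while (!req.done)
			pthread_cond_wait(&req_done, &queue_lock);
		pthread_mutex_unlock(&queue_lock);

		pthread_join(worker, NULL);
		printf("request completed\n");
		return 0;
	}

Note the sketch only mirrors the ordering (queue under lock, unlock, wake,
wait); the kernel's reason for that ordering is that waking the migration
thread needs the runqueue lock the caller is still holding.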

diff -puN kernel/sched.c~set_cpus_allowed-locking-fix-fix kernel/sched.c
--- 25/kernel/sched.c~set_cpus_allowed-locking-fix-fix	2004-01-04 00:58:31.000000000 -0800
+++ 25-akpm/kernel/sched.c	2004-01-04 00:58:31.000000000 -0800
@@ -575,7 +575,6 @@ static int __set_cpus_allowed(task_t *p,
 	init_completion(&req->done);
 	req->task = p;
 	list_add(&req->list, &rq->migration_queue);
-	wake_up_process(rq->migration_thread);
 	return 1;
 }
 
@@ -1016,6 +1015,7 @@ static void sched_migrate_task(task_t *p
 	if (__set_cpus_allowed(p, new_mask, &req)) {
 		/* Need to wait for migration thread. */
 		task_rq_unlock(rq, &flags);
+		wake_up_process(rq->migration_thread);
 		wait_for_completion(&req.done);
 
 		/* If we raced with sys_sched_setaffinity, don't
@@ -1024,7 +1024,7 @@ static void sched_migrate_task(task_t *p
 		if (likely(cpus_equal(p->cpus_allowed, new_mask))) {
 			/* Restore old mask: won't need migration
 			 * thread, since current cpu is allowed. */
-			__set_cpus_allowed(p, old_mask, NULL);
+			BUG_ON(__set_cpus_allowed(p, old_mask, NULL));
 		}
 	}
 out:
@@ -2708,6 +2708,7 @@ int set_cpus_allowed(task_t *p, cpumask_
 	if (__set_cpus_allowed(p, new_mask, &req)) {
 		/* Need help from migration thread: drop lock and wait. */
 		task_rq_unlock(rq, &flags);
+		wake_up_process(rq->migration_thread);
 		wait_for_completion(&req.done);
 		return 0;
 	}

_