From: Nick Piggin <nickpiggin@yahoo.com.au>

This actually does produce better code, especially inside the locked
section.

It turns a conditional plus an unconditional jump taken under the lock
in the unlikely (!nr_exclusive) case into a cmov computed outside the
lock.
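
For illustration only, here is a minimal userspace sketch of the same
transformation (not the kernel code itself; the pthread mutex and the
wake_common() stub are stand-ins for the spinlock and
__wake_up_common()):

	#include <pthread.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

	/* stand-in for __wake_up_common() */
	static void wake_common(int nr, int sync)
	{
		(void)nr;
		(void)sync;
	}

	/*
	 * Before: the branch on nr lives inside the critical section,
	 * so the compiler emits a conditional + unconditional jump
	 * while the lock is held.
	 */
	static void wake_before(int nr)
	{
		pthread_mutex_lock(&lock);
		if (nr)
			wake_common(nr, 1);
		else
			wake_common(nr, 0);
		pthread_mutex_unlock(&lock);
	}

	/*
	 * After: the sync flag is computed before the lock is taken,
	 * so the compiler is free to use a cmov/setcc for the flag and
	 * the critical section becomes a single straight-line call.
	 */
	static void wake_after(int nr)
	{
		int sync = 1;

		if (!nr)
			sync = 0;

		pthread_mutex_lock(&lock);
		wake_common(nr, sync);
		pthread_mutex_unlock(&lock);
	}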


---

 25-akpm/kernel/sched.c |    9 +++++----
 1 files changed, 5 insertions(+), 4 deletions(-)

diff -puN kernel/sched.c~sched-wake_up-speedup kernel/sched.c
--- 25/kernel/sched.c~sched-wake_up-speedup	Mon Apr 26 15:31:46 2004
+++ 25-akpm/kernel/sched.c	Mon Apr 26 15:31:46 2004
@@ -2400,15 +2400,16 @@ void fastcall __wake_up_locked(wait_queu
 void fastcall __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
 {
 	unsigned long flags;
+	int sync = 1;
 
 	if (unlikely(!q))
 		return;
 
+	if (unlikely(!nr_exclusive))
+		sync = 0;
+
 	spin_lock_irqsave(&q->lock, flags);
-	if (likely(nr_exclusive))
-		__wake_up_common(q, mode, nr_exclusive, 1);
-	else
-		__wake_up_common(q, mode, nr_exclusive, 0);
+	__wake_up_common(q, mode, nr_exclusive, sync);
 	spin_unlock_irqrestore(&q->lock, flags);
 }
 EXPORT_SYMBOL_GPL(__wake_up_sync);	/* For internal use only */

_