From: Nick Piggin <nickpiggin@yahoo.com.au>

Minor cleanups picked up from Ingo's patch, the main one being a task_hot()
helper for the cache-hot test so that try_to_wake_up() does it right too
(see the sketch below):

- add a task_hot(p, now, sd) macro and use it for the cache-hot checks in
  both try_to_wake_up() and can_migrate_task();
- drop the now-unused sched_clock()/now computation from try_to_wake_up();
- fold the repeat_lock_task path in try_to_wake_up() back into the
  wake-affine branch, removing a couple of gotos;
- make find_busiest_group() skip a group whose online cpumask is empty
  instead of WARNing and bailing out;
- small whitespace and declaration tidy-ups.
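
Purely as an illustration of the heuristic (a standalone userspace sketch,
not part of the patch; the struct layouts and the numbers are made up, only
the two fields the macro touches are real): a task is treated as cache hot
on its old CPU if it last ran there less than cache_hot_time ago.

	/*
	 * Userspace sketch of the task_hot() test.  The structs are cut
	 * down to the two fields the macro uses; the values are invented.
	 */
	#include <stdio.h>

	struct task { unsigned long long timestamp; };		/* last ran */
	struct sched_domain { unsigned long long cache_hot_time; };

	#define task_hot(p, now, sd) \
		((now) - (p)->timestamp < (sd)->cache_hot_time)

	int main(void)
	{
		struct task p = { .timestamp = 1000000 };
		struct sched_domain sd = { .cache_hot_time = 2500000 };

		/* ran 1.5ms ago, under the 2.5ms threshold: still hot */
		printf("hot:  %d\n", task_hot(&p, 2500000ULL, &sd));
		/* ran 5ms ago, threshold exceeded: cold, cheap to move */
		printf("cold: %d\n", task_hot(&p, 6000000ULL, &sd));
		return 0;
	}

In the patch itself the "now" argument is rq->timestamp_last_tick, so the
test is made without the extra sched_clock() call on the wakeup path.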


---

 25-akpm/include/linux/sched.h |    6 ++---
 25-akpm/kernel/sched.c        |   47 ++++++++++++++++--------------------------
 2 files changed, 22 insertions(+), 31 deletions(-)

diff -puN include/linux/sched.h~sched-minor-cleanups include/linux/sched.h
--- 25/include/linux/sched.h~sched-minor-cleanups	2004-04-27 20:37:35.189590240 -0700
+++ 25-akpm/include/linux/sched.h	2004-04-27 20:37:35.195589328 -0700
@@ -602,9 +602,9 @@ struct sched_domain {
 	.cache_nice_tries	= 0,			\
 	.per_cpu_gain		= 15,			\
 	.flags			= SD_BALANCE_NEWIDLE	\
-				 | SD_WAKE_AFFINE	\
-				 | SD_WAKE_IDLE		\
-				 | SD_SHARE_CPUPOWER,	\
+				| SD_WAKE_AFFINE	\
+				| SD_WAKE_IDLE		\
+				| SD_SHARE_CPUPOWER,	\
 	.last_balance		= jiffies,		\
 	.balance_interval	= 1,			\
 	.nr_balance_failed	= 0,			\
diff -puN kernel/sched.c~sched-minor-cleanups kernel/sched.c
--- 25/kernel/sched.c~sched-minor-cleanups	2004-04-27 20:37:35.191589936 -0700
+++ 25-akpm/kernel/sched.c	2004-04-27 20:37:35.198588872 -0700
@@ -187,6 +187,8 @@ static inline unsigned int task_timeslic
 	return BASE_TIMESLICE(p);
 }
 
+#define task_hot(p, now, sd) ((now) - (p)->timestamp < (sd)->cache_hot_time)
+
 /*
  * These are the runqueue data structures:
  */
@@ -704,13 +706,11 @@ static inline int wake_idle(int cpu, tas
  */
 static int try_to_wake_up(task_t * p, unsigned int state, int sync)
 {
+	int cpu, this_cpu, success = 0;
 	unsigned long flags;
-	int success = 0;
 	long old_state;
 	runqueue_t *rq;
-	int cpu, this_cpu;
 #ifdef CONFIG_SMP
-	unsigned long long now;
 	unsigned long load, this_load;
 	struct sched_domain *sd;
 	int new_cpu;
@@ -753,8 +753,6 @@ static int try_to_wake_up(task_t * p, un
 	if (load > this_load + SCHED_LOAD_SCALE*2)
 		goto out_set_cpu;
 
-	now = sched_clock();
-
 	/*
 	 * Migrate the task to the waking domain.
 	 * Do not violate hard affinity.
@@ -762,7 +760,7 @@ static int try_to_wake_up(task_t * p, un
 	for_each_domain(this_cpu, sd) {
 		if (!(sd->flags & SD_WAKE_AFFINE))
 			break;
-		if (rq->timestamp_last_tick - p->timestamp < sd->cache_hot_time)
+		if (task_hot(p, rq->timestamp_last_tick, sd))
 			break;
 
 		if (cpu_isset(cpu, sd->span))
@@ -774,22 +772,18 @@ out_set_cpu:
 	new_cpu = wake_idle(new_cpu, p);
 	if (new_cpu != cpu && cpu_isset(new_cpu, p->cpus_allowed)) {
 		set_task_cpu(p, new_cpu);
-		goto repeat_lock_task;
-	}
-	goto out_activate;
-
-repeat_lock_task:
-	task_rq_unlock(rq, &flags);
-	rq = task_rq_lock(p, &flags);
-	old_state = p->state;
-	if (!(old_state & state))
-		goto out;
-
-	if (p->array)
-		goto out_running;
+		task_rq_unlock(rq, &flags);
+		/* might preempt at this point */
+		rq = task_rq_lock(p, &flags);
+		old_state = p->state;
+		if (!(old_state & state))
+			goto out;
+		if (p->array)
+			goto out_running;
 
-	this_cpu = smp_processor_id();
-	cpu = task_cpu(p);
+		this_cpu = smp_processor_id();
+		cpu = task_cpu(p);
+	}
 
 out_activate:
 #endif /* CONFIG_SMP */
@@ -1301,7 +1295,7 @@ int can_migrate_task(task_t *p, runqueue
 	/* Aggressive migration if we've failed balancing */
 	if (idle == NEWLY_IDLE ||
 			sd->nr_balance_failed < sd->cache_nice_tries) {
-		if (rq->timestamp_last_tick - p->timestamp < sd->cache_hot_time)
+		if (task_hot(p, rq->timestamp_last_tick, sd))
 			return 0;
 	}
 
@@ -1319,10 +1313,9 @@ static int move_tasks(runqueue_t *this_r
 			unsigned long max_nr_move, struct sched_domain *sd,
 			enum idle_type idle)
 {
-	int idx;
-	int pulled = 0;
 	prio_array_t *array, *dst_array;
 	struct list_head *head, *curr;
+	int idx, pulled = 0;
 	task_t *tmp;
 
 	if (max_nr_move <= 0 || busiest->nr_running <= 1)
@@ -1411,10 +1404,8 @@ find_busiest_group(struct sched_domain *
 		/* Tally up the load of all CPUs in the group */
 		avg_load = 0;
 		cpus_and(tmp, group->cpumask, cpu_online_map);
-		if (unlikely(cpus_empty(tmp))) {
-			WARN_ON(1);
-			return NULL;
-		}
+		if (unlikely(cpus_empty(tmp)))
+			goto nextgroup;
 
 		for_each_cpu_mask(i, tmp) {
 			/* Bias balancing toward cpus of our domain */

_