Give the enum idle_type members an IT_ namespace prefix: IDLE, NOT_IDLE,
NEWLY_IDLE and MAX_IDLE_TYPES become IT_IDLE, IT_NOT_IDLE, IT_NEWLY_IDLE
and IT_MAX_IDLE_TYPES, and all users in kernel/sched.c are updated to
match.  Identifiers as generic as IDLE are easy targets for collisions
with macros defined elsewhere in the tree, so move them out of the way.

Signed-off-by: Andrew Morton <akpm@osdl.org>
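
To illustrate the failure mode the rename guards against (a hypothetical
sketch, not code from the tree -- no such header or macro is implied), a
macro named IDLE anywhere in the include chain would break the old enum
declaration at preprocessing time:

	/* Hypothetical collision: suppose a header included before
	 * <linux/sched.h> happens to define */
	#define IDLE	0

	/* The old declaration
	 *
	 *	enum idle_type { IDLE, NOT_IDLE, NEWLY_IDLE, MAX_IDLE_TYPES };
	 *
	 * then preprocesses to "enum idle_type { 0, ... };", which is a
	 * syntax error.  The IT_-prefixed enumerators stay clear of any
	 * such macro. */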
---

 25-akpm/include/linux/sched.h |   18 +++++++++---------
 25-akpm/kernel/sched.c        |   41 +++++++++++++++++++++--------------------
 2 files changed, 30 insertions(+), 29 deletions(-)

diff -puN include/linux/sched.h~schedstats-2-namespace-fix include/linux/sched.h
--- 25/include/linux/sched.h~schedstats-2-namespace-fix	2004-08-04 19:37:49.915624200 -0700
+++ 25-akpm/include/linux/sched.h	2004-08-04 19:40:17.717154952 -0700
@@ -602,10 +602,10 @@ do { if (atomic_dec_and_test(&(tsk)->usa
 
 enum idle_type
 {
-	IDLE,
-	NOT_IDLE,
-	NEWLY_IDLE,
-	MAX_IDLE_TYPES
+	IT_IDLE,
+	IT_NOT_IDLE,
+	IT_NEWLY_IDLE,
+	IT_MAX_IDLE_TYPES
 };
 
 struct sched_group {
@@ -642,11 +642,11 @@ struct sched_domain {
 
 #ifdef CONFIG_SCHEDSTATS
 	/* load_balance() stats */
-	unsigned long lb_cnt[MAX_IDLE_TYPES];
-	unsigned long lb_failed[MAX_IDLE_TYPES];
-	unsigned long lb_imbalance[MAX_IDLE_TYPES];
-	unsigned long lb_nobusyg[MAX_IDLE_TYPES];
-	unsigned long lb_nobusyq[MAX_IDLE_TYPES];
+	unsigned long lb_cnt[IT_MAX_IDLE_TYPES];
+	unsigned long lb_failed[IT_MAX_IDLE_TYPES];
+	unsigned long lb_imbalance[IT_MAX_IDLE_TYPES];
+	unsigned long lb_nobusyg[IT_MAX_IDLE_TYPES];
+	unsigned long lb_nobusyq[IT_MAX_IDLE_TYPES];
 
 	/* sched_balance_exec() stats */
 	unsigned long sbe_attempts;
diff -puN kernel/sched.c~schedstats-2-namespace-fix kernel/sched.c
--- 25/kernel/sched.c~schedstats-2-namespace-fix	2004-08-04 19:37:49.933621464 -0700
+++ 25-akpm/kernel/sched.c	2004-08-04 19:40:29.299394184 -0700
@@ -254,8 +254,8 @@ struct runqueue {
 	unsigned long sched_goidle;
 
 	/* pull_task() stats */
-	unsigned long pt_gained[MAX_IDLE_TYPES];
-	unsigned long pt_lost[MAX_IDLE_TYPES];
+	unsigned long pt_gained[IT_MAX_IDLE_TYPES];
+	unsigned long pt_lost[IT_MAX_IDLE_TYPES];
 
 	/* active_load_balance() stats */
 	unsigned long alb_cnt;
@@ -360,7 +360,7 @@ static int show_schedstat(struct seq_fil
 		    rq->smt_cnt, rq->sbe_cnt, rq->rq_sched_info.cpu_time,
 		    rq->rq_sched_info.run_delay, rq->rq_sched_info.pcnt);
 
-		for (itype = IDLE; itype < MAX_IDLE_TYPES; itype++)
+		for (itype = IT_IDLE; itype < IT_MAX_IDLE_TYPES; itype++)
 		    seq_printf(seq, " %lu %lu", rq->pt_gained[itype],
 			rq->pt_lost[itype]);
 
@@ -372,7 +372,8 @@ static int show_schedstat(struct seq_fil
 
 			cpumask_scnprintf(mask_str, NR_CPUS, sd->span);
 			seq_printf(seq, "domain%d %s", dcnt++, mask_str);
-			for (itype = IDLE; itype < MAX_IDLE_TYPES; itype++) {
+			for (itype = IT_IDLE; itype < IT_MAX_IDLE_TYPES;
+					itype++) {
 				seq_printf(seq, " %lu %lu %lu %lu %lu",
 				    sd->lb_cnt[itype],
 				    sd->lb_failed[itype],
@@ -1625,7 +1626,7 @@ int can_migrate_task(task_t *p, runqueue
 		return 0;
 
 	/* Aggressive migration if we've failed balancing */
-	if (idle == NEWLY_IDLE ||
+	if (idle == IT_NEWLY_IDLE ||
 			sd->nr_balance_failed < sd->cache_nice_tries) {
 		if (task_hot(p, rq->timestamp_last_tick, sd))
 			return 0;
@@ -1852,8 +1853,8 @@ nextgroup:
 	return busiest;
 
 out_balanced:
-	if (busiest && (idle == NEWLY_IDLE ||
-			(idle == IDLE && max_load > SCHED_LOAD_SCALE)) ) {
+	if (busiest && (idle == IT_NEWLY_IDLE ||
+			(idle == IT_IDLE && max_load > SCHED_LOAD_SCALE)) ) {
 		*imbalance = 1;
 		return busiest;
 	}
@@ -1986,7 +1987,7 @@ out_balanced:
  * Check this_cpu to ensure it is balanced within domain. Attempt to move
  * tasks if there is an imbalance.
  *
- * Called from schedule when this_rq is about to become idle (NEWLY_IDLE).
+ * Called from schedule when this_rq is about to become idle (IT_NEWLY_IDLE).
  * this_rq is locked.
  */
 static int load_balance_newidle(int this_cpu, runqueue_t *this_rq,
@@ -1997,28 +1998,28 @@ static int load_balance_newidle(int this
 	unsigned long imbalance;
 	int nr_moved = 0;
 
-	schedstat_inc(sd, lb_cnt[NEWLY_IDLE]);
-	group = find_busiest_group(sd, this_cpu, &imbalance, NEWLY_IDLE);
+	schedstat_inc(sd, lb_cnt[IT_NEWLY_IDLE]);
+	group = find_busiest_group(sd, this_cpu, &imbalance, IT_NEWLY_IDLE);
 	if (!group) {
-		schedstat_inc(sd, lb_nobusyg[NEWLY_IDLE]);
+		schedstat_inc(sd, lb_nobusyg[IT_NEWLY_IDLE]);
 		goto out;
 	}
 
 	busiest = find_busiest_queue(group);
 	if (!busiest || busiest == this_rq) {
-		schedstat_inc(sd, lb_nobusyq[NEWLY_IDLE]);
+		schedstat_inc(sd, lb_nobusyq[IT_NEWLY_IDLE]);
 		goto out;
 	}
 
 	/* Attempt to move tasks */
 	double_lock_balance(this_rq, busiest);
 
-	schedstat_add(sd, lb_imbalance[NEWLY_IDLE], imbalance);
+	schedstat_add(sd, lb_imbalance[IT_NEWLY_IDLE], imbalance);
 	nr_moved = move_tasks(this_rq, this_cpu, busiest,
-					imbalance, sd, NEWLY_IDLE);
+					imbalance, sd, IT_NEWLY_IDLE);
 
 	if (!nr_moved)
-	    schedstat_inc(sd, lb_failed[NEWLY_IDLE]);
+	    schedstat_inc(sd, lb_failed[IT_NEWLY_IDLE]);
 
 	spin_unlock(&busiest->lock);
 
@@ -2105,7 +2106,7 @@ static void active_load_balance(runqueue
 		if (unlikely(busiest == rq))
 			goto next_group;
 		double_lock_balance(busiest, rq);
-		if (move_tasks(rq, push_cpu, busiest, 1, sd, IDLE)) {
+		if (move_tasks(rq, push_cpu, busiest, 1, sd, IT_IDLE)) {
 			schedstat_inc(busiest, alb_lost);
 			schedstat_inc(rq, alb_gained);
 		} else {
@@ -2151,7 +2152,7 @@ static void rebalance_tick(int this_cpu,
 	for_each_domain(this_cpu, sd) {
 		unsigned long interval = sd->balance_interval;
 
-		if (idle != IDLE)
+		if (idle != IT_IDLE)
 			interval *= sd->busy_factor;
 
 		/* scale ms to jiffies */
@@ -2162,7 +2163,7 @@ static void rebalance_tick(int this_cpu,
 		if (j - sd->last_balance >= interval) {
 			if (load_balance(this_cpu, this_rq, sd, idle)) {
 				/* We've pulled tasks over so no longer idle */
-				idle = NOT_IDLE;
+				idle = IT_NOT_IDLE;
 			}
 			sd->last_balance += interval;
 		}
@@ -2250,7 +2251,7 @@ void scheduler_tick(int user_ticks, int 
 			cpustat->idle += sys_ticks;
 		if (wake_priority_sleeper(rq))
 			goto out;
-		rebalance_tick(cpu, rq, IDLE);
+		rebalance_tick(cpu, rq, IT_IDLE);
 		return;
 	}
 	if (TASK_NICE(p) > 0)
@@ -2334,7 +2335,7 @@ void scheduler_tick(int user_ticks, int 
 out_unlock:
 	spin_unlock(&rq->lock);
 out:
-	rebalance_tick(cpu, rq, NOT_IDLE);
+	rebalance_tick(cpu, rq, IT_NOT_IDLE);
 }
 
 #ifdef CONFIG_SCHED_SMT
_