From: "Martin J. Bligh" <mbligh@aracnet.com>,
      Nick Piggin <piggin@cyberone.com.au>

arch_init_sched_domains() is using cpu_callout_map (via cpu_possible())
instead of cpu_online_map. cpu_callout_map is really only intended for
CPU bootstrap, and won't work properly if we called out to a CPU but it
failed to respond. The normal way is to use cpu_online_map for this sort
of thing, and that even cleans up the existing code a bit (it's just a
case of s/all_cpus/cpu_online_map/ and removing the loop that builds
all_cpus).
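
For illustration, the whole change boils down to the pattern below (a
sketch only, not part of the patch; setup_domain_for() is just a
stand-in for the per-CPU domain setup done in the hunks that follow):

	int i;

	/* Before: build a cpumask of all possible CPUs by hand */
	cpumask_t all_cpus = CPU_MASK_NONE;

	for (i = 0; i < NR_CPUS; i++)
		if (cpu_possible(i))
			cpu_set(i, all_cpus);

	for_each_cpu_mask(i, all_cpus)
		setup_domain_for(i);

	/* After: iterate directly over the CPUs that actually booted */
	for_each_cpu_mask(i, cpu_online_map)
		setup_domain_for(i);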

I tested this out on the NUMA-Q, and it works fine.

---

 25-akpm/arch/i386/kernel/smpboot.c |   38 ++++++++++---------------------------
 25-akpm/kernel/sched.c             |   34 ++++++++-------------------------
 2 files changed, 20 insertions(+), 52 deletions(-)

diff -puN arch/i386/kernel/smpboot.c~sched-arch_init_sched_domains-fix arch/i386/kernel/smpboot.c
--- 25/arch/i386/kernel/smpboot.c~sched-arch_init_sched_domains-fix	Wed Feb  4 12:30:06 2004
+++ 25-akpm/arch/i386/kernel/smpboot.c	Wed Feb  4 12:30:06 2004
@@ -1134,18 +1134,10 @@ static DEFINE_PER_CPU(struct sched_domai
 __init void arch_init_sched_domains(void)
 {
 	int i;
-	cpumask_t all_cpus = CPU_MASK_NONE;
 	struct sched_group *first_cpu = NULL, *last_cpu = NULL;
 
-	for (i = 0; i < NR_CPUS; i++) {
-		if (!cpu_possible(i))
-			continue;
-
-		cpu_set(i, all_cpus);
-	}
-
 	/* Set up domains */
-	for_each_cpu_mask(i, all_cpus) {
+	for_each_cpu_mask(i, cpu_online_map) {
 		struct sched_domain *cpu_domain = cpu_sched_domain(i);
 		struct sched_domain *phys_domain = &per_cpu(phys_domains, i);
 		struct sched_domain *node_domain = &per_cpu(node_domains, i);
@@ -1160,11 +1152,11 @@ __init void arch_init_sched_domains(void
 		phys_domain->flags |= SD_FLAG_IDLE;
 
 		*node_domain = SD_NODE_INIT;
-		node_domain->span = all_cpus;
+		node_domain->span = cpu_online_map;
 	}
 
 	/* Set up CPU (sibling) groups */
-	for_each_cpu_mask(i, all_cpus) {
+	for_each_cpu_mask(i, cpu_online_map) {
 		struct sched_domain *cpu_domain = cpu_sched_domain(i);
 		int j;
 		first_cpu = last_cpu = NULL;
@@ -1190,7 +1182,7 @@ __init void arch_init_sched_domains(void
 	for (i = 0; i < MAX_NUMNODES; i++) {
 		int j;
 		cpumask_t nodemask;
-		cpus_and(nodemask, node_to_cpumask(i), all_cpus);
+		cpus_and(nodemask, node_to_cpumask(i), cpu_online_map);
 
 		first_cpu = last_cpu = NULL;
 		/* Set up physical groups */
@@ -1217,7 +1209,7 @@ __init void arch_init_sched_domains(void
 	for (i = 0; i < MAX_NUMNODES; i++) {
 		struct sched_group *cpu = &sched_group_nodes[i];
 		cpumask_t nodemask;
-		cpus_and(nodemask, node_to_cpumask(i), all_cpus);
+		cpus_and(nodemask, node_to_cpumask(i), cpu_online_map);
 
 		if (cpus_empty(nodemask))
 			continue;
@@ -1234,7 +1226,7 @@ __init void arch_init_sched_domains(void
 
 
 	mb();
-	for_each_cpu_mask(i, all_cpus) {
+	for_each_cpu_mask(i, cpu_online_map) {
 		int node = cpu_to_node(i);
 		struct sched_domain *cpu_domain = cpu_sched_domain(i);
 		struct sched_domain *phys_domain = &per_cpu(phys_domains, i);
@@ -1258,18 +1250,10 @@ static DEFINE_PER_CPU(struct sched_domai
 __init void arch_init_sched_domains(void)
 {
 	int i;
-	cpumask_t all_cpus = CPU_MASK_NONE;
 	struct sched_group *first_cpu = NULL, *last_cpu = NULL;
 
-	for (i = 0; i < NR_CPUS; i++) {
-		if (!cpu_possible(i))
-			continue;
-
-		cpu_set(i, all_cpus);
-	}
-
 	/* Set up domains */
-	for_each_cpu_mask(i, all_cpus) {
+	for_each_cpu_mask(i, cpu_online_map) {
 		struct sched_domain *cpu_domain = cpu_sched_domain(i);
 		struct sched_domain *phys_domain = &per_cpu(phys_domains, i);
 
@@ -1277,12 +1261,12 @@ __init void arch_init_sched_domains(void
 		cpu_domain->span = cpu_sibling_map[i];
 
 		*phys_domain = SD_CPU_INIT;
-		phys_domain->span = all_cpus;
+		phys_domain->span = cpu_online_map;
 		phys_domain->flags |= SD_FLAG_IDLE;
 	}
 
 	/* Set up CPU (sibling) groups */
-	for_each_cpu_mask(i, all_cpus) {
+	for_each_cpu_mask(i, cpu_online_map) {
 		struct sched_domain *cpu_domain = cpu_sched_domain(i);
 		int j;
 		first_cpu = last_cpu = NULL;
@@ -1307,7 +1291,7 @@ __init void arch_init_sched_domains(void
 
 	first_cpu = last_cpu = NULL;
 	/* Set up physical groups */
-	for_each_cpu_mask(i, all_cpus) {
+	for_each_cpu_mask(i, cpu_online_map) {
 		struct sched_domain *cpu_domain = cpu_sched_domain(i);
 		struct sched_group *cpu = &sched_group_phys[i];
 
@@ -1325,7 +1309,7 @@ __init void arch_init_sched_domains(void
 	last_cpu->next = first_cpu;
 
 	mb();
-	for_each_cpu_mask(i, all_cpus) {
+	for_each_cpu_mask(i, cpu_online_map) {
 		struct sched_domain *cpu_domain = cpu_sched_domain(i);
 		struct sched_domain *phys_domain = &per_cpu(phys_domains, i);
 		struct sched_group *cpu_group = &sched_group_cpus[i];
diff -puN kernel/sched.c~sched-arch_init_sched_domains-fix kernel/sched.c
--- 25/kernel/sched.c~sched-arch_init_sched_domains-fix	Wed Feb  4 12:30:06 2004
+++ 25-akpm/kernel/sched.c	Wed Feb  4 12:30:06 2004
@@ -3235,28 +3235,20 @@ DEFINE_PER_CPU(struct sched_domain, node
 static void __init arch_init_sched_domains(void)
 {
 	int i;
-	cpumask_t all_cpus = CPU_MASK_NONE;
 	struct sched_group *first_node = NULL, *last_node = NULL;
 
-	for (i = 0; i < NR_CPUS; i++) {
-		if (!cpu_possible(i))
-			continue;
-
-		cpu_set(i, all_cpus);
-	}
-
 	/* Set up domains */
-	for_each_cpu_mask(i, all_cpus) {
+	for_each_cpu_mask(i, cpu_online_map) {
 		int node = cpu_to_node(i);
 		cpumask_t nodemask = node_to_cpumask(node);
 		struct sched_domain *node_domain = &per_cpu(node_domains, i);
 		struct sched_domain *cpu_domain = cpu_sched_domain(i);
 
 		*node_domain = SD_NODE_INIT;
-		node_domain->span = all_cpus;
+		node_domain->span = cpu_online_map;
 
 		*cpu_domain = SD_CPU_INIT;
-		cpus_and(cpu_domain->span, nodemask, all_cpus);
+		cpus_and(cpu_domain->span, nodemask, cpu_online_map);
 		cpu_domain->parent = node_domain;
 	}
 
@@ -3267,7 +3259,7 @@ static void __init arch_init_sched_domai
 		cpumask_t nodemask;
 		struct sched_group *node = &sched_group_nodes[i];
 
-		cpus_and(nodemask, node_to_cpumask(i), all_cpus);
+		cpus_and(nodemask, node_to_cpumask(i), cpu_online_map);
 
 		if (cpus_empty(nodemask))
 			continue;
@@ -3301,7 +3293,7 @@ static void __init arch_init_sched_domai
 	last_node->next = first_node;
 
 	mb();
-	for_each_cpu_mask(i, all_cpus) {
+	for_each_cpu_mask(i, cpu_online_map) {
 		struct sched_domain *node_domain = &per_cpu(node_domains, i);
 		struct sched_domain *cpu_domain = cpu_sched_domain(i);
 		node_domain->groups = &sched_group_nodes[cpu_to_node(i)];
@@ -3313,26 +3305,18 @@ static void __init arch_init_sched_domai
 static void __init arch_init_sched_domains(void)
 {
 	int i;
-	cpumask_t all_cpus = CPU_MASK_NONE;
 	struct sched_group *first_cpu = NULL, *last_cpu = NULL;
 
-	for (i = 0; i < NR_CPUS; i++) {
-		if (!cpu_possible(i))
-			continue;
-
-		cpu_set(i, all_cpus);
-	}
-
 	/* Set up domains */
-	for_each_cpu_mask(i, all_cpus) {
+	for_each_cpu_mask(i, cpu_online_map) {
 		struct sched_domain *cpu_domain = cpu_sched_domain(i);
 
 		*cpu_domain = SD_CPU_INIT;
-		cpu_domain->span = all_cpus;
+		cpu_domain->span = cpu_online_map;
 	}
 
 	/* Set up CPU groups */
-	for_each_cpu_mask(i, all_cpus) {
+	for_each_cpu_mask(i, cpu_online_map) {
 		struct sched_group *cpu = &sched_group_cpus[i];
 
 		cpus_clear(cpu->cpumask);
@@ -3347,7 +3331,7 @@ static void __init arch_init_sched_domai
 	last_cpu->next = first_cpu;
 
 	mb();
-	for_each_cpu_mask(i, all_cpus) {
+	for_each_cpu_mask(i, cpu_online_map) {
 		struct sched_domain *cpu_domain = cpu_sched_domain(i);
 		cpu_domain->groups = &sched_group_cpus[i];
 	}
