patch-2.4.21 linux-2.4.21/arch/alpha/kernel/smp.c
- Lines: 192
- Date: 2003-06-13 07:51:29.000000000 -0700
- Orig file: linux-2.4.20/arch/alpha/kernel/smp.c
- Orig date: 2002-08-02 17:39:42.000000000 -0700
diff -urN linux-2.4.20/arch/alpha/kernel/smp.c linux-2.4.21/arch/alpha/kernel/smp.c
@@ -77,11 +77,10 @@
/* cpus reported in the hwrpb */
static unsigned long hwrpb_cpu_present_mask __initdata = 0;
-static int max_cpus = -1; /* Command-line limitation. */
+static int max_cpus = NR_CPUS; /* Command-line limitation. */
int smp_num_probed; /* Internal processor count */
int smp_num_cpus = 1; /* Number that came online. */
int smp_threads_ready; /* True once the per process idle is forked. */
-cycles_t cacheflush_time;
int __cpu_number_map[NR_CPUS];
int __cpu_logical_map[NR_CPUS];
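The default for max_cpus moves from the -1 sentinel to NR_CPUS, so a plain numeric bound check works whether or not the user passed maxcpus= on the boot command line; the hunk further down that adds "if (cpu_count >= max_cpus) break;" relies on this. (The cacheflush_time removal here pairs with the deletion of smp_tune_scheduling below, its only writer.) A minimal userspace sketch of the two styles, with hypothetical function names:

    #include <stdio.h>

    #define NR_CPUS 64

    /* Old style: -1 means "no limit", so every check needs a special case. */
    static int may_bring_up_old(int count, int max_cpus)
    {
        return max_cpus == -1 || count < max_cpus;
    }

    /* New style: the default is already the largest legal bound,
     * so the comparison needs no special case. */
    static int may_bring_up_new(int count, int max_cpus)
    {
        return count < max_cpus;
    }

    int main(void)
    {
        printf("%d %d\n", may_bring_up_old(3, -1), may_bring_up_new(3, NR_CPUS));
        return 0;
    }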
@@ -104,7 +103,7 @@
return 1;
}
-__setup("maxcpus", maxcpus);
+__setup("maxcpus=", maxcpus);
/*
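The added '=' matters because of how 2.4's checksetup() matches __setup strings: the registered string is compared as a prefix of the boot option, and the handler receives whatever follows the match. With "maxcpus", the handler for "maxcpus=2" would be handed "=2", which parses as 0; with "maxcpus=", it is handed "2". A simplified userspace model of that matching (checksetup_like and maxcpus_handler are illustrative names, not the kernel's code):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static int maxcpus_handler(char *arg)
    {
        printf("handler got \"%s\" -> %ld\n", arg, strtol(arg, NULL, 0));
        return 1;
    }

    /* Prefix match, then pass the remainder to the handler,
     * as 2.4's checksetup() does. */
    static void checksetup_like(const char *line, const char *key,
                                int (*fn)(char *))
    {
        size_t n = strlen(key);
        if (strncmp(line, key, n) == 0)
            fn((char *)line + n);
    }

    int main(void)
    {
        checksetup_like("maxcpus=2", "maxcpus", maxcpus_handler);  /* "=2" -> 0 */
        checksetup_like("maxcpus=2", "maxcpus=", maxcpus_handler); /* "2"  -> 2 */
        return 0;
    }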
@@ -175,6 +174,9 @@
/* Get our local ticker going. */
smp_setup_percpu_timer(cpuid);
+ /* Call platform-specific callin, if specified */
+ if (alpha_mv.smp_callin) alpha_mv.smp_callin();
+
/* Must have completely accurate bogos. */
__sti();
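alpha_mv is the Alpha machine vector, a structure of per-platform function pointers; the new hook lets a platform run extra setup on each secondary CPU during callin and is simply skipped when the pointer is NULL. A minimal sketch of the optional-hook pattern (the struct layout and platform function here are illustrative, not the real machine vector):

    #include <stdio.h>

    struct machine_vector {
        void (*smp_callin)(void);   /* NULL if the platform needs no hook */
    };

    static void example_smp_callin(void)   /* hypothetical platform hook */
    {
        printf("platform-specific callin work\n");
    }

    static struct machine_vector mv = { example_smp_callin };

    int main(void)
    {
        if (mv.smp_callin)          /* same guard as the patch */
            mv.smp_callin();
        return 0;
    }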
@@ -184,20 +186,20 @@
*/
wait_boot_cpu_to_stop(cpuid);
mb();
- try_again:
+
calibrate_delay();
smp_store_cpu_info(cpuid);
{
#define LPJ(c) ((long)cpu_data[c].loops_per_jiffy)
- static int tries = 3;
long diff = LPJ(boot_cpuid) - LPJ(cpuid);
if (diff < 0) diff = -diff;
- if (diff > LPJ(boot_cpuid)/10 && --tries) {
- printk("Bogus BogoMIPS for cpu %d - retrying...\n", cpuid);
- goto try_again;
+ if (diff > LPJ(boot_cpuid)/10) {
+ printk("Bogus BogoMIPS for cpu %d - trusting boot CPU\n",
+ cpuid);
+ loops_per_jiffy = LPJ(cpuid) = LPJ(boot_cpuid);
}
}
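Rather than recalibrating up to three times when a secondary's BogoMIPS disagrees with the boot CPU's by more than 10%, the new code logs the mismatch once and copies the boot CPU's loops_per_jiffy. With made-up numbers: LPJ(boot) = 2,000,000 and LPJ(new) = 1,500,000 give diff = 500,000, which exceeds 2,000,000/10 = 200,000, so the secondary inherits 2,000,000. A compact sketch of just the check:

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        long boot_lpj = 2000000, cpu_lpj = 1500000;  /* illustrative values */
        long diff = labs(boot_lpj - cpu_lpj);

        if (diff > boot_lpj / 10) {
            printf("Bogus BogoMIPS - trusting boot CPU\n");
            cpu_lpj = boot_lpj;     /* inherit instead of retrying */
        }
        printf("final loops_per_jiffy: %ld\n", cpu_lpj);
        return 0;
    }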
@@ -225,64 +227,6 @@
cpu_idle();
}
-
-/*
- * Rough estimation for SMP scheduling, this is the number of cycles it
- * takes for a fully memory-limited process to flush the SMP-local cache.
- *
- * We are not told how much cache there is, so we have to guess.
- */
-static void __init
-smp_tune_scheduling (int cpuid)
-{
- struct percpu_struct *cpu;
- unsigned long on_chip_cache;
- unsigned long freq;
-
- cpu = (struct percpu_struct*)((char*)hwrpb + hwrpb->processor_offset
- + cpuid * hwrpb->processor_size);
- switch (cpu->type)
- {
- case EV45_CPU:
- on_chip_cache = 16 + 16;
- break;
-
- case EV5_CPU:
- case EV56_CPU:
- on_chip_cache = 8 + 8 + 96;
- break;
-
- case PCA56_CPU:
- on_chip_cache = 16 + 8;
- break;
-
- case EV6_CPU:
- case EV67_CPU:
- on_chip_cache = 64 + 64;
- break;
-
- default:
- on_chip_cache = 8 + 8;
- break;
- }
-
- freq = hwrpb->cycle_freq ? : est_cycle_freq;
-
-#if 0
- /* Magic estimation stolen from x86 port. */
- cacheflush_time = freq / 1024L * on_chip_cache / 5000L;
-
- printk("Using heuristic of %d cycles.\n",
- cacheflush_time);
-#else
- /* Magic value to force potential preemption of other CPUs. */
- cacheflush_time = INT_MAX;
-
- printk("Using heuristic of %d cycles.\n",
- cacheflush_time);
-#endif
-}
-
/*
* Send a message to a secondary's console. "START" is one such
* interesting message. ;-)
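The deleted smp_tune_scheduling() was the only writer of cacheflush_time, whose declaration went away in the first hunk. Its live branch had long been the #else arm, pinning cacheflush_time to INT_MAX "to force potential preemption of other CPUs"; the #if 0 arm preserved the original heuristic. Plugging an EV6 (64 + 64 KB of on-chip cache) at an assumed 500 MHz into that disabled formula gives roughly 12,500 cycles:

    #include <stdio.h>

    int main(void)
    {
        unsigned long freq = 500000000UL;       /* assumed 500 MHz EV6 */
        unsigned long on_chip_cache = 64 + 64;  /* KB, from the deleted table */

        /* The disabled heuristic from the deleted function. */
        unsigned long cacheflush_time = freq / 1024L * on_chip_cache / 5000L;
        printf("estimated flush: %lu cycles\n", cacheflush_time);  /* 12499 */
        return 0;
    }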
@@ -622,7 +566,6 @@
current->processor = boot_cpuid;
smp_store_cpu_info(boot_cpuid);
- smp_tune_scheduling(boot_cpuid);
smp_setup_percpu_timer(boot_cpuid);
init_idle();
@@ -641,6 +584,9 @@
cpu_count = 1;
for (i = 0; i < NR_CPUS; i++) {
+ if (cpu_count >= max_cpus)
+ break;
+
if (i == boot_cpuid)
continue;
@@ -666,8 +612,8 @@
}
printk(KERN_INFO "SMP: Total of %d processors activated "
"(%lu.%02lu BogoMIPS).\n",
- cpu_count, (bogosum + 2500) / (500000/HZ),
- ((bogosum + 2500) / (5000/HZ)) % 100);
+ cpu_count, bogosum / (500000/HZ),
+ (bogosum / (5000/HZ)) % 100);
smp_num_cpus = cpu_count;
}
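The summary line stops adding the half-unit rounding term (+2500) and simply truncates the aggregate BogoMIPS. Taking HZ = 100 for round numbers (an assumption for illustration; Alpha actually runs HZ = 1024), a bogosum of 9,997,500 prints as 2000.00 under the old formula but 1999.50 under the new one:

    #include <stdio.h>

    int main(void)
    {
        long hz = 100;               /* illustrative; Alpha uses HZ = 1024 */
        long bogosum = 9997500;

        /* Old: round to the nearest hundredth of a BogoMIPS. */
        printf("old: %ld.%02ld\n", (bogosum + 2500) / (500000 / hz),
               ((bogosum + 2500) / (5000 / hz)) % 100);

        /* New: plain truncation. */
        printf("new: %ld.%02ld\n", bogosum / (500000 / hz),
               (bogosum / (5000 / hz)) % 100);
        return 0;
    }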
@@ -922,11 +868,36 @@
&& time_before (jiffies, timeout))
barrier();
- /* We either got one or timed out -- clear the lock. */
+ /* If there's no response yet, log a message but allow a longer
+ * timeout period -- if we get a response this time, log
+ * a message saying when we got it..
+ */
+ if (atomic_read(&data.unstarted_count) > 0) {
+ long start_time = jiffies;
+ printk(KERN_ERR "%s: initial timeout -- trying long wait\n",
+ __FUNCTION__);
+ timeout = jiffies + 30 * HZ;
+ while (atomic_read(&data.unstarted_count) > 0
+ && time_before(jiffies, timeout))
+ barrier();
+ if (atomic_read(&data.unstarted_count) <= 0) {
+ long delta = jiffies - start_time;
+ printk(KERN_ERR
+ "%s: response %ld.%ld seconds into long wait\n",
+ __FUNCTION__, delta / HZ,
+ (100 * (delta - ((delta / HZ) * HZ))) / HZ);
+ }
+ }
+
+ /* We either got one or timed out -- clear the lock. */
mb();
smp_call_function_data = 0;
- if (atomic_read (&data.unstarted_count) > 0)
- return -ETIMEDOUT;
+
+ /*
+ * If after both the initial and long timeout periods we still don't
+ * have a response, something is very wrong...
+ */
+ BUG_ON(atomic_read (&data.unstarted_count) > 0);
/* Wait for a complete response, if needed. */
if (wait) {
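This change turns a silent -ETIMEDOUT into a two-phase wait: a short initial spin, then a logged 30-second grace period, and finally BUG_ON() if some CPU still has not picked up the cross-call, on the theory that a CPU ignoring an IPI for half a minute is unrecoverably broken. A userspace sketch of the same two-phase pattern (the late response is simulated; all names are illustrative):

    #include <assert.h>
    #include <stdatomic.h>
    #include <stdio.h>
    #include <time.h>

    static atomic_int unstarted_count = 1;  /* CPUs yet to pick up the call */

    /* Spin until everyone has started or the deadline passes. */
    static int all_started_by(time_t deadline)
    {
        while (atomic_load(&unstarted_count) > 0 && time(NULL) < deadline)
            ;                               /* busy-wait, like barrier() */
        return atomic_load(&unstarted_count) <= 0;
    }

    int main(void)
    {
        /* Phase 1: short initial wait. */
        if (!all_started_by(time(NULL) + 1)) {
            /* Phase 2: log, then allow a much longer grace period. */
            fprintf(stderr, "initial timeout -- trying long wait\n");
            atomic_store(&unstarted_count, 0);  /* simulate a late response */
            if (all_started_by(time(NULL) + 30))
                fprintf(stderr, "response arrived during long wait\n");
        }
        /* The BUG_ON() analogue: still no response means broken hardware. */
        assert(atomic_load(&unstarted_count) <= 0);
        return 0;
    }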