patch-2.4.2 linux/arch/s390/kernel/smp.c
- Lines: 478
- Date: Tue Feb 13 14:13:44 2001
- Orig file: v2.4.1/linux/arch/s390/kernel/smp.c
- Orig date: Tue Sep 5 13:50:01 2000
diff -u --recursive --new-file v2.4.1/linux/arch/s390/kernel/smp.c linux/arch/s390/kernel/smp.c
@@ -32,16 +32,15 @@
#include <asm/sigp.h>
#include <asm/pgalloc.h>
#include <asm/irq.h>
+#include <asm/s390_ext.h>
#include "cpcmd.h"
/* prototypes */
-extern void update_one_process( struct task_struct *p,
- unsigned long ticks, unsigned long user,
- unsigned long system, int cpu);
extern int cpu_idle(void * unused);
extern __u16 boot_cpu_addr;
+extern volatile int __cpu_logical_map[];
/*
* An array with a pointer the lowcore of every CPU.
@@ -52,10 +51,8 @@
unsigned int prof_multiplier[NR_CPUS];
unsigned int prof_old_multiplier[NR_CPUS];
unsigned int prof_counter[NR_CPUS];
-volatile int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */
cycles_t cacheflush_time=0;
int smp_threads_ready=0; /* Set when the idlers are all forked. */
-unsigned long ipi_count=0; /* Number of IPIs delivered. */
static atomic_t smp_commenced = ATOMIC_INIT(0);
spinlock_t kernel_flag = SPIN_LOCK_UNLOCKED;
@@ -104,7 +101,7 @@
void machine_restart(char * __unused)
{
if (smp_processor_id() != 0) {
- smp_ext_call_async(0, ec_restart);
+ smp_ext_bitcall(0, ec_restart);
for (;;);
} else
do_machine_restart();
@@ -115,13 +112,13 @@
smp_send_stop();
if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
cpcmd(vmhalt_cmd, NULL, 0);
- disabled_wait(0);
+ signal_processor(smp_processor_id(), sigp_stop_and_store_status);
}
void machine_halt(void)
{
if (smp_processor_id() != 0) {
- smp_ext_call_async(0, ec_halt);
+ smp_ext_bitcall(0, ec_halt);
for (;;);
} else
do_machine_halt();
@@ -132,13 +129,13 @@
smp_send_stop();
if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
cpcmd(vmpoff_cmd, NULL, 0);
- disabled_wait(0);
+ signal_processor(smp_processor_id(), sigp_stop_and_store_status);
}
void machine_power_off(void)
{
if (smp_processor_id() != 0) {
- smp_ext_call_async(0, ec_power_off);
+ smp_ext_bitcall(0, ec_power_off);
for (;;);
} else
do_machine_power_off();
@@ -149,7 +146,7 @@
* cpus are handled.
*/
-void do_ext_call_interrupt(__u16 source_cpu_addr)
+void do_ext_call_interrupt(struct pt_regs *regs, __u16 code)
{
ec_ext_call *ec, *next;
int bits;
@@ -172,6 +169,8 @@
do_machine_halt();
if (test_bit(ec_power_off, &bits))
do_machine_power_off();
+ if (test_bit(ec_ptlb, &bits))
+ local_flush_tlb();
/*
* Handle external call commands with a parameter area
@@ -184,7 +183,7 @@
return; /* no command signals */
/* Make a fifo out of the lifo */
- next = ec;
+ next = ec->next;
ec->next = NULL;
while (next != NULL) {
ec_ext_call *tmp = next->next;
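This hunk fixes a real bug in the queue reversal: the old code started with
the head element itself ("next = ec"), so after "ec->next = NULL" the loop
linked the head to itself, and the execution loop below would spin on the
first command forever while every other queued command was lost. A minimal
sketch of the corrected reversal, assuming the elided loop body follows the
standard single-pass reversal visible in the surrounding context:

    /* Turn a LIFO chain of requests into FIFO order; "req" stands in
     * for the kernel's ec_ext_call. */
    struct req { struct req *next; };

    static struct req *lifo_to_fifo(struct req *ec)
    {
            struct req *next = ec->next;  /* fixed: begin at the successor */
            ec->next = NULL;              /* old head becomes the tail     */
            while (next != NULL) {
                    struct req *tmp = next->next;
                    next->next = ec;      /* re-point each node at its     */
                    ec = next;            /* predecessor                   */
                    next = tmp;
            }
            return ec;                    /* oldest request first          */
    }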
@@ -196,61 +195,21 @@
/* Execute every sigp command on the queue */
while (ec != NULL) {
switch (ec->cmd) {
- case ec_get_ctl: {
- ec_creg_parms *pp;
- pp = (ec_creg_parms *) ec->parms;
- atomic_set(&ec->status,ec_executing);
- asm volatile (
- " bras 1,0f\n"
- " stctl 0,0,0(%0)\n"
- "0: ex %1,0(1)\n"
- : : "a" (pp->cregs+pp->start_ctl),
- "a" ((pp->start_ctl<<4) + pp->end_ctl)
- : "memory", "1" );
- atomic_set(&ec->status,ec_done);
- return;
- }
- case ec_set_ctl: {
- ec_creg_parms *pp;
- pp = (ec_creg_parms *) ec->parms;
+ case ec_callback_async: {
+ void (*func)(void *info);
+ void *info;
+
+ func = ec->func;
+ info = ec->info;
atomic_set(&ec->status,ec_executing);
- asm volatile (
- " bras 1,0f\n"
- " lctl 0,0,0(%0)\n"
- "0: ex %1,0(1)\n"
- : : "a" (pp->cregs+pp->start_ctl),
- "a" ((pp->start_ctl<<4) + pp->end_ctl)
- : "memory", "1" );
- atomic_set(&ec->status,ec_done);
+ (func)(info);
return;
}
- case ec_set_ctl_masked: {
- ec_creg_mask_parms *pp;
- u32 cregs[16];
- int i;
-
- pp = (ec_creg_mask_parms *) ec->parms;
+ case ec_callback_sync:
atomic_set(&ec->status,ec_executing);
- asm volatile (
- " bras 1,0f\n"
- " stctl 0,0,0(%0)\n"
- "0: ex %1,0(1)\n"
- : : "a" (cregs+pp->start_ctl),
- "a" ((pp->start_ctl<<4) + pp->end_ctl)
- : "memory", "1" );
- for (i = pp->start_ctl; i <= pp->end_ctl; i++)
- cregs[i] = (cregs[i] & pp->andvals[i])
- | pp->orvals[i];
- asm volatile (
- " bras 1,0f\n"
- " lctl 0,0,0(%0)\n"
- "0: ex %1,0(1)\n"
- : : "a" (cregs+pp->start_ctl),
- "a" ((pp->start_ctl<<4) + pp->end_ctl)
- : "memory", "1" );
+ (ec->func)(ec->info);
atomic_set(&ec->status,ec_done);
return;
- }
default:
}
ec = ec->next;
@@ -258,17 +217,19 @@
}
/*
- * Send an external call sigp to another cpu and wait for its completion.
+ * Send a callback sigp to another cpu.
*/
-sigp_ccode smp_ext_call_sync(int cpu, ec_cmd_sig cmd, void *parms)
+sigp_ccode
+smp_ext_call(int cpu, void (*func)(void *info), void *info, int wait)
{
struct _lowcore *lowcore = &get_cpu_lowcore(cpu);
sigp_ccode ccode;
ec_ext_call ec;
- ec.cmd = cmd;
+ ec.cmd = wait ? ec_callback_sync : ec_callback_async;
atomic_set(&ec.status, ec_pending);
- ec.parms = parms;
+ ec.func = func;
+ ec.info = info;
do {
ec.next = (ec_ext_call*) atomic_read(&lowcore->ext_call_queue);
} while (atomic_compare_and_swap((int) ec.next, (int)(&ec),
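The request block "ec" lives on the sender's stack: it is pushed onto the
target cpu's lowcore queue with a compare-and-swap loop, and the wait a few
lines below (ec_executing for asynchronous calls, ec_done for synchronous
ones) is what keeps that stack frame alive until the target has copied
func/info out of it. A sketch of the push, substituting a GCC builtin for
the kernel's atomic_compare_and_swap() (the builtin is an assumption here,
not the 2.4 s390 API):

    /* Lock-free LIFO push of a stack-allocated request. */
    static void push_request(ec_ext_call **queue, ec_ext_call *ec)
    {
            do {
                    ec->next = *queue;   /* snapshot the current head */
            } while (!__sync_bool_compare_and_swap(queue, ec->next, ec));
    }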
@@ -288,34 +249,15 @@
if (ccode != sigp_not_operational)
/* wait for completion, FIXME: possible seed of a deadlock */
- while (atomic_read(&ec.status) != ec_done);
-
- return ccode;
-}
-
-/*
- * Send an external call sigp to another cpu and return without waiting
- * for its completion. Currently we do not support parameters with
- * asynchronous sigps.
- */
-sigp_ccode smp_ext_call_async(int cpu, ec_bit_sig sig)
-{
- struct _lowcore *lowcore = &get_cpu_lowcore(cpu);
- sigp_ccode ccode;
+ while (atomic_read(&ec.status) != (wait?ec_done:ec_executing));
- /*
- * Set signaling bit in lowcore of target cpu and kick it
- */
- atomic_set_mask(1<<sig, &lowcore->ext_call_fast);
- ccode = signal_processor(cpu, sigp_external_call);
return ccode;
}
/*
- * Send an external call sigp to every other cpu in the system and
- * wait for the completion of the sigps.
+ * Send a callback sigp to every other cpu in the system.
*/
-void smp_ext_call_sync_others(ec_cmd_sig cmd, void *parms)
+void smp_ext_call_others(void (*func)(void *info), void *info, int wait)
{
struct _lowcore *lowcore;
ec_ext_call ec[NR_CPUS];
@@ -326,9 +268,10 @@
if (smp_processor_id() == i)
continue;
lowcore = &get_cpu_lowcore(i);
- ec[i].cmd = cmd;
- atomic_set(&ec[i].status, ec_pending);
- ec[i].parms = parms;
+ ec[i].cmd = wait ? ec_callback_sync : ec_callback_async;
+ atomic_set(&ec[i].status, ec_pending);
+ ec[i].func = func;
+ ec[i].info = info;
do {
ec[i].next = (ec_ext_call *)
atomic_read(&lowcore->ext_call_queue);
@@ -341,16 +284,33 @@
for (i = 0; i < smp_num_cpus; i++) {
if (smp_processor_id() == i)
continue;
- while (atomic_read(&ec[i].status) != ec_done);
+ while (atomic_read(&ec[i].status) !=
+ (wait ? ec_done:ec_executing));
}
}
/*
+ * Send an external call sigp to another cpu and return without waiting
+ * for its completion.
+ */
+sigp_ccode smp_ext_bitcall(int cpu, ec_bit_sig sig)
+{
+ struct _lowcore *lowcore = &get_cpu_lowcore(cpu);
+ sigp_ccode ccode;
+
+ /*
+ * Set signaling bit in lowcore of target cpu and kick it
+ */
+ atomic_set_mask(1<<sig, &lowcore->ext_call_fast);
+ ccode = signal_processor(cpu, sigp_external_call);
+ return ccode;
+}
+
+/*
* Send an external call sigp to every other cpu in the system and
- * return without waiting for the completion of the sigps. Currently
- * we do not support parameters with asynchronous sigps.
+ * return without waiting for its completion.
*/
-void smp_ext_call_async_others(ec_bit_sig sig)
+void smp_ext_bitcall_others(ec_bit_sig sig)
{
struct _lowcore *lowcore;
sigp_ccode ccode;
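Two delivery mechanisms now coexist: smp_ext_call() queues an arbitrary
function pointer plus argument through the lowcore queue above, while
smp_ext_bitcall() handles the fixed, parameterless commands (restart, halt,
power off, reschedule, tlb flush) by setting a bit in the target's
ext_call_fast word and kicking it with a sigp external call, with no
queueing and no waiting. A usage sketch (my_callback is hypothetical):

    smp_ext_bitcall(cpu, ec_schedule);          /* fast path, fire and forget */
    smp_ext_call(cpu, my_callback, NULL, 1);    /* queued path, wait for done */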
@@ -420,7 +380,46 @@
void smp_send_stop(void)
{
- smp_signal_others(sigp_stop, 0, 1, NULL);
+ int i;
+ u32 dummy;
+ unsigned long low_core_addr;
+
+ /* write magic number to zero page (absolute 0) */
+
+ get_cpu_lowcore(smp_processor_id()).panic_magic = __PANIC_MAGIC;
+
+ /* stop all processors */
+
+ smp_signal_others(sigp_stop, 0, TRUE, NULL);
+
+ /* store status of all processors in their lowcores (real 0) */
+
+ for (i = 0; i < smp_num_cpus; i++) {
+ if (smp_processor_id() != i) {
+ int ccode;
+ low_core_addr = (unsigned long)&get_cpu_lowcore(i);
+ do {
+ ccode = signal_processor_ps(
+ &dummy,
+ low_core_addr,
+ i,
+ sigp_store_status_at_address);
+ } while(ccode == sigp_busy);
+ }
+ }
+}
+
+/*
+ * this function sends a 'purge tlb' signal to another CPU.
+ */
+void smp_ptlb_callback(void *info)
+{
+ local_flush_tlb();
+}
+
+void smp_ptlb_all(void)
+{
+ smp_ext_call_others(smp_ptlb_callback, NULL, 1);
}
/*
@@ -431,7 +430,44 @@
void smp_send_reschedule(int cpu)
{
- smp_ext_call_async(cpu, ec_schedule);
+ smp_ext_bitcall(cpu, ec_schedule);
+}
+
+/*
+ * parameter area for the set/clear control bit callbacks
+ */
+typedef struct
+{
+ __u16 start_ctl;
+ __u16 end_ctl;
+ __u32 orvals[16];
+ __u32 andvals[16];
+} ec_creg_mask_parms;
+
+/*
+ * callback for setting/clearing control bits
+ */
+void smp_ctl_bit_callback(void *info) {
+ ec_creg_mask_parms *pp;
+ u32 cregs[16];
+ int i;
+
+ pp = (ec_creg_mask_parms *) info;
+ asm volatile (" bras 1,0f\n"
+ " stctl 0,0,0(%0)\n"
+ "0: ex %1,0(1)\n"
+ : : "a" (cregs+pp->start_ctl),
+ "a" ((pp->start_ctl<<4) + pp->end_ctl)
+ : "memory", "1" );
+ for (i = pp->start_ctl; i <= pp->end_ctl; i++)
+ cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i];
+ asm volatile (" bras 1,0f\n"
+ " lctl 0,0,0(%0)\n"
+ "0: ex %1,0(1)\n"
+ : : "a" (cregs+pp->start_ctl),
+ "a" ((pp->start_ctl<<4) + pp->end_ctl)
+ : "memory", "1" );
+ return;
}
/*
@@ -445,7 +481,7 @@
parms.end_ctl = cr;
parms.orvals[cr] = 1 << bit;
parms.andvals[cr] = 0xFFFFFFFF;
- smp_ext_call_sync_others(ec_set_ctl_masked,&parms);
+ smp_ext_call_others(smp_ctl_bit_callback, &parms, 1);
}
__ctl_set_bit(cr, bit);
}
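The callback computes new = (old & andval) | orval for each control register
in [start_ctl, end_ctl]; setting a bit therefore uses an all-ones andval, and
clearing one (next hunk) uses a zero orval. The "ex" trick in
smp_ctl_bit_callback exists because stctl/lctl encode the register range in
the instruction itself: EXECUTE ORs (start_ctl << 4) + end_ctl into the
second byte of the template instruction, so for instance the range 9..11
yields mask 0x9b and the executed instruction becomes "stctl 9,11,...".
A usage sketch mirroring smp_ctl_set_bit, with hypothetical values
cr = 0, bit = 17:

    ec_creg_mask_parms parms;

    parms.start_ctl  = 0;
    parms.end_ctl    = 0;
    parms.orvals[0]  = 1 << 17;      /* force bit 17 on            */
    parms.andvals[0] = 0xFFFFFFFF;   /* leave all other bits alone */
    smp_ext_call_others(smp_ctl_bit_callback, &parms, 1);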
@@ -461,11 +497,35 @@
parms.end_ctl = cr;
parms.orvals[cr] = 0x00000000;
parms.andvals[cr] = ~(1 << bit);
- smp_ext_call_sync_others(ec_set_ctl_masked,&parms);
+ smp_ext_call_others(smp_ctl_bit_callback, &parms, 1);
}
__ctl_clear_bit(cr, bit);
}
+/*
+ * Call a function on all other processors
+ */
+
+int
+smp_call_function(void (*func)(void *info), void *info, int retry, int wait)
+/*
+ * [SUMMARY] Run a function on all other CPUs.
+ * <func> The function to run. This must be fast and non-blocking.
+ * <info> An arbitrary pointer to pass to the function.
+ * <retry> currently unused.
+ * <wait> If true, wait (atomically) until function has completed on other CPUs.
+ * [RETURNS] 0 on success, else a negative status code. Does not return until
+ * remote CPUs are nearly ready to execute <<func>> or are or have executed.
+ *
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler, you may call it from a bottom half handler.
+ */
+{
+ if (atomic_read(&smp_commenced) != 0)
+ smp_ext_call_others(func, info, 1);
+ (func)(info);
+ return 0;
+}
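Note the semantics actually implemented here: the broadcast is always
synchronous (the wait argument is ignored and 1 is passed through), retry is
unused, and the function also runs on the calling cpu, not only on the
others. A usage sketch with a hypothetical callback:

    static void drain_cache(void *unused)
    {
            /* must be fast and must not block */
    }

    smp_call_function(drain_cache, NULL, 0, 1);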
/*
* Lets check how many CPUs we have.
@@ -475,7 +535,6 @@
{
int curr_cpu;
- __cpu_logical_map[0] = boot_cpu_addr;
current->processor = 0;
smp_num_cpus = 1;
for (curr_cpu = 0;
@@ -527,7 +586,7 @@
struct pt_regs regs;
/* don't care about the psw and regs settings since we'll never
reschedule the forked task. */
- memset(&regs,sizeof(pt_regs),0);
+ memset(&regs,0,sizeof(pt_regs));
return do_fork(CLONE_VM|CLONE_PID, 0, &regs, 0);
}
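A classic swapped-argument bug: memset(s, c, n) fills n bytes with the value
c, so the old call asked for zero bytes of the value sizeof(pt_regs) and left
regs entirely uninitialized; the fix zeroes the whole structure:

    memset(&regs, sizeof(pt_regs), 0);   /* old: n == 0, a no-op    */
    memset(&regs, 0, sizeof(pt_regs));   /* new: zeroes all of regs */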
@@ -594,7 +653,10 @@
struct _lowcore *curr_lowcore;
sigp_ccode ccode;
int i;
-
+
+ /* request the 0x1202 external interrupt */
+ if (register_external_interrupt(0x1202, do_ext_call_interrupt) != 0)
+ panic("Couldn't request external interrupt 0x1202");
smp_count_cpus();
memset(lowcore_ptr,0,sizeof(lowcore_ptr));
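External calls arrive as external interruptions with code 0x1202; instead of
being dispatched from hard-wired interrupt code, the handler is now
registered through the s390_ext interface (hence the added include of
<asm/s390_ext.h> at the top of the patch). The registration implies the
handler type visible in the signature change earlier in this patch:

    /* Type implied by do_ext_call_interrupt(struct pt_regs *, __u16);
     * the typedef name itself is an assumption. */
    typedef void (*ext_int_handler_t)(struct pt_regs *regs, __u16 code);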
@@ -705,24 +767,7 @@
*/
irq_enter(cpu, 0);
- update_one_process(p, 1, user, system, cpu);
- if (p->pid) {
- p->counter -= 1;
- if (p->counter <= 0) {
- p->counter = 0;
- p->need_resched = 1;
- }
- if (p->nice > 0) {
- kstat.cpu_nice += user;
- kstat.per_cpu_nice[cpu] += user;
- } else {
- kstat.cpu_user += user;
- kstat.per_cpu_user[cpu] += user;
- }
- kstat.cpu_system += system;
- kstat.per_cpu_system[cpu] += system;
-
- }
+ update_process_times(user);
irq_exit(cpu, 0);
}
}
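The final hunk drops the hand-rolled per-tick accounting (counter decrement,
need_resched, kstat user/nice/system bookkeeping) in favour of the generic
helper, which does the same work for every architecture. Its 2.4 signature,
for reference:

    /* Declared in <linux/sched.h>; user_tick is 1 for ticks spent in
     * user mode, 0 for system time. */
    void update_process_times(int user_tick);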