patch-2.4.22 linux-2.4.22/arch/ia64/kernel/ptrace.c
- Lines: 278
- Date: 2003-08-25 04:44:39.000000000 -0700
- Orig file: linux-2.4.21/arch/ia64/kernel/ptrace.c
- Orig date: 2003-06-13 07:51:29.000000000 -0700
diff -urN linux-2.4.21/arch/ia64/kernel/ptrace.c linux-2.4.22/arch/ia64/kernel/ptrace.c
@@ -1,7 +1,7 @@
/*
* Kernel support for the ptrace() and syscall tracing interfaces.
*
- * Copyright (C) 1999-2002 Hewlett-Packard Co
+ * Copyright (C) 1999-2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*
* Derived from the x86 and Alpha versions. Most of the code in here
@@ -25,7 +25,7 @@
#include <asm/uaccess.h>
#include <asm/unwind.h>
#ifdef CONFIG_PERFMON
-# include <asm/perfmon.h>
+#include <asm/perfmon.h>
#endif
/*
@@ -198,20 +198,27 @@
* rnat0/rnat1 gets its value from sw->ar_rnat.
*/
static unsigned long
-get_rnat (struct pt_regs *pt, struct switch_stack *sw,
- unsigned long *krbs, unsigned long *urnat_addr)
+get_rnat (struct task_struct *task, struct switch_stack *sw,
+ unsigned long *krbs, unsigned long *urnat_addr, unsigned long *urbs_end)
{
- unsigned long rnat0 = 0, rnat1 = 0, urnat = 0, *slot0_kaddr, kmask = ~0UL;
+ unsigned long rnat0 = 0, rnat1 = 0, urnat = 0, *slot0_kaddr, umask = 0, mask, m;
unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
- long num_regs;
+ long num_regs, nbits;
+ struct pt_regs *pt;
+ pt = ia64_task_regs(task);
kbsp = (unsigned long *) sw->ar_bspstore;
ubspstore = (unsigned long *) pt->ar_bspstore;
+
+ if (urbs_end < urnat_addr)
+ nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_end);
+ else
+ nbits = 63;
+ mask = (1UL << nbits) - 1;
/*
- * First, figure out which bit number slot 0 in user-land maps
- * to in the kernel rnat. Do this by figuring out how many
- * register slots we're beyond the user's backingstore and
- * then computing the equivalent address in kernel space.
+ * First, figure out which bit number slot 0 in user-land maps to in the kernel
+ * rnat. Do this by figuring out how many register slots we're beyond the user's
+ * backingstore and then computing the equivalent address in kernel space.
*/
num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
@@ -221,20 +228,26 @@
if (ubspstore + 63 > urnat_addr) {
/* some bits need to be merged in from pt->ar_rnat */
- kmask = ~((1UL << ia64_rse_slot_num(ubspstore)) - 1);
- urnat = (pt->ar_rnat & ~kmask);
+ umask = ((1UL << ia64_rse_slot_num(ubspstore)) - 1) & mask;
+ urnat = (pt->ar_rnat & umask);
+ mask &= ~umask;
+ if (!mask)
+ return urnat;
}
- if (rnat0_kaddr >= kbsp) {
+
+ m = mask << shift;
+ if (rnat0_kaddr >= kbsp)
rnat0 = sw->ar_rnat;
- } else if (rnat0_kaddr > krbs) {
+ else if (rnat0_kaddr > krbs)
rnat0 = *rnat0_kaddr;
- }
- if (rnat1_kaddr >= kbsp) {
+ urnat |= (rnat0 & m) >> shift;
+
+ m = mask >> (63 - shift);
+ if (rnat1_kaddr >= kbsp)
rnat1 = sw->ar_rnat;
- } else if (rnat1_kaddr > krbs) {
+ else if (rnat1_kaddr > krbs)
rnat1 = *rnat1_kaddr;
- }
- urnat |= ((rnat1 << (63 - shift)) | (rnat0 >> shift)) & kmask;
+ urnat |= (rnat1 & m) << (63 - shift);
return urnat;
}
@@ -242,22 +255,49 @@
* The reverse of get_rnat.
*/
static void
-put_rnat (struct pt_regs *pt, struct switch_stack *sw,
- unsigned long *krbs, unsigned long *urnat_addr, unsigned long urnat)
+put_rnat (struct task_struct *task, struct switch_stack *sw,
+ unsigned long *krbs, unsigned long *urnat_addr, unsigned long urnat,
+ unsigned long *urbs_end)
{
- unsigned long rnat0 = 0, rnat1 = 0, rnat = 0, *slot0_kaddr, kmask = ~0UL, mask;
+ unsigned long rnat0 = 0, rnat1 = 0, *slot0_kaddr, umask = 0, mask, m;
unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
- long num_regs;
+ long num_regs, nbits;
+ struct pt_regs *pt;
+ unsigned long cfm, *urbs_kargs;
+ struct unw_frame_info info;
+ pt = ia64_task_regs(task);
kbsp = (unsigned long *) sw->ar_bspstore;
ubspstore = (unsigned long *) pt->ar_bspstore;
+
+ urbs_kargs = urbs_end;
+ if ((long)pt->cr_ifs >= 0) {
+ /*
+ * If entered via syscall, don't allow user to set rnat bits
+ * for syscall args.
+ */
+ unw_init_from_blocked_task(&info,task);
+ if (unw_unwind_to_user(&info) == 0) {
+ unw_get_cfm(&info,&cfm);
+ urbs_kargs = ia64_rse_skip_regs(urbs_end,-(cfm & 0x7f));
+ }
+ }
+
+ if (urbs_kargs >= urnat_addr)
+ nbits = 63;
+ else {
+ if ((urnat_addr - 63) >= urbs_kargs)
+ return;
+ nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_kargs);
+ }
+ mask = (1UL << nbits) - 1;
+
/*
- * First, figure out which bit number slot 0 in user-land maps
- * to in the kernel rnat. Do this by figuring out how many
- * register slots we're beyond the user's backingstore and
- * then computing the equivalent address in kernel space.
+ * First, figure out which bit number slot 0 in user-land maps to in the kernel
+ * rnat. Do this by figuring out how many register slots we're beyond the user's
+ * backingstore and then computing the equivalent address in kernel space.
*/
- num_regs = (long) ia64_rse_num_regs(ubspstore, urnat_addr + 1);
+ num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
shift = ia64_rse_slot_num(slot0_kaddr);
rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
@@ -265,28 +305,29 @@
if (ubspstore + 63 > urnat_addr) {
/* some bits need to be place in pt->ar_rnat: */
- kmask = ~((1UL << ia64_rse_slot_num(ubspstore)) - 1);
- pt->ar_rnat = (pt->ar_rnat & kmask) | (rnat & ~kmask);
+ umask = ((1UL << ia64_rse_slot_num(ubspstore)) - 1) & mask;
+ pt->ar_rnat = (pt->ar_rnat & ~umask) | (urnat & umask);
+ mask &= ~umask;
+ if (!mask)
+ return;
}
/*
* Note: Section 11.1 of the EAS guarantees that bit 63 of an
* rnat slot is ignored. so we don't have to clear it here.
*/
rnat0 = (urnat << shift);
- mask = ~0UL << shift;
- if (rnat0_kaddr >= kbsp) {
- sw->ar_rnat = (sw->ar_rnat & ~mask) | (rnat0 & mask);
- } else if (rnat0_kaddr > krbs) {
- *rnat0_kaddr = ((*rnat0_kaddr & ~mask) | (rnat0 & mask));
- }
+ m = mask << shift;
+ if (rnat0_kaddr >= kbsp)
+ sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat0 & m);
+ else if (rnat0_kaddr > krbs)
+ *rnat0_kaddr = ((*rnat0_kaddr & ~m) | (rnat0 & m));
rnat1 = (urnat >> (63 - shift));
- mask = ~0UL >> (63 - shift);
- if (rnat1_kaddr >= kbsp) {
- sw->ar_rnat = (sw->ar_rnat & ~mask) | (rnat1 & mask);
- } else if (rnat1_kaddr > krbs) {
- *rnat1_kaddr = ((*rnat1_kaddr & ~mask) | (rnat1 & mask));
- }
+ m = mask >> (63 - shift);
+ if (rnat1_kaddr >= kbsp)
+ sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat1 & m);
+ else if (rnat1_kaddr > krbs)
+ *rnat1_kaddr = ((*rnat1_kaddr & ~m) | (rnat1 & m));
}
/*
@@ -319,7 +360,7 @@
* read the corresponding bits in the kernel RBS.
*/
rnat_addr = ia64_rse_rnat_addr(laddr);
- ret = get_rnat(child_regs, child_stack, krbs, rnat_addr);
+ ret = get_rnat(child, child_stack, krbs, rnat_addr, urbs_end);
if (laddr == rnat_addr) {
/* return NaT collection word itself */
@@ -370,7 +411,7 @@
* => write the corresponding bits in the kernel RBS.
*/
if (ia64_rse_is_rnat_slot(laddr))
- put_rnat(child_regs, child_stack, krbs, laddr, val);
+ put_rnat(child, child_stack, krbs, laddr, val, urbs_end);
else {
if (laddr < urbs_end) {
regnum = ia64_rse_num_regs(bspstore, laddr);
@@ -578,16 +619,11 @@
ia64_flush_fph (struct task_struct *task)
{
struct ia64_psr *psr = ia64_psr(ia64_task_regs(task));
-#ifdef CONFIG_SMP
- struct task_struct *fpu_owner = current;
-#else
- struct task_struct *fpu_owner = ia64_get_fpu_owner();
-#endif
- if (task == fpu_owner && psr->mfh) {
+ if (ia64_is_local_fpu_owner(task) && psr->mfh) {
psr->mfh = 0;
- ia64_save_fpu(&task->thread.fph[0]);
task->thread.flags |= IA64_THREAD_FPH_VALID;
+ ia64_save_fpu(&task->thread.fph[0]);
}
}
@@ -609,10 +645,7 @@
task->thread.flags |= IA64_THREAD_FPH_VALID;
memset(&task->thread.fph, 0, sizeof(task->thread.fph));
}
-#ifndef CONFIG_SMP
- if (ia64_get_fpu_owner() == task)
- ia64_set_fpu_owner(0);
-#endif
+ ia64_drop_fpu(task);
psr->dfh = 1;
}
@@ -701,7 +734,9 @@
case PT_R4: case PT_R5: case PT_R6: case PT_R7:
if (write_access) {
/* read NaT bit first: */
- ret = unw_get_gr(&info, (addr - PT_R4)/8 + 4, data, &nat);
+ unsigned long dummy;
+
+ ret = unw_get_gr(&info, (addr - PT_R4)/8 + 4, &dummy, &nat);
if (ret < 0)
return ret;
}
@@ -846,8 +881,7 @@
* IA64_THREAD_DBG_VALID. The registers are restored by the PMU context
* switch code.
*/
- if (pfm_use_debug_registers(child))
- return -1;
+ if (pfm_use_debug_registers(child)) return -1;
#endif
if (!(child->thread.flags & IA64_THREAD_DBG_VALID)) {
@@ -1344,10 +1378,11 @@
set_current_state(TASK_STOPPED);
notify_parent(current, SIGCHLD);
schedule();
+
/*
- * This isn't the same as continuing with a signal, but it
- * will do for normal use. strace only continues with a
- * signal if the stopping signal is not SIGTRAP. -brl
+ * This isn't the same as continuing with a signal, but it will do for normal use.
+ * strace only continues with a signal if the stopping signal is not SIGTRAP.
+ * -brl
*/
if (current->exit_code) {
send_sig(current->exit_code, current, 1);
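The core of this patch is the reworked NaT-bit handling in get_rnat()/put_rnat(): only nbits (at most 63) of the user-visible rnat word are treated as valid, bits for slots below the user's bspstore are taken from pt->ar_rnat, and the rest are assembled from the two kernel RBS rnat words split at "shift". The following is a minimal, user-space sketch of that bit arithmetic only; merge_rnat() and below_bspstore_bits are illustrative names, not kernel interfaces, and the real code obtains shift/nbits from the ia64_rse_*() helpers.

/*
 * Illustrative user-space sketch of the rnat merging done by the new
 * get_rnat().  Not kernel code: sample names and values only.
 */
#include <stdio.h>

static unsigned long
merge_rnat(unsigned long pt_ar_rnat, unsigned long rnat0, unsigned long rnat1,
           unsigned long shift, long nbits, unsigned long below_bspstore_bits)
{
        /* nbits <= 63, so this never overflows; bit 63 of an rnat word is ignored */
        unsigned long mask = (1UL << nbits) - 1;
        unsigned long umask, urnat, m;

        /* bits whose slots precede the user's bspstore come from pt->ar_rnat */
        umask = below_bspstore_bits & mask;
        urnat = pt_ar_rnat & umask;
        mask &= ~umask;
        if (!mask)
                return urnat;

        /* low part of the remaining window comes from rnat0 ... */
        m = mask << shift;
        urnat |= (rnat0 & m) >> shift;

        /* ... and the high part from rnat1 */
        m = mask >> (63 - shift);
        urnat |= (rnat1 & m) << (63 - shift);
        return urnat;
}

int main(void)
{
        /* arbitrary sample inputs, just to exercise the merge */
        unsigned long r = merge_rnat(0x5UL, 0xf0UL, 0x3UL, 4, 63, 0x3UL);
        printf("merged rnat = %#lx\n", r);
        return 0;
}

put_rnat() in the patch performs the inverse scatter with the same umask/mask/shift quantities, additionally clamping nbits so that a traced task cannot set NaT bits for its syscall arguments.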