patch-2.4.4 linux/arch/ia64/kernel/ptrace.c
- Lines: 800
- Date: Thu Apr 5 12:51:47 2001
- Orig file: v2.4.3/linux/arch/ia64/kernel/ptrace.c
- Orig date: Tue Feb 13 14:13:43 2001
diff -u --recursive --new-file v2.4.3/linux/arch/ia64/kernel/ptrace.c linux/arch/ia64/kernel/ptrace.c
@@ -1,8 +1,8 @@
/*
* Kernel support for the ptrace() and syscall tracing interfaces.
*
- * Copyright (C) 1999-2000 Hewlett-Packard Co
- * Copyright (C) 1999-2000 David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 1999-2001 Hewlett-Packard Co
+ * Copyright (C) 1999-2001 David Mosberger-Tang <davidm@hpl.hp.com>
*
* Derived from the x86 and Alpha versions. Most of the code in here
* could actually be factored into a common set of routines.
@@ -22,6 +22,7 @@
#include <asm/rse.h>
#include <asm/system.h>
#include <asm/uaccess.h>
+#include <asm/unwind.h>
/*
* Bits in the PSR that we allow ptrace() to change:
@@ -36,8 +37,6 @@
(IA64_PSR_UM | IA64_PSR_DB | IA64_PSR_IS | IA64_PSR_ID | IA64_PSR_DD | IA64_PSR_RI)
#define IPSR_READ_MASK IPSR_WRITE_MASK
-#ifdef CONFIG_IA64_NEW_UNWIND
-
#define PTRACE_DEBUG 1
#if PTRACE_DEBUG
@@ -97,57 +96,6 @@
# undef PUT_BITS
}
-#else /* !CONFIG_IA64_NEW_UNWIND */
-
-/*
- * Collect the NaT bits for r1-r31 from sw->caller_unat and
- * sw->ar_unat and return a NaT bitset where bit i is set iff the NaT
- * bit of register i is set.
- */
-long
-ia64_get_nat_bits (struct pt_regs *pt, struct switch_stack *sw)
-{
-# define GET_BITS(str, first, last, unat) \
- ({ \
- unsigned long bit = ia64_unat_pos(&str->r##first); \
- unsigned long mask = ((1UL << (last - first + 1)) - 1) << first; \
- (ia64_rotl(unat, first) >> bit) & mask; \
- })
- unsigned long val;
-
- val = GET_BITS(pt, 1, 3, sw->caller_unat);
- val |= GET_BITS(pt, 12, 15, sw->caller_unat);
- val |= GET_BITS(pt, 8, 11, sw->caller_unat);
- val |= GET_BITS(pt, 16, 31, sw->caller_unat);
- val |= GET_BITS(sw, 4, 7, sw->ar_unat);
- return val;
-
-# undef GET_BITS
-}
-
-/*
- * Store the NaT bitset NAT in pt->caller_unat and sw->ar_unat.
- */
-void
-ia64_put_nat_bits (struct pt_regs *pt, struct switch_stack *sw, unsigned long nat)
-{
-# define PUT_BITS(str, first, last, nat) \
- ({ \
- unsigned long bit = ia64_unat_pos(&str->r##first); \
- unsigned long mask = ((1UL << (last - first + 1)) - 1) << bit; \
- (ia64_rotr(nat, first) << bit) & mask; \
- })
- sw->caller_unat = PUT_BITS(pt, 1, 3, nat);
- sw->caller_unat |= PUT_BITS(pt, 12, 15, nat);
- sw->caller_unat |= PUT_BITS(pt, 8, 11, nat);
- sw->caller_unat |= PUT_BITS(pt, 16, 31, nat);
- sw->ar_unat = PUT_BITS(sw, 4, 7, nat);
-
-# undef PUT_BITS
-}
-
-#endif /* !CONFIG_IA64_NEW_UNWIND */
-
#define IA64_MLX_TEMPLATE 0x2
#define IA64_MOVL_OPCODE 6
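The ia64_get_nat_bits()/ia64_put_nat_bits() pair deleted above existed only as a duplicate of the unwinder-based copy kept earlier in the file; both gather NaT bits scattered through the UNAT register with a single rotate and mask. A stand-alone toy version of the GET_BITS() step (the UNAT position 20 is invented for the demo; the real position comes from ia64_unat_pos()):

#include <stdio.h>
#include <stdint.h>

static uint64_t rotl64 (uint64_t x, unsigned n)
{
    n &= 63;
    return n ? (x << n) | (x >> (64 - n)) : x;
}

/* gather the NaT bits of registers first..last, whose UNAT bits start
   at position 'pos', into bits first..last of the result */
static uint64_t get_bits (uint64_t unat, unsigned pos, unsigned first, unsigned last)
{
    uint64_t mask = ((1ULL << (last - first + 1)) - 1) << first;

    /* bit 'pos' -> bit 'pos + first' (rotate) -> bit 'first' (shift) */
    return (rotl64(unat, first) >> pos) & mask;
}

int main (void)
{
    uint64_t unat = 0xbULL << 20;   /* pretend r4, r5, r7 are NaT, at UNAT bits 20..23 */

    printf("%#llx\n", (unsigned long long) get_bits(unat, 20, 4, 7));  /* prints 0xb0 */
    return 0;
}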
@@ -215,7 +163,7 @@
* | slot01 | > child_regs->ar_rnat
* +--------+ |
* | slot02 | / kernel rbs
- * +--------+ +--------+
+ * +--------+ +--------+
* <- child_regs->ar_bspstore | slot61 | <-- krbs
* +- - - - + +--------+
* | slot62 |
@@ -275,7 +223,7 @@
/* some bits need to be merged in from pt->ar_rnat */
kmask = ~((1UL << ia64_rse_slot_num(ubspstore)) - 1);
urnat = (pt->ar_rnat & ~kmask);
- }
+ }
if (rnat0_kaddr >= kbsp) {
rnat0 = sw->ar_rnat;
} else if (rnat0_kaddr > krbs) {
@@ -319,7 +267,7 @@
/* some bits need to be place in pt->ar_rnat: */
kmask = ~((1UL << ia64_rse_slot_num(ubspstore)) - 1);
pt->ar_rnat = (pt->ar_rnat & kmask) | (rnat & ~kmask);
- }
+ }
/*
* Note: Section 11.1 of the EAS guarantees that bit 63 of an
* rnat slot is ignored, so we don't have to clear it here.
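The get_rnat()/put_rnat() code touched by these whitespace fixes, and most of the rewritten functions below, lean on the RSE address helpers from <asm/rse.h>. A self-contained sketch of those helpers, re-derived here from the IA-64 convention that every 64th 8-byte backing-store slot holds an RNAT collection (treat the exact definitions as assumptions; the kernel's may differ in detail):

#include <stdio.h>

/* slot number (0..63) of a backing-store address within its 64-slot group */
static unsigned long rse_slot_num (unsigned long *addr)
{
    return ((unsigned long) addr >> 3) & 0x3f;
}

/* does addr point at an RNAT collection slot? */
static int rse_is_rnat_slot (unsigned long *addr)
{
    return rse_slot_num(addr) == 0x3f;
}

/* address of the RNAT collection slot covering slot_addr */
static unsigned long *rse_rnat_addr (unsigned long *slot_addr)
{
    return (unsigned long *) ((unsigned long) slot_addr | (0x3f << 3));
}

/* number of register slots between bspstore and bsp, RNAT slots excluded */
static unsigned long rse_num_regs (unsigned long *bspstore, unsigned long *bsp)
{
    unsigned long slots = bsp - bspstore;

    return slots - (rse_slot_num(bspstore) + slots)/0x40;
}

/* address num_regs registers past addr, stepping over RNAT slots */
static unsigned long *rse_skip_regs (unsigned long *addr, long num_regs)
{
    long delta = rse_slot_num(addr) + num_regs;

    if (num_regs < 0)
        delta -= 0x3e;
    return addr + num_regs + delta/0x3f;
}

int main (void)
{
    unsigned long *b = (unsigned long *) 0x80000000UL;

    printf("slot 63 is rnat: %d\n", rse_is_rnat_slot(b + 63));        /* 1 */
    printf("rnat addr of b: %p\n", (void *) rse_rnat_addr(b));        /* b + 63 */
    printf("skip 64 regs: +%ld slots\n", rse_skip_regs(b, 64) - b);   /* 65 */
    printf("regs in 65 slots: %lu\n", rse_num_regs(b, b + 65));       /* 64 */
    return 0;
}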
@@ -342,9 +290,9 @@
}
long
-ia64_peek (struct pt_regs *regs, struct task_struct *child, unsigned long addr, long *val)
+ia64_peek (struct task_struct *child, unsigned long user_bsp, unsigned long addr, long *val)
{
- unsigned long *bspstore, *krbs, krbs_num_regs, regnum, *rbs_end, *laddr;
+ unsigned long *bspstore, *krbs, regnum, *laddr, *ubsp = (long *) user_bsp;
struct switch_stack *child_stack;
struct pt_regs *child_regs;
size_t copied;
@@ -352,35 +300,22 @@
laddr = (unsigned long *) addr;
child_regs = ia64_task_regs(child);
-#ifdef CONFIG_IA64_NEW_UNWIND
child_stack = (struct switch_stack *) (child->thread.ksp + 16);
-#else
- child_stack = (struct switch_stack *) child_regs - 1;
-#endif
bspstore = (unsigned long *) child_regs->ar_bspstore;
krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
- krbs_num_regs = ia64_rse_num_regs(krbs, (unsigned long *) child_stack->ar_bspstore);
- rbs_end = ia64_rse_skip_regs(bspstore, krbs_num_regs);
- if (laddr >= bspstore && laddr <= ia64_rse_rnat_addr(rbs_end)) {
+ if (laddr >= bspstore && laddr <= ia64_rse_rnat_addr(ubsp)) {
/*
- * Attempt to read the RBS in an area that's actually
- * on the kernel RBS => read the corresponding bits in
- * the kernel RBS.
+ * Attempt to read the RBS in an area that's actually on the kernel RBS =>
+ * read the corresponding bits in the kernel RBS.
*/
if (ia64_rse_is_rnat_slot(laddr))
ret = get_rnat(child_regs, child_stack, krbs, laddr);
else {
- regnum = ia64_rse_num_regs(bspstore, laddr);
- laddr = ia64_rse_skip_regs(krbs, regnum);
- if (regnum >= krbs_num_regs) {
+ if (laddr >= ubsp)
ret = 0;
- } else {
- if ((unsigned long) laddr >= (unsigned long) high_memory) {
- printk("yikes: trying to access long at %p\n",
- (void *) laddr);
- return -EIO;
- }
- ret = *laddr;
+ else {
+ regnum = ia64_rse_num_regs(bspstore, laddr);
+ ret = *ia64_rse_skip_regs(krbs, regnum);
}
}
} else {
@@ -393,36 +328,28 @@
}
long
-ia64_poke (struct pt_regs *regs, struct task_struct *child, unsigned long addr, long val)
+ia64_poke (struct task_struct *child, unsigned long user_bsp, unsigned long addr, long val)
{
- unsigned long *bspstore, *krbs, krbs_num_regs, regnum, *rbs_end, *laddr;
+ unsigned long *bspstore, *krbs, regnum, *laddr, *ubsp = (long *) user_bsp;
struct switch_stack *child_stack;
struct pt_regs *child_regs;
laddr = (unsigned long *) addr;
child_regs = ia64_task_regs(child);
-#ifdef CONFIG_IA64_NEW_UNWIND
child_stack = (struct switch_stack *) (child->thread.ksp + 16);
-#else
- child_stack = (struct switch_stack *) child_regs - 1;
-#endif
bspstore = (unsigned long *) child_regs->ar_bspstore;
krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
- krbs_num_regs = ia64_rse_num_regs(krbs, (unsigned long *) child_stack->ar_bspstore);
- rbs_end = ia64_rse_skip_regs(bspstore, krbs_num_regs);
- if (laddr >= bspstore && laddr <= ia64_rse_rnat_addr(rbs_end)) {
+ if (laddr >= bspstore && laddr <= ia64_rse_rnat_addr(ubsp)) {
/*
- * Attempt to write the RBS in an area that's actually
- * on the kernel RBS => write the corresponding bits
- * in the kernel RBS.
+ * Attempt to write the RBS in an area that's actually on the kernel RBS
+ * => write the corresponding bits in the kernel RBS.
*/
if (ia64_rse_is_rnat_slot(laddr))
put_rnat(child_regs, child_stack, krbs, laddr, val);
else {
- regnum = ia64_rse_num_regs(bspstore, laddr);
- laddr = ia64_rse_skip_regs(krbs, regnum);
- if (regnum < krbs_num_regs) {
- *laddr = val;
+ if (laddr < ubsp) {
+ regnum = ia64_rse_num_regs(bspstore, laddr);
+ *ia64_rse_skip_regs(krbs, regnum) = val;
}
}
} else if (access_process_vm(child, addr, &val, sizeof(val), 1) != sizeof(val)) {
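Both rewritten accessors now share one mapping: a user backing-store address below the computed user bsp corresponds, register-for-register, to a slot on the kernel RBS; reads above it yield zero and writes above it are silently dropped, which replaces the old krbs_num_regs bound (and the "yikes" high_memory printk). A sketch of that mapping, with rbs_read()/rbs_write() as illustrative names (not kernel functions) and the rse_* helpers repeated from the earlier sketch:

#include <stdio.h>

static unsigned long rse_slot_num (unsigned long *a) { return ((unsigned long) a >> 3) & 0x3f; }
static unsigned long rse_num_regs (unsigned long *f, unsigned long *t)
{ unsigned long s = t - f; return s - (rse_slot_num(f) + s)/0x40; }
static unsigned long *rse_skip_regs (unsigned long *a, long n)
{ long d = rse_slot_num(a) + n; if (n < 0) d -= 0x3e; return a + n + d/0x3f; }

/* reads under ubsp come from the kernel RBS; above it they read as zero */
static long rbs_read (unsigned long *bspstore, unsigned long *krbs,
                      unsigned long *ubsp, unsigned long *laddr)
{
    if (laddr >= ubsp)
        return 0;
    return *rse_skip_regs(krbs, rse_num_regs(bspstore, laddr));
}

/* writes above ubsp are dropped, mirroring the read side */
static void rbs_write (unsigned long *bspstore, unsigned long *krbs,
                       unsigned long *ubsp, unsigned long *laddr, long val)
{
    if (laddr < ubsp)
        *rse_skip_regs(krbs, rse_num_regs(bspstore, laddr)) = val;
}

int main (void)
{
    unsigned long krbs[8] = { 0 };
    unsigned long *ubspstore = (unsigned long *) 0x80000000UL;
    unsigned long *ubsp = ubspstore + 4;    /* pretend 4 dirty registers */

    rbs_write(ubspstore, krbs, ubsp, ubspstore + 2, 42);
    printf("%ld %ld\n", rbs_read(ubspstore, krbs, ubsp, ubspstore + 2),
           rbs_read(ubspstore, krbs, ubsp, ubspstore + 5));   /* 42 0 */
    return 0;
}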
@@ -432,83 +359,76 @@
}
/*
- * Synchronize (i.e, write) the RSE backing store living in kernel
- * space to the VM of the indicated child process.
- *
- * If new_bsp is non-zero, the bsp will (effectively) be updated to
- * the new value upon resumption of the child process. This is
- * accomplished by setting the loadrs value to zero and the bspstore
- * value to the new bsp value.
- *
- * When new_bsp and force_loadrs_to_zero are both 0, the register
- * backing store in kernel space is written to user space and the
- * loadrs and bspstore values are left alone.
- *
- * When new_bsp is zero and force_loadrs_to_zero is 1 (non-zero),
- * loadrs is set to 0, and the bspstore value is set to the old bsp
- * value. This will cause the stacked registers (r32 and up) to be
- * obtained entirely from the child's memory space rather than
- * from the kernel. (This makes it easier to write code for
- * modifying the stacked registers in multi-threaded programs.)
- *
- * Note: I had originally written this function without the
- * force_loadrs_to_zero parameter; it was written so that loadrs would
- * always be set to zero. But I had problems with certain system
- * calls apparently causing a portion of the RBS to be zeroed. (I
- * still don't understand why this was happening.) Anyway, it'd
- * definitely less intrusive to leave loadrs and bspstore alone if
- * possible.
+ * Calculate the user-level address that would have been in ar.bsp had the user executed a
+ * "cover" instruction right before entering the kernel.
*/
-static long
-sync_kernel_register_backing_store (struct task_struct *child,
- long new_bsp,
- int force_loadrs_to_zero)
+unsigned long
+ia64_get_user_bsp (struct task_struct *child, struct pt_regs *pt)
{
- unsigned long *krbs, bspstore, *kbspstore, bsp, rbs_end, addr, val;
- long ndirty, ret = 0;
- struct pt_regs *child_regs = ia64_task_regs(child);
-
-#ifdef CONFIG_IA64_NEW_UNWIND
+ unsigned long *krbs, *bspstore, cfm;
struct unw_frame_info info;
- unsigned long cfm, sof;
-
- unw_init_from_blocked_task(&info, child);
- if (unw_unwind_to_user(&info) < 0)
- return -1;
-
- unw_get_bsp(&info, (unsigned long *) &kbspstore);
+ long ndirty;
krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
- ndirty = ia64_rse_num_regs(krbs, krbs + (child_regs->loadrs >> 19));
- bspstore = child_regs->ar_bspstore;
- bsp = (long) ia64_rse_skip_regs((long *)bspstore, ndirty);
-
- cfm = child_regs->cr_ifs;
- if (!(cfm & (1UL << 63)))
- unw_get_cfm(&info, &cfm);
- sof = (cfm & 0x7f);
- rbs_end = (long) ia64_rse_skip_regs((long *)bspstore, sof);
-#else
- struct switch_stack *child_stack;
- unsigned long krbs_num_regs;
+ bspstore = (unsigned long *) pt->ar_bspstore;
+ ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));
- child_stack = (struct switch_stack *) child_regs - 1;
- kbspstore = (unsigned long *) child_stack->ar_bspstore;
- krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
- ndirty = ia64_rse_num_regs(krbs, krbs + (child_regs->loadrs >> 19));
- bspstore = child_regs->ar_bspstore;
- bsp = (long) ia64_rse_skip_regs((long *)bspstore, ndirty);
- krbs_num_regs = ia64_rse_num_regs(krbs, kbspstore);
- rbs_end = (long) ia64_rse_skip_regs((long *)bspstore, krbs_num_regs);
-#endif
+ if ((long) pt->cr_ifs >= 0) {
+ /*
+ * If bit 63 of cr.ifs is cleared, the kernel was entered via a system
+ * call and we need to recover the CFM that existed on entry to the
+ * kernel by unwinding the kernel stack.
+ */
+ unw_init_from_blocked_task(&info, child);
+ if (unw_unwind_to_user(&info) == 0) {
+ unw_get_cfm(&info, &cfm);
+ ndirty += (cfm & 0x7f);
+ }
+ }
+ return (unsigned long) ia64_rse_skip_regs(bspstore, ndirty);
+}
- /* Return early if nothing to do */
- if (bsp == new_bsp)
+/*
+ * Synchronize (i.e., write) the RSE backing store living in kernel space to the VM of the
+ * indicated child process.
+ *
+ * If new_bsp is non-zero, the bsp will (effectively) be updated to the new value upon
+ * resumption of the child process. This is accomplished by setting the loadrs value to
+ * zero and the bspstore value to the new bsp value.
+ *
+ * When new_bsp and flush_user_rbs are both 0, the register backing store in kernel space
+ * is written to user space and the loadrs and bspstore values are left alone.
+ *
+ * When new_bsp is zero and flush_user_rbs is 1 (non-zero), loadrs is set to 0, and the
+ * bspstore value is set to the old bsp value. This will cause the stacked registers (r32
+ * and up) to be obtained entirely from the child's memory space rather than from the
+ * kernel. (This makes it easier to write code for modifying the stacked registers in
+ * multi-threaded programs.)
+ *
+ * Note: I had originally written this function without the flush_user_rbs parameter; it
+ * was written so that loadrs would always be set to zero. But I had problems with
+ * certain system calls apparently causing a portion of the RBS to be zeroed. (I still
+ * don't understand why this was happening.)  Anyway, it'd definitely be less intrusive to
+ * leave loadrs and bspstore alone if possible.
+ */
+static long
+sync_kernel_register_backing_store (struct task_struct *child, long user_bsp, long new_bsp,
+ int flush_user_rbs)
+{
+ struct pt_regs *child_regs = ia64_task_regs(child);
+ unsigned long addr, val;
+ long ret;
+
+ /*
+ * Return early if nothing to do. Note that new_bsp will be zero if the caller
+ * wants to force synchronization without changing bsp.
+ */
+ if (user_bsp == new_bsp)
return 0;
/* Write portion of backing store living on kernel stack to the child's VM. */
- for (addr = bspstore; addr < rbs_end; addr += 8) {
- ret = ia64_peek(child_regs, child, addr, &val);
+ for (addr = child_regs->ar_bspstore; addr < user_bsp; addr += 8) {
+ ret = ia64_peek(child, user_bsp, addr, &val);
if (ret != 0)
return ret;
if (access_process_vm(child, addr, &val, sizeof(val), 1) != sizeof(val))
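ia64_get_user_bsp() centralizes a computation the old code duplicated: the dirty-partition byte count sits at bit 16 of the saved ar.rsc image, so pt->loadrs >> 19 converts it straight into 8-byte slots, and on the syscall path (cr.ifs valid bit clear, no "cover" executed) the current frame's size-of-frame, the low 7 bits of the unwound CFM, is added on top. A compile-able sketch of the arithmetic, assuming the rse_* helpers from the earlier sketch:

#include <stdio.h>

static unsigned long rse_slot_num (unsigned long *a) { return ((unsigned long) a >> 3) & 0x3f; }
static unsigned long rse_num_regs (unsigned long *f, unsigned long *t)
{ unsigned long s = t - f; return s - (rse_slot_num(f) + s)/0x40; }
static unsigned long *rse_skip_regs (unsigned long *a, long n)
{ long d = rse_slot_num(a) + n; if (n < 0) d -= 0x3e; return a + n + d/0x3f; }

/* user bsp as if "cover" had run right before kernel entry; cfm stands in
   for what the unwinder would hand back on the syscall path */
static unsigned long
get_user_bsp (unsigned long *krbs, unsigned long *ubspstore,
              unsigned long loadrs, unsigned long cr_ifs, unsigned long cfm)
{
    long ndirty = rse_num_regs(krbs, krbs + (loadrs >> 19));

    if ((long) cr_ifs >= 0)         /* bit 63 clear: syscall, no "cover" */
        ndirty += cfm & 0x7f;       /* add the frame's size-of-frame */
    return (unsigned long) rse_skip_regs(ubspstore, ndirty);
}

int main (void)
{
    unsigned long *krbs = (unsigned long *) 0xa0000000UL;
    unsigned long *ubspstore = (unsigned long *) 0x80000000UL;

    /* 3 dirty slots (24 bytes at bit 16) + sof of 5 -> bsp lands 8 regs up */
    printf("%#lx\n", get_user_bsp(krbs, ubspstore, 24UL << 16, 0, 5));  /* 0x80000040 */
    return 0;
}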
@@ -516,27 +436,26 @@
}
if (new_bsp != 0) {
- force_loadrs_to_zero = 1;
- bsp = new_bsp;
+ flush_user_rbs = 1;
+ user_bsp = new_bsp;
}
- if (force_loadrs_to_zero) {
+ if (flush_user_rbs) {
child_regs->loadrs = 0;
- child_regs->ar_bspstore = bsp;
+ child_regs->ar_bspstore = user_bsp;
}
-
- return ret;
+ return 0;
}
static void
-sync_thread_rbs (struct task_struct *child, struct mm_struct *mm, int make_writable)
+sync_thread_rbs (struct task_struct *child, long bsp, struct mm_struct *mm, int make_writable)
{
struct task_struct *p;
read_lock(&tasklist_lock);
{
for_each_task(p) {
if (p->mm == mm && p->state != TASK_RUNNING)
- sync_kernel_register_backing_store(p, 0, make_writable);
+ sync_kernel_register_backing_store(p, bsp, 0, make_writable);
}
}
read_unlock(&tasklist_lock);
@@ -588,10 +507,6 @@
psr->dfh = 1;
}
-#ifdef CONFIG_IA64_NEW_UNWIND
-
-#include <asm/unwind.h>
-
static int
access_fr (struct unw_frame_info *info, int regnum, int hi, unsigned long *data, int write_access)
{
@@ -613,7 +528,7 @@
static int
access_uarea (struct task_struct *child, unsigned long addr, unsigned long *data, int write_access)
{
- unsigned long *ptr, *rbs, *bspstore, ndirty, regnum;
+ unsigned long *ptr, regnum, bsp, rnat_addr;
struct switch_stack *sw;
struct unw_frame_info info;
struct pt_regs *pt;
@@ -710,36 +625,16 @@
/* scratch state */
switch (addr) {
case PT_AR_BSP:
+ bsp = ia64_get_user_bsp(child, pt);
if (write_access)
- /* FIXME? Account for lack of ``cover'' in the syscall case */
- return sync_kernel_register_backing_store(child, *data, 1);
+ return sync_kernel_register_backing_store(child, bsp, *data, 1);
else {
- rbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
- bspstore = (unsigned long *) pt->ar_bspstore;
- ndirty = ia64_rse_num_regs(rbs, rbs + (pt->loadrs >> 19));
-
- /*
- * If we're in a system call, no ``cover'' was done. So to
- * make things uniform, we'll add the appropriate displacement
- * onto bsp if we're in a system call.
- */
- if (!(pt->cr_ifs & (1UL << 63))) {
- struct unw_frame_info info;
- unsigned long cfm;
-
- unw_init_from_blocked_task(&info, child);
- if (unw_unwind_to_user(&info) < 0)
- return -1;
-
- unw_get_cfm(&info, &cfm);
- ndirty += cfm & 0x7f;
- }
- *data = (unsigned long) ia64_rse_skip_regs(bspstore, ndirty);
+ *data = bsp;
return 0;
}
case PT_CFM:
- if (pt->cr_ifs & (1UL << 63)) {
+ if ((long) pt->cr_ifs < 0) {
if (write_access)
pt->cr_ifs = ((pt->cr_ifs & ~0x3fffffffffUL)
| (*data & 0x3fffffffffUL));
@@ -770,7 +665,15 @@
*data = (pt->cr_ipsr & IPSR_READ_MASK);
return 0;
- case PT_R1: case PT_R2: case PT_R3:
+ case PT_AR_RNAT:
+ bsp = ia64_get_user_bsp(child, pt);
+ rnat_addr = (long) ia64_rse_rnat_addr((long *) bsp - 1);
+ if (write_access)
+ return ia64_poke(child, bsp, rnat_addr, *data);
+ else
+ return ia64_peek(child, bsp, rnat_addr, data);
+
+ case PT_R1: case PT_R2: case PT_R3:
case PT_R8: case PT_R9: case PT_R10: case PT_R11:
case PT_R12: case PT_R13: case PT_R14: case PT_R15:
case PT_R16: case PT_R17: case PT_R18: case PT_R19:
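The new PT_AR_RNAT case finds the RNAT collection slot covering the top of the dirty partition: the slot just below the user bsp, with its slot number forced to 63, which is then read or written through ia64_peek()/ia64_poke(). A minimal sketch of that address computation (rnat_covering() is an illustrative name, under the same <asm/rse.h> assumptions as above):

#include <stdio.h>

/* RNAT slot covering the register slot just below bsp: same 64-slot
   group, slot number forced to 0x3f */
static unsigned long rnat_covering (unsigned long bsp)
{
    return (bsp - 8) | (0x3f << 3);
}

int main (void)
{
    printf("%#lx\n", rnat_covering(0x80000040UL));  /* prints 0x800001f8 */
    return 0;
}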
@@ -781,7 +684,7 @@
case PT_F6: case PT_F6+8: case PT_F7: case PT_F7+8:
case PT_F8: case PT_F8+8: case PT_F9: case PT_F9+8:
case PT_AR_BSPSTORE:
- case PT_AR_RSC: case PT_AR_UNAT: case PT_AR_PFS: case PT_AR_RNAT:
+ case PT_AR_RSC: case PT_AR_UNAT: case PT_AR_PFS:
case PT_AR_CCV: case PT_AR_FPSR: case PT_CR_IIP: case PT_PR:
/* scratch register */
ptr = (unsigned long *) ((long) pt + addr - PT_CR_IPSR);
@@ -799,7 +702,7 @@
if (!(child->thread.flags & IA64_THREAD_DBG_VALID)) {
child->thread.flags |= IA64_THREAD_DBG_VALID;
memset(child->thread.dbr, 0, sizeof(child->thread.dbr));
- memset(child->thread.ibr, 0, sizeof( child->thread.ibr));
+ memset(child->thread.ibr, 0, sizeof(child->thread.ibr));
}
if (addr >= PT_IBR) {
regnum = (addr - PT_IBR) >> 3;
@@ -830,173 +733,13 @@
return 0;
}
-#else /* !CONFIG_IA64_NEW_UNWIND */
-
-static int
-access_uarea (struct task_struct *child, unsigned long addr, unsigned long *data, int write_access)
-{
- unsigned long *ptr = NULL, *rbs, *bspstore, ndirty, regnum;
- struct switch_stack *sw;
- struct pt_regs *pt;
-
- if ((addr & 0x7) != 0)
- return -1;
-
- if (addr < PT_F127+16) {
- /* accessing fph */
- if (write_access)
- ia64_sync_fph(child);
- else
- ia64_flush_fph(child);
- ptr = (unsigned long *) ((unsigned long) &child->thread.fph + addr);
- } else if (addr < PT_F9+16) {
- /* accessing switch_stack or pt_regs: */
- pt = ia64_task_regs(child);
- sw = (struct switch_stack *) pt - 1;
-
- switch (addr) {
- case PT_NAT_BITS:
- if (write_access)
- ia64_put_nat_bits(pt, sw, *data);
- else
- *data = ia64_get_nat_bits(pt, sw);
- return 0;
-
- case PT_AR_BSP:
- if (write_access)
- /* FIXME? Account for lack of ``cover'' in the syscall case */
- return sync_kernel_register_backing_store(child, *data, 1);
- else {
- rbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
- bspstore = (unsigned long *) pt->ar_bspstore;
- ndirty = ia64_rse_num_regs(rbs, rbs + (pt->loadrs >> 19));
-
- /*
- * If we're in a system call, no ``cover'' was done. So to
- * make things uniform, we'll add the appropriate displacement
- * onto bsp if we're in a system call.
- */
- if (!(pt->cr_ifs & (1UL << 63)))
- ndirty += sw->ar_pfs & 0x7f;
- *data = (unsigned long) ia64_rse_skip_regs(bspstore, ndirty);
- return 0;
- }
-
- case PT_CFM:
- if (write_access) {
- if (pt->cr_ifs & (1UL << 63))
- pt->cr_ifs = ((pt->cr_ifs & ~0x3fffffffffUL)
- | (*data & 0x3fffffffffUL));
- else
- sw->ar_pfs = ((sw->ar_pfs & ~0x3fffffffffUL)
- | (*data & 0x3fffffffffUL));
- return 0;
- } else {
- if ((pt->cr_ifs & (1UL << 63)) == 0)
- *data = sw->ar_pfs;
- else
- /* return only the CFM */
- *data = pt->cr_ifs & 0x3fffffffffUL;
- return 0;
- }
-
- case PT_CR_IPSR:
- if (write_access)
- pt->cr_ipsr = ((*data & IPSR_WRITE_MASK)
- | (pt->cr_ipsr & ~IPSR_WRITE_MASK));
- else
- *data = (pt->cr_ipsr & IPSR_READ_MASK);
- return 0;
-
- case PT_AR_EC:
- if (write_access)
- sw->ar_pfs = (((*data & 0x3f) << 52)
- | (sw->ar_pfs & ~(0x3fUL << 52)));
- else
- *data = (sw->ar_pfs >> 52) & 0x3f;
- break;
-
- case PT_R1: case PT_R2: case PT_R3:
- case PT_R4: case PT_R5: case PT_R6: case PT_R7:
- case PT_R8: case PT_R9: case PT_R10: case PT_R11:
- case PT_R12: case PT_R13: case PT_R14: case PT_R15:
- case PT_R16: case PT_R17: case PT_R18: case PT_R19:
- case PT_R20: case PT_R21: case PT_R22: case PT_R23:
- case PT_R24: case PT_R25: case PT_R26: case PT_R27:
- case PT_R28: case PT_R29: case PT_R30: case PT_R31:
- case PT_B0: case PT_B1: case PT_B2: case PT_B3:
- case PT_B4: case PT_B5: case PT_B6: case PT_B7:
- case PT_F2: case PT_F2+8: case PT_F3: case PT_F3+8:
- case PT_F4: case PT_F4+8: case PT_F5: case PT_F5+8:
- case PT_F6: case PT_F6+8: case PT_F7: case PT_F7+8:
- case PT_F8: case PT_F8+8: case PT_F9: case PT_F9+8:
- case PT_F10: case PT_F10+8: case PT_F11: case PT_F11+8:
- case PT_F12: case PT_F12+8: case PT_F13: case PT_F13+8:
- case PT_F14: case PT_F14+8: case PT_F15: case PT_F15+8:
- case PT_F16: case PT_F16+8: case PT_F17: case PT_F17+8:
- case PT_F18: case PT_F18+8: case PT_F19: case PT_F19+8:
- case PT_F20: case PT_F20+8: case PT_F21: case PT_F21+8:
- case PT_F22: case PT_F22+8: case PT_F23: case PT_F23+8:
- case PT_F24: case PT_F24+8: case PT_F25: case PT_F25+8:
- case PT_F26: case PT_F26+8: case PT_F27: case PT_F27+8:
- case PT_F28: case PT_F28+8: case PT_F29: case PT_F29+8:
- case PT_F30: case PT_F30+8: case PT_F31: case PT_F31+8:
- case PT_AR_BSPSTORE:
- case PT_AR_RSC: case PT_AR_UNAT: case PT_AR_PFS: case PT_AR_RNAT:
- case PT_AR_CCV: case PT_AR_FPSR: case PT_CR_IIP: case PT_PR:
- case PT_AR_LC:
- ptr = (unsigned long *) ((long) sw + addr - PT_NAT_BITS);
- break;
-
- default:
- /* disallow accessing anything else... */
- return -1;
- }
- } else {
-
- /* access debug registers */
-
- if (!(child->thread.flags & IA64_THREAD_DBG_VALID)) {
- child->thread.flags |= IA64_THREAD_DBG_VALID;
- memset(child->thread.dbr, 0, sizeof child->thread.dbr);
- memset(child->thread.ibr, 0, sizeof child->thread.ibr);
- }
- if (addr >= PT_IBR) {
- regnum = (addr - PT_IBR) >> 3;
- ptr = &child->thread.ibr[0];
- } else {
- regnum = (addr - PT_DBR) >> 3;
- ptr = &child->thread.dbr[0];
- }
-
- if (regnum >= 8)
- return -1;
-
- ptr += regnum;
-
- if (write_access)
- /* don't let the user set kernel-level breakpoints... */
- *ptr = *data & ~(7UL << 56);
- else
- *data = *ptr;
- return 0;
- }
- if (write_access)
- *ptr = *data;
- else
- *data = *ptr;
- return 0;
-}
-
-#endif /* !CONFIG_IA64_NEW_UNWIND */
-
asmlinkage long
sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data,
long arg4, long arg5, long arg6, long arg7, long stack)
{
- struct pt_regs *regs = (struct pt_regs *) &stack;
+ struct pt_regs *pt, *regs = (struct pt_regs *) &stack;
struct task_struct *child;
- unsigned long flags;
+ unsigned long flags, bsp;
long ret;
lock_kernel();
@@ -1031,10 +774,10 @@
(current->uid != child->euid) ||
(current->uid != child->suid) ||
(current->uid != child->uid) ||
- (current->gid != child->egid) ||
- (current->gid != child->sgid) ||
- (!cap_issubset(child->cap_permitted, current->cap_permitted)) ||
- (current->gid != child->gid)) && !capable(CAP_SYS_PTRACE))
+ (current->gid != child->egid) ||
+ (current->gid != child->sgid) ||
+ (!cap_issubset(child->cap_permitted, current->cap_permitted)) ||
+ (current->gid != child->gid)) && !capable(CAP_SYS_PTRACE))
goto out_tsk;
/* the same process cannot be attached many times */
if (child->ptrace & PT_PTRACED)
@@ -1065,10 +808,13 @@
if (child->p_pptr != current)
goto out_tsk;
+ pt = ia64_task_regs(child);
+
switch (request) {
case PTRACE_PEEKTEXT:
case PTRACE_PEEKDATA: /* read word at location addr */
- if (!(child->thread.flags & IA64_THREAD_KRBS_SYNCED)) {
+ bsp = ia64_get_user_bsp(child, pt);
+ if (!(child->thread.flags & IA64_THREAD_KRBS_SYNCED)) {
struct mm_struct *mm;
long do_sync;
@@ -1079,9 +825,9 @@
}
task_unlock(child);
if (do_sync)
- sync_thread_rbs(child, mm, 0);
+ sync_thread_rbs(child, bsp, mm, 0);
}
- ret = ia64_peek(regs, child, addr, &data);
+ ret = ia64_peek(child, bsp, addr, &data);
if (ret == 0) {
ret = data;
regs->r8 = 0; /* ensure "ret" is not mistaken as an error code */
@@ -1090,7 +836,8 @@
case PTRACE_POKETEXT:
case PTRACE_POKEDATA: /* write the word at location addr */
- if (!(child->thread.flags & IA64_THREAD_KRBS_SYNCED)) {
+ bsp = ia64_get_user_bsp(child, pt);
+ if (!(child->thread.flags & IA64_THREAD_KRBS_SYNCED)) {
struct mm_struct *mm;
long do_sync;
@@ -1101,9 +848,9 @@
}
task_unlock(child);
if (do_sync)
- sync_thread_rbs(child, mm, 1);
+ sync_thread_rbs(child, bsp, mm, 1);
}
- ret = ia64_poke(regs, child, addr, data);
+ ret = ia64_poke(child, bsp, addr, data);
goto out_tsk;
case PTRACE_PEEKUSR: /* read the word at addr in the USER area */
@@ -1125,21 +872,19 @@
case PTRACE_GETSIGINFO:
ret = -EIO;
- if (!access_ok(VERIFY_WRITE, data, sizeof (siginfo_t))
- || child->thread.siginfo == 0)
+ if (!access_ok(VERIFY_WRITE, data, sizeof (siginfo_t)) || !child->thread.siginfo)
goto out_tsk;
- copy_to_user((siginfo_t *) data, child->thread.siginfo, sizeof (siginfo_t));
- ret = 0;
+ ret = copy_siginfo_to_user((siginfo_t *) data, child->thread.siginfo);
goto out_tsk;
- break;
+
case PTRACE_SETSIGINFO:
ret = -EIO;
if (!access_ok(VERIFY_READ, data, sizeof (siginfo_t))
|| child->thread.siginfo == 0)
goto out_tsk;
- copy_from_user(child->thread.siginfo, (siginfo_t *) data, sizeof (siginfo_t));
- ret = 0;
+ ret = copy_siginfo_from_user(child->thread.siginfo, (siginfo_t *) data);
goto out_tsk;
+
case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
case PTRACE_CONT: /* restart after signal. */
ret = -EIO;
@@ -1152,8 +897,8 @@
child->exit_code = data;
/* make sure the single step/take-branch tra bits are not set: */
- ia64_psr(ia64_task_regs(child))->ss = 0;
- ia64_psr(ia64_task_regs(child))->tb = 0;
+ ia64_psr(pt)->ss = 0;
+ ia64_psr(pt)->tb = 0;
/* Turn off flag indicating that the KRBS is sync'd with child's VM: */
child->thread.flags &= ~IA64_THREAD_KRBS_SYNCED;
@@ -1173,8 +918,8 @@
child->exit_code = SIGKILL;
/* make sure the single step/take-branch tra bits are not set: */
- ia64_psr(ia64_task_regs(child))->ss = 0;
- ia64_psr(ia64_task_regs(child))->tb = 0;
+ ia64_psr(pt)->ss = 0;
+ ia64_psr(pt)->tb = 0;
/* Turn off flag indicating that the KRBS is sync'd with child's VM: */
child->thread.flags &= ~IA64_THREAD_KRBS_SYNCED;
@@ -1191,9 +936,9 @@
child->ptrace &= ~PT_TRACESYS;
if (request == PTRACE_SINGLESTEP) {
- ia64_psr(ia64_task_regs(child))->ss = 1;
+ ia64_psr(pt)->ss = 1;
} else {
- ia64_psr(ia64_task_regs(child))->tb = 1;
+ ia64_psr(pt)->tb = 1;
}
child->exit_code = data;
@@ -1219,8 +964,8 @@
write_unlock_irqrestore(&tasklist_lock, flags);
/* make sure the single step/take-branch tra bits are not set: */
- ia64_psr(ia64_task_regs(child))->ss = 0;
- ia64_psr(ia64_task_regs(child))->tb = 0;
+ ia64_psr(pt)->ss = 0;
+ ia64_psr(pt)->tb = 0;
/* Turn off flag indicating that the KRBS is sync'd with child's VM: */
child->thread.flags &= ~IA64_THREAD_KRBS_SYNCED;