diff --git a/arch/x86_64/ia32/ptrace32.c b/arch/x86_64/ia32/ptrace32.c
index 23a4515a7..f3b7d31fc 100644
--- a/arch/x86_64/ia32/ptrace32.c
+++ b/arch/x86_64/ia32/ptrace32.c
@@ -7,8 +7,6 @@
 *
 * This allows to access 64bit processes too; but there is no way to see the extended
 * register contents.
- *
- * $Id: ptrace32.c,v 1.16 2003/03/14 16:06:35 ak Exp $
 */
 #include 
@@ -18,7 +16,11 @@
 #include 
 #include 
 #include 
+#include 
+#include 
+#include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -27,6 +29,8 @@
 #include 
 #include 
 #include 
+#include 
+#include 
 /*
 * Determines which flags the user has access to [1 = access, 0 = no access].
@@ -36,34 +40,33 @@
 #define FLAG_MASK 0x54dd5UL
 #define R32(l,q) \
- case offsetof(struct user32, regs.l): stack[offsetof(struct pt_regs, q)/8] = val; break
+ case offsetof(struct user_regs_struct32, l): stack[offsetof(struct pt_regs, q)/8] = val; break
 static int putreg32(struct task_struct *child, unsigned regno, u32 val)
 {
- int i;
 __u64 *stack = (__u64 *)task_pt_regs(child);
 switch (regno) {
- case offsetof(struct user32, regs.fs):
+ case offsetof(struct user_regs_struct32, fs):
 if (val && (val & 3) != 3) return -EIO;
 child->thread.fsindex = val & 0xffff;
 break;
- case offsetof(struct user32, regs.gs):
+ case offsetof(struct user_regs_struct32, gs):
 if (val && (val & 3) != 3) return -EIO;
 child->thread.gsindex = val & 0xffff;
 break;
- case offsetof(struct user32, regs.ds):
+ case offsetof(struct user_regs_struct32, ds):
 if (val && (val & 3) != 3) return -EIO;
 child->thread.ds = val & 0xffff;
 break;
- case offsetof(struct user32, regs.es):
+ case offsetof(struct user_regs_struct32, es):
 child->thread.es = val & 0xffff;
 break;
- case offsetof(struct user32, regs.ss):
+ case offsetof(struct user_regs_struct32, ss):
 if ((val & 3) != 3) return -EIO;
 stack[offsetof(struct pt_regs, ss)/8] = val & 0xffff;
 break;
- case offsetof(struct user32, regs.cs):
+ case offsetof(struct user_regs_struct32, cs):
 if ((val & 3) != 3) return -EIO;
 stack[offsetof(struct pt_regs, cs)/8] = val & 0xffff;
 break;
@@ -79,53 +82,16 @@ static int putreg32(struct task_struct *child, unsigned regno, u32 val)
 R32(eip, rip);
 R32(esp, rsp);
- case offsetof(struct user32, regs.eflags): {
+ case offsetof(struct user_regs_struct32, eflags): {
 __u64 *flags = &stack[offsetof(struct pt_regs, eflags)/8];
 val &= FLAG_MASK;
 *flags = val | (*flags & ~FLAG_MASK);
+ clear_tsk_thread_flag(child, TIF_FORCED_TF);
 break;
 }
- case offsetof(struct user32, u_debugreg[4]):
- case offsetof(struct user32, u_debugreg[5]):
- return -EIO;
-
- case offsetof(struct user32, u_debugreg[0]):
- child->thread.debugreg0 = val;
- break;
-
- case offsetof(struct user32, u_debugreg[1]):
- child->thread.debugreg1 = val;
- break;
-
- case offsetof(struct user32, u_debugreg[2]):
- child->thread.debugreg2 = val;
- break;
-
- case offsetof(struct user32, u_debugreg[3]):
- child->thread.debugreg3 = val;
- break;
-
- case offsetof(struct user32, u_debugreg[6]):
- child->thread.debugreg6 = val;
- break;
-
- case offsetof(struct user32, u_debugreg[7]):
- val &= ~DR_CONTROL_RESERVED;
- /* See arch/i386/kernel/ptrace.c for an explanation of
- * this awkward check.*/
- for(i=0; i<4; i++)
- if ((0x5454 >> ((val >> (16 + 4*i)) & 0xf)) & 1)
- return -EIO;
- 
child->thread.debugreg7 = val; - break; - default: - if (regno > sizeof(struct user32) || (regno & 3)) - return -EIO; - - /* Other dummy fields in the virtual user structure are ignored */ - break; + BUG(); } return 0; } @@ -133,24 +99,25 @@ static int putreg32(struct task_struct *child, unsigned regno, u32 val) #undef R32 #define R32(l,q) \ - case offsetof(struct user32, regs.l): *val = stack[offsetof(struct pt_regs, q)/8]; break + case offsetof(struct user_regs_struct32, l): val = stack[offsetof(struct pt_regs, q)/8]; break -static int getreg32(struct task_struct *child, unsigned regno, u32 *val) +static int getreg32(struct task_struct *child, unsigned regno) { __u64 *stack = (__u64 *)task_pt_regs(child); + u32 val; switch (regno) { - case offsetof(struct user32, regs.fs): - *val = child->thread.fsindex; + case offsetof(struct user_regs_struct32, fs): + val = child->thread.fsindex; break; - case offsetof(struct user32, regs.gs): - *val = child->thread.gsindex; + case offsetof(struct user_regs_struct32, gs): + val = child->thread.gsindex; break; - case offsetof(struct user32, regs.ds): - *val = child->thread.ds; + case offsetof(struct user_regs_struct32, ds): + val = child->thread.ds; break; - case offsetof(struct user32, regs.es): - *val = child->thread.es; + case offsetof(struct user_regs_struct32, es): + val = child->thread.es; break; R32(cs, cs); @@ -164,197 +131,504 @@ static int getreg32(struct task_struct *child, unsigned regno, u32 *val) R32(eax, rax); R32(orig_eax, orig_rax); R32(eip, rip); - R32(eflags, eflags); R32(esp, rsp); - case offsetof(struct user32, u_debugreg[0]): - *val = child->thread.debugreg0; - break; - case offsetof(struct user32, u_debugreg[1]): - *val = child->thread.debugreg1; - break; - case offsetof(struct user32, u_debugreg[2]): - *val = child->thread.debugreg2; - break; - case offsetof(struct user32, u_debugreg[3]): - *val = child->thread.debugreg3; - break; - case offsetof(struct user32, u_debugreg[6]): - *val = child->thread.debugreg6; - break; - case offsetof(struct user32, u_debugreg[7]): - *val = child->thread.debugreg7; + case offsetof(struct user_regs_struct32, eflags): + val = stack[offsetof(struct pt_regs, eflags) / 8]; + if (test_tsk_thread_flag(child, TIF_FORCED_TF)) + val &= ~X86_EFLAGS_TF; break; default: - if (regno > sizeof(struct user32) || (regno & 3)) - return -EIO; - - /* Other dummy fields in the virtual user structure are ignored */ - *val = 0; + BUG(); + val = -1; break; } - return 0; + + return val; } #undef R32 -asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data) +static int +ia32_genregs_get(struct task_struct *target, + const struct utrace_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) { - struct task_struct *child; - struct pt_regs *childregs; - void __user *datap = compat_ptr(data); - int ret; - __u32 val; + if (kbuf) { + u32 *kp = kbuf; + while (count > 0) { + *kp++ = getreg32(target, pos); + pos += 4; + count -= 4; + } + } + else { + u32 __user *up = ubuf; + while (count > 0) { + if (__put_user(getreg32(target, pos), up++)) + return -EFAULT; + pos += 4; + count -= 4; + } + } - switch (request) { - default: - return sys_ptrace(request, pid, addr, data); + return 0; +} - case PTRACE_PEEKTEXT: - case PTRACE_PEEKDATA: - case PTRACE_POKEDATA: - case PTRACE_POKETEXT: - case PTRACE_POKEUSR: - case PTRACE_PEEKUSR: - case PTRACE_GETREGS: - case PTRACE_SETREGS: - case PTRACE_SETFPREGS: - case PTRACE_GETFPREGS: - case PTRACE_SETFPXREGS: - case PTRACE_GETFPXREGS: - case 
PTRACE_GETEVENTMSG: - break; - } +static int +ia32_genregs_set(struct task_struct *target, + const struct utrace_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + int ret = 0; + + if (kbuf) { + const u32 *kp = kbuf; + while (!ret && count > 0) { + ret = putreg32(target, pos, *kp++); + pos += 4; + count -= 4; + } + } + else { + int ret = 0; + const u32 __user *up = ubuf; + while (!ret && count > 0) { + u32 val; + ret = __get_user(val, up++); + if (!ret) + ret = putreg32(target, pos, val); + pos += 4; + count -= 4; + } + } - if (request == PTRACE_TRACEME) - return ptrace_traceme(); + return ret; +} - child = ptrace_get_task_struct(pid); - if (IS_ERR(child)) - return PTR_ERR(child); +static int +ia32_fpregs_active(struct task_struct *target, + const struct utrace_regset *regset) +{ + return tsk_used_math(target) ? regset->n : 0; +} - ret = ptrace_check_attach(child, request == PTRACE_KILL); - if (ret < 0) - goto out; +static int +ia32_fpregs_get(struct task_struct *target, + const struct utrace_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) +{ + struct user_i387_ia32_struct fp; + int ret; - childregs = task_pt_regs(child); + if (tsk_used_math(target)) { + if (target == current) + unlazy_fpu(target); + } + else + init_fpu(target); - switch (request) { - case PTRACE_PEEKDATA: - case PTRACE_PEEKTEXT: - ret = 0; - if (access_process_vm(child, addr, &val, sizeof(u32), 0)!=sizeof(u32)) - ret = -EIO; - else - ret = put_user(val, (unsigned int __user *)datap); - break; + ret = get_fpregs32(&fp, target); + if (ret == 0) + ret = utrace_regset_copyout(&pos, &count, &kbuf, &ubuf, + &fp, 0, -1); - case PTRACE_POKEDATA: - case PTRACE_POKETEXT: - ret = 0; - if (access_process_vm(child, addr, &data, sizeof(u32), 1)!=sizeof(u32)) - ret = -EIO; - break; + return ret; +} - case PTRACE_PEEKUSR: - ret = getreg32(child, addr, &val); +static int +ia32_fpregs_set(struct task_struct *target, + const struct utrace_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + struct user_i387_ia32_struct fp; + int ret; + + if (tsk_used_math(target)) { + if (target == current) + unlazy_fpu(target); + } + else if (pos == 0 && count == sizeof(fp)) + set_stopped_child_used_math(target); + else + init_fpu(target); + + if (pos > 0 || count < sizeof(fp)) { + ret = get_fpregs32(&fp, target); if (ret == 0) - ret = put_user(val, (__u32 __user *)datap); - break; + ret = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf, + &fp, 0, -1); + if (ret) + return ret; + kbuf = &fp; + } + else if (kbuf == NULL) { + if (__copy_from_user(&fp, ubuf, sizeof(fp))) + return -EFAULT; + kbuf = &fp; + } - case PTRACE_POKEUSR: - ret = putreg32(child, addr, data); - break; + return set_fpregs32(target, kbuf); +} - case PTRACE_GETREGS: { /* Get all gp regs from the child. */ - int i; - if (!access_ok(VERIFY_WRITE, datap, 16*4)) { - ret = -EIO; - break; +static int +ia32_fpxregs_active(struct task_struct *target, + const struct utrace_regset *regset) +{ + return tsk_used_math(target) ? 
regset->n : 0; +} + +static int +ia32_fpxregs_get(struct task_struct *target, + const struct utrace_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) +{ + if (tsk_used_math(target)) { + if (target == current) + unlazy_fpu(target); + } + else + init_fpu(target); + + return utrace_regset_copyout(&pos, &count, &kbuf, &ubuf, + &target->thread.i387.fxsave, 0, -1); +} + +static int +ia32_fpxregs_set(struct task_struct *target, + const struct utrace_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) + +{ + int ret; + + if (tsk_used_math(target)) { + if (target == current) + unlazy_fpu(target); + } + else if (pos == 0 && count == sizeof(struct i387_fxsave_struct)) + set_stopped_child_used_math(target); + else + init_fpu(target); + + ret = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf, + &target->thread.i387.fxsave, 0, -1); + + target->thread.i387.fxsave.mxcsr &= mxcsr_feature_mask; + + return ret; +} + +static int +ia32_dbregs_active(struct task_struct *tsk, const struct utrace_regset *regset) +{ + if (tsk->thread.debugreg6 | tsk->thread.debugreg7) + return 8; + return 0; +} + +static int +ia32_dbregs_get(struct task_struct *target, + const struct utrace_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) +{ + for (pos >>= 2, count >>= 2; count > 0; --count, ++pos) { + u32 val; + + /* + * The hardware updates the status register on a debug trap, + * but do_debug (traps.c) saves it for us when that happens. + * So whether the target is current or not, debugregN is good. + */ + val = 0; + switch (pos) { + case 0: val = target->thread.debugreg0; break; + case 1: val = target->thread.debugreg1; break; + case 2: val = target->thread.debugreg2; break; + case 3: val = target->thread.debugreg3; break; + case 6: val = target->thread.debugreg6; break; + case 7: val = target->thread.debugreg7; break; } - ret = 0; - for ( i = 0; i <= 16*4 ; i += sizeof(__u32) ) { - getreg32(child, i, &val); - ret |= __put_user(val,(u32 __user *)datap); - datap += sizeof(u32); + + if (kbuf) { + *(u32 *) kbuf = val; + kbuf += sizeof(u32); + } + else { + if (__put_user(val, (u32 __user *) ubuf)) + return -EFAULT; + ubuf += sizeof(u32); } - break; } - case PTRACE_SETREGS: { /* Set all gp regs in the child. */ - unsigned long tmp; - int i; - if (!access_ok(VERIFY_READ, datap, 16*4)) { - ret = -EIO; - break; + return 0; +} + +static int +ia32_dbregs_set(struct task_struct *target, + const struct utrace_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + /* + * We'll just hijack the native setter to do the real work for us. 
+ */ + const struct utrace_regset *dbregset = &utrace_x86_64_native.regsets[2]; + + int ret = 0; + + for (pos >>= 2, count >>= 2; count > 0; --count, ++pos) { + unsigned long val; + + if (kbuf) { + val = *(const u32 *) kbuf; + kbuf += sizeof(u32); } - ret = 0; - for ( i = 0; i <= 16*4; i += sizeof(u32) ) { - ret |= __get_user(tmp, (u32 __user *)datap); - putreg32(child, i, tmp); - datap += sizeof(u32); + else { + if (__get_user(val, (u32 __user *) ubuf)) + return -EFAULT; + ubuf += sizeof(u32); } - break; - } - case PTRACE_GETFPREGS: - ret = -EIO; - if (!access_ok(VERIFY_READ, compat_ptr(data), - sizeof(struct user_i387_struct))) - break; - save_i387_ia32(child, datap, childregs, 1); - ret = 0; + ret = (*dbregset->set)(target, dbregset, pos * sizeof(long), + sizeof(val), &val, NULL); + if (ret) break; + } - case PTRACE_SETFPREGS: - ret = -EIO; - if (!access_ok(VERIFY_WRITE, datap, - sizeof(struct user_i387_struct))) - break; - ret = 0; - /* don't check EFAULT to be bug-to-bug compatible to i386 */ - restore_i387_ia32(child, datap, 1); - break; + return ret; +} - case PTRACE_GETFPXREGS: { - struct user32_fxsr_struct __user *u = datap; - init_fpu(child); - ret = -EIO; - if (!access_ok(VERIFY_WRITE, u, sizeof(*u))) - break; - ret = -EFAULT; - if (__copy_to_user(u, &child->thread.i387.fxsave, sizeof(*u))) - break; - ret = __put_user(childregs->cs, &u->fcs); - ret |= __put_user(child->thread.ds, &u->fos); - break; - } - case PTRACE_SETFPXREGS: { - struct user32_fxsr_struct __user *u = datap; - unlazy_fpu(child); - ret = -EIO; - if (!access_ok(VERIFY_READ, u, sizeof(*u))) - break; - /* no checking to be bug-to-bug compatible with i386 */ - __copy_from_user(&child->thread.i387.fxsave, u, sizeof(*u)); - set_stopped_child_used_math(child); - child->thread.i387.fxsave.mxcsr &= mxcsr_feature_mask; - ret = 0; - break; + +/* + * Perform get_thread_area on behalf of the traced child. 
+ */ +static int +ia32_tls_get(struct task_struct *target, + const struct utrace_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) +{ + struct user_desc info, *ip; + const struct n_desc_struct *desc; + const struct n_desc_struct *tls; + +/* + * Get the current Thread-Local Storage area: + */ + +#define GET_BASE(desc) ( \ + (((desc)->a >> 16) & 0x0000ffff) | \ + (((desc)->b << 16) & 0x00ff0000) | \ + ( (desc)->b & 0xff000000) ) + +#define GET_LIMIT(desc) ( \ + ((desc)->a & 0x0ffff) | \ + ((desc)->b & 0xf0000) ) + +#define GET_32BIT(desc) (((desc)->b >> 22) & 1) +#define GET_CONTENTS(desc) (((desc)->b >> 10) & 3) +#define GET_WRITABLE(desc) (((desc)->b >> 9) & 1) +#define GET_LIMIT_PAGES(desc) (((desc)->b >> 23) & 1) +#define GET_PRESENT(desc) (((desc)->b >> 15) & 1) +#define GET_USEABLE(desc) (((desc)->b >> 20) & 1) + + tls = (struct n_desc_struct *) target->thread.tls_array; + desc = &tls[pos]; + ip = kbuf ?: &info; + memset(ip, 0, sizeof *ip); + for (; count > 0; count -= sizeof(struct user_desc), ++desc) { + ip->entry_number = desc - tls + GDT_ENTRY_TLS_MIN; + ip->base_addr = GET_BASE(desc); + ip->limit = GET_LIMIT(desc); + ip->seg_32bit = GET_32BIT(desc); + ip->contents = GET_CONTENTS(desc); + ip->read_exec_only = !GET_WRITABLE(desc); + ip->limit_in_pages = GET_LIMIT_PAGES(desc); + ip->seg_not_present = !GET_PRESENT(desc); + ip->useable = GET_USEABLE(desc); + + if (kbuf) + ++ip; + else { + if (__copy_to_user(ubuf, &info, sizeof(info))) + return -EFAULT; + ubuf += sizeof(info); + } } - case PTRACE_GETEVENTMSG: - ret = put_user(child->ptrace_message,(unsigned int __user *)compat_ptr(data)); - break; + return 0; +} - default: - ret = -EINVAL; - break; +/* + * Perform set_thread_area on behalf of the traced child. + */ +static int +ia32_tls_set(struct task_struct *target, + const struct utrace_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + struct user_desc info; + struct n_desc_struct *desc; + struct n_desc_struct newtls[GDT_ENTRY_TLS_ENTRIES]; + unsigned int i; + int cpu; + + pos /= sizeof(struct user_desc); + count /= sizeof(struct user_desc); + + desc = &newtls[pos]; + for (i = 0; i < count; ++i, ++desc) { + const struct user_desc *ip; + if (kbuf) { + ip = kbuf; + kbuf += sizeof(struct user_desc); + } + else { + ip = &info; + if (__copy_from_user(&info, ubuf, sizeof(info))) + return -EFAULT; + ubuf += sizeof(struct user_desc); + } + + if (LDT_empty(ip)) { + desc->a = 0; + desc->b = 0; + } else { + desc->a = LDT_entry_a(ip); + desc->b = LDT_entry_b(ip); + } } - out: - put_task_struct(child); - return ret; + /* + * We must not get preempted while modifying the TLS. + */ + cpu = get_cpu(); + memcpy(&target->thread.tls_array[pos], newtls, + count * sizeof(newtls[0])); + if (target == current) + load_TLS(&target->thread, cpu); + put_cpu(); + + return 0; +} + +/* + * Determine how many TLS slots are in use. + */ +static int +ia32_tls_active(struct task_struct *target, const struct utrace_regset *regset) +{ + int i; + for (i = GDT_ENTRY_TLS_ENTRIES; i > 0; --i) { + struct n_desc_struct *desc = (struct n_desc_struct *) + &target->thread.tls_array[i - 1]; + if ((desc->a | desc->b) != 0) + break; + } + return i; } + +/* + * This should match arch/i386/kernel/ptrace.c:native_regsets. + * XXX ioperm? vm86? 
+ */ +static const struct utrace_regset ia32_regsets[] = { + { + .n = sizeof(struct user_regs_struct32)/4, + .size = 4, .align = 4, + .get = ia32_genregs_get, .set = ia32_genregs_set + }, + { + .n = sizeof(struct user_i387_ia32_struct) / 4, + .size = 4, .align = 4, + .active = ia32_fpregs_active, + .get = ia32_fpregs_get, .set = ia32_fpregs_set + }, + { + .n = sizeof(struct user32_fxsr_struct) / 4, + .size = 4, .align = 4, + .active = ia32_fpxregs_active, + .get = ia32_fpxregs_get, .set = ia32_fpxregs_set + }, + { + .n = GDT_ENTRY_TLS_ENTRIES, + .bias = GDT_ENTRY_TLS_MIN, + .size = sizeof(struct user_desc), + .align = sizeof(struct user_desc), + .active = ia32_tls_active, + .get = ia32_tls_get, .set = ia32_tls_set + }, + { + .n = 8, .size = 4, .align = 4, + .active = ia32_dbregs_active, + .get = ia32_dbregs_get, .set = ia32_dbregs_set + }, +}; + +const struct utrace_regset_view utrace_ia32_view = { + .name = "i386", .e_machine = EM_386, + .regsets = ia32_regsets, .n = ARRAY_SIZE(ia32_regsets) +}; +EXPORT_SYMBOL_GPL(utrace_ia32_view); + + +#ifdef CONFIG_PTRACE +/* + * This matches the arch/i386/kernel/ptrace.c definitions. + */ + +static const struct ptrace_layout_segment ia32_uarea[] = { + {0, sizeof(struct user_regs_struct32), 0, 0}, + {sizeof(struct user_regs_struct32), + offsetof(struct user32, u_debugreg[0]), -1, 0}, + {offsetof(struct user32, u_debugreg[0]), + offsetof(struct user32, u_debugreg[8]), 4, 0}, + {0, 0, -1, 0} +}; + +int arch_compat_ptrace(compat_long_t *req, struct task_struct *child, + struct utrace_attached_engine *engine, + compat_ulong_t addr, compat_ulong_t data, + compat_long_t *val) +{ + switch (*req) { + case PTRACE_PEEKUSR: + return ptrace_compat_peekusr(child, engine, ia32_uarea, + addr, data); + case PTRACE_POKEUSR: + return ptrace_compat_pokeusr(child, engine, ia32_uarea, + addr, data); + case PTRACE_GETREGS: + return ptrace_whole_regset(child, engine, data, 0, 0); + case PTRACE_SETREGS: + return ptrace_whole_regset(child, engine, data, 0, 1); + case PTRACE_GETFPREGS: + return ptrace_whole_regset(child, engine, data, 1, 0); + case PTRACE_SETFPREGS: + return ptrace_whole_regset(child, engine, data, 1, 1); + case PTRACE_GETFPXREGS: + return ptrace_whole_regset(child, engine, data, 2, 0); + case PTRACE_SETFPXREGS: + return ptrace_whole_regset(child, engine, data, 2, 1); + case PTRACE_GET_THREAD_AREA: + case PTRACE_SET_THREAD_AREA: + return ptrace_onereg_access(child, engine, + &utrace_ia32_view, 3, + addr, + (void __user *)(unsigned long)data, + *req == PTRACE_SET_THREAD_AREA); + } + return -ENOSYS; +} +#endif /* CONFIG_PTRACE */
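
As an illustration of the regset calling convention used above (not part of the diff itself), the following minimal sketch reads the 32-bit general registers of an already-stopped task through regset 0 of utrace_ia32_view, using the kernel-buffer (kbuf) path served by ia32_genregs_get(). The helper name dump_ia32_gpregs() is hypothetical, and the sketch assumes the caller keeps the target quiescent and that the headers already included by this file are in scope.

/*
 * Illustrative sketch only -- not part of the patch.  Assumes the utrace
 * regset API shown in this file: regsets[0] of utrace_ia32_view is the
 * 32-bit general-register set, and @task is held stopped by the caller.
 */
static int dump_ia32_gpregs(struct task_struct *task)
{
	const struct utrace_regset *rs = &utrace_ia32_view.regsets[0];
	struct user_regs_struct32 gregs;
	int ret;

	/* Kernel-buffer path: kbuf != NULL, ubuf == NULL. */
	ret = (*rs->get)(task, rs, 0, sizeof(gregs), &gregs, NULL);
	if (ret)
		return ret;

	printk(KERN_DEBUG "pid %d: eip=%08x esp=%08x eflags=%08x\n",
	       task->pid, gregs.eip, gregs.esp, gregs.eflags);
	return 0;
}

The compat ptrace entry point above reaches the same code indirectly: PTRACE_GETREGS and PTRACE_SETREGS go through ptrace_whole_regset() for regset 0, which is why no 32-bit-specific register-dump handler is needed here.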