786130d1b44e6323ea313fe9d2967aa3bd4fe2d7
[linux-2.6.git] / kernel / ptrace.c
1 /*
2  * linux/kernel/ptrace.c
3  *
4  * (C) Copyright 1999 Linus Torvalds
5  *
6  * Common interfaces for "ptrace()" which we do not want
7  * to continually duplicate across every architecture.
8  */
9
10 #include <linux/capability.h>
11 #include <linux/module.h>
12 #include <linux/sched.h>
13 #include <linux/errno.h>
14 #include <linux/mm.h>
15 #include <linux/highmem.h>
16 #include <linux/pagemap.h>
17 #include <linux/smp_lock.h>
18 #include <linux/ptrace.h>
19 #include <linux/security.h>
20 #include <linux/signal.h>
21
22 #include <asm/pgtable.h>
23 #include <asm/uaccess.h>
24
25 #ifdef CONFIG_PTRACE
26 #include <linux/utrace.h>
27 #include <linux/tracehook.h>
28 #include <asm/tracehook.h>
29 #endif
30
31 int getrusage(struct task_struct *, int, struct rusage __user *);
32
33 //#define PTRACE_DEBUG
34
/*
 * May we inspect the given task?
 * This check is used both for attaching with ptrace
 * and for allowing access to sensitive information in /proc.
 *
 * ptrace_attach denies several cases that /proc allows
 * because setting up the necessary parent/child relationship
 * or halting the specified task is impossible.
 *
 * Returns 0 if access is permitted, -EPERM otherwise, or whatever
 * the security module's ptrace hook returns.
 */
int __ptrace_may_attach(struct task_struct *task)
{
        int dumpable = 0;
        /* Don't let security modules deny introspection */
        if (task == current)
                return 0;
        /*
         * The tracer's real uid/gid must match all three of the target's
         * uid/gid triples, unless the tracer has CAP_SYS_PTRACE.
         */
        if (((current->uid != task->euid) ||
             (current->uid != task->suid) ||
             (current->uid != task->uid) ||
             (current->gid != task->egid) ||
             (current->gid != task->sgid) ||
             (current->gid != task->gid)) && !capable(CAP_SYS_PTRACE))
                return -EPERM;
        /*
         * Read barrier before sampling mm->dumpable — presumably pairs
         * with the write side that updates dumpable on setuid/exec;
         * NOTE(review): confirm the pairing site before relying on it.
         */
        smp_rmb();
        if (task->mm)
                dumpable = task->mm->dumpable;
        /* A non-dumpable target (e.g. setuid) needs CAP_SYS_PTRACE. */
        if (!dumpable && !capable(CAP_SYS_PTRACE))
                return -EPERM;

        /* Finally let the security module veto or allow. */
        return security_ptrace(current, task);
}
64
/*
 * Boolean convenience wrapper around __ptrace_may_attach(): takes the
 * task lock for the check and returns nonzero when @task may be
 * inspected by current, zero otherwise.
 */
int ptrace_may_attach(struct task_struct *task)
{
        int denied;

        task_lock(task);
        denied = __ptrace_may_attach(task);
        task_unlock(task);

        return denied == 0;
}
73
/*
 * Access another process' address space.
 * Source/target buffer must be kernel space.
 * Do not walk the page table directly, use get_user_pages.
 *
 * Copies up to @len bytes between @buf (kernel) and @addr in @tsk's
 * address space; @write nonzero means write into the target process.
 * Returns the number of bytes actually transferred (0 if the task has
 * no mm or the first page could not be pinned).
 */

int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
{
        struct mm_struct *mm;
        struct vm_area_struct *vma;
        struct page *page;
        void *old_buf = buf;

        /* Take a reference on the task's mm so it cannot go away under us. */
        mm = get_task_mm(tsk);
        if (!mm)
                return 0;

        down_read(&mm->mmap_sem);
        /* ignore errors, just check how much was successfully transferred */
        while (len) {
                int bytes, ret, offset;
                void *maddr;

                /* Pin one page at addr (force=1 to honor ptrace semantics). */
                ret = get_user_pages(tsk, mm, addr, 1,
                                write, 1, &page, &vma);
                if (ret <= 0)
                        break;

                /* Clamp this iteration to the remainder of the page. */
                bytes = len;
                offset = addr & (PAGE_SIZE-1);
                if (bytes > PAGE_SIZE-offset)
                        bytes = PAGE_SIZE-offset;

                maddr = kmap(page);
                if (write) {
                        /* copy_to_user_page also handles cache flushing. */
                        copy_to_user_page(vma, page, addr,
                                          maddr + offset, buf, bytes);
                        set_page_dirty_lock(page);
                } else {
                        copy_from_user_page(vma, page, addr,
                                            buf, maddr + offset, bytes);
                }
                kunmap(page);
                page_cache_release(page);       /* drop the get_user_pages ref */
                len -= bytes;
                buf += bytes;
                addr += bytes;
        }
        up_read(&mm->mmap_sem);
        mmput(mm);

        return buf - old_buf;
}
127
128
129 #ifndef CONFIG_PTRACE
130
/* Stub when ptrace support is configured out: always -ENOSYS. */
asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
{
        return -ENOSYS;
}
135
136 #else
137
/*
 * Per-tracee bookkeeping for the ptrace layer on top of utrace.
 * One of these hangs off the utrace engine's ->data and is linked on
 * the tracer's ->ptracees list.  The u union overlays the live state
 * with the RCU head used once the struct is awaiting destruction.
 */
struct ptrace_state
{
        /*
         * These elements are always available, even when the struct is
         * awaiting destruction at the next RCU callback point.
         */
        struct utrace_attached_engine *engine;
        struct task_struct *task; /* Target task.  */
        struct task_struct *parent; /* Whom we report to.  */
        struct list_head entry; /* Entry on parent->ptracees list.  */

        union {
                struct rcu_head dead;   /* Valid only after ptrace_done().  */
                struct {
                        u8 options; /* PTRACE_SETOPTIONS bits.  */
                        unsigned int stopped:1; /* Stopped for report.  */
                        unsigned int reported:1; /* wait already reported.  */
                        unsigned int syscall:1; /* Reporting for syscall.  */
#ifdef PTRACE_SYSEMU
                        unsigned int sysemu:1; /* PTRACE_SYSEMU in progress. */
#endif
                        unsigned int have_eventmsg:1; /* u.eventmsg valid. */
                        unsigned int cap_sys_ptrace:1; /* Tracer capable.  */

                        /* Discriminated by have_eventmsg.  */
                        union
                        {
                                unsigned long eventmsg;
                                siginfo_t *siginfo;
                        } u;
                } live;
        } u;
};
170
171 static const struct utrace_engine_ops ptrace_utrace_ops; /* Initialized below. */
172
173
174 static void
175 ptrace_state_link(struct ptrace_state *state)
176 {
177         task_lock(state->parent);
178         list_add_rcu(&state->entry, &state->parent->ptracees);
179         task_unlock(state->parent);
180 }
181
182 static void
183 ptrace_state_unlink(struct ptrace_state *state)
184 {
185         task_lock(state->parent);
186         list_del_rcu(&state->entry);
187         task_unlock(state->parent);
188 }
189
190 static int
191 ptrace_setup(struct task_struct *target, struct utrace_attached_engine *engine,
192              struct task_struct *parent, u8 options, int cap_sys_ptrace)
193 {
194         struct ptrace_state *state = kzalloc(sizeof *state, GFP_USER);
195         if (unlikely(state == NULL))
196                 return -ENOMEM;
197
198         state->engine = engine;
199         state->task = target;
200         state->parent = parent;
201         state->u.live.options = options;
202         state->u.live.cap_sys_ptrace = cap_sys_ptrace;
203         ptrace_state_link(state);
204
205         BUG_ON(engine->data != 0);
206         rcu_assign_pointer(engine->data, (unsigned long) state);
207
208         return 0;
209 }
210
211 static void
212 ptrace_state_free(struct rcu_head *rhead)
213 {
214         struct ptrace_state *state = container_of(rhead,
215                                                   struct ptrace_state, u.dead);
216         kfree(state);
217 }
218
219 static void
220 ptrace_done(struct ptrace_state *state)
221 {
222         INIT_RCU_HEAD(&state->u.dead);
223         call_rcu(&state->u.dead, ptrace_state_free);
224 }
225
/*
 * Update the tracing engine state to match the new ptrace state.
 *
 * @flags carries the caller's requested UTRACE_ACTION/EVENT bits
 * (notably UTRACE_ACTION_QUIESCE to keep the tracee stopped); the
 * mandatory ptrace event set is OR'd in here before the flags are
 * installed with utrace_set_flags().
 */
static void
ptrace_update(struct task_struct *target, struct utrace_attached_engine *engine,
              unsigned long flags)
{
        struct ptrace_state *state = (struct ptrace_state *) engine->data;

        /*
         * These events are always reported.
         */
        flags |= (UTRACE_EVENT(DEATH) | UTRACE_EVENT(EXEC)
                  | UTRACE_EVENT_SIGNAL_ALL);

        /*
         * We always have to examine clone events to check for CLONE_PTRACE.
         */
        flags |= UTRACE_EVENT(CLONE);

        /*
         * PTRACE_SETOPTIONS can request more events.
         */
        if (state->u.live.options & PTRACE_O_TRACEEXIT)
                flags |= UTRACE_EVENT(EXIT);
        if (state->u.live.options & PTRACE_O_TRACEVFORKDONE)
                flags |= UTRACE_EVENT(VFORK_DONE);

        /*
         * ptrace always inhibits normal parent reaping.
         * But for a corner case we sometimes see the REAP event instead.
         */
        flags |= UTRACE_ACTION_NOREAP | UTRACE_EVENT(REAP);

        /* Resuming (no QUIESCE) invalidates the saved stop context. */
        state->u.live.stopped = (flags & UTRACE_ACTION_QUIESCE) != 0;
        if (!state->u.live.stopped) {
                if (!state->u.live.have_eventmsg)
                        state->u.live.u.siginfo = NULL;
                if (!(target->flags & PF_EXITING))
                        target->exit_code = 0;
        }
        utrace_set_flags(target, engine, flags);
}
269
/*
 * Implement PTRACE_TRACEME: make current's parent its tracer.
 * Creates an exclusive ptrace utrace engine on current, asks the
 * security module for permission, and sets up the ptrace state.
 * Returns 0 or a negative errno (-EPERM if already ptraced).
 */
static int ptrace_traceme(void)
{
        struct utrace_attached_engine *engine;
        int retval;

        engine = utrace_attach(current, (UTRACE_ATTACH_CREATE
                                         | UTRACE_ATTACH_EXCLUSIVE
                                         | UTRACE_ATTACH_MATCH_OPS),
                               &ptrace_utrace_ops, 0UL);

        if (IS_ERR(engine)) {
                retval = PTR_ERR(engine);
                /* EXCLUSIVE attach failing means we already have a tracer. */
                if (retval == -EEXIST)
                        retval = -EPERM;
        }
        else {
                /* task_lock stabilizes ->parent across the security check. */
                task_lock(current);
                retval = security_ptrace(current->parent, current);
                task_unlock(current);
                if (!retval)
                        retval = ptrace_setup(current, engine,
                                              current->parent, 0, 0);
                /* On any failure, undo the attach; otherwise go live. */
                if (retval)
                        utrace_detach(current, engine);
                else
                        ptrace_update(current, engine, 0);
        }

        return retval;
}
300
/*
 * Implement PTRACE_ATTACH on @task: create an exclusive ptrace engine,
 * verify permission, set up state, and send SIGSTOP.  Returns 0 or a
 * negative errno (-EPERM for init, self/own-thread-group, kernel
 * threads, an existing tracer, or a failed permission check).
 */
static int ptrace_attach(struct task_struct *task)
{
        struct utrace_attached_engine *engine;
        int retval;

        retval = -EPERM;
        if (task->pid <= 1)             /* never attach to init or idle */
                goto bad;
        if (task->tgid == current->tgid)        /* no self/sibling-thread attach */
                goto bad;
        if (!task->mm)          /* kernel threads */
                goto bad;

        engine = utrace_attach(task, (UTRACE_ATTACH_CREATE
                                      | UTRACE_ATTACH_EXCLUSIVE
                                      | UTRACE_ATTACH_MATCH_OPS),
                               &ptrace_utrace_ops, 0);
        if (IS_ERR(engine)) {
                retval = PTR_ERR(engine);
                /* An existing ptrace engine means someone beat us to it. */
                if (retval == -EEXIST)
                        retval = -EPERM;
                goto bad;
        }

        /* retval stays -EPERM if the permission check refuses. */
        if (ptrace_may_attach(task))
                retval = ptrace_setup(task, engine, current, 0,
                                      capable(CAP_SYS_PTRACE));
        if (retval)
                utrace_detach(task, engine);
        else {
                int stopped;

                /* Go */
                ptrace_update(task, engine, 0);
                force_sig_specific(SIGSTOP, task);

                /* Sample the stop state under siglock for a stable read. */
                spin_lock_irq(&task->sighand->siglock);
                stopped = (task->state == TASK_STOPPED);
                spin_unlock_irq(&task->sighand->siglock);

                if (stopped) {
                        /*
                         * Do now the regset 0 writeback that we do on every
                         * stop, since it's never been done.  On register
                         * window machines, this makes sure the user memory
                         * backing the register data is up to date.
                         */
                        const struct utrace_regset *regset;
                        regset = utrace_regset(task, engine,
                                               utrace_native_view(task), 0);
                        if (regset->writeback)
                                (*regset->writeback)(task, regset, 1);
                }
        }

bad:
        return retval;
}
359
/*
 * Implement PTRACE_DETACH: sever the utrace engine from @task and tear
 * down our bookkeeping.  Always returns 0.
 */
static int ptrace_detach(struct task_struct *task,
                         struct utrace_attached_engine *engine)
{
        struct ptrace_state *state = (struct ptrace_state *) engine->data;
        /*
         * Clearing ->data before detach makes sure an unrelated task
         * calling into ptrace_tracer_task won't try to touch stale state.
         */
        rcu_assign_pointer(engine->data, 0UL);
        utrace_detach(task, engine);
        /* Unlink from the tracer's list, then free after an RCU grace period. */
        ptrace_state_unlink(state);
        ptrace_done(state);
        return 0;
}
374
375
/*
 * This is called when we are exiting.  We must stop all our ptracing.
 *
 * Two passes: first detach the utrace engines without holding any
 * locks (utrace_detach may block/reschedule internals), then take the
 * task_lock to empty our ->ptracees list and free the state structs.
 */
void
ptrace_exit(struct task_struct *tsk)
{
        rcu_read_lock();
        if (unlikely(!list_empty(&tsk->ptracees))) {
                struct ptrace_state *state, *next;

                /*
                 * First detach the utrace layer from all the tasks.
                 * We don't want to hold any locks while calling utrace_detach.
                 */
                list_for_each_entry_rcu(state, &tsk->ptracees, entry) {
                        /* Clear ->data first so others see no stale state. */
                        rcu_assign_pointer(state->engine->data, 0UL);
                        utrace_detach(state->task, state->engine);
                }

                /*
                 * Now clear out our list and clean up our data structures.
                 * The task_lock protects our list structure.
                 */
                task_lock(tsk);
                list_for_each_entry_safe(state, next, &tsk->ptracees, entry) {
                        list_del_rcu(&state->entry);
                        ptrace_done(state);
                }
                task_unlock(tsk);
        }
        rcu_read_unlock();

        BUG_ON(!list_empty(&tsk->ptracees));
}
410
/*
 * Deliver the signal a debugger passed to a resuming ptrace request.
 * Three cases: @signr == 0 is a no-op; a syscall-tracing stop sends an
 * ordinary signal; a signal-stop with saved siginfo re-injects (possibly
 * rewritten) siginfo via utrace.  Returns 0, -EIO for an invalid signal
 * number, or the utrace_inject_signal() result.
 */
static int
ptrace_induce_signal(struct task_struct *target,
                     struct utrace_attached_engine *engine,
                     long signr)
{
        struct ptrace_state *state = (struct ptrace_state *) engine->data;

        if (signr == 0)
                return 0;

        if (!valid_signal(signr))
                return -EIO;

        if (state->u.live.syscall) {
                /*
                 * This is the traditional ptrace behavior when given
                 * a signal to resume from a syscall tracing stop.
                 */
                send_sig(signr, target, 1);
        }
        else if (!state->u.live.have_eventmsg && state->u.live.u.siginfo) {
                siginfo_t *info = state->u.live.u.siginfo;

                /* Update the siginfo structure if the signal has
                   changed.  If the debugger wanted something
                   specific in the siginfo structure then it should
                   have updated *info via PTRACE_SETSIGINFO.  */
                if (signr != info->si_signo) {
                        info->si_signo = signr;
                        info->si_errno = 0;
                        info->si_code = SI_USER;
                        info->si_pid = current->pid;
                        info->si_uid = current->uid;
                }

                return utrace_inject_signal(target, engine,
                                            UTRACE_ACTION_RESUME, info, NULL);
        }
        /* No syscall stop and no saved siginfo: the signal is dropped. */

        return 0;
}
452
453 fastcall int
454 ptrace_regset_access(struct task_struct *target,
455                      struct utrace_attached_engine *engine,
456                      const struct utrace_regset_view *view,
457                      int setno, unsigned long offset, unsigned int size,
458                      void __user *data, int write)
459 {
460         const struct utrace_regset *regset = utrace_regset(target, engine,
461                                                            view, setno);
462         int ret;
463
464         if (unlikely(regset == NULL))
465                 return -EIO;
466
467         if (size == (unsigned int) -1)
468                 size = regset->size * regset->n;
469
470         if (write) {
471                 if (!access_ok(VERIFY_READ, data, size))
472                         ret = -EIO;
473                 else
474                         ret = (*regset->set)(target, regset,
475                                              offset, size, NULL, data);
476         }
477         else {
478                 if (!access_ok(VERIFY_WRITE, data, size))
479                         ret = -EIO;
480                 else
481                         ret = (*regset->get)(target, regset,
482                                              offset, size, NULL, data);
483         }
484
485         return ret;
486 }
487
488 fastcall int
489 ptrace_onereg_access(struct task_struct *target,
490                      struct utrace_attached_engine *engine,
491                      const struct utrace_regset_view *view,
492                      int setno, unsigned long regno,
493                      void __user *data, int write)
494 {
495         const struct utrace_regset *regset = utrace_regset(target, engine,
496                                                            view, setno);
497         unsigned int pos;
498         int ret;
499
500         if (unlikely(regset == NULL))
501                 return -EIO;
502
503         if (regno < regset->bias || regno >= regset->bias + regset->n)
504                 return -EINVAL;
505
506         pos = (regno - regset->bias) * regset->size;
507
508         if (write) {
509                 if (!access_ok(VERIFY_READ, data, regset->size))
510                         ret = -EIO;
511                 else
512                         ret = (*regset->set)(target, regset, pos, regset->size,
513                                              NULL, data);
514         }
515         else {
516                 if (!access_ok(VERIFY_WRITE, data, regset->size))
517                         ret = -EIO;
518                 else
519                         ret = (*regset->get)(target, regset, pos, regset->size,
520                                              NULL, data);
521         }
522
523         return ret;
524 }
525
/*
 * Access a ptrace compatibility layout (e.g. struct user for
 * PEEKUSR/POKEUSR) by translating the (addr, size) range through the
 * @layout segment table into regset accesses.  Exactly one of @udata
 * (user pointer) and @kdata (kernel pointer) is non-null.  Returns 0
 * on success, -EIO for an address outside the layout or a misaligned
 * access, -EFAULT on a user-copy failure, or a regset callback error.
 */
fastcall int
ptrace_layout_access(struct task_struct *target,
                     struct utrace_attached_engine *engine,
                     const struct utrace_regset_view *view,
                     const struct ptrace_layout_segment layout[],
                     unsigned long addr, unsigned int size,
                     void __user *udata, void *kdata, int write)
{
        const struct ptrace_layout_segment *seg;
        int ret = -EIO;

        if (kdata == NULL &&
            !access_ok(write ? VERIFY_READ : VERIFY_WRITE, udata, size))
                return -EIO;

        seg = layout;
        do {
                unsigned int pos, n;

                /* Advance to the segment containing addr; end==0 terminates. */
                while (addr >= seg->end && seg->end != 0)
                        ++seg;

                if (addr < seg->start || addr >= seg->end)
                        return -EIO;

                /* Offset into the regset, and bytes left in this segment. */
                pos = addr - seg->start + seg->offset;
                n = min(size, seg->end - (unsigned int) addr);

                if (unlikely(seg->regset == (unsigned int) -1)) {
                        /*
                         * This is a no-op/zero-fill portion of struct user.
                         */
                        ret = 0;
                        if (!write) {
                                if (kdata)
                                        memset(kdata, 0, n);
                                else if (clear_user(udata, n))
                                        ret = -EFAULT;
                        }
                }
                else {
                        unsigned int align;
                        const struct utrace_regset *regset = utrace_regset(
                                target, engine, view, seg->regset);
                        if (unlikely(regset == NULL))
                                return -EIO;

                        /*
                         * A ptrace compatibility layout can do a misaligned
                         * regset access, e.g. word access to larger data.
                         * An arch's compat layout can be this way only if
                         * it is actually ok with the regset code despite the
                         * regset->align setting.
                         */
                        align = min(regset->align, size);
                        if ((pos & (align - 1))
                            || pos >= regset->n * regset->size)
                                return -EIO;

                        if (write)
                                ret = (*regset->set)(target, regset,
                                                     pos, n, kdata, udata);
                        else
                                ret = (*regset->get)(target, regset,
                                                     pos, n, kdata, udata);
                }

                /* Advance whichever buffer is in use and loop for the rest. */
                if (kdata)
                        kdata += n;
                else
                        udata += n;
                addr += n;
                size -= n;
        } while (ret == 0 && size > 0);

        return ret;
}
603
604
/*
 * Common setup for a ptrace request on @pid.
 *
 * Handles PTRACE_TRACEME and PTRACE_ATTACH entirely here.  For other
 * requests, validates that @pid names our stopped ptracee and, on
 * success, fills in *childp (with a task reference held that the
 * caller must drop), *enginep, and *statep, returning -EIO as the
 * "proceed with the request" sentinel.  Any other return value is a
 * final error.
 */
static int
ptrace_start(long pid, long request,
             struct task_struct **childp,
             struct utrace_attached_engine **enginep,
             struct ptrace_state **statep)

{
        struct task_struct *child;
        struct utrace_attached_engine *engine;
        struct ptrace_state *state;
        int ret;

        if (request == PTRACE_TRACEME)
                return ptrace_traceme();

        ret = -ESRCH;
        read_lock(&tasklist_lock);
        child = find_task_by_pid(pid);
        if (child)
                get_task_struct(child); /* pin across the unlocked region */
        read_unlock(&tasklist_lock);
#ifdef PTRACE_DEBUG
        printk("ptrace pid %ld => %p\n", pid, child);
#endif
        if (!child)
                goto out;

        ret = -EPERM;
        if (pid == 1)           /* you may not mess with init */
                goto out_tsk;

        /* vserver: target must be visible from our context. */
        if (!vx_check(vx_task_xid(child), VX_WATCH|VX_IDENT))
                goto out_tsk;

        if (request == PTRACE_ATTACH) {
                ret = ptrace_attach(child);
                goto out_tsk;
        }

        /* Look up (not create) the engine installed at attach time. */
        engine = utrace_attach(child, UTRACE_ATTACH_MATCH_OPS,
                               &ptrace_utrace_ops, 0);
        ret = -ESRCH;
        if (IS_ERR(engine) || engine == NULL)
                goto out_tsk;
        /*
         * engine->data can be cleared by a concurrent detach; RCU keeps
         * the state alive while we verify that we are the tracer.
         */
        rcu_read_lock();
        state = rcu_dereference((struct ptrace_state *) engine->data);
        if (state == NULL || state->parent != current) {
                rcu_read_unlock();
                goto out_tsk;
        }
        rcu_read_unlock();

        /*
         * Traditional ptrace behavior demands that the target already be
         * quiescent, but not dead.
         */
        if (request != PTRACE_KILL && !state->u.live.stopped) {
#ifdef PTRACE_DEBUG
                printk("%d not stopped (%lx)\n", child->pid, child->state);
#endif
                if (child->state != TASK_STOPPED)
                        goto out_tsk;
                /* In job-control stop: make it utrace-quiescent as well. */
                utrace_set_flags(child, engine,
                                 engine->flags | UTRACE_ACTION_QUIESCE);
        }

        /*
         * We do this for all requests to match traditional ptrace behavior.
         * If the machine state synchronization done at context switch time
         * includes e.g. writing back to user memory, we want to make sure
         * that has finished before a PTRACE_PEEKDATA can fetch the results.
         * On most machines, only regset data is affected by context switch
         * and calling utrace_regset later on will take care of that, so
         * this is superfluous.
         *
         * To do this purely in utrace terms, we could do:
         *  (void) utrace_regset(child, engine, utrace_native_view(child), 0);
         */
        wait_task_inactive(child);

        if (child->exit_state)
                goto out_tsk;

        *childp = child;
        *enginep = engine;
        *statep = state;
        return -EIO;    /* sentinel: caller proceeds with the request */

out_tsk:
        put_task_struct(child);
out:
        return ret;
}
698
/*
 * Handle the machine-independent ptrace requests not claimed by
 * arch_ptrace(): DETACH, the resumption family (KILL/CONT/SYSCALL/
 * SINGLESTEP/SINGLEBLOCK/SYSEMU*), and SETOPTIONS.
 *
 * Called with @child already validated by ptrace_start() as our
 * quiescent, live tracee.  Returns 0 on success, -EIO for an
 * unsupported request, or an errno from the sub-operations.
 */
static int
ptrace_common(long request, struct task_struct *child,
              struct utrace_attached_engine *engine,
              struct ptrace_state *state,
              unsigned long addr, long data)
{
        unsigned long flags;
        int ret = -EIO;

        switch (request) {
        case PTRACE_DETACH:
                /*
                 * Detach a process that was attached.
                 * @data may carry a signal to deliver on the way out.
                 */
                ret = ptrace_induce_signal(child, engine, data);
                if (!ret)
                        ret = ptrace_detach(child, engine);
                break;

                /*
                 * These are the operations that resume the child running.
                 */
        case PTRACE_KILL:
                data = SIGKILL;
                /* fallthrough: KILL is CONT with a forced SIGKILL */
        case PTRACE_CONT:
        case PTRACE_SYSCALL:
#ifdef PTRACE_SYSEMU
        case PTRACE_SYSEMU:
        case PTRACE_SYSEMU_SINGLESTEP:
#endif
#ifdef PTRACE_SINGLEBLOCK
        case PTRACE_SINGLEBLOCK:
                /* Reject block-step when the hardware cannot do it. */
# ifdef ARCH_HAS_BLOCK_STEP
                if (! ARCH_HAS_BLOCK_STEP)
# endif
                        if (request == PTRACE_SINGLEBLOCK)
                                break;
#endif
                /* fallthrough */
        case PTRACE_SINGLESTEP:
                /* Likewise reject single-step without hardware support. */
#ifdef ARCH_HAS_SINGLE_STEP
                if (! ARCH_HAS_SINGLE_STEP)
#endif
                        if (request == PTRACE_SINGLESTEP
#ifdef PTRACE_SYSEMU_SINGLESTEP
                            || request == PTRACE_SYSEMU_SINGLESTEP
#endif
                                )
                                break;

                ret = ptrace_induce_signal(child, engine, data);
                if (ret)
                        break;


                /*
                 * Reset the action flags without QUIESCE, so it resumes.
                 */
                flags = 0;
#ifdef PTRACE_SYSEMU
                state->u.live.sysemu = (request == PTRACE_SYSEMU_SINGLESTEP
                                        || request == PTRACE_SYSEMU);
#endif
                if (request == PTRACE_SINGLESTEP
#ifdef PTRACE_SYSEMU
                    || request == PTRACE_SYSEMU_SINGLESTEP
#endif
                        )
                        flags |= UTRACE_ACTION_SINGLESTEP;
#ifdef PTRACE_SINGLEBLOCK
                else if (request == PTRACE_SINGLEBLOCK)
                        flags |= UTRACE_ACTION_BLOCKSTEP;
#endif
                if (request == PTRACE_SYSCALL)
                        flags |= UTRACE_EVENT_SYSCALL;
#ifdef PTRACE_SYSEMU
                else if (request == PTRACE_SYSEMU
                         || request == PTRACE_SYSEMU_SINGLESTEP)
                        flags |= UTRACE_EVENT(SYSCALL_ENTRY);
#endif
                ptrace_update(child, engine, flags);
                ret = 0;
                break;

#ifdef PTRACE_OLDSETOPTIONS
        case PTRACE_OLDSETOPTIONS:
#endif
        case PTRACE_SETOPTIONS:
                ret = -EINVAL;
                if (data & ~PTRACE_O_MASK)
                        break;
                state->u.live.options = data;
                /* Stay quiescent; the new options apply on the next resume. */
                ptrace_update(child, engine, UTRACE_ACTION_QUIESCE);
                ret = 0;
                break;
        }

        return ret;
}
797
798
799 asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
800 {
801         struct task_struct *child;
802         struct utrace_attached_engine *engine;
803         struct ptrace_state *state;
804         long ret, val;
805
806 #ifdef PTRACE_DEBUG
807         printk("%d sys_ptrace(%ld, %ld, %lx, %lx)\n",
808                current->pid, request, pid, addr, data);
809 #endif
810
811         ret = ptrace_start(pid, request, &child, &engine, &state);
812         if (ret != -EIO)
813                 goto out;
814
815         val = 0;
816         ret = arch_ptrace(&request, child, engine, addr, data, &val);
817         if (ret != -ENOSYS) {
818                 if (ret == 0) {
819                         ret = val;
820                         force_successful_syscall_return();
821                 }
822                 goto out_tsk;
823         }
824
825         switch (request) {
826         default:
827                 ret = ptrace_common(request, child, engine, state, addr, data);
828                 break;
829
830         case PTRACE_PEEKTEXT: /* read word at location addr. */
831         case PTRACE_PEEKDATA: {
832                 unsigned long tmp;
833                 int copied;
834
835                 copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
836                 ret = -EIO;
837                 if (copied != sizeof(tmp))
838                         break;
839                 ret = put_user(tmp, (unsigned long __user *) data);
840                 break;
841         }
842
843         case PTRACE_POKETEXT: /* write the word at location addr. */
844         case PTRACE_POKEDATA:
845                 ret = 0;
846                 if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data))
847                         break;
848                 ret = -EIO;
849                 break;
850
851         case PTRACE_GETEVENTMSG:
852                 ret = put_user(state->u.live.have_eventmsg
853                                ? state->u.live.u.eventmsg : 0L,
854                                (unsigned long __user *) data);
855                 break;
856         case PTRACE_GETSIGINFO:
857                 ret = -EINVAL;
858                 if (!state->u.live.have_eventmsg && state->u.live.u.siginfo)
859                         ret = copy_siginfo_to_user((siginfo_t __user *) data,
860                                                    state->u.live.u.siginfo);
861                 break;
862         case PTRACE_SETSIGINFO:
863                 ret = -EINVAL;
864                 if (!state->u.live.have_eventmsg && state->u.live.u.siginfo
865                     && copy_from_user(state->u.live.u.siginfo,
866                                       (siginfo_t __user *) data,
867                                       sizeof(siginfo_t)))
868                         ret = -EFAULT;
869                 break;
870         }
871
872 out_tsk:
873         put_task_struct(child);
874 out:
875 #ifdef PTRACE_DEBUG
876         printk("%d ptrace -> %x\n", current->pid, ret);
877 #endif
878         return ret;
879 }
880
881
882 #ifdef CONFIG_COMPAT
883 #include <linux/compat.h>
884
885 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
886                                   compat_ulong_t addr, compat_long_t cdata)
887 {
888         const unsigned long data = (unsigned long) (compat_ulong_t) cdata;
889         struct task_struct *child;
890         struct utrace_attached_engine *engine;
891         struct ptrace_state *state;
892         compat_long_t ret, val;
893
894 #ifdef PTRACE_DEBUG
895         printk("%d compat_sys_ptrace(%d, %d, %x, %x)\n",
896                current->pid, request, pid, addr, cdata);
897 #endif
898         ret = ptrace_start(pid, request, &child, &engine, &state);
899         if (ret != -EIO)
900                 goto out;
901
902         val = 0;
903         ret = arch_compat_ptrace(&request, child, engine, addr, cdata, &val);
904         if (ret != -ENOSYS) {
905                 if (ret == 0) {
906                         ret = val;
907                         force_successful_syscall_return();
908                 }
909                 goto out_tsk;
910         }
911
912         switch (request) {
913         default:
914                 ret = ptrace_common(request, child, engine, state, addr, data);
915                 break;
916
917         case PTRACE_PEEKTEXT: /* read word at location addr. */
918         case PTRACE_PEEKDATA: {
919                 compat_ulong_t tmp;
920                 int copied;
921
922                 copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
923                 ret = -EIO;
924                 if (copied != sizeof(tmp))
925                         break;
926                 ret = put_user(tmp, (compat_ulong_t __user *) data);
927                 break;
928         }
929
930         case PTRACE_POKETEXT: /* write the word at location addr. */
931         case PTRACE_POKEDATA:
932                 ret = 0;
933                 if (access_process_vm(child, addr, &cdata, sizeof(cdata), 1) == sizeof(cdata))
934                         break;
935                 ret = -EIO;
936                 break;
937
938         case PTRACE_GETEVENTMSG:
939                 ret = put_user(state->u.live.have_eventmsg
940                                ? state->u.live.u.eventmsg : 0L,
941                                (compat_long_t __user *) data);
942                 break;
943         case PTRACE_GETSIGINFO:
944                 ret = -EINVAL;
945                 if (!state->u.live.have_eventmsg && state->u.live.u.siginfo)
946                         ret = copy_siginfo_to_user32(
947                                 (struct compat_siginfo __user *) data,
948                                 state->u.live.u.siginfo);
949                 break;
950         case PTRACE_SETSIGINFO:
951                 ret = -EINVAL;
952                 if (!state->u.live.have_eventmsg && state->u.live.u.siginfo
953                     && copy_siginfo_from_user32(
954                             state->u.live.u.siginfo,
955                             (struct compat_siginfo __user *) data))
956                         ret = -EFAULT;
957                 break;
958         }
959
960 out_tsk:
961         put_task_struct(child);
962 out:
963 #ifdef PTRACE_DEBUG
964         printk("%d ptrace -> %x\n", current->pid, ret);
965 #endif
966         return ret;
967 }
968 #endif
969
970
971 /*
972  * We're called with tasklist_lock held for reading.
973  * If we return -ECHILD or zero, next_thread(tsk) must still be valid to use.
974  * If we return another error code, or a successful PID value, we
975  * release tasklist_lock first.
976  */
int
ptrace_do_wait(struct task_struct *tsk,
	       pid_t pid, int options, struct siginfo __user *infop,
	       int __user *stat_addr, struct rusage __user *rusagep)
{
	struct ptrace_state *state;
	struct task_struct *p;
	int err = -ECHILD;	/* no matching ptracee seen yet */
	int why, status;

	/*
	 * Scan this thread's list of ptrace children for one with
	 * something to report.  The list is protected by RCU; the
	 * tasklist_lock our caller holds keeps the tasks themselves
	 * from going away under us.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(state, &tsk->ptracees, entry) {
		p = state->task;

		/* Apply the pid/pgid selection rules of wait4(2). */
		if (pid > 0) {
			if (p->pid != pid)
				continue;
		} else if (!pid) {
			if (process_group(p) != process_group(current))
				continue;
		} else if (pid != -1) {
			if (process_group(p) != -pid)
				continue;
		}
		/*
		 * __WCLONE selects children that do not report SIGCHLD;
		 * __WALL selects both kinds.
		 */
		if (((p->exit_signal != SIGCHLD) ^ ((options & __WCLONE) != 0))
		    && !(options & __WALL))
			continue;
		if (security_task_wait(p))
			continue;

		/*
		 * At least one child matched the selection, so a blocking
		 * wait should keep waiting rather than fail with -ECHILD.
		 */
		err = 0;
		if (state->u.live.reported)
			continue;

		/* Ready if it's in a ptrace stop, a job-control stop,
		   or is an un-reaped zombie we're allowed to report. */
		if (state->u.live.stopped)
			goto found;
		if ((p->state & (TASK_TRACED | TASK_STOPPED))
		    && (p->signal->flags & SIGNAL_STOP_STOPPED))
			goto found;
		if (p->exit_state == EXIT_ZOMBIE) {
			if (!likely(options & WEXITED))
				continue;
			if (delay_group_leader(p))
				continue;
			goto found;
		}
		// XXX should handle WCONTINUED
	}
	rcu_read_unlock();
	return err;

found:
	rcu_read_unlock();

	BUG_ON(state->parent != tsk);

	/* Decode the (why, status) pair that describes this report. */
	if (p->exit_state) {
		if (unlikely(p->parent == state->parent))
			/*
			 * This is our natural child we were ptracing.
			 * When it dies it detaches (see ptrace_report_death).
			 * So we're seeing it here in a race.  When it
			 * finishes detaching it will become reapable in
			 * the normal wait_task_zombie path instead.
			 */
			return 0;
		if ((p->exit_code & 0x7f) == 0) {
			why = CLD_EXITED;
			status = p->exit_code >> 8;
		} else {
			why = (p->exit_code & 0x80) ? CLD_DUMPED : CLD_KILLED;
			status = p->exit_code & 0xff;
		}
	}
	else {
		why = CLD_TRAPPED;
		status = (p->exit_code << 8) | 0x7f;
	}

	/*
	 * At this point we are committed to a successful return
	 * or a user error return.  Release the tasklist_lock.
	 */
	read_unlock(&tasklist_lock);

	if (rusagep)
		err = getrusage(p, RUSAGE_BOTH, rusagep);
	if (infop) {
		if (!err)
			err = put_user(SIGCHLD, &infop->si_signo);
		if (!err)
			err = put_user(0, &infop->si_errno);
		if (!err)
			err = put_user((short)why, &infop->si_code);
		if (!err)
			err = put_user(p->pid, &infop->si_pid);
		if (!err)
			err = put_user(p->uid, &infop->si_uid);
		if (!err)
			err = put_user(status, &infop->si_status);
	}
	if (!err && stat_addr)
		err = put_user(status, stat_addr);

	if (!err) {
		struct utrace *utrace;

		err = p->pid;	/* a successful wait returns the child's pid */

		/*
		 * If this was a non-death report, the child might now be
		 * detaching on death in the same race possible in the
		 * p->exit_state check above.  So check for p->utrace being
		 * NULL, then we don't need to update the state any more.
		 */
		rcu_read_lock();
		utrace = rcu_dereference(p->utrace);
		if (likely(utrace != NULL)) {
			utrace_lock(utrace);
			if (unlikely(state->u.live.reported))
				/*
				 * Another thread in the group got here
				 * first and reaped it before we locked.
				 */
				err = -ERESTARTNOINTR;
			state->u.live.reported = 1;
			utrace_unlock(utrace);
		}
		rcu_read_unlock();

		/* A death report means the engine's job is done: detach. */
		if (err > 0 && why != CLD_TRAPPED)
			ptrace_detach(p, state->engine);
	}

	return err;
}
1113
/*
 * Send (or suppress) the SIGCHLD-style notification for a ptrace event
 * and wake the tracer's wait4 queue.  'why' is one of the CLD_* codes;
 * 'parent' is the tracer, which is not necessarily tsk's real parent.
 */
static void
do_notify(struct task_struct *tsk, struct task_struct *parent, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *sighand;
	int sa_mask;

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	info.si_pid = tsk->pid;
	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = cputime_to_jiffies(tsk->utime);
	info.si_stime = cputime_to_jiffies(tsk->stime);

	/* Default: a stop/trap report, suppressible with SA_NOCLDSTOP. */
	sa_mask = SA_NOCLDSTOP;
	info.si_code = why;
	info.si_status = tsk->exit_code & 0x7f;
	if (why == CLD_CONTINUED)
		info.si_status = SIGCONT;
	else if (why == CLD_STOPPED)
		info.si_status = tsk->signal->group_exit_code & 0x7f;
	else if (why == CLD_EXITED) {
		/* A death report honors SA_NOCLDWAIT and refines si_code
		   from the raw exit code (dumped/killed/exited). */
		sa_mask = SA_NOCLDWAIT;
		if (tsk->exit_code & 0x80)
			info.si_code = CLD_DUMPED;
		else if (tsk->exit_code & 0x7f)
			info.si_code = CLD_KILLED;
		else {
			info.si_code = CLD_EXITED;
			info.si_status = tsk->exit_code >> 8;
		}
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & sa_mask))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	wake_up_interruptible_sync(&parent->signal->wait_chldexit);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}
1161
/*
 * Core of every ptrace stop, run in the tracee: quiesce ourselves,
 * record the stop in the shared state, and notify the tracer with a
 * CLD_TRAPPED SIGCHLD/wait wakeup.  'code' becomes tsk->exit_code,
 * i.e. the status that the tracer's wait4() will report.
 */
static u32
ptrace_report(struct utrace_attached_engine *engine, struct task_struct *tsk,
	      int code)
{
	struct ptrace_state *state = (struct ptrace_state *) engine->data;
	const struct utrace_regset *regset;

#ifdef PTRACE_DEBUG
	printk("%d ptrace_report %d engine %p state %p code %x parent %d (%p)\n",
	       current->pid, tsk->pid, engine, state, code,
	       state->parent->pid, state->parent);
	if (!state->u.live.have_eventmsg && state->u.live.u.siginfo) {
		const siginfo_t *si = state->u.live.u.siginfo;
		printk("  si %d code %x errno %d addr %p\n",
		       si->si_signo, si->si_code, si->si_errno,
		       si->si_addr);
	}
#endif

	/* We must not already be in a ptrace stop. */
	BUG_ON(state->u.live.stopped);

	/*
	 * Set our QUIESCE flag right now, before notifying the tracer.
	 * We do this before setting state->u.live.stopped rather than
	 * by using UTRACE_ACTION_NEWSTATE in our return value, to
	 * ensure that the tracer can't get the notification and then
	 * try to resume us with PTRACE_CONT before we set the flag.
	 */
	utrace_set_flags(tsk, engine, engine->flags | UTRACE_ACTION_QUIESCE);

	/*
	 * If regset 0 has a writeback call, do it now.  On register window
	 * machines, this makes sure the user memory backing the register
	 * data is up to date by the time wait_task_inactive returns to
	 * ptrace_start in our tracer doing a PTRACE_PEEKDATA or the like.
	 */
	regset = utrace_regset(tsk, engine, utrace_native_view(tsk), 0);
	if (regset->writeback)
		(*regset->writeback)(tsk, regset, 0);

	state->u.live.stopped = 1;
	state->u.live.reported = 0;	/* fresh stop, not yet waited for */
	tsk->exit_code = code;
	do_notify(tsk, state->parent, CLD_TRAPPED);

#ifdef PTRACE_DEBUG
	printk("%d ptrace_report quiescing exit_code %x\n",
	       current->pid, current->exit_code);
#endif

	return UTRACE_ACTION_RESUME;
}
1214
1215 static inline u32
1216 ptrace_event(struct utrace_attached_engine *engine, struct task_struct *tsk,
1217              int event)
1218 {
1219         struct ptrace_state *state = (struct ptrace_state *) engine->data;
1220         state->u.live.syscall = 0;
1221         return ptrace_report(engine, tsk, (event << 8) | SIGTRAP);
1222 }
1223
1224
/*
 * Tracee death callback.  For a natural child of the tracer we detach
 * entirely so that normal parent-side reporting and reaping take over;
 * for any other tracee we stay attached and mark the death unreported
 * so ptrace_do_wait() will pick up the zombie.
 */
static u32
ptrace_report_death(struct utrace_attached_engine *engine,
		    struct task_struct *tsk)
{
	struct ptrace_state *state = (struct ptrace_state *) engine->data;

	if (tsk->parent == state->parent) {
		/*
		 * This is a natural child, so we detach and let the normal
		 * reporting happen once our NOREAP action is gone.  But
		 * first, generate a SIGCHLD for those cases where normal
		 * behavior won't.  A ptrace'd child always generates SIGCHLD.
		 */
		if (tsk->exit_signal == -1 || !thread_group_empty(tsk))
			do_notify(tsk, state->parent, CLD_EXITED);
		/* Unlink first, then clear engine->data so racing readers
		   see NULL before the state is freed. */
		ptrace_state_unlink(state);
		rcu_assign_pointer(engine->data, 0UL);
		ptrace_done(state);
		return UTRACE_ACTION_DETACH;
	}

	state->u.live.reported = 0;
	do_notify(tsk, state->parent, CLD_EXITED);
	return UTRACE_ACTION_RESUME;
}
1250
1251 /*
1252  * We get this only in the case where our UTRACE_ACTION_NOREAP was ignored.
1253  * That happens solely when a non-leader exec reaps the old leader.
1254  */
static void
ptrace_report_reap(struct utrace_attached_engine *engine,
		   struct task_struct *tsk)
{
	struct ptrace_state *state;
	rcu_read_lock();
	state = rcu_dereference((struct ptrace_state *) engine->data);
	if (state != NULL) {
		/* Tear down our tracking; clearing engine->data tells
		   racing readers the state is gone before it is freed. */
		ptrace_state_unlink(state);
		rcu_assign_pointer(engine->data, 0UL);
		ptrace_done(state);
	}
	rcu_read_unlock();
}
1269
1270
1271 static u32
1272 ptrace_report_clone(struct utrace_attached_engine *engine,
1273                     struct task_struct *parent,
1274                     unsigned long clone_flags, struct task_struct *child)
1275 {
1276         struct ptrace_state *state = (struct ptrace_state *) engine->data;
1277         struct utrace_attached_engine *child_engine;
1278         int event = PTRACE_EVENT_FORK;
1279         int option = PTRACE_O_TRACEFORK;
1280
1281 #ifdef PTRACE_DEBUG
1282         printk("%d (%p) engine %p ptrace_report_clone child %d (%p) fl %lx\n",
1283                parent->pid, parent, engine, child->pid, child, clone_flags);
1284 #endif
1285
1286         if (clone_flags & CLONE_UNTRACED)
1287                 goto out;
1288
1289         if (clone_flags & CLONE_VFORK) {
1290                 event = PTRACE_EVENT_VFORK;
1291                 option = PTRACE_O_TRACEVFORK;
1292         }
1293         else if ((clone_flags & CSIGNAL) != SIGCHLD) {
1294                 event = PTRACE_EVENT_CLONE;
1295                 option = PTRACE_O_TRACECLONE;
1296         }
1297
1298         if (!(clone_flags & CLONE_PTRACE) && !(state->u.live.options & option))
1299                 goto out;
1300
1301         child_engine = utrace_attach(child, (UTRACE_ATTACH_CREATE
1302                                              | UTRACE_ATTACH_EXCLUSIVE
1303                                              | UTRACE_ATTACH_MATCH_OPS),
1304                                      &ptrace_utrace_ops, 0UL);
1305         if (unlikely(IS_ERR(child_engine))) {
1306                 BUG_ON(PTR_ERR(child_engine) != -ENOMEM);
1307                 printk(KERN_ERR
1308                        "ptrace out of memory, lost child %d of %d",
1309                        child->pid, parent->pid);
1310         }
1311         else {
1312                 int ret = ptrace_setup(child, child_engine,
1313                                        state->parent,
1314                                        state->u.live.options,
1315                                        state->u.live.cap_sys_ptrace);
1316                 if (unlikely(ret != 0)) {
1317                         BUG_ON(ret != -ENOMEM);
1318                         printk(KERN_ERR
1319                                "ptrace out of memory, lost child %d of %d",
1320                                child->pid, parent->pid);
1321                         utrace_detach(child, child_engine);
1322                 }
1323                 else {
1324                         sigaddset(&child->pending.signal, SIGSTOP);
1325                         set_tsk_thread_flag(child, TIF_SIGPENDING);
1326                         ptrace_update(child, child_engine, 0);
1327                 }
1328         }
1329
1330         if (state->u.live.options & option) {
1331                 state->u.live.have_eventmsg = 1;
1332                 state->u.live.u.eventmsg = child->pid;
1333                 return ptrace_event(engine, parent, event);
1334         }
1335
1336 out:
1337         return UTRACE_ACTION_RESUME;
1338 }
1339
1340
1341 static u32
1342 ptrace_report_vfork_done(struct utrace_attached_engine *engine,
1343                          struct task_struct *parent, pid_t child_pid)
1344 {
1345         struct ptrace_state *state = (struct ptrace_state *) engine->data;
1346         state->u.live.have_eventmsg = 1;
1347         state->u.live.u.eventmsg = child_pid;
1348         return ptrace_event(engine, parent, PTRACE_EVENT_VFORK_DONE);
1349 }
1350
1351
1352 static u32
1353 ptrace_report_signal(struct utrace_attached_engine *engine,
1354                      struct task_struct *tsk, struct pt_regs *regs,
1355                      u32 action, siginfo_t *info,
1356                      const struct k_sigaction *orig_ka,
1357                      struct k_sigaction *return_ka)
1358 {
1359         struct ptrace_state *state = (struct ptrace_state *) engine->data;
1360         int signo = info == NULL ? SIGTRAP : info->si_signo;
1361         state->u.live.syscall = 0;
1362         state->u.live.have_eventmsg = 0;
1363         state->u.live.u.siginfo = info;
1364         return ptrace_report(engine, tsk, signo) | UTRACE_SIGNAL_IGN;
1365 }
1366
1367 static u32
1368 ptrace_report_jctl(struct utrace_attached_engine *engine,
1369                    struct task_struct *tsk, int type)
1370 {
1371         struct ptrace_state *state = (struct ptrace_state *) engine->data;
1372         do_notify(tsk, state->parent, type);
1373         return UTRACE_JCTL_NOSIGCHLD;
1374 }
1375
1376 static u32
1377 ptrace_report_exec(struct utrace_attached_engine *engine,
1378                    struct task_struct *tsk,
1379                    const struct linux_binprm *bprm,
1380                    struct pt_regs *regs)
1381 {
1382         struct ptrace_state *state = (struct ptrace_state *) engine->data;
1383         if (state->u.live.options & PTRACE_O_TRACEEXEC)
1384                 return ptrace_event(engine, tsk, PTRACE_EVENT_EXEC);
1385         state->u.live.syscall = 0;
1386         return ptrace_report(engine, tsk, SIGTRAP);
1387 }
1388
1389 static u32
1390 ptrace_report_syscall(struct utrace_attached_engine *engine,
1391                       struct task_struct *tsk, struct pt_regs *regs,
1392                       int entry)
1393 {
1394         struct ptrace_state *state = (struct ptrace_state *) engine->data;
1395 #ifdef PTRACE_SYSEMU
1396         if (entry && state->u.live.sysemu)
1397                 tracehook_abort_syscall(regs);
1398 #endif
1399         state->u.live.syscall = 1;
1400         return ptrace_report(engine, tsk,
1401                              ((state->u.live.options & PTRACE_O_TRACESYSGOOD)
1402                               ? 0x80 : 0) | SIGTRAP);
1403 }
1404
1405 static u32
1406 ptrace_report_syscall_entry(struct utrace_attached_engine *engine,
1407                             struct task_struct *tsk, struct pt_regs *regs)
1408 {
1409         return ptrace_report_syscall(engine, tsk, regs, 1);
1410 }
1411
1412 static u32
1413 ptrace_report_syscall_exit(struct utrace_attached_engine *engine,
1414                             struct task_struct *tsk, struct pt_regs *regs)
1415 {
1416         return ptrace_report_syscall(engine, tsk, regs, 0);
1417 }
1418
1419 static u32
1420 ptrace_report_exit(struct utrace_attached_engine *engine,
1421                    struct task_struct *tsk, long orig_code, long *code)
1422 {
1423         struct ptrace_state *state = (struct ptrace_state *) engine->data;
1424         state->u.live.have_eventmsg = 1;
1425         state->u.live.u.eventmsg = *code;
1426         return ptrace_event(engine, tsk, PTRACE_EVENT_EXIT);
1427 }
1428
1429 static int
1430 ptrace_unsafe_exec(struct utrace_attached_engine *engine,
1431                    struct task_struct *tsk)
1432 {
1433         struct ptrace_state *state = (struct ptrace_state *) engine->data;
1434         int unsafe = LSM_UNSAFE_PTRACE;
1435         if (state->u.live.cap_sys_ptrace)
1436                 unsafe = LSM_UNSAFE_PTRACE_CAP;
1437         return unsafe;
1438 }
1439
1440 static struct task_struct *
1441 ptrace_tracer_task(struct utrace_attached_engine *engine,
1442                    struct task_struct *target)
1443 {
1444         struct ptrace_state *state;
1445
1446         /*
1447          * This call is not necessarily made by the target task,
1448          * so ptrace might be getting detached while we run here.
1449          * The state pointer will be NULL if that happens.
1450          */
1451         state = rcu_dereference((struct ptrace_state *) engine->data);
1452
1453         return state == NULL ? NULL : state->parent;
1454 }
1455
1456 static int
1457 ptrace_allow_access_process_vm(struct utrace_attached_engine *engine,
1458                                struct task_struct *target,
1459                                struct task_struct *caller)
1460 {
1461         struct ptrace_state *state;
1462         int ours;
1463
1464         /*
1465          * This call is not necessarily made by the target task,
1466          * so ptrace might be getting detached while we run here.
1467          * The state pointer will be NULL if that happens.
1468          */
1469         rcu_read_lock();
1470         state = rcu_dereference((struct ptrace_state *) engine->data);
1471         ours = (state != NULL
1472                 && ((engine->flags & UTRACE_ACTION_QUIESCE)
1473                     || (target->state == TASK_STOPPED))
1474                 && state->parent == caller);
1475         rcu_read_unlock();
1476
1477         return ours && security_ptrace(caller, target) == 0;
1478 }
1479
1480
/*
 * The utrace engine callbacks that implement ptrace semantics.
 * Engines using these ops are what UTRACE_ATTACH_MATCH_OPS matches
 * in the attach and auto-attach (clone) paths above.
 */
static const struct utrace_engine_ops ptrace_utrace_ops =
{
	.report_syscall_entry = ptrace_report_syscall_entry,
	.report_syscall_exit = ptrace_report_syscall_exit,
	.report_exec = ptrace_report_exec,
	.report_jctl = ptrace_report_jctl,
	.report_signal = ptrace_report_signal,
	.report_vfork_done = ptrace_report_vfork_done,
	.report_clone = ptrace_report_clone,
	.report_exit = ptrace_report_exit,
	.report_death = ptrace_report_death,
	.report_reap = ptrace_report_reap,
	.unsafe_exec = ptrace_unsafe_exec,
	.tracer_task = ptrace_tracer_task,
	.allow_access_process_vm = ptrace_allow_access_process_vm,
};
1497
1498 #endif