backported vs2.1.x fix — NOTE(review): this header describes an irq-handling fix affecting scheduler behavior, which does not match the ptrace/utrace code in this file; confirm the correct changelog entry before relying on it
[linux-2.6.git] / kernel / ptrace.c
1 /*
2  * linux/kernel/ptrace.c
3  *
4  * (C) Copyright 1999 Linus Torvalds
5  *
6  * Common interfaces for "ptrace()" which we do not want
7  * to continually duplicate across every architecture.
8  */
9
10 #include <linux/capability.h>
11 #include <linux/module.h>
12 #include <linux/sched.h>
13 #include <linux/errno.h>
14 #include <linux/mm.h>
15 #include <linux/highmem.h>
16 #include <linux/pagemap.h>
17 #include <linux/smp_lock.h>
18 #include <linux/ptrace.h>
19 #include <linux/security.h>
20 #include <linux/signal.h>
21
22 #include <asm/pgtable.h>
23 #include <asm/uaccess.h>
24
25 #ifdef CONFIG_PTRACE
26 #include <linux/utrace.h>
27 #include <linux/tracehook.h>
28 #include <asm/tracehook.h>
29 #endif
30
31 #include <linux/vs_base.h>
32
33 int getrusage(struct task_struct *, int, struct rusage __user *);
34
35 //#define PTRACE_DEBUG
36
/*
 * Permission check for attaching to @task with ptrace; also used to
 * gate access to sensitive per-task data in /proc.  Returns 0 when
 * access is allowed, -EPERM otherwise, or the result of the security
 * module's ptrace hook.  Callers take task_lock() around this (see
 * ptrace_may_attach() below).
 */
37 int __ptrace_may_attach(struct task_struct *task)
38 {
39         /* May we inspect the given task?
40          * This check is used both for attaching with ptrace
41          * and for allowing access to sensitive information in /proc.
42          *
43          * ptrace_attach denies several cases that /proc allows
44          * because setting up the necessary parent/child relationship
45          * or halting the specified task is impossible.
46          */
47         int dumpable = 0;
48         /* Don't let security modules deny introspection */
49         if (task == current)
50                 return 0;
           /* All of the target's real/effective/saved uids and gids must
            * match our real ids, unless we hold CAP_SYS_PTRACE. */
51         if (((current->uid != task->euid) ||
52              (current->uid != task->suid) ||
53              (current->uid != task->uid) ||
54              (current->gid != task->egid) ||
55              (current->gid != task->sgid) ||
56              (current->gid != task->gid)) && !capable(CAP_SYS_PTRACE))
57                 return -EPERM;
           /* Read barrier before sampling mm->dumpable -- presumably pairs
            * with the write side that updates it; TODO confirm against the
            * mm code in this tree. */
58         smp_rmb();
59         if (task->mm)
60                 dumpable = task->mm->dumpable;
           /* A non-dumpable task (e.g. setuid) may only be traced by a
            * capable tracer. */
61         if (!dumpable && !capable(CAP_SYS_PTRACE))
62                 return -EPERM;
63
64         return security_ptrace(current, task);
65 }
66
/*
 * Boolean wrapper around __ptrace_may_attach(): takes the task lock,
 * runs the permission check, and returns nonzero when attaching (or
 * /proc inspection) is allowed.
 */
int ptrace_may_attach(struct task_struct *task)
{
	int denied;

	task_lock(task);
	denied = __ptrace_may_attach(task);
	task_unlock(task);

	return denied == 0;
}
75
76 /*
77  * Access another process' address space.
78  * Source/target buffer must be kernel space, 
79  * Do not walk the page table directly, use get_user_pages
80  */
81
/*
 * Copy @len bytes between the kernel buffer @buf and @tsk's user
 * address @addr; @write selects the direction.  Returns the number of
 * bytes actually transferred (short on fault), or 0 when the task has
 * no mm (kernel thread or already exited).
 */
82 int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
83 {
84         struct mm_struct *mm;
85         struct vm_area_struct *vma;
86         struct page *page;
87         void *old_buf = buf;
88
89         mm = get_task_mm(tsk);
90         if (!mm)
91                 return 0;
92
93         down_read(&mm->mmap_sem);
94         /* ignore errors, just check how much was successfully transferred */
95         while (len) {
96                 int bytes, ret, offset;
97                 void *maddr;
98
                   /* Fault in and pin one page of the target's memory. */
99                 ret = get_user_pages(tsk, mm, addr, 1,
100                                 write, 1, &page, &vma);
101                 if (ret <= 0)
102                         break;
103
                   /* Transfer no more than the remainder of this page. */
104                 bytes = len;
105                 offset = addr & (PAGE_SIZE-1);
106                 if (bytes > PAGE_SIZE-offset)
107                         bytes = PAGE_SIZE-offset;
108
109                 maddr = kmap(page);
110                 if (write) {
111                         copy_to_user_page(vma, page, addr,
112                                           maddr + offset, buf, bytes);
113                         set_page_dirty_lock(page);
114                 } else {
115                         copy_from_user_page(vma, page, addr,
116                                             buf, maddr + offset, bytes);
117                 }
118                 kunmap(page);
119                 page_cache_release(page);
120                 len -= bytes;
121                 buf += bytes;
122                 addr += bytes;
123         }
124         up_read(&mm->mmap_sem);
125         mmput(mm);
126         
127         return buf - old_buf;
128 }
129
130
131 #ifndef CONFIG_PTRACE
132
/*
 * With CONFIG_PTRACE disabled there is no tracing support at all;
 * fail the system call outright.
 */
133 asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
134 {
135         return -ENOSYS;
136 }
137
138 #else
139
/*
 * Per-tracee bookkeeping for the ptrace compatibility layer built on
 * utrace.  One instance hangs off the utrace engine's ->data pointer
 * and is linked on the tracing parent's ->ptracees list.
 */
140 struct ptrace_state
141 {
142         /*
143          * These elements are always available, even when the struct is
144          * awaiting destruction at the next RCU callback point.
145          */
146         struct utrace_attached_engine *engine;
147         struct task_struct *task; /* Target task.  */
148         struct task_struct *parent; /* Whom we report to.  */
149         struct list_head entry; /* Entry on parent->ptracees list.  */
150
            /*
             * Once detached the same storage holds the RCU head used to
             * free the struct; the live fields are then invalid.
             */
151         union {
152                 struct rcu_head dead;
153                 struct {
154                         u8 options; /* PTRACE_SETOPTIONS bits.  */
155                         unsigned int stopped:1; /* Stopped for report.  */
156                         unsigned int reported:1; /* wait already reported.  */
157                         unsigned int syscall:1; /* Reporting for syscall.  */
158 #ifdef PTRACE_SYSEMU
159                         unsigned int sysemu:1; /* PTRACE_SYSEMU in progress. */
160 #endif
161                         unsigned int have_eventmsg:1; /* u.eventmsg valid. */
162                         unsigned int cap_sys_ptrace:1; /* Tracer capable.  */
163
164                         union
165                         {
166                                 unsigned long eventmsg;
167                                 siginfo_t *siginfo;
168                         } u;
169                 } live;
170         } u;
171 };
172
173 static const struct utrace_engine_ops ptrace_utrace_ops; /* Initialized below. */
174
175
/* Add @state to its tracer's ->ptracees list; task_lock protects the
 * list structure for non-RCU readers. */
176 static void
177 ptrace_state_link(struct ptrace_state *state)
178 {
179         task_lock(state->parent);
180         list_add_rcu(&state->entry, &state->parent->ptracees);
181         task_unlock(state->parent);
182 }
183
/* Remove @state from its tracer's ->ptracees list; RCU readers may
 * still see the entry until a grace period passes (see ptrace_done). */
184 static void
185 ptrace_state_unlink(struct ptrace_state *state)
186 {
187         task_lock(state->parent);
188         list_del_rcu(&state->entry);
189         task_unlock(state->parent);
190 }
191
/*
 * Allocate and initialize the ptrace_state for @engine tracing
 * @target on behalf of @parent, link it on the parent's list, and
 * publish it via engine->data.  Returns 0 on success or -ENOMEM.
 */
192 static int
193 ptrace_setup(struct task_struct *target, struct utrace_attached_engine *engine,
194              struct task_struct *parent, u8 options, int cap_sys_ptrace)
195 {
196         struct ptrace_state *state = kzalloc(sizeof *state, GFP_USER);
197         if (unlikely(state == NULL))
198                 return -ENOMEM;
199
200         state->engine = engine;
201         state->task = target;
202         state->parent = parent;
203         state->u.live.options = options;
204         state->u.live.cap_sys_ptrace = cap_sys_ptrace;
205         ptrace_state_link(state);
206
            /* Publish only after the state is fully initialized. */
207         BUG_ON(engine->data != 0);
208         rcu_assign_pointer(engine->data, (unsigned long) state);
209
210         return 0;
211 }
212
/* RCU callback: actually free a detached ptrace_state. */
213 static void
214 ptrace_state_free(struct rcu_head *rhead)
215 {
216         struct ptrace_state *state = container_of(rhead,
217                                                   struct ptrace_state, u.dead);
218         kfree(state);
219 }
220
/* Defer freeing @state until after an RCU grace period; lockless
 * readers may still hold a pointer obtained via engine->data. */
221 static void
222 ptrace_done(struct ptrace_state *state)
223 {
224         INIT_RCU_HEAD(&state->u.dead);
225         call_rcu(&state->u.dead, ptrace_state_free);
226 }
227
228 /*
229  * Update the tracing engine state to match the new ptrace state.
230  */
/*
 * Recompute the utrace event mask for @target from the ptrace options
 * plus the always-reported events, record whether the tracee is being
 * held stopped (QUIESCE in @flags), and push the result to utrace.
 */
231 static void
232 ptrace_update(struct task_struct *target, struct utrace_attached_engine *engine,
233               unsigned long flags)
234 {
235         struct ptrace_state *state = (struct ptrace_state *) engine->data;
236
237         /*
238          * These events are always reported.
239          */
240         flags |= (UTRACE_EVENT(DEATH) | UTRACE_EVENT(EXEC)
241                   | UTRACE_EVENT_SIGNAL_ALL);
242
243         /*
244          * We always have to examine clone events to check for CLONE_PTRACE.
245          */
246         flags |= UTRACE_EVENT(CLONE);
247
248         /*
249          * PTRACE_SETOPTIONS can request more events.
250          */
251         if (state->u.live.options & PTRACE_O_TRACEEXIT)
252                 flags |= UTRACE_EVENT(EXIT);
253         if (state->u.live.options & PTRACE_O_TRACEVFORKDONE)
254                 flags |= UTRACE_EVENT(VFORK_DONE);
255
256         /*
257          * ptrace always inhibits normal parent reaping.
258          * But for a corner case we sometimes see the REAP event instead.
259          */
260         flags |= UTRACE_ACTION_NOREAP | UTRACE_EVENT(REAP);
261
            /*
             * When resuming (no QUIESCE), clear per-stop state: drop the
             * stale siginfo pointer and any pending exit_code.
             */
262         state->u.live.stopped = (flags & UTRACE_ACTION_QUIESCE) != 0;
263         if (!state->u.live.stopped) {
264                 if (!state->u.live.have_eventmsg)
265                         state->u.live.u.siginfo = NULL;
266                 if (!(target->flags & PF_EXITING))
267                         target->exit_code = 0;
268         }
269         utrace_set_flags(target, engine, flags);
270 }
271
/*
 * PTRACE_TRACEME: attach an exclusive ptrace engine to the current
 * task with its parent as the tracer.  -EEXIST from utrace (already
 * traced) is reported as -EPERM, matching classic ptrace.
 */
272 static int ptrace_traceme(void)
273 {
274         struct utrace_attached_engine *engine;
275         int retval;
276
277         engine = utrace_attach(current, (UTRACE_ATTACH_CREATE
278                                          | UTRACE_ATTACH_EXCLUSIVE
279                                          | UTRACE_ATTACH_MATCH_OPS),
280                                &ptrace_utrace_ops, 0UL);
281
282         if (IS_ERR(engine)) {
283                 retval = PTR_ERR(engine);
284                 if (retval == -EEXIST)
285                         retval = -EPERM;
286         }
287         else {
288                 task_lock(current);
289                 retval = security_ptrace(current->parent, current);
290                 task_unlock(current);
291                 if (!retval)
292                         retval = ptrace_setup(current, engine,
293                                               current->parent, 0, 0);
                    /* On any failure, tear the freshly attached engine down. */
294                 if (retval)
295                         utrace_detach(current, engine);
296                 else
297                         ptrace_update(current, engine, 0);
298         }
299
300         return retval;
301 }
302
/*
 * PTRACE_ATTACH: attach an exclusive ptrace engine to @task and send
 * it SIGSTOP, per traditional ptrace semantics.  Refuses pid <= 1,
 * members of our own thread group, and kernel threads (no mm).
 */
303 static int ptrace_attach(struct task_struct *task)
304 {
305         struct utrace_attached_engine *engine;
306         int retval;
307
308         retval = -EPERM;
309         if (task->pid <= 1)
310                 goto bad;
311         if (task->tgid == current->tgid)
312                 goto bad;
313         if (!task->mm)          /* kernel threads */
314                 goto bad;
315
316         engine = utrace_attach(task, (UTRACE_ATTACH_CREATE
317                                       | UTRACE_ATTACH_EXCLUSIVE
318                                       | UTRACE_ATTACH_MATCH_OPS),
319                                &ptrace_utrace_ops, 0);
320         if (IS_ERR(engine)) {
321                 retval = PTR_ERR(engine);
322                 if (retval == -EEXIST)
323                         retval = -EPERM;
324                 goto bad;
325         }
326
            /* If the permission check fails, retval is still the -EPERM
             * set above and the engine is detached just below. */
327         if (ptrace_may_attach(task))
328                 retval = ptrace_setup(task, engine, current, 0,
329                                       capable(CAP_SYS_PTRACE));
330         if (retval)
331                 utrace_detach(task, engine);
332         else {
333                 int stopped;
334
335                 /* Go */
336                 ptrace_update(task, engine, 0);
337                 force_sig_specific(SIGSTOP, task);
338
                    /* Sample the stopped state under the siglock. */
339                 spin_lock_irq(&task->sighand->siglock);
340                 stopped = (task->state == TASK_STOPPED);
341                 spin_unlock_irq(&task->sighand->siglock);
342
343                 if (stopped) {
344                         /*
345                          * Do now the regset 0 writeback that we do on every
346                          * stop, since it's never been done.  On register
347                          * window machines, this makes sure the user memory
348                          * backing the register data is up to date.
349                          */
350                         const struct utrace_regset *regset;
351                         regset = utrace_regset(task, engine,
352                                                utrace_native_view(task), 0);
353                         if (regset->writeback)
354                                 (*regset->writeback)(task, regset, 1);
355                 }
356         }
357
358 bad:
359         return retval;
360 }
361
/*
 * PTRACE_DETACH bookkeeping: drop the utrace engine, unlink and
 * schedule freeing of our state.  Always returns 0.
 */
362 static int ptrace_detach(struct task_struct *task,
363                          struct utrace_attached_engine *engine)
364 {
365         struct ptrace_state *state = (struct ptrace_state *) engine->data;
366         /*
367          * Clearing ->data before detach makes sure an unrelated task
368          * calling into ptrace_tracer_task won't try to touch stale state.
369          */
370         rcu_assign_pointer(engine->data, 0UL);
371         utrace_detach(task, engine);
372         ptrace_state_unlink(state);
373         ptrace_done(state);
374         return 0;
375 }
376
377
378 /*
379  * This is called when we are exiting.  We must stop all our ptracing.
380  */
381 void
382 ptrace_exit(struct task_struct *tsk)
383 {
384         rcu_read_lock();
385         if (unlikely(!list_empty(&tsk->ptracees))) {
386                 struct ptrace_state *state, *next;
387
388                 /*
389                  * First detach the utrace layer from all the tasks.
390                  * We don't want to hold any locks while calling utrace_detach.
391                  */
392                 list_for_each_entry_rcu(state, &tsk->ptracees, entry) {
393                         rcu_assign_pointer(state->engine->data, 0UL);
394                         utrace_detach(state->task, state->engine);
395                 }
396
397                 /*
398                  * Now clear out our list and clean up our data structures.
399                  * The task_lock protects our list structure.
400                  */
401                 task_lock(tsk);
402                 list_for_each_entry_safe(state, next, &tsk->ptracees, entry) {
403                         list_del_rcu(&state->entry);
404                         ptrace_done(state);
405                 }
406                 task_unlock(tsk);
407         }
408         rcu_read_unlock();
409
            /* Every tracee must be gone by now. */
410         BUG_ON(!list_empty(&tsk->ptracees));
411 }
412
/*
 * Deliver the signal (if any) the tracer passed with a resuming
 * ptrace request.  Returns 0 on success or when no signal was given,
 * -EIO for an invalid signal number.
 */
413 static int
414 ptrace_induce_signal(struct task_struct *target,
415                      struct utrace_attached_engine *engine,
416                      long signr)
417 {
418         struct ptrace_state *state = (struct ptrace_state *) engine->data;
419
420         if (signr == 0)
421                 return 0;
422
423         if (!valid_signal(signr))
424                 return -EIO;
425
426         if (state->u.live.syscall) {
427                 /*
428                  * This is the traditional ptrace behavior when given
429                  * a signal to resume from a syscall tracing stop.
430                  */
431                 send_sig(signr, target, 1);
432         }
            /* Otherwise, resuming from a signal stop: re-inject the saved
             * (possibly rewritten) siginfo. */
433         else if (!state->u.live.have_eventmsg && state->u.live.u.siginfo) {
434                 siginfo_t *info = state->u.live.u.siginfo;
435
436                 /* Update the siginfo structure if the signal has
437                    changed.  If the debugger wanted something
438                    specific in the siginfo structure then it should
439                    have updated *info via PTRACE_SETSIGINFO.  */
440                 if (signr != info->si_signo) {
441                         info->si_signo = signr;
442                         info->si_errno = 0;
443                         info->si_code = SI_USER;
444                         info->si_pid = current->pid;
445                         info->si_uid = current->uid;
446                 }
447
448                 return utrace_inject_signal(target, engine,
449                                             UTRACE_ACTION_RESUME, info, NULL);
450         }
451
452         return 0;
453 }
454
455 fastcall int
456 ptrace_regset_access(struct task_struct *target,
457                      struct utrace_attached_engine *engine,
458                      const struct utrace_regset_view *view,
459                      int setno, unsigned long offset, unsigned int size,
460                      void __user *data, int write)
461 {
462         const struct utrace_regset *regset = utrace_regset(target, engine,
463                                                            view, setno);
464         int ret;
465
466         if (unlikely(regset == NULL))
467                 return -EIO;
468
469         if (size == (unsigned int) -1)
470                 size = regset->size * regset->n;
471
472         if (write) {
473                 if (!access_ok(VERIFY_READ, data, size))
474                         ret = -EIO;
475                 else
476                         ret = (*regset->set)(target, regset,
477                                              offset, size, NULL, data);
478         }
479         else {
480                 if (!access_ok(VERIFY_WRITE, data, size))
481                         ret = -EIO;
482                 else
483                         ret = (*regset->get)(target, regset,
484                                              offset, size, NULL, data);
485         }
486
487         return ret;
488 }
489
490 fastcall int
491 ptrace_onereg_access(struct task_struct *target,
492                      struct utrace_attached_engine *engine,
493                      const struct utrace_regset_view *view,
494                      int setno, unsigned long regno,
495                      void __user *data, int write)
496 {
497         const struct utrace_regset *regset = utrace_regset(target, engine,
498                                                            view, setno);
499         unsigned int pos;
500         int ret;
501
502         if (unlikely(regset == NULL))
503                 return -EIO;
504
505         if (regno < regset->bias || regno >= regset->bias + regset->n)
506                 return -EINVAL;
507
508         pos = (regno - regset->bias) * regset->size;
509
510         if (write) {
511                 if (!access_ok(VERIFY_READ, data, regset->size))
512                         ret = -EIO;
513                 else
514                         ret = (*regset->set)(target, regset, pos, regset->size,
515                                              NULL, data);
516         }
517         else {
518                 if (!access_ok(VERIFY_WRITE, data, regset->size))
519                         ret = -EIO;
520                 else
521                         ret = (*regset->get)(target, regset, pos, regset->size,
522                                              NULL, data);
523         }
524
525         return ret;
526 }
527
/*
 * Transfer @size bytes at @addr of a ptrace compatibility layout
 * (e.g. struct user) described by @layout, reading or writing through
 * the underlying regsets.  Exactly one of @udata/@kdata is the
 * buffer.  Returns 0 on success, -EIO on a bad address/alignment or
 * missing regset, -EFAULT on a failed user copy.
 */
528 fastcall int
529 ptrace_layout_access(struct task_struct *target,
530                      struct utrace_attached_engine *engine,
531                      const struct utrace_regset_view *view,
532                      const struct ptrace_layout_segment layout[],
533                      unsigned long addr, unsigned int size,
534                      void __user *udata, void *kdata, int write)
535 {
536         const struct ptrace_layout_segment *seg;
537         int ret = -EIO;
538
539         if (kdata == NULL &&
540             !access_ok(write ? VERIFY_READ : VERIFY_WRITE, udata, size))
541                 return -EIO;
542
543         seg = layout;
544         do {
545                 unsigned int pos, n;
546
                    /* Advance to the segment containing addr; the table is
                     * terminated by a segment with end == 0. */
547                 while (addr >= seg->end && seg->end != 0)
548                         ++seg;
549
550                 if (addr < seg->start || addr >= seg->end)
551                         return -EIO;
552
                    /* Offset within the regset, and bytes left in segment. */
553                 pos = addr - seg->start + seg->offset;
554                 n = min(size, seg->end - (unsigned int) addr);
555
556                 if (unlikely(seg->regset == (unsigned int) -1)) {
557                         /*
558                          * This is a no-op/zero-fill portion of struct user.
559                          */
560                         ret = 0;
561                         if (!write) {
562                                 if (kdata)
563                                         memset(kdata, 0, n);
564                                 else if (clear_user(udata, n))
565                                         ret = -EFAULT;
566                         }
567                 }
568                 else {
569                         unsigned int align;
570                         const struct utrace_regset *regset = utrace_regset(
571                                 target, engine, view, seg->regset);
572                         if (unlikely(regset == NULL))
573                                 return -EIO;
574
575                         /*
576                          * A ptrace compatibility layout can do a misaligned
577                          * regset access, e.g. word access to larger data.
578                          * An arch's compat layout can be this way only if
579                          * it is actually ok with the regset code despite the
580                          * regset->align setting.
581                          */
582                         align = min(regset->align, size);
583                         if ((pos & (align - 1))
584                             || pos >= regset->n * regset->size)
585                                 return -EIO;
586
587                         if (write)
588                                 ret = (*regset->set)(target, regset,
589                                                      pos, n, kdata, udata);
590                         else
591                                 ret = (*regset->get)(target, regset,
592                                                      pos, n, kdata, udata);
593                 }
594
                    /* Advance the buffer and address to the next chunk. */
595                 if (kdata)
596                         kdata += n;
597                 else
598                         udata += n;
599                 addr += n;
600                 size -= n;
601         } while (ret == 0 && size > 0);
602
603         return ret;
604 }
605
606
/*
 * Common setup for a ptrace request on @pid: resolve the child, check
 * that it is our ptrace tracee, and for most requests that it is
 * already stopped and quiescent.
 *
 * Return convention: -EIO is the "go ahead" sentinel -- *childp,
 * *enginep and *statep are filled in and the caller must
 * put_task_struct(child) when done.  Any other return is final (for
 * PTRACE_TRACEME/PTRACE_ATTACH it is the request's own result) and
 * all references have been dropped.
 */
607 static int
608 ptrace_start(long pid, long request,
609              struct task_struct **childp,
610              struct utrace_attached_engine **enginep,
611              struct ptrace_state **statep)
612
613 {
614         struct task_struct *child;
615         struct utrace_attached_engine *engine;
616         struct ptrace_state *state;
617         int ret;
618
619         if (request == PTRACE_TRACEME)
620                 return ptrace_traceme();
621
622         ret = -ESRCH;
623         read_lock(&tasklist_lock);
624         child = find_task_by_pid(pid);
625         if (child)
626                 get_task_struct(child);
627         read_unlock(&tasklist_lock);
628 #ifdef PTRACE_DEBUG
629         printk("ptrace pid %ld => %p\n", pid, child);
630 #endif
631         if (!child)
632                 goto out;
633
634         ret = -EPERM;
635         if (pid == 1)           /* you may not mess with init */
636                 goto out_tsk;
637
            /* vserver isolation: the child must be in a visible context. */
638         if (!vx_check(vx_task_xid(child), VX_WATCH|VX_IDENT))
639                 goto out_tsk;
640
641         if (request == PTRACE_ATTACH) {
642                 ret = ptrace_attach(child);
643                 goto out_tsk;
644         }
645
            /* Find our own ptrace engine on the child, if any. */
646         engine = utrace_attach(child, UTRACE_ATTACH_MATCH_OPS,
647                                &ptrace_utrace_ops, 0);
648         ret = -ESRCH;
649         if (IS_ERR(engine) || engine == NULL)
650                 goto out_tsk;
651         rcu_read_lock();
652         state = rcu_dereference((struct ptrace_state *) engine->data);
            /* Only the recorded tracer may operate on this tracee. */
653         if (state == NULL || state->parent != current) {
654                 rcu_read_unlock();
655                 goto out_tsk;
656         }
657         rcu_read_unlock();
658
659         /*
660          * Traditional ptrace behavior demands that the target already be
661          * quiescent, but not dead.
662          */
663         if (request != PTRACE_KILL && !state->u.live.stopped) {
664 #ifdef PTRACE_DEBUG
665                 printk("%d not stopped (%lx)\n", child->pid, child->state);
666 #endif
667                 if (child->state != TASK_STOPPED)
668                         goto out_tsk;
669                 utrace_set_flags(child, engine,
670                                  engine->flags | UTRACE_ACTION_QUIESCE);
671         }
672
673         /*
674          * We do this for all requests to match traditional ptrace behavior.
675          * If the machine state synchronization done at context switch time
676          * includes e.g. writing back to user memory, we want to make sure
677          * that has finished before a PTRACE_PEEKDATA can fetch the results.
678          * On most machines, only regset data is affected by context switch
679          * and calling utrace_regset later on will take care of that, so
680          * this is superfluous.
681          *
682          * To do this purely in utrace terms, we could do:
683          *  (void) utrace_regset(child, engine, utrace_native_view(child), 0);
684          */
685         wait_task_inactive(child);
686
687         if (child->exit_state)
688                 goto out_tsk;
689
            /* Success: hand out the references; -EIO tells the caller to
             * proceed with the request. */
690         *childp = child;
691         *enginep = engine;
692         *statep = state;
693         return -EIO;
694
695 out_tsk:
696         put_task_struct(child);
697 out:
698         return ret;
699 }
700
/*
 * Handle the ptrace requests common to all machines after
 * arch_ptrace() has declined them.  Returns -EIO for requests it
 * does not recognize.
 */
701 static int
702 ptrace_common(long request, struct task_struct *child,
703               struct utrace_attached_engine *engine,
704               struct ptrace_state *state,
705               unsigned long addr, long data)
706 {
707         unsigned long flags;
708         int ret = -EIO;
709
710         switch (request) {
711         case PTRACE_DETACH:
712                 /*
713                  * Detach a process that was attached.
714                  */
715                 ret = ptrace_induce_signal(child, engine, data);
716                 if (!ret)
717                         ret = ptrace_detach(child, engine);
718                 break;
719
720                 /*
721                  * These are the operations that resume the child running.
722                  */
723         case PTRACE_KILL:
724                 data = SIGKILL;
                    /* fall through: resume the child with SIGKILL */
725         case PTRACE_CONT:
726         case PTRACE_SYSCALL:
727 #ifdef PTRACE_SYSEMU
728         case PTRACE_SYSEMU:
729         case PTRACE_SYSEMU_SINGLESTEP:
730 #endif
731 #ifdef PTRACE_SINGLEBLOCK
732         case PTRACE_SINGLEBLOCK:
                    /* Reject block-step on machines without hardware support
                     * (break leaves ret == -EIO). */
733 # ifdef ARCH_HAS_BLOCK_STEP
734                 if (! ARCH_HAS_BLOCK_STEP)
735 # endif
736                         if (request == PTRACE_SINGLEBLOCK)
737                                 break;
738 #endif
739         case PTRACE_SINGLESTEP:
                    /* Likewise reject single-step without hardware support. */
740 #ifdef ARCH_HAS_SINGLE_STEP
741                 if (! ARCH_HAS_SINGLE_STEP)
742 #endif
743                         if (request == PTRACE_SINGLESTEP
744 #ifdef PTRACE_SYSEMU_SINGLESTEP
745                             || request == PTRACE_SYSEMU_SINGLESTEP
746 #endif
747                                 )
748                                 break;
749
750                 ret = ptrace_induce_signal(child, engine, data);
751                 if (ret)
752                         break;
753
754
755                 /*
756                  * Reset the action flags without QUIESCE, so it resumes.
757                  */
758                 flags = 0;
759 #ifdef PTRACE_SYSEMU
760                 state->u.live.sysemu = (request == PTRACE_SYSEMU_SINGLESTEP
761                                         || request == PTRACE_SYSEMU);
762 #endif
763                 if (request == PTRACE_SINGLESTEP
764 #ifdef PTRACE_SYSEMU
765                     || request == PTRACE_SYSEMU_SINGLESTEP
766 #endif
767                         )
768                         flags |= UTRACE_ACTION_SINGLESTEP;
769 #ifdef PTRACE_SINGLEBLOCK
770                 else if (request == PTRACE_SINGLEBLOCK)
771                         flags |= UTRACE_ACTION_BLOCKSTEP;
772 #endif
773                 if (request == PTRACE_SYSCALL)
774                         flags |= UTRACE_EVENT_SYSCALL;
775 #ifdef PTRACE_SYSEMU
776                 else if (request == PTRACE_SYSEMU
777                          || request == PTRACE_SYSEMU_SINGLESTEP)
778                         flags |= UTRACE_EVENT(SYSCALL_ENTRY);
779 #endif
780                 ptrace_update(child, engine, flags);
781                 ret = 0;
782                 break;
783
784 #ifdef PTRACE_OLDSETOPTIONS
785         case PTRACE_OLDSETOPTIONS:
786 #endif
787         case PTRACE_SETOPTIONS:
788                 ret = -EINVAL;
789                 if (data & ~PTRACE_O_MASK)
790                         break;
791                 state->u.live.options = data;
                    /* Re-derive the event mask; keep the child quiescent. */
792                 ptrace_update(child, engine, UTRACE_ACTION_QUIESCE);
793                 ret = 0;
794                 break;
795         }
796
797         return ret;
798 }
799
800
801 asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
802 {
803         struct task_struct *child;
804         struct utrace_attached_engine *engine;
805         struct ptrace_state *state;
806         long ret, val;
807
808 #ifdef PTRACE_DEBUG
809         printk("%d sys_ptrace(%ld, %ld, %lx, %lx)\n",
810                current->pid, request, pid, addr, data);
811 #endif
812
813         ret = ptrace_start(pid, request, &child, &engine, &state);
814         if (ret != -EIO)
815                 goto out;
816
817         val = 0;
818         ret = arch_ptrace(&request, child, engine, addr, data, &val);
819         if (ret != -ENOSYS) {
820                 if (ret == 0) {
821                         ret = val;
822                         force_successful_syscall_return();
823                 }
824                 goto out_tsk;
825         }
826
827         switch (request) {
828         default:
829                 ret = ptrace_common(request, child, engine, state, addr, data);
830                 break;
831
832         case PTRACE_PEEKTEXT: /* read word at location addr. */
833         case PTRACE_PEEKDATA: {
834                 unsigned long tmp;
835                 int copied;
836
837                 copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
838                 ret = -EIO;
839                 if (copied != sizeof(tmp))
840                         break;
841                 ret = put_user(tmp, (unsigned long __user *) data);
842                 break;
843         }
844
845         case PTRACE_POKETEXT: /* write the word at location addr. */
846         case PTRACE_POKEDATA:
847                 ret = 0;
848                 if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data))
849                         break;
850                 ret = -EIO;
851                 break;
852
853         case PTRACE_GETEVENTMSG:
854                 ret = put_user(state->u.live.have_eventmsg
855                                ? state->u.live.u.eventmsg : 0L,
856                                (unsigned long __user *) data);
857                 break;
858         case PTRACE_GETSIGINFO:
859                 ret = -EINVAL;
860                 if (!state->u.live.have_eventmsg && state->u.live.u.siginfo)
861                         ret = copy_siginfo_to_user((siginfo_t __user *) data,
862                                                    state->u.live.u.siginfo);
863                 break;
864         case PTRACE_SETSIGINFO:
865                 ret = -EINVAL;
866                 if (!state->u.live.have_eventmsg && state->u.live.u.siginfo
867                     && copy_from_user(state->u.live.u.siginfo,
868                                       (siginfo_t __user *) data,
869                                       sizeof(siginfo_t)))
870                         ret = -EFAULT;
871                 break;
872         }
873
874 out_tsk:
875         put_task_struct(child);
876 out:
877 #ifdef PTRACE_DEBUG
878         printk("%d ptrace -> %x\n", current->pid, ret);
879 #endif
880         return ret;
881 }
882
883
884 #ifdef CONFIG_COMPAT
885 #include <linux/compat.h>
886
/*
 * 32-bit compatibility entry point for ptrace().  Mirrors the native
 * sys_ptrace() flow: pin the target, let the architecture handle
 * compat-specific requests, then fall back to the common dispatch.
 * Word transfers use compat_ulong_t so a 32-bit tracer sees 32-bit
 * words.  Returns a negative errno or a request-specific value.
 */
asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
				  compat_ulong_t addr, compat_long_t cdata)
{
	/* Zero-extend the 32-bit data argument once for the common code. */
	const unsigned long data = (unsigned long) (compat_ulong_t) cdata;
	struct task_struct *child;
	struct utrace_attached_engine *engine;
	struct ptrace_state *state;
	compat_long_t ret, val;

#ifdef PTRACE_DEBUG
	printk("%d compat_sys_ptrace(%d, %d, %x, %x)\n",
	       current->pid, request, pid, addr, cdata);
#endif
	/*
	 * ptrace_start() validates and pins the target.  -EIO here means
	 * "request not consumed, continue below"; any other value is the
	 * final result.
	 */
	ret = ptrace_start(pid, request, &child, &engine, &state);
	if (ret != -EIO)
		goto out;

	val = 0;
	/* Architecture hook for compat-only requests (e.g. register sets). */
	ret = arch_compat_ptrace(&request, child, engine, addr, cdata, &val);
	if (ret != -ENOSYS) {
		if (ret == 0) {
			ret = val;
			/* val may look like a -errno value; don't let it. */
			force_successful_syscall_return();
		}
		goto out_tsk;
	}

	switch (request) {
	default:
		/* All remaining requests share the native implementation. */
		ret = ptrace_common(request, child, engine, state, addr, data);
		break;

	case PTRACE_PEEKTEXT: /* read word at location addr. */
	case PTRACE_PEEKDATA: {
		compat_ulong_t tmp;
		int copied;

		copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
		ret = -EIO;
		/* A short read is reported as failure, per ptrace tradition. */
		if (copied != sizeof(tmp))
			break;
		ret = put_user(tmp, (compat_ulong_t __user *) data);
		break;
	}

	case PTRACE_POKETEXT: /* write the word at location addr. */
	case PTRACE_POKEDATA:
		ret = 0;
		/* Write the 32-bit cdata value directly into the tracee. */
		if (access_process_vm(child, addr, &cdata, sizeof(cdata), 1) == sizeof(cdata))
			break;
		ret = -EIO;
		break;

	case PTRACE_GETEVENTMSG:
		/* Report 0 when the last stop carried siginfo instead. */
		ret = put_user(state->u.live.have_eventmsg
			       ? state->u.live.u.eventmsg : 0L,
			       (compat_long_t __user *) data);
		break;
	case PTRACE_GETSIGINFO:
		/* Only valid while stopped with pending siginfo. */
		ret = -EINVAL;
		if (!state->u.live.have_eventmsg && state->u.live.u.siginfo)
			ret = copy_siginfo_to_user32(
				(struct compat_siginfo __user *) data,
				state->u.live.u.siginfo);
		break;
	case PTRACE_SETSIGINFO:
		/* Replace the pending siginfo, if there is one to replace. */
		ret = -EINVAL;
		if (!state->u.live.have_eventmsg && state->u.live.u.siginfo
		    && copy_siginfo_from_user32(
			    state->u.live.u.siginfo,
			    (struct compat_siginfo __user *) data))
			ret = -EFAULT;
		break;
	}

out_tsk:
	put_task_struct(child);
out:
#ifdef PTRACE_DEBUG
	printk("%d ptrace -> %x\n", current->pid, ret);
#endif
	return ret;
}
970 #endif
971
972
973 /*
974  * We're called with tasklist_lock held for reading.
975  * If we return -ECHILD or zero, next_thread(tsk) must still be valid to use.
976  * If we return another error code, or a successful PID value, we
977  * release tasklist_lock first.
978  */
int
ptrace_do_wait(struct task_struct *tsk,
	       pid_t pid, int options, struct siginfo __user *infop,
	       int __user *stat_addr, struct rusage __user *rusagep)
{
	struct ptrace_state *state;
	struct task_struct *p;
	int err = -ECHILD;
	int why, status;

	/*
	 * Scan the tasks we are ptracing for one matching the wait
	 * criteria.  The ptracees list is traversed under RCU; entries
	 * are unlinked when the tracer detaches.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(state, &tsk->ptracees, entry) {
		p = state->task;

		/*
		 * Standard wait4() pid selection: pid > 0 matches that
		 * exact pid, pid == 0 our own process group, pid < -1
		 * process group -pid, and pid == -1 matches anyone.
		 */
		if (pid > 0) {
			if (p->pid != pid)
				continue;
		} else if (!pid) {
			if (process_group(p) != process_group(current))
				continue;
		} else if (pid != -1) {
			if (process_group(p) != -pid)
				continue;
		}
		/*
		 * __WCLONE selects children that do not report via
		 * SIGCHLD; __WALL selects both kinds.
		 */
		if (((p->exit_signal != SIGCHLD) ^ ((options & __WCLONE) != 0))
		    && !(options & __WALL))
			continue;
		if (security_task_wait(p))
			continue;

		/*
		 * A matching tracee exists, so a later wait could
		 * succeed: from here on a miss means "no event yet"
		 * (return 0) rather than -ECHILD.
		 */
		err = 0;
		if (state->u.live.reported)
			continue;

		if (state->u.live.stopped)
			goto found;
		/* A group stop we have not yet recorded in our state. */
		if ((p->state & (TASK_TRACED | TASK_STOPPED))
		    && (p->signal->flags & SIGNAL_STOP_STOPPED))
			goto found;
		if (p->exit_state == EXIT_ZOMBIE) {
			if (!likely(options & WEXITED))
				continue;
			/* The leader stays unreported while the group lives. */
			if (delay_group_leader(p))
				continue;
			goto found;
		}
		// XXX should handle WCONTINUED
	}
	rcu_read_unlock();
	return err;

found:
	rcu_read_unlock();

	BUG_ON(state->parent != tsk);

	/* Decode the event into a (why, status) pair for siginfo/status. */
	if (p->exit_state) {
		if (unlikely(p->parent == state->parent))
			/*
			 * This is our natural child we were ptracing.
			 * When it dies it detaches (see ptrace_report_death).
			 * So we're seeing it here in a race.  When it
			 * finishes detaching it will become reapable in
			 * the normal wait_task_zombie path instead.
			 */
			return 0;
		if ((p->exit_code & 0x7f) == 0) {
			why = CLD_EXITED;
			status = p->exit_code >> 8;
		} else {
			why = (p->exit_code & 0x80) ? CLD_DUMPED : CLD_KILLED;
			status = p->exit_code & 0xff;
		}
	}
	else {
		/* Still alive: a ptrace stop, encoded as "stopped" status. */
		why = CLD_TRAPPED;
		status = (p->exit_code << 8) | 0x7f;
	}

	/*
	 * At this point we are committed to a successful return
	 * or a user error return.  Release the tasklist_lock.
	 */
	read_unlock(&tasklist_lock);

	if (rusagep)
		err = getrusage(p, RUSAGE_BOTH, rusagep);
	if (infop) {
		/* Fill in the siginfo the way a real SIGCHLD would. */
		if (!err)
			err = put_user(SIGCHLD, &infop->si_signo);
		if (!err)
			err = put_user(0, &infop->si_errno);
		if (!err)
			err = put_user((short)why, &infop->si_code);
		if (!err)
			err = put_user(p->pid, &infop->si_pid);
		if (!err)
			err = put_user(p->uid, &infop->si_uid);
		if (!err)
			err = put_user(status, &infop->si_status);
	}
	if (!err && stat_addr)
		err = put_user(status, stat_addr);

	if (!err) {
		struct utrace *utrace;

		err = p->pid;

		/*
		 * If this was a non-death report, the child might now be
		 * detaching on death in the same race possible in the
		 * p->exit_state check above.  So check for p->utrace being
		 * NULL, then we don't need to update the state any more.
		 */
		rcu_read_lock();
		utrace = rcu_dereference(p->utrace);
		if (likely(utrace != NULL)) {
			utrace_lock(utrace);
			if (unlikely(state->u.live.reported))
				/*
				 * Another thread in the group got here
				 * first and reaped it before we locked.
				 */
				err = -ERESTARTNOINTR;
			state->u.live.reported = 1;
			utrace_unlock(utrace);
		}
		rcu_read_unlock();

		/* A death report consumes the tracee: detach it now. */
		if (err > 0 && why != CLD_TRAPPED)
			ptrace_detach(p, state->engine);
	}

	return err;
}
1115
/*
 * Tell @parent about an event of type @why (CLD_TRAPPED, CLD_STOPPED,
 * CLD_CONTINUED, CLD_EXITED) on @tsk: send SIGCHLD unless the parent's
 * sigaction suppresses it, and always wake any wait4() sleepers.
 */
static void
do_notify(struct task_struct *tsk, struct task_struct *parent, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *sighand;
	int sa_mask;		/* sa_flags bit that suppresses the signal */

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	info.si_pid = tsk->pid;
	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = cputime_to_jiffies(tsk->utime);
	info.si_stime = cputime_to_jiffies(tsk->stime);

	sa_mask = SA_NOCLDSTOP;
	info.si_code = why;
	/* Default si_status for the CLD_TRAPPED case: the stop signal. */
	info.si_status = tsk->exit_code & 0x7f;
	if (why == CLD_CONTINUED)
		info.si_status = SIGCONT;
	else if (why == CLD_STOPPED)
		info.si_status = tsk->signal->group_exit_code & 0x7f;
	else if (why == CLD_EXITED) {
		/* Death reports honor SA_NOCLDWAIT instead of SA_NOCLDSTOP. */
		sa_mask = SA_NOCLDWAIT;
		if (tsk->exit_code & 0x80)
			info.si_code = CLD_DUMPED;
		else if (tsk->exit_code & 0x7f)
			info.si_code = CLD_KILLED;
		else {
			info.si_code = CLD_EXITED;
			info.si_status = tsk->exit_code >> 8;
		}
	}

	sighand = parent->sighand;
	/*
	 * siglock must be held with interrupts disabled; use irqsave
	 * (not plain spin_lock_irq) because our callers' irq state varies.
	 */
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & sa_mask))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	wake_up_interruptible_sync(&parent->signal->wait_chldexit);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}
1163
/*
 * Enter a ptrace stop from the tracee's own context: quiesce the task,
 * publish the wait status @code, and notify the tracer via
 * do_notify(CLD_TRAPPED).  The returned UTRACE_ACTION_* mask is acted
 * on by the utrace core.
 */
static u32
ptrace_report(struct utrace_attached_engine *engine, struct task_struct *tsk,
	      int code)
{
	struct ptrace_state *state = (struct ptrace_state *) engine->data;
	const struct utrace_regset *regset;

#ifdef PTRACE_DEBUG
	printk("%d ptrace_report %d engine %p state %p code %x parent %d (%p)\n",
	       current->pid, tsk->pid, engine, state, code,
	       state->parent->pid, state->parent);
	if (!state->u.live.have_eventmsg && state->u.live.u.siginfo) {
		const siginfo_t *si = state->u.live.u.siginfo;
		printk("  si %d code %x errno %d addr %p\n",
		       si->si_signo, si->si_code, si->si_errno,
		       si->si_addr);
	}
#endif

	/*
	 * Set our QUIESCE flag right now, before notifying the tracer.
	 * We do this before setting state->u.live.stopped rather than
	 * by using UTRACE_ACTION_NEWSTATE in our return value, to
	 * ensure that the tracer can't get the notification and then
	 * try to resume us with PTRACE_CONT before we set the flag.
	 */
	utrace_set_flags(tsk, engine, engine->flags | UTRACE_ACTION_QUIESCE);

	/*
	 * If regset 0 has a writeback call, do it now.  On register window
	 * machines, this makes sure the user memory backing the register
	 * data is up to date by the time wait_task_inactive returns to
	 * ptrace_start in our tracer doing a PTRACE_PEEKDATA or the like.
	 */
	regset = utrace_regset(tsk, engine, utrace_native_view(tsk), 0);
	if (regset->writeback)
		(*regset->writeback)(tsk, regset, 0);

	/* Make the stop visible to ptrace_do_wait, then wake the tracer. */
	state->u.live.stopped = 1;
	state->u.live.reported = 0;
	tsk->exit_code = code;
	do_notify(tsk, state->parent, CLD_TRAPPED);

#ifdef PTRACE_DEBUG
	printk("%d ptrace_report quiescing exit_code %x\n",
	       current->pid, current->exit_code);
#endif

	return UTRACE_ACTION_RESUME;
}
1214
1215 static inline u32
1216 ptrace_event(struct utrace_attached_engine *engine, struct task_struct *tsk,
1217              int event)
1218 {
1219         struct ptrace_state *state = (struct ptrace_state *) engine->data;
1220         state->u.live.syscall = 0;
1221         return ptrace_report(engine, tsk, (event << 8) | SIGTRAP);
1222 }
1223
1224
/*
 * The traced task is dying.  If the tracer is also its natural parent
 * we detach completely so ordinary wait/reap handling takes over;
 * otherwise we keep the state attached and make the death reportable
 * through our own ptrace_do_wait.
 */
static u32
ptrace_report_death(struct utrace_attached_engine *engine,
		    struct task_struct *tsk)
{
	struct ptrace_state *state = (struct ptrace_state *) engine->data;

	if (tsk->parent == state->parent) {
		/*
		 * This is a natural child, so we detach and let the normal
		 * reporting happen once our NOREAP action is gone.  But
		 * first, generate a SIGCHLD for those cases where normal
		 * behavior won't.  A ptrace'd child always generates SIGCHLD.
		 */
		if (tsk->exit_signal == -1 || !thread_group_empty(tsk))
			do_notify(tsk, state->parent, CLD_EXITED);
		ptrace_state_unlink(state);
		rcu_assign_pointer(engine->data, 0UL);
		ptrace_done(state);
		return UTRACE_ACTION_DETACH;
	}

	/* Not yet reaped by us: mark unreported so wait can pick it up. */
	state->u.live.reported = 0;
	do_notify(tsk, state->parent, CLD_EXITED);
	return UTRACE_ACTION_RESUME;
}
1250
1251 /*
1252  * We get this only in the case where our UTRACE_ACTION_NOREAP was ignored.
1253  * That happens solely when a non-leader exec reaps the old leader.
1254  */
1255 static void
1256 ptrace_report_reap(struct utrace_attached_engine *engine,
1257                    struct task_struct *tsk)
1258 {
1259         struct ptrace_state *state;
1260         rcu_read_lock();
1261         state = rcu_dereference((struct ptrace_state *) engine->data);
1262         if (state != NULL) {
1263                 ptrace_state_unlink(state);
1264                 rcu_assign_pointer(engine->data, 0UL);
1265                 ptrace_done(state);
1266         }
1267         rcu_read_unlock();
1268 }
1269
1270
1271 static u32
1272 ptrace_report_clone(struct utrace_attached_engine *engine,
1273                     struct task_struct *parent,
1274                     unsigned long clone_flags, struct task_struct *child)
1275 {
1276         struct ptrace_state *state = (struct ptrace_state *) engine->data;
1277         struct utrace_attached_engine *child_engine;
1278         int event = PTRACE_EVENT_FORK;
1279         int option = PTRACE_O_TRACEFORK;
1280
1281 #ifdef PTRACE_DEBUG
1282         printk("%d (%p) engine %p ptrace_report_clone child %d (%p) fl %lx\n",
1283                parent->pid, parent, engine, child->pid, child, clone_flags);
1284 #endif
1285
1286         if (clone_flags & CLONE_UNTRACED)
1287                 goto out;
1288
1289         if (clone_flags & CLONE_VFORK) {
1290                 event = PTRACE_EVENT_VFORK;
1291                 option = PTRACE_O_TRACEVFORK;
1292         }
1293         else if ((clone_flags & CSIGNAL) != SIGCHLD) {
1294                 event = PTRACE_EVENT_CLONE;
1295                 option = PTRACE_O_TRACECLONE;
1296         }
1297
1298         if (!(clone_flags & CLONE_PTRACE) && !(state->u.live.options & option))
1299                 goto out;
1300
1301         child_engine = utrace_attach(child, (UTRACE_ATTACH_CREATE
1302                                              | UTRACE_ATTACH_EXCLUSIVE
1303                                              | UTRACE_ATTACH_MATCH_OPS),
1304                                      &ptrace_utrace_ops, 0UL);
1305         if (unlikely(IS_ERR(child_engine))) {
1306                 BUG_ON(PTR_ERR(child_engine) != -ENOMEM);
1307                 printk(KERN_ERR
1308                        "ptrace out of memory, lost child %d of %d",
1309                        child->pid, parent->pid);
1310         }
1311         else {
1312                 int ret = ptrace_setup(child, child_engine,
1313                                        state->parent,
1314                                        state->u.live.options,
1315                                        state->u.live.cap_sys_ptrace);
1316                 if (unlikely(ret != 0)) {
1317                         BUG_ON(ret != -ENOMEM);
1318                         printk(KERN_ERR
1319                                "ptrace out of memory, lost child %d of %d",
1320                                child->pid, parent->pid);
1321                         utrace_detach(child, child_engine);
1322                 }
1323                 else {
1324                         sigaddset(&child->pending.signal, SIGSTOP);
1325                         set_tsk_thread_flag(child, TIF_SIGPENDING);
1326                         ptrace_update(child, child_engine, 0);
1327                 }
1328         }
1329
1330         if (state->u.live.options & option) {
1331                 state->u.live.have_eventmsg = 1;
1332                 state->u.live.u.eventmsg = child->pid;
1333                 return ptrace_event(engine, parent, event);
1334         }
1335
1336 out:
1337         return UTRACE_ACTION_RESUME;
1338 }
1339
1340
1341 static u32
1342 ptrace_report_vfork_done(struct utrace_attached_engine *engine,
1343                          struct task_struct *parent, pid_t child_pid)
1344 {
1345         struct ptrace_state *state = (struct ptrace_state *) engine->data;
1346         state->u.live.have_eventmsg = 1;
1347         state->u.live.u.eventmsg = child_pid;
1348         return ptrace_event(engine, parent, PTRACE_EVENT_VFORK_DONE);
1349 }
1350
1351
1352 static u32
1353 ptrace_report_signal(struct utrace_attached_engine *engine,
1354                      struct task_struct *tsk, struct pt_regs *regs,
1355                      u32 action, siginfo_t *info,
1356                      const struct k_sigaction *orig_ka,
1357                      struct k_sigaction *return_ka)
1358 {
1359         struct ptrace_state *state = (struct ptrace_state *) engine->data;
1360         int signo = info == NULL ? SIGTRAP : info->si_signo;
1361         state->u.live.syscall = 0;
1362         state->u.live.have_eventmsg = 0;
1363         state->u.live.u.siginfo = info;
1364         return ptrace_report(engine, tsk, signo) | UTRACE_SIGNAL_IGN;
1365 }
1366
1367 static u32
1368 ptrace_report_jctl(struct utrace_attached_engine *engine,
1369                    struct task_struct *tsk, int type)
1370 {
1371         struct ptrace_state *state = (struct ptrace_state *) engine->data;
1372         do_notify(tsk, state->parent, type);
1373         return UTRACE_JCTL_NOSIGCHLD;
1374 }
1375
1376 static u32
1377 ptrace_report_exec(struct utrace_attached_engine *engine,
1378                    struct task_struct *tsk,
1379                    const struct linux_binprm *bprm,
1380                    struct pt_regs *regs)
1381 {
1382         struct ptrace_state *state = (struct ptrace_state *) engine->data;
1383         if (state->u.live.options & PTRACE_O_TRACEEXEC)
1384                 return ptrace_event(engine, tsk, PTRACE_EVENT_EXEC);
1385         state->u.live.syscall = 0;
1386         return ptrace_report(engine, tsk, SIGTRAP);
1387 }
1388
1389 static u32
1390 ptrace_report_syscall(struct utrace_attached_engine *engine,
1391                       struct task_struct *tsk, struct pt_regs *regs,
1392                       int entry)
1393 {
1394         struct ptrace_state *state = (struct ptrace_state *) engine->data;
1395 #ifdef PTRACE_SYSEMU
1396         if (entry && state->u.live.sysemu)
1397                 tracehook_abort_syscall(regs);
1398 #endif
1399         state->u.live.syscall = 1;
1400         return ptrace_report(engine, tsk,
1401                              ((state->u.live.options & PTRACE_O_TRACESYSGOOD)
1402                               ? 0x80 : 0) | SIGTRAP);
1403 }
1404
1405 static u32
1406 ptrace_report_syscall_entry(struct utrace_attached_engine *engine,
1407                             struct task_struct *tsk, struct pt_regs *regs)
1408 {
1409         return ptrace_report_syscall(engine, tsk, regs, 1);
1410 }
1411
1412 static u32
1413 ptrace_report_syscall_exit(struct utrace_attached_engine *engine,
1414                             struct task_struct *tsk, struct pt_regs *regs)
1415 {
1416         return ptrace_report_syscall(engine, tsk, regs, 0);
1417 }
1418
1419 static u32
1420 ptrace_report_exit(struct utrace_attached_engine *engine,
1421                    struct task_struct *tsk, long orig_code, long *code)
1422 {
1423         struct ptrace_state *state = (struct ptrace_state *) engine->data;
1424         state->u.live.have_eventmsg = 1;
1425         state->u.live.u.eventmsg = *code;
1426         return ptrace_event(engine, tsk, PTRACE_EVENT_EXIT);
1427 }
1428
1429 static int
1430 ptrace_unsafe_exec(struct utrace_attached_engine *engine,
1431                    struct task_struct *tsk)
1432 {
1433         struct ptrace_state *state = (struct ptrace_state *) engine->data;
1434         int unsafe = LSM_UNSAFE_PTRACE;
1435         if (state->u.live.cap_sys_ptrace)
1436                 unsafe = LSM_UNSAFE_PTRACE_CAP;
1437         return unsafe;
1438 }
1439
1440 static struct task_struct *
1441 ptrace_tracer_task(struct utrace_attached_engine *engine,
1442                    struct task_struct *target)
1443 {
1444         struct ptrace_state *state;
1445
1446         /*
1447          * This call is not necessarily made by the target task,
1448          * so ptrace might be getting detached while we run here.
1449          * The state pointer will be NULL if that happens.
1450          */
1451         state = rcu_dereference((struct ptrace_state *) engine->data);
1452
1453         return state == NULL ? NULL : state->parent;
1454 }
1455
1456 static int
1457 ptrace_allow_access_process_vm(struct utrace_attached_engine *engine,
1458                                struct task_struct *target,
1459                                struct task_struct *caller)
1460 {
1461         struct ptrace_state *state;
1462         int ours;
1463
1464         /*
1465          * This call is not necessarily made by the target task,
1466          * so ptrace might be getting detached while we run here.
1467          * The state pointer will be NULL if that happens.
1468          */
1469         rcu_read_lock();
1470         state = rcu_dereference((struct ptrace_state *) engine->data);
1471         ours = (state != NULL
1472                 && ((engine->flags & UTRACE_ACTION_QUIESCE)
1473                     || (target->state == TASK_STOPPED))
1474                 && state->parent == caller);
1475         rcu_read_unlock();
1476
1477         return ours && security_ptrace(caller, target) == 0;
1478 }
1479
1480
/* utrace callback table implementing the ptrace userspace ABI. */
static const struct utrace_engine_ops ptrace_utrace_ops =
{
	/* Tracing events that become ptrace stops or notifications. */
	.report_syscall_entry = ptrace_report_syscall_entry,
	.report_syscall_exit = ptrace_report_syscall_exit,
	.report_exec = ptrace_report_exec,
	.report_jctl = ptrace_report_jctl,
	.report_signal = ptrace_report_signal,
	.report_vfork_done = ptrace_report_vfork_done,
	.report_clone = ptrace_report_clone,
	/* End-of-life handling for the tracee. */
	.report_exit = ptrace_report_exit,
	.report_death = ptrace_report_death,
	.report_reap = ptrace_report_reap,
	/* Queries made on our behalf by the utrace core. */
	.unsafe_exec = ptrace_unsafe_exec,
	.tracer_task = ptrace_tracer_task,
	.allow_access_process_vm = ptrace_allow_access_process_vm,
};
1497
1498 #endif