[linux-2.6.git] / kernel / utrace.c  (fedora core 6 1.2949 + vserver 2.2.0)
1 /*
2  * utrace infrastructure interface for debugging user processes
3  *
4  * Copyright (C) 2006, 2007 Red Hat, Inc.  All rights reserved.
5  *
6  * This copyrighted material is made available to anyone wishing to use,
7  * modify, copy, or redistribute it subject to the terms and conditions
8  * of the GNU General Public License v.2.
9  *
10  * Red Hat Author: Roland McGrath.
11  */
12
13 #include <linux/utrace.h>
14 #include <linux/tracehook.h>
15 #include <linux/err.h>
16 #include <linux/sched.h>
17 #include <linux/module.h>
18 #include <linux/init.h>
19 #include <linux/slab.h>
20 #include <asm/tracehook.h>
21
22
23 static struct kmem_cache *utrace_cachep;
24 static struct kmem_cache *utrace_engine_cachep;
25
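/*
 * Set up the slab caches used to allocate struct utrace and
 * attached-engine structures.
 */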
26 static int __init
27 utrace_init(void)
28 {
29         utrace_cachep =
30                 kmem_cache_create("utrace_cache",
31                                   sizeof(struct utrace), 0,
32                                   SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
33         utrace_engine_cachep =
34                 kmem_cache_create("utrace_engine_cache",
35                                   sizeof(struct utrace_attached_engine), 0,
36                                   SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
37         return 0;
38 }
39 subsys_initcall(utrace_init);
40
41
42 /*
43  * Make sure target->utrace is allocated, and return with it locked on
44  * success.  This function mediates startup races.  The creating parent
45  * task has priority, and other callers will delay here to let its call
46  * succeed and take the new utrace lock first.
47  */
48 static struct utrace *
49 utrace_first_engine(struct task_struct *target,
50                     struct utrace_attached_engine *engine)
51 {
52         struct utrace *utrace, *ret;
53
54         /*
55          * If this is a newborn thread and we are not the creator,
56          * we have to wait for it.  The creator gets the first chance
57          * to attach.  The PF_STARTING flag is cleared after its
58          * report_clone hook has had a chance to run.
59          */
60         if ((target->flags & PF_STARTING)
61             && (current->utrace == NULL
62                 || current->utrace->u.live.cloning != target)) {
63                 yield();
64                 return (signal_pending(current)
65                         ? ERR_PTR(-ERESTARTNOINTR) : NULL);
66         }
67
68         utrace = kmem_cache_alloc(utrace_cachep, GFP_KERNEL);
69         if (unlikely(utrace == NULL))
70                 return ERR_PTR(-ENOMEM);
71
72         utrace->u.live.cloning = NULL;
73         utrace->u.live.signal = NULL;
74         INIT_LIST_HEAD(&utrace->engines);
75         list_add(&engine->entry, &utrace->engines);
76         spin_lock_init(&utrace->lock);
77
78         ret = utrace;
79         spin_lock(&utrace->lock);
80         task_lock(target);
81         if (likely(target->utrace == NULL)) {
82                 rcu_assign_pointer(target->utrace, utrace);
83                 /*
84                  * The task_lock protects us against another thread doing
85                  * the same thing.  We might still be racing against
86                  * tracehook_release_task.  It's called with ->exit_state
87                  * set to EXIT_DEAD and then checks ->utrace with an
88                  * smp_mb() in between.  If EXIT_DEAD is set, then
89                  * release_task might have checked ->utrace already and saw
90                  * it NULL; we can't attach.  If we see EXIT_DEAD not yet
91                  * set after our barrier, then we know release_task will
92                  * see our target->utrace pointer.
93                  */
94                 smp_mb();
95                 if (target->exit_state == EXIT_DEAD) {
96                         /*
97                          * The target has already been through release_task.
98                          */
99                         target->utrace = NULL;
100                         goto cannot_attach;
101                 }
102                 task_unlock(target);
103         }
104         else {
105                 /*
106                  * Another engine attached first, so there is a struct already.
107                  * A null return says to restart looking for the existing one.
108                  */
109         cannot_attach:
110                 ret = NULL;
111                 task_unlock(target);
112                 spin_unlock(&utrace->lock);
113                 kmem_cache_free(utrace_cachep, utrace);
114         }
115
116         return ret;
117 }
118
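/*
 * RCU callback to free a struct utrace once all readers are done with it.
 */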
119 static void
120 utrace_free(struct rcu_head *rhead)
121 {
122         struct utrace *utrace = container_of(rhead, struct utrace, u.dead);
123         kmem_cache_free(utrace_cachep, utrace);
124 }
125
126 /*
127  * Called with utrace locked.  Clean it up and free it via RCU.
128  */
129 static void
130 rcu_utrace_free(struct utrace *utrace)
131 {
132         spin_unlock(&utrace->lock);
133         INIT_RCU_HEAD(&utrace->u.dead);
134         call_rcu(&utrace->u.dead, utrace_free);
135 }
136
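/*
 * RCU callback to free an engine struct once all readers are done with it.
 */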
137 static void
138 utrace_engine_free(struct rcu_head *rhead)
139 {
140         struct utrace_attached_engine *engine =
141                 container_of(rhead, struct utrace_attached_engine, rhead);
142         kmem_cache_free(utrace_engine_cachep, engine);
143 }
144
145 /*
146  * Remove the utrace pointer from the task, unless there is a pending
147  * forced signal (or it's quiescent in utrace_get_signal).
148  */
149 static inline void
150 utrace_clear_tsk(struct task_struct *tsk, struct utrace *utrace)
151 {
152         if (utrace->u.live.signal == NULL) {
153                 task_lock(tsk);
154                 if (likely(tsk->utrace != NULL)) {
155                         rcu_assign_pointer(tsk->utrace, NULL);
156                         tsk->utrace_flags &= UTRACE_ACTION_NOREAP;
157                 }
158                 task_unlock(tsk);
159         }
160 }
161
162 /*
163  * Called with utrace locked and the target quiescent (maybe current).
164  * If this was the last engine and there is no parting forced signal
165  * pending, utrace is left locked and not freed, but is removed from the task.
166  */
167 static void
168 remove_engine(struct utrace_attached_engine *engine,
169               struct task_struct *tsk, struct utrace *utrace)
170 {
171         list_del_rcu(&engine->entry);
172         if (list_empty(&utrace->engines))
173                 utrace_clear_tsk(tsk, utrace);
174         call_rcu(&engine->rhead, utrace_engine_free);
175 }
176
177
178 /*
179  * Called with utrace locked, after remove_engine may have run.
180  * Passed the flags from all remaining engines, i.e. zero if none
181  * left.  Install the flags in tsk->utrace_flags and return with
182  * utrace unlocked.  If no engines are left and there is no parting
183  * forced signal pending, utrace is freed.
184  */
185 static void
186 check_dead_utrace(struct task_struct *tsk, struct utrace *utrace,
187                   unsigned long flags)
188 {
189         long exit_state = 0;
190
191         if (!tsk->exit_state && utrace->u.live.signal != NULL)
192                 /*
193                  * There is a pending forced signal.  It may have been
194                  * left by an engine now detached.  The empty utrace
195                  * remains attached until it can be processed.
196                  */
197                 flags |= UTRACE_ACTION_QUIESCE;
198
199         /*
200          * If tracing was preventing a SIGCHLD or self-reaping
201          * and is no longer, we'll do that report or reaping now.
202          */
203         if (((tsk->utrace_flags &~ flags) & UTRACE_ACTION_NOREAP)
204             && tsk->exit_state) {
205                 /*
206                  * While holding the utrace lock, mark that it's been done.
207                  * For self-reaping, we need to change tsk->exit_state
208                  * before clearing tsk->utrace_flags, so that the real
209                  * parent can't see it in EXIT_ZOMBIE momentarily and reap
210                  * it.  If tsk was the group_leader, an exec by another
211                  * thread can release_task it despite our NOREAP.  Holding
212                  * tasklist_lock for reading excludes de_thread until we
213                  * decide what to do.
214                  */
215                 read_lock(&tasklist_lock);
216                 if (tsk->exit_signal == -1) { /* Self-reaping thread.  */
217                         exit_state = xchg(&tsk->exit_state, EXIT_DEAD);
218                         read_unlock(&tasklist_lock);
219
220                         BUG_ON(exit_state != EXIT_ZOMBIE);
221                         exit_state = EXIT_DEAD; /* Reap it below.  */
222
223                         /*
224                          * Now that we've changed its state to DEAD,
225                          * it's safe to install the new tsk->utrace_flags
226                          * value without the UTRACE_ACTION_NOREAP bit set.
227                          */
228                 }
229                 else if (thread_group_empty(tsk)) /* Normal solo zombie.  */
230                         /*
231                          * We need to prevent the real parent from reaping
232                          * until after we've called do_notify_parent, below.
233                          * It can get into wait_task_zombie any time after
234                          * the UTRACE_ACTION_NOREAP bit is cleared.  It's
235                          * safe for that to do everything it does until its
236                          * release_task call starts tearing things down.
237                          * Holding tasklist_lock for reading prevents
238                          * release_task from proceeding until we've done
239                          * everything we need to do.
240                          */
241                         exit_state = EXIT_ZOMBIE;
242                 else
243                         /*
244                          * Delayed group leader, nothing to do yet.
245                          * This is also the situation with the old
246                          * group leader in an exec by another thread,
247                          * which will call release_task itself.
248                          */
249                         read_unlock(&tasklist_lock);
250
251         }
252
253         tsk->utrace_flags = flags;
254         if (flags)
255                 spin_unlock(&utrace->lock);
256         else
257                 rcu_utrace_free(utrace);
258
259         /*
260          * Now we're finished updating the utrace state.
261          * Do a pending self-reaping or parent notification.
262          */
263         if (exit_state == EXIT_ZOMBIE) {
264                 do_notify_parent(tsk, tsk->exit_signal);
265
266                 /*
267                  * If SIGCHLD was ignored, that set tsk->exit_signal = -1
268                  * to tell us to reap it immediately.
269                  */
270                 if (tsk->exit_signal == -1) {
271                         exit_state = xchg(&tsk->exit_state, EXIT_DEAD);
272                         BUG_ON(exit_state != EXIT_ZOMBIE);
273                         exit_state = EXIT_DEAD; /* Reap it below.  */
274                 }
275                 read_unlock(&tasklist_lock); /* See comment above.  */
276         }
277         if (exit_state == EXIT_DEAD)
278                 /*
279                  * Note this can wind up in utrace_reap and do more callbacks.
280                  * Our callers must be in places where that is OK.
281                  */
282                 release_task(tsk);
283 }
284
285
286
287 /*
288  * Get the target thread to quiesce.  Return nonzero if it's already quiescent.
289  * Return zero if it will report a QUIESCE event soon.
290  * If interrupt is nonzero, wake it like a signal would so it quiesces ASAP.
291  * If interrupt is zero, just make sure it quiesces before going to user mode.
292  */
293 static int
294 quiesce(struct task_struct *target, int interrupt)
295 {
296         int quiescent;
297
298         target->utrace_flags |= UTRACE_ACTION_QUIESCE;
299         read_barrier_depends();
300
301         quiescent = (target->exit_state
302                      || target->state & (TASK_TRACED | TASK_STOPPED));
303
304         if (!quiescent) {
305                 spin_lock_irq(&target->sighand->siglock);
306                 quiescent = (unlikely(target->exit_state)
307                              || unlikely(target->state
308                                          & (TASK_TRACED | TASK_STOPPED)));
309                 if (!quiescent) {
310                         if (interrupt)
311                                 signal_wake_up(target, 0);
312                         else {
313                                 set_tsk_thread_flag(target, TIF_SIGPENDING);
314                                 kick_process(target);
315                         }
316                 }
317                 spin_unlock_irq(&target->sighand->siglock);
318         }
319
320         return quiescent;
321 }
322
323
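/*
 * Find an already-attached engine that matches ops and/or data, as selected
 * by the UTRACE_ATTACH_MATCH_* bits in flags.  Returns ERR_PTR(-ENOENT) if
 * none matches.  The caller holds rcu_read_lock or the utrace lock.
 */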
324 static struct utrace_attached_engine *
325 matching_engine(struct utrace *utrace, int flags,
326                 const struct utrace_engine_ops *ops, unsigned long data)
327 {
328         struct utrace_attached_engine *engine;
329         list_for_each_entry_rcu(engine, &utrace->engines, entry) {
330                 if ((flags & UTRACE_ATTACH_MATCH_OPS)
331                     && engine->ops != ops)
332                         continue;
333                 if ((flags & UTRACE_ATTACH_MATCH_DATA)
334                     && engine->data != data)
335                         continue;
336                 return engine;
337         }
338         return ERR_PTR(-ENOENT);
339 }
340
341 /*
342  * Attach an engine to target.  UTRACE_ATTACH_MATCH_OPS/_MATCH_DATA without
343  * UTRACE_ATTACH_CREATE looks up an existing engine.  TODO: option to stop it?
344  */
345 struct utrace_attached_engine *
346 utrace_attach(struct task_struct *target, int flags,
347              const struct utrace_engine_ops *ops, unsigned long data)
348 {
349         struct utrace *utrace;
350         struct utrace_attached_engine *engine;
351
352 restart:
353         rcu_read_lock();
354         utrace = rcu_dereference(target->utrace);
355         smp_rmb();
356         if (unlikely(target->exit_state == EXIT_DEAD)) {
357                 /*
358                  * The target has already been reaped.
359                  * Check this first; a race with reaping may lead to restart.
360                  */
361                 rcu_read_unlock();
362                 return ERR_PTR(-ESRCH);
363         }
364         if (utrace == NULL) {
365                 rcu_read_unlock();
366
367                 if (!(flags & UTRACE_ATTACH_CREATE))
368                         return ERR_PTR(-ENOENT);
369
370                 engine = kmem_cache_alloc(utrace_engine_cachep, GFP_KERNEL);
371                 if (unlikely(engine == NULL))
372                         return ERR_PTR(-ENOMEM);
373                 engine->flags = 0;
374
375         first:
376                 utrace = utrace_first_engine(target, engine);
377                 if (IS_ERR(utrace) || unlikely(utrace == NULL)) {
378                         kmem_cache_free(utrace_engine_cachep, engine);
379                         if (unlikely(utrace == NULL)) /* Race condition.  */
380                                 goto restart;
381                         return ERR_PTR(PTR_ERR(utrace));
382                 }
383         }
384         else {
385                 if (!(flags & UTRACE_ATTACH_CREATE)) {
386                         engine = matching_engine(utrace, flags, ops, data);
387                         rcu_read_unlock();
388                         return engine;
389                 }
390                 rcu_read_unlock();
391
392                 engine = kmem_cache_alloc(utrace_engine_cachep, GFP_KERNEL);
393                 if (unlikely(engine == NULL))
394                         return ERR_PTR(-ENOMEM);
395                 engine->flags = 0;
396
397                 rcu_read_lock();
398                 utrace = rcu_dereference(target->utrace);
399                 if (unlikely(utrace == NULL)) { /* Race with detach.  */
400                         rcu_read_unlock();
401                         goto first;
402                 }
403                 spin_lock(&utrace->lock);
404
405                 if (flags & UTRACE_ATTACH_EXCLUSIVE) {
406                         struct utrace_attached_engine *old;
407                         old = matching_engine(utrace, flags, ops, data);
408                         if (!IS_ERR(old)) {
409                                 spin_unlock(&utrace->lock);
410                                 rcu_read_unlock();
411                                 kmem_cache_free(utrace_engine_cachep, engine);
412                                 return ERR_PTR(-EEXIST);
413                         }
414                 }
415
416                 if (unlikely(rcu_dereference(target->utrace) != utrace)) {
417                         /*
418                          * We lost a race with other CPUs doing a sequence
419                          * of detach and attach before we got in.
420                          */
421                         spin_unlock(&utrace->lock);
422                         rcu_read_unlock();
423                         kmem_cache_free(utrace_engine_cachep, engine);
424                         goto restart;
425                 }
426                 rcu_read_unlock();
427
428                 list_add_tail_rcu(&engine->entry, &utrace->engines);
429         }
430
431         engine->ops = ops;
432         engine->data = data;
433
434         spin_unlock(&utrace->lock);
435
436         return engine;
437 }
438 EXPORT_SYMBOL_GPL(utrace_attach);
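/*
 * Illustrative sketch (not built here; the engine, callback, and policy
 * below are hypothetical): one way a tracing engine might use utrace_attach
 * and utrace_set_flags to get syscall-entry reports for a target task.
 * Only the functions, flags, and callback signature come from this
 * interface; everything named my_* is made up for the example.
 *
 *	static u32 my_syscall_entry(struct utrace_attached_engine *engine,
 *				    struct task_struct *tsk,
 *				    struct pt_regs *regs)
 *	{
 *		return UTRACE_ACTION_RESUME;	(let the syscall proceed)
 *	}
 *
 *	static const struct utrace_engine_ops my_ops = {
 *		.report_syscall_entry = my_syscall_entry,
 *	};
 *
 *	struct utrace_attached_engine *engine;
 *	engine = utrace_attach(task, UTRACE_ATTACH_CREATE
 *			       | UTRACE_ATTACH_EXCLUSIVE
 *			       | UTRACE_ATTACH_MATCH_OPS, &my_ops, 0);
 *	if (!IS_ERR(engine))
 *		utrace_set_flags(task, engine, UTRACE_EVENT(SYSCALL_ENTRY));
 */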
439
440 /*
441  * When an engine is detached, the target thread may still see it and make
442  * callbacks until it quiesces.  We reset its event flags to just QUIESCE
443  * and install a special ops vector whose callback is dead_engine_delete.
444  * When the target thread quiesces, it can safely free the engine itself.
445  */
446 static u32
447 dead_engine_delete(struct utrace_attached_engine *engine,
448                    struct task_struct *tsk)
449 {
450         return UTRACE_ACTION_DETACH;
451 }
452
453 static const struct utrace_engine_ops dead_engine_ops =
454 {
455         .report_quiesce = &dead_engine_delete
456 };
457
458
459 /*
460  * Called with utrace locked.  Recompute the union of engines' flags.
461  */
462 static inline unsigned long
463 rescan_flags(struct utrace *utrace)
464 {
465         struct utrace_attached_engine *engine;
466         unsigned long flags = 0;
467         list_for_each_entry(engine, &utrace->engines, entry)
468                 flags |= engine->flags | UTRACE_EVENT(REAP);
469         return flags;
470 }
471
472 /*
473  * Only these flags matter any more for a dead task (exit_state set).
474  * We use this mask on flags installed in ->utrace_flags after
475  * exit_notify (and possibly utrace_report_death) has run.
476  * This ensures that utrace_release_task knows positively that
477  * utrace_report_death will not run later.
478  */
479 #define DEAD_FLAGS_MASK (UTRACE_EVENT(REAP) | UTRACE_ACTION_NOREAP)
480
481 /*
482  * Flags bits in utrace->u.exit.flags word.  These are private
483  * communication among utrace_report_death, utrace_release_task,
484  * utrace_detach, and utrace_set_flags.
485  */
486 #define EXIT_FLAG_DEATH                 1 /* utrace_report_death running */
487 #define EXIT_FLAG_DELAYED_GROUP_LEADER  2 /* utrace_delayed_group_leader ran */
488 #define EXIT_FLAG_REAP                  4 /* release_task ran */
489
490
491 /*
492  * We may have been the one keeping the target thread quiescent.
493  * Check if it should wake up now.
494  * Called with utrace locked, and unlocks it on return.
495  * If we were keeping it stopped, resume it.
496  * If we were keeping its zombie from reporting/self-reap, do it now.
497  */
498 static void
499 wake_quiescent(unsigned long old_flags,
500                struct utrace *utrace, struct task_struct *target)
501 {
502         unsigned long flags;
503
504         /*
505          * Update the set of events of interest from the union
506          * of the interests of the remaining tracing engines.
507          */
508         flags = rescan_flags(utrace);
509         if (target->exit_state) {
510                 BUG_ON(utrace->u.exit.flags & EXIT_FLAG_DEATH);
511                 flags &= DEAD_FLAGS_MASK;
512         }
513         check_dead_utrace(target, utrace, flags);
514
515         if (target->exit_state || (flags & UTRACE_ACTION_QUIESCE))
516                 return;
517
518         read_lock(&tasklist_lock);
519         if (!unlikely(target->exit_state)) {
520                 /*
521                  * The target is not dead and should not be in tracing stop
522                  * any more.  Wake it unless it's in job control stop.
523                  */
524                 spin_lock_irq(&target->sighand->siglock);
525                 if (target->signal->flags & SIGNAL_STOP_STOPPED) {
526                         int stop_count = target->signal->group_stop_count;
527                         target->state = TASK_STOPPED;
528                         spin_unlock_irq(&target->sighand->siglock);
529
530                         /*
531                          * If tracing was preventing a CLD_STOPPED report
532                          * and is no longer, do that report right now.
533                          */
534                         if (stop_count == 0
535                             && ((old_flags &~ flags) & UTRACE_ACTION_NOREAP))
536                                 do_notify_parent_cldstop(target, CLD_STOPPED);
537                 }
538                 else {
539                         /*
540                          * Wake the task up.
541                          */
542                         recalc_sigpending_tsk(target);
543                         wake_up_state(target, TASK_STOPPED | TASK_TRACED);
544                         spin_unlock_irq(&target->sighand->siglock);
545                 }
546         }
547         read_unlock(&tasklist_lock);
548 }
549
550 /*
551  * The engine is supposed to be attached.  The caller needs rcu_read_lock
552  * if it wants to look at the engine struct (e.g. engine->data), to be
553  * sure it hasn't been freed by utrace_reap asynchronously--unless it has
554  * already synchronized with its own report_reap callback, which would
555  * have happened before then.  A simultaneous utrace_detach call or a
556  * UTRACE_ACTION_DETACH return from a callback can also free the engine
557  * if rcu_read_lock is not held, but that is in the tracing engine's
558  * power to avoid.
559  *
560  * Get the utrace lock for the target task.
561  * Returns the struct if locked, or ERR_PTR(-errno).
562  *
563  * This has to be robust against races with:
564  *      utrace_detach calls
565  *      UTRACE_ACTION_DETACH after reports
566  *      utrace_report_death
567  *      utrace_release_task
568  */
569 static struct utrace *
570 get_utrace_lock_attached(struct task_struct *target,
571                          struct utrace_attached_engine *engine)
572 {
573         struct utrace *utrace;
574
575         rcu_read_lock();
576         utrace = rcu_dereference(target->utrace);
577         smp_rmb();
578         if (unlikely(target->exit_state == EXIT_DEAD)) {
579                 /*
580                  * Called after utrace_release_task might have started.
581                  * A call to this engine's report_reap callback might
582                  * already be in progress or engine might even have been
583                  * freed already.
584                  */
585                 utrace = ERR_PTR(-ESRCH);
586         }
587         else {
588                 spin_lock(&utrace->lock);
589                 if (unlikely(rcu_dereference(target->utrace) != utrace)
590                     || unlikely(rcu_dereference(engine->ops)
591                                 == &dead_engine_ops)) {
592                         /*
593                          * By the time we got the utrace lock,
594                          * it had been reaped or detached already.
595                          */
596                         spin_unlock(&utrace->lock);
597                         utrace = ERR_PTR(-ESRCH);
598                 }
599         }
600         rcu_read_unlock();
601
602         return utrace;
603 }
604
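/*
 * Detach an attached engine from its target thread.  Returns 0 on success,
 * -ESRCH if the target has already been through release_task, or -EALREADY
 * if its death report is in progress; in those cases the report_death and
 * report_reap callbacks can no longer be prevented.
 */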
605 int
606 utrace_detach(struct task_struct *target,
607               struct utrace_attached_engine *engine)
608 {
609         struct utrace *utrace;
610         unsigned long flags;
611
612         utrace = get_utrace_lock_attached(target, engine);
613         if (unlikely(IS_ERR(utrace)))
614                 return PTR_ERR(utrace);
615
616         if (target->exit_state
617             && unlikely(utrace->u.exit.flags & (EXIT_FLAG_DEATH
618                                                 | EXIT_FLAG_REAP))) {
619                 /*
620                  * We have already started the death report, or
621                  * even entered release_task.  We can't prevent
622                  * the report_death and report_reap callbacks,
623                  * so tell the caller they will happen.
624                  */
625                 int ret = ((utrace->u.exit.flags & EXIT_FLAG_REAP)
626                            ? -ESRCH : -EALREADY);
627                 spin_unlock(&utrace->lock);
628                 return ret;
629         }
630
631         flags = engine->flags;
632         engine->flags = UTRACE_EVENT(QUIESCE) | UTRACE_ACTION_QUIESCE;
633         rcu_assign_pointer(engine->ops, &dead_engine_ops);
634
635         if (quiesce(target, 1)) {
636                 remove_engine(engine, target, utrace);
637                 wake_quiescent(flags, utrace, target);
638         }
639         else
640                 spin_unlock(&utrace->lock);
641
642
643         return 0;
644 }
645 EXPORT_SYMBOL_GPL(utrace_detach);
646
647
648 /*
649  * Called with utrace->lock held.
650  * Notify and clean up all engines, then free utrace.
651  */
652 static void
653 utrace_reap(struct task_struct *target, struct utrace *utrace)
654 {
655         struct utrace_attached_engine *engine, *next;
656         const struct utrace_engine_ops *ops;
657
658 restart:
659         list_for_each_entry_safe(engine, next, &utrace->engines, entry) {
660                 list_del_rcu(&engine->entry);
661
662                 /*
663                  * Now nothing else refers to this engine.
664                  */
665                 if (engine->flags & UTRACE_EVENT(REAP)) {
666                         ops = rcu_dereference(engine->ops);
667                         if (ops != &dead_engine_ops) {
668                                 spin_unlock(&utrace->lock);
669                                 (*ops->report_reap)(engine, target);
670                                 call_rcu(&engine->rhead, utrace_engine_free);
671                                 spin_lock(&utrace->lock);
672                                 goto restart;
673                         }
674                 }
675                 call_rcu(&engine->rhead, utrace_engine_free);
676         }
677
678         rcu_utrace_free(utrace);
679 }
680
681 /*
682  * Called by release_task.  Clears target->utrace and reaps or defers reaping.
683  */
684 void
685 utrace_release_task(struct task_struct *target)
686 {
687         struct utrace *utrace;
688
689         task_lock(target);
690         utrace = target->utrace;
691         rcu_assign_pointer(target->utrace, NULL);
692         task_unlock(target);
693
694         if (unlikely(utrace == NULL))
695                 return;
696
697         spin_lock(&utrace->lock);
698         utrace->u.exit.flags |= EXIT_FLAG_REAP;
699
700         if (target->utrace_flags & (UTRACE_EVENT(DEATH)
701                                     | UTRACE_EVENT(QUIESCE)))
702                 /*
703                  * The target will do some final callbacks but hasn't
704                  * finished them yet.  We know because it clears these
705                  * event bits after it's done.  Instead of cleaning up here
706                  * and requiring utrace_report_death to cope with it, we
707                  * delay the REAP report and the teardown until after the
708                  * target finishes its death reports.
709                  */
710                 spin_unlock(&utrace->lock);
711         else
712                 utrace_reap(target, utrace); /* Unlocks and frees.  */
713 }
714
715
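/*
 * Change an attached engine's event and action flags.  Returns 0 on
 * success, or -EALREADY (or -ESRCH) when the target is already dying or
 * dead in a way that makes the newly requested callbacks impossible.
 */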
716 int
717 utrace_set_flags(struct task_struct *target,
718                  struct utrace_attached_engine *engine,
719                  unsigned long flags)
720 {
721         struct utrace *utrace;
722         int report;
723         unsigned long old_flags, old_utrace_flags;
724         int ret = -EALREADY;
725
726 #ifdef ARCH_HAS_SINGLE_STEP
727         if (! ARCH_HAS_SINGLE_STEP)
728 #endif
729                 WARN_ON(flags & UTRACE_ACTION_SINGLESTEP);
730 #ifdef ARCH_HAS_BLOCK_STEP
731         if (! ARCH_HAS_BLOCK_STEP)
732 #endif
733                 WARN_ON(flags & UTRACE_ACTION_BLOCKSTEP);
734
735         utrace = get_utrace_lock_attached(target, engine);
736         if (unlikely(IS_ERR(utrace)))
737                 return PTR_ERR(utrace);
738
739 restart:                        /* See below. */
740
741         old_utrace_flags = target->utrace_flags;
742         old_flags = engine->flags;
743
744         if (target->exit_state
745             && (((flags &~ old_flags) & (UTRACE_ACTION_QUIESCE
746                                          | UTRACE_ACTION_NOREAP
747                                          | UTRACE_EVENT(DEATH)
748                                          | UTRACE_EVENT(QUIESCE)))
749                 || ((utrace->u.exit.flags & EXIT_FLAG_DEATH)
750                     && ((old_flags &~ flags) & (UTRACE_EVENT(DEATH) |
751                                                 UTRACE_EVENT(QUIESCE))))
752                 || ((utrace->u.exit.flags & EXIT_FLAG_REAP)
753                     && ((old_flags &~ flags) & UTRACE_EVENT(REAP))))) {
754                 spin_unlock(&utrace->lock);
755                 return ret;
756         }
757
758         /*
759          * When setting these flags, it's essential that we really
760          * synchronize with exit_notify.  They cannot be set after
761          * exit_notify takes the tasklist_lock.  By holding the read
762          * lock here while setting the flags, we ensure that the calls
763          * to tracehook_notify_death and tracehook_report_death will
764          * see the new flags.  This ensures that utrace_release_task
765          * knows positively that utrace_report_death will be called or
766          * that it won't.
767          */
768         if ((flags &~ old_utrace_flags) & (UTRACE_ACTION_NOREAP
769                                            | UTRACE_EVENT(DEATH)
770                                            | UTRACE_EVENT(QUIESCE))) {
771                 read_lock(&tasklist_lock);
772                 if (unlikely(target->exit_state)) {
773                         read_unlock(&tasklist_lock);
774                         spin_unlock(&utrace->lock);
775                         return ret;
776                 }
777                 target->utrace_flags |= flags;
778                 read_unlock(&tasklist_lock);
779         }
780
781         engine->flags = flags;
782         target->utrace_flags |= flags;
783         ret = 0;
784
785         report = 0;
786         if ((old_flags ^ flags) & UTRACE_ACTION_QUIESCE) {
787                 if (flags & UTRACE_ACTION_QUIESCE) {
788                         report = (quiesce(target, 1)
789                                   && (flags & UTRACE_EVENT(QUIESCE)));
790                         spin_unlock(&utrace->lock);
791                 }
792                 else
793                         wake_quiescent(old_flags, utrace, target);
794         }
795         else if (((old_flags &~ flags) & UTRACE_ACTION_NOREAP)
796                  && target->exit_state)
797                         wake_quiescent(old_flags, utrace, target);
798         else {
799                 /*
800                  * If we're asking for single-stepping or syscall tracing,
801                  * we need to pass through utrace_quiescent before resuming
802                  * in user mode to get those effects, even if the target is
803                  * not going to be quiescent right now.
804                  */
805                 if (!(target->utrace_flags & UTRACE_ACTION_QUIESCE)
806                     && !target->exit_state
807                     && ((flags &~ old_utrace_flags)
808                         & (UTRACE_ACTION_SINGLESTEP | UTRACE_ACTION_BLOCKSTEP
809                            | UTRACE_EVENT_SYSCALL)))
810                         quiesce(target, 0);
811                 spin_unlock(&utrace->lock);
812         }
813
814         if (report) {   /* Already quiescent, won't report itself.  */
815                 u32 action = (*engine->ops->report_quiesce)(engine, target);
816                 if (action & UTRACE_ACTION_DETACH)
817                         utrace_detach(target, engine);
818                 else if (action & UTRACE_ACTION_NEWSTATE) {
819                         /*
820                          * The callback has us changing the flags yet
821                          * again.  Since we released the lock, they
822                          * could have changed asynchronously just now.
823                          * We must refetch the current flags to change
824                          * the UTRACE_ACTION_STATE_MASK bits.  If the
825                          * target thread started dying, then there is
826                          * nothing we can do--but that failure is due
827                          * to the report_quiesce callback after the
828                          * original utrace_set_flags has already
829                          * succeeded, so we don't want to return
830                          * failure here (hence leave ret = 0).
831                          */
832                         utrace = get_utrace_lock_attached(target, engine);
833                         if (!unlikely(IS_ERR(utrace))) {
834                                 flags = action & UTRACE_ACTION_STATE_MASK;
835                                 flags |= (engine->flags
836                                           &~ UTRACE_ACTION_STATE_MASK);
837                                 goto restart;
838                         }
839                 }
840         }
841
842         return ret;
843 }
844 EXPORT_SYMBOL_GPL(utrace_set_flags);
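/*
 * Illustrative sketch (hypothetical callback): instead of calling
 * utrace_set_flags again, a callback can change its engine's state bits on
 * the spot by returning UTRACE_ACTION_NEWSTATE.  On an arch with
 * single-step support, a report_quiesce callback could request stepping:
 *
 *	static u32 my_quiesce(struct utrace_attached_engine *engine,
 *			      struct task_struct *tsk)
 *	{
 *		return UTRACE_ACTION_RESUME | UTRACE_ACTION_NEWSTATE
 *			| UTRACE_ACTION_SINGLESTEP;
 *	}
 *
 * update_action (below) folds the returned state bits into the engine's
 * flags and tsk->utrace_flags; the machine-level stepping state is then
 * applied in utrace_quiescent before returning to user mode.
 */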
845
846 /*
847  * While running an engine callback, no locks are held.
848  * If a callback updates its engine's action state, then
849  * we need to take the utrace lock to install the flags update.
850  */
851 static inline u32
852 update_action(struct task_struct *tsk, struct utrace *utrace,
853               struct utrace_attached_engine *engine,
854               u32 ret)
855 {
856         if (ret & UTRACE_ACTION_DETACH)
857                 rcu_assign_pointer(engine->ops, &dead_engine_ops);
858         else if ((ret & UTRACE_ACTION_NEWSTATE)
859                  && ((ret ^ engine->flags) & UTRACE_ACTION_STATE_MASK)) {
860 #ifdef ARCH_HAS_SINGLE_STEP
861                 if (! ARCH_HAS_SINGLE_STEP)
862 #endif
863                         WARN_ON(ret & UTRACE_ACTION_SINGLESTEP);
864 #ifdef ARCH_HAS_BLOCK_STEP
865                 if (! ARCH_HAS_BLOCK_STEP)
866 #endif
867                         WARN_ON(ret & UTRACE_ACTION_BLOCKSTEP);
868                 spin_lock(&utrace->lock);
869                 /*
870                  * If we're changing something other than just QUIESCE,
871                  * make sure we pass through utrace_quiescent before
872                  * resuming even if we aren't going to stay quiescent.
873                  * That's where we get the correct union of all engines'
874                  * flags after they've finished changing, and apply changes.
875                  */
876                 if (((ret ^ engine->flags) & (UTRACE_ACTION_STATE_MASK
877                                               & ~UTRACE_ACTION_QUIESCE)))
878                         tsk->utrace_flags |= UTRACE_ACTION_QUIESCE;
879                 engine->flags &= ~UTRACE_ACTION_STATE_MASK;
880                 engine->flags |= ret & UTRACE_ACTION_STATE_MASK;
881                 tsk->utrace_flags |= engine->flags;
882                 spin_unlock(&utrace->lock);
883         }
884         else
885                 ret |= engine->flags & UTRACE_ACTION_STATE_MASK;
886         return ret;
887 }
888
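/*
 * Invoke one engine callback inside a report loop and fold the action
 * bits it returns into the loop's running action word via update_action.
 */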
889 #define REPORT(callback, ...) do { \
890         u32 ret = (*rcu_dereference(engine->ops)->callback) \
891                 (engine, tsk, ##__VA_ARGS__); \
892         action = update_action(tsk, utrace, engine, ret); \
893         } while (0)
894
895
896 /*
897  * Called with utrace->lock held, returns with it released.
898  */
899 static u32
900 remove_detached(struct task_struct *tsk, struct utrace *utrace,
901                 u32 action, unsigned long mask)
902 {
903         struct utrace_attached_engine *engine, *next;
904         unsigned long flags = 0;
905
906         list_for_each_entry_safe(engine, next, &utrace->engines, entry) {
907                 if (engine->ops == &dead_engine_ops)
908                         remove_engine(engine, tsk, utrace);
909                 else
910                         flags |= engine->flags | UTRACE_EVENT(REAP);
911         }
912         check_dead_utrace(tsk, utrace, flags & mask);
913
914         flags &= UTRACE_ACTION_STATE_MASK;
915         return flags | (action & UTRACE_ACTION_OP_MASK);
916 }
917
918 /*
919  * Called after an event report loop.  Remove any engines marked for detach.
920  */
921 static inline u32
922 check_detach(struct task_struct *tsk, u32 action)
923 {
924         if (action & UTRACE_ACTION_DETACH) {
925                 /*
926                  * This must be current to be sure it's not possibly
927                  * getting into utrace_report_death.
928                  */
929                 BUG_ON(tsk != current);
930                 spin_lock(&tsk->utrace->lock);
931                 action = remove_detached(tsk, tsk->utrace, action, ~0UL);
932         }
933         return action;
934 }
935
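/*
 * Called after an event report loop.  If any state bits remain set in the
 * final action word, pass through utrace_quiescent before resuming.
 * Returns nonzero if that was cut short by SIGKILL.
 */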
936 static inline int
937 check_quiescent(struct task_struct *tsk, u32 action)
938 {
939         if (action & UTRACE_ACTION_STATE_MASK)
940                 return utrace_quiescent(tsk, NULL);
941         return 0;
942 }
943
944 /*
945  * Called iff UTRACE_EVENT(CLONE) flag is set.
946  * This notification call blocks the wake_up_new_task call on the child.
947  * So we must not quiesce here.  tracehook_report_clone_complete will do
948  * a quiescence check momentarily.
949  */
950 void
951 utrace_report_clone(unsigned long clone_flags, struct task_struct *child)
952 {
953         struct task_struct *tsk = current;
954         struct utrace *utrace = tsk->utrace;
955         struct list_head *pos, *next;
956         struct utrace_attached_engine *engine;
957         unsigned long action;
958
959         utrace->u.live.cloning = child;
960
961         /* XXX must change for sharing */
962         action = UTRACE_ACTION_RESUME;
963         list_for_each_safe_rcu(pos, next, &utrace->engines) {
964                 engine = list_entry(pos, struct utrace_attached_engine, entry);
965                 if (engine->flags & UTRACE_EVENT(CLONE))
966                         REPORT(report_clone, clone_flags, child);
967                 if (action & UTRACE_ACTION_HIDE)
968                         break;
969         }
970
971         utrace->u.live.cloning = NULL;
972
973         check_detach(tsk, action);
974 }
975
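/*
 * Run the report_quiesce callbacks of all engines that asked for QUIESCE
 * events, accumulate the surviving state bits, and process any detaches.
 */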
976 static unsigned long
977 report_quiescent(struct task_struct *tsk, struct utrace *utrace, u32 action)
978 {
979         struct list_head *pos, *next;
980         struct utrace_attached_engine *engine;
981
982         list_for_each_safe_rcu(pos, next, &utrace->engines) {
983                 engine = list_entry(pos, struct utrace_attached_engine, entry);
984                 if (engine->flags & UTRACE_EVENT(QUIESCE))
985                         REPORT(report_quiesce);
986                 action |= engine->flags & UTRACE_ACTION_STATE_MASK;
987         }
988
989         return check_detach(tsk, action);
990 }
991
992 /*
993  * Called iff UTRACE_EVENT(JCTL) flag is set.
994  */
995 int
996 utrace_report_jctl(int what)
997 {
998         struct task_struct *tsk = current;
999         struct utrace *utrace = tsk->utrace;
1000         struct list_head *pos, *next;
1001         struct utrace_attached_engine *engine;
1002         unsigned long action;
1003
1004         /* XXX must change for sharing */
1005         action = UTRACE_ACTION_RESUME;
1006         list_for_each_safe_rcu(pos, next, &utrace->engines) {
1007                 engine = list_entry(pos, struct utrace_attached_engine, entry);
1008                 if (engine->flags & UTRACE_EVENT(JCTL))
1009                         REPORT(report_jctl, what);
1010                 if (action & UTRACE_ACTION_HIDE)
1011                         break;
1012         }
1013
1014         /*
1015          * We are becoming quiescent, so report it now.
1016          * We don't block in utrace_quiescent because we are stopping anyway.
1017          * We know that upon resuming we'll go through tracehook_induce_signal,
1018          * which will keep us quiescent or set us up to resume with tracing.
1019          */
1020         action = report_quiescent(tsk, utrace, action);
1021
1022         if (what == CLD_STOPPED && tsk->state != TASK_STOPPED) {
1023                 /*
1024                  * The event report hooks could have blocked, though
1025                  * it should have been briefly.  Make sure we're in
1026                  * TASK_STOPPED state again to block properly, unless
1027                  * we've just come back out of job control stop.
1028                  */
1029                 spin_lock_irq(&tsk->sighand->siglock);
1030                 if (tsk->signal->flags & SIGNAL_STOP_STOPPED)
1031                         set_current_state(TASK_STOPPED);
1032                 spin_unlock_irq(&tsk->sighand->siglock);
1033         }
1034
1035         return action & UTRACE_JCTL_NOSIGCHLD;
1036 }
1037
1038
1039 /*
1040  * Return nonzero if there is a SIGKILL that should be waking us up.
1041  * Called with the siglock held.
1042  */
1043 static inline int
1044 sigkill_pending(struct task_struct *tsk)
1045 {
1046         return ((sigismember(&tsk->pending.signal, SIGKILL)
1047                  || sigismember(&tsk->signal->shared_pending.signal, SIGKILL))
1048                 && !unlikely(sigismember(&tsk->blocked, SIGKILL)));
1049 }
1050
1051 /*
1052  * Called if UTRACE_EVENT(QUIESCE) or UTRACE_ACTION_QUIESCE flag is set.
1053  * Also called after other event reports.
1054  * It is a good time to block.
1055  * Returns nonzero if we woke up prematurely due to SIGKILL.
1056  *
1057  * The signal pointer is nonzero when called from utrace_get_signal,
1058  * where a pending forced signal can be processed right away.  Otherwise,
1059  * we keep UTRACE_ACTION_QUIESCE set after resuming so that utrace_get_signal
1060  * will be entered before user mode.
1061  */
1062 int
1063 utrace_quiescent(struct task_struct *tsk, struct utrace_signal *signal)
1064 {
1065         struct utrace *utrace = tsk->utrace;
1066         unsigned long action;
1067
1068 restart:
1069         /* XXX must change for sharing */
1070
1071         action = report_quiescent(tsk, utrace, UTRACE_ACTION_RESUME);
1072
1073         /*
1074          * If some engines want us quiescent, we block here.
1075          */
1076         if (action & UTRACE_ACTION_QUIESCE) {
1077                 int killed;
1078
1079                 if (signal != NULL) {
1080                         BUG_ON(utrace->u.live.signal != NULL);
1081                         utrace->u.live.signal = signal;
1082                 }
1083
1084                 spin_lock_irq(&tsk->sighand->siglock);
1085                 /*
1086                  * If wake_quiescent is trying to wake us up now, it will
1087                  * have cleared the QUIESCE flag before trying to take the
1088                  * siglock.  Now we have the siglock, so either it has
1089                  * already cleared the flag, or it will wake us up after we
1090                  * release the siglock it's waiting for.
1091                  * Never stop when there is a SIGKILL bringing us down.
1092                  */
1093                 killed = sigkill_pending(tsk);
1094                 if (!killed && (tsk->utrace_flags & UTRACE_ACTION_QUIESCE)) {
1095                         set_current_state(TASK_TRACED);
1096                         /*
1097                          * If there is a group stop in progress,
1098                          * we must participate in the bookkeeping.
1099                          */
1100                         if (tsk->signal->group_stop_count > 0)
1101                                 --tsk->signal->group_stop_count;
1102                         spin_unlock_irq(&tsk->sighand->siglock);
1103                         schedule();
1104                 }
1105                 else
1106                         spin_unlock_irq(&tsk->sighand->siglock);
1107
1108                 if (signal != NULL) {
1109                         /*
1110                          * We know the struct stays in place when its
1111                          * u.live.signal is set, see check_dead_utrace.
1112                          * This makes it safe to clear its pointer here.
1113                          */
1114                         BUG_ON(tsk->utrace != utrace);
1115                         BUG_ON(utrace->u.live.signal != signal);
1116                         utrace->u.live.signal = NULL;
1117                 }
1118
1119                 if (killed)     /* Game over, man!  */
1120                         return 1;
1121
1122                 /*
1123                  * We've woken up.  One engine could be waking us up while
1124                  * another has asked us to quiesce.  So check afresh.  We
1125                  * could have been detached while quiescent.  Now we are no
1126                  * longer quiescent, so don't need to do any RCU locking.
1127                  * But we do need to check our utrace pointer anew.
1128                  */
1129                 utrace = tsk->utrace;
1130                 if (tsk->utrace_flags
1131                     & (UTRACE_EVENT(QUIESCE) | UTRACE_ACTION_STATE_MASK))
1132                         goto restart;
1133         }
1134         else if (tsk->utrace_flags & UTRACE_ACTION_QUIESCE) {
1135                 /*
1136                  * Our flags are out of date.
1137                  * Update the set of events of interest from the union
1138                  * of the interests of the remaining tracing engines.
1139                  * This may notice that there are no engines left
1140                  * and clean up the struct utrace.  It's left in place
1141                  * and the QUIESCE flag set as long as utrace_get_signal
1142                  * still needs to process a pending forced signal.
1143                  */
1144                 unsigned long flags;
1145                 utrace = rcu_dereference(tsk->utrace);
1146                 spin_lock(&utrace->lock);
1147                 flags = rescan_flags(utrace);
1148                 if (flags == 0)
1149                         utrace_clear_tsk(tsk, utrace);
1150                 check_dead_utrace(tsk, utrace, flags);
1151         }
1152
1153         /*
1154          * We're resuming.  Update the machine layer tracing state and then go.
1155          */
1156 #ifdef ARCH_HAS_SINGLE_STEP
1157         if (action & UTRACE_ACTION_SINGLESTEP)
1158                 tracehook_enable_single_step(tsk);
1159         else
1160                 tracehook_disable_single_step(tsk);
1161 #endif
1162 #ifdef ARCH_HAS_BLOCK_STEP
1163         if ((action & (UTRACE_ACTION_BLOCKSTEP|UTRACE_ACTION_SINGLESTEP))
1164             == UTRACE_ACTION_BLOCKSTEP)
1165                 tracehook_enable_block_step(tsk);
1166         else
1167                 tracehook_disable_block_step(tsk);
1168 #endif
1169         if (tsk->utrace_flags & UTRACE_EVENT_SYSCALL)
1170                 tracehook_enable_syscall_trace(tsk);
1171         else
1172                 tracehook_disable_syscall_trace(tsk);
1173
1174         return 0;
1175 }
1176
1177
1178 /*
1179  * Called iff UTRACE_EVENT(EXIT) flag is set.
1180  */
1181 void
1182 utrace_report_exit(long *exit_code)
1183 {
1184         struct task_struct *tsk = current;
1185         struct utrace *utrace = tsk->utrace;
1186         struct list_head *pos, *next;
1187         struct utrace_attached_engine *engine;
1188         unsigned long action;
1189         long orig_code = *exit_code;
1190
1191         /* XXX must change for sharing */
1192         action = UTRACE_ACTION_RESUME;
1193         list_for_each_safe_rcu(pos, next, &utrace->engines) {
1194                 engine = list_entry(pos, struct utrace_attached_engine, entry);
1195                 if (engine->flags & UTRACE_EVENT(EXIT))
1196                         REPORT(report_exit, orig_code, exit_code);
1197         }
1198         action = check_detach(tsk, action);
1199         check_quiescent(tsk, action);
1200 }
1201
1202 /*
1203  * Called with utrace locked, unlocks it on return.  Unconditionally
1204  * recompute the flags after report_death is finished.  This may notice
1205  * that there are no engines left and free the utrace struct.
1206  */
1207 static void
1208 finish_report_death(struct task_struct *tsk, struct utrace *utrace)
1209 {
1210         /*
1211          * After we unlock (possibly inside utrace_reap for callbacks) with
1212          * this flag clear, competing utrace_detach/utrace_set_flags calls
1213          * know that we've finished our callbacks and any detach bookkeeping.
1214          */
1215         utrace->u.exit.flags &= EXIT_FLAG_REAP;
1216
1217         if (utrace->u.exit.flags & EXIT_FLAG_REAP)
1218                 /*
1219                  * utrace_release_task was already called in parallel.
1220                  * We must complete its work now.
1221                  */
1222                 utrace_reap(tsk, utrace);
1223         else
1224                 /*
1225                  * Clear out any detached engines and in the process
1226                  * recompute the flags.  Mask off event bits we can't
1227                  * see any more.  This tells utrace_release_task we
1228                  * have already finished, if it comes along later.
1229                  * Note this all happens on the already-locked utrace,
1230                  * which might already be removed from the task.
1231                  */
1232                 remove_detached(tsk, utrace, 0, DEAD_FLAGS_MASK);
1233 }
1234
1235 /*
1236  * Called with utrace locked, unlocks it on return.
1237  * EXIT_FLAG_DELAYED_GROUP_LEADER is set.
1238  * Do second report_death callbacks for engines using NOREAP.
1239  */
1240 static void
1241 report_delayed_group_leader(struct task_struct *tsk, struct utrace *utrace)
1242 {
1243         struct list_head *pos, *next;
1244         struct utrace_attached_engine *engine;
1245         u32 action;
1246
1247         utrace->u.exit.flags |= EXIT_FLAG_DEATH;
1248         spin_unlock(&utrace->lock);
1249
1250         /* XXX must change for sharing */
1251         list_for_each_safe_rcu(pos, next, &utrace->engines) {
1252                 engine = list_entry(pos, struct utrace_attached_engine, entry);
1253 #define NOREAP_DEATH (UTRACE_EVENT(DEATH) | UTRACE_ACTION_NOREAP)
1254                 if ((engine->flags & NOREAP_DEATH) == NOREAP_DEATH)
1255                         REPORT(report_death);
1256         }
1257
1258         spin_lock(&utrace->lock);
1259         finish_report_death(tsk, utrace);
1260 }
1261
1262 /*
1263  * Called iff UTRACE_EVENT(DEATH) or UTRACE_ACTION_QUIESCE flag is set.
1264  *
1265  * It is always possible that we are racing with utrace_release_task here,
1266  * if UTRACE_ACTION_NOREAP is not set, or in the case of non-leader exec
1267  * where the old leader will get released regardless of NOREAP.  For this
1268  * reason, utrace_release_task checks for the event bits that get us here,
1269  * and delays its cleanup for us to do.
1270  */
1271 void
1272 utrace_report_death(struct task_struct *tsk, struct utrace *utrace)
1273 {
1274         struct list_head *pos, *next;
1275         struct utrace_attached_engine *engine;
1276         u32 action;
1277
1278         BUG_ON(!tsk->exit_state);
1279
1280         /*
1281          * We are presently considered "quiescent"--which is accurate
1282          * inasmuch as we won't run any more user instructions ever again.
1283          * But for utrace_detach and utrace_set_flags to be robust, they
1284          * must be sure whether or not we will run any more callbacks.  If
1285          * a call comes in before we do, taking the lock here synchronizes
1286          * us so we don't run any callbacks just disabled.  Calls that come
1287          * in while we're running the callbacks will see the report_death
1288          * flag and know that we are not yet fully quiescent for purposes
1289          * of detach bookkeeping.
1290          */
1291         spin_lock(&utrace->lock);
1292         BUG_ON(utrace->u.exit.flags & EXIT_FLAG_DEATH);
1293         utrace->u.exit.flags &= EXIT_FLAG_REAP;
1294         utrace->u.exit.flags |= EXIT_FLAG_DEATH;
1295         spin_unlock(&utrace->lock);
1296
1297         /* XXX must change for sharing */
1298         list_for_each_safe_rcu(pos, next, &utrace->engines) {
1299                 engine = list_entry(pos, struct utrace_attached_engine, entry);
1300                 if (engine->flags & UTRACE_EVENT(DEATH))
1301                         REPORT(report_death);
1302                 if (engine->flags & UTRACE_EVENT(QUIESCE))
1303                         REPORT(report_quiesce);
1304         }
1305
1306         spin_lock(&utrace->lock);
1307         if (unlikely(utrace->u.exit.flags & EXIT_FLAG_DELAYED_GROUP_LEADER))
1308                 /*
1309                  * Another thread's release_task came along and
1310                  * removed the delayed_group_leader condition,
1311                  * but after we might have started callbacks.
1312                  * Do the second report_death callback right now.
1313                  */
1314                 report_delayed_group_leader(tsk, utrace);
1315         else
1316                 finish_report_death(tsk, utrace);
1317 }
1318
1319 /*
1320  * We're called from release_task when delayed_group_leader(tsk) was
1321  * previously true and is no longer true, and NOREAP was set.
1322  * This means no parent notifications have happened for this zombie.
1323  */
1324 void
1325 utrace_report_delayed_group_leader(struct task_struct *tsk)
1326 {
1327         struct utrace *utrace;
1328
1329         rcu_read_lock();
1330         utrace = rcu_dereference(tsk->utrace);
1331         if (unlikely(utrace == NULL)) {
1332                 rcu_read_unlock();
1333                 return;
1334         }
1335         spin_lock(&utrace->lock);
1336         rcu_read_unlock();
1337
1338         utrace->u.exit.flags |= EXIT_FLAG_DELAYED_GROUP_LEADER;
1339
1340         /*
1341          * If utrace_report_death is still running, or release_task has
1342          * started already, there is nothing more to do now.
1343          */
1344         if ((utrace->u.exit.flags & (EXIT_FLAG_DEATH | EXIT_FLAG_REAP))
1345             || !likely(tsk->utrace_flags & UTRACE_ACTION_NOREAP))
1346                 spin_unlock(&utrace->lock);
1347         else
1348                 report_delayed_group_leader(tsk, utrace);
1349 }
1350
1351 /*
1352  * Called iff UTRACE_EVENT(VFORK_DONE) flag is set.
1353  */
1354 void
1355 utrace_report_vfork_done(pid_t child_pid)
1356 {
1357         struct task_struct *tsk = current;
1358         struct utrace *utrace = tsk->utrace;
1359         struct list_head *pos, *next;
1360         struct utrace_attached_engine *engine;
1361         unsigned long action;
1362
1363         /* XXX must change for sharing */
1364         action = UTRACE_ACTION_RESUME;
1365         list_for_each_safe_rcu(pos, next, &utrace->engines) {
1366                 engine = list_entry(pos, struct utrace_attached_engine, entry);
1367                 if (engine->flags & UTRACE_EVENT(VFORK_DONE))
1368                         REPORT(report_vfork_done, child_pid);
1369                 if (action & UTRACE_ACTION_HIDE)
1370                         break;
1371         }
1372         action = check_detach(tsk, action);
1373         check_quiescent(tsk, action);
1374 }
1375
1376 /*
1377  * Called iff UTRACE_EVENT(EXEC) flag is set.
1378  */
1379 void
1380 utrace_report_exec(struct linux_binprm *bprm, struct pt_regs *regs)
1381 {
1382         struct task_struct *tsk = current;
1383         struct utrace *utrace = tsk->utrace;
1384         struct list_head *pos, *next;
1385         struct utrace_attached_engine *engine;
1386         unsigned long action;
1387
1388         /* XXX must change for sharing */
1389         action = UTRACE_ACTION_RESUME;
1390         list_for_each_safe_rcu(pos, next, &utrace->engines) {
1391                 engine = list_entry(pos, struct utrace_attached_engine, entry);
1392                 if (engine->flags & UTRACE_EVENT(EXEC))
1393                         REPORT(report_exec, bprm, regs);
1394                 if (action & UTRACE_ACTION_HIDE)
1395                         break;
1396         }
1397         action = check_detach(tsk, action);
1398         check_quiescent(tsk, action);
1399 }
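
/*
 * Example (a minimal sketch): an engine-side report_exec hook, assuming
 * the (engine, tsk, bprm, regs) argument order implied by the
 * REPORT(report_exec, bprm, regs) call above.  The printk is just a
 * stand-in for whatever the engine wants to do with the new image.
 */
static u32
example_report_exec(struct utrace_attached_engine *engine,
		    struct task_struct *tsk,
		    struct linux_binprm *bprm, struct pt_regs *regs)
{
	printk(KERN_DEBUG "utrace example: %d exec of %s\n",
	       tsk->pid, bprm->filename);

	return UTRACE_ACTION_RESUME;		/* carry on normally */
}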
1400
1401 /*
1402  * Called iff UTRACE_EVENT(SYSCALL_{ENTRY,EXIT}) flag is set.
1403  */
1404 void
1405 utrace_report_syscall(struct pt_regs *regs, int is_exit)
1406 {
1407         struct task_struct *tsk = current;
1408         struct utrace *utrace = tsk->utrace;
1409         struct list_head *pos, *next;
1410         struct utrace_attached_engine *engine;
1411         unsigned long action, ev;
1412
1413         /*
1414          * XXX pass syscall # to engine hook directly, let it return
1415          * inhibit-action to reset to -1:
1416          *      long syscall = tracehook_syscall_number(regs, is_exit);
1417          */
1418
1419         ev = is_exit ? UTRACE_EVENT(SYSCALL_EXIT) : UTRACE_EVENT(SYSCALL_ENTRY);
1420
1421         /* XXX must change for sharing */
1422         action = UTRACE_ACTION_RESUME;
1423         list_for_each_safe_rcu(pos, next, &utrace->engines) {
1424                 engine = list_entry(pos, struct utrace_attached_engine, entry);
1425                 if (engine->flags & ev) {
1426                         if (is_exit)
1427                                 REPORT(report_syscall_exit, regs);
1428                         else
1429                                 REPORT(report_syscall_entry, regs);
1430                 }
1431                 if (action & UTRACE_ACTION_HIDE)
1432                         break;
1433         }
1434         action = check_detach(tsk, action);
1435         if (unlikely(check_quiescent(tsk, action)) && !is_exit)
1436                 /*
1437                  * We are continuing despite QUIESCE because of a SIGKILL.
1438                  * Don't let the system call actually proceed.
1439                  */
1440                 tracehook_abort_syscall(regs);
1441 }
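
/*
 * Example (a minimal sketch): an engine-side report_syscall_entry hook,
 * assuming the (engine, tsk, regs) argument order implied by the
 * REPORT(report_syscall_entry, regs) call above.  struct example_counter
 * is hypothetical.  report_syscall_exit and report_vfork_done follow the
 * same shape, with the extra arguments shown in their REPORT() calls.
 */
struct example_counter {			/* hypothetical private state */
	atomic_t nr_entries;
};

static u32
example_report_syscall_entry(struct utrace_attached_engine *engine,
			     struct task_struct *tsk, struct pt_regs *regs)
{
	struct example_counter *c = engine->data;

	atomic_inc(&c->nr_entries);
	return UTRACE_ACTION_RESUME;		/* let the syscall proceed */
}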
1442
1443
1444 /*
1445  * This is pointed to by the utrace struct, but it's really a private
1446  * structure between utrace_get_signal and utrace_inject_signal.
1447  */
1448 struct utrace_signal
1449 {
1450         siginfo_t *const info;
1451         struct k_sigaction *return_ka;
1452         int signr;
1453 };
1454
1455
1456 /* XXX copied from signal.c */
1457 #ifdef SIGEMT
1458 #define M_SIGEMT        M(SIGEMT)
1459 #else
1460 #define M_SIGEMT        0
1461 #endif
1462
1463 #if SIGRTMIN > BITS_PER_LONG
1464 #define M(sig) (1ULL << ((sig)-1))
1465 #else
1466 #define M(sig) (1UL << ((sig)-1))
1467 #endif
1468 #define T(sig, mask) (M(sig) & (mask))
1469
1470 #define SIG_KERNEL_ONLY_MASK (\
1471         M(SIGKILL)   |  M(SIGSTOP)                                   )
1472
1473 #define SIG_KERNEL_STOP_MASK (\
1474         M(SIGSTOP)   |  M(SIGTSTP)   |  M(SIGTTIN)   |  M(SIGTTOU)   )
1475
1476 #define SIG_KERNEL_COREDUMP_MASK (\
1477         M(SIGQUIT)   |  M(SIGILL)    |  M(SIGTRAP)   |  M(SIGABRT)   | \
1478         M(SIGFPE)    |  M(SIGSEGV)   |  M(SIGBUS)    |  M(SIGSYS)    | \
1479         M(SIGXCPU)   |  M(SIGXFSZ)   |  M_SIGEMT                     )
1480
1481 #define SIG_KERNEL_IGNORE_MASK (\
1482         M(SIGCONT)   |  M(SIGCHLD)   |  M(SIGWINCH)  |  M(SIGURG)    )
1483
1484 #define sig_kernel_only(sig) \
1485                 (((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_ONLY_MASK))
1486 #define sig_kernel_coredump(sig) \
1487                 (((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_COREDUMP_MASK))
1488 #define sig_kernel_ignore(sig) \
1489                 (((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_IGNORE_MASK))
1490 #define sig_kernel_stop(sig) \
1491                 (((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_STOP_MASK))
1492
1493
1494 /*
1495  * Call each interested tracing engine's report_signal callback.
1496  */
1497 static u32
1498 report_signal(struct task_struct *tsk, struct pt_regs *regs,
1499               struct utrace *utrace, u32 action,
1500               unsigned long flags1, unsigned long flags2, siginfo_t *info,
1501               const struct k_sigaction *ka, struct k_sigaction *return_ka)
1502 {
1503         struct list_head *pos, *next;
1504         struct utrace_attached_engine *engine;
1505
1506         /* XXX must change for sharing */
1507         list_for_each_safe_rcu(pos, next, &utrace->engines) {
1508                 engine = list_entry(pos, struct utrace_attached_engine, entry);
1509                 if ((engine->flags & flags1) && (engine->flags & flags2)) {
1510                         u32 disp = action & UTRACE_ACTION_OP_MASK;
1511                         action &= ~UTRACE_ACTION_OP_MASK;
1512                         REPORT(report_signal, regs, disp, info, ka, return_ka);
1513                         if ((action & UTRACE_ACTION_OP_MASK) == 0)
1514                                 action |= disp;
1515                         if (action & UTRACE_ACTION_HIDE)
1516                                 break;
1517                 }
1518         }
1519
1520         return action;
1521 }
1522
1523 void
1524 utrace_signal_handler_singlestep(struct task_struct *tsk, struct pt_regs *regs)
1525 {
1526         u32 action;
1527         action = report_signal(tsk, regs, tsk->utrace, UTRACE_SIGNAL_HANDLER,
1528                                UTRACE_EVENT_SIGNAL_ALL,
1529                                UTRACE_ACTION_SINGLESTEP|UTRACE_ACTION_BLOCKSTEP,
1530                                NULL, NULL, NULL);
1531         action = check_detach(tsk, action);
1532         check_quiescent(tsk, action);
1533 }
1534
1535
1536 /*
1537  * This is the hook from the signals code, called with the siglock held.
1538  * Here is the ideal place to quiesce.  We also dequeue and intercept signals.
1539  */
1540 int
1541 utrace_get_signal(struct task_struct *tsk, struct pt_regs *regs,
1542                   siginfo_t *info, struct k_sigaction *return_ka)
1543 {
1544         struct utrace *utrace = tsk->utrace;
1545         struct utrace_signal signal = { info, return_ka, 0 };
1546         struct k_sigaction *ka;
1547         unsigned long action, event;
1548
1549         /*
1550          * If a signal was injected previously, it could not use our
1551          * stack space directly.  It had to allocate a data structure,
1552          * which we can now copy out of and free.
1553          *
1554          * We don't have to lock access to u.live.signal because it's only
1555          * touched by utrace_inject_signal when we're quiescent.
1556          */
1557         if (utrace->u.live.signal != NULL) {
1558                 signal.signr = utrace->u.live.signal->signr;
1559                 copy_siginfo(info, utrace->u.live.signal->info);
1560                 if (utrace->u.live.signal->return_ka)
1561                         *return_ka = *utrace->u.live.signal->return_ka;
1562                 else
1563                         signal.return_ka = NULL;
1564                 kfree(utrace->u.live.signal);
1565                 utrace->u.live.signal = NULL;
1566         }
1567
1568         /*
1569          * If we should quiesce, now is the time.
1570          * First stash a pointer to the state on our stack,
1571          * so that utrace_inject_signal can tell us what to do.
1572          */
1573         if (tsk->utrace_flags & UTRACE_ACTION_QUIESCE) {
1574                 int killed = sigkill_pending(tsk);
1575                 if (!killed) {
1576                         spin_unlock_irq(&tsk->sighand->siglock);
1577
1578                         killed = utrace_quiescent(tsk, &signal);
1579
1580                         /*
1581                          * No one wants us quiescent any more, so we can take
1582                          * signals.  Unless we have a forced signal to take,
1583                          * back out to the signal code to resynchronize after
1584                          * releasing the siglock.
1585                          */
1586                         if (signal.signr == 0 && !killed)
1587                                 /*
1588                                  * This return value says to reacquire the
1589                                  * siglock and check again.  This will check
1590                                  * for a pending group stop and process it
1591                                  * before coming back here.
1592                                  */
1593                                 return -1;
1594
1595                         spin_lock_irq(&tsk->sighand->siglock);
1596                 }
1597                 if (killed) {
1598                         /*
1599                          * The only reason we woke up now was because of a
1600                          * SIGKILL.  Don't do normal dequeuing in case it
1601                          * might get a signal other than SIGKILL.  That would
1602                          * perturb the death state so it might differ from
1603                          * what the debugger would have allowed to happen.
1604                          * Instead, pluck out just the SIGKILL to be sure
1605                          * we'll die immediately with nothing else different
1606                          * from the quiescent state the debugger wanted us in.
1607                          */
1608                         sigset_t sigkill_only;
1609                         sigfillset(&sigkill_only);
1610                         sigdelset(&sigkill_only, SIGKILL);
1611                         killed = dequeue_signal(tsk, &sigkill_only, info);
1612                         BUG_ON(killed != SIGKILL);
1613                         *return_ka = tsk->sighand->action[killed - 1];
1614                         return killed;
1615                 }
1616         }
1617
1618         /*
1619          * If a signal was injected, everything is in place now.  Go do it.
1620          */
1621         if (signal.signr != 0) {
1622                 if (signal.return_ka == NULL) {
1623                         ka = &tsk->sighand->action[signal.signr - 1];
1624                         if (ka->sa.sa_flags & SA_ONESHOT)
1625                                 ka->sa.sa_handler = SIG_DFL;
1626                         *return_ka = *ka;
1627                 }
1628                 else
1629                         BUG_ON(signal.return_ka != return_ka);
1630                 return signal.signr;
1631         }
1632
1633         /*
1634          * If no one is interested in intercepting signals, let the caller
1635          * just dequeue them normally.
1636          */
1637         if ((tsk->utrace_flags & UTRACE_EVENT_SIGNAL_ALL) == 0)
1638                 return 0;
1639
1640         /*
1641          * Steal the next signal so we can let tracing engines examine it.
1642          * From the signal number and sigaction, determine what normal
1643          * delivery would do.  If no engine perturbs it, we'll do that
1644          * by returning the signal number after setting *return_ka.
1645          */
1646         signal.signr = dequeue_signal(tsk, &tsk->blocked, info);
1647         if (signal.signr == 0)
1648                 return 0;
1649
1650         BUG_ON(signal.signr != info->si_signo);
1651
1652         ka = &tsk->sighand->action[signal.signr - 1];
1653         *return_ka = *ka;
1654
1655         /*
1656          * We are never allowed to interfere with SIGKILL,
1657          * just punt after filling in *return_ka for our caller.
1658          */
1659         if (signal.signr == SIGKILL)
1660                 return signal.signr;
1661
1662         if (ka->sa.sa_handler == SIG_IGN) {
1663                 event = UTRACE_EVENT(SIGNAL_IGN);
1664                 action = UTRACE_SIGNAL_IGN;
1665         }
1666         else if (ka->sa.sa_handler != SIG_DFL) {
1667                 event = UTRACE_EVENT(SIGNAL);
1668                 action = UTRACE_ACTION_RESUME;
1669         }
1670         else if (sig_kernel_coredump(signal.signr)) {
1671                 event = UTRACE_EVENT(SIGNAL_CORE);
1672                 action = UTRACE_SIGNAL_CORE;
1673         }
1674         else if (sig_kernel_ignore(signal.signr)) {
1675                 event = UTRACE_EVENT(SIGNAL_IGN);
1676                 action = UTRACE_SIGNAL_IGN;
1677         }
1678         else if (sig_kernel_stop(signal.signr)) {
1679                 event = UTRACE_EVENT(SIGNAL_STOP);
1680                 action = (signal.signr == SIGSTOP
1681                           ? UTRACE_SIGNAL_STOP : UTRACE_SIGNAL_TSTP);
1682         }
1683         else {
1684                 event = UTRACE_EVENT(SIGNAL_TERM);
1685                 action = UTRACE_SIGNAL_TERM;
1686         }
1687
1688         if (tsk->utrace_flags & event) {
1689                 /*
1690                  * We have some interested engines, so tell them about the
1691                  * signal and let them change its disposition.
1692                  */
1693
1694                 spin_unlock_irq(&tsk->sighand->siglock);
1695
1696                 action = report_signal(tsk, regs, utrace, action, event, event,
1697                                        info, ka, return_ka);
1698                 action &= UTRACE_ACTION_OP_MASK;
1699
1700                 if (action & UTRACE_SIGNAL_HOLD) {
1701                         struct sigqueue *q = sigqueue_alloc();
1702                         if (likely(q != NULL)) {
1703                                 q->flags = 0;
1704                                 copy_siginfo(&q->info, info);
1705                         }
1706                         action &= ~UTRACE_SIGNAL_HOLD;
1707                         spin_lock_irq(&tsk->sighand->siglock);
1708                         sigaddset(&tsk->pending.signal, info->si_signo);
1709                         if (likely(q != NULL))
1710                                 list_add(&q->list, &tsk->pending.list);
1711                 }
1712                 else
1713                         spin_lock_irq(&tsk->sighand->siglock);
1714
1715                 recalc_sigpending_tsk(tsk);
1716         }
1717
1718         /*
1719          * We express the chosen action to the signals code in terms
1720          * of a representative signal whose default action does it.
1721          */
1722         switch (action) {
1723         case UTRACE_SIGNAL_IGN:
1724                 /*
1725                  * We've eaten the signal.  That's all we do.
1726                  * Tell the caller to restart.
1727                  */
1728                 spin_unlock_irq(&tsk->sighand->siglock);
1729                 return -1;
1730
1731         case UTRACE_ACTION_RESUME:
1732         case UTRACE_SIGNAL_DELIVER:
1733                 /*
1734                  * The handler will run.  We do the SA_ONESHOT work here
1735                  * since the normal path will only touch *return_ka now.
1736                  */
1737                 if (return_ka->sa.sa_flags & SA_ONESHOT)
1738                         ka->sa.sa_handler = SIG_DFL;
1739                 break;
1740
1741         case UTRACE_SIGNAL_TSTP:
1742                 signal.signr = SIGTSTP;
1743                 tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
1744                 return_ka->sa.sa_handler = SIG_DFL;
1745                 break;
1746
1747         case UTRACE_SIGNAL_STOP:
1748                 signal.signr = SIGSTOP;
1749                 tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
1750                 return_ka->sa.sa_handler = SIG_DFL;
1751                 break;
1752
1753         case UTRACE_SIGNAL_TERM:
1754                 signal.signr = SIGTERM;
1755                 return_ka->sa.sa_handler = SIG_DFL;
1756                 break;
1757
1758         case UTRACE_SIGNAL_CORE:
1759                 signal.signr = SIGQUIT;
1760                 return_ka->sa.sa_handler = SIG_DFL;
1761                 break;
1762
1763         default:
1764                 BUG();
1765         }
1766
1767         return signal.signr;
1768 }
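
/*
 * Example (a minimal sketch): an engine-side report_signal hook, assuming
 * the (engine, tsk, regs, action, info, ka, return_ka) argument order
 * implied by the REPORT(report_signal, ...) call in report_signal() above.
 * The low UTRACE_ACTION_OP_MASK bits of action carry the disposition
 * chosen so far; returning a different UTRACE_SIGNAL_* value overrides it.
 * Here a hypothetical engine swallows SIGUSR1 and leaves everything else
 * as it found it.
 */
static u32
example_report_signal(struct utrace_attached_engine *engine,
		      struct task_struct *tsk, struct pt_regs *regs,
		      u32 action, siginfo_t *info,
		      const struct k_sigaction *ka,
		      struct k_sigaction *return_ka)
{
	if (info != NULL && info->si_signo == SIGUSR1)
		/* Eat it: utrace_get_signal tells its caller to restart. */
		return UTRACE_SIGNAL_IGN;

	return action;			/* keep the current disposition */
}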
1769
1770
1771 /*
1772  * Cause a specified signal delivery in the target thread,
1773  * which must be quiescent.  The action has UTRACE_SIGNAL_* bits
1774  * as returned from a report_signal callback.  If ka is non-null,
1775  * it gives the sigaction to follow for UTRACE_SIGNAL_DELIVER;
1776  * otherwise, the installed sigaction at the time of delivery is used.
1777  */
1778 int
1779 utrace_inject_signal(struct task_struct *target,
1780                      struct utrace_attached_engine *engine,
1781                      u32 action, siginfo_t *info,
1782                      const struct k_sigaction *ka)
1783 {
1784         struct utrace *utrace;
1785         struct utrace_signal *signal;
1786         int ret;
1787
1788         if (info->si_signo == 0 || !valid_signal(info->si_signo))
1789                 return -EINVAL;
1790
1791         utrace = get_utrace_lock_attached(target, engine);
1792         if (unlikely(IS_ERR(utrace)))
1793                 return PTR_ERR(utrace);
1794
1795         ret = 0;
1796         signal = utrace->u.live.signal;
1797         if (unlikely(target->exit_state))
1798                 ret = -ESRCH;
1799         else if (signal == NULL) {
1800                 ret = -ENOSYS;  /* XXX */
1801         }
1802         else if (signal->signr != 0)
1803                 ret = -EAGAIN;
1804         else {
1805                 if (info != signal->info)
1806                         copy_siginfo(signal->info, info);
1807
1808                 switch (action) {
1809                 default:
1810                         ret = -EINVAL;
1811                         break;
1812
1813                 case UTRACE_SIGNAL_IGN:
1814                         break;
1815
1816                 case UTRACE_ACTION_RESUME:
1817                 case UTRACE_SIGNAL_DELIVER:
1818                         /*
1819                          * The handler will run.  We do the SA_ONESHOT work
1820                          * here since the normal path will not touch the
1821                          * real sigaction when using an injected signal.
1822                          */
1823                         if (ka == NULL)
1824                                 signal->return_ka = NULL;
1825                         else if (ka != signal->return_ka)
1826                                 *signal->return_ka = *ka;
1827                         if (ka && ka->sa.sa_flags & SA_ONESHOT) {
1828                                 struct k_sigaction *a;
1829                                 a = &target->sighand->action[info->si_signo-1];
1830                                 spin_lock_irq(&target->sighand->siglock);
1831                                 a->sa.sa_handler = SIG_DFL;
1832                                 spin_unlock_irq(&target->sighand->siglock);
1833                         }
1834                         signal->signr = info->si_signo;
1835                         break;
1836
1837                 case UTRACE_SIGNAL_TSTP:
1838                         signal->signr = SIGTSTP;
1839                         spin_lock_irq(&target->sighand->siglock);
1840                         target->signal->flags |= SIGNAL_STOP_DEQUEUED;
1841                         spin_unlock_irq(&target->sighand->siglock);
1842                         signal->return_ka->sa.sa_handler = SIG_DFL;
1843                         break;
1844
1845                 case UTRACE_SIGNAL_STOP:
1846                         signal->signr = SIGSTOP;
1847                         spin_lock_irq(&target->sighand->siglock);
1848                         target->signal->flags |= SIGNAL_STOP_DEQUEUED;
1849                         spin_unlock_irq(&target->sighand->siglock);
1850                         signal->return_ka->sa.sa_handler = SIG_DFL;
1851                         break;
1852
1853                 case UTRACE_SIGNAL_TERM:
1854                         signal->signr = SIGTERM;
1855                         signal->return_ka->sa.sa_handler = SIG_DFL;
1856                         break;
1857
1858                 case UTRACE_SIGNAL_CORE:
1859                         signal->signr = SIGQUIT;
1860                         signal->return_ka->sa.sa_handler = SIG_DFL;
1861                         break;
1862                 }
1863         }
1864
1865         spin_unlock(&utrace->lock);
1866
1867         return ret;
1868 }
1869 EXPORT_SYMBOL_GPL(utrace_inject_signal);
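
/*
 * Example (a minimal sketch): how an engine might use
 * utrace_inject_signal() once its target is quiescent inside
 * utrace_get_signal() (otherwise u.live.signal is NULL and the call
 * fails with -ENOSYS, as noted above).  The engine pointer is assumed
 * to come from an earlier attach; the rest uses only the cases handled
 * in the switch above.
 */
static int
example_inject_sigterm(struct task_struct *target,
		       struct utrace_attached_engine *engine)
{
	siginfo_t info;

	memset(&info, 0, sizeof(info));
	info.si_signo = SIGTERM;
	info.si_code = SI_KERNEL;

	/* NULL ka: follow the sigaction installed at delivery time. */
	return utrace_inject_signal(target, engine, UTRACE_SIGNAL_DELIVER,
				    &info, NULL);
}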
1870
1871
1872 const struct utrace_regset *
1873 utrace_regset(struct task_struct *target,
1874               struct utrace_attached_engine *engine,
1875               const struct utrace_regset_view *view, int which)
1876 {
1877         if (unlikely((unsigned) which >= view->n))
1878                 return NULL;
1879
1880         if (target != current)
1881                 wait_task_inactive(target);
1882
1883         return &view->regsets[which];
1884 }
1885 EXPORT_SYMBOL_GPL(utrace_regset);
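
/*
 * Example (a minimal sketch): fetching the general-purpose registers of
 * a quiescent thread.  utrace_native_view() and the regset fields used
 * here (->n, ->size, ->get) belong to the utrace regset interface
 * declared elsewhere; their exact names and signatures are assumed, so
 * treat this purely as a sketch.
 */
static int
example_fetch_gpregs(struct task_struct *target,
		     struct utrace_attached_engine *engine, void *buf)
{
	const struct utrace_regset_view *view = utrace_native_view(target);
	const struct utrace_regset *regset;

	/* Regset 0 is conventionally the general-purpose register set. */
	regset = utrace_regset(target, engine, view, 0);
	if (regset == NULL || regset->get == NULL)
		return -EIO;

	/* Read the whole regset into a kernel buffer; no user copy. */
	return (*regset->get)(target, regset, 0, regset->n * regset->size,
			      buf, NULL);
}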
1886
1887
1888 /*
1889  * Return the task_struct for the task using ptrace on this one, or NULL.
1890  * Must be called with rcu_read_lock held to keep the returned struct alive.
1891  *
1892  * At exec time, this may be called with task_lock(p) still held from when
1893  * tracehook_unsafe_exec was just called.  In that case it must give
1894  * results consistent with those unsafe_exec results, i.e. non-NULL if
1895  * any LSM_UNSAFE_PTRACE_* bits were set.
1896  *
1897  * The value is also used to display after "TracerPid:" in /proc/PID/status,
1898  * where it is called with only rcu_read_lock held.
1899  */
1900 struct task_struct *
1901 utrace_tracer_task(struct task_struct *target)
1902 {
1903         struct utrace *utrace;
1904         struct task_struct *tracer = NULL;
1905
1906         utrace = rcu_dereference(target->utrace);
1907         if (utrace != NULL) {
1908                 struct list_head *pos, *next;
1909                 struct utrace_attached_engine *engine;
1910                 const struct utrace_engine_ops *ops;
1911                 list_for_each_safe_rcu(pos, next, &utrace->engines) {
1912                         engine = list_entry(pos, struct utrace_attached_engine,
1913                                             entry);
1914                         ops = rcu_dereference(engine->ops);
1915                         if (ops->tracer_task) {
1916                                 tracer = (*ops->tracer_task)(engine, target);
1917                                 if (tracer != NULL)
1918                                         break;
1919                         }
1920                 }
1921         }
1922
1923         return tracer;
1924 }
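
/*
 * Example (a minimal sketch): the engine side of the tracer_task hook
 * consulted above, with its signature inferred from the
 * (*ops->tracer_task)(engine, target) call.  Storing the tracing task
 * in engine->data at attach time is just a hypothetical convention.
 */
static struct task_struct *
example_tracer_task(struct utrace_attached_engine *engine,
		    struct task_struct *target)
{
	return engine->data;		/* the task that attached this engine */
}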
1925
1926 int
1927 utrace_allow_access_process_vm(struct task_struct *target)
1928 {
1929         struct utrace *utrace;
1930         int ret = 0;
1931
1932         rcu_read_lock();
1933         utrace = rcu_dereference(target->utrace);
1934         if (utrace != NULL) {
1935                 struct list_head *pos, *next;
1936                 struct utrace_attached_engine *engine;
1937                 const struct utrace_engine_ops *ops;
1938                 list_for_each_safe_rcu(pos, next, &utrace->engines) {
1939                         engine = list_entry(pos, struct utrace_attached_engine,
1940                                             entry);
1941                         ops = rcu_dereference(engine->ops);
1942                         if (ops->allow_access_process_vm) {
1943                                 ret = (*ops->allow_access_process_vm)(engine,
1944                                                                       target,
1945                                                                       current);
1946                                 if (ret)
1947                                         break;
1948                         }
1949                 }
1950         }
1951         rcu_read_unlock();
1952
1953         return ret;
1954 }
1955
1956 /*
1957  * Called on the current task to return LSM_UNSAFE_* bits implied by tracing.
1958  * Called with task_lock held.
1959  */
1960 int
1961 utrace_unsafe_exec(struct task_struct *tsk)
1962 {
1963         struct utrace *utrace = tsk->utrace;
1964         struct list_head *pos, *next;
1965         struct utrace_attached_engine *engine;
1966         const struct utrace_engine_ops *ops;
1967         int unsafe = 0;
1968
1969         /* XXX must change for sharing */
1970         list_for_each_safe_rcu(pos, next, &utrace->engines) {
1971                 engine = list_entry(pos, struct utrace_attached_engine, entry);
1972                 ops = rcu_dereference(engine->ops);
1973                 if (ops->unsafe_exec)
1974                         unsafe |= (*ops->unsafe_exec)(engine, tsk);
1975         }
1976
1977         return unsafe;
1978 }
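
/*
 * Example (a minimal sketch): the engine side of the two permission
 * hooks above, with signatures inferred from the
 * (*ops->unsafe_exec)(engine, tsk) and
 * (*ops->allow_access_process_vm)(engine, target, current) calls.
 * LSM_UNSAFE_PTRACE is the kind of bit a ptrace-style engine reports
 * at exec time; the engine->data convention is hypothetical.
 */
static int
example_unsafe_exec(struct utrace_attached_engine *engine,
		    struct task_struct *tsk)
{
	return LSM_UNSAFE_PTRACE;
}

static int
example_allow_access_process_vm(struct utrace_attached_engine *engine,
				struct task_struct *target,
				struct task_struct *caller)
{
	/* Only the task that attached this engine may touch target's mm. */
	return caller == engine->data;
}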