52d9b0059958abb5df9f3c8a4e7397fbac5f8069
[linux-2.6.git] / kernel / utrace.c
1 #include <linux/utrace.h>
2 #include <linux/tracehook.h>
3 #include <linux/err.h>
4 #include <linux/sched.h>
5 #include <linux/module.h>
6 #include <linux/init.h>
7 #include <linux/slab.h>
8 #include <asm/tracehook.h>
9
10
11 static kmem_cache_t *utrace_cachep;
12 static kmem_cache_t *utrace_engine_cachep;
13
14 static int __init
15 utrace_init(void)
16 {
17         utrace_cachep =
18                 kmem_cache_create("utrace_cache",
19                                   sizeof(struct utrace), 0,
20                                   SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
21         utrace_engine_cachep =
22                 kmem_cache_create("utrace_engine_cache",
23                                   sizeof(struct utrace_attached_engine), 0,
24                                   SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
25         return 0;
26 }
27 subsys_initcall(utrace_init);
28
29
30 /*
31  * Make sure target->utrace is allocated, and return with it locked on
32  * success.  This function mediates startup races.  The creating parent
33  * task has priority, and other callers will delay here to let its call
34  * succeed and take the new utrace lock first.
35  */
36 static struct utrace *
37 utrace_first_engine(struct task_struct *target,
38                     struct utrace_attached_engine *engine)
39 {
40         struct utrace *utrace, *ret;
41
42         /*
43          * If this is a newborn thread and we are not the creator,
44          * we have to wait for it.  The creator gets the first chance
45          * to attach.  The PF_STARTING flag is cleared after its
46          * report_clone hook has had a chance to run.
47          */
48         if ((target->flags & PF_STARTING)
49             && (current->utrace == NULL
50                 || current->utrace->u.live.cloning != target)) {
51                 yield();
52                 return (signal_pending(current)
53                         ? ERR_PTR(-ERESTARTNOINTR) : NULL);
54         }
55
56         utrace = kmem_cache_alloc(utrace_cachep, SLAB_KERNEL);
57         if (unlikely(utrace == NULL))
58                 return ERR_PTR(-ENOMEM);
59
60         utrace->u.live.cloning = NULL;
61         utrace->u.live.signal = NULL;
62         INIT_LIST_HEAD(&utrace->engines);
63         list_add(&engine->entry, &utrace->engines);
64         spin_lock_init(&utrace->lock);
65
66         ret = utrace;
67         utrace_lock(utrace);
68         task_lock(target);
69         if (likely(target->utrace == NULL)) {
70                 rcu_assign_pointer(target->utrace, utrace);
71                 /*
72                  * The task_lock protects us against another thread doing
73                  * the same thing.  We might still be racing against
74                  * tracehook_release_task.  It's called with ->exit_state
75                  * set to EXIT_DEAD and then checks ->utrace with an
76                  * smp_mb() in between.  If EXIT_DEAD is set, then
 77                  * release_task might have checked ->utrace already and seen
78                  * it NULL; we can't attach.  If we see EXIT_DEAD not yet
79                  * set after our barrier, then we know release_task will
80                  * see our target->utrace pointer.
81                  */
82                 smp_mb();
83                 if (target->exit_state == EXIT_DEAD) {
84                         /*
85                          * The target has already been through release_task.
86                          */
87                         target->utrace = NULL;
88                         goto cannot_attach;
89                 }
90                 task_unlock(target);
91
92                 /*
93                  * If the thread is already dead when we attach, then its
94                  * parent was notified already and we shouldn't repeat the
95                  * notification later after a detach or NOREAP flag change.
96                  */
97                 if (target->exit_state)
98                         utrace->u.exit.notified = 1;
99         }
100         else {
101                 /*
102                  * Another engine attached first, so there is a struct already.
103                  * A null return says to restart looking for the existing one.
104                  */
105         cannot_attach:
106                 ret = NULL;
107                 task_unlock(target);
108                 utrace_unlock(utrace);
109                 kmem_cache_free(utrace_cachep, utrace);
110         }
111
112         return ret;
113 }
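/*
 * Editor's note, not part of the original source: the caller contract for
 * utrace_first_engine above is a retry loop.  An ERR_PTR return is a hard
 * failure, NULL means "someone else installed the struct, look it up again",
 * and any other return is the freshly installed utrace, already locked.
 * utrace_attach below follows exactly this pattern; a minimal sketch:
 *
 *	for (;;) {
 *		utrace = utrace_first_engine(target, engine);
 *		if (IS_ERR(utrace))
 *			return ERR_PTR(PTR_ERR(utrace));
 *		if (utrace != NULL)
 *			break;			// attached, utrace is locked
 *		// NULL: lost the startup race; redo the lookup of target->utrace
 *	}
 */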
114
115 static void
116 utrace_free(struct rcu_head *rhead)
117 {
118         struct utrace *utrace = container_of(rhead, struct utrace, u.dead);
119         kmem_cache_free(utrace_cachep, utrace);
120 }
121
122 static void
123 rcu_utrace_free(struct utrace *utrace)
124 {
125         INIT_RCU_HEAD(&utrace->u.dead);
126         call_rcu(&utrace->u.dead, utrace_free);
127 }
128
129 static void
130 utrace_engine_free(struct rcu_head *rhead)
131 {
132         struct utrace_attached_engine *engine =
133                 container_of(rhead, struct utrace_attached_engine, rhead);
134         kmem_cache_free(utrace_engine_cachep, engine);
135 }
136
137 /*
138  * Called with utrace locked and the target quiescent (maybe current).
139  * If this was the last engine, utrace is left locked and not freed,
140  * but is removed from the task.
141  */
142 static void
143 remove_engine(struct utrace_attached_engine *engine,
144               struct task_struct *tsk, struct utrace *utrace)
145 {
146         list_del_rcu(&engine->entry);
147         if (list_empty(&utrace->engines)) {
148                 task_lock(tsk);
149                 if (likely(tsk->utrace != NULL)) {
150                         rcu_assign_pointer(tsk->utrace, NULL);
151                         tsk->utrace_flags = 0;
152                 }
153                 task_unlock(tsk);
154         }
155         call_rcu(&engine->rhead, utrace_engine_free);
156 }
157
158 /*
159  * This is pointed to by the utrace struct, but it's really a private
160  * structure between utrace_get_signal and utrace_inject_signal.
161  */
162 struct utrace_signal
163 {
164         siginfo_t *const info;
165         struct k_sigaction *return_ka;
166         int signr;
167 };
168
169 /*
 170  * Called with utrace locked, possibly after remove_engine has run.
 171  * Passed the union of flags from all remaining engines, i.e. zero if none left.
 172  * Install the flags in tsk->utrace_flags and return with utrace unlocked.
 173  * If no engines are left and no injected signal is pending, free utrace and return NULL.
174  */
175 static struct utrace *
176 check_dead_utrace(struct task_struct *tsk, struct utrace *utrace,
177                  unsigned long flags)
178 {
179         if (flags) {
180                 tsk->utrace_flags = flags;
181                 utrace_unlock(utrace);
182                 return utrace;
183         }
184
185         if (utrace->u.live.signal && utrace->u.live.signal->signr != 0) {
186                 utrace_unlock(utrace);
187                 return utrace;
188         }
189
190         utrace_unlock(utrace);
191         rcu_utrace_free(utrace);
192         return NULL;
193 }
194
195
196
197 /*
198  * Get the target thread to quiesce.  Return nonzero if it's already quiescent.
199  * Return zero if it will report a QUIESCE event soon.
200  * If interrupt is nonzero, wake it like a signal would so it quiesces ASAP.
201  * If interrupt is zero, just make sure it quiesces before going to user mode.
202  */
203 static int
204 quiesce(struct task_struct *target, int interrupt)
205 {
206         int quiescent;
207
208         target->utrace_flags |= UTRACE_ACTION_QUIESCE;
209         read_barrier_depends();
210
211         quiescent = (target->exit_state
212                      || target->state & (TASK_TRACED | TASK_STOPPED));
213
214         if (!quiescent) {
215                 spin_lock_irq(&target->sighand->siglock);
216                 quiescent = (unlikely(target->exit_state)
217                              || unlikely(target->state
218                                          & (TASK_TRACED | TASK_STOPPED)));
219                 if (!quiescent) {
220                         if (interrupt)
221                                 signal_wake_up(target, 0);
222                         else {
223                                 set_tsk_thread_flag(target, TIF_SIGPENDING);
224                                 kick_process(target);
225                         }
226                 }
227                 spin_unlock_irq(&target->sighand->siglock);
228         }
229
230         return quiescent;
231 }
232
233
234 static struct utrace_attached_engine *
235 matching_engine(struct utrace *utrace, int flags,
236                 const struct utrace_engine_ops *ops, unsigned long data)
237 {
238         struct utrace_attached_engine *engine;
239         list_for_each_entry_rcu(engine, &utrace->engines, entry) {
240                 if ((flags & UTRACE_ATTACH_MATCH_OPS)
241                     && engine->ops != ops)
242                         continue;
243                 if ((flags & UTRACE_ATTACH_MATCH_DATA)
244                     && engine->data != data)
245                         continue;
246                 if (flags & UTRACE_ATTACH_EXCLUSIVE)
247                         engine = ERR_PTR(-EEXIST);
248                 return engine;
249         }
250         return NULL;
251 }
252
 253 /*
 254  * With UTRACE_ATTACH_CREATE, attach a new engine to target; without it, only
 255  * look up an existing engine matching on ops and/or data (UTRACE_ATTACH_MATCH_*).
 256  * UTRACE_ATTACH_EXCLUSIVE refuses duplicates.  XXX: option to stop the target?  */
257 struct utrace_attached_engine *
258 utrace_attach(struct task_struct *target, int flags,
259              const struct utrace_engine_ops *ops, unsigned long data)
260 {
261         struct utrace *utrace;
262         struct utrace_attached_engine *engine;
263
264 restart:
265         rcu_read_lock();
266         utrace = rcu_dereference(target->utrace);
267         smp_rmb();
268         if (utrace == NULL) {
269                 rcu_read_unlock();
270
271                 if (!(flags & UTRACE_ATTACH_CREATE)) {
272                         return ERR_PTR(-ENOENT);
273                 }
274
275                 engine = kmem_cache_alloc(utrace_engine_cachep, SLAB_KERNEL);
276                 if (unlikely(engine == NULL))
277                         return ERR_PTR(-ENOMEM);
278                 engine->flags = 0;
279
280         first:
281                 utrace = utrace_first_engine(target, engine);
282                 if (IS_ERR(utrace)) {
283                         kmem_cache_free(utrace_engine_cachep, engine);
284                         return ERR_PTR(PTR_ERR(utrace));
285                 }
286                 if (unlikely(utrace == NULL)) /* Race condition.  */
287                         goto restart;
288         }
289         else if (unlikely(target->exit_state == EXIT_DEAD)) {
290                 /*
291                  * The target has already been reaped.
292                  */
293                 rcu_read_unlock();
294                 return ERR_PTR(-ESRCH);
295         }
296         else {
297                 if (!(flags & UTRACE_ATTACH_CREATE)) {
298                         engine = matching_engine(utrace, flags, ops, data);
299                         rcu_read_unlock();
300                         return engine;
301                 }
302
303                 engine = kmem_cache_alloc(utrace_engine_cachep, SLAB_KERNEL);
304                 if (unlikely(engine == NULL))
305                         return ERR_PTR(-ENOMEM);
306                 engine->flags = ops->report_reap ? UTRACE_EVENT(REAP) : 0;
307
308                 rcu_read_lock();
309                 utrace = rcu_dereference(target->utrace);
310                 if (unlikely(utrace == NULL)) { /* Race with detach.  */
311                         rcu_read_unlock();
312                         goto first;
313                 }
314
315                 utrace_lock(utrace);
316                 if (flags & UTRACE_ATTACH_EXCLUSIVE) {
317                         struct utrace_attached_engine *old;
318                         old = matching_engine(utrace, flags, ops, data);
319                         if (old != NULL) {
320                                 utrace_unlock(utrace);
321                                 rcu_read_unlock();
322                                 kmem_cache_free(utrace_engine_cachep, engine);
323                                 return ERR_PTR(-EEXIST);
324                         }
325                 }
326
327                 if (unlikely(rcu_dereference(target->utrace) != utrace)) {
328                         /*
329                          * We lost a race with other CPUs doing a sequence
330                          * of detach and attach before we got in.
331                          */
332                         utrace_unlock(utrace);
333                         rcu_read_unlock();
334                         kmem_cache_free(utrace_engine_cachep, engine);
335                         goto restart;
336                 }
337                 list_add_tail_rcu(&engine->entry, &utrace->engines);
338         }
339
340         engine->ops = ops;
341         engine->data = data;
342
343         utrace_unlock(utrace);
344
345         return engine;
346 }
347 EXPORT_SYMBOL_GPL(utrace_attach);
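/*
 * Example usage (editor's sketch, not part of the original file): a
 * hypothetical client attaches one engine per ops vector, using
 * UTRACE_ATTACH_EXCLUSIVE | UTRACE_ATTACH_MATCH_OPS to avoid duplicates,
 * then finds the same engine again with a lookup-only call (no
 * UTRACE_ATTACH_CREATE).  The report_quiesce signature is inferred from
 * dead_engine_delete below; the example_* names are invented for illustration.
 */
#if 0	/* illustrative only */
static u32 example_quiesce(struct utrace_attached_engine *engine,
			   struct task_struct *tsk)
{
	return UTRACE_ACTION_RESUME;	/* nothing to do; let the thread run */
}

static const struct utrace_engine_ops example_ops = {
	.report_quiesce = example_quiesce,
};

static int example_attach(struct task_struct *target)
{
	struct utrace_attached_engine *engine;

	engine = utrace_attach(target,
			       UTRACE_ATTACH_CREATE | UTRACE_ATTACH_EXCLUSIVE
			       | UTRACE_ATTACH_MATCH_OPS,
			       &example_ops, 0);
	if (IS_ERR(engine))
		return PTR_ERR(engine);

	/* Later: look the same engine up again without creating a new one.  */
	engine = utrace_attach(target, UTRACE_ATTACH_MATCH_OPS,
			       &example_ops, 0);
	return IS_ERR(engine) ? PTR_ERR(engine) : 0;
}
#endif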
348
349 /*
350  * When an engine is detached, the target thread may still see it and make
351  * callbacks until it quiesces.  We reset its event flags to just QUIESCE
352  * and install a special ops vector whose callback is dead_engine_delete.
 353  * When the target thread quiesces, that callback detaches and frees the engine.
354  */
355 static u32
356 dead_engine_delete(struct utrace_attached_engine *engine,
357                    struct task_struct *tsk)
358 {
359         return UTRACE_ACTION_DETACH;
360 }
361
362 static const struct utrace_engine_ops dead_engine_ops =
363 {
364         .report_quiesce = &dead_engine_delete
365 };
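/*
 * Editor's sketch, not part of the original: an engine can also detach
 * itself from inside one of its own callbacks by returning
 * UTRACE_ACTION_DETACH, which update_action below converts into the same
 * dead_engine_ops switch.  Hypothetical example; the report_exec signature
 * is inferred from the REPORT(report_exec, bprm, regs) call later in this
 * file:
 *
 *	static u32 example_detach_at_exec(struct utrace_attached_engine *engine,
 *					  struct task_struct *tsk,
 *					  struct linux_binprm *bprm,
 *					  struct pt_regs *regs)
 *	{
 *		return UTRACE_ACTION_DETACH;	// one-shot: stop tracing at exec
 *	}
 */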
366
367
368 /*
369  * If tracing was preventing a SIGCHLD or self-reaping
370  * and is no longer, do that report or reaping right now.
371  */
372 static void
373 check_noreap(struct task_struct *target, struct utrace *utrace,
374              u32 old_action, u32 action)
375 {
376         if ((action | ~old_action) & UTRACE_ACTION_NOREAP)
377                 return;
378
379         if (utrace && xchg(&utrace->u.exit.notified, 1))
380                 return;
381
382         if (target->exit_signal == -1)
383                 release_task(target);
384         else if (thread_group_empty(target)) {
385                 read_lock(&tasklist_lock);
386                 do_notify_parent(target, target->exit_signal);
387                 read_unlock(&tasklist_lock);
388         }
389 }
390
391 /*
392  * We may have been the one keeping the target thread quiescent.
393  * Check if it should wake up now.
394  * Called with utrace locked, and unlocks it on return.
395  * If we were keeping it stopped, resume it.
396  * If we were keeping its zombie from reporting/self-reap, do it now.
397  */
398 static void
399 wake_quiescent(unsigned long old_flags,
400                struct utrace *utrace, struct task_struct *target)
401 {
402         unsigned long flags;
403         struct utrace_attached_engine *engine;
404
405         if (target->exit_state) {
406                 /*
407                  * Update the set of events of interest from the union
408                  * of the interests of the remaining tracing engines.
409                  */
410                 flags = 0;
411                 list_for_each_entry(engine, &utrace->engines, entry)
412                         flags |= engine->flags | UTRACE_EVENT(REAP);
413                 utrace = check_dead_utrace(target, utrace, flags);
414
415                 check_noreap(target, utrace, old_flags, flags);
416                 return;
417         }
418
419         /*
420          * Update the set of events of interest from the union
421          * of the interests of the remaining tracing engines.
422          */
423         flags = 0;
424         list_for_each_entry(engine, &utrace->engines, entry)
425                 flags |= engine->flags | UTRACE_EVENT(REAP);
426         utrace = check_dead_utrace(target, utrace, flags);
427
428         if (flags & UTRACE_ACTION_QUIESCE)
429                 return;
430
431         read_lock(&tasklist_lock);
432         if (!target->exit_state) {
433                 /*
434                  * The target is not dead and should not be in tracing stop
435                  * any more.  Wake it unless it's in job control stop.
436                  */
437                 spin_lock_irq(&target->sighand->siglock);
438                 if (target->signal->flags & SIGNAL_STOP_STOPPED) {
439                         int stop_count = target->signal->group_stop_count;
440                         target->state = TASK_STOPPED;
441                         spin_unlock_irq(&target->sighand->siglock);
442
443                         /*
444                          * If tracing was preventing a CLD_STOPPED report
445                          * and is no longer, do that report right now.
446                          */
447                         if (stop_count == 0
448                             && 0
449                             /*&& (events &~ interest) & UTRACE_INHIBIT_CLDSTOP*/
450                                 )
451                                 do_notify_parent_cldstop(target, CLD_STOPPED);
452                 }
453                 else {
454                         /*
455                          * Wake the task up.
456                          */
457                         recalc_sigpending_tsk(target);
458                         wake_up_state(target, TASK_STOPPED | TASK_TRACED);
459                         spin_unlock_irq(&target->sighand->siglock);
460                 }
461         }
462         read_unlock(&tasklist_lock);
463 }
464
465 void
466 utrace_detach(struct task_struct *target,
467               struct utrace_attached_engine *engine)
468 {
469         struct utrace *utrace;
470         unsigned long flags;
471
472         rcu_read_lock();
473         utrace = rcu_dereference(target->utrace);
474         smp_rmb();
475         if (unlikely(target->exit_state == EXIT_DEAD)) {
476                 /*
477                  * Called after utrace_release_task might have started.
478                  * A call to this engine's report_reap callback might
 479                  * already be in progress, or the engine might even have been
480                  * freed already.
481                  */
482                 rcu_read_unlock();
483                 return;
484         }
485         utrace_lock(utrace);
486         rcu_read_unlock();
487
488         flags = engine->flags;
489         engine->flags = UTRACE_EVENT(QUIESCE) | UTRACE_ACTION_QUIESCE;
490         rcu_assign_pointer(engine->ops, &dead_engine_ops);
491
492         if (quiesce(target, 1)) {
493                 remove_engine(engine, target, utrace);
494                 wake_quiescent(flags, utrace, target);
495         }
496         else
497                 utrace_unlock(utrace);
498 }
499 EXPORT_SYMBOL_GPL(utrace_detach);
500
501
502 /*
503  * Called with utrace->lock held.
504  * Notify and clean up all engines, then free utrace.
505  */
506 static void
507 utrace_reap(struct task_struct *target, struct utrace *utrace)
508 {
509         struct utrace_attached_engine *engine, *next;
510         const struct utrace_engine_ops *ops;
511
512 restart:
513         list_for_each_entry_safe(engine, next, &utrace->engines, entry) {
514                 list_del_rcu(&engine->entry);
515
516                 /*
517                  * Now nothing else refers to this engine.
518                  */
519                 if (engine->flags & UTRACE_EVENT(REAP)) {
520                         ops = rcu_dereference(engine->ops);
521                         if (ops != &dead_engine_ops) {
522                                 utrace_unlock(utrace);
523                                 (*ops->report_reap)(engine, target);
524                                 call_rcu(&engine->rhead, utrace_engine_free);
525                                 utrace_lock(utrace);
526                                 goto restart;
527                         }
528                 }
529                 call_rcu(&engine->rhead, utrace_engine_free);
530         }
531         utrace_unlock(utrace);
532
533         rcu_utrace_free(utrace);
534 }
535
536 /*
537  * Called by release_task.  After this, target->utrace must be cleared.
538  */
539 void
540 utrace_release_task(struct task_struct *target)
541 {
542         struct utrace *utrace;
543
544         task_lock(target);
545         utrace = target->utrace;
546         rcu_assign_pointer(target->utrace, NULL);
547         task_unlock(target);
548
549         if (unlikely(utrace == NULL))
550                 return;
551
552         utrace_lock(utrace);
553
554         if (!utrace->u.exit.notified
555             && (target->utrace_flags & (UTRACE_EVENT(DEATH)
556                                         | UTRACE_EVENT(QUIESCE)))) {
557                 /*
558                  * The target will do some final callbacks but hasn't
559                  * finished them yet.  We know because it clears these
560                  * event bits after it's done.  Instead of cleaning up here
561                  * and requiring utrace_report_death to cope with it, we
562                  * delay the REAP report and the teardown until after the
563                  * target finishes its death reports.
564                  */
565                 utrace->u.exit.reap = 1;
566                 utrace_unlock(utrace);
567         }
568         else
569                 utrace_reap(target, utrace); /* Unlocks and frees.  */
570 }
571
572
573 void
574 utrace_set_flags(struct task_struct *target,
575                  struct utrace_attached_engine *engine,
576                  unsigned long flags)
577 {
578         struct utrace *utrace;
579         int report = 0;
580         unsigned long old_flags, old_utrace_flags;
581
582 #ifdef ARCH_HAS_SINGLE_STEP
583         if (! ARCH_HAS_SINGLE_STEP)
584 #endif
585                 WARN_ON(flags & UTRACE_ACTION_SINGLESTEP);
586 #ifdef ARCH_HAS_BLOCK_STEP
587         if (! ARCH_HAS_BLOCK_STEP)
588 #endif
589                 WARN_ON(flags & UTRACE_ACTION_BLOCKSTEP);
590
591         rcu_read_lock();
592         utrace = rcu_dereference(target->utrace);
593         smp_rmb();
594         if (unlikely(target->exit_state == EXIT_DEAD)) {
595                 /*
596                  * Race with reaping.
597                  */
598                 rcu_read_unlock();
599                 return;
600         }
601
602         utrace_lock(utrace);
603         rcu_read_unlock();
604
605         old_utrace_flags = target->utrace_flags;
606         old_flags = engine->flags;
607         engine->flags = flags;
608         target->utrace_flags |= flags;
609
610         if ((old_flags ^ flags) & UTRACE_ACTION_QUIESCE) {
611                 if (flags & UTRACE_ACTION_QUIESCE) {
612                         report = (quiesce(target, 1)
613                                   && (flags & UTRACE_EVENT(QUIESCE)));
614                         utrace_unlock(utrace);
615                 }
616                 else
617                         wake_quiescent(old_flags, utrace, target);
618         }
619         else {
620                 /*
621                  * If we're asking for single-stepping or syscall tracing,
622                  * we need to pass through utrace_quiescent before resuming
623                  * in user mode to get those effects, even if the target is
624                  * not going to be quiescent right now.
625                  */
626                 if (!(target->utrace_flags & UTRACE_ACTION_QUIESCE)
627                     && ((flags &~ old_utrace_flags)
628                         & (UTRACE_ACTION_SINGLESTEP | UTRACE_ACTION_BLOCKSTEP
629                            | UTRACE_EVENT_SYSCALL)))
630                         quiesce(target, 0);
631                 utrace_unlock(utrace);
632         }
633
634         if (report)          /* Already quiescent, won't report itself.  */
635                 (*engine->ops->report_quiesce)(engine, target);
636 }
637 EXPORT_SYMBOL_GPL(utrace_set_flags);
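/*
 * Example usage (editor's sketch, not part of the original): a caller that
 * already holds an attached engine turns on syscall-entry reporting.  Per
 * the comment in utrace_set_flags above, newly requested syscall tracing
 * forces the target through utrace_quiescent before it returns to user
 * mode, so tracehook_enable_syscall_trace takes effect; this assumes
 * UTRACE_EVENT(SYSCALL_ENTRY) is covered by the UTRACE_EVENT_SYSCALL mask.
 *
 *	utrace_set_flags(target, engine,
 *			 engine->flags | UTRACE_EVENT(SYSCALL_ENTRY));
 */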
 638
639 /*
640  * While running an engine callback, no locks are held.
641  * If a callback updates its engine's action state, then
642  * we need to take the utrace lock to install the flags update.
643  */
644 static inline u32
645 update_action(struct task_struct *tsk, struct utrace *utrace,
646               struct utrace_attached_engine *engine,
647               u32 ret)
648 {
649         if (ret & UTRACE_ACTION_DETACH)
650                 rcu_assign_pointer(engine->ops, &dead_engine_ops);
651         else if ((ret & UTRACE_ACTION_NEWSTATE)
652                  && ((ret ^ engine->flags) & UTRACE_ACTION_STATE_MASK)) {
653 #ifdef ARCH_HAS_SINGLE_STEP
654                 if (! ARCH_HAS_SINGLE_STEP)
655 #endif
656                         WARN_ON(ret & UTRACE_ACTION_SINGLESTEP);
657 #ifdef ARCH_HAS_BLOCK_STEP
658                 if (! ARCH_HAS_BLOCK_STEP)
659 #endif
660                         WARN_ON(ret & UTRACE_ACTION_BLOCKSTEP);
661                 utrace_lock(utrace);
662                 /*
663                  * If we're changing something other than just QUIESCE,
664                  * make sure we pass through utrace_quiescent before
665                  * resuming even if we aren't going to stay quiescent.
666                  * That's where we get the correct union of all engines'
667                  * flags after they've finished changing, and apply changes.
668                  */
669                 if (((ret ^ engine->flags) & (UTRACE_ACTION_STATE_MASK
670                                               & ~UTRACE_ACTION_QUIESCE)))
671                         tsk->utrace_flags |= UTRACE_ACTION_QUIESCE;
672                 engine->flags &= ~UTRACE_ACTION_STATE_MASK;
673                 engine->flags |= ret & UTRACE_ACTION_STATE_MASK;
674                 tsk->utrace_flags |= engine->flags;
675                 utrace_unlock(utrace);
676         }
677         else
678                 ret |= engine->flags & UTRACE_ACTION_STATE_MASK;
679         return ret;
680 }
681
682 #define REPORT(callback, ...) do { \
683         u32 ret = (*rcu_dereference(engine->ops)->callback) \
684                 (engine, tsk, ##__VA_ARGS__); \
685         action = update_action(tsk, utrace, engine, ret); \
686         } while (0)
687
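/*
 * Editor's note: for reference, REPORT(report_clone, clone_flags, child)
 * in utrace_report_clone below expands to roughly the following, with
 * update_action folding the callback's returned action bits back into the
 * running action:
 *
 *	u32 ret = (*rcu_dereference(engine->ops)->report_clone)
 *		(engine, tsk, clone_flags, child);
 *	action = update_action(tsk, utrace, engine, ret);
 */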
688
689 /*
690  * Called with utrace->lock held, returns with it released.
691  */
692 static u32
693 remove_detached(struct task_struct *tsk, struct utrace *utrace,
694                 struct utrace **utracep, u32 action)
695 {
696         struct utrace_attached_engine *engine, *next;
697         unsigned long flags;
698
699         flags = 0;
700         list_for_each_entry_safe(engine, next, &utrace->engines, entry) {
701                 if (engine->ops == &dead_engine_ops)
702                         remove_engine(engine, tsk, utrace);
703                 else
704                         flags |= engine->flags | UTRACE_EVENT(REAP);
705         }
706         utrace = check_dead_utrace(tsk, utrace, flags);
707         if (utracep)
708                 *utracep = utrace;
709
710         flags &= UTRACE_ACTION_STATE_MASK;
711         return flags | (action & UTRACE_ACTION_OP_MASK);
712 }
713
714 /*
715  * Called after an event report loop.  Remove any engines marked for detach.
716  */
717 static inline u32
718 check_detach(struct task_struct *tsk, u32 action)
719 {
720         if (action & UTRACE_ACTION_DETACH) {
721                 utrace_lock(tsk->utrace);
722                 action = remove_detached(tsk, tsk->utrace, NULL, action);
723         }
724         return action;
725 }
726
727 static inline void
728 check_quiescent(struct task_struct *tsk, u32 action)
729 {
730         if (action & UTRACE_ACTION_STATE_MASK)
731                 utrace_quiescent(tsk);
732 }
733
734 /*
735  * Called iff UTRACE_EVENT(CLONE) flag is set.
736  * This notification call blocks the wake_up_new_task call on the child.
737  * So we must not quiesce here.  tracehook_report_clone_complete will do
738  * a quiescence check momentarily.
739  */
740 void
741 utrace_report_clone(unsigned long clone_flags, struct task_struct *child)
742 {
743         struct task_struct *tsk = current;
744         struct utrace *utrace = tsk->utrace;
745         struct list_head *pos, *next;
746         struct utrace_attached_engine *engine;
747         unsigned long action;
748
749         utrace->u.live.cloning = child;
750
751         /* XXX must change for sharing */
752         action = UTRACE_ACTION_RESUME;
753         list_for_each_safe_rcu(pos, next, &utrace->engines) {
754                 engine = list_entry(pos, struct utrace_attached_engine, entry);
755                 if (engine->flags & UTRACE_EVENT(CLONE))
756                         REPORT(report_clone, clone_flags, child);
757                 if (action & UTRACE_ACTION_HIDE)
758                         break;
759         }
760
761         utrace->u.live.cloning = NULL;
762
763         check_detach(tsk, action);
764 }
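/*
 * Editor's sketch, not part of the original: a hypothetical report_clone
 * callback that follows the new child by attaching its own engine to it.
 * Because u.live.cloning is set around this report, the creator's
 * utrace_attach on the child is not delayed by the PF_STARTING check in
 * utrace_first_engine.  The callback signature is inferred from the REPORT
 * call above; the example_* names are invented.
 */
#if 0	/* illustrative only */
static u32 example_report_clone(struct utrace_attached_engine *engine,
				struct task_struct *tsk,
				unsigned long clone_flags,
				struct task_struct *child)
{
	struct utrace_attached_engine *child_engine;

	child_engine = utrace_attach(child, UTRACE_ATTACH_CREATE,
				     engine->ops, engine->data);
	if (!IS_ERR(child_engine))
		utrace_set_flags(child, child_engine, engine->flags);
	return UTRACE_ACTION_RESUME;
}
#endif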
765
766 static unsigned long
767 report_quiescent(struct task_struct *tsk, struct utrace *utrace, u32 action)
768 {
769         struct list_head *pos, *next;
770         struct utrace_attached_engine *engine;
771
772         list_for_each_safe_rcu(pos, next, &utrace->engines) {
773                 engine = list_entry(pos, struct utrace_attached_engine, entry);
774                 if (engine->flags & UTRACE_EVENT(QUIESCE))
775                         REPORT(report_quiesce);
776                 action |= engine->flags & UTRACE_ACTION_STATE_MASK;
777         }
778
779         return check_detach(tsk, action);
780 }
781
782 /*
783  * Called iff UTRACE_EVENT(JCTL) flag is set.
784  */
785 int
786 utrace_report_jctl(int what)
787 {
788         struct task_struct *tsk = current;
789         struct utrace *utrace = tsk->utrace;
790         struct list_head *pos, *next;
791         struct utrace_attached_engine *engine;
792         unsigned long action;
793
794         /* XXX must change for sharing */
795         action = UTRACE_ACTION_RESUME;
796         list_for_each_safe_rcu(pos, next, &utrace->engines) {
797                 engine = list_entry(pos, struct utrace_attached_engine, entry);
798                 if (engine->flags & UTRACE_EVENT(JCTL))
799                         REPORT(report_jctl, what);
800                 if (action & UTRACE_ACTION_HIDE)
801                         break;
802         }
803
804         /*
805          * We are becoming quiescent, so report it now.
806          * We don't block in utrace_quiescent because we are stopping anyway.
807          * We know that upon resuming we'll go through tracehook_induce_signal,
808          * which will keep us quiescent or set us up to resume with tracing.
809          */
810         action = report_quiescent(tsk, utrace, action);
811
812         if (what == CLD_STOPPED && tsk->state != TASK_STOPPED) {
813                 /*
 814                  * The event report hooks could have blocked, though only
 815                  * briefly.  Make sure we're in
816                  * TASK_STOPPED state again to block properly, unless
817                  * we've just come back out of job control stop.
818                  */
819                 spin_lock_irq(&tsk->sighand->siglock);
820                 if (tsk->signal->flags & SIGNAL_STOP_STOPPED)
821                         set_current_state(TASK_STOPPED);
822                 spin_unlock_irq(&tsk->sighand->siglock);
823         }
824
825         return action & UTRACE_JCTL_NOSIGCHLD;
826 }
827
828
829 /*
830  * Called if UTRACE_EVENT(QUIESCE) or UTRACE_ACTION_QUIESCE flag is set.
831  * Also called after other event reports.
832  * It is a good time to block.
833  */
834 void
835 utrace_quiescent(struct task_struct *tsk)
836 {
837         struct utrace *utrace = tsk->utrace;
838         unsigned long action;
839
840 restart:
841         /* XXX must change for sharing */
842
843         action = report_quiescent(tsk, utrace, UTRACE_ACTION_RESUME);
844
845         /*
846          * If some engines want us quiescent, we block here.
847          */
848         if (action & UTRACE_ACTION_QUIESCE) {
849                 spin_lock_irq(&tsk->sighand->siglock);
850                 /*
851                  * If wake_quiescent is trying to wake us up now, it will
852                  * have cleared the QUIESCE flag before trying to take the
853                  * siglock.  Now we have the siglock, so either it has
854                  * already cleared the flag, or it will wake us up after we
855                  * release the siglock it's waiting for.
856                  * Never stop when there is a SIGKILL bringing us down.
857                  */
858                 if ((tsk->utrace_flags & UTRACE_ACTION_QUIESCE)
859                     /*&& !(tsk->signal->flags & SIGNAL_GROUP_SIGKILL)*/) {
860                         set_current_state(TASK_TRACED);
861                         /*
862                          * If there is a group stop in progress,
863                          * we must participate in the bookkeeping.
864                          */
865                         if (tsk->signal->group_stop_count > 0)
866                                 --tsk->signal->group_stop_count;
867                         spin_unlock_irq(&tsk->sighand->siglock);
868                         schedule();
869                 }
870                 else
871                         spin_unlock_irq(&tsk->sighand->siglock);
872
873                 /*
874                  * We've woken up.  One engine could be waking us up while
875                  * another has asked us to quiesce.  So check afresh.  We
876                  * could have been detached while quiescent.  Now we are no
877                  * longer quiescent, so don't need to do any RCU locking.
878                  * But we do need to check our utrace pointer anew.
879                  */
880                 utrace = tsk->utrace;
881                 if (tsk->utrace_flags
882                     & (UTRACE_EVENT(QUIESCE) | UTRACE_ACTION_STATE_MASK))
883                         goto restart;
884         }
885         else if (tsk->utrace_flags & UTRACE_ACTION_QUIESCE) {
886                 /*
887                  * Our flags are out of date.
888                  * Update the set of events of interest from the union
889                  * of the interests of the remaining tracing engines.
890                  */
891                 struct utrace_attached_engine *engine;
892                 unsigned long flags = 0;
893                 utrace = rcu_dereference(tsk->utrace);
894                 utrace_lock(utrace);
895                 list_for_each_entry(engine, &utrace->engines, entry)
896                         flags |= engine->flags | UTRACE_EVENT(REAP);
897                 tsk->utrace_flags = flags;
898                 utrace_unlock(utrace);
899         }
900
901         /*
902          * We're resuming.  Update the machine layer tracing state and then go.
903          */
904 #ifdef ARCH_HAS_SINGLE_STEP
905         if (action & UTRACE_ACTION_SINGLESTEP)
906                 tracehook_enable_single_step(tsk);
907         else
908                 tracehook_disable_single_step(tsk);
909 #endif
910 #ifdef ARCH_HAS_BLOCK_STEP
911         if ((action & (UTRACE_ACTION_BLOCKSTEP|UTRACE_ACTION_SINGLESTEP))
912             == UTRACE_ACTION_BLOCKSTEP)
913                 tracehook_enable_block_step(tsk);
914         else
915                 tracehook_disable_block_step(tsk);
916 #endif
917         if (tsk->utrace_flags & UTRACE_EVENT_SYSCALL)
918                 tracehook_enable_syscall_trace(tsk);
919         else
920                 tracehook_disable_syscall_trace(tsk);
921 }
922
923
924 /*
925  * Called iff UTRACE_EVENT(EXIT) flag is set.
926  */
927 void
928 utrace_report_exit(long *exit_code)
929 {
930         struct task_struct *tsk = current;
931         struct utrace *utrace = tsk->utrace;
932         struct list_head *pos, *next;
933         struct utrace_attached_engine *engine;
934         unsigned long action;
935         long orig_code = *exit_code;
936
937         /* XXX must change for sharing */
938         action = UTRACE_ACTION_RESUME;
939         list_for_each_safe_rcu(pos, next, &utrace->engines) {
940                 engine = list_entry(pos, struct utrace_attached_engine, entry);
941                 if (engine->flags & UTRACE_EVENT(EXIT))
942                         REPORT(report_exit, orig_code, exit_code);
943         }
944         action = check_detach(tsk, action);
945         check_quiescent(tsk, action);
946 }
947
948 /*
949  * Called iff UTRACE_EVENT(DEATH) flag is set.
950  *
951  * It is always possible that we are racing with utrace_release_task here,
952  * if UTRACE_ACTION_NOREAP is not set, or in the case of non-leader exec
953  * where the old leader will get released regardless of NOREAP.  For this
954  * reason, utrace_release_task checks for the event bits that get us here,
955  * and delays its cleanup for us to do.
956  */
957 void
958 utrace_report_death(struct task_struct *tsk, struct utrace *utrace)
959 {
960         struct list_head *pos, *next;
961         struct utrace_attached_engine *engine;
962         u32 action, oaction;
963
964         BUG_ON(!tsk->exit_state);
965
966         oaction = tsk->utrace_flags;
967
968         /* XXX must change for sharing */
969         action = UTRACE_ACTION_RESUME;
970         list_for_each_safe_rcu(pos, next, &utrace->engines) {
971                 engine = list_entry(pos, struct utrace_attached_engine, entry);
972                 if (engine->flags & UTRACE_EVENT(DEATH))
973                         REPORT(report_death);
974                 if (engine->flags & UTRACE_EVENT(QUIESCE))
975                         REPORT(report_quiesce);
976         }
977         /*
978          * Unconditionally lock and recompute the flags.
979          * This may notice that there are no engines left and
980          * free the utrace struct.
981          */
982         utrace_lock(utrace);
983         if (utrace->u.exit.reap) {
984                 /*
985                  * utrace_release_task was already called in parallel.
986                  * We must complete its work now.
987                  */
988         reap:
989                 utrace_reap(tsk, utrace);
990         }
991         else {
992                 action = remove_detached(tsk, utrace, &utrace, action);
993
994                 if (utrace != NULL) {
995                         utrace_lock(utrace);
996                         if (utrace->u.exit.reap)
997                                 goto reap;
998
999                         /*
1000                          * Clear event bits we can't see any more.  This
1001                          * tells utrace_release_task we have already
1002                          * finished, if it comes along later.
1003                          */
1004                         tsk->utrace_flags &= (UTRACE_EVENT(REAP)
1005                                               | UTRACE_ACTION_NOREAP);
1006
1007                         utrace_unlock(utrace);
1008                 }
1009
1010                 check_noreap(tsk, utrace, oaction, action);
1011         }
1012 }
1013
1014 /*
1015  * Called iff UTRACE_EVENT(VFORK_DONE) flag is set.
1016  */
1017 void
1018 utrace_report_vfork_done(pid_t child_pid)
1019 {
1020         struct task_struct *tsk = current;
1021         struct utrace *utrace = tsk->utrace;
1022         struct list_head *pos, *next;
1023         struct utrace_attached_engine *engine;
1024         unsigned long action;
1025
1026         /* XXX must change for sharing */
1027         action = UTRACE_ACTION_RESUME;
1028         list_for_each_safe_rcu(pos, next, &utrace->engines) {
1029                 engine = list_entry(pos, struct utrace_attached_engine, entry);
1030                 if (engine->flags & UTRACE_EVENT(VFORK_DONE))
1031                         REPORT(report_vfork_done, child_pid);
1032                 if (action & UTRACE_ACTION_HIDE)
1033                         break;
1034         }
1035         action = check_detach(tsk, action);
1036         check_quiescent(tsk, action);
1037 }
1038
1039 /*
1040  * Called iff UTRACE_EVENT(EXEC) flag is set.
1041  */
1042 void
1043 utrace_report_exec(struct linux_binprm *bprm, struct pt_regs *regs)
1044 {
1045         struct task_struct *tsk = current;
1046         struct utrace *utrace = tsk->utrace;
1047         struct list_head *pos, *next;
1048         struct utrace_attached_engine *engine;
1049         unsigned long action;
1050
1051         /* XXX must change for sharing */
1052         action = UTRACE_ACTION_RESUME;
1053         list_for_each_safe_rcu(pos, next, &utrace->engines) {
1054                 engine = list_entry(pos, struct utrace_attached_engine, entry);
1055                 if (engine->flags & UTRACE_EVENT(EXEC))
1056                         REPORT(report_exec, bprm, regs);
1057                 if (action & UTRACE_ACTION_HIDE)
1058                         break;
1059         }
1060         action = check_detach(tsk, action);
1061         check_quiescent(tsk, action);
1062 }
1063
1064 /*
1065  * Called iff UTRACE_EVENT(SYSCALL_{ENTRY,EXIT}) flag is set.
1066  */
1067 void
1068 utrace_report_syscall(struct pt_regs *regs, int is_exit)
1069 {
1070         struct task_struct *tsk = current;
1071         struct utrace *utrace = tsk->utrace;
1072         struct list_head *pos, *next;
1073         struct utrace_attached_engine *engine;
1074         unsigned long action, ev;
1075
1076 /*
1077   XXX pass syscall # to engine hook directly, let it return inhibit-action
1078   to reset to -1
1079         long syscall = tracehook_syscall_number(regs, is_exit);
1080 */
1081
1082         ev = is_exit ? UTRACE_EVENT(SYSCALL_EXIT) : UTRACE_EVENT(SYSCALL_ENTRY);
1083
1084         /* XXX must change for sharing */
1085         action = UTRACE_ACTION_RESUME;
1086         list_for_each_safe_rcu(pos, next, &utrace->engines) {
1087                 engine = list_entry(pos, struct utrace_attached_engine, entry);
1088                 if (engine->flags & ev) {
1089                         if (is_exit)
1090                                 REPORT(report_syscall_exit, regs);
1091                         else
1092                                 REPORT(report_syscall_entry, regs);
1093                 }
1094                 if (action & UTRACE_ACTION_HIDE)
1095                         break;
1096         }
1097         action = check_detach(tsk, action);
1098         check_quiescent(tsk, action);
1099 }
1100
 1101 /* XXX copied from signal.c */
1102 #ifdef SIGEMT
1103 #define M_SIGEMT        M(SIGEMT)
1104 #else
1105 #define M_SIGEMT        0
1106 #endif
1107
1108 #if SIGRTMIN > BITS_PER_LONG
1109 #define M(sig) (1ULL << ((sig)-1))
1110 #else
1111 #define M(sig) (1UL << ((sig)-1))
1112 #endif
1113 #define T(sig, mask) (M(sig) & (mask))
1114
1115 #define SIG_KERNEL_ONLY_MASK (\
1116         M(SIGKILL)   |  M(SIGSTOP)                                   )
1117
1118 #define SIG_KERNEL_STOP_MASK (\
1119         M(SIGSTOP)   |  M(SIGTSTP)   |  M(SIGTTIN)   |  M(SIGTTOU)   )
1120
1121 #define SIG_KERNEL_COREDUMP_MASK (\
1122         M(SIGQUIT)   |  M(SIGILL)    |  M(SIGTRAP)   |  M(SIGABRT)   | \
1123         M(SIGFPE)    |  M(SIGSEGV)   |  M(SIGBUS)    |  M(SIGSYS)    | \
1124         M(SIGXCPU)   |  M(SIGXFSZ)   |  M_SIGEMT                     )
1125
1126 #define SIG_KERNEL_IGNORE_MASK (\
1127         M(SIGCONT)   |  M(SIGCHLD)   |  M(SIGWINCH)  |  M(SIGURG)    )
1128
1129 #define sig_kernel_only(sig) \
1130                 (((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_ONLY_MASK))
1131 #define sig_kernel_coredump(sig) \
1132                 (((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_COREDUMP_MASK))
1133 #define sig_kernel_ignore(sig) \
1134                 (((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_IGNORE_MASK))
1135 #define sig_kernel_stop(sig) \
1136                 (((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_STOP_MASK))
1137
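/*
 * Editor's note: M(sig) is the mask bit for signal number sig, so, for
 * example, sig_kernel_stop(SIGTSTP) expands to
 *
 *	(SIGTSTP < SIGRTMIN) && (M(SIGTSTP) & SIG_KERNEL_STOP_MASK)
 *
 * which is nonzero, while real-time signals (>= SIGRTMIN) never match any
 * of these masks.
 */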
1138
1139 /*
1140  * Call each interested tracing engine's report_signal callback.
1141  */
1142 static u32
1143 report_signal(struct task_struct *tsk, struct pt_regs *regs,
1144               struct utrace *utrace, u32 action,
1145               unsigned long flags1, unsigned long flags2, siginfo_t *info,
1146               const struct k_sigaction *ka, struct k_sigaction *return_ka)
1147 {
1148         struct list_head *pos, *next;
1149         struct utrace_attached_engine *engine;
1150
1151         /* XXX must change for sharing */
1152         list_for_each_safe_rcu(pos, next, &utrace->engines) {
1153                 engine = list_entry(pos, struct utrace_attached_engine, entry);
1154                 if ((engine->flags & flags1) && (engine->flags & flags2)) {
1155                         u32 disp = action & UTRACE_ACTION_OP_MASK;
1156                         action &= ~UTRACE_ACTION_OP_MASK;
1157                         REPORT(report_signal, regs, disp, info, ka, return_ka);
1158                         if ((action & UTRACE_ACTION_OP_MASK) == 0)
1159                                 action |= disp;
1160                         if (action & UTRACE_ACTION_HIDE)
1161                                 break;
1162                 }
1163         }
1164
1165         return action;
1166 }
1167
1168 void
1169 utrace_signal_handler_singlestep(struct task_struct *tsk, struct pt_regs *regs)
1170 {
1171         u32 action;
1172         action = report_signal(tsk, regs, tsk->utrace, UTRACE_SIGNAL_HANDLER,
1173                                UTRACE_EVENT_SIGNAL_ALL,
1174                                UTRACE_ACTION_SINGLESTEP|UTRACE_ACTION_BLOCKSTEP,
1175                                NULL, NULL, NULL);
1176         action = check_detach(tsk, action);
1177         check_quiescent(tsk, action);
1178 }
1179
1180
1181 /*
1182  * This is the hook from the signals code, called with the siglock held.
1183  * Here is the ideal place to quiesce.  We also dequeue and intercept signals.
1184  */
1185 int
1186 utrace_get_signal(struct task_struct *tsk, struct pt_regs *regs,
1187                   siginfo_t *info, struct k_sigaction *return_ka)
1188 {
1189         struct utrace *utrace = tsk->utrace;
1190         struct utrace_signal signal = { info, return_ka, 0 };
1191         struct k_sigaction *ka;
1192         unsigned long action, event;
1193
1194 #if 0                           /* XXX */
1195         if (tsk->signal->flags & SIGNAL_GROUP_SIGKILL)
1196                 return 0;
1197 #endif
1198
1199         /*
1200          * If we should quiesce, now is the time.
1201          * First stash a pointer to the state on our stack,
1202          * so that utrace_inject_signal can tell us what to do.
1203          */
1204         if (utrace->u.live.signal == NULL)
1205                 utrace->u.live.signal = &signal;
1206
1207         if (tsk->utrace_flags & UTRACE_ACTION_QUIESCE) {
1208                 spin_unlock_irq(&tsk->sighand->siglock);
1209                 utrace_quiescent(tsk);
1210                 if (signal.signr == 0)
1211                         /*
1212                          * This return value says to reacquire the siglock
1213                          * and check again.  This will check for a pending
1214                          * group stop and process it before coming back here.
1215                          */
1216                         return -1;
1217                 spin_lock_irq(&tsk->sighand->siglock);
1218         }
1219
1220         /*
1221          * If a signal was injected previously, it could not use our
1222          * stack space directly.  It had to allocate a data structure,
1223          * which we can now copy out of and free.
1224          */
1225         if (utrace->u.live.signal != &signal) {
1226                 signal.signr = utrace->u.live.signal->signr;
1227                 copy_siginfo(info, utrace->u.live.signal->info);
1228                 if (utrace->u.live.signal->return_ka)
1229                         *return_ka = *utrace->u.live.signal->return_ka;
1230                 else
1231                         signal.return_ka = NULL;
1232                 kfree(utrace->u.live.signal);
1233         }
1234         utrace->u.live.signal = NULL;
1235
1236         /*
1237          * If a signal was injected, everything is in place now.  Go do it.
1238          */
1239         if (signal.signr != 0) {
1240                 if (signal.return_ka == NULL) {
1241                         ka = &tsk->sighand->action[signal.signr - 1];
1242                         if (ka->sa.sa_flags & SA_ONESHOT)
1243                                 ka->sa.sa_handler = SIG_DFL;
1244                         *return_ka = *ka;
1245                 }
1246                 else
1247                         BUG_ON(signal.return_ka != return_ka);
1248                 return signal.signr;
1249         }
1250
1251         /*
 1252          * If no one is interested in intercepting signals, let the caller
1253          * just dequeue them normally.
1254          */
1255         if ((tsk->utrace_flags & UTRACE_EVENT_SIGNAL_ALL) == 0)
1256                 return 0;
1257
1258         /*
1259          * Steal the next signal so we can let tracing engines examine it.
1260          * From the signal number and sigaction, determine what normal
1261          * delivery would do.  If no engine perturbs it, we'll do that
1262          * by returning the signal number after setting *return_ka.
1263          */
1264         signal.signr = dequeue_signal(tsk, &tsk->blocked, info);
1265         if (signal.signr == 0)
1266                 return 0;
1267
1268         BUG_ON(signal.signr != info->si_signo);
1269
1270         ka = &tsk->sighand->action[signal.signr - 1];
1271         *return_ka = *ka;
1272
1273         if (signal.signr == SIGKILL)
1274                 return signal.signr;
1275
1276         if (ka->sa.sa_handler == SIG_IGN) {
1277                 event = UTRACE_EVENT(SIGNAL_IGN);
1278                 action = UTRACE_SIGNAL_IGN;
1279         }
1280         else if (ka->sa.sa_handler != SIG_DFL) {
1281                 event = UTRACE_EVENT(SIGNAL);
1282                 action = UTRACE_ACTION_RESUME;
1283         }
1284         else if (sig_kernel_coredump(signal.signr)) {
1285                 event = UTRACE_EVENT(SIGNAL_CORE);
1286                 action = UTRACE_SIGNAL_CORE;
1287         }
1288         else if (sig_kernel_ignore(signal.signr)) {
1289                 event = UTRACE_EVENT(SIGNAL_IGN);
1290                 action = UTRACE_SIGNAL_IGN;
1291         }
1292         else if (sig_kernel_stop(signal.signr)) {
1293                 event = UTRACE_EVENT(SIGNAL_STOP);
1294                 action = (signal.signr == SIGSTOP
1295                           ? UTRACE_SIGNAL_STOP : UTRACE_SIGNAL_TSTP);
1296         }
1297         else {
1298                 event = UTRACE_EVENT(SIGNAL_TERM);
1299                 action = UTRACE_SIGNAL_TERM;
1300         }
1301
1302         if (tsk->utrace_flags & event) {
1303                 /*
1304                  * We have some interested engines, so tell them about the
1305                  * signal and let them change its disposition.
1306                  */
1307
1308                 spin_unlock_irq(&tsk->sighand->siglock);
1309
1310                 action = report_signal(tsk, regs, utrace, action, event, event,
1311                                        info, ka, return_ka);
1312                 action &= UTRACE_ACTION_OP_MASK;
1313
1314                 if (action & UTRACE_SIGNAL_HOLD) {
1315                         struct sigqueue *q = sigqueue_alloc();
1316                         if (likely(q != NULL)) {
1317                                 q->flags = 0;
1318                                 copy_siginfo(&q->info, info);
1319                         }
1320                         action &= ~UTRACE_SIGNAL_HOLD;
1321                         spin_lock_irq(&tsk->sighand->siglock);
1322                         sigaddset(&tsk->pending.signal, info->si_signo);
1323                         if (likely(q != NULL))
1324                                 list_add(&q->list, &tsk->pending.list);
1325                 }
1326                 else
1327                         spin_lock_irq(&tsk->sighand->siglock);
1328
1329                 recalc_sigpending_tsk(tsk);
1330         }
1331
1332         if (tsk->utrace != utrace)
1333                 rcu_utrace_free(utrace);
1334
1335         /*
1336          * We express the chosen action to the signals code in terms
1337          * of a representative signal whose default action does it.
1338          */
1339         switch (action) {
1340         case UTRACE_SIGNAL_IGN:
1341                 /*
1342                  * We've eaten the signal.  That's all we do.
1343                  * Tell the caller to restart.
1344                  */
1345                 spin_unlock_irq(&tsk->sighand->siglock);
1346                 return -1;
1347
1348         case UTRACE_ACTION_RESUME:
1349         case UTRACE_SIGNAL_DELIVER:
1350                 /*
1351                  * The handler will run.  We do the SA_ONESHOT work here
1352                  * since the normal path will only touch *return_ka now.
1353                  */
1354                 if (return_ka->sa.sa_flags & SA_ONESHOT)
1355                         ka->sa.sa_handler = SIG_DFL;
1356                 break;
1357
1358         case UTRACE_SIGNAL_TSTP:
1359                 signal.signr = SIGTSTP;
1360                 tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
1361                 return_ka->sa.sa_handler = SIG_DFL;
1362                 break;
1363
1364         case UTRACE_SIGNAL_STOP:
1365                 signal.signr = SIGSTOP;
1366                 tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
1367                 return_ka->sa.sa_handler = SIG_DFL;
1368                 break;
1369
1370         case UTRACE_SIGNAL_TERM:
1371                 signal.signr = SIGTERM;
1372                 return_ka->sa.sa_handler = SIG_DFL;
1373                 break;
1374
1375         case UTRACE_SIGNAL_CORE:
1376                 signal.signr = SIGQUIT;
1377                 return_ka->sa.sa_handler = SIG_DFL;
1378                 break;
1379
1380         default:
1381                 BUG();
1382         }
1383
1384         return signal.signr;
1385 }
1386
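/*
 * Editor's sketch, not part of the original: a hypothetical report_signal
 * callback that discards SIGUSR1 and leaves every other signal's disposition
 * alone.  The signature is inferred from the REPORT invocation in
 * report_signal above.  Returning UTRACE_SIGNAL_IGN makes utrace_get_signal
 * treat the signal as eaten; returning the incoming disposition leaves it
 * unchanged.  (This assumes the engine does not request the single-step
 * action bits, since the signal-handler singlestep path passes a NULL info.)
 */
#if 0	/* illustrative only */
static u32 example_report_signal(struct utrace_attached_engine *engine,
				 struct task_struct *tsk,
				 struct pt_regs *regs,
				 u32 action, siginfo_t *info,
				 const struct k_sigaction *orig_ka,
				 struct k_sigaction *return_ka)
{
	if (info->si_signo == SIGUSR1)
		return UTRACE_SIGNAL_IGN;	/* eat the signal */
	return action;				/* keep the existing disposition */
}
#endif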
1387
1388 /*
1389  * Cause a specified signal delivery in the target thread,
1390  * which must be quiescent.  The action has UTRACE_SIGNAL_* bits
1391  * as returned from a report_signal callback.  If ka is non-null,
1392  * it gives the sigaction to follow for UTRACE_SIGNAL_DELIVER;
1393  * otherwise, the installed sigaction at the time of delivery is used.
1394  */
1395 int
1396 utrace_inject_signal(struct task_struct *target,
1397                     struct utrace_attached_engine *engine,
1398                     u32 action, siginfo_t *info,
1399                     const struct k_sigaction *ka)
1400 {
1401         struct utrace *utrace;
1402         struct utrace_signal *signal;
1403         int ret;
1404
1405         if (info->si_signo == 0 || !valid_signal(info->si_signo))
1406                 return -EINVAL;
1407
1408         rcu_read_lock();
1409         utrace = rcu_dereference(target->utrace);
1410         if (utrace == NULL) {
1411                 rcu_read_unlock();
1412                 return -ESRCH;
1413         }
1414         utrace_lock(utrace);
1415         rcu_read_unlock();
1416
1417         ret = 0;
1418         signal = utrace->u.live.signal;
1419         if (signal == NULL) {
1420                 ret = -ENOSYS;  /* XXX */
1421         }
1422         else if (signal->signr != 0)
1423                 ret = -EAGAIN;
1424         else {
1425                 if (info != signal->info)
1426                         copy_siginfo(signal->info, info);
1427
1428                 switch (action) {
1429                 default:
1430                         ret = -EINVAL;
1431                         break;
1432
1433                 case UTRACE_SIGNAL_IGN:
1434                         break;
1435
1436                 case UTRACE_ACTION_RESUME:
1437                 case UTRACE_SIGNAL_DELIVER:
1438                         /*
1439                          * The handler will run.  We do the SA_ONESHOT work
1440                          * here since the normal path will not touch the
1441                          * real sigaction when using an injected signal.
1442                          */
1443                         if (ka == NULL)
1444                                 signal->return_ka = NULL;
1445                         else if (ka != signal->return_ka)
1446                                 *signal->return_ka = *ka;
1447                         if (ka && ka->sa.sa_flags & SA_ONESHOT) {
1448                                 struct k_sigaction *a;
1449                                 a = &target->sighand->action[info->si_signo-1];
1450                                 spin_lock_irq(&target->sighand->siglock);
1451                                 a->sa.sa_handler = SIG_DFL;
1452                                 spin_unlock_irq(&target->sighand->siglock);
1453                         }
1454                         signal->signr = info->si_signo;
1455                         break;
1456
1457                 case UTRACE_SIGNAL_TSTP:
1458                         signal->signr = SIGTSTP;
1459                         spin_lock_irq(&target->sighand->siglock);
1460                         target->signal->flags |= SIGNAL_STOP_DEQUEUED;
1461                         spin_unlock_irq(&target->sighand->siglock);
1462                         signal->return_ka->sa.sa_handler = SIG_DFL;
1463                         break;
1464
1465                 case UTRACE_SIGNAL_STOP:
1466                         signal->signr = SIGSTOP;
1467                         spin_lock_irq(&target->sighand->siglock);
1468                         target->signal->flags |= SIGNAL_STOP_DEQUEUED;
1469                         spin_unlock_irq(&target->sighand->siglock);
1470                         signal->return_ka->sa.sa_handler = SIG_DFL;
1471                         break;
1472
1473                 case UTRACE_SIGNAL_TERM:
1474                         signal->signr = SIGTERM;
1475                         signal->return_ka->sa.sa_handler = SIG_DFL;
1476                         break;
1477
1478                 case UTRACE_SIGNAL_CORE:
1479                         signal->signr = SIGQUIT;
1480                         signal->return_ka->sa.sa_handler = SIG_DFL;
1481                         break;
1482                 }
1483         }
1484
1485         utrace_unlock(utrace);
1486
1487         return ret;
1488 }
1489 EXPORT_SYMBOL_GPL(utrace_inject_signal);
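/*
 * Example usage (editor's sketch, not part of the original): a caller that
 * has already made the target quiescent (for instance by setting
 * UTRACE_ACTION_QUIESCE via utrace_set_flags and waiting for its
 * report_quiesce callback) injects a SIGUSR1 delivery.  -ENOSYS here means
 * the target is not currently sitting in utrace_get_signal, per the XXX
 * above.
 *
 *	siginfo_t info;
 *	memset(&info, 0, sizeof info);
 *	info.si_signo = SIGUSR1;
 *	info.si_code = SI_KERNEL;
 *	err = utrace_inject_signal(target, engine,
 *				   UTRACE_SIGNAL_DELIVER, &info, NULL);
 */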
1490
1491
1492 const struct utrace_regset *
1493 utrace_regset(struct task_struct *target,
1494               struct utrace_attached_engine *engine,
1495               const struct utrace_regset_view *view, int which)
1496 {
1497         if (unlikely((unsigned) which >= view->n))
1498                 return NULL;
1499
1500         if (target != current)
1501                 wait_task_inactive(target);
1502
1503         return &view->regsets[which];
1504 }
1505 EXPORT_SYMBOL_GPL(utrace_regset);
1506
1507
1508 /*
1509  * Return the task_struct for the task using ptrace on this one, or NULL.
1510  * Must be called with rcu_read_lock held to keep the returned struct alive.
1511  *
1512  * At exec time, this may be called with task_lock(p) still held from when
1513  * tracehook_unsafe_exec was just called.  In that case it must give
1514  * results consistent with those unsafe_exec results, i.e. non-NULL if
1515  * any LSM_UNSAFE_PTRACE_* bits were set.
1516  *
1517  * The value is also used to display after "TracerPid:" in /proc/PID/status,
1518  * where it is called with only rcu_read_lock held.
1519  */
1520 struct task_struct *
1521 utrace_tracer_task(struct task_struct *target)
1522 {
1523         struct utrace *utrace;
1524         struct task_struct *tracer = NULL;
1525
1526         utrace = rcu_dereference(target->utrace);
1527         if (utrace != NULL) {
1528                 struct list_head *pos, *next;
1529                 struct utrace_attached_engine *engine;
1530                 const struct utrace_engine_ops *ops;
1531                 list_for_each_safe_rcu(pos, next, &utrace->engines) {
1532                         engine = list_entry(pos, struct utrace_attached_engine,
1533                                             entry);
1534                         ops = rcu_dereference(engine->ops);
1535                         if (ops->tracer_task) {
1536                                 tracer = (*ops->tracer_task)(engine, target);
1537                                 if (tracer != NULL)
1538                                         break;
1539                         }
1540                 }
1541         }
1542
1543         return tracer;
1544 }
1545
1546 int
1547 utrace_allow_access_process_vm(struct task_struct *target)
1548 {
1549         struct utrace *utrace;
1550         int ret = 0;
1551
1552         rcu_read_lock();
1553         utrace = rcu_dereference(target->utrace);
1554         if (utrace != NULL) {
1555                 struct list_head *pos, *next;
1556                 struct utrace_attached_engine *engine;
1557                 const struct utrace_engine_ops *ops;
1558                 list_for_each_safe_rcu(pos, next, &utrace->engines) {
1559                         engine = list_entry(pos, struct utrace_attached_engine,
1560                                             entry);
1561                         ops = rcu_dereference(engine->ops);
1562                         if (ops->allow_access_process_vm) {
1563                                 ret = (*ops->allow_access_process_vm)(engine,
1564                                                                       target,
1565                                                                       current);
1566                                 if (ret)
1567                                         break;
1568                         }
1569                 }
1570         }
1571         rcu_read_unlock();
1572
1573         return ret;
1574 }
1575
1576 /*
1577  * Called on the current task to return LSM_UNSAFE_* bits implied by tracing.
1578  * Called with task_lock held.
1579  */
1580 int
1581 utrace_unsafe_exec(struct task_struct *tsk)
1582 {
1583         struct utrace *utrace = tsk->utrace;
1584         struct list_head *pos, *next;
1585         struct utrace_attached_engine *engine;
1586         const struct utrace_engine_ops *ops;
1587         int unsafe = 0;
1588
1589         /* XXX must change for sharing */
1590         list_for_each_safe_rcu(pos, next, &utrace->engines) {
1591                 engine = list_entry(pos, struct utrace_attached_engine, entry);
1592                 ops = rcu_dereference(engine->ops);
1593                 if (ops->unsafe_exec)
1594                         unsafe |= (*ops->unsafe_exec)(engine, tsk);
1595         }
1596
1597         return unsafe;
1598 }
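/*
 * Editor's sketch, not part of the original: a hypothetical unsafe_exec hook
 * for a ptrace-like engine.  The signature is inferred from the call above;
 * the LSM_UNSAFE_* bit values live in <linux/security.h>, and which bits an
 * engine reports is its own policy decision.
 *
 *	static int example_unsafe_exec(struct utrace_attached_engine *engine,
 *				       struct task_struct *tsk)
 *	{
 *		return LSM_UNSAFE_PTRACE;
 *	}
 */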