/* ptrace.c */
/* By Ross Biro 1/23/92 */
/*
 * Pentium III FXSR, SSE support
 *      Gareth Hughes <gareth@valinux.com>, May 2000
 *
 * x86-64 port 2000-2002 Andi Kleen
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/errno.h>
#include <linux/tracehook.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/signal.h>
#include <linux/module.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/debugreg.h>
#include <asm/ldt.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/ia32.h>
#include <asm/prctl.h>
/*
 * Does not yet catch signals sent when the child dies;
 * that is handled in exit.c and signal.c.
 */

/*
 * Determines which flags the user has access to [1 = access, 0 = no access].
 * Prohibits changing ID(21), VIP(20), VIF(19), VM(17), IOPL(12-13), IF(9).
 * Also masks reserved bits (63-22, 15, 5, 3, 1).
 */
#define FLAG_MASK 0x54dd5UL
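
/*
 * For reference, the mask above can be reconstructed from the eflags bit
 * layout.  The user-writable bits under FLAG_MASK are
 *
 *      CF(0) PF(2) AF(4) ZF(6) SF(7) TF(8) DF(10) OF(11) NT(14) RF(16) AC(18)
 *
 * i.e. (1<<0)|(1<<2)|(1<<4)|(1<<6)|(1<<7)|(1<<8)|(1<<10)|(1<<11)|
 *      (1<<14)|(1<<16)|(1<<18) == 0x54dd5.
 * Everything else (IF, IOPL, VM, VIF, VIP, ID and the reserved bits) is
 * preserved from the child's saved eflags in putreg() below.
 */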

/* Sets the trap flag. */
#define TRAP_FLAG 0x100UL

/*
 * Offset of eflags within struct pt_regs, and relative to the top of the
 * child's kernel stack.
 */
#define EFLAGS offsetof(struct pt_regs, eflags)
#define EFL_OFFSET ((int)(EFLAGS-sizeof(struct pt_regs)))

/*
 * This routine gets a word off the process's privileged stack.
 * The offset is how far from the base address stored in the TSS.
 * It assumes that all the privileged stacks are in our data space.
 */
static inline unsigned long get_stack_long(struct task_struct *task, int offset)
{
        unsigned char *stack;

        stack = (unsigned char *)task->thread.rsp0;
        stack += offset;
        return (*((unsigned long *)stack));
}
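
/*
 * A sketch of how the negative offsets used with get_stack_long() and
 * put_stack_long() line up: thread.rsp0 points just past the struct
 * pt_regs pushed on kernel entry, so a register at byte offset REG in
 * struct pt_regs lives at
 *
 *      rsp0 - sizeof(struct pt_regs) + REG
 *
 * which is exactly what EFL_OFFSET encodes for eflags, and what
 * putreg()/getreg() compute below with "regno - sizeof(struct pt_regs)".
 */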

/*
 * This routine puts a word on the process's privileged stack.
 * The offset is how far from the base address stored in the TSS.
 * It assumes that all the privileged stacks are in our data space.
 */
static inline long put_stack_long(struct task_struct *task, int offset,
        unsigned long data)
{
        unsigned char *stack;

        stack = (unsigned char *) task->thread.rsp0;
        stack += offset;
        *(unsigned long *) stack = data;
        return 0;
}

#define LDT_SEGMENT 4

unsigned long convert_rip_to_linear(struct task_struct *child, struct pt_regs *regs)
{
        unsigned long addr, seg;

        addr = regs->rip;
        seg = regs->cs & 0xffff;

        /*
         * We'll assume that the code segments in the GDT
         * are all zero-based. That is largely true: the
         * TLS segments are used for data, and the PNPBIOS
         * and APM bios ones we just ignore here.
         */
        if (seg & LDT_SEGMENT) {
                u32 *desc;
                unsigned long base;

                down(&child->mm->context.sem);
                desc = child->mm->context.ldt + (seg & ~7);
                base = (desc[0] >> 16) | ((desc[1] & 0xff) << 16) | (desc[1] & 0xff000000);

                /* 16-bit code segment? */
                if (!((desc[1] >> 22) & 1))
                        addr &= 0xffff;
                addr += base;
                up(&child->mm->context.sem);
        }
        return addr;
}
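
/*
 * To illustrate the base calculation above (the descriptor layout is
 * architectural; the example bytes are made up): an LDT descriptor is two
 * 32-bit words.  Base bits 15..0 live in the high half of word 0, bits
 * 23..16 in the low byte of word 1, and bits 31..24 in the top byte of
 * word 1.  So for desc[0] = 0x12340000 and desc[1] = 0xab000056 the code
 * above yields base = 0x1234 | (0x56 << 16) | 0xab000000 = 0xab561234.
 */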

static int is_at_popf(struct task_struct *child, struct pt_regs *regs)
{
        int i, copied;
        unsigned char opcode[16];
        unsigned long addr = convert_rip_to_linear(child, regs);

        copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
        for (i = 0; i < copied; i++) {
                switch (opcode[i]) {
                /* popf */
                case 0x9d:
                        return 1;

                        /* CHECKME: 64 65 */

                /* opcode and address size prefixes */
                case 0x66: case 0x67:
                        continue;
                /* irrelevant prefixes (segment overrides and repeats) */
                case 0x26: case 0x2e:
                case 0x36: case 0x3e:
                case 0x64: case 0x65:
                case 0xf0: case 0xf2: case 0xf3:
                        continue;

                /* REX prefixes */
                case 0x40 ... 0x4f:
                        continue;

                        /* CHECKME: f0, f2, f3 */

                /*
                 * pushf: NOTE! We should probably not let
                 * the user see the TF bit being set. But
                 * it's more pain than it's worth to avoid
                 * it, and a debugger could emulate this
                 * all in user space if it _really_ cares.
                 */
                case 0x9c:
                default:
                        return 0;
                }
        }
        return 0;
}

void tracehook_enable_single_step(struct task_struct *child)
{
        struct pt_regs *regs = task_pt_regs(child);

        /*
         * Always set TIF_SINGLESTEP - this guarantees that
         * we single-step system calls etc..  This will also
         * cause us to set TF when returning to user mode.
         */
        set_tsk_thread_flag(child, TIF_SINGLESTEP);

        /*
         * If TF was already set, don't do anything else
         */
        if (regs->eflags & TRAP_FLAG)
                return;

        /* Set TF on the kernel stack.. */
        regs->eflags |= TRAP_FLAG;

        /*
         * ..but if TF is changed by the instruction we will trace,
         * don't mark it as being "us" that set it, so that we
         * won't clear it by hand later.
         *
         * AK: this is not enough, LAHF and IRET can change TF in user space too.
         */
        if (is_at_popf(child, regs))
                return;

        set_tsk_thread_flag(child, TIF_FORCED_TF);
}

void tracehook_disable_single_step(struct task_struct *child)
{
        /* Always clear TIF_SINGLESTEP... */
        clear_tsk_thread_flag(child, TIF_SINGLESTEP);

        /* But touch TF only if it was set by us.. */
        if (test_and_clear_tsk_thread_flag(child, TIF_FORCED_TF)) {
                struct pt_regs *regs = task_pt_regs(child);
                regs->eflags &= ~TRAP_FLAG;
        }
}

/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure the single step bit is not set.
 */
void ptrace_disable(struct task_struct *child)
{
        tracehook_disable_single_step(child);
}

static int putreg(struct task_struct *child,
        unsigned long regno, unsigned long value)
{
        unsigned long tmp;

        /* Some code in the 64bit emulation may not be 64bit clean.
           Don't take any chances. */
        if (test_tsk_thread_flag(child, TIF_IA32))
                value &= 0xffffffff;
        switch (regno) {
                case offsetof(struct user_regs_struct,fs):
                        if (value && (value & 3) != 3)
                                return -EIO;
                        child->thread.fsindex = value & 0xffff;
                        return 0;
                case offsetof(struct user_regs_struct,gs):
                        if (value && (value & 3) != 3)
                                return -EIO;
                        child->thread.gsindex = value & 0xffff;
                        return 0;
                case offsetof(struct user_regs_struct,ds):
                        if (value && (value & 3) != 3)
                                return -EIO;
                        child->thread.ds = value & 0xffff;
                        return 0;
                case offsetof(struct user_regs_struct,es):
                        if (value && (value & 3) != 3)
                                return -EIO;
                        child->thread.es = value & 0xffff;
                        return 0;
                case offsetof(struct user_regs_struct,ss):
                        if ((value & 3) != 3)
                                return -EIO;
                        value &= 0xffff;
                        return 0;
                case offsetof(struct user_regs_struct,fs_base):
                        if (value >= TASK_SIZE_OF(child))
                                return -EIO;
                        child->thread.fs = value;
                        return 0;
                case offsetof(struct user_regs_struct,gs_base):
                        if (value >= TASK_SIZE_OF(child))
                                return -EIO;
                        child->thread.gs = value;
                        return 0;
                case offsetof(struct user_regs_struct, eflags):
                        value &= FLAG_MASK;
                        tmp = get_stack_long(child, EFL_OFFSET);
                        tmp &= ~FLAG_MASK;
                        value |= tmp;
                        clear_tsk_thread_flag(child, TIF_FORCED_TF);
                        break;
                case offsetof(struct user_regs_struct,cs):
                        if ((value & 3) != 3)
                                return -EIO;
                        value &= 0xffff;
                        break;
        }
        put_stack_long(child, regno - sizeof(struct pt_regs), value);
        return 0;
}

static unsigned long getreg(struct task_struct *child, unsigned long regno)
{
        unsigned long val;
        switch (regno) {
                case offsetof(struct user_regs_struct, fs):
                        return child->thread.fsindex;
                case offsetof(struct user_regs_struct, gs):
                        return child->thread.gsindex;
                case offsetof(struct user_regs_struct, ds):
                        return child->thread.ds;
                case offsetof(struct user_regs_struct, es):
                        return child->thread.es;
                case offsetof(struct user_regs_struct, fs_base):
                        return child->thread.fs;
                case offsetof(struct user_regs_struct, gs_base):
                        return child->thread.gs;
                default:
                        regno = regno - sizeof(struct pt_regs);
                        val = get_stack_long(child, regno);
                        if (test_tsk_thread_flag(child, TIF_IA32))
                                val &= 0xffffffff;
                        if (regno == (offsetof(struct user_regs_struct, eflags)
                                      - sizeof(struct pt_regs))
                            && test_tsk_thread_flag(child, TIF_FORCED_TF))
                                val &= ~X86_EFLAGS_TF;
                        return val;
        }
}

static int
genregs_get(struct task_struct *target,
            const struct utrace_regset *regset,
            unsigned int pos, unsigned int count,
            void *kbuf, void __user *ubuf)
{
        if (kbuf) {
                unsigned long *kp = kbuf;
                while (count > 0) {
                        *kp++ = getreg(target, pos);
                        pos += sizeof(long);
                        count -= sizeof(long);
                }
        }
        else {
                unsigned long __user *up = ubuf;
                while (count > 0) {
                        if (__put_user(getreg(target, pos), up++))
                                return -EFAULT;
                        pos += sizeof(long);
                        count -= sizeof(long);
                }
        }

        return 0;
}

static int
genregs_set(struct task_struct *target,
            const struct utrace_regset *regset,
            unsigned int pos, unsigned int count,
            const void *kbuf, const void __user *ubuf)
{
        int ret = 0;

        if (kbuf) {
                const unsigned long *kp = kbuf;
                while (!ret && count > 0) {
                        ret = putreg(target, pos, *kp++);
                        pos += sizeof(long);
                        count -= sizeof(long);
                }
        }
        else {
                const unsigned long __user *up = ubuf;
                while (!ret && count > 0) {
                        unsigned long val;
                        ret = __get_user(val, up++);
                        if (!ret)
                                ret = putreg(target, pos, val);
                        pos += sizeof(long);
                        count -= sizeof(long);
                }
        }

        return ret;
}


static int
dbregs_active(struct task_struct *tsk, const struct utrace_regset *regset)
{
        if (tsk->thread.debugreg6 | tsk->thread.debugreg7)
                return 8;
        return 0;
}

static int
dbregs_get(struct task_struct *target,
           const struct utrace_regset *regset,
           unsigned int pos, unsigned int count,
           void *kbuf, void __user *ubuf)
{
        for (pos >>= 3, count >>= 3; count > 0; --count, ++pos) {
                unsigned long val;

                /*
                 * The hardware updates the status register on a debug trap,
                 * but do_debug (traps.c) saves it for us when that happens.
                 * So whether the target is current or not, debugregN is good.
                 */
                val = 0;
                switch (pos) {
                case 0: val = target->thread.debugreg0; break;
                case 1: val = target->thread.debugreg1; break;
                case 2: val = target->thread.debugreg2; break;
                case 3: val = target->thread.debugreg3; break;
                case 6: val = target->thread.debugreg6; break;
                case 7: val = target->thread.debugreg7; break;
                }

                if (kbuf) {
                        *(unsigned long *) kbuf = val;
                        kbuf += sizeof(unsigned long);
                }
                else {
                        if (__put_user(val, (unsigned long __user *) ubuf))
                                return -EFAULT;
                        ubuf += sizeof(unsigned long);
                }
        }

        return 0;
}

static int
dbregs_set(struct task_struct *target,
           const struct utrace_regset *regset,
           unsigned int pos, unsigned int count,
           const void *kbuf, const void __user *ubuf)
{
        unsigned long maxaddr = TASK_SIZE_OF(target);
        maxaddr -= test_tsk_thread_flag(target, TIF_IA32) ? 3 : 7;

        for (pos >>= 3, count >>= 3; count > 0; --count, ++pos) {
                unsigned long val;
                unsigned int i;

                if (kbuf) {
                        val = *(const unsigned long *) kbuf;
                        kbuf += sizeof(unsigned long);
                }
                else {
                        if (__get_user(val, (unsigned long __user *) ubuf))
                                return -EFAULT;
                        ubuf += sizeof(unsigned long);
                }

                switch (pos) {
#define SET_DBREG(n)                                                    \
                        target->thread.debugreg##n = val;               \
                        if (target == current)                          \
                                set_debugreg(target->thread.debugreg##n, n)

                case 0:
                        if (val >= maxaddr)
                                return -EIO;
                        SET_DBREG(0);
                        break;
                case 1:
                        if (val >= maxaddr)
                                return -EIO;
                        SET_DBREG(1);
                        break;
                case 2:
                        if (val >= maxaddr)
                                return -EIO;
                        SET_DBREG(2);
                        break;
                case 3:
                        if (val >= maxaddr)
                                return -EIO;
                        SET_DBREG(3);
                        break;
                case 4:
                case 5:
                        if (val != 0)
                                return -EIO;
                        break;
                case 6:
                        if (val >> 32)
                                return -EIO;
                        SET_DBREG(6);
                        break;
                case 7:
                        /*
                         * See arch/i386/kernel/ptrace.c for an explanation
                         * of this awkward check.
                         */
                        val &= ~DR_CONTROL_RESERVED;
                        for (i = 0; i < 4; i++)
                                if ((0x5554 >> ((val >> (16 + 4*i)) & 0xf))
                                    & 1)
                                        return -EIO;
                        SET_DBREG(7);
                        break;
#undef  SET_DBREG
                }
        }

        return 0;
}
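
/*
 * A note on the 0x5554 check above (this unpacks the i386 comment it
 * refers to; the bit layout is architectural): each breakpoint i has a
 * 4-bit field in DR7 at bit 16 + 4*i, LEN in the high two bits and the
 * R/W type in the low two.  0x5554 acts as a 16-entry lookup table
 * indexed by that nibble, with a 1 bit for every invalid combination:
 * any I/O breakpoint type (R/W == 10) and any execute breakpoint
 * (R/W == 00) with a nonzero length.  E.g. nibble 0x3 (1-byte data
 * read/write) is accepted, while nibble 0x2 (I/O) is rejected with -EIO.
 */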


static int
fpregs_active(struct task_struct *target, const struct utrace_regset *regset)
{
        return tsk_used_math(target) ? regset->n : 0;
}

static int
fpregs_get(struct task_struct *target,
           const struct utrace_regset *regset,
           unsigned int pos, unsigned int count,
           void *kbuf, void __user *ubuf)
{
        if (tsk_used_math(target)) {
                if (target == current)
                        unlazy_fpu(target);
        }
        else
                init_fpu(target);

        return utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                     &target->thread.i387.fxsave, 0, -1);
}

static int
fpregs_set(struct task_struct *target,
           const struct utrace_regset *regset,
           unsigned int pos, unsigned int count,
           const void *kbuf, const void __user *ubuf)
{
        int ret;

        if (tsk_used_math(target)) {
                if (target == current)
                        unlazy_fpu(target);
        }
        else if (pos == 0 && count == sizeof(struct user_i387_struct))
                set_stopped_child_used_math(target);
        else
                init_fpu(target);

        ret = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                   &target->thread.i387.fxsave, 0, -1);

        target->thread.i387.fxsave.mxcsr &= mxcsr_feature_mask;

        return ret;
}

static int
fsgs_active(struct task_struct *tsk, const struct utrace_regset *regset)
{
        if (tsk->thread.gsindex == GS_TLS_SEL || tsk->thread.gs)
                return 2;
        if (tsk->thread.fsindex == FS_TLS_SEL || tsk->thread.fs)
                return 1;
        return 0;
}

static inline u32 read_32bit_tls(struct task_struct *t, int tls)
{
        struct desc_struct *desc = (void *)t->thread.tls_array;
        desc += tls;
        return desc->base0 |
                (((u32)desc->base1) << 16) |
                (((u32)desc->base2) << 24);
}
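
/*
 * For comparison with the raw-word decoding in convert_rip_to_linear():
 * struct desc_struct already names the split base fields, base0 holding
 * bits 15..0, base1 bits 23..16 and base2 bits 31..24, so the shifts
 * above reassemble the same 32-bit base address.
 */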

static int
fsgs_get(struct task_struct *target,
         const struct utrace_regset *regset,
         unsigned int pos, unsigned int count,
         void *kbuf, void __user *ubuf)
{
        unsigned long *kaddr = kbuf;
        unsigned long __user *uaddr = ubuf;
        unsigned long addr;

        /*
         * XXX why the MSR reads here?
         * Can anything change the MSRs without changing thread.fs first?
         */
        if (pos == 0) {         /* FS */
                if (target->thread.fsindex == FS_TLS_SEL)
                        addr = read_32bit_tls(target, FS_TLS);
                else if (target == current) {
                        rdmsrl(MSR_FS_BASE, addr);
                }
                else
                        addr = target->thread.fs;
                if (kaddr)
                        *kaddr++ = addr;
                else if (__put_user(addr, uaddr++))
                        return -EFAULT;
        }

        if (count > sizeof(unsigned long)) { /* GS */
                if (target->thread.gsindex == GS_TLS_SEL)
                        addr = read_32bit_tls(target, GS_TLS);
                else if (target == current) {
                        rdmsrl(MSR_GS_BASE, addr);
                }
                else
                        addr = target->thread.gs;
                if (kaddr)
                        *kaddr = addr;
                else if (__put_user(addr, uaddr))
                        return -EFAULT;
        }

        return 0;
}

static int
fsgs_set(struct task_struct *target,
         const struct utrace_regset *regset,
         unsigned int pos, unsigned int count,
         const void *kbuf, const void __user *ubuf)
{
        const unsigned long *kaddr = kbuf;
        const unsigned long __user *uaddr = ubuf;
        unsigned long addr;
        int ret = 0;

        if (pos == 0) {         /* FS */
                if (kaddr)
                        addr = *kaddr++;
                else if (__get_user(addr, uaddr++))
                        return -EFAULT;
                ret = do_arch_prctl(target, ARCH_SET_FS, addr);
        }

        if (!ret && count > sizeof(unsigned long)) { /* GS */
                if (kaddr)
                        addr = *kaddr;
                else if (__get_user(addr, uaddr))
                        return -EFAULT;
                ret = do_arch_prctl(target, ARCH_SET_GS, addr);
        }

        return ret;
}


/*
 * These are our native regset flavors.
 * XXX ioperm? vm86?
 */
static const struct utrace_regset native_regsets[] = {
        {
                .n = sizeof(struct user_regs_struct)/8, .size = 8, .align = 8,
                .get = genregs_get, .set = genregs_set
        },
        {
                .n = sizeof(struct user_i387_struct) / sizeof(long),
                .size = sizeof(long), .align = sizeof(long),
                .active = fpregs_active,
                .get = fpregs_get, .set = fpregs_set
        },
        {
                .n = 2, .size = sizeof(long), .align = sizeof(long),
                .active = fsgs_active,
                .get = fsgs_get, .set = fsgs_set
        },
        {
                .n = 8, .size = sizeof(long), .align = sizeof(long),
                .active = dbregs_active,
                .get = dbregs_get, .set = dbregs_set
        },
};

const struct utrace_regset_view utrace_x86_64_native = {
        .name = "x86-64", .e_machine = EM_X86_64,
        .regsets = native_regsets,
        .n = sizeof native_regsets / sizeof native_regsets[0],
};
EXPORT_SYMBOL_GPL(utrace_x86_64_native);


#ifdef CONFIG_PTRACE
static const struct ptrace_layout_segment x86_64_uarea[] = {
        {0, sizeof(struct user_regs_struct), 0, 0},
        {offsetof(struct user, u_debugreg[0]),
         offsetof(struct user, u_debugreg[4]), 3, 0},
        {offsetof(struct user, u_debugreg[6]),
         offsetof(struct user, u_debugreg[8]), 3, 6 * sizeof(long)},
        {0, 0, -1, 0}
};
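
/*
 * Reading the table above (assuming the usual utrace segment meaning of
 * { user-area start, user-area end, regset index, regset offset }):
 * PTRACE_PEEKUSR/POKEUSR offsets inside user_regs_struct go to regset 0
 * (the general registers); u_debugreg[0..3] map to the debug regset at
 * offset 0; u_debugreg[6..7] map to the same regset at byte offset
 * 6 * sizeof(long), so u_debugreg[4..5] fall in the hole between
 * segments and are not mapped.  The {0, 0, -1, 0} entry terminates the
 * table.
 */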

fastcall int arch_ptrace(long *req, struct task_struct *child,
                         struct utrace_attached_engine *engine,
                         unsigned long addr, unsigned long data, long *val)
{
        switch (*req) {
        case PTRACE_PEEKUSR:
                return ptrace_peekusr(child, engine, x86_64_uarea, addr, data);
        case PTRACE_POKEUSR:
                return ptrace_pokeusr(child, engine, x86_64_uarea, addr, data);
        case PTRACE_GETREGS:
                return ptrace_whole_regset(child, engine, data, 0, 0);
        case PTRACE_SETREGS:
                return ptrace_whole_regset(child, engine, data, 0, 1);
        case PTRACE_GETFPREGS:
                return ptrace_whole_regset(child, engine, data, 1, 0);
        case PTRACE_SETFPREGS:
                return ptrace_whole_regset(child, engine, data, 1, 1);
#ifdef CONFIG_IA32_EMULATION
        case PTRACE_GET_THREAD_AREA:
        case PTRACE_SET_THREAD_AREA:
                return ptrace_onereg_access(child, engine,
                                            &utrace_ia32_view, 3,
                                            addr, (void __user *)data,
                                            *req == PTRACE_SET_THREAD_AREA);
#endif
                /* normal 64bit interface to access TLS data.
                   Works just like arch_prctl, except that the arguments
                   are reversed. */
        case PTRACE_ARCH_PRCTL:
                return do_arch_prctl(child, data, addr);
        }
        return -ENOSYS;
}
#endif  /* CONFIG_PTRACE */
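
/*
 * To make the "arguments are reversed" remark above concrete, a sketch of
 * the two calls from a tracer's point of view (illustrative only):
 *
 *      arch_prctl(ARCH_GET_FS, (unsigned long)&base);       // in the tracee
 *      ptrace(PTRACE_ARCH_PRCTL, pid, &base, ARCH_GET_FS);  // from the tracer
 *
 * i.e. ptrace's addr slot carries what arch_prctl takes as its second
 * argument, and ptrace's data slot carries the arch_prctl code, which is
 * why the case above calls do_arch_prctl(child, data, addr).
 */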


asmlinkage void syscall_trace_enter(struct pt_regs *regs)
{
        /* do the secure computing check first */
        secure_computing(regs->orig_rax);

        if (test_thread_flag(TIF_SYSCALL_TRACE))
                tracehook_report_syscall(regs, 0);

        if (unlikely(current->audit_context)) {
                if (test_thread_flag(TIF_IA32)) {
                        audit_syscall_entry(AUDIT_ARCH_I386,
                                            regs->orig_rax,
                                            regs->rbx, regs->rcx,
                                            regs->rdx, regs->rsi);
                } else {
                        audit_syscall_entry(AUDIT_ARCH_X86_64,
                                            regs->orig_rax,
                                            regs->rdi, regs->rsi,
                                            regs->rdx, regs->r10);
                }
        }
}

asmlinkage void syscall_trace_leave(struct pt_regs *regs)
{
        if (unlikely(current->audit_context))
                audit_syscall_exit(AUDITSC_RESULT(regs->rax), regs->rax);

        if (test_thread_flag(TIF_SYSCALL_TRACE))
                tracehook_report_syscall(regs, 1);

        if (test_thread_flag(TIF_SINGLESTEP)) {
                force_sig(SIGTRAP, current); /* XXX */
                tracehook_report_syscall_step(regs);
        }
}