linux-2.6.git: arch/x86_64/kernel/ptrace.c
1 /* ptrace.c */
2 /* By Ross Biro 1/23/92 */
3 /*
4  * Pentium III FXSR, SSE support
5  *      Gareth Hughes <gareth@valinux.com>, May 2000
6  * 
7  * x86-64 port 2000-2002 Andi Kleen
8  */
9
10 #include <linux/kernel.h>
11 #include <linux/sched.h>
12 #include <linux/mm.h>
13 #include <linux/smp.h>
14 #include <linux/smp_lock.h>
15 #include <linux/errno.h>
16 #include <linux/tracehook.h>
17 #include <linux/ptrace.h>
18 #include <linux/user.h>
19 #include <linux/security.h>
20 #include <linux/audit.h>
21 #include <linux/seccomp.h>
22 #include <linux/signal.h>
23 #include <linux/module.h>
24
25 #include <asm/uaccess.h>
26 #include <asm/pgtable.h>
27 #include <asm/system.h>
28 #include <asm/processor.h>
29 #include <asm/i387.h>
30 #include <asm/debugreg.h>
31 #include <asm/ldt.h>
32 #include <asm/desc.h>
33 #include <asm/proto.h>
34 #include <asm/ia32.h>
35 #include <asm/prctl.h>
36
37 /*
38  * Does not yet catch signals sent when the child dies;
39  * that belongs in exit.c or in signal.c.
40  */
41
42 /*
43  * Determines which flags the user has access to [1 = access, 0 = no access].
44  * Prohibits changing ID(21), VIP(20), VIF(19), VM(17), IOPL(12-13), IF(9).
45  * Also masks reserved bits (63-22, 15, 5, 3, 1).
46  */
47 #define FLAG_MASK 0x54dd5UL
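/*
 * For reference, FLAG_MASK is just the OR of the permitted bits named
 * above:
 *
 *   CF(0) | PF(2) | AF(4) | ZF(6) | SF(7) | TF(8) | DF(10) | OF(11)
 *        | NT(14) | RF(16) | AC(18)
 *   = 0x00001 | 0x00004 | 0x00010 | 0x00040 | 0x00080 | 0x00100
 *   | 0x00400 | 0x00800 | 0x04000 | 0x10000 | 0x40000
 *   = 0x54dd5
 */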
48
49 /* sets the trap flag. */
50 #define TRAP_FLAG 0x100UL
51
52 /*
53  * eflags and offset of eflags on child stack..
54  */
55 #define EFLAGS offsetof(struct pt_regs, eflags)
56 #define EFL_OFFSET ((int)(EFLAGS-sizeof(struct pt_regs)))
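/*
 * For reference: the child's user-mode pt_regs sit at the top of its
 * kernel stack, just below thread.rsp0, so a saved register lives at
 *
 *   rsp0 + offsetof(struct pt_regs, reg) - sizeof(struct pt_regs)
 *
 * which is exactly what get_stack_long()/put_stack_long() below compute
 * from a (negative) offset such as EFL_OFFSET.
 */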
57
58 /*
59  * this routine will get a word off of the process's privileged stack.
60  * the offset is how far from the base addr as stored in the TSS.  
61  * this routine assumes that all the privileged stacks are in our
62  * data space.
63  */   
64 static inline unsigned long get_stack_long(struct task_struct *task, int offset)
65 {
66         unsigned char *stack;
67
68         stack = (unsigned char *)task->thread.rsp0;
69         stack += offset;
70         return (*((unsigned long *)stack));
71 }
72
73 /*
74  * this routine will put a word on the process's privileged stack.
75  * the offset is how far from the base addr as stored in the TSS.  
76  * this routine assumes that all the privileged stacks are in our
77  * data space.
78  */
79 static inline long put_stack_long(struct task_struct *task, int offset,
80         unsigned long data)
81 {
82         unsigned char * stack;
83
84         stack = (unsigned char *) task->thread.rsp0;
85         stack += offset;
86         *(unsigned long *) stack = data;
87         return 0;
88 }
89
90 #define LDT_SEGMENT 4
91
92 unsigned long convert_rip_to_linear(struct task_struct *child, struct pt_regs *regs)
93 {
94         unsigned long addr, seg;
95
96         addr = regs->rip;
97         seg = regs->cs & 0xffff;
98
99         /*
100          * We'll assume that the code segments in the GDT
101          * are all zero-based. That is largely true: the
102          * TLS segments are used for data, and the PNPBIOS
103          * and APM bios ones we just ignore here.
104          */
105         if (seg & LDT_SEGMENT) {
106                 u32 *desc;
107                 unsigned long base;
108
109                 down(&child->mm->context.sem);
110                 desc = child->mm->context.ldt + (seg & ~7);
111                 base = (desc[0] >> 16) | ((desc[1] & 0xff) << 16) | (desc[1] & 0xff000000);
112
113                 /* 16-bit code segment? */
114                 if (!((desc[1] >> 22) & 1))
115                         addr &= 0xffff;
116                 addr += base;
117                 up(&child->mm->context.sem);
118         }
119         return addr;
120 }
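/*
 * For reference, the decoding above follows the x86 segment descriptor
 * format: bit 2 of a selector is the TI bit (1 = LDT) and the low two
 * bits are the RPL, so (seg & ~7) is the byte offset of the 8-byte
 * descriptor within the LDT.  The 32-bit base is scattered over the two
 * descriptor words:
 *
 *   base[15:0]  = desc[0] bits 31..16
 *   base[23:16] = desc[1] bits  7..0
 *   base[31:24] = desc[1] bits 31..24
 *
 * and bit 22 of desc[1] is the D/B flag (0 means a 16-bit code segment,
 * hence the "addr &= 0xffff" truncation).
 */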
121
122 static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
123 {
124         int i, copied;
125         unsigned char opcode[15];
126         unsigned long addr = convert_rip_to_linear(child, regs);
127
128         copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
129         for (i = 0; i < copied; i++) {
130                 switch (opcode[i]) {
131                 /* popf and iret */
132                 case 0x9d: case 0xcf:
133                         return 1;
134
135                         /* CHECKME: 64 65 */
136
137                 /* operand and address size prefixes */
138                 case 0x66: case 0x67:
139                         continue;
140                 /* irrelevant prefixes (segment overrides and repeats) */
141                 case 0x26: case 0x2e:
142                 case 0x36: case 0x3e:
143                 case 0x64: case 0x65:
144                 case 0xf2: case 0xf3:
145                         continue;
146
147                 case 0x40 ... 0x4f:
148                         if (regs->cs != __USER_CS)
149                                 /* 32-bit mode: register increment */
150                                 return 0;
151                         /* 64-bit mode: REX prefix */
152                         continue;
153
154                         /* CHECKME: f2, f3 */
155
156                 /*
157                  * pushf: NOTE! We should probably not let
158                  * the user see the TF bit being set. But
159                  * it's more pain than it's worth to avoid
160                  * it, and a debugger could emulate this
161                  * all in user space if it _really_ cares.
162                  */
163                 case 0x9c:
164                 default:
165                         return 0;
166                 }
167         }
168         return 0;
169 }
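/*
 * Illustrative examples of the scan above (not exhaustive): a plain
 * "popf" (9d), or "iretq" (48 cf, i.e. REX.W + cf in 64-bit mode), at
 * the instruction pointer makes this return 1, since those instructions
 * reload eflags and may clear TF on their own; an operand-size-prefixed
 * "66 9d" is treated the same way once the prefix byte is skipped.
 * Anything else, including "pushf" (9c), returns 0.
 */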
170
171 void tracehook_enable_single_step(struct task_struct *child)
172 {
173         struct pt_regs *regs = task_pt_regs(child);
174
175         /*
176          * Always set TIF_SINGLESTEP - this guarantees that
177          * we single-step system calls etc..  This will also
178          * cause us to set TF when returning to user mode.
179          */
180         set_tsk_thread_flag(child, TIF_SINGLESTEP);
181
182         /*
183          * If TF was already set, don't do anything else
184          */
185         if (regs->eflags & TRAP_FLAG)
186                 return;
187
188         /* Set TF on the kernel stack.. */
189         regs->eflags |= TRAP_FLAG;
190
191         /*
192          * ..but if TF is changed by the instruction we will trace,
193          * don't mark it as being "us" that set it, so that we
194          * won't clear it by hand later.
195          */
196         if (is_setting_trap_flag(child, regs))
197                 return;
198
199         set_tsk_thread_flag(child, TIF_FORCED_TF);
200 }
201
202 void tracehook_disable_single_step(struct task_struct *child)
203 {
204         /* Always clear TIF_SINGLESTEP... */
205         clear_tsk_thread_flag(child, TIF_SINGLESTEP);
206
207         /* But touch TF only if it was set by us.. */
208         if (test_and_clear_tsk_thread_flag(child, TIF_FORCED_TF)) {
209                 struct pt_regs *regs = task_pt_regs(child);
210                 regs->eflags &= ~TRAP_FLAG;
211         }
212 }
213
214 /*
215  * Called by kernel/ptrace.c when detaching..
216  *
217  * Make sure the single step bit is not set.
218  */
219 void ptrace_disable(struct task_struct *child)
220 {
221         tracehook_disable_single_step(child);
222 }
223
224 static int putreg(struct task_struct *child,
225         unsigned long regno, unsigned long value)
226 {
227         unsigned long tmp; 
228         
229         /* Some code in the IA32 emulation may not be 64-bit clean.
230            Don't take any chances. */
231         if (test_tsk_thread_flag(child, TIF_IA32))
232                 value &= 0xffffffff;
233         switch (regno) {
234                 case offsetof(struct user_regs_struct,fs):
235                         if (value && (value & 3) != 3)
236                                 return -EIO;
237                         child->thread.fsindex = value & 0xffff; 
238                         return 0;
239                 case offsetof(struct user_regs_struct,gs):
240                         if (value && (value & 3) != 3)
241                                 return -EIO;
242                         child->thread.gsindex = value & 0xffff;
243                         return 0;
244                 case offsetof(struct user_regs_struct,ds):
245                         if (value && (value & 3) != 3)
246                                 return -EIO;
247                         child->thread.ds = value & 0xffff;
248                         return 0;
249                 case offsetof(struct user_regs_struct,es): 
250                         if (value && (value & 3) != 3)
251                                 return -EIO;
252                         child->thread.es = value & 0xffff;
253                         return 0;
254                 case offsetof(struct user_regs_struct,ss):
255                         if ((value & 3) != 3)
256                                 return -EIO;
257                         value &= 0xffff;
258                         return 0;
259                 case offsetof(struct user_regs_struct,fs_base):
260                         if (value >= TASK_SIZE_OF(child))
261                                 return -EIO;
262                         child->thread.fs = value;
263                         return 0;
264                 case offsetof(struct user_regs_struct,gs_base):
265                         if (value >= TASK_SIZE_OF(child))
266                                 return -EIO;
267                         child->thread.gs = value;
268                         return 0;
269                 case offsetof(struct user_regs_struct, eflags):
270                         value &= FLAG_MASK;
271                         tmp = get_stack_long(child, EFL_OFFSET); 
272                         tmp &= ~FLAG_MASK; 
273                         value |= tmp;
274                         clear_tsk_thread_flag(child, TIF_FORCED_TF);
275                         break;
276                 case offsetof(struct user_regs_struct,cs): 
277                         if ((value & 3) != 3)
278                                 return -EIO;
279                         value &= 0xffff;
280                         break;
281         }
282         put_stack_long(child, regno - sizeof(struct pt_regs), value);
283         return 0;
284 }
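/*
 * For reference, the eflags case above merges the traced value into the
 * saved flags so that only user-visible bits can change:
 *
 *   new_eflags = (value & FLAG_MASK) | (old_eflags & ~FLAG_MASK);
 *
 * IOPL, IF, VM and the reserved bits therefore keep whatever the kernel
 * saved for the child.
 */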
285
286 static unsigned long getreg(struct task_struct *child, unsigned long regno)
287 {
288         unsigned long val;
289         switch (regno) {
290                 case offsetof(struct user_regs_struct, fs):
291                         return child->thread.fsindex;
292                 case offsetof(struct user_regs_struct, gs):
293                         return child->thread.gsindex;
294                 case offsetof(struct user_regs_struct, ds):
295                         return child->thread.ds;
296                 case offsetof(struct user_regs_struct, es):
297                         return child->thread.es; 
298                 case offsetof(struct user_regs_struct, fs_base):
299                         return child->thread.fs;
300                 case offsetof(struct user_regs_struct, gs_base):
301                         return child->thread.gs;
302                 default:
303                         regno = regno - sizeof(struct pt_regs);
304                         val = get_stack_long(child, regno);
305                         if (test_tsk_thread_flag(child, TIF_IA32))
306                                 val &= 0xffffffff;
307                         if (regno == (offsetof(struct user_regs_struct, eflags)
308                                       - sizeof(struct pt_regs))
309                             && test_tsk_thread_flag(child, TIF_FORCED_TF))
310                                 val &= ~X86_EFLAGS_TF;
311                         return val;
312         }
313
314 }
315
316 static int
317 genregs_get(struct task_struct *target,
318             const struct utrace_regset *regset,
319             unsigned int pos, unsigned int count,
320             void *kbuf, void __user *ubuf)
321 {
322         if (kbuf) {
323                 unsigned long *kp = kbuf;
324                 while (count > 0) {
325                         *kp++ = getreg(target, pos);
326                         pos += sizeof(long);
327                         count -= sizeof(long);
328                 }
329         }
330         else {
331                 unsigned long __user *up = ubuf;
332                 while (count > 0) {
333                         if (__put_user(getreg(target, pos), up++))
334                                 return -EFAULT;
335                         pos += sizeof(long);
336                         count -= sizeof(long);
337                 }
338         }
339
340         return 0;
341 }
342
343 static int
344 genregs_set(struct task_struct *target,
345             const struct utrace_regset *regset,
346             unsigned int pos, unsigned int count,
347             const void *kbuf, const void __user *ubuf)
348 {
349         int ret = 0;
350
351         if (kbuf) {
352                 const unsigned long *kp = kbuf;
353                 while (!ret && count > 0) {
354                         ret = putreg(target, pos, *kp++);
355                         pos += sizeof(long);
356                         count -= sizeof(long);
357                 }
358         }
359         else {
360                 const unsigned long __user *up = ubuf;
361
362                 while (!ret && count > 0) {
363                         unsigned long val;
364                         ret = __get_user(val, up++);
365                         if (!ret)
366                                 ret = putreg(target, pos, val);
367                         pos += sizeof(long);
368                         count -= sizeof(long);
369                 }
370         }
371
372         return ret;
373 }
374
375
376 static int
377 dbregs_active(struct task_struct *tsk, const struct utrace_regset *regset)
378 {
379         if (tsk->thread.debugreg6 | tsk->thread.debugreg7)
380                 return 8;
381         return 0;
382 }
383
384 static int
385 dbregs_get(struct task_struct *target,
386            const struct utrace_regset *regset,
387            unsigned int pos, unsigned int count,
388            void *kbuf, void __user *ubuf)
389 {
390         for (pos >>= 3, count >>= 3; count > 0; --count, ++pos) {
391                 unsigned long val;
392
393                 /*
394                  * The hardware updates the status register on a debug trap,
395                  * but do_debug (traps.c) saves it for us when that happens.
396                  * So whether the target is current or not, debugregN is good.
397                  */
398                 val = 0;
399                 switch (pos) {
400                 case 0: val = target->thread.debugreg0; break;
401                 case 1: val = target->thread.debugreg1; break;
402                 case 2: val = target->thread.debugreg2; break;
403                 case 3: val = target->thread.debugreg3; break;
404                 case 6: val = target->thread.debugreg6; break;
405                 case 7: val = target->thread.debugreg7; break;
406                 }
407
408                 if (kbuf) {
409                         *(unsigned long *) kbuf = val;
410                         kbuf += sizeof(unsigned long);
411                 }
412                 else {
413                         if (__put_user(val, (unsigned long __user *) ubuf))
414                                 return -EFAULT;
415                         ubuf += sizeof(unsigned long);
416                 }
417         }
418
419         return 0;
420 }
421
422 static int
423 dbregs_set(struct task_struct *target,
424            const struct utrace_regset *regset,
425            unsigned int pos, unsigned int count,
426            const void *kbuf, const void __user *ubuf)
427 {
428
429         unsigned long maxaddr = TASK_SIZE_OF(target);
430         maxaddr -= test_tsk_thread_flag(target, TIF_IA32) ? 3 : 7;
431
432         for (pos >>= 3, count >>= 3; count > 0; --count, ++pos) {
433                 unsigned long val;
434                 unsigned int i;
435
436                 if (kbuf) {
437                         val = *(const unsigned long *) kbuf;
438                         kbuf += sizeof(unsigned long);
439                 }
440                 else {
441                         if (__get_user(val, (unsigned long __user *) ubuf))
442                                 return -EFAULT;
443                         ubuf += sizeof(unsigned long);
444                 }
445
446                 switch (pos) {
447 #define SET_DBREG(n)                                                    \
448                         target->thread.debugreg##n = val;               \
449                         if (target == current)                          \
450                                 set_debugreg(target->thread.debugreg##n, n)
451
452                 case 0:
453                         if (val >= maxaddr)
454                                 return -EIO;
455                         SET_DBREG(0);
456                         break;
457                 case 1:
458                         if (val >= maxaddr)
459                                 return -EIO;
460                         SET_DBREG(1);
461                         break;
462                 case 2:
463                         if (val >= maxaddr)
464                                 return -EIO;
465                         SET_DBREG(2);
466                         break;
467                 case 3:
468                         if (val >= maxaddr)
469                                 return -EIO;
470                         SET_DBREG(3);
471                         break;
472                 case 4:
473                 case 5:
474                         if (val != 0)
475                                 return -EIO;
476                         break;
477                 case 6:
478                         if (val >> 32)
479                                 return -EIO;
480                         SET_DBREG(6);
481                         break;
482                 case 7:
483                         /*
484                          * See arch/i386/kernel/ptrace.c for an explanation
485                          * of this awkward check.
486                          */
487                         val &= ~DR_CONTROL_RESERVED;
488                         for (i = 0; i < 4; i++)
489                                 if ((0x5554 >> ((val >> (16 + 4*i)) & 0xf))
490                                     & 1)
491                                         return -EIO;
492                         if (val)
493                                 set_tsk_thread_flag(target, TIF_DEBUG);
494                         else
495                                 clear_tsk_thread_flag(target, TIF_DEBUG);
496                         SET_DBREG(7);
497                         break;
498 #undef  SET_DBREG
499                 }
500         }
501
502         return 0;
503 }
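/*
 * Background for the DR7 check above: each of the four breakpoints owns
 * a 4-bit field at DR7 bits 16+4*i, with the R/W (type) bits in the low
 * half and the LEN bits in the high half.  The constant 0x5554 has a 1
 * at every invalid encoding, i.e. type 10b (I/O breakpoints, not
 * supported) and type 00b (instruction execution) combined with a
 * nonzero length.  For example, a 4-byte data-write watchpoint encodes
 * as LEN=11b, R/W=01b = 0xd, and (0x5554 >> 0xd) & 1 == 0, so it is
 * accepted.
 */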
504
505
506 static int
507 fpregs_active(struct task_struct *target, const struct utrace_regset *regset)
508 {
509         return tsk_used_math(target) ? regset->n : 0;
510 }
511
512 static int
513 fpregs_get(struct task_struct *target,
514            const struct utrace_regset *regset,
515            unsigned int pos, unsigned int count,
516            void *kbuf, void __user *ubuf)
517 {
518         if (tsk_used_math(target)) {
519                 if (target == current)
520                         unlazy_fpu(target);
521         } 
522         else
523                 init_fpu(target);
524
525         return utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
526                                      &target->thread.i387.fxsave, 0, -1);
527 }
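/*
 * Note: when the target is the current task, unlazy_fpu() flushes any
 * live FPU/SSE state into target->thread.i387.fxsave first, so the
 * copy-out above always sees up-to-date contents; init_fpu() gives a
 * task that has never used the FPU a sane initial image.  The regset
 * itself is just the thread's raw fxsave area, exported with the
 * struct user_i387_struct layout.
 */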
528
529 static int
530 fpregs_set(struct task_struct *target,
531            const struct utrace_regset *regset,
532            unsigned int pos, unsigned int count,
533            const void *kbuf, const void __user *ubuf)
534 {
535         int ret;
536
537         if (tsk_used_math(target)) {
538                 if (target == current)
539                         unlazy_fpu(target);
540         }
541         else if (pos == 0 && count == sizeof(struct user_i387_struct))
542                 set_stopped_child_used_math(target);
543         else
544                 init_fpu(target);
545
546         ret = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
547                                    &target->thread.i387.fxsave, 0, -1);
548
549         target->thread.i387.fxsave.mxcsr &= mxcsr_feature_mask;
550
551         return ret;
552 }
553
554 static int
555 fsgs_active(struct task_struct *tsk, const struct utrace_regset *regset)
556 {
557         if (tsk->thread.gsindex == GS_TLS_SEL || tsk->thread.gs)
558                 return 2;
559         if (tsk->thread.fsindex == FS_TLS_SEL || tsk->thread.fs)
560                 return 1;
561         return 0;
562 }
563
564 static inline u32 read_32bit_tls(struct task_struct *t, int tls)
565 {
566         struct desc_struct *desc = (void *)t->thread.tls_array;
567         desc += tls;
568         return desc->base0 |
569                 (((u32)desc->base1) << 16) |
570                 (((u32)desc->base2) << 24);
571 }
572
573 static int
574 fsgs_get(struct task_struct *target,
575          const struct utrace_regset *regset,
576          unsigned int pos, unsigned int count,
577          void *kbuf, void __user *ubuf)
578 {
579         unsigned long *kaddr = kbuf;
580         unsigned long __user *uaddr = ubuf;
581         unsigned long addr;
582
583         /*
584          * XXX why the MSR reads here?
585          * Can anything change the MSRs without changing thread.fs first?
586          */
587         if (pos == 0) {         /* FS */
588                 if (target->thread.fsindex == FS_TLS_SEL)
589                         addr = read_32bit_tls(target, FS_TLS);
590                 else if (target == current) {
591                         rdmsrl(MSR_FS_BASE, addr);
592                 }
593                 else
594                         addr = target->thread.fs;
595                 if (kaddr)
596                         *kaddr++ = addr;
597                 else if (__put_user(addr, uaddr++))
598                         return -EFAULT;
599         }
600
601         if (count > sizeof(unsigned long)) { /* GS */
602                 if (target->thread.gsindex == GS_TLS_SEL)
603                         addr = read_32bit_tls(target, GS_TLS);
604                 else if (target == current) {
605                         rdmsrl(MSR_GS_BASE, addr);
606                 }
607                 else
608                         addr = target->thread.gs;
609                 if (kaddr)
610                         *kaddr = addr;
611                 else if (__put_user(addr, uaddr))
612                         return -EFAULT;
613         }
614
615         return 0;
616 }
617
618 static int
619 fsgs_set(struct task_struct *target,
620          const struct utrace_regset *regset,
621          unsigned int pos, unsigned int count,
622          const void *kbuf, const void __user *ubuf)
623 {
624         const unsigned long *kaddr = kbuf;
625         const unsigned long __user *uaddr = ubuf;
626         unsigned long addr;
627         int ret = 0;
628
629         if (pos == 0) {         /* FS */
630                 if (kaddr)
631                         addr = *kaddr++;
632                 else if (__get_user(addr, uaddr++))
633                         return -EFAULT;
634                 ret = do_arch_prctl(target, ARCH_SET_FS, addr);
635         }
636
637         if (!ret && count > sizeof(unsigned long)) { /* GS */
638                 if (kaddr)
639                         addr = *kaddr;
640                 else if (__get_user(addr, uaddr))
641                         return -EFAULT;
642                 ret = do_arch_prctl(target, ARCH_SET_GS, addr);
643         }
644
645         return ret;
646 }
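/*
 * Sketch of the layout this regset uses (two unsigned longs): offset 0
 * is the FS base and offset 8 is the GS base.  Writes are routed
 * through do_arch_prctl(ARCH_SET_FS / ARCH_SET_GS), which validates the
 * address against the task's address-space limit and, when the target
 * is the current task, also reloads the corresponding base register.
 */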
647
648
649 /*
650  * These are our native regset flavors.
651  * XXX ioperm? vm86?
652  */
653 static const struct utrace_regset native_regsets[] = {
654         {
655                 .n = sizeof(struct user_regs_struct)/8, .size = 8, .align = 8,
656                 .get = genregs_get, .set = genregs_set
657         },
658         {
659                 .n = sizeof(struct user_i387_struct) / sizeof(long),
660                 .size = sizeof(long), .align = sizeof(long),
661                 .active = fpregs_active,
662                 .get = fpregs_get, .set = fpregs_set
663         },
664         {
665                 .n = 2, .size = sizeof(long), .align = sizeof(long),
666                 .active = fsgs_active,
667                 .get = fsgs_get, .set = fsgs_set
668         },
669         {
670                 .n = 8, .size = sizeof(long), .align = sizeof(long),
671                 .active = dbregs_active,
672                 .get = dbregs_get, .set = dbregs_set
673         },
674 };
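/*
 * For reference, the regset indices established here are what the
 * ptrace glue below relies on: 0 = general registers, 1 = i387/FXSAVE
 * state, 2 = fs/gs base addresses, 3 = debug registers.
 */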
675
676 const struct utrace_regset_view utrace_x86_64_native = {
677         .name = "x86-64", .e_machine = EM_X86_64,
678         .regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
679 };
680 EXPORT_SYMBOL_GPL(utrace_x86_64_native);
681
682
683 #ifdef CONFIG_PTRACE
684 static const struct ptrace_layout_segment x86_64_uarea[] = {
685         {0, sizeof(struct user_regs_struct), 0, 0},
686         {sizeof(struct user_regs_struct),
687          offsetof(struct user, u_debugreg[0]), -1, 0},
688         {offsetof(struct user, u_debugreg[0]),
689          offsetof(struct user, u_debugreg[8]), 3, 0},
690         {0, 0, -1, 0}
691 };
692
693 int arch_ptrace(long *req, struct task_struct *child,
694                 struct utrace_attached_engine *engine,
695                 unsigned long addr, unsigned long data, long *val)
696 {
697         switch (*req) {
698         case PTRACE_PEEKUSR:
699                 return ptrace_peekusr(child, engine, x86_64_uarea, addr, data);
700         case PTRACE_POKEUSR:
701                 return ptrace_pokeusr(child, engine, x86_64_uarea, addr, data);
702         case PTRACE_GETREGS:
703                 return ptrace_whole_regset(child, engine, data, 0, 0);
704         case PTRACE_SETREGS:
705                 return ptrace_whole_regset(child, engine, data, 0, 1);
706         case PTRACE_GETFPREGS:
707                 return ptrace_whole_regset(child, engine, data, 1, 0);
708         case PTRACE_SETFPREGS:
709                 return ptrace_whole_regset(child, engine, data, 1, 1);
710 #ifdef CONFIG_IA32_EMULATION
711         case PTRACE_GET_THREAD_AREA:
712         case PTRACE_SET_THREAD_AREA:
713                 return ptrace_onereg_access(child, engine,
714                                             &utrace_ia32_view, 3,
715                                             addr, (void __user *)data,
716                                             *req == PTRACE_SET_THREAD_AREA);
717 #endif
718                 /* normal 64bit interface to access TLS data.
719                    Works just like arch_prctl, except that the arguments
720                    are reversed. */
721         case PTRACE_ARCH_PRCTL:
722                 return do_arch_prctl(child, data, addr);
723         }
724         return -ENOSYS;
725 }
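/*
 * Illustrative use of PTRACE_ARCH_PRCTL from a debugger (hypothetical
 * snippet, mirroring the argument order handled above):
 *
 *   ptrace(PTRACE_ARCH_PRCTL, pid, (void *) fs_base, ARCH_SET_FS);
 *
 * is the traced-task equivalent of arch_prctl(ARCH_SET_FS, fs_base):
 * the code travels in ptrace's data argument and the address in its
 * addr argument, i.e. reversed relative to arch_prctl.
 */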
726 #endif  /* CONFIG_PTRACE */
727
728
729 asmlinkage void syscall_trace_enter(struct pt_regs *regs)
730 {
731         /* do the secure computing check first */
732         secure_computing(regs->orig_rax);
733
734         if (test_thread_flag(TIF_SYSCALL_TRACE))
735                 tracehook_report_syscall(regs, 0);
736
737         if (unlikely(current->audit_context)) {
738                 if (test_thread_flag(TIF_IA32)) {
739                         audit_syscall_entry(AUDIT_ARCH_I386,
740                                             regs->orig_rax,
741                                             regs->rbx, regs->rcx,
742                                             regs->rdx, regs->rsi);
743                 } else {
744                         audit_syscall_entry(AUDIT_ARCH_X86_64,
745                                             regs->orig_rax,
746                                             regs->rdi, regs->rsi,
747                                             regs->rdx, regs->r10);
748                 }
749         }
750 }
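/*
 * For reference, the register choices above follow the two syscall
 * ABIs: a 64-bit task passes arguments in rdi, rsi, rdx, r10 (then r8,
 * r9) with the syscall number in orig_rax, while an IA32-emulated task
 * uses the i386 convention of ebx, ecx, edx, esi (then edi, ebp), which
 * live in the low halves of rbx, rcx, rdx, rsi here.
 */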
751
752 asmlinkage void syscall_trace_leave(struct pt_regs *regs)
753 {
754         if (unlikely(current->audit_context))
755                 audit_syscall_exit(AUDITSC_RESULT(regs->rax), regs->rax);
756
757         if (test_thread_flag(TIF_SYSCALL_TRACE))
758                 tracehook_report_syscall(regs, 1);
759
760         if (test_thread_flag(TIF_SINGLESTEP)) {
761                 force_sig(SIGTRAP, current); /* XXX */
762                 tracehook_report_syscall_step(regs);
763         }
764 }