arch/x86_64/ia32/ptrace32.c
/*
 * 32-bit ptrace for x86-64.
 *
 * Copyright 2001,2002 Andi Kleen, SuSE Labs.
 * Some parts copied from arch/i386/kernel/ptrace.c. See that file for earlier
 * copyright.
 *
 * This also allows 64-bit processes to be accessed, but there is no way to
 * see their extended register contents.
 */

#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/unistd.h>
#include <linux/mm.h>
#include <linux/ptrace.h>
#include <linux/tracehook.h>
#include <linux/module.h>
#include <linux/elf.h>
#include <asm/ptrace.h>
#include <asm/tracehook.h>
#include <asm/compat.h>
#include <asm/uaccess.h>
#include <asm/user32.h>
#include <asm/user.h>
#include <asm/errno.h>
#include <asm/debugreg.h>
#include <asm/i387.h>
#include <asm/fpu32.h>
#include <asm/ldt.h>
#include <asm/desc.h>

/*
 * Determines which flags the user has access to [1 = access, 0 = no access].
 * Prohibits changing ID(21), VIP(20), VIF(19), VM(17), IOPL(12-13), IF(9).
 * Also masks reserved bits (31-22, 15, 5, 3, 1).
 */
#define FLAG_MASK 0x54dd5UL
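/*
 * Spelled out, FLAG_MASK covers exactly these user-visible bits
 * (an illustrative breakdown of the constant above):
 *   CF(0) PF(2) AF(4) ZF(6) SF(7) TF(8) DF(10) OF(11) NT(14) RF(16) AC(18)
 *   0x00001 + 0x00004 + 0x00010 + 0x00040 + 0x00080 + 0x00100
 *   + 0x00400 + 0x00800 + 0x04000 + 0x10000 + 0x40000 = 0x54dd5
 */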

#define R32(l,q) \
	case offsetof(struct user_regs_struct32, l): stack[offsetof(struct pt_regs, q)/8] = val; break

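/*
 * Write one 32-bit register of the stopped child.  @regno is a byte offset
 * into struct user_regs_struct32; most segment selectors are checked for a
 * user RPL, and the ordinary registers are stored through the R32() macro
 * into the matching 64-bit pt_regs slot on the child's kernel stack.
 */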
static int putreg32(struct task_struct *child, unsigned regno, u32 val)
{
	__u64 *stack = (__u64 *)task_pt_regs(child);

	switch (regno) {
	case offsetof(struct user_regs_struct32, fs):
		if (val && (val & 3) != 3) return -EIO;
		child->thread.fsindex = val & 0xffff;
		break;
	case offsetof(struct user_regs_struct32, gs):
		if (val && (val & 3) != 3) return -EIO;
		child->thread.gsindex = val & 0xffff;
		break;
	case offsetof(struct user_regs_struct32, ds):
		if (val && (val & 3) != 3) return -EIO;
		child->thread.ds = val & 0xffff;
		break;
	case offsetof(struct user_regs_struct32, es):
		child->thread.es = val & 0xffff;
		break;
	case offsetof(struct user_regs_struct32, ss):
		if ((val & 3) != 3) return -EIO;
		stack[offsetof(struct pt_regs, ss)/8] = val & 0xffff;
		break;
	case offsetof(struct user_regs_struct32, cs):
		if ((val & 3) != 3) return -EIO;
		stack[offsetof(struct pt_regs, cs)/8] = val & 0xffff;
		break;

	R32(ebx, rbx);
	R32(ecx, rcx);
	R32(edx, rdx);
	R32(edi, rdi);
	R32(esi, rsi);
	R32(ebp, rbp);
	R32(eax, rax);
	R32(orig_eax, orig_rax);
	R32(eip, rip);
	R32(esp, rsp);

	case offsetof(struct user_regs_struct32, eflags): {
		__u64 *flags = &stack[offsetof(struct pt_regs, eflags)/8];
		val &= FLAG_MASK;
		*flags = val | (*flags & ~FLAG_MASK);
		clear_tsk_thread_flag(child, TIF_FORCED_TF);
		break;
	}

	default:
		BUG();
	}
	return 0;
}

#undef R32

#define R32(l,q) \
	case offsetof(struct user_regs_struct32, l): val = stack[offsetof(struct pt_regs, q)/8]; break

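/*
 * Read one 32-bit register of the stopped child, the inverse of putreg32().
 * @regno is again a byte offset into struct user_regs_struct32.
 */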
static int getreg32(struct task_struct *child, unsigned regno)
{
	__u64 *stack = (__u64 *)task_pt_regs(child);
	u32 val;

	switch (regno) {
	case offsetof(struct user_regs_struct32, fs):
		val = child->thread.fsindex;
		break;
	case offsetof(struct user_regs_struct32, gs):
		val = child->thread.gsindex;
		break;
	case offsetof(struct user_regs_struct32, ds):
		val = child->thread.ds;
		break;
	case offsetof(struct user_regs_struct32, es):
		val = child->thread.es;
		break;

	R32(cs, cs);
	R32(ss, ss);
	R32(ebx, rbx);
	R32(ecx, rcx);
	R32(edx, rdx);
	R32(edi, rdi);
	R32(esi, rsi);
	R32(ebp, rbp);
	R32(eax, rax);
	R32(orig_eax, orig_rax);
	R32(eip, rip);
	R32(esp, rsp);

	case offsetof(struct user_regs_struct32, eflags):
		val = stack[offsetof(struct pt_regs, eflags) / 8];
		if (test_tsk_thread_flag(child, TIF_FORCED_TF))
			val &= ~X86_EFLAGS_TF;
		break;

	default:
		BUG();
		val = -1;
		break;
	}

	return val;
}

#undef R32

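/*
 * utrace regset "get" for the 32-bit general registers: walk the requested
 * byte range four bytes at a time and fetch each register with getreg32().
 */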
static int
ia32_genregs_get(struct task_struct *target,
		 const struct utrace_regset *regset,
		 unsigned int pos, unsigned int count,
		 void *kbuf, void __user *ubuf)
{
	if (kbuf) {
		u32 *kp = kbuf;
		while (count > 0) {
			*kp++ = getreg32(target, pos);
			pos += 4;
			count -= 4;
		}
	} else {
		u32 __user *up = ubuf;
		while (count > 0) {
			if (__put_user(getreg32(target, pos), up++))
				return -EFAULT;
			pos += 4;
			count -= 4;
		}
	}

	return 0;
}

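/*
 * utrace regset "set" for the 32-bit general registers, the counterpart of
 * ia32_genregs_get(): each 32-bit word is fed to putreg32(), and the first
 * error (e.g. -EIO for a bad selector) aborts the update.
 */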
static int
ia32_genregs_set(struct task_struct *target,
		 const struct utrace_regset *regset,
		 unsigned int pos, unsigned int count,
		 const void *kbuf, const void __user *ubuf)
{
	int ret = 0;

	if (kbuf) {
		const u32 *kp = kbuf;
		while (!ret && count > 0) {
			ret = putreg32(target, pos, *kp++);
			pos += 4;
			count -= 4;
		}
	} else {
		const u32 __user *up = ubuf;
		while (!ret && count > 0) {
			u32 val;
			ret = __get_user(val, up++);
			if (!ret)
				ret = putreg32(target, pos, val);
			pos += 4;
			count -= 4;
		}
	}

	return ret;
}

static int
ia32_fpregs_active(struct task_struct *target,
		   const struct utrace_regset *regset)
{
	return tsk_used_math(target) ? regset->n : 0;
}

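/*
 * Fetch the classic i387 state in the 32-bit user_i387 layout.  The live
 * FPU state is flushed to the thread (or initialized) first, then converted
 * from the fxsave image by get_fpregs32() before being copied out.
 */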
static int
ia32_fpregs_get(struct task_struct *target,
		const struct utrace_regset *regset,
		unsigned int pos, unsigned int count,
		void *kbuf, void __user *ubuf)
{
	struct user_i387_ia32_struct fp;
	int ret;

	if (tsk_used_math(target)) {
		if (target == current)
			unlazy_fpu(target);
	} else
		init_fpu(target);

	ret = get_fpregs32(&fp, target);
	if (ret == 0)
		ret = utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
					    &fp, 0, -1);

	return ret;
}

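/*
 * Store the classic i387 state from the 32-bit user_i387 layout.  A partial
 * write is merged into the current register image first; the result is
 * converted back into the fxsave format by set_fpregs32().
 */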
static int
ia32_fpregs_set(struct task_struct *target,
		const struct utrace_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf)
{
	struct user_i387_ia32_struct fp;
	int ret;

	if (tsk_used_math(target)) {
		if (target == current)
			unlazy_fpu(target);
	} else if (pos == 0 && count == sizeof(fp))
		set_stopped_child_used_math(target);
	else
		init_fpu(target);

	if (pos > 0 || count < sizeof(fp)) {
		ret = get_fpregs32(&fp, target);
		if (ret == 0)
			ret = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
						   &fp, 0, -1);
		if (ret)
			return ret;
		kbuf = &fp;
	} else if (kbuf == NULL) {
		if (__copy_from_user(&fp, ubuf, sizeof(fp)))
			return -EFAULT;
		kbuf = &fp;
	}

	return set_fpregs32(target, kbuf);
}

static int
ia32_fpxregs_active(struct task_struct *target,
		    const struct utrace_regset *regset)
{
	return tsk_used_math(target) ? regset->n : 0;
}

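/*
 * Fetch the raw fxsave image (FXSR/SSE state) in exactly the layout the
 * 32-bit PTRACE_GETFPXREGS interface expects.
 */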
static int
ia32_fpxregs_get(struct task_struct *target,
		 const struct utrace_regset *regset,
		 unsigned int pos, unsigned int count,
		 void *kbuf, void __user *ubuf)
{
	if (tsk_used_math(target)) {
		if (target == current)
			unlazy_fpu(target);
	} else
		init_fpu(target);

	return utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
				     &target->thread.i387.fxsave, 0, -1);
}

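/*
 * Store the raw fxsave image.  mxcsr is masked with mxcsr_feature_mask so a
 * tracer cannot set reserved MXCSR bits, which would fault when the state
 * is later restored.
 */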
static int
ia32_fpxregs_set(struct task_struct *target,
		 const struct utrace_regset *regset,
		 unsigned int pos, unsigned int count,
		 const void *kbuf, const void __user *ubuf)
{
	int ret;

	if (tsk_used_math(target)) {
		if (target == current)
			unlazy_fpu(target);
	} else if (pos == 0 && count == sizeof(struct i387_fxsave_struct))
		set_stopped_child_used_math(target);
	else
		init_fpu(target);

	ret = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
				   &target->thread.i387.fxsave, 0, -1);

	target->thread.i387.fxsave.mxcsr &= mxcsr_feature_mask;

	return ret;
}

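/*
 * The debug-register regset is reported as non-empty only when a breakpoint
 * is installed or a debug trap has been recorded (dr6/dr7 non-zero).
 */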
static int
ia32_dbregs_active(struct task_struct *tsk, const struct utrace_regset *regset)
{
	if (tsk->thread.debugreg6 | tsk->thread.debugreg7)
		return 8;
	return 0;
}

static int
ia32_dbregs_get(struct task_struct *target,
		const struct utrace_regset *regset,
		unsigned int pos, unsigned int count,
		void *kbuf, void __user *ubuf)
{
	for (pos >>= 2, count >>= 2; count > 0; --count, ++pos) {
		u32 val;

		/*
		 * The hardware updates the status register on a debug trap,
		 * but do_debug (traps.c) saves it for us when that happens.
		 * So whether the target is current or not, debugregN is good.
		 */
		val = 0;
		switch (pos) {
		case 0: val = target->thread.debugreg0; break;
		case 1: val = target->thread.debugreg1; break;
		case 2: val = target->thread.debugreg2; break;
		case 3: val = target->thread.debugreg3; break;
		case 6: val = target->thread.debugreg6; break;
		case 7: val = target->thread.debugreg7; break;
		}

		if (kbuf) {
			*(u32 *) kbuf = val;
			kbuf += sizeof(u32);
		} else {
			if (__put_user(val, (u32 __user *) ubuf))
				return -EFAULT;
			ubuf += sizeof(u32);
		}
	}

	return 0;
}

static int
ia32_dbregs_set(struct task_struct *target,
		const struct utrace_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf)
{
	/*
	 * We'll just hijack the native setter to do the real work for us.
	 */
	const struct utrace_regset *dbregset = &utrace_x86_64_native.regsets[2];
	int ret = 0;

	for (pos >>= 2, count >>= 2; count > 0; --count, ++pos) {
		unsigned long val;

		if (kbuf) {
			val = *(const u32 *) kbuf;
			kbuf += sizeof(u32);
		} else {
			if (__get_user(val, (u32 __user *) ubuf))
				return -EFAULT;
			ubuf += sizeof(u32);
		}

		ret = (*dbregset->set)(target, dbregset, pos * sizeof(long),
				       sizeof(val), &val, NULL);
		if (ret)
			break;
	}

	return ret;
}

/*
 * Perform get_thread_area on behalf of the traced child.
 */
static int
ia32_tls_get(struct task_struct *target,
	     const struct utrace_regset *regset,
	     unsigned int pos, unsigned int count,
	     void *kbuf, void __user *ubuf)
{
	struct user_desc info, *ip;
	const struct n_desc_struct *desc;
	const struct n_desc_struct *tls;

/*
 * Get the current Thread-Local Storage area:
 */

#define GET_BASE(desc) ( \
	(((desc)->a >> 16) & 0x0000ffff) | \
	(((desc)->b << 16) & 0x00ff0000) | \
	( (desc)->b        & 0xff000000)   )

#define GET_LIMIT(desc) ( \
	((desc)->a & 0x0ffff) | \
	 ((desc)->b & 0xf0000) )

#define GET_32BIT(desc)		(((desc)->b >> 22) & 1)
#define GET_CONTENTS(desc)	(((desc)->b >> 10) & 3)
#define GET_WRITABLE(desc)	(((desc)->b >>  9) & 1)
#define GET_LIMIT_PAGES(desc)	(((desc)->b >> 23) & 1)
#define GET_PRESENT(desc)	(((desc)->b >> 15) & 1)
#define GET_USEABLE(desc)	(((desc)->b >> 20) & 1)

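	/*
	 * thread.tls_array holds the raw GDT descriptor words ("a" is the
	 * low 32 bits, "b" the high 32 bits); decode each requested slot
	 * into the struct user_desc layout that set_thread_area() takes.
	 */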
	tls = (struct n_desc_struct *) target->thread.tls_array;
	desc = &tls[pos / sizeof(struct user_desc)];
	ip = kbuf ?: &info;
	memset(ip, 0, sizeof *ip);
	for (; count > 0; count -= sizeof(struct user_desc), ++desc) {
		ip->entry_number = desc - tls + GDT_ENTRY_TLS_MIN;
		ip->base_addr = GET_BASE(desc);
		ip->limit = GET_LIMIT(desc);
		ip->seg_32bit = GET_32BIT(desc);
		ip->contents = GET_CONTENTS(desc);
		ip->read_exec_only = !GET_WRITABLE(desc);
		ip->limit_in_pages = GET_LIMIT_PAGES(desc);
		ip->seg_not_present = !GET_PRESENT(desc);
		ip->useable = GET_USEABLE(desc);

		if (kbuf)
			++ip;
		else {
			if (__copy_to_user(ubuf, &info, sizeof(info)))
				return -EFAULT;
			ubuf += sizeof(info);
		}
	}

	return 0;
}

/*
 * Perform set_thread_area on behalf of the traced child.
 */
static int
ia32_tls_set(struct task_struct *target,
	     const struct utrace_regset *regset,
	     unsigned int pos, unsigned int count,
	     const void *kbuf, const void __user *ubuf)
{
	struct user_desc info;
	struct n_desc_struct *desc;
	struct n_desc_struct newtls[GDT_ENTRY_TLS_ENTRIES];
	unsigned int i;
	int cpu;

	pos /= sizeof(struct user_desc);
	count /= sizeof(struct user_desc);

	desc = &newtls[pos];
	for (i = 0; i < count; ++i, ++desc) {
		const struct user_desc *ip;
		if (kbuf) {
			ip = kbuf;
			kbuf += sizeof(struct user_desc);
		} else {
			ip = &info;
			if (__copy_from_user(&info, ubuf, sizeof(info)))
				return -EFAULT;
			ubuf += sizeof(struct user_desc);
		}

		if (LDT_empty(ip)) {
			desc->a = 0;
			desc->b = 0;
		} else {
			desc->a = LDT_entry_a(ip);
			desc->b = LDT_entry_b(ip);
		}
	}

	/*
	 * We must not get preempted while modifying the TLS.
	 */
	cpu = get_cpu();
	memcpy(&target->thread.tls_array[pos], &newtls[pos],
	       count * sizeof(newtls[0]));
	if (target == current)
		load_TLS(&target->thread, cpu);
	put_cpu();

	return 0;
}

/*
 * Determine how many TLS slots are in use.
 */
static int
ia32_tls_active(struct task_struct *target, const struct utrace_regset *regset)
{
	int i;
	for (i = GDT_ENTRY_TLS_ENTRIES; i > 0; --i) {
		struct n_desc_struct *desc = (struct n_desc_struct *)
			&target->thread.tls_array[i - 1];
		if ((desc->a | desc->b) != 0)
			break;
	}
	return i;
}

/*
 * This should match arch/i386/kernel/ptrace.c:native_regsets.
 * XXX ioperm? vm86?
 */
static const struct utrace_regset ia32_regsets[] = {
	{
		.n = sizeof(struct user_regs_struct32)/4,
		.size = 4, .align = 4,
		.get = ia32_genregs_get, .set = ia32_genregs_set
	},
	{
		.n = sizeof(struct user_i387_ia32_struct) / 4,
		.size = 4, .align = 4,
		.active = ia32_fpregs_active,
		.get = ia32_fpregs_get, .set = ia32_fpregs_set
	},
	{
		.n = sizeof(struct user32_fxsr_struct) / 4,
		.size = 4, .align = 4,
		.active = ia32_fpxregs_active,
		.get = ia32_fpxregs_get, .set = ia32_fpxregs_set
	},
	{
		.n = GDT_ENTRY_TLS_ENTRIES,
		.bias = GDT_ENTRY_TLS_MIN,
		.size = sizeof(struct user_desc),
		.align = sizeof(struct user_desc),
		.active = ia32_tls_active,
		.get = ia32_tls_get, .set = ia32_tls_set
	},
	{
		.n = 8, .size = 4, .align = 4,
		.active = ia32_dbregs_active,
		.get = ia32_dbregs_get, .set = ia32_dbregs_set
	},
};

const struct utrace_regset_view utrace_ia32_view = {
	.name = "i386", .e_machine = EM_386,
	.regsets = ia32_regsets,
	.n = sizeof ia32_regsets / sizeof ia32_regsets[0],
};
EXPORT_SYMBOL_GPL(utrace_ia32_view);

#ifdef CONFIG_PTRACE
/*
 * This matches the arch/i386/kernel/ptrace.c definitions.
 */

static const struct ptrace_layout_segment ia32_uarea[] = {
	{0, sizeof(struct user_regs_struct32), 0, 0},
	{offsetof(struct user32, u_debugreg[0]),
	 offsetof(struct user32, u_debugreg[8]), 4, 0},
	{0, 0, -1, 0}
};

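/*
 * Arch hook for the compat (ia32) ptrace requests that need help from us:
 * user-area peek/poke via ia32_uarea above, whole-regset transfers (regset
 * indices 0-2 are the general, i387 and fxsr regsets in ia32_regsets[]),
 * and the thread-area calls, which go through the TLS regset (index 3).
 */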
fastcall int arch_compat_ptrace(compat_long_t *req, struct task_struct *child,
				struct utrace_attached_engine *engine,
				compat_ulong_t addr, compat_ulong_t data,
				compat_long_t *val)
{
	switch (*req) {
	case PTRACE_PEEKUSR:
		return ptrace_compat_peekusr(child, engine, ia32_uarea,
					     addr, data);
	case PTRACE_POKEUSR:
		return ptrace_compat_pokeusr(child, engine, ia32_uarea,
					     addr, data);
	case PTRACE_GETREGS:
		return ptrace_whole_regset(child, engine, data, 0, 0);
	case PTRACE_SETREGS:
		return ptrace_whole_regset(child, engine, data, 0, 1);
	case PTRACE_GETFPREGS:
		return ptrace_whole_regset(child, engine, data, 1, 0);
	case PTRACE_SETFPREGS:
		return ptrace_whole_regset(child, engine, data, 1, 1);
	case PTRACE_GETFPXREGS:
		return ptrace_whole_regset(child, engine, data, 2, 0);
	case PTRACE_SETFPXREGS:
		return ptrace_whole_regset(child, engine, data, 2, 1);
	case PTRACE_GET_THREAD_AREA:
	case PTRACE_SET_THREAD_AREA:
		return ptrace_onereg_access(child, engine,
					    &utrace_ia32_view, 3,
					    addr,
					    (void __user *)(unsigned long)data,
					    *req == PTRACE_SET_THREAD_AREA);
	}
	return -ENOSYS;
}
#endif	/* CONFIG_PTRACE */