fedora core 6 1.2949 + vserver 2.2.0
[linux-2.6.git] / arch / ia64 / kernel / traps.c
/*
 * Architecture-specific trap handling.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 05/12/00 grao <goutham.rao@intel.com> : added isr in siginfo for SIGFPE
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/tty.h>
#include <linux/vt_kern.h>              /* For unblank_screen() */
#include <linux/module.h>       /* for EXPORT_SYMBOL */
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/delay.h>                /* for ssleep() */

#include <asm/fpswa.h>
#include <asm/ia32.h>
#include <asm/intrinsics.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/kdebug.h>

extern spinlock_t timerlist_lock;

fpswa_interface_t *fpswa_interface;
EXPORT_SYMBOL(fpswa_interface);

ATOMIC_NOTIFIER_HEAD(ia64die_chain);

int
register_die_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_register(&ia64die_chain, nb);
}
EXPORT_SYMBOL_GPL(register_die_notifier);

int
unregister_die_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_unregister(&ia64die_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_die_notifier);
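
/*
 * Illustrative only (not part of the original file): a minimal sketch of how
 * a client such as a debugger module might hook ia64die_chain.  It relies on
 * struct die_args from <asm/kdebug.h> and the DIE_* events used in this file;
 * the names example_die_handler/example_die_nb are made up for illustration.
 */
#if 0   /* usage sketch, not compiled */
static int example_die_handler(struct notifier_block *nb,
                               unsigned long val, void *data)
{
        struct die_args *args = data;

        if (val == DIE_OOPS)
                printk(KERN_INFO "oops notifier: %s (err=%ld)\n",
                       args->str, args->err);
        return NOTIFY_DONE;     /* let other notifiers and die() proceed */
}

static struct notifier_block example_die_nb = {
        .notifier_call = example_die_handler,
};

/* ... register_die_notifier(&example_die_nb); ... */
#endif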

void __init
trap_init (void)
{
        if (ia64_boot_param->fpswa)
                /* FPSWA fixup: make the interface pointer a kernel virtual address: */
                fpswa_interface = __va(ia64_boot_param->fpswa);
}

/*
 * Unlock any spinlocks which will prevent us from getting the message out (timerlist_lock
 * is acquired through the console unblank code)
 */
void
bust_spinlocks (int yes)
{
        int loglevel_save = console_loglevel;

        if (yes) {
                oops_in_progress = 1;
                return;
        }

#ifdef CONFIG_VT
        unblank_screen();
#endif
        oops_in_progress = 0;
        /*
         * OK, the message is on the console.  Now we call printk() without
         * oops_in_progress set so that printk will give klogd a poke.  Hold onto
         * your hats...
         */
        console_loglevel = 15;          /* NMI oopser may have shut the console up */
        printk(" ");
        console_loglevel = loglevel_save;
}

void
die (const char *str, struct pt_regs *regs, long err)
{
        static struct {
                spinlock_t lock;
                u32 lock_owner;
                int lock_owner_depth;
        } die = {
                .lock =                 SPIN_LOCK_UNLOCKED,
                .lock_owner =           -1,
                .lock_owner_depth =     0
        };
        static int die_counter;
        int cpu = get_cpu();

        if (die.lock_owner != cpu) {
                console_verbose();
                spin_lock_irq(&die.lock);
                die.lock_owner = cpu;
                die.lock_owner_depth = 0;
                bust_spinlocks(1);
        }
        put_cpu();

        if (++die.lock_owner_depth < 3) {
                printk("%s[%d[#%u]]: %s %ld [%d]\n",
                        current->comm, current->pid, current->xid,
                        str, err, ++die_counter);
                (void) notify_die(DIE_OOPS, (char *)str, regs, err, 255, SIGSEGV);
                show_regs(regs);
        } else
                printk(KERN_ERR "Recursive die() failure, output suppressed\n");

        bust_spinlocks(0);
        die.lock_owner = -1;
        spin_unlock_irq(&die.lock);

        if (panic_on_oops)
                panic("Fatal exception");

        do_exit(SIGSEGV);
}

void
die_if_kernel (char *str, struct pt_regs *regs, long err)
{
        if (!user_mode(regs))
                die(str, regs, err);
}

void
__kprobes ia64_bad_break (unsigned long break_num, struct pt_regs *regs)
{
        siginfo_t siginfo;
        int sig, code;

        /* SIGILL, SIGFPE, SIGSEGV, and SIGBUS want these fields initialized: */
        siginfo.si_addr = (void __user *) (regs->cr_iip + ia64_psr(regs)->ri);
        siginfo.si_imm = break_num;
        siginfo.si_flags = 0;           /* clear __ISR_VALID */
        siginfo.si_isr = 0;

        switch (break_num) {
              case 0: /* unknown error (used by GCC for __builtin_abort()) */
                if (notify_die(DIE_BREAK, "break 0", regs, break_num, TRAP_BRKPT, SIGTRAP)
                                == NOTIFY_STOP)
                        return;
                die_if_kernel("bugcheck!", regs, break_num);
                sig = SIGILL; code = ILL_ILLOPC;
                break;

              case 1: /* integer divide by zero */
                sig = SIGFPE; code = FPE_INTDIV;
                break;

              case 2: /* integer overflow */
                sig = SIGFPE; code = FPE_INTOVF;
                break;

              case 3: /* range check/bounds check */
                sig = SIGFPE; code = FPE_FLTSUB;
                break;

              case 4: /* null pointer dereference */
                sig = SIGSEGV; code = SEGV_MAPERR;
                break;

              case 5: /* misaligned data */
                sig = SIGSEGV; code = BUS_ADRALN;
                break;

              case 6: /* decimal overflow */
                sig = SIGFPE; code = __FPE_DECOVF;
                break;

              case 7: /* decimal divide by zero */
                sig = SIGFPE; code = __FPE_DECDIV;
                break;

              case 8: /* packed decimal error */
                sig = SIGFPE; code = __FPE_DECERR;
                break;

              case 9: /* invalid ASCII digit */
                sig = SIGFPE; code = __FPE_INVASC;
                break;

              case 10: /* invalid decimal digit */
                sig = SIGFPE; code = __FPE_INVDEC;
                break;

              case 11: /* paragraph stack overflow */
                sig = SIGSEGV; code = __SEGV_PSTKOVF;
                break;

              case 0x3f000 ... 0x3ffff: /* bundle-update in progress */
                sig = SIGILL; code = __ILL_BNDMOD;
                break;

              default:
                if (break_num < 0x40000 || break_num > 0x100000)
                        die_if_kernel("Bad break", regs, break_num);

                if (break_num < 0x80000) {
                        sig = SIGILL; code = __ILL_BREAK;
                } else {
                        if (notify_die(DIE_BREAK, "bad break", regs, break_num, TRAP_BRKPT, SIGTRAP)
                                        == NOTIFY_STOP)
                                return;
                        sig = SIGTRAP; code = TRAP_BRKPT;
                }
        }
        siginfo.si_signo = sig;
        siginfo.si_errno = 0;
        siginfo.si_code = code;
        force_sig_info(sig, &siginfo, current);
}
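
/*
 * Illustrative only (not part of the original file): the break immediates
 * handled above are reached from user space via the "break" instruction.
 * A minimal sketch, assuming the GNU assembler's plain "break imm" syntax:
 */
#if 0   /* user-space sketch, not compiled */
int main(void)
{
        asm volatile ("break 0");       /* case 0 above: SIGILL/ILL_ILLOPC  */
        asm volatile ("break 1");       /* case 1 above: SIGFPE/FPE_INTDIV  */
        return 0;
}
#endif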

/*
 * disabled_fph_fault() is called when a user-level process attempts to access f32..f127
 * and it doesn't own the fp-high register partition.  When this happens, we save the
 * current fph partition in the task_struct of the fpu-owner (if necessary) and then load
 * the fp-high partition of the current task (if necessary).  Note that the kernel has
 * access to fph by the time we get here, as the IVT's "Disabled FP-Register" handler takes
 * care of clearing psr.dfh.
 */
static inline void
disabled_fph_fault (struct pt_regs *regs)
{
        struct ia64_psr *psr = ia64_psr(regs);

        /* first, grant user-level access to fph partition: */
        psr->dfh = 0;

        /*
         * Make sure that no other task gets in on this processor
         * while we're claiming the FPU
         */
        preempt_disable();
#ifndef CONFIG_SMP
        {
                struct task_struct *fpu_owner
                        = (struct task_struct *)ia64_get_kr(IA64_KR_FPU_OWNER);

                if (ia64_is_local_fpu_owner(current)) {
                        preempt_enable_no_resched();
                        return;
                }

                if (fpu_owner)
                        ia64_flush_fph(fpu_owner);
        }
#endif /* !CONFIG_SMP */
        ia64_set_local_fpu_owner(current);
        if ((current->thread.flags & IA64_THREAD_FPH_VALID) != 0) {
                __ia64_load_fpu(current->thread.fph);
                psr->mfh = 0;
        } else {
                __ia64_init_fpu();
                /*
                 * Set mfh because the state in thread.fph does not match the state in
                 * the fph partition.
                 */
                psr->mfh = 1;
        }
        preempt_enable_no_resched();
}

static inline int
fp_emulate (int fp_fault, void *bundle, long *ipsr, long *fpsr, long *isr, long *pr, long *ifs,
            struct pt_regs *regs)
{
        fp_state_t fp_state;
        fpswa_ret_t ret;

        if (!fpswa_interface)
                return -1;

        memset(&fp_state, 0, sizeof(fp_state_t));

        /*
         * compute fp_state.  only FP registers f6 - f11 are used by the
         * kernel, so set those bits in the mask and set the low volatile
         * pointer to point to these registers.
         */
        fp_state.bitmask_low64 = 0xfc0;  /* bit6..bit11 */

        fp_state.fp_state_low_volatile = (fp_state_low_volatile_t *) &regs->f6;
        /*
         * unsigned long (*EFI_FPSWA) (
         *      unsigned long    trap_type,
         *      void             *Bundle,
         *      unsigned long    *pipsr,
         *      unsigned long    *pfsr,
         *      unsigned long    *pisr,
         *      unsigned long    *ppreds,
         *      unsigned long    *pifs,
         *      void             *fp_state);
         */
        ret = (*fpswa_interface->fpswa)((unsigned long) fp_fault, bundle,
                                        (unsigned long *) ipsr, (unsigned long *) fpsr,
                                        (unsigned long *) isr, (unsigned long *) pr,
                                        (unsigned long *) ifs, &fp_state);

        return ret.status;
}

struct fpu_swa_msg {
        unsigned long count;
        unsigned long time;
};
static DEFINE_PER_CPU(struct fpu_swa_msg, cpulast);
DECLARE_PER_CPU(struct fpu_swa_msg, cpulast);
static struct fpu_swa_msg last __cacheline_aligned;
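
/*
 * Descriptive note: handle_fpu_swa() below uses these to rate-limit its
 * "floating-point assist fault" warning on two levels.  The per-CPU
 * `cpulast` allows each CPU only a handful of candidate messages per
 * 5-second window, and the global, cacheline-aligned `last` caps the total
 * across all CPUs without a lock: its lower 4 bits are a count and the
 * upper bits a sequence number, updated with cmpxchg/fetchadd.
 */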


/*
 * Handle floating-point assist faults and traps.
 */
static int
handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr)
{
        long exception, bundle[2];
        unsigned long fault_ip;
        struct siginfo siginfo;

        fault_ip = regs->cr_iip;
        if (!fp_fault && (ia64_psr(regs)->ri == 0))
                fault_ip -= 16;
        if (copy_from_user(bundle, (void __user *) fault_ip, sizeof(bundle)))
                return -1;

        if (!(current->thread.flags & IA64_THREAD_FPEMU_NOPRINT))  {
                unsigned long count, current_jiffies = jiffies;
                struct fpu_swa_msg *cp = &__get_cpu_var(cpulast);

                if (unlikely(current_jiffies > cp->time))
                        cp->count = 0;
                if (unlikely(cp->count < 5)) {
                        cp->count++;
                        cp->time = current_jiffies + 5 * HZ;

                        /* minimize races by grabbing a copy of count BEFORE checking last.time. */
                        count = last.count;
                        barrier();

                        /*
                         * Lower 4 bits are used as a count. Upper bits are a sequence
                         * number that is updated when count is reset. The cmpxchg will
                         * fail if seqno has changed. This minimizes multiple cpus
                         * resetting the count.
                         */
                        if (current_jiffies > last.time)
                                (void) cmpxchg_acq(&last.count, count, 16 + (count & ~15));

                        /* use fetchadd to atomically update the count */
                        if ((last.count & 15) < 5 && (ia64_fetchadd(1, &last.count, acq) & 15) < 5) {
                                last.time = current_jiffies + 5 * HZ;
                                printk(KERN_WARNING
                                        "%s(%d[#%u]): floating-point assist fault at ip %016lx, isr %016lx\n",
                                        current->comm, current->pid, current->xid,
                                        regs->cr_iip + ia64_psr(regs)->ri, isr);
                        }
                }
        }

        exception = fp_emulate(fp_fault, bundle, &regs->cr_ipsr, &regs->ar_fpsr, &isr, &regs->pr,
                               &regs->cr_ifs, regs);
        if (fp_fault) {
                if (exception == 0) {
                        /* emulation was successful */
                        ia64_increment_ip(regs);
                } else if (exception == -1) {
                        printk(KERN_ERR "handle_fpu_swa: fp_emulate() returned -1\n");
                        return -1;
                } else {
                        /* is next instruction a trap? */
                        if (exception & 2) {
                                ia64_increment_ip(regs);
                        }
                        siginfo.si_signo = SIGFPE;
                        siginfo.si_errno = 0;
                        siginfo.si_code = __SI_FAULT;   /* default code */
                        siginfo.si_addr = (void __user *) (regs->cr_iip + ia64_psr(regs)->ri);
                        if (isr & 0x11) {
                                siginfo.si_code = FPE_FLTINV;
                        } else if (isr & 0x22) {
                                /* denormal operand gets the same si_code as underflow,
                                 * see arch/i386/kernel/traps.c:math_error() */
                                siginfo.si_code = FPE_FLTUND;
                        } else if (isr & 0x44) {
                                siginfo.si_code = FPE_FLTDIV;
                        }
                        siginfo.si_isr = isr;
                        siginfo.si_flags = __ISR_VALID;
                        siginfo.si_imm = 0;
                        force_sig_info(SIGFPE, &siginfo, current);
                }
        } else {
                if (exception == -1) {
                        printk(KERN_ERR "handle_fpu_swa: fp_emulate() returned -1\n");
                        return -1;
                } else if (exception != 0) {
                        /* raise exception */
                        siginfo.si_signo = SIGFPE;
                        siginfo.si_errno = 0;
                        siginfo.si_code = __SI_FAULT;   /* default code */
                        siginfo.si_addr = (void __user *) (regs->cr_iip + ia64_psr(regs)->ri);
                        if (isr & 0x880) {
                                siginfo.si_code = FPE_FLTOVF;
                        } else if (isr & 0x1100) {
                                siginfo.si_code = FPE_FLTUND;
                        } else if (isr & 0x2200) {
                                siginfo.si_code = FPE_FLTRES;
                        }
                        siginfo.si_isr = isr;
                        siginfo.si_flags = __ISR_VALID;
                        siginfo.si_imm = 0;
                        force_sig_info(SIGFPE, &siginfo, current);
                }
        }
        return 0;
}
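
/*
 * Illustrative only (not part of the original file): on Itanium, arithmetic
 * on denormal operands is typically not completed in hardware and instead
 * raises a floating-point software-assist fault that ends up in
 * handle_fpu_swa() above.  A minimal user-space sketch that would usually
 * trigger (and be rate-limited by) the warning message:
 */
#if 0   /* user-space sketch, not compiled */
#include <stdio.h>

int main(void)
{
        volatile double tiny = 1e-310;  /* subnormal (denormal) value       */
        volatile double r = tiny * 0.5; /* operand assist handled by FPSWA  */

        printf("%g\n", r);
        return 0;
}
#endif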

struct illegal_op_return {
        unsigned long fkt, arg1, arg2, arg3;
};

struct illegal_op_return
ia64_illegal_op_fault (unsigned long ec, long arg1, long arg2, long arg3,
                       long arg4, long arg5, long arg6, long arg7,
                       struct pt_regs regs)
{
        struct illegal_op_return rv;
        struct siginfo si;
        char buf[128];

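        /*
         * Descriptive note: ia64_emulate_brl() (arch/ia64/kernel/brl_emu.c)
         * emulates the long-branch (brl) instruction in software for CPUs,
         * such as the first-generation Itanium, that raise an illegal-
         * operation fault instead of executing it.
         */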
#ifdef CONFIG_IA64_BRL_EMU
        {
                extern struct illegal_op_return ia64_emulate_brl (struct pt_regs *, unsigned long);

                rv = ia64_emulate_brl(&regs, ec);
                if (rv.fkt != (unsigned long) -1)
                        return rv;
        }
#endif

        sprintf(buf, "IA-64 Illegal operation fault");
        die_if_kernel(buf, &regs, 0);

        memset(&si, 0, sizeof(si));
        si.si_signo = SIGILL;
        si.si_code = ILL_ILLOPC;
        si.si_addr = (void __user *) (regs.cr_iip + ia64_psr(&regs)->ri);
        force_sig_info(SIGILL, &si, current);
        rv.fkt = 0;
        return rv;
}

void __kprobes
ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
            unsigned long iim, unsigned long itir, long arg5, long arg6,
            long arg7, struct pt_regs regs)
{
        unsigned long code, error = isr, iip;
        struct siginfo siginfo;
        char buf[128];
        int result, sig;
        static const char *reason[] = {
                "IA-64 Illegal Operation fault",
                "IA-64 Privileged Operation fault",
                "IA-64 Privileged Register fault",
                "IA-64 Reserved Register/Field fault",
                "Disabled Instruction Set Transition fault",
                "Unknown fault 5", "Unknown fault 6", "Unknown fault 7", "Illegal Hazard fault",
                "Unknown fault 9", "Unknown fault 10", "Unknown fault 11", "Unknown fault 12",
                "Unknown fault 13", "Unknown fault 14", "Unknown fault 15"
        };

        if ((isr & IA64_ISR_NA) && ((isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH)) {
                /*
                 * This fault was due to lfetch.fault, set "ed" bit in the psr to cancel
                 * the lfetch.
                 */
                ia64_psr(&regs)->ed = 1;
                return;
        }

        iip = regs.cr_iip + ia64_psr(&regs)->ri;

        switch (vector) {
              case 24: /* General Exception */
                code = (isr >> 4) & 0xf;
                sprintf(buf, "General Exception: %s%s", reason[code],
                        (code == 3) ? ((isr & (1UL << 37))
                                       ? " (RSE access)" : " (data access)") : "");
                if (code == 8) {
# ifdef CONFIG_IA64_PRINT_HAZARDS
                        printk("%s[%d]: possible hazard @ ip=%016lx (pr = %016lx)\n",
                               current->comm, current->pid,
                               regs.cr_iip + ia64_psr(&regs)->ri, regs.pr);
# endif
                        return;
                }
                break;

              case 25: /* Disabled FP-Register */
                if (isr & 2) {
                        disabled_fph_fault(&regs);
                        return;
                }
                sprintf(buf, "Disabled FPL fault---not supposed to happen!");
                break;

              case 26: /* NaT Consumption */
                if (user_mode(&regs)) {
                        void __user *addr;

                        if (((isr >> 4) & 0xf) == 2) {
                                /* NaT page consumption */
                                sig = SIGSEGV;
                                code = SEGV_ACCERR;
                                addr = (void __user *) ifa;
                        } else {
                                /* register NaT consumption */
                                sig = SIGILL;
                                code = ILL_ILLOPN;
                                addr = (void __user *) (regs.cr_iip
                                                        + ia64_psr(&regs)->ri);
                        }
                        siginfo.si_signo = sig;
                        siginfo.si_code = code;
                        siginfo.si_errno = 0;
                        siginfo.si_addr = addr;
                        siginfo.si_imm = vector;
                        siginfo.si_flags = __ISR_VALID;
                        siginfo.si_isr = isr;
                        force_sig_info(sig, &siginfo, current);
                        return;
                } else if (ia64_done_with_exception(&regs))
                        return;
                sprintf(buf, "NaT consumption");
                break;

              case 31: /* Unsupported Data Reference */
                if (user_mode(&regs)) {
                        siginfo.si_signo = SIGILL;
                        siginfo.si_code = ILL_ILLOPN;
                        siginfo.si_errno = 0;
                        siginfo.si_addr = (void __user *) iip;
                        siginfo.si_imm = vector;
                        siginfo.si_flags = __ISR_VALID;
                        siginfo.si_isr = isr;
                        force_sig_info(SIGILL, &siginfo, current);
                        return;
                }
                sprintf(buf, "Unsupported data reference");
                break;

              case 29: /* Debug */
              case 35: /* Taken Branch Trap */
              case 36: /* Single Step Trap */
                if (fsys_mode(current, &regs)) {
                        extern char __kernel_syscall_via_break[];
                        /*
                         * Got a trap in fsys-mode: Taken Branch Trap
                         * and Single Step trap need special handling;
                         * Debug trap is ignored (we disable it here
                         * and re-enable it in the lower-privilege trap).
                         */
                        if (unlikely(vector == 29)) {
                                set_thread_flag(TIF_DB_DISABLED);
                                ia64_psr(&regs)->db = 0;
                                ia64_psr(&regs)->lp = 1;
                                return;
                        }
                        /* re-do the system call via break 0x100000: */
                        regs.cr_iip = (unsigned long) __kernel_syscall_via_break;
                        ia64_psr(&regs)->ri = 0;
                        ia64_psr(&regs)->cpl = 3;
                        return;
                }
                switch (vector) {
                      case 29:
                        siginfo.si_code = TRAP_HWBKPT;
#ifdef CONFIG_ITANIUM
                        /*
                         * Erratum 10 (IFA may contain incorrect address) now has
                         * "NoFix" status.  There are no plans for fixing this.
                         */
                        if (ia64_psr(&regs)->is == 0)
                          ifa = regs.cr_iip;
#endif
                        break;
                      case 35: siginfo.si_code = TRAP_BRANCH; ifa = 0; break;
                      case 36: siginfo.si_code = TRAP_TRACE; ifa = 0; break;
                }
                if (notify_die(DIE_FAULT, "ia64_fault", &regs, vector, siginfo.si_code, SIGTRAP)
                                == NOTIFY_STOP)
                        return;
                siginfo.si_signo = SIGTRAP;
                siginfo.si_errno = 0;
                siginfo.si_addr  = (void __user *) ifa;
                siginfo.si_imm   = 0;
                siginfo.si_flags = __ISR_VALID;
                siginfo.si_isr   = isr;
                force_sig_info(SIGTRAP, &siginfo, current);
                return;

              case 32: /* fp fault */
              case 33: /* fp trap */
                result = handle_fpu_swa((vector == 32) ? 1 : 0, &regs, isr);
                if ((result < 0) || (current->thread.flags & IA64_THREAD_FPEMU_SIGFPE)) {
                        siginfo.si_signo = SIGFPE;
                        siginfo.si_errno = 0;
                        siginfo.si_code = FPE_FLTINV;
                        siginfo.si_addr = (void __user *) iip;
                        siginfo.si_flags = __ISR_VALID;
                        siginfo.si_isr = isr;
                        siginfo.si_imm = 0;
                        force_sig_info(SIGFPE, &siginfo, current);
                }
                return;

              case 34:
                if (isr & 0x2) {
                        /* Lower-Privilege Transfer Trap */

                        /* If we disabled debug traps during an fsyscall,
                         * re-enable them here.
                         */
                        if (test_thread_flag(TIF_DB_DISABLED)) {
                                clear_thread_flag(TIF_DB_DISABLED);
                                ia64_psr(&regs)->db = 1;
                        }

                        /*
                         * Just clear PSR.lp and then return immediately:
                         * all the interesting work (e.g., signal delivery)
                         * is done in the kernel exit path.
                         */
                        ia64_psr(&regs)->lp = 0;
                        return;
                } else {
                        /* Unimplemented Instr. Address Trap */
                        if (user_mode(&regs)) {
                                siginfo.si_signo = SIGILL;
                                siginfo.si_code = ILL_BADIADDR;
                                siginfo.si_errno = 0;
                                siginfo.si_flags = 0;
                                siginfo.si_isr = 0;
                                siginfo.si_imm = 0;
                                siginfo.si_addr = (void __user *) iip;
                                force_sig_info(SIGILL, &siginfo, current);
                                return;
                        }
                        sprintf(buf, "Unimplemented Instruction Address fault");
                }
                break;

              case 45:
#ifdef CONFIG_IA32_SUPPORT
                if (ia32_exception(&regs, isr) == 0)
                        return;
#endif
                printk(KERN_ERR "Unexpected IA-32 exception (Trap 45)\n");
                printk(KERN_ERR "  iip - 0x%lx, ifa - 0x%lx, isr - 0x%lx\n",
                       iip, ifa, isr);
                force_sig(SIGSEGV, current);
                break;

              case 46:
#ifdef CONFIG_IA32_SUPPORT
                if (ia32_intercept(&regs, isr) == 0)
                        return;
#endif
                printk(KERN_ERR "Unexpected IA-32 intercept trap (Trap 46)\n");
                printk(KERN_ERR "  iip - 0x%lx, ifa - 0x%lx, isr - 0x%lx, iim - 0x%lx\n",
                       iip, ifa, isr, iim);
                force_sig(SIGSEGV, current);
                return;

              case 47:
                sprintf(buf, "IA-32 Interruption Fault (int 0x%lx)", isr >> 16);
                break;

              default:
                sprintf(buf, "Fault %lu", vector);
                break;
        }
        die_if_kernel(buf, &regs, error);
        force_sig(SIGILL, current);
}