1 /* $Id: traps.c,v 1.85 2002/02/09 19:49:31 davem Exp $
2 * arch/sparc64/kernel/traps.c
4 * Copyright (C) 1995,1997 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1997,1999,2000 Jakub Jelinek (jakub@redhat.com)
9 * I like traps on v9, :))))
12 #include <linux/config.h>
13 #include <linux/module.h>
14 #include <linux/sched.h> /* for jiffies */
15 #include <linux/kernel.h>
16 #include <linux/kallsyms.h>
17 #include <linux/signal.h>
18 #include <linux/smp.h>
19 #include <linux/smp_lock.h>
21 #include <linux/init.h>
23 #include <asm/delay.h>
24 #include <asm/system.h>
25 #include <asm/ptrace.h>
26 #include <asm/oplib.h>
28 #include <asm/pgtable.h>
29 #include <asm/unistd.h>
30 #include <asm/uaccess.h>
31 #include <asm/fpumacro.h>
34 #include <asm/estate.h>
35 #include <asm/chafsr.h>
36 #include <asm/psrcompat.h>
37 #include <asm/processor.h>
38 #include <asm/timer.h>
40 #include <linux/kmod.h>
43 /* When an irrecoverable trap occurs at tl > 0, the trap entry
44 * code logs the trap state registers at every level in the trap
45 * stack. It is found at (pt_regs + sizeof(pt_regs)) and the layout
/* Print the trap-state stack (TSTATE/TPC/TNPC/TT per trap level)
 * recorded by the TL>0 trap entry code into a struct tl1_traplog.
 * NOTE(review): this extraction is missing lines — the loop body
 * braces and the declaration of 'i' are not visible here.
 */
58 static void dump_tl1_traplog(struct tl1_traplog *p)
62 printk("TRAPLOG: Error at trap level 0x%lx, dumping track stack.\n",
/* Four hardware trap levels are logged in the trapstack array. */
64 for (i = 0; i < 4; i++) {
66 "TRAPLOG: Trap level %d TSTATE[%016lx] TPC[%016lx] "
67 "TNPC[%016lx] TT[%lx]\n",
69 p->trapstack[i].tstate, p->trapstack[i].tpc,
70 p->trapstack[i].tnpc, p->trapstack[i].tt);
/* Handle an unexpected trap taken at TL=0.  A kernel-mode bad trap is
 * fatal (die_if_kernel); a user-mode one delivers SIGILL/ILL_ILLTRP.
 * NOTE(review): extraction gaps — local declarations ('buffer',
 * 'info') and several closing braces are not visible here.
 */
74 void bad_trap (struct pt_regs *regs, long lvl)
/* Unexpected hardware trap vectors land here. */
80 sprintf(buffer, "Bad hw trap %lx at tl0\n", lvl);
81 die_if_kernel(buffer, regs);
/* A bad software trap from privileged (kernel) mode is fatal. */
85 if (regs->tstate & TSTATE_PRIV) {
86 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
87 die_if_kernel (buffer, regs);
/* 32-bit tasks only see the low 32 bits of PC/NPC. */
89 if (test_thread_flag(TIF_32BIT)) {
90 regs->tpc &= 0xffffffff;
91 regs->tnpc &= 0xffffffff;
/* Deliver SIGILL with the faulting PC as si_addr. */
93 info.si_signo = SIGILL;
95 info.si_code = ILL_ILLTRP;
96 info.si_addr = (void *)regs->tpc;
98 force_sig_info(SIGILL, &info, current);
/* Bad trap taken at trap level > 0: dump the trap-level log that the
 * entry code stored right after pt_regs, then die as a kernel fault.
 * NOTE(review): 'buffer' declaration not visible in this extraction.
 */
101 void bad_trap_tl1 (struct pt_regs *regs, long lvl)
105 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
107 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
108 die_if_kernel (buffer, regs);
111 #ifdef CONFIG_DEBUG_BUGVERBOSE
/* Verbose BUG() support: report the source file and line of the BUG. */
112 void do_BUG(const char *file, int line)
115 printk("kernel BUG at %s:%d!\n", file, line);
/* Instruction access exception at TL=0.  Fatal in kernel mode;
 * otherwise deliver SIGSEGV/SEGV_MAPERR to the user task.
 * NOTE(review): extraction gaps — 'info' declaration and closing
 * braces are not visible here.
 */
119 void instruction_access_exception(struct pt_regs *regs,
120 unsigned long sfsr, unsigned long sfar)
/* Kernel-mode instruction access fault: log SFSR/SFAR and die. */
124 if (regs->tstate & TSTATE_PRIV) {
125 printk("instruction_access_exception: SFSR[%016lx] SFAR[%016lx], going.\n",
127 die_if_kernel("Iax", regs);
/* Truncate PC/NPC for 32-bit tasks. */
129 if (test_thread_flag(TIF_32BIT)) {
130 regs->tpc &= 0xffffffff;
131 regs->tnpc &= 0xffffffff;
/* Fault address reported to userspace is the trapping PC. */
133 info.si_signo = SIGSEGV;
135 info.si_code = SEGV_MAPERR;
136 info.si_addr = (void *)regs->tpc;
138 force_sig_info(SIGSEGV, &info, current);
/* Instruction access exception taken at TL>0: dump the trap-level
 * log, then fall through to the TL=0 handler above.
 */
141 void instruction_access_exception_tl1(struct pt_regs *regs,
142 unsigned long sfsr, unsigned long sfar)
144 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
145 instruction_access_exception(regs, sfsr, sfar);
/* Data access exception at TL=0.  Kernel faults are first checked
 * against the exception (fixup) table — uaccess-style faults are
 * recovered by branching to the fixup address; anything else is
 * fatal.  User faults get SIGSEGV/SEGV_MAPERR with the SFAR.
 * NOTE(review): extraction gaps — 'fixup'/'info' declarations, the
 * tpc = fixup assignment and closing braces are not visible here.
 */
148 void data_access_exception (struct pt_regs *regs,
149 unsigned long sfsr, unsigned long sfar)
153 if (regs->tstate & TSTATE_PRIV) {
154 /* Test if this comes from uaccess places. */
156 unsigned long g2 = regs->u_regs[UREG_G2];
/* search_extables_range() also returns the fixed-up %g2. */
158 if ((fixup = search_extables_range(regs->tpc, &g2))) {
159 /* Ouch, somebody is trying ugly VM hole tricks on us... */
160 #ifdef DEBUG_EXCEPTIONS
161 printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs->tpc);
162 printk("EX_TABLE: insn<%016lx> fixup<%016lx> "
163 "g2<%016lx>\n", regs->tpc, fixup, g2);
/* Resume at the fixup handler with the recovered %g2. */
166 regs->tnpc = regs->tpc + 4;
167 regs->u_regs[UREG_G2] = g2;
/* No fixup entry — a genuine kernel bug, report and die. */
171 printk("data_access_exception: SFSR[%016lx] SFAR[%016lx], going.\n",
173 die_if_kernel("Dax", regs);
/* User-mode fault: the SFAR is the faulting data address. */
176 info.si_signo = SIGSEGV;
178 info.si_code = SEGV_MAPERR;
179 info.si_addr = (void *)sfar;
181 force_sig_info(SIGSEGV, &info, current);
185 /* This is really pathetic... */
186 extern volatile int pci_poke_in_progress;
187 extern volatile int pci_poke_cpu;
188 extern volatile int pci_poke_faulted;
191 /* When access exceptions happen, we must do this. */
/* On Spitfire, invalidate every I-cache and D-cache tag and then
 * re-enable both caches (and their parity checking) in the LSU
 * control register.  No-op on non-spitfire TLB types.
 * NOTE(review): extraction gaps — 'va' declaration, the early
 * return, and some asm constraint lines are not visible here.
 */
192 static void spitfire_clean_and_reenable_l1_caches(void)
196 if (tlb_type != spitfire)
/* Walk both caches, 32 bytes (one tag) at a time, zeroing tags. */
200 for (va = 0; va < (PAGE_SIZE << 1); va += 32) {
201 spitfire_put_icache_tag(va, 0x0);
202 spitfire_put_dcache_tag(va, 0x0);
205 /* Re-enable in LSU. */
206 __asm__ __volatile__("flush %%g6\n\t"
208 "stxa %0, [%%g0] %1\n\t"
211 : "r" (LSU_CONTROL_IC | LSU_CONTROL_DC |
212 LSU_CONTROL_IM | LSU_CONTROL_DM),
213 "i" (ASI_LSU_CONTROL)
/* Instruction access error (parity/ECC in the L1): clean and
 * re-enable the caches, then deliver SIGBUS/BUS_OBJERR.
 * NOTE(review): 'info' declaration not visible in this extraction.
 */
217 void do_iae(struct pt_regs *regs)
221 spitfire_clean_and_reenable_l1_caches();
223 info.si_signo = SIGBUS;
225 info.si_code = BUS_OBJERR;
226 info.si_addr = (void *)0;
228 force_sig_info(SIGBUS, &info, current);
/* Data access error.  If a PCI config/IO "poke" is in progress on
 * this CPU, the fault is expected: record it, fix up the caches,
 * and skip the faulting instruction instead of signalling.
 * NOTE(review): this extraction is truncated — the non-poke path
 * (SIGBUS delivery) and several lines are not visible here.
 */
231 void do_dae(struct pt_regs *regs)
234 if (pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
235 spitfire_clean_and_reenable_l1_caches();
/* Tell the poke code its access faulted. */
237 pci_poke_faulted = 1;
239 /* Why the fuck did they have to change this? */
240 if (tlb_type == cheetah || tlb_type == cheetah_plus)
/* Step past the faulting instruction. */
243 regs->tnpc = regs->tpc + 4;
/* Spitfire UDB ECC syndrome decode table, indexed by the 8-bit
 * syndrome.  Values 0x00-0x3f name the corrected data bit; values
 * >= 0x40 appear to encode check-bit / multi-bit cases — TODO
 * confirm the exact encoding against the UltraSPARC user's manual.
 * NOTE(review): the closing brace of this array is not visible in
 * this extraction.
 */
250 static char ecc_syndrome_table[] = {
251 0x4c, 0x40, 0x41, 0x48, 0x42, 0x48, 0x48, 0x49,
252 0x43, 0x48, 0x48, 0x49, 0x48, 0x49, 0x49, 0x4a,
253 0x44, 0x48, 0x48, 0x20, 0x48, 0x39, 0x4b, 0x48,
254 0x48, 0x25, 0x31, 0x48, 0x28, 0x48, 0x48, 0x2c,
255 0x45, 0x48, 0x48, 0x21, 0x48, 0x3d, 0x04, 0x48,
256 0x48, 0x4b, 0x35, 0x48, 0x2d, 0x48, 0x48, 0x29,
257 0x48, 0x00, 0x01, 0x48, 0x0a, 0x48, 0x48, 0x4b,
258 0x0f, 0x48, 0x48, 0x4b, 0x48, 0x49, 0x49, 0x48,
259 0x46, 0x48, 0x48, 0x2a, 0x48, 0x3b, 0x27, 0x48,
260 0x48, 0x4b, 0x33, 0x48, 0x22, 0x48, 0x48, 0x2e,
261 0x48, 0x19, 0x1d, 0x48, 0x1b, 0x4a, 0x48, 0x4b,
262 0x1f, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
263 0x48, 0x4b, 0x24, 0x48, 0x07, 0x48, 0x48, 0x36,
264 0x4b, 0x48, 0x48, 0x3e, 0x48, 0x30, 0x38, 0x48,
265 0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x16, 0x48,
266 0x48, 0x12, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
267 0x47, 0x48, 0x48, 0x2f, 0x48, 0x3f, 0x4b, 0x48,
268 0x48, 0x06, 0x37, 0x48, 0x23, 0x48, 0x48, 0x2b,
269 0x48, 0x05, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x32,
270 0x26, 0x48, 0x48, 0x3a, 0x48, 0x34, 0x3c, 0x48,
271 0x48, 0x11, 0x15, 0x48, 0x13, 0x4a, 0x48, 0x4b,
272 0x17, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
273 0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x1e, 0x48,
274 0x48, 0x1a, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
275 0x48, 0x08, 0x0d, 0x48, 0x02, 0x48, 0x48, 0x49,
276 0x03, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x4b, 0x48,
277 0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x10, 0x48,
278 0x48, 0x14, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
279 0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x18, 0x48,
280 0x48, 0x1c, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
281 0x4a, 0x0c, 0x09, 0x48, 0x0e, 0x48, 0x48, 0x4b,
282 0x0b, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x4b, 0x4a
285 /* cee_trap in entry.S encodes AFSR/UDBH/UDBL error status
286 * in the following format. The AFAR is left as is, with
287 * reserved bits cleared, and is a raw 40-bit physical
290 #define CE_STATUS_UDBH_UE (1UL << (43 + 9))
291 #define CE_STATUS_UDBH_CE (1UL << (43 + 8))
292 #define CE_STATUS_UDBH_ESYNDR (0xffUL << 43)
293 #define CE_STATUS_UDBH_SHIFT 43
294 #define CE_STATUS_UDBL_UE (1UL << (33 + 9))
295 #define CE_STATUS_UDBL_CE (1UL << (33 + 8))
296 #define CE_STATUS_UDBL_ESYNDR (0xffUL << 33)
297 #define CE_STATUS_UDBL_SHIFT 33
298 #define CE_STATUS_AFSR_MASK (0x1ffffffffUL)
299 #define CE_STATUS_AFSR_ME (1UL << 32)
300 #define CE_STATUS_AFSR_PRIV (1UL << 31)
301 #define CE_STATUS_AFSR_ISAP (1UL << 30)
302 #define CE_STATUS_AFSR_ETP (1UL << 29)
303 #define CE_STATUS_AFSR_IVUE (1UL << 28)
304 #define CE_STATUS_AFSR_TO (1UL << 27)
305 #define CE_STATUS_AFSR_BERR (1UL << 26)
306 #define CE_STATUS_AFSR_LDP (1UL << 25)
307 #define CE_STATUS_AFSR_CP (1UL << 24)
308 #define CE_STATUS_AFSR_WP (1UL << 23)
309 #define CE_STATUS_AFSR_EDP (1UL << 22)
310 #define CE_STATUS_AFSR_UE (1UL << 21)
311 #define CE_STATUS_AFSR_CE (1UL << 20)
312 #define CE_STATUS_AFSR_ETS (0xfUL << 16)
313 #define CE_STATUS_AFSR_ETS_SHIFT 16
314 #define CE_STATUS_AFSR_PSYND (0xffffUL << 0)
315 #define CE_STATUS_AFSR_PSYND_SHIFT 0
317 /* Layout of Ecache TAG Parity Syndrome of AFSR */
318 #define AFSR_ETSYNDROME_7_0 0x1UL /* E$-tag bus bits <7:0> */
319 #define AFSR_ETSYNDROME_15_8 0x2UL /* E$-tag bus bits <15:8> */
320 #define AFSR_ETSYNDROME_21_16 0x4UL /* E$-tag bus bits <21:16> */
321 #define AFSR_ETSYNDROME_24_22 0x8UL /* E$-tag bus bits <24:22> */
323 static char *syndrome_unknown = "<Unknown>";
/* Log a correctable ECC error reported by cee_trap in entry.S.
 * For each UDB half (low then high) that flags a CE, decode the
 * 8-bit syndrome via ecc_syndrome_table and ask the PROM which
 * memory module (unumber) the AFAR falls in.
 * NOTE(review): extraction gaps — 'afar'/'memmod_str'/'p'
 * declarations and the p = memmod_str assignments are not visible.
 */
325 asmlinkage void cee_log(unsigned long ce_status,
327 struct pt_regs *regs)
331 unsigned short scode, udb_reg;
333 printk(KERN_WARNING "CPU[%d]: Correctable ECC Error "
334 "AFSR[%lx] AFAR[%016lx] UDBL[%lx] UDBH[%lx]\n",
336 (ce_status & CE_STATUS_AFSR_MASK),
338 ((ce_status >> CE_STATUS_UDBL_SHIFT) & 0x3ffUL),
339 ((ce_status >> CE_STATUS_UDBH_SHIFT) & 0x3ffUL));
/* UDB-Low half: bit 8 is the CE flag, low 8 bits the syndrome. */
341 udb_reg = ((ce_status >> CE_STATUS_UDBL_SHIFT) & 0x3ffUL);
342 if (udb_reg & (1 << 8)) {
343 scode = ecc_syndrome_table[udb_reg & 0xff];
344 if (prom_getunumber(scode, afar,
345 memmod_str, sizeof(memmod_str)) == -1)
346 p = syndrome_unknown;
349 printk(KERN_WARNING "CPU[%d]: UDBL Syndrome[%x] "
350 "Memory Module \"%s\"\n",
351 smp_processor_id(), scode, p);
/* UDB-High half: same decode as above. */
354 udb_reg = ((ce_status >> CE_STATUS_UDBH_SHIFT) & 0x3ffUL);
355 if (udb_reg & (1 << 8)) {
356 scode = ecc_syndrome_table[udb_reg & 0xff];
357 if (prom_getunumber(scode, afar,
358 memmod_str, sizeof(memmod_str)) == -1)
359 p = syndrome_unknown;
362 printk(KERN_WARNING "CPU[%d]: UDBH Syndrome[%x] "
363 "Memory Module \"%s\"\n",
364 smp_processor_id(), scode, p);
368 /* Cheetah error trap handling. */
369 static unsigned long ecache_flush_physbase;
370 static unsigned long ecache_flush_linesize;
371 static unsigned long ecache_flush_size;
373 /* WARNING: The error trap handlers in assembly know the precise
374 * layout of the following structure.
376 * C-level handlers below use this information to log the error
377 * and then determine how to recover (if possible).
 *
 * NOTE(review): the /​*0xNN*​/ prefixes are byte offsets baked into
 * the assembly — do not reorder or resize fields.  The leading
 * fields (AFSR/AFAR at offsets 0x00/0x08, per the 0x10 start of
 * dcache_data) are not visible in this extraction.
 */
379 struct cheetah_err_info {
/* D-cache snapshot of the line implicated by the error. */
384 /*0x10*/u64 dcache_data[4]; /* The actual data */
385 /*0x30*/u64 dcache_index; /* D-cache index */
386 /*0x38*/u64 dcache_tag; /* D-cache tag/valid */
387 /*0x40*/u64 dcache_utag; /* D-cache microtag */
388 /*0x48*/u64 dcache_stag; /* D-cache snooptag */
/* I-cache snapshot. */
391 /*0x50*/u64 icache_data[8]; /* The actual insns + predecode */
392 /*0x90*/u64 icache_index; /* I-cache index */
393 /*0x98*/u64 icache_tag; /* I-cache phys tag */
394 /*0xa0*/u64 icache_utag; /* I-cache microtag */
395 /*0xa8*/u64 icache_stag; /* I-cache snooptag */
396 /*0xb0*/u64 icache_upper; /* I-cache upper-tag */
397 /*0xb8*/u64 icache_lower; /* I-cache lower-tag */
/* E-cache snapshot. */
400 /*0xc0*/u64 ecache_data[4]; /* 32 bytes from staging registers */
401 /*0xe0*/u64 ecache_index; /* E-cache index */
402 /*0xe8*/u64 ecache_tag; /* E-cache tag/state */
/* Pad the record out to a fixed power-of-two size. */
404 /*0xf0*/u64 __pad[32 - 30];
406 #define CHAFSR_INVALID ((u64)-1L)
408 /* This table is ordered in priority of errors and matches the
409 * AFAR overwrite policy as well.
412 struct afsr_error_table {
/* Human-readable names for each Cheetah AFSR error bit. */
417 static const char CHAFSR_PERR_msg[] =
418 "System interface protocol error";
419 static const char CHAFSR_IERR_msg[] =
420 "Internal processor error";
421 static const char CHAFSR_ISAP_msg[] =
422 "System request parity error on incoming addresss";
423 static const char CHAFSR_UCU_msg[] =
424 "Uncorrectable E-cache ECC error for ifetch/data";
425 static const char CHAFSR_UCC_msg[] =
426 "SW Correctable E-cache ECC error for ifetch/data";
427 static const char CHAFSR_UE_msg[] =
428 "Uncorrectable system bus data ECC error for read";
429 static const char CHAFSR_EDU_msg[] =
430 "Uncorrectable E-cache ECC error for stmerge/blkld";
431 static const char CHAFSR_EMU_msg[] =
432 "Uncorrectable system bus MTAG error";
433 static const char CHAFSR_WDU_msg[] =
434 "Uncorrectable E-cache ECC error for writeback";
435 static const char CHAFSR_CPU_msg[] =
436 "Uncorrectable ECC error for copyout";
437 static const char CHAFSR_CE_msg[] =
438 "HW corrected system bus data ECC error for read";
439 static const char CHAFSR_EDC_msg[] =
440 "HW corrected E-cache ECC error for stmerge/blkld";
441 static const char CHAFSR_EMC_msg[] =
442 "HW corrected system bus MTAG ECC error";
443 static const char CHAFSR_WDC_msg[] =
444 "HW corrected E-cache ECC error for writeback";
445 static const char CHAFSR_CPC_msg[] =
446 "HW corrected ECC error for copyout";
447 static const char CHAFSR_TO_msg[] =
448 "Unmapped error from system bus";
449 static const char CHAFSR_BERR_msg[] =
450 "Bus error response from system bus";
451 static const char CHAFSR_IVC_msg[] =
452 "HW corrected system bus data ECC error for ivec read";
453 static const char CHAFSR_IVU_msg[] =
454 "Uncorrectable system bus data ECC error for ivec read";
/* Plain Cheetah error table, ordered by error priority (matching the
 * chip's AFAR overwrite policy — see comment above struct
 * afsr_error_table).  Scanned in order by cheetah_get_hipri().
 * NOTE(review): the closing brace/terminator of this array is not
 * visible in this extraction.
 */
455 static struct afsr_error_table __cheetah_error_table[] = {
456 { CHAFSR_PERR, CHAFSR_PERR_msg },
457 { CHAFSR_IERR, CHAFSR_IERR_msg },
458 { CHAFSR_ISAP, CHAFSR_ISAP_msg },
459 { CHAFSR_UCU, CHAFSR_UCU_msg },
460 { CHAFSR_UCC, CHAFSR_UCC_msg },
461 { CHAFSR_UE, CHAFSR_UE_msg },
462 { CHAFSR_EDU, CHAFSR_EDU_msg },
463 { CHAFSR_EMU, CHAFSR_EMU_msg },
464 { CHAFSR_WDU, CHAFSR_WDU_msg },
465 { CHAFSR_CPU, CHAFSR_CPU_msg },
466 { CHAFSR_CE, CHAFSR_CE_msg },
467 { CHAFSR_EDC, CHAFSR_EDC_msg },
468 { CHAFSR_EMC, CHAFSR_EMC_msg },
469 { CHAFSR_WDC, CHAFSR_WDC_msg },
470 { CHAFSR_CPC, CHAFSR_CPC_msg },
471 { CHAFSR_TO, CHAFSR_TO_msg },
472 { CHAFSR_BERR, CHAFSR_BERR_msg },
473 /* These two do not update the AFAR. */
474 { CHAFSR_IVC, CHAFSR_IVC_msg },
475 { CHAFSR_IVU, CHAFSR_IVU_msg },
/* Extra error-bit names that exist only on Cheetah+. */
478 static const char CHPAFSR_DTO_msg[] =
479 "System bus unmapped error for prefetch/storequeue-read";
480 static const char CHPAFSR_DBERR_msg[] =
481 "System bus error for prefetch/storequeue-read";
482 static const char CHPAFSR_THCE_msg[] =
483 "Hardware corrected E-cache Tag ECC error";
484 static const char CHPAFSR_TSCE_msg[] =
485 "SW handled correctable E-cache Tag ECC error";
486 static const char CHPAFSR_TUE_msg[] =
487 "Uncorrectable E-cache Tag ECC error";
488 static const char CHPAFSR_DUE_msg[] =
489 "System bus uncorrectable data ECC error due to prefetch/store-fill";
/* Cheetah+ error table: the plain-Cheetah entries plus the CHPAFSR_*
 * bits, still in priority order.
 * NOTE(review): array terminator not visible in this extraction.
 */
490 static struct afsr_error_table __cheetah_plus_error_table[] = {
491 { CHAFSR_PERR, CHAFSR_PERR_msg },
492 { CHAFSR_IERR, CHAFSR_IERR_msg },
493 { CHAFSR_ISAP, CHAFSR_ISAP_msg },
494 { CHAFSR_UCU, CHAFSR_UCU_msg },
495 { CHAFSR_UCC, CHAFSR_UCC_msg },
496 { CHAFSR_UE, CHAFSR_UE_msg },
497 { CHAFSR_EDU, CHAFSR_EDU_msg },
498 { CHAFSR_EMU, CHAFSR_EMU_msg },
499 { CHAFSR_WDU, CHAFSR_WDU_msg },
500 { CHAFSR_CPU, CHAFSR_CPU_msg },
501 { CHAFSR_CE, CHAFSR_CE_msg },
502 { CHAFSR_EDC, CHAFSR_EDC_msg },
503 { CHAFSR_EMC, CHAFSR_EMC_msg },
504 { CHAFSR_WDC, CHAFSR_WDC_msg },
505 { CHAFSR_CPC, CHAFSR_CPC_msg },
506 { CHAFSR_TO, CHAFSR_TO_msg },
507 { CHAFSR_BERR, CHAFSR_BERR_msg },
508 { CHPAFSR_DTO, CHPAFSR_DTO_msg },
509 { CHPAFSR_DBERR, CHPAFSR_DBERR_msg },
510 { CHPAFSR_THCE, CHPAFSR_THCE_msg },
511 { CHPAFSR_TSCE, CHPAFSR_TSCE_msg },
512 { CHPAFSR_TUE, CHPAFSR_TUE_msg },
513 { CHPAFSR_DUE, CHPAFSR_DUE_msg },
514 /* These two do not update the AFAR. */
515 { CHAFSR_IVC, CHAFSR_IVC_msg },
516 { CHAFSR_IVU, CHAFSR_IVU_msg },
/* Error-bit names specific to Jalapeno (JBUS-based) CPUs. */
519 static const char JPAFSR_JETO_msg[] =
520 "System interface protocol error, hw timeout caused";
521 static const char JPAFSR_SCE_msg[] =
522 "Parity error on system snoop results";
523 static const char JPAFSR_JEIC_msg[] =
524 "System interface protocol error, illegal command detected";
525 static const char JPAFSR_JEIT_msg[] =
526 "System interface protocol error, illegal ADTYPE detected";
527 static const char JPAFSR_OM_msg[] =
528 "Out of range memory error has occurred";
529 static const char JPAFSR_ETP_msg[] =
530 "Parity error on L2 cache tag SRAM";
531 static const char JPAFSR_UMS_msg[] =
532 "Error due to unsupported store";
533 static const char JPAFSR_RUE_msg[] =
534 "Uncorrectable ECC error from remote cache/memory";
535 static const char JPAFSR_RCE_msg[] =
536 "Correctable ECC error from remote cache/memory";
537 static const char JPAFSR_BP_msg[] =
538 "JBUS parity error on returned read data";
539 static const char JPAFSR_WBP_msg[] =
540 "JBUS parity error on data for writeback or block store";
541 static const char JPAFSR_FRC_msg[] =
542 "Foreign read to DRAM incurring correctable ECC error";
543 static const char JPAFSR_FRU_msg[] =
544 "Foreign read to DRAM incurring uncorrectable ECC error";
/* Jalapeno error table, priority ordered; mixes JPAFSR_* bits with
 * the shared CHAFSR_* bits.
 * NOTE(review): array terminator not visible in this extraction.
 */
545 static struct afsr_error_table __jalapeno_error_table[] = {
546 { JPAFSR_JETO, JPAFSR_JETO_msg },
547 { JPAFSR_SCE, JPAFSR_SCE_msg },
548 { JPAFSR_JEIC, JPAFSR_JEIC_msg },
549 { JPAFSR_JEIT, JPAFSR_JEIT_msg },
550 { CHAFSR_PERR, CHAFSR_PERR_msg },
551 { CHAFSR_IERR, CHAFSR_IERR_msg },
552 { CHAFSR_ISAP, CHAFSR_ISAP_msg },
553 { CHAFSR_UCU, CHAFSR_UCU_msg },
554 { CHAFSR_UCC, CHAFSR_UCC_msg },
555 { CHAFSR_UE, CHAFSR_UE_msg },
556 { CHAFSR_EDU, CHAFSR_EDU_msg },
557 { JPAFSR_OM, JPAFSR_OM_msg },
558 { CHAFSR_WDU, CHAFSR_WDU_msg },
559 { CHAFSR_CPU, CHAFSR_CPU_msg },
560 { CHAFSR_CE, CHAFSR_CE_msg },
561 { CHAFSR_EDC, CHAFSR_EDC_msg },
562 { JPAFSR_ETP, JPAFSR_ETP_msg },
563 { CHAFSR_WDC, CHAFSR_WDC_msg },
564 { CHAFSR_CPC, CHAFSR_CPC_msg },
565 { CHAFSR_TO, CHAFSR_TO_msg },
566 { CHAFSR_BERR, CHAFSR_BERR_msg },
567 { JPAFSR_UMS, JPAFSR_UMS_msg },
568 { JPAFSR_RUE, JPAFSR_RUE_msg },
569 { JPAFSR_RCE, JPAFSR_RCE_msg },
570 { JPAFSR_BP, JPAFSR_BP_msg },
571 { JPAFSR_WBP, JPAFSR_WBP_msg },
572 { JPAFSR_FRC, JPAFSR_FRC_msg },
573 { JPAFSR_FRU, JPAFSR_FRU_msg },
574 /* These two do not update the AFAR. */
575 { CHAFSR_IVU, CHAFSR_IVU_msg },
578 static struct afsr_error_table *cheetah_error_table;
579 static unsigned long cheetah_afsr_errors;
581 /* This is allocated at boot time based upon the largest hardware
582 * cpu ID in the system. We allocate two entries per cpu, one for
583 * TL==0 logging and one for TL >= 1 logging.
585 struct cheetah_err_info *cheetah_error_log;
/* Return this CPU's error-log slot: two entries per CPU, entry 0 for
 * TL==0 traps and entry 1 (selected when AFSR has CHAFSR_TL1 set)
 * for TL>=1 traps.  NULL-checks the log in case it was never
 * allocated.  NOTE(review): extraction gaps — the return statements
 * and the p++ for the TL1 slot are not visible here.
 */
587 static __inline__ struct cheetah_err_info *cheetah_get_error_log(unsigned long afsr)
589 struct cheetah_err_info *p;
590 int cpu = smp_processor_id();
592 if (!cheetah_error_log)
595 p = cheetah_error_log + (cpu * 2);
596 if ((afsr & CHAFSR_TL1) != 0UL)
602 extern unsigned int tl0_icpe[], tl1_icpe[];
603 extern unsigned int tl0_dcpe[], tl1_dcpe[];
604 extern unsigned int tl0_fecc[], tl1_fecc[];
605 extern unsigned int tl0_cee[], tl1_cee[];
606 extern unsigned int tl0_iae[], tl1_iae[];
607 extern unsigned int tl0_dae[], tl1_dae[];
608 extern unsigned int cheetah_plus_icpe_trap_vector[], cheetah_plus_icpe_trap_vector_tl1[];
609 extern unsigned int cheetah_plus_dcpe_trap_vector[], cheetah_plus_dcpe_trap_vector_tl1[];
610 extern unsigned int cheetah_fecc_trap_vector[], cheetah_fecc_trap_vector_tl1[];
611 extern unsigned int cheetah_cee_trap_vector[], cheetah_cee_trap_vector_tl1[];
612 extern unsigned int cheetah_deferred_trap_vector[], cheetah_deferred_trap_vector_tl1[];
/* Boot-time setup for Cheetah error handling:
 *  1) probe all CPUs for the largest E-cache size / smallest line
 *     size and size the displacement-flush window (2 x E-cache),
 *  2) find a physically contiguous chunk of that size in sp_banks,
 *  3) allocate and invalidate the per-CPU error-log scoreboard,
 *  4) pick the AFSR error table by CPU version, and
 *  5) patch the trap table vectors for FECC/CEE/deferred (and, on
 *     Cheetah+, D-cache/I-cache parity) traps.
 * NOTE(review): extraction gaps — 'val' declaration, loop braces,
 * prom_halt() calls and some assignments are not visible here.
 */
614 void __init cheetah_ecache_flush_init(void)
616 unsigned long largest_size, smallest_linesize, order, ver;
617 int node, i, instance;
619 /* Scan all cpu device tree nodes, note two values:
620 * 1) largest E-cache size
621 * 2) smallest E-cache line size
624 smallest_linesize = ~0UL;
627 while (!cpu_find_by_instance(instance, &node, NULL)) {
630 val = prom_getintdefault(node, "ecache-size",
632 if (val > largest_size)
/* 64 bytes is the fallback E-cache line size. */
634 val = prom_getintdefault(node, "ecache-line-size", 64);
635 if (val < smallest_linesize)
636 smallest_linesize = val;
640 if (largest_size == 0UL || smallest_linesize == ~0UL) {
641 prom_printf("cheetah_ecache_flush_init: Cannot probe cpu E-cache "
/* Flush window is twice the largest E-cache for displacement. */
646 ecache_flush_size = (2 * largest_size);
647 ecache_flush_linesize = smallest_linesize;
649 /* Discover a physically contiguous chunk of physical
650 * memory in 'sp_banks' of size ecache_flush_size calculated
651 * above. Store the physical base of this area at
652 * ecache_flush_physbase.
654 for (node = 0; ; node++) {
655 if (sp_banks[node].num_bytes == 0)
657 if (sp_banks[node].num_bytes >= ecache_flush_size) {
658 ecache_flush_physbase = sp_banks[node].base_addr;
663 /* Note: Zero would be a valid value of ecache_flush_physbase so
664 * don't use that as the success test. :-)
666 if (sp_banks[node].num_bytes == 0) {
667 prom_printf("cheetah_ecache_flush_init: Cannot find %d byte "
668 "contiguous physical memory.\n", ecache_flush_size);
672 /* Now allocate error trap reporting scoreboard. */
673 node = NR_CPUS * (2 * sizeof(struct cheetah_err_info));
674 for (order = 0; order < MAX_ORDER; order++) {
675 if ((PAGE_SIZE << order) >= node)
678 cheetah_error_log = (struct cheetah_err_info *)
679 __get_free_pages(GFP_KERNEL, order);
680 if (!cheetah_error_log) {
681 prom_printf("cheetah_ecache_flush_init: Failed to allocate "
682 "error logging scoreboard (%d bytes).\n", node);
685 memset(cheetah_error_log, 0, PAGE_SIZE << order);
687 /* Mark all AFSRs as invalid so that the trap handler will
688 * log new information there.
690 for (i = 0; i < 2 * NR_CPUS; i++)
691 cheetah_error_log[i].afsr = CHAFSR_INVALID;
/* Select the error table from the CPU implementation field of %ver:
 * 0x003e0016 = Jalapeno, 0x003e0015 = Cheetah+, else plain Cheetah.
 */
693 __asm__ ("rdpr %%ver, %0" : "=r" (ver));
694 if ((ver >> 32) == 0x003e0016) {
695 cheetah_error_table = &__jalapeno_error_table[0];
696 cheetah_afsr_errors = JPAFSR_ERRORS;
697 } else if ((ver >> 32) == 0x003e0015) {
698 cheetah_error_table = &__cheetah_plus_error_table[0];
699 cheetah_afsr_errors = CHPAFSR_ERRORS;
701 cheetah_error_table = &__cheetah_error_table[0];
702 cheetah_afsr_errors = CHAFSR_ERRORS;
705 /* Now patch trap tables. */
/* Each trap-table entry is 8 instructions (8 * 4 bytes). */
706 memcpy(tl0_fecc, cheetah_fecc_trap_vector, (8 * 4));
707 memcpy(tl1_fecc, cheetah_fecc_trap_vector_tl1, (8 * 4));
708 memcpy(tl0_cee, cheetah_cee_trap_vector, (8 * 4));
709 memcpy(tl1_cee, cheetah_cee_trap_vector_tl1, (8 * 4));
710 memcpy(tl0_iae, cheetah_deferred_trap_vector, (8 * 4));
711 memcpy(tl1_iae, cheetah_deferred_trap_vector_tl1, (8 * 4));
712 memcpy(tl0_dae, cheetah_deferred_trap_vector, (8 * 4));
713 memcpy(tl1_dae, cheetah_deferred_trap_vector_tl1, (8 * 4));
714 if (tlb_type == cheetah_plus) {
715 memcpy(tl0_dcpe, cheetah_plus_dcpe_trap_vector, (8 * 4));
716 memcpy(tl1_dcpe, cheetah_plus_dcpe_trap_vector_tl1, (8 * 4));
717 memcpy(tl0_icpe, cheetah_plus_icpe_trap_vector, (8 * 4));
718 memcpy(tl1_icpe, cheetah_plus_icpe_trap_vector_tl1, (8 * 4));
/* Displacement-flush the entire E-cache by reading the flush window
 * (2 x E-cache size) line by line with ASI_PHYS_USE_EC loads.
 * The asm loop counts flush_size down to zero in the branch delay
 * slot pattern.  NOTE(review): some asm output/clobber lines are not
 * visible in this extraction.
 */
723 static void cheetah_flush_ecache(void)
725 unsigned long flush_base = ecache_flush_physbase;
726 unsigned long flush_linesize = ecache_flush_linesize;
727 unsigned long flush_size = ecache_flush_size;
729 __asm__ __volatile__("1: subcc %0, %4, %0\n\t"
730 " bne,pt %%xcc, 1b\n\t"
731 " ldxa [%2 + %0] %3, %%g0\n\t"
733 : "0" (flush_size), "r" (flush_base),
734 "i" (ASI_PHYS_USE_EC), "r" (flush_linesize));
/* Displacement-flush a single E-cache line: map the physical address
 * into the flush window (modulo half the window size) and load both
 * that address and its alias in the other half, so whichever way the
 * line lives in is evicted.  NOTE(review): 'alias' declaration not
 * visible in this extraction.
 */
737 static void cheetah_flush_ecache_line(unsigned long physaddr)
/* Align to the 8-byte load granularity. */
741 physaddr &= ~(8UL - 1UL);
742 physaddr = (ecache_flush_physbase +
743 (physaddr & ((ecache_flush_size>>1UL) - 1UL)));
744 alias = physaddr + (ecache_flush_size >> 1UL);
745 __asm__ __volatile__("ldxa [%0] %2, %%g0\n\t"
746 "ldxa [%1] %2, %%g0\n\t"
749 : "r" (physaddr), "r" (alias),
750 "i" (ASI_PHYS_USE_EC));
/* Measure the cost of refilling the E-cache, for scheduler tuning.
 * First sweep the whole 2x flush window so the timed pass really
 * displaces lines, then time a sweep of half the window (one
 * E-cache's worth) with the tick counter, and return 3/4 of the raw
 * tick delta.  NOTE(review): asm clobber/output lines are not
 * visible in this extraction.
 */
754 unsigned long __init cheetah_tune_scheduling(void)
756 unsigned long tick1, tick2, raw;
757 unsigned long flush_base = ecache_flush_physbase;
758 unsigned long flush_linesize = ecache_flush_linesize;
759 unsigned long flush_size = ecache_flush_size;
761 /* Run through the whole cache to guarantee the timed loop
762 * is really displacing cache lines.
764 __asm__ __volatile__("1: subcc %0, %4, %0\n\t"
765 " bne,pt %%xcc, 1b\n\t"
766 " ldxa [%2 + %0] %3, %%g0\n\t"
768 : "0" (flush_size), "r" (flush_base),
769 "i" (ASI_PHYS_USE_EC), "r" (flush_linesize));
771 /* The flush area is 2 X Ecache-size, so cut this in half for
774 flush_base = ecache_flush_physbase;
775 flush_linesize = ecache_flush_linesize;
776 flush_size = ecache_flush_size >> 1;
778 tick1 = tick_ops->get_tick();
780 __asm__ __volatile__("1: subcc %0, %4, %0\n\t"
781 " bne,pt %%xcc, 1b\n\t"
782 " ldxa [%2 + %0] %3, %%g0\n\t"
784 : "0" (flush_size), "r" (flush_base),
785 "i" (ASI_PHYS_USE_EC), "r" (flush_linesize));
787 tick2 = tick_ops->get_tick();
789 raw = (tick2 - tick1);
/* Return raw minus one quarter, i.e. 3/4 of the measured cost. */
791 return (raw - (raw >> 2));
795 /* Unfortunately, the diagnostic access to the I-cache tags we need to
796 * use to clear the thing interferes with I-cache coherency transactions.
798 * So we must only flush the I-cache when it is disabled.
/* Invalidate all I-cache tags via diagnostic ASI_IC_TAG stores.
 * Walks 32KB in 32-byte steps; the (2 << 3) in the address appears
 * to select the tag-valid field — TODO confirm against the
 * UltraSPARC-III manual.  Caller must have the I-cache disabled.
 * NOTE(review): 'i' declaration and closing braces not visible.
 */
800 static void __cheetah_flush_icache(void)
804 /* Clear the valid bits in all the tags. */
805 for (i = 0; i < (1 << 15); i += (1 << 5)) {
806 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
809 : "r" (i | (2 << 3)), "i" (ASI_IC_TAG));
/* Safe I-cache flush wrapper: disable the I-cache via the DCU
 * control register (required by __cheetah_flush_icache's diagnostic
 * tag access), flush, then restore the saved DCU value.
 */
813 static void cheetah_flush_icache(void)
815 unsigned long dcu_save;
817 /* Save current DCU, disable I-cache. */
818 __asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
819 "or %0, %2, %%g1\n\t"
820 "stxa %%g1, [%%g0] %1\n\t"
823 : "i" (ASI_DCU_CONTROL_REG), "i" (DCU_IC)
826 __cheetah_flush_icache();
828 /* Restore DCU register */
829 __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
832 : "r" (dcu_save), "i" (ASI_DCU_CONTROL_REG));
/* Invalidate all D-cache tags (64KB, 32-byte lines) with diagnostic
 * ASI_DCACHE_TAG stores.  NOTE(review): 'i' declaration and closing
 * braces are not visible in this extraction.
 */
835 static void cheetah_flush_dcache(void)
839 for (i = 0; i < (1 << 16); i += (1 << 5)) {
840 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
843 : "r" (i), "i" (ASI_DCACHE_TAG));
847 /* In order to make the even parity correct we must do two things.
848 * First, we clear DC_data_parity and set DC_utag to an appropriate value.
849 * Next, we clear out all 32-bytes of data for that line. Data of
850 * all-zero + tag parity value of zero == correct parity.
/* Rewrite every D-cache line's utag and zero its 32 data bytes so
 * both tag and data parity become valid again (Cheetah+ D-cache
 * parity error recovery).  NOTE(review): 'i'/'j' declarations and
 * closing braces are not visible in this extraction.
 */
852 static void cheetah_plus_zap_dcache_parity(void)
856 for (i = 0; i < (1 << 16); i += (1 << 5)) {
/* Derive a utag value from the line index. */
857 unsigned long tag = (i >> 14);
860 __asm__ __volatile__("membar #Sync\n\t"
861 "stxa %0, [%1] %2\n\t"
864 : "r" (tag), "r" (i),
865 "i" (ASI_DCACHE_UTAG));
/* Zero the line's data, 8 bytes at a time. */
866 for (j = i; j < i + (1 << 5); j += (1 << 3))
867 __asm__ __volatile__("membar #Sync\n\t"
868 "stxa %%g0, [%0] %1\n\t"
871 : "r" (j), "i" (ASI_DCACHE_DATA));
875 /* Conversion tables used to frob Cheetah AFSR syndrome values into
876 * something palatable to the memory controller driver get_unumber
/* E-cache ECC syndrome -> failing data bit number (or a C*/M*-class
 * marker for check-bit / multi-bit cases).  NOTE(review): the macro
 * definitions for NONE/C0..C8/M/M2/M3/M4 and the array's closing
 * brace are not visible in this extraction.
 */
900 static unsigned char cheetah_ecc_syntab[] = {
901 /*00*/NONE, C0, C1, M2, C2, M2, M3, 47, C3, M2, M2, 53, M2, 41, 29, M,
902 /*01*/C4, M, M, 50, M2, 38, 25, M2, M2, 33, 24, M2, 11, M, M2, 16,
903 /*02*/C5, M, M, 46, M2, 37, 19, M2, M, 31, 32, M, 7, M2, M2, 10,
904 /*03*/M2, 40, 13, M2, 59, M, M2, 66, M, M2, M2, 0, M2, 67, 71, M,
905 /*04*/C6, M, M, 43, M, 36, 18, M, M2, 49, 15, M, 63, M2, M2, 6,
906 /*05*/M2, 44, 28, M2, M, M2, M2, 52, 68, M2, M2, 62, M2, M3, M3, M4,
907 /*06*/M2, 26, 106, M2, 64, M, M2, 2, 120, M, M2, M3, M, M3, M3, M4,
908 /*07*/116, M2, M2, M3, M2, M3, M, M4, M2, 58, 54, M2, M, M4, M4, M3,
909 /*08*/C7, M2, M, 42, M, 35, 17, M2, M, 45, 14, M2, 21, M2, M2, 5,
910 /*09*/M, 27, M, M, 99, M, M, 3, 114, M2, M2, 20, M2, M3, M3, M,
911 /*0a*/M2, 23, 113, M2, 112, M2, M, 51, 95, M, M2, M3, M2, M3, M3, M2,
912 /*0b*/103, M, M2, M3, M2, M3, M3, M4, M2, 48, M, M, 73, M2, M, M3,
913 /*0c*/M2, 22, 110, M2, 109, M2, M, 9, 108, M2, M, M3, M2, M3, M3, M,
914 /*0d*/102, M2, M, M, M2, M3, M3, M, M2, M3, M3, M2, M, M4, M, M3,
915 /*0e*/98, M, M2, M3, M2, M, M3, M4, M2, M3, M3, M4, M3, M, M, M,
916 /*0f*/M2, M3, M3, M, M3, M, M, M, 56, M4, M, M3, M4, M, M, M,
917 /*10*/C8, M, M2, 39, M, 34, 105, M2, M, 30, 104, M, 101, M, M, 4,
918 /*11*/M, M, 100, M, 83, M, M2, 12, 87, M, M, 57, M2, M, M3, M,
919 /*12*/M2, 97, 82, M2, 78, M2, M2, 1, 96, M, M, M, M, M, M3, M2,
920 /*13*/94, M, M2, M3, M2, M, M3, M, M2, M, 79, M, 69, M, M4, M,
921 /*14*/M2, 93, 92, M, 91, M, M2, 8, 90, M2, M2, M, M, M, M, M4,
922 /*15*/89, M, M, M3, M2, M3, M3, M, M, M, M3, M2, M3, M2, M, M3,
923 /*16*/86, M, M2, M3, M2, M, M3, M, M2, M, M3, M, M3, M, M, M3,
924 /*17*/M, M, M3, M2, M3, M2, M4, M, 60, M, M2, M3, M4, M, M, M2,
925 /*18*/M2, 88, 85, M2, 84, M, M2, 55, 81, M2, M2, M3, M2, M3, M3, M4,
926 /*19*/77, M, M, M, M2, M3, M, M, M2, M3, M3, M4, M3, M2, M, M,
927 /*1a*/74, M, M2, M3, M, M, M3, M, M, M, M3, M, M3, M, M4, M3,
928 /*1b*/M2, 70, 107, M4, 65, M2, M2, M, 127, M, M, M, M2, M3, M3, M,
929 /*1c*/80, M2, M2, 72, M, 119, 118, M, M2, 126, 76, M, 125, M, M4, M3,
930 /*1d*/M2, 115, 124, M, 75, M, M, M3, 61, M, M4, M, M4, M, M, M,
931 /*1e*/M, 123, 122, M4, 121, M4, M, M3, 117, M2, M2, M3, M4, M3, M, M,
932 /*1f*/111, M, M, M, M4, M3, M3, M, M, M, M3, M, M3, M2, M, M
934 static unsigned char cheetah_mtag_syntab[] = {
945 /* Return the highest priority error conditon mentioned. */
/* Scan cheetah_error_table (which is priority-ordered) and return
 * the first AFSR error bit present in 'afsr'.  NOTE(review): 'i'
 * declaration, the break, and the return are not visible here.
 */
946 static __inline__ unsigned long cheetah_get_hipri(unsigned long afsr)
948 unsigned long tmp = 0;
/* Table ends with a zero-mask sentinel entry. */
951 for (i = 0; cheetah_error_table[i].mask; i++) {
952 if ((tmp = (afsr & cheetah_error_table[i].mask)) != 0UL)
/* Map an AFSR error bit to its human-readable name from the active
 * error table.  NOTE(review): 'i' declaration and the fallback
 * return for unknown bits are not visible in this extraction.
 */
958 static const char *cheetah_get_string(unsigned long bit)
962 for (i = 0; cheetah_error_table[i].mask; i++) {
963 if ((bit & cheetah_error_table[i].mask) != 0UL)
964 return cheetah_error_table[i].name;
969 extern int chmc_getunumber(int, unsigned long, char *, int);
/* Print a full report for a Cheetah error trap: AFSR/AFAR, trap
 * registers, M/E syndromes, the highest-priority error name, the
 * memory module (unumber) when the syndrome identifies one, the
 * D-/I-/E-cache snapshots captured in 'info', and any additional
 * error bits beyond the highest-priority one.  Severity is
 * KERN_WARNING when 'recoverable', else KERN_CRIT.
 * NOTE(review): extraction gaps — 'hipri' declaration, unum/ret
 * locals, ret checks, and the afsr &= ~bit in the multiple-error
 * loop are not visible here.
 */
971 static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *info,
972 unsigned long afsr, unsigned long afar, int recoverable)
977 printk("%s" "ERROR(%d): Cheetah error trap taken afsr[%016lx] afar[%016lx] TL1(%d)\n",
978 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
980 (afsr & CHAFSR_TL1) ? 1 : 0);
981 printk("%s" "ERROR(%d): TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
982 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
983 regs->tpc, regs->tnpc, regs->tstate);
984 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
985 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
986 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
987 (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT,
988 (afsr & CHAFSR_ME) ? ", Multiple Errors" : "",
989 (afsr & CHAFSR_PRIV) ? ", Privileged" : "");
990 hipri = cheetah_get_hipri(afsr);
991 printk("%s" "ERROR(%d): Highest priority error (%016lx) \"%s\"\n",
992 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
993 hipri, cheetah_get_string(hipri));
995 /* Try to get unumber if relevant. */
996 #define ESYND_ERRORS (CHAFSR_IVC | CHAFSR_IVU | \
997 CHAFSR_CPC | CHAFSR_CPU | \
998 CHAFSR_UE | CHAFSR_CE | \
999 CHAFSR_EDC | CHAFSR_EDU | \
1000 CHAFSR_UCC | CHAFSR_UCU | \
1001 CHAFSR_WDU | CHAFSR_WDC)
1002 #define MSYND_ERRORS (CHAFSR_EMC | CHAFSR_EMU)
/* Data-ECC errors: decode E-syndrome to a data bit, then ask the
 * memory controller driver for the module name. */
1003 if (afsr & ESYND_ERRORS) {
1007 syndrome = (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT;
1008 syndrome = cheetah_ecc_syntab[syndrome];
1009 ret = chmc_getunumber(syndrome, afar, unum, sizeof(unum));
1011 printk("%s" "ERROR(%d): AFAR E-syndrome [%s]\n",
1012 (recoverable ? KERN_WARNING : KERN_CRIT),
1013 smp_processor_id(), unum);
/* MTAG errors: same, via the M-syndrome table. */
1014 } else if (afsr & MSYND_ERRORS) {
1018 syndrome = (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT;
1019 syndrome = cheetah_mtag_syntab[syndrome];
1020 ret = chmc_getunumber(syndrome, afar, unum, sizeof(unum));
1022 printk("%s" "ERROR(%d): AFAR M-syndrome [%s]\n",
1023 (recoverable ? KERN_WARNING : KERN_CRIT),
1024 smp_processor_id(), unum);
1027 /* Now dump the cache snapshots. */
1028 printk("%s" "ERROR(%d): D-cache idx[%x] tag[%016lx] utag[%016lx] stag[%016lx]\n",
1029 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1030 (int) info->dcache_index,
1034 printk("%s" "ERROR(%d): D-cache data0[%016lx] data1[%016lx] data2[%016lx] data3[%016lx]\n",
1035 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1036 info->dcache_data[0],
1037 info->dcache_data[1],
1038 info->dcache_data[2],
1039 info->dcache_data[3]);
1040 printk("%s" "ERROR(%d): I-cache idx[%x] tag[%016lx] utag[%016lx] stag[%016lx] "
1041 "u[%016lx] l[%016lx]\n",
1042 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1043 (int) info->icache_index,
1048 info->icache_lower);
1049 printk("%s" "ERROR(%d): I-cache INSN0[%016lx] INSN1[%016lx] INSN2[%016lx] INSN3[%016lx]\n",
1050 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1051 info->icache_data[0],
1052 info->icache_data[1],
1053 info->icache_data[2],
1054 info->icache_data[3]);
1055 printk("%s" "ERROR(%d): I-cache INSN4[%016lx] INSN5[%016lx] INSN6[%016lx] INSN7[%016lx]\n",
1056 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1057 info->icache_data[4],
1058 info->icache_data[5],
1059 info->icache_data[6],
1060 info->icache_data[7]);
1061 printk("%s" "ERROR(%d): E-cache idx[%x] tag[%016lx]\n",
1062 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1063 (int) info->ecache_index, info->ecache_tag);
1064 printk("%s" "ERROR(%d): E-cache data0[%016lx] data1[%016lx] data2[%016lx] data3[%016lx]\n",
1065 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1066 info->ecache_data[0],
1067 info->ecache_data[1],
1068 info->ecache_data[2],
1069 info->ecache_data[3]);
/* Report every remaining error bit, highest priority first. */
1071 afsr = (afsr & ~hipri) & cheetah_afsr_errors;
1072 while (afsr != 0UL) {
1073 unsigned long bit = cheetah_get_hipri(afsr);
1075 printk("%s" "ERROR: Multiple-error (%016lx) \"%s\"\n",
1076 (recoverable ? KERN_WARNING : KERN_CRIT),
1077 bit, cheetah_get_string(bit));
1083 printk(KERN_CRIT "ERROR: This condition is not recoverable.\n");
/* Re-read the AFSR (and, if errors are latched, the AFAR) directly
 * from hardware.  Used after error reporting was temporarily disabled
 * to see whether a *new* error arrived in the meantime.  Callers pass
 * NULL for LOGP when they only want the check-and-clear side effect
 * (see cheetah_fix_ce); otherwise the fresh state is presumably
 * recorded into *logp -- callers read logp->afsr afterwards.
 * Return value is used as a boolean: nonzero means new error bits
 * were pending.
 */
1086 static int cheetah_recheck_errors(struct cheetah_err_info *logp)
1088 	unsigned long afsr, afar;
	/* Load the current AFSR via its ASI. */
1091 	__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
1094 	if ((afsr & cheetah_afsr_errors) != 0) {
		/* An error is latched: also fetch the fault address. */
1096 		__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
		/* AFSR error bits are write-one-to-clear: store the
		 * value we read back to acknowledge/clear them.
		 */
1104 		__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
1106 				     : : "r" (afsr), "i" (ASI_AFSR));
/* Trap handler for Cheetah Fast-ECC (E-cache) errors.
 *
 * Strategy: flush the E-cache to push the bad line out, snapshot the
 * per-cpu logged error state, invalidate and re-enable the I/D caches,
 * re-enable error reporting, then decide whether execution can
 * continue.  If a new asynchronous error arrived while reporting was
 * disabled, or the AFSR shows fatal conditions, we panic.
 */
1111 void cheetah_fecc_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
1113 	struct cheetah_err_info local_snapshot, *p;
	/* Push the offending line(s) out of the E-cache first. */
1117 	cheetah_flush_ecache();
	/* Locate this cpu's logged-error slot for this AFSR. */
1119 	p = cheetah_get_error_log(afsr);
	/* No log slot: report via PROM console (printk may not be safe
	 * this early) -- the surrounding (elided) code handles this case.
	 */
1121 		prom_printf("ERROR: Early Fast-ECC error afsr[%016lx] afar[%016lx]\n",
1123 		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
1124 			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
1128 	/* Grab snapshot of logged error. */
1129 	memcpy(&local_snapshot, p, sizeof(local_snapshot));
1131 	/* If the current trap snapshot does not match what the
1132 	 * trap handler passed along into our args, big trouble.
1133 	 * In such a case, mark the local copy as invalid.
1135 	 * Else, it matches and we mark the afsr in the non-local
1136 	 * copy as invalid so we may log new error traps there.
1138 	if (p->afsr != afsr || p->afar != afar)
1139 		local_snapshot.afsr = CHAFSR_INVALID;
1141 		p->afsr = CHAFSR_INVALID;
	/* Throw away potentially corrupted cache contents. */
1143 	cheetah_flush_icache();
1144 	cheetah_flush_dcache();
1146 	/* Re-enable I-cache/D-cache */
1147 	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1148 			     "or %%g1, %1, %%g1\n\t"
1149 			     "stxa %%g1, [%%g0] %0\n\t"
1152 			     : "i" (ASI_DCU_CONTROL_REG),
1153 			       "i" (DCU_DC | DCU_IC)
1156 	/* Re-enable error reporting */
1157 	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1158 			     "or %%g1, %1, %%g1\n\t"
1159 			     "stxa %%g1, [%%g0] %0\n\t"
1162 			     : "i" (ASI_ESTATE_ERROR_EN),
1163 			       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
1166 	/* Decide if we can continue after handling this trap and
1167 	 * logging the error.
	 */
	/* PERR/IERR/ISAP indicate internally inconsistent processor
	 * state -- never recoverable.
	 */
1170 	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
1173 	/* Re-check AFSR/AFAR. What we are looking for here is whether a new
1174 	 * error was logged while we had error reporting traps disabled.
	 */
1176 	if (cheetah_recheck_errors(&local_snapshot)) {
1177 		unsigned long new_afsr = local_snapshot.afsr;
1179 		/* If we got a new asynchronous error, die... */
1180 		if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
1181 				CHAFSR_WDU | CHAFSR_CPU |
1182 				CHAFSR_IVU | CHAFSR_UE |
1183 				CHAFSR_BERR | CHAFSR_TO))
	/* Log everything we gathered, then die if irrecoverable. */
1188 	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);
1191 		panic("Irrecoverable Fast-ECC error trap.\n");
1193 	/* Flush E-cache to kick the error trap handlers out. */
1194 	cheetah_flush_ecache();
1197 /* Try to fix a correctable error by pushing the line out from
1198  * the E-cache. Recheck error reporting registers to see if the
1199  * problem is intermittent.
 *
 * Returns via the elided tail paths; the visible callers ignore the
 * return value (see cheetah_cee_handler), so treat it as advisory.
 */
1201 static int cheetah_fix_ce(unsigned long physaddr)
1203 	unsigned long orig_estate;
1204 	unsigned long alias1, alias2;
1207 	/* Make sure correctable error traps are disabled. */
1208 	__asm__ __volatile__("ldxa [%%g0] %2, %0\n\t"
1209 			     "andn %0, %1, %%g1\n\t"
1210 			     "stxa %%g1, [%%g0] %2\n\t"
1212 			     : "=&r" (orig_estate)
1213 			     : "i" (ESTATE_ERROR_CEEN),
1214 			       "i" (ASI_ESTATE_ERROR_EN)
1217 	/* We calculate alias addresses that will force the
1218 	 * cache line in question out of the E-cache. Then
1219 	 * we bring it back in with an atomic instruction so
1220 	 * that we get it in some modified/exclusive state,
1221 	 * then we displace it again to try and get proper ECC
1222 	 * pushed back into the system.
	 */
	/* Align down to a doubleword boundary for the ldxa/casxa. */
1224 	physaddr &= ~(8UL - 1UL);
	/* Two addresses that map to the same E-cache index as
	 * physaddr, half the flush region apart.
	 */
1225 	alias1 = (ecache_flush_physbase +
1226 		  (physaddr & ((ecache_flush_size >> 1) - 1)));
1227 	alias2 = alias1 + (ecache_flush_size >> 1);
1228 	__asm__ __volatile__("ldxa [%0] %3, %%g0\n\t"
1229 			     "ldxa [%1] %3, %%g0\n\t"
1230 			     "casxa [%2] %3, %%g0, %%g0\n\t"
1231 			     "membar #StoreLoad | #StoreStore\n\t"
1232 			     "ldxa [%0] %3, %%g0\n\t"
1233 			     "ldxa [%1] %3, %%g0\n\t"
1236 			     : "r" (alias1), "r" (alias2),
1237 			       "r" (physaddr), "i" (ASI_PHYS_USE_EC));
1239 	/* Did that trigger another error? */
1240 	if (cheetah_recheck_errors(NULL)) {
1241 		/* Try one more time. */
1242 		__asm__ __volatile__("ldxa [%0] %1, %%g0\n\t"
1244 				     : : "r" (physaddr), "i" (ASI_PHYS_USE_EC));
1245 		if (cheetah_recheck_errors(NULL))
1250 	/* No new error, intermittent problem. */
1254 	/* Restore error enables. */
1255 	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
1257 			     : : "r" (orig_estate), "i" (ASI_ESTATE_ERROR_EN));
1262 /* Return non-zero if PADDR is a valid physical memory address. */
1263 static int cheetah_check_main_memory(unsigned long paddr)
	/* Walk the sp_banks[] table (terminated by a zero-length
	 * entry) looking for a bank containing paddr.
	 */
1267 	for (i = 0; ; i++) {
1268 		if (sp_banks[i].num_bytes == 0)
1270 		if (paddr >= sp_banks[i].base_addr &&
1271 		    paddr < (sp_banks[i].base_addr + sp_banks[i].num_bytes))
/* Trap handler for Cheetah correctable-ECC (CEE) errors.
 *
 * Snapshots the logged error state, attempts to scrub a correctable
 * main-memory error in place (cheetah_fix_ce), flushes/re-enables the
 * affected caches as needed, re-enables CE reporting, and logs the
 * event.  Panics only when the AFSR shows fatal state corruption.
 */
1277 void cheetah_cee_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
1279 	struct cheetah_err_info local_snapshot, *p;
1280 	int recoverable, is_memory;
	/* Locate this cpu's logged-error slot for this AFSR. */
1282 	p = cheetah_get_error_log(afsr);
	/* No log slot: report via PROM console (elided path). */
1284 		prom_printf("ERROR: Early CEE error afsr[%016lx] afar[%016lx]\n",
1286 		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
1287 			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
1291 	/* Grab snapshot of logged error. */
1292 	memcpy(&local_snapshot, p, sizeof(local_snapshot));
1294 	/* If the current trap snapshot does not match what the
1295 	 * trap handler passed along into our args, big trouble.
1296 	 * In such a case, mark the local copy as invalid.
1298 	 * Else, it matches and we mark the afsr in the non-local
1299 	 * copy as invalid so we may log new error traps there.
1301 	if (p->afsr != afsr || p->afar != afar)
1302 		local_snapshot.afsr = CHAFSR_INVALID;
1304 		p->afsr = CHAFSR_INVALID;
1306 	is_memory = cheetah_check_main_memory(afar);
1308 	if (is_memory && (afsr & CHAFSR_CE) != 0UL) {
1309 		/* XXX Might want to log the results of this operation
1310 		 * XXX somewhere... -DaveM
		 */
1312 		cheetah_fix_ce(afar);
	/* Non-memory or non-CE case: figure out how much of the
	 * E-cache needs flushing for EDC/CPC style errors.
	 */
1316 		int flush_all, flush_line;
1318 		flush_all = flush_line = 0;
1319 		if ((afsr & CHAFSR_EDC) != 0UL) {
			/* If EDC is the only error, one line suffices. */
1320 			if ((afsr & cheetah_afsr_errors) == CHAFSR_EDC)
1324 		} else if ((afsr & CHAFSR_CPC) != 0UL) {
1325 			if ((afsr & cheetah_afsr_errors) == CHAFSR_CPC)
1331 		/* Trap handler only disabled I-cache, flush it. */
1332 		cheetah_flush_icache();
1334 		/* Re-enable I-cache */
1335 		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1336 				     "or %%g1, %1, %%g1\n\t"
1337 				     "stxa %%g1, [%%g0] %0\n\t"
1340 				     : "i" (ASI_DCU_CONTROL_REG),
1345 			cheetah_flush_ecache();
1346 		else if (flush_line)
1347 			cheetah_flush_ecache_line(afar);
1350 	/* Re-enable error reporting */
1351 	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1352 			     "or %%g1, %1, %%g1\n\t"
1353 			     "stxa %%g1, [%%g0] %0\n\t"
1356 			     : "i" (ASI_ESTATE_ERROR_EN),
1357 			       "i" (ESTATE_ERROR_CEEN)
1360 	/* Decide if we can continue after handling this trap and
1361 	 * logging the error.
	 */
	/* Fatal processor-state corruption is never recoverable. */
1364 	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
1367 	/* Re-check AFSR/AFAR */
1368 	(void) cheetah_recheck_errors(&local_snapshot);
1371 	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);
1374 		panic("Irrecoverable Correctable-ECC error trap.\n");
/* Trap handler for Cheetah deferred asynchronous errors (EDU, BERR,
 * TO, etc.).
 *
 * Special case: a bus error provoked by a deliberate PCI config-space
 * "poke" is expected -- flag it, skip the faulting instruction, and
 * return.  Otherwise: snapshot the error log, flush the caches,
 * re-enable caches and error reporting, decide recoverability, and --
 * for a recoverable main-memory fault -- retire the bad page and fix
 * up the trapped instruction via the kernel exception tables.
 */
1377 void cheetah_deferred_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
1379 	struct cheetah_err_info local_snapshot, *p;
1380 	int recoverable, is_memory;
1383 	/* Check for the special PCI poke sequence. */
1384 	if (pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
1385 		cheetah_flush_icache();
1386 		cheetah_flush_dcache();
1388 		/* Re-enable I-cache/D-cache */
1389 		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1390 				     "or %%g1, %1, %%g1\n\t"
1391 				     "stxa %%g1, [%%g0] %0\n\t"
1394 				     : "i" (ASI_DCU_CONTROL_REG),
1395 				       "i" (DCU_DC | DCU_IC)
1398 		/* Re-enable error reporting */
1399 		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1400 				     "or %%g1, %1, %%g1\n\t"
1401 				     "stxa %%g1, [%%g0] %0\n\t"
1404 				     : "i" (ASI_ESTATE_ERROR_EN),
1405 				       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
		/* Clear the latched error state; the fault was expected. */
1408 		(void) cheetah_recheck_errors(NULL);
		/* Tell the poke code its access faulted, and step past
		 * the faulting instruction.
		 */
1410 		pci_poke_faulted = 1;
1412 		regs->tnpc = regs->tpc + 4;
	/* Normal (unexpected) deferred error path starts here. */
1417 	p = cheetah_get_error_log(afsr);
1419 		prom_printf("ERROR: Early deferred error afsr[%016lx] afar[%016lx]\n",
1421 		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
1422 			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
1426 	/* Grab snapshot of logged error. */
1427 	memcpy(&local_snapshot, p, sizeof(local_snapshot));
1429 	/* If the current trap snapshot does not match what the
1430 	 * trap handler passed along into our args, big trouble.
1431 	 * In such a case, mark the local copy as invalid.
1433 	 * Else, it matches and we mark the afsr in the non-local
1434 	 * copy as invalid so we may log new error traps there.
1436 	if (p->afsr != afsr || p->afar != afar)
1437 		local_snapshot.afsr = CHAFSR_INVALID;
1439 		p->afsr = CHAFSR_INVALID;
1441 	is_memory = cheetah_check_main_memory(afar);
	/* Work out how much E-cache flushing this error requires. */
1444 		int flush_all, flush_line;
1446 		flush_all = flush_line = 0;
1447 		if ((afsr & CHAFSR_EDU) != 0UL) {
			/* If EDU is the only error, one line suffices. */
1448 			if ((afsr & cheetah_afsr_errors) == CHAFSR_EDU)
1452 		} else if ((afsr & CHAFSR_BERR) != 0UL) {
1453 			if ((afsr & cheetah_afsr_errors) == CHAFSR_BERR)
1459 		cheetah_flush_icache();
1460 		cheetah_flush_dcache();
1462 		/* Re-enable I/D caches */
1463 		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1464 				     "or %%g1, %1, %%g1\n\t"
1465 				     "stxa %%g1, [%%g0] %0\n\t"
1468 				     : "i" (ASI_DCU_CONTROL_REG),
1469 				       "i" (DCU_IC | DCU_DC)
1473 			cheetah_flush_ecache();
1474 		else if (flush_line)
1475 			cheetah_flush_ecache_line(afar);
1478 	/* Re-enable error reporting */
1479 	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1480 			     "or %%g1, %1, %%g1\n\t"
1481 			     "stxa %%g1, [%%g0] %0\n\t"
1484 			     : "i" (ASI_ESTATE_ERROR_EN),
1485 			       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
1488 	/* Decide if we can continue after handling this trap and
1489 	 * logging the error.
	 */
	/* Fatal processor-state corruption is never recoverable. */
1492 	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
1495 	/* Re-check AFSR/AFAR. What we are looking for here is whether a new
1496 	 * error was logged while we had error reporting traps disabled.
	 */
1498 	if (cheetah_recheck_errors(&local_snapshot)) {
1499 		unsigned long new_afsr = local_snapshot.afsr;
1501 		/* If we got a new asynchronous error, die... */
1502 		if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
1503 				CHAFSR_WDU | CHAFSR_CPU |
1504 				CHAFSR_IVU | CHAFSR_UE |
1505 				CHAFSR_BERR | CHAFSR_TO))
1510 	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);
1512 	/* "Recoverable" here means we try to yank the page from ever
1513 	 * being newly used again. This depends upon a few things:
1514 	 * 1) Must be main memory, and AFAR must be valid.
1515 	 * 2) If we trapped from user, OK.
1516 	 * 3) Else, if we trapped from kernel we must find exception
1517 	 *    table entry (ie. we have to have been accessing user
	 *    space).
1520 	 * If AFAR is not in main memory, or we trapped from kernel
1521 	 * and cannot find an exception table entry, it is unacceptable
1522 	 * to try and continue.
	 */
1524 	if (recoverable && is_memory) {
1525 		if ((regs->tstate & TSTATE_PRIV) == 0UL) {
1526 			/* OK, usermode access. */
			/* Kernel-mode trap: consult the exception table. */
1529 			unsigned long g2 = regs->u_regs[UREG_G2];
1530 			unsigned long fixup = search_extables_range(regs->tpc, &g2);
1533 				/* OK, kernel access to userspace. */
1537 				/* BAD, privileged state is corrupted. */
			/* Pin the faulted page so it is never handed out
			 * again (extra reference leaks it on purpose).
			 */
1542 			if (pfn_valid(afar >> PAGE_SHIFT))
1543 				get_page(pfn_to_page(afar >> PAGE_SHIFT));
1547 			/* Only perform fixup if we still have a
1548 			 * recoverable condition.
			 */
			/* Redirect execution to the fixup handler. */
1552 				regs->tnpc = regs->tpc + 4;
1553 				regs->u_regs[UREG_G2] = g2;
1562 		panic("Irrecoverable deferred error trap.\n");
1565 /* Handle a D/I cache parity error trap. TYPE is encoded as:
1567  * Bit0: 0=dcache,1=icache
1568  * Bit1: 0=recoverable,1=unrecoverable
1570  * The hardware has disabled both the I-cache and D-cache in
1571  * the %dcr register.
 */
1573 void cheetah_plus_parity_error(int type, struct pt_regs *regs)
	/* Throw away the corrupt cache contents.  The D-cache also
	 * needs its parity bits rewritten (zap) before the flush.
	 */
1576 		__cheetah_flush_icache();
1578 	cheetah_plus_zap_dcache_parity();
1579 	cheetah_flush_dcache();
1581 	/* Re-enable I-cache/D-cache */
1582 	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1583 			     "or %%g1, %1, %%g1\n\t"
1584 			     "stxa %%g1, [%%g0] %0\n\t"
1587 			     : "i" (ASI_DCU_CONTROL_REG),
1588 			       "i" (DCU_DC | DCU_IC)
	/* Unrecoverable (bit1 set): report and panic. */
1592 		printk(KERN_EMERG "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
1594 		       (type & 0x1) ? 'I' : 'D',
1596 		panic("Irrecoverable Cheetah+ parity error.");
	/* Recoverable: just warn and continue. */
1599 	printk(KERN_WARNING "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
1601 	       (type & 0x1) ? 'I' : 'D',
/* Common handling for FPU exception traps.  From kernel mode the
 * faulting instruction is simply skipped (tpc <- tnpc); from user
 * mode a SIGFPE is delivered with si_code decoded from the FSR
 * current-exception (cexc) field.
 */
1605 void do_fpe_common(struct pt_regs *regs)
1607 	if (regs->tstate & TSTATE_PRIV) {
		/* Kernel FPU trap: step over the instruction. */
1608 		regs->tpc = regs->tnpc;
		/* User mode: decode the saved %fsr to pick si_code. */
1611 		unsigned long fsr = current_thread_info()->xfsr[0];
		/* 32-bit tasks only see the low 32 bits of PC/NPC. */
1614 		if (test_thread_flag(TIF_32BIT)) {
1615 			regs->tpc &= 0xffffffff;
1616 			regs->tnpc &= 0xffffffff;
1618 		info.si_signo = SIGFPE;
1620 		info.si_addr = (void *)regs->tpc;
1622 		info.si_code = __SI_FAULT;
		/* ftt field == 1 (IEEE exception): look at cexc bits. */
1623 		if ((fsr & 0x1c000) == (1 << 14)) {
1625 				info.si_code = FPE_FLTINV;
1626 			else if (fsr & 0x08)
1627 				info.si_code = FPE_FLTOVF;
1628 			else if (fsr & 0x04)
1629 				info.si_code = FPE_FLTUND;
1630 			else if (fsr & 0x02)
1631 				info.si_code = FPE_FLTDIV;
1632 			else if (fsr & 0x01)
1633 				info.si_code = FPE_FLTRES;
1635 		force_sig_info(SIGFPE, &info, current);
/* FPU IEEE-754 exception trap: no emulation needed, just deliver. */
1639 void do_fpieee(struct pt_regs *regs)
1641 	do_fpe_common(regs);
1644 extern int do_mathemu(struct pt_regs *, struct fpustate *);
/* fp_exception_other trap: unfinished/unimplemented FP operations are
 * first handed to the software emulator (do_mathemu); anything it
 * cannot handle falls through to the common SIGFPE delivery.
 */
1646 void do_fpother(struct pt_regs *regs)
1648 	struct fpustate *f = FPUSTATE;
	/* Dispatch on the FSR ftt (trap type) field. */
1651 	switch ((current_thread_info()->xfsr[0] & 0x1c000)) {
1652 	case (2 << 14): /* unfinished_FPop */
1653 	case (3 << 14): /* unimplemented_FPop */
1654 		ret = do_mathemu(regs, f);
	/* Emulation failed (or other ftt): deliver the signal. */
1659 	do_fpe_common(regs);
/* Tag-overflow trap (tagged arithmetic): fatal in kernel mode,
 * SIGEMT/EMT_TAGOVF to user mode.
 */
1662 void do_tof(struct pt_regs *regs)
1666 	if (regs->tstate & TSTATE_PRIV)
1667 		die_if_kernel("Penguin overflow trap from kernel mode", regs);
	/* 32-bit tasks only see the low 32 bits of PC/NPC. */
1668 	if (test_thread_flag(TIF_32BIT)) {
1669 		regs->tpc &= 0xffffffff;
1670 		regs->tnpc &= 0xffffffff;
1672 	info.si_signo = SIGEMT;
1674 	info.si_code = EMT_TAGOVF;
1675 	info.si_addr = (void *)regs->tpc;
1677 	force_sig_info(SIGEMT, &info, current);
/* Integer divide-by-zero trap: fatal in kernel mode, SIGFPE with
 * FPE_INTDIV to user mode.
 */
1680 void do_div0(struct pt_regs *regs)
1684 	if (regs->tstate & TSTATE_PRIV)
1685 		die_if_kernel("TL0: Kernel divide by zero.", regs);
	/* 32-bit tasks only see the low 32 bits of PC/NPC. */
1686 	if (test_thread_flag(TIF_32BIT)) {
1687 		regs->tpc &= 0xffffffff;
1688 		regs->tnpc &= 0xffffffff;
1690 	info.si_signo = SIGFPE;
1692 	info.si_code = FPE_INTDIV;
1693 	info.si_addr = (void *)regs->tpc;
1695 	force_sig_info(SIGFPE, &info, current);
/* Print the instruction words around a kernel PC: three before,
 * the faulting one (bracketed with < >), and five after.  Bails out
 * on a misaligned PC.
 */
1698 void instruction_dump (unsigned int *pc)
	/* Instructions are 4-byte aligned; a misaligned PC is bogus. */
1702 	if ((((unsigned long) pc) & 3))
1705 	printk("Instruction DUMP:");
1706 	for (i = -3; i < 6; i++)
1707 		printk("%c%08x%c",i?' ':'<',pc[i],i?' ':'>');
/* Same as instruction_dump() but for a userspace PC: the nine words
 * around it are fetched safely with copy_from_user() first, and the
 * faulting word (index 3 in buf) is bracketed with < >.
 */
1711 static void user_instruction_dump (unsigned int __user *pc)
1714 	unsigned int buf[9];
	/* Instructions are 4-byte aligned; a misaligned PC is bogus. */
1716 	if ((((unsigned long) pc) & 3))
	/* Silently give up if the user pages are unreadable. */
1719 	if (copy_from_user(buf, pc - 3, sizeof(buf)))
1722 	printk("Instruction DUMP:");
1723 	for (i = 0; i < 9; i++)
1724 		printk("%c%08x%c",i==3?' ':'<',buf[i],i==3?' ':'>');
/* Walk TSK's kernel stack starting from _KSP (or the current frame
 * pointer when tsk is the running task) and print a call trace,
 * following the saved %i6 (frame pointer) chain through each register
 * window.  Limited to 16 frames; stops at the first frame pointer
 * that falls outside the task's thread area.
 */
1728 void show_stack(struct task_struct *tsk, unsigned long *_ksp)
1730 	unsigned long pc, fp, thread_base, ksp;
1731 	struct thread_info *tp = tsk->thread_info;
1732 	struct reg_window *rw;
1735 	ksp = (unsigned long) _ksp;
	/* For the running task, grab the live stack pointer instead
	 * (elided here) rather than a stale saved one.
	 */
1737 	if (tp == current_thread_info())
	/* sparc64 stack pointers are biased by STACK_BIAS. */
1740 	fp = ksp + STACK_BIAS;
1741 	thread_base = (unsigned long) tp;
1743 	printk("Call Trace:");
1744 #ifdef CONFIG_KALLSYMS
1748 		/* Bogus frame pointer? */
1749 		if (fp < (thread_base + sizeof(struct thread_info)) ||
1750 		    fp >= (thread_base + THREAD_SIZE))
1752 		rw = (struct reg_window *)fp;
1754 		printk(" [%016lx] ", pc);
1755 		print_symbol("%s\n", pc);
		/* Follow the saved frame pointer to the caller. */
1756 		fp = rw->ins[6] + STACK_BIAS;
1757 	} while (++count < 16);
1750 #ifndef CONFIG_KALLSYMS
/* Dump the current task's kernel stack trace: read the live %fp and
 * hand it to show_stack().
 */
1763 void dump_stack(void)
1767 	__asm__ __volatile__("mov %%fp, %0"
1769 	show_stack(current, ksp);
1772 EXPORT_SYMBOL(dump_stack);
/* Fatal-trap reporter.  Prints the oops banner, registers, a caller
 * backtrace (for kernel-mode traps) or a userspace instruction dump,
 * then -- despite the name -- terminates the task; the "if kernel"
 * part is the elided do_exit/panic tail guarded by TSTATE_PRIV.
 */
1774 void die_if_kernel(char *str, struct pt_regs *regs)
1776 	static int die_counter;
1777 	extern void __show_regs(struct pt_regs * regs);
1778 	extern void smp_report_regs(void);
1781 	/* Amuse the user. */
1784 "              \"@'/ .. \\`@\"\n"
1788 	printk("%s(%d): %s [#%d]\n", current->comm, current->pid, str, ++die_counter);
	/* Spill all register windows to the stack so the backtrace
	 * below can read them from memory.
	 */
1789 	__asm__ __volatile__("flushw");
1791 	if (regs->tstate & TSTATE_PRIV) {
1792 		struct reg_window *rw = (struct reg_window *)
1793 			(regs->u_regs[UREG_FP] + STACK_BIAS);
1795 		/* Stop the back trace when we hit userland or we
1796 		 * find some badly aligned kernel stack.
		 */
1800 		       (((unsigned long) rw) >= PAGE_OFFSET) &&
1801 		       (char *) rw < ((char *) current)
1802 		        + sizeof (union thread_union) &&
1803 		       !(((unsigned long) rw) & 0x7)) {
1804 			printk("Caller[%016lx]", rw->ins[7]);
1805 			print_symbol(": %s", rw->ins[7]);
			/* Walk up to the caller's register window. */
1807 			rw = (struct reg_window *)
1808 				(rw->ins[6] + STACK_BIAS);
1810 		instruction_dump ((unsigned int *) regs->tpc);
		/* User-mode trap: dump the user instruction stream. */
1812 		if (test_thread_flag(TIF_32BIT)) {
1813 			regs->tpc &= 0xffffffff;
1814 			regs->tnpc &= 0xffffffff;
1816 		user_instruction_dump ((unsigned int __user *) regs->tpc);
1822 	if (regs->tstate & TSTATE_PRIV)
1827 extern int handle_popc(u32 insn, struct pt_regs *regs);
1828 extern int handle_ldf_stq(u32 insn, struct pt_regs *regs);
/* Illegal-instruction trap.  Fatal in kernel mode.  In user mode,
 * two instruction encodings the hardware lacks are emulated in
 * software (POPC and LDQ/STQ via handle_popc/handle_ldf_stq); any
 * other opcode earns the task a SIGILL/ILL_ILLOPC.
 */
1830 void do_illegal_instruction(struct pt_regs *regs)
1832 	unsigned long pc = regs->tpc;
1833 	unsigned long tstate = regs->tstate;
1837 	if (tstate & TSTATE_PRIV)
1838 		die_if_kernel("Kernel illegal instruction", regs);
1839 	if (test_thread_flag(TIF_32BIT))
	/* Fetch the faulting opcode from userspace. */
1841 	if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
1842 		if ((insn & 0xc1ffc000) == 0x81700000) /* POPC */ {
1843 			if (handle_popc(insn, regs))
1845 		} else if ((insn & 0xc1580000) == 0xc1100000) /* LDQ/STQ */ {
1846 			if (handle_ldf_stq(insn, regs))
	/* Not emulatable: signal the task. */
1850 	info.si_signo = SIGILL;
1852 	info.si_code = ILL_ILLOPC;
1853 	info.si_addr = (void *)pc;
1855 	force_sig_info(SIGILL, &info, current);
/* Unaligned memory access trap.  Kernel-mode accesses are fixed up by
 * the byte-at-a-time emulator (kernel_unaligned_trap); user-mode
 * accesses get SIGBUS/BUS_ADRALN at the faulting address (sfar).
 */
1858 void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
1862 	if (regs->tstate & TSTATE_PRIV) {
1863 		extern void kernel_unaligned_trap(struct pt_regs *regs,
1866 						  unsigned long sfsr);
		/* Emulate the access; the insn word is read from tpc. */
1868 		kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc),
1872 	info.si_signo = SIGBUS;
1874 	info.si_code = BUS_ADRALN;
1875 	info.si_addr = (void *)sfar;
1877 	force_sig_info(SIGBUS, &info, current);
/* Privileged-opcode trap from user mode: deliver SIGILL/ILL_PRVOPC. */
1880 void do_privop(struct pt_regs *regs)
	/* 32-bit tasks only see the low 32 bits of PC/NPC. */
1884 	if (test_thread_flag(TIF_32BIT)) {
1885 		regs->tpc &= 0xffffffff;
1886 		regs->tnpc &= 0xffffffff;
1888 	info.si_signo = SIGILL;
1890 	info.si_code = ILL_PRVOPC;
1891 	info.si_addr = (void *)regs->tpc;
1893 	force_sig_info(SIGILL, &info, current);
/* Privileged-action trap (user code touching a privileged ASI/reg).
 * NOTE(review): body not visible here -- presumably signals the task
 * like do_privop; confirm against the full source.
 */
1896 void do_privact(struct pt_regs *regs)
1901 /* Trap level 1 stuff or other traps we should never see... */
/* Each handler below follows the same pattern: for TL1 traps, dump
 * the saved trap-level stack (logged by the entry code just past the
 * pt_regs, see dump_tl1_traplog) and then die via die_if_kernel();
 * TL0 variants (do_cee, do_paw, do_vaw) die without a traplog dump.
 */
1902 void do_cee(struct pt_regs *regs)
1904 	die_if_kernel("TL0: Cache Error Exception", regs);
1907 void do_cee_tl1(struct pt_regs *regs)
1909 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
1910 	die_if_kernel("TL1: Cache Error Exception", regs);
1913 void do_dae_tl1(struct pt_regs *regs)
1915 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
1916 	die_if_kernel("TL1: Data Access Exception", regs);
1919 void do_iae_tl1(struct pt_regs *regs)
1921 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
1922 	die_if_kernel("TL1: Instruction Access Exception", regs);
1925 void do_div0_tl1(struct pt_regs *regs)
1927 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
1928 	die_if_kernel("TL1: DIV0 Exception", regs);
1931 void do_fpdis_tl1(struct pt_regs *regs)
1933 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
1934 	die_if_kernel("TL1: FPU Disabled", regs);
1937 void do_fpieee_tl1(struct pt_regs *regs)
1939 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
1940 	die_if_kernel("TL1: FPU IEEE Exception", regs);
1943 void do_fpother_tl1(struct pt_regs *regs)
1945 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
1946 	die_if_kernel("TL1: FPU Other Exception", regs);
1949 void do_ill_tl1(struct pt_regs *regs)
1951 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
1952 	die_if_kernel("TL1: Illegal Instruction Exception", regs);
1955 void do_irq_tl1(struct pt_regs *regs)
1957 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
1958 	die_if_kernel("TL1: IRQ Exception", regs);
1961 void do_lddfmna_tl1(struct pt_regs *regs)
1963 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
1964 	die_if_kernel("TL1: LDDF Exception", regs);
1967 void do_stdfmna_tl1(struct pt_regs *regs)
1969 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
1970 	die_if_kernel("TL1: STDF Exception", regs);
1973 void do_paw(struct pt_regs *regs)
1975 	die_if_kernel("TL0: Phys Watchpoint Exception", regs);
1978 void do_paw_tl1(struct pt_regs *regs)
1980 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
1981 	die_if_kernel("TL1: Phys Watchpoint Exception", regs);
1984 void do_vaw(struct pt_regs *regs)
1986 	die_if_kernel("TL0: Virt Watchpoint Exception", regs);
1989 void do_vaw_tl1(struct pt_regs *regs)
1991 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
1992 	die_if_kernel("TL1: Virt Watchpoint Exception", regs);
1995 void do_tof_tl1(struct pt_regs *regs)
1997 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
1998 	die_if_kernel("TL1: Tag Overflow Exception", regs);
/* Handle the SunOS-compatibility "get %psr" software trap: return a
 * v8-style PSR value (synthesized from the v9 TSTATE) in %o0 and
 * advance past the trap instruction.
 */
2001 void do_getpsr(struct pt_regs *regs)
2003 	regs->u_regs[UREG_I0] = tstate_to_psr(regs->tstate);
	/* Step past the trap instruction. */
2004 	regs->tpc = regs->tnpc;
	/* 32-bit tasks only see the low 32 bits of PC/NPC. */
2006 	if (test_thread_flag(TIF_32BIT)) {
2007 		regs->tpc &= 0xffffffff;
2008 		regs->tnpc &= 0xffffffff;
2012 extern void thread_info_offsets_are_bolixed_dave(void);
2014 /* Only invoked on boot processor. */
2015 void __init trap_init(void)
2017 /* Compile time sanity check. */
2018 if (TI_TASK != offsetof(struct thread_info, task) ||
2019 TI_FLAGS != offsetof(struct thread_info, flags) ||
2020 TI_CPU != offsetof(struct thread_info, cpu) ||
2021 TI_FPSAVED != offsetof(struct thread_info, fpsaved) ||
2022 TI_KSP != offsetof(struct thread_info, ksp) ||
2023 TI_FAULT_ADDR != offsetof(struct thread_info, fault_address) ||
2024 TI_KREGS != offsetof(struct thread_info, kregs) ||
2025 TI_UTRAPS != offsetof(struct thread_info, utraps) ||
2026 TI_EXEC_DOMAIN != offsetof(struct thread_info, exec_domain) ||
2027 TI_REG_WINDOW != offsetof(struct thread_info, reg_window) ||
2028 TI_RWIN_SPTRS != offsetof(struct thread_info, rwbuf_stkptrs) ||
2029 TI_GSR != offsetof(struct thread_info, gsr) ||
2030 TI_XFSR != offsetof(struct thread_info, xfsr) ||
2031 TI_USER_CNTD0 != offsetof(struct thread_info, user_cntd0) ||
2032 TI_USER_CNTD1 != offsetof(struct thread_info, user_cntd1) ||
2033 TI_KERN_CNTD0 != offsetof(struct thread_info, kernel_cntd0) ||
2034 TI_KERN_CNTD1 != offsetof(struct thread_info, kernel_cntd1) ||
2035 TI_PCR != offsetof(struct thread_info, pcr_reg) ||
2036 TI_CEE_STUFF != offsetof(struct thread_info, cee_stuff) ||
2037 TI_PRE_COUNT != offsetof(struct thread_info, preempt_count) ||
2038 TI_FPREGS != offsetof(struct thread_info, fpregs) ||
2039 (TI_FPREGS & (64 - 1)))
2040 thread_info_offsets_are_bolixed_dave();
2042 /* Attach to the address space of init_task. On SMP we
2043 * do this in smp.c:smp_callin for other cpus.
2045 atomic_inc(&init_mm.mm_count);
2046 current->active_mm = &init_mm;