ftp://ftp.kernel.org/pub/linux/kernel/v2.6/linux-2.6.6.tar.bz2
[linux-2.6.git] / arch / ia64 / kernel / mca_asm.S
1 //
2 // assembly portion of the IA64 MCA handling
3 //
4 // Mods by cfleck to integrate into kernel build
5 // 00/03/15 davidm Added various stop bits to get a clean compile
6 //
7 // 00/03/29 cfleck Added code to save INIT handoff state in pt_regs format, switch to temp
8 //                 kstack, switch modes, jump to C INIT handler
9 //
10 // 02/01/04 J.Hall <jenna.s.hall@intel.com>
11 //                 Before entering virtual mode code:
12 //                 1. Check for TLB CPU error
13 //                 2. Restore current thread pointer to kr6
14 //                 3. Move stack ptr 16 bytes to conform to C calling convention
15 //
16 #include <linux/config.h>
17 #include <linux/threads.h>
18
19 #include <asm/asmmacro.h>
20 #include <asm/pgtable.h>
21 #include <asm/processor.h>
22 #include <asm/mca_asm.h>
23 #include <asm/mca.h>
24
25 /*
26  * When we get a machine check, the kernel stack pointer is no longer
27  * valid, so we need to set a new stack pointer.
28  */
29 #define MINSTATE_PHYS   /* Make sure stack access is physical for MINSTATE */
30
31 /*
32  * Needed for return context to SAL
33  */
34 #define IA64_MCA_SAME_CONTEXT   0
35 #define IA64_MCA_COLD_BOOT      -2
36
37 #include "minstate.h"
38
39 /*
40  * SAL_TO_OS_MCA_HANDOFF_STATE (SAL 3.0 spec)
41  *              1. GR1 = OS GP
42  *              2. GR8 = PAL_PROC physical address
43  *              3. GR9 = SAL_PROC physical address
44  *              4. GR10 = SAL GP (physical)
45  *              5. GR11 = Rendez state
46  *              6. GR12 = Return address to location within SAL_CHECK
 *
 * Stores the SAL handoff registers, in order, into the physical
 * ia64_sal_to_os_handoff_state save area.  _tmp is clobbered (it is
 * used as the running store pointer).  The store order must match the
 * layout of ia64_mca_sal_to_os_state_t in include/asm/mca.h.
 * NOTE(review): r17/r18 fill the two trailing slots (presumably the
 * PAL min-state pointer and processor state parameter handed off by
 * SAL) -- confirm against the struct definition in asm/mca.h.
47  */
48 #define SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(_tmp)          \
49         LOAD_PHYSICAL(p0, _tmp, ia64_sal_to_os_handoff_state);; \
50         st8     [_tmp]=r1,0x08;;                        \
51         st8     [_tmp]=r8,0x08;;                        \
52         st8     [_tmp]=r9,0x08;;                        \
53         st8     [_tmp]=r10,0x08;;                       \
54         st8     [_tmp]=r11,0x08;;                       \
55         st8     [_tmp]=r12,0x08;;                       \
56         st8     [_tmp]=r17,0x08;;                       \
57         st8     [_tmp]=r18,0x08
58
59 /*
60  * OS_MCA_TO_SAL_HANDOFF_STATE (SAL 3.0 spec)
61  * (p6) is executed if we never entered virtual mode (TLB error)
62  * (p7) is executed if we entered virtual mode as expected (normal case)
63  *      1. GR8 = OS_MCA return status
64  *      2. GR9 = SAL GP (physical)
65  *      3. GR10 = 0/1 returning same/new context
66  *      4. GR22 = New min state save area pointer
67  *      returns ptr to SAL rtn save loc in _tmp
 *
 * Reloads the return-to-SAL registers (r8/r9/r10/r22) from the
 * ia64_os_to_sal_handoff_state area, converting its virtual address to
 * physical first.  On exit _tmp points just past the four loaded
 * slots, i.e. at the SAL return-address save location (see the
 * trailing comment below).
 * NOTE(review): the (p6)/(p7) remarks above describe the calling
 * contexts, not predicates used here -- the macro is unconditional.
68  */
69 #define OS_MCA_TO_SAL_HANDOFF_STATE_RESTORE(_tmp)       \
70         movl    _tmp=ia64_os_to_sal_handoff_state;;     \
71         DATA_VA_TO_PA(_tmp);;                           \
72         ld8     r8=[_tmp],0x08;;                        \
73         ld8     r9=[_tmp],0x08;;                        \
74         ld8     r10=[_tmp],0x08;;                       \
75         ld8     r22=[_tmp],0x08;;
76         // now _tmp is pointing to SAL rtn save location
77
78 /*
79  * COLD_BOOT_HANDOFF_STATE() sets ia64_mca_os_to_sal_state
80  *      imots_os_status=IA64_MCA_COLD_BOOT
81  *      imots_sal_gp=SAL GP
82  *      imots_context=IA64_MCA_SAME_CONTEXT
83  *      imots_new_min_state=Min state save area pointer
84  *      imots_sal_check_ra=Return address to location within SAL_CHECK
85  *
 * Fills the os-to-sal handoff area (all accesses via physical
 * addresses) to request a cold boot from SAL, copying the min-state
 * pointer and the SAL_CHECK return address out of the previously saved
 * sal-to-os area.  All three argument registers are clobbered.
 * NOTE(review): the first ld8 below (post-increment 48) reads offset 0
 * of the sal-to-os area -- the slot the save macro filled with r1 (the
 * OS GP) -- into the imots_sal_gp slot, while the SAL GP was saved at
 * offset 24.  Verify the intended offset against asm/mca.h.
86  */
87 #define COLD_BOOT_HANDOFF_STATE(sal_to_os_handoff,os_to_sal_handoff,tmp)\
88         movl    tmp=IA64_MCA_COLD_BOOT;                                 \
89         movl    sal_to_os_handoff=__pa(ia64_sal_to_os_handoff_state);   \
90         movl    os_to_sal_handoff=__pa(ia64_os_to_sal_handoff_state);;  \
91         st8     [os_to_sal_handoff]=tmp,8;;                             \
92         ld8     tmp=[sal_to_os_handoff],48;;                            \
93         st8     [os_to_sal_handoff]=tmp,8;;                             \
94         movl    tmp=IA64_MCA_SAME_CONTEXT;;                             \
95         st8     [os_to_sal_handoff]=tmp,8;;                             \
96         ld8     tmp=[sal_to_os_handoff],-8;;                            \
97         st8     [os_to_sal_handoff]=tmp,8;;                             \
98         ld8     tmp=[sal_to_os_handoff];;                               \
99         st8     [os_to_sal_handoff]=tmp;;
100
101         .global ia64_os_mca_dispatch
102         .global ia64_os_mca_dispatch_end
103         .global ia64_sal_to_os_handoff_state
104         .global ia64_os_to_sal_handoff_state
105         .global ia64_mca_proc_state_dump
106         .global ia64_mca_stack
107         .global ia64_mca_stackframe
108         .global ia64_mca_bspstore
109         .global ia64_init_stack
110
111         .text
112         .align 16
113
// Main OS_MCA entry point, registered with SAL.  Entered in physical
// mode; the kernel stack may be invalid here.  Flow:
//   1. serialize MCA handling across CPUs (ia64_mca_serialize spinlock)
//   2. save the SAL-to-OS handoff registers
//   3. dump processor state; unless the processor state parameter
//      reports a TLB check, purge all TC/TR entries and reload the
//      kernel/percpu/PAL/stack translations
//   4. switch to the private MCA stack, enter virtual mode, call the C
//      handler ia64_mca_ucmc_handler, then return to physical mode
//   5. restore state, release the lock and branch back into SAL_CHECK
114 ia64_os_mca_dispatch:
115
116         // Serialize all MCA processing
117         mov     r3=1;;
118         LOAD_PHYSICAL(p0,r2,ia64_mca_serialize);;
119 ia64_os_mca_spin:
120         xchg8   r4=[r2],r3;;                    // atomic test-and-set of the serialize word
121         cmp.ne  p6,p0=r4,r0
122 (p6)    br ia64_os_mca_spin                     // spin until we own the lock
123
124         // Save the SAL to OS MCA handoff state as defined
125         // by SAL SPEC 3.0
126         // NOTE : The order in which the state gets saved
127         //        is dependent on the way the C-structure
128         //        for ia64_mca_sal_to_os_state_t has been
129         //        defined in include/asm/mca.h
130         SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(r2)
131         ;;
132
133         // LOG PROCESSOR STATE INFO FROM HERE ON..
134 begin_os_mca_dump:
135         br      ia64_os_mca_proc_state_dump;;
136
137 ia64_os_mca_done_dump:
138
        // +56 = last slot of the handoff area (processor state parameter)
139         LOAD_PHYSICAL(p0,r16,ia64_sal_to_os_handoff_state+56)
140         ;;
141         ld8 r18=[r16]           // Get processor state parameter on existing PALE_CHECK.
142         ;;
        // psp bit 60: presumably the TLB-check bit -- purge/reload the
        // TLB only when it is set (TODO confirm bit position vs. SAL spec)
143         tbit.nz p6,p7=r18,60
144 (p7)    br.spnt done_tlb_purge_and_reload
145
146         // The following code purges TC and TR entries. Then reload all TC entries.
147         // Purge percpu data TC entries.
148 begin_tlb_purge_and_reload:
        // Walk ia64_mca_tlb_list (one entry per CPU) looking for the
        // entry whose first field matches our cr.lid; fall through to
        // err (cold boot) if no entry matches after NR_CPUS tries.
149         mov r16=cr.lid
150         LOAD_PHYSICAL(p0,r17,ia64_mca_tlb_list) // Physical address of ia64_mca_tlb_list
151         mov r19=0
152         mov r20=NR_CPUS
153         ;;
154 1:      cmp.eq p6,p7=r19,r20
155 (p6)    br.spnt.few err
156         ld8 r18=[r17],IA64_MCA_TLB_INFO_SIZE
157         ;;
158         add r19=1,r19
159         cmp.eq p6,p7=r18,r16
160 (p7)    br.sptk.few 1b
161         ;;
162         adds r17=-IA64_MCA_TLB_INFO_SIZE,r17
163         ;;
164         mov r23=r17             // save current ia64_mca_percpu_info addr pointer.
165         adds r17=16,r17
166         ;;
167         ld8 r18=[r17],8         // r18=ptce_base
168         ;;
169         ld4 r19=[r17],4         // r19=ptce_count[0]
170         ;;
171         ld4 r20=[r17],4         // r20=ptce_count[1]
172         ;;
173         ld4 r21=[r17],4         // r21=ptce_stride[0]
174         mov r24=0
175         ;;
176         ld4 r22=[r17],4         // r22=ptce_stride[1]
177         adds r20=-1,r20
178         ;;
        // Doubly nested ptc.e loop over the PAL-provided purge grid:
        // outer count r19 (index r24), inner count r20+1 via ar.lc,
        // strides r21 (outer) / r22 (inner), starting at ptce_base r18.
179 2:
180         cmp.ltu p6,p7=r24,r19
181 (p7)    br.cond.dpnt.few 4f
182         mov ar.lc=r20
183 3:
184         ptc.e r18
185         ;;
186         add r18=r22,r18
187         br.cloop.sptk.few 3b
188         ;;
189         add r18=r21,r18
190         add r24=1,r24
191         ;;
192         br.sptk.few 2b
193 4:
194         srlz.i                  // srlz.i implies srlz.d
195         ;;
196
197         // Now purge addresses formerly mapped by TR registers
198         // 1. Purge ITR&DTR for kernel.
199         movl r16=KERNEL_START
200         mov r18=KERNEL_TR_PAGE_SHIFT<<2
201         ;;
202         ptr.i r16, r18
203         ptr.d r16, r18
204         ;;
205         srlz.i
206         ;;
207         srlz.d
208         ;;
209         // 2. Purge DTR for PERCPU data.
210         movl r16=PERCPU_ADDR
211         mov r18=PERCPU_PAGE_SHIFT<<2
212         ;;
213         ptr.d r16,r18
214         ;;
215         srlz.d
216         ;;
217         // 3. Purge ITR for PAL code.
218         adds r17=48,r23         // offset 48 in the percpu info: PAL vaddr
219         ;;
220         ld8 r16=[r17]
221         mov r18=IA64_GRANULE_SHIFT<<2
222         ;;
223         ptr.i r16,r18
224         ;;
225         srlz.i
226         ;;
227         // 4. Purge DTR for stack.
228         mov r16=IA64_KR(CURRENT_STACK)
229         ;;
230         shl r16=r16,IA64_GRANULE_SHIFT
231         movl r19=PAGE_OFFSET
232         ;;
233         add r16=r19,r16
234         mov r18=IA64_GRANULE_SHIFT<<2
235         ;;
236         ptr.d r16,r18
237         ;;
238         srlz.i
239         ;;
240         // Finally reload the TR registers.
241         // 1. Reload DTR/ITR registers for kernel.
242         mov r18=KERNEL_TR_PAGE_SHIFT<<2
243         movl r17=KERNEL_START
244         ;;
245         mov cr.itir=r18
246         mov cr.ifa=r17
247         mov r16=IA64_TR_KERNEL
248         mov r19=ip
249         movl r18=PAGE_KERNEL
250         ;;
        // pte = (current physical ip rounded down to the kernel TR page) | PAGE_KERNEL
251         dep r17=0,r19,0, KERNEL_TR_PAGE_SHIFT
252         ;;
253         or r18=r17,r18
254         ;;
255         itr.i itr[r16]=r18
256         ;;
257         itr.d dtr[r16]=r18
258         ;;
259         srlz.i
260         srlz.d
261         ;;
262         // 2. Reload DTR register for PERCPU data.
263         adds r17=8,r23
264         movl r16=PERCPU_ADDR            // vaddr
265         movl r18=PERCPU_PAGE_SHIFT<<2
266         ;;
267         mov cr.itir=r18
268         mov cr.ifa=r16
269         ;;
270         ld8 r18=[r17]                   // pte
271         mov r16=IA64_TR_PERCPU_DATA;
272         ;;
273         itr.d dtr[r16]=r18
274         ;;
275         srlz.d
276         ;;
277         // 3. Reload ITR for PAL code.
278         adds r17=40,r23
279         ;;
280         ld8 r18=[r17],8                 // pte
281         ;;
282         ld8 r16=[r17]                   // vaddr
283         mov r19=IA64_GRANULE_SHIFT<<2
284         ;;
285         mov cr.itir=r19
286         mov cr.ifa=r16
287         mov r20=IA64_TR_PALCODE
288         ;;
289         itr.i itr[r20]=r18
290         ;;
291         srlz.i
292         ;;
293         // 4. Reload DTR for stack.
294         mov r16=IA64_KR(CURRENT_STACK)
295         ;;
296         shl r16=r16,IA64_GRANULE_SHIFT
297         movl r19=PAGE_OFFSET
298         ;;
299         add r18=r19,r16                 // r18 = vaddr of current stack
300         movl r20=PAGE_KERNEL
301         ;;
302         add r16=r20,r16                 // r16 = pte (paddr | PAGE_KERNEL)
303         mov r19=IA64_GRANULE_SHIFT<<2
304         ;;
305         mov cr.itir=r19
306         mov cr.ifa=r18
307         mov r20=IA64_TR_CURRENT_STACK
308         ;;
309         itr.d dtr[r20]=r16
310         ;;
311         srlz.d
312         ;;
313         br.sptk.many done_tlb_purge_and_reload
        // No percpu TLB info for this CPU: give up and ask SAL for a
        // cold boot, then go straight to the return-to-SAL path.
314 err:
315         COLD_BOOT_HANDOFF_STATE(r20,r21,r22)
316         br.sptk.many ia64_os_mca_done_restore
317
318 done_tlb_purge_and_reload:
319
320         // Setup new stack frame for OS_MCA handling
321         movl    r2=ia64_mca_bspstore;;  // local bspstore area location in r2
322         DATA_VA_TO_PA(r2);;
323         movl    r3=ia64_mca_stackframe;; // save stack frame to memory in r3
324         DATA_VA_TO_PA(r3);;
325         rse_switch_context(r6,r3,r2);;  // RSC management in this new context
326         movl    r12=ia64_mca_stack
327         mov     r2=8*1024;;             // stack size must be same as C array
328         add     r12=r2,r12;;            // stack base @ bottom of array
329         adds    r12=-16,r12;;           // allow 16 bytes of scratch
330                                         // (C calling convention)
331         DATA_VA_TO_PA(r12);;
332
333         // Enter virtual mode from physical mode
334         VIRTUAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_begin, r4)
335 ia64_os_mca_virtual_begin:
336
337         // Call virtual mode handler
338         movl            r2=ia64_mca_ucmc_handler;;
339         mov             b6=r2;;
340         br.call.sptk.many    b0=b6;;
341 .ret0:
342         // Revert back to physical mode before going back to SAL
343         PHYSICAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_end, r4)
344 ia64_os_mca_virtual_end:
345
346         // restore the original stack frame here
347         movl    r2=ia64_mca_stackframe  // restore stack frame from memory at r2
348         ;;
349         DATA_VA_TO_PA(r2)
350         movl    r4=IA64_PSR_MC
351         ;;
352         rse_return_context(r4,r3,r2)    // switch from interrupt context for RSE
353
354         // let us restore all the registers from our PSI structure
355         mov     r8=gp
356         ;;
357 begin_os_mca_restore:
358         br      ia64_os_mca_proc_state_restore;;
359
360 ia64_os_mca_done_restore:
361         OS_MCA_TO_SAL_HANDOFF_STATE_RESTORE(r2);;
362         // branch back to SALE_CHECK
363         ld8             r3=[r2];;
364         mov             b0=r3;;         // SAL_CHECK return address
365
366         // release lock
367         movl            r3=ia64_mca_serialize;;
368         DATA_VA_TO_PA(r3);;
369         st8.rel         [r3]=r0
370
371         br              b0
372         ;;
373 ia64_os_mca_dispatch_end:
374 //EndMain//////////////////////////////////////////////////////////////////////
375
376
377 //++
378 // Name:
379 //      ia64_os_mca_proc_state_dump()
380 //
381 // Stub Description:
382 //
383 //       This stub dumps the processor state during MCHK to a data area
384 //
385 //--
386
// Dumps processor state (bank-1 GRs, BRs, CRs, ARs, RRs) into the
// ia64_mca_proc_state_dump area, via its physical address.  The layout
// written here must match what ia64_os_mca_proc_state_restore reads
// back.  Entered/exited by plain branches (not br.call) from the
// dispatch code above.
387 ia64_os_mca_proc_state_dump:
388 // Save bank 1 GRs 16-31 which will be used by c-language code when we switch
389 //  to virtual addressing mode.
390         LOAD_PHYSICAL(p0,r2,ia64_mca_proc_state_dump)// convert OS state dump area to physical address
391
392 // save ar.NaT
393         mov             r5=ar.unat                  // ar.unat
394
395 // save banked GRs 16-31 along with NaT bits
396         bsw.1;;
397         st8.spill       [r2]=r16,8;;
398         st8.spill       [r2]=r17,8;;
399         st8.spill       [r2]=r18,8;;
400         st8.spill       [r2]=r19,8;;
401         st8.spill       [r2]=r20,8;;
402         st8.spill       [r2]=r21,8;;
403         st8.spill       [r2]=r22,8;;
404         st8.spill       [r2]=r23,8;;
405         st8.spill       [r2]=r24,8;;
406         st8.spill       [r2]=r25,8;;
407         st8.spill       [r2]=r26,8;;
408         st8.spill       [r2]=r27,8;;
409         st8.spill       [r2]=r28,8;;
410         st8.spill       [r2]=r29,8;;
411         st8.spill       [r2]=r30,8;;
412         st8.spill       [r2]=r31,8;;
413
414         mov             r4=ar.unat;;
415         st8             [r2]=r4,8                // save User NaT bits for r16-r31
416         mov             ar.unat=r5                  // restore original unat
417         bsw.0;;
418
419 //save BRs
        // r2/r4/r6 are three staggered pointers (offsets 0/8/16) so
        // three st8s per group can go out without serialization.
420         add             r4=8,r2                  // duplicate r2 in r4
421         add             r6=2*8,r2                // duplicate r2 in r6
422
423         mov             r3=b0
424         mov             r5=b1
425         mov             r7=b2;;
426         st8             [r2]=r3,3*8
427         st8             [r4]=r5,3*8
428         st8             [r6]=r7,3*8;;
429
430         mov             r3=b3
431         mov             r5=b4
432         mov             r7=b5;;
433         st8             [r2]=r3,3*8
434         st8             [r4]=r5,3*8
435         st8             [r6]=r7,3*8;;
436
437         mov             r3=b6
438         mov             r5=b7;;
439         st8             [r2]=r3,2*8
440         st8             [r4]=r5,2*8;;
441
442 cSaveCRs:
443 // save CRs
444         add             r4=8,r2                  // duplicate r2 in r4
445         add             r6=2*8,r2                // duplicate r2 in r6
446
447         mov             r3=cr.dcr
448         mov             r5=cr.itm
449         mov             r7=cr.iva;;
450
451         st8             [r2]=r3,8*8
452         st8             [r4]=r5,3*8
453         st8             [r6]=r7,3*8;;            // 48 byte increments
454
455         mov             r3=cr.pta;;
456         st8             [r2]=r3,8*8;;            // 64 byte increments
457
458 // if PSR.ic=1, accessing the interruption registers causes an illegal
458 // operation fault, so skip them in that case (cr16-cr27 are only
458 // accessible with PSR.ic=0 -- TODO confirm against the SDM)
459         mov             r3=psr;;
460         tbit.nz.unc     p6,p0=r3,PSR_IC;;           // PSI Valid Log bit pos. test
461 (p6)    st8     [r2]=r0,9*8+160             // increment by 232 byte inc.
462 begin_skip_intr_regs:
463 (p6)    br              SkipIntrRegs;;
464
465         add             r4=8,r2                  // duplicate r2 in r4
466         add             r6=2*8,r2                // duplicate r2 in r6
467
468         mov             r3=cr.ipsr
469         mov             r5=cr.isr
470         mov             r7=r0;;
471         st8             [r2]=r3,3*8
472         st8             [r4]=r5,3*8
473         st8             [r6]=r7,3*8;;
474
475         mov             r3=cr.iip
476         mov             r5=cr.ifa
477         mov             r7=cr.itir;;
478         st8             [r2]=r3,3*8
479         st8             [r4]=r5,3*8
480         st8             [r6]=r7,3*8;;
481
482         mov             r3=cr.iipa
483         mov             r5=cr.ifs
484         mov             r7=cr.iim;;
485         st8             [r2]=r3,3*8
486         st8             [r4]=r5,3*8
487         st8             [r6]=r7,3*8;;
488
489         mov             r3=cr25;;                   // cr.iha
490         st8             [r2]=r3,160;;               // 160 byte increment
491
492 SkipIntrRegs:
493         st8             [r2]=r0,152;;               // another 152 byte increment.
494
495         add             r4=8,r2                     // duplicate r2 in r4
496         add             r6=2*8,r2                   // duplicate r2 in r6
497
498         mov             r3=cr.lid
499 //      mov             r5=cr.ivr                     // cr.ivr, don't read it
500         mov             r7=cr.tpr;;
501         st8             [r2]=r3,3*8
        // NOTE(review): r5 is deliberately NOT reloaded above (reading
        // cr.ivr has side effects), so the ivr slot receives whatever
        // stale value r5 holds here.
502         st8             [r4]=r5,3*8
503         st8             [r6]=r7,3*8;;
504
505         mov             r3=r0                       // cr.eoi => cr67
506         mov             r5=r0                       // cr.irr0 => cr68
507         mov             r7=r0;;                     // cr.irr1 => cr69
508         st8             [r2]=r3,3*8
509         st8             [r4]=r5,3*8
510         st8             [r6]=r7,3*8;;
511
512         mov             r3=r0                       // cr.irr2 => cr70
513         mov             r5=r0                       // cr.irr3 => cr71
514         mov             r7=cr.itv;;
515         st8             [r2]=r3,3*8
516         st8             [r4]=r5,3*8
517         st8             [r6]=r7,3*8;;
518
519         mov             r3=cr.pmv
520         mov             r5=cr.cmcv;;
521         st8             [r2]=r3,7*8
522         st8             [r4]=r5,7*8;;
523
524         mov             r3=r0                       // cr.lrr0 => cr80
525         mov             r5=r0;;                     // cr.lrr1 => cr81
526         st8             [r2]=r3,23*8
527         st8             [r4]=r5,23*8;;
528
529         adds            r2=25*8,r2;;
530
531 cSaveARs:
532 // save ARs
533         add             r4=8,r2                  // duplicate r2 in r4
534         add             r6=2*8,r2                // duplicate r2 in r6
535
536         mov             r3=ar.k0
537         mov             r5=ar.k1
538         mov             r7=ar.k2;;
539         st8             [r2]=r3,3*8
540         st8             [r4]=r5,3*8
541         st8             [r6]=r7,3*8;;
542
543         mov             r3=ar.k3
544         mov             r5=ar.k4
545         mov             r7=ar.k5;;
546         st8             [r2]=r3,3*8
547         st8             [r4]=r5,3*8
548         st8             [r6]=r7,3*8;;
549
550         mov             r3=ar.k6
551         mov             r5=ar.k7
552         mov             r7=r0;;                     // ar.kr8
553         st8             [r2]=r3,10*8
554         st8             [r4]=r5,10*8
555         st8             [r6]=r7,10*8;;           // increment pointers by 10*8 bytes
556
557         mov             r3=ar.rsc
558         mov             ar.rsc=r0                           // put RSE in enforced lazy mode
559         mov             r5=ar.bsp
560         ;;
561         mov             r7=ar.bspstore;;
562         st8             [r2]=r3,3*8
563         st8             [r4]=r5,3*8
564         st8             [r6]=r7,3*8;;
565
566         mov             r3=ar.rnat;;
567         st8             [r2]=r3,8*13             // increment by 13x8 bytes
568
569         mov             r3=ar.ccv;;
570         st8             [r2]=r3,8*4
571
572         mov             r3=ar.unat;;
573         st8             [r2]=r3,8*4
574
575         mov             r3=ar.fpsr;;
576         st8             [r2]=r3,8*4
577
578         mov             r3=ar.itc;;
579         st8             [r2]=r3,160                 // 160
580
581         mov             r3=ar.pfs;;
582         st8             [r2]=r3,8
583
584         mov             r3=ar.lc;;
585         st8             [r2]=r3,8
586
587         mov             r3=ar.ec;;
588         st8             [r2]=r3
589         add             r2=8*62,r2               //padding
590
591 // save RRs
        // dump the 8 region registers: rr[r4<<61] for r4 = 0..7
592         mov             ar.lc=0x08-1
593         movl            r4=0x00;;
594
595 cStRR:
596         dep.z           r5=r4,61,3;;             // r5 = region number in bits 63:61
597         mov             r3=rr[r5];;
598         st8             [r2]=r3,8
599         add             r4=1,r4
600         br.cloop.sptk.few       cStRR
601         ;;
602 end_os_mca_dump:
603         br      ia64_os_mca_done_dump;;
604
605 //EndStub//////////////////////////////////////////////////////////////////////
606
607
608 //++
609 // Name:
610 //       ia64_os_mca_proc_state_restore()
611 //
612 // Stub Description:
613 //
614 //       This is a stub to restore the saved processor state during MCHK
615 //
616 //--
617
// Restores the processor state written by ia64_os_mca_proc_state_dump,
// walking the same save-area layout.  Read-only or side-effecting
// registers (cr.isr, cr.ivr, irr0-3, ar.bsp, ar.itc, ...) are skipped;
// their loads still run so the pointers stay in step with the dump.
618 ia64_os_mca_proc_state_restore:
619
620 // Restore bank1 GR16-31
621         movl            r2=ia64_mca_proc_state_dump     // Convert virtual address
622         ;;                                              // of OS state dump area
623         DATA_VA_TO_PA(r2)                               // to physical address
624
625 restore_GRs:                                    // restore bank-1 GRs 16-31
626         bsw.1;;
627         add             r3=16*8,r2;;                // to get to NaT of GR 16-31
628         ld8             r3=[r3];;
629         mov             ar.unat=r3;;                // first restore NaT
630
631         ld8.fill        r16=[r2],8;;
632         ld8.fill        r17=[r2],8;;
633         ld8.fill        r18=[r2],8;;
634         ld8.fill        r19=[r2],8;;
635         ld8.fill        r20=[r2],8;;
636         ld8.fill        r21=[r2],8;;
637         ld8.fill        r22=[r2],8;;
638         ld8.fill        r23=[r2],8;;
639         ld8.fill        r24=[r2],8;;
640         ld8.fill        r25=[r2],8;;
641         ld8.fill        r26=[r2],8;;
642         ld8.fill        r27=[r2],8;;
643         ld8.fill        r28=[r2],8;;
644         ld8.fill        r29=[r2],8;;
645         ld8.fill        r30=[r2],8;;
646         ld8.fill        r31=[r2],8;;
647
648         ld8             r3=[r2],8;;              // increment to skip NaT
649         bsw.0;;
650
651 restore_BRs:
        // r2/r4/r6 are staggered pointers (offsets 0/8/16), mirroring
        // the three-wide store pattern used by the dump code.
652         add             r4=8,r2                  // duplicate r2 in r4
653         add             r6=2*8,r2;;              // duplicate r2 in r6
654
655         ld8             r3=[r2],3*8
656         ld8             r5=[r4],3*8
657         ld8             r7=[r6],3*8;;
658         mov             b0=r3
659         mov             b1=r5
660         mov             b2=r7;;
661
662         ld8             r3=[r2],3*8
663         ld8             r5=[r4],3*8
664         ld8             r7=[r6],3*8;;
665         mov             b3=r3
666         mov             b4=r5
667         mov             b5=r7;;
668
669         ld8             r3=[r2],2*8
670         ld8             r5=[r4],2*8;;
671         mov             b6=r3
672         mov             b7=r5;;
673
674 restore_CRs:
675         add             r4=8,r2                  // duplicate r2 in r4
676         add             r6=2*8,r2;;              // duplicate r2 in r6
677
678         ld8             r3=[r2],8*8
679         ld8             r5=[r4],3*8
680         ld8             r7=[r6],3*8;;            // 48 byte increments
681         mov             cr.dcr=r3
682         mov             cr.itm=r5
683         mov             cr.iva=r7;;
684
685         ld8             r3=[r2],8*8;;            // 64 byte increments
686 //      mov             cr.pta=r3
687
688
689 // if PSR.ic=1, writing the interruption registers causes an illegal
689 // operation fault, so skip them in that case (cr16-cr27 are only
689 // accessible with PSR.ic=0 -- TODO confirm against the SDM)
690         mov             r3=psr;;
691         tbit.nz.unc     p6,p0=r3,PSR_IC;;           // PSI Valid Log bit pos. test
        // NOTE(review): this is a *store* in the restore path -- it
        // zeroes the first skipped slot while advancing r2 by 232
        // bytes, mirroring the dump path.  Verify this is intended
        // rather than a plain pointer add.
692 (p6)    st8     [r2]=r0,9*8+160             // increment by 232 byte inc.
693
694 begin_rskip_intr_regs:
695 (p6)    br              rSkipIntrRegs;;
696
697         add             r4=8,r2                  // duplicate r2 in r4
698         add             r6=2*8,r2;;              // duplicate r2 in r6
699
700         ld8             r3=[r2],3*8
701         ld8             r5=[r4],3*8
702         ld8             r7=[r6],3*8;;
703         mov             cr.ipsr=r3
704 //      mov             cr.isr=r5                   // cr.isr is read only
705
706         ld8             r3=[r2],3*8
707         ld8             r5=[r4],3*8
708         ld8             r7=[r6],3*8;;
709         mov             cr.iip=r3
710         mov             cr.ifa=r5
711         mov             cr.itir=r7;;
712
713         ld8             r3=[r2],3*8
714         ld8             r5=[r4],3*8
715         ld8             r7=[r6],3*8;;
716         mov             cr.iipa=r3
717         mov             cr.ifs=r5
718         mov             cr.iim=r7
719
720         ld8             r3=[r2],160;;               // 160 byte increment
721         mov             cr.iha=r3
722
723 rSkipIntrRegs:
724         ld8             r3=[r2],152;;               // another 152 byte inc.
725
726         add             r4=8,r2                     // duplicate r2 in r4
727         add             r6=2*8,r2;;                 // duplicate r2 in r6
728
729         ld8             r3=[r2],8*3
730         ld8             r5=[r4],8*3
731         ld8             r7=[r6],8*3;;
732         mov             cr.lid=r3
733 //      mov             cr.ivr=r5                   // cr.ivr is read only
734         mov             cr.tpr=r7;;
735
736         ld8             r3=[r2],8*3
737         ld8             r5=[r4],8*3
738         ld8             r7=[r6],8*3;;
739 //      mov             cr.eoi=r3
740 //      mov             cr.irr0=r5                  // cr.irr0 is read only
741 //      mov             cr.irr1=r7;;                // cr.irr1 is read only
742
743         ld8             r3=[r2],8*3
744         ld8             r5=[r4],8*3
745         ld8             r7=[r6],8*3;;
746 //      mov             cr.irr2=r3                  // cr.irr2 is read only
747 //      mov             cr.irr3=r5                  // cr.irr3 is read only
748         mov             cr.itv=r7;;
749
750         ld8             r3=[r2],8*7
751         ld8             r5=[r4],8*7;;
752         mov             cr.pmv=r3
753         mov             cr.cmcv=r5;;
754
755         ld8             r3=[r2],8*23
756         ld8             r5=[r4],8*23;;
757         adds            r2=8*23,r2
758         adds            r4=8*23,r4;;
759 //      mov             cr.lrr0=r3
760 //      mov             cr.lrr1=r5
761
762         adds            r2=8*2,r2;;
763
764 restore_ARs:
765         add             r4=8,r2                  // duplicate r2 in r4
766         add             r6=2*8,r2;;              // duplicate r2 in r6
767
768         ld8             r3=[r2],3*8
769         ld8             r5=[r4],3*8
770         ld8             r7=[r6],3*8;;
771         mov             ar.k0=r3
772         mov             ar.k1=r5
773         mov             ar.k2=r7;;
774
775         ld8             r3=[r2],3*8
776         ld8             r5=[r4],3*8
777         ld8             r7=[r6],3*8;;
778         mov             ar.k3=r3
779         mov             ar.k4=r5
780         mov             ar.k5=r7;;
781
782         ld8             r3=[r2],10*8
783         ld8             r5=[r4],10*8
784         ld8             r7=[r6],10*8;;
785         mov             ar.k6=r3
786         mov             ar.k7=r5
787         ;;
788
        // r3 = saved ar.rsc; kept live across the rnat restore below
        // and written back at line 801 once RSE state is consistent.
789         ld8             r3=[r2],3*8
790         ld8             r5=[r4],3*8
791         ld8             r7=[r6],3*8;;
792 //      mov             ar.rsc=r3
793 //      mov             ar.bsp=r5                   // ar.bsp is read only
794         mov             ar.rsc=r0                           // make sure that RSE is in enforced lazy mode
795         ;;
796         mov             ar.bspstore=r7;;
797
798         ld8             r9=[r2],8*13;;
799         mov             ar.rnat=r9
800
801         mov             ar.rsc=r3
802         ld8             r3=[r2],8*4;;
803         mov             ar.ccv=r3
804
805         ld8             r3=[r2],8*4;;
806         mov             ar.unat=r3
807
808         ld8             r3=[r2],8*4;;
809         mov             ar.fpsr=r3
810
811         ld8             r3=[r2],160;;               // 160
812 //      mov             ar.itc=r3
813
814         ld8             r3=[r2],8;;
815         mov             ar.pfs=r3
816
817         ld8             r3=[r2],8;;
818         mov             ar.lc=r3
819
820         ld8             r3=[r2];;
821         mov             ar.ec=r3
822         add             r2=8*62,r2;;             // padding
823
824 restore_RRs:
        // restore the 8 region registers: rr[r4<<61] for r4 = 0..7
825         mov             r5=ar.lc
826         mov             ar.lc=0x08-1
827         movl            r4=0x00;;
828 cStRRr:
829         dep.z           r7=r4,61,3
830         ld8             r3=[r2],8;;
831         mov             rr[r7]=r3                   // what are its access privileges?
832         add             r4=1,r4
833         br.cloop.sptk.few       cStRRr
834         ;;
835         mov             ar.lc=r5
836         ;;
837 end_os_mca_restore:
838         br      ia64_os_mca_done_restore;;
839
840 //EndStub//////////////////////////////////////////////////////////////////////
841
842
843 // ok, the issue here is that we need to save state information so
844 // it can be usable by the kernel debugger and show-regs routines.
845 // In order to do this, our best bet is to save the current state (plus
846 // the state information obtained from the MIN_STATE_AREA) into a pt_regs
847 // format.  This way we can pass it on in a usable format.
848 //
849
850 //
851 // SAL to OS entry point for INIT on the monarch processor
852 // This has been defined for registration purposes with SAL
853 // as a part of ia64_mca_init.
854 //
855 // When we get here, the following registers have been
856 // set by the SAL for our use
857 //
858 //              1. GR1 = OS INIT GP
859 //              2. GR8 = PAL_PROC physical address
860 //              3. GR9 = SAL_PROC physical address
861 //              4. GR10 = SAL GP (physical)
862 //              5. GR11 = Init Reason
863 //                      0 = Received INIT for event other than crash dump switch
864 //                      1 = Received wakeup at the end of an OS_MCA corrected machine check
865 //                      2 = Received INIT due to CrashDump switch assertion
866 //
867 //              6. GR12 = Return address to location within SAL_INIT procedure
868
869
870 GLOBAL_ENTRY(ia64_monarch_init_handler)
871
        // Save the SAL INIT handoff registers, build a pt_regs frame
        // via the minstate machinery, switch to virtual mode with an
        // rfi, then hand pt_regs + switch_stack to the C INIT handler.
872         // stash the information the SAL passed to os
873         SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(r2)
874         ;;
875         SAVE_MIN_WITH_COVER
876         ;;
877         mov r8=cr.ifa
878         mov r9=cr.isr
879         adds r3=8,r2                            // set up second base pointer
880         ;;
881         SAVE_REST
882
883 // ok, enough should be saved at this point to be dangerous, and supply
884 // information for a dump
885 // We need to switch to Virtual mode before hitting the C functions.
886
887         movl    r2=IA64_PSR_IT|IA64_PSR_IC|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_DFH|IA64_PSR_BN
888         mov     r3=psr  // get the current psr, minimum enabled at this point
889         ;;
890         or      r2=r2,r3
891         ;;
892         movl    r3=IVirtual_Switch
893         ;;
894         mov     cr.iip=r3       // short return to set the appropriate bits
895         mov     cr.ipsr=r2      // need to do an rfi to set appropriate bits
896         ;;
897         rfi
898         ;;
899 IVirtual_Switch:
900         //
901         // We should now be running virtual
902         //
903         // Let's call the C handler to get the rest of the state info
904         //
905         alloc r14=ar.pfs,0,0,2,0                // now it's safe (must be first in insn group!)
906         ;;
907         adds out0=16,sp                         // out0 = pointer to pt_regs
908         ;;
909         DO_SAVE_SWITCH_STACK
910         adds out1=16,sp                         // out1 = pointer to switch_stack
911
912         br.call.sptk.many rp=ia64_init_handler
913 .ret1:
914
        // Deliberate: never return to SAL after INIT -- park the
        // monarch CPU in a tight self-branch.
915 return_from_init:
916         br.sptk return_from_init
917 END(ia64_monarch_init_handler)
918
919 //
920 // SAL to OS entry point for INIT on the slave processor
921 // This has been defined for registration purposes with SAL
922 // as a part of ia64_mca_init.
923 //
924
925 GLOBAL_ENTRY(ia64_slave_init_handler)
        // Deliberate: slave (non-monarch) CPUs simply spin forever on
        // INIT; only the monarch processes the event.
926 1:      br.sptk 1b
927 END(ia64_slave_init_handler)