//
// assembly portion of the IA64 MCA handling
//
// Mods by cfleck to integrate into kernel build
// 00/03/15 davidm Added various stop bits to get a clean compile
//
// 00/03/29 cfleck Added code to save INIT handoff state in pt_regs format, switch to temp
//                 kstack, switch modes, jump to C INIT handler
//
// 02/01/04 J.Hall <jenna.s.hall@intel.com>
//                 Before entering virtual mode code:
//                 1. Check for TLB CPU error
//                 2. Restore current thread pointer to kr6
//                 3. Move stack ptr 16 bytes to conform to C calling convention
//
#include <linux/config.h>
#include <linux/threads.h>

#include <asm/asmmacro.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/mca_asm.h>
#include <asm/mca.h>

/*
 * When we get a machine check, the kernel stack pointer is no longer
 * valid, so we need to set a new stack pointer.
 */
#define MINSTATE_PHYS   /* Make sure stack access is physical for MINSTATE */

/*
 * Needed for return context to SAL
 */
#define IA64_MCA_SAME_CONTEXT   0
#define IA64_MCA_COLD_BOOT      -2

#include "minstate.h"

/*
 * SAL_TO_OS_MCA_HANDOFF_STATE (SAL 3.0 spec)
 *              1. GR1 = OS GP
 *              2. GR8 = PAL_PROC physical address
 *              3. GR9 = SAL_PROC physical address
 *              4. GR10 = SAL GP (physical)
 *              5. GR11 = Rendez state
 *              6. GR12 = Return address to location within SAL_CHECK
 */
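/*
 * Note: the macro below also stashes GR17/GR18.  Judging from how this
 * handler reads them back later (the "+56" load in ia64_os_mca_done_dump),
 * they are assumed to carry the PAL min-state save area pointer and the
 * processor state parameter; see ia64_mca_sal_to_os_state_t in
 * include/asm/mca.h for the exact layout.
 */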
#define SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(_tmp)          \
        movl    _tmp=ia64_sal_to_os_handoff_state;;     \
        DATA_VA_TO_PA(_tmp);;                           \
        st8     [_tmp]=r1,0x08;;                        \
        st8     [_tmp]=r8,0x08;;                        \
        st8     [_tmp]=r9,0x08;;                        \
        st8     [_tmp]=r10,0x08;;                       \
        st8     [_tmp]=r11,0x08;;                       \
        st8     [_tmp]=r12,0x08;;                       \
        st8     [_tmp]=r17,0x08;;                       \
        st8     [_tmp]=r18,0x08

/*
 * OS_MCA_TO_SAL_HANDOFF_STATE (SAL 3.0 spec)
 * (p6) is executed if we never entered virtual mode (TLB error)
 * (p7) is executed if we entered virtual mode as expected (normal case)
 *      1. GR8 = OS_MCA return status
 *      2. GR9 = SAL GP (physical)
 *      3. GR10 = 0/1 returning same/new context
 *      4. GR22 = New min state save area pointer
 *      returns ptr to SAL rtn save loc in _tmp
 */
#define OS_MCA_TO_SAL_HANDOFF_STATE_RESTORE(_tmp)       \
        movl    _tmp=ia64_os_to_sal_handoff_state;;     \
        DATA_VA_TO_PA(_tmp);;                           \
        ld8     r8=[_tmp],0x08;;                        \
        ld8     r9=[_tmp],0x08;;                        \
        ld8     r10=[_tmp],0x08;;                       \
        ld8     r22=[_tmp],0x08;;
        // now _tmp is pointing to SAL rtn save location

/*
 * COLD_BOOT_HANDOFF_STATE() sets ia64_mca_os_to_sal_state
 *      imots_os_status=IA64_MCA_COLD_BOOT
 *      imots_sal_gp=SAL GP
 *      imots_context=IA64_MCA_SAME_CONTEXT
 *      imots_new_min_state=Min state save area pointer
 *      imots_sal_check_ra=Return address to location within SAL_CHECK
 *
 */
#define COLD_BOOT_HANDOFF_STATE(sal_to_os_handoff,os_to_sal_handoff,tmp)\
        movl    tmp=IA64_MCA_COLD_BOOT;                                 \
        movl    sal_to_os_handoff=__pa(ia64_sal_to_os_handoff_state);   \
        movl    os_to_sal_handoff=__pa(ia64_os_to_sal_handoff_state);;  \
        st8     [os_to_sal_handoff]=tmp,8;;                             \
        ld8     tmp=[sal_to_os_handoff],48;;                            \
        st8     [os_to_sal_handoff]=tmp,8;;                             \
        movl    tmp=IA64_MCA_SAME_CONTEXT;;                             \
        st8     [os_to_sal_handoff]=tmp,8;;                             \
        ld8     tmp=[sal_to_os_handoff],-8;;                            \
        st8     [os_to_sal_handoff]=tmp,8;;                             \
        ld8     tmp=[sal_to_os_handoff];;                               \
        st8     [os_to_sal_handoff]=tmp;;

        .global ia64_os_mca_dispatch
        .global ia64_os_mca_dispatch_end
        .global ia64_sal_to_os_handoff_state
        .global ia64_os_to_sal_handoff_state
        .global ia64_mca_proc_state_dump
        .global ia64_mca_stack
        .global ia64_mca_stackframe
        .global ia64_mca_bspstore
        .global ia64_init_stack

        .text
        .align 16

ia64_os_mca_dispatch:

        // Serialize all MCA processing
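        // (ia64_mca_serialize acts as a simple spinlock: xchg8 atomically
        //  swaps a 1 into it, and a CPU that gets back a non-zero old value
        //  keeps spinning.  The lock is released with the st8.rel just
        //  before the final branch back to SAL.)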
        movl    r2=ia64_mca_serialize
        mov     r3=1;;
        DATA_VA_TO_PA(r2);;
ia64_os_mca_spin:
        xchg8   r4=[r2],r3;;
        cmp.ne  p6,p0=r4,r0
(p6)    br ia64_os_mca_spin

        // Save the SAL to OS MCA handoff state as defined
        // by SAL SPEC 3.0
        // NOTE : The order in which the state gets saved
        //        is dependent on the way the C-structure
        //        for ia64_mca_sal_to_os_state_t has been
        //        defined in include/asm/mca.h
        SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(r2)
        ;;

        // LOG PROCESSOR STATE INFO FROM HERE ON..
begin_os_mca_dump:
        br      ia64_os_mca_proc_state_dump;;

ia64_os_mca_done_dump:

        movl r16=__pa(ia64_sal_to_os_handoff_state)+56
        ;;
        ld8 r18=[r16]           // Get processor state parameter on existing PALE_CHECK.
        ;;
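        // Bit 60 of the processor state parameter is used here as the TLB
        // error indication (the "TLB CPU error" check noted in the file
        // header); when it is clear, the purge/reload below is skipped.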
        tbit.nz p6,p7=r18,60
(p7)    br.spnt done_tlb_purge_and_reload

        // The following code purges TC and TR entries, then reloads the TR entries.
        // Purge percpu data TC entries.
begin_tlb_purge_and_reload:
        mov r16=cr.lid
        movl r17=__pa(ia64_mca_tlb_list) // Physical address of ia64_mca_tlb_list
        mov r19=0
        mov r20=NR_CPUS
        ;;
1:      cmp.eq p6,p7=r19,r20
(p6)    br.spnt.few err
        ld8 r18=[r17],IA64_MCA_TLB_INFO_SIZE
        ;;
        add r19=1,r19
        cmp.eq p6,p7=r18,r16
(p7)    br.sptk.few 1b
        ;;
        adds r17=-IA64_MCA_TLB_INFO_SIZE,r17
        ;;
        mov r23=r17             // save current ia64_mca_percpu_info addr pointer.
        adds r17=16,r17
        ;;
        ld8 r18=[r17],8         // r18=ptce_base
        ;;
        ld4 r19=[r17],4         // r19=ptce_count[0]
        ;;
        ld4 r20=[r17],4         // r20=ptce_count[1]
        ;;
        ld4 r21=[r17],4         // r21=ptce_stride[0]
        mov r24=0
        ;;
        ld4 r22=[r17],4         // r22=ptce_stride[1]
        adds r20=-1,r20
        ;;
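        // The ptc.e purge is a two-level loop: r19/r20 hold the outer/inner
        // trip counts and r21/r22 the outer/inner strides applied to the
        // base address in r18 (values presumably gathered from
        // PAL_PTCE_INFO and cached in ia64_mca_tlb_list).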
2:
        cmp.ltu p6,p7=r24,r19
(p7)    br.cond.dpnt.few 4f
        mov ar.lc=r20
3:
        ptc.e r18
        ;;
        add r18=r22,r18
        br.cloop.sptk.few 3b
        ;;
        add r18=r21,r18
        add r24=1,r24
        ;;
        br.sptk.few 2b
4:
        srlz.i                  // srlz.i implies srlz.d
        ;;

        // Now purge addresses formerly mapped by TR registers
        // 1. Purge ITR&DTR for kernel.
        movl r16=KERNEL_START
        mov r18=KERNEL_TR_PAGE_SHIFT<<2
        ;;
        ptr.i r16, r18
        ptr.d r16, r18
        ;;
        srlz.i
        ;;
        srlz.d
        ;;
        // 2. Purge DTR for PERCPU data.
        movl r16=PERCPU_ADDR
        mov r18=PAGE_SHIFT<<2
        ;;
        ptr.d r16,r18
        ;;
        srlz.d
        ;;
        // 3. Purge ITR for PAL code.
        adds r17=48,r23
        ;;
        ld8 r16=[r17]
        mov r18=IA64_GRANULE_SHIFT<<2
        ;;
        ptr.i r16,r18
        ;;
        srlz.i
        ;;
        // 4. Purge DTR for stack.
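        // (IA64_KR(CURRENT_STACK) holds the granule number of the current
        //  kernel stack; shifting it by IA64_GRANULE_SHIFT and adding
        //  PAGE_OFFSET rebuilds the virtual address covered by the
        //  IA64_TR_CURRENT_STACK translation, both here and in the reload
        //  further down.)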
        mov r16=IA64_KR(CURRENT_STACK)
        ;;
        shl r16=r16,IA64_GRANULE_SHIFT
        movl r19=PAGE_OFFSET
        ;;
        add r16=r19,r16
        mov r18=IA64_GRANULE_SHIFT<<2
        ;;
        ptr.d r16,r18
        ;;
        srlz.i
        ;;
        // Finally reload the TR registers.
        // 1. Reload DTR/ITR registers for kernel.
        mov r18=KERNEL_TR_PAGE_SHIFT<<2
        movl r17=KERNEL_START
        ;;
        mov cr.itir=r18
        mov cr.ifa=r17
        mov r16=IA64_TR_KERNEL
        movl r18=((1 << KERNEL_TR_PAGE_SHIFT) | PAGE_KERNEL)
        ;;
        itr.i itr[r16]=r18
        ;;
        itr.d dtr[r16]=r18
        ;;
        srlz.i
        srlz.d
        ;;
        // 2. Reload DTR register for PERCPU data.
        adds r17=8,r23
        movl r16=PERCPU_ADDR            // vaddr
        movl r18=PAGE_SHIFT<<2
        ;;
        mov cr.itir=r18
        mov cr.ifa=r16
        ;;
        ld8 r18=[r17]                   // pte
        mov r16=IA64_TR_PERCPU_DATA;
        ;;
        itr.d dtr[r16]=r18
        ;;
        srlz.d
        ;;
        // 3. Reload ITR for PAL code.
        adds r17=40,r23
        ;;
        ld8 r18=[r17],8                 // pte
        ;;
        ld8 r16=[r17]                   // vaddr
        mov r19=IA64_GRANULE_SHIFT<<2
        ;;
        mov cr.itir=r19
        mov cr.ifa=r16
        mov r20=IA64_TR_PALCODE
        ;;
        itr.i itr[r20]=r18
        ;;
        srlz.i
        ;;
        // 4. Reload DTR for stack.
        mov r16=IA64_KR(CURRENT_STACK)
        ;;
        shl r16=r16,IA64_GRANULE_SHIFT
        movl r19=PAGE_OFFSET
        ;;
        add r18=r19,r16
        movl r20=PAGE_KERNEL
        ;;
        add r16=r20,r16
        mov r19=IA64_GRANULE_SHIFT<<2
        ;;
        mov cr.itir=r19
        mov cr.ifa=r18
        mov r20=IA64_TR_CURRENT_STACK
        ;;
        itr.d dtr[r20]=r16
        ;;
        srlz.d
        ;;
        br.sptk.many done_tlb_purge_and_reload
err:
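        // We get here when this CPU's cr.lid was not found in
        // ia64_mca_tlb_list, so its translations cannot be rebuilt;
        // hand a cold-boot request back to SAL.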
        COLD_BOOT_HANDOFF_STATE(r20,r21,r22)
        br.sptk.many ia64_os_mca_done_restore

done_tlb_purge_and_reload:

        // Setup new stack frame for OS_MCA handling
        movl    r2=ia64_mca_bspstore;;  // local bspstore area location in r2
        DATA_VA_TO_PA(r2);;
        movl    r3=ia64_mca_stackframe;; // save stack frame to memory in r3
        DATA_VA_TO_PA(r3);;
        rse_switch_context(r6,r3,r2);;  // RSC management in this new context
        movl    r12=ia64_mca_stack
        mov     r2=8*1024;;             // stack size must be same as C array
        add     r12=r2,r12;;            // stack base @ bottom of array
        adds    r12=-16,r12;;           // allow 16 bytes of scratch
                                        // (C calling convention)
        DATA_VA_TO_PA(r12);;

        // Enter virtual mode from physical mode
        VIRTUAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_begin, r4)
ia64_os_mca_virtual_begin:

        // Call virtual mode handler
        movl            r2=ia64_mca_ucmc_handler;;
        mov             b6=r2;;
        br.call.sptk.many    b0=b6;;
.ret0:
        // Revert back to physical mode before going back to SAL
        PHYSICAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_end, r4)
ia64_os_mca_virtual_end:

        // restore the original stack frame here
        movl    r2=ia64_mca_stackframe  // restore stack frame from memory at r2
        ;;
        DATA_VA_TO_PA(r2)
        movl    r4=IA64_PSR_MC
        ;;
        rse_return_context(r4,r3,r2)    // switch from interrupt context for RSE

        // let us restore all the registers from our PSI structure
        mov     r8=gp
        ;;
begin_os_mca_restore:
        br      ia64_os_mca_proc_state_restore;;

ia64_os_mca_done_restore:
        OS_MCA_TO_SAL_HANDOFF_STATE_RESTORE(r2);;
        // branch back to SAL_CHECK
        ld8             r3=[r2];;
        mov             b0=r3;;         // SAL_CHECK return address

        // release lock
        movl            r3=ia64_mca_serialize;;
        DATA_VA_TO_PA(r3);;
        st8.rel         [r3]=r0

        br              b0
        ;;
ia64_os_mca_dispatch_end:
//EndMain//////////////////////////////////////////////////////////////////////


//++
// Name:
//      ia64_os_mca_proc_state_dump()
//
// Stub Description:
//
//       This stub dumps the processor state during MCHK to a data area
//
//--

ia64_os_mca_proc_state_dump:
// Save bank 1 GRs 16-31 which will be used by c-language code when we switch
//  to virtual addressing mode.
        movl            r2=ia64_mca_proc_state_dump;;           // Os state dump area
        DATA_VA_TO_PA(r2)                   // convert to physical address

// save ar.NaT
        mov             r5=ar.unat                  // ar.unat

// save banked GRs 16-31 along with NaT bits
        bsw.1;;
        st8.spill       [r2]=r16,8;;
        st8.spill       [r2]=r17,8;;
        st8.spill       [r2]=r18,8;;
        st8.spill       [r2]=r19,8;;
        st8.spill       [r2]=r20,8;;
        st8.spill       [r2]=r21,8;;
        st8.spill       [r2]=r22,8;;
        st8.spill       [r2]=r23,8;;
        st8.spill       [r2]=r24,8;;
        st8.spill       [r2]=r25,8;;
        st8.spill       [r2]=r26,8;;
        st8.spill       [r2]=r27,8;;
        st8.spill       [r2]=r28,8;;
        st8.spill       [r2]=r29,8;;
        st8.spill       [r2]=r30,8;;
        st8.spill       [r2]=r31,8;;

        mov             r4=ar.unat;;
        st8             [r2]=r4,8                // save User NaT bits for r16-r31
        mov             ar.unat=r5                  // restore original unat
        bsw.0;;

//save BRs
        add             r4=8,r2                  // duplicate r2 in r4
        add             r6=2*8,r2                // duplicate r2 in r6

        mov             r3=b0
        mov             r5=b1
        mov             r7=b2;;
        st8             [r2]=r3,3*8
        st8             [r4]=r5,3*8
        st8             [r6]=r7,3*8;;

        mov             r3=b3
        mov             r5=b4
        mov             r7=b5;;
        st8             [r2]=r3,3*8
        st8             [r4]=r5,3*8
        st8             [r6]=r7,3*8;;

        mov             r3=b6
        mov             r5=b7;;
        st8             [r2]=r3,2*8
        st8             [r4]=r5,2*8;;

cSaveCRs:
// save CRs
        add             r4=8,r2                  // duplicate r2 in r4
        add             r6=2*8,r2                // duplicate r2 in r6

        mov             r3=cr.dcr
        mov             r5=cr.itm
        mov             r7=cr.iva;;

        st8             [r2]=r3,8*8
        st8             [r4]=r5,3*8
        st8             [r6]=r7,3*8;;            // 48 byte increments

        mov             r3=cr.pta;;
        st8             [r2]=r3,8*8;;            // 64 byte increments

// if PSR.ic=1, accessing interruption registers causes an illegal operation fault
        mov             r3=psr;;
        tbit.nz.unc     p6,p0=r3,PSR_IC;;           // PSI Valid Log bit pos. test
(p6)    st8     [r2]=r0,9*8+160             // increment by 232 bytes
begin_skip_intr_regs:
(p6)    br              SkipIntrRegs;;

        add             r4=8,r2                  // duplicate r2 in r4
        add             r6=2*8,r2                // duplicate r2 in r6

        mov             r3=cr.ipsr
        mov             r5=cr.isr
        mov             r7=r0;;
        st8             [r2]=r3,3*8
        st8             [r4]=r5,3*8
        st8             [r6]=r7,3*8;;

        mov             r3=cr.iip
        mov             r5=cr.ifa
        mov             r7=cr.itir;;
        st8             [r2]=r3,3*8
        st8             [r4]=r5,3*8
        st8             [r6]=r7,3*8;;

        mov             r3=cr.iipa
        mov             r5=cr.ifs
        mov             r7=cr.iim;;
        st8             [r2]=r3,3*8
        st8             [r4]=r5,3*8
        st8             [r6]=r7,3*8;;

        mov             r3=cr25;;                   // cr.iha
        st8             [r2]=r3,160;;               // 160 byte increment

SkipIntrRegs:
        st8             [r2]=r0,152;;               // another 152 byte increment

        add             r4=8,r2                     // duplicate r2 in r4
        add             r6=2*8,r2                   // duplicate r2 in r6

        mov             r3=cr.lid
//      mov             r5=cr.ivr                     // cr.ivr, don't read it
        mov             r7=cr.tpr;;
        st8             [r2]=r3,3*8
        st8             [r4]=r5,3*8
        st8             [r6]=r7,3*8;;

        mov             r3=r0                       // cr.eoi => cr67
        mov             r5=r0                       // cr.irr0 => cr68
        mov             r7=r0;;                     // cr.irr1 => cr69
        st8             [r2]=r3,3*8
        st8             [r4]=r5,3*8
        st8             [r6]=r7,3*8;;

        mov             r3=r0                       // cr.irr2 => cr70
        mov             r5=r0                       // cr.irr3 => cr71
        mov             r7=cr.itv;;
        st8             [r2]=r3,3*8
        st8             [r4]=r5,3*8
        st8             [r6]=r7,3*8;;

        mov             r3=cr.pmv
        mov             r5=cr.cmcv;;
        st8             [r2]=r3,7*8
        st8             [r4]=r5,7*8;;

        mov             r3=r0                       // cr.lrr0 => cr80
        mov             r5=r0;;                     // cr.lrr1 => cr81
        st8             [r2]=r3,23*8
        st8             [r4]=r5,23*8;;

        adds            r2=25*8,r2;;

cSaveARs:
// save ARs
        add             r4=8,r2                  // duplicate r2 in r4
        add             r6=2*8,r2                // duplicate r2 in r6

        mov             r3=ar.k0
        mov             r5=ar.k1
        mov             r7=ar.k2;;
        st8             [r2]=r3,3*8
        st8             [r4]=r5,3*8
        st8             [r6]=r7,3*8;;

        mov             r3=ar.k3
        mov             r5=ar.k4
        mov             r7=ar.k5;;
        st8             [r2]=r3,3*8
        st8             [r4]=r5,3*8
        st8             [r6]=r7,3*8;;

        mov             r3=ar.k6
        mov             r5=ar.k7
        mov             r7=r0;;                     // ar.kr8
        st8             [r2]=r3,10*8
        st8             [r4]=r5,10*8
        st8             [r6]=r7,10*8;;           // increment by 10*8 bytes

        mov             r3=ar.rsc
        mov             ar.rsc=r0                           // put RSE in enforced lazy mode
        mov             r5=ar.bsp
        ;;
        mov             r7=ar.bspstore;;
        st8             [r2]=r3,3*8
        st8             [r4]=r5,3*8
        st8             [r6]=r7,3*8;;

        mov             r3=ar.rnat;;
        st8             [r2]=r3,8*13             // increment by 13x8 bytes

        mov             r3=ar.ccv;;
        st8             [r2]=r3,8*4

        mov             r3=ar.unat;;
        st8             [r2]=r3,8*4

        mov             r3=ar.fpsr;;
        st8             [r2]=r3,8*4

        mov             r3=ar.itc;;
        st8             [r2]=r3,160                 // 160

        mov             r3=ar.pfs;;
        st8             [r2]=r3,8

        mov             r3=ar.lc;;
        st8             [r2]=r3,8

        mov             r3=ar.ec;;
        st8             [r2]=r3
        add             r2=8*62,r2               // padding

// save RRs
        mov             ar.lc=0x08-1
        movl            r4=0x00;;

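        // Save the eight region registers.  dep.z places the region number
        // in bits 63:61 of r5, giving an address whose VRN selects the RR
        // that "mov r3=rr[r5]" reads.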
cStRR:
        dep.z           r5=r4,61,3;;
        mov             r3=rr[r5];;
        st8             [r2]=r3,8
        add             r4=1,r4
        br.cloop.sptk.few       cStRR
        ;;
end_os_mca_dump:
        br      ia64_os_mca_done_dump;;

//EndStub//////////////////////////////////////////////////////////////////////

//++
// Name:
//       ia64_os_mca_proc_state_restore()
//
// Stub Description:
//
//       This is a stub to restore the saved processor state during MCHK
//
//--

ia64_os_mca_proc_state_restore:

// Restore bank1 GR16-31
        movl            r2=ia64_mca_proc_state_dump     // Convert virtual address
        ;;                                              // of OS state dump area
        DATA_VA_TO_PA(r2)                               // to physical address

restore_GRs:                                    // restore bank-1 GRs 16-31
        bsw.1;;
        add             r3=16*8,r2;;                // to get to NaT of GR 16-31
        ld8             r3=[r3];;
        mov             ar.unat=r3;;                // first restore NaT

        ld8.fill        r16=[r2],8;;
        ld8.fill        r17=[r2],8;;
        ld8.fill        r18=[r2],8;;
        ld8.fill        r19=[r2],8;;
        ld8.fill        r20=[r2],8;;
        ld8.fill        r21=[r2],8;;
        ld8.fill        r22=[r2],8;;
        ld8.fill        r23=[r2],8;;
        ld8.fill        r24=[r2],8;;
        ld8.fill        r25=[r2],8;;
        ld8.fill        r26=[r2],8;;
        ld8.fill        r27=[r2],8;;
        ld8.fill        r28=[r2],8;;
        ld8.fill        r29=[r2],8;;
        ld8.fill        r30=[r2],8;;
        ld8.fill        r31=[r2],8;;

        ld8             r3=[r2],8;;              // increment to skip NaT
        bsw.0;;

restore_BRs:
        add             r4=8,r2                  // duplicate r2 in r4
        add             r6=2*8,r2;;              // duplicate r2 in r6

        ld8             r3=[r2],3*8
        ld8             r5=[r4],3*8
        ld8             r7=[r6],3*8;;
        mov             b0=r3
        mov             b1=r5
        mov             b2=r7;;

        ld8             r3=[r2],3*8
        ld8             r5=[r4],3*8
        ld8             r7=[r6],3*8;;
        mov             b3=r3
        mov             b4=r5
        mov             b5=r7;;

        ld8             r3=[r2],2*8
        ld8             r5=[r4],2*8;;
        mov             b6=r3
        mov             b7=r5;;

restore_CRs:
        add             r4=8,r2                  // duplicate r2 in r4
        add             r6=2*8,r2;;              // duplicate r2 in r6

        ld8             r3=[r2],8*8
        ld8             r5=[r4],3*8
        ld8             r7=[r6],3*8;;            // 48 byte increments
        mov             cr.dcr=r3
        mov             cr.itm=r5
        mov             cr.iva=r7;;

        ld8             r3=[r2],8*8;;            // 64 byte increments
//      mov             cr.pta=r3


// if PSR.ic=1, accessing interruption registers causes an illegal operation fault
        mov             r3=psr;;
        tbit.nz.unc     p6,p0=r3,PSR_IC;;           // PSI Valid Log bit pos. test
(p6)    st8     [r2]=r0,9*8+160             // increment by 232 bytes

begin_rskip_intr_regs:
(p6)    br              rSkipIntrRegs;;

        add             r4=8,r2                  // duplicate r2 in r4
        add             r6=2*8,r2;;              // duplicate r2 in r6

        ld8             r3=[r2],3*8
        ld8             r5=[r4],3*8
        ld8             r7=[r6],3*8;;
        mov             cr.ipsr=r3
//      mov             cr.isr=r5                   // cr.isr is read only

        ld8             r3=[r2],3*8
        ld8             r5=[r4],3*8
        ld8             r7=[r6],3*8;;
        mov             cr.iip=r3
        mov             cr.ifa=r5
        mov             cr.itir=r7;;

        ld8             r3=[r2],3*8
        ld8             r5=[r4],3*8
        ld8             r7=[r6],3*8;;
        mov             cr.iipa=r3
        mov             cr.ifs=r5
        mov             cr.iim=r7

        ld8             r3=[r2],160;;               // 160 byte increment
        mov             cr.iha=r3

rSkipIntrRegs:
        ld8             r3=[r2],152;;               // another 152 byte increment

        add             r4=8,r2                     // duplicate r2 in r4
        add             r6=2*8,r2;;                 // duplicate r2 in r6

        ld8             r3=[r2],8*3
        ld8             r5=[r4],8*3
        ld8             r7=[r6],8*3;;
        mov             cr.lid=r3
//      mov             cr.ivr=r5                   // cr.ivr is read only
        mov             cr.tpr=r7;;

        ld8             r3=[r2],8*3
        ld8             r5=[r4],8*3
        ld8             r7=[r6],8*3;;
//      mov             cr.eoi=r3
//      mov             cr.irr0=r5                  // cr.irr0 is read only
//      mov             cr.irr1=r7;;                // cr.irr1 is read only

        ld8             r3=[r2],8*3
        ld8             r5=[r4],8*3
        ld8             r7=[r6],8*3;;
//      mov             cr.irr2=r3                  // cr.irr2 is read only
//      mov             cr.irr3=r5                  // cr.irr3 is read only
        mov             cr.itv=r7;;

        ld8             r3=[r2],8*7
        ld8             r5=[r4],8*7;;
        mov             cr.pmv=r3
        mov             cr.cmcv=r5;;

        ld8             r3=[r2],8*23
        ld8             r5=[r4],8*23;;
        adds            r2=8*23,r2
        adds            r4=8*23,r4;;
//      mov             cr.lrr0=r3
//      mov             cr.lrr1=r5

        adds            r2=8*2,r2;;

restore_ARs:
        add             r4=8,r2                  // duplicate r2 in r4
        add             r6=2*8,r2;;              // duplicate r2 in r6

        ld8             r3=[r2],3*8
        ld8             r5=[r4],3*8
        ld8             r7=[r6],3*8;;
        mov             ar.k0=r3
        mov             ar.k1=r5
        mov             ar.k2=r7;;

        ld8             r3=[r2],3*8
        ld8             r5=[r4],3*8
        ld8             r7=[r6],3*8;;
        mov             ar.k3=r3
        mov             ar.k4=r5
        mov             ar.k5=r7;;

        ld8             r3=[r2],10*8
        ld8             r5=[r4],10*8
        ld8             r7=[r6],10*8;;
        mov             ar.k6=r3
        mov             ar.k7=r5
        ;;

        ld8             r3=[r2],3*8
        ld8             r5=[r4],3*8
        ld8             r7=[r6],3*8;;
//      mov             ar.rsc=r3
//      mov             ar.bsp=r5                   // ar.bsp is read only
        mov             ar.rsc=r0                           // make sure that RSE is in enforced lazy mode
        ;;
        mov             ar.bspstore=r7;;

        ld8             r9=[r2],8*13;;
        mov             ar.rnat=r9

        mov             ar.rsc=r3
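        // (r3 still holds the ar.rsc value loaded from the dump area above,
        //  so this puts the RSE back into its saved mode now that
        //  ar.bspstore and ar.rnat have been rewritten.)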
        ld8             r3=[r2],8*4;;
        mov             ar.ccv=r3

        ld8             r3=[r2],8*4;;
        mov             ar.unat=r3

        ld8             r3=[r2],8*4;;
        mov             ar.fpsr=r3

        ld8             r3=[r2],160;;               // 160
//      mov             ar.itc=r3

        ld8             r3=[r2],8;;
        mov             ar.pfs=r3

        ld8             r3=[r2],8;;
        mov             ar.lc=r3

        ld8             r3=[r2];;
        mov             ar.ec=r3
        add             r2=8*62,r2;;             // padding

restore_RRs:
        mov             r5=ar.lc
        mov             ar.lc=0x08-1
        movl            r4=0x00;;
cStRRr:
        dep.z           r7=r4,61,3
        ld8             r3=[r2],8;;
        mov             rr[r7]=r3                   // what are its access privileges?
        add             r4=1,r4
        br.cloop.sptk.few       cStRRr
        ;;
        mov             ar.lc=r5
        ;;
end_os_mca_restore:
        br      ia64_os_mca_done_restore;;

//EndStub//////////////////////////////////////////////////////////////////////


// ok, the issue here is that we need to save state information so
// it can be usable by the kernel debugger and show_regs routines.
// In order to do this, our best bet is to save the current state (plus
// the state information obtained from the MIN_STATE_AREA) into a pt_regs
// format.  This way we can pass it on in a usable format.
//

//
// SAL to OS entry point for INIT on the monarch processor
// This has been defined for registration purposes with SAL
// as a part of ia64_mca_init.
//
// When we get here, the following registers have been
// set by the SAL for our use
//
//              1. GR1 = OS INIT GP
//              2. GR8 = PAL_PROC physical address
//              3. GR9 = SAL_PROC physical address
//              4. GR10 = SAL GP (physical)
//              5. GR11 = Init Reason
//                      0 = Received INIT for event other than crash dump switch
//                      1 = Received wakeup at the end of an OS_MCA corrected machine check
//                      2 = Received INIT due to CrashDump switch assertion
//
//              6. GR12 = Return address to location within SAL_INIT procedure


GLOBAL_ENTRY(ia64_monarch_init_handler)

        // stash the information the SAL passed to os
        SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(r2)
        ;;
        SAVE_MIN_WITH_COVER
        ;;
        mov r8=cr.ifa
        mov r9=cr.isr
        adds r3=8,r2                            // set up second base pointer
        ;;
        SAVE_REST

// ok, enough should be saved at this point to be dangerous, and supply
// information for a dump
// We need to switch to Virtual mode before hitting the C functions.

        movl    r2=IA64_PSR_IT|IA64_PSR_IC|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_DFH|IA64_PSR_BN
        mov     r3=psr  // get the current psr, minimum enabled at this point
        ;;
        or      r2=r2,r3
        ;;
        movl    r3=IVirtual_Switch
        ;;
        mov     cr.iip=r3       // short return to set the appropriate bits
        mov     cr.ipsr=r2      // need to do an rfi to set appropriate bits
        ;;
        rfi
        ;;
IVirtual_Switch:
        //
        // We should now be running virtual
        //
        // Let's call the C handler to get the rest of the state info
        //
        alloc r14=ar.pfs,0,0,2,0                // now it's safe (must be first in insn group!)
        ;;
        adds out0=16,sp                         // out0 = pointer to pt_regs
        ;;
        DO_SAVE_SWITCH_STACK
        adds out1=16,sp                         // out1 = pointer to switch_stack

        br.call.sptk.many rp=ia64_init_handler
.ret1:

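        // Note: there is no supported path back to SAL from the INIT
        // handler here, so once the C handler returns we simply spin.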
return_from_init:
        br.sptk return_from_init
END(ia64_monarch_init_handler)

//
// SAL to OS entry point for INIT on the slave processor
// This has been defined for registration purposes with SAL
// as a part of ia64_mca_init.
//

GLOBAL_ENTRY(ia64_slave_init_handler)
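        // Slave CPUs have nothing to do in this implementation; park them
        // in a tight loop.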
1:      br.sptk 1b
END(ia64_slave_init_handler)