xref: /openbmc/linux/arch/ia64/kernel/mca_asm.S (revision 1da177e4)
1//
2// assembly portion of the IA64 MCA handling
3//
4// Mods by cfleck to integrate into kernel build
5// 00/03/15 davidm Added various stop bits to get a clean compile
6//
7// 00/03/29 cfleck Added code to save INIT handoff state in pt_regs format, switch to temp
8//		   kstack, switch modes, jump to C INIT handler
9//
10// 02/01/04 J.Hall <jenna.s.hall@intel.com>
11//		   Before entering virtual mode code:
12//		   1. Check for TLB CPU error
13//		   2. Restore current thread pointer to kr6
14//		   3. Move stack ptr 16 bytes to conform to C calling convention
15//
16// 04/11/12 Russ Anderson <rja@sgi.com>
17//		   Added per cpu MCA/INIT stack save areas.
18//
19#include <linux/config.h>
20#include <linux/threads.h>
21
22#include <asm/asmmacro.h>
23#include <asm/pgtable.h>
24#include <asm/processor.h>
25#include <asm/mca_asm.h>
26#include <asm/mca.h>
27
28/*
29 * When we get a machine check, the kernel stack pointer is no longer
30 * valid, so we need to set a new stack pointer.
31 */
32#define	MINSTATE_PHYS	/* Make sure stack access is physical for MINSTATE */
33
34/*
35 * Needed for return context to SAL: IA64_MCA_COLD_BOOT is the OS_MCA
 * return status requesting a cold boot, IA64_MCA_SAME_CONTEXT is the
 * "returning to same context" value (see COLD_BOOT_HANDOFF_STATE below).
36 */
37#define IA64_MCA_SAME_CONTEXT	0
38#define IA64_MCA_COLD_BOOT	-2
39
40#include "minstate.h"
41
42/*
43 * SAL_TO_OS_MCA_HANDOFF_STATE (SAL 3.0 spec)
44 *		1. GR1 = OS GP
45 *		2. GR8 = PAL_PROC physical address
46 *		3. GR9 = SAL_PROC physical address
47 *		4. GR10 = SAL GP (physical)
48 *		5. GR11 = Rendez state
49 *		6. GR12 = Return address to location within SAL_CHECK
 *
 * Stores GR1, GR8-GR12, GR17, GR18 in that order into
 * ia64_sal_to_os_handoff_state using physical addressing; _tmp is
 * clobbered.  The store order must match the field layout of
 * ia64_mca_sal_to_os_state_t in include/asm/mca.h.  NOTE(review):
 * GR17/GR18 are beyond the six documented handoff values; offset +56
 * (the GR18 slot) is read back later as the processor state parameter
 * — presumably GR17 is the min-state save area pointer; confirm
 * against asm/mca.h.
50 */
51#define SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(_tmp)		\
52	LOAD_PHYSICAL(p0, _tmp, ia64_sal_to_os_handoff_state);; \
53	st8	[_tmp]=r1,0x08;;			\
54	st8	[_tmp]=r8,0x08;;			\
55	st8	[_tmp]=r9,0x08;;			\
56	st8	[_tmp]=r10,0x08;;			\
57	st8	[_tmp]=r11,0x08;;			\
58	st8	[_tmp]=r12,0x08;;			\
59	st8	[_tmp]=r17,0x08;;			\
60	st8	[_tmp]=r18,0x08
61
62/*
63 * OS_MCA_TO_SAL_HANDOFF_STATE (SAL 3.0 spec)
64 * (p6) is executed if we never entered virtual mode (TLB error)
65 * (p7) is executed if we entered virtual mode as expected (normal case)
 * NOTE(review): the (p6)/(p7) remark above appears stale — no
 * predicated instruction remains in this macro.
66 *	1. GR8 = OS_MCA return status
67 *	2. GR9 = SAL GP (physical)
68 *	3. GR10 = 0/1 returning same/new context
69 *	4. GR22 = New min state save area pointer
70 *	returns ptr to SAL rtn save loc in _tmp
 *
 * Loads r8/r9/r10/r22 from ia64_os_to_sal_handoff_state (converted to
 * a physical address) and leaves _tmp pointing at the next field, the
 * SAL return-address save location.
71 */
72#define OS_MCA_TO_SAL_HANDOFF_STATE_RESTORE(_tmp)	\
73	movl	_tmp=ia64_os_to_sal_handoff_state;;	\
74	DATA_VA_TO_PA(_tmp);;				\
75	ld8	r8=[_tmp],0x08;;			\
76	ld8	r9=[_tmp],0x08;;			\
77	ld8	r10=[_tmp],0x08;;			\
78	ld8	r22=[_tmp],0x08;;
79	// now _tmp is pointing to SAL rtn save location
80
81/*
82 * COLD_BOOT_HANDOFF_STATE() sets ia64_mca_os_to_sal_state
83 *	imots_os_status=IA64_MCA_COLD_BOOT
84 *	imots_sal_gp=SAL GP
85 *	imots_context=IA64_MCA_SAME_CONTEXT
86 *	imots_new_min_state=Min state save area pointer
87 *	imots_sal_check_ra=Return address to location within SAL_CHECK
 *
 * Copies the SAL GP, min-state pointer and SAL_CHECK return address
 * out of ia64_sal_to_os_handoff_state (note the +48/-8 offset dance)
 * into the os-to-sal structure, with status=COLD_BOOT and
 * context=SAME_CONTEXT.  All three macro arguments are clobbered.
88 *
89 */
90#define COLD_BOOT_HANDOFF_STATE(sal_to_os_handoff,os_to_sal_handoff,tmp)\
91	movl	tmp=IA64_MCA_COLD_BOOT;					\
92	movl	sal_to_os_handoff=__pa(ia64_sal_to_os_handoff_state);	\
93	movl	os_to_sal_handoff=__pa(ia64_os_to_sal_handoff_state);;	\
94	st8	[os_to_sal_handoff]=tmp,8;;				\
95	ld8	tmp=[sal_to_os_handoff],48;;				\
96	st8	[os_to_sal_handoff]=tmp,8;;				\
97	movl	tmp=IA64_MCA_SAME_CONTEXT;;				\
98	st8	[os_to_sal_handoff]=tmp,8;;				\
99	ld8	tmp=[sal_to_os_handoff],-8;;				\
100	st8     [os_to_sal_handoff]=tmp,8;;				\
101	ld8	tmp=[sal_to_os_handoff];;				\
102	st8     [os_to_sal_handoff]=tmp;;
103
// Load the physical address of this CPU's MCA save area (per-cpu
// variable ia64_mca_data) into reg; reg is used as scratch throughout.
104#define GET_IA64_MCA_DATA(reg)						\
105	GET_THIS_PADDR(reg, ia64_mca_data)				\
106	;;								\
107	ld8 reg=[reg]
108
	// Symbols exported for SAL registration / C-side access
	// (presumably consumed by ia64_mca_init — confirm in mca.c).
109	.global ia64_os_mca_dispatch
110	.global ia64_os_mca_dispatch_end
111	.global ia64_sal_to_os_handoff_state
112	.global	ia64_os_to_sal_handoff_state
113
114	.text
115	.align 16
116
//
// ia64_os_mca_dispatch
//
// OS_MCA entry point, entered from SAL in physical mode.  Flow:
//   1. serialize MCA processing via the ia64_mca_serialize lock word
//   2. save the SAL->OS handoff registers
//   3. dump processor state (branch to ia64_os_mca_proc_state_dump)
//   4. if the processor state parameter flags a TLB error, purge all
//      TC entries and the kernel/per-cpu/PAL/stack TRs, then reload
//      the TRs from the per-cpu MCA data
//   5. switch onto the per-cpu MCA stack/backing store, enter virtual
//      mode and call the C handler ia64_mca_ucmc_handler
//   6. restore processor state, release the lock and return to
//      SAL_CHECK
//
117ia64_os_mca_dispatch:
118
119	// Serialize all MCA processing
120	mov	r3=1;;
121	LOAD_PHYSICAL(p0,r2,ia64_mca_serialize);;
122ia64_os_mca_spin:
123	xchg8	r4=[r2],r3;;		// spin until the old value is 0
124	cmp.ne	p6,p0=r4,r0
125(p6)	br ia64_os_mca_spin
126
127	// Save the SAL to OS MCA handoff state as defined
128	// by SAL SPEC 3.0
129	// NOTE : The order in which the state gets saved
130	//	  is dependent on the way the C-structure
131	//	  for ia64_mca_sal_to_os_state_t has been
132	//	  defined in include/asm/mca.h
133	SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(r2)
134	;;
135
136	// LOG PROCESSOR STATE INFO FROM HERE ON..
137begin_os_mca_dump:
138	br	ia64_os_mca_proc_state_dump;;
139
140ia64_os_mca_done_dump:
141
142	LOAD_PHYSICAL(p0,r16,ia64_sal_to_os_handoff_state+56)
143	;;
144	ld8 r18=[r16]		// Get processor state parameter on existing PALE_CHECK.
145	;;
146	tbit.nz p6,p7=r18,60	// p6 <- PSP bit 60 set (TLB error; see file header)
147(p7)	br.spnt done_tlb_purge_and_reload
148
149	// The following code purges TC and TR entries. Then reload all TC entries.
150	// Purge percpu data TC entries.
151begin_tlb_purge_and_reload:
152
153#define O(member)	IA64_CPUINFO_##member##_OFFSET
154
155	GET_THIS_PADDR(r2, cpu_info)	// load phys addr of cpu_info into r2
156	;;
157	addl r17=O(PTCE_STRIDE),r2
158	addl r2=O(PTCE_BASE),r2
159	;;
160	ld8 r18=[r2],(O(PTCE_COUNT)-O(PTCE_BASE));;	// r18=ptce_base
161	ld4 r19=[r2],4					// r19=ptce_count[0]
162	ld4 r21=[r17],4					// r21=ptce_stride[0]
163	;;
164	ld4 r20=[r2]					// r20=ptce_count[1]
165	ld4 r22=[r17]					// r22=ptce_stride[1]
166	mov r24=0					// r24 = outer loop index
167	;;
168	adds r20=-1,r20					// inner loop runs count[1] times via ar.lc
169	;;
170#undef O
171
172	// Nested ptc.e loop: outer over count[0] (r24), inner over count[1]
1732:
174	cmp.ltu p6,p7=r24,r19
175(p7)	br.cond.dpnt.few 4f
176	mov ar.lc=r20
1773:
178	ptc.e r18
179	;;
180	add r18=r22,r18
181	br.cloop.sptk.few 3b
182	;;
183	add r18=r21,r18
184	add r24=1,r24
185	;;
186	br.sptk.few 2b
1874:
188	srlz.i 			// srlz.i implies srlz.d
189	;;
190
191        // Now purge addresses formerly mapped by TR registers
192	// 1. Purge ITR&DTR for kernel.
193	movl r16=KERNEL_START
194	mov r18=KERNEL_TR_PAGE_SHIFT<<2
195	;;
196	ptr.i r16, r18
197	ptr.d r16, r18
198	;;
199	srlz.i
200	;;
201	srlz.d
202	;;
203	// 2. Purge DTR for PERCPU data.
204	movl r16=PERCPU_ADDR
205	mov r18=PERCPU_PAGE_SHIFT<<2
206	;;
207	ptr.d r16,r18
208	;;
209	srlz.d
210	;;
211	// 3. Purge ITR for PAL code.
212	GET_THIS_PADDR(r2, ia64_mca_pal_base)
213	;;
214	ld8 r16=[r2]
215	mov r18=IA64_GRANULE_SHIFT<<2
216	;;
217	ptr.i r16,r18
218	;;
219	srlz.i
220	;;
221	// 4. Purge DTR for stack.
222	mov r16=IA64_KR(CURRENT_STACK)
223	;;
224	shl r16=r16,IA64_GRANULE_SHIFT
225	movl r19=PAGE_OFFSET
226	;;
227	add r16=r19,r16
228	mov r18=IA64_GRANULE_SHIFT<<2
229	;;
230	ptr.d r16,r18
231	;;
232	srlz.i
233	;;
234	// Finally reload the TR registers.
235	// 1. Reload DTR/ITR registers for kernel.
236	mov r18=KERNEL_TR_PAGE_SHIFT<<2
237	movl r17=KERNEL_START
238	;;
239	mov cr.itir=r18
240	mov cr.ifa=r17
241        mov r16=IA64_TR_KERNEL
242	mov r19=ip
243	movl r18=PAGE_KERNEL
244	;;
245        dep r17=0,r19,0, KERNEL_TR_PAGE_SHIFT	// r17 = page base of current ip
246	;;
247	or r18=r17,r18		// PTE = page base | PAGE_KERNEL attributes
248	;;
249        itr.i itr[r16]=r18
250	;;
251        itr.d dtr[r16]=r18
252        ;;
253	srlz.i
254	srlz.d
255	;;
256	// 2. Reload DTR register for PERCPU data.
257	GET_THIS_PADDR(r2, ia64_mca_per_cpu_pte)
258	;;
259	movl r16=PERCPU_ADDR		// vaddr
260	movl r18=PERCPU_PAGE_SHIFT<<2
261	;;
262	mov cr.itir=r18
263	mov cr.ifa=r16
264	;;
265	ld8 r18=[r2]			// load per-CPU PTE
266	mov r16=IA64_TR_PERCPU_DATA;
267	;;
268	itr.d dtr[r16]=r18
269	;;
270	srlz.d
271	;;
272	// 3. Reload ITR for PAL code.
273	GET_THIS_PADDR(r2, ia64_mca_pal_pte)
274	;;
275	ld8 r18=[r2]			// load PAL PTE
276	;;
277	GET_THIS_PADDR(r2, ia64_mca_pal_base)
278	;;
279	ld8 r16=[r2]			// load PAL vaddr
280	mov r19=IA64_GRANULE_SHIFT<<2
281	;;
282	mov cr.itir=r19
283	mov cr.ifa=r16
284	mov r20=IA64_TR_PALCODE
285	;;
286	itr.i itr[r20]=r18
287	;;
288	srlz.i
289	;;
290	// 4. Reload DTR for stack.
291	mov r16=IA64_KR(CURRENT_STACK)
292	;;
293	shl r16=r16,IA64_GRANULE_SHIFT
294	movl r19=PAGE_OFFSET
295	;;
296	add r18=r19,r16			// r18 = stack vaddr
297	movl r20=PAGE_KERNEL
298	;;
299	add r16=r20,r16			// r16 = PTE for stack granule
300	mov r19=IA64_GRANULE_SHIFT<<2
301	;;
302	mov cr.itir=r19
303	mov cr.ifa=r18
304	mov r20=IA64_TR_CURRENT_STACK
305	;;
306	itr.d dtr[r20]=r16
307	;;
308	srlz.d
309	;;
310	br.sptk.many done_tlb_purge_and_reload
	// NOTE(review): nothing in this file branches to err: — kept,
	// presumably for out-of-view or future use.
311err:
312	COLD_BOOT_HANDOFF_STATE(r20,r21,r22)
313	br.sptk.many ia64_os_mca_done_restore
314
315done_tlb_purge_and_reload:
316
317	// Setup new stack frame for OS_MCA handling
318	GET_IA64_MCA_DATA(r2)
319	;;
320	add r3 = IA64_MCA_CPU_STACKFRAME_OFFSET, r2
321	add r2 = IA64_MCA_CPU_RBSTORE_OFFSET, r2
322	;;
323	rse_switch_context(r6,r3,r2);;	// RSC management in this new context
324
325	GET_IA64_MCA_DATA(r2)
326	;;
327	add r2 = IA64_MCA_CPU_STACK_OFFSET+IA64_MCA_STACK_SIZE-16, r2
328	;;
329	mov r12=r2		// establish new stack-pointer
330
331        // Enter virtual mode from physical mode
332	VIRTUAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_begin, r4)
333ia64_os_mca_virtual_begin:
334
335	// Call virtual mode handler
336	movl		r2=ia64_mca_ucmc_handler;;
337	mov		b6=r2;;
338	br.call.sptk.many    b0=b6;;
339.ret0:
340	// Revert back to physical mode before going back to SAL
341	PHYSICAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_end, r4)
342ia64_os_mca_virtual_end:
343
344	// restore the original stack frame here
345	GET_IA64_MCA_DATA(r2)
346	;;
347	add r2 = IA64_MCA_CPU_STACKFRAME_OFFSET, r2
348	;;
349	movl    r4=IA64_PSR_MC
350	;;
351	rse_return_context(r4,r3,r2)	// switch from interrupt context for RSE
352
353	// let us restore all the registers from our PSI structure
354	mov	r8=gp
355	;;
356begin_os_mca_restore:
357	br	ia64_os_mca_proc_state_restore;;
358
359ia64_os_mca_done_restore:
360	OS_MCA_TO_SAL_HANDOFF_STATE_RESTORE(r2);;
361	// branch back to SALE_CHECK
362	ld8		r3=[r2];;
363	mov		b0=r3;;		// SAL_CHECK return address
364
365	// release lock
366	movl		r3=ia64_mca_serialize;;
367	DATA_VA_TO_PA(r3);;
368	st8.rel		[r3]=r0
369
370	br		b0
371	;;
372ia64_os_mca_dispatch_end:
372//EndMain//////////////////////////////////////////////////////////////////////
373
374
375//++
376// Name:
377//      ia64_os_mca_proc_state_dump()
378//
379// Stub Description:
380//
381//       This stub dumps the processor state during MCHK to a data area
//
//       Entered from ia64_os_mca_dispatch via a plain branch (not
//       br.call); exits by branching to ia64_os_mca_done_dump.
//       Dumps bank-1 GRs 16-31 (with NaT bits), BRs, CRs, ARs and RRs
//       into the per-cpu area at IA64_MCA_CPU_PROC_STATE_DUMP_OFFSET.
//       Clobbers r2-r7, ar.lc and p6.  The layout written here must
//       match what ia64_os_mca_proc_state_restore reads back.
382//
383//--
384
385ia64_os_mca_proc_state_dump:
386// Save bank 1 GRs 16-31 which will be used by c-language code when we switch
387//  to virtual addressing mode.
388	GET_IA64_MCA_DATA(r2)
389	;;
390	add r2 = IA64_MCA_CPU_PROC_STATE_DUMP_OFFSET, r2
391	;;
392// save ar.NaT
393	mov		r5=ar.unat                  // ar.unat
394
395// save banked GRs 16-31 along with NaT bits
396	bsw.1;;
397	st8.spill	[r2]=r16,8;;
398	st8.spill	[r2]=r17,8;;
399	st8.spill	[r2]=r18,8;;
400	st8.spill	[r2]=r19,8;;
401	st8.spill	[r2]=r20,8;;
402	st8.spill	[r2]=r21,8;;
403	st8.spill	[r2]=r22,8;;
404	st8.spill	[r2]=r23,8;;
405	st8.spill	[r2]=r24,8;;
406	st8.spill	[r2]=r25,8;;
407	st8.spill	[r2]=r26,8;;
408	st8.spill	[r2]=r27,8;;
409	st8.spill	[r2]=r28,8;;
410	st8.spill	[r2]=r29,8;;
411	st8.spill	[r2]=r30,8;;
412	st8.spill	[r2]=r31,8;;
413
414	mov		r4=ar.unat;;
415	st8		[r2]=r4,8                // save User NaT bits for r16-r31
416	mov		ar.unat=r5                  // restore original unat
417	bsw.0;;
418
419//save BRs
420	add		r4=8,r2                  // duplicate r2 in r4
421	add		r6=2*8,r2                // duplicate r2 in r6
422
423	mov		r3=b0
424	mov		r5=b1
425	mov		r7=b2;;
426	st8		[r2]=r3,3*8
427	st8		[r4]=r5,3*8
428	st8		[r6]=r7,3*8;;
429
430	mov		r3=b3
431	mov		r5=b4
432	mov		r7=b5;;
433	st8		[r2]=r3,3*8
434	st8		[r4]=r5,3*8
435	st8		[r6]=r7,3*8;;
436
437	mov		r3=b6
438	mov		r5=b7;;
439	st8		[r2]=r3,2*8
440	st8		[r4]=r5,2*8;;
441
442cSaveCRs:
443// save CRs
444	add		r4=8,r2                  // duplicate r2 in r4
445	add		r6=2*8,r2                // duplicate r2 in r6
446
447	mov		r3=cr.dcr
448	mov		r5=cr.itm
449	mov		r7=cr.iva;;
450
451	st8		[r2]=r3,8*8
452	st8		[r4]=r5,3*8
453	st8		[r6]=r7,3*8;;            // 48 byte increments
454
455	mov		r3=cr.pta;;
456	st8		[r2]=r3,8*8;;            // 64 byte increments
457
458// if PSR.ic=0, reading interruption registers causes an illegal operation fault
459	mov		r3=psr;;
460	tbit.nz.unc	p6,p0=r3,PSR_IC;;           // PSI Valid Log bit pos. test
461(p6)    st8     [r2]=r0,9*8+160             // increment by 232 byte inc.
462begin_skip_intr_regs:
463(p6)	br		SkipIntrRegs;;
464
465	add		r4=8,r2                  // duplicate r2 in r4
466	add		r6=2*8,r2                // duplicate r2 in r6
467
468	mov		r3=cr.ipsr
469	mov		r5=cr.isr
470	mov		r7=r0;;
471	st8		[r2]=r3,3*8
472	st8		[r4]=r5,3*8
473	st8		[r6]=r7,3*8;;
474
475	mov		r3=cr.iip
476	mov		r5=cr.ifa
477	mov		r7=cr.itir;;
478	st8		[r2]=r3,3*8
479	st8		[r4]=r5,3*8
480	st8		[r6]=r7,3*8;;
481
482	mov		r3=cr.iipa
483	mov		r5=cr.ifs
484	mov		r7=cr.iim;;
485	st8		[r2]=r3,3*8
486	st8		[r4]=r5,3*8
487	st8		[r6]=r7,3*8;;
488
489	mov		r3=cr25;;                   // cr.iha
490	st8		[r2]=r3,160;;               // 160 byte increment
491
492SkipIntrRegs:
493	st8		[r2]=r0,152;;               // another 152 byte increment.
494
495	add		r4=8,r2                     // duplicate r2 in r4
496	add		r6=2*8,r2                   // duplicate r2 in r6
497
498	mov		r3=cr.lid
499//	mov		r5=cr.ivr                     // cr.ivr, don't read it
500	mov		r7=cr.tpr;;
501	st8		[r2]=r3,3*8
502	st8		[r4]=r5,3*8
503	st8		[r6]=r7,3*8;;
504
505	mov		r3=r0                       // cr.eoi => cr67
506	mov		r5=r0                       // cr.irr0 => cr68
507	mov		r7=r0;;                     // cr.irr1 => cr69
508	st8		[r2]=r3,3*8
509	st8		[r4]=r5,3*8
510	st8		[r6]=r7,3*8;;
511
512	mov		r3=r0                       // cr.irr2 => cr70
513	mov		r5=r0                       // cr.irr3 => cr71
514	mov		r7=cr.itv;;
515	st8		[r2]=r3,3*8
516	st8		[r4]=r5,3*8
517	st8		[r6]=r7,3*8;;
518
519	mov		r3=cr.pmv
520	mov		r5=cr.cmcv;;
521	st8		[r2]=r3,7*8
522	st8		[r4]=r5,7*8;;
523
524	mov		r3=r0                       // cr.lrr0 => cr80
525	mov		r5=r0;;                     // cr.lrr1 => cr81
526	st8		[r2]=r3,23*8
527	st8		[r4]=r5,23*8;;
528
529	adds		r2=25*8,r2;;
530
531cSaveARs:
532// save ARs
533	add		r4=8,r2                  // duplicate r2 in r4
534	add		r6=2*8,r2                // duplicate r2 in r6
535
536	mov		r3=ar.k0
537	mov		r5=ar.k1
538	mov		r7=ar.k2;;
539	st8		[r2]=r3,3*8
540	st8		[r4]=r5,3*8
541	st8		[r6]=r7,3*8;;
542
543	mov		r3=ar.k3
544	mov		r5=ar.k4
545	mov		r7=ar.k5;;
546	st8		[r2]=r3,3*8
547	st8		[r4]=r5,3*8
548	st8		[r6]=r7,3*8;;
549
550	mov		r3=ar.k6
551	mov		r5=ar.k7
552	mov		r7=r0;;                     // no ar.k8; store zero
553	st8		[r2]=r3,10*8
554	st8		[r4]=r5,10*8
555	st8		[r6]=r7,10*8;;           // increment by 10*8 bytes
556
557	mov		r3=ar.rsc
558	mov		ar.rsc=r0			    // put RSE in enforced lazy mode
559	mov		r5=ar.bsp
560	;;
561	mov		r7=ar.bspstore;;
562	st8		[r2]=r3,3*8
563	st8		[r4]=r5,3*8
564	st8		[r6]=r7,3*8;;
565
566	mov		r3=ar.rnat;;
567	st8		[r2]=r3,8*13             // increment by 13x8 bytes
568
569	mov		r3=ar.ccv;;
570	st8		[r2]=r3,8*4
571
572	mov		r3=ar.unat;;
573	st8		[r2]=r3,8*4
574
575	mov		r3=ar.fpsr;;
576	st8		[r2]=r3,8*4
577
578	mov		r3=ar.itc;;
579	st8		[r2]=r3,160                 // 160
580
581	mov		r3=ar.pfs;;
582	st8		[r2]=r3,8
583
584	mov		r3=ar.lc;;
585	st8		[r2]=r3,8
586
587	mov		r3=ar.ec;;
588	st8		[r2]=r3
589	add		r2=8*62,r2               //padding
590
591// save RRs 0-7 (region registers)
592	mov		ar.lc=0x08-1
593	movl		r4=0x00;;
594
595cStRR:
596	dep.z		r5=r4,61,3;;		// r5 = region number in bits 63:61
597	mov		r3=rr[r5];;
598	st8		[r2]=r3,8
599	add		r4=1,r4
600	br.cloop.sptk.few	cStRR
601	;;
602end_os_mca_dump:
603	br	ia64_os_mca_done_dump;;
604
605//EndStub//////////////////////////////////////////////////////////////////////
606
607
608//++
609// Name:
610//       ia64_os_mca_proc_state_restore()
611//
612// Stub Description:
613//
614//       This is a stub to restore the saved processor state during MCHK
//
//       Mirror image of ia64_os_mca_proc_state_dump: reads the same
//       per-cpu dump area in the same layout and writes the values
//       back into the GRs/BRs/CRs/ARs/RRs (read-only registers are
//       deliberately skipped — see commented-out moves).  Entered and
//       exited via plain branches; clobbers r2-r7, r9, ar.lc, p6.
615//
616//--
617
618ia64_os_mca_proc_state_restore:
619
620// Restore bank1 GR16-31
621	GET_IA64_MCA_DATA(r2)
622	;;
623	add r2 = IA64_MCA_CPU_PROC_STATE_DUMP_OFFSET, r2
624
625restore_GRs:                                    // restore bank-1 GRs 16-31
626	bsw.1;;
627	add		r3=16*8,r2;;                // to get to NaT of GR 16-31
628	ld8		r3=[r3];;
629	mov		ar.unat=r3;;                // first restore NaT
630
631	ld8.fill	r16=[r2],8;;
632	ld8.fill	r17=[r2],8;;
633	ld8.fill	r18=[r2],8;;
634	ld8.fill	r19=[r2],8;;
635	ld8.fill	r20=[r2],8;;
636	ld8.fill	r21=[r2],8;;
637	ld8.fill	r22=[r2],8;;
638	ld8.fill	r23=[r2],8;;
639	ld8.fill	r24=[r2],8;;
640	ld8.fill	r25=[r2],8;;
641	ld8.fill	r26=[r2],8;;
642	ld8.fill	r27=[r2],8;;
643	ld8.fill	r28=[r2],8;;
644	ld8.fill	r29=[r2],8;;
645	ld8.fill	r30=[r2],8;;
646	ld8.fill	r31=[r2],8;;
647
648	ld8		r3=[r2],8;;              // increment to skip NaT
649	bsw.0;;
650
651restore_BRs:
652	add		r4=8,r2                  // duplicate r2 in r4
653	add		r6=2*8,r2;;              // duplicate r2 in r6
654
655	ld8		r3=[r2],3*8
656	ld8		r5=[r4],3*8
657	ld8		r7=[r6],3*8;;
658	mov		b0=r3
659	mov		b1=r5
660	mov		b2=r7;;
661
662	ld8		r3=[r2],3*8
663	ld8		r5=[r4],3*8
664	ld8		r7=[r6],3*8;;
665	mov		b3=r3
666	mov		b4=r5
667	mov		b5=r7;;
668
669	ld8		r3=[r2],2*8
670	ld8		r5=[r4],2*8;;
671	mov		b6=r3
672	mov		b7=r5;;
673
674restore_CRs:
675	add		r4=8,r2                  // duplicate r2 in r4
676	add		r6=2*8,r2;;              // duplicate r2 in r6
677
678	ld8		r3=[r2],8*8
679	ld8		r5=[r4],3*8
680	ld8		r7=[r6],3*8;;            // 48 byte increments
681	mov		cr.dcr=r3
682	mov		cr.itm=r5
683	mov		cr.iva=r7;;
684
685	ld8		r3=[r2],8*8;;            // 64 byte increments
686//      mov		cr.pta=r3
687
688
689// if PSR.ic=1, reading interruption registers causes an illegal operation fault
690	mov		r3=psr;;
691	tbit.nz.unc	p6,p0=r3,PSR_IC;;           // PSI Valid Log bit pos. test
692(p6)    st8     [r2]=r0,9*8+160             // increment by 232 byte inc.
	// NOTE(review): the predicated st8 above zeroes one slot of the
	// dump area while advancing r2 past the skipped interruption
	// registers — presumably harmless since those slots are not
	// restored on this path; confirm.
693
694begin_rskip_intr_regs:
695(p6)	br		rSkipIntrRegs;;
696
697	add		r4=8,r2                  // duplicate r2 in r4
698	add		r6=2*8,r2;;              // duplicate r2 in r6
699
700	ld8		r3=[r2],3*8
701	ld8		r5=[r4],3*8
702	ld8		r7=[r6],3*8;;
703	mov		cr.ipsr=r3
704//	mov		cr.isr=r5                   // cr.isr is read only
705
706	ld8		r3=[r2],3*8
707	ld8		r5=[r4],3*8
708	ld8		r7=[r6],3*8;;
709	mov		cr.iip=r3
710	mov		cr.ifa=r5
711	mov		cr.itir=r7;;
712
713	ld8		r3=[r2],3*8
714	ld8		r5=[r4],3*8
715	ld8		r7=[r6],3*8;;
716	mov		cr.iipa=r3
717	mov		cr.ifs=r5
718	mov		cr.iim=r7
719
720	ld8		r3=[r2],160;;               // 160 byte increment
721	mov		cr.iha=r3
722
723rSkipIntrRegs:
724	ld8		r3=[r2],152;;               // another 152 byte inc.
725
726	add		r4=8,r2                     // duplicate r2 in r4
727	add		r6=2*8,r2;;                 // duplicate r2 in r6
728
729	ld8		r3=[r2],8*3
730	ld8		r5=[r4],8*3
731	ld8		r7=[r6],8*3;;
732	mov		cr.lid=r3
733//	mov		cr.ivr=r5                   // cr.ivr is read only
734	mov		cr.tpr=r7;;
735
736	ld8		r3=[r2],8*3
737	ld8		r5=[r4],8*3
738	ld8		r7=[r6],8*3;;
739//	mov		cr.eoi=r3
740//	mov		cr.irr0=r5                  // cr.irr0 is read only
741//	mov		cr.irr1=r7;;                // cr.irr1 is read only
742
743	ld8		r3=[r2],8*3
744	ld8		r5=[r4],8*3
745	ld8		r7=[r6],8*3;;
746//	mov		cr.irr2=r3                  // cr.irr2 is read only
747//	mov		cr.irr3=r5                  // cr.irr3 is read only
748	mov		cr.itv=r7;;
749
750	ld8		r3=[r2],8*7
751	ld8		r5=[r4],8*7;;
752	mov		cr.pmv=r3
753	mov		cr.cmcv=r5;;
754
755	ld8		r3=[r2],8*23
756	ld8		r5=[r4],8*23;;
757	adds		r2=8*23,r2
758	adds		r4=8*23,r4;;
759//	mov		cr.lrr0=r3
760//	mov		cr.lrr1=r5
761
762	adds		r2=8*2,r2;;
763
764restore_ARs:
765	add		r4=8,r2                  // duplicate r2 in r4
766	add		r6=2*8,r2;;              // duplicate r2 in r6
767
768	ld8		r3=[r2],3*8
769	ld8		r5=[r4],3*8
770	ld8		r7=[r6],3*8;;
771	mov		ar.k0=r3
772	mov		ar.k1=r5
773	mov		ar.k2=r7;;
774
775	ld8		r3=[r2],3*8
776	ld8		r5=[r4],3*8
777	ld8		r7=[r6],3*8;;
778	mov		ar.k3=r3
779	mov		ar.k4=r5
780	mov		ar.k5=r7;;
781
782	ld8		r3=[r2],10*8
783	ld8		r5=[r4],10*8
784	ld8		r7=[r6],10*8;;
785	mov		ar.k6=r3
786	mov		ar.k7=r5
787	;;
788
789	ld8		r3=[r2],3*8
790	ld8		r5=[r4],3*8
791	ld8		r7=[r6],3*8;;
792//	mov		ar.rsc=r3
793//	mov		ar.bsp=r5                   // ar.bsp is read only
794	mov		ar.rsc=r0			    // make sure that RSE is in enforced lazy mode
795	;;
796	mov		ar.bspstore=r7;;
797
798	ld8		r9=[r2],8*13;;
799	mov		ar.rnat=r9
800
801	mov		ar.rsc=r3		// restore saved ar.rsc after rnat/bspstore
802	ld8		r3=[r2],8*4;;
803	mov		ar.ccv=r3
804
805	ld8		r3=[r2],8*4;;
806	mov		ar.unat=r3
807
808	ld8		r3=[r2],8*4;;
809	mov		ar.fpsr=r3
810
811	ld8		r3=[r2],160;;               // 160
812//      mov		ar.itc=r3
813
814	ld8		r3=[r2],8;;
815	mov		ar.pfs=r3
816
817	ld8		r3=[r2],8;;
818	mov		ar.lc=r3
819
820	ld8		r3=[r2];;
821	mov		ar.ec=r3
822	add		r2=8*62,r2;;             // padding
823
824restore_RRs:
825	mov		r5=ar.lc
826	mov		ar.lc=0x08-1
827	movl		r4=0x00;;
828cStRRr:
829	dep.z		r7=r4,61,3		// r7 = region number in bits 63:61
830	ld8		r3=[r2],8;;
831	mov		rr[r7]=r3                   // what are its access privileges?
832	add		r4=1,r4
833	br.cloop.sptk.few	cStRRr
834	;;
835	mov		ar.lc=r5
836	;;
837end_os_mca_restore:
838	br	ia64_os_mca_done_restore;;
839
840//EndStub//////////////////////////////////////////////////////////////////////
841
842
843// The issue here is that we need to save state information so
844// it can be usable by the kernel debugger and show-regs routines.
845// In order to do this, our best bet is to save the current state (plus
846// the state information obtained from the MIN_STATE_AREA) into a pt_regs
847// format.  This way we can pass it on in a usable format.
848//
849
850//
851// SAL to OS entry point for INIT on the monarch processor
852// This has been defined for registration purposes with SAL
853// as a part of ia64_mca_init.
854//
855// When we get here, the following registers have been
856// set by the SAL for our use
857//
858//		1. GR1 = OS INIT GP
859//		2. GR8 = PAL_PROC physical address
860//		3. GR9 = SAL_PROC physical address
861//		4. GR10 = SAL GP (physical)
862//		5. GR11 = Init Reason
863//			0 = Received INIT for event other than crash dump switch
864//			1 = Received wakeup at the end of an OS_MCA corrected machine check
865//			2 = Received INIT due to CrashDump switch assertion
866//
867//		6. GR12 = Return address to location within SAL_INIT procedure
868
869
870GLOBAL_ENTRY(ia64_monarch_init_handler)
871	.prologue
872	// stash the information the SAL passed to os
873	SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(r2)
874	;;
875	SAVE_MIN_WITH_COVER
876	;;
877	mov r8=cr.ifa
878	mov r9=cr.isr
879	adds r3=8,r2				// set up second base pointer
880	;;
881	SAVE_REST
882
883// ok, enough should be saved at this point to be dangerous, and supply
884// information for a dump
885// We need to switch to Virtual mode before hitting the C functions.
886
887	movl	r2=IA64_PSR_IT|IA64_PSR_IC|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_DFH|IA64_PSR_BN
888	mov	r3=psr	// get the current psr, minimum enabled at this point
889	;;
890	or	r2=r2,r3
891	;;
892	movl	r3=IVirtual_Switch
893	;;
894	mov	cr.iip=r3	// short return to set the appropriate bits
895	mov	cr.ipsr=r2	// need to do an rfi to set appropriate bits
896	;;
897	rfi
898	;;
899IVirtual_Switch:
900	//
901	// We should now be running virtual
902	//
903	// Let's call the C handler to get the rest of the state info
904	//
905	alloc r14=ar.pfs,0,0,2,0		// now it's safe (must be first in insn group!)
906	;;
907	adds out0=16,sp				// out0 = pointer to pt_regs
908	;;
909	DO_SAVE_SWITCH_STACK
910	.body
911	adds out1=16,sp				// out1 = pointer to switch_stack
912
913	br.call.sptk.many rp=ia64_init_handler
914.ret1:
915
916return_from_init:
917	br.sptk return_from_init	// no return path to SAL: spin forever
918END(ia64_monarch_init_handler)
919
920//
921// SAL to OS entry point for INIT on the slave processor
922// This has been defined for registration purposes with SAL
923// as a part of ia64_mca_init.
924//
925
926GLOBAL_ENTRY(ia64_slave_init_handler)
9271:	br.sptk 1b			// slave CPUs just spin here indefinitely
928END(ia64_slave_init_handler)
929