xref: /openbmc/linux/arch/parisc/kernel/entry.S (revision 9ce7677c)
1/*
2 * Linux/PA-RISC Project (http://www.parisc-linux.org/)
3 *
4 * kernel entry points (interruptions, system call wrappers)
5 *  Copyright (C) 1999,2000 Philipp Rumpf
6 *  Copyright (C) 1999 SuSE GmbH Nuernberg
7 *  Copyright (C) 2000 Hewlett-Packard (John Marvin)
8 *  Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
9 *
10 *    This program is free software; you can redistribute it and/or modify
11 *    it under the terms of the GNU General Public License as published by
12 *    the Free Software Foundation; either version 2, or (at your option)
13 *    any later version.
14 *
15 *    This program is distributed in the hope that it will be useful,
16 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
17 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18 *    GNU General Public License for more details.
19 *
20 *    You should have received a copy of the GNU General Public License
21 *    along with this program; if not, write to the Free Software
22 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 */
24
25#include <linux/config.h>
26#include <asm/asm-offsets.h>
27
28/* we have the following possibilities to act on an interruption:
29 *  - handle in assembly and use shadowed registers only
30 *  - save registers to kernel stack and handle in assembly or C */
31
32
33#include <asm/psw.h>
34#include <asm/assembly.h>	/* for LDREG/STREG defines */
35#include <asm/pgtable.h>
36#include <asm/signal.h>
37#include <asm/unistd.h>
38#include <asm/thread_info.h>
39
40#ifdef CONFIG_64BIT
41#define CMPIB           cmpib,*
42#define CMPB            cmpb,*
43#define COND(x)		*x
44
45	.level 2.0w
46#else
47#define CMPIB           cmpib,
48#define CMPB            cmpb,
49#define COND(x)		x
50
51	.level 2.0
52#endif
53
54	.import         pa_dbit_lock,data
55
56	/* space_to_prot macro creates a prot id (left in \prot) from a space id \spc */
57
58#if (SPACEID_SHIFT) == 0
59	.macro  space_to_prot spc prot
60	depd,z  \spc,62,31,\prot	/* prot = spc << 1, remaining bits zeroed */
61	.endm
62#else
63	.macro  space_to_prot spc prot
64	extrd,u \spc,(64 - (SPACEID_SHIFT)),32,\prot	/* shift space id down by SPACEID_SHIFT */
65	.endm
66#endif
67
68	/* Switch to virtual mapping, trashing only %r1 (the rfir resumes at 4: below with KERNEL_PSW installed and sr4-sr7 set for kernel space) */
69	.macro  virt_map
70	/* pcxt_ssm_bug */
71	rsm	PSW_SM_I, %r0	/* barrier for "Relied upon Translation */
72	mtsp	%r0, %sr4	/* sr4 = 0 (kernel space) */
73	mtsp	%r0, %sr5	/* sr5 = 0 (kernel space) */
74	mfsp	%sr7, %r1
75	or,=    %r0,%r1,%r0	/* Only save sr7 in sr3 if sr7 != 0 */
76	mtsp	%r1, %sr3
77	tovirt_r1 %r29		/* %r29: physical -> virtual pt_regs pointer */
78	load32	KERNEL_PSW, %r1
79
80	rsm     PSW_SM_QUIET,%r0	/* second "heavy weight" ctl op */
81	mtsp	%r0, %sr6	/* sr6 = 0 (kernel space) */
82	mtsp	%r0, %sr7	/* sr7 = 0 (kernel space) */
83	mtctl	%r0, %cr17	/* Clear IIASQ tail */
84	mtctl	%r0, %cr17	/* Clear IIASQ head */
85	mtctl	%r1, %ipsw	/* ipsw = KERNEL_PSW for the rfir */
86	load32	4f, %r1
87	mtctl	%r1, %cr18	/* Set IIAOQ tail */
88	ldo	4(%r1), %r1
89	mtctl	%r1, %cr18	/* Set IIAOQ head */
90	rfir
91	nop
924:
93	.endm
94
95	/*
96	 * The "get_stack" macros are responsible for determining the
97	 * kernel stack value.
98	 *
99	 * For Faults:
100	 *      If sr7 == 0
101	 *          Already using a kernel stack, so call the
102	 *          get_stack_use_r30 macro to push a pt_regs structure
103	 *          on the stack, and store registers there.
104	 *      else
105	 *          Need to set up a kernel stack, so call the
106	 *          get_stack_use_cr30 macro to set up a pointer
107	 *          to the pt_regs structure contained within the
108	 *          task pointer pointed to by cr30. Set the stack
109	 *          pointer to point to the end of the task structure.
110	 *
111	 * For Interrupts:
112	 *      If sr7 == 0
113	 *          Already using a kernel stack, check to see if r30
114	 *          is already pointing to the per processor interrupt
115	 *          stack. If it is, call the get_stack_use_r30 macro
116	 *          to push a pt_regs structure on the stack, and store
117	 *          registers there. Otherwise, call get_stack_use_cr31
118	 *          to get a pointer to the base of the interrupt stack
119	 *          and push a pt_regs structure on that stack.
120	 *      else
121	 *          Need to set up a kernel stack, so call the
122	 *          get_stack_use_cr30 macro to set up a pointer
123	 *          to the pt_regs structure contained within the
124	 *          task pointer pointed to by cr30. Set the stack
125	 *          pointer to point to the end of the task structure.
126	 *          N.B: We don't use the interrupt stack for the
127	 *          first interrupt from userland, because signals/
128	 *          resched's are processed when returning to userland,
129	 *          and we can sleep in those cases.
130	 *
131	 * Note that we use shadowed registers for temps until
132	 * we can save %r26 and %r29. %r26 is used to preserve
133	 * %r8 (a shadowed register) which temporarily contained
134	 * either the fault type ("code") or the eirr. We need
135	 * to use a non-shadowed register to carry the value over
136	 * the rfir in virt_map. We use %r26 since this value winds
137	 * up being passed as the argument to either do_cpu_irq_mask
138	 * or handle_interruption. %r29 is used to hold a pointer
139	 * the register save area, and once again, it needs to
140	 * be a non-shadowed register so that it survives the rfir.
141	 *
142	 * N.B. TASK_SZ_ALGN and PT_SZ_ALGN include space for a stack frame.
143	 */
144
145	.macro  get_stack_use_cr30

146
147	/* we save the registers in the task struct; on exit %r29 points at the
	 * physical pt_regs save area and %r30 is the kernel stack pointer */
148
149	mfctl   %cr30, %r1		/* cr30 = thread_info */
150	tophys  %r1,%r9
151	LDREG	TI_TASK(%r9), %r1	/* thread_info -> task_struct */
152	tophys  %r1,%r9
153	ldo     TASK_REGS(%r9),%r9	/* %r9 = phys address of the task's pt_regs */
154	STREG   %r30, PT_GR30(%r9)
155	STREG   %r29,PT_GR29(%r9)
156	STREG   %r26,PT_GR26(%r9)
157	copy    %r9,%r29		/* %r29 (non-shadowed) = pt_regs pointer */
158	mfctl   %cr30, %r1
159	ldo	THREAD_SZ_ALGN(%r1), %r30	/* kernel stack = thread_info + THREAD_SZ_ALGN */
160	.endm
161
162	.macro  get_stack_use_r30

163
164	/* we put a struct pt_regs on the stack and save the registers there */
165
166	tophys  %r30,%r9		/* %r9 = phys address of current stack top */
167	STREG   %r30,PT_GR30(%r9)
168	ldo	PT_SZ_ALGN(%r30),%r30	/* push a pt_regs-sized frame */
169	STREG   %r29,PT_GR29(%r9)
170	STREG   %r26,PT_GR26(%r9)
171	copy    %r9,%r29		/* %r29 (non-shadowed) = pt_regs pointer */
172	.endm
173
174	.macro  rest_stack		/* restore %r1, %r30 and (last) %r29 from pt_regs */
175	LDREG   PT_GR1(%r29), %r1
176	LDREG   PT_GR30(%r29),%r30
177	LDREG   PT_GR29(%r29),%r29	/* must be last: %r29 is the base register */
178	.endm
179
180	/* default interruption handler
181	 * (calls traps.c:handle_interruption) */
182	.macro	def code
183	b	intr_save
184	ldi     \code, %r8	/* delay slot: trap number in shadowed %r8 */
185	.align	32
186	.endm
187
188	/* Interrupt interruption handler
189	 * (calls irq.c:do_cpu_irq_mask); note that \code is unused here */
190	.macro	extint code
191	b	intr_extint
192	mfsp    %sr7,%r16	/* delay slot: %r16 = sr7 (0 => already in kernel) */
193	.align	32
194	.endm
195
196	.import	os_hpmc, code
197
198	/* HPMC (high-priority machine check) handler; the trailing words are patched at boot */
199	.macro	hpmc code
200	nop			/* must be a NOP, will be patched later */
201	load32	PA(os_hpmc), %r3
202	bv,n	0(%r3)
203	nop
204	.word	0		/* checksum (will be patched) */
205	.word	PA(os_hpmc)	/* address of handler */
206	.word	0		/* length of handler */
207	.endm
208
209	/*
210	 * Performance Note: Instructions will be moved up into
211	 * this part of the code later on, once we are sure
212	 * that the tlb miss handlers are close to final form.
213	 */
214
215	/* Register definitions for tlb miss handler macros */
216
217	va  = r8	/* virtual address for which the trap occurred */
218	spc = r24	/* space for which the trap occurred */
219
220#ifndef CONFIG_64BIT

221
222	/*
223	 * itlb miss interruption handler (parisc 1.1 - 32 bit)
224	 */
225
226	.macro	itlb_11 code

227
228	mfctl	%pcsq, spc	/* spc = space of the faulting instruction */
229	b	itlb_miss_11
230	mfctl	%pcoq, va	/* delay slot: va = offset of the faulting insn */
231
232	.align		32
233	.endm
234#endif
235
236	/*
237	 * itlb miss interruption handler (parisc 2.0)
238	 */
239
240	.macro	itlb_20 code
241	mfctl	%pcsq, spc	/* spc = space of the faulting instruction */
242#ifdef CONFIG_64BIT
243	b       itlb_miss_20w
244#else
245	b	itlb_miss_20
246#endif
247	mfctl	%pcoq, va	/* delay slot: va = offset of the faulting insn */
248
249	.align		32
250	.endm
251
252#ifndef CONFIG_64BIT
253	/*
254	 * naitlb miss interruption handler (parisc 1.1 - 32 bit)
255	 *
256	 * Note: naitlb misses will be treated
257	 * as an ordinary itlb miss for now.
258	 * However, note that naitlb misses
259	 * have the faulting address in the
260	 * IOR/ISR.
261	 */
262
263	.macro	naitlb_11 code

264
265	mfctl	%isr,spc	/* fault space/offset come from ISR/IOR here */
266	b	itlb_miss_11
267	mfctl 	%ior,va		/* delay slot */
268	/* FIXME: If user causes a naitlb miss, the priv level may not be in
269	 * lower bits of va, where the itlb miss handler is expecting them
270	 */
271
272	.align		32
273	.endm
274#endif
275
276	/*
277	 * naitlb miss interruption handler (parisc 2.0)
278	 *
279	 * Note: naitlb misses will be treated
280	 * as an ordinary itlb miss for now.
281	 * However, note that naitlb misses
282	 * have the faulting address in the
283	 * IOR/ISR.
284	 */
285
286	.macro	naitlb_20 code

287
288	mfctl	%isr,spc	/* fault space/offset come from ISR/IOR here */
289#ifdef CONFIG_64BIT
290	b       itlb_miss_20w
291#else
292	b	itlb_miss_20
293#endif
294	mfctl 	%ior,va		/* delay slot */
295	/* FIXME: If user causes a naitlb miss, the priv level may not be in
296	 * lower bits of va, where the itlb miss handler is expecting them
297	 */
298
299	.align		32
300	.endm
301
302#ifndef CONFIG_64BIT
303	/*
304	 * dtlb miss interruption handler (parisc 1.1 - 32 bit)
305	 */
306
307	.macro	dtlb_11 code

308
309	mfctl	%isr, spc	/* spc = fault space */
310	b	dtlb_miss_11
311	mfctl	%ior, va	/* delay slot: va = fault offset */
312
313	.align		32
314	.endm
315#endif
316
317	/*
318	 * dtlb miss interruption handler (parisc 2.0)
319	 */
320
321	.macro	dtlb_20 code

322
323	mfctl	%isr, spc	/* spc = fault space */
324#ifdef CONFIG_64BIT
325	b       dtlb_miss_20w
326#else
327	b	dtlb_miss_20
328#endif
329	mfctl	%ior, va	/* delay slot: va = fault offset */
330
331	.align		32
332	.endm
333
334#ifndef CONFIG_64BIT
335	/* nadtlb miss interruption handler (parisc 1.1 - 32 bit) */
336
337	.macro	nadtlb_11 code

338
339	mfctl	%isr,spc	/* spc = fault space */
340	b       nadtlb_miss_11
341	mfctl	%ior,va		/* delay slot: va = fault offset */
342
343	.align		32
344	.endm
345#endif
346
347	/* nadtlb miss interruption handler (parisc 2.0) */
348
349	.macro	nadtlb_20 code

350
351	mfctl	%isr,spc	/* spc = fault space */
352#ifdef CONFIG_64BIT
353	b       nadtlb_miss_20w
354#else
355	b       nadtlb_miss_20
356#endif
357	mfctl	%ior,va		/* delay slot: va = fault offset */
358
359	.align		32
360	.endm
361
362#ifndef CONFIG_64BIT
363	/*
364	 * dirty bit trap interruption handler (parisc 1.1 - 32 bit)
365	 */
366
367	.macro	dbit_11 code

368
369	mfctl	%isr,spc	/* spc = fault space */
370	b	dbit_trap_11
371	mfctl	%ior,va		/* delay slot: va = fault offset */
372
373	.align		32
374	.endm
375#endif
376
377	/*
378	 * dirty bit trap interruption handler (parisc 2.0)
379	 */
380
381	.macro	dbit_20 code

382
383	mfctl	%isr,spc	/* spc = fault space */
384#ifdef CONFIG_64BIT
385	b       dbit_trap_20w
386#else
387	b	dbit_trap_20
388#endif
389	mfctl	%ior,va		/* delay slot: va = fault offset */
390
391	.align		32
392	.endm
393
394	/* The following are simple 32 vs 64 bit instruction
395	 * abstractions for the macros */
396	.macro		EXTR	reg1,start,length,reg2	/* unsigned bit-field extract */
397#ifdef CONFIG_64BIT
398	extrd,u		\reg1,32+\start,\length,\reg2
399#else
400	extrw,u		\reg1,\start,\length,\reg2
401#endif
402	.endm
403
404	.macro		DEP	reg1,start,length,reg2	/* register bit-field deposit */
405#ifdef CONFIG_64BIT
406	depd		\reg1,32+\start,\length,\reg2
407#else
408	depw		\reg1,\start,\length,\reg2
409#endif
410	.endm
411
412	.macro		DEPI	val,start,length,reg	/* immediate bit-field deposit */
413#ifdef CONFIG_64BIT
414	depdi		\val,32+\start,\length,\reg
415#else
416	depwi		\val,\start,\length,\reg
417#endif
418	.endm
419
420	/* In LP64, the space contains part of the upper 32 bits of the
421	 * fault.  We have to extract this and place it in the va,
422	 * zeroing the corresponding bits in the space register */
423	.macro		space_adjust	spc,va,tmp
424#ifdef CONFIG_64BIT
425	extrd,u		\spc,63,SPACEID_SHIFT,\tmp	/* tmp = low SPACEID_SHIFT bits of spc */
426	depd		%r0,63,SPACEID_SHIFT,\spc	/* clear them from spc */
427	depd		\tmp,31,SPACEID_SHIFT,\va	/* ... and fold them into the top of va */
428#endif
429	.endm
430
431	.import		swapper_pg_dir,code

432
433	/* Get the pgd.  For faults on space zero (kernel space), this
434	 * is simply swapper_pg_dir.  For user space faults, the
435	 * pgd is stored in %cr25 */
436	.macro		get_pgd		spc,reg
437	ldil		L%PA(swapper_pg_dir),\reg
438	ldo		R%PA(swapper_pg_dir)(\reg),\reg
439	or,COND(=)	%r0,\spc,%r0	/* kernel space (spc == 0)? nullify the mfctl */
440	mfctl		%cr25,\reg	/* user fault: pgd comes from cr25 */
441	.endm
442
443	/*
444		space_check(spc,tmp,fault)

445
446		spc - The space we saw the fault with.
447		tmp - The place to store the current space.
448		fault - Function to call on failure.

449
450		Only allow faults on different spaces from the
451		currently active one if we're the kernel

452
453	*/
454	.macro		space_check	spc,tmp,fault
455	mfsp		%sr7,\tmp
456	or,COND(<>)	%r0,\spc,%r0	/* user may execute gateway page
457					 * as kernel, so defeat the space
458					 * check if it is */
459	copy		\spc,\tmp	/* executed only if spc == 0 */
460	or,COND(=)	%r0,\tmp,%r0	/* nullify if executing as kernel */
461	cmpb,COND(<>),n	\tmp,\spc,\fault	/* fault on space mismatch */
462	.endm
463
464	/* Look up a PTE in a 2-Level scheme (faulting at each
465	 * level if the entry isn't present
466	 *
467	 * NOTE: we use ldw even for LP64, since the short pointers
468	 * can address up to 1TB
469	 */
470	.macro		L2_ptep	pmd,pte,index,va,fault
471#if PT_NLEVELS == 3
472	EXTR		\va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
473#else
474	EXTR		\va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
475#endif
476	DEP             %r0,31,PAGE_SHIFT,\pmd  /* clear offset */
477	copy		%r0,\pte		/* pte = 0 in case we fault */
478	ldw,s		\index(\pmd),\pmd	/* pmd = dir[index] (short pointer) */
479	bb,>=,n		\pmd,_PxD_PRESENT_BIT,\fault	/* fault if not present */
480	DEP		%r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
481	copy		\pmd,%r9
482#ifdef CONFIG_64BIT
483	shld		%r9,PxD_VALUE_SHIFT,\pmd	/* pxd value -> table address */
484#else
485	shlw		%r9,PxD_VALUE_SHIFT,\pmd
486#endif
487	EXTR		\va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
488	DEP		%r0,31,PAGE_SHIFT,\pmd  /* clear offset */
489	shladd		\index,BITS_PER_PTE_ENTRY,\pmd,\pmd	/* pmd += index * pte entry size */
490	LDREG		%r0(\pmd),\pte		/* pmd is now pte */
491	bb,>=,n		\pte,_PAGE_PRESENT_BIT,\fault	/* fault if pte not present */
492	.endm
493
494	/* Look up PTE in a 3-Level scheme.
495	 *
496	 * Here we implement a Hybrid L2/L3 scheme: we allocate the
497	 * first pmd adjacent to the pgd.  This means that we can
498	 * subtract a constant offset to get to it.  The pmd and pgd
499	 * sizes are arranged so that a single pmd covers 4GB (giving
500	 * a full LP64 process access to 8TB) so our lookups are
501	 * effectively L2 for the first 4GB of the kernel (i.e. for
502	 * all ILP32 processes and all the kernel for machines with
503	 * under 4GB of memory) */
504	.macro		L3_ptep pgd,pte,index,va,fault
505	extrd,u		\va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
506	copy		%r0,\pte
507	extrd,u,*=	\va,31,32,%r0	/* skip the pgd level if va < 4GB... */
508	ldw,s		\index(\pgd),\pgd
509	extrd,u,*=	\va,31,32,%r0	/* (each test nullifies the next insn) */
510	bb,>=,n		\pgd,_PxD_PRESENT_BIT,\fault
511	extrd,u,*=	\va,31,32,%r0
512	shld		\pgd,PxD_VALUE_SHIFT,\index
513	extrd,u,*=	\va,31,32,%r0
514	copy		\index,\pgd
515	extrd,u,*<>	\va,31,32,%r0	/* va < 4GB: use the pmd adjacent to the pgd */
516	ldo		ASM_PGD_PMD_OFFSET(\pgd),\pgd
517	L2_ptep		\pgd,\pte,\index,\va,\fault
518	.endm
519
520	/* Set the _PAGE_ACCESSED bit of the PTE.  Be clever and
521	 * don't needlessly dirty the cache line if it was already set */
522	.macro		update_ptep	ptep,pte,tmp,tmp1
523	ldi		_PAGE_ACCESSED,\tmp1
524	or		\tmp1,\pte,\tmp		/* tmp = pte | _PAGE_ACCESSED */
525	and,COND(<>)	\tmp1,\pte,%r0		/* skip the store if already accessed */
526	STREG		\tmp,0(\ptep)
527	.endm
528
529	/* Set the dirty bit (and accessed bit).  No need to be
530	 * clever, this is only used from the dirty fault */
531	.macro		update_dirty	ptep,pte,tmp
532	ldi		_PAGE_ACCESSED|_PAGE_DIRTY,\tmp
533	or		\tmp,\pte,\pte		/* pte |= ACCESSED|DIRTY */
534	STREG		\pte,0(\ptep)		/* unconditionally write back the pte */
535	.endm
536
537	/* Convert the pte and prot to tlb insertion values.  How
538	 * this happens is quite subtle, read below */
539	.macro		make_insert_tlb	spc,pte,prot
540	space_to_prot   \spc \prot        /* create prot id from space */
541	/* The following is the real subtlety.  This is depositing
542	 * T <-> _PAGE_REFTRAP
543	 * D <-> _PAGE_DIRTY
544	 * B <-> _PAGE_DMB (memory break)
545	 *
546	 * Then incredible subtlety: The access rights are
547	 * _PAGE_GATEWAY _PAGE_EXEC _PAGE_READ
548	 * See 3-14 of the parisc 2.0 manual
549	 *
550	 * Finally, _PAGE_READ goes in the top bit of PL1 (so we
551	 * trigger an access rights trap in user space if the user
552	 * tries to read an unreadable page */
553	depd            \pte,8,7,\prot
554
555	/* PAGE_USER indicates the page can be read with user privileges,
556	 * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1
557	 * contains _PAGE_READ */
558	extrd,u,*=      \pte,_PAGE_USER_BIT+32,1,%r0	/* nullify next if not a user page */
559	depdi		7,11,3,\prot
560	/* If we're a gateway page, drop PL2 back to zero for promotion
561	 * to kernel privilege (so we can execute the page as kernel).
562	 * Any privilege promotion page always denies read and write */
563	extrd,u,*= 	\pte,_PAGE_GATEWAY_BIT+32,1,%r0	/* nullify next if not a gateway page */
564	depd		%r0,11,2,\prot	/* If Gateway, Set PL2 to 0 */
565
566	/* Get rid of prot bits and convert to page addr for iitlbt */
567
568	depd		%r0,63,PAGE_SHIFT,\pte	/* clear the page-offset bits */
569	extrd,u		\pte,56,32,\pte
570	.endm
571
572	/* Identical macro to make_insert_tlb above, except it
573	 * makes the tlb entry for the differently formatted pa11
574	 * insertion instructions */
575	.macro		make_insert_tlb_11	spc,pte,prot
576	zdep		\spc,30,15,\prot	/* create prot id from space */
577	dep		\pte,8,7,\prot		/* deposit T/D/B bits and access rights */
578	extru,=		\pte,_PAGE_NO_CACHE_BIT,1,%r0	/* nullify next if page is cacheable */
579	depi		1,12,1,\prot
580	extru,=         \pte,_PAGE_USER_BIT,1,%r0	/* nullify next if not a user page */
581	depi		7,11,3,\prot   /* Set for user space (1 rsvd for read) */
582	extru,= 	\pte,_PAGE_GATEWAY_BIT,1,%r0	/* nullify next if not a gateway page */
583	depi		0,11,2,\prot	/* If Gateway, Set PL2 to 0 */
584
585	/* Get rid of prot bits and convert to page addr for iitlba */
586
587	depi		0,31,12,\pte	/* clear the page-offset bits */
588	extru		\pte,24,25,\pte

589
590	.endm
591
592	/* This is for ILP32 PA2.0 only.  The TLB insertion needs
593	 * to extend into I/O space if the address is 0xfXXXXXXX
594	 * so we extend the f's into the top word of the pte in
595	 * this case */
596	.macro		f_extend	pte,tmp
597	extrd,s		\pte,42,4,\tmp		/* tmp = sign-extended top nibble of the address */
598	addi,<>		1,\tmp,%r0		/* skip the extend unless tmp == -1 (nibble 0xf) */
599	extrd,s		\pte,63,25,\pte		/* sign-extend the f's into the upper word */
600	.endm
601
602	/* The alias region is an 8MB aligned 16MB to do clear and
603	 * copy user pages at addresses congruent with the user
604	 * virtual address.
605	 *
606	 * To use the alias page, you set %r26 up with the to TLB
607	 * entry (identifying the physical page) and %r23 up with
608	 * the from tlb entry (or nothing if only a to entry---for
609	 * clear_user_page_asm) */
610	.macro		do_alias	spc,tmp,tmp1,va,pte,prot,fault
611	cmpib,COND(<>),n 0,\spc,\fault	/* the alias region is kernel (space 0) only */
612	ldil		L%(TMPALIAS_MAP_START),\tmp
613#if defined(CONFIG_64BIT) && (TMPALIAS_MAP_START >= 0x80000000)
614	/* on LP64, ldi will sign extend into the upper 32 bits,
615	 * which is behaviour we don't want */
616	depdi		0,31,32,\tmp
617#endif
618	copy		\va,\tmp1
619	DEPI		0,31,23,\tmp1	/* round va down to the alias region base */
620	cmpb,COND(<>),n	\tmp,\tmp1,\fault	/* fault if va is not inside the region */
621	ldi		(_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ),\prot
622	depd,z		\prot,8,7,\prot
623	/*
624	 * OK, it is in the temp alias region, check whether "from" or "to".
625	 * Check "subtle" note in pacache.S re: r23/r26.
626	 */
627#ifdef CONFIG_64BIT
628	extrd,u,*=	\va,41,1,%r0
629#else
630	extrw,u,=	\va,9,1,%r0
631#endif
632	or,COND(tr)	%r23,%r0,\pte	/* "from": pte = %r23 (,tr always nullifies next) */
633	or		%r26,%r0,\pte	/* "to":   pte = %r26 */
634	.endm
635
636
637	/*
638	 * Align fault_vector_20 on 4K boundary so that both
639	 * fault_vector_11 and fault_vector_20 are on the
640	 * same page. This is only necessary as long as we
641	 * write protect the kernel text, which we may stop
642	 * doing once we use large page translations to cover
643	 * the static part of the kernel address space.
644	 */
645
646	.export fault_vector_20

647
648	.text

649
650	.align 4096

651
652fault_vector_20:
653	/* First vector is invalid (0) */
654	.ascii	"cows can fly"
655	.byte 0
656	.align 32

657
658	hpmc		 1	/* high-priority machine check */
659	def		 2
660	def		 3
661	extint		 4	/* external interrupt */
662	def		 5
663	itlb_20		 6	/* instruction TLB miss */
664	def		 7
665	def		 8
666	def              9
667	def		10
668	def		11
669	def		12
670	def		13
671	def		14
672	dtlb_20		15	/* data TLB miss */
673#if 0
674	naitlb_20	16
675#else
676	def             16
677#endif
678	nadtlb_20	17	/* non-access data TLB miss */
679	def		18
680	def		19
681	dbit_20		20	/* TLB dirty bit trap */
682	def		21
683	def		22
684	def		23
685	def		24
686	def		25
687	def		26
688	def		27
689	def		28
690	def		29
691	def		30
692	def		31
693
694#ifndef CONFIG_64BIT

695
696	.export fault_vector_11

697
698	.align 2048

699
700fault_vector_11:
701	/* First vector is invalid (0) */
702	.ascii	"cows can fly"
703	.byte 0
704	.align 32

705
706	hpmc		 1	/* high-priority machine check */
707	def		 2
708	def		 3
709	extint		 4	/* external interrupt */
710	def		 5
711	itlb_11		 6	/* instruction TLB miss */
712	def		 7
713	def		 8
714	def              9
715	def		10
716	def		11
717	def		12
718	def		13
719	def		14
720	dtlb_11		15	/* data TLB miss */
721#if 0
722	naitlb_11	16
723#else
724	def             16
725#endif
726	nadtlb_11	17	/* non-access data TLB miss */
727	def		18
728	def		19
729	dbit_11		20	/* TLB dirty bit trap */
730	def		21
731	def		22
732	def		23
733	def		24
734	def		25
735	def		26
736	def		27
737	def		28
738	def		29
739	def		30
740	def		31

741
742#endif
743
744	.import		handle_interruption,code
745	.import		do_cpu_irq_mask,code
746
747	/*
748	 * r26 = function to be called
749	 * r25 = argument to pass in
750	 * r24 = flags for do_fork()
751	 *
752	 * Kernel threads don't ever return, so they don't need
753	 * a true register context. We just save away the arguments
754	 * for copy_thread/ret_ to properly set up the child.
755	 */
756
757#define CLONE_VM 0x100	/* Must agree with <linux/sched.h> */
758#define CLONE_UNTRACED 0x00800000

759
760	.export __kernel_thread, code
761	.import do_fork
762__kernel_thread:
763	STREG	%r2, -RP_OFFSET(%r30)	/* save return pointer */

764
765	copy	%r30, %r1		/* %r1 = base of the new pt_regs frame */
766	ldo	PT_SZ_ALGN(%r30),%r30	/* push a pt_regs-sized frame */
767#ifdef CONFIG_64BIT
768	/* Yo, function pointers in wide mode are little structs... -PB */
769	ldd	24(%r26), %r2
770	STREG	%r2, PT_GR27(%r1)	/* Store childs %dp */
771	ldd	16(%r26), %r26		/* actual entry address of the function */

772
773	STREG	%r22, PT_GR22(%r1)	/* save r22 (arg5) */
774	copy	%r0, %r22		/* user_tid */
775#endif
776	STREG	%r26, PT_GR26(%r1)  /* Store function & argument for child */
777	STREG	%r25, PT_GR25(%r1)
778	ldil	L%CLONE_UNTRACED, %r26
779	ldo	CLONE_VM(%r26), %r26   /* Force CLONE_VM since only init_mm */
780	or	%r26, %r24, %r26      /* will have kernel mappings.	 */
781	ldi	1, %r25			/* stack_start, signals kernel thread */
782	stw	%r0, -52(%r30)	     	/* user_tid */
783#ifdef CONFIG_64BIT
784	ldo	-16(%r30),%r29		/* Reference param save area */
785#endif
786	BL	do_fork, %r2
787	copy	%r1, %r24		/* pt_regs */

788
789	/* Parent Returns here */

790
791	LDREG	-PT_SZ_ALGN-RP_OFFSET(%r30), %r2	/* reload the saved rp */
792	ldo	-PT_SZ_ALGN(%r30), %r30			/* pop the frame */
793	bv	%r0(%r2)
794	nop
795
796	/*
797	 * Child Returns here
798	 *
799	 * copy_thread moved args from temp save area set up above
800	 * into task save area.
801	 */

802
803	.export	ret_from_kernel_thread
804ret_from_kernel_thread:

805
806	/* Call schedule_tail first though */
807	BL	schedule_tail, %r2
808	nop

809
810	LDREG	TI_TASK-THREAD_SZ_ALGN(%r30), %r1	/* %r1 = task_struct */
811	LDREG	TASK_PT_GR25(%r1), %r26			/* argument for the thread fn */
812#ifdef CONFIG_64BIT
813	LDREG	TASK_PT_GR27(%r1), %r27			/* %dp of the thread fn */
814	LDREG	TASK_PT_GR22(%r1), %r22
815#endif
816	LDREG	TASK_PT_GR26(%r1), %r1			/* thread function pointer */
817	ble	0(%sr7, %r1)				/* call the thread function */
818	copy	%r31, %r2				/* delay slot: set return pointer */

819
820#ifdef CONFIG_64BIT
821	ldo	-16(%r30),%r29		/* Reference param save area */
822	loadgp				/* Thread could have been in a module */
823#endif
824#ifndef CONFIG_64BIT
825	b	sys_exit
826#else
827	load32	sys_exit, %r1
828	bv	%r0(%r1)
829#endif
830	ldi	0, %r26			/* delay slot: exit code 0 */
831
832	.import	sys_execve, code
833	.export	__execve, code
834__execve:
835	copy	%r2, %r15		/* %r15 = saved return pointer */
836	copy	%r30, %r16		/* %r16 = base of the new pt_regs frame */
837	ldo	PT_SZ_ALGN(%r30), %r30
838	STREG	%r26, PT_GR26(%r16)	/* stash args for sys_execve */
839	STREG	%r25, PT_GR25(%r16)
840	STREG	%r24, PT_GR24(%r16)
841#ifdef CONFIG_64BIT
842	ldo	-16(%r30),%r29		/* Reference param save area */
843#endif
844	BL	sys_execve, %r2
845	copy	%r16, %r26		/* delay slot: arg0 = pt_regs */

846
847	cmpib,=,n 0,%r28,intr_return    /* forward */

848
849	/* yes, this will trap and die. */
850	copy	%r15, %r2
851	copy	%r16, %r30
852	bv	%r0(%r2)
853	nop
854
855	.align 4

856
857	/*
858	 * struct task_struct *_switch_to(struct task_struct *prev,
859	 *	struct task_struct *next)
860	 *
861	 * switch kernel stacks and return prev */
862	.export	_switch_to, code
863_switch_to:
864	STREG	 %r2, -RP_OFFSET(%r30)

865
866	callee_save_float
867	callee_save

868
869	load32	_switch_to_ret, %r2

870
871	STREG	%r2, TASK_PT_KPC(%r26)	/* prev will resume at _switch_to_ret */
872	LDREG	TASK_PT_KPC(%r25), %r2	/* resume point of next */

873
874	STREG	%r30, TASK_PT_KSP(%r26)	/* save prev's kernel stack pointer */
875	LDREG	TASK_PT_KSP(%r25), %r30	/* load next's kernel stack pointer */
876	LDREG	TASK_THREAD_INFO(%r25), %r25
877	bv	%r0(%r2)
878	mtctl   %r25,%cr30		/* delay slot: cr30 = next's thread_info */

879
880_switch_to_ret:
881	mtctl	%r0, %cr0		/* Needed for single stepping */
882	callee_rest
883	callee_rest_float

884
885	LDREG	-RP_OFFSET(%r30), %r2
886	bv	%r0(%r2)
887	copy	%r26, %r28		/* delay slot: return value = prev */
888
889	/*
890	 * Common rfi return path for interruptions, kernel execve, and
891	 * sys_rt_sigreturn (sometimes).  The sys_rt_sigreturn syscall will
892	 * return via this path if the signal was received when the process
893	 * was running; if the process was blocked on a syscall then the
894	 * normal syscall_exit path is used.  All syscalls for traced
895 * processes exit via intr_restore.
896	 *
897	 * XXX If any syscalls that change a processes space id ever exit
898	 * this way, then we will need to copy %sr3 in to PT_SR[3..7], and
899	 * adjust IASQ[0..1].
900	 *
901	 */
902
903	.align 4096

904
905	.export	syscall_exit_rfi
906syscall_exit_rfi:
907	mfctl   %cr30,%r16
908	LDREG	TI_TASK(%r16), %r16	/* thread_info -> task_struct */
909	ldo	TASK_REGS(%r16),%r16	/* %r16 = the task's pt_regs */
910	/* Force iaoq to userspace, as the user has had access to our current
911	 * context via sigcontext. Also Filter the PSW for the same reason.
912	 */
913	LDREG	PT_IAOQ0(%r16),%r19
914	depi	3,31,2,%r19		/* force privilege level 3 (user) */
915	STREG	%r19,PT_IAOQ0(%r16)
916	LDREG	PT_IAOQ1(%r16),%r19
917	depi	3,31,2,%r19
918	STREG	%r19,PT_IAOQ1(%r16)
919	LDREG   PT_PSW(%r16),%r19
920	load32	USER_PSW_MASK,%r1
921#ifdef CONFIG_64BIT
922	load32	USER_PSW_HI_MASK,%r20
923	depd    %r20,31,32,%r1
924#endif
925	and     %r19,%r1,%r19 /* Mask out bits that user shouldn't play with */
926	load32	USER_PSW,%r1
927	or      %r19,%r1,%r19 /* Make sure default USER_PSW bits are set */
928	STREG   %r19,PT_PSW(%r16)

929
930	/*
931	 * If we aren't being traced, we never saved space registers
932	 * (we don't store them in the sigcontext), so set them
933	 * to "proper" values now (otherwise we'll wind up restoring
934	 * whatever was last stored in the task structure, which might
935	 * be inconsistent if an interrupt occurred while on the gateway
936	 * page) Note that we may be "trashing" values the user put in
937	 * them, but we don't support the user changing them.
938	 */

939
940	STREG   %r0,PT_SR2(%r16)
941	mfsp    %sr3,%r19		/* sr3 holds the user space id while in the kernel */
942	STREG   %r19,PT_SR0(%r16)
943	STREG   %r19,PT_SR1(%r16)
944	STREG   %r19,PT_SR3(%r16)
945	STREG   %r19,PT_SR4(%r16)
946	STREG   %r19,PT_SR5(%r16)
947	STREG   %r19,PT_SR6(%r16)
948	STREG   %r19,PT_SR7(%r16)
949
950intr_return:
951	/* NOTE: Need to enable interrupts in case we schedule. */
952	ssm     PSW_SM_I, %r0

953
954	/* Check for software interrupts */

955
956	.import irq_stat,data

957
958	load32	irq_stat,%r19
959#ifdef CONFIG_SMP
960	mfctl   %cr30,%r1
961	ldw	TI_CPU(%r1),%r1 /* get cpu # - int */
962	/* shift left ____cacheline_aligned (aka L1_CACHE_BYTES) amount
963	** irq_stat[] is defined using ____cacheline_aligned.
964	*/
965#ifdef CONFIG_64BIT
966	shld	%r1, 6, %r20	/* entry stride is 64 bytes */
967#else
968	shlw	%r1, 5, %r20	/* entry stride is 32 bytes */
969#endif
970	add     %r19,%r20,%r19	/* now have &irq_stat[smp_processor_id()] */
971#endif /* CONFIG_SMP */

972
973intr_check_resched:

974
975	/* check for reschedule */
976	mfctl   %cr30,%r1
977	LDREG   TI_FLAGS(%r1),%r19	/* sched.h: TIF_NEED_RESCHED */
978	bb,<,n	%r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */

979
980intr_check_sig:
981	/* As above */
982	mfctl   %cr30,%r1
983	LDREG	TI_FLAGS(%r1),%r19	/* sched.h: TIF_SIGPENDING */
984	bb,<,n %r19, 31-TIF_SIGPENDING, intr_do_signal /* forward */

985
986intr_restore:
987	copy            %r16,%r29	/* %r29 = pt_regs to restore from */
988	ldo             PT_FR31(%r29),%r1
989	rest_fp         %r1
990	rest_general    %r29

991
992	/* inverse of virt_map */
993	pcxt_ssm_bug
994	rsm             PSW_SM_QUIET,%r0	/* prepare for rfi */
995	tophys_r1       %r29

996
997	/* Restore space id's and special cr's from PT_REGS
998	 * structure pointed to by r29
999	 */
1000	rest_specials	%r29

1001
1002	/* IMPORTANT: rest_stack restores r29 last (we are using it)!
1003	 * It also restores r1 and r30.
1004	 */
1005	rest_stack

1006
1007	rfi
1008	nop
1009	nop
1010	nop
1011	nop
1012	nop
1013	nop
1014	nop
1015	nop
1016
1017	.import schedule,code
1018intr_do_resched:
1019	/* Only do reschedule if we are returning to user space */
1020	LDREG	PT_IASQ0(%r16), %r20
1021	CMPIB= 0,%r20,intr_restore /* backward */
1022	nop
1023	LDREG	PT_IASQ1(%r16), %r20
1024	CMPIB= 0,%r20,intr_restore /* backward */
1025	nop

1026
1027#ifdef CONFIG_64BIT
1028	ldo	-16(%r30),%r29		/* Reference param save area */
1029#endif

1030
1031	ldil	L%intr_check_sig, %r2	/* schedule() will return to intr_check_sig */
1032#ifndef CONFIG_64BIT
1033	b	schedule
1034#else
1035	load32	schedule, %r20
1036	bv	%r0(%r20)
1037#endif
1038	ldo	R%intr_check_sig(%r2), %r2	/* delay slot */
1039
1040
1041	.import do_signal,code
1042intr_do_signal:
1043	/*
1044		This check is critical to having LWS
1045		working. The IASQ is zero on the gateway
1046		page and we cannot deliver any signals until
1047		we get off the gateway page.

1048
1049		Only do signals if we are returning to user space
1050	*/
1051	LDREG	PT_IASQ0(%r16), %r20
1052	CMPIB= 0,%r20,intr_restore /* backward */
1053	nop
1054	LDREG	PT_IASQ1(%r16), %r20
1055	CMPIB= 0,%r20,intr_restore /* backward */
1056	nop

1057
1058	copy	%r0, %r24			/* unsigned long in_syscall */
1059	copy	%r16, %r25			/* struct pt_regs *regs */
1060#ifdef CONFIG_64BIT
1061	ldo	-16(%r30),%r29			/* Reference param save area */
1062#endif

1063
1064	BL	do_signal,%r2
1065	copy	%r0, %r26			/* sigset_t *oldset = NULL */

1066
1067	b	intr_check_sig			/* re-test the pending-signal flag */
1068	nop
1069
1070	/*
1071	 * External interrupts.
1072	 */

1073
1074intr_extint:
1075	CMPIB=,n 0,%r16,1f	/* %r16 = sr7 (from the extint macro); 0 => already kernel */
1076	get_stack_use_cr30	/* from user: pt_regs in the task struct */
1077	b,n 3f

1078
10791:
1080#if 0  /* Interrupt Stack support not working yet! */
1081	mfctl	%cr31,%r1
1082	copy	%r30,%r17
1083	/* FIXME! depi below has hardcoded idea of interrupt stack size (32k)*/
1084#ifdef CONFIG_64BIT
1085	depdi	0,63,15,%r17
1086#else
1087	depi	0,31,15,%r17
1088#endif
1089	CMPB=,n	%r1,%r17,2f
1090	get_stack_use_cr31
1091	b,n 3f
1092#endif
10932:
1094	get_stack_use_r30	/* already on a kernel stack: push pt_regs */

1095
10963:
1097	save_specials	%r29
1098	virt_map
1099	save_general	%r29

1100
1101	ldo	PT_FR0(%r29), %r24
1102	save_fp	%r24

1103
1104	loadgp

1105
1106	copy	%r29, %r26	/* arg0 is pt_regs */
1107	copy	%r29, %r16	/* save pt_regs */

1108
1109	ldil	L%intr_return, %r2

1110
1111#ifdef CONFIG_64BIT
1112	ldo	-16(%r30),%r29	/* Reference param save area */
1113#endif

1114
1115	b	do_cpu_irq_mask
1116	ldo	R%intr_return(%r2), %r2	/* return to intr_return, not here */
1117
1118
1119	/* Generic interruptions (illegal insn, unaligned, page fault, etc) */

1120
1121	.export         intr_save, code /* for os_hpmc */

1122
1123intr_save:
1124	mfsp    %sr7,%r16
1125	CMPIB=,n 0,%r16,1f	/* faulted while in the kernel (sr7 == 0)? */
1126	get_stack_use_cr30	/* from user: pt_regs in the task struct */
1127	b	2f
1128	copy    %r8,%r26	/* delay slot: %r26 = trap code (survives the rfir) */

1129
11301:
1131	get_stack_use_r30	/* in kernel: push pt_regs on the stack */
1132	copy    %r8,%r26

1133
11342:
1135	save_specials	%r29

1136
1137	/* If this trap is a itlb miss, skip saving/adjusting isr/ior */

1138
1139	/*
1140	 * FIXME: 1) Use a #define for the hardwired "6" below (and in
1141	 *           traps.c).
1142	 *        2) Once we start executing code above 4 Gb, we need
1143	 *           to adjust iasq/iaoq here in the same way we
1144	 *           adjust isr/ior below.
1145	 */

1146
1147	CMPIB=,n        6,%r26,skip_save_ior	/* 6 == itlb miss trap */

1148

1149
1150	mfctl           %cr20, %r16 /* isr */
1151	nop		/* serialize mfctl on PA 2.0 to avoid 4 cycle penalty */
1152	mfctl           %cr21, %r17 /* ior */

1153

1154
1155#ifdef CONFIG_64BIT
1156	/*
1157	 * If the interrupted code was running with W bit off (32 bit),
1158	 * clear the b bits (bits 0 & 1) in the ior.
1159	 * save_specials left ipsw value in r8 for us to test.
1160	 */
1161	extrd,u,*<>     %r8,PSW_W_BIT,1,%r0
1162	depdi           0,1,2,%r17

1163
1164	/*
1165	 * FIXME: This code has hardwired assumptions about the split
1166	 *        between space bits and offset bits. This will change
1167	 *        when we allow alternate page sizes.
1168	 */

1169
1170	/* adjust isr/ior. */

1171
1172	extrd,u         %r16,63,7,%r1    /* get high bits from isr for ior */
1173	depd            %r1,31,7,%r17    /* deposit them into ior */
1174	depdi           0,63,7,%r16      /* clear them from isr */
1175#endif
1176	STREG           %r16, PT_ISR(%r29)
1177	STREG           %r17, PT_IOR(%r29)

1178

1179
1180skip_save_ior:
1181	virt_map
1182	save_general	%r29

1183
1184	ldo		PT_FR0(%r29), %r25
1185	save_fp		%r25

1186
1187	loadgp

1188
1189	copy		%r29, %r25	/* arg1 is pt_regs */
1190#ifdef CONFIG_64BIT
1191	ldo		-16(%r30),%r29	/* Reference param save area */
1192#endif

1193
1194	ldil		L%intr_check_sig, %r2
1195	copy		%r25, %r16	/* save pt_regs */

1196
1197	b		handle_interruption
1198	ldo		R%intr_check_sig(%r2), %r2	/* delay slot: return to intr_check_sig */
1199
1200
1201	/*
1202	 * Note for all tlb miss handlers:
1203	 *
1204	 * cr24 contains a pointer to the kernel address space
1205	 * page directory.
1206	 *
1207	 * cr25 contains a pointer to the current user address
1208	 * space page directory.
1209	 *
1210	 * sr3 will contain the space id of the user address space
1211	 * of the current running thread while that thread is
1212	 * running in the kernel.
1213	 */
1214
1215	/*
1216	 * register number allocations.  Note that these are all
1217	 * in the shadowed registers
1218	 */
1219
1220	t0 = r1		/* temporary register 0 */
1221	va = r8		/* virtual address for which the trap occured */
1222	t1 = r9		/* temporary register 1 */
1223	pte  = r16	/* pte/phys page # */
1224	prot = r17	/* prot bits */
1225	spc  = r24	/* space for which the trap occured */
1226	ptp = r25	/* page directory/page table pointer */
1227
1228#ifdef CONFIG_64BIT
1229
	/*
	 * PA 2.0 wide-mode data TLB miss handler.  Walks the 3-level
	 * page table; a missing pte exits to dtlb_check_alias_20w and
	 * a space mismatch exits to dtlb_fault.
	 */
dtlb_miss_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dtlb_fault

	L3_ptep		ptp,pte,t0,va,dtlb_check_alias_20w

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	idtlbt          pte,prot	/* insert the data translation */

	rfir				/* return and retry the access */
	nop
1245
	/*
	 * No pte found: check for a temporary-alias mapping.  do_alias
	 * either sets up pte/prot for the alias region or branches to
	 * dtlb_fault.
	 */
dtlb_check_alias_20w:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault

	idtlbt          pte,prot

	rfir
	nop
1253
	/*
	 * Wide-mode non-access data TLB miss (fdc/fic/pdc/lpa/probe...;
	 * see the nadtlb_emulate comment below).  Same walk as
	 * dtlb_miss_20w, but failures route to nadtlb_fault and a
	 * missing pte to nadtlb_check_flush_20w.
	 */
nadtlb_miss_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,nadtlb_fault

	L3_ptep		ptp,pte,t0,va,nadtlb_check_flush_20w

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	idtlbt          pte,prot

	rfir
	nop
1269
	/*
	 * pte not valid: if _PAGE_FLUSH is set, insert a "flush only"
	 * translation so the flush instruction can complete; otherwise
	 * emulate the instruction's side effects in nadtlb_emulate.
	 */
nadtlb_check_flush_20w:
	bb,>=,n          pte,_PAGE_FLUSH_BIT,nadtlb_emulate	/* bit clear -> emulate */

	/* Insert a "flush only" translation */

	depdi,z         7,7,3,prot
	depdi           1,10,1,prot

	/* Get rid of prot bits and convert to page addr for idtlbt */

	depdi		0,63,12,pte
	extrd,u         pte,56,52,pte
	idtlbt          pte,prot

	rfir
	nop
1286
1287#else
1288
	/*
	 * PA 1.1 data TLB miss handler.  2-level page table walk; the
	 * insert is done with separate idtlba/idtlbp through %sr1
	 * (which is saved and restored around the insert).
	 */
dtlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,dtlb_fault

	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_11

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t0  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)	/* insert address portion */
	idtlbp		prot,(%sr1,va)	/* insert protection portion */

	mtsp		t0, %sr1	/* Restore sr1 */

	rfir
	nop
1310
dtlb_check_alias_11:

	/* Check to see if fault is in the temporary alias region */

	cmpib,<>,n      0,spc,dtlb_fault /* forward */
	ldil            L%(TMPALIAS_MAP_START),t0
	copy            va,t1
	depwi           0,31,23,t1	/* mask va down to its region base */
	cmpb,<>,n       t0,t1,dtlb_fault /* forward */
	ldi             (_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ),prot
	depw,z          prot,8,7,prot	/* position prot bits for idtlbp */

	/*
	 * OK, it is in the temp alias region, check whether "from" or "to".
	 * Check "subtle" note in pacache.S re: r23/r26.
	 */

	extrw,u,=       va,9,1,r0	/* test the from/to selector bit */
	or,tr           %r23,%r0,pte    /* If "from" use "from" page */
	or              %r26,%r0,pte    /* else "to", use "to" page  */

	idtlba          pte,(va)
	idtlbp          prot,(va)

	rfir
	nop
1337
	/*
	 * PA 1.1 non-access data TLB miss.  Same walk as dtlb_miss_11,
	 * but failures go to nadtlb_fault and a missing pte is handled
	 * by nadtlb_check_flush_11.
	 */
nadtlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,nadtlb_fault

	L2_ptep		ptp,pte,t0,va,nadtlb_check_flush_11

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot


	mfsp		%sr1,t0  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t0, %sr1	/* Restore sr1 */

	rfir
	nop
1360
	/*
	 * pte not valid: if _PAGE_FLUSH is set, insert a "flush only"
	 * translation (32-bit zdepi/depi/extru forms); otherwise fall
	 * into nadtlb_emulate.
	 */
nadtlb_check_flush_11:
	bb,>=,n          pte,_PAGE_FLUSH_BIT,nadtlb_emulate	/* bit clear -> emulate */

	/* Insert a "flush only" translation */

	zdepi           7,7,3,prot
	depi            1,10,1,prot

	/* Get rid of prot bits and convert to page addr for idtlba */

	depi		0,31,12,pte
	extru		pte,24,25,pte

	mfsp		%sr1,t0  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t0, %sr1	/* Restore sr1 */

	rfir
	nop
1384
	/*
	 * PA 2.0 narrow-mode data TLB miss.  2-level walk; f_extend
	 * widens the pte before the combined idtlbt insert.
	 */
dtlb_miss_20:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dtlb_fault

	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_20

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	f_extend	pte,t0

	idtlbt          pte,prot

	rfir
	nop
1402
	/*
	 * No pte found: temporary-alias check (do_alias sets up
	 * pte/prot or branches to dtlb_fault).
	 */
dtlb_check_alias_20:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault

	idtlbt          pte,prot

	rfir
	nop
1410
	/*
	 * PA 2.0 narrow-mode non-access data TLB miss.  Failures go to
	 * nadtlb_fault; a missing pte is handled by
	 * nadtlb_check_flush_20.
	 */
nadtlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,nadtlb_fault

	L2_ptep		ptp,pte,t0,va,nadtlb_check_flush_20

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	f_extend	pte,t0

        idtlbt          pte,prot

	rfir
	nop
1428
	/*
	 * pte not valid: if _PAGE_FLUSH is set, insert a "flush only"
	 * translation; otherwise emulate in nadtlb_emulate.
	 */
nadtlb_check_flush_20:
	bb,>=,n          pte,_PAGE_FLUSH_BIT,nadtlb_emulate	/* bit clear -> emulate */

	/* Insert a "flush only" translation */

	depdi,z         7,7,3,prot
	depdi           1,10,1,prot

	/* Get rid of prot bits and convert to page addr for idtlbt */

	depdi		0,63,12,pte
	extrd,u         pte,56,32,pte
	idtlbt          pte,prot

	rfir
	nop
1445#endif
1446
nadtlb_emulate:

	/*
	 * Non access misses can be caused by fdc,fic,pdc,lpa,probe and
	 * probei instructions. We don't want to fault for these
	 * instructions (not only does it not make sense, it can cause
	 * deadlocks, since some flushes are done with the mmap
	 * semaphore held). If the translation doesn't exist, we can't
	 * insert a translation, so have to emulate the side effects
	 * of the instruction. Since we don't insert a translation
	 * we can get a lot of faults during a flush loop, so it makes
	 * sense to try to do it here with minimum overhead. We only
	 * emulate fdc,fic,pdc,probew,prober instructions whose base
	 * and index registers are not shadowed. We defer everything
	 * else to the "slow" path.
	 */

	mfctl           %cr19,%r9 /* Get iir */

	/* PA 2.0 Arch Ref. Book pg 382 has a good description of the insn bits.
	   Checks for fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw */

	/* Checks for fdc,fdce,pdc,"fic,4f" only */
	ldi             0x280,%r16
	and             %r9,%r16,%r17
	cmpb,<>,n       %r16,%r17,nadtlb_probe_check
	bb,>=,n         %r9,26,nadtlb_nullify  /* m bit not set, just nullify */
	BL		get_register,%r25      /* value returned in %r1, -1 if shadowed */
	extrw,u         %r9,15,5,%r8           /* Get index register # */
	CMPIB=,n        -1,%r1,nadtlb_fault    /* have to use slow path */
	copy            %r1,%r24               /* stash index value */
	BL		get_register,%r25
	extrw,u         %r9,10,5,%r8           /* Get base register # */
	CMPIB=,n        -1,%r1,nadtlb_fault    /* have to use slow path */
	BL		set_register,%r25
	add,l           %r1,%r24,%r1           /* doesn't affect c/b bits */

	/* Nullify the interrupted instruction by setting PSW N, then
	 * return; the instruction will be skipped instead of re-run. */
nadtlb_nullify:
	mfctl           %ipsw,%r8
	ldil            L%PSW_N,%r9
	or              %r8,%r9,%r8            /* Set PSW_N */
	mtctl           %r8,%ipsw

	rfir
	nop
1492
1493	/*
1494		When there is no translation for the probe address then we
1495		must nullify the insn and return zero in the target regsiter.
1496		This will indicate to the calling code that it does not have
1497		write/read privileges to this address.
1498
1499		This should technically work for prober and probew in PA 1.1,
1500		and also probe,r and probe,w in PA 2.0
1501
1502		WARNING: USE ONLY NON-SHADOW REGISTERS WITH PROBE INSN!
1503		THE SLOW-PATH EMULATION HAS NOT BEEN WRITTEN YET.
1504
1505	*/
1506nadtlb_probe_check:
1507	ldi             0x80,%r16
1508	and             %r9,%r16,%r17
1509	cmpb,<>,n       %r16,%r17,nadtlb_fault /* Must be probe,[rw]*/
1510	BL              get_register,%r25      /* Find the target register */
1511	extrw,u         %r9,31,5,%r8           /* Get target register */
1512	CMPIB=,n        -1,%r1,nadtlb_fault    /* have to use slow path */
1513	BL		set_register,%r25
1514	copy            %r0,%r1                /* Write zero to target register */
1515	b nadtlb_nullify                       /* Nullify return insn */
1516	nop
1517
1518
1519#ifdef CONFIG_64BIT
itlb_miss_20w:

	/*
	 * I miss is a little different, since we allow users to fault
	 * on the gateway page which is in the kernel address space.
	 * Wide-mode: 3-level walk, insert with iitlbt.
	 */

	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,itlb_fault

	L3_ptep		ptp,pte,t0,va,itlb_fault

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	iitlbt          pte,prot	/* insert instruction translation */

	rfir
	nop
1541
1542#else
1543
	/*
	 * PA 1.1 instruction TLB miss: 2-level walk, separate
	 * iitlba/iitlbp inserts through %sr1.
	 */
itlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,itlb_fault

	L2_ptep		ptp,pte,t0,va,itlb_fault

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t0  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	iitlba		pte,(%sr1,va)
	iitlbp		prot,(%sr1,va)

	mtsp		t0, %sr1	/* Restore sr1 */

	rfir
	nop
1565
	/*
	 * PA 2.0 narrow-mode instruction TLB miss: 2-level walk,
	 * f_extend then combined iitlbt insert.
	 */
itlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,itlb_fault

	L2_ptep		ptp,pte,t0,va,itlb_fault

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	f_extend	pte,t0

	iitlbt          pte,prot

	rfir
	nop
1583
1584#endif
1585
1586#ifdef CONFIG_64BIT
1587
	/*
	 * Wide-mode dirty-bit trap: walk the page table, mark the pte
	 * dirty (update_dirty) and re-insert the translation.  On SMP
	 * the update is serialized with the pa_dbit_lock ldcw spinlock;
	 * locking is skipped for space 0 faults.
	 */
dbit_trap_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dbit_fault

	L3_ptep		ptp,pte,t0,va,dbit_fault

#ifdef CONFIG_SMP
	CMPIB=,n        0,spc,dbit_nolock_20w	/* no lock needed for space 0 */
	load32		PA(pa_dbit_lock),t0

dbit_spin_20w:
	ldcw            0(t0),t1	/* load-and-clear; 0 means locked */
	cmpib,=         0,t1,dbit_spin_20w
	nop

dbit_nolock_20w:
#endif
	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	idtlbt          pte,prot
#ifdef CONFIG_SMP
	CMPIB=,n        0,spc,dbit_nounlock_20w
	ldi             1,t1
	stw             t1,0(t0)	/* release: store 1 back into the lock */

dbit_nounlock_20w:
#endif

	rfir
	nop
1621#else
1622
	/*
	 * PA 1.1 dirty-bit trap: same structure as dbit_trap_20w but
	 * with a 2-level walk and idtlba/idtlbp inserts through %sr1.
	 */
dbit_trap_11:

	get_pgd		spc,ptp

	space_check	spc,t0,dbit_fault

	L2_ptep		ptp,pte,t0,va,dbit_fault

#ifdef CONFIG_SMP
	CMPIB=,n        0,spc,dbit_nolock_11	/* no lock needed for space 0 */
	load32		PA(pa_dbit_lock),t0

dbit_spin_11:
	ldcw            0(t0),t1	/* load-and-clear; 0 means locked */
	cmpib,=         0,t1,dbit_spin_11
	nop

dbit_nolock_11:
#endif
	update_dirty	ptp,pte,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp            %sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp            t1, %sr1     /* Restore sr1 */
#ifdef CONFIG_SMP
	CMPIB=,n        0,spc,dbit_nounlock_11
	ldi             1,t1
	stw             t1,0(t0)	/* release the dbit lock */

dbit_nounlock_11:
#endif

	rfir
	nop
1663
	/*
	 * PA 2.0 narrow-mode dirty-bit trap: 2-level walk, f_extend,
	 * then combined idtlbt insert, under pa_dbit_lock on SMP.
	 */
dbit_trap_20:
	get_pgd		spc,ptp

	space_check	spc,t0,dbit_fault

	L2_ptep		ptp,pte,t0,va,dbit_fault

#ifdef CONFIG_SMP
	CMPIB=,n        0,spc,dbit_nolock_20	/* no lock needed for space 0 */
	load32		PA(pa_dbit_lock),t0

dbit_spin_20:
	ldcw            0(t0),t1	/* load-and-clear; 0 means locked */
	cmpib,=         0,t1,dbit_spin_20
	nop

dbit_nolock_20:
#endif
	update_dirty	ptp,pte,t1

	make_insert_tlb	spc,pte,prot

	f_extend	pte,t1

        idtlbt          pte,prot

#ifdef CONFIG_SMP
	CMPIB=,n        0,spc,dbit_nounlock_20
	ldi             1,t1
	stw             t1,0(t0)	/* release the dbit lock */

dbit_nounlock_20:
#endif

	rfir
	nop
1700#endif
1701
	.import handle_interruption,code

	/*
	 * Slow-path fault exits for the handlers above: each loads the
	 * interruption code into %r8 (in the branch delay slot) and
	 * enters the common intr_save path.
	 */
kernel_bad_space:
	b               intr_save
	ldi             31,%r8  /* Use an unused code */

dbit_fault:
	b               intr_save
	ldi             20,%r8

itlb_fault:
	b               intr_save
	ldi             6,%r8

nadtlb_fault:
	b               intr_save
	ldi             17,%r8

dtlb_fault:
	b               intr_save
	ldi             15,%r8
1723
1724	/* Register saving semantics for system calls:
1725
1726	   %r1		   clobbered by system call macro in userspace
1727	   %r2		   saved in PT_REGS by gateway page
1728	   %r3  - %r18	   preserved by C code (saved by signal code)
1729	   %r19 - %r20	   saved in PT_REGS by gateway page
1730	   %r21 - %r22	   non-standard syscall args
1731			   stored in kernel stack by gateway page
1732	   %r23 - %r26	   arg3-arg0, saved in PT_REGS by gateway page
1733	   %r27 - %r30	   saved in PT_REGS by gateway page
1734	   %r31		   syscall return pointer
1735	 */
1736
1737	/* Floating point registers (FIXME: what do we do with these?)
1738
1739	   %fr0  - %fr3	   status/exception, not preserved
1740	   %fr4  - %fr7	   arguments
1741	   %fr8	 - %fr11   not preserved by C code
1742	   %fr12 - %fr21   preserved by C code
1743	   %fr22 - %fr31   not preserved by C code
1744	 */
1745
	/* Save the callee-saved registers %r3-%r18 into the pt_regs
	 * area pointed to by \regs. */
	.macro	reg_save regs
	STREG	%r3, PT_GR3(\regs)
	STREG	%r4, PT_GR4(\regs)
	STREG	%r5, PT_GR5(\regs)
	STREG	%r6, PT_GR6(\regs)
	STREG	%r7, PT_GR7(\regs)
	STREG	%r8, PT_GR8(\regs)
	STREG	%r9, PT_GR9(\regs)
	STREG   %r10,PT_GR10(\regs)
	STREG   %r11,PT_GR11(\regs)
	STREG   %r12,PT_GR12(\regs)
	STREG   %r13,PT_GR13(\regs)
	STREG   %r14,PT_GR14(\regs)
	STREG   %r15,PT_GR15(\regs)
	STREG   %r16,PT_GR16(\regs)
	STREG   %r17,PT_GR17(\regs)
	STREG   %r18,PT_GR18(\regs)
	.endm
1764
	/* Restore the callee-saved registers %r3-%r18 from the pt_regs
	 * area pointed to by \regs (inverse of reg_save). */
	.macro	reg_restore regs
	LDREG	PT_GR3(\regs), %r3
	LDREG	PT_GR4(\regs), %r4
	LDREG	PT_GR5(\regs), %r5
	LDREG	PT_GR6(\regs), %r6
	LDREG	PT_GR7(\regs), %r7
	LDREG	PT_GR8(\regs), %r8
	LDREG	PT_GR9(\regs), %r9
	LDREG   PT_GR10(\regs),%r10
	LDREG   PT_GR11(\regs),%r11
	LDREG   PT_GR12(\regs),%r12
	LDREG   PT_GR13(\regs),%r13
	LDREG   PT_GR14(\regs),%r14
	LDREG   PT_GR15(\regs),%r15
	LDREG   PT_GR16(\regs),%r16
	LDREG   PT_GR17(\regs),%r17
	LDREG   PT_GR18(\regs),%r18
	.endm
1783
	.export sys_fork_wrapper
	.export child_return

	/*
	 * fork() is implemented here as sys_clone(SIGCHLD, child_sp, regs).
	 * Callee-saved regs and %cr27 are saved into pt_regs first so the
	 * child gets a consistent register image.
	 */
sys_fork_wrapper:
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
	ldo	TASK_REGS(%r1),%r1	/* %r1 = task's pt_regs */
	reg_save %r1
	mfctl	%cr27, %r3
	STREG	%r3, PT_CR27(%r1)

	STREG	%r2,-RP_OFFSET(%r30)
	ldo	FRAME_SIZE(%r30),%r30
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif

	/* These are call-clobbered registers and therefore
	   also syscall-clobbered (we hope). */
	STREG	%r2,PT_GR19(%r1)	/* save for child */
	STREG	%r30,PT_GR21(%r1)

	LDREG	PT_GR30(%r1),%r25	/* arg1 = user stack pointer */
	copy	%r1,%r24		/* arg2 = pt_regs */
	BL	sys_clone,%r2
	ldi	SIGCHLD,%r26		/* (delay slot) arg0 = SIGCHLD */

	LDREG	-RP_OFFSET-FRAME_SIZE(%r30),%r2

	/* Common return path, also used by clone/vfork wrappers. */
wrapper_exit:
	ldo	-FRAME_SIZE(%r30),%r30		/* get the stackframe */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r1	 /* get pt regs */

	LDREG	PT_CR27(%r1), %r3
	mtctl	%r3, %cr27
	reg_restore %r1

	/* strace expects syscall # to be preserved in r20 */
	ldi	__NR_fork,%r20
	bv %r0(%r2)
	STREG	%r20,PT_GR20(%r1)

	/* Set the return value for the child */
child_return:
	BL	schedule_tail, %r2
	nop

	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE-FRAME_SIZE(%r30), %r1
	LDREG	TASK_PT_GR19(%r1),%r2	/* parent's saved rp */
	b	wrapper_exit
	copy	%r0,%r28		/* (delay slot) child returns 0 */
1833
1834
	.export sys_clone_wrapper

	/*
	 * clone() wrapper: save callee-saved regs and %cr27 into pt_regs,
	 * pass pt_regs as an extra argument, and return via wrapper_exit.
	 */
sys_clone_wrapper:
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r1	/* get pt regs */
	reg_save %r1
	mfctl	%cr27, %r3
	STREG	%r3, PT_CR27(%r1)

	STREG	%r2,-RP_OFFSET(%r30)
	ldo	FRAME_SIZE(%r30),%r30
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif

	/* WARNING - Clobbers r19 and r21, userspace must save these! */
	STREG	%r2,PT_GR19(%r1)	/* save for child */
	STREG	%r30,PT_GR21(%r1)
	BL	sys_clone,%r2
	copy	%r1,%r24		/* (delay slot) extra arg = pt_regs */

	b	wrapper_exit
	LDREG	-RP_OFFSET-FRAME_SIZE(%r30),%r2
1857
	.export sys_vfork_wrapper

	/*
	 * vfork() wrapper: save callee-saved regs and %cr27 into pt_regs,
	 * call sys_vfork(regs), and return via wrapper_exit.
	 */
sys_vfork_wrapper:
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r1	/* get pt regs */
	reg_save %r1
	mfctl	%cr27, %r3
	STREG	%r3, PT_CR27(%r1)

	STREG	%r2,-RP_OFFSET(%r30)
	ldo	FRAME_SIZE(%r30),%r30
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif

	STREG	%r2,PT_GR19(%r1)	/* save for child */
	STREG	%r30,PT_GR21(%r1)

	BL	sys_vfork,%r2
	copy	%r1,%r26		/* (delay slot) arg0 = pt_regs */

	b	wrapper_exit
	LDREG	-RP_OFFSET-FRAME_SIZE(%r30),%r2
1880
1881
	/*
	 * Shared body for the execve wrappers: call \execve with the
	 * task's pt_regs as arg0, then return to the saved rp (%r19).
	 */
	.macro  execve_wrapper execve
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r1	/* get pt regs */

	/*
	 * Do we need to save/restore r3-r18 here?
	 * I don't think so. why would new thread need old
	 * threads registers?
	 */

	/* %arg0 - %arg3 are already saved for us. */

	STREG %r2,-RP_OFFSET(%r30)
	ldo FRAME_SIZE(%r30),%r30
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif
	BL \execve,%r2
	copy %r1,%arg0			/* (delay slot) arg0 = pt_regs */

	ldo -FRAME_SIZE(%r30),%r30
	LDREG -RP_OFFSET(%r30),%r2

	/* If exec succeeded we need to load the args */

	ldo -1024(%r0),%r1
	cmpb,>>= %r28,%r1,error_\execve	/* rv in errno range -> error */
	copy %r2,%r19			/* (delay slot, both paths) %r19 = rp */

error_\execve:
	bv %r0(%r19)
	nop
	.endm
1915
	.export sys_execve_wrapper
	.import sys_execve

	/* Native execve entry point, built from the macro above. */
sys_execve_wrapper:
	execve_wrapper sys_execve

#ifdef CONFIG_64BIT
	.export sys32_execve_wrapper
	.import sys32_execve

	/* 32-bit-compat execve entry point (64-bit kernels only). */
sys32_execve_wrapper:
	execve_wrapper sys32_execve
#endif
1929
	.export sys_rt_sigreturn_wrapper
sys_rt_sigreturn_wrapper:
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26
	ldo	TASK_REGS(%r26),%r26	/* get pt regs */
	/* Don't save regs, we are going to restore them from sigcontext. */
	STREG	%r2, -RP_OFFSET(%r30)
#ifdef CONFIG_64BIT
	ldo	FRAME_SIZE(%r30), %r30
	BL	sys_rt_sigreturn,%r2
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	BL	sys_rt_sigreturn,%r2
	ldo	FRAME_SIZE(%r30), %r30	/* (delay slot) push frame */
#endif

	ldo	-FRAME_SIZE(%r30), %r30
	LDREG	-RP_OFFSET(%r30), %r2

	/* FIXME: I think we need to restore a few more things here. */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r1	/* get pt regs */
	reg_restore %r1			/* reload r3-r18 from pt_regs */

	/* If the signal was received while the process was blocked on a
	 * syscall, then r2 will take us to syscall_exit; otherwise r2 will
	 * take us to syscall_exit_rfi and on to intr_return.
	 */
	bv	%r0(%r2)
	LDREG	PT_GR28(%r1),%r28  /* reload original r28 for syscall_exit */
1959
	.export sys_sigaltstack_wrapper

	/* Pass the saved user stack pointer as an extra argument to
	 * do_sigaltstack. */
sys_sigaltstack_wrapper:
	/* Get the user stack pointer */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r24	/* get pt regs */
	LDREG	TASK_PT_GR30(%r24),%r24	/* arg2 = saved user sp */
	STREG	%r2, -RP_OFFSET(%r30)
#ifdef CONFIG_64BIT
	ldo	FRAME_SIZE(%r30), %r30
	b,l	do_sigaltstack,%r2
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	bl	do_sigaltstack,%r2
	ldo	FRAME_SIZE(%r30), %r30	/* (delay slot) push frame */
#endif

	ldo	-FRAME_SIZE(%r30), %r30
	LDREG	-RP_OFFSET(%r30), %r2
	bv	%r0(%r2)
	nop
1980
#ifdef CONFIG_64BIT
	.export sys32_sigaltstack_wrapper

	/* 32-bit-compat variant of sys_sigaltstack_wrapper; calls
	 * do_sigaltstack32 with the saved user sp as extra argument. */
sys32_sigaltstack_wrapper:
	/* Get the user stack pointer */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r24
	LDREG	TASK_PT_GR30(%r24),%r24	/* arg2 = saved user sp */
	STREG	%r2, -RP_OFFSET(%r30)
	ldo	FRAME_SIZE(%r30), %r30
	b,l	do_sigaltstack32,%r2
	ldo	-16(%r30),%r29		/* Reference param save area */

	ldo	-FRAME_SIZE(%r30), %r30
	LDREG	-RP_OFFSET(%r30), %r2
	bv	%r0(%r2)
	nop
#endif
1997
	.export sys_rt_sigsuspend_wrapper

	/* Save r3-r18 into pt_regs around sys_rt_sigsuspend so the
	 * signal code sees a complete register image, then restore. */
sys_rt_sigsuspend_wrapper:
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
	ldo	TASK_REGS(%r1),%r24
	reg_save %r24

	STREG	%r2, -RP_OFFSET(%r30)
#ifdef CONFIG_64BIT
	ldo	FRAME_SIZE(%r30), %r30
	b,l	sys_rt_sigsuspend,%r2
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	bl	sys_rt_sigsuspend,%r2
	ldo	FRAME_SIZE(%r30), %r30	/* (delay slot) push frame */
#endif

	ldo	-FRAME_SIZE(%r30), %r30
	LDREG	-RP_OFFSET(%r30), %r2

	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
	ldo	TASK_REGS(%r1),%r1
	reg_restore %r1

	bv	%r0(%r2)
	nop
2023
	.export syscall_exit
syscall_exit:

	/* NOTE: HP-UX syscalls also come through here
	 * after hpux_syscall_exit fixes up return
	 * values. */

	/* NOTE: Not all syscalls exit this way.  rt_sigreturn will exit
	 * via syscall_exit_rfi if the signal was received while the process
	 * was running.
	 */

	/* save return value now */

	mfctl     %cr30, %r1		/* cr30 -> thread_info */
	LDREG     TI_TASK(%r1),%r1	/* %r1 = task struct */
	STREG     %r28,TASK_PT_GR28(%r1)

#ifdef CONFIG_HPUX

/* <linux/personality.h> cannot be easily included */
#define PER_HPUX 0x10
	LDREG     TASK_PERSONALITY(%r1),%r19

	/* We can't use "CMPIB<> PER_HPUX" since "im5" field is sign extended */
	ldo	  -PER_HPUX(%r19), %r19
	CMPIB<>,n 0,%r19,1f

	/* Save other hpux returns if personality is PER_HPUX */
	STREG     %r22,TASK_PT_GR22(%r1)
	STREG     %r29,TASK_PT_GR29(%r1)
1:

#endif /* CONFIG_HPUX */

	/* Seems to me that dp could be wrong here, if the syscall involved
	 * calling a module, and nothing got round to restoring dp on return.
	 */
	loadgp
2063
	/* Check for pending work (softirqs, reschedule, signals) before
	 * returning to user space; falls through to syscall_restore. */
syscall_check_bh:

	/* Check for software interrupts */

	.import irq_stat,data

	load32	irq_stat,%r19

#ifdef CONFIG_SMP
	/* sched.h: int processor */
	/* %r26 is used as scratch register to index into irq_stat[] */
	ldw     TI_CPU-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26 /* cpu # */

	/* shift left ____cacheline_aligned (aka L1_CACHE_BYTES) bits */
#ifdef CONFIG_64BIT
	shld	%r26, 6, %r20
#else
	shlw	%r26, 5, %r20
#endif
	add     %r19,%r20,%r19	/* now have &irq_stat[smp_processor_id()] */
#endif /* CONFIG_SMP */

syscall_check_resched:

	/* check for reschedule */

	LDREG	TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19	/* long */
	bb,<,n	%r19, 31-TIF_NEED_RESCHED, syscall_do_resched /* forward */

syscall_check_sig:
	LDREG	TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19    /* get ti flags */
	bb,<,n	%r19, 31-TIF_SIGPENDING, syscall_do_signal /* forward */
2096
	/*
	 * Fast syscall return path: reload the user register state from
	 * the task's pt_regs and branch back to user space.  Traced
	 * processes take the slower RFI path (syscall_restore_rfi).
	 */
syscall_restore:
	/* Are we being ptraced? */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1

	LDREG	TASK_PTRACE(%r1), %r19
	bb,<	%r19,31,syscall_restore_rfi	/* traced: must return via rfi */
	nop

	ldo	TASK_PT_FR31(%r1),%r19		   /* reload fpregs */
	rest_fp	%r19

	LDREG	TASK_PT_SAR(%r1),%r19		   /* restore SAR */
	mtsar	%r19

	LDREG	TASK_PT_GR2(%r1),%r2		   /* restore user rp */
	LDREG	TASK_PT_GR19(%r1),%r19
	LDREG   TASK_PT_GR20(%r1),%r20
	LDREG	TASK_PT_GR21(%r1),%r21
	LDREG	TASK_PT_GR22(%r1),%r22
	LDREG	TASK_PT_GR23(%r1),%r23
	LDREG	TASK_PT_GR24(%r1),%r24
	LDREG	TASK_PT_GR25(%r1),%r25
	LDREG	TASK_PT_GR26(%r1),%r26
	LDREG	TASK_PT_GR27(%r1),%r27	   /* restore user dp */
	LDREG	TASK_PT_GR28(%r1),%r28	   /* syscall return value */
	LDREG	TASK_PT_GR29(%r1),%r29
	LDREG	TASK_PT_GR31(%r1),%r31	   /* restore syscall rp */

	/* NOTE: We use rsm/ssm pair to make this operation atomic */
	rsm     PSW_SM_I, %r0			   /* interrupts off */
	LDREG   TASK_PT_GR30(%r1),%r30             /* restore user sp */
	mfsp	%sr3,%r1			   /* Get users space id */
	mtsp    %r1,%sr7                           /* Restore sr7 */
	ssm     PSW_SM_I, %r0			   /* interrupts back on */

	/* Set sr2 to zero for userspace syscalls to work. */
	mtsp	%r0,%sr2
	mtsp	%r1,%sr4			   /* Restore sr4 */
	mtsp	%r1,%sr5			   /* Restore sr5 */
	mtsp	%r1,%sr6			   /* Restore sr6 */

	depi	3,31,2,%r31			   /* ensure return to user mode. */

#ifdef CONFIG_64BIT
	/* decide whether to reset the wide mode bit
	 *
	 * For a syscall, the W bit is stored in the lowest bit
	 * of sp.  Extract it and reset W if it is zero */
	extrd,u,*<>	%r30,63,1,%r1
	rsm	PSW_SM_W, %r0
	/* now reset the lowest bit of sp if it was set */
	xor	%r30,%r1,%r30
#endif
	be,n    0(%sr3,%r31)                       /* return to user space */
2151
	/* We have to return via an RFI, so that PSW T and R bits can be set
	 * appropriately.
	 * This sets up pt_regs so we can return via intr_restore, which is not
	 * the most efficient way of doing things, but it works.
	 */
syscall_restore_rfi:
	ldo	-1(%r0),%r2			   /* Set recovery cntr to -1 */
	mtctl	%r2,%cr0			   /*   for immediate trap */
	LDREG	TASK_PT_PSW(%r1),%r2		   /* Get old PSW */
	ldi	0x0b,%r20			   /* Create new PSW */
	depi	-1,13,1,%r20			   /* C, Q, D, and I bits */

	/* The values of PA_SINGLESTEP_BIT and PA_BLOCKSTEP_BIT are
	 * set in include/linux/ptrace.h and converted to PA bitmap
	 * numbers in asm-offsets.c */

	/* if ((%r19.PA_SINGLESTEP_BIT)) { %r20.27=1} */
	extru,=	%r19,PA_SINGLESTEP_BIT,1,%r0
	depi	-1,27,1,%r20			   /* R bit */

	/* if ((%r19.PA_BLOCKSTEP_BIT)) { %r20.7=1} */
	extru,= %r19,PA_BLOCKSTEP_BIT,1,%r0
	depi	-1,7,1,%r20			   /* T bit */

	STREG	%r20,TASK_PT_PSW(%r1)

	/* Always store space registers, since sr3 can be changed (e.g. fork) */

	mfsp    %sr3,%r25
	STREG   %r25,TASK_PT_SR3(%r1)
	STREG   %r25,TASK_PT_SR4(%r1)
	STREG   %r25,TASK_PT_SR5(%r1)
	STREG   %r25,TASK_PT_SR6(%r1)
	STREG   %r25,TASK_PT_SR7(%r1)
	STREG   %r25,TASK_PT_IASQ0(%r1)
	STREG   %r25,TASK_PT_IASQ1(%r1)

	/* XXX W bit??? */
	/* Now if old D bit is clear, it means we didn't save all registers
	 * on syscall entry, so do that now.  This only happens on TRACEME
	 * calls, or if someone attached to us while we were on a syscall.
	 * We could make this more efficient by not saving r3-r18, but
	 * then we wouldn't be able to use the common intr_restore path.
	 * It is only for traced processes anyway, so performance is not
	 * an issue.
	 */
	bb,<	%r2,30,pt_regs_ok		   /* Branch if D set */
	ldo	TASK_REGS(%r1),%r25
	reg_save %r25				   /* Save r3 to r18 */

	/* Save the current sr */
	mfsp	%sr0,%r2
	STREG	%r2,TASK_PT_SR0(%r1)

	/* Save the scratch sr */
	mfsp	%sr1,%r2
	STREG	%r2,TASK_PT_SR1(%r1)

	/* sr2 should be set to zero for userspace syscalls */
	STREG	%r0,TASK_PT_SR2(%r1)

	/* Point IAOQ0/IAOQ1 at the syscall return address so intr_restore
	 * resumes user code there. */
pt_regs_ok:
	LDREG	TASK_PT_GR31(%r1),%r2
	depi	3,31,2,%r2			   /* ensure return to user mode. */
	STREG	%r2,TASK_PT_IAOQ0(%r1)
	ldo	4(%r2),%r2
	STREG	%r2,TASK_PT_IAOQ1(%r1)
	copy	%r25,%r16
	b	intr_restore
	nop
2222
	.import schedule,code

	/* Call schedule(), then re-run the full pending-work checks. */
syscall_do_resched:
	BL	schedule,%r2
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	nop				/* delay slot */
#endif
	b       syscall_check_bh  /* if resched, we start over again */
	nop
2233
	.import do_signal,code

	/* Deliver pending signals: do_signal(NULL, regs, 1), then
	 * re-check for more pending signals. */
syscall_do_signal:
	/* Save callee-save registers (for sigcontext).
	   FIXME: After this point the process structure should be
	   consistent with all the relevant state of the process
	   before the syscall.  We need to verify this. */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1), %r25		/* struct pt_regs *regs */
	reg_save %r25

	ldi	1, %r24				/* unsigned long in_syscall */

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29			/* Reference param save area */
#endif
	BL	do_signal,%r2
	copy	%r0, %r26			/* sigset_t *oldset = NULL */

	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1), %r20		/* reload pt_regs */
	reg_restore %r20

	b,n     syscall_check_sig
2257
2258	/*
2259	 * get_register is used by the non access tlb miss handlers to
2260	 * copy the value of the general register specified in r8 into
2261	 * r1. This routine can't be used for shadowed registers, since
2262	 * the rfir will restore the original value. So, for the shadowed
2263	 * registers we put a -1 into r1 to indicate that the register
2264	 * should not be used (the register being copied could also have
2265	 * a -1 in it, but that is OK, it just means that we will have
2266	 * to use the slow path instead).
2267	 */
2268
2269get_register:
2270	blr     %r8,%r0
2271	nop
2272	bv      %r0(%r25)    /* r0 */
2273	copy    %r0,%r1
2274	bv      %r0(%r25)    /* r1 - shadowed */
2275	ldi     -1,%r1
2276	bv      %r0(%r25)    /* r2 */
2277	copy    %r2,%r1
2278	bv      %r0(%r25)    /* r3 */
2279	copy    %r3,%r1
2280	bv      %r0(%r25)    /* r4 */
2281	copy    %r4,%r1
2282	bv      %r0(%r25)    /* r5 */
2283	copy    %r5,%r1
2284	bv      %r0(%r25)    /* r6 */
2285	copy    %r6,%r1
2286	bv      %r0(%r25)    /* r7 */
2287	copy    %r7,%r1
2288	bv      %r0(%r25)    /* r8 - shadowed */
2289	ldi     -1,%r1
2290	bv      %r0(%r25)    /* r9 - shadowed */
2291	ldi     -1,%r1
2292	bv      %r0(%r25)    /* r10 */
2293	copy    %r10,%r1
2294	bv      %r0(%r25)    /* r11 */
2295	copy    %r11,%r1
2296	bv      %r0(%r25)    /* r12 */
2297	copy    %r12,%r1
2298	bv      %r0(%r25)    /* r13 */
2299	copy    %r13,%r1
2300	bv      %r0(%r25)    /* r14 */
2301	copy    %r14,%r1
2302	bv      %r0(%r25)    /* r15 */
2303	copy    %r15,%r1
2304	bv      %r0(%r25)    /* r16 - shadowed */
2305	ldi     -1,%r1
2306	bv      %r0(%r25)    /* r17 - shadowed */
2307	ldi     -1,%r1
2308	bv      %r0(%r25)    /* r18 */
2309	copy    %r18,%r1
2310	bv      %r0(%r25)    /* r19 */
2311	copy    %r19,%r1
2312	bv      %r0(%r25)    /* r20 */
2313	copy    %r20,%r1
2314	bv      %r0(%r25)    /* r21 */
2315	copy    %r21,%r1
2316	bv      %r0(%r25)    /* r22 */
2317	copy    %r22,%r1
2318	bv      %r0(%r25)    /* r23 */
2319	copy    %r23,%r1
2320	bv      %r0(%r25)    /* r24 - shadowed */
2321	ldi     -1,%r1
2322	bv      %r0(%r25)    /* r25 - shadowed */
2323	ldi     -1,%r1
2324	bv      %r0(%r25)    /* r26 */
2325	copy    %r26,%r1
2326	bv      %r0(%r25)    /* r27 */
2327	copy    %r27,%r1
2328	bv      %r0(%r25)    /* r28 */
2329	copy    %r28,%r1
2330	bv      %r0(%r25)    /* r29 */
2331	copy    %r29,%r1
2332	bv      %r0(%r25)    /* r30 */
2333	copy    %r30,%r1
2334	bv      %r0(%r25)    /* r31 */
2335	copy    %r31,%r1
2336
2337	/*
2338	 * set_register is used by the non access tlb miss handlers to
2339	 * copy the value of r1 into the general register specified in
2340	 * r8.
2341	 */
2342
2343set_register:
2344	blr     %r8,%r0
2345	nop
2346	bv      %r0(%r25)    /* r0 (silly, but it is a place holder) */
2347	copy    %r1,%r0
2348	bv      %r0(%r25)    /* r1 */
2349	copy    %r1,%r1
2350	bv      %r0(%r25)    /* r2 */
2351	copy    %r1,%r2
2352	bv      %r0(%r25)    /* r3 */
2353	copy    %r1,%r3
2354	bv      %r0(%r25)    /* r4 */
2355	copy    %r1,%r4
2356	bv      %r0(%r25)    /* r5 */
2357	copy    %r1,%r5
2358	bv      %r0(%r25)    /* r6 */
2359	copy    %r1,%r6
2360	bv      %r0(%r25)    /* r7 */
2361	copy    %r1,%r7
2362	bv      %r0(%r25)    /* r8 */
2363	copy    %r1,%r8
2364	bv      %r0(%r25)    /* r9 */
2365	copy    %r1,%r9
2366	bv      %r0(%r25)    /* r10 */
2367	copy    %r1,%r10
2368	bv      %r0(%r25)    /* r11 */
2369	copy    %r1,%r11
2370	bv      %r0(%r25)    /* r12 */
2371	copy    %r1,%r12
2372	bv      %r0(%r25)    /* r13 */
2373	copy    %r1,%r13
2374	bv      %r0(%r25)    /* r14 */
2375	copy    %r1,%r14
2376	bv      %r0(%r25)    /* r15 */
2377	copy    %r1,%r15
2378	bv      %r0(%r25)    /* r16 */
2379	copy    %r1,%r16
2380	bv      %r0(%r25)    /* r17 */
2381	copy    %r1,%r17
2382	bv      %r0(%r25)    /* r18 */
2383	copy    %r1,%r18
2384	bv      %r0(%r25)    /* r19 */
2385	copy    %r1,%r19
2386	bv      %r0(%r25)    /* r20 */
2387	copy    %r1,%r20
2388	bv      %r0(%r25)    /* r21 */
2389	copy    %r1,%r21
2390	bv      %r0(%r25)    /* r22 */
2391	copy    %r1,%r22
2392	bv      %r0(%r25)    /* r23 */
2393	copy    %r1,%r23
2394	bv      %r0(%r25)    /* r24 */
2395	copy    %r1,%r24
2396	bv      %r0(%r25)    /* r25 */
2397	copy    %r1,%r25
2398	bv      %r0(%r25)    /* r26 */
2399	copy    %r1,%r26
2400	bv      %r0(%r25)    /* r27 */
2401	copy    %r1,%r27
2402	bv      %r0(%r25)    /* r28 */
2403	copy    %r1,%r28
2404	bv      %r0(%r25)    /* r29 */
2405	copy    %r1,%r29
2406	bv      %r0(%r25)    /* r30 */
2407	copy    %r1,%r30
2408	bv      %r0(%r25)    /* r31 */
2409	copy    %r1,%r31
2410