xref: /openbmc/linux/arch/parisc/kernel/entry.S (revision 8569c914)
1/*
2 * Linux/PA-RISC Project (http://www.parisc-linux.org/)
3 *
4 * kernel entry points (interruptions, system call wrappers)
5 *  Copyright (C) 1999,2000 Philipp Rumpf
6 *  Copyright (C) 1999 SuSE GmbH Nuernberg
7 *  Copyright (C) 2000 Hewlett-Packard (John Marvin)
8 *  Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
9 *
10 *    This program is free software; you can redistribute it and/or modify
11 *    it under the terms of the GNU General Public License as published by
12 *    the Free Software Foundation; either version 2, or (at your option)
13 *    any later version.
14 *
15 *    This program is distributed in the hope that it will be useful,
16 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
17 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18 *    GNU General Public License for more details.
19 *
20 *    You should have received a copy of the GNU General Public License
21 *    along with this program; if not, write to the Free Software
22 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 */
24
25#include <asm/asm-offsets.h>
26
27/* we have the following possibilities to act on an interruption:
28 *  - handle in assembly and use shadowed registers only
29 *  - save registers to kernel stack and handle in assembly or C */
30
31
32#include <asm/psw.h>
33#include <asm/cache.h>		/* for L1_CACHE_SHIFT */
34#include <asm/assembly.h>	/* for LDREG/STREG defines */
35#include <asm/pgtable.h>
36#include <asm/signal.h>
37#include <asm/unistd.h>
38#include <asm/thread_info.h>
39
40#include <linux/linkage.h>
41
42#ifdef CONFIG_64BIT
43	.level 2.0w
44#else
45	.level 2.0
46#endif
47
48	.import         pa_dbit_lock,data
49
50	/* space_to_prot macro creates a prot id from a space id */
51
52#if (SPACEID_SHIFT) == 0
	/* SPACEID_SHIFT == 0: the protection id is the space id shifted
	 * left by one (deposit 31 bits ending at bit 62, zeroing rest). */
53	.macro  space_to_prot spc prot
54	depd,z  \spc,62,31,\prot
55	.endm
56#else
	/* Otherwise extract the upper 32 bits of the space id, dropping
	 * the low SPACEID_SHIFT bits, to form the protection id. */
57	.macro  space_to_prot spc prot
58	extrd,u \spc,(64 - (SPACEID_SHIFT)),32,\prot
59	.endm
60#endif
61
62	/* Switch to virtual mapping, trashing only %r1 */
63	.macro  virt_map
64	/* pcxt_ssm_bug */
65	rsm	PSW_SM_I, %r0	/* barrier for "Relied upon Translation */
	/* Zero the kernel space registers sr4/sr5; stash the faulting
	 * space (sr7) in sr3, but only when it is non-zero (user). */
66	mtsp	%r0, %sr4
67	mtsp	%r0, %sr5
68	mfsp	%sr7, %r1
69	or,=    %r0,%r1,%r0	/* Only save sr7 in sr3 if sr7 != 0 */
70	mtsp	%r1, %sr3
71	tovirt_r1 %r29
72	load32	KERNEL_PSW, %r1
73
74	rsm     PSW_SM_QUIET,%r0	/* second "heavy weight" ctl op */
75	mtsp	%r0, %sr6
76	mtsp	%r0, %sr7
77	mtctl	%r0, %cr17	/* Clear IIASQ tail */
78	mtctl	%r0, %cr17	/* Clear IIASQ head */
79	mtctl	%r1, %ipsw
	/* Queue up label 4 (below) as both IIAOQ entries, then rfir:
	 * execution resumes at 4: with KERNEL_PSW in force. */
80	load32	4f, %r1
81	mtctl	%r1, %cr18	/* Set IIAOQ tail */
82	ldo	4(%r1), %r1
83	mtctl	%r1, %cr18	/* Set IIAOQ head */
84	rfir
85	nop
864:
87	.endm
88
89	/*
90	 * The "get_stack" macros are responsible for determining the
91	 * kernel stack value.
92	 *
93	 *      If sr7 == 0
94	 *          Already using a kernel stack, so call the
95	 *          get_stack_use_r30 macro to push a pt_regs structure
96	 *          on the stack, and store registers there.
97	 *      else
98	 *          Need to set up a kernel stack, so call the
99	 *          get_stack_use_cr30 macro to set up a pointer
100	 *          to the pt_regs structure contained within the
101	 *          task pointer pointed to by cr30. Set the stack
102	 *          pointer to point to the end of the task structure.
103	 *
104	 * Note that we use shadowed registers for temps until
105	 * we can save %r26 and %r29. %r26 is used to preserve
106	 * %r8 (a shadowed register) which temporarily contained
107	 * either the fault type ("code") or the eirr. We need
108	 * to use a non-shadowed register to carry the value over
109	 * the rfir in virt_map. We use %r26 since this value winds
110	 * up being passed as the argument to either do_cpu_irq_mask
111	 * or handle_interruption. %r29 is used to hold a pointer
112	 * the register save area, and once again, it needs to
113	 * be a non-shadowed register so that it survives the rfir.
114	 *
115	 * N.B. TASK_SZ_ALGN and PT_SZ_ALGN include space for a stack frame.
116	 */
117
118	.macro  get_stack_use_cr30

119

120	/* we save the registers in the task struct */

121

	/* cr30 holds the thread_info pointer; convert to a physical
	 * address and follow it to the task's pt_regs save area. */
122	mfctl   %cr30, %r1
123	tophys  %r1,%r9
124	LDREG	TI_TASK(%r9), %r1	/* thread_info -> task_struct */
125	tophys  %r1,%r9
126	ldo     TASK_REGS(%r9),%r9
127	STREG   %r30, PT_GR30(%r9)
128	STREG   %r29,PT_GR29(%r9)
129	STREG   %r26,PT_GR26(%r9)
130	copy    %r9,%r29	/* %r29 = pt_regs; non-shadowed, survives rfir */
	/* Kernel stack pointer = end of the thread area (see note above). */
131	mfctl   %cr30, %r1
132	ldo	THREAD_SZ_ALGN(%r1), %r30
133	.endm
134
135	.macro  get_stack_use_r30

136

137	/* we put a struct pt_regs on the stack and save the registers there */

138

	/* Already on a kernel stack: push a pt_regs frame at the current
	 * %r30 and leave %r29 pointing at it (survives the rfir). */
139	tophys  %r30,%r9
140	STREG   %r30,PT_GR30(%r9)
141	ldo	PT_SZ_ALGN(%r30),%r30
142	STREG   %r29,PT_GR29(%r9)
143	STREG   %r26,PT_GR26(%r9)
144	copy    %r9,%r29
145	.endm
146
	/* Undo get_stack_*: reload r1/r30/r29 from the pt_regs area
	 * pointed to by %r29.  %r29 is restored last since it is in use. */
147	.macro  rest_stack
148	LDREG   PT_GR1(%r29), %r1
149	LDREG   PT_GR30(%r29),%r30
150	LDREG   PT_GR29(%r29),%r29
151	.endm
152
153	/* default interruption handler
154	 * (calls traps.c:handle_interruption) */
	/* Branch to intr_save with the trap number in shadowed %r8;
	 * padded to the 32-byte interruption vector slot size. */
155	.macro	def code
156	b	intr_save
157	ldi     \code, %r8
158	.align	32
159	.endm
160
161	/* Interrupt interruption handler
162	 * (calls irq.c:do_cpu_irq_mask) */
	/* The delay slot captures sr7 so intr_extint can tell whether
	 * the interrupt arrived from user (sr7 != 0) or kernel space. */
163	.macro	extint code
164	b	intr_extint
165	mfsp    %sr7,%r16
166	.align	32
167	.endm
168
169	.import	os_hpmc, code
170
171	/* HPMC handler */
	/* High-Priority Machine Check vector slot: firmware expects the
	 * checksum/address/length words below and patches them at boot. */
172	.macro	hpmc code
173	nop			/* must be a NOP, will be patched later */
174	load32	PA(os_hpmc), %r3
175	bv,n	0(%r3)
176	nop
177	.word	0		/* checksum (will be patched) */
178	.word	PA(os_hpmc)	/* address of handler */
179	.word	0		/* length of handler */
180	.endm
181
182	/*
183	 * Performance Note: Instructions will be moved up into
184	 * this part of the code later on, once we are sure
185	 * that the tlb miss handlers are close to final form.
186	 */
187
188	/* Register definitions for tlb miss handler macros */
189
190	va  = r8	/* virtual address for which the trap occured */
191	spc = r24	/* space for which the trap occured */
192
193#ifndef CONFIG_64BIT
194
195	/*
196	 * itlb miss interruption handler (parisc 1.1 - 32 bit)
197	 */
198
199	.macro	itlb_11 code
200
201	mfctl	%pcsq, spc
202	b	itlb_miss_11
203	mfctl	%pcoq, va
204
205	.align		32
206	.endm
207#endif
208
209	/*
210	 * itlb miss interruption handler (parisc 2.0)
211	 */
212
213	.macro	itlb_20 code
214	mfctl	%pcsq, spc
215#ifdef CONFIG_64BIT
216	b       itlb_miss_20w
217#else
218	b	itlb_miss_20
219#endif
220	mfctl	%pcoq, va
221
222	.align		32
223	.endm
224
225#ifndef CONFIG_64BIT
226	/*
227	 * naitlb miss interruption handler (parisc 1.1 - 32 bit)
228	 *
229	 * Note: naitlb misses will be treated
230	 * as an ordinary itlb miss for now.
231	 * However, note that naitlb misses
232	 * have the faulting address in the
233	 * IOR/ISR.
234	 */
235
236	.macro	naitlb_11 code
237
238	mfctl	%isr,spc
239	b	itlb_miss_11
240	mfctl 	%ior,va
241	/* FIXME: If user causes a naitlb miss, the priv level may not be in
242	 * lower bits of va, where the itlb miss handler is expecting them
243	 */
244
245	.align		32
246	.endm
247#endif
248
249	/*
250	 * naitlb miss interruption handler (parisc 2.0)
251	 *
252	 * Note: naitlb misses will be treated
253	 * as an ordinary itlb miss for now.
254	 * However, note that naitlb misses
255	 * have the faulting address in the
256	 * IOR/ISR.
257	 */
258
259	.macro	naitlb_20 code
260
261	mfctl	%isr,spc
262#ifdef CONFIG_64BIT
263	b       itlb_miss_20w
264#else
265	b	itlb_miss_20
266#endif
267	mfctl 	%ior,va
268	/* FIXME: If user causes a naitlb miss, the priv level may not be in
269	 * lower bits of va, where the itlb miss handler is expecting them
270	 */
271
272	.align		32
273	.endm
274
275#ifndef CONFIG_64BIT
276	/*
277	 * dtlb miss interruption handler (parisc 1.1 - 32 bit)
278	 */
279
280	.macro	dtlb_11 code
281
282	mfctl	%isr, spc
283	b	dtlb_miss_11
284	mfctl	%ior, va
285
286	.align		32
287	.endm
288#endif
289
290	/*
291	 * dtlb miss interruption handler (parisc 2.0)
292	 */
293
294	.macro	dtlb_20 code
295
296	mfctl	%isr, spc
297#ifdef CONFIG_64BIT
298	b       dtlb_miss_20w
299#else
300	b	dtlb_miss_20
301#endif
302	mfctl	%ior, va
303
304	.align		32
305	.endm
306
307#ifndef CONFIG_64BIT
308	/* nadtlb miss interruption handler (parisc 1.1 - 32 bit) */

309

	/* Non-access data TLB miss (e.g. probe/flush class ops). */
310	.macro	nadtlb_11 code

311

312	mfctl	%isr,spc
313	b       nadtlb_miss_11
314	mfctl	%ior,va

315

316	.align		32
317	.endm
318#endif
319
320	/* nadtlb miss interruption handler (parisc 2.0) */

321

	/* Non-access data TLB miss, PA 2.0 variant. */
322	.macro	nadtlb_20 code

323

324	mfctl	%isr,spc
325#ifdef CONFIG_64BIT
326	b       nadtlb_miss_20w
327#else
328	b       nadtlb_miss_20
329#endif
330	mfctl	%ior,va

331

332	.align		32
333	.endm
334
335#ifndef CONFIG_64BIT
336	/*
337	 * dirty bit trap interruption handler (parisc 1.1 - 32 bit)
338	 */
339
340	.macro	dbit_11 code
341
342	mfctl	%isr,spc
343	b	dbit_trap_11
344	mfctl	%ior,va
345
346	.align		32
347	.endm
348#endif
349
350	/*
351	 * dirty bit trap interruption handler (parisc 2.0)
352	 */
353
354	.macro	dbit_20 code
355
356	mfctl	%isr,spc
357#ifdef CONFIG_64BIT
358	b       dbit_trap_20w
359#else
360	b	dbit_trap_20
361#endif
362	mfctl	%ior,va
363
364	.align		32
365	.endm
366
367	/* The following are simple 32 vs 64 bit instruction
368	 * abstractions for the macros */
	/* EXTR: unsigned bit-field extract; on 64-bit the start position
	 * is offset by 32 so 32-bit-style positions keep working. */
369	.macro		EXTR	reg1,start,length,reg2
370#ifdef CONFIG_64BIT
371	extrd,u		\reg1,32+\start,\length,\reg2
372#else
373	extrw,u		\reg1,\start,\length,\reg2
374#endif
375	.endm
376
	/* DEP: register bit-field deposit, 32/64-bit abstraction
	 * (start offset by 32 in the 64-bit case, as in EXTR). */
377	.macro		DEP	reg1,start,length,reg2
378#ifdef CONFIG_64BIT
379	depd		\reg1,32+\start,\length,\reg2
380#else
381	depw		\reg1,\start,\length,\reg2
382#endif
383	.endm
384
	/* DEPI: immediate bit-field deposit, 32/64-bit abstraction. */
385	.macro		DEPI	val,start,length,reg
386#ifdef CONFIG_64BIT
387	depdi		\val,32+\start,\length,\reg
388#else
389	depwi		\val,\start,\length,\reg
390#endif
391	.endm
392
393	/* In LP64, the space contains part of the upper 32 bits of the
394	 * fault.  We have to extract this and place it in the va,
395	 * zeroing the corresponding bits in the space register */
	/* No-op on 32-bit kernels (body compiled out). */
396	.macro		space_adjust	spc,va,tmp
397#ifdef CONFIG_64BIT
398	extrd,u		\spc,63,SPACEID_SHIFT,\tmp
399	depd		%r0,63,SPACEID_SHIFT,\spc
400	depd		\tmp,31,SPACEID_SHIFT,\va
401#endif
402	.endm
403
404	.import		swapper_pg_dir,code
405
406	/* Get the pgd.  For faults on space zero (kernel space), this
407	 * is simply swapper_pg_dir.  For user space faults, the
408	 * pgd is stored in %cr25 */
	/* The or,COND(=) nullifies the cr25 load when spc == 0, leaving
	 * the swapper_pg_dir physical address loaded above. */
409	.macro		get_pgd		spc,reg
410	ldil		L%PA(swapper_pg_dir),\reg
411	ldo		R%PA(swapper_pg_dir)(\reg),\reg
412	or,COND(=)	%r0,\spc,%r0
413	mfctl		%cr25,\reg
414	.endm
415
416	/*
417		space_check(spc,tmp,fault)
418
419		spc - The space we saw the fault with.
420		tmp - The place to store the current space.
421		fault - Function to call on failure.
422
423		Only allow faults on different spaces from the
424		currently active one if we're the kernel
425
426	*/
427	.macro		space_check	spc,tmp,fault
428	mfsp		%sr7,\tmp
429	or,COND(<>)	%r0,\spc,%r0	/* user may execute gateway page
430					 * as kernel, so defeat the space
431					 * check if it is */
432	copy		\spc,\tmp
433	or,COND(=)	%r0,\tmp,%r0	/* nullify if executing as kernel */
434	cmpb,COND(<>),n	\tmp,\spc,\fault
435	.endm
436
437	/* Look up a PTE in a 2-Level scheme (faulting at each
438	 * level if the entry isn't present
439	 *
440	 * NOTE: we use ldw even for LP64, since the short pointers
441	 * can address up to 1TB
442	 */
443	.macro		L2_ptep	pmd,pte,index,va,fault
444#if PT_NLEVELS == 3
445	EXTR		\va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
446#else
447	EXTR		\va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
448#endif
449	DEP             %r0,31,PAGE_SHIFT,\pmd  /* clear offset */
	/* \pte is zeroed up front so the fault path sees a null pte. */
450	copy		%r0,\pte
451	ldw,s		\index(\pmd),\pmd
452	bb,>=,n		\pmd,_PxD_PRESENT_BIT,\fault
453	DEP		%r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
454	copy		\pmd,%r9
455	SHLREG		%r9,PxD_VALUE_SHIFT,\pmd
456	EXTR		\va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
457	DEP		%r0,31,PAGE_SHIFT,\pmd  /* clear offset */
458	shladd		\index,BITS_PER_PTE_ENTRY,\pmd,\pmd
459	LDREG		%r0(\pmd),\pte		/* pmd is now pte */
460	bb,>=,n		\pte,_PAGE_PRESENT_BIT,\fault
461	.endm
462
463	/* Look up PTE in a 3-Level scheme.
464	 *
465	 * Here we implement a Hybrid L2/L3 scheme: we allocate the
466	 * first pmd adjacent to the pgd.  This means that we can
467	 * subtract a constant offset to get to it.  The pmd and pgd
468	 * sizes are arranged so that a single pmd covers 4GB (giving
469	 * a full LP64 process access to 8TB) so our lookups are
470	 * effectively L2 for the first 4GB of the kernel (i.e. for
471	 * all ILP32 processes and all the kernel for machines with
472	 * under 4GB of memory) */
	/* The repeated extrd,u,*= tests nullify the following insn when
	 * the upper va bits are zero, i.e. the pgd walk only happens for
	 * addresses above the first-pmd range; otherwise the *<> case
	 * falls through to the adjacent-pmd shortcut. */
473	.macro		L3_ptep pgd,pte,index,va,fault
474#if PT_NLEVELS == 3 /* we might have a 2-Level scheme, e.g. with 16kb page size */
475	extrd,u		\va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
476	copy		%r0,\pte
477	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
478	ldw,s		\index(\pgd),\pgd
479	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
480	bb,>=,n		\pgd,_PxD_PRESENT_BIT,\fault
481	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
482	shld		\pgd,PxD_VALUE_SHIFT,\index
483	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
484	copy		\index,\pgd
485	extrd,u,*<>	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
486	ldo		ASM_PGD_PMD_OFFSET(\pgd),\pgd
487#endif
488	L2_ptep		\pgd,\pte,\index,\va,\fault
489	.endm
490
491	/* Set the _PAGE_ACCESSED bit of the PTE.  Be clever and
492	 * don't needlessly dirty the cache line if it was already set */
	/* and,COND(<>) nullifies the store when the bit was already set. */
493	.macro		update_ptep	ptep,pte,tmp,tmp1
494	ldi		_PAGE_ACCESSED,\tmp1
495	or		\tmp1,\pte,\tmp
496	and,COND(<>)	\tmp1,\pte,%r0
497	STREG		\tmp,0(\ptep)
498	.endm
499
500	/* Set the dirty bit (and accessed bit).  No need to be
501	 * clever, this is only used from the dirty fault */
	/* Unconditional read-modify-write of the PTE in memory. */
502	.macro		update_dirty	ptep,pte,tmp
503	ldi		_PAGE_ACCESSED|_PAGE_DIRTY,\tmp
504	or		\tmp,\pte,\pte
505	STREG		\pte,0(\ptep)
506	.endm
507
508	/* Convert the pte and prot to tlb insertion values.  How
509	 * this happens is quite subtle, read below */
	/* On exit: \prot is the access-rights/protection-id word and
	 * \pte the page-frame word expected by iitlbt/idtlbt. */
510	.macro		make_insert_tlb	spc,pte,prot
511	space_to_prot   \spc \prot        /* create prot id from space */
512	/* The following is the real subtlety.  This is depositing
513	 * T <-> _PAGE_REFTRAP
514	 * D <-> _PAGE_DIRTY
515	 * B <-> _PAGE_DMB (memory break)
516	 *
517	 * Then incredible subtlety: The access rights are
518	 * _PAGE_GATEWAY _PAGE_EXEC _PAGE_READ
519	 * See 3-14 of the parisc 2.0 manual
520	 *
521	 * Finally, _PAGE_READ goes in the top bit of PL1 (so we
522	 * trigger an access rights trap in user space if the user
523	 * tries to read an unreadable page */
524	depd            \pte,8,7,\prot

525

526	/* PAGE_USER indicates the page can be read with user privileges,
527	 * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1
528	 * contains _PAGE_READ */
529	extrd,u,*=      \pte,_PAGE_USER_BIT+32,1,%r0
530	depdi		7,11,3,\prot
531	/* If we're a gateway page, drop PL2 back to zero for promotion
532	 * to kernel privilege (so we can execute the page as kernel).
533	 * Any privilege promotion page always denys read and write */
534	extrd,u,*= 	\pte,_PAGE_GATEWAY_BIT+32,1,%r0
535	depd		%r0,11,2,\prot	/* If Gateway, Set PL2 to 0 */

536

537	/* Enforce uncacheable pages.
538	 * This should ONLY be use for MMIO on PA 2.0 machines.
539	 * Memory/DMA is cache coherent on all PA2.0 machines we support
540	 * (that means T-class is NOT supported) and the memory controllers
541	 * on most of those machines only handles cache transactions.
542	 */
543	extrd,u,*=	\pte,_PAGE_NO_CACHE_BIT+32,1,%r0
544	depi		1,12,1,\prot

545

546	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
547	extrd,u		\pte,(63-ASM_PFN_PTE_SHIFT)+(63-58),64-PAGE_SHIFT,\pte
548	depdi		_PAGE_SIZE_ENCODING_DEFAULT,63,63-58,\pte
549	.endm
550
551	/* Identical macro to make_insert_tlb above, except it
552	 * makes the tlb entry for the differently formatted pa11
553	 * insertion instructions */
	/* 32-bit (pa11) variant: extru,= / depi pairs mirror the
	 * nullified extrd/depdi sequences in make_insert_tlb. */
554	.macro		make_insert_tlb_11	spc,pte,prot
555	zdep		\spc,30,15,\prot
556	dep		\pte,8,7,\prot
557	extru,=		\pte,_PAGE_NO_CACHE_BIT,1,%r0
558	depi		1,12,1,\prot
559	extru,=         \pte,_PAGE_USER_BIT,1,%r0
560	depi		7,11,3,\prot   /* Set for user space (1 rsvd for read) */
561	extru,= 	\pte,_PAGE_GATEWAY_BIT,1,%r0
562	depi		0,11,2,\prot	/* If Gateway, Set PL2 to 0 */

563

564	/* Get rid of prot bits and convert to page addr for iitlba */

565

566	depi		_PAGE_SIZE_ENCODING_DEFAULT,31,ASM_PFN_PTE_SHIFT,\pte
567	extru		\pte,24,25,\pte
568	.endm
569
570	/* This is for ILP32 PA2.0 only.  The TLB insertion needs
571	 * to extend into I/O space if the address is 0xfXXXXXXX
572	 * so we extend the f's into the top word of the pte in
573	 * this case */
	/* addi,<> nullifies the sign-extension when the extracted
	 * nibble is not 0xf (i.e. not an I/O-space address). */
574	.macro		f_extend	pte,tmp
575	extrd,s		\pte,42,4,\tmp
576	addi,<>		1,\tmp,%r0
577	extrd,s		\pte,63,25,\pte
578	.endm
579
580	/* The alias region is an 8MB aligned 16MB to do clear and
581	 * copy user pages at addresses congruent with the user
582	 * virtual address.
583	 *
584	 * To use the alias page, you set %r26 up with the to TLB
585	 * entry (identifying the physical page) and %r23 up with
586	 * the from tlb entry (or nothing if only a to entry---for
587	 * clear_user_page_asm) */
	/* Kernel-only (\spc must be 0); va must fall inside
	 * TMPALIAS_MAP_START's region or we take \fault. */
588	.macro		do_alias	spc,tmp,tmp1,va,pte,prot,fault
589	cmpib,COND(<>),n 0,\spc,\fault
590	ldil		L%(TMPALIAS_MAP_START),\tmp
591#if defined(CONFIG_64BIT) && (TMPALIAS_MAP_START >= 0x80000000)
592	/* on LP64, ldi will sign extend into the upper 32 bits,
593	 * which is behaviour we don't want */
594	depdi		0,31,32,\tmp
595#endif
596	copy		\va,\tmp1
597	DEPI		0,31,23,\tmp1
598	cmpb,COND(<>),n	\tmp,\tmp1,\fault
	/* Fixed dirty|write|read protection for the alias mapping. */
599	ldi		(_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ),\prot
600	depd,z		\prot,8,7,\prot
601	/*
602	 * OK, it is in the temp alias region, check whether "from" or "to".
603	 * Check "subtle" note in pacache.S re: r23/r26.
604	 */
605#ifdef CONFIG_64BIT
606	extrd,u,*=	\va,41,1,%r0
607#else
608	extrw,u,=	\va,9,1,%r0
609#endif
610	or,COND(tr)	%r23,%r0,\pte
611	or		%r26,%r0,\pte
612	.endm
613
614
615	/*
616	 * Align fault_vector_20 on 4K boundary so that both
617	 * fault_vector_11 and fault_vector_20 are on the
618	 * same page. This is only necessary as long as we
619	 * write protect the kernel text, which we may stop
620	 * doing once we use large page translations to cover
621	 * the static part of the kernel address space.
622	 */
623
624	.text
625
626	.align	PAGE_SIZE
627
628ENTRY(fault_vector_20)
629	/* First vector is invalid (0) */
630	.ascii	"cows can fly"
631	.byte 0
632	.align 32

633

	/* Interruption vectors 1-31 (PA 2.0); each macro expands to a
	 * 32-byte-aligned slot.  "def" entries funnel to intr_save ->
	 * handle_interruption with the vector number in %r8. */
634	hpmc		 1
635	def		 2
636	def		 3
637	extint		 4
638	def		 5
639	itlb_20		 6
640	def		 7
641	def		 8
642	def              9
643	def		10
644	def		11
645	def		12
646	def		13
647	def		14
648	dtlb_20		15
649#if 0
650	naitlb_20	16
651#else
652	def             16
653#endif
654	nadtlb_20	17
655	def		18
656	def		19
657	dbit_20		20
658	def		21
659	def		22
660	def		23
661	def		24
662	def		25
663	def		26
664	def		27
665	def		28
666	def		29
667	def		30
668	def		31
669END(fault_vector_20)
670
671#ifndef CONFIG_64BIT
672
673	.align 2048
674
675ENTRY(fault_vector_11)
676	/* First vector is invalid (0) */
677	.ascii	"cows can fly"
678	.byte 0
679	.align 32

680

	/* Interruption vectors 1-31 for PA 1.1 (32-bit) machines;
	 * parallels fault_vector_20 above with the _11 TLB macros. */
681	hpmc		 1
682	def		 2
683	def		 3
684	extint		 4
685	def		 5
686	itlb_11		 6
687	def		 7
688	def		 8
689	def              9
690	def		10
691	def		11
692	def		12
693	def		13
694	def		14
695	dtlb_11		15
696#if 0
697	naitlb_11	16
698#else
699	def             16
700#endif
701	nadtlb_11	17
702	def		18
703	def		19
704	dbit_11		20
705	def		21
706	def		22
707	def		23
708	def		24
709	def		25
710	def		26
711	def		27
712	def		28
713	def		29
714	def		30
715	def		31
716END(fault_vector_11)
717
718#endif
719
720	.import		handle_interruption,code
721	.import		do_cpu_irq_mask,code
722
723	/*
724	 * r26 = function to be called
725	 * r25 = argument to pass in
726	 * r24 = flags for do_fork()
727	 *
728	 * Kernel threads don't ever return, so they don't need
729	 * a true register context. We just save away the arguments
730	 * for copy_thread/ret_ to properly set up the child.
731	 */
732
733#define CLONE_VM 0x100	/* Must agree with <linux/sched.h> */
734#define CLONE_UNTRACED 0x00800000
735
736	.import do_fork
	/* Create a kernel thread: builds a temporary pt_regs frame on the
	 * stack, forces CLONE_VM|CLONE_UNTRACED into the flags (%r24) and
	 * calls do_fork.  %r26 = function, %r25 = argument on entry. */
737ENTRY(__kernel_thread)
738	STREG	%r2, -RP_OFFSET(%r30)

739

740	copy	%r30, %r1
741	ldo	PT_SZ_ALGN(%r30),%r30
742#ifdef CONFIG_64BIT
743	/* Yo, function pointers in wide mode are little structs... -PB */
744	ldd	24(%r26), %r2
745	STREG	%r2, PT_GR27(%r1)	/* Store childs %dp */
746	ldd	16(%r26), %r26

747

748	STREG	%r22, PT_GR22(%r1)	/* save r22 (arg5) */
749	copy	%r0, %r22		/* user_tid */
750#endif
751	STREG	%r26, PT_GR26(%r1)  /* Store function & argument for child */
752	STREG	%r25, PT_GR25(%r1)
753	ldil	L%CLONE_UNTRACED, %r26
754	ldo	CLONE_VM(%r26), %r26   /* Force CLONE_VM since only init_mm */
755	or	%r26, %r24, %r26      /* will have kernel mappings.	 */
756	ldi	1, %r25			/* stack_start, signals kernel thread */
757	stw	%r0, -52(%r30)	     	/* user_tid */
758#ifdef CONFIG_64BIT
759	ldo	-16(%r30),%r29		/* Reference param save area */
760#endif
761	BL	do_fork, %r2
762	copy	%r1, %r24		/* pt_regs */

763

764	/* Parent Returns here */

765

	/* Pop the pt_regs frame and return to our caller. */
766	LDREG	-PT_SZ_ALGN-RP_OFFSET(%r30), %r2
767	ldo	-PT_SZ_ALGN(%r30), %r30
768	bv	%r0(%r2)
769	nop
770ENDPROC(__kernel_thread)
771
772	/*
773	 * Child Returns here
774	 *
775	 * copy_thread moved args from temp save area set up above
776	 * into task save area.
777	 */
778
	/* Child side of __kernel_thread: fetch the function/argument that
	 * copy_thread stashed in the task save area, call the function via
	 * ble, and fall into sys_exit when it returns. */
779ENTRY(ret_from_kernel_thread)

780

781	/* Call schedule_tail first though */
782	BL	schedule_tail, %r2
783	nop

784

785	LDREG	TI_TASK-THREAD_SZ_ALGN(%r30), %r1
786	LDREG	TASK_PT_GR25(%r1), %r26
787#ifdef CONFIG_64BIT
788	LDREG	TASK_PT_GR27(%r1), %r27
789	LDREG	TASK_PT_GR22(%r1), %r22
790#endif
791	LDREG	TASK_PT_GR26(%r1), %r1
792	ble	0(%sr7, %r1)
793	copy	%r31, %r2

794

795#ifdef CONFIG_64BIT
796	ldo	-16(%r30),%r29		/* Reference param save area */
797	loadgp				/* Thread could have been in a module */
798#endif
799#ifndef CONFIG_64BIT
800	b	sys_exit
801#else
802	load32	sys_exit, %r1
803	bv	%r0(%r1)
804#endif
805	ldi	0, %r26			/* exit code 0 (delay slot) */
806ENDPROC(ret_from_kernel_thread)
807
808	.import	sys_execve, code
	/* Kernel-internal execve: saves args in a pt_regs frame, calls
	 * sys_execve, and on success (r28 == 0) exits via intr_return
	 * into the new image; on failure returns to the caller. */
809ENTRY(__execve)
810	copy	%r2, %r15
811	copy	%r30, %r16
812	ldo	PT_SZ_ALGN(%r30), %r30
813	STREG	%r26, PT_GR26(%r16)
814	STREG	%r25, PT_GR25(%r16)
815	STREG	%r24, PT_GR24(%r16)
816#ifdef CONFIG_64BIT
817	ldo	-16(%r30),%r29		/* Reference param save area */
818#endif
819	BL	sys_execve, %r2
820	copy	%r16, %r26

821

822	cmpib,=,n 0,%r28,intr_return    /* forward */

823

824	/* yes, this will trap and die. */
825	copy	%r15, %r2
826	copy	%r16, %r30
827	bv	%r0(%r2)
828	nop
829ENDPROC(__execve)
830
831
832	/*
833	 * struct task_struct *_switch_to(struct task_struct *prev,
834	 *	struct task_struct *next)
835	 *
836	 * switch kernel stacks and return prev */
	/* Save callee state and the resume PC/SP into prev (%r26), load
	 * next's (%r25) saved PC/SP, point cr30 at next's thread_info and
	 * jump; the new task resumes at _switch_to_ret. */
837ENTRY(_switch_to)
838	STREG	 %r2, -RP_OFFSET(%r30)

839

840	callee_save_float
841	callee_save

842

843	load32	_switch_to_ret, %r2

844

845	STREG	%r2, TASK_PT_KPC(%r26)
846	LDREG	TASK_PT_KPC(%r25), %r2

847

848	STREG	%r30, TASK_PT_KSP(%r26)
849	LDREG	TASK_PT_KSP(%r25), %r30
850	LDREG	TASK_THREAD_INFO(%r25), %r25
851	bv	%r0(%r2)
852	mtctl   %r25,%cr30

853

854_switch_to_ret:
855	mtctl	%r0, %cr0		/* Needed for single stepping */
856	callee_rest
857	callee_rest_float

858

	/* Return prev (still in %r26) to the caller, per the contract. */
859	LDREG	-RP_OFFSET(%r30), %r2
860	bv	%r0(%r2)
861	copy	%r26, %r28
862ENDPROC(_switch_to)
863
864	/*
865	 * Common rfi return path for interruptions, kernel execve, and
866	 * sys_rt_sigreturn (sometimes).  The sys_rt_sigreturn syscall will
867	 * return via this path if the signal was received when the process
868	 * was running; if the process was blocked on a syscall then the
869	 * normal syscall_exit path is used.  All syscalls for traced
870	 * proceses exit via intr_restore.
871	 *
872	 * XXX If any syscalls that change a processes space id ever exit
873	 * this way, then we will need to copy %sr3 in to PT_SR[3..7], and
874	 * adjust IASQ[0..1].
875	 *
876	 */
877
878	.align	PAGE_SIZE
879
	/* Entry of the rfi return path: sanitize the saved iaoq (force
	 * user privilege level 3) and PSW, and reload the space regs,
	 * before falling through to intr_return below. */
880ENTRY(syscall_exit_rfi)
881	mfctl   %cr30,%r16
882	LDREG	TI_TASK(%r16), %r16	/* thread_info -> task_struct */
883	ldo	TASK_REGS(%r16),%r16
884	/* Force iaoq to userspace, as the user has had access to our current
885	 * context via sigcontext. Also Filter the PSW for the same reason.
886	 */
887	LDREG	PT_IAOQ0(%r16),%r19
888	depi	3,31,2,%r19		/* privilege level 3 = user */
889	STREG	%r19,PT_IAOQ0(%r16)
890	LDREG	PT_IAOQ1(%r16),%r19
891	depi	3,31,2,%r19
892	STREG	%r19,PT_IAOQ1(%r16)
893	LDREG   PT_PSW(%r16),%r19
894	load32	USER_PSW_MASK,%r1
895#ifdef CONFIG_64BIT
896	load32	USER_PSW_HI_MASK,%r20
897	depd    %r20,31,32,%r1
898#endif
899	and     %r19,%r1,%r19 /* Mask out bits that user shouldn't play with */
900	load32	USER_PSW,%r1
901	or      %r19,%r1,%r19 /* Make sure default USER_PSW bits are set */
902	STREG   %r19,PT_PSW(%r16)

903

904	/*
905	 * If we aren't being traced, we never saved space registers
906	 * (we don't store them in the sigcontext), so set them
907	 * to "proper" values now (otherwise we'll wind up restoring
908	 * whatever was last stored in the task structure, which might
909	 * be inconsistent if an interrupt occured while on the gateway
910	 * page). Note that we may be "trashing" values the user put in
911	 * them, but we don't support the user changing them.
912	 */

913

914	STREG   %r0,PT_SR2(%r16)
915	mfsp    %sr3,%r19
916	STREG   %r19,PT_SR0(%r16)
917	STREG   %r19,PT_SR1(%r16)
918	STREG   %r19,PT_SR3(%r16)
919	STREG   %r19,PT_SR4(%r16)
920	STREG   %r19,PT_SR5(%r16)
921	STREG   %r19,PT_SR6(%r16)
922	STREG   %r19,PT_SR7(%r16)
923
	/* Common exit loop: check need_resched, then pending signals,
	 * then fall into intr_restore.  %r16 = pt_regs throughout. */
924intr_return:
925	/* NOTE: Need to enable interrupts incase we schedule. */
926	ssm     PSW_SM_I, %r0

927

928intr_check_resched:

929

930	/* check for reschedule */
931	mfctl   %cr30,%r1
932	LDREG   TI_FLAGS(%r1),%r19	/* sched.h: TIF_NEED_RESCHED */
933	bb,<,n	%r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */

934

935	.import do_notify_resume,code
936intr_check_sig:
937	/* As above */
938	mfctl   %cr30,%r1
939	LDREG	TI_FLAGS(%r1),%r19
940	ldi	(_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK), %r20
941	and,COND(<>)	%r19, %r20, %r0
942	b,n	intr_restore	/* skip past if we've nothing to do */

943

944	/* This check is critical to having LWS
945	 * working. The IASQ is zero on the gateway
946	 * page and we cannot deliver any signals until
947	 * we get off the gateway page.
948	 *
949	 * Only do signals if we are returning to user space
950	 */
951	LDREG	PT_IASQ0(%r16), %r20
952	cmpib,COND(=),n 0,%r20,intr_restore /* backward */
953	LDREG	PT_IASQ1(%r16), %r20
954	cmpib,COND(=),n 0,%r20,intr_restore /* backward */

955

956	copy	%r0, %r25			/* long in_syscall = 0 */
957#ifdef CONFIG_64BIT
958	ldo	-16(%r30),%r29			/* Reference param save area */
959#endif

960

961	BL	do_notify_resume,%r2
962	copy	%r16, %r26			/* struct pt_regs *regs */

963

	/* Loop: more work may have become pending while handling signals. */
964	b,n	intr_check_sig
965
	/* Restore the full register context from pt_regs (%r16) and rfi
	 * back to the interrupted code.  The nops after rfi pad the
	 * instruction queue (no useful work may follow an rfi). */
966intr_restore:
967	copy            %r16,%r29
968	ldo             PT_FR31(%r29),%r1
969	rest_fp         %r1
970	rest_general    %r29

971

972	/* inverse of virt_map */
973	pcxt_ssm_bug
974	rsm             PSW_SM_QUIET,%r0	/* prepare for rfi */
975	tophys_r1       %r29

976

977	/* Restore space id's and special cr's from PT_REGS
978	 * structure pointed to by r29
979	 */
980	rest_specials	%r29

981

982	/* IMPORTANT: rest_stack restores r29 last (we are using it)!
983	 * It also restores r1 and r30.
984	 */
985	rest_stack

986

987	rfi
988	nop
989	nop
990	nop
991	nop
992	nop
993	nop
994	nop
995	nop
996
	/* Without CONFIG_PREEMPT a kernel-mode resched request just
	 * restores context; intr_do_preempt aliases intr_restore. */
997#ifndef CONFIG_PREEMPT
998# define intr_do_preempt	intr_restore
999#endif /* !CONFIG_PREEMPT */

1000

1001	.import schedule,code
1002intr_do_resched:
1003	/* Only call schedule on return to userspace. If we're returning
1004	 * to kernel space, we may schedule if CONFIG_PREEMPT, otherwise
1005	 * we jump back to intr_restore.
1006	 */
1007	LDREG	PT_IASQ0(%r16), %r20
1008	cmpib,COND(=)	0, %r20, intr_do_preempt
1009	nop
1010	LDREG	PT_IASQ1(%r16), %r20
1011	cmpib,COND(=)	0, %r20, intr_do_preempt
1012	nop

1013

1014#ifdef CONFIG_64BIT
1015	ldo	-16(%r30),%r29		/* Reference param save area */
1016#endif

1017

	/* Tail-call schedule with %r2 pointing back at intr_check_sig. */
1018	ldil	L%intr_check_sig, %r2
1019#ifndef CONFIG_64BIT
1020	b	schedule
1021#else
1022	load32	schedule, %r20
1023	bv	%r0(%r20)
1024#endif
1025	ldo	R%intr_check_sig(%r2), %r2
1026
1027	/* preempt the current task on returning to kernel
1028	 * mode from an interrupt, iff need_resched is set,
1029	 * and preempt_count is 0. otherwise, we continue on
1030	 * our merry way back to the current running task.
1031	 */
1032#ifdef CONFIG_PREEMPT
1033	.import preempt_schedule_irq,code
1034intr_do_preempt:
1035	rsm	PSW_SM_I, %r0		/* disable interrupts */

1036

1037	/* current_thread_info()->preempt_count */
1038	mfctl	%cr30, %r1
1039	LDREG	TI_PRE_COUNT(%r1), %r19
1040	cmpib,COND(<>)	0, %r19, intr_restore	/* if preempt_count > 0 */
1041	nop				/* prev insn branched backwards */

1042

	/* Don't preempt if the interrupted context had I-bit clear. */
1043	/* check if we interrupted a critical path */
1044	LDREG	PT_PSW(%r16), %r20
1045	bb,<,n	%r20, 31 - PSW_SM_I, intr_restore
1046	nop

1047

1048	BL	preempt_schedule_irq, %r2
1049	nop

1050

1051	b,n	intr_restore		/* ssm PSW_SM_I done by intr_restore */
1052#endif /* CONFIG_PREEMPT */
1053
1054	/*
1055	 * External interrupts.
1056	 */
1057
1058intr_extint:
1059	cmpib,COND(=),n 0,%r16,1f
1060
1061	get_stack_use_cr30
1062	b,n 2f
1063
10641:
1065	get_stack_use_r30
10662:
1067	save_specials	%r29
1068	virt_map
1069	save_general	%r29
1070
1071	ldo	PT_FR0(%r29), %r24
1072	save_fp	%r24
1073
1074	loadgp
1075
1076	copy	%r29, %r26	/* arg0 is pt_regs */
1077	copy	%r29, %r16	/* save pt_regs */
1078
1079	ldil	L%intr_return, %r2
1080
1081#ifdef CONFIG_64BIT
1082	ldo	-16(%r30),%r29	/* Reference param save area */
1083#endif
1084
1085	b	do_cpu_irq_mask
1086	ldo	R%intr_return(%r2), %r2	/* return to intr_return, not here */
1087ENDPROC(syscall_exit_rfi)
1088
1089
1090	/* Generic interruptions (illegal insn, unaligned, page fault, etc) */

1091

	/* On entry %r8 (shadowed) holds the trap code set by "def";
	 * it is copied to non-shadowed %r26 to survive the rfir in
	 * virt_map (see the get_stack notes earlier in this file). */
1092ENTRY(intr_save)		/* for os_hpmc */
1093	mfsp    %sr7,%r16
1094	cmpib,COND(=),n 0,%r16,1f
1095	get_stack_use_cr30
1096	b	2f
1097	copy    %r8,%r26

1098

10991:
1100	get_stack_use_r30
1101	copy    %r8,%r26

1102

11032:
1104	save_specials	%r29

1105

1106	/* If this trap is a itlb miss, skip saving/adjusting isr/ior */

1107

1108	/*
1109	 * FIXME: 1) Use a #define for the hardwired "6" below (and in
1110	 *           traps.c.
1111	 *        2) Once we start executing code above 4 Gb, we need
1112	 *           to adjust iasq/iaoq here in the same way we
1113	 *           adjust isr/ior below.
1114	 */

1115

1116	cmpib,COND(=),n        6,%r26,skip_save_ior

1117

1118

1119	mfctl           %cr20, %r16 /* isr */
1120	nop		/* serialize mfctl on PA 2.0 to avoid 4 cycle penalty */
1121	mfctl           %cr21, %r17 /* ior */

1122

1123

1124#ifdef CONFIG_64BIT
1125	/*
1126	 * If the interrupted code was running with W bit off (32 bit),
1127	 * clear the b bits (bits 0 & 1) in the ior.
1128	 * save_specials left ipsw value in r8 for us to test.
1129	 */
1130	extrd,u,*<>     %r8,PSW_W_BIT,1,%r0
1131	depdi           0,1,2,%r17

1132

1133	/*
1134	 * FIXME: This code has hardwired assumptions about the split
1135	 *        between space bits and offset bits. This will change
1136	 *        when we allow alternate page sizes.
1137	 */

1138

1139	/* adjust isr/ior. */
1140	extrd,u         %r16,63,SPACEID_SHIFT,%r1	/* get high bits from isr for ior */
1141	depd            %r1,31,SPACEID_SHIFT,%r17	/* deposit them into ior */
1142	depdi           0,63,SPACEID_SHIFT,%r16		/* clear them from isr */
1143#endif
1144	STREG           %r16, PT_ISR(%r29)
1145	STREG           %r17, PT_IOR(%r29)

1146

1147

1148skip_save_ior:
	/* Switch to virtual mode and complete the pt_regs save, then
	 * hand off to C with arg1 = pt_regs, returning to intr_check_sig. */
1149	virt_map
1150	save_general	%r29

1151

1152	ldo		PT_FR0(%r29), %r25
1153	save_fp		%r25

1154

1155	loadgp

1156

1157	copy		%r29, %r25	/* arg1 is pt_regs */
1158#ifdef CONFIG_64BIT
1159	ldo		-16(%r30),%r29	/* Reference param save area */
1160#endif

1161

1162	ldil		L%intr_check_sig, %r2
1163	copy		%r25, %r16	/* save pt_regs */

1164

1165	b		handle_interruption
1166	ldo		R%intr_check_sig(%r2), %r2
1167ENDPROC(intr_save)
1168
1169
1170	/*
1171	 * Note for all tlb miss handlers:
1172	 *
1173	 * cr24 contains a pointer to the kernel address space
1174	 * page directory.
1175	 *
1176	 * cr25 contains a pointer to the current user address
1177	 * space page directory.
1178	 *
1179	 * sr3 will contain the space id of the user address space
1180	 * of the current running thread while that thread is
1181	 * running in the kernel.
1182	 */
1183
1184	/*
1185	 * register number allocations.  Note that these are all
1186	 * in the shadowed registers
1187	 */
1188
1189	t0 = r1		/* temporary register 0 */
1190	va = r8		/* virtual address for which the trap occured */
1191	t1 = r9		/* temporary register 1 */
1192	pte  = r16	/* pte/phys page # */
1193	prot = r17	/* prot bits */
1194	spc  = r24	/* space for which the trap occured */
1195	ptp = r25	/* page directory/page table pointer */
1196
1197#ifdef CONFIG_64BIT
1198
/*
 * Data TLB miss, PA 2.0 wide (64-bit) kernel: walk the 3-level page
 * table for va, mark the pte accessed, and insert the translation.
 * Falls back to dtlb_check_alias_20w if no pte is present.
 */
dtlb_miss_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dtlb_fault	/* wrong space -> full fault path */

	L3_ptep		ptp,pte,t0,va,dtlb_check_alias_20w

	update_ptep	ptp,pte,t0,t1	/* set accessed bit in the pte */

	make_insert_tlb	spc,pte,prot

	idtlbt          pte,prot	/* insert data translation */

	rfir
	nop
1214
/*
 * No pte found (20w): check for a fault in the temporary alias
 * region via the do_alias macro; if it is not an alias fault we
 * branch to dtlb_fault, otherwise insert the alias translation.
 */
dtlb_check_alias_20w:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault

	idtlbt          pte,prot

	rfir
	nop
1222
/*
 * Non-access data TLB miss, PA 2.0 wide kernel (e.g. from cache
 * flush or probe instructions).  Same walk as dtlb_miss_20w but a
 * missing pte goes to nadtlb_check_flush_20w / nadtlb_emulate
 * instead of faulting.
 */
nadtlb_miss_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,nadtlb_fault

	L3_ptep		ptp,pte,t0,va,nadtlb_check_flush_20w

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	idtlbt          pte,prot

	rfir
	nop
1238
/*
 * Non-access miss with no valid pte (20w): if the pte carries the
 * FLUSH bit, insert a temporary "flush only" translation so the
 * flush instruction can complete; otherwise emulate the insn.
 */
nadtlb_check_flush_20w:
	bb,>=,n          pte,_PAGE_FLUSH_BIT,nadtlb_emulate

	/* Insert a "flush only" translation */

	depdi,z         7,7,3,prot
	depdi           1,10,1,prot

	/* Get rid of prot bits and convert to page addr for idtlbt */

	depdi		0,63,12,pte
	extrd,u         pte,56,52,pte
	idtlbt          pte,prot

	rfir
	nop
1255
1256#else
1257
/*
 * Data TLB miss, PA 1.1: 2-level page table walk, then split
 * idtlba/idtlbp insertion.  %sr1 is borrowed (and restored) to
 * address the faulting space during the inserts.
 */
dtlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,dtlb_fault

	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_11

	update_ptep	ptp,pte,t0,t1	/* set accessed bit in the pte */

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t0  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t0, %sr1	/* Restore sr1 */

	rfir
	nop
1279
/*
 * No pte found (PA 1.1): if this is a kernel-space fault inside
 * the temporary alias region, hand-build a read/write/dirty
 * translation to the "from" or "to" physical page; anything else
 * goes to dtlb_fault.
 */
dtlb_check_alias_11:

	/* Check to see if fault is in the temporary alias region */

	cmpib,<>,n      0,spc,dtlb_fault /* forward */
	ldil            L%(TMPALIAS_MAP_START),t0
	copy            va,t1
	depwi           0,31,23,t1	/* mask va down to its region base */
	cmpb,<>,n       t0,t1,dtlb_fault /* forward */
	ldi             (_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ),prot
	depw,z          prot,8,7,prot

	/*
	 * OK, it is in the temp alias region, check whether "from" or "to".
	 * Check "subtle" note in pacache.S re: r23/r26.
	 */

	extrw,u,=       va,9,1,r0
	or,tr           %r23,%r0,pte    /* If "from" use "from" page */
	or              %r26,%r0,pte    /* else "to", use "to" page  */

	idtlba          pte,(va)
	idtlbp          prot,(va)

	rfir
	nop
1306
/*
 * Non-access data TLB miss, PA 1.1.  Same walk as dtlb_miss_11 but
 * a missing pte goes to nadtlb_check_flush_11 / nadtlb_emulate.
 */
nadtlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,nadtlb_fault

	L2_ptep		ptp,pte,t0,va,nadtlb_check_flush_11

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot


	mfsp		%sr1,t0  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t0, %sr1	/* Restore sr1 */

	rfir
	nop
1329
/*
 * Non-access miss with no valid pte (PA 1.1): insert a "flush only"
 * translation if the pte's FLUSH bit is set, else emulate the insn.
 */
nadtlb_check_flush_11:
	bb,>=,n          pte,_PAGE_FLUSH_BIT,nadtlb_emulate

	/* Insert a "flush only" translation */

	zdepi           7,7,3,prot
	depi            1,10,1,prot

	/* Get rid of prot bits and convert to page addr for idtlba */

	depi		0,31,12,pte
	extru		pte,24,25,pte

	mfsp		%sr1,t0  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t0, %sr1	/* Restore sr1 */

	rfir
	nop
1353
/*
 * Data TLB miss, PA 2.0 narrow (32-bit) kernel: 2-level walk, pte
 * extended via f_extend, combined idtlbt insertion.
 */
dtlb_miss_20:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dtlb_fault

	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_20

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	f_extend	pte,t0		/* widen pte for the 64-bit insert */

	idtlbt          pte,prot

	rfir
	nop
1371
/*
 * No pte found (PA 2.0 narrow): temporary-alias check via do_alias,
 * branching to dtlb_fault when the address is not an alias mapping.
 */
dtlb_check_alias_20:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault

	idtlbt          pte,prot

	rfir
	nop
1379
/*
 * Non-access data TLB miss, PA 2.0 narrow kernel.  Same walk as
 * dtlb_miss_20 but a missing pte goes to nadtlb_check_flush_20 /
 * nadtlb_emulate instead of faulting.
 */
nadtlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,nadtlb_fault

	L2_ptep		ptp,pte,t0,va,nadtlb_check_flush_20

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	f_extend	pte,t0		/* widen pte for the 64-bit insert */

	idtlbt          pte,prot	/* tab-indented to match file style */

	rfir
	nop
1397
/*
 * Non-access miss with no valid pte (PA 2.0 narrow): insert a
 * "flush only" translation if the FLUSH bit is set, else emulate.
 */
nadtlb_check_flush_20:
	bb,>=,n          pte,_PAGE_FLUSH_BIT,nadtlb_emulate

	/* Insert a "flush only" translation */

	depdi,z         7,7,3,prot
	depdi           1,10,1,prot

	/* Get rid of prot bits and convert to page addr for idtlbt */

	depdi		0,63,12,pte
	extrd,u         pte,56,32,pte
	idtlbt          pte,prot

	rfir
	nop
1414#endif
1415
nadtlb_emulate:

	/*
	 * Non access misses can be caused by fdc,fic,pdc,lpa,probe and
	 * probei instructions. We don't want to fault for these
	 * instructions (not only does it not make sense, it can cause
	 * deadlocks, since some flushes are done with the mmap
	 * semaphore held). If the translation doesn't exist, we can't
	 * insert a translation, so have to emulate the side effects
	 * of the instruction. Since we don't insert a translation
	 * we can get a lot of faults during a flush loop, so it makes
	 * sense to try to do it here with minimum overhead. We only
	 * emulate fdc,fic,pdc,probew,prober instructions whose base
	 * and index registers are not shadowed. We defer everything
	 * else to the "slow" path.
	 */

	mfctl           %cr19,%r9 /* Get iir */

	/* PA 2.0 Arch Ref. Book pg 382 has a good description of the insn bits.
	   Checks for fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw */

	/* Checks for fdc,fdce,pdc,"fic,4f" only */
	ldi             0x280,%r16
	and             %r9,%r16,%r17
	cmpb,<>,n       %r16,%r17,nadtlb_probe_check
	bb,>=,n         %r9,26,nadtlb_nullify  /* m bit not set, just nullify */
	BL		get_register,%r25
	extrw,u         %r9,15,5,%r8           /* Get index register # */
	cmpib,COND(=),n        -1,%r1,nadtlb_fault    /* have to use slow path */
	copy            %r1,%r24
	BL		get_register,%r25
	extrw,u         %r9,10,5,%r8           /* Get base register # */
	cmpib,COND(=),n        -1,%r1,nadtlb_fault    /* have to use slow path */
	BL		set_register,%r25
	add,l           %r1,%r24,%r1           /* doesn't affect c/b bits */

	/*
	 * Side effects emulated (or the insn needs none): set PSW_N in
	 * the interrupted context's ipsw so the instruction is nullified
	 * when we rfir back to it.
	 */
nadtlb_nullify:
	mfctl           %ipsw,%r8
	ldil            L%PSW_N,%r9
	or              %r8,%r9,%r8            /* Set PSW_N */
	mtctl           %r8,%ipsw

	rfir
	nop
1461
1462	/*
1463		When there is no translation for the probe address then we
1464		must nullify the insn and return zero in the target regsiter.
1465		This will indicate to the calling code that it does not have
1466		write/read privileges to this address.
1467
1468		This should technically work for prober and probew in PA 1.1,
1469		and also probe,r and probe,w in PA 2.0
1470
1471		WARNING: USE ONLY NON-SHADOW REGISTERS WITH PROBE INSN!
1472		THE SLOW-PATH EMULATION HAS NOT BEEN WRITTEN YET.
1473
1474	*/
1475nadtlb_probe_check:
1476	ldi             0x80,%r16
1477	and             %r9,%r16,%r17
1478	cmpb,<>,n       %r16,%r17,nadtlb_fault /* Must be probe,[rw]*/
1479	BL              get_register,%r25      /* Find the target register */
1480	extrw,u         %r9,31,5,%r8           /* Get target register */
1481	cmpib,COND(=),n        -1,%r1,nadtlb_fault    /* have to use slow path */
1482	BL		set_register,%r25
1483	copy            %r0,%r1                /* Write zero to target register */
1484	b nadtlb_nullify                       /* Nullify return insn */
1485	nop
1486
1487
1488#ifdef CONFIG_64BIT
itlb_miss_20w:

	/*
	 * I miss is a little different, since we allow users to fault
	 * on the gateway page which is in the kernel address space.
	 */

	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,itlb_fault

	L3_ptep		ptp,pte,t0,va,itlb_fault	/* no alias path for I-side */

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	iitlbt          pte,prot	/* insert instruction translation */

	rfir
	nop
1510
1511#else
1512
/*
 * Instruction TLB miss, PA 1.1: 2-level walk, split iitlba/iitlbp
 * insertion with %sr1 temporarily pointing at the faulting space.
 */
itlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,itlb_fault

	L2_ptep		ptp,pte,t0,va,itlb_fault

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t0  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	iitlba		pte,(%sr1,va)
	iitlbp		prot,(%sr1,va)

	mtsp		t0, %sr1	/* Restore sr1 */

	rfir
	nop
1534
/*
 * Instruction TLB miss, PA 2.0 narrow kernel: 2-level walk, pte
 * extended via f_extend, combined iitlbt insertion.
 */
itlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,itlb_fault

	L2_ptep		ptp,pte,t0,va,itlb_fault

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	f_extend	pte,t0

	iitlbt          pte,prot

	rfir
	nop
1552
1553#endif
1554
1555#ifdef CONFIG_64BIT
1556
/*
 * TLB dirty-bit trap, PA 2.0 wide kernel: set the dirty bit in the
 * pte and re-insert the translation.  On SMP, user-space updates
 * (spc != 0) are serialized on pa_dbit_lock with a raw ldcw
 * spin loop; kernel-space faults skip the lock.
 */
dbit_trap_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dbit_fault

	L3_ptep		ptp,pte,t0,va,dbit_fault

#ifdef CONFIG_SMP
	cmpib,COND(=),n        0,spc,dbit_nolock_20w	/* kernel space: no lock */
	load32		PA(pa_dbit_lock),t0

dbit_spin_20w:
	LDCW		0(t0),t1	/* ldcw: atomically read and zero the lock word */
	cmpib,COND(=)         0,t1,dbit_spin_20w
	nop

dbit_nolock_20w:
#endif
	update_dirty	ptp,pte,t1

	make_insert_tlb	spc,pte,prot

	idtlbt          pte,prot
#ifdef CONFIG_SMP
	cmpib,COND(=),n        0,spc,dbit_nounlock_20w
	ldi             1,t1
	stw             t1,0(t0)	/* release pa_dbit_lock */

dbit_nounlock_20w:
#endif

	rfir
	nop
1590#else
1591
/*
 * TLB dirty-bit trap, PA 1.1: set the pte dirty bit and re-insert
 * via idtlba/idtlbp.  SMP locking as in dbit_trap_20w.
 */
dbit_trap_11:

	get_pgd		spc,ptp

	space_check	spc,t0,dbit_fault

	L2_ptep		ptp,pte,t0,va,dbit_fault

#ifdef CONFIG_SMP
	cmpib,COND(=),n        0,spc,dbit_nolock_11	/* kernel space: no lock */
	load32		PA(pa_dbit_lock),t0

dbit_spin_11:
	LDCW		0(t0),t1
	cmpib,=         0,t1,dbit_spin_11
	nop

dbit_nolock_11:
#endif
	update_dirty	ptp,pte,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp            %sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp            t1, %sr1     /* Restore sr1 */
#ifdef CONFIG_SMP
	cmpib,COND(=),n        0,spc,dbit_nounlock_11
	ldi             1,t1
	stw             t1,0(t0)	/* release pa_dbit_lock */

dbit_nounlock_11:
#endif

	rfir
	nop
1632
/*
 * TLB dirty-bit trap, PA 2.0 narrow kernel: set the pte dirty bit,
 * widen the pte with f_extend, and re-insert via idtlbt.  SMP
 * locking as in dbit_trap_20w.
 */
dbit_trap_20:
	get_pgd		spc,ptp

	space_check	spc,t0,dbit_fault

	L2_ptep		ptp,pte,t0,va,dbit_fault

#ifdef CONFIG_SMP
	cmpib,COND(=),n        0,spc,dbit_nolock_20	/* kernel space: no lock */
	load32		PA(pa_dbit_lock),t0

dbit_spin_20:
	LDCW		0(t0),t1
	cmpib,=         0,t1,dbit_spin_20
	nop

dbit_nolock_20:
#endif
	update_dirty	ptp,pte,t1

	make_insert_tlb	spc,pte,prot

	f_extend	pte,t1

	idtlbt          pte,prot	/* tab-indented to match file style */

#ifdef CONFIG_SMP
	cmpib,COND(=),n        0,spc,dbit_nounlock_20
	ldi             1,t1
	stw             t1,0(t0)	/* release pa_dbit_lock */

dbit_nounlock_20:
#endif

	rfir
	nop
1669#endif
1670
	.import handle_interruption,code

	/*
	 * Slow-path fault stubs: load the trap code into %r8 (in the
	 * branch delay slot) and enter intr_save, which hands off to
	 * handle_interruption in C.
	 */
kernel_bad_space:
	b               intr_save
	ldi             31,%r8  /* Use an unused code */

dbit_fault:
	b               intr_save
	ldi             20,%r8	/* trap code for the dirty-bit trap */

itlb_fault:
	b               intr_save
	ldi             6,%r8	/* trap code for the ITLB miss */

nadtlb_fault:
	b               intr_save
	ldi             17,%r8	/* trap code for the non-access DTLB miss */

dtlb_fault:
	b               intr_save
	ldi             15,%r8	/* trap code for the DTLB miss */
1692
1693	/* Register saving semantics for system calls:
1694
1695	   %r1		   clobbered by system call macro in userspace
1696	   %r2		   saved in PT_REGS by gateway page
1697	   %r3  - %r18	   preserved by C code (saved by signal code)
1698	   %r19 - %r20	   saved in PT_REGS by gateway page
1699	   %r21 - %r22	   non-standard syscall args
1700			   stored in kernel stack by gateway page
1701	   %r23 - %r26	   arg3-arg0, saved in PT_REGS by gateway page
1702	   %r27 - %r30	   saved in PT_REGS by gateway page
1703	   %r31		   syscall return pointer
1704	 */
1705
1706	/* Floating point registers (FIXME: what do we do with these?)
1707
1708	   %fr0  - %fr3	   status/exception, not preserved
1709	   %fr4  - %fr7	   arguments
1710	   %fr8	 - %fr11   not preserved by C code
1711	   %fr12 - %fr21   preserved by C code
1712	   %fr22 - %fr31   not preserved by C code
1713	 */
1714
	/*
	 * reg_save: store callee-saved registers %r3-%r18 into the
	 * pt_regs struct pointed to by \regs.  Counterpart of
	 * reg_restore below.
	 */
	.macro	reg_save regs
	STREG	%r3, PT_GR3(\regs)
	STREG	%r4, PT_GR4(\regs)
	STREG	%r5, PT_GR5(\regs)
	STREG	%r6, PT_GR6(\regs)
	STREG	%r7, PT_GR7(\regs)
	STREG	%r8, PT_GR8(\regs)
	STREG	%r9, PT_GR9(\regs)
	STREG   %r10,PT_GR10(\regs)
	STREG   %r11,PT_GR11(\regs)
	STREG   %r12,PT_GR12(\regs)
	STREG   %r13,PT_GR13(\regs)
	STREG   %r14,PT_GR14(\regs)
	STREG   %r15,PT_GR15(\regs)
	STREG   %r16,PT_GR16(\regs)
	STREG   %r17,PT_GR17(\regs)
	STREG   %r18,PT_GR18(\regs)
	.endm
1733
	/*
	 * reg_restore: reload callee-saved registers %r3-%r18 from the
	 * pt_regs struct pointed to by \regs.
	 */
	.macro	reg_restore regs
	LDREG	PT_GR3(\regs), %r3
	LDREG	PT_GR4(\regs), %r4
	LDREG	PT_GR5(\regs), %r5
	LDREG	PT_GR6(\regs), %r6
	LDREG	PT_GR7(\regs), %r7
	LDREG	PT_GR8(\regs), %r8
	LDREG	PT_GR9(\regs), %r9
	LDREG   PT_GR10(\regs),%r10
	LDREG   PT_GR11(\regs),%r11
	LDREG   PT_GR12(\regs),%r12
	LDREG   PT_GR13(\regs),%r13
	LDREG   PT_GR14(\regs),%r14
	LDREG   PT_GR15(\regs),%r15
	LDREG   PT_GR16(\regs),%r16
	LDREG   PT_GR17(\regs),%r17
	LDREG   PT_GR18(\regs),%r18
	.endm
1752
/*
 * fork() wrapper: saves callee-saved state and %cr27 into pt_regs,
 * then implements fork as sys_clone(SIGCHLD, parent_usp, regs).
 * Returns through wrapper_exit, restoring pt_regs state.
 */
ENTRY(sys_fork_wrapper)
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
	ldo	TASK_REGS(%r1),%r1	/* %r1 = task's pt_regs */
	reg_save %r1			/* save r3-r18 for the child/signal code */
	mfctl	%cr27, %r3		/* save thread pointer register */
	STREG	%r3, PT_CR27(%r1)

	STREG	%r2,-RP_OFFSET(%r30)
	ldo	FRAME_SIZE(%r30),%r30
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif

	/* These are call-clobbered registers and therefore
	   also syscall-clobbered (we hope). */
	STREG	%r2,PT_GR19(%r1)	/* save for child */
	STREG	%r30,PT_GR21(%r1)

	LDREG	PT_GR30(%r1),%r25	/* arg1 = user stack pointer */
	copy	%r1,%r24		/* arg2 = pt_regs */
	BL	sys_clone,%r2
	ldi	SIGCHLD,%r26		/* arg0 = clone flags (delay slot) */

	LDREG	-RP_OFFSET-FRAME_SIZE(%r30),%r2
wrapper_exit:
	ldo	-FRAME_SIZE(%r30),%r30		/* get the stackframe */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r1	 /* get pt regs */

	LDREG	PT_CR27(%r1), %r3
	mtctl	%r3, %cr27
	reg_restore %r1

	/* strace expects syscall # to be preserved in r20 */
	ldi	__NR_fork,%r20
	bv %r0(%r2)
	STREG	%r20,PT_GR20(%r1)
ENDPROC(sys_fork_wrapper)
1791
	/* Set the return value for the child */
ENTRY(child_return)
	BL	schedule_tail, %r2	/* finish the context switch */
	nop

	/* reload the return address the parent saved in PT_GR19 */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE-FRAME_SIZE(%r30), %r1
	LDREG	TASK_PT_GR19(%r1),%r2
	b	wrapper_exit
	copy	%r0,%r28		/* child's syscall return value is 0 */
ENDPROC(child_return)
1802
1803
/*
 * clone() wrapper: like sys_fork_wrapper but the user supplies the
 * clone flags/stack in the normal arg registers; only the pt_regs
 * pointer (arg2) is filled in here.  Exits via wrapper_exit.
 */
ENTRY(sys_clone_wrapper)
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r1	/* get pt regs */
	reg_save %r1
	mfctl	%cr27, %r3		/* save thread pointer register */
	STREG	%r3, PT_CR27(%r1)

	STREG	%r2,-RP_OFFSET(%r30)
	ldo	FRAME_SIZE(%r30),%r30
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif

	/* WARNING - Clobbers r19 and r21, userspace must save these! */
	STREG	%r2,PT_GR19(%r1)	/* save for child */
	STREG	%r30,PT_GR21(%r1)
	BL	sys_clone,%r2
	copy	%r1,%r24		/* arg2 = pt_regs (delay slot) */

	b	wrapper_exit
	LDREG	-RP_OFFSET-FRAME_SIZE(%r30),%r2
ENDPROC(sys_clone_wrapper)
1826
1827
/*
 * vfork() wrapper: saves state like the fork wrapper, then calls
 * sys_vfork with pt_regs as arg0.  Exits via wrapper_exit.
 */
ENTRY(sys_vfork_wrapper)
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r1	/* get pt regs */
	reg_save %r1
	mfctl	%cr27, %r3		/* save thread pointer register */
	STREG	%r3, PT_CR27(%r1)

	STREG	%r2,-RP_OFFSET(%r30)
	ldo	FRAME_SIZE(%r30),%r30
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif

	STREG	%r2,PT_GR19(%r1)	/* save for child */
	STREG	%r30,PT_GR21(%r1)

	BL	sys_vfork,%r2
	copy	%r1,%r26		/* arg0 = pt_regs (delay slot) */

	b	wrapper_exit
	LDREG	-RP_OFFSET-FRAME_SIZE(%r30),%r2
ENDPROC(sys_vfork_wrapper)
1850
1851
	/*
	 * execve_wrapper: shared body for the native and compat execve
	 * wrappers.  \execve is the C entry point to call; it receives
	 * the task's pt_regs as arg0 (user args are already in pt_regs).
	 */
	.macro  execve_wrapper execve
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r1	/* get pt regs */

	/*
	 * Do we need to save/restore r3-r18 here?
	 * I don't think so. why would new thread need old
	 * threads registers?
	 */

	/* %arg0 - %arg3 are already saved for us. */

	STREG %r2,-RP_OFFSET(%r30)
	ldo FRAME_SIZE(%r30),%r30
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif
	BL \execve,%r2
	copy %r1,%arg0			/* arg0 = pt_regs (delay slot) */

	ldo -FRAME_SIZE(%r30),%r30
	LDREG -RP_OFFSET(%r30),%r2

	/* If exec succeeded we need to load the args */

	ldo -1024(%r0),%r1
	cmpb,>>= %r28,%r1,error_\execve	/* %r28 in errno range -> error */
	copy %r2,%r19

error_\execve:
	bv %r0(%r19)
	nop
	.endm
1885
	.import sys_execve
/* Native execve() entry: expand the shared wrapper for sys_execve. */
ENTRY(sys_execve_wrapper)
	execve_wrapper sys_execve
ENDPROC(sys_execve_wrapper)
1890
1891#ifdef CONFIG_64BIT
	.import sys32_execve
/* 32-bit compat execve() entry on a 64-bit kernel. */
ENTRY(sys32_execve_wrapper)
	execve_wrapper sys32_execve
ENDPROC(sys32_execve_wrapper)
1896#endif
1897
/*
 * rt_sigreturn() wrapper: passes the task's pt_regs to
 * sys_rt_sigreturn, then restores r3-r18 (which the C code rewrote
 * from the sigcontext) before returning via the saved %r2.
 */
ENTRY(sys_rt_sigreturn_wrapper)
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26
	ldo	TASK_REGS(%r26),%r26	/* get pt regs */
	/* Don't save regs, we are going to restore them from sigcontext. */
	STREG	%r2, -RP_OFFSET(%r30)
#ifdef CONFIG_64BIT
	ldo	FRAME_SIZE(%r30), %r30
	BL	sys_rt_sigreturn,%r2
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	BL	sys_rt_sigreturn,%r2
	ldo	FRAME_SIZE(%r30), %r30	/* frame push in the delay slot */
#endif

	ldo	-FRAME_SIZE(%r30), %r30
	LDREG	-RP_OFFSET(%r30), %r2

	/* FIXME: I think we need to restore a few more things here. */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r1	/* get pt regs */
	reg_restore %r1

	/* If the signal was received while the process was blocked on a
	 * syscall, then r2 will take us to syscall_exit; otherwise r2 will
	 * take us to syscall_exit_rfi and on to intr_return.
	 */
	bv	%r0(%r2)
	LDREG	PT_GR28(%r1),%r28  /* reload original r28 for syscall_exit */
ENDPROC(sys_rt_sigreturn_wrapper)
1927
/*
 * sigaltstack() wrapper: fetches the user stack pointer from
 * pt_regs and passes it as the extra third argument that
 * do_sigaltstack expects (arg0/arg1 arrive from userspace).
 */
ENTRY(sys_sigaltstack_wrapper)
	/* Get the user stack pointer */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r24	/* get pt regs */
	LDREG	TASK_PT_GR30(%r24),%r24	/* %r24 = saved user sp */
	STREG	%r2, -RP_OFFSET(%r30)
#ifdef CONFIG_64BIT
	ldo	FRAME_SIZE(%r30), %r30
	BL	do_sigaltstack,%r2
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	BL	do_sigaltstack,%r2
	ldo	FRAME_SIZE(%r30), %r30	/* frame push in the delay slot */
#endif

	ldo	-FRAME_SIZE(%r30), %r30
	LDREG	-RP_OFFSET(%r30), %r2
	bv	%r0(%r2)
	nop
ENDPROC(sys_sigaltstack_wrapper)
1948
1949#ifdef CONFIG_64BIT
/* 32-bit compat sigaltstack() wrapper (64-bit kernel only). */
ENTRY(sys32_sigaltstack_wrapper)
	/* Get the user stack pointer */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r24
	LDREG	TASK_PT_GR30(%r24),%r24	/* %r24 = saved user sp */
	STREG	%r2, -RP_OFFSET(%r30)
	ldo	FRAME_SIZE(%r30), %r30
	BL	do_sigaltstack32,%r2
	ldo	-16(%r30),%r29		/* Reference param save area */

	ldo	-FRAME_SIZE(%r30), %r30
	LDREG	-RP_OFFSET(%r30), %r2
	bv	%r0(%r2)
	nop
ENDPROC(sys32_sigaltstack_wrapper)
1963ENDPROC(sys32_sigaltstack_wrapper)
1964#endif
1965
ENTRY(syscall_exit)
	/* NOTE: HP-UX syscalls also come through here
	 * after hpux_syscall_exit fixes up return
	 * values. */

	/* NOTE: Not all syscalls exit this way.  rt_sigreturn will exit
	 * via syscall_exit_rfi if the signal was received while the process
	 * was running.
	 */

	/* save return value now */

	mfctl     %cr30, %r1		/* %cr30 = thread_info pointer */
	LDREG     TI_TASK(%r1),%r1
	STREG     %r28,TASK_PT_GR28(%r1)

#ifdef CONFIG_HPUX
/* <linux/personality.h> cannot be easily included */
#define PER_HPUX 0x10
	ldw	TASK_PERSONALITY(%r1),%r19

	/* We can't use "CMPIB<> PER_HPUX" since "im5" field is sign extended */
	ldo	  -PER_HPUX(%r19), %r19
	cmpib,COND(<>),n 0,%r19,1f

	/* Save other hpux returns if personality is PER_HPUX */
	STREG     %r22,TASK_PT_GR22(%r1)
	STREG     %r29,TASK_PT_GR29(%r1)
1:

#endif /* CONFIG_HPUX */

	/* Seems to me that dp could be wrong here, if the syscall involved
	 * calling a module, and nothing got round to restoring dp on return.
	 */
	loadgp
2002
syscall_check_resched:

	/* check for reschedule */

	LDREG	TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19	/* long */
	bb,<,n	%r19, 31-TIF_NEED_RESCHED, syscall_do_resched /* forward */
2009
	.import do_signal,code
syscall_check_sig:
	/* Fall into syscall_do_signal if a signal or sigmask-restore
	 * is pending; otherwise skip ahead to syscall_restore. */
	LDREG	TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19
	ldi	(_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK), %r26
	and,COND(<>)	%r19, %r26, %r0	/* nullify next insn if any flag set */
	b,n	syscall_restore	/* skip past if we've nothing to do */
2016
syscall_do_signal:
	/* Save callee-save registers (for sigcontext).
	 * FIXME: After this point the process structure should be
	 * consistent with all the relevant state of the process
	 * before the syscall.  We need to verify this.
	 */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1), %r26		/* struct pt_regs *regs */
	reg_save %r26

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29			/* Reference param save area */
#endif

	BL	do_notify_resume,%r2
	ldi	1, %r25				/* long in_syscall = 1 */

	/* Signal delivery may have modified pt_regs; reload r3-r18. */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1), %r20		/* reload pt_regs */
	reg_restore %r20

	b,n     syscall_check_sig
2039
syscall_restore:
	/* Are we being ptraced? */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1

	ldw	TASK_PTRACE(%r1), %r19
	bb,<	%r19,31,syscall_restore_rfi	/* traced: must return via rfi */
	nop

	ldo	TASK_PT_FR31(%r1),%r19		   /* reload fpregs */
	rest_fp	%r19

	LDREG	TASK_PT_SAR(%r1),%r19		   /* restore SAR */
	mtsar	%r19

	LDREG	TASK_PT_GR2(%r1),%r2		   /* restore user rp */
	LDREG	TASK_PT_GR19(%r1),%r19
	LDREG   TASK_PT_GR20(%r1),%r20
	LDREG	TASK_PT_GR21(%r1),%r21
	LDREG	TASK_PT_GR22(%r1),%r22
	LDREG	TASK_PT_GR23(%r1),%r23
	LDREG	TASK_PT_GR24(%r1),%r24
	LDREG	TASK_PT_GR25(%r1),%r25
	LDREG	TASK_PT_GR26(%r1),%r26
	LDREG	TASK_PT_GR27(%r1),%r27	   /* restore user dp */
	LDREG	TASK_PT_GR28(%r1),%r28	   /* syscall return value */
	LDREG	TASK_PT_GR29(%r1),%r29
	LDREG	TASK_PT_GR31(%r1),%r31	   /* restore syscall rp */

	/* NOTE: We use rsm/ssm pair to make this operation atomic */
	rsm     PSW_SM_I, %r0			   /* disable interrupts */
	LDREG   TASK_PT_GR30(%r1),%r30             /* restore user sp */
	mfsp	%sr3,%r1			   /* Get users space id */
	mtsp    %r1,%sr7                           /* Restore sr7 */
	ssm     PSW_SM_I, %r0			   /* re-enable interrupts */

	/* Set sr2 to zero for userspace syscalls to work. */
	mtsp	%r0,%sr2
	mtsp	%r1,%sr4			   /* Restore sr4 */
	mtsp	%r1,%sr5			   /* Restore sr5 */
	mtsp	%r1,%sr6			   /* Restore sr6 */

	depi	3,31,2,%r31			   /* ensure return to user mode. */

#ifdef CONFIG_64BIT
	/* decide whether to reset the wide mode bit
	 *
	 * For a syscall, the W bit is stored in the lowest bit
	 * of sp.  Extract it and reset W if it is zero */
	extrd,u,*<>	%r30,63,1,%r1
	rsm	PSW_SM_W, %r0
	/* now reset the lowest bit of sp if it was set */
	xor	%r30,%r1,%r30
#endif
	be,n    0(%sr3,%r31)                       /* return to user space */
2094
	/* We have to return via an RFI, so that PSW T and R bits can be set
	 * appropriately.
	 * This sets up pt_regs so we can return via intr_restore, which is not
	 * the most efficient way of doing things, but it works.
	 */
syscall_restore_rfi:
	ldo	-1(%r0),%r2			   /* Set recovery cntr to -1 */
	mtctl	%r2,%cr0			   /*   for immediate trap */
	LDREG	TASK_PT_PSW(%r1),%r2		   /* Get old PSW */
	ldi	0x0b,%r20			   /* Create new PSW */
	depi	-1,13,1,%r20			   /* C, Q, D, and I bits */

	/* The values of PA_SINGLESTEP_BIT and PA_BLOCKSTEP_BIT are
	 * set in include/linux/ptrace.h and converted to PA bitmap
	 * numbers in asm-offsets.c */

	/* if ((%r19.PA_SINGLESTEP_BIT)) { %r20.27=1} */
	extru,=	%r19,PA_SINGLESTEP_BIT,1,%r0
	depi	-1,27,1,%r20			   /* R bit */

	/* if ((%r19.PA_BLOCKSTEP_BIT)) { %r20.7=1} */
	extru,= %r19,PA_BLOCKSTEP_BIT,1,%r0
	depi	-1,7,1,%r20			   /* T bit */

	STREG	%r20,TASK_PT_PSW(%r1)

	/* Always store space registers, since sr3 can be changed (e.g. fork) */

	mfsp    %sr3,%r25
	STREG   %r25,TASK_PT_SR3(%r1)
	STREG   %r25,TASK_PT_SR4(%r1)
	STREG   %r25,TASK_PT_SR5(%r1)
	STREG   %r25,TASK_PT_SR6(%r1)
	STREG   %r25,TASK_PT_SR7(%r1)
	STREG   %r25,TASK_PT_IASQ0(%r1)
	STREG   %r25,TASK_PT_IASQ1(%r1)

	/* XXX W bit??? */
	/* Now if old D bit is clear, it means we didn't save all registers
	 * on syscall entry, so do that now.  This only happens on TRACEME
	 * calls, or if someone attached to us while we were on a syscall.
	 * We could make this more efficient by not saving r3-r18, but
	 * then we wouldn't be able to use the common intr_restore path.
	 * It is only for traced processes anyway, so performance is not
	 * an issue.
	 */
	bb,<	%r2,30,pt_regs_ok		   /* Branch if D set */
	ldo	TASK_REGS(%r1),%r25
	reg_save %r25				   /* Save r3 to r18 */

	/* Save the current sr */
	mfsp	%sr0,%r2
	STREG	%r2,TASK_PT_SR0(%r1)

	/* Save the scratch sr */
	mfsp	%sr1,%r2
	STREG	%r2,TASK_PT_SR1(%r1)

	/* sr2 should be set to zero for userspace syscalls */
	STREG	%r0,TASK_PT_SR2(%r1)

pt_regs_ok:
	/* Point IAOQ at the syscall return address (user mode) and
	 * hand off to the common interrupt return path. */
	LDREG	TASK_PT_GR31(%r1),%r2
	depi	3,31,2,%r2			   /* ensure return to user mode. */
	STREG	%r2,TASK_PT_IAOQ0(%r1)
	ldo	4(%r2),%r2
	STREG	%r2,TASK_PT_IAOQ1(%r1)
	copy	%r25,%r16
	b	intr_restore
	nop
2165
	.import schedule,code
syscall_do_resched:
	/* Call the scheduler, then re-run the full exit checks. */
	BL	schedule,%r2
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	nop
#endif
	b	syscall_check_resched	/* if resched, we start over again */
	nop
ENDPROC(syscall_exit)
2177
2178
get_register:
	/*
	 * get_register is used by the non access tlb miss handlers to
	 * copy the value of the general register specified in r8 into
	 * r1. This routine can't be used for shadowed registers, since
	 * the rfir will restore the original value. So, for the shadowed
	 * registers we put a -1 into r1 to indicate that the register
	 * should not be used (the register being copied could also have
	 * a -1 in it, but that is OK, it just means that we will have
	 * to use the slow path instead).
	 *
	 * Dispatch: blr branches into the two-instruction slot selected
	 * by %r8; each slot returns through %r25 with the copy in its
	 * delay slot.
	 */
	blr     %r8,%r0
	nop
	bv      %r0(%r25)    /* r0 */
	copy    %r0,%r1
	bv      %r0(%r25)    /* r1 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r2 */
	copy    %r2,%r1
	bv      %r0(%r25)    /* r3 */
	copy    %r3,%r1
	bv      %r0(%r25)    /* r4 */
	copy    %r4,%r1
	bv      %r0(%r25)    /* r5 */
	copy    %r5,%r1
	bv      %r0(%r25)    /* r6 */
	copy    %r6,%r1
	bv      %r0(%r25)    /* r7 */
	copy    %r7,%r1
	bv      %r0(%r25)    /* r8 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r9 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r10 */
	copy    %r10,%r1
	bv      %r0(%r25)    /* r11 */
	copy    %r11,%r1
	bv      %r0(%r25)    /* r12 */
	copy    %r12,%r1
	bv      %r0(%r25)    /* r13 */
	copy    %r13,%r1
	bv      %r0(%r25)    /* r14 */
	copy    %r14,%r1
	bv      %r0(%r25)    /* r15 */
	copy    %r15,%r1
	bv      %r0(%r25)    /* r16 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r17 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r18 */
	copy    %r18,%r1
	bv      %r0(%r25)    /* r19 */
	copy    %r19,%r1
	bv      %r0(%r25)    /* r20 */
	copy    %r20,%r1
	bv      %r0(%r25)    /* r21 */
	copy    %r21,%r1
	bv      %r0(%r25)    /* r22 */
	copy    %r22,%r1
	bv      %r0(%r25)    /* r23 */
	copy    %r23,%r1
	bv      %r0(%r25)    /* r24 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r25 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r26 */
	copy    %r26,%r1
	bv      %r0(%r25)    /* r27 */
	copy    %r27,%r1
	bv      %r0(%r25)    /* r28 */
	copy    %r28,%r1
	bv      %r0(%r25)    /* r29 */
	copy    %r29,%r1
	bv      %r0(%r25)    /* r30 */
	copy    %r30,%r1
	bv      %r0(%r25)    /* r31 */
	copy    %r31,%r1
2256
2257
set_register:
	/*
	 * set_register is used by the non access tlb miss handlers to
	 * copy the value of r1 into the general register specified in
	 * r8.
	 *
	 * Dispatch: blr branches into the two-instruction slot selected
	 * by %r8; each slot returns through %r25 with the copy in its
	 * delay slot.
	 */
	blr     %r8,%r0
	nop
	bv      %r0(%r25)    /* r0 (silly, but it is a place holder) */
	copy    %r1,%r0
	bv      %r0(%r25)    /* r1 */
	copy    %r1,%r1
	bv      %r0(%r25)    /* r2 */
	copy    %r1,%r2
	bv      %r0(%r25)    /* r3 */
	copy    %r1,%r3
	bv      %r0(%r25)    /* r4 */
	copy    %r1,%r4
	bv      %r0(%r25)    /* r5 */
	copy    %r1,%r5
	bv      %r0(%r25)    /* r6 */
	copy    %r1,%r6
	bv      %r0(%r25)    /* r7 */
	copy    %r1,%r7
	bv      %r0(%r25)    /* r8 */
	copy    %r1,%r8
	bv      %r0(%r25)    /* r9 */
	copy    %r1,%r9
	bv      %r0(%r25)    /* r10 */
	copy    %r1,%r10
	bv      %r0(%r25)    /* r11 */
	copy    %r1,%r11
	bv      %r0(%r25)    /* r12 */
	copy    %r1,%r12
	bv      %r0(%r25)    /* r13 */
	copy    %r1,%r13
	bv      %r0(%r25)    /* r14 */
	copy    %r1,%r14
	bv      %r0(%r25)    /* r15 */
	copy    %r1,%r15
	bv      %r0(%r25)    /* r16 */
	copy    %r1,%r16
	bv      %r0(%r25)    /* r17 */
	copy    %r1,%r17
	bv      %r0(%r25)    /* r18 */
	copy    %r1,%r18
	bv      %r0(%r25)    /* r19 */
	copy    %r1,%r19
	bv      %r0(%r25)    /* r20 */
	copy    %r1,%r20
	bv      %r0(%r25)    /* r21 */
	copy    %r1,%r21
	bv      %r0(%r25)    /* r22 */
	copy    %r1,%r22
	bv      %r0(%r25)    /* r23 */
	copy    %r1,%r23
	bv      %r0(%r25)    /* r24 */
	copy    %r1,%r24
	bv      %r0(%r25)    /* r25 */
	copy    %r1,%r25
	bv      %r0(%r25)    /* r26 */
	copy    %r1,%r26
	bv      %r0(%r25)    /* r27 */
	copy    %r1,%r27
	bv      %r0(%r25)    /* r28 */
	copy    %r1,%r28
	bv      %r0(%r25)    /* r29 */
	copy    %r1,%r29
	bv      %r0(%r25)    /* r30 */
	copy    %r1,%r30
	bv      %r0(%r25)    /* r31 */
	copy    %r1,%r31
2330
2331