/*
 * Linux/PA-RISC Project (http://www.parisc-linux.org/)
 *
 * kernel entry points (interruptions, system call wrappers)
 *  Copyright (C) 1999,2000 Philipp Rumpf
 *  Copyright (C) 1999 SuSE GmbH Nuernberg
 *  Copyright (C) 2000 Hewlett-Packard (John Marvin)
 *  Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; either version 2, or (at your option)
 *    any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <asm/asm-offsets.h>

/* we have the following possibilities to act on an interruption:
 *  - handle in assembly and use shadowed registers only
 *  - save registers to kernel stack and handle in assembly or C */


#include <asm/psw.h>
#include <asm/cache.h>		/* for L1_CACHE_SHIFT */
#include <asm/assembly.h>	/* for LDREG/STREG defines */
#include <asm/pgtable.h>
#include <asm/signal.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>

#include <linux/linkage.h>

#ifdef CONFIG_64BIT
	.level 2.0w
#else
	.level 2.0
#endif

	.import		pa_tlb_lock,data

	/* space_to_prot macro creates a prot id from a space id */

#if (SPACEID_SHIFT) == 0
	.macro  space_to_prot spc prot
	depd,z  \spc,62,31,\prot
	.endm
#else
	.macro  space_to_prot spc prot
	extrd,u \spc,(64 - (SPACEID_SHIFT)),32,\prot
	.endm
#endif

	/* Switch to virtual mapping, trashing only %r1 */
	.macro  virt_map
	/* pcxt_ssm_bug */
	rsm	PSW_SM_I, %r0	/* barrier for "Relied upon Translation" */
	mtsp	%r0, %sr4
	mtsp	%r0, %sr5
	mtsp	%r0, %sr6
	tovirt_r1 %r29
	load32	KERNEL_PSW, %r1

	rsm     PSW_SM_QUIET,%r0	/* second "heavy weight" ctl op */
	mtctl	%r0, %cr17	/* Clear IIASQ tail */
	mtctl	%r0, %cr17	/* Clear IIASQ head */
	mtctl	%r1, %ipsw
	load32	4f, %r1
	mtctl	%r1, %cr18	/* Set IIAOQ tail */
	ldo	4(%r1), %r1
	mtctl	%r1, %cr18	/* Set IIAOQ head */
	rfir
	nop
4:
	.endm
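
	/* virt_map fakes an interruption return: the IIA queues are loaded
	 * with 4f and 4f+4 and KERNEL_PSW goes into the IPSW, so the rfir
	 * "returns" to the local label 4 with translations enabled and,
	 * as promised above, only %r1 trashed. */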

	/*
	 * The "get_stack" macros are responsible for determining the
	 * kernel stack value.
	 *
	 *      If sr7 == 0
	 *          Already using a kernel stack, so call the
	 *          get_stack_use_r30 macro to push a pt_regs structure
	 *          on the stack, and store registers there.
	 *      else
	 *          Need to set up a kernel stack, so call the
	 *          get_stack_use_cr30 macro to set up a pointer
	 *          to the pt_regs structure contained within the
	 *          task pointer pointed to by cr30. Set the stack
	 *          pointer to point to the end of the task structure.
	 *
	 * Note that we use shadowed registers for temps until
	 * we can save %r26 and %r29. %r26 is used to preserve
	 * %r8 (a shadowed register) which temporarily contained
	 * either the fault type ("code") or the eirr. We need
	 * to use a non-shadowed register to carry the value over
	 * the rfir in virt_map. We use %r26 since this value winds
	 * up being passed as the argument to either do_cpu_irq_mask
	 * or handle_interruption. %r29 is used to hold a pointer
	 * to the register save area, and once again, it needs to
	 * be a non-shadowed register so that it survives the rfir.
	 *
	 * N.B. TASK_SZ_ALGN and PT_SZ_ALGN include space for a stack frame.
	 */

	.macro  get_stack_use_cr30

	/* we save the registers in the task struct */

	copy	%r30, %r17
	mfctl   %cr30, %r1
	ldo	THREAD_SZ_ALGN(%r1), %r30
	mtsp	%r0,%sr7
	mtsp	%r16,%sr3
	tophys  %r1,%r9
	LDREG	TI_TASK(%r9), %r1	/* thread_info -> task_struct */
	tophys  %r1,%r9
	ldo     TASK_REGS(%r9),%r9
	STREG   %r17,PT_GR30(%r9)
	STREG   %r29,PT_GR29(%r9)
	STREG   %r26,PT_GR26(%r9)
	STREG	%r16,PT_SR7(%r9)
	copy    %r9,%r29
	.endm

	.macro  get_stack_use_r30

	/* we put a struct pt_regs on the stack and save the registers there */

	tophys  %r30,%r9
	copy	%r30,%r1
	ldo	PT_SZ_ALGN(%r30),%r30
	STREG   %r1,PT_GR30(%r9)
	STREG   %r29,PT_GR29(%r9)
	STREG   %r26,PT_GR26(%r9)
	STREG	%r16,PT_SR7(%r9)
	copy    %r9,%r29
	.endm

	.macro  rest_stack
	LDREG   PT_GR1(%r29), %r1
	LDREG   PT_GR30(%r29),%r30
	LDREG   PT_GR29(%r29),%r29
	.endm

	/* default interruption handler
	 * (calls traps.c:handle_interruption) */
	.macro	def code
	b	intr_save
	ldi     \code, %r8
	.align	32
	.endm

	/* Interrupt interruption handler
	 * (calls irq.c:do_cpu_irq_mask) */
	.macro	extint code
	b	intr_extint
	mfsp    %sr7,%r16
	.align	32
	.endm

	.import	os_hpmc, code

	/* HPMC handler */
	.macro	hpmc code
	nop			/* must be a NOP, will be patched later */
	load32	PA(os_hpmc), %r3
	bv,n	0(%r3)
	nop
	.word	0		/* checksum (will be patched) */
	.word	PA(os_hpmc)	/* address of handler */
	.word	0		/* length of handler */
	.endm

	/*
	 * Performance Note: Instructions will be moved up into
	 * this part of the code later on, once we are sure
	 * that the tlb miss handlers are close to final form.
	 */

	/* Register definitions for tlb miss handler macros */

	va  = r8	/* virtual address for which the trap occurred */
	spc = r24	/* space for which the trap occurred */

#ifndef CONFIG_64BIT

	/*
	 * itlb miss interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	itlb_11 code

	mfctl	%pcsq, spc
	b	itlb_miss_11
	mfctl	%pcoq, va

	.align		32
	.endm
#endif

	/*
	 * itlb miss interruption handler (parisc 2.0)
	 */

	.macro	itlb_20 code
	mfctl	%pcsq, spc
#ifdef CONFIG_64BIT
	b       itlb_miss_20w
#else
	b	itlb_miss_20
#endif
	mfctl	%pcoq, va

	.align		32
	.endm

#ifndef CONFIG_64BIT
	/*
	 * naitlb miss interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	naitlb_11 code

	mfctl	%isr,spc
	b	naitlb_miss_11
	mfctl	%ior,va

	.align		32
	.endm
#endif

	/*
	 * naitlb miss interruption handler (parisc 2.0)
	 */

	.macro	naitlb_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b       naitlb_miss_20w
#else
	b	naitlb_miss_20
#endif
	mfctl	%ior,va

	.align		32
	.endm

#ifndef CONFIG_64BIT
	/*
	 * dtlb miss interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	dtlb_11 code

	mfctl	%isr, spc
	b	dtlb_miss_11
	mfctl	%ior, va

	.align		32
	.endm
#endif

	/*
	 * dtlb miss interruption handler (parisc 2.0)
	 */

	.macro	dtlb_20 code

	mfctl	%isr, spc
#ifdef CONFIG_64BIT
	b       dtlb_miss_20w
#else
	b	dtlb_miss_20
#endif
	mfctl	%ior, va

	.align		32
	.endm

#ifndef CONFIG_64BIT
	/* nadtlb miss interruption handler (parisc 1.1 - 32 bit) */

	.macro	nadtlb_11 code

	mfctl	%isr,spc
	b       nadtlb_miss_11
	mfctl	%ior,va

	.align		32
	.endm
#endif

	/* nadtlb miss interruption handler (parisc 2.0) */

	.macro	nadtlb_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b       nadtlb_miss_20w
#else
	b       nadtlb_miss_20
#endif
	mfctl	%ior,va

	.align		32
	.endm

#ifndef CONFIG_64BIT
	/*
	 * dirty bit trap interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	dbit_11 code

	mfctl	%isr,spc
	b	dbit_trap_11
	mfctl	%ior,va

	.align		32
	.endm
#endif

	/*
	 * dirty bit trap interruption handler (parisc 2.0)
	 */

	.macro	dbit_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b       dbit_trap_20w
#else
	b	dbit_trap_20
#endif
	mfctl	%ior,va

	.align		32
	.endm
	/* In LP64, the space contains part of the upper 32 bits of the
	 * fault address.  We have to extract this and place it in the va,
	 * zeroing the corresponding bits in the space register */
	.macro		space_adjust	spc,va,tmp
#ifdef CONFIG_64BIT
	extrd,u		\spc,63,SPACEID_SHIFT,\tmp
	depd		%r0,63,SPACEID_SHIFT,\spc
	depd		\tmp,31,SPACEID_SHIFT,\va
#endif
	.endm

	.import		swapper_pg_dir,code

	/* Get the pgd.  For faults on space zero (kernel space), this
	 * is simply swapper_pg_dir.  For user space faults, the
	 * pgd is stored in %cr25 */
	.macro		get_pgd		spc,reg
	ldil		L%PA(swapper_pg_dir),\reg
	ldo		R%PA(swapper_pg_dir)(\reg),\reg
	or,COND(=)	%r0,\spc,%r0
	mfctl		%cr25,\reg
	.endm
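
	/* Note the conditional-nullify idiom above: or,COND(=) computes
	 * %r0|\spc and nullifies the following mfctl when the result is
	 * zero, so a kernel-space fault (spc == 0) keeps swapper_pg_dir
	 * while a user-space fault loads the pgd from %cr25 instead. */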

	/*
		space_check(spc,tmp,fault)

		spc - The space we saw the fault with.
		tmp - The place to store the current space.
		fault - Function to call on failure.

		Only allow faults on different spaces from the
		currently active one if we're the kernel

	*/
	.macro		space_check	spc,tmp,fault
	mfsp		%sr7,\tmp
	or,COND(<>)	%r0,\spc,%r0	/* user may execute gateway page
					 * as kernel, so defeat the space
					 * check if it is */
	copy		\spc,\tmp
	or,COND(=)	%r0,\tmp,%r0	/* nullify if executing as kernel */
	cmpb,COND(<>),n	\tmp,\spc,\fault
	.endm
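
	/* How the check above works: \tmp starts out as %sr7 (zero when we
	 * faulted in kernel mode).  A non-zero \spc nullifies the copy, so
	 * \tmp keeps the current space; a zero \spc (the gateway page)
	 * overwrites \tmp so the final compare always passes.  Then, if
	 * \tmp is zero we are the kernel and the cmpb is nullified;
	 * otherwise any space mismatch branches to \fault. */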

	/* Look up a PTE in a 2-Level scheme (faulting at each
	 * level if the entry isn't present)
	 *
	 * NOTE: we use ldw even for LP64, since the short pointers
	 * can address up to 1TB
	 */
	.macro		L2_ptep	pmd,pte,index,va,fault
#if CONFIG_PGTABLE_LEVELS == 3
	extru		\va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
#else
# if defined(CONFIG_64BIT)
	extrd,u		\va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
# else
#  if PAGE_SIZE > 4096
	extru		\va,31-ASM_PGDIR_SHIFT,32-ASM_PGDIR_SHIFT,\index
#  else
	extru		\va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
#  endif
# endif
#endif
	dep             %r0,31,PAGE_SHIFT,\pmd  /* clear offset */
	copy		%r0,\pte
	ldw,s		\index(\pmd),\pmd
	bb,>=,n		\pmd,_PxD_PRESENT_BIT,\fault
	dep		%r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
	copy		\pmd,%r9
	SHLREG		%r9,PxD_VALUE_SHIFT,\pmd
	extru		\va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
	dep		%r0,31,PAGE_SHIFT,\pmd  /* clear offset */
	shladd		\index,BITS_PER_PTE_ENTRY,\pmd,\pmd /* pmd is now pte */
	LDREG		%r0(\pmd),\pte
	bb,>=,n		\pte,_PAGE_PRESENT_BIT,\fault
	.endm

	/* Look up PTE in a 3-Level scheme.
	 *
	 * Here we implement a Hybrid L2/L3 scheme: we allocate the
	 * first pmd adjacent to the pgd.  This means that we can
	 * subtract a constant offset to get to it.  The pmd and pgd
	 * sizes are arranged so that a single pmd covers 4GB (giving
	 * a full LP64 process access to 8TB) so our lookups are
	 * effectively L2 for the first 4GB of the kernel (i.e. for
	 * all ILP32 processes and all the kernel for machines with
	 * under 4GB of memory) */
	.macro		L3_ptep pgd,pte,index,va,fault
#if CONFIG_PGTABLE_LEVELS == 3 /* we might have a 2-Level scheme, e.g. with 16kb page size */
	extrd,u		\va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
	copy		%r0,\pte
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	ldw,s		\index(\pgd),\pgd
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	bb,>=,n		\pgd,_PxD_PRESENT_BIT,\fault
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	shld		\pgd,PxD_VALUE_SHIFT,\index
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	copy		\index,\pgd
	extrd,u,*<>	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	ldo		ASM_PGD_PMD_OFFSET(\pgd),\pgd
#endif
	L2_ptep		\pgd,\pte,\index,\va,\fault
	.endm

	/* Acquire pa_tlb_lock lock and recheck page is still present. */
	.macro		tlb_lock	spc,ptp,pte,tmp,tmp1,fault
#ifdef CONFIG_SMP
	cmpib,COND(=),n	0,\spc,2f
	load32		PA(pa_tlb_lock),\tmp
1:	LDCW		0(\tmp),\tmp1
	cmpib,COND(=)	0,\tmp1,1b
	nop
	LDREG		0(\ptp),\pte
	bb,<,n		\pte,_PAGE_PRESENT_BIT,2f
	b		\fault
	stw		 \spc,0(\tmp)
2:
#endif
	.endm
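
	/* The 1b loop above is the classic PA-RISC ldcw spinlock: LDCW
	 * atomically loads the lock word and clears it to zero, so reading
	 * zero means someone else holds the lock and we keep spinning.
	 * Storing any non-zero value (\spc here, known non-zero because
	 * spc == 0 skips the lock entirely) releases it, which is why the
	 * fault path drops the lock from its branch delay slot. */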

	/* Release pa_tlb_lock lock without reloading lock address. */
	.macro		tlb_unlock0	spc,tmp
#ifdef CONFIG_SMP
	or,COND(=)	%r0,\spc,%r0
	stw             \spc,0(\tmp)
#endif
	.endm

	/* Release pa_tlb_lock lock. */
	.macro		tlb_unlock1	spc,tmp
#ifdef CONFIG_SMP
	load32		PA(pa_tlb_lock),\tmp
	tlb_unlock0	\spc,\tmp
#endif
	.endm

	/* Set the _PAGE_ACCESSED bit of the PTE.  Be clever and
	 * don't needlessly dirty the cache line if it was already set */
	.macro		update_accessed	ptp,pte,tmp,tmp1
	ldi		_PAGE_ACCESSED,\tmp1
	or		\tmp1,\pte,\tmp
	and,COND(<>)	\tmp1,\pte,%r0
	STREG		\tmp,0(\ptp)
	.endm
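
	/* Another nullification trick: and,COND(<>) nullifies the STREG
	 * when _PAGE_ACCESSED was already set (the AND result is non-zero),
	 * so we only write the PTE back when the bit actually changes. */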

	/* Set the dirty bit (and accessed bit).  No need to be
	 * clever, this is only used from the dirty fault */
	.macro		update_dirty	ptp,pte,tmp
	ldi		_PAGE_ACCESSED|_PAGE_DIRTY,\tmp
	or		\tmp,\pte,\pte
	STREG		\pte,0(\ptp)
	.endm

	/* We have (depending on the page size):
	 * - 38 to 52-bit Physical Page Number
	 * - 12 to 26-bit page offset
	 */
	/* bitshift difference between a PFN (based on kernel's PAGE_SIZE)
	 * to a CPU TLB 4k PFN (4k => 12 bits to shift) */
	#define PAGE_ADD_SHIFT		(PAGE_SHIFT-12)
	#define PAGE_ADD_HUGE_SHIFT	(REAL_HPAGE_SHIFT-12)
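
	/* For example, with a 16K kernel page size (PAGE_SHIFT == 14),
	 * PAGE_ADD_SHIFT is 2: a kernel PFN must be shifted left by two
	 * extra bits to become the 4k-granular PFN the CPU TLB expects. */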

	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
	.macro		convert_for_tlb_insert20 pte,tmp
#ifdef CONFIG_HUGETLB_PAGE
	copy		\pte,\tmp
	extrd,u		\tmp,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
				64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte

	depdi		_PAGE_SIZE_ENCODING_DEFAULT,63,\
				(63-58)+PAGE_ADD_SHIFT,\pte
	extrd,u,*=	\tmp,_PAGE_HPAGE_BIT+32,1,%r0
	depdi		_HUGE_PAGE_SIZE_ENCODING_DEFAULT,63,\
				(63-58)+PAGE_ADD_HUGE_SHIFT,\pte
#else /* Huge pages disabled */
	extrd,u		\pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
				64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
	depdi		_PAGE_SIZE_ENCODING_DEFAULT,63,\
				(63-58)+PAGE_ADD_SHIFT,\pte
#endif
	.endm

	/* Convert the pte and prot to tlb insertion values.  How
	 * this happens is quite subtle, read below */
	.macro		make_insert_tlb	spc,pte,prot,tmp
	space_to_prot   \spc \prot        /* create prot id from space */
	/* The following is the real subtlety.  This is depositing
	 * T <-> _PAGE_REFTRAP
	 * D <-> _PAGE_DIRTY
	 * B <-> _PAGE_DMB (memory break)
	 *
	 * Then incredible subtlety: The access rights are
	 * _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE
	 * See 3-14 of the parisc 2.0 manual
	 *
	 * Finally, _PAGE_READ goes in the top bit of PL1 (so we
	 * trigger an access rights trap in user space if the user
	 * tries to read an unreadable page) */
	depd            \pte,8,7,\prot

	/* PAGE_USER indicates the page can be read with user privileges,
	 * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1
	 * contains _PAGE_READ) */
	extrd,u,*=      \pte,_PAGE_USER_BIT+32,1,%r0
	depdi		7,11,3,\prot
	/* If we're a gateway page, drop PL2 back to zero for promotion
	 * to kernel privilege (so we can execute the page as kernel).
	 * Any privilege promotion page always denies read and write */
	extrd,u,*= 	\pte,_PAGE_GATEWAY_BIT+32,1,%r0
	depd		%r0,11,2,\prot	/* If Gateway, Set PL2 to 0 */

	/* Enforce uncacheable pages.
	 * This should ONLY be used for MMIO on PA 2.0 machines.
	 * Memory/DMA is cache coherent on all PA2.0 machines we support
	 * (that means T-class is NOT supported) and the memory controllers
	 * on most of those machines only handle cache transactions.
	 */
	extrd,u,*=	\pte,_PAGE_NO_CACHE_BIT+32,1,%r0
	depdi		1,12,1,\prot

	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
	convert_for_tlb_insert20 \pte \tmp
	.endm

	/* Identical macro to make_insert_tlb above, except it
	 * makes the tlb entry for the differently formatted pa11
	 * insertion instructions */
	.macro		make_insert_tlb_11	spc,pte,prot
	zdep		\spc,30,15,\prot
	dep		\pte,8,7,\prot
	extru,=		\pte,_PAGE_NO_CACHE_BIT,1,%r0
	depi		1,12,1,\prot
	extru,=         \pte,_PAGE_USER_BIT,1,%r0
	depi		7,11,3,\prot   /* Set for user space (1 rsvd for read) */
	extru,= 	\pte,_PAGE_GATEWAY_BIT,1,%r0
	depi		0,11,2,\prot	/* If Gateway, Set PL2 to 0 */

	/* Get rid of prot bits and convert to page addr for iitlba */

	depi		0,31,ASM_PFN_PTE_SHIFT,\pte
	SHRREG		\pte,(ASM_PFN_PTE_SHIFT-(31-26)),\pte
	.endm

	/* This is for ILP32 PA2.0 only.  The TLB insertion needs
	 * to extend into I/O space if the address is 0xfXXXXXXX
	 * so we extend the f's into the top word of the pte in
	 * this case */
	.macro		f_extend	pte,tmp
	extrd,s		\pte,42,4,\tmp
	addi,<>		1,\tmp,%r0
	extrd,s		\pte,63,25,\pte
	.endm
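
	/* The extract/nullify pair above tests whether the nibble mapping
	 * to the top of the 32-bit address is 0xf: \tmp then sign-extends
	 * to -1, \tmp+1 is zero, the nullify does not fire, and the final
	 * sign-extend propagates the f's into the upper word of the pte. */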

	/* The alias region is an 8MB-aligned 16MB region used to clear and
	 * copy user pages at addresses congruent with the user
	 * virtual address.
	 *
	 * To use the alias page, you set %r26 up with the to TLB
	 * entry (identifying the physical page) and %r23 up with
	 * the from tlb entry (or nothing if only a to entry---for
	 * clear_user_page_asm) */
	.macro		do_alias	spc,tmp,tmp1,va,pte,prot,fault,patype
	cmpib,COND(<>),n 0,\spc,\fault
	ldil		L%(TMPALIAS_MAP_START),\tmp
#if defined(CONFIG_64BIT) && (TMPALIAS_MAP_START >= 0x80000000)
	/* on LP64, ldi will sign extend into the upper 32 bits,
	 * which is behaviour we don't want */
	depdi		0,31,32,\tmp
#endif
	copy		\va,\tmp1
	depi		0,31,23,\tmp1
	cmpb,COND(<>),n	\tmp,\tmp1,\fault
	mfctl		%cr19,\tmp	/* iir */
	/* get the opcode (first six bits) into \tmp */
	extrw,u		\tmp,5,6,\tmp
	/*
	 * Only setting the T bit prevents data cache movein
	 * Setting access rights to zero prevents instruction cache movein
	 *
	 * Note subtlety here: _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE go
	 * to type field and _PAGE_READ goes to top bit of PL1
	 */
	ldi		(_PAGE_REFTRAP|_PAGE_READ|_PAGE_WRITE),\prot
	/*
	 * so if the opcode is one (i.e. this is a memory management
	 * instruction) nullify the next load so \prot is only T.
	 * Otherwise this is a normal data operation
	 */
	cmpiclr,=	0x01,\tmp,%r0
	ldi		(_PAGE_DIRTY|_PAGE_READ|_PAGE_WRITE),\prot
.ifc \patype,20
	depd,z		\prot,8,7,\prot
.else
.ifc \patype,11
	depw,z		\prot,8,7,\prot
.else
	.error "undefined PA type to do_alias"
.endif
.endif
	/*
	 * OK, it is in the temp alias region, check whether "from" or "to".
	 * Check "subtle" note in pacache.S re: r23/r26.
	 */
#ifdef CONFIG_64BIT
	extrd,u,*=	\va,41,1,%r0
#else
	extrw,u,=	\va,9,1,%r0
#endif
	or,COND(tr)	%r23,%r0,\pte
	or		%r26,%r0,\pte
	.endm


	/*
	 * Fault_vectors are architecturally required to be aligned on a 2K
	 * boundary
	 */

	.text
	.align 2048

ENTRY(fault_vector_20)
	/* First vector is invalid (0) */
	.ascii	"cows can fly"
	.byte 0
	.align 32

	hpmc		 1
	def		 2
	def		 3
	extint		 4
	def		 5
	itlb_20		 6
	def		 7
	def		 8
	def              9
	def		10
	def		11
	def		12
	def		13
	def		14
	dtlb_20		15
	naitlb_20	16
	nadtlb_20	17
	def		18
	def		19
	dbit_20		20
	def		21
	def		22
	def		23
	def		24
	def		25
	def		26
	def		27
	def		28
	def		29
	def		30
	def		31
END(fault_vector_20)

#ifndef CONFIG_64BIT

	.align 2048

ENTRY(fault_vector_11)
	/* First vector is invalid (0) */
	.ascii	"cows can fly"
	.byte 0
	.align 32

	hpmc		 1
	def		 2
	def		 3
	extint		 4
	def		 5
	itlb_11		 6
	def		 7
	def		 8
	def              9
	def		10
	def		11
	def		12
	def		13
	def		14
	dtlb_11		15
	naitlb_11	16
	nadtlb_11	17
	def		18
	def		19
	dbit_11		20
	def		21
	def		22
	def		23
	def		24
	def		25
	def		26
	def		27
	def		28
	def		29
	def		30
	def		31
END(fault_vector_11)

#endif
	/* Fault vector is separately protected and *must* be on its own page */
	.align		PAGE_SIZE
ENTRY(end_fault_vector)

	.import		handle_interruption,code
	.import		do_cpu_irq_mask,code

	/*
	 * Child Returns here
	 *
	 * copy_thread moved args into task save area.
	 */

ENTRY(ret_from_kernel_thread)

	/* Call schedule_tail first though */
	BL	schedule_tail, %r2
	nop

	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
	LDREG	TASK_PT_GR25(%r1), %r26
#ifdef CONFIG_64BIT
	LDREG	TASK_PT_GR27(%r1), %r27
#endif
	LDREG	TASK_PT_GR26(%r1), %r1
	ble	0(%sr7, %r1)
	copy	%r31, %r2
	b	finish_child_return
	nop
ENDPROC(ret_from_kernel_thread)


	/*
	 * struct task_struct *_switch_to(struct task_struct *prev,
	 *	struct task_struct *next)
	 *
	 * switch kernel stacks and return prev */
ENTRY(_switch_to)
	STREG	 %r2, -RP_OFFSET(%r30)

	callee_save_float
	callee_save

	load32	_switch_to_ret, %r2

	STREG	%r2, TASK_PT_KPC(%r26)
	LDREG	TASK_PT_KPC(%r25), %r2

	STREG	%r30, TASK_PT_KSP(%r26)
	LDREG	TASK_PT_KSP(%r25), %r30
	LDREG	TASK_THREAD_INFO(%r25), %r25
	bv	%r0(%r2)
	mtctl   %r25,%cr30

_switch_to_ret:
	mtctl	%r0, %cr0		/* Needed for single stepping */
	callee_rest
	callee_rest_float

	LDREG	-RP_OFFSET(%r30), %r2
	bv	%r0(%r2)
	copy	%r26, %r28
ENDPROC(_switch_to)
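
	/* _switch_to saves prev's kernel PC and stack pointer into its
	 * thread struct, loads next's, and points %cr30 at next's
	 * thread_info; execution then resumes at next's saved kernel PC
	 * (normally _switch_to_ret) on the new stack, with prev returned
	 * in %r28 as the C prototype requires. */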

	/*
	 * Common rfi return path for interruptions, kernel execve, and
	 * sys_rt_sigreturn (sometimes).  The sys_rt_sigreturn syscall will
	 * return via this path if the signal was received when the process
	 * was running; if the process was blocked on a syscall then the
	 * normal syscall_exit path is used.  All syscalls for traced
	 * processes exit via intr_restore.
	 *
	 * XXX If any syscalls that change a process's space id ever exit
	 * this way, then we will need to copy %sr3 in to PT_SR[3..7], and
	 * adjust IASQ[0..1].
	 *
	 */

	.align	PAGE_SIZE

ENTRY(syscall_exit_rfi)
	mfctl   %cr30,%r16
	LDREG	TI_TASK(%r16), %r16	/* thread_info -> task_struct */
	ldo	TASK_REGS(%r16),%r16
	/* Force iaoq to userspace, as the user has had access to our current
	 * context via sigcontext. Also filter the PSW for the same reason.
	 */
	LDREG	PT_IAOQ0(%r16),%r19
	depi	3,31,2,%r19
	STREG	%r19,PT_IAOQ0(%r16)
	LDREG	PT_IAOQ1(%r16),%r19
	depi	3,31,2,%r19
	STREG	%r19,PT_IAOQ1(%r16)
	LDREG   PT_PSW(%r16),%r19
	load32	USER_PSW_MASK,%r1
#ifdef CONFIG_64BIT
	load32	USER_PSW_HI_MASK,%r20
	depd    %r20,31,32,%r1
#endif
	and     %r19,%r1,%r19 /* Mask out bits that user shouldn't play with */
	load32	USER_PSW,%r1
	or      %r19,%r1,%r19 /* Make sure default USER_PSW bits are set */
	STREG   %r19,PT_PSW(%r16)

	/*
	 * If we aren't being traced, we never saved space registers
	 * (we don't store them in the sigcontext), so set them
	 * to "proper" values now (otherwise we'll wind up restoring
	 * whatever was last stored in the task structure, which might
	 * be inconsistent if an interrupt occurred while on the gateway
	 * page). Note that we may be "trashing" values the user put in
	 * them, but we don't support the user changing them.
	 */

	STREG   %r0,PT_SR2(%r16)
	mfsp    %sr3,%r19
	STREG   %r19,PT_SR0(%r16)
	STREG   %r19,PT_SR1(%r16)
	STREG   %r19,PT_SR3(%r16)
	STREG   %r19,PT_SR4(%r16)
	STREG   %r19,PT_SR5(%r16)
	STREG   %r19,PT_SR6(%r16)
	STREG   %r19,PT_SR7(%r16)

intr_return:
	/* check for reschedule */
	mfctl   %cr30,%r1
	LDREG   TI_FLAGS(%r1),%r19	/* sched.h: TIF_NEED_RESCHED */
	bb,<,n	%r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */

	.import do_notify_resume,code
intr_check_sig:
	/* As above */
	mfctl   %cr30,%r1
	LDREG	TI_FLAGS(%r1),%r19
	ldi	(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME), %r20
	and,COND(<>)	%r19, %r20, %r0
	b,n	intr_restore	/* skip past if we've nothing to do */

	/* This check is critical to having LWS
	 * working. The IASQ is zero on the gateway
	 * page and we cannot deliver any signals until
	 * we get off the gateway page.
	 *
	 * Only do signals if we are returning to user space
	 */
	LDREG	PT_IASQ0(%r16), %r20
	cmpib,COND(=),n 0,%r20,intr_restore /* backward */
	LDREG	PT_IASQ1(%r16), %r20
	cmpib,COND(=),n 0,%r20,intr_restore /* backward */

	/* NOTE: We need to enable interrupts if we have to deliver
	 * signals. We used to do this earlier but it caused kernel
	 * stack overflows. */
	ssm     PSW_SM_I, %r0

	copy	%r0, %r25			/* long in_syscall = 0 */
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29			/* Reference param save area */
#endif

	BL	do_notify_resume,%r2
	copy	%r16, %r26			/* struct pt_regs *regs */

	b,n	intr_check_sig

intr_restore:
	copy            %r16,%r29
	ldo             PT_FR31(%r29),%r1
	rest_fp         %r1
	rest_general    %r29

	/* inverse of virt_map */
	pcxt_ssm_bug
	rsm             PSW_SM_QUIET,%r0	/* prepare for rfi */
	tophys_r1       %r29

	/* Restore space id's and special cr's from PT_REGS
	 * structure pointed to by r29
	 */
	rest_specials	%r29

	/* IMPORTANT: rest_stack restores r29 last (we are using it)!
	 * It also restores r1 and r30.
	 */
	rest_stack

	rfi
	nop

#ifndef CONFIG_PREEMPT
# define intr_do_preempt	intr_restore
#endif /* !CONFIG_PREEMPT */
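
	/* Without CONFIG_PREEMPT, intr_do_preempt above is simply an alias
	 * for intr_restore, so the kernel-space branches in intr_do_resched
	 * below fall straight through to a plain register restore. */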

	.import schedule,code
intr_do_resched:
	/* Only call schedule on return to userspace. If we're returning
	 * to kernel space, we may schedule if CONFIG_PREEMPT, otherwise
	 * we jump back to intr_restore.
	 */
	LDREG	PT_IASQ0(%r16), %r20
	cmpib,COND(=)	0, %r20, intr_do_preempt
	nop
	LDREG	PT_IASQ1(%r16), %r20
	cmpib,COND(=)	0, %r20, intr_do_preempt
	nop

	/* NOTE: We need to enable interrupts if we schedule.  We used
	 * to do this earlier but it caused kernel stack overflows. */
	ssm     PSW_SM_I, %r0

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif

	ldil	L%intr_check_sig, %r2
#ifndef CONFIG_64BIT
	b	schedule
#else
	load32	schedule, %r20
	bv	%r0(%r20)
#endif
	ldo	R%intr_check_sig(%r2), %r2

	/* preempt the current task on returning to kernel
	 * mode from an interrupt, iff need_resched is set,
	 * and preempt_count is 0. otherwise, we continue on
	 * our merry way back to the current running task.
	 */
#ifdef CONFIG_PREEMPT
	.import preempt_schedule_irq,code
intr_do_preempt:
	rsm	PSW_SM_I, %r0		/* disable interrupts */

	/* current_thread_info()->preempt_count */
	mfctl	%cr30, %r1
	LDREG	TI_PRE_COUNT(%r1), %r19
	cmpib,COND(<>)	0, %r19, intr_restore	/* if preempt_count > 0 */
	nop				/* prev insn branched backwards */

	/* check if we interrupted a critical path */
	LDREG	PT_PSW(%r16), %r20
	bb,<,n	%r20, 31 - PSW_SM_I, intr_restore
	nop

	BL	preempt_schedule_irq, %r2
	nop

	b,n	intr_restore		/* ssm PSW_SM_I done by intr_restore */
#endif /* CONFIG_PREEMPT */

	/*
	 * External interrupts.
	 */

intr_extint:
	cmpib,COND(=),n 0,%r16,1f

	get_stack_use_cr30
	b,n 2f

1:
	get_stack_use_r30
2:
	save_specials	%r29
	virt_map
	save_general	%r29

	ldo	PT_FR0(%r29), %r24
	save_fp	%r24

	loadgp

	copy	%r29, %r26	/* arg0 is pt_regs */
	copy	%r29, %r16	/* save pt_regs */

	ldil	L%intr_return, %r2

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29	/* Reference param save area */
#endif

	b	do_cpu_irq_mask
	ldo	R%intr_return(%r2), %r2	/* return to intr_return, not here */
ENDPROC(syscall_exit_rfi)


	/* Generic interruptions (illegal insn, unaligned, page fault, etc) */

ENTRY(intr_save)		/* for os_hpmc */
	mfsp    %sr7,%r16
	cmpib,COND(=),n 0,%r16,1f
	get_stack_use_cr30
	b	2f
	copy    %r8,%r26

1:
	get_stack_use_r30
	copy    %r8,%r26

2:
	save_specials	%r29

	/* If this trap is an itlb miss, skip saving/adjusting isr/ior */

	/*
	 * FIXME: 1) Use a #define for the hardwired "6" below (and in
	 *           traps.c).
	 *        2) Once we start executing code above 4 Gb, we need
	 *           to adjust iasq/iaoq here in the same way we
	 *           adjust isr/ior below.
	 */

	cmpib,COND(=),n        6,%r26,skip_save_ior


	mfctl           %cr20, %r16 /* isr */
	nop		/* serialize mfctl on PA 2.0 to avoid 4 cycle penalty */
	mfctl           %cr21, %r17 /* ior */


#ifdef CONFIG_64BIT
	/*
	 * If the interrupted code was running with W bit off (32 bit),
	 * clear the b bits (bits 0 & 1) in the ior.
	 * save_specials left ipsw value in r8 for us to test.
	 */
	extrd,u,*<>     %r8,PSW_W_BIT,1,%r0
	depdi           0,1,2,%r17

	/*
	 * FIXME: This code has hardwired assumptions about the split
	 *        between space bits and offset bits. This will change
	 *        when we allow alternate page sizes.
	 */

	/* adjust isr/ior. */
	extrd,u         %r16,63,SPACEID_SHIFT,%r1	/* get high bits from isr for ior */
	depd            %r1,31,SPACEID_SHIFT,%r17	/* deposit them into ior */
	depdi           0,63,SPACEID_SHIFT,%r16		/* clear them from isr */
#endif
	STREG           %r16, PT_ISR(%r29)
	STREG           %r17, PT_IOR(%r29)


skip_save_ior:
	virt_map
	save_general	%r29

	ldo		PT_FR0(%r29), %r25
	save_fp		%r25

	loadgp

	copy		%r29, %r25	/* arg1 is pt_regs */
#ifdef CONFIG_64BIT
	ldo		-16(%r30),%r29	/* Reference param save area */
#endif

	ldil		L%intr_check_sig, %r2
	copy		%r25, %r16	/* save pt_regs */

	b		handle_interruption
	ldo		R%intr_check_sig(%r2), %r2
ENDPROC(intr_save)


	/*
	 * Note for all tlb miss handlers:
	 *
	 * cr24 contains a pointer to the kernel address space
	 * page directory.
	 *
	 * cr25 contains a pointer to the current user address
	 * space page directory.
	 *
	 * sr3 will contain the space id of the user address space
	 * of the current running thread while that thread is
	 * running in the kernel.
	 */

	/*
	 * register number allocations.  Note that these are all
	 * in the shadowed registers
	 */

	t0 = r1		/* temporary register 0 */
	va = r8		/* virtual address for which the trap occurred */
	t1 = r9		/* temporary register 1 */
	pte  = r16	/* pte/phys page # */
	prot = r17	/* prot bits */
	spc  = r24	/* space for which the trap occurred */
	ptp = r25	/* page directory/page table pointer */

#ifdef CONFIG_64BIT

dtlb_miss_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dtlb_fault

	L3_ptep		ptp,pte,t0,va,dtlb_check_alias_20w

	tlb_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_20w
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	idtlbt          pte,prot

	tlb_unlock1	spc,t0
	rfir
	nop

dtlb_check_alias_20w:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,20

	idtlbt          pte,prot

	rfir
	nop

nadtlb_miss_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,nadtlb_fault

	L3_ptep		ptp,pte,t0,va,nadtlb_check_alias_20w

	tlb_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_20w
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	idtlbt          pte,prot

	tlb_unlock1	spc,t0
	rfir
	nop

nadtlb_check_alias_20w:
	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,20

	idtlbt          pte,prot

	rfir
	nop

#else

dtlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,dtlb_fault

	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_11

	tlb_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_11
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	tlb_unlock1	spc,t0
	rfir
	nop

dtlb_check_alias_11:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,11

	idtlba          pte,(va)
	idtlbp          prot,(va)

	rfir
	nop

nadtlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,nadtlb_fault

	L2_ptep		ptp,pte,t0,va,nadtlb_check_alias_11

	tlb_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_11
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	tlb_unlock1	spc,t0
	rfir
	nop

nadtlb_check_alias_11:
	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,11

	idtlba          pte,(va)
	idtlbp          prot,(va)

	rfir
	nop

dtlb_miss_20:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dtlb_fault

	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_20

	tlb_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_20
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	idtlbt          pte,prot

	tlb_unlock1	spc,t0
	rfir
	nop

dtlb_check_alias_20:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,20

	idtlbt          pte,prot

	rfir
	nop

nadtlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,nadtlb_fault

	L2_ptep		ptp,pte,t0,va,nadtlb_check_alias_20

	tlb_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_20
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	idtlbt		pte,prot

	tlb_unlock1	spc,t0
	rfir
	nop

nadtlb_check_alias_20:
	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,20

	idtlbt          pte,prot

	rfir
	nop

#endif

nadtlb_emulate:

	/*
	 * Non access misses can be caused by fdc,fic,pdc,lpa,probe and
	 * probei instructions. We don't want to fault for these
	 * instructions (not only does it not make sense, it can cause
	 * deadlocks, since some flushes are done with the mmap
	 * semaphore held). If the translation doesn't exist, we can't
	 * insert a translation, so have to emulate the side effects
	 * of the instruction. Since we don't insert a translation
	 * we can get a lot of faults during a flush loop, so it makes
	 * sense to try to do it here with minimum overhead. We only
	 * emulate fdc,fic,pdc,probew,prober instructions whose base
	 * and index registers are not shadowed. We defer everything
	 * else to the "slow" path.
	 */

	mfctl           %cr19,%r9 /* Get iir */

	/* PA 2.0 Arch Ref. Book pg 382 has a good description of the insn bits.
	   Checks for fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw */

	/* Checks for fdc,fdce,pdc,"fic,4f" only */
	ldi             0x280,%r16
	and             %r9,%r16,%r17
	cmpb,<>,n       %r16,%r17,nadtlb_probe_check
	bb,>=,n         %r9,26,nadtlb_nullify  /* m bit not set, just nullify */
	BL		get_register,%r25
	extrw,u         %r9,15,5,%r8           /* Get index register # */
	cmpib,COND(=),n        -1,%r1,nadtlb_fault    /* have to use slow path */
	copy            %r1,%r24
	BL		get_register,%r25
	extrw,u         %r9,10,5,%r8           /* Get base register # */
	cmpib,COND(=),n        -1,%r1,nadtlb_fault    /* have to use slow path */
	BL		set_register,%r25
	add,l           %r1,%r24,%r1           /* doesn't affect c/b bits */

nadtlb_nullify:
	mfctl           %ipsw,%r8
	ldil            L%PSW_N,%r9
	or              %r8,%r9,%r8            /* Set PSW_N */
	mtctl           %r8,%ipsw

	rfir
	nop

	/*
		When there is no translation for the probe address then we
		must nullify the insn and return zero in the target register.
		This will indicate to the calling code that it does not have
		write/read privileges to this address.

		This should technically work for prober and probew in PA 1.1,
		and also probe,r and probe,w in PA 2.0

		WARNING: USE ONLY NON-SHADOW REGISTERS WITH PROBE INSN!
		THE SLOW-PATH EMULATION HAS NOT BEEN WRITTEN YET.

	*/
nadtlb_probe_check:
	ldi             0x80,%r16
	and             %r9,%r16,%r17
	cmpb,<>,n       %r16,%r17,nadtlb_fault /* Must be probe,[rw]*/
	BL              get_register,%r25      /* Find the target register */
	extrw,u         %r9,31,5,%r8           /* Get target register */
	cmpib,COND(=),n        -1,%r1,nadtlb_fault    /* have to use slow path */
	BL		set_register,%r25
	copy            %r0,%r1                /* Write zero to target register */
	b nadtlb_nullify                       /* Nullify return insn */
	nop


#ifdef CONFIG_64BIT
itlb_miss_20w:

	/*
	 * I miss is a little different, since we allow users to fault
	 * on the gateway page which is in the kernel address space.
	 */

	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,itlb_fault

	L3_ptep		ptp,pte,t0,va,itlb_fault

	tlb_lock	spc,ptp,pte,t0,t1,itlb_fault
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	iitlbt          pte,prot

	tlb_unlock1	spc,t0
	rfir
	nop

naitlb_miss_20w:

	/*
	 * I miss is a little different, since we allow users to fault
	 * on the gateway page which is in the kernel address space.
	 */

	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,naitlb_fault

	L3_ptep		ptp,pte,t0,va,naitlb_check_alias_20w

	tlb_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_20w
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	iitlbt          pte,prot

	tlb_unlock1	spc,t0
	rfir
	nop

naitlb_check_alias_20w:
	do_alias	spc,t0,t1,va,pte,prot,naitlb_fault,20

	iitlbt		pte,prot

	rfir
	nop

#else

itlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,itlb_fault

	L2_ptep		ptp,pte,t0,va,itlb_fault

	tlb_lock	spc,ptp,pte,t0,t1,itlb_fault
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	iitlba		pte,(%sr1,va)
	iitlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	tlb_unlock1	spc,t0
	rfir
	nop

naitlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,naitlb_fault

	L2_ptep		ptp,pte,t0,va,naitlb_check_alias_11

	tlb_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_11
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	iitlba		pte,(%sr1,va)
	iitlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	tlb_unlock1	spc,t0
	rfir
	nop

naitlb_check_alias_11:
	do_alias	spc,t0,t1,va,pte,prot,itlb_fault,11

	iitlba          pte,(%sr0, va)
	iitlbp          prot,(%sr0, va)

	rfir
	nop


itlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,itlb_fault

	L2_ptep		ptp,pte,t0,va,itlb_fault

	tlb_lock	spc,ptp,pte,t0,t1,itlb_fault
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	iitlbt          pte,prot

	tlb_unlock1	spc,t0
	rfir
	nop

naitlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,naitlb_fault

	L2_ptep		ptp,pte,t0,va,naitlb_check_alias_20

	tlb_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_20
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	iitlbt          pte,prot

	tlb_unlock1	spc,t0
	rfir
	nop

naitlb_check_alias_20:
	do_alias	spc,t0,t1,va,pte,prot,naitlb_fault,20

	iitlbt          pte,prot

	rfir
	nop

#endif

#ifdef CONFIG_64BIT

dbit_trap_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dbit_fault

	L3_ptep		ptp,pte,t0,va,dbit_fault

	tlb_lock	spc,ptp,pte,t0,t1,dbit_fault
	update_dirty	ptp,pte,t1

	make_insert_tlb	spc,pte,prot,t1

	idtlbt          pte,prot

	tlb_unlock0	spc,t0
	rfir
	nop
#else

dbit_trap_11:

	get_pgd		spc,ptp

	space_check	spc,t0,dbit_fault

	L2_ptep		ptp,pte,t0,va,dbit_fault

	tlb_lock	spc,ptp,pte,t0,t1,dbit_fault
	update_dirty	ptp,pte,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp            %sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp            t1, %sr1     /* Restore sr1 */

	tlb_unlock0	spc,t0
	rfir
	nop

dbit_trap_20:
	get_pgd		spc,ptp

	space_check	spc,t0,dbit_fault

	L2_ptep		ptp,pte,t0,va,dbit_fault

	tlb_lock	spc,ptp,pte,t0,t1,dbit_fault
	update_dirty	ptp,pte,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	idtlbt		pte,prot

	tlb_unlock0	spc,t0
	rfir
	nop
#endif

	.import handle_interruption,code

kernel_bad_space:
	b               intr_save
	ldi             31,%r8  /* Use an unused code */

dbit_fault:
	b               intr_save
	ldi             20,%r8

itlb_fault:
	b               intr_save
	ldi             6,%r8

nadtlb_fault:
	b               intr_save
	ldi             17,%r8

naitlb_fault:
	b               intr_save
	ldi             16,%r8

dtlb_fault:
	b               intr_save
	ldi             15,%r8

	/* Register saving semantics for system calls:

	   %r1		   clobbered by system call macro in userspace
	   %r2		   saved in PT_REGS by gateway page
	   %r3  - %r18	   preserved by C code (saved by signal code)
	   %r19 - %r20	   saved in PT_REGS by gateway page
	   %r21 - %r22	   non-standard syscall args
			   stored in kernel stack by gateway page
	   %r23 - %r26	   arg3-arg0, saved in PT_REGS by gateway page
	   %r27 - %r30	   saved in PT_REGS by gateway page
	   %r31		   syscall return pointer
	 */

	/* Floating point registers (FIXME: what do we do with these?)

	   %fr0  - %fr3	   status/exception, not preserved
	   %fr4  - %fr7	   arguments
	   %fr8	 - %fr11   not preserved by C code
	   %fr12 - %fr21   preserved by C code
	   %fr22 - %fr31   not preserved by C code
	 */

	.macro	reg_save regs
	STREG	%r3, PT_GR3(\regs)
	STREG	%r4, PT_GR4(\regs)
	STREG	%r5, PT_GR5(\regs)
	STREG	%r6, PT_GR6(\regs)
	STREG	%r7, PT_GR7(\regs)
	STREG	%r8, PT_GR8(\regs)
	STREG	%r9, PT_GR9(\regs)
	STREG   %r10,PT_GR10(\regs)
	STREG   %r11,PT_GR11(\regs)
	STREG   %r12,PT_GR12(\regs)
	STREG   %r13,PT_GR13(\regs)
	STREG   %r14,PT_GR14(\regs)
	STREG   %r15,PT_GR15(\regs)
	STREG   %r16,PT_GR16(\regs)
	STREG   %r17,PT_GR17(\regs)
	STREG   %r18,PT_GR18(\regs)
	.endm

	.macro	reg_restore regs
	LDREG	PT_GR3(\regs), %r3
	LDREG	PT_GR4(\regs), %r4
	LDREG	PT_GR5(\regs), %r5
	LDREG	PT_GR6(\regs), %r6
	LDREG	PT_GR7(\regs), %r7
	LDREG	PT_GR8(\regs), %r8
	LDREG	PT_GR9(\regs), %r9
	LDREG   PT_GR10(\regs),%r10
	LDREG   PT_GR11(\regs),%r11
	LDREG   PT_GR12(\regs),%r12
	LDREG   PT_GR13(\regs),%r13
	LDREG   PT_GR14(\regs),%r14
	LDREG   PT_GR15(\regs),%r15
	LDREG   PT_GR16(\regs),%r16
	LDREG   PT_GR17(\regs),%r17
	LDREG   PT_GR18(\regs),%r18
	.endm

	.macro	fork_like name
ENTRY(sys_\name\()_wrapper)
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
	ldo	TASK_REGS(%r1),%r1
	reg_save %r1
	mfctl	%cr27, %r28
	ldil	L%sys_\name, %r31
	be	R%sys_\name(%sr4,%r31)
	STREG	%r28, PT_CR27(%r1)
ENDPROC(sys_\name\()_wrapper)
	.endm

fork_like clone
fork_like fork
fork_like vfork

	/* Set the return value for the child */
ENTRY(child_return)
	BL	schedule_tail, %r2
	nop
finish_child_return:
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
	ldo	TASK_REGS(%r1),%r1	 /* get pt regs */

	LDREG	PT_CR27(%r1), %r3
	mtctl	%r3, %cr27
	reg_restore %r1
	b	syscall_exit
	copy	%r0,%r28
ENDPROC(child_return)

ENTRY(sys_rt_sigreturn_wrapper)
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26
	ldo	TASK_REGS(%r26),%r26	/* get pt regs */
	/* Don't save regs, we are going to restore them from sigcontext. */
	STREG	%r2, -RP_OFFSET(%r30)
#ifdef CONFIG_64BIT
	ldo	FRAME_SIZE(%r30), %r30
	BL	sys_rt_sigreturn,%r2
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	BL	sys_rt_sigreturn,%r2
	ldo	FRAME_SIZE(%r30), %r30
#endif

	ldo	-FRAME_SIZE(%r30), %r30
	LDREG	-RP_OFFSET(%r30), %r2

	/* FIXME: I think we need to restore a few more things here. */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r1	/* get pt regs */
	reg_restore %r1

	/* If the signal was received while the process was blocked on a
	 * syscall, then r2 will take us to syscall_exit; otherwise r2 will
	 * take us to syscall_exit_rfi and on to intr_return.
	 */
	bv	%r0(%r2)
	LDREG	PT_GR28(%r1),%r28  /* reload original r28 for syscall_exit */
ENDPROC(sys_rt_sigreturn_wrapper)

ENTRY(syscall_exit)
	/* NOTE: Not all syscalls exit this way.  rt_sigreturn will exit
	 * via syscall_exit_rfi if the signal was received while the process
	 * was running.
	 */

	/* save return value now */

	mfctl     %cr30, %r1
	LDREG     TI_TASK(%r1),%r1
	STREG     %r28,TASK_PT_GR28(%r1)

	/* Seems to me that dp could be wrong here, if the syscall involved
	 * calling a module, and nothing got round to restoring dp on return.
	 */
	loadgp

syscall_check_resched:

	/* check for reschedule */

	LDREG	TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19	/* long */
	bb,<,n	%r19, 31-TIF_NEED_RESCHED, syscall_do_resched /* forward */

	.import do_signal,code
syscall_check_sig:
	LDREG	TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19
	ldi	(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME), %r26
	and,COND(<>)	%r19, %r26, %r0
	b,n	syscall_restore	/* skip past if we've nothing to do */

syscall_do_signal:
	/* Save callee-save registers (for sigcontext).
	 * FIXME: After this point the process structure should be
	 * consistent with all the relevant state of the process
	 * before the syscall.  We need to verify this.
	 */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1), %r26		/* struct pt_regs *regs */
	reg_save %r26

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29			/* Reference param save area */
#endif

	BL	do_notify_resume,%r2
	ldi	1, %r25				/* long in_syscall = 1 */

	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1), %r20		/* reload pt_regs */
	reg_restore %r20

	b,n     syscall_check_sig

syscall_restore:
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1

	/* Are we being ptraced? */
	ldw	TASK_FLAGS(%r1),%r19
	ldi	_TIF_SYSCALL_TRACE_MASK,%r2
	and,COND(=)	%r19,%r2,%r0
	b,n	syscall_restore_rfi

	ldo	TASK_PT_FR31(%r1),%r19		   /* reload fpregs */
	rest_fp	%r19

	LDREG	TASK_PT_SAR(%r1),%r19		   /* restore SAR */
	mtsar	%r19

	LDREG	TASK_PT_GR2(%r1),%r2		   /* restore user rp */
	LDREG	TASK_PT_GR19(%r1),%r19
	LDREG   TASK_PT_GR20(%r1),%r20
	LDREG	TASK_PT_GR21(%r1),%r21
	LDREG	TASK_PT_GR22(%r1),%r22
	LDREG	TASK_PT_GR23(%r1),%r23
	LDREG	TASK_PT_GR24(%r1),%r24
	LDREG	TASK_PT_GR25(%r1),%r25
	LDREG	TASK_PT_GR26(%r1),%r26
	LDREG	TASK_PT_GR27(%r1),%r27	   /* restore user dp */
	LDREG	TASK_PT_GR28(%r1),%r28	   /* syscall return value */
	LDREG	TASK_PT_GR29(%r1),%r29
	LDREG	TASK_PT_GR31(%r1),%r31	   /* restore syscall rp */

	/* NOTE: We use rsm/ssm pair to make this operation atomic */
	LDREG   TASK_PT_GR30(%r1),%r1              /* Get user sp */
	rsm     PSW_SM_I, %r0
	copy    %r1,%r30                           /* Restore user sp */
	mfsp    %sr3,%r1                           /* Get user space id */
	mtsp    %r1,%sr7                           /* Restore sr7 */
	ssm     PSW_SM_I, %r0

	/* Set sr2 to zero for userspace syscalls to work. */
	mtsp	%r0,%sr2
	mtsp	%r1,%sr4			   /* Restore sr4 */
	mtsp	%r1,%sr5			   /* Restore sr5 */
	mtsp	%r1,%sr6			   /* Restore sr6 */

	depi	3,31,2,%r31			   /* ensure return to user mode. */

#ifdef CONFIG_64BIT
	/* decide whether to reset the wide mode bit
	 *
	 * For a syscall, the W bit is stored in the lowest bit
	 * of sp.  Extract it and reset W if it is zero */
	extrd,u,*<>	%r30,63,1,%r1
	rsm	PSW_SM_W, %r0
	/* now reset the lowest bit of sp if it was set */
	xor	%r30,%r1,%r30
#endif
	be,n    0(%sr3,%r31)                       /* return to user space */

	/* We have to return via an RFI, so that PSW T and R bits can be set
	 * appropriately.
	 * This sets up pt_regs so we can return via intr_restore, which is not
	 * the most efficient way of doing things, but it works.
	 */
syscall_restore_rfi:
	ldo	-1(%r0),%r2			   /* Set recovery cntr to -1 */
	mtctl	%r2,%cr0			   /*   for immediate trap */
	LDREG	TASK_PT_PSW(%r1),%r2		   /* Get old PSW */
	ldi	0x0b,%r20			   /* Create new PSW */
	depi	-1,13,1,%r20			   /* C, Q, D, and I bits */

	/* The values of SINGLESTEP_BIT and BLOCKSTEP_BIT are
	 * set in thread_info.h and converted to PA bitmap
	 * numbers in asm-offsets.c */

	/* if ((%r19.SINGLESTEP_BIT)) { %r20.27=1} */
	extru,=	%r19,TIF_SINGLESTEP_PA_BIT,1,%r0
	depi	-1,27,1,%r20			   /* R bit */

	/* if ((%r19.BLOCKSTEP_BIT)) { %r20.7=1} */
	extru,= %r19,TIF_BLOCKSTEP_PA_BIT,1,%r0
	depi	-1,7,1,%r20			   /* T bit */

	STREG	%r20,TASK_PT_PSW(%r1)

	/* Always store space registers, since sr3 can be changed (e.g. fork) */

	mfsp    %sr3,%r25
	STREG   %r25,TASK_PT_SR3(%r1)
	STREG   %r25,TASK_PT_SR4(%r1)
	STREG   %r25,TASK_PT_SR5(%r1)
	STREG   %r25,TASK_PT_SR6(%r1)
	STREG   %r25,TASK_PT_SR7(%r1)
	STREG   %r25,TASK_PT_IASQ0(%r1)
	STREG   %r25,TASK_PT_IASQ1(%r1)

	/* XXX W bit??? */
	/* Now if old D bit is clear, it means we didn't save all registers
	 * on syscall entry, so do that now.  This only happens on TRACEME
	 * calls, or if someone attached to us while we were on a syscall.
	 * We could make this more efficient by not saving r3-r18, but
	 * then we wouldn't be able to use the common intr_restore path.
	 * It is only for traced processes anyway, so performance is not
	 * an issue.
	 */
	bb,<	%r2,30,pt_regs_ok		   /* Branch if D set */
	ldo	TASK_REGS(%r1),%r25
	reg_save %r25				   /* Save r3 to r18 */

	/* Save the current sr */
	mfsp	%sr0,%r2
	STREG	%r2,TASK_PT_SR0(%r1)

	/* Save the scratch sr */
	mfsp	%sr1,%r2
	STREG	%r2,TASK_PT_SR1(%r1)

	/* sr2 should be set to zero for userspace syscalls */
	STREG	%r0,TASK_PT_SR2(%r1)

	LDREG	TASK_PT_GR31(%r1),%r2
	depi	3,31,2,%r2		   /* ensure return to user mode. */
	STREG   %r2,TASK_PT_IAOQ0(%r1)
	ldo	4(%r2),%r2
	STREG	%r2,TASK_PT_IAOQ1(%r1)
	b	intr_restore
	copy	%r25,%r16

pt_regs_ok:
	LDREG	TASK_PT_IAOQ0(%r1),%r2
	depi	3,31,2,%r2		   /* ensure return to user mode. */
	STREG	%r2,TASK_PT_IAOQ0(%r1)
	LDREG	TASK_PT_IAOQ1(%r1),%r2
	depi	3,31,2,%r2
	STREG	%r2,TASK_PT_IAOQ1(%r1)
	b	intr_restore
	copy	%r25,%r16

	.import schedule,code
syscall_do_resched:
	BL	schedule,%r2
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	nop
#endif
	b	syscall_check_resched	/* if resched, we start over again */
	nop
ENDPROC(syscall_exit)


#ifdef CONFIG_FUNCTION_TRACER
	.import ftrace_function_trampoline,code
ENTRY(_mcount)
	copy	%r3, %arg2
	b	ftrace_function_trampoline
	nop
ENDPROC(_mcount)

ENTRY(return_to_handler)
	load32	return_trampoline, %rp
	copy	%ret0, %arg0
	copy	%ret1, %arg1
	b	ftrace_return_to_handler
	nop
return_trampoline:
	copy	%ret0, %rp
	copy	%r23, %ret0
	copy	%r24, %ret1

.globl ftrace_stub
ftrace_stub:
	bv	%r0(%rp)
	nop
ENDPROC(return_to_handler)
#endif	/* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_IRQSTACKS
/* void call_on_stack(unsigned long param1, void *func,
		      unsigned long new_stack) */
ENTRY(call_on_stack)
	copy	%sp, %r1

	/* Regarding the HPPA calling conventions for function pointers,
	   we assume the PIC register is not changed across call.  For
	   CONFIG_64BIT, the argument pointer is left to point at the
	   argument region allocated for the call to call_on_stack. */
# ifdef CONFIG_64BIT
	/* Switch to new stack.  We allocate two 128 byte frames.  */
	ldo	256(%arg2), %sp
	/* Save previous stack pointer and return pointer in frame marker */
	STREG	%rp, -144(%sp)
	/* Calls always use function descriptor */
	LDREG	16(%arg1), %arg1
	bve,l	(%arg1), %rp
	STREG	%r1, -136(%sp)
	LDREG	-144(%sp), %rp
	bve	(%rp)
	LDREG	-136(%sp), %sp
# else
	/* Switch to new stack.  We allocate two 64 byte frames.  */
	ldo	128(%arg2), %sp
	/* Save previous stack pointer and return pointer in frame marker */
	STREG	%r1, -68(%sp)
	STREG	%rp, -84(%sp)
	/* Calls use function descriptor if PLABEL bit is set */
	bb,>=,n	%arg1, 30, 1f
	depwi	0,31,2, %arg1
	LDREG	0(%arg1), %arg1
1:
	be,l	0(%sr4,%arg1), %sr0, %r31
	copy	%r31, %rp
	LDREG	-84(%sp), %rp
	bv	(%rp)
	LDREG	-68(%sp), %sp
# endif /* CONFIG_64BIT */
ENDPROC(call_on_stack)
#endif /* CONFIG_IRQSTACKS */

get_register:
	/*
	 * get_register is used by the non access tlb miss handlers to
	 * copy the value of the general register specified in r8 into
	 * r1. This routine can't be used for shadowed registers, since
	 * the rfir will restore the original value. So, for the shadowed
	 * registers we put a -1 into r1 to indicate that the register
	 * should not be used (the register being copied could also have
	 * a -1 in it, but that is OK, it just means that we will have
	 * to use the slow path instead).
	 */
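	/* For example, if %r8 contains 10, the blr below branches into the
	 * tenth two-instruction slot of the table, which copies %r10 into
	 * %r1 and returns through %r25. */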
	blr     %r8,%r0
	nop
	bv      %r0(%r25)    /* r0 */
	copy    %r0,%r1
	bv      %r0(%r25)    /* r1 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r2 */
	copy    %r2,%r1
	bv      %r0(%r25)    /* r3 */
	copy    %r3,%r1
	bv      %r0(%r25)    /* r4 */
	copy    %r4,%r1
	bv      %r0(%r25)    /* r5 */
	copy    %r5,%r1
	bv      %r0(%r25)    /* r6 */
	copy    %r6,%r1
	bv      %r0(%r25)    /* r7 */
	copy    %r7,%r1
	bv      %r0(%r25)    /* r8 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r9 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r10 */
	copy    %r10,%r1
	bv      %r0(%r25)    /* r11 */
	copy    %r11,%r1
	bv      %r0(%r25)    /* r12 */
	copy    %r12,%r1
	bv      %r0(%r25)    /* r13 */
	copy    %r13,%r1
	bv      %r0(%r25)    /* r14 */
	copy    %r14,%r1
	bv      %r0(%r25)    /* r15 */
	copy    %r15,%r1
	bv      %r0(%r25)    /* r16 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r17 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r18 */
	copy    %r18,%r1
	bv      %r0(%r25)    /* r19 */
	copy    %r19,%r1
	bv      %r0(%r25)    /* r20 */
	copy    %r20,%r1
	bv      %r0(%r25)    /* r21 */
	copy    %r21,%r1
	bv      %r0(%r25)    /* r22 */
	copy    %r22,%r1
	bv      %r0(%r25)    /* r23 */
	copy    %r23,%r1
	bv      %r0(%r25)    /* r24 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r25 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r26 */
	copy    %r26,%r1
	bv      %r0(%r25)    /* r27 */
	copy    %r27,%r1
	bv      %r0(%r25)    /* r28 */
	copy    %r28,%r1
	bv      %r0(%r25)    /* r29 */
	copy    %r29,%r1
	bv      %r0(%r25)    /* r30 */
	copy    %r30,%r1
	bv      %r0(%r25)    /* r31 */
	copy    %r31,%r1


set_register:
	/*
	 * set_register is used by the non access tlb miss handlers to
	 * copy the value of r1 into the general register specified in
	 * r8.
	 */
	blr     %r8,%r0
	nop
	bv      %r0(%r25)    /* r0 (silly, but it is a place holder) */
	copy    %r1,%r0
	bv      %r0(%r25)    /* r1 */
	copy    %r1,%r1
	bv      %r0(%r25)    /* r2 */
	copy    %r1,%r2
	bv      %r0(%r25)    /* r3 */
	copy    %r1,%r3
	bv      %r0(%r25)    /* r4 */
	copy    %r1,%r4
	bv      %r0(%r25)    /* r5 */
	copy    %r1,%r5
	bv      %r0(%r25)    /* r6 */
	copy    %r1,%r6
	bv      %r0(%r25)    /* r7 */
	copy    %r1,%r7
	bv      %r0(%r25)    /* r8 */
	copy    %r1,%r8
	bv      %r0(%r25)    /* r9 */
	copy    %r1,%r9
	bv      %r0(%r25)    /* r10 */
	copy    %r1,%r10
	bv      %r0(%r25)    /* r11 */
	copy    %r1,%r11
	bv      %r0(%r25)    /* r12 */
	copy    %r1,%r12
	bv      %r0(%r25)    /* r13 */
	copy    %r1,%r13
	bv      %r0(%r25)    /* r14 */
	copy    %r1,%r14
	bv      %r0(%r25)    /* r15 */
	copy    %r1,%r15
	bv      %r0(%r25)    /* r16 */
	copy    %r1,%r16
	bv      %r0(%r25)    /* r17 */
	copy    %r1,%r17
	bv      %r0(%r25)    /* r18 */
	copy    %r1,%r18
	bv      %r0(%r25)    /* r19 */
	copy    %r1,%r19
	bv      %r0(%r25)    /* r20 */
	copy    %r1,%r20
	bv      %r0(%r25)    /* r21 */
	copy    %r1,%r21
	bv      %r0(%r25)    /* r22 */
	copy    %r1,%r22
	bv      %r0(%r25)    /* r23 */
	copy    %r1,%r23
	bv      %r0(%r25)    /* r24 */
	copy    %r1,%r24
	bv      %r0(%r25)    /* r25 */
	copy    %r1,%r25
	bv      %r0(%r25)    /* r26 */
	copy    %r1,%r26
	bv      %r0(%r25)    /* r27 */
	copy    %r1,%r27
	bv      %r0(%r25)    /* r28 */
	copy    %r1,%r28
	bv      %r0(%r25)    /* r29 */
	copy    %r1,%r29
	bv      %r0(%r25)    /* r30 */
	copy    %r1,%r30
	bv      %r0(%r25)    /* r31 */
	copy    %r1,%r31
