/*
 * Linux/PA-RISC Project (http://www.parisc-linux.org/)
 *
 * kernel entry points (interruptions, system call wrappers)
 *  Copyright (C) 1999,2000 Philipp Rumpf
 *  Copyright (C) 1999 SuSE GmbH Nuernberg
 *  Copyright (C) 2000 Hewlett-Packard (John Marvin)
 *  Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; either version 2, or (at your option)
 *    any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <asm/asm-offsets.h>

/* we have the following possibilities to act on an interruption:
 *  - handle in assembly and use shadowed registers only
 *  - save registers to kernel stack and handle in assembly or C */


#include <asm/psw.h>
#include <asm/cache.h>		/* for L1_CACHE_SHIFT */
#include <asm/assembly.h>	/* for LDREG/STREG defines */
#include <asm/pgtable.h>
#include <asm/signal.h>
#include <asm/unistd.h>
#include <asm/ldcw.h>
#include <asm/traps.h>
#include <asm/thread_info.h>

#include <linux/linkage.h>

#ifdef CONFIG_64BIT
	.level 2.0w
#else
	.level 2.0
#endif

	.import		pa_tlb_lock,data
	.macro  load_pa_tlb_lock reg
#if __PA_LDCW_ALIGNMENT > 4
	load32	PA(pa_tlb_lock) + __PA_LDCW_ALIGNMENT-1, \reg
	depi	0,31,__PA_LDCW_ALIGN_ORDER, \reg
#else
	load32	PA(pa_tlb_lock), \reg
#endif
	.endm
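
	/* Illustrative C sketch of the alignment game above: the ldcw
	 * lock word needs stronger-than-word alignment on PA-RISC, so
	 * when __PA_LDCW_ALIGNMENT > 4 we round the lock address up and
	 * clear the low bits (assumption: __PA_LDCW_ALIGN_ORDER is
	 * log2(__PA_LDCW_ALIGNMENT)):
	 *
	 *	reg = (PA(&pa_tlb_lock) + __PA_LDCW_ALIGNMENT - 1)
	 *			& ~(__PA_LDCW_ALIGNMENT - 1);
	 */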

	/* space_to_prot macro creates a prot id from a space id */

#if (SPACEID_SHIFT) == 0
	.macro  space_to_prot spc prot
	depd,z  \spc,62,31,\prot
	.endm
#else
	.macro  space_to_prot spc prot
	extrd,u \spc,(64 - (SPACEID_SHIFT)),32,\prot
	.endm
#endif
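
	/* Roughly, in C (a sketch, not kernel API): the space id is
	 * moved into the position the TLB expects for the protection
	 * id, i.e.
	 *
	 *	prot = spc << 1;                    // SPACEID_SHIFT == 0
	 *	prot = spc >> (SPACEID_SHIFT - 1);  // SPACEID_SHIFT != 0
	 */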

	/* Switch to virtual mapping, trashing only %r1 */
	.macro  virt_map
	/* pcxt_ssm_bug */
	rsm	PSW_SM_I, %r0	/* barrier for "Relied upon Translation" */
	mtsp	%r0, %sr4
	mtsp	%r0, %sr5
	mtsp	%r0, %sr6
	tovirt_r1 %r29
	load32	KERNEL_PSW, %r1

	rsm     PSW_SM_QUIET,%r0	/* second "heavy weight" ctl op */
	mtctl	%r0, %cr17	/* Clear IIASQ tail */
	mtctl	%r0, %cr17	/* Clear IIASQ head */
	mtctl	%r1, %ipsw
	load32	4f, %r1
	mtctl	%r1, %cr18	/* Set IIAOQ tail */
	ldo	4(%r1), %r1
	mtctl	%r1, %cr18	/* Set IIAOQ head */
	rfir
	nop
4:
	.endm

	/*
	 * The "get_stack" macros are responsible for determining the
	 * kernel stack value.
	 *
	 *      If sr7 == 0
	 *          Already using a kernel stack, so call the
	 *          get_stack_use_r30 macro to push a pt_regs structure
	 *          on the stack, and store registers there.
	 *      else
	 *          Need to set up a kernel stack, so call the
	 *          get_stack_use_cr30 macro to set up a pointer
	 *          to the pt_regs structure contained within the
	 *          task pointer pointed to by cr30. Set the stack
	 *          pointer to point to the end of the task structure.
	 *
	 * Note that we use shadowed registers for temps until
	 * we can save %r26 and %r29. %r26 is used to preserve
	 * %r8 (a shadowed register) which temporarily contained
	 * either the fault type ("code") or the eirr. We need
	 * to use a non-shadowed register to carry the value over
	 * the rfir in virt_map. We use %r26 since this value winds
	 * up being passed as the argument to either do_cpu_irq_mask
	 * or handle_interruption. %r29 is used to hold a pointer
	 * to the register save area, and once again, it needs to
	 * be a non-shadowed register so that it survives the rfir.
	 *
	 * N.B. TASK_SZ_ALGN and PT_SZ_ALGN include space for a stack frame.
	 */

	.macro  get_stack_use_cr30

	/* we save the registers in the task struct */

	copy	%r30, %r17
	mfctl   %cr30, %r1
	ldo	THREAD_SZ_ALGN(%r1), %r30
	mtsp	%r0,%sr7
	mtsp	%r16,%sr3
	tophys  %r1,%r9
	LDREG	TI_TASK(%r9), %r1	/* thread_info -> task_struct */
	tophys  %r1,%r9
	ldo     TASK_REGS(%r9),%r9
	STREG   %r17,PT_GR30(%r9)
	STREG   %r29,PT_GR29(%r9)
	STREG   %r26,PT_GR26(%r9)
	STREG	%r16,PT_SR7(%r9)
	copy    %r9,%r29
	.endm

	.macro  get_stack_use_r30

	/* we put a struct pt_regs on the stack and save the registers there */

	tophys  %r30,%r9
	copy	%r30,%r1
	ldo	PT_SZ_ALGN(%r30),%r30
	STREG   %r1,PT_GR30(%r9)
	STREG   %r29,PT_GR29(%r9)
	STREG   %r26,PT_GR26(%r9)
	STREG	%r16,PT_SR7(%r9)
	copy    %r9,%r29
	.endm
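
	/* Taken together with their callers (see intr_extint/intr_save
	 * below), the two macros above behave roughly like this C
	 * sketch (field names illustrative only):
	 *
	 *	if (sr7 == 0) {				// already on kernel stack
	 *		regs = (struct pt_regs *)r30;	// push pt_regs on it
	 *		r30 += PT_SZ_ALGN;
	 *	} else {				// coming from user space
	 *		regs = task_regs(cr30_task);	// TASK_REGS save area
	 *		r30 = cr30 + THREAD_SZ_ALGN;	// fresh kernel stack
	 *	}
	 *	r29 = regs;				// save-area pointer
	 */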

	.macro  rest_stack
	LDREG   PT_GR1(%r29), %r1
	LDREG   PT_GR30(%r29),%r30
	LDREG   PT_GR29(%r29),%r29
	.endm

	/* default interruption handler
	 * (calls traps.c:handle_interruption) */
	.macro	def code
	b	intr_save
	ldi     \code, %r8
	.align	32
	.endm

	/* Interrupt interruption handler
	 * (calls irq.c:do_cpu_irq_mask) */
	.macro	extint code
	b	intr_extint
	mfsp    %sr7,%r16
	.align	32
	.endm

	.import	os_hpmc, code

	/* HPMC handler */
	.macro	hpmc code
	nop			/* must be a NOP, will be patched later */
	load32	PA(os_hpmc), %r3
	bv,n	0(%r3)
	nop
	.word	0		/* checksum (will be patched) */
	.word	PA(os_hpmc)	/* address of handler */
	.word	0		/* length of handler */
	.endm

	/*
	 * Performance Note: Instructions will be moved up into
	 * this part of the code later on, once we are sure
	 * that the tlb miss handlers are close to final form.
	 */

	/* Register definitions for tlb miss handler macros */

	va  = r8	/* virtual address for which the trap occurred */
	spc = r24	/* space for which the trap occurred */

#ifndef CONFIG_64BIT

	/*
	 * itlb miss interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	itlb_11 code

	mfctl	%pcsq, spc
	b	itlb_miss_11
	mfctl	%pcoq, va

	.align		32
	.endm
#endif

	/*
	 * itlb miss interruption handler (parisc 2.0)
	 */

	.macro	itlb_20 code
	mfctl	%pcsq, spc
#ifdef CONFIG_64BIT
	b       itlb_miss_20w
#else
	b	itlb_miss_20
#endif
	mfctl	%pcoq, va

	.align		32
	.endm

#ifndef CONFIG_64BIT
	/*
	 * naitlb miss interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	naitlb_11 code

	mfctl	%isr,spc
	b	naitlb_miss_11
	mfctl 	%ior,va

	.align		32
	.endm
#endif

	/*
	 * naitlb miss interruption handler (parisc 2.0)
	 */

	.macro	naitlb_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b       naitlb_miss_20w
#else
	b	naitlb_miss_20
#endif
	mfctl 	%ior,va

	.align		32
	.endm

#ifndef CONFIG_64BIT
	/*
	 * dtlb miss interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	dtlb_11 code

	mfctl	%isr, spc
	b	dtlb_miss_11
	mfctl	%ior, va

	.align		32
	.endm
#endif

	/*
	 * dtlb miss interruption handler (parisc 2.0)
	 */

	.macro	dtlb_20 code

	mfctl	%isr, spc
#ifdef CONFIG_64BIT
	b       dtlb_miss_20w
#else
	b	dtlb_miss_20
#endif
	mfctl	%ior, va

	.align		32
	.endm

#ifndef CONFIG_64BIT
	/* nadtlb miss interruption handler (parisc 1.1 - 32 bit) */

	.macro	nadtlb_11 code

	mfctl	%isr,spc
	b       nadtlb_miss_11
	mfctl	%ior,va

	.align		32
	.endm
#endif

	/* nadtlb miss interruption handler (parisc 2.0) */

	.macro	nadtlb_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b       nadtlb_miss_20w
#else
	b       nadtlb_miss_20
#endif
	mfctl	%ior,va

	.align		32
	.endm

#ifndef CONFIG_64BIT
	/*
	 * dirty bit trap interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	dbit_11 code

	mfctl	%isr,spc
	b	dbit_trap_11
	mfctl	%ior,va

	.align		32
	.endm
#endif

	/*
	 * dirty bit trap interruption handler (parisc 2.0)
	 */

	.macro	dbit_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b       dbit_trap_20w
#else
	b	dbit_trap_20
#endif
	mfctl	%ior,va

	.align		32
	.endm

	/* In LP64, the space contains part of the upper 32 bits of the
	 * faulting address.  We have to extract this and place it in the va,
	 * zeroing the corresponding bits in the space register */
	.macro		space_adjust	spc,va,tmp
#ifdef CONFIG_64BIT
	extrd,u		\spc,63,SPACEID_SHIFT,\tmp
	depd		%r0,63,SPACEID_SHIFT,\spc
	depd		\tmp,31,SPACEID_SHIFT,\va
#endif
	.endm
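
	/* Equivalent C for the LP64 case (illustrative sketch):
	 *
	 *	tmp = spc & ((1UL << SPACEID_SHIFT) - 1);  // low space bits
	 *	spc &= ~((1UL << SPACEID_SHIFT) - 1);      // clear them in spc
	 *	va |= tmp << 32;                           // fold into upper va
	 */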

	.import		swapper_pg_dir,code

	/* Get the pgd.  For faults on space zero (kernel space), this
	 * is simply swapper_pg_dir.  For user space faults, the
	 * pgd is stored in %cr25 */
	.macro		get_pgd		spc,reg
	ldil		L%PA(swapper_pg_dir),\reg
	ldo		R%PA(swapper_pg_dir)(\reg),\reg
	or,COND(=)	%r0,\spc,%r0
	mfctl		%cr25,\reg
	.endm
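
	/* In C this is simply (sketch; the mfctl is nullified rather
	 * than branched around):
	 *
	 *	reg = (spc == 0) ? PA(swapper_pg_dir)  // kernel space
	 *			 : mfctl(cr25);        // current user pgd
	 */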

	/*
		space_check(spc,tmp,fault)

		spc - The space we saw the fault with.
		tmp - The place to store the current space.
		fault - Function to call on failure.

		Only allow faults on different spaces from the
		currently active one if we're the kernel

	*/
	.macro		space_check	spc,tmp,fault
	mfsp		%sr7,\tmp
	or,COND(<>)	%r0,\spc,%r0	/* user may execute gateway page
					 * as kernel, so defeat the space
					 * check if it is */
	copy		\spc,\tmp
	or,COND(=)	%r0,\tmp,%r0	/* nullify if executing as kernel */
	cmpb,COND(<>),n	\tmp,\spc,\fault
	.endm
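
	/* Approximate C for the nullification dance above (sketch):
	 *
	 *	tmp = mfsp(sr7);          // space we are executing in
	 *	if (spc == 0)             // fault is in kernel space:
	 *		tmp = spc;        //   always allowed
	 *	if (tmp != 0 && tmp != spc)
	 *		goto fault;       // user touching a foreign space
	 */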

	/* Look up a PTE in a 2-Level scheme (faulting at each
	 * level if the entry isn't present)
	 *
	 * NOTE: we use ldw even for LP64, since the short pointers
	 * can address up to 1TB
	 */
	.macro		L2_ptep	pmd,pte,index,va,fault
#if CONFIG_PGTABLE_LEVELS == 3
	extru		\va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
#else
# if defined(CONFIG_64BIT)
	extrd,u		\va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
  #else
  # if PAGE_SIZE > 4096
	extru		\va,31-ASM_PGDIR_SHIFT,32-ASM_PGDIR_SHIFT,\index
  # else
	extru		\va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
  # endif
# endif
#endif
	dep             %r0,31,PAGE_SHIFT,\pmd  /* clear offset */
	copy		%r0,\pte
	ldw,s		\index(\pmd),\pmd
	bb,>=,n		\pmd,_PxD_PRESENT_BIT,\fault
	dep		%r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
	copy		\pmd,%r9
	SHLREG		%r9,PxD_VALUE_SHIFT,\pmd
	extru		\va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
	dep		%r0,31,PAGE_SHIFT,\pmd  /* clear offset */
	shladd		\index,BITS_PER_PTE_ENTRY,\pmd,\pmd /* pmd is now pte */
	LDREG		%r0(\pmd),\pte
	bb,>=,n		\pte,_PAGE_PRESENT_BIT,\fault
	.endm
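
	/* The walk above corresponds roughly to this C sketch (no
	 * locking, helper names illustrative only):
	 *
	 *	entry = base[index_of(va)];          // pgd or pmd level
	 *	if (!(entry & PxD_FLAG_PRESENT))
	 *		goto fault;
	 *	ptep = pte_table(entry) + pte_index(va);
	 *	pte = *ptep;
	 *	if (!(pte & _PAGE_PRESENT))
	 *		goto fault;
	 */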

	/* Look up PTE in a 3-Level scheme.
	 *
	 * Here we implement a Hybrid L2/L3 scheme: we allocate the
	 * first pmd adjacent to the pgd.  This means that we can
	 * subtract a constant offset to get to it.  The pmd and pgd
	 * sizes are arranged so that a single pmd covers 4GB (giving
	 * a full LP64 process access to 8TB) so our lookups are
	 * effectively L2 for the first 4GB of the kernel (i.e. for
	 * all ILP32 processes and all the kernel for machines with
	 * under 4GB of memory) */
	.macro		L3_ptep pgd,pte,index,va,fault
#if CONFIG_PGTABLE_LEVELS == 3 /* we might have a 2-Level scheme, e.g. with 16kb page size */
	extrd,u		\va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
	copy		%r0,\pte
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	ldw,s		\index(\pgd),\pgd
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	bb,>=,n		\pgd,_PxD_PRESENT_BIT,\fault
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	shld		\pgd,PxD_VALUE_SHIFT,\index
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	copy		\index,\pgd
	extrd,u,*<>	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	ldo		ASM_PGD_PMD_OFFSET(\pgd),\pgd
#endif
	L2_ptep		\pgd,\pte,\index,\va,\fault
	.endm

	/* Acquire pa_tlb_lock lock and recheck page is still present. */
	.macro		tlb_lock	spc,ptp,pte,tmp,tmp1,fault
#ifdef CONFIG_SMP
	cmpib,COND(=),n	0,\spc,2f
	load_pa_tlb_lock \tmp
1:	LDCW		0(\tmp),\tmp1
	cmpib,COND(=)	0,\tmp1,1b
	nop
	LDREG		0(\ptp),\pte
	bb,<,n		\pte,_PAGE_PRESENT_BIT,2f
	b		\fault
	stw		 \spc,0(\tmp)
2:
#endif
	.endm
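
	/* C sketch of the SMP protocol above (illustrative only):
	 *
	 *	if (spc != 0) {                       // user-space fault
	 *		while (ldcw(&pa_tlb_lock) == 0)
	 *			;                     // 0 == already held
	 *		pte = *ptp;                   // re-read under lock
	 *		if (!(pte & _PAGE_PRESENT)) {
	 *			pa_tlb_lock = spc;    // nonzero store unlocks
	 *			goto fault;
	 *		}
	 *	}
	 */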

	/* Release pa_tlb_lock lock without reloading lock address. */
	.macro		tlb_unlock0	spc,tmp
#ifdef CONFIG_SMP
	or,COND(=)	%r0,\spc,%r0
	sync
	or,COND(=)	%r0,\spc,%r0
	stw             \spc,0(\tmp)
#endif
	.endm

	/* Release pa_tlb_lock lock. */
	.macro		tlb_unlock1	spc,tmp
#ifdef CONFIG_SMP
	load_pa_tlb_lock \tmp
	tlb_unlock0	\spc,\tmp
#endif
	.endm

	/* Set the _PAGE_ACCESSED bit of the PTE.  Be clever and
	 * don't needlessly dirty the cache line if it was already set */
	.macro		update_accessed	ptp,pte,tmp,tmp1
	ldi		_PAGE_ACCESSED,\tmp1
	or		\tmp1,\pte,\tmp
	and,COND(<>)	\tmp1,\pte,%r0
	STREG		\tmp,0(\ptp)
	.endm
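
	/* Equivalent C (sketch):
	 *
	 *	tmp = pte | _PAGE_ACCESSED;
	 *	if (!(pte & _PAGE_ACCESSED))
	 *		*ptp = tmp;	// store only when the bit was clear
	 */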

	/* Set the dirty bit (and accessed bit).  No need to be
	 * clever, this is only used from the dirty fault */
	.macro		update_dirty	ptp,pte,tmp
	ldi		_PAGE_ACCESSED|_PAGE_DIRTY,\tmp
	or		\tmp,\pte,\pte
	STREG		\pte,0(\ptp)
	.endm

	/* We have (depending on the page size):
	 * - 38 to 52-bit Physical Page Number
	 * - 12 to 26-bit page offset
	 */
	/* bitshift difference between a PFN (based on kernel's PAGE_SIZE)
	 * and a CPU TLB 4k PFN (4k => 12 bits to shift) */
	#define PAGE_ADD_SHIFT		(PAGE_SHIFT-12)
	#define PAGE_ADD_HUGE_SHIFT	(REAL_HPAGE_SHIFT-12)

	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
	.macro		convert_for_tlb_insert20 pte,tmp
#ifdef CONFIG_HUGETLB_PAGE
	copy		\pte,\tmp
	extrd,u		\tmp,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
				64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte

	depdi		_PAGE_SIZE_ENCODING_DEFAULT,63,\
				(63-58)+PAGE_ADD_SHIFT,\pte
	extrd,u,*=	\tmp,_PAGE_HPAGE_BIT+32,1,%r0
	depdi		_HUGE_PAGE_SIZE_ENCODING_DEFAULT,63,\
				(63-58)+PAGE_ADD_HUGE_SHIFT,\pte
#else /* Huge pages disabled */
	extrd,u		\pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
				64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
	depdi		_PAGE_SIZE_ENCODING_DEFAULT,63,\
				(63-58)+PAGE_ADD_SHIFT,\pte
#endif
	.endm

	/* Convert the pte and prot to tlb insertion values.  How
	 * this happens is quite subtle, read below */
	.macro		make_insert_tlb	spc,pte,prot,tmp
	space_to_prot   \spc \prot        /* create prot id from space */
	/* The following is the real subtlety.  This is depositing
	 * T <-> _PAGE_REFTRAP
	 * D <-> _PAGE_DIRTY
	 * B <-> _PAGE_DMB (memory break)
	 *
	 * Then incredible subtlety: The access rights are
	 * _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE
	 * See 3-14 of the parisc 2.0 manual
	 *
	 * Finally, _PAGE_READ goes in the top bit of PL1 (so we
	 * trigger an access rights trap in user space if the user
	 * tries to read an unreadable page) */
	depd            \pte,8,7,\prot

	/* PAGE_USER indicates the page can be read with user privileges,
	 * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1
	 * contains _PAGE_READ) */
	extrd,u,*=      \pte,_PAGE_USER_BIT+32,1,%r0
	depdi		7,11,3,\prot
	/* If we're a gateway page, drop PL2 back to zero for promotion
	 * to kernel privilege (so we can execute the page as kernel).
	 * Any privilege promotion page always denies read and write */
	extrd,u,*= 	\pte,_PAGE_GATEWAY_BIT+32,1,%r0
	depd		%r0,11,2,\prot	/* If Gateway, Set PL2 to 0 */

	/* Enforce uncacheable pages.
	 * This should ONLY be used for MMIO on PA 2.0 machines.
	 * Memory/DMA is cache coherent on all PA2.0 machines we support
	 * (that means T-class is NOT supported) and the memory controllers
	 * on most of those machines only handle cache transactions.
	 */
	extrd,u,*=	\pte,_PAGE_NO_CACHE_BIT+32,1,%r0
	depdi		1,12,1,\prot

	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
	convert_for_tlb_insert20 \pte \tmp
	.endm

	/* Identical macro to make_insert_tlb above, except it
	 * makes the tlb entry for the differently formatted pa11
	 * insertion instructions */
	.macro		make_insert_tlb_11	spc,pte,prot
	zdep		\spc,30,15,\prot
	dep		\pte,8,7,\prot
	extru,=		\pte,_PAGE_NO_CACHE_BIT,1,%r0
	depi		1,12,1,\prot
	extru,=         \pte,_PAGE_USER_BIT,1,%r0
	depi		7,11,3,\prot   /* Set for user space (1 rsvd for read) */
	extru,= 	\pte,_PAGE_GATEWAY_BIT,1,%r0
	depi		0,11,2,\prot	/* If Gateway, Set PL2 to 0 */

	/* Get rid of prot bits and convert to page addr for iitlba */

	depi		0,31,ASM_PFN_PTE_SHIFT,\pte
	SHRREG		\pte,(ASM_PFN_PTE_SHIFT-(31-26)),\pte
	.endm

	/* This is for ILP32 PA2.0 only.  The TLB insertion needs
	 * to extend into I/O space if the address is 0xfXXXXXXX
	 * so we extend the f's into the top word of the pte in
	 * this case */
	.macro		f_extend	pte,tmp
	extrd,s		\pte,42,4,\tmp
	addi,<>		1,\tmp,%r0
	extrd,s		\pte,63,25,\pte
	.endm
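
	/* Roughly, in C (sketch; bit positions follow the extrd above):
	 *
	 *	if (((pte >> 21) & 0xf) == 0xf)        // page is 0xfXXXXXXX
	 *		pte = ((s64)pte << 39) >> 39;  // sign-extend the f's
	 */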

	/* The alias region is an 8MB aligned 16MB to do clear and
	 * copy user pages at addresses congruent with the user
	 * virtual address.
	 *
	 * To use the alias page, you set %r26 up with the "to" TLB
	 * entry (identifying the physical page) and %r23 up with
	 * the "from" TLB entry (or nothing if only a "to" entry---for
	 * clear_user_page_asm) */
	.macro		do_alias	spc,tmp,tmp1,va,pte,prot,fault,patype
	cmpib,COND(<>),n 0,\spc,\fault
	ldil		L%(TMPALIAS_MAP_START),\tmp
#if defined(CONFIG_64BIT) && (TMPALIAS_MAP_START >= 0x80000000)
	/* on LP64, ldi will sign extend into the upper 32 bits,
	 * which is behaviour we don't want */
	depdi		0,31,32,\tmp
#endif
	copy		\va,\tmp1
	depi		0,31,23,\tmp1
	cmpb,COND(<>),n	\tmp,\tmp1,\fault
	mfctl		%cr19,\tmp	/* iir */
	/* get the opcode (first six bits) into \tmp */
	extrw,u		\tmp,5,6,\tmp
	/*
	 * Only setting the T bit prevents data cache movein
	 * Setting access rights to zero prevents instruction cache movein
	 *
	 * Note subtlety here: _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE go
	 * to type field and _PAGE_READ goes to top bit of PL1
	 */
	ldi		(_PAGE_REFTRAP|_PAGE_READ|_PAGE_WRITE),\prot
	/*
	 * so if the opcode is one (i.e. this is a memory management
	 * instruction) nullify the next load so \prot is only T.
	 * Otherwise this is a normal data operation
	 */
	cmpiclr,=	0x01,\tmp,%r0
	ldi		(_PAGE_DIRTY|_PAGE_READ|_PAGE_WRITE),\prot
.ifc \patype,20
	depd,z		\prot,8,7,\prot
.else
.ifc \patype,11
	depw,z		\prot,8,7,\prot
.else
	.error "undefined PA type to do_alias"
.endif
.endif
	/*
	 * OK, it is in the temp alias region, check whether "from" or "to".
	 * Check "subtle" note in pacache.S re: r23/r26.
	 */
#ifdef CONFIG_64BIT
	extrd,u,*=	\va,41,1,%r0
#else
	extrw,u,=	\va,9,1,%r0
#endif
	or,COND(tr)	%r23,%r0,\pte
	or		%r26,%r0,\pte
	.endm


	/*
	 * Fault_vectors are architecturally required to be aligned on a 2K
	 * boundary
	 */

	.section .text.hot
	.align 2048

ENTRY(fault_vector_20)
	/* First vector is invalid (0) */
	.ascii	"cows can fly"
	.byte 0
	.align 32

	hpmc		 1
	def		 2
	def		 3
	extint		 4
	def		 5
	itlb_20		 PARISC_ITLB_TRAP
	def		 7
	def		 8
	def              9
	def		10
	def		11
	def		12
	def		13
	def		14
	dtlb_20		15
	naitlb_20	16
	nadtlb_20	17
	def		18
	def		19
	dbit_20		20
	def		21
	def		22
	def		23
	def		24
	def		25
	def		26
	def		27
	def		28
	def		29
	def		30
	def		31
END(fault_vector_20)

#ifndef CONFIG_64BIT

	.align 2048

ENTRY(fault_vector_11)
	/* First vector is invalid (0) */
	.ascii	"cows can fly"
	.byte 0
	.align 32

	hpmc		 1
	def		 2
	def		 3
	extint		 4
	def		 5
	itlb_11		 PARISC_ITLB_TRAP
	def		 7
	def		 8
	def              9
	def		10
	def		11
	def		12
	def		13
	def		14
	dtlb_11		15
	naitlb_11	16
	nadtlb_11	17
	def		18
	def		19
	dbit_11		20
	def		21
	def		22
	def		23
	def		24
	def		25
	def		26
	def		27
	def		28
	def		29
	def		30
	def		31
END(fault_vector_11)

#endif
	/* Fault vector is separately protected and *must* be on its own page */
	.align		PAGE_SIZE

	.import		handle_interruption,code
	.import		do_cpu_irq_mask,code

	/*
	 * Child Returns here
	 *
	 * copy_thread moved args into task save area.
	 */

ENTRY(ret_from_kernel_thread)
	/* Call schedule_tail first though */
	BL	schedule_tail, %r2
	nop

	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
	LDREG	TASK_PT_GR25(%r1), %r26
#ifdef CONFIG_64BIT
	LDREG	TASK_PT_GR27(%r1), %r27
#endif
	LDREG	TASK_PT_GR26(%r1), %r1
	ble	0(%sr7, %r1)
	copy	%r31, %r2
	b	finish_child_return
	nop
END(ret_from_kernel_thread)


	/*
	 * struct task_struct *_switch_to(struct task_struct *prev,
	 *	struct task_struct *next)
	 *
	 * switch kernel stacks and return prev */
ENTRY_CFI(_switch_to)
	STREG	 %r2, -RP_OFFSET(%r30)

	callee_save_float
	callee_save

	load32	_switch_to_ret, %r2

	STREG	%r2, TASK_PT_KPC(%r26)
	LDREG	TASK_PT_KPC(%r25), %r2

	STREG	%r30, TASK_PT_KSP(%r26)
	LDREG	TASK_PT_KSP(%r25), %r30
	LDREG	TASK_THREAD_INFO(%r25), %r25
	bv	%r0(%r2)
	mtctl   %r25,%cr30

ENTRY(_switch_to_ret)
	mtctl	%r0, %cr0		/* Needed for single stepping */
	callee_rest
	callee_rest_float

	LDREG	-RP_OFFSET(%r30), %r2
	bv	%r0(%r2)
	copy	%r26, %r28
ENDPROC_CFI(_switch_to)
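
	/* In C terms _switch_to does roughly this (sketch; kpc/ksp stand
	 * for the TASK_PT_KPC/TASK_PT_KSP save slots):
	 *
	 *	prev->kpc = &&_switch_to_ret;	// where prev will resume
	 *	prev->ksp = sp;
	 *	sp = next->ksp;
	 *	cr30 = task_thread_info(next);
	 *	goto *next->kpc;		// lands in _switch_to_ret,
	 *					// which returns prev in %r28
	 */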

	/*
	 * Common rfi return path for interruptions, kernel execve, and
	 * sys_rt_sigreturn (sometimes).  The sys_rt_sigreturn syscall will
	 * return via this path if the signal was received when the process
	 * was running; if the process was blocked on a syscall then the
	 * normal syscall_exit path is used.  All syscalls for traced
	 * processes exit via intr_restore.
	 *
	 * XXX If any syscalls that change a process's space id ever exit
	 * this way, then we will need to copy %sr3 into PT_SR[3..7], and
	 * adjust IASQ[0..1].
	 *
	 */

	.align	PAGE_SIZE

ENTRY_CFI(syscall_exit_rfi)
	mfctl   %cr30,%r16
	LDREG	TI_TASK(%r16), %r16	/* thread_info -> task_struct */
	ldo	TASK_REGS(%r16),%r16
	/* Force iaoq to userspace, as the user has had access to our current
	 * context via sigcontext. Also filter the PSW for the same reason.
	 */
	LDREG	PT_IAOQ0(%r16),%r19
	depi	3,31,2,%r19
	STREG	%r19,PT_IAOQ0(%r16)
	LDREG	PT_IAOQ1(%r16),%r19
	depi	3,31,2,%r19
	STREG	%r19,PT_IAOQ1(%r16)
	LDREG   PT_PSW(%r16),%r19
	load32	USER_PSW_MASK,%r1
#ifdef CONFIG_64BIT
	load32	USER_PSW_HI_MASK,%r20
	depd    %r20,31,32,%r1
#endif
	and     %r19,%r1,%r19 /* Mask out bits that user shouldn't play with */
	load32	USER_PSW,%r1
	or      %r19,%r1,%r19 /* Make sure default USER_PSW bits are set */
	STREG   %r19,PT_PSW(%r16)

	/*
	 * If we aren't being traced, we never saved space registers
	 * (we don't store them in the sigcontext), so set them
	 * to "proper" values now (otherwise we'll wind up restoring
	 * whatever was last stored in the task structure, which might
	 * be inconsistent if an interrupt occurred while on the gateway
	 * page). Note that we may be "trashing" values the user put in
	 * them, but we don't support the user changing them.
	 */

	STREG   %r0,PT_SR2(%r16)
	mfsp    %sr3,%r19
	STREG   %r19,PT_SR0(%r16)
	STREG   %r19,PT_SR1(%r16)
	STREG   %r19,PT_SR3(%r16)
	STREG   %r19,PT_SR4(%r16)
	STREG   %r19,PT_SR5(%r16)
	STREG   %r19,PT_SR6(%r16)
	STREG   %r19,PT_SR7(%r16)

ENTRY(intr_return)
	/* check for reschedule */
	mfctl   %cr30,%r1
	LDREG   TI_FLAGS(%r1),%r19	/* sched.h: TIF_NEED_RESCHED */
	bb,<,n	%r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */

	.import do_notify_resume,code
intr_check_sig:
	/* As above */
	mfctl   %cr30,%r1
	LDREG	TI_FLAGS(%r1),%r19
	ldi	(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME), %r20
	and,COND(<>)	%r19, %r20, %r0
	b,n	intr_restore	/* skip past if we've nothing to do */

	/* This check is critical to having LWS
	 * working. The IASQ is zero on the gateway
	 * page and we cannot deliver any signals until
	 * we get off the gateway page.
	 *
	 * Only do signals if we are returning to user space
	 */
	LDREG	PT_IASQ0(%r16), %r20
	cmpib,COND(=),n 0,%r20,intr_restore /* backward */
	LDREG	PT_IASQ1(%r16), %r20
	cmpib,COND(=),n 0,%r20,intr_restore /* backward */

	/* NOTE: We need to enable interrupts if we have to deliver
	 * signals. We used to do this earlier but it caused kernel
	 * stack overflows. */
	ssm     PSW_SM_I, %r0

	copy	%r0, %r25			/* long in_syscall = 0 */
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29			/* Reference param save area */
#endif

	BL	do_notify_resume,%r2
	copy	%r16, %r26			/* struct pt_regs *regs */

	b,n	intr_check_sig

intr_restore:
	copy            %r16,%r29
	ldo             PT_FR31(%r29),%r1
	rest_fp         %r1
	rest_general    %r29

	/* inverse of virt_map */
	pcxt_ssm_bug
	rsm             PSW_SM_QUIET,%r0	/* prepare for rfi */
	tophys_r1       %r29

	/* Restore space id's and special cr's from PT_REGS
	 * structure pointed to by r29
	 */
	rest_specials	%r29

	/* IMPORTANT: rest_stack restores r29 last (we are using it)!
	 * It also restores r1 and r30.
	 */
	rest_stack

	rfi
	nop

#ifndef CONFIG_PREEMPT
# define intr_do_preempt	intr_restore
#endif /* !CONFIG_PREEMPT */

	.import schedule,code
intr_do_resched:
	/* Only call schedule on return to userspace. If we're returning
	 * to kernel space, we may schedule if CONFIG_PREEMPT, otherwise
	 * we jump back to intr_restore.
	 */
	LDREG	PT_IASQ0(%r16), %r20
	cmpib,COND(=)	0, %r20, intr_do_preempt
	nop
	LDREG	PT_IASQ1(%r16), %r20
	cmpib,COND(=)	0, %r20, intr_do_preempt
	nop

	/* NOTE: We need to enable interrupts if we schedule.  We used
	 * to do this earlier but it caused kernel stack overflows. */
	ssm     PSW_SM_I, %r0

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif

	ldil	L%intr_check_sig, %r2
#ifndef CONFIG_64BIT
	b	schedule
#else
	load32	schedule, %r20
	bv	%r0(%r20)
#endif
	ldo	R%intr_check_sig(%r2), %r2

	/* preempt the current task on returning to kernel
	 * mode from an interrupt, iff need_resched is set,
	 * and preempt_count is 0.  Otherwise, we continue on
	 * our merry way back to the current running task.
	 */
#ifdef CONFIG_PREEMPT
	.import preempt_schedule_irq,code
intr_do_preempt:
	rsm	PSW_SM_I, %r0		/* disable interrupts */

	/* current_thread_info()->preempt_count */
	mfctl	%cr30, %r1
	LDREG	TI_PRE_COUNT(%r1), %r19
	cmpib,COND(<>)	0, %r19, intr_restore	/* if preempt_count > 0 */
	nop				/* prev insn branched backwards */

	/* check if we interrupted a critical path */
	LDREG	PT_PSW(%r16), %r20
	bb,<,n	%r20, 31 - PSW_SM_I, intr_restore
	nop

	BL	preempt_schedule_irq, %r2
	nop

	b,n	intr_restore		/* ssm PSW_SM_I done by intr_restore */
#endif /* CONFIG_PREEMPT */

	/*
	 * External interrupts.
	 */

intr_extint:
	cmpib,COND(=),n 0,%r16,1f

	get_stack_use_cr30
	b,n 2f

1:
	get_stack_use_r30
2:
	save_specials	%r29
	virt_map
	save_general	%r29

	ldo	PT_FR0(%r29), %r24
	save_fp	%r24

	loadgp

	copy	%r29, %r26	/* arg0 is pt_regs */
	copy	%r29, %r16	/* save pt_regs */

	ldil	L%intr_return, %r2

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29	/* Reference param save area */
#endif

	b	do_cpu_irq_mask
	ldo	R%intr_return(%r2), %r2	/* return to intr_return, not here */
ENDPROC_CFI(syscall_exit_rfi)


	/* Generic interruptions (illegal insn, unaligned, page fault, etc) */

ENTRY_CFI(intr_save)		/* for os_hpmc */
	mfsp    %sr7,%r16
	cmpib,COND(=),n 0,%r16,1f
	get_stack_use_cr30
	b	2f
	copy    %r8,%r26

1:
	get_stack_use_r30
	copy    %r8,%r26

2:
	save_specials	%r29

	/* If this trap is an itlb miss, skip saving/adjusting isr/ior */
	cmpib,COND(=),n        PARISC_ITLB_TRAP,%r26,skip_save_ior


	mfctl           %isr, %r16
	nop		/* serialize mfctl on PA 2.0 to avoid 4 cycle penalty */
	mfctl           %ior, %r17


#ifdef CONFIG_64BIT
	/*
	 * If the interrupted code was running with W bit off (32 bit),
	 * clear the b bits (bits 0 & 1) in the ior.
	 * save_specials left ipsw value in r8 for us to test.
	 */
	extrd,u,*<>     %r8,PSW_W_BIT,1,%r0
	depdi           0,1,2,%r17

	/* adjust isr/ior: get high bits from isr and deposit in ior */
	space_adjust	%r16,%r17,%r1
#endif
	STREG           %r16, PT_ISR(%r29)
	STREG           %r17, PT_IOR(%r29)

#if 0 && defined(CONFIG_64BIT)
	/* Revisit when we have 64-bit code above 4Gb */
	b,n		intr_save2

skip_save_ior:
	/* We have an itlb miss, and when executing code above 4 Gb on ILP64, we
	 * need to adjust iasq/iaoq here in the same way we adjusted isr/ior
	 * above.
	 */
	extrd,u,*	%r8,PSW_W_BIT,1,%r1
	cmpib,COND(=),n	1,%r1,intr_save2
	LDREG		PT_IASQ0(%r29), %r16
	LDREG		PT_IAOQ0(%r29), %r17
	/* adjust iasq/iaoq */
	space_adjust	%r16,%r17,%r1
	STREG           %r16, PT_IASQ0(%r29)
	STREG           %r17, PT_IAOQ0(%r29)
#else
skip_save_ior:
#endif

intr_save2:
	virt_map
	save_general	%r29

	ldo		PT_FR0(%r29), %r25
	save_fp		%r25

	loadgp

	copy		%r29, %r25	/* arg1 is pt_regs */
#ifdef CONFIG_64BIT
	ldo		-16(%r30),%r29	/* Reference param save area */
#endif

	ldil		L%intr_check_sig, %r2
	copy		%r25, %r16	/* save pt_regs */

	b		handle_interruption
	ldo		R%intr_check_sig(%r2), %r2
ENDPROC_CFI(intr_save)


	/*
	 * Note for all tlb miss handlers:
	 *
	 * cr24 contains a pointer to the kernel address space
	 * page directory.
	 *
	 * cr25 contains a pointer to the current user address
	 * space page directory.
	 *
	 * sr3 will contain the space id of the user address space
	 * of the current running thread while that thread is
	 * running in the kernel.
	 */

	/*
	 * register number allocations.  Note that these are all
	 * in the shadowed registers
	 */

	t0 = r1		/* temporary register 0 */
	va = r8		/* virtual address for which the trap occurred */
	t1 = r9		/* temporary register 1 */
	pte  = r16	/* pte/phys page # */
	prot = r17	/* prot bits */
	spc  = r24	/* space for which the trap occurred */
	ptp = r25	/* page directory/page table pointer */

#ifdef CONFIG_64BIT

dtlb_miss_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dtlb_fault

	L3_ptep		ptp,pte,t0,va,dtlb_check_alias_20w

	tlb_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_20w
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	idtlbt          pte,prot

	tlb_unlock1	spc,t0
	rfir
	nop

dtlb_check_alias_20w:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,20

	idtlbt          pte,prot

	rfir
	nop

nadtlb_miss_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,nadtlb_fault

	L3_ptep		ptp,pte,t0,va,nadtlb_check_alias_20w

	tlb_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_20w
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	idtlbt          pte,prot

	tlb_unlock1	spc,t0
	rfir
	nop

nadtlb_check_alias_20w:
	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,20

	idtlbt          pte,prot

	rfir
	nop

#else

dtlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,dtlb_fault

	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_11

	tlb_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_11
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	tlb_unlock1	spc,t0
	rfir
	nop

dtlb_check_alias_11:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,11

	idtlba          pte,(va)
	idtlbp          prot,(va)

	rfir
	nop

nadtlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,nadtlb_fault

	L2_ptep		ptp,pte,t0,va,nadtlb_check_alias_11

	tlb_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_11
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	tlb_unlock1	spc,t0
	rfir
	nop

nadtlb_check_alias_11:
	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,11

	idtlba          pte,(va)
	idtlbp          prot,(va)

	rfir
	nop

dtlb_miss_20:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dtlb_fault

	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_20

	tlb_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_20
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	idtlbt          pte,prot

	tlb_unlock1	spc,t0
	rfir
	nop

dtlb_check_alias_20:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,20

	idtlbt          pte,prot

	rfir
	nop

nadtlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,nadtlb_fault

	L2_ptep		ptp,pte,t0,va,nadtlb_check_alias_20

	tlb_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_20
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	idtlbt		pte,prot

	tlb_unlock1	spc,t0
	rfir
	nop

nadtlb_check_alias_20:
	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,20

	idtlbt          pte,prot

	rfir
	nop

#endif

nadtlb_emulate:

	/*
	 * Non access misses can be caused by fdc,fic,pdc,lpa,probe and
	 * probei instructions. We don't want to fault for these
	 * instructions (not only does it not make sense, it can cause
	 * deadlocks, since some flushes are done with the mmap
	 * semaphore held). If the translation doesn't exist, we can't
	 * insert a translation, so have to emulate the side effects
	 * of the instruction. Since we don't insert a translation
	 * we can get a lot of faults during a flush loop, so it makes
	 * sense to try to do it here with minimum overhead. We only
	 * emulate fdc,fic,pdc,probew,prober instructions whose base
	 * and index registers are not shadowed. We defer everything
	 * else to the "slow" path.
	 */

	mfctl           %cr19,%r9 /* Get iir */

	/* PA 2.0 Arch Ref. Book pg 382 has a good description of the insn bits.
	   Checks for fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw */

	/* Checks for fdc,fdce,pdc,"fic,4f" only */
	ldi             0x280,%r16
	and             %r9,%r16,%r17
	cmpb,<>,n       %r16,%r17,nadtlb_probe_check
	bb,>=,n         %r9,26,nadtlb_nullify  /* m bit not set, just nullify */
	BL		get_register,%r25
	extrw,u         %r9,15,5,%r8           /* Get index register # */
	cmpib,COND(=),n        -1,%r1,nadtlb_fault    /* have to use slow path */
	copy            %r1,%r24
	BL		get_register,%r25
	extrw,u         %r9,10,5,%r8           /* Get base register # */
	cmpib,COND(=),n        -1,%r1,nadtlb_fault    /* have to use slow path */
	BL		set_register,%r25
	add,l           %r1,%r24,%r1           /* doesn't affect c/b bits */

nadtlb_nullify:
	mfctl           %ipsw,%r8
	ldil            L%PSW_N,%r9
	or              %r8,%r9,%r8            /* Set PSW_N */
	mtctl           %r8,%ipsw

	rfir
	nop

	/*
		When there is no translation for the probe address then we
		must nullify the insn and return zero in the target register.
		This will indicate to the calling code that it does not have
		write/read privileges to this address.

		This should technically work for prober and probew in PA 1.1,
		and also probe,r and probe,w in PA 2.0

		WARNING: USE ONLY NON-SHADOW REGISTERS WITH PROBE INSN!
		THE SLOW-PATH EMULATION HAS NOT BEEN WRITTEN YET.

	*/
nadtlb_probe_check:
	ldi             0x80,%r16
	and             %r9,%r16,%r17
	cmpb,<>,n       %r16,%r17,nadtlb_fault /* Must be probe,[rw]*/
	BL              get_register,%r25      /* Find the target register */
	extrw,u         %r9,31,5,%r8           /* Get target register */
	cmpib,COND(=),n        -1,%r1,nadtlb_fault    /* have to use slow path */
	BL		set_register,%r25
	copy            %r0,%r1                /* Write zero to target register */
	b nadtlb_nullify                       /* Nullify return insn */
	nop


#ifdef CONFIG_64BIT
itlb_miss_20w:

	/*
	 * I miss is a little different, since we allow users to fault
	 * on the gateway page which is in the kernel address space.
	 */

	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,itlb_fault

	L3_ptep		ptp,pte,t0,va,itlb_fault

	tlb_lock	spc,ptp,pte,t0,t1,itlb_fault
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	iitlbt          pte,prot

	tlb_unlock1	spc,t0
	rfir
	nop

naitlb_miss_20w:

	/*
	 * I miss is a little different, since we allow users to fault
	 * on the gateway page which is in the kernel address space.
	 */

	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,naitlb_fault

	L3_ptep		ptp,pte,t0,va,naitlb_check_alias_20w

	tlb_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_20w
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	iitlbt          pte,prot

	tlb_unlock1	spc,t0
	rfir
	nop

naitlb_check_alias_20w:
	do_alias	spc,t0,t1,va,pte,prot,naitlb_fault,20

	iitlbt		pte,prot

	rfir
	nop

#else

itlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,itlb_fault

	L2_ptep		ptp,pte,t0,va,itlb_fault

	tlb_lock	spc,ptp,pte,t0,t1,itlb_fault
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	iitlba		pte,(%sr1,va)
	iitlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	tlb_unlock1	spc,t0
	rfir
	nop

naitlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,naitlb_fault

	L2_ptep		ptp,pte,t0,va,naitlb_check_alias_11

	tlb_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_11
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	iitlba		pte,(%sr1,va)
	iitlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	tlb_unlock1	spc,t0
	rfir
	nop

naitlb_check_alias_11:
	do_alias	spc,t0,t1,va,pte,prot,itlb_fault,11

	iitlba          pte,(%sr0, va)
	iitlbp          prot,(%sr0, va)

	rfir
	nop


itlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,itlb_fault

	L2_ptep		ptp,pte,t0,va,itlb_fault

	tlb_lock	spc,ptp,pte,t0,t1,itlb_fault
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	iitlbt          pte,prot

	tlb_unlock1	spc,t0
	rfir
	nop

naitlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,naitlb_fault

	L2_ptep		ptp,pte,t0,va,naitlb_check_alias_20

	tlb_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_20
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	iitlbt          pte,prot

	tlb_unlock1	spc,t0
	rfir
	nop

naitlb_check_alias_20:
	do_alias	spc,t0,t1,va,pte,prot,naitlb_fault,20

	iitlbt          pte,prot

	rfir
	nop

#endif

#ifdef CONFIG_64BIT

dbit_trap_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dbit_fault

	L3_ptep		ptp,pte,t0,va,dbit_fault

	tlb_lock	spc,ptp,pte,t0,t1,dbit_fault
	update_dirty	ptp,pte,t1

	make_insert_tlb	spc,pte,prot,t1

	idtlbt          pte,prot

	tlb_unlock0	spc,t0
	rfir
	nop
#else

dbit_trap_11:

	get_pgd		spc,ptp

	space_check	spc,t0,dbit_fault

	L2_ptep		ptp,pte,t0,va,dbit_fault

	tlb_lock	spc,ptp,pte,t0,t1,dbit_fault
	update_dirty	ptp,pte,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp            %sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp            t1, %sr1     /* Restore sr1 */

	tlb_unlock0	spc,t0
	rfir
	nop

dbit_trap_20:
	get_pgd		spc,ptp

	space_check	spc,t0,dbit_fault

	L2_ptep		ptp,pte,t0,va,dbit_fault

	tlb_lock	spc,ptp,pte,t0,t1,dbit_fault
	update_dirty	ptp,pte,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	idtlbt		pte,prot

	tlb_unlock0	spc,t0
	rfir
	nop
#endif

	.import handle_interruption,code

kernel_bad_space:
	b               intr_save
	ldi             31,%r8  /* Use an unused code */

dbit_fault:
	b               intr_save
	ldi             20,%r8

itlb_fault:
	b               intr_save
	ldi             6,%r8

nadtlb_fault:
	b               intr_save
	ldi             17,%r8

naitlb_fault:
	b               intr_save
	ldi             16,%r8

dtlb_fault:
	b               intr_save
	ldi             15,%r8

	/* Register saving semantics for system calls:

	   %r1		   clobbered by system call macro in userspace
	   %r2		   saved in PT_REGS by gateway page
	   %r3  - %r18	   preserved by C code (saved by signal code)
	   %r19 - %r20	   saved in PT_REGS by gateway page
	   %r21 - %r22	   non-standard syscall args
			   stored in kernel stack by gateway page
	   %r23 - %r26	   arg3-arg0, saved in PT_REGS by gateway page
	   %r27 - %r30	   saved in PT_REGS by gateway page
	   %r31		   syscall return pointer
	 */

	/* Floating point registers (FIXME: what do we do with these?)

	   %fr0  - %fr3	   status/exception, not preserved
	   %fr4  - %fr7	   arguments
	   %fr8	 - %fr11   not preserved by C code
	   %fr12 - %fr21   preserved by C code
	   %fr22 - %fr31   not preserved by C code
	 */

	.macro	reg_save regs
	STREG	%r3, PT_GR3(\regs)
	STREG	%r4, PT_GR4(\regs)
	STREG	%r5, PT_GR5(\regs)
	STREG	%r6, PT_GR6(\regs)
	STREG	%r7, PT_GR7(\regs)
	STREG	%r8, PT_GR8(\regs)
	STREG	%r9, PT_GR9(\regs)
	STREG   %r10,PT_GR10(\regs)
	STREG   %r11,PT_GR11(\regs)
	STREG   %r12,PT_GR12(\regs)
	STREG   %r13,PT_GR13(\regs)
	STREG   %r14,PT_GR14(\regs)
	STREG   %r15,PT_GR15(\regs)
	STREG   %r16,PT_GR16(\regs)
	STREG   %r17,PT_GR17(\regs)
	STREG   %r18,PT_GR18(\regs)
	.endm

	.macro	reg_restore regs
	LDREG	PT_GR3(\regs), %r3
	LDREG	PT_GR4(\regs), %r4
	LDREG	PT_GR5(\regs), %r5
	LDREG	PT_GR6(\regs), %r6
	LDREG	PT_GR7(\regs), %r7
	LDREG	PT_GR8(\regs), %r8
	LDREG	PT_GR9(\regs), %r9
	LDREG   PT_GR10(\regs),%r10
	LDREG   PT_GR11(\regs),%r11
	LDREG   PT_GR12(\regs),%r12
	LDREG   PT_GR13(\regs),%r13
	LDREG   PT_GR14(\regs),%r14
	LDREG   PT_GR15(\regs),%r15
	LDREG   PT_GR16(\regs),%r16
	LDREG   PT_GR17(\regs),%r17
	LDREG   PT_GR18(\regs),%r18
	.endm

	.macro	fork_like name
ENTRY_CFI(sys_\name\()_wrapper)
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
	ldo	TASK_REGS(%r1),%r1
	reg_save %r1
	mfctl	%cr27, %r28
	ldil	L%sys_\name, %r31
	be	R%sys_\name(%sr4,%r31)
	STREG	%r28, PT_CR27(%r1)
ENDPROC_CFI(sys_\name\()_wrapper)
	.endm

fork_like clone
fork_like fork
fork_like vfork

	/* Set the return value for the child */
ENTRY(child_return)
	BL	schedule_tail, %r2
	nop
finish_child_return:
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
	ldo	TASK_REGS(%r1),%r1	 /* get pt regs */

	LDREG	PT_CR27(%r1), %r3
	mtctl	%r3, %cr27
	reg_restore %r1
	b	syscall_exit
	copy	%r0,%r28
END(child_return)

ENTRY_CFI(sys_rt_sigreturn_wrapper)
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26
	ldo	TASK_REGS(%r26),%r26	/* get pt regs */
	/* Don't save regs, we are going to restore them from sigcontext. */
	STREG	%r2, -RP_OFFSET(%r30)
#ifdef CONFIG_64BIT
	ldo	FRAME_SIZE(%r30), %r30
	BL	sys_rt_sigreturn,%r2
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	BL	sys_rt_sigreturn,%r2
	ldo	FRAME_SIZE(%r30), %r30
#endif

	ldo	-FRAME_SIZE(%r30), %r30
	LDREG	-RP_OFFSET(%r30), %r2

	/* FIXME: I think we need to restore a few more things here. */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r1	/* get pt regs */
	reg_restore %r1

	/* If the signal was received while the process was blocked on a
	 * syscall, then r2 will take us to syscall_exit; otherwise r2 will
	 * take us to syscall_exit_rfi and on to intr_return.
	 */
	bv	%r0(%r2)
	LDREG	PT_GR28(%r1),%r28  /* reload original r28 for syscall_exit */
ENDPROC_CFI(sys_rt_sigreturn_wrapper)

ENTRY(syscall_exit)
	/* NOTE: Not all syscalls exit this way.  rt_sigreturn will exit
	 * via syscall_exit_rfi if the signal was received while the process
	 * was running.
	 */

	/* save return value now */

	mfctl     %cr30, %r1
	LDREG     TI_TASK(%r1),%r1
	STREG     %r28,TASK_PT_GR28(%r1)

	/* Seems to me that dp could be wrong here, if the syscall involved
	 * calling a module, and nothing got round to restoring dp on return.
	 */
	loadgp

syscall_check_resched:

	/* check for reschedule */

	LDREG	TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19	/* long */
	bb,<,n	%r19, 31-TIF_NEED_RESCHED, syscall_do_resched /* forward */

	.import do_signal,code
syscall_check_sig:
	LDREG	TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19
	ldi	(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME), %r26
	and,COND(<>)	%r19, %r26, %r0
	b,n	syscall_restore	/* skip past if we've nothing to do */

syscall_do_signal:
	/* Save callee-save registers (for sigcontext).
	 * FIXME: After this point the process structure should be
	 * consistent with all the relevant state of the process
	 * before the syscall.  We need to verify this.
	 */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1), %r26		/* struct pt_regs *regs */
	reg_save %r26

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29			/* Reference param save area */
#endif

	BL	do_notify_resume,%r2
	ldi	1, %r25				/* long in_syscall = 1 */

	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1), %r20		/* reload pt_regs */
	reg_restore %r20

	b,n     syscall_check_sig

syscall_restore:
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1

	/* Are we being ptraced? */
	ldw	TASK_FLAGS(%r1),%r19
	ldi	_TIF_SYSCALL_TRACE_MASK,%r2
	and,COND(=)	%r19,%r2,%r0
	b,n	syscall_restore_rfi

	ldo	TASK_PT_FR31(%r1),%r19		   /* reload fpregs */
	rest_fp	%r19

	LDREG	TASK_PT_SAR(%r1),%r19		   /* restore SAR */
	mtsar	%r19

	LDREG	TASK_PT_GR2(%r1),%r2		   /* restore user rp */
	LDREG	TASK_PT_GR19(%r1),%r19
	LDREG   TASK_PT_GR20(%r1),%r20
	LDREG	TASK_PT_GR21(%r1),%r21
	LDREG	TASK_PT_GR22(%r1),%r22
	LDREG	TASK_PT_GR23(%r1),%r23
	LDREG	TASK_PT_GR24(%r1),%r24
	LDREG	TASK_PT_GR25(%r1),%r25
	LDREG	TASK_PT_GR26(%r1),%r26
	LDREG	TASK_PT_GR27(%r1),%r27	   /* restore user dp */
	LDREG	TASK_PT_GR28(%r1),%r28	   /* syscall return value */
	LDREG	TASK_PT_GR29(%r1),%r29
	LDREG	TASK_PT_GR31(%r1),%r31	   /* restore syscall rp */

	/* NOTE: We use rsm/ssm pair to make this operation atomic */
	LDREG   TASK_PT_GR30(%r1),%r1              /* Get user sp */
	rsm     PSW_SM_I, %r0
	copy    %r1,%r30                           /* Restore user sp */
	mfsp    %sr3,%r1                           /* Get user space id */
	mtsp    %r1,%sr7                           /* Restore sr7 */
	ssm     PSW_SM_I, %r0

	/* Set sr2 to zero for userspace syscalls to work. */
	mtsp	%r0,%sr2
	mtsp	%r1,%sr4			   /* Restore sr4 */
	mtsp	%r1,%sr5			   /* Restore sr5 */
	mtsp	%r1,%sr6			   /* Restore sr6 */

	depi	3,31,2,%r31			   /* ensure return to user mode. */

#ifdef CONFIG_64BIT
	/* decide whether to reset the wide mode bit
	 *
	 * For a syscall, the W bit is stored in the lowest bit
	 * of sp.  Extract it and reset W if it is zero */
	extrd,u,*<>	%r30,63,1,%r1
	rsm	PSW_SM_W, %r0
	/* now reset the lowest bit of sp if it was set */
	xor	%r30,%r1,%r30
#endif
	be,n    0(%sr3,%r31)                       /* return to user space */

	/* We have to return via an RFI, so that PSW T and R bits can be set
	 * appropriately.
	 * This sets up pt_regs so we can return via intr_restore, which is not
	 * the most efficient way of doing things, but it works.
	 */
syscall_restore_rfi:
	ldo	-1(%r0),%r2			   /* Set recovery cntr to -1 */
	mtctl	%r2,%cr0			   /*   for immediate trap */
	LDREG	TASK_PT_PSW(%r1),%r2		   /* Get old PSW */
	ldi	0x0b,%r20			   /* Create new PSW */
	depi	-1,13,1,%r20			   /* C, Q, D, and I bits */

	/* The values of SINGLESTEP_BIT and BLOCKSTEP_BIT are
	 * set in thread_info.h and converted to PA bitmap
	 * numbers in asm-offsets.c */

	/* if ((%r19.SINGLESTEP_BIT)) { %r20.27=1} */
	extru,=	%r19,TIF_SINGLESTEP_PA_BIT,1,%r0
	depi	-1,27,1,%r20			   /* R bit */

	/* if ((%r19.BLOCKSTEP_BIT)) { %r20.7=1} */
	extru,= %r19,TIF_BLOCKSTEP_PA_BIT,1,%r0
	depi	-1,7,1,%r20			   /* T bit */

	STREG	%r20,TASK_PT_PSW(%r1)

	/* Always store space registers, since sr3 can be changed (e.g. fork) */

	mfsp    %sr3,%r25
	STREG   %r25,TASK_PT_SR3(%r1)
	STREG   %r25,TASK_PT_SR4(%r1)
	STREG   %r25,TASK_PT_SR5(%r1)
	STREG   %r25,TASK_PT_SR6(%r1)
	STREG   %r25,TASK_PT_SR7(%r1)
	STREG   %r25,TASK_PT_IASQ0(%r1)
	STREG   %r25,TASK_PT_IASQ1(%r1)

	/* XXX W bit??? */
	/* Now if old D bit is clear, it means we didn't save all registers
	 * on syscall entry, so do that now.  This only happens on TRACEME
	 * calls, or if someone attached to us while we were on a syscall.
	 * We could make this more efficient by not saving r3-r18, but
	 * then we wouldn't be able to use the common intr_restore path.
	 * It is only for traced processes anyway, so performance is not
	 * an issue.
	 */
	bb,<	%r2,30,pt_regs_ok		   /* Branch if D set */
	ldo	TASK_REGS(%r1),%r25
	reg_save %r25				   /* Save r3 to r18 */

	/* Save the current sr */
	mfsp	%sr0,%r2
	STREG	%r2,TASK_PT_SR0(%r1)

	/* Save the scratch sr */
	mfsp	%sr1,%r2
	STREG	%r2,TASK_PT_SR1(%r1)

	/* sr2 should be set to zero for userspace syscalls */
	STREG	%r0,TASK_PT_SR2(%r1)

	LDREG	TASK_PT_GR31(%r1),%r2
	depi	3,31,2,%r2		   /* ensure return to user mode. */
	STREG   %r2,TASK_PT_IAOQ0(%r1)
	ldo	4(%r2),%r2
	STREG	%r2,TASK_PT_IAOQ1(%r1)
	b	intr_restore
	copy	%r25,%r16

pt_regs_ok:
	LDREG	TASK_PT_IAOQ0(%r1),%r2
	depi	3,31,2,%r2		   /* ensure return to user mode. */
	STREG	%r2,TASK_PT_IAOQ0(%r1)
	LDREG	TASK_PT_IAOQ1(%r1),%r2
	depi	3,31,2,%r2
	STREG	%r2,TASK_PT_IAOQ1(%r1)
	b	intr_restore
	copy	%r25,%r16

syscall_do_resched:
	load32	syscall_check_resched,%r2 /* if resched, we start over again */
	load32	schedule,%r19
	bv	%r0(%r19)		/* jumps to schedule() */
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	nop
#endif
END(syscall_exit)


#ifdef CONFIG_FUNCTION_TRACER

	.import ftrace_function_trampoline,code
	.align L1_CACHE_BYTES
ENTRY_CFI(mcount, caller)
_mcount:
	.export _mcount,data
	/*
	 * The 64bit mcount() function pointer needs 4 dwords, of which the
	 * first two are free.  We optimize it here and put 2 instructions for
	 * calling mcount(), and 2 instructions for ftrace_stub().  That way we
	 * have all on one L1 cacheline.
	 */
	b	ftrace_function_trampoline
	copy	%r3, %arg2	/* caller original %sp */
ftrace_stub:
	.globl ftrace_stub
	.type  ftrace_stub, @function
#ifdef CONFIG_64BIT
	bve	(%rp)
#else
	bv	%r0(%rp)
#endif
	nop
#ifdef CONFIG_64BIT
	.dword mcount
	.dword 0 /* code in head.S puts value of global gp here */
#endif
ENDPROC_CFI(mcount)

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	.align 8
ENTRY_CFI(return_to_handler, caller,frame=FRAME_SIZE)
	.export parisc_return_to_handler,data
parisc_return_to_handler:
	copy %r3,%r1
	STREG %r0,-RP_OFFSET(%sp)	/* store 0 as %rp */
	copy %sp,%r3
	STREGM %r1,FRAME_SIZE(%sp)
	STREG %ret0,8(%r3)
	STREG %ret1,16(%r3)

#ifdef CONFIG_64BIT
	loadgp
#endif

	/* call ftrace_return_to_handler(0) */
	.import ftrace_return_to_handler,code
	load32 ftrace_return_to_handler,%ret0
	load32 .Lftrace_ret,%r2
#ifdef CONFIG_64BIT
	ldo -16(%sp),%ret1		/* Reference param save area */
	bve	(%ret0)
#else
	bv	%r0(%ret0)
#endif
	ldi 0,%r26
.Lftrace_ret:
	copy %ret0,%rp

	/* restore original return values */
	LDREG 8(%r3),%ret0
	LDREG 16(%r3),%ret1

	/* return from function */
#ifdef CONFIG_64BIT
	bve	(%rp)
#else
	bv	%r0(%rp)
#endif
	LDREGM -FRAME_SIZE(%sp),%r3
ENDPROC_CFI(return_to_handler)

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#endif	/* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_IRQSTACKS
/* void call_on_stack(unsigned long param1, void *func,
		      unsigned long new_stack) */
ENTRY_CFI(call_on_stack, FRAME=2*FRAME_SIZE,CALLS,SAVE_RP,SAVE_SP)
ENTRY(_call_on_stack)
	copy	%sp, %r1

	/* Regarding the HPPA calling conventions for function pointers,
	   we assume the PIC register is not changed across call.  For
	   CONFIG_64BIT, the argument pointer is left to point at the
	   argument region allocated for the call to call_on_stack. */

	/* Switch to new stack.  We allocate two frames.  */
	ldo	2*FRAME_SIZE(%arg2), %sp
# ifdef CONFIG_64BIT
	/* Save previous stack pointer and return pointer in frame marker */
	STREG	%rp, -FRAME_SIZE-RP_OFFSET(%sp)
	/* Calls always use function descriptor */
	LDREG	16(%arg1), %arg1
	bve,l	(%arg1), %rp
	STREG	%r1, -FRAME_SIZE-REG_SZ(%sp)
	LDREG	-FRAME_SIZE-RP_OFFSET(%sp), %rp
	bve	(%rp)
	LDREG	-FRAME_SIZE-REG_SZ(%sp), %sp
# else
	/* Save previous stack pointer and return pointer in frame marker */
	STREG	%r1, -FRAME_SIZE-REG_SZ(%sp)
	STREG	%rp, -FRAME_SIZE-RP_OFFSET(%sp)
	/* Calls use function descriptor if PLABEL bit is set */
	bb,>=,n	%arg1, 30, 1f
	depwi	0,31,2, %arg1
	LDREG	0(%arg1), %arg1
1:
	be,l	0(%sr4,%arg1), %sr0, %r31
	copy	%r31, %rp
	LDREG	-FRAME_SIZE-RP_OFFSET(%sp), %rp
	bv	(%rp)
	LDREG	-FRAME_SIZE-REG_SZ(%sp), %sp
# endif /* CONFIG_64BIT */
ENDPROC_CFI(call_on_stack)
#endif /* CONFIG_IRQSTACKS */

ENTRY_CFI(get_register)
	/*
	 * get_register is used by the non access tlb miss handlers to
	 * copy the value of the general register specified in r8 into
	 * r1. This routine can't be used for shadowed registers, since
	 * the rfir will restore the original value. So, for the shadowed
	 * registers we put a -1 into r1 to indicate that the register
	 * should not be used (the register being copied could also have
	 * a -1 in it, but that is OK, it just means that we will have
	 * to use the slow path instead).
	 */
	blr     %r8,%r0
	nop
	bv      %r0(%r25)    /* r0 */
	copy    %r0,%r1
	bv      %r0(%r25)    /* r1 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r2 */
	copy    %r2,%r1
	bv      %r0(%r25)    /* r3 */
	copy    %r3,%r1
	bv      %r0(%r25)    /* r4 */
	copy    %r4,%r1
	bv      %r0(%r25)    /* r5 */
	copy    %r5,%r1
	bv      %r0(%r25)    /* r6 */
	copy    %r6,%r1
	bv      %r0(%r25)    /* r7 */
	copy    %r7,%r1
	bv      %r0(%r25)    /* r8 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r9 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r10 */
	copy    %r10,%r1
	bv      %r0(%r25)    /* r11 */
	copy    %r11,%r1
	bv      %r0(%r25)    /* r12 */
	copy    %r12,%r1
	bv      %r0(%r25)    /* r13 */
	copy    %r13,%r1
	bv      %r0(%r25)    /* r14 */
	copy    %r14,%r1
	bv      %r0(%r25)    /* r15 */
	copy    %r15,%r1
	bv      %r0(%r25)    /* r16 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r17 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r18 */
	copy    %r18,%r1
	bv      %r0(%r25)    /* r19 */
	copy    %r19,%r1
	bv      %r0(%r25)    /* r20 */
	copy    %r20,%r1
	bv      %r0(%r25)    /* r21 */
	copy    %r21,%r1
	bv      %r0(%r25)    /* r22 */
	copy    %r22,%r1
	bv      %r0(%r25)    /* r23 */
	copy    %r23,%r1
	bv      %r0(%r25)    /* r24 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r25 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r26 */
	copy    %r26,%r1
	bv      %r0(%r25)    /* r27 */
	copy    %r27,%r1
	bv      %r0(%r25)    /* r28 */
	copy    %r28,%r1
	bv      %r0(%r25)    /* r29 */
	copy    %r29,%r1
	bv      %r0(%r25)    /* r30 */
	copy    %r30,%r1
	bv      %r0(%r25)    /* r31 */
	copy    %r31,%r1
ENDPROC_CFI(get_register)
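
	/* The blr above is a computed jump into a table of two-insn
	 * entries, i.e. roughly (C sketch):
	 *
	 *	switch (r8) {
	 *	case 0:  r1 = 0;   break;
	 *	case 1:  r1 = -1;  break;  // shadowed, value lost by rfir
	 *	...
	 *	case 31: r1 = r31; break;
	 *	}
	 */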


ENTRY_CFI(set_register)
	/*
	 * set_register is used by the non access tlb miss handlers to
	 * copy the value of r1 into the general register specified in
	 * r8.
	 */
	blr     %r8,%r0
	nop
	bv      %r0(%r25)    /* r0 (silly, but it is a place holder) */
	copy    %r1,%r0
	bv      %r0(%r25)    /* r1 */
	copy    %r1,%r1
	bv      %r0(%r25)    /* r2 */
	copy    %r1,%r2
	bv      %r0(%r25)    /* r3 */
	copy    %r1,%r3
	bv      %r0(%r25)    /* r4 */
	copy    %r1,%r4
	bv      %r0(%r25)    /* r5 */
	copy    %r1,%r5
	bv      %r0(%r25)    /* r6 */
	copy    %r1,%r6
	bv      %r0(%r25)    /* r7 */
	copy    %r1,%r7
	bv      %r0(%r25)    /* r8 */
	copy    %r1,%r8
	bv      %r0(%r25)    /* r9 */
	copy    %r1,%r9
	bv      %r0(%r25)    /* r10 */
	copy    %r1,%r10
	bv      %r0(%r25)    /* r11 */
	copy    %r1,%r11
	bv      %r0(%r25)    /* r12 */
	copy    %r1,%r12
	bv      %r0(%r25)    /* r13 */
	copy    %r1,%r13
	bv      %r0(%r25)    /* r14 */
	copy    %r1,%r14
	bv      %r0(%r25)    /* r15 */
	copy    %r1,%r15
	bv      %r0(%r25)    /* r16 */
	copy    %r1,%r16
	bv      %r0(%r25)    /* r17 */
	copy    %r1,%r17
	bv      %r0(%r25)    /* r18 */
	copy    %r1,%r18
	bv      %r0(%r25)    /* r19 */
	copy    %r1,%r19
	bv      %r0(%r25)    /* r20 */
	copy    %r1,%r20
	bv      %r0(%r25)    /* r21 */
	copy    %r1,%r21
	bv      %r0(%r25)    /* r22 */
	copy    %r1,%r22
	bv      %r0(%r25)    /* r23 */
	copy    %r1,%r23
	bv      %r0(%r25)    /* r24 */
	copy    %r1,%r24
	bv      %r0(%r25)    /* r25 */
	copy    %r1,%r25
	bv      %r0(%r25)    /* r26 */
	copy    %r1,%r26
	bv      %r0(%r25)    /* r27 */
	copy    %r1,%r27
	bv      %r0(%r25)    /* r28 */
	copy    %r1,%r28
	bv      %r0(%r25)    /* r29 */
	copy    %r1,%r29
	bv      %r0(%r25)    /* r30 */
	copy    %r1,%r30
	bv      %r0(%r25)    /* r31 */
	copy    %r1,%r31
ENDPROC_CFI(set_register)