/*
 * Linux/PA-RISC Project (http://www.parisc-linux.org/)
 *
 * kernel entry points (interruptions, system call wrappers)
 *  Copyright (C) 1999,2000 Philipp Rumpf
 *  Copyright (C) 1999 SuSE GmbH Nuernberg
 *  Copyright (C) 2000 Hewlett-Packard (John Marvin)
 *  Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; either version 2, or (at your option)
 *    any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <asm/asm-offsets.h>

/* we have the following possibilities to act on an interruption:
 *  - handle in assembly and use shadowed registers only
 *  - save registers to kernel stack and handle in assembly or C */


#include <asm/psw.h>
#include <asm/cache.h>		/* for L1_CACHE_SHIFT */
#include <asm/assembly.h>	/* for LDREG/STREG defines */
#include <asm/pgtable.h>
#include <asm/signal.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>

#include <linux/linkage.h>

#ifdef CONFIG_64BIT
	.level 2.0w
#else
	.level 2.0
#endif

	.import		pa_tlb_lock,data

	/* space_to_prot macro creates a prot id from a space id */

#if (SPACEID_SHIFT) == 0
	.macro  space_to_prot spc prot
	depd,z  \spc,62,31,\prot
	.endm
#else
	.macro  space_to_prot spc prot
	extrd,u \spc,(64 - (SPACEID_SHIFT)),32,\prot
	.endm
#endif
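
	/* A minimal worked example, assuming the depd,z variant: with PA
	 * bit numbering (bit 63 is the LSB), "depd,z \spc,62,31,\prot"
	 * deposits the low 31 bits of the space id with its rightmost bit
	 * landing at position 62, i.e. prot = spc << 1.  The low-order
	 * bit of a protection id is the write-disable bit, so the shift
	 * yields a prot id with write-disable clear. */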

	/* Switch to virtual mapping, trashing only %r1 */
	.macro  virt_map
	/* pcxt_ssm_bug */
	rsm	PSW_SM_I, %r0	/* barrier for "Relied upon Translation" */
	mtsp	%r0, %sr4
	mtsp	%r0, %sr5
	mtsp	%r0, %sr6
	tovirt_r1 %r29
	load32	KERNEL_PSW, %r1

	rsm     PSW_SM_QUIET,%r0	/* second "heavy weight" ctl op */
	mtctl	%r0, %cr17	/* Clear IIASQ tail */
	mtctl	%r0, %cr17	/* Clear IIASQ head */
	mtctl	%r1, %ipsw
	load32	4f, %r1
	mtctl	%r1, %cr18	/* Set IIAOQ tail */
	ldo	4(%r1), %r1
	mtctl	%r1, %cr18	/* Set IIAOQ head */
	rfir
	nop
4:
	.endm
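
	/* Why the doubled mtctl above: the interruption instruction
	 * address queues (IIASQ in %cr17, IIAOQ in %cr18) are two entries
	 * deep, so each control register is written twice to fill both
	 * queue slots (here 4f and 4f+4).  rfir then reloads the PSW from
	 * %ipsw and resumes at 4: with translation enabled. */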

	/*
	 * The "get_stack" macros are responsible for determining the
	 * kernel stack value.
	 *
	 *      If sr7 == 0
	 *          Already using a kernel stack, so call the
	 *          get_stack_use_r30 macro to push a pt_regs structure
	 *          on the stack, and store registers there.
	 *      else
	 *          Need to set up a kernel stack, so call the
	 *          get_stack_use_cr30 macro to set up a pointer
	 *          to the pt_regs structure contained within the
	 *          task pointer pointed to by cr30. Set the stack
	 *          pointer to point to the end of the task structure.
	 *
	 * Note that we use shadowed registers for temps until
	 * we can save %r26 and %r29. %r26 is used to preserve
	 * %r8 (a shadowed register) which temporarily contained
	 * either the fault type ("code") or the eirr. We need
	 * to use a non-shadowed register to carry the value over
	 * the rfir in virt_map. We use %r26 since this value winds
	 * up being passed as the argument to either do_cpu_irq_mask
	 * or handle_interruption. %r29 is used to hold a pointer to
	 * the register save area, and once again, it needs to
	 * be a non-shadowed register so that it survives the rfir.
	 *
	 * N.B. TASK_SZ_ALGN and PT_SZ_ALGN include space for a stack frame.
	 */

	.macro  get_stack_use_cr30

	/* we save the registers in the task struct */

	copy	%r30, %r17
	mfctl   %cr30, %r1
	ldo	THREAD_SZ_ALGN(%r1), %r30
	mtsp	%r0,%sr7
	mtsp	%r16,%sr3
	tophys  %r1,%r9
	LDREG	TI_TASK(%r9), %r1	/* thread_info -> task_struct */
	tophys  %r1,%r9
	ldo     TASK_REGS(%r9),%r9
	STREG   %r17,PT_GR30(%r9)
	STREG   %r29,PT_GR29(%r9)
	STREG   %r26,PT_GR26(%r9)
	STREG	%r16,PT_SR7(%r9)
	copy    %r9,%r29
	.endm

	.macro  get_stack_use_r30

	/* we put a struct pt_regs on the stack and save the registers there */

	tophys  %r30,%r9
	copy	%r30,%r1
	ldo	PT_SZ_ALGN(%r30),%r30
	STREG   %r1,PT_GR30(%r9)
	STREG   %r29,PT_GR29(%r9)
	STREG   %r26,PT_GR26(%r9)
	STREG	%r16,PT_SR7(%r9)
	copy    %r9,%r29
	.endm

	.macro  rest_stack
	LDREG   PT_GR1(%r29), %r1
	LDREG   PT_GR30(%r29),%r30
	LDREG   PT_GR29(%r29),%r29
	.endm

	/* default interruption handler
	 * (calls traps.c:handle_interruption) */
	.macro	def code
	b	intr_save
	ldi     \code, %r8
	.align	32
	.endm

	/* Interrupt interruption handler
	 * (calls irq.c:do_cpu_irq_mask) */
	.macro	extint code
	b	intr_extint
	mfsp    %sr7,%r16
	.align	32
	.endm

	.import	os_hpmc, code

	/* HPMC handler */
	.macro	hpmc code
	nop			/* must be a NOP, will be patched later */
	load32	PA(os_hpmc), %r3
	bv,n	0(%r3)
	nop
	.word	0		/* checksum (will be patched) */
	.word	PA(os_hpmc)	/* address of handler */
	.word	0		/* length of handler */
	.endm

	/*
	 * Performance Note: Instructions will be moved up into
	 * this part of the code later on, once we are sure
	 * that the tlb miss handlers are close to final form.
	 */

	/* Register definitions for tlb miss handler macros */

	va  = r8	/* virtual address for which the trap occurred */
	spc = r24	/* space for which the trap occurred */

#ifndef CONFIG_64BIT

	/*
	 * itlb miss interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	itlb_11 code

	mfctl	%pcsq, spc
	b	itlb_miss_11
	mfctl	%pcoq, va

	.align		32
	.endm
#endif

	/*
	 * itlb miss interruption handler (parisc 2.0)
	 */

	.macro	itlb_20 code
	mfctl	%pcsq, spc
#ifdef CONFIG_64BIT
	b       itlb_miss_20w
#else
	b	itlb_miss_20
#endif
	mfctl	%pcoq, va

	.align		32
	.endm

#ifndef CONFIG_64BIT
	/*
	 * naitlb miss interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	naitlb_11 code

	mfctl	%isr,spc
	b	naitlb_miss_11
	mfctl 	%ior,va

	.align		32
	.endm
#endif

	/*
	 * naitlb miss interruption handler (parisc 2.0)
	 */

	.macro	naitlb_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b       naitlb_miss_20w
#else
	b	naitlb_miss_20
#endif
	mfctl 	%ior,va

	.align		32
	.endm

#ifndef CONFIG_64BIT
	/*
	 * dtlb miss interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	dtlb_11 code

	mfctl	%isr, spc
	b	dtlb_miss_11
	mfctl	%ior, va

	.align		32
	.endm
#endif

	/*
	 * dtlb miss interruption handler (parisc 2.0)
	 */

	.macro	dtlb_20 code

	mfctl	%isr, spc
#ifdef CONFIG_64BIT
	b       dtlb_miss_20w
#else
	b	dtlb_miss_20
#endif
	mfctl	%ior, va

	.align		32
	.endm

#ifndef CONFIG_64BIT
	/* nadtlb miss interruption handler (parisc 1.1 - 32 bit) */

	.macro	nadtlb_11 code

	mfctl	%isr,spc
	b       nadtlb_miss_11
	mfctl	%ior,va

	.align		32
	.endm
#endif

	/* nadtlb miss interruption handler (parisc 2.0) */

	.macro	nadtlb_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b       nadtlb_miss_20w
#else
	b       nadtlb_miss_20
#endif
	mfctl	%ior,va

	.align		32
	.endm

#ifndef CONFIG_64BIT
	/*
	 * dirty bit trap interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	dbit_11 code

	mfctl	%isr,spc
	b	dbit_trap_11
	mfctl	%ior,va

	.align		32
	.endm
#endif

	/*
	 * dirty bit trap interruption handler (parisc 2.0)
	 */

	.macro	dbit_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b       dbit_trap_20w
#else
	b	dbit_trap_20
#endif
	mfctl	%ior,va

	.align		32
	.endm

	/* In LP64, the space register contains part of the upper 32 bits
	 * of the faulting address.  We have to extract this and place it
	 * in the va, zeroing the corresponding bits in the space register */
	.macro		space_adjust	spc,va,tmp
#ifdef CONFIG_64BIT
	extrd,u		\spc,63,SPACEID_SHIFT,\tmp
	depd		%r0,63,SPACEID_SHIFT,\spc
	depd		\tmp,31,SPACEID_SHIFT,\va
#endif
	.endm

	.import		swapper_pg_dir,code

	/* Get the pgd.  For faults on space zero (kernel space), this
	 * is simply swapper_pg_dir.  For user space faults, the
	 * pgd is stored in %cr25 */
	.macro		get_pgd		spc,reg
	ldil		L%PA(swapper_pg_dir),\reg
	ldo		R%PA(swapper_pg_dir)(\reg),\reg
	or,COND(=)	%r0,\spc,%r0
	mfctl		%cr25,\reg
	.endm
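
	/* The "or,COND(=) %r0,\spc,%r0" above is a conditional guard: it
	 * computes \spc and, when the result is zero (a kernel-space
	 * fault), nullifies the following mfctl, so \reg keeps
	 * swapper_pg_dir; otherwise \reg is overwritten with the user
	 * pgd held in %cr25. */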

	/*
		space_check(spc,tmp,fault)

		spc - The space we saw the fault with.
		tmp - The place to store the current space.
		fault - Function to call on failure.

		Only allow faults on different spaces from the
		currently active one if we're the kernel

	*/
	.macro		space_check	spc,tmp,fault
	mfsp		%sr7,\tmp
	or,COND(<>)	%r0,\spc,%r0	/* user may execute gateway page
					 * as kernel, so defeat the space
					 * check if it is */
	copy		\spc,\tmp
	or,COND(=)	%r0,\tmp,%r0	/* nullify if executing as kernel */
	cmpb,COND(<>),n	\tmp,\spc,\fault
	.endm
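
	/* Decoded: the first conditional or nullifies the copy unless
	 * \spc is zero, so a fault taken on space 0 (the gateway page)
	 * forces \tmp = 0 and defeats the comparison below.  The second
	 * conditional or then nullifies the cmpb when \tmp is zero
	 * (running as kernel), so only a genuine user-space mismatch
	 * branches to \fault. */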

	/* Look up a PTE in a 2-Level scheme (faulting at each
	 * level if the entry isn't present)
	 *
	 * NOTE: we use ldw even for LP64, since the short pointers
	 * can address up to 1TB
	 */
	.macro		L2_ptep	pmd,pte,index,va,fault
#if CONFIG_PGTABLE_LEVELS == 3
	extru		\va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
#else
# if defined(CONFIG_64BIT)
	extrd,u		\va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
  #else
  # if PAGE_SIZE > 4096
	extru		\va,31-ASM_PGDIR_SHIFT,32-ASM_PGDIR_SHIFT,\index
  # else
	extru		\va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
  # endif
# endif
#endif
	dep             %r0,31,PAGE_SHIFT,\pmd  /* clear offset */
	copy		%r0,\pte
	ldw,s		\index(\pmd),\pmd
	bb,>=,n		\pmd,_PxD_PRESENT_BIT,\fault
	dep		%r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
	copy		\pmd,%r9
	SHLREG		%r9,PxD_VALUE_SHIFT,\pmd
	extru		\va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
	dep		%r0,31,PAGE_SHIFT,\pmd  /* clear offset */
	shladd		\index,BITS_PER_PTE_ENTRY,\pmd,\pmd /* pmd is now pte */
	LDREG		%r0(\pmd),\pte
	bb,>=,n		\pte,_PAGE_PRESENT_BIT,\fault
	.endm
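
	/* Note the ",s" completer on "ldw,s \index(\pmd),\pmd": a shifted
	 * indexed load scales \index by the access size (4 bytes here),
	 * so \index can stay a plain table slot number rather than a
	 * byte offset. */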

	/* Look up PTE in a 3-Level scheme.
	 *
	 * Here we implement a Hybrid L2/L3 scheme: we allocate the
	 * first pmd adjacent to the pgd.  This means that we can
	 * subtract a constant offset to get to it.  The pmd and pgd
	 * sizes are arranged so that a single pmd covers 4GB (giving
	 * a full LP64 process access to 8TB) so our lookups are
	 * effectively L2 for the first 4GB of the kernel (i.e. for
	 * all ILP32 processes and all the kernel for machines with
	 * under 4GB of memory) */
	.macro		L3_ptep pgd,pte,index,va,fault
#if CONFIG_PGTABLE_LEVELS == 3 /* we might have a 2-Level scheme, e.g. with 16kb page size */
	extrd,u		\va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
	copy		%r0,\pte
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	ldw,s		\index(\pgd),\pgd
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	bb,>=,n		\pgd,_PxD_PRESENT_BIT,\fault
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	shld		\pgd,PxD_VALUE_SHIFT,\index
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	copy		\index,\pgd
	extrd,u,*<>	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	ldo		ASM_PGD_PMD_OFFSET(\pgd),\pgd
#endif
	L2_ptep		\pgd,\pte,\index,\va,\fault
	.endm
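
	/* Nullification only covers the single following instruction, so
	 * the extrd,u,*= guard on the upper va bits has to be repeated in
	 * front of every step of the L3 walk above.  When the upper bits
	 * are zero (an address in the first 4GB) the walk is skipped and
	 * the final extrd,u,*<> lets the ldo apply ASM_PGD_PMD_OFFSET to
	 * reach the pmd allocated adjacent to the pgd. */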

	/* Acquire pa_tlb_lock lock and recheck page is still present. */
	.macro		tlb_lock	spc,ptp,pte,tmp,tmp1,fault
#ifdef CONFIG_SMP
	cmpib,COND(=),n	0,\spc,2f
	load32		PA(pa_tlb_lock),\tmp
1:	LDCW		0(\tmp),\tmp1
	cmpib,COND(=)	0,\tmp1,1b
	nop
	LDREG		0(\ptp),\pte
	bb,<,n		\pte,_PAGE_PRESENT_BIT,2f
	b		\fault
	stw		 \spc,0(\tmp)
2:
#endif
	.endm
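
	/* LDCW is the PA-RISC atomic load-and-clear primitive: it returns
	 * the old word and zeroes it in memory.  A zero result means the
	 * lock was already held, so we spin on the cmpib; a nonzero
	 * result means we now own it.  On the fault path the
	 * "stw \spc,0(\tmp)" in the branch delay slot releases the lock
	 * (\spc is known nonzero here, since spc == 0 skipped locking
	 * entirely). */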

	/* Release pa_tlb_lock lock without reloading lock address. */
	.macro		tlb_unlock0	spc,tmp
#ifdef CONFIG_SMP
	or,COND(=)	%r0,\spc,%r0
	stw             \spc,0(\tmp)
#endif
	.endm

	/* Release pa_tlb_lock lock. */
	.macro		tlb_unlock1	spc,tmp
#ifdef CONFIG_SMP
	load32		PA(pa_tlb_lock),\tmp
	tlb_unlock0	\spc,\tmp
#endif
	.endm

	/* Set the _PAGE_ACCESSED bit of the PTE.  Be clever and
	 * don't needlessly dirty the cache line if it was already set */
	.macro		update_accessed	ptp,pte,tmp,tmp1
	ldi		_PAGE_ACCESSED,\tmp1
	or		\tmp1,\pte,\tmp
	and,COND(<>)	\tmp1,\pte,%r0
	STREG		\tmp,0(\ptp)
	.endm
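
	/* The "and,COND(<>)" nullifies the STREG when _PAGE_ACCESSED was
	 * already set in \pte, so the store (and the resulting cache
	 * line dirtying) only happens the first time the page is
	 * referenced. */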

	/* Set the dirty bit (and accessed bit).  No need to be
	 * clever, this is only used from the dirty fault */
	.macro		update_dirty	ptp,pte,tmp
	ldi		_PAGE_ACCESSED|_PAGE_DIRTY,\tmp
	or		\tmp,\pte,\pte
	STREG		\pte,0(\ptp)
	.endm

	/* We have (depending on the page size):
	 * - 38 to 52-bit Physical Page Number
	 * - 12 to 26-bit page offset
	 */
	/* bitshift difference between a PFN (based on kernel's PAGE_SIZE)
	 * to a CPU TLB 4k PFN (4k => 12 bits to shift) */
	#define PAGE_ADD_SHIFT		(PAGE_SHIFT-12)
	#define PAGE_ADD_HUGE_SHIFT	(REAL_HPAGE_SHIFT-12)

	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
	.macro		convert_for_tlb_insert20 pte,tmp
#ifdef CONFIG_HUGETLB_PAGE
	copy		\pte,\tmp
	extrd,u		\tmp,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
				64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte

	depdi		_PAGE_SIZE_ENCODING_DEFAULT,63,\
				(63-58)+PAGE_ADD_SHIFT,\pte
	extrd,u,*=	\tmp,_PAGE_HPAGE_BIT+32,1,%r0
	depdi		_HUGE_PAGE_SIZE_ENCODING_DEFAULT,63,\
				(63-58)+PAGE_ADD_HUGE_SHIFT,\pte
#else /* Huge pages disabled */
	extrd,u		\pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
				64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
	depdi		_PAGE_SIZE_ENCODING_DEFAULT,63,\
				(63-58)+PAGE_ADD_SHIFT,\pte
#endif
	.endm
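
	/* Sketch of the common case (4kB kernel pages, so
	 * PAGE_ADD_SHIFT == 0, and no huge pages): the extrd,u pulls the
	 * physical page number out of the pte and right-justifies it,
	 * and the depdi then writes the page-size encoding expected by
	 * iitlbt/idtlbt into the freed-up low-order bits.  When huge
	 * pages are enabled, the guarded second depdi rewrites that
	 * encoding for _PAGE_HPAGE_BIT mappings. */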

	/* Convert the pte and prot to tlb insertion values.  How
	 * this happens is quite subtle, read below */
	.macro		make_insert_tlb	spc,pte,prot,tmp
	space_to_prot   \spc \prot        /* create prot id from space */
	/* The following is the real subtlety.  This is depositing
	 * T <-> _PAGE_REFTRAP
	 * D <-> _PAGE_DIRTY
	 * B <-> _PAGE_DMB (memory break)
	 *
	 * Then incredible subtlety: The access rights are
	 * _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE
	 * See 3-14 of the parisc 2.0 manual
	 *
	 * Finally, _PAGE_READ goes in the top bit of PL1 (so we
	 * trigger an access rights trap in user space if the user
	 * tries to read an unreadable page */
	depd            \pte,8,7,\prot

	/* PAGE_USER indicates the page can be read with user privileges,
	 * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1
	 * contains _PAGE_READ) */
	extrd,u,*=      \pte,_PAGE_USER_BIT+32,1,%r0
	depdi		7,11,3,\prot
	/* If we're a gateway page, drop PL2 back to zero for promotion
	 * to kernel privilege (so we can execute the page as kernel).
	 * Any privilege promotion page always denies read and write */
	extrd,u,*= 	\pte,_PAGE_GATEWAY_BIT+32,1,%r0
	depd		%r0,11,2,\prot	/* If Gateway, Set PL2 to 0 */

	/* Enforce uncacheable pages.
	 * This should ONLY be used for MMIO on PA 2.0 machines.
	 * Memory/DMA is cache coherent on all PA2.0 machines we support
	 * (that means T-class is NOT supported) and the memory controllers
	 * on most of those machines only handle cache transactions.
	 */
	extrd,u,*=	\pte,_PAGE_NO_CACHE_BIT+32,1,%r0
	depdi		1,12,1,\prot

	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
	convert_for_tlb_insert20 \pte \tmp
	.endm

	/* Identical macro to make_insert_tlb above, except it
	 * makes the tlb entry for the differently formatted pa11
	 * insertion instructions */
	.macro		make_insert_tlb_11	spc,pte,prot
	zdep		\spc,30,15,\prot
	dep		\pte,8,7,\prot
	extru,=		\pte,_PAGE_NO_CACHE_BIT,1,%r0
	depi		1,12,1,\prot
	extru,=         \pte,_PAGE_USER_BIT,1,%r0
	depi		7,11,3,\prot   /* Set for user space (1 rsvd for read) */
	extru,= 	\pte,_PAGE_GATEWAY_BIT,1,%r0
	depi		0,11,2,\prot	/* If Gateway, Set PL2 to 0 */

	/* Get rid of prot bits and convert to page addr for iitlba */

	depi		0,31,ASM_PFN_PTE_SHIFT,\pte
	SHRREG		\pte,(ASM_PFN_PTE_SHIFT-(31-26)),\pte
	.endm

	/* This is for ILP32 PA2.0 only.  The TLB insertion needs
	 * to extend into I/O space if the address is 0xfXXXXXXX
	 * so we extend the f's into the top word of the pte in
	 * this case */
	.macro		f_extend	pte,tmp
	extrd,s		\pte,42,4,\tmp
	addi,<>		1,\tmp,%r0
	extrd,s		\pte,63,25,\pte
	.endm
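
	/* Decoded: the first extrd,s pulls a 4-bit field out of \pte into
	 * \tmp as a signed value; "addi,<> 1,\tmp,%r0" nullifies the
	 * final sign-extension unless \tmp was -1 (all ones), so only
	 * addresses whose top nibble is 0xf get the f's extended into
	 * the upper word. */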

	/* The alias region is an 8MB-aligned 16MB region used to clear
	 * and copy user pages at addresses congruent with the user
	 * virtual address.
	 *
	 * To use the alias page, you set %r26 up with the "to" TLB
	 * entry (identifying the physical page) and %r23 up with
	 * the "from" TLB entry (or nothing if only a "to" entry is
	 * needed---for clear_user_page_asm) */
	.macro		do_alias	spc,tmp,tmp1,va,pte,prot,fault,patype
	cmpib,COND(<>),n 0,\spc,\fault
	ldil		L%(TMPALIAS_MAP_START),\tmp
#if defined(CONFIG_64BIT) && (TMPALIAS_MAP_START >= 0x80000000)
	/* on LP64, ldi will sign extend into the upper 32 bits,
	 * which is behaviour we don't want */
	depdi		0,31,32,\tmp
#endif
	copy		\va,\tmp1
	depi		0,31,23,\tmp1
	cmpb,COND(<>),n	\tmp,\tmp1,\fault
	mfctl		%cr19,\tmp	/* iir */
	/* get the opcode (first six bits) into \tmp */
	extrw,u		\tmp,5,6,\tmp
	/*
	 * Only setting the T bit prevents data cache movein
	 * Setting access rights to zero prevents instruction cache movein
	 *
	 * Note subtlety here: _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE go
	 * to type field and _PAGE_READ goes to top bit of PL1
	 */
	ldi		(_PAGE_REFTRAP|_PAGE_READ|_PAGE_WRITE),\prot
	/*
	 * so if the opcode is one (i.e. this is a memory management
	 * instruction) nullify the next load so \prot is only T.
	 * Otherwise this is a normal data operation
	 */
	cmpiclr,=	0x01,\tmp,%r0
	ldi		(_PAGE_DIRTY|_PAGE_READ|_PAGE_WRITE),\prot
.ifc \patype,20
	depd,z		\prot,8,7,\prot
.else
.ifc \patype,11
	depw,z		\prot,8,7,\prot
.else
	.error "undefined PA type to do_alias"
.endif
.endif
	/*
	 * OK, it is in the temp alias region, check whether "from" or "to".
	 * Check "subtle" note in pacache.S re: r23/r26.
	 */
#ifdef CONFIG_64BIT
	extrd,u,*=	\va,41,1,%r0
#else
	extrw,u,=	\va,9,1,%r0
#endif
	or,COND(tr)	%r23,%r0,\pte
	or		%r26,%r0,\pte
	.endm
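
	/* The final pair above is an if/else in two instructions: the
	 * extr nullifies the "or,COND(tr)" when the distinguishing va
	 * bit is clear, and the or,COND(tr) (condition always true) in
	 * turn nullifies the trailing or when it does execute.  Result:
	 * \pte = %r23 (the "from" entry) or \pte = %r26 (the "to"
	 * entry), never both. */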


	/*
	 * Fault vectors are architecturally required to be aligned on a 2K
	 * boundary
	 */

	.section .text.hot
	.align 2048

ENTRY(fault_vector_20)
	/* First vector is invalid (0) */
	.ascii	"cows can fly"
	.byte 0
	.align 32

	hpmc		 1
	def		 2
	def		 3
	extint		 4
	def		 5
	itlb_20		 6
	def		 7
	def		 8
	def              9
	def		10
	def		11
	def		12
	def		13
	def		14
	dtlb_20		15
	naitlb_20	16
	nadtlb_20	17
	def		18
	def		19
	dbit_20		20
	def		21
	def		22
	def		23
	def		24
	def		25
	def		26
	def		27
	def		28
	def		29
	def		30
	def		31
END(fault_vector_20)

#ifndef CONFIG_64BIT

	.align 2048

ENTRY(fault_vector_11)
	/* First vector is invalid (0) */
	.ascii	"cows can fly"
	.byte 0
	.align 32

	hpmc		 1
	def		 2
	def		 3
	extint		 4
	def		 5
	itlb_11		 6
	def		 7
	def		 8
	def              9
	def		10
	def		11
	def		12
	def		13
	def		14
	dtlb_11		15
	naitlb_11	16
	nadtlb_11	17
	def		18
	def		19
	dbit_11		20
	def		21
	def		22
	def		23
	def		24
	def		25
	def		26
	def		27
	def		28
	def		29
	def		30
	def		31
END(fault_vector_11)

#endif
	/* Fault vector is separately protected and *must* be on its own page */
	.align		PAGE_SIZE
ENTRY(end_fault_vector)

	.import		handle_interruption,code
	.import		do_cpu_irq_mask,code

	/*
	 * Child Returns here
	 *
	 * copy_thread moved args into task save area.
	 */

ENTRY_CFI(ret_from_kernel_thread)

	/* Call schedule_tail first though */
	BL	schedule_tail, %r2
	nop

	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
	LDREG	TASK_PT_GR25(%r1), %r26
#ifdef CONFIG_64BIT
	LDREG	TASK_PT_GR27(%r1), %r27
#endif
	LDREG	TASK_PT_GR26(%r1), %r1
	ble	0(%sr7, %r1)
	copy	%r31, %r2
	b	finish_child_return
	nop
ENDPROC_CFI(ret_from_kernel_thread)


	/*
	 * struct task_struct *_switch_to(struct task_struct *prev,
	 *	struct task_struct *next)
	 *
	 * switch kernel stacks and return prev */
ENTRY_CFI(_switch_to)
	STREG	 %r2, -RP_OFFSET(%r30)

	callee_save_float
	callee_save

	load32	_switch_to_ret, %r2

	STREG	%r2, TASK_PT_KPC(%r26)
	LDREG	TASK_PT_KPC(%r25), %r2

	STREG	%r30, TASK_PT_KSP(%r26)
	LDREG	TASK_PT_KSP(%r25), %r30
	LDREG	TASK_THREAD_INFO(%r25), %r25
	bv	%r0(%r2)
	mtctl   %r25,%cr30

_switch_to_ret:
	mtctl	%r0, %cr0		/* Needed for single stepping */
	callee_rest
	callee_rest_float

	LDREG	-RP_OFFSET(%r30), %r2
	bv	%r0(%r2)
	copy	%r26, %r28
ENDPROC_CFI(_switch_to)

	/*
	 * Common rfi return path for interruptions, kernel execve, and
	 * sys_rt_sigreturn (sometimes).  The sys_rt_sigreturn syscall will
	 * return via this path if the signal was received when the process
	 * was running; if the process was blocked on a syscall then the
	 * normal syscall_exit path is used.  All syscalls for traced
	 * processes exit via intr_restore.
	 *
	 * XXX If any syscalls that change a process's space id ever exit
	 * this way, then we will need to copy %sr3 in to PT_SR[3..7], and
	 * adjust IASQ[0..1].
	 *
	 */

	.align	PAGE_SIZE

ENTRY_CFI(syscall_exit_rfi)
	mfctl   %cr30,%r16
	LDREG	TI_TASK(%r16), %r16	/* thread_info -> task_struct */
	ldo	TASK_REGS(%r16),%r16
	/* Force iaoq to userspace, as the user has had access to our current
	 * context via sigcontext. Also filter the PSW for the same reason.
	 */
	LDREG	PT_IAOQ0(%r16),%r19
	depi	3,31,2,%r19
	STREG	%r19,PT_IAOQ0(%r16)
	LDREG	PT_IAOQ1(%r16),%r19
	depi	3,31,2,%r19
	STREG	%r19,PT_IAOQ1(%r16)
	LDREG   PT_PSW(%r16),%r19
	load32	USER_PSW_MASK,%r1
#ifdef CONFIG_64BIT
	load32	USER_PSW_HI_MASK,%r20
	depd    %r20,31,32,%r1
#endif
	and     %r19,%r1,%r19 /* Mask out bits that user shouldn't play with */
	load32	USER_PSW,%r1
	or      %r19,%r1,%r19 /* Make sure default USER_PSW bits are set */
	STREG   %r19,PT_PSW(%r16)

	/*
	 * If we aren't being traced, we never saved space registers
	 * (we don't store them in the sigcontext), so set them
	 * to "proper" values now (otherwise we'll wind up restoring
	 * whatever was last stored in the task structure, which might
	 * be inconsistent if an interrupt occurred while on the gateway
	 * page). Note that we may be "trashing" values the user put in
	 * them, but we don't support the user changing them.
	 */

	STREG   %r0,PT_SR2(%r16)
	mfsp    %sr3,%r19
	STREG   %r19,PT_SR0(%r16)
	STREG   %r19,PT_SR1(%r16)
	STREG   %r19,PT_SR3(%r16)
	STREG   %r19,PT_SR4(%r16)
	STREG   %r19,PT_SR5(%r16)
	STREG   %r19,PT_SR6(%r16)
	STREG   %r19,PT_SR7(%r16)

intr_return:
	/* NOTE: Need to enable interrupts in case we schedule. */
	ssm     PSW_SM_I, %r0

	/* check for reschedule */
	mfctl   %cr30,%r1
	LDREG   TI_FLAGS(%r1),%r19	/* sched.h: TIF_NEED_RESCHED */
	bb,<,n	%r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */

	.import do_notify_resume,code
intr_check_sig:
	/* As above */
	mfctl   %cr30,%r1
	LDREG	TI_FLAGS(%r1),%r19
	ldi	(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME), %r20
	and,COND(<>)	%r19, %r20, %r0
	b,n	intr_restore	/* skip past if we've nothing to do */

	/* This check is critical to having LWS
	 * working. The IASQ is zero on the gateway
	 * page and we cannot deliver any signals until
	 * we get off the gateway page.
	 *
	 * Only do signals if we are returning to user space
	 */
	LDREG	PT_IASQ0(%r16), %r20
	cmpib,COND(=),n 0,%r20,intr_restore /* backward */
	LDREG	PT_IASQ1(%r16), %r20
	cmpib,COND(=),n 0,%r20,intr_restore /* backward */

	copy	%r0, %r25			/* long in_syscall = 0 */
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29			/* Reference param save area */
#endif

	BL	do_notify_resume,%r2
	copy	%r16, %r26			/* struct pt_regs *regs */

	b,n	intr_check_sig

intr_restore:
	copy            %r16,%r29
	ldo             PT_FR31(%r29),%r1
	rest_fp         %r1
	rest_general    %r29

	/* inverse of virt_map */
	pcxt_ssm_bug
	rsm             PSW_SM_QUIET,%r0	/* prepare for rfi */
	tophys_r1       %r29

	/* Restore space id's and special cr's from PT_REGS
	 * structure pointed to by r29
	 */
	rest_specials	%r29

	/* IMPORTANT: rest_stack restores r29 last (we are using it)!
	 * It also restores r1 and r30.
	 */
	rest_stack

	rfi
	nop

#ifndef CONFIG_PREEMPT
# define intr_do_preempt	intr_restore
#endif /* !CONFIG_PREEMPT */
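
/* Without CONFIG_PREEMPT the intr_do_preempt alias above makes the
 * userspace checks in intr_do_resched fall straight through to
 * intr_restore when returning to kernel mode. */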

	.import schedule,code
intr_do_resched:
	/* Only call schedule on return to userspace. If we're returning
	 * to kernel space, we may schedule if CONFIG_PREEMPT, otherwise
	 * we jump back to intr_restore.
	 */
	LDREG	PT_IASQ0(%r16), %r20
	cmpib,COND(=)	0, %r20, intr_do_preempt
	nop
	LDREG	PT_IASQ1(%r16), %r20
	cmpib,COND(=)	0, %r20, intr_do_preempt
	nop

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif

	ldil	L%intr_check_sig, %r2
#ifndef CONFIG_64BIT
	b	schedule
#else
	load32	schedule, %r20
	bv	%r0(%r20)
#endif
	ldo	R%intr_check_sig(%r2), %r2

	/* preempt the current task on returning to kernel
	 * mode from an interrupt, iff need_resched is set,
	 * and preempt_count is 0. otherwise, we continue on
	 * our merry way back to the current running task.
	 */
#ifdef CONFIG_PREEMPT
	.import preempt_schedule_irq,code
intr_do_preempt:
	rsm	PSW_SM_I, %r0		/* disable interrupts */

	/* current_thread_info()->preempt_count */
	mfctl	%cr30, %r1
	LDREG	TI_PRE_COUNT(%r1), %r19
	cmpib,COND(<>)	0, %r19, intr_restore	/* if preempt_count > 0 */
	nop				/* prev insn branched backwards */

	/* check if we interrupted a critical path */
	LDREG	PT_PSW(%r16), %r20
	bb,<,n	%r20, 31 - PSW_SM_I, intr_restore
	nop

	BL	preempt_schedule_irq, %r2
	nop

	b,n	intr_restore		/* ssm PSW_SM_I done by intr_restore */
#endif /* CONFIG_PREEMPT */

	/*
	 * External interrupts.
	 */

intr_extint:
	cmpib,COND(=),n 0,%r16,1f

	get_stack_use_cr30
	b,n 2f

1:
	get_stack_use_r30
2:
	save_specials	%r29
	virt_map
	save_general	%r29

	ldo	PT_FR0(%r29), %r24
	save_fp	%r24

	loadgp

	copy	%r29, %r26	/* arg0 is pt_regs */
	copy	%r29, %r16	/* save pt_regs */

	ldil	L%intr_return, %r2

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29	/* Reference param save area */
#endif

	b	do_cpu_irq_mask
	ldo	R%intr_return(%r2), %r2	/* return to intr_return, not here */
ENDPROC_CFI(syscall_exit_rfi)


	/* Generic interruptions (illegal insn, unaligned, page fault, etc) */

ENTRY_CFI(intr_save)		/* for os_hpmc */
	mfsp    %sr7,%r16
	cmpib,COND(=),n 0,%r16,1f
	get_stack_use_cr30
	b	2f
	copy    %r8,%r26

1:
	get_stack_use_r30
	copy    %r8,%r26

2:
	save_specials	%r29

	/* If this trap is an itlb miss, skip saving/adjusting isr/ior */

	/*
	 * FIXME: 1) Use a #define for the hardwired "6" below (and in
	 *           traps.c).
	 *        2) Once we start executing code above 4 Gb, we need
	 *           to adjust iasq/iaoq here in the same way we
	 *           adjust isr/ior below.
	 */

	cmpib,COND(=),n        6,%r26,skip_save_ior


	mfctl           %cr20, %r16 /* isr */
	nop		/* serialize mfctl on PA 2.0 to avoid 4 cycle penalty */
	mfctl           %cr21, %r17 /* ior */


#ifdef CONFIG_64BIT
	/*
	 * If the interrupted code was running with W bit off (32 bit),
	 * clear the b bits (bits 0 & 1) in the ior.
	 * save_specials left ipsw value in r8 for us to test.
	 */
	extrd,u,*<>     %r8,PSW_W_BIT,1,%r0
	depdi           0,1,2,%r17

	/*
	 * FIXME: This code has hardwired assumptions about the split
	 *        between space bits and offset bits. This will change
	 *        when we allow alternate page sizes.
	 */

	/* adjust isr/ior. */
	extrd,u         %r16,63,SPACEID_SHIFT,%r1	/* get high bits from isr for ior */
	depd            %r1,31,SPACEID_SHIFT,%r17	/* deposit them into ior */
	depdi           0,63,SPACEID_SHIFT,%r16		/* clear them from isr */
#endif
	STREG           %r16, PT_ISR(%r29)
	STREG           %r17, PT_IOR(%r29)


skip_save_ior:
	virt_map
	save_general	%r29

	ldo		PT_FR0(%r29), %r25
	save_fp		%r25

	loadgp

	copy		%r29, %r25	/* arg1 is pt_regs */
#ifdef CONFIG_64BIT
	ldo		-16(%r30),%r29	/* Reference param save area */
#endif

	ldil		L%intr_check_sig, %r2
	copy		%r25, %r16	/* save pt_regs */

	b		handle_interruption
	ldo		R%intr_check_sig(%r2), %r2
ENDPROC_CFI(intr_save)


	/*
	 * Note for all tlb miss handlers:
	 *
	 * cr24 contains a pointer to the kernel address space
	 * page directory.
	 *
	 * cr25 contains a pointer to the current user address
	 * space page directory.
	 *
	 * sr3 will contain the space id of the user address space
	 * of the current running thread while that thread is
	 * running in the kernel.
	 */

	/*
	 * register number allocations.  Note that these are all
	 * in the shadowed registers
	 */

	t0 = r1		/* temporary register 0 */
	va = r8		/* virtual address for which the trap occurred */
	t1 = r9		/* temporary register 1 */
	pte  = r16	/* pte/phys page # */
	prot = r17	/* prot bits */
	spc  = r24	/* space for which the trap occurred */
	ptp = r25	/* page directory/page table pointer */

#ifdef CONFIG_64BIT

dtlb_miss_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dtlb_fault

	L3_ptep		ptp,pte,t0,va,dtlb_check_alias_20w

	tlb_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_20w
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	idtlbt          pte,prot

	tlb_unlock1	spc,t0
	rfir
	nop

dtlb_check_alias_20w:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,20

	idtlbt          pte,prot

	rfir
	nop

nadtlb_miss_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,nadtlb_fault

	L3_ptep		ptp,pte,t0,va,nadtlb_check_alias_20w

	tlb_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_20w
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	idtlbt          pte,prot

	tlb_unlock1	spc,t0
	rfir
	nop

nadtlb_check_alias_20w:
	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,20

	idtlbt          pte,prot

	rfir
	nop

#else

dtlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,dtlb_fault

	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_11

	tlb_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_11
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	tlb_unlock1	spc,t0
	rfir
	nop

dtlb_check_alias_11:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,11

	idtlba          pte,(va)
	idtlbp          prot,(va)

	rfir
	nop

nadtlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,nadtlb_fault

	L2_ptep		ptp,pte,t0,va,nadtlb_check_alias_11

	tlb_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_11
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	tlb_unlock1	spc,t0
	rfir
	nop

nadtlb_check_alias_11:
	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,11

	idtlba          pte,(va)
	idtlbp          prot,(va)

	rfir
	nop

dtlb_miss_20:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dtlb_fault

	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_20

	tlb_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_20
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	idtlbt          pte,prot

	tlb_unlock1	spc,t0
	rfir
	nop

dtlb_check_alias_20:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,20

	idtlbt          pte,prot

	rfir
	nop

nadtlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,nadtlb_fault

	L2_ptep		ptp,pte,t0,va,nadtlb_check_alias_20

	tlb_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_20
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	idtlbt		pte,prot

	tlb_unlock1	spc,t0
	rfir
	nop

nadtlb_check_alias_20:
	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,20

	idtlbt          pte,prot

	rfir
	nop

#endif

nadtlb_emulate:

	/*
	 * Non-access misses can be caused by fdc,fic,pdc,lpa,probe and
	 * probei instructions. We don't want to fault for these
	 * instructions (not only does it not make sense, it can cause
	 * deadlocks, since some flushes are done with the mmap
	 * semaphore held). If the translation doesn't exist, we can't
	 * insert a translation, so we have to emulate the side effects
	 * of the instruction. Since we don't insert a translation
	 * we can get a lot of faults during a flush loop, so it makes
	 * sense to try to do it here with minimum overhead. We only
	 * emulate fdc,fic,pdc,probew,prober instructions whose base
	 * and index registers are not shadowed. We defer everything
	 * else to the "slow" path.
	 */

	mfctl           %cr19,%r9 /* Get iir */

	/* PA 2.0 Arch Ref. Book pg 382 has a good description of the insn bits.
	   Checks for fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw */

	/* Checks for fdc,fdce,pdc,"fic,4f" only */
	ldi             0x280,%r16
	and             %r9,%r16,%r17
	cmpb,<>,n       %r16,%r17,nadtlb_probe_check
	bb,>=,n         %r9,26,nadtlb_nullify  /* m bit not set, just nullify */
	BL		get_register,%r25
	extrw,u         %r9,15,5,%r8           /* Get index register # */
	cmpib,COND(=),n        -1,%r1,nadtlb_fault    /* have to use slow path */
	copy            %r1,%r24
	BL		get_register,%r25
	extrw,u         %r9,10,5,%r8           /* Get base register # */
	cmpib,COND(=),n        -1,%r1,nadtlb_fault    /* have to use slow path */
	BL		set_register,%r25
	add,l           %r1,%r24,%r1           /* doesn't affect c/b bits */

nadtlb_nullify:
	mfctl           %ipsw,%r8
	ldil            L%PSW_N,%r9
	or              %r8,%r9,%r8            /* Set PSW_N */
	mtctl           %r8,%ipsw

	rfir
	nop
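
	/* Setting PSW_N in the saved %ipsw makes the rfir return with the
	 * nullify bit set, so the interrupted (emulated) instruction is
	 * skipped rather than re-executed and re-trapping forever. */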

	/*
		When there is no translation for the probe address then we
		must nullify the insn and return zero in the target register.
		This will indicate to the calling code that it does not have
		write/read privileges to this address.

		This should technically work for prober and probew in PA 1.1,
		and also probe,r and probe,w in PA 2.0

		WARNING: USE ONLY NON-SHADOW REGISTERS WITH PROBE INSN!
		THE SLOW-PATH EMULATION HAS NOT BEEN WRITTEN YET.

	*/
nadtlb_probe_check:
	ldi             0x80,%r16
	and             %r9,%r16,%r17
	cmpb,<>,n       %r16,%r17,nadtlb_fault /* Must be probe,[rw]*/
	BL              get_register,%r25      /* Find the target register */
	extrw,u         %r9,31,5,%r8           /* Get target register */
	cmpib,COND(=),n        -1,%r1,nadtlb_fault    /* have to use slow path */
	BL		set_register,%r25
	copy            %r0,%r1                /* Write zero to target register */
	b nadtlb_nullify                       /* Nullify return insn */
	nop


#ifdef CONFIG_64BIT
itlb_miss_20w:

	/*
	 * An itlb miss is a little different, since we allow users to
	 * fault on the gateway page, which is in the kernel address space.
	 */

	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,itlb_fault

	L3_ptep		ptp,pte,t0,va,itlb_fault

	tlb_lock	spc,ptp,pte,t0,t1,itlb_fault
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	iitlbt          pte,prot

	tlb_unlock1	spc,t0
	rfir
	nop

naitlb_miss_20w:

	/*
	 * A naitlb miss is a little different, since we allow users to
	 * fault on the gateway page, which is in the kernel address space.
	 */

	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,naitlb_fault

	L3_ptep		ptp,pte,t0,va,naitlb_check_alias_20w

	tlb_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_20w
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	iitlbt          pte,prot

	tlb_unlock1	spc,t0
	rfir
	nop

naitlb_check_alias_20w:
	do_alias	spc,t0,t1,va,pte,prot,naitlb_fault,20

	iitlbt		pte,prot

	rfir
	nop

#else

itlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,itlb_fault

	L2_ptep		ptp,pte,t0,va,itlb_fault

	tlb_lock	spc,ptp,pte,t0,t1,itlb_fault
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	iitlba		pte,(%sr1,va)
	iitlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	tlb_unlock1	spc,t0
	rfir
	nop

naitlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,naitlb_fault

	L2_ptep		ptp,pte,t0,va,naitlb_check_alias_11

	tlb_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_11
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	iitlba		pte,(%sr1,va)
	iitlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	tlb_unlock1	spc,t0
	rfir
	nop

naitlb_check_alias_11:
	do_alias	spc,t0,t1,va,pte,prot,itlb_fault,11

	iitlba          pte,(%sr0, va)
	iitlbp          prot,(%sr0, va)

	rfir
	nop


itlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,itlb_fault

	L2_ptep		ptp,pte,t0,va,itlb_fault

	tlb_lock	spc,ptp,pte,t0,t1,itlb_fault
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	iitlbt          pte,prot

	tlb_unlock1	spc,t0
	rfir
	nop

naitlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,naitlb_fault

	L2_ptep		ptp,pte,t0,va,naitlb_check_alias_20

	tlb_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_20
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	iitlbt          pte,prot

	tlb_unlock1	spc,t0
	rfir
	nop

naitlb_check_alias_20:
	do_alias	spc,t0,t1,va,pte,prot,naitlb_fault,20

	iitlbt          pte,prot

	rfir
	nop

#endif

#ifdef CONFIG_64BIT

dbit_trap_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dbit_fault

	L3_ptep		ptp,pte,t0,va,dbit_fault

	tlb_lock	spc,ptp,pte,t0,t1,dbit_fault
	update_dirty	ptp,pte,t1

	make_insert_tlb	spc,pte,prot,t1

	idtlbt          pte,prot

	tlb_unlock0	spc,t0
	rfir
	nop
#else

dbit_trap_11:

	get_pgd		spc,ptp

	space_check	spc,t0,dbit_fault

	L2_ptep		ptp,pte,t0,va,dbit_fault

	tlb_lock	spc,ptp,pte,t0,t1,dbit_fault
	update_dirty	ptp,pte,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp            %sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp            t1, %sr1     /* Restore sr1 */

	tlb_unlock0	spc,t0
	rfir
	nop

dbit_trap_20:
	get_pgd		spc,ptp

	space_check	spc,t0,dbit_fault

	L2_ptep		ptp,pte,t0,va,dbit_fault

	tlb_lock	spc,ptp,pte,t0,t1,dbit_fault
	update_dirty	ptp,pte,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	idtlbt		pte,prot

	tlb_unlock0	spc,t0
	rfir
	nop
#endif

	.import handle_interruption,code

kernel_bad_space:
	b               intr_save
	ldi             31,%r8  /* Use an unused code */

dbit_fault:
	b               intr_save
	ldi             20,%r8

itlb_fault:
	b               intr_save
	ldi             6,%r8

nadtlb_fault:
	b               intr_save
	ldi             17,%r8

naitlb_fault:
	b               intr_save
	ldi             16,%r8

dtlb_fault:
	b               intr_save
	ldi             15,%r8

	/* Register saving semantics for system calls:

	   %r1		   clobbered by system call macro in userspace
	   %r2		   saved in PT_REGS by gateway page
	   %r3  - %r18	   preserved by C code (saved by signal code)
	   %r19 - %r20	   saved in PT_REGS by gateway page
	   %r21 - %r22	   non-standard syscall args
			   stored in kernel stack by gateway page
	   %r23 - %r26	   arg3-arg0, saved in PT_REGS by gateway page
	   %r27 - %r30	   saved in PT_REGS by gateway page
	   %r31		   syscall return pointer
	 */

	/* Floating point registers (FIXME: what do we do with these?)

	   %fr0  - %fr3	   status/exception, not preserved
	   %fr4  - %fr7	   arguments
	   %fr8	 - %fr11   not preserved by C code
	   %fr12 - %fr21   preserved by C code
	   %fr22 - %fr31   not preserved by C code
	 */

	.macro	reg_save regs
	STREG	%r3, PT_GR3(\regs)
	STREG	%r4, PT_GR4(\regs)
	STREG	%r5, PT_GR5(\regs)
	STREG	%r6, PT_GR6(\regs)
	STREG	%r7, PT_GR7(\regs)
	STREG	%r8, PT_GR8(\regs)
	STREG	%r9, PT_GR9(\regs)
	STREG   %r10,PT_GR10(\regs)
	STREG   %r11,PT_GR11(\regs)
	STREG   %r12,PT_GR12(\regs)
	STREG   %r13,PT_GR13(\regs)
	STREG   %r14,PT_GR14(\regs)
	STREG   %r15,PT_GR15(\regs)
	STREG   %r16,PT_GR16(\regs)
	STREG   %r17,PT_GR17(\regs)
	STREG   %r18,PT_GR18(\regs)
	.endm

	.macro	reg_restore regs
	LDREG	PT_GR3(\regs), %r3
	LDREG	PT_GR4(\regs), %r4
	LDREG	PT_GR5(\regs), %r5
	LDREG	PT_GR6(\regs), %r6
	LDREG	PT_GR7(\regs), %r7
	LDREG	PT_GR8(\regs), %r8
	LDREG	PT_GR9(\regs), %r9
	LDREG   PT_GR10(\regs),%r10
	LDREG   PT_GR11(\regs),%r11
	LDREG   PT_GR12(\regs),%r12
	LDREG   PT_GR13(\regs),%r13
	LDREG   PT_GR14(\regs),%r14
	LDREG   PT_GR15(\regs),%r15
	LDREG   PT_GR16(\regs),%r16
	LDREG   PT_GR17(\regs),%r17
	LDREG   PT_GR18(\regs),%r18
	.endm

	.macro	fork_like name
ENTRY_CFI(sys_\name\()_wrapper)
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
	ldo	TASK_REGS(%r1),%r1
	reg_save %r1
	mfctl	%cr27, %r28
	ldil	L%sys_\name, %r31
	be	R%sys_\name(%sr4,%r31)
	STREG	%r28, PT_CR27(%r1)
ENDPROC_CFI(sys_\name\()_wrapper)
	.endm

fork_like clone
fork_like fork
fork_like vfork

	/* Set the return value for the child */
ENTRY_CFI(child_return)
	BL	schedule_tail, %r2
	nop
finish_child_return:
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
	ldo	TASK_REGS(%r1),%r1	 /* get pt regs */

	LDREG	PT_CR27(%r1), %r3
	mtctl	%r3, %cr27
	reg_restore %r1
	b	syscall_exit
	copy	%r0,%r28
ENDPROC_CFI(child_return)

ENTRY_CFI(sys_rt_sigreturn_wrapper)
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26
	ldo	TASK_REGS(%r26),%r26	/* get pt regs */
	/* Don't save regs, we are going to restore them from sigcontext. */
	STREG	%r2, -RP_OFFSET(%r30)
#ifdef CONFIG_64BIT
	ldo	FRAME_SIZE(%r30), %r30
	BL	sys_rt_sigreturn,%r2
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	BL	sys_rt_sigreturn,%r2
	ldo	FRAME_SIZE(%r30), %r30
#endif

	ldo	-FRAME_SIZE(%r30), %r30
	LDREG	-RP_OFFSET(%r30), %r2

	/* FIXME: I think we need to restore a few more things here. */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r1	/* get pt regs */
	reg_restore %r1

	/* If the signal was received while the process was blocked on a
	 * syscall, then r2 will take us to syscall_exit; otherwise r2 will
	 * take us to syscall_exit_rfi and on to intr_return.
	 */
	bv	%r0(%r2)
	LDREG	PT_GR28(%r1),%r28  /* reload original r28 for syscall_exit */
ENDPROC_CFI(sys_rt_sigreturn_wrapper)

ENTRY_CFI(syscall_exit)
	/* NOTE: Not all syscalls exit this way.  rt_sigreturn will exit
	 * via syscall_exit_rfi if the signal was received while the process
	 * was running.
	 */

	/* save return value now */

	mfctl     %cr30, %r1
	LDREG     TI_TASK(%r1),%r1
	STREG     %r28,TASK_PT_GR28(%r1)

	/* Seems to me that dp could be wrong here, if the syscall involved
	 * calling a module, and nothing got round to restoring dp on return.
	 */
	loadgp

syscall_check_resched:

	/* check for reschedule */

	LDREG	TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19	/* long */
	bb,<,n	%r19, 31-TIF_NEED_RESCHED, syscall_do_resched /* forward */

	.import do_signal,code
syscall_check_sig:
	LDREG	TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19
	ldi	(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME), %r26
	and,COND(<>)	%r19, %r26, %r0
	b,n	syscall_restore	/* skip past if we've nothing to do */

syscall_do_signal:
	/* Save callee-save registers (for sigcontext).
	 * FIXME: After this point the process structure should be
	 * consistent with all the relevant state of the process
	 * before the syscall.  We need to verify this.
	 */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1), %r26		/* struct pt_regs *regs */
	reg_save %r26

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29			/* Reference param save area */
#endif

	BL	do_notify_resume,%r2
	ldi	1, %r25				/* long in_syscall = 1 */

	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1), %r20		/* reload pt_regs */
	reg_restore %r20

	b,n     syscall_check_sig

syscall_restore:
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1

	/* Are we being ptraced? */
	ldw	TASK_FLAGS(%r1),%r19
	ldi	_TIF_SYSCALL_TRACE_MASK,%r2
	and,COND(=)	%r19,%r2,%r0
	b,n	syscall_restore_rfi

	ldo	TASK_PT_FR31(%r1),%r19		   /* reload fpregs */
	rest_fp	%r19

	LDREG	TASK_PT_SAR(%r1),%r19		   /* restore SAR */
	mtsar	%r19

	LDREG	TASK_PT_GR2(%r1),%r2		   /* restore user rp */
	LDREG	TASK_PT_GR19(%r1),%r19
	LDREG   TASK_PT_GR20(%r1),%r20
	LDREG	TASK_PT_GR21(%r1),%r21
	LDREG	TASK_PT_GR22(%r1),%r22
	LDREG	TASK_PT_GR23(%r1),%r23
	LDREG	TASK_PT_GR24(%r1),%r24
	LDREG	TASK_PT_GR25(%r1),%r25
	LDREG	TASK_PT_GR26(%r1),%r26
	LDREG	TASK_PT_GR27(%r1),%r27	   /* restore user dp */
	LDREG	TASK_PT_GR28(%r1),%r28	   /* syscall return value */
	LDREG	TASK_PT_GR29(%r1),%r29
	LDREG	TASK_PT_GR31(%r1),%r31	   /* restore syscall rp */

	/* NOTE: We use rsm/ssm pair to make this operation atomic */
	LDREG   TASK_PT_GR30(%r1),%r1              /* Get user sp */
	rsm     PSW_SM_I, %r0
	copy    %r1,%r30                           /* Restore user sp */
	mfsp    %sr3,%r1                           /* Get user space id */
	mtsp    %r1,%sr7                           /* Restore sr7 */
	ssm     PSW_SM_I, %r0

	/* Set sr2 to zero for userspace syscalls to work. */
	mtsp	%r0,%sr2
	mtsp	%r1,%sr4			   /* Restore sr4 */
	mtsp	%r1,%sr5			   /* Restore sr5 */
	mtsp	%r1,%sr6			   /* Restore sr6 */

	depi	3,31,2,%r31			   /* ensure return to user mode. */

#ifdef CONFIG_64BIT
	/* decide whether to reset the wide mode bit
	 *
	 * For a syscall, the W bit is stored in the lowest bit
	 * of sp.  Extract it and reset W if it is zero */
	extrd,u,*<>	%r30,63,1,%r1
	rsm	PSW_SM_W, %r0
	/* now reset the lowest bit of sp if it was set */
	xor	%r30,%r1,%r30
#endif
	be,n    0(%sr3,%r31)                       /* return to user space */

	/* We have to return via an RFI, so that PSW T and R bits can be set
	 * appropriately.
	 * This sets up pt_regs so we can return via intr_restore, which is not
	 * the most efficient way of doing things, but it works.
	 */
syscall_restore_rfi:
	ldo	-1(%r0),%r2			   /* Set recovery cntr to -1 */
	mtctl	%r2,%cr0			   /*   for immediate trap */
	LDREG	TASK_PT_PSW(%r1),%r2		   /* Get old PSW */
	ldi	0x0b,%r20			   /* Create new PSW */
	depi	-1,13,1,%r20			   /* C, Q, D, and I bits */

	/* The values of SINGLESTEP_BIT and BLOCKSTEP_BIT are
	 * set in thread_info.h and converted to PA bitmap
	 * numbers in asm-offsets.c */

	/* if ((%r19.SINGLESTEP_BIT)) { %r20.27=1} */
	extru,=	%r19,TIF_SINGLESTEP_PA_BIT,1,%r0
	depi	-1,27,1,%r20			   /* R bit */

	/* if ((%r19.BLOCKSTEP_BIT)) { %r20.7=1} */
	extru,= %r19,TIF_BLOCKSTEP_PA_BIT,1,%r0
	depi	-1,7,1,%r20			   /* T bit */

	STREG	%r20,TASK_PT_PSW(%r1)

	/* Always store space registers, since sr3 can be changed (e.g. fork) */

	mfsp    %sr3,%r25
	STREG   %r25,TASK_PT_SR3(%r1)
	STREG   %r25,TASK_PT_SR4(%r1)
	STREG   %r25,TASK_PT_SR5(%r1)
	STREG   %r25,TASK_PT_SR6(%r1)
	STREG   %r25,TASK_PT_SR7(%r1)
	STREG   %r25,TASK_PT_IASQ0(%r1)
	STREG   %r25,TASK_PT_IASQ1(%r1)

	/* XXX W bit??? */
	/* Now if old D bit is clear, it means we didn't save all registers
	 * on syscall entry, so do that now.  This only happens on TRACEME
	 * calls, or if someone attached to us while we were on a syscall.
	 * We could make this more efficient by not saving r3-r18, but
	 * then we wouldn't be able to use the common intr_restore path.
	 * It is only for traced processes anyway, so performance is not
	 * an issue.
	 */
	bb,<	%r2,30,pt_regs_ok		   /* Branch if D set */
	ldo	TASK_REGS(%r1),%r25
	reg_save %r25				   /* Save r3 to r18 */

	/* Save the current sr */
	mfsp	%sr0,%r2
	STREG	%r2,TASK_PT_SR0(%r1)

	/* Save the scratch sr */
	mfsp	%sr1,%r2
	STREG	%r2,TASK_PT_SR1(%r1)

	/* sr2 should be set to zero for userspace syscalls */
	STREG	%r0,TASK_PT_SR2(%r1)

	LDREG	TASK_PT_GR31(%r1),%r2
	depi	3,31,2,%r2		   /* ensure return to user mode. */
	STREG   %r2,TASK_PT_IAOQ0(%r1)
	ldo	4(%r2),%r2
	STREG	%r2,TASK_PT_IAOQ1(%r1)
	b	intr_restore
	copy	%r25,%r16

pt_regs_ok:
	LDREG	TASK_PT_IAOQ0(%r1),%r2
	depi	3,31,2,%r2		   /* ensure return to user mode. */
	STREG	%r2,TASK_PT_IAOQ0(%r1)
	LDREG	TASK_PT_IAOQ1(%r1),%r2
	depi	3,31,2,%r2
	STREG	%r2,TASK_PT_IAOQ1(%r1)
	b	intr_restore
	copy	%r25,%r16

syscall_do_resched:
	load32	syscall_check_resched,%r2 /* if resched, we start over again */
	load32	schedule,%r19
	bv	%r0(%r19)		/* jumps to schedule() */
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	nop
#endif
ENDPROC_CFI(syscall_exit)


#ifdef CONFIG_FUNCTION_TRACER

	.import ftrace_function_trampoline,code
	.align L1_CACHE_BYTES
	.globl mcount
	.type  mcount, @function
ENTRY(mcount)
_mcount:
	.export _mcount,data
	.proc
	.callinfo caller,frame=0
	.entry
	/*
	 * The 64bit mcount() function pointer needs 4 dwords, of which the
	 * first two are free.  We optimize it here and put 2 instructions for
	 * calling mcount(), and 2 instructions for ftrace_stub().  That way we
	 * have it all in one L1 cacheline.
	 */
	b	ftrace_function_trampoline
	copy	%r3, %arg2	/* caller original %sp */
ftrace_stub:
	.globl ftrace_stub
	.type  ftrace_stub, @function
#ifdef CONFIG_64BIT
	bve	(%rp)
#else
	bv	%r0(%rp)
#endif
	nop
#ifdef CONFIG_64BIT
	.dword mcount
	.dword 0 /* code in head.S puts value of global gp here */
#endif
	.exit
	.procend
ENDPROC(mcount)

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	.align 8
	.globl return_to_handler
	.type  return_to_handler, @function
ENTRY_CFI(return_to_handler)
	.proc
	.callinfo caller,frame=FRAME_SIZE
	.entry
	.export parisc_return_to_handler,data
parisc_return_to_handler:
	copy %r3,%r1
	STREG %r0,-RP_OFFSET(%sp)	/* store 0 as %rp */
	copy %sp,%r3
	STREGM %r1,FRAME_SIZE(%sp)
	STREG %ret0,8(%r3)
	STREG %ret1,16(%r3)

#ifdef CONFIG_64BIT
	loadgp
#endif

	/* call ftrace_return_to_handler(0) */
	.import ftrace_return_to_handler,code
	load32 ftrace_return_to_handler,%ret0
	load32 .Lftrace_ret,%r2
#ifdef CONFIG_64BIT
	ldo -16(%sp),%ret1		/* Reference param save area */
	bve	(%ret0)
#else
	bv	%r0(%ret0)
#endif
	ldi 0,%r26
.Lftrace_ret:
	copy %ret0,%rp

	/* restore original return values */
	LDREG 8(%r3),%ret0
	LDREG 16(%r3),%ret1

	/* return from function */
#ifdef CONFIG_64BIT
	bve	(%rp)
#else
	bv	%r0(%rp)
#endif
	LDREGM -FRAME_SIZE(%sp),%r3
	.exit
	.procend
ENDPROC_CFI(return_to_handler)

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#endif	/* CONFIG_FUNCTION_TRACER */

2069
2070#ifdef CONFIG_IRQSTACKS
2071/* void call_on_stack(unsigned long param1, void *func,
2072		      unsigned long new_stack) */
2073ENTRY_CFI(call_on_stack)
2074	copy	%sp, %r1
2075
	/* Regarding the HPPA calling conventions for function pointers,
	   we assume the PIC register is not changed across the call.  For
	   CONFIG_64BIT, the argument pointer is left to point at the
	   argument region allocated for the call to call_on_stack. */
# ifdef CONFIG_64BIT
	/* Switch to new stack.  We allocate two 128 byte frames.  */
	ldo	256(%arg2), %sp
	/* Save previous stack pointer and return pointer in frame marker */
	STREG	%rp, -144(%sp)
	/* Calls always use function descriptor */
	LDREG	16(%arg1), %arg1
	bve,l	(%arg1), %rp
	STREG	%r1, -136(%sp)
	LDREG	-144(%sp), %rp
	bve	(%rp)
	LDREG	-136(%sp), %sp
# else
	/* Switch to new stack.  We allocate two 64 byte frames.  */
	ldo	128(%arg2), %sp
	/* Save previous stack pointer and return pointer in frame marker */
	STREG	%r1, -68(%sp)
	STREG	%rp, -84(%sp)
	/* Calls use function descriptor if PLABEL bit is set */
	bb,>=,n	%arg1, 30, 1f
	depwi	0,31,2, %arg1
	LDREG	0(%arg1), %arg1
1:
	be,l	0(%sr4,%arg1), %sr0, %r31
	copy	%r31, %rp
	LDREG	-84(%sp), %rp
	bv	(%rp)
	LDREG	-68(%sp), %sp
# endif /* CONFIG_64BIT */
ENDPROC_CFI(call_on_stack)
#endif /* CONFIG_IRQSTACKS */

ENTRY_CFI(get_register)
	/*
	 * get_register is used by the non access tlb miss handlers to
	 * copy the value of the general register specified in r8 into
	 * r1. This routine can't be used for shadowed registers, since
	 * the rfir will restore the original value. So, for the shadowed
	 * registers we put a -1 into r1 to indicate that the register
	 * should not be used (the register being copied could also have
	 * a -1 in it, but that is OK, it just means that we will have
	 * to use the slow path instead).
	 */
	blr     %r8,%r0
	nop
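	/* blr branches to (current IA + 8) + 8*%r8, i.e. into the table
	 * below, where every register gets a two-instruction (8-byte)
	 * slot: a bv back to the caller with the copy/ldi in its delay
	 * slot.  set_register further down uses the same dispatch. */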
	bv      %r0(%r25)    /* r0 */
	copy    %r0,%r1
	bv      %r0(%r25)    /* r1 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r2 */
	copy    %r2,%r1
	bv      %r0(%r25)    /* r3 */
	copy    %r3,%r1
	bv      %r0(%r25)    /* r4 */
	copy    %r4,%r1
	bv      %r0(%r25)    /* r5 */
	copy    %r5,%r1
	bv      %r0(%r25)    /* r6 */
	copy    %r6,%r1
	bv      %r0(%r25)    /* r7 */
	copy    %r7,%r1
	bv      %r0(%r25)    /* r8 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r9 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r10 */
	copy    %r10,%r1
	bv      %r0(%r25)    /* r11 */
	copy    %r11,%r1
	bv      %r0(%r25)    /* r12 */
	copy    %r12,%r1
	bv      %r0(%r25)    /* r13 */
	copy    %r13,%r1
	bv      %r0(%r25)    /* r14 */
	copy    %r14,%r1
	bv      %r0(%r25)    /* r15 */
	copy    %r15,%r1
	bv      %r0(%r25)    /* r16 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r17 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r18 */
	copy    %r18,%r1
	bv      %r0(%r25)    /* r19 */
	copy    %r19,%r1
	bv      %r0(%r25)    /* r20 */
	copy    %r20,%r1
	bv      %r0(%r25)    /* r21 */
	copy    %r21,%r1
	bv      %r0(%r25)    /* r22 */
	copy    %r22,%r1
	bv      %r0(%r25)    /* r23 */
	copy    %r23,%r1
	bv      %r0(%r25)    /* r24 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r25 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r26 */
	copy    %r26,%r1
	bv      %r0(%r25)    /* r27 */
	copy    %r27,%r1
	bv      %r0(%r25)    /* r28 */
	copy    %r28,%r1
	bv      %r0(%r25)    /* r29 */
	copy    %r29,%r1
	bv      %r0(%r25)    /* r30 */
	copy    %r30,%r1
	bv      %r0(%r25)    /* r31 */
	copy    %r31,%r1
ENDPROC_CFI(get_register)


ENTRY_CFI(set_register)
	/*
	 * set_register is used by the non access tlb miss handlers to
	 * copy the value of r1 into the general register specified in
	 * r8.
	 */
	blr     %r8,%r0
	nop
	bv      %r0(%r25)    /* r0 (silly, but it is a place holder) */
	copy    %r1,%r0
	bv      %r0(%r25)    /* r1 */
	copy    %r1,%r1
	bv      %r0(%r25)    /* r2 */
	copy    %r1,%r2
	bv      %r0(%r25)    /* r3 */
	copy    %r1,%r3
	bv      %r0(%r25)    /* r4 */
	copy    %r1,%r4
	bv      %r0(%r25)    /* r5 */
	copy    %r1,%r5
	bv      %r0(%r25)    /* r6 */
	copy    %r1,%r6
	bv      %r0(%r25)    /* r7 */
	copy    %r1,%r7
	bv      %r0(%r25)    /* r8 */
	copy    %r1,%r8
	bv      %r0(%r25)    /* r9 */
	copy    %r1,%r9
	bv      %r0(%r25)    /* r10 */
	copy    %r1,%r10
	bv      %r0(%r25)    /* r11 */
	copy    %r1,%r11
	bv      %r0(%r25)    /* r12 */
	copy    %r1,%r12
	bv      %r0(%r25)    /* r13 */
	copy    %r1,%r13
	bv      %r0(%r25)    /* r14 */
	copy    %r1,%r14
	bv      %r0(%r25)    /* r15 */
	copy    %r1,%r15
	bv      %r0(%r25)    /* r16 */
	copy    %r1,%r16
	bv      %r0(%r25)    /* r17 */
	copy    %r1,%r17
	bv      %r0(%r25)    /* r18 */
	copy    %r1,%r18
	bv      %r0(%r25)    /* r19 */
	copy    %r1,%r19
	bv      %r0(%r25)    /* r20 */
	copy    %r1,%r20
	bv      %r0(%r25)    /* r21 */
	copy    %r1,%r21
	bv      %r0(%r25)    /* r22 */
	copy    %r1,%r22
	bv      %r0(%r25)    /* r23 */
	copy    %r1,%r23
	bv      %r0(%r25)    /* r24 */
	copy    %r1,%r24
	bv      %r0(%r25)    /* r25 */
	copy    %r1,%r25
	bv      %r0(%r25)    /* r26 */
	copy    %r1,%r26
	bv      %r0(%r25)    /* r27 */
	copy    %r1,%r27
	bv      %r0(%r25)    /* r28 */
	copy    %r1,%r28
	bv      %r0(%r25)    /* r29 */
	copy    %r1,%r29
	bv      %r0(%r25)    /* r30 */
	copy    %r1,%r30
	bv      %r0(%r25)    /* r31 */
	copy    %r1,%r31
ENDPROC_CFI(set_register)
