xref: /openbmc/linux/arch/parisc/kernel/entry.S (revision cdfce539)
1/*
2 * Linux/PA-RISC Project (http://www.parisc-linux.org/)
3 *
4 * kernel entry points (interruptions, system call wrappers)
5 *  Copyright (C) 1999,2000 Philipp Rumpf
6 *  Copyright (C) 1999 SuSE GmbH Nuernberg
7 *  Copyright (C) 2000 Hewlett-Packard (John Marvin)
8 *  Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
9 *
10 *    This program is free software; you can redistribute it and/or modify
11 *    it under the terms of the GNU General Public License as published by
12 *    the Free Software Foundation; either version 2, or (at your option)
13 *    any later version.
14 *
15 *    This program is distributed in the hope that it will be useful,
16 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
17 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18 *    GNU General Public License for more details.
19 *
20 *    You should have received a copy of the GNU General Public License
21 *    along with this program; if not, write to the Free Software
22 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 */
24
25#include <asm/asm-offsets.h>
26
27/* we have the following possibilities to act on an interruption:
28 *  - handle in assembly and use shadowed registers only
29 *  - save registers to kernel stack and handle in assembly or C */
30
31
32#include <asm/psw.h>
33#include <asm/cache.h>		/* for L1_CACHE_SHIFT */
34#include <asm/assembly.h>	/* for LDREG/STREG defines */
35#include <asm/pgtable.h>
36#include <asm/signal.h>
37#include <asm/unistd.h>
38#include <asm/thread_info.h>
39
40#include <linux/linkage.h>
41
42#ifdef CONFIG_64BIT
43	.level 2.0w
44#else
45	.level 2.0
46#endif
47
48	.import         pa_dbit_lock,data
49
50	/* space_to_prot macro creates a prot id from a space id */
51
52#if (SPACEID_SHIFT) == 0
	/* prot = spc << 1: deposit-and-zero the low 31 bits of \spc ending
	 * at bit 62 of \prot (valid when space ids need no downshift). */
53	.macro  space_to_prot spc prot
54	depd,z  \spc,62,31,\prot
55	.endm
56#else
	/* Non-zero SPACEID_SHIFT: extract the relevant space-id bits,
	 * right-justified, to form the protection id. */
57	.macro  space_to_prot spc prot
58	extrd,u \spc,(64 - (SPACEID_SHIFT)),32,\prot
59	.endm
60#endif
61
62	/* Switch to virtual mapping, trashing only %r1 */
	/* Zeroes the space registers sr4-sr7, preserves the old sr7 in sr3
	 * when it was non-zero (i.e. the interruption came from user space),
	 * converts %r29 to its virtual address (via tovirt_r1), then loads
	 * KERNEL_PSW and the IIAOQ head/tail with local label 4 below and
	 * executes an rfir so we resume there running virtually. */
63	.macro  virt_map
64	/* pcxt_ssm_bug */
65	rsm	PSW_SM_I, %r0	/* barrier for "Relied upon Translation */
66	mtsp	%r0, %sr4
67	mtsp	%r0, %sr5
68	mfsp	%sr7, %r1
69	or,=    %r0,%r1,%r0	/* Only save sr7 in sr3 if sr7 != 0 */
70	mtsp	%r1, %sr3
71	tovirt_r1 %r29
72	load32	KERNEL_PSW, %r1
73
74	rsm     PSW_SM_QUIET,%r0	/* second "heavy weight" ctl op */
75	mtsp	%r0, %sr6
76	mtsp	%r0, %sr7
77	mtctl	%r0, %cr17	/* Clear IIASQ tail */
78	mtctl	%r0, %cr17	/* Clear IIASQ head */
79	mtctl	%r1, %ipsw
80	load32	4f, %r1		/* rfir target: local label 4 below */
81	mtctl	%r1, %cr18	/* Set IIAOQ tail */
82	ldo	4(%r1), %r1	/* head = tail + 4 (next instruction) */
83	mtctl	%r1, %cr18	/* Set IIAOQ head */
84	rfir
85	nop
864:
87	.endm
88
89	/*
90	 * The "get_stack" macros are responsible for determining the
91	 * kernel stack value.
92	 *
93	 *      If sr7 == 0
94	 *          Already using a kernel stack, so call the
95	 *          get_stack_use_r30 macro to push a pt_regs structure
96	 *          on the stack, and store registers there.
97	 *      else
98	 *          Need to set up a kernel stack, so call the
99	 *          get_stack_use_cr30 macro to set up a pointer
100	 *          to the pt_regs structure contained within the
101	 *          task pointer pointed to by cr30. Set the stack
102	 *          pointer to point to the end of the task structure.
103	 *
104	 * Note that we use shadowed registers for temps until
105	 * we can save %r26 and %r29. %r26 is used to preserve
106	 * %r8 (a shadowed register) which temporarily contained
107	 * either the fault type ("code") or the eirr. We need
108	 * to use a non-shadowed register to carry the value over
109	 * the rfir in virt_map. We use %r26 since this value winds
110	 * up being passed as the argument to either do_cpu_irq_mask
111	 * or handle_interruption. %r29 is used to hold a pointer
112	 * to the register save area, and once again, it needs to
113	 * be a non-shadowed register so that it survives the rfir.
114	 *
115	 * N.B. TASK_SZ_ALGN and PT_SZ_ALGN include space for a stack frame.
116	 */
117
	/* Interruption arrived from user space (sr7 != 0): locate the task's
	 * pt_regs save area via cr30, save %r30/%r29/%r26 there, leave
	 * %r29 = physical pt_regs pointer (survives the rfir in virt_map)
	 * and point %r30 at the top of the task's kernel stack. */
118	.macro  get_stack_use_cr30

119
120	/* we save the registers in the task struct */
121
122	mfctl   %cr30, %r1		/* cr30 = thread_info (virtual) */
123	tophys  %r1,%r9			/* %r9 = thread_info (physical) */
124	LDREG	TI_TASK(%r9), %r1	/* thread_info -> task_struct */
125	tophys  %r1,%r9
126	ldo     TASK_REGS(%r9),%r9	/* %r9 = pt_regs area inside task (phys) */
127	STREG   %r30, PT_GR30(%r9)
128	STREG   %r29,PT_GR29(%r9)
129	STREG   %r26,PT_GR26(%r9)
130	copy    %r9,%r29		/* non-shadowed %r29 carries pt_regs ptr */
131	mfctl   %cr30, %r1
132	ldo	THREAD_SZ_ALGN(%r1), %r30	/* kernel stack top */
133	.endm
134
	/* Interruption arrived while already on a kernel stack (sr7 == 0):
	 * push a pt_regs frame on the current stack, save %r30/%r29/%r26
	 * into it, and leave %r29 = physical pt_regs pointer. */
135	.macro  get_stack_use_r30

136
137	/* we put a struct pt_regs on the stack and save the registers there */
138
139	tophys  %r30,%r9		/* %r9 = current stack ptr (physical) */
140	STREG   %r30,PT_GR30(%r9)
141	ldo	PT_SZ_ALGN(%r30),%r30	/* advance stack past the new frame */
142	STREG   %r29,PT_GR29(%r9)
143	STREG   %r26,PT_GR26(%r9)
144	copy    %r9,%r29		/* non-shadowed %r29 carries pt_regs ptr */
145	.endm
146
	/* Restore %r1, %r30 and %r29 from the pt_regs pointed to by %r29.
	 * %r29 is reloaded last since it is the base register here. */
147	.macro  rest_stack
148	LDREG   PT_GR1(%r29), %r1
149	LDREG   PT_GR30(%r29),%r30
150	LDREG   PT_GR29(%r29),%r29
151	.endm
152
153	/* default interruption handler
154	 * (calls traps.c:handle_interruption) */
	/* The trap number is passed in shadowed %r8 (loaded in the branch
	 * delay slot); .align 32 pads each entry to its 32-byte vector slot. */
155	.macro	def code
156	b	intr_save
157	ldi     \code, %r8
158	.align	32
159	.endm
160
161	/* Interrupt interruption handler
162	 * (calls irq.c:do_cpu_irq_mask) */
	/* \code is unused here; the delay slot captures sr7 in shadowed %r16
	 * so intr_extint can tell user from kernel context. */
163	.macro	extint code
164	b	intr_extint
165	mfsp    %sr7,%r16
166	.align	32
167	.endm
168
169	.import	os_hpmc, code
170
171	/* HPMC handler */
	/* High Priority Machine Check slot.  \code is unused; firmware expects
	 * the checksum/address/length words below, patched at boot. */
172	.macro	hpmc code
173	nop			/* must be a NOP, will be patched later */
174	load32	PA(os_hpmc), %r3
175	bv,n	0(%r3)
176	nop
177	.word	0		/* checksum (will be patched) */
178	.word	PA(os_hpmc)	/* address of handler */
179	.word	0		/* length of handler */
180	.endm
181
182	/*
183	 * Performance Note: Instructions will be moved up into
184	 * this part of the code later on, once we are sure
185	 * that the tlb miss handlers are close to final form.
186	 */
187
188	/* Register definitions for tlb miss handler macros */
189
190	va  = r8	/* virtual address for which the trap occurred */
191	spc = r24	/* space for which the trap occurred */
	/* Both are shadowed registers, so they survive until the miss
	 * handler saves state.  In all the vector-entry macros below the
	 * \code argument is unused (the slot number documents the trap). */
192
193#ifndef CONFIG_64BIT
194
195	/*
196	 * itlb miss interruption handler (parisc 1.1 - 32 bit)
197	 */
198
199	.macro	itlb_11 code

200
201	mfctl	%pcsq, spc
202	b	itlb_miss_11
203	mfctl	%pcoq, va	/* delay slot: va = faulting insn offset */
204
205	.align		32
206	.endm
207#endif
208
209	/*
210	 * itlb miss interruption handler (parisc 2.0)
211	 */
212
213	.macro	itlb_20 code
214	mfctl	%pcsq, spc
215#ifdef CONFIG_64BIT
216	b       itlb_miss_20w
217#else
218	b	itlb_miss_20
219#endif
220	mfctl	%pcoq, va	/* delay slot: va = faulting insn offset */
221
222	.align		32
223	.endm
224
225#ifndef CONFIG_64BIT
226	/*
227	 * naitlb miss interruption handler (parisc 1.1 - 32 bit)
228	 */
	/* Non-access itlb miss: fault space/offset come from %isr/%ior. */
229
230	.macro	naitlb_11 code

231
232	mfctl	%isr,spc
233	b	naitlb_miss_11
234	mfctl 	%ior,va
235
236	.align		32
237	.endm
238#endif
239
240	/*
241	 * naitlb miss interruption handler (parisc 2.0)
242	 */
243
244	.macro	naitlb_20 code

245
246	mfctl	%isr,spc
247#ifdef CONFIG_64BIT
248	b       naitlb_miss_20w
249#else
250	b	naitlb_miss_20
251#endif
252	mfctl 	%ior,va		/* delay slot: va = fault offset */
253
254	.align		32
255	.endm
256
257#ifndef CONFIG_64BIT
258	/*
259	 * dtlb miss interruption handler (parisc 1.1 - 32 bit)
260	 */
	/* Data TLB miss: fault space/offset come from %isr/%ior. */
261
262	.macro	dtlb_11 code

263
264	mfctl	%isr, spc
265	b	dtlb_miss_11
266	mfctl	%ior, va
267
268	.align		32
269	.endm
270#endif
271
272	/*
273	 * dtlb miss interruption handler (parisc 2.0)
274	 */
275
276	.macro	dtlb_20 code

277
278	mfctl	%isr, spc
279#ifdef CONFIG_64BIT
280	b       dtlb_miss_20w
281#else
282	b	dtlb_miss_20
283#endif
284	mfctl	%ior, va	/* delay slot: va = fault offset */
285
286	.align		32
287	.endm
288
289#ifndef CONFIG_64BIT
290	/* nadtlb miss interruption handler (parisc 1.1 - 32 bit) */
	/* Non-access data TLB miss: fault space/offset from %isr/%ior. */
291
292	.macro	nadtlb_11 code

293
294	mfctl	%isr,spc
295	b       nadtlb_miss_11
296	mfctl	%ior,va
297
298	.align		32
299	.endm
300#endif
301
302	/* nadtlb miss interruption handler (parisc 2.0) */
303
304	.macro	nadtlb_20 code

305
306	mfctl	%isr,spc
307#ifdef CONFIG_64BIT
308	b       nadtlb_miss_20w
309#else
310	b       nadtlb_miss_20
311#endif
312	mfctl	%ior,va		/* delay slot: va = fault offset */
313
314	.align		32
315	.endm
316
317#ifndef CONFIG_64BIT
318	/*
319	 * dirty bit trap interruption handler (parisc 1.1 - 32 bit)
320	 */
	/* TLB dirty-bit trap (store to a clean page): space/offset
	 * come from %isr/%ior. */
321
322	.macro	dbit_11 code

323
324	mfctl	%isr,spc
325	b	dbit_trap_11
326	mfctl	%ior,va
327
328	.align		32
329	.endm
330#endif
331
332	/*
333	 * dirty bit trap interruption handler (parisc 2.0)
334	 */
335
336	.macro	dbit_20 code

337
338	mfctl	%isr,spc
339#ifdef CONFIG_64BIT
340	b       dbit_trap_20w
341#else
342	b	dbit_trap_20
343#endif
344	mfctl	%ior,va		/* delay slot: va = fault offset */
345
346	.align		32
347	.endm
348
349	/* In LP64, the space contains part of the upper 32 bits of the
350	 * fault.  We have to extract this and place it in the va,
351	 * zeroing the corresponding bits in the space register */
	/* No-op on 32-bit kernels (body compiled out). */
352	.macro		space_adjust	spc,va,tmp
353#ifdef CONFIG_64BIT
354	extrd,u		\spc,63,SPACEID_SHIFT,\tmp	/* tmp = low SPACEID_SHIFT bits of spc */
355	depd		%r0,63,SPACEID_SHIFT,\spc	/* clear them from spc */
356	depd		\tmp,31,SPACEID_SHIFT,\va	/* merge them into va's upper word */
357#endif
358	.endm
359
360	.import		swapper_pg_dir,code
361
362	/* Get the pgd.  For faults on space zero (kernel space), this
363	 * is simply swapper_pg_dir.  For user space faults, the
364	 * pgd is stored in %cr25 */
	/* The or,COND(=) nullifies the mfctl when \spc == 0, leaving the
	 * physical address of swapper_pg_dir in \reg for kernel faults. */
365	.macro		get_pgd		spc,reg
366	ldil		L%PA(swapper_pg_dir),\reg
367	ldo		R%PA(swapper_pg_dir)(\reg),\reg
368	or,COND(=)	%r0,\spc,%r0	/* skip next insn if spc == 0 */
369	mfctl		%cr25,\reg	/* user fault: reg = user pgd */
370	.endm
371
372	/*
373		space_check(spc,tmp,fault)
374
375		spc - The space we saw the fault with.
376		tmp - The place to store the current space.
377		fault - Function to call on failure.
378
379		Only allow faults on different spaces from the
380		currently active one if we're the kernel
381
382	*/
	/* On a disallowed space mismatch the final cmpb branches to \fault.
	 * The two nullifying ors force \tmp == \spc (so the compare passes)
	 * in the cases that are explicitly allowed. */
383	.macro		space_check	spc,tmp,fault
384	mfsp		%sr7,\tmp
385	or,COND(<>)	%r0,\spc,%r0	/* user may execute gateway page
386					 * as kernel, so defeat the space
387					 * check if it is */
388	copy		\spc,\tmp	/* nullified unless spc == 0 */
389	or,COND(=)	%r0,\tmp,%r0	/* nullify if executing as kernel */
390	cmpb,COND(<>),n	\tmp,\spc,\fault
391	.endm
392
393	/* Look up a PTE in a 2-Level scheme (faulting at each
394	 * level if the entry isn't present
395	 *
396	 * NOTE: we use ldw even for LP64, since the short pointers
397	 * can address up to 1TB
398	 */
	/* In:  \pmd = page-table base, \va = fault address.
	 * Out: \pte = PTE value, or branch to \fault if a level is absent.
	 * Clobbers \index and %r9. */
399	.macro		L2_ptep	pmd,pte,index,va,fault
	/* Extract the table index for this level from \va; which bits are
	 * used depends on page size / levels configuration. */
400#if PT_NLEVELS == 3
401	extru		\va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
402#else
403# if defined(CONFIG_64BIT)
404	extrd,u		\va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
405  #else
406  # if PAGE_SIZE > 4096
407	extru		\va,31-ASM_PGDIR_SHIFT,32-ASM_PGDIR_SHIFT,\index
408  # else
409	extru		\va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
410  # endif
411# endif
412#endif
413	dep             %r0,31,PAGE_SHIFT,\pmd  /* clear offset */
414	copy		%r0,\pte
415	ldw,s		\index(\pmd),\pmd	/* load pmd entry (short ptr) */
416	bb,>=,n		\pmd,_PxD_PRESENT_BIT,\fault
417	dep		%r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
418	copy		\pmd,%r9
419	SHLREG		%r9,PxD_VALUE_SHIFT,\pmd	/* pmd value -> address */
420	extru		\va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
421	dep		%r0,31,PAGE_SHIFT,\pmd  /* clear offset */
422	shladd		\index,BITS_PER_PTE_ENTRY,\pmd,\pmd	/* &pte */
423	LDREG		%r0(\pmd),\pte		/* pmd is now pte */
424	bb,>=,n		\pte,_PAGE_PRESENT_BIT,\fault
425	.endm
426
427	/* Look up PTE in a 3-Level scheme.
428	 *
429	 * Here we implement a Hybrid L2/L3 scheme: we allocate the
430	 * first pmd adjacent to the pgd.  This means that we can
431	 * subtract a constant offset to get to it.  The pmd and pgd
432	 * sizes are arranged so that a single pmd covers 4GB (giving
433	 * a full LP64 process access to 8TB) so our lookups are
434	 * effectively L2 for the first 4GB of the kernel (i.e. for
435	 * all ILP32 processes and all the kernel for machines with
436	 * under 4GB of memory) */
	/* Each extrd,u,*= test nullifies the following instruction when the
	 * pgd-index bits of \va are zero: for va < 4GB the whole L3 walk is
	 * skipped and the final ldo (nullified in the opposite case by the
	 * *<> test) points \pgd at the pmd allocated adjacent to the pgd. */
437	.macro		L3_ptep pgd,pte,index,va,fault
438#if PT_NLEVELS == 3 /* we might have a 2-Level scheme, e.g. with 16kb page size */
439	extrd,u		\va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
440	copy		%r0,\pte
441	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
442	ldw,s		\index(\pgd),\pgd
443	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
444	bb,>=,n		\pgd,_PxD_PRESENT_BIT,\fault
445	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
446	shld		\pgd,PxD_VALUE_SHIFT,\index
447	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
448	copy		\index,\pgd
449	extrd,u,*<>	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
450	ldo		ASM_PGD_PMD_OFFSET(\pgd),\pgd
451#endif
452	L2_ptep		\pgd,\pte,\index,\va,\fault
453	.endm
454
455	/* Acquire pa_dbit_lock lock. */
	/* SMP only: spin on the lock word with LDCW until non-zero.
	 * Kernel-space faults (\spc == 0) skip the lock entirely, matching
	 * the nullified release in dbit_unlock0. */
456	.macro		dbit_lock	spc,tmp,tmp1
457#ifdef CONFIG_SMP
458	cmpib,COND(=),n	0,\spc,2f	/* kernel space: no locking */
459	load32		PA(pa_dbit_lock),\tmp
4601:	LDCW		0(\tmp),\tmp1	/* atomic load-and-clear */
461	cmpib,COND(=)	0,\tmp1,1b	/* was held: spin */
462	nop
4632:
464#endif
465	.endm
466
467	/* Release pa_dbit_lock lock without reloading lock address. */
	/* The store writes the (non-zero) space id back into the lock word;
	 * it is nullified for kernel space (\spc == 0), where dbit_lock
	 * never took the lock. */
468	.macro		dbit_unlock0	spc,tmp
469#ifdef CONFIG_SMP
470	or,COND(=)	%r0,\spc,%r0	/* skip store if spc == 0 */
471	stw             \spc,0(\tmp)
472#endif
473	.endm
474
475	/* Release pa_dbit_lock lock. */
	/* Same as dbit_unlock0, but recomputes the lock address into \tmp. */
476	.macro		dbit_unlock1	spc,tmp
477#ifdef CONFIG_SMP
478	load32		PA(pa_dbit_lock),\tmp
479	dbit_unlock0	\spc,\tmp
480#endif
481	.endm
482
483	/* Set the _PAGE_ACCESSED bit of the PTE.  Be clever and
484	 * don't needlessly dirty the cache line if it was already set */
	/* On SMP, user-space PTEs (\spc != 0) are re-read under the dbit
	 * lock; the and,COND(<>) nullifies the store when _PAGE_ACCESSED
	 * was already set. */
485	.macro		update_ptep	spc,ptep,pte,tmp,tmp1
486#ifdef CONFIG_SMP
487	or,COND(=)	%r0,\spc,%r0	/* skip reload if spc == 0 */
488	LDREG		0(\ptep),\pte	/* re-read pte under the lock */
489#endif
490	ldi		_PAGE_ACCESSED,\tmp1
491	or		\tmp1,\pte,\tmp
492	and,COND(<>)	\tmp1,\pte,%r0	/* bit already set? skip store */
493	STREG		\tmp,0(\ptep)
494	.endm
495
496	/* Set the dirty bit (and accessed bit).  No need to be
497	 * clever, this is only used from the dirty fault */
	/* Unconditionally ORs _PAGE_ACCESSED|_PAGE_DIRTY into the PTE and
	 * stores it back; on SMP, user-space PTEs are first re-read under
	 * the dbit lock (reload nullified when \spc == 0). */
498	.macro		update_dirty	spc,ptep,pte,tmp
499#ifdef CONFIG_SMP
500	or,COND(=)	%r0,\spc,%r0	/* skip reload if spc == 0 */
501	LDREG		0(\ptep),\pte
502#endif
503	ldi		_PAGE_ACCESSED|_PAGE_DIRTY,\tmp
504	or		\tmp,\pte,\pte
505	STREG		\pte,0(\ptep)
506	.endm
507
508	/* bitshift difference between a PFN (based on kernel's PAGE_SIZE)
509	 * to a CPU TLB 4k PFN (4k => 12 bits to shift) */
510	#define PAGE_ADD_SHIFT  (PAGE_SHIFT-12)
511
512	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
	/* Extracts the PFN field from \pte, re-aligned for the hardware's
	 * 4k-based TLB format, and deposits the default page-size encoding
	 * in the low bits.  \pte is rewritten in place. */
513	.macro		convert_for_tlb_insert20 pte
514	extrd,u		\pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
515				64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
516	depdi		_PAGE_SIZE_ENCODING_DEFAULT,63,\
517				(63-58)+PAGE_ADD_SHIFT,\pte
518	.endm
519
520	/* Convert the pte and prot to tlb insertion values.  How
521	 * this happens is quite subtle, read below */
	/* PA2.0 format.  Each extrd,u,*= below tests a single PTE bit and
	 * nullifies the following deposit when that bit is clear, so the
	 * deposit takes effect only for pages that have the attribute. */
522	.macro		make_insert_tlb	spc,pte,prot
523	space_to_prot   \spc \prot        /* create prot id from space */
524	/* The following is the real subtlety.  This is depositing
525	 * T <-> _PAGE_REFTRAP
526	 * D <-> _PAGE_DIRTY
527	 * B <-> _PAGE_DMB (memory break)
528	 *
529	 * Then incredible subtlety: The access rights are
530	 * _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE
531	 * See 3-14 of the parisc 2.0 manual
532	 *
533	 * Finally, _PAGE_READ goes in the top bit of PL1 (so we
534	 * trigger an access rights trap in user space if the user
535	 * tries to read an unreadable page */
536	depd            \pte,8,7,\prot
537
538	/* PAGE_USER indicates the page can be read with user privileges,
539	 * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1
540	 * contains _PAGE_READ) */
541	extrd,u,*=      \pte,_PAGE_USER_BIT+32,1,%r0
542	depdi		7,11,3,\prot
543	/* If we're a gateway page, drop PL2 back to zero for promotion
544	 * to kernel privilege (so we can execute the page as kernel).
545	 * Any privilege promotion page always denys read and write */
546	extrd,u,*= 	\pte,_PAGE_GATEWAY_BIT+32,1,%r0
547	depd		%r0,11,2,\prot	/* If Gateway, Set PL2 to 0 */
548
549	/* Enforce uncacheable pages.
550	 * This should ONLY be use for MMIO on PA 2.0 machines.
551	 * Memory/DMA is cache coherent on all PA2.0 machines we support
552	 * (that means T-class is NOT supported) and the memory controllers
553	 * on most of those machines only handles cache transactions.
554	 */
555	extrd,u,*=	\pte,_PAGE_NO_CACHE_BIT+32,1,%r0
556	depdi		1,12,1,\prot	/* set U (uncacheable) if NO_CACHE */
557
558	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
559	convert_for_tlb_insert20 \pte
560	.endm
561
562	/* Identical macro to make_insert_tlb above, except it
563	 * makes the tlb entry for the differently formatted pa11
564	 * insertion instructions */
	/* 32-bit variant: each extru,= test nullifies the following depi
	 * when the tested PTE bit is clear. */
565	.macro		make_insert_tlb_11	spc,pte,prot
566	zdep		\spc,30,15,\prot	/* prot id from space id */
567	dep		\pte,8,7,\prot		/* T/D/B + access-rights bits */
568	extru,=		\pte,_PAGE_NO_CACHE_BIT,1,%r0
569	depi		1,12,1,\prot		/* uncacheable page */
570	extru,=         \pte,_PAGE_USER_BIT,1,%r0
571	depi		7,11,3,\prot   /* Set for user space (1 rsvd for read) */
572	extru,= 	\pte,_PAGE_GATEWAY_BIT,1,%r0
573	depi		0,11,2,\prot	/* If Gateway, Set PL2 to 0 */
574
575	/* Get rid of prot bits and convert to page addr for iitlba */
576
577	depi		0,31,ASM_PFN_PTE_SHIFT,\pte
578	SHRREG		\pte,(ASM_PFN_PTE_SHIFT-(31-26)),\pte
579	.endm
580
581	/* This is for ILP32 PA2.0 only.  The TLB insertion needs
582	 * to extend into I/O space if the address is 0xfXXXXXXX
583	 * so we extend the f's into the top word of the pte in
584	 * this case */
	/* Sign-extract the top nibble of the 32-bit address: if it is 0xf
	 * (\tmp == -1, so \tmp + 1 == 0) the addi,<> does NOT nullify and
	 * the sign-extending extrd,s propagates the f's upward. */
585	.macro		f_extend	pte,tmp
586	extrd,s		\pte,42,4,\tmp
587	addi,<>		1,\tmp,%r0	/* skip extend unless nibble was 0xf */
588	extrd,s		\pte,63,25,\pte
589	.endm
590
591	/* The alias region is an 8MB aligned 16MB to do clear and
592	 * copy user pages at addresses congruent with the user
593	 * virtual address.
594	 *
595	 * To use the alias page, you set %r26 up with the to TLB
596	 * entry (identifying the physical page) and %r23 up with
597	 * the from tlb entry (or nothing if only a to entry---for
598	 * clear_user_page_asm) */
	/* Verifies the fault really is in the tmpalias window (else branch
	 * to \fault), builds \prot from the faulting opcode, then selects
	 * the "from" (%r23) or "to" (%r26) TLB entry into \pte based on
	 * which half of the window \va falls in. */
599	.macro		do_alias	spc,tmp,tmp1,va,pte,prot,fault,patype
600	cmpib,COND(<>),n 0,\spc,\fault	/* alias region is kernel-only */
601	ldil		L%(TMPALIAS_MAP_START),\tmp
602#if defined(CONFIG_64BIT) && (TMPALIAS_MAP_START >= 0x80000000)
603	/* on LP64, ldi will sign extend into the upper 32 bits,
604	 * which is behaviour we don't want */
605	depdi		0,31,32,\tmp
606#endif
607	copy		\va,\tmp1
608	depi		0,31,23,\tmp1	/* round va down to the 8MB window */
609	cmpb,COND(<>),n	\tmp,\tmp1,\fault
610	mfctl		%cr19,\tmp	/* iir */
611	/* get the opcode (first six bits) into \tmp */
612	extrw,u		\tmp,5,6,\tmp
613	/*
614	 * Only setting the T bit prevents data cache movein
615	 * Setting access rights to zero prevents instruction cache movein
616	 *
617	 * Note subtlety here: _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE go
618	 * to type field and _PAGE_READ goes to top bit of PL1
619	 */
620	ldi		(_PAGE_REFTRAP|_PAGE_READ|_PAGE_WRITE),\prot
621	/*
622	 * so if the opcode is one (i.e. this is a memory management
623	 * instruction) nullify the next load so \prot is only T.
624	 * Otherwise this is a normal data operation
625	 */
626	cmpiclr,=	0x01,\tmp,%r0
627	ldi		(_PAGE_DIRTY|_PAGE_READ|_PAGE_WRITE),\prot
	/* Shift \prot into the hardware field position for the target
	 * architecture (64-bit deposit on PA2.0, 32-bit on PA1.1). */
628.ifc \patype,20
629	depd,z		\prot,8,7,\prot
630.else
631.ifc \patype,11
632	depw,z		\prot,8,7,\prot
633.else
634	.error "undefined PA type to do_alias"
635.endif
636.endif
637	/*
638	 * OK, it is in the temp alias region, check whether "from" or "to".
639	 * Check "subtle" note in pacache.S re: r23/r26.
640	 */
641#ifdef CONFIG_64BIT
642	extrd,u,*=	\va,41,1,%r0	/* test the from/to selector bit */
643#else
644	extrw,u,=	\va,9,1,%r0
645#endif
646	or,COND(tr)	%r23,%r0,\pte	/* "from" page; ,tr nullifies next */
647	or		%r26,%r0,\pte	/* "to" page (taken when bit clear) */
648	.endm
649
650
651	/*
652	 * Align fault_vector_20 on 4K boundary so that both
653	 * fault_vector_11 and fault_vector_20 are on the
654	 * same page. This is only necessary as long as we
655	 * write protect the kernel text, which we may stop
656	 * doing once we use large page translations to cover
657	 * the static part of the kernel address space.
658	 */
659
660	.text
661
662	.align 4096
663
	/* PA2.0 Interruption Vector Table: 32 slots of 32 bytes each,
	 * filled in by the def/extint/*_20/hpmc macros above.  The macro
	 * operand is the architected interruption number for that slot;
	 * slot 0 is unused (interruption numbering starts at 1). */
664ENTRY(fault_vector_20)
665	/* First vector is invalid (0) */
666	.ascii	"cows can fly"
667	.byte 0
668	.align 32
669
670	hpmc		 1
671	def		 2
672	def		 3
673	extint		 4
674	def		 5
675	itlb_20		 6
676	def		 7
677	def		 8
678	def              9
679	def		10
680	def		11
681	def		12
682	def		13
683	def		14
684	dtlb_20		15
685	naitlb_20	16
686	nadtlb_20	17
687	def		18
688	def		19
689	dbit_20		20
690	def		21
691	def		22
692	def		23
693	def		24
694	def		25
695	def		26
696	def		27
697	def		28
698	def		29
699	def		30
700	def		31
701END(fault_vector_20)
702
703#ifndef CONFIG_64BIT
704
705	.align 2048
706
	/* PA1.1 Interruption Vector Table (32-bit kernels only): same
	 * layout as fault_vector_20, using the *_11 miss macros. */
707ENTRY(fault_vector_11)
708	/* First vector is invalid (0) */
709	.ascii	"cows can fly"
710	.byte 0
711	.align 32
712
713	hpmc		 1
714	def		 2
715	def		 3
716	extint		 4
717	def		 5
718	itlb_11		 6
719	def		 7
720	def		 8
721	def              9
722	def		10
723	def		11
724	def		12
725	def		13
726	def		14
727	dtlb_11		15
728	naitlb_11	16
729	nadtlb_11	17
730	def		18
731	def		19
732	dbit_11		20
733	def		21
734	def		22
735	def		23
736	def		24
737	def		25
738	def		26
739	def		27
740	def		28
741	def		29
742	def		30
743	def		31
744END(fault_vector_11)
745
746#endif
747	/* Fault vector is separately protected and *must* be on its own page */
748	.align		PAGE_SIZE
749ENTRY(end_fault_vector)
750
751	.import		handle_interruption,code
752	.import		do_cpu_irq_mask,code
753
754	/*
755	 * Child Returns here
756	 *
757	 * copy_thread moved args into task save area.
758	 */
759
	/* Entry point for a freshly forked kernel thread: after
	 * schedule_tail, load the thread function (TASK_PT_GR26) and its
	 * argument (TASK_PT_GR25) from the task save area, call it, then
	 * fall through to finish_child_return. */
760ENTRY(ret_from_kernel_thread)

761
762	/* Call schedule_tail first though */
763	BL	schedule_tail, %r2
764	nop
765
766	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1	/* task_struct from stack base */
767	LDREG	TASK_PT_GR25(%r1), %r26		/* arg0 for the thread fn */
768#ifdef CONFIG_64BIT
769	LDREG	TASK_PT_GR27(%r1), %r27		/* NOTE(review): presumably the 64-bit data pointer — confirm */
770#endif
771	LDREG	TASK_PT_GR26(%r1), %r1		/* thread function pointer */
772	ble	0(%sr7, %r1)			/* call the thread function */
773	copy	%r31, %r2			/* delay slot: set return ptr */
774	b	finish_child_return
775	nop
776ENDPROC(ret_from_kernel_thread)
777
778
779	/*
780	 * struct task_struct *_switch_to(struct task_struct *prev,
781	 *	struct task_struct *next)
782	 *
783	 * switch kernel stacks and return prev */
	/* %r26 = prev, %r25 = next.  Saves callee-saved state and the
	 * continuation PC/SP into prev, loads next's PC/SP, switches cr30
	 * to next's thread_info, and resumes next at its saved KPC
	 * (normally _switch_to_ret below).  Returns prev in %r28. */
784ENTRY(_switch_to)
785	STREG	 %r2, -RP_OFFSET(%r30)

786
787	callee_save_float
788	callee_save
789
790	load32	_switch_to_ret, %r2
791
792	STREG	%r2, TASK_PT_KPC(%r26)	/* prev resumes at _switch_to_ret */
793	LDREG	TASK_PT_KPC(%r25), %r2	/* next's saved continuation PC */
794
795	STREG	%r30, TASK_PT_KSP(%r26)	/* save prev's kernel SP */
796	LDREG	TASK_PT_KSP(%r25), %r30	/* switch to next's kernel SP */
797	LDREG	TASK_THREAD_INFO(%r25), %r25
798	bv	%r0(%r2)
799	mtctl   %r25,%cr30		/* delay slot: cr30 = next thread_info */
800
801_switch_to_ret:
802	mtctl	%r0, %cr0		/* Needed for single stepping */
803	callee_rest
804	callee_rest_float
805
806	LDREG	-RP_OFFSET(%r30), %r2
807	bv	%r0(%r2)
808	copy	%r26, %r28		/* delay slot: return value = prev */
809ENDPROC(_switch_to)
810
811	/*
812	 * Common rfi return path for interruptions, kernel execve, and
813	 * sys_rt_sigreturn (sometimes).  The sys_rt_sigreturn syscall will
814	 * return via this path if the signal was received when the process
815	 * was running; if the process was blocked on a syscall then the
816	 * normal syscall_exit path is used.  All syscalls for traced
817	 * proceses exit via intr_restore.
818	 *
819	 * XXX If any syscalls that change a processes space id ever exit
820	 * this way, then we will need to copy %sr3 in to PT_SR[3..7], and
821	 * adjust IASQ[0..1].
822	 *
823	 */
824
825	.align	PAGE_SIZE
826
	/* Sanitize the saved user context before returning via rfi:
	 * force both IAOQ entries to privilege level 3, filter the saved
	 * PSW, and overwrite the saved space registers with the user's
	 * space id (from sr3). */
827ENTRY(syscall_exit_rfi)
828	mfctl   %cr30,%r16
829	LDREG	TI_TASK(%r16), %r16	/* thread_info -> task_struct */
830	ldo	TASK_REGS(%r16),%r16	/* %r16 = pt_regs (used throughout) */
831	/* Force iaoq to userspace, as the user has had access to our current
832	 * context via sigcontext. Also Filter the PSW for the same reason.
833	 */
834	LDREG	PT_IAOQ0(%r16),%r19
835	depi	3,31,2,%r19		/* low 2 bits = privilege level 3 */
836	STREG	%r19,PT_IAOQ0(%r16)
837	LDREG	PT_IAOQ1(%r16),%r19
838	depi	3,31,2,%r19
839	STREG	%r19,PT_IAOQ1(%r16)
840	LDREG   PT_PSW(%r16),%r19
841	load32	USER_PSW_MASK,%r1
842#ifdef CONFIG_64BIT
843	load32	USER_PSW_HI_MASK,%r20
844	depd    %r20,31,32,%r1		/* build the full 64-bit mask */
845#endif
846	and     %r19,%r1,%r19 /* Mask out bits that user shouldn't play with */
847	load32	USER_PSW,%r1
848	or      %r19,%r1,%r19 /* Make sure default USER_PSW bits are set */
849	STREG   %r19,PT_PSW(%r16)
850
851	/*
852	 * If we aren't being traced, we never saved space registers
853	 * (we don't store them in the sigcontext), so set them
854	 * to "proper" values now (otherwise we'll wind up restoring
855	 * whatever was last stored in the task structure, which might
856	 * be inconsistent if an interrupt occurred while on the gateway
857	 * page). Note that we may be "trashing" values the user put in
858	 * them, but we don't support the user changing them.
859	 */
860
861	STREG   %r0,PT_SR2(%r16)	/* sr2 stays 0 (gateway space) */
862	mfsp    %sr3,%r19		/* sr3 = the user's space id */
863	STREG   %r19,PT_SR0(%r16)
864	STREG   %r19,PT_SR1(%r16)
865	STREG   %r19,PT_SR3(%r16)
866	STREG   %r19,PT_SR4(%r16)
867	STREG   %r19,PT_SR5(%r16)
868	STREG   %r19,PT_SR6(%r16)
869	STREG   %r19,PT_SR7(%r16)
870
	/* Common interruption-exit path.  %r16 = pt_regs.  Loops through
	 * reschedule and signal checks until nothing is pending, then
	 * falls into intr_restore. */
871intr_return:
872	/* check for reschedule */
873	mfctl   %cr30,%r1
874	LDREG   TI_FLAGS(%r1),%r19	/* sched.h: TIF_NEED_RESCHED */
875	bb,<,n	%r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */
876
877	.import do_notify_resume,code
878intr_check_sig:
879	/* As above */
880	mfctl   %cr30,%r1
881	LDREG	TI_FLAGS(%r1),%r19
882	ldi	(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME), %r20
883	and,COND(<>)	%r19, %r20, %r0	/* work pending? skip the branch */
884	b,n	intr_restore	/* skip past if we've nothing to do */
885
886	/* This check is critical to having LWS
887	 * working. The IASQ is zero on the gateway
888	 * page and we cannot deliver any signals until
889	 * we get off the gateway page.
890	 *
891	 * Only do signals if we are returning to user space
892	 */
893	LDREG	PT_IASQ0(%r16), %r20
894	cmpib,COND(=),n 0,%r20,intr_restore /* backward */
895	LDREG	PT_IASQ1(%r16), %r20
896	cmpib,COND(=),n 0,%r20,intr_restore /* backward */
897
898	/* NOTE: We need to enable interrupts if we have to deliver
899	 * signals. We used to do this earlier but it caused kernel
900	 * stack overflows. */
901	ssm     PSW_SM_I, %r0
902
903	copy	%r0, %r25			/* long in_syscall = 0 */
904#ifdef CONFIG_64BIT
905	ldo	-16(%r30),%r29			/* Reference param save area */
906#endif
907
908	BL	do_notify_resume,%r2
909	copy	%r16, %r26			/* struct pt_regs *regs */
910
911	b,n	intr_check_sig			/* re-check for more work */
912
	/* Restore the full register state from pt_regs (%r16) and return
	 * from the interruption with rfi.  Runs the inverse of virt_map:
	 * quiet the PSW, convert %r29 back to physical, restore specials,
	 * then the stack registers (%r29 itself last). */
913intr_restore:
914	copy            %r16,%r29
915	ldo             PT_FR31(%r29),%r1
916	rest_fp         %r1
917	rest_general    %r29
918
919	/* inverse of virt_map */
920	pcxt_ssm_bug
921	rsm             PSW_SM_QUIET,%r0	/* prepare for rfi */
922	tophys_r1       %r29
923
924	/* Restore space id's and special cr's from PT_REGS
925	 * structure pointed to by r29
926	 */
927	rest_specials	%r29
928
929	/* IMPORTANT: rest_stack restores r29 last (we are using it)!
930	 * It also restores r1 and r30.
931	 */
932	rest_stack
933
934	rfi
935	nop
936
937#ifndef CONFIG_PREEMPT
938# define intr_do_preempt	intr_restore
939#endif /* !CONFIG_PREEMPT */
940
941	.import schedule,code
	/* TIF_NEED_RESCHED was set.  %r16 = pt_regs. */
942intr_do_resched:
943	/* Only call schedule on return to userspace. If we're returning
944	 * to kernel space, we may schedule if CONFIG_PREEMPT, otherwise
945	 * we jump back to intr_restore.
946	 */
947	LDREG	PT_IASQ0(%r16), %r20
948	cmpib,COND(=)	0, %r20, intr_do_preempt	/* kernel return */
949	nop
950	LDREG	PT_IASQ1(%r16), %r20
951	cmpib,COND(=)	0, %r20, intr_do_preempt	/* kernel return */
952	nop
953
954	/* NOTE: We need to enable interrupts if we schedule.  We used
955	 * to do this earlier but it caused kernel stack overflows. */
956	ssm     PSW_SM_I, %r0
957
958#ifdef CONFIG_64BIT
959	ldo	-16(%r30),%r29		/* Reference param save area */
960#endif
961
	/* Tail-call schedule with the return address set (in the delay
	 * slot) to intr_check_sig, so we re-check for pending work. */
962	ldil	L%intr_check_sig, %r2
963#ifndef CONFIG_64BIT
964	b	schedule
965#else
966	load32	schedule, %r20
967	bv	%r0(%r20)
968#endif
969	ldo	R%intr_check_sig(%r2), %r2
970
971	/* preempt the current task on returning to kernel
972	 * mode from an interrupt, iff need_resched is set,
973	 * and preempt_count is 0. otherwise, we continue on
974	 * our merry way back to the current running task.
975	 */
976#ifdef CONFIG_PREEMPT
977	.import preempt_schedule_irq,code
intr_do_preempt:
979	rsm	PSW_SM_I, %r0		/* disable interrupts */

980
981	/* current_thread_info()->preempt_count */
982	mfctl	%cr30, %r1
983	LDREG	TI_PRE_COUNT(%r1), %r19
984	cmpib,COND(<>)	0, %r19, intr_restore	/* if preempt_count > 0 */
985	nop				/* prev insn branched backwards */
986
987	/* check if we interrupted a critical path */
988	LDREG	PT_PSW(%r16), %r20	/* interrupted context's PSW */
989	bb,<,n	%r20, 31 - PSW_SM_I, intr_restore	/* it had irqs off */
990	nop
991
992	BL	preempt_schedule_irq, %r2
993	nop
994
995	b,n	intr_restore		/* ssm PSW_SM_I done by intr_restore */
996#endif /* CONFIG_PREEMPT */
997
998	/*
999	 * External interrupts.
1000	 */
1001
	/* Entered from the extint vector macro: shadowed %r16 already holds
	 * sr7 (0 => we were already on a kernel stack).  Saves full state,
	 * switches to virtual mode and tail-calls do_cpu_irq_mask with the
	 * return address set to intr_return. */
1002intr_extint:
1003	cmpib,COND(=),n 0,%r16,1f	/* sr7 == 0: kernel stack path */

1004
1005	get_stack_use_cr30
1006	b,n 2f
1007
10081:
1009	get_stack_use_r30
10102:
1011	save_specials	%r29
1012	virt_map
1013	save_general	%r29
1014
1015	ldo	PT_FR0(%r29), %r24
1016	save_fp	%r24
1017
1018	loadgp
1019
1020	copy	%r29, %r26	/* arg0 is pt_regs */
1021	copy	%r29, %r16	/* save pt_regs */
1022
1023	ldil	L%intr_return, %r2
1024
1025#ifdef CONFIG_64BIT
1026	ldo	-16(%r30),%r29	/* Reference param save area */
1027#endif
1028
1029	b	do_cpu_irq_mask
1030	ldo	R%intr_return(%r2), %r2	/* return to intr_return, not here */
1031ENDPROC(syscall_exit_rfi)
1032
1033
1034	/* Generic interruptions (illegal insn, unaligned, page fault, etc) */
1035
	/* Entered from the `def` vector macro with the trap number in
	 * shadowed %r8.  Saves full state (copying %r8 into non-shadowed
	 * %r26, which survives the rfir in virt_map and becomes arg0) and
	 * calls handle_interruption, returning to intr_check_sig. */
1036ENTRY(intr_save)		/* for os_hpmc */
1037	mfsp    %sr7,%r16
1038	cmpib,COND(=),n 0,%r16,1f	/* sr7 == 0: kernel stack path */
1039	get_stack_use_cr30
1040	b	2f
1041	copy    %r8,%r26	/* delay slot: preserve trap code */

1042
10431:
1044	get_stack_use_r30
1045	copy    %r8,%r26
1046
10472:
1048	save_specials	%r29
1049
1050	/* If this trap is a itlb miss, skip saving/adjusting isr/ior */
1051
1052	/*
1053	 * FIXME: 1) Use a #define for the hardwired "6" below (and in
1054	 *           traps.c.
1055	 *        2) Once we start executing code above 4 Gb, we need
1056	 *           to adjust iasq/iaoq here in the same way we
1057	 *           adjust isr/ior below.
1058	 */
1059
1060	cmpib,COND(=),n        6,%r26,skip_save_ior	/* 6 == itlb miss */

1061
1062
1063	mfctl           %cr20, %r16 /* isr */
1064	nop		/* serialize mfctl on PA 2.0 to avoid 4 cycle penalty */
1065	mfctl           %cr21, %r17 /* ior */

1066
1067
1068#ifdef CONFIG_64BIT
1069	/*
1070	 * If the interrupted code was running with W bit off (32 bit),
1071	 * clear the b bits (bits 0 & 1) in the ior.
1072	 * save_specials left ipsw value in r8 for us to test.
1073	 */
1074	extrd,u,*<>     %r8,PSW_W_BIT,1,%r0
1075	depdi           0,1,2,%r17	/* nullified when W bit was set */
1076
1077	/*
1078	 * FIXME: This code has hardwired assumptions about the split
1079	 *        between space bits and offset bits. This will change
1080	 *        when we allow alternate page sizes.
1081	 */
1082
1083	/* adjust isr/ior. */
1084	extrd,u         %r16,63,SPACEID_SHIFT,%r1	/* get high bits from isr for ior */
1085	depd            %r1,31,SPACEID_SHIFT,%r17	/* deposit them into ior */
1086	depdi           0,63,SPACEID_SHIFT,%r16		/* clear them from isr */
1087#endif
1088	STREG           %r16, PT_ISR(%r29)
1089	STREG           %r17, PT_IOR(%r29)

1090
1091
1092skip_save_ior:
1093	virt_map
1094	save_general	%r29
1095
1096	ldo		PT_FR0(%r29), %r25
1097	save_fp		%r25
1098
1099	loadgp
1100
1101	copy		%r29, %r25	/* arg1 is pt_regs */
1102#ifdef CONFIG_64BIT
1103	ldo		-16(%r30),%r29	/* Reference param save area */
1104#endif
1105
1106	ldil		L%intr_check_sig, %r2
1107	copy		%r25, %r16	/* save pt_regs */
1108
1109	b		handle_interruption
1110	ldo		R%intr_check_sig(%r2), %r2	/* return to intr_check_sig */
1111ENDPROC(intr_save)
1112
1113
1114	/*
1115	 * Note for all tlb miss handlers:
1116	 *
1117	 * cr24 contains a pointer to the kernel address space
1118	 * page directory.
1119	 *
1120	 * cr25 contains a pointer to the current user address
1121	 * space page directory.
1122	 *
1123	 * sr3 will contain the space id of the user address space
1124	 * of the current running thread while that thread is
1125	 * running in the kernel.
1126	 */
1127
1128	/*
1129	 * register number allocations.  Note that these are all
1130	 * in the shadowed registers
1131	 */
1132
1133	t0 = r1		/* temporary register 0 */
1134	va = r8		/* virtual address for which the trap occurred */
1135	t1 = r9		/* temporary register 1 */
1136	pte  = r16	/* pte/phys page # */
1137	prot = r17	/* prot bits */
1138	spc  = r24	/* space for which the trap occurred */
1139	ptp = r25	/* page directory/page table pointer */
1140
1141#ifdef CONFIG_64BIT
1142
	/* 64-bit data TLB miss (wide mode): walk the 3-level page table,
	 * mark the PTE accessed under the dbit lock, and insert the
	 * translation with idtlbt.  Absent entries fall to dtlb_fault;
	 * tmpalias-region faults go to the alias handler below. */
1143dtlb_miss_20w:
1144	space_adjust	spc,va,t0
1145	get_pgd		spc,ptp
1146	space_check	spc,t0,dtlb_fault
1147
1148	L3_ptep		ptp,pte,t0,va,dtlb_check_alias_20w
1149
1150	dbit_lock	spc,t0,t1
1151	update_ptep	spc,ptp,pte,t0,t1
1152
1153	make_insert_tlb	spc,pte,prot
1154
1155	idtlbt          pte,prot
1156	dbit_unlock1	spc,t0
1157
1158	rfir
1159	nop

1160
	/* Fault address was in the tmpalias region: synthesize a TLB entry
	 * from %r23/%r26 instead of walking the page table. */
1161dtlb_check_alias_20w:
1162	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,20
1163
1164	idtlbt          pte,prot
1165
1166	rfir
1167	nop
1168
1169nadtlb_miss_20w:
1170	space_adjust	spc,va,t0
1171	get_pgd		spc,ptp
1172	space_check	spc,t0,nadtlb_fault
1173
1174	L3_ptep		ptp,pte,t0,va,nadtlb_check_alias_20w
1175
1176	dbit_lock	spc,t0,t1
1177	update_ptep	spc,ptp,pte,t0,t1
1178
1179	make_insert_tlb	spc,pte,prot
1180
1181	idtlbt          pte,prot
1182	dbit_unlock1	spc,t0
1183
1184	rfir
1185	nop
1186
1187nadtlb_check_alias_20w:
1188	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,20
1189
1190	idtlbt          pte,prot
1191
1192	rfir
1193	nop
1194
1195#else
1196
/* 32-bit PA 1.1 data TLB miss handler.  PA 1.1 has split idtlba/idtlbp
 * inserts that are addressed through a space register, so sr1 is
 * temporarily borrowed for the insert. */
dtlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,dtlb_fault

	/* 2-level page table walk; branch out if no valid PTE. */
	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_11

	/* Serialize the PTE accessed-bit update (see dbit_lock macro). */
	dbit_lock	spc,t0,t1
	update_ptep	spc,ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t0  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t0, %sr1	/* Restore sr1 */
	dbit_unlock1	spc,t0

	rfir
	nop

/* Alias handling for an untranslated PA 1.1 data reference. */
dtlb_check_alias_11:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,11

	idtlba          pte,(va)
	idtlbp          prot,(va)

	rfir
	nop

/* PA 1.1 non-access data TLB miss; missing translations are emulated
 * (via nadtlb_emulate) rather than faulting. */
nadtlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,nadtlb_fault

	L2_ptep		ptp,pte,t0,va,nadtlb_check_alias_11

	dbit_lock	spc,t0,t1
	update_ptep	spc,ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot


	mfsp		%sr1,t0  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t0, %sr1	/* Restore sr1 */
	dbit_unlock1	spc,t0

	rfir
	nop

nadtlb_check_alias_11:
	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,11

	idtlba          pte,(va)
	idtlbp          prot,(va)

	rfir
	nop

/* 32-bit kernel on PA 2.0 hardware: combined idtlbt insert is available,
 * but the physical address in the PTE must be widened (f_extend) first. */
dtlb_miss_20:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dtlb_fault

	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_20

	dbit_lock	spc,t0,t1
	update_ptep	spc,ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	f_extend	pte,t0

	idtlbt          pte,prot
	dbit_unlock1	spc,t0

	rfir
	nop

dtlb_check_alias_20:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,20

	idtlbt          pte,prot

	rfir
	nop

/* PA 2.0 (narrow) non-access data TLB miss. */
nadtlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,nadtlb_fault

	L2_ptep		ptp,pte,t0,va,nadtlb_check_alias_20

	dbit_lock	spc,t0,t1
	update_ptep	spc,ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	f_extend	pte,t0

        idtlbt          pte,prot
	dbit_unlock1	spc,t0

	rfir
	nop

nadtlb_check_alias_20:
	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,20

	idtlbt          pte,prot

	rfir
	nop
1319
1320#endif
1321
nadtlb_emulate:

	/*
	 * Non access misses can be caused by fdc,fic,pdc,lpa,probe and
	 * probei instructions. We don't want to fault for these
	 * instructions (not only does it not make sense, it can cause
	 * deadlocks, since some flushes are done with the mmap
	 * semaphore held). If the translation doesn't exist, we can't
	 * insert a translation, so have to emulate the side effects
	 * of the instruction. Since we don't insert a translation
	 * we can get a lot of faults during a flush loop, so it makes
	 * sense to try to do it here with minimum overhead. We only
	 * emulate fdc,fic,pdc,probew,prober instructions whose base
	 * and index registers are not shadowed. We defer everything
	 * else to the "slow" path.
	 */

	mfctl           %cr19,%r9 /* Get iir */

	/* PA 2.0 Arch Ref. Book pg 382 has a good description of the insn bits.
	   Checks for fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw */

	/* Checks for fdc,fdce,pdc,"fic,4f" only */
	ldi             0x280,%r16
	and             %r9,%r16,%r17
	cmpb,<>,n       %r16,%r17,nadtlb_probe_check
	bb,>=,n         %r9,26,nadtlb_nullify  /* m bit not set, just nullify */
	/* m bit set: emulate the base-modify side effect (base += index)
	 * before nullifying the insn.  BL's delay slot holds the register
	 * number extraction; get/set_register return via %r25. */
	BL		get_register,%r25
	extrw,u         %r9,15,5,%r8           /* Get index register # */
	cmpib,COND(=),n        -1,%r1,nadtlb_fault    /* have to use slow path */
	copy            %r1,%r24
	BL		get_register,%r25
	extrw,u         %r9,10,5,%r8           /* Get base register # */
	cmpib,COND(=),n        -1,%r1,nadtlb_fault    /* have to use slow path */
	BL		set_register,%r25
	add,l           %r1,%r24,%r1           /* doesn't affect c/b bits */

/* Set the Nullify bit in the interrupted PSW so that the faulting
 * instruction is skipped (not re-executed) when we rfir. */
nadtlb_nullify:
	mfctl           %ipsw,%r8
	ldil            L%PSW_N,%r9
	or              %r8,%r9,%r8            /* Set PSW_N */
	mtctl           %r8,%ipsw

	rfir
	nop
1367
1368	/*
1369		When there is no translation for the probe address then we
		must nullify the insn and return zero in the target register.
1371		This will indicate to the calling code that it does not have
1372		write/read privileges to this address.
1373
1374		This should technically work for prober and probew in PA 1.1,
1375		and also probe,r and probe,w in PA 2.0
1376
1377		WARNING: USE ONLY NON-SHADOW REGISTERS WITH PROBE INSN!
1378		THE SLOW-PATH EMULATION HAS NOT BEEN WRITTEN YET.
1379
1380	*/
/* Emulate probe,[rw] on an untranslated address: write 0 (no access
 * rights) to the probe's target register and nullify the insn.  Any
 * other instruction, or a shadowed target register, takes the slow
 * nadtlb_fault path. */
nadtlb_probe_check:
	ldi             0x80,%r16
	and             %r9,%r16,%r17
	cmpb,<>,n       %r16,%r17,nadtlb_fault /* Must be probe,[rw]*/
	BL              get_register,%r25      /* Find the target register */
	extrw,u         %r9,31,5,%r8           /* Get target register */
	cmpib,COND(=),n        -1,%r1,nadtlb_fault    /* have to use slow path */
	BL		set_register,%r25
	copy            %r0,%r1                /* Write zero to target register */
	b nadtlb_nullify                       /* Nullify return insn */
	nop
1392
1393
1394#ifdef CONFIG_64BIT
/* 64-bit instruction TLB miss handler. */
itlb_miss_20w:

	/*
	 * I miss is a little different, since we allow users to fault
	 * on the gateway page which is in the kernel address space.
	 */

	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,itlb_fault

	/* No alias handler for plain I-misses: a missing PTE is a fault. */
	L3_ptep		ptp,pte,t0,va,itlb_fault

	/* Serialize the PTE accessed-bit update (see dbit_lock macro). */
	dbit_lock	spc,t0,t1
	update_ptep	spc,ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	iitlbt          pte,prot	/* insert instruction translation */
	dbit_unlock1	spc,t0

	rfir
	nop

/* 64-bit non-access instruction TLB miss (e.g. fic). */
naitlb_miss_20w:

	/*
	 * I miss is a little different, since we allow users to fault
	 * on the gateway page which is in the kernel address space.
	 */

	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,naitlb_fault

	L3_ptep		ptp,pte,t0,va,naitlb_check_alias_20w

	dbit_lock	spc,t0,t1
	update_ptep	spc,ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	iitlbt          pte,prot
	dbit_unlock1	spc,t0

	rfir
	nop

naitlb_check_alias_20w:
	do_alias	spc,t0,t1,va,pte,prot,naitlb_fault,20

	iitlbt		pte,prot

	rfir
	nop
1450
1451#else
1452
/* 32-bit PA 1.1 instruction TLB miss handler (split iitlba/iitlbp
 * inserts addressed through sr1, as in dtlb_miss_11 above). */
itlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,itlb_fault

	L2_ptep		ptp,pte,t0,va,itlb_fault

	dbit_lock	spc,t0,t1
	update_ptep	spc,ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t0  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	iitlba		pte,(%sr1,va)
	iitlbp		prot,(%sr1,va)

	mtsp		t0, %sr1	/* Restore sr1 */
	dbit_unlock1	spc,t0

	rfir
	nop

/* PA 1.1 non-access instruction TLB miss. */
naitlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,naitlb_fault

	L2_ptep		ptp,pte,t0,va,naitlb_check_alias_11

	dbit_lock	spc,t0,t1
	update_ptep	spc,ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t0  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	iitlba		pte,(%sr1,va)
	iitlbp		prot,(%sr1,va)

	mtsp		t0, %sr1	/* Restore sr1 */
	dbit_unlock1	spc,t0

	rfir
	nop

naitlb_check_alias_11:
	/* NOTE(review): fault target is itlb_fault here, while the 20-bit
	 * variants below use naitlb_fault — confirm this asymmetry is
	 * intentional. */
	do_alias	spc,t0,t1,va,pte,prot,itlb_fault,11

	iitlba          pte,(%sr0, va)
	iitlbp          prot,(%sr0, va)

	rfir
	nop


/* 32-bit kernel on PA 2.0 hardware: combined iitlbt insert, with the
 * PTE's physical address widened via f_extend first. */
itlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,itlb_fault

	L2_ptep		ptp,pte,t0,va,itlb_fault

	dbit_lock	spc,t0,t1
	update_ptep	spc,ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	f_extend	pte,t0

	iitlbt          pte,prot
	dbit_unlock1	spc,t0

	rfir
	nop

/* PA 2.0 (narrow) non-access instruction TLB miss. */
naitlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,naitlb_fault

	L2_ptep		ptp,pte,t0,va,naitlb_check_alias_20

	dbit_lock	spc,t0,t1
	update_ptep	spc,ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	f_extend	pte,t0

	iitlbt          pte,prot
	dbit_unlock1	spc,t0

	rfir
	nop

naitlb_check_alias_20:
	do_alias	spc,t0,t1,va,pte,prot,naitlb_fault,20

	iitlbt          pte,prot

	rfir
	nop
1558
1559#endif
1560
1561#ifdef CONFIG_64BIT
1562
/* 64-bit TLB dirty-bit trap: first write to a clean page.  Mark the
 * PTE dirty (update_dirty) and re-insert the translation.  Note the
 * dbit_unlock0 pairing here, vs. dbit_unlock1 in the miss handlers. */
dbit_trap_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dbit_fault

	L3_ptep		ptp,pte,t0,va,dbit_fault

	dbit_lock	spc,t0,t1
	update_dirty	spc,ptp,pte,t1

	make_insert_tlb	spc,pte,prot

	idtlbt          pte,prot
	dbit_unlock0	spc,t0

	rfir
	nop
1580#else
1581
/* PA 1.1 TLB dirty-bit trap: mark the PTE dirty and re-insert the
 * translation via split idtlba/idtlbp addressed through sr1. */
dbit_trap_11:

	get_pgd		spc,ptp

	space_check	spc,t0,dbit_fault

	L2_ptep		ptp,pte,t0,va,dbit_fault

	dbit_lock	spc,t0,t1
	update_dirty	spc,ptp,pte,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp            %sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp            t1, %sr1     /* Restore sr1 */
	dbit_unlock0	spc,t0

	rfir
	nop

/* PA 2.0 (narrow kernel) TLB dirty-bit trap: combined idtlbt insert,
 * with the PTE's physical address widened via f_extend. */
dbit_trap_20:
	get_pgd		spc,ptp

	space_check	spc,t0,dbit_fault

	L2_ptep		ptp,pte,t0,va,dbit_fault

	dbit_lock	spc,t0,t1
	update_dirty	spc,ptp,pte,t1

	make_insert_tlb	spc,pte,prot

	f_extend	pte,t1

        idtlbt          pte,prot
	dbit_unlock0	spc,t0

	rfir
	nop
1626#endif
1627
1628	.import handle_interruption,code
1629
/* Slow-path fault stubs: each branches to the common intr_save entry
 * with its interruption code loaded into %r8 in the branch delay slot. */
kernel_bad_space:
	b               intr_save
	ldi             31,%r8  /* Use an unused code */

dbit_fault:
	b               intr_save
	ldi             20,%r8	/* code 20: TLB dirty bit trap */

itlb_fault:
	b               intr_save
	ldi             6,%r8	/* code 6: instruction TLB miss */

nadtlb_fault:
	b               intr_save
	ldi             17,%r8	/* code 17: non-access data TLB miss */

naitlb_fault:
	b               intr_save
	ldi             16,%r8	/* code 16: non-access instruction TLB miss */

dtlb_fault:
	b               intr_save
	ldi             15,%r8	/* code 15: data TLB miss */
1653
1654	/* Register saving semantics for system calls:
1655
1656	   %r1		   clobbered by system call macro in userspace
1657	   %r2		   saved in PT_REGS by gateway page
1658	   %r3  - %r18	   preserved by C code (saved by signal code)
1659	   %r19 - %r20	   saved in PT_REGS by gateway page
1660	   %r21 - %r22	   non-standard syscall args
1661			   stored in kernel stack by gateway page
1662	   %r23 - %r26	   arg3-arg0, saved in PT_REGS by gateway page
1663	   %r27 - %r30	   saved in PT_REGS by gateway page
1664	   %r31		   syscall return pointer
1665	 */
1666
1667	/* Floating point registers (FIXME: what do we do with these?)
1668
1669	   %fr0  - %fr3	   status/exception, not preserved
1670	   %fr4  - %fr7	   arguments
1671	   %fr8	 - %fr11   not preserved by C code
1672	   %fr12 - %fr21   preserved by C code
1673	   %fr22 - %fr31   not preserved by C code
1674	 */
1675
	/* Save the callee-saved registers r3-r18 into the pt_regs
	 * structure pointed to by \regs (counterpart of reg_restore). */
	.macro	reg_save regs
	STREG	%r3, PT_GR3(\regs)
	STREG	%r4, PT_GR4(\regs)
	STREG	%r5, PT_GR5(\regs)
	STREG	%r6, PT_GR6(\regs)
	STREG	%r7, PT_GR7(\regs)
	STREG	%r8, PT_GR8(\regs)
	STREG	%r9, PT_GR9(\regs)
	STREG   %r10,PT_GR10(\regs)
	STREG   %r11,PT_GR11(\regs)
	STREG   %r12,PT_GR12(\regs)
	STREG   %r13,PT_GR13(\regs)
	STREG   %r14,PT_GR14(\regs)
	STREG   %r15,PT_GR15(\regs)
	STREG   %r16,PT_GR16(\regs)
	STREG   %r17,PT_GR17(\regs)
	STREG   %r18,PT_GR18(\regs)
	.endm
1694
	/* Restore the callee-saved registers r3-r18 from the pt_regs
	 * structure pointed to by \regs (counterpart of reg_save). */
	.macro	reg_restore regs
	LDREG	PT_GR3(\regs), %r3
	LDREG	PT_GR4(\regs), %r4
	LDREG	PT_GR5(\regs), %r5
	LDREG	PT_GR6(\regs), %r6
	LDREG	PT_GR7(\regs), %r7
	LDREG	PT_GR8(\regs), %r8
	LDREG	PT_GR9(\regs), %r9
	LDREG   PT_GR10(\regs),%r10
	LDREG   PT_GR11(\regs),%r11
	LDREG   PT_GR12(\regs),%r12
	LDREG   PT_GR13(\regs),%r13
	LDREG   PT_GR14(\regs),%r14
	LDREG   PT_GR15(\regs),%r15
	LDREG   PT_GR16(\regs),%r16
	LDREG   PT_GR17(\regs),%r17
	LDREG   PT_GR18(\regs),%r18
	.endm
1713
	/* Generate the sys_<name>_wrapper for fork-style syscalls: save the
	 * callee-saved registers (and cr27) into the task's pt_regs so the
	 * child can be started from a consistent register image, then branch
	 * to the real sys_<name>.  The cr27 store rides in the be delay slot. */
	.macro	fork_like name
ENTRY(sys_\name\()_wrapper)
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1	/* task struct */
	ldo	TASK_REGS(%r1),%r1	/* %r1 = task's pt_regs */
	reg_save %r1
	mfctl	%cr27, %r28
	ldil	L%sys_\name, %r31
	be	R%sys_\name(%sr4,%r31)
	STREG	%r28, PT_CR27(%r1)
ENDPROC(sys_\name\()_wrapper)
	.endm

fork_like clone
fork_like fork
fork_like vfork
1729
1730	/* Set the return value for the child */
	/* Set the return value for the child */
ENTRY(child_return)
	BL	schedule_tail, %r2	/* finish the context switch */
	nop
finish_child_return:
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
	ldo	TASK_REGS(%r1),%r1	 /* get pt regs */

	/* Restore cr27 and r3-r18 saved by the fork_like wrapper. */
	LDREG	PT_CR27(%r1), %r3
	mtctl	%r3, %cr27
	reg_restore %r1
	b	syscall_exit
	copy	%r0,%r28		/* child's syscall return value = 0 */
ENDPROC(child_return)
1744
/* Wrapper for rt_sigreturn: calls sys_rt_sigreturn(regs) and then
 * restores r3-r18 from pt_regs (sigreturn rewrote them from the
 * sigcontext) before returning through the saved %r2. */
ENTRY(sys_rt_sigreturn_wrapper)
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26
	ldo	TASK_REGS(%r26),%r26	/* get pt regs */
	/* Don't save regs, we are going to restore them from sigcontext. */
	STREG	%r2, -RP_OFFSET(%r30)
#ifdef CONFIG_64BIT
	ldo	FRAME_SIZE(%r30), %r30
	BL	sys_rt_sigreturn,%r2
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	BL	sys_rt_sigreturn,%r2
	ldo	FRAME_SIZE(%r30), %r30	/* frame alloc in the delay slot */
#endif

	ldo	-FRAME_SIZE(%r30), %r30	/* pop our frame */
	LDREG	-RP_OFFSET(%r30), %r2

	/* FIXME: I think we need to restore a few more things here. */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r1	/* get pt regs */
	reg_restore %r1

	/* If the signal was received while the process was blocked on a
	 * syscall, then r2 will take us to syscall_exit; otherwise r2 will
	 * take us to syscall_exit_rfi and on to intr_return.
	 */
	bv	%r0(%r2)
	LDREG	PT_GR28(%r1),%r28  /* reload original r28 for syscall_exit */
ENDPROC(sys_rt_sigreturn_wrapper)
1774
ENTRY(syscall_exit)
	/* NOTE: HP-UX syscalls also come through here
	 * after hpux_syscall_exit fixes up return
	 * values. */

	/* NOTE: Not all syscalls exit this way.  rt_sigreturn will exit
	 * via syscall_exit_rfi if the signal was received while the process
	 * was running.
	 */

	/* save return value now */

	mfctl     %cr30, %r1
	LDREG     TI_TASK(%r1),%r1
	STREG     %r28,TASK_PT_GR28(%r1)

#ifdef CONFIG_HPUX
/* <linux/personality.h> cannot be easily included */
#define PER_HPUX 0x10
	ldw	TASK_PERSONALITY(%r1),%r19

	/* We can't use "CMPIB<> PER_HPUX" since "im5" field is sign extended */
	ldo	  -PER_HPUX(%r19), %r19
	cmpib,COND(<>),n 0,%r19,1f

	/* Save other hpux returns if personality is PER_HPUX */
	STREG     %r22,TASK_PT_GR22(%r1)
	STREG     %r29,TASK_PT_GR29(%r1)
1:

#endif /* CONFIG_HPUX */

	/* Seems to me that dp could be wrong here, if the syscall involved
	 * calling a module, and nothing got round to restoring dp on return.
	 */
	loadgp

syscall_check_resched:

	/* check for reschedule */

	LDREG	TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19	/* long */
	bb,<,n	%r19, 31-TIF_NEED_RESCHED, syscall_do_resched /* forward */

	.import do_signal,code
syscall_check_sig:
	LDREG	TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19
	ldi	(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME), %r26
	/* and,<> nullifies the next insn when a flag IS set, so a pending
	 * signal/notify falls through to syscall_do_signal below. */
	and,COND(<>)	%r19, %r26, %r0
	b,n	syscall_restore	/* skip past if we've nothing to do */

syscall_do_signal:
	/* Save callee-save registers (for sigcontext).
	 * FIXME: After this point the process structure should be
	 * consistent with all the relevant state of the process
	 * before the syscall.  We need to verify this.
	 */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1), %r26		/* struct pt_regs *regs */
	reg_save %r26

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29			/* Reference param save area */
#endif

	BL	do_notify_resume,%r2
	ldi	1, %r25				/* long in_syscall = 1 */

	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1), %r20		/* reload pt_regs */
	reg_restore %r20

	/* Re-check: more signals may have arrived meanwhile. */
	b,n     syscall_check_sig

syscall_restore:
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1

	/* Are we being ptraced? */
	ldw	TASK_FLAGS(%r1),%r19
	ldi	_TIF_SYSCALL_TRACE_MASK,%r2
	/* and,= nullifies the branch when NO trace flags are set; traced
	 * tasks take the slower rfi exit below. */
	and,COND(=)	%r19,%r2,%r0
	b,n	syscall_restore_rfi

	ldo	TASK_PT_FR31(%r1),%r19		   /* reload fpregs */
	rest_fp	%r19

	LDREG	TASK_PT_SAR(%r1),%r19		   /* restore SAR */
	mtsar	%r19

	LDREG	TASK_PT_GR2(%r1),%r2		   /* restore user rp */
	LDREG	TASK_PT_GR19(%r1),%r19
	LDREG   TASK_PT_GR20(%r1),%r20
	LDREG	TASK_PT_GR21(%r1),%r21
	LDREG	TASK_PT_GR22(%r1),%r22
	LDREG	TASK_PT_GR23(%r1),%r23
	LDREG	TASK_PT_GR24(%r1),%r24
	LDREG	TASK_PT_GR25(%r1),%r25
	LDREG	TASK_PT_GR26(%r1),%r26
	LDREG	TASK_PT_GR27(%r1),%r27	   /* restore user dp */
	LDREG	TASK_PT_GR28(%r1),%r28	   /* syscall return value */
	LDREG	TASK_PT_GR29(%r1),%r29
	LDREG	TASK_PT_GR31(%r1),%r31	   /* restore syscall rp */

	/* NOTE: We use rsm/ssm pair to make this operation atomic */
	LDREG   TASK_PT_GR30(%r1),%r1              /* Get user sp */
	rsm     PSW_SM_I, %r0
	copy    %r1,%r30                           /* Restore user sp */
	mfsp    %sr3,%r1                           /* Get user space id */
	mtsp    %r1,%sr7                           /* Restore sr7 */
	ssm     PSW_SM_I, %r0

	/* Set sr2 to zero for userspace syscalls to work. */
	mtsp	%r0,%sr2
	mtsp	%r1,%sr4			   /* Restore sr4 */
	mtsp	%r1,%sr5			   /* Restore sr5 */
	mtsp	%r1,%sr6			   /* Restore sr6 */

	depi	3,31,2,%r31			   /* ensure return to user mode. */

#ifdef CONFIG_64BIT
	/* decide whether to reset the wide mode bit
	 *
	 * For a syscall, the W bit is stored in the lowest bit
	 * of sp.  Extract it and reset W if it is zero */
	extrd,u,*<>	%r30,63,1,%r1
	rsm	PSW_SM_W, %r0
	/* now reset the lowest bit of sp if it was set */
	xor	%r30,%r1,%r30
#endif
	be,n    0(%sr3,%r31)                       /* return to user space */

	/* We have to return via an RFI, so that PSW T and R bits can be set
	 * appropriately.
	 * This sets up pt_regs so we can return via intr_restore, which is not
	 * the most efficient way of doing things, but it works.
	 */
syscall_restore_rfi:
	ldo	-1(%r0),%r2			   /* Set recovery cntr to -1 */
	mtctl	%r2,%cr0			   /*   for immediate trap */
	LDREG	TASK_PT_PSW(%r1),%r2		   /* Get old PSW */
	ldi	0x0b,%r20			   /* Create new PSW */
	depi	-1,13,1,%r20			   /* C, Q, D, and I bits */

	/* The values of SINGLESTEP_BIT and BLOCKSTEP_BIT are
	 * set in thread_info.h and converted to PA bitmap
	 * numbers in asm-offsets.c */

	/* if ((%r19.SINGLESTEP_BIT)) { %r20.27=1} */
	extru,=	%r19,TIF_SINGLESTEP_PA_BIT,1,%r0
	depi	-1,27,1,%r20			   /* R bit */

	/* if ((%r19.BLOCKSTEP_BIT)) { %r20.7=1} */
	extru,= %r19,TIF_BLOCKSTEP_PA_BIT,1,%r0
	depi	-1,7,1,%r20			   /* T bit */

	STREG	%r20,TASK_PT_PSW(%r1)

	/* Always store space registers, since sr3 can be changed (e.g. fork) */

	mfsp    %sr3,%r25
	STREG   %r25,TASK_PT_SR3(%r1)
	STREG   %r25,TASK_PT_SR4(%r1)
	STREG   %r25,TASK_PT_SR5(%r1)
	STREG   %r25,TASK_PT_SR6(%r1)
	STREG   %r25,TASK_PT_SR7(%r1)
	STREG   %r25,TASK_PT_IASQ0(%r1)
	STREG   %r25,TASK_PT_IASQ1(%r1)

	/* XXX W bit??? */
	/* Now if old D bit is clear, it means we didn't save all registers
	 * on syscall entry, so do that now.  This only happens on TRACEME
	 * calls, or if someone attached to us while we were on a syscall.
	 * We could make this more efficient by not saving r3-r18, but
	 * then we wouldn't be able to use the common intr_restore path.
	 * It is only for traced processes anyway, so performance is not
	 * an issue.
	 */
	bb,<	%r2,30,pt_regs_ok		   /* Branch if D set */
	ldo	TASK_REGS(%r1),%r25
	reg_save %r25				   /* Save r3 to r18 */

	/* Save the current sr */
	mfsp	%sr0,%r2
	STREG	%r2,TASK_PT_SR0(%r1)

	/* Save the scratch sr */
	mfsp	%sr1,%r2
	STREG	%r2,TASK_PT_SR1(%r1)

	/* sr2 should be set to zero for userspace syscalls */
	STREG	%r0,TASK_PT_SR2(%r1)

	LDREG	TASK_PT_GR31(%r1),%r2
	depi	3,31,2,%r2		   /* ensure return to user mode. */
	STREG   %r2,TASK_PT_IAOQ0(%r1)
	ldo	4(%r2),%r2
	STREG	%r2,TASK_PT_IAOQ1(%r1)
	b	intr_restore
	copy	%r25,%r16

pt_regs_ok:
	LDREG	TASK_PT_IAOQ0(%r1),%r2
	depi	3,31,2,%r2		   /* ensure return to user mode. */
	STREG	%r2,TASK_PT_IAOQ0(%r1)
	LDREG	TASK_PT_IAOQ1(%r1),%r2
	depi	3,31,2,%r2
	STREG	%r2,TASK_PT_IAOQ1(%r1)
	b	intr_restore
	copy	%r25,%r16

	.import schedule,code
syscall_do_resched:
	BL	schedule,%r2
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	nop
#endif
	b	syscall_check_resched	/* if resched, we start over again */
	nop
ENDPROC(syscall_exit)
1996
1997
1998#ifdef CONFIG_FUNCTION_TRACER
1999	.import ftrace_function_trampoline,code
/* mcount hook emitted by the compiler at function entry: tail-branch to
 * the ftrace trampoline, passing %r3 as an extra argument in arg2
 * (presumably the caller's frame/self-pc context — TODO confirm against
 * ftrace_function_trampoline, which is not visible here). */
ENTRY(_mcount)
	copy	%r3, %arg2
	b	ftrace_function_trampoline
	nop
ENDPROC(_mcount)
2005
/* ftrace graph return hook: call ftrace_return_to_handler with the
 * function's return values (ret0/ret1), arranging for it to come back
 * to return_trampoline, which restores the original return values from
 * r23/r24 and jumps to the real return address (in ret0 at that point). */
ENTRY(return_to_handler)
	load32	return_trampoline, %rp
	copy	%ret0, %arg0
	copy	%ret1, %arg1
	b	ftrace_return_to_handler
	nop
return_trampoline:
	copy	%ret0, %rp	/* real return address from the handler */
	copy	%r23, %ret0	/* restore original return values */
	copy	%r24, %ret1

.globl ftrace_stub
ftrace_stub:
	bv	%r0(%rp)
	nop
ENDPROC(return_to_handler)
2022#endif	/* CONFIG_FUNCTION_TRACER */
2023
2024#ifdef CONFIG_IRQSTACKS
2025/* void call_on_stack(unsigned long param1, void *func,
2026		      unsigned long new_stack) */
/* Switch %sp to new_stack (arg2), call func(param1), then restore the
 * previous stack pointer and return.  The old sp and rp are stashed in
 * the new stack's frame marker across the call. */
ENTRY(call_on_stack)
	copy	%sp, %r1	/* remember the current stack pointer */

	/* Regarding the HPPA calling conventions for function pointers,
	   we assume the PIC register is not changed across call.  For
	   CONFIG_64BIT, the argument pointer is left to point at the
	   argument region allocated for the call to call_on_stack. */
# ifdef CONFIG_64BIT
	/* Switch to new stack.  We allocate two 128 byte frames.  */
	ldo	256(%arg2), %sp
	/* Save previous stack pointer and return pointer in frame marker */
	STREG	%rp, -144(%sp)
	/* Calls always use function descriptor */
	LDREG	16(%arg1), %arg1
	bve,l	(%arg1), %rp
	STREG	%r1, -136(%sp)	/* old sp saved in the delay slot */
	LDREG	-144(%sp), %rp
	bve	(%rp)
	LDREG	-136(%sp), %sp	/* restore old sp in the delay slot */
# else
	/* Switch to new stack.  We allocate two 64 byte frames.  */
	ldo	128(%arg2), %sp
	/* Save previous stack pointer and return pointer in frame marker */
	STREG	%r1, -68(%sp)
	STREG	%rp, -84(%sp)
	/* Calls use function descriptor if PLABEL bit is set */
	bb,>=,n	%arg1, 30, 1f
	depwi	0,31,2, %arg1	/* clear PLABEL bits and dereference */
	LDREG	0(%arg1), %arg1
1:
	be,l	0(%sr4,%arg1), %sr0, %r31
	copy	%r31, %rp
	LDREG	-84(%sp), %rp
	bv	(%rp)
	LDREG	-68(%sp), %sp	/* restore old sp in the delay slot */
# endif /* CONFIG_64BIT */
ENDPROC(call_on_stack)
2064#endif /* CONFIG_IRQSTACKS */
2065
get_register:
	/*
	 * get_register is used by the non access tlb miss handlers to
	 * copy the value of the general register specified in r8 into
	 * r1. This routine can't be used for shadowed registers, since
	 * the rfir will restore the original value. So, for the shadowed
	 * registers we put a -1 into r1 to indicate that the register
	 * should not be used (the register being copied could also have
	 * a -1 in it, but that is OK, it just means that we will have
	 * to use the slow path instead).
	 *
	 * The blr below indexes into the following table of 32
	 * two-instruction (bv + delay slot) entries; the caller's return
	 * address is expected in r25.
	 */
	blr     %r8,%r0
	nop
	bv      %r0(%r25)    /* r0 */
	copy    %r0,%r1
	bv      %r0(%r25)    /* r1 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r2 */
	copy    %r2,%r1
	bv      %r0(%r25)    /* r3 */
	copy    %r3,%r1
	bv      %r0(%r25)    /* r4 */
	copy    %r4,%r1
	bv      %r0(%r25)    /* r5 */
	copy    %r5,%r1
	bv      %r0(%r25)    /* r6 */
	copy    %r6,%r1
	bv      %r0(%r25)    /* r7 */
	copy    %r7,%r1
	bv      %r0(%r25)    /* r8 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r9 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r10 */
	copy    %r10,%r1
	bv      %r0(%r25)    /* r11 */
	copy    %r11,%r1
	bv      %r0(%r25)    /* r12 */
	copy    %r12,%r1
	bv      %r0(%r25)    /* r13 */
	copy    %r13,%r1
	bv      %r0(%r25)    /* r14 */
	copy    %r14,%r1
	bv      %r0(%r25)    /* r15 */
	copy    %r15,%r1
	bv      %r0(%r25)    /* r16 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r17 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r18 */
	copy    %r18,%r1
	bv      %r0(%r25)    /* r19 */
	copy    %r19,%r1
	bv      %r0(%r25)    /* r20 */
	copy    %r20,%r1
	bv      %r0(%r25)    /* r21 */
	copy    %r21,%r1
	bv      %r0(%r25)    /* r22 */
	copy    %r22,%r1
	bv      %r0(%r25)    /* r23 */
	copy    %r23,%r1
	bv      %r0(%r25)    /* r24 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r25 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r26 */
	copy    %r26,%r1
	bv      %r0(%r25)    /* r27 */
	copy    %r27,%r1
	bv      %r0(%r25)    /* r28 */
	copy    %r28,%r1
	bv      %r0(%r25)    /* r29 */
	copy    %r29,%r1
	bv      %r0(%r25)    /* r30 */
	copy    %r30,%r1
	bv      %r0(%r25)    /* r31 */
	copy    %r31,%r1
2143
2144
set_register:
	/*
	 * set_register is used by the non access tlb miss handlers to
	 * copy the value of r1 into the general register specified in
	 * r8.
	 *
	 * As in get_register above, the blr indexes into a table of 32
	 * two-instruction (bv + delay slot) entries and returns through
	 * r25.  Shadowed targets are not special-cased here; the callers
	 * are expected to have filtered them out already (get_register
	 * returned -1 for them).
	 */
	blr     %r8,%r0
	nop
	bv      %r0(%r25)    /* r0 (silly, but it is a place holder) */
	copy    %r1,%r0
	bv      %r0(%r25)    /* r1 */
	copy    %r1,%r1
	bv      %r0(%r25)    /* r2 */
	copy    %r1,%r2
	bv      %r0(%r25)    /* r3 */
	copy    %r1,%r3
	bv      %r0(%r25)    /* r4 */
	copy    %r1,%r4
	bv      %r0(%r25)    /* r5 */
	copy    %r1,%r5
	bv      %r0(%r25)    /* r6 */
	copy    %r1,%r6
	bv      %r0(%r25)    /* r7 */
	copy    %r1,%r7
	bv      %r0(%r25)    /* r8 */
	copy    %r1,%r8
	bv      %r0(%r25)    /* r9 */
	copy    %r1,%r9
	bv      %r0(%r25)    /* r10 */
	copy    %r1,%r10
	bv      %r0(%r25)    /* r11 */
	copy    %r1,%r11
	bv      %r0(%r25)    /* r12 */
	copy    %r1,%r12
	bv      %r0(%r25)    /* r13 */
	copy    %r1,%r13
	bv      %r0(%r25)    /* r14 */
	copy    %r1,%r14
	bv      %r0(%r25)    /* r15 */
	copy    %r1,%r15
	bv      %r0(%r25)    /* r16 */
	copy    %r1,%r16
	bv      %r0(%r25)    /* r17 */
	copy    %r1,%r17
	bv      %r0(%r25)    /* r18 */
	copy    %r1,%r18
	bv      %r0(%r25)    /* r19 */
	copy    %r1,%r19
	bv      %r0(%r25)    /* r20 */
	copy    %r1,%r20
	bv      %r0(%r25)    /* r21 */
	copy    %r1,%r21
	bv      %r0(%r25)    /* r22 */
	copy    %r1,%r22
	bv      %r0(%r25)    /* r23 */
	copy    %r1,%r23
	bv      %r0(%r25)    /* r24 */
	copy    %r1,%r24
	bv      %r0(%r25)    /* r25 */
	copy    %r1,%r25
	bv      %r0(%r25)    /* r26 */
	copy    %r1,%r26
	bv      %r0(%r25)    /* r27 */
	copy    %r1,%r27
	bv      %r0(%r25)    /* r28 */
	copy    %r1,%r28
	bv      %r0(%r25)    /* r29 */
	copy    %r1,%r29
	bv      %r0(%r25)    /* r30 */
	copy    %r1,%r30
	bv      %r0(%r25)    /* r31 */
	copy    %r1,%r31
2217
2218