/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Linux/PA-RISC Project (http://www.parisc-linux.org/)
 *
 * kernel entry points (interruptions, system call wrappers)
 *  Copyright (C) 1999,2000 Philipp Rumpf
 *  Copyright (C) 1999 SuSE GmbH Nuernberg
 *  Copyright (C) 2000 Hewlett-Packard (John Marvin)
 *  Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
 */

#include <asm/asm-offsets.h>

/* we have the following possibilities to act on an interruption:
 *  - handle in assembly and use shadowed registers only
 *  - save registers to kernel stack and handle in assembly or C */


#include <asm/psw.h>
#include <asm/cache.h>		/* for L1_CACHE_SHIFT */
#include <asm/assembly.h>	/* for LDREG/STREG defines */
#include <asm/signal.h>
#include <asm/unistd.h>
#include <asm/ldcw.h>
#include <asm/traps.h>
#include <asm/thread_info.h>
#include <asm/alternative.h>

#include <linux/linkage.h>
#include <linux/pgtable.h>

#ifdef CONFIG_64BIT
	.level 2.0w
#else
	.level 2.0
#endif

	/* Get aligned page_table_lock address for this mm from cr28/tr4 */
	.macro  get_ptl reg
	mfctl	%cr28,\reg
	.endm

	/* space_to_prot macro creates a prot id from a space id */

#if (SPACEID_SHIFT) == 0
	.macro  space_to_prot spc prot
	depd,z  \spc,62,31,\prot
	.endm
#else
	.macro  space_to_prot spc prot
	extrd,u \spc,(64 - (SPACEID_SHIFT)),32,\prot
	.endm
#endif
	/*
	 * The "get_stack" macros are responsible for determining the
	 * kernel stack value.
	 *
	 *      If sr7 == 0
	 *          Already using a kernel stack, so call the
	 *          get_stack_use_r30 macro to push a pt_regs structure
	 *          on the stack, and store registers there.
	 *      else
	 *          Need to set up a kernel stack, so call the
	 *          get_stack_use_cr30 macro to set up a pointer
	 *          to the pt_regs structure contained within the
	 *          task pointer pointed to by cr30. Load the stack
	 *          pointer from the task structure.
	 *
	 * Note that we use shadowed registers for temps until
	 * we can save %r26 and %r29. %r26 is used to preserve
	 * %r8 (a shadowed register) which temporarily contained
	 * either the fault type ("code") or the eirr. We need
	 * to use a non-shadowed register to carry the value over
	 * the rfir in virt_map. We use %r26 since this value winds
	 * up being passed as the argument to either do_cpu_irq_mask
	 * or handle_interruption. %r29 is used to hold a pointer
	 * to the register save area, and once again, it needs to
	 * be a non-shadowed register so that it survives the rfir.
	 */
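
	/* Illustrative C sketch of the stack selection logic described
	 * above (not part of the build; push_pt_regs() is a made-up
	 * helper name):
	 *
	 *	if (sr7 == 0)		// interrupted while on a kernel stack
	 *		regs = push_pt_regs(r30);	  // get_stack_use_r30
	 *	else			// interrupted in user mode
	 *		regs = task_pt_regs(cr30_task);   // get_stack_use_cr30
	 */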

	.macro  get_stack_use_cr30

	/* we save the registers in the task struct */

	copy	%r30, %r17
	mfctl   %cr30, %r1
	tophys  %r1,%r9		/* task_struct */
	LDREG	TASK_STACK(%r9),%r30
	ldo	PT_SZ_ALGN(%r30),%r30
	mtsp	%r0,%sr7	/* clear sr7 after kernel stack was set! */
	mtsp	%r16,%sr3
	ldo     TASK_REGS(%r9),%r9
	STREG   %r17,PT_GR30(%r9)
	STREG   %r29,PT_GR29(%r9)
	STREG   %r26,PT_GR26(%r9)
	STREG	%r16,PT_SR7(%r9)
	copy    %r9,%r29
	.endm

	.macro  get_stack_use_r30

	/* we put a struct pt_regs on the stack and save the registers there */

	tophys  %r30,%r9
	copy	%r30,%r1
	ldo	PT_SZ_ALGN(%r30),%r30
	STREG   %r1,PT_GR30(%r9)
	STREG   %r29,PT_GR29(%r9)
	STREG   %r26,PT_GR26(%r9)
	STREG	%r16,PT_SR7(%r9)
	copy    %r9,%r29
	.endm

	.macro  rest_stack
	LDREG   PT_GR1(%r29), %r1
	LDREG   PT_GR30(%r29),%r30
	LDREG   PT_GR29(%r29),%r29
	.endm

	/* default interruption handler
	 * (calls traps.c:handle_interruption) */
	.macro	def code
	b	intr_save
	ldi     \code, %r8
	.align	32
	.endm

	/* Interrupt interruption handler
	 * (calls irq.c:do_cpu_irq_mask) */
	.macro	extint code
	b	intr_extint
	mfsp    %sr7,%r16
	.align	32
	.endm

	.import	os_hpmc, code

	/* HPMC handler */
	.macro	hpmc code
	nop			/* must be a NOP, will be patched later */
	load32	PA(os_hpmc), %r3
	bv,n	0(%r3)
	nop
	.word	0		/* checksum (will be patched) */
	.word	0		/* address of handler */
	.word	0		/* length of handler */
	.endm

	/*
	 * Performance Note: Instructions will be moved up into
	 * this part of the code later on, once we are sure
	 * that the tlb miss handlers are close to final form.
	 */

	/* Register definitions for tlb miss handler macros */

	va  = r8	/* virtual address for which the trap occurred */
	spc = r24	/* space for which the trap occurred */

#ifndef CONFIG_64BIT

	/*
	 * itlb miss interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	itlb_11 code

	mfctl	%pcsq, spc
	b	itlb_miss_11
	mfctl	%pcoq, va

	.align		32
	.endm
#endif

	/*
	 * itlb miss interruption handler (parisc 2.0)
	 */

	.macro	itlb_20 code
	mfctl	%pcsq, spc
#ifdef CONFIG_64BIT
	b       itlb_miss_20w
#else
	b	itlb_miss_20
#endif
	mfctl	%pcoq, va

	.align		32
	.endm

#ifndef CONFIG_64BIT
	/*
	 * naitlb miss interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	naitlb_11 code

	mfctl	%isr,spc
	b	naitlb_miss_11
	mfctl 	%ior,va

	.align		32
	.endm
#endif

	/*
	 * naitlb miss interruption handler (parisc 2.0)
	 */

	.macro	naitlb_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b       naitlb_miss_20w
#else
	b	naitlb_miss_20
#endif
	mfctl 	%ior,va

	.align		32
	.endm

#ifndef CONFIG_64BIT
	/*
	 * dtlb miss interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	dtlb_11 code

	mfctl	%isr, spc
	b	dtlb_miss_11
	mfctl	%ior, va

	.align		32
	.endm
#endif

	/*
	 * dtlb miss interruption handler (parisc 2.0)
	 */

	.macro	dtlb_20 code

	mfctl	%isr, spc
#ifdef CONFIG_64BIT
	b       dtlb_miss_20w
#else
	b	dtlb_miss_20
#endif
	mfctl	%ior, va

	.align		32
	.endm

#ifndef CONFIG_64BIT
	/* nadtlb miss interruption handler (parisc 1.1 - 32 bit) */

	.macro	nadtlb_11 code

	mfctl	%isr,spc
	b       nadtlb_miss_11
	mfctl	%ior,va

	.align		32
	.endm
#endif

	/* nadtlb miss interruption handler (parisc 2.0) */

	.macro	nadtlb_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b       nadtlb_miss_20w
#else
	b       nadtlb_miss_20
#endif
	mfctl	%ior,va

	.align		32
	.endm

#ifndef CONFIG_64BIT
	/*
	 * dirty bit trap interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	dbit_11 code

	mfctl	%isr,spc
	b	dbit_trap_11
	mfctl	%ior,va

	.align		32
	.endm
#endif

	/*
	 * dirty bit trap interruption handler (parisc 2.0)
	 */

	.macro	dbit_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b       dbit_trap_20w
#else
	b	dbit_trap_20
#endif
	mfctl	%ior,va

	.align		32
	.endm

	/* In LP64, the space contains part of the upper 32 bits of the
	 * fault address.  We have to extract this and place it in the va,
	 * zeroing the corresponding bits in the space register */
	.macro		space_adjust	spc,va,tmp
#ifdef CONFIG_64BIT
	extrd,u		\spc,63,SPACEID_SHIFT,\tmp
	depd		%r0,63,SPACEID_SHIFT,\spc
	depd		\tmp,31,SPACEID_SHIFT,\va
#endif
	.endm
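
	/* Illustrative C sketch of space_adjust (MASK is a shorthand
	 * introduced here, not a real kernel symbol):
	 *
	 *	#define MASK	((1UL << SPACEID_SHIFT) - 1)
	 *	tmp  = spc & MASK;	// low bits of the space id
	 *	spc &= ~MASK;		// zero them in the space register
	 *	va  |= tmp << 32;	// splice them into the upper va bits
	 */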

	.import		swapper_pg_dir,code

	/* Get the pgd.  For faults on space zero (kernel space), this
	 * is simply swapper_pg_dir.  For user space faults, the
	 * pgd is stored in %cr25 */
	.macro		get_pgd		spc,reg
	ldil		L%PA(swapper_pg_dir),\reg
	ldo		R%PA(swapper_pg_dir)(\reg),\reg
	or,COND(=)	%r0,\spc,%r0
	mfctl		%cr25,\reg
	.endm
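
	/* The or,COND(=)/mfctl pair above is a nullification trick; in
	 * C terms (illustrative sketch only):
	 *
	 *	reg = __pa(swapper_pg_dir);	// assume a kernel fault
	 *	if (spc != 0)			// user space fault?
	 *		reg = mfctl(cr25);	// then use the user pgd
	 */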

	/*
		space_check(spc,tmp,fault)

		spc - The space we saw the fault with.
		tmp - The place to store the current space.
		fault - Function to call on failure.

		Only allow faults on different spaces from the
		currently active one if we're the kernel.

	*/
	.macro		space_check	spc,tmp,fault
	mfsp		%sr7,\tmp
	/* check against %r0 which is the same value as LINUX_GATEWAY_SPACE */
	or,COND(<>)	%r0,\spc,%r0	/* user may execute gateway page
					 * as kernel, so defeat the space
					 * check if it is */
	copy		\spc,\tmp
	or,COND(=)	%r0,\tmp,%r0	/* nullify if executing as kernel */
	cmpb,COND(<>),n	\tmp,\spc,\fault
	.endm
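
	/* Illustrative C sketch of space_check ("fault" stands in for the
	 * label passed as \fault):
	 *
	 *	cur = mfsp(sr7);
	 *	if (spc != 0 && cur != 0 && cur != spc)
	 *		goto fault;	// user fault on a foreign space
	 */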

	/* Look up a PTE in a 2-Level scheme (faulting at each
	 * level if the entry isn't present)
	 *
	 * NOTE: we use ldw even for LP64, since the short pointers
	 * can address up to 1TB
	 */
	.macro		L2_ptep	pmd,pte,index,va,fault
#if CONFIG_PGTABLE_LEVELS == 3
	extru_safe	\va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
#else
	extru_safe	\va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
#endif
	dep             %r0,31,PAGE_SHIFT,\pmd  /* clear offset */
#if CONFIG_PGTABLE_LEVELS < 3
	copy		%r0,\pte
#endif
	ldw,s		\index(\pmd),\pmd
	bb,>=,n		\pmd,_PxD_PRESENT_BIT,\fault
	dep		%r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
	SHLREG		\pmd,PxD_VALUE_SHIFT,\pmd
	extru_safe	\va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
	dep		%r0,31,PAGE_SHIFT,\pmd  /* clear offset */
	shladd		\index,BITS_PER_PTE_ENTRY,\pmd,\pmd /* pmd is now pte */
	.endm
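
	/* Rough C sketch of the two-level walk above (\pmd doubles as
	 * the walk pointer; helper names are descriptive only):
	 *
	 *	entry = *(u32 *)(pmd_base + pmd_index(va) * 4);	// ldw,s
	 *	if (!(entry & PxD_PRESENT))
	 *		goto fault;
	 *	pte_base = (entry & ~PxD_FLAGS) << PxD_VALUE_SHIFT;
	 *	ptep = pte_base + pte_index(va) * sizeof(pte_t); // shladd
	 */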

	/* Look up PTE in a 3-Level scheme. */
	.macro		L3_ptep pgd,pte,index,va,fault
#if CONFIG_PGTABLE_LEVELS == 3
	copy		%r0,\pte
	extrd,u		\va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
	ldw,s		\index(\pgd),\pgd
	bb,>=,n		\pgd,_PxD_PRESENT_BIT,\fault
	shld		\pgd,PxD_VALUE_SHIFT,\pgd
#endif
	L2_ptep		\pgd,\pte,\index,\va,\fault
	.endm

	/* Acquire page_table_lock and check page is present. */
	.macro		ptl_lock	spc,ptp,pte,tmp,tmp1,fault
#ifdef CONFIG_TLB_PTLOCK
98:	cmpib,COND(=),n	0,\spc,2f
	get_ptl		\tmp
1:	LDCW		0(\tmp),\tmp1
	cmpib,COND(=)	0,\tmp1,1b
	nop
	LDREG		0(\ptp),\pte
	bb,<,n		\pte,_PAGE_PRESENT_BIT,3f
	b		\fault
	stw		\spc,0(\tmp)
99:	ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
#endif
2:	LDREG		0(\ptp),\pte
	bb,>=,n		\pte,_PAGE_PRESENT_BIT,\fault
3:
	.endm
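
	/* Illustrative C sketch of ptl_lock (the LDCW loop is the classic
	 * PA-RISC spinlock acquire; on success the lock stays held until
	 * ptl_unlock0/1):
	 *
	 *	if (spc != 0) {				// user fault
	 *		while (ldcw(ptl) == 0)		// spin on the lock
	 *			;
	 *		pte = *ptp;
	 *		if (!(pte & _PAGE_PRESENT)) {
	 *			*ptl = spc;		// unlock (nonzero)
	 *			goto fault;
	 *		}
	 *	} else {				// kernel: no lock
	 *		pte = *ptp;
	 *		if (!(pte & _PAGE_PRESENT))
	 *			goto fault;
	 *	}
	 */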

	/* Release page_table_lock without reloading lock address.
	   Note that the values in the register spc are limited to
	   NR_SPACE_IDS (262144). Thus, the stw instruction always
	   stores a nonzero value even when register spc is 64 bits.
	   We use an ordered store to ensure all prior accesses are
	   performed prior to releasing the lock. */
	.macro		ptl_unlock0	spc,tmp
#ifdef CONFIG_TLB_PTLOCK
98:	or,COND(=)	%r0,\spc,%r0
	stw,ma		\spc,0(\tmp)
99:	ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
#endif
	.endm

	/* Release page_table_lock. */
	.macro		ptl_unlock1	spc,tmp
#ifdef CONFIG_TLB_PTLOCK
98:	get_ptl		\tmp
	ptl_unlock0	\spc,\tmp
99:	ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
#endif
	.endm

	/* Set the _PAGE_ACCESSED bit of the PTE.  Be clever and
	 * don't needlessly dirty the cache line if it was already set */
	.macro		update_accessed	ptp,pte,tmp,tmp1
	ldi		_PAGE_ACCESSED,\tmp1
	or		\tmp1,\pte,\tmp
	and,COND(<>)	\tmp1,\pte,%r0
	STREG		\tmp,0(\ptp)
	.endm
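
	/* In C terms (illustrative): set the bit, but skip the store
	 * entirely when it was already set:
	 *
	 *	if (!(pte & _PAGE_ACCESSED))
	 *		*ptp = pte | _PAGE_ACCESSED;
	 */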

	/* Set the dirty bit (and accessed bit).  No need to be
	 * clever, this is only used from the dirty fault */
	.macro		update_dirty	ptp,pte,tmp
	ldi		_PAGE_ACCESSED|_PAGE_DIRTY,\tmp
	or		\tmp,\pte,\pte
	STREG		\pte,0(\ptp)
	.endm

	/* We have (depending on the page size):
	 * - 38 to 52-bit Physical Page Number
	 * - 12 to 26-bit page offset
	 */
	/* bitshift difference between a PFN (based on kernel's PAGE_SIZE)
	 * to a CPU TLB 4k PFN (4k => 12 bits to shift) */
	#define PAGE_ADD_SHIFT		(PAGE_SHIFT-12)
	#define PAGE_ADD_HUGE_SHIFT	(REAL_HPAGE_SHIFT-12)
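
	/* For example (illustrative): with 16kB kernel pages, PAGE_SHIFT
	 * is 14, so PAGE_ADD_SHIFT is 2 and a kernel PFN is converted to
	 * the 4k-granular PFN the TLB expects by:
	 *
	 *	tlb_pfn = kernel_pfn << PAGE_ADD_SHIFT;
	 */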

	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
	.macro		convert_for_tlb_insert20 pte,tmp
#ifdef CONFIG_HUGETLB_PAGE
	copy		\pte,\tmp
	extrd,u		\tmp,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
				64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte

	depdi		_PAGE_SIZE_ENCODING_DEFAULT,63,\
				(63-58)+PAGE_ADD_SHIFT,\pte
	extrd,u,*=	\tmp,_PAGE_HPAGE_BIT+32,1,%r0
	depdi		_HUGE_PAGE_SIZE_ENCODING_DEFAULT,63,\
				(63-58)+PAGE_ADD_HUGE_SHIFT,\pte
#else /* Huge pages disabled */
	extrd,u		\pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
				64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
	depdi		_PAGE_SIZE_ENCODING_DEFAULT,63,\
				(63-58)+PAGE_ADD_SHIFT,\pte
#endif
	.endm

	/* Convert the pte and prot to tlb insertion values.  How
	 * this happens is quite subtle, read below */
	.macro		make_insert_tlb	spc,pte,prot,tmp
	space_to_prot   \spc \prot        /* create prot id from space */
	/* The following is the real subtlety.  This is depositing
	 * T <-> _PAGE_REFTRAP
	 * D <-> _PAGE_DIRTY
	 * B <-> _PAGE_DMB (memory break)
	 *
	 * Then incredible subtlety: The access rights are
	 * _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE
	 * See 3-14 of the parisc 2.0 manual
	 *
	 * Finally, _PAGE_READ goes in the top bit of PL1 (so we
	 * trigger an access rights trap in user space if the user
	 * tries to read an unreadable page) */
	depd            \pte,8,7,\prot

	/* PAGE_USER indicates the page can be read with user privileges,
	 * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1
	 * contains _PAGE_READ) */
	extrd,u,*=      \pte,_PAGE_USER_BIT+32,1,%r0
	depdi		7,11,3,\prot
	/* If we're a gateway page, drop PL2 back to zero for promotion
	 * to kernel privilege (so we can execute the page as kernel).
	 * Any privilege promotion page always denies read and write */
	extrd,u,*= 	\pte,_PAGE_GATEWAY_BIT+32,1,%r0
	depd		%r0,11,2,\prot	/* If Gateway, Set PL2 to 0 */

	/* Enforce uncacheable pages.
	 * This should ONLY be used for MMIO on PA 2.0 machines.
	 * Memory/DMA is cache coherent on all PA2.0 machines we support
	 * (that means T-class is NOT supported) and the memory controllers
	 * on most of those machines only handle cache transactions.
	 */
	extrd,u,*=	\pte,_PAGE_NO_CACHE_BIT+32,1,%r0
	depdi		1,12,1,\prot

	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
	convert_for_tlb_insert20 \pte \tmp
	.endm
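
	/* Condensed C sketch of the prot construction above (illustrative
	 * only; the helper names are made up for this comment):
	 *
	 *	prot  = space_to_prot(spc);
	 *	prot |= pte_tdb_and_ar_bits(pte);	// depd \pte,8,7
	 *	if (pte & _PAGE_USER)
	 *		prot |= PL1_PL2_USER;		// depdi 7,11,3
	 *	if (pte & _PAGE_GATEWAY)
	 *		prot &= ~PL2_MASK;		// promotion page
	 *	if (pte & _PAGE_NO_CACHE)
	 *		prot |= TLB_UNCACHEABLE;	// depdi 1,12,1
	 *	pte = pte_to_tlb_pfn(pte);	// convert_for_tlb_insert20
	 */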

	/* Identical macro to make_insert_tlb above, except it
	 * makes the tlb entry for the differently formatted pa11
	 * insertion instructions */
	.macro		make_insert_tlb_11	spc,pte,prot
	zdep		\spc,30,15,\prot
	dep		\pte,8,7,\prot
	extru,=		\pte,_PAGE_NO_CACHE_BIT,1,%r0
	depi		1,12,1,\prot
	extru,=         \pte,_PAGE_USER_BIT,1,%r0
	depi		7,11,3,\prot   /* Set for user space (1 rsvd for read) */
	extru,= 	\pte,_PAGE_GATEWAY_BIT,1,%r0
	depi		0,11,2,\prot	/* If Gateway, Set PL2 to 0 */

	/* Get rid of prot bits and convert to page addr for iitlba */

	depi		0,31,ASM_PFN_PTE_SHIFT,\pte
	SHRREG		\pte,(ASM_PFN_PTE_SHIFT-(31-26)),\pte
	.endm

	/* This is for ILP32 PA2.0 only.  The TLB insertion needs
	 * to extend into I/O space if the address is 0xfXXXXXXX
	 * so we extend the f's into the top word of the pte in
	 * this case */
	.macro		f_extend	pte,tmp
	extrd,s		\pte,42,4,\tmp
	addi,<>		1,\tmp,%r0
	extrd,s		\pte,63,25,\pte
	.endm

	/* The alias region is an 8MB-aligned, 16MB region used to
	 * clear and copy user pages at addresses congruent with the
	 * user virtual address.
	 *
	 * To use the alias page, you set %r26 up with the "to" TLB
	 * entry (identifying the physical page) and %r23 up with
	 * the "from" TLB entry (or nothing if only a "to" entry---for
	 * clear_user_page_asm) */
	.macro		do_alias	spc,tmp,tmp1,va,pte,prot,fault,patype
	cmpib,COND(<>),n 0,\spc,\fault
	ldil		L%(TMPALIAS_MAP_START),\tmp
#if defined(CONFIG_64BIT) && (TMPALIAS_MAP_START >= 0x80000000)
	/* on LP64, ldi will sign extend into the upper 32 bits,
	 * which is behaviour we don't want */
	depdi		0,31,32,\tmp
#endif
	copy		\va,\tmp1
	depi		0,31,23,\tmp1
	cmpb,COND(<>),n	\tmp,\tmp1,\fault
	mfctl		%cr19,\tmp	/* iir */
	/* get the opcode (first six bits) into \tmp */
	extrw,u		\tmp,5,6,\tmp
	/*
	 * Only setting the T bit prevents data cache movein
	 * Setting access rights to zero prevents instruction cache movein
	 *
	 * Note subtlety here: _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE go
	 * to type field and _PAGE_READ goes to top bit of PL1
	 */
	ldi		(_PAGE_REFTRAP|_PAGE_READ|_PAGE_WRITE),\prot
	/*
	 * so if the opcode is one (i.e. this is a memory management
	 * instruction) nullify the next load so \prot is only T.
	 * Otherwise this is a normal data operation
	 */
	cmpiclr,=	0x01,\tmp,%r0
	ldi		(_PAGE_DIRTY|_PAGE_READ|_PAGE_WRITE),\prot
.ifc \patype,20
	depd,z		\prot,8,7,\prot
.else
.ifc \patype,11
	depw,z		\prot,8,7,\prot
.else
	.error "undefined PA type to do_alias"
.endif
.endif
	/*
	 * OK, it is in the temp alias region, check whether "from" or "to".
	 * Check "subtle" note in pacache.S re: r23/r26.
	 */
#ifdef CONFIG_64BIT
	extrd,u,*=	\va,41,1,%r0
#else
	extrw,u,=	\va,9,1,%r0
#endif
	or,COND(tr)	%r23,%r0,\pte
	or		%r26,%r0,\pte
	.endm


	/*
	 * Fault_vectors are architecturally required to be aligned on a 2K
	 * boundary
	 */

	.section .text.hot
	.align 2048

ENTRY(fault_vector_20)
	/* First vector is invalid (0) */
	.ascii	"cows can fly"
	.byte 0
	.align 32

	hpmc		 1
	def		 2
	def		 3
	extint		 4
	def		 5
	itlb_20		 PARISC_ITLB_TRAP
	def		 7
	def		 8
	def              9
	def		10
	def		11
	def		12
	def		13
	def		14
	dtlb_20		15
	naitlb_20	16
	nadtlb_20	17
	def		18
	def		19
	dbit_20		20
	def		21
	def		22
	def		23
	def		24
	def		25
	def		26
	def		27
	def		28
	def		29
	def		30
	def		31
END(fault_vector_20)

#ifndef CONFIG_64BIT

	.align 2048

ENTRY(fault_vector_11)
	/* First vector is invalid (0) */
	.ascii	"cows can fly"
	.byte 0
	.align 32

	hpmc		 1
	def		 2
	def		 3
	extint		 4
	def		 5
	itlb_11		 PARISC_ITLB_TRAP
	def		 7
	def		 8
	def              9
	def		10
	def		11
	def		12
	def		13
	def		14
	dtlb_11		15
	naitlb_11	16
	nadtlb_11	17
	def		18
	def		19
	dbit_11		20
	def		21
	def		22
	def		23
	def		24
	def		25
	def		26
	def		27
	def		28
	def		29
	def		30
	def		31
END(fault_vector_11)

#endif
	/* Fault vector is separately protected and *must* be on its own page */
	.align		PAGE_SIZE

	.import		handle_interruption,code
	.import		do_cpu_irq_mask,code

	/*
	 * Child Returns here
	 *
	 * copy_thread moved args into task save area.
	 */

ENTRY(ret_from_kernel_thread)
	/* Call schedule_tail first though */
	BL	schedule_tail, %r2
	nop

	mfctl	%cr30,%r1	/* task_struct */
	LDREG	TASK_PT_GR25(%r1), %r26
#ifdef CONFIG_64BIT
	LDREG	TASK_PT_GR27(%r1), %r27
#endif
	LDREG	TASK_PT_GR26(%r1), %r1
	ble	0(%sr7, %r1)
	copy	%r31, %r2
	b	finish_child_return
	nop
END(ret_from_kernel_thread)


	/*
	 * struct task_struct *_switch_to(struct task_struct *prev,
	 *	struct task_struct *next)
	 *
	 * switch kernel stacks and return prev */
ENTRY_CFI(_switch_to)
	STREG	 %r2, -RP_OFFSET(%r30)

	callee_save_float
	callee_save

	load32	_switch_to_ret, %r2

	STREG	%r2, TASK_PT_KPC(%r26)
	LDREG	TASK_PT_KPC(%r25), %r2

	STREG	%r30, TASK_PT_KSP(%r26)
	LDREG	TASK_PT_KSP(%r25), %r30
	bv	%r0(%r2)
	mtctl   %r25,%cr30

ENTRY(_switch_to_ret)
	mtctl	%r0, %cr0		/* Needed for single stepping */
	callee_rest
	callee_rest_float

	LDREG	-RP_OFFSET(%r30), %r2
	bv	%r0(%r2)
	copy	%r26, %r28
ENDPROC_CFI(_switch_to)

	/*
	 * Common rfi return path for interruptions, kernel execve, and
	 * sys_rt_sigreturn (sometimes).  The sys_rt_sigreturn syscall will
	 * return via this path if the signal was received when the process
	 * was running; if the process was blocked on a syscall then the
	 * normal syscall_exit path is used.  All syscalls for traced
	 * processes exit via intr_restore.
	 *
	 * XXX If any syscalls that change a process's space id ever exit
	 * this way, then we will need to copy %sr3 in to PT_SR[3..7], and
	 * adjust IASQ[0..1].
	 *
	 */

	.align	PAGE_SIZE

ENTRY_CFI(syscall_exit_rfi)
	mfctl	%cr30,%r16		/* task_struct */
	ldo	TASK_REGS(%r16),%r16
	/* Force iaoq to userspace, as the user has had access to our current
	 * context via sigcontext. Also filter the PSW for the same reason.
	 */
	LDREG	PT_IAOQ0(%r16),%r19
	depi	PRIV_USER,31,2,%r19
	STREG	%r19,PT_IAOQ0(%r16)
	LDREG	PT_IAOQ1(%r16),%r19
	depi	PRIV_USER,31,2,%r19
	STREG	%r19,PT_IAOQ1(%r16)
	LDREG   PT_PSW(%r16),%r19
	load32	USER_PSW_MASK,%r1
#ifdef CONFIG_64BIT
	load32	USER_PSW_HI_MASK,%r20
	depd    %r20,31,32,%r1
#endif
	and     %r19,%r1,%r19 /* Mask out bits that user shouldn't play with */
	load32	USER_PSW,%r1
	or      %r19,%r1,%r19 /* Make sure default USER_PSW bits are set */
	STREG   %r19,PT_PSW(%r16)

	/*
	 * If we aren't being traced, we never saved space registers
	 * (we don't store them in the sigcontext), so set them
	 * to "proper" values now (otherwise we'll wind up restoring
	 * whatever was last stored in the task structure, which might
	 * be inconsistent if an interrupt occurred while on the gateway
	 * page). Note that we may be "trashing" values the user put in
	 * them, but we don't support the user changing them.
	 */

	STREG   %r0,PT_SR2(%r16)
	mfsp    %sr3,%r19
	STREG   %r19,PT_SR0(%r16)
	STREG   %r19,PT_SR1(%r16)
	STREG   %r19,PT_SR3(%r16)
	STREG   %r19,PT_SR4(%r16)
	STREG   %r19,PT_SR5(%r16)
	STREG   %r19,PT_SR6(%r16)
	STREG   %r19,PT_SR7(%r16)

ENTRY(intr_return)
	/* check for reschedule */
	mfctl   %cr30,%r1
	LDREG   TASK_TI_FLAGS(%r1),%r19	/* sched.h: TIF_NEED_RESCHED */
	bb,<,n	%r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */

	.import do_notify_resume,code
intr_check_sig:
	/* As above */
	mfctl   %cr30,%r1
	LDREG	TASK_TI_FLAGS(%r1),%r19
	ldi	(_TIF_USER_WORK_MASK & ~_TIF_NEED_RESCHED), %r20
	and,COND(<>)	%r19, %r20, %r0
	b,n	intr_restore	/* skip past if we've nothing to do */

	/* This check is critical to having LWS
	 * working. The IASQ is zero on the gateway
	 * page and we cannot deliver any signals until
	 * we get off the gateway page.
	 *
	 * Only do signals if we are returning to user space
	 */
	LDREG	PT_IASQ0(%r16), %r20
	cmpib,COND(=),n LINUX_GATEWAY_SPACE, %r20, intr_restore /* forward */
	LDREG	PT_IASQ1(%r16), %r20
	cmpib,COND(=),n LINUX_GATEWAY_SPACE, %r20, intr_restore /* forward */

	copy	%r0, %r25			/* long in_syscall = 0 */
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29			/* Reference param save area */
#endif

	/* NOTE: We need to enable interrupts if we have to deliver
	 * signals. We used to do this earlier but it caused kernel
	 * stack overflows. */
	ssm	PSW_SM_I, %r0

	BL	do_notify_resume,%r2
	copy	%r16, %r26			/* struct pt_regs *regs */

	b,n	intr_check_sig

intr_restore:
	copy            %r16,%r29
	ldo             PT_FR31(%r29),%r1
	rest_fp         %r1
	rest_general    %r29

	/* inverse of virt_map */
	pcxt_ssm_bug
	rsm             PSW_SM_QUIET,%r0	/* prepare for rfi */
	tophys_r1       %r29

	/* Restore space id's and special cr's from PT_REGS
	 * structure pointed to by r29
	 */
	rest_specials	%r29

	/* IMPORTANT: rest_stack restores r29 last (we are using it)!
	 * It also restores r1 and r30.
	 */
	rest_stack

	rfi
	nop

#ifndef CONFIG_PREEMPTION
# define intr_do_preempt	intr_restore
#endif /* !CONFIG_PREEMPTION */

	.import schedule,code
intr_do_resched:
	/* Only call schedule on return to userspace. If we're returning
	 * to kernel space, we may schedule if CONFIG_PREEMPTION, otherwise
	 * we jump back to intr_restore.
	 */
	LDREG	PT_IASQ0(%r16), %r20
	cmpib,COND(=)	0, %r20, intr_do_preempt
	nop
	LDREG	PT_IASQ1(%r16), %r20
	cmpib,COND(=)	0, %r20, intr_do_preempt
	nop

	/* NOTE: We need to enable interrupts if we schedule.  We used
	 * to do this earlier but it caused kernel stack overflows. */
	ssm     PSW_SM_I, %r0

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif

	ldil	L%intr_check_sig, %r2
#ifndef CONFIG_64BIT
	b	schedule
#else
	load32	schedule, %r20
	bv	%r0(%r20)
#endif
	ldo	R%intr_check_sig(%r2), %r2

	/* preempt the current task on returning to kernel
	 * mode from an interrupt, iff need_resched is set,
	 * and preempt_count is 0.  Otherwise, we continue on
	 * our merry way back to the current running task.
	 */
#ifdef CONFIG_PREEMPTION
	.import preempt_schedule_irq,code
intr_do_preempt:
	rsm	PSW_SM_I, %r0		/* disable interrupts */

	/* current_thread_info()->preempt_count */
	mfctl	%cr30, %r1
	ldw	TI_PRE_COUNT(%r1), %r19
	cmpib,<>	0, %r19, intr_restore	/* if preempt_count > 0 */
	nop				/* prev insn branched backwards */

	/* check if we interrupted a critical path */
	LDREG	PT_PSW(%r16), %r20
	bb,<,n	%r20, 31 - PSW_SM_I, intr_restore
	nop

	/* ssm PSW_SM_I done later in intr_restore */
#ifdef CONFIG_MLONGCALLS
	ldil	L%intr_restore, %r2
	load32	preempt_schedule_irq, %r1
	bv	%r0(%r1)
	ldo	R%intr_restore(%r2), %r2
#else
	ldil	L%intr_restore, %r1
	BL	preempt_schedule_irq, %r2
	ldo	R%intr_restore(%r1), %r2
#endif
#endif /* CONFIG_PREEMPTION */

	/*
	 * External interrupts.
	 */

intr_extint:
	cmpib,COND(=),n 0,%r16,1f

	get_stack_use_cr30
	b,n 2f

1:
	get_stack_use_r30
2:
	save_specials	%r29
	virt_map
	save_general	%r29

	ldo	PT_FR0(%r29), %r24
	save_fp	%r24

	loadgp

	copy	%r29, %r26	/* arg0 is pt_regs */
	copy	%r29, %r16	/* save pt_regs */

	ldil	L%intr_return, %r2

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29	/* Reference param save area */
#endif

	b	do_cpu_irq_mask
	ldo	R%intr_return(%r2), %r2	/* return to intr_return, not here */
ENDPROC_CFI(syscall_exit_rfi)


	/* Generic interruptions (illegal insn, unaligned, page fault, etc) */

ENTRY_CFI(intr_save)		/* for os_hpmc */
	mfsp    %sr7,%r16
	cmpib,COND(=),n 0,%r16,1f
	get_stack_use_cr30
	b	2f
	copy    %r8,%r26

1:
	get_stack_use_r30
	copy    %r8,%r26

2:
	save_specials	%r29

	/* If this trap is an itlb miss, skip saving/adjusting isr/ior */
	cmpib,COND(=),n        PARISC_ITLB_TRAP,%r26,skip_save_ior


	mfctl           %isr, %r16
	nop		/* serialize mfctl on PA 2.0 to avoid 4 cycle penalty */
	mfctl           %ior, %r17


#ifdef CONFIG_64BIT
	/*
	 * If the interrupted code was running with W bit off (32 bit),
	 * clear the b bits (bits 0 & 1) in the ior.
	 * save_specials left ipsw value in r8 for us to test.
	 */
	extrd,u,*<>     %r8,PSW_W_BIT,1,%r0
	depdi           0,1,2,%r17

	/* adjust isr/ior: get high bits from isr and deposit in ior */
	space_adjust	%r16,%r17,%r1
#endif
	STREG           %r16, PT_ISR(%r29)
	STREG           %r17, PT_IOR(%r29)

#if 0 && defined(CONFIG_64BIT)
	/* Revisit when we have 64-bit code above 4Gb */
	b,n		intr_save2

skip_save_ior:
	/* We have an itlb miss, and when executing code above 4 Gb on ILP64, we
	 * need to adjust iasq/iaoq here in the same way we adjusted isr/ior
	 * above.
	 */
	extrd,u,*	%r8,PSW_W_BIT,1,%r1
	cmpib,COND(=),n	1,%r1,intr_save2
	LDREG		PT_IASQ0(%r29), %r16
	LDREG		PT_IAOQ0(%r29), %r17
	/* adjust iasq/iaoq */
	space_adjust	%r16,%r17,%r1
	STREG           %r16, PT_IASQ0(%r29)
	STREG           %r17, PT_IAOQ0(%r29)
#else
skip_save_ior:
#endif

intr_save2:
	virt_map
	save_general	%r29

	ldo		PT_FR0(%r29), %r25
	save_fp		%r25

	loadgp

	copy		%r29, %r25	/* arg1 is pt_regs */
#ifdef CONFIG_64BIT
	ldo		-16(%r30),%r29	/* Reference param save area */
#endif

	ldil		L%intr_check_sig, %r2
	copy		%r25, %r16	/* save pt_regs */

	b		handle_interruption
	ldo		R%intr_check_sig(%r2), %r2
ENDPROC_CFI(intr_save)


	/*
	 * Note for all tlb miss handlers:
	 *
	 * cr24 contains a pointer to the kernel address space
	 * page directory.
	 *
	 * cr25 contains a pointer to the current user address
	 * space page directory.
	 *
	 * sr3 will contain the space id of the user address space
	 * of the current running thread while that thread is
	 * running in the kernel.
	 */

	/*
	 * register number allocations.  Note that these are all
	 * in the shadowed registers
	 */

	t0 = r1		/* temporary register 0 */
	va = r8		/* virtual address for which the trap occurred */
	t1 = r9		/* temporary register 1 */
	pte  = r16	/* pte/phys page # */
	prot = r17	/* prot bits */
	spc  = r24	/* space for which the trap occurred */
	ptp = r25	/* page directory/page table pointer */

#ifdef CONFIG_64BIT

dtlb_miss_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dtlb_fault

	L3_ptep		ptp,pte,t0,va,dtlb_check_alias_20w

	ptl_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_20w
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	idtlbt          pte,prot

	ptl_unlock1	spc,t0
	rfir
	nop

dtlb_check_alias_20w:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,20

	idtlbt          pte,prot

	rfir
	nop

nadtlb_miss_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,nadtlb_fault

	L3_ptep		ptp,pte,t0,va,nadtlb_check_alias_20w

	ptl_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_20w
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	idtlbt          pte,prot

	ptl_unlock1	spc,t0
	rfir
	nop

nadtlb_check_alias_20w:
	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,20

	idtlbt          pte,prot

	rfir
	nop

#else

dtlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,dtlb_fault

	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_11

	ptl_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_11
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	ptl_unlock1	spc,t0
	rfir
	nop

dtlb_check_alias_11:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,11

	idtlba          pte,(va)
	idtlbp          prot,(va)

	rfir
	nop

nadtlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,nadtlb_fault

	L2_ptep		ptp,pte,t0,va,nadtlb_check_alias_11

	ptl_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_11
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	ptl_unlock1	spc,t0
	rfir
	nop

nadtlb_check_alias_11:
	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,11

	idtlba          pte,(va)
	idtlbp          prot,(va)

	rfir
	nop

dtlb_miss_20:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dtlb_fault

	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_20

	ptl_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_20
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	idtlbt          pte,prot

	ptl_unlock1	spc,t0
	rfir
	nop

dtlb_check_alias_20:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,20

	idtlbt          pte,prot

	rfir
	nop

nadtlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,nadtlb_fault

	L2_ptep		ptp,pte,t0,va,nadtlb_check_alias_20

	ptl_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_20
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	idtlbt		pte,prot

	ptl_unlock1	spc,t0
	rfir
	nop

nadtlb_check_alias_20:
	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,20

	idtlbt          pte,prot

	rfir
	nop

#endif

nadtlb_emulate:

	/*
	 * Non access misses can be caused by fdc,fic,pdc,lpa,probe and
	 * probei instructions. We don't want to fault for these
	 * instructions (not only does it not make sense, it can cause
	 * deadlocks, since some flushes are done with the mmap
	 * semaphore held). If the translation doesn't exist, we can't
	 * insert a translation, so we have to emulate the side effects
	 * of the instruction. Since we don't insert a translation
	 * we can get a lot of faults during a flush loop, so it makes
	 * sense to try to do it here with minimum overhead. We only
	 * emulate fdc,fic,pdc,probew,prober instructions whose base
	 * and index registers are not shadowed. We defer everything
	 * else to the "slow" path.
	 */
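
	/* Illustrative C sketch of the fast-path decision below (helper
	 * names are descriptive only, not real kernel symbols):
	 *
	 *	iir = mfctl(cr19);
	 *	if (!is_fdc_fdce_pdc_fic(iir))
	 *		goto probe_check;	// nadtlb_probe_check
	 *	if (!m_bit_set(iir))		// no base modification
	 *		goto nullify;		// just skip the insn
	 *	index = get_register(index_reg(iir));
	 *	base  = get_register(base_reg(iir));
	 *	if (index == -1 || base == -1)	// shadowed register
	 *		goto slow_path;		// nadtlb_fault
	 *	set_register(base_reg(iir), base + index);
	 *	goto nullify;
	 */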

	mfctl           %cr19,%r9 /* Get iir */

	/* PA 2.0 Arch Ref. Book pg 382 has a good description of the insn bits.
	   Checks for fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw */

	/* Checks for fdc,fdce,pdc,"fic,4f" only */
	ldi             0x280,%r16
	and             %r9,%r16,%r17
	cmpb,<>,n       %r16,%r17,nadtlb_probe_check
	bb,>=,n         %r9,26,nadtlb_nullify  /* m bit not set, just nullify */
	BL		get_register,%r25
	extrw,u         %r9,15,5,%r8           /* Get index register # */
	cmpib,COND(=),n        -1,%r1,nadtlb_fault    /* have to use slow path */
	copy            %r1,%r24
	BL		get_register,%r25
	extrw,u         %r9,10,5,%r8           /* Get base register # */
	cmpib,COND(=),n        -1,%r1,nadtlb_fault    /* have to use slow path */
	BL		set_register,%r25
	add,l           %r1,%r24,%r1           /* doesn't affect c/b bits */

nadtlb_nullify:
	mfctl           %ipsw,%r8
	ldil            L%PSW_N,%r9
	or              %r8,%r9,%r8            /* Set PSW_N */
	mtctl           %r8,%ipsw

	rfir
	nop

	/*
		When there is no translation for the probe address then we
		must nullify the insn and return zero in the target register.
		This will indicate to the calling code that it does not have
		write/read privileges to this address.

		This should technically work for prober and probew in PA 1.1,
		and also probe,r and probe,w in PA 2.0

		WARNING: USE ONLY NON-SHADOW REGISTERS WITH PROBE INSN!
		THE SLOW-PATH EMULATION HAS NOT BEEN WRITTEN YET.

	*/
nadtlb_probe_check:
	ldi             0x80,%r16
	and             %r9,%r16,%r17
	cmpb,<>,n       %r16,%r17,nadtlb_fault /* Must be probe,[rw]*/
	BL              get_register,%r25      /* Find the target register */
	extrw,u         %r9,31,5,%r8           /* Get target register */
	cmpib,COND(=),n        -1,%r1,nadtlb_fault    /* have to use slow path */
	BL		set_register,%r25
	copy            %r0,%r1                /* Write zero to target register */
	b nadtlb_nullify                       /* Nullify return insn */
	nop


#ifdef CONFIG_64BIT
itlb_miss_20w:

	/*
	 * I miss is a little different, since we allow users to fault
	 * on the gateway page which is in the kernel address space.
	 */

	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,itlb_fault

	L3_ptep		ptp,pte,t0,va,itlb_fault

	ptl_lock	spc,ptp,pte,t0,t1,itlb_fault
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	iitlbt          pte,prot

	ptl_unlock1	spc,t0
	rfir
	nop

naitlb_miss_20w:

	/*
	 * I miss is a little different, since we allow users to fault
	 * on the gateway page which is in the kernel address space.
	 */

	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,naitlb_fault

	L3_ptep		ptp,pte,t0,va,naitlb_check_alias_20w

	ptl_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_20w
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	iitlbt          pte,prot

	ptl_unlock1	spc,t0
	rfir
	nop

naitlb_check_alias_20w:
	do_alias	spc,t0,t1,va,pte,prot,naitlb_fault,20

	iitlbt		pte,prot

	rfir
	nop

#else

itlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,itlb_fault

	L2_ptep		ptp,pte,t0,va,itlb_fault

	ptl_lock	spc,ptp,pte,t0,t1,itlb_fault
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	iitlba		pte,(%sr1,va)
	iitlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	ptl_unlock1	spc,t0
	rfir
	nop

naitlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,naitlb_fault

	L2_ptep		ptp,pte,t0,va,naitlb_check_alias_11

	ptl_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_11
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	iitlba		pte,(%sr1,va)
	iitlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	ptl_unlock1	spc,t0
	rfir
	nop

naitlb_check_alias_11:
	do_alias	spc,t0,t1,va,pte,prot,itlb_fault,11

	iitlba          pte,(%sr0, va)
	iitlbp          prot,(%sr0, va)

	rfir
	nop


itlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,itlb_fault

	L2_ptep		ptp,pte,t0,va,itlb_fault

	ptl_lock	spc,ptp,pte,t0,t1,itlb_fault
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	iitlbt          pte,prot

	ptl_unlock1	spc,t0
	rfir
	nop

naitlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,naitlb_fault

	L2_ptep		ptp,pte,t0,va,naitlb_check_alias_20

	ptl_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_20
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	iitlbt          pte,prot

	ptl_unlock1	spc,t0
	rfir
	nop

naitlb_check_alias_20:
	do_alias	spc,t0,t1,va,pte,prot,naitlb_fault,20

	iitlbt          pte,prot

	rfir
	nop

#endif

#ifdef CONFIG_64BIT

dbit_trap_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dbit_fault

	L3_ptep		ptp,pte,t0,va,dbit_fault

	ptl_lock	spc,ptp,pte,t0,t1,dbit_fault
	update_dirty	ptp,pte,t1

	make_insert_tlb	spc,pte,prot,t1

	idtlbt          pte,prot

	ptl_unlock0	spc,t0
	rfir
	nop
#else

dbit_trap_11:

	get_pgd		spc,ptp

	space_check	spc,t0,dbit_fault

	L2_ptep		ptp,pte,t0,va,dbit_fault

	ptl_lock	spc,ptp,pte,t0,t1,dbit_fault
	update_dirty	ptp,pte,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp            %sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp            t1, %sr1     /* Restore sr1 */

	ptl_unlock0	spc,t0
	rfir
	nop

dbit_trap_20:
	get_pgd		spc,ptp

	space_check	spc,t0,dbit_fault

	L2_ptep		ptp,pte,t0,va,dbit_fault

	ptl_lock	spc,ptp,pte,t0,t1,dbit_fault
	update_dirty	ptp,pte,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	idtlbt		pte,prot

	ptl_unlock0	spc,t0
	rfir
	nop
#endif

	.import handle_interruption,code

kernel_bad_space:
	b               intr_save
	ldi             31,%r8  /* Use an unused code */

dbit_fault:
	b               intr_save
	ldi             20,%r8

itlb_fault:
	b               intr_save
	ldi             PARISC_ITLB_TRAP,%r8

nadtlb_fault:
	b               intr_save
	ldi             17,%r8

naitlb_fault:
	b               intr_save
	ldi             16,%r8

dtlb_fault:
	b               intr_save
	ldi             15,%r8

	/* Register saving semantics for system calls:

	   %r1		   clobbered by system call macro in userspace
	   %r2		   saved in PT_REGS by gateway page
	   %r3  - %r18	   preserved by C code (saved by signal code)
	   %r19 - %r20	   saved in PT_REGS by gateway page
	   %r21 - %r22	   non-standard syscall args
			   stored in kernel stack by gateway page
	   %r23 - %r26	   arg3-arg0, saved in PT_REGS by gateway page
	   %r27 - %r30	   saved in PT_REGS by gateway page
	   %r31		   syscall return pointer
	 */

	/* Floating point registers (FIXME: what do we do with these?)

	   %fr0  - %fr3	   status/exception, not preserved
	   %fr4  - %fr7	   arguments
	   %fr8	 - %fr11   not preserved by C code
	   %fr12 - %fr21   preserved by C code
	   %fr22 - %fr31   not preserved by C code
	 */

	.macro	reg_save regs
	STREG	%r3, PT_GR3(\regs)
	STREG	%r4, PT_GR4(\regs)
	STREG	%r5, PT_GR5(\regs)
	STREG	%r6, PT_GR6(\regs)
	STREG	%r7, PT_GR7(\regs)
	STREG	%r8, PT_GR8(\regs)
	STREG	%r9, PT_GR9(\regs)
	STREG   %r10,PT_GR10(\regs)
	STREG   %r11,PT_GR11(\regs)
	STREG   %r12,PT_GR12(\regs)
	STREG   %r13,PT_GR13(\regs)
	STREG   %r14,PT_GR14(\regs)
	STREG   %r15,PT_GR15(\regs)
	STREG   %r16,PT_GR16(\regs)
	STREG   %r17,PT_GR17(\regs)
	STREG   %r18,PT_GR18(\regs)
	.endm

	.macro	reg_restore regs
	LDREG	PT_GR3(\regs), %r3
	LDREG	PT_GR4(\regs), %r4
	LDREG	PT_GR5(\regs), %r5
	LDREG	PT_GR6(\regs), %r6
	LDREG	PT_GR7(\regs), %r7
	LDREG	PT_GR8(\regs), %r8
	LDREG	PT_GR9(\regs), %r9
	LDREG   PT_GR10(\regs),%r10
	LDREG   PT_GR11(\regs),%r11
	LDREG   PT_GR12(\regs),%r12
	LDREG   PT_GR13(\regs),%r13
	LDREG   PT_GR14(\regs),%r14
	LDREG   PT_GR15(\regs),%r15
	LDREG   PT_GR16(\regs),%r16
	LDREG   PT_GR17(\regs),%r17
	LDREG   PT_GR18(\regs),%r18
	.endm

	.macro	fork_like name
ENTRY_CFI(sys_\name\()_wrapper)
	mfctl	%cr30,%r1
	ldo	TASK_REGS(%r1),%r1
	reg_save %r1
	mfctl	%cr27, %r28
	ldil	L%sys_\name, %r31
	be	R%sys_\name(%sr4,%r31)
	STREG	%r28, PT_CR27(%r1)
ENDPROC_CFI(sys_\name\()_wrapper)
	.endm

fork_like clone
fork_like clone3
fork_like fork
fork_like vfork

	/* Set the return value for the child */
ENTRY(child_return)
	BL	schedule_tail, %r2
	nop
finish_child_return:
	mfctl	%cr30,%r1
	ldo	TASK_REGS(%r1),%r1	 /* get pt regs */

	LDREG	PT_CR27(%r1), %r3
	mtctl	%r3, %cr27
	reg_restore %r1
	b	syscall_exit
	copy	%r0,%r28
END(child_return)

ENTRY_CFI(sys_rt_sigreturn_wrapper)
	mfctl	%cr30,%r26
	ldo	TASK_REGS(%r26),%r26	/* get pt regs */
	/* Don't save regs, we are going to restore them from sigcontext. */
	STREG	%r2, -RP_OFFSET(%r30)
#ifdef CONFIG_64BIT
	ldo	FRAME_SIZE(%r30), %r30
	BL	sys_rt_sigreturn,%r2
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	BL	sys_rt_sigreturn,%r2
	ldo	FRAME_SIZE(%r30), %r30
#endif

	ldo	-FRAME_SIZE(%r30), %r30
	LDREG	-RP_OFFSET(%r30), %r2

	/* FIXME: I think we need to restore a few more things here. */
	mfctl	%cr30,%r1
	ldo	TASK_REGS(%r1),%r1	/* get pt regs */
	reg_restore %r1

	/* If the signal was received while the process was blocked on a
	 * syscall, then r2 will take us to syscall_exit; otherwise r2 will
	 * take us to syscall_exit_rfi and on to intr_return.
	 */
	bv	%r0(%r2)
	LDREG	PT_GR28(%r1),%r28  /* reload original r28 for syscall_exit */
ENDPROC_CFI(sys_rt_sigreturn_wrapper)

ENTRY(syscall_exit)
	/* NOTE: Not all syscalls exit this way.  rt_sigreturn will exit
	 * via syscall_exit_rfi if the signal was received while the process
	 * was running.
	 */

	/* save return value now */
	mfctl     %cr30, %r1
	STREG     %r28,TASK_PT_GR28(%r1)

	/* Seems to me that dp could be wrong here, if the syscall involved
	 * calling a module, and nothing got round to restoring dp on return.
	 */
	loadgp

syscall_check_resched:

	/* check for reschedule */
	mfctl	%cr30,%r19
	LDREG	TASK_TI_FLAGS(%r19),%r19	/* long */
	bb,<,n	%r19, 31-TIF_NEED_RESCHED, syscall_do_resched /* forward */

	.import do_signal,code
syscall_check_sig:
	mfctl	%cr30,%r19
	LDREG	TASK_TI_FLAGS(%r19),%r19
	ldi	(_TIF_USER_WORK_MASK & ~_TIF_NEED_RESCHED), %r26
	and,COND(<>)	%r19, %r26, %r0
	b,n	syscall_restore	/* skip past if we've nothing to do */

syscall_do_signal:
	/* Save callee-save registers (for sigcontext).
	 * FIXME: After this point the process structure should be
	 * consistent with all the relevant state of the process
	 * before the syscall.  We need to verify this.
	 */
	mfctl	%cr30,%r1
	ldo	TASK_REGS(%r1), %r26		/* struct pt_regs *regs */
	reg_save %r26

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29			/* Reference param save area */
#endif

	BL	do_notify_resume,%r2
	ldi	1, %r25				/* long in_syscall = 1 */

	mfctl	%cr30,%r1
	ldo	TASK_REGS(%r1), %r20		/* reload pt_regs */
	reg_restore %r20

	b,n     syscall_check_sig

syscall_restore:
	mfctl	%cr30,%r1

	/* Are we being ptraced? */
	LDREG	TASK_TI_FLAGS(%r1),%r19
	ldi	_TIF_SINGLESTEP|_TIF_BLOCKSTEP,%r2
	and,COND(=)	%r19,%r2,%r0
	b,n	syscall_restore_rfi

	ldo	TASK_PT_FR31(%r1),%r19		   /* reload fpregs */
	rest_fp	%r19

	LDREG	TASK_PT_SAR(%r1),%r19		   /* restore SAR */
	mtsar	%r19

	LDREG	TASK_PT_GR2(%r1),%r2		   /* restore user rp */
	LDREG	TASK_PT_GR19(%r1),%r19
	LDREG   TASK_PT_GR20(%r1),%r20
	LDREG	TASK_PT_GR21(%r1),%r21
	LDREG	TASK_PT_GR22(%r1),%r22
	LDREG	TASK_PT_GR23(%r1),%r23
	LDREG	TASK_PT_GR24(%r1),%r24
	LDREG	TASK_PT_GR25(%r1),%r25
	LDREG	TASK_PT_GR26(%r1),%r26
	LDREG	TASK_PT_GR27(%r1),%r27	   /* restore user dp */
	LDREG	TASK_PT_GR28(%r1),%r28	   /* syscall return value */
	LDREG	TASK_PT_GR29(%r1),%r29
	LDREG	TASK_PT_GR31(%r1),%r31	   /* restore syscall rp */

	/* NOTE: We use rsm/ssm pair to make this operation atomic */
	LDREG   TASK_PT_GR30(%r1),%r1              /* Get user sp */
	rsm     PSW_SM_I, %r0
	copy    %r1,%r30                           /* Restore user sp */
	mfsp    %sr3,%r1                           /* Get user space id */
	mtsp    %r1,%sr7                           /* Restore sr7 */
	ssm     PSW_SM_I, %r0

	/* Set sr2 to zero for userspace syscalls to work. */
	mtsp	%r0,%sr2
	mtsp	%r1,%sr4			   /* Restore sr4 */
	mtsp	%r1,%sr5			   /* Restore sr5 */
	mtsp	%r1,%sr6			   /* Restore sr6 */

	depi	PRIV_USER,31,2,%r31	/* ensure return to user mode. */

#ifdef CONFIG_64BIT
	/* decide whether to reset the wide mode bit
	 *
	 * For a syscall, the W bit is stored in the lowest bit
	 * of sp.  Extract it and reset W if it is zero */
	extrd,u,*<>	%r30,63,1,%r1
	rsm	PSW_SM_W, %r0
	/* now reset the lowest bit of sp if it was set */
	xor	%r30,%r1,%r30
#endif
	be,n    0(%sr3,%r31)                       /* return to user space */

	/* We have to return via an RFI, so that PSW T and R bits can be set
	 * appropriately.
	 * This sets up pt_regs so we can return via intr_restore, which is not
	 * the most efficient way of doing things, but it works.
	 */
syscall_restore_rfi:
	ldo	-1(%r0),%r2			   /* Set recovery cntr to -1 */
	mtctl	%r2,%cr0			   /*   for immediate trap */
	LDREG	TASK_PT_PSW(%r1),%r2		   /* Get old PSW */
	ldi	0x0b,%r20			   /* Create new PSW */
	depi	-1,13,1,%r20			   /* C, Q, D, and I bits */

	/* The values of SINGLESTEP_BIT and BLOCKSTEP_BIT are
	 * set in thread_info.h and converted to PA bitmap
	 * numbers in asm-offsets.c */

	/* if ((%r19.SINGLESTEP_BIT)) { %r20.27=1} */
	extru,=	%r19,TIF_SINGLESTEP_PA_BIT,1,%r0
	depi	-1,27,1,%r20			   /* R bit */

	/* if ((%r19.BLOCKSTEP_BIT)) { %r20.7=1} */
	extru,= %r19,TIF_BLOCKSTEP_PA_BIT,1,%r0
	depi	-1,7,1,%r20			   /* T bit */

	STREG	%r20,TASK_PT_PSW(%r1)

	/* Always store space registers, since sr3 can be changed (e.g. fork) */

	mfsp    %sr3,%r25
	STREG   %r25,TASK_PT_SR3(%r1)
	STREG   %r25,TASK_PT_SR4(%r1)
	STREG   %r25,TASK_PT_SR5(%r1)
	STREG   %r25,TASK_PT_SR6(%r1)
	STREG   %r25,TASK_PT_SR7(%r1)
	STREG   %r25,TASK_PT_IASQ0(%r1)
	STREG   %r25,TASK_PT_IASQ1(%r1)

	/* XXX W bit??? */
	/* Now if old D bit is clear, it means we didn't save all registers
	 * on syscall entry, so do that now.  This only happens on TRACEME
	 * calls, or if someone attached to us while we were on a syscall.
	 * We could make this more efficient by not saving r3-r18, but
	 * then we wouldn't be able to use the common intr_restore path.
	 * It is only for traced processes anyway, so performance is not
	 * an issue.
	 */
	bb,<	%r2,30,pt_regs_ok		   /* Branch if D set */
	ldo	TASK_REGS(%r1),%r25
	reg_save %r25				   /* Save r3 to r18 */

	/* Save the current sr */
	mfsp	%sr0,%r2
	STREG	%r2,TASK_PT_SR0(%r1)

	/* Save the scratch sr */
	mfsp	%sr1,%r2
	STREG	%r2,TASK_PT_SR1(%r1)

	/* sr2 should be set to zero for userspace syscalls */
	STREG	%r0,TASK_PT_SR2(%r1)

	LDREG	TASK_PT_GR31(%r1),%r2
	depi	PRIV_USER,31,2,%r2	/* ensure return to user mode. */
	STREG   %r2,TASK_PT_IAOQ0(%r1)
	ldo	4(%r2),%r2
	STREG	%r2,TASK_PT_IAOQ1(%r1)
	b	intr_restore
	copy	%r25,%r16

pt_regs_ok:
	LDREG	TASK_PT_IAOQ0(%r1),%r2
	depi	PRIV_USER,31,2,%r2	/* ensure return to user mode. */
	STREG	%r2,TASK_PT_IAOQ0(%r1)
	LDREG	TASK_PT_IAOQ1(%r1),%r2
	depi	PRIV_USER,31,2,%r2
	STREG	%r2,TASK_PT_IAOQ1(%r1)
	b	intr_restore
	copy	%r25,%r16

syscall_do_resched:
	load32	syscall_check_resched,%r2 /* if resched, we start over again */
	load32	schedule,%r19
	bv	%r0(%r19)		/* jumps to schedule() */
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	nop
#endif
END(syscall_exit)


#ifdef CONFIG_FUNCTION_TRACER

	.import ftrace_function_trampoline,code
	.align L1_CACHE_BYTES
ENTRY_CFI(mcount, caller)
_mcount:
	.export _mcount,data
	/*
	 * The 64-bit mcount() function pointer needs 4 dwords, of which the
	 * first two are free.  We optimize it here and put 2 instructions for
	 * calling mcount(), and 2 instructions for ftrace_stub().  That way
	 * we have it all on one L1 cacheline.
	 */
1956	ldi	0, %arg3
1957	b	ftrace_function_trampoline
1958	copy	%r3, %arg2	/* caller original %sp */
1959ftrace_stub:
1960	.globl ftrace_stub
1961        .type  ftrace_stub, @function
1962#ifdef CONFIG_64BIT
1963	bve	(%rp)
1964#else
1965	bv	%r0(%rp)
1966#endif
1967	nop
1968#ifdef CONFIG_64BIT
1969	.dword mcount
1970	.dword 0 /* code in head.S puts value of global gp here */
1971#endif
1972ENDPROC_CFI(mcount)
1973
1974#ifdef CONFIG_DYNAMIC_FTRACE
1975
1976#ifdef CONFIG_64BIT
1977#define FTRACE_FRAME_SIZE (2*FRAME_SIZE)
1978#else
1979#define FTRACE_FRAME_SIZE FRAME_SIZE
1980#endif
1981ENTRY_CFI(ftrace_caller, caller,frame=FTRACE_FRAME_SIZE,CALLS,SAVE_RP,SAVE_SP)
1982ftrace_caller:
1983	.global ftrace_caller
1984
1985	STREG	%r3, -FTRACE_FRAME_SIZE+1*REG_SZ(%sp)
1986	ldo	-FTRACE_FRAME_SIZE(%sp), %r3
1987	STREG	%rp, -RP_OFFSET(%r3)
1988
1989	/* Offset 0 is already allocated for %r1 */
1990	STREG	%r23, 2*REG_SZ(%r3)
1991	STREG	%r24, 3*REG_SZ(%r3)
1992	STREG	%r25, 4*REG_SZ(%r3)
1993	STREG	%r26, 5*REG_SZ(%r3)
1994	STREG	%r28, 6*REG_SZ(%r3)
1995	STREG	%r29, 7*REG_SZ(%r3)
1996#ifdef CONFIG_64BIT
1997	STREG	%r19, 8*REG_SZ(%r3)
1998	STREG	%r20, 9*REG_SZ(%r3)
1999	STREG	%r21, 10*REG_SZ(%r3)
2000	STREG	%r22, 11*REG_SZ(%r3)
2001	STREG	%r27, 12*REG_SZ(%r3)
2002	STREG	%r31, 13*REG_SZ(%r3)
2003	loadgp
2004	ldo	-16(%sp),%r29
2005#endif
2006	LDREG	0(%r3), %r25
2007	copy	%rp, %r26
2008	ldo	-8(%r25), %r25
2009	ldi	0, %r23		/* no pt_regs */
2010	b,l	ftrace_function_trampoline, %rp
2011	copy	%r3, %r24
2012
2013	LDREG	-RP_OFFSET(%r3), %rp
2014	LDREG	2*REG_SZ(%r3), %r23
2015	LDREG	3*REG_SZ(%r3), %r24
2016	LDREG	4*REG_SZ(%r3), %r25
2017	LDREG	5*REG_SZ(%r3), %r26
2018	LDREG	6*REG_SZ(%r3), %r28
2019	LDREG	7*REG_SZ(%r3), %r29
2020#ifdef CONFIG_64BIT
2021	LDREG	8*REG_SZ(%r3), %r19
2022	LDREG	9*REG_SZ(%r3), %r20
2023	LDREG	10*REG_SZ(%r3), %r21
2024	LDREG	11*REG_SZ(%r3), %r22
2025	LDREG	12*REG_SZ(%r3), %r27
2026	LDREG	13*REG_SZ(%r3), %r31
2027#endif
2028	LDREG	1*REG_SZ(%r3), %r3
2029
2030	LDREGM	-FTRACE_FRAME_SIZE(%sp), %r1
2031	/* Adjust return point to jump back to beginning of traced function */
2032	ldo	-4(%r1), %r1
2033	bv,n	(%r1)
2034
2035ENDPROC_CFI(ftrace_caller)
2036
2037#ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS
2038ENTRY_CFI(ftrace_regs_caller,caller,frame=FTRACE_FRAME_SIZE+PT_SZ_ALGN,
2039	CALLS,SAVE_RP,SAVE_SP)
2040ftrace_regs_caller:
2041	.global ftrace_regs_caller
2042
2043	ldo	-FTRACE_FRAME_SIZE(%sp), %r1
2044	STREG	%rp, -RP_OFFSET(%r1)
2045
2046	copy	%sp, %r1
2047	ldo	PT_SZ_ALGN(%sp), %sp
2048
2049	STREG	%rp, PT_GR2(%r1)
2050	STREG	%r3, PT_GR3(%r1)
2051	STREG	%r4, PT_GR4(%r1)
2052	STREG	%r5, PT_GR5(%r1)
2053	STREG	%r6, PT_GR6(%r1)
2054	STREG	%r7, PT_GR7(%r1)
2055	STREG	%r8, PT_GR8(%r1)
2056	STREG	%r9, PT_GR9(%r1)
2057	STREG   %r10, PT_GR10(%r1)
2058	STREG   %r11, PT_GR11(%r1)
2059	STREG   %r12, PT_GR12(%r1)
2060	STREG   %r13, PT_GR13(%r1)
2061	STREG   %r14, PT_GR14(%r1)
2062	STREG   %r15, PT_GR15(%r1)
2063	STREG   %r16, PT_GR16(%r1)
2064	STREG   %r17, PT_GR17(%r1)
2065	STREG   %r18, PT_GR18(%r1)
2066	STREG	%r19, PT_GR19(%r1)
2067	STREG	%r20, PT_GR20(%r1)
2068	STREG	%r21, PT_GR21(%r1)
2069	STREG	%r22, PT_GR22(%r1)
2070	STREG	%r23, PT_GR23(%r1)
2071	STREG	%r24, PT_GR24(%r1)
2072	STREG	%r25, PT_GR25(%r1)
2073	STREG	%r26, PT_GR26(%r1)
2074	STREG	%r27, PT_GR27(%r1)
2075	STREG	%r28, PT_GR28(%r1)
2076	STREG	%r29, PT_GR29(%r1)
2077	STREG	%r30, PT_GR30(%r1)
2078	STREG	%r31, PT_GR31(%r1)
2079	mfctl	%cr11, %r26
2080	STREG	%r26, PT_SAR(%r1)
2081
2082	copy	%rp, %r26
2083	LDREG	-FTRACE_FRAME_SIZE-PT_SZ_ALGN(%sp), %r25
2084	ldo	-8(%r25), %r25
2085	ldo	-FTRACE_FRAME_SIZE(%r1), %arg2
2086	b,l	ftrace_function_trampoline, %rp
2087	copy	%r1, %arg3 /* struct pt_regs */
2088
2089	ldo	-PT_SZ_ALGN(%sp), %r1
2090
2091	LDREG	PT_SAR(%r1), %rp
2092	mtctl	%rp, %cr11
2093
2094	LDREG	PT_GR2(%r1), %rp
2095	LDREG	PT_GR3(%r1), %r3
2096	LDREG	PT_GR4(%r1), %r4
2097	LDREG	PT_GR5(%r1), %r5
2098	LDREG	PT_GR6(%r1), %r6
2099	LDREG	PT_GR7(%r1), %r7
2100	LDREG	PT_GR8(%r1), %r8
2101	LDREG	PT_GR9(%r1), %r9
2102	LDREG   PT_GR10(%r1),%r10
2103	LDREG   PT_GR11(%r1),%r11
2104	LDREG   PT_GR12(%r1),%r12
2105	LDREG   PT_GR13(%r1),%r13
2106	LDREG   PT_GR14(%r1),%r14
2107	LDREG   PT_GR15(%r1),%r15
2108	LDREG   PT_GR16(%r1),%r16
2109	LDREG   PT_GR17(%r1),%r17
2110	LDREG   PT_GR18(%r1),%r18
2111	LDREG   PT_GR19(%r1),%r19
2112	LDREG   PT_GR20(%r1),%r20
2113	LDREG   PT_GR21(%r1),%r21
2114	LDREG   PT_GR22(%r1),%r22
2115	LDREG   PT_GR23(%r1),%r23
2116	LDREG   PT_GR24(%r1),%r24
2117	LDREG   PT_GR25(%r1),%r25
2118	LDREG   PT_GR26(%r1),%r26
2119	LDREG   PT_GR27(%r1),%r27
2120	LDREG   PT_GR28(%r1),%r28
2121	LDREG   PT_GR29(%r1),%r29
2122	LDREG   PT_GR30(%r1),%r30
2123	LDREG   PT_GR31(%r1),%r31
2124
2125	ldo	-PT_SZ_ALGN(%sp), %sp
2126	LDREGM	-FTRACE_FRAME_SIZE(%sp), %r1	/* pop ftrace frame, reload saved %r1 */
2127	/* Adjust return point to jump back to beginning of traced function */
2128	ldo	-4(%r1), %r1
2129	bv,n	(%r1)
2130
2131ENDPROC_CFI(ftrace_regs_caller)
2132
2133#endif
2134#endif
2135
2136#ifdef CONFIG_FUNCTION_GRAPH_TRACER
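/*
 * Function-graph tracing: prepare_ftrace_return() has replaced the traced
 * function's return address with parisc_return_to_handler, so returns land
 * here.  The original return values are parked while
 * ftrace_return_to_handler() is asked for the real return address, which
 * comes back in %ret0 and is branched to below.
 */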
2137	.align 8
2138ENTRY_CFI(return_to_handler, caller,frame=FRAME_SIZE)
2139	.export parisc_return_to_handler,data
2140parisc_return_to_handler:
2141	copy %r3,%r1
2142	STREG %r0,-RP_OFFSET(%sp)	/* store 0 as %rp */
2143	copy %sp,%r3
2144	STREGM %r1,FRAME_SIZE(%sp)
2145	STREG %ret0,8(%r3)
2146	STREG %ret1,16(%r3)
2147
2148#ifdef CONFIG_64BIT
2149	loadgp
2150#endif
2151
2152	/* call ftrace_return_to_handler(0) */
2153	.import ftrace_return_to_handler,code
2154	load32 ftrace_return_to_handler,%ret0
2155	load32 .Lftrace_ret,%r2
2156#ifdef CONFIG_64BIT
2157	ldo -16(%sp),%ret1		/* Reference param save area */
2158	bve	(%ret0)
2159#else
2160	bv	%r0(%ret0)
2161#endif
2162	ldi 0,%r26
2163.Lftrace_ret:
2164	copy %ret0,%rp
2165
2166	/* restore original return values */
2167	LDREG 8(%r3),%ret0
2168	LDREG 16(%r3),%ret1
2169
2170	/* return from function */
2171#ifdef CONFIG_64BIT
2172	bve	(%rp)
2173#else
2174	bv	%r0(%rp)
2175#endif
2176	LDREGM -FRAME_SIZE(%sp),%r3
2177ENDPROC_CFI(return_to_handler)
2178
2179#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2180
2181#endif	/* CONFIG_FUNCTION_TRACER */
2182
2183#ifdef CONFIG_IRQSTACKS
2184/* void call_on_stack(unsigned long param1, void *func,
2185		      unsigned long new_stack) */
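/*
 * Runs func(param1) with %sp switched to new_stack (used for the per-CPU
 * IRQ stacks) and restores the original %sp and %rp on the way out.
 * %arg0 already holds param1 and is passed through to func untouched.
 */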
2186ENTRY_CFI(call_on_stack, FRAME=2*FRAME_SIZE,CALLS,SAVE_RP,SAVE_SP)
2187ENTRY(_call_on_stack)
2188	copy	%sp, %r1
2189
2190	/* Regarding the HPPA calling conventions for function pointers,
2191	   we assume the PIC register is not changed across the call.  For
2192	   CONFIG_64BIT, the argument pointer is left pointing at the
2193	   argument region allocated for the call to call_on_stack. */
2194
2195	/* Switch to new stack.  We allocate two frames.  */
2196	ldo	2*FRAME_SIZE(%arg2), %sp
2197# ifdef CONFIG_64BIT
2198	/* Save previous stack pointer and return pointer in frame marker */
2199	STREG	%rp, -FRAME_SIZE-RP_OFFSET(%sp)
2200	/* Calls always go through a function descriptor; its entry address is at offset 16 */
2201	LDREG	16(%arg1), %arg1
2202	bve,l	(%arg1), %rp
2203	STREG	%r1, -FRAME_SIZE-REG_SZ(%sp)
2204	LDREG	-FRAME_SIZE-RP_OFFSET(%sp), %rp
2205	bve	(%rp)
2206	LDREG	-FRAME_SIZE-REG_SZ(%sp), %sp
2207# else
2208	/* Save previous stack pointer and return pointer in frame marker */
2209	STREG	%r1, -FRAME_SIZE-REG_SZ(%sp)
2210	STREG	%rp, -FRAME_SIZE-RP_OFFSET(%sp)
2211	/* Calls use a function descriptor if the PLABEL bit is set; clear the flag bits and load the entry address from it */
2212	bb,>=,n	%arg1, 30, 1f
2213	depwi	0,31,2, %arg1
2214	LDREG	0(%arg1), %arg1
22151:
2216	be,l	0(%sr4,%arg1), %sr0, %r31
2217	copy	%r31, %rp
2218	LDREG	-FRAME_SIZE-RP_OFFSET(%sp), %rp
2219	bv	(%rp)
2220	LDREG	-FRAME_SIZE-REG_SZ(%sp), %sp
2221# endif /* CONFIG_64BIT */
2222ENDPROC_CFI(call_on_stack)
2223#endif /* CONFIG_IRQSTACKS */
2224
2225ENTRY_CFI(get_register)
2226	/*
2227	 * get_register is used by the non-access TLB miss handlers to
2228	 * copy the value of the general register specified in r8 into
2229	 * r1. This routine can't be used for shadowed registers, since
2230	 * the rfir will restore their original values. So, for the
2231	 * shadowed registers we put a -1 into r1 to indicate that the
2232	 * register should not be used (the register being copied could
2233	 * also contain -1, but that is OK; it just means the caller has
2234	 * to fall back to the slow path instead).
2235	 */
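	/*
	 * blr %r8,%r0 branches into the table below at an offset of
	 * 8 * %r8 bytes; each entry is exactly 8 bytes, a return branch
	 * through %r25 plus its delay-slot copy/ldi.  Roughly, in C
	 * (reg_is_shadowed() is a hypothetical helper):
	 *
	 *	long get_register(int regnum)
	 *	{
	 *		return reg_is_shadowed(regnum) ? -1 : gr[regnum];
	 *	}
	 */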
2236	blr     %r8,%r0
2237	nop
2238	bv      %r0(%r25)    /* r0 */
2239	copy    %r0,%r1
2240	bv      %r0(%r25)    /* r1 - shadowed */
2241	ldi     -1,%r1
2242	bv      %r0(%r25)    /* r2 */
2243	copy    %r2,%r1
2244	bv      %r0(%r25)    /* r3 */
2245	copy    %r3,%r1
2246	bv      %r0(%r25)    /* r4 */
2247	copy    %r4,%r1
2248	bv      %r0(%r25)    /* r5 */
2249	copy    %r5,%r1
2250	bv      %r0(%r25)    /* r6 */
2251	copy    %r6,%r1
2252	bv      %r0(%r25)    /* r7 */
2253	copy    %r7,%r1
2254	bv      %r0(%r25)    /* r8 - shadowed */
2255	ldi     -1,%r1
2256	bv      %r0(%r25)    /* r9 - shadowed */
2257	ldi     -1,%r1
2258	bv      %r0(%r25)    /* r10 */
2259	copy    %r10,%r1
2260	bv      %r0(%r25)    /* r11 */
2261	copy    %r11,%r1
2262	bv      %r0(%r25)    /* r12 */
2263	copy    %r12,%r1
2264	bv      %r0(%r25)    /* r13 */
2265	copy    %r13,%r1
2266	bv      %r0(%r25)    /* r14 */
2267	copy    %r14,%r1
2268	bv      %r0(%r25)    /* r15 */
2269	copy    %r15,%r1
2270	bv      %r0(%r25)    /* r16 - shadowed */
2271	ldi     -1,%r1
2272	bv      %r0(%r25)    /* r17 - shadowed */
2273	ldi     -1,%r1
2274	bv      %r0(%r25)    /* r18 */
2275	copy    %r18,%r1
2276	bv      %r0(%r25)    /* r19 */
2277	copy    %r19,%r1
2278	bv      %r0(%r25)    /* r20 */
2279	copy    %r20,%r1
2280	bv      %r0(%r25)    /* r21 */
2281	copy    %r21,%r1
2282	bv      %r0(%r25)    /* r22 */
2283	copy    %r22,%r1
2284	bv      %r0(%r25)    /* r23 */
2285	copy    %r23,%r1
2286	bv      %r0(%r25)    /* r24 - shadowed */
2287	ldi     -1,%r1
2288	bv      %r0(%r25)    /* r25 - shadowed */
2289	ldi     -1,%r1
2290	bv      %r0(%r25)    /* r26 */
2291	copy    %r26,%r1
2292	bv      %r0(%r25)    /* r27 */
2293	copy    %r27,%r1
2294	bv      %r0(%r25)    /* r28 */
2295	copy    %r28,%r1
2296	bv      %r0(%r25)    /* r29 */
2297	copy    %r29,%r1
2298	bv      %r0(%r25)    /* r30 */
2299	copy    %r30,%r1
2300	bv      %r0(%r25)    /* r31 */
2301	copy    %r31,%r1
2302ENDPROC_CFI(get_register)
2303
2304
2305ENTRY_CFI(set_register)
2306	/*
2307	 * set_register is used by the non-access TLB miss handlers to
2308	 * copy the value of r1 into the general register specified in
2309	 * r8.
2310	 */
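	/*
	 * As in get_register, blr %r8,%r0 indexes the 8-byte-per-entry
	 * branch table below; each entry returns through %r25 while its
	 * delay slot performs the actual copy.
	 */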
2311	blr     %r8,%r0
2312	nop
2313	bv      %r0(%r25)    /* r0 (silly, but it is a placeholder) */
2314	copy    %r1,%r0
2315	bv      %r0(%r25)    /* r1 */
2316	copy    %r1,%r1
2317	bv      %r0(%r25)    /* r2 */
2318	copy    %r1,%r2
2319	bv      %r0(%r25)    /* r3 */
2320	copy    %r1,%r3
2321	bv      %r0(%r25)    /* r4 */
2322	copy    %r1,%r4
2323	bv      %r0(%r25)    /* r5 */
2324	copy    %r1,%r5
2325	bv      %r0(%r25)    /* r6 */
2326	copy    %r1,%r6
2327	bv      %r0(%r25)    /* r7 */
2328	copy    %r1,%r7
2329	bv      %r0(%r25)    /* r8 */
2330	copy    %r1,%r8
2331	bv      %r0(%r25)    /* r9 */
2332	copy    %r1,%r9
2333	bv      %r0(%r25)    /* r10 */
2334	copy    %r1,%r10
2335	bv      %r0(%r25)    /* r11 */
2336	copy    %r1,%r11
2337	bv      %r0(%r25)    /* r12 */
2338	copy    %r1,%r12
2339	bv      %r0(%r25)    /* r13 */
2340	copy    %r1,%r13
2341	bv      %r0(%r25)    /* r14 */
2342	copy    %r1,%r14
2343	bv      %r0(%r25)    /* r15 */
2344	copy    %r1,%r15
2345	bv      %r0(%r25)    /* r16 */
2346	copy    %r1,%r16
2347	bv      %r0(%r25)    /* r17 */
2348	copy    %r1,%r17
2349	bv      %r0(%r25)    /* r18 */
2350	copy    %r1,%r18
2351	bv      %r0(%r25)    /* r19 */
2352	copy    %r1,%r19
2353	bv      %r0(%r25)    /* r20 */
2354	copy    %r1,%r20
2355	bv      %r0(%r25)    /* r21 */
2356	copy    %r1,%r21
2357	bv      %r0(%r25)    /* r22 */
2358	copy    %r1,%r22
2359	bv      %r0(%r25)    /* r23 */
2360	copy    %r1,%r23
2361	bv      %r0(%r25)    /* r24 */
2362	copy    %r1,%r24
2363	bv      %r0(%r25)    /* r25 */
2364	copy    %r1,%r25
2365	bv      %r0(%r25)    /* r26 */
2366	copy    %r1,%r26
2367	bv      %r0(%r25)    /* r27 */
2368	copy    %r1,%r27
2369	bv      %r0(%r25)    /* r28 */
2370	copy    %r1,%r28
2371	bv      %r0(%r25)    /* r29 */
2372	copy    %r1,%r29
2373	bv      %r0(%r25)    /* r30 */
2374	copy    %r1,%r30
2375	bv      %r0(%r25)    /* r31 */
2376	copy    %r1,%r31
2377ENDPROC_CFI(set_register)
2378
2379