/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Linux/PA-RISC Project (http://www.parisc-linux.org/)
 *
 * kernel entry points (interruptions, system call wrappers)
 *  Copyright (C) 1999,2000 Philipp Rumpf
 *  Copyright (C) 1999 SuSE GmbH Nuernberg
 *  Copyright (C) 2000 Hewlett-Packard (John Marvin)
 *  Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
 */

#include <asm/asm-offsets.h>

/* we have the following possibilities to act on an interruption:
 *  - handle in assembly and use shadowed registers only
 *  - save registers to kernel stack and handle in assembly or C */


#include <asm/psw.h>
#include <asm/cache.h>		/* for L1_CACHE_SHIFT */
#include <asm/assembly.h>	/* for LDREG/STREG defines */
#include <asm/signal.h>
#include <asm/unistd.h>
#include <asm/ldcw.h>
#include <asm/traps.h>
#include <asm/thread_info.h>
#include <asm/alternative.h>

#include <linux/linkage.h>
#include <linux/pgtable.h>

#ifdef CONFIG_64BIT
	.level 2.0w
#else
	.level 2.0
#endif

	/* Get aligned page_table_lock address for this mm from cr28/tr4 */
	.macro  get_ptl reg
	mfctl	%cr28,\reg
	.endm

	/* space_to_prot macro creates a prot id from a space id */

#if (SPACEID_SHIFT) == 0
	.macro  space_to_prot spc prot
	depd,z  \spc,62,31,\prot
	.endm
#else
	.macro  space_to_prot spc prot
	extrd,u \spc,(64 - (SPACEID_SHIFT)),32,\prot
	.endm
#endif
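	/* Added illustrative note (not from the original source): both
	 * variants land the space id in \prot shifted left by one.
	 * With SPACEID_SHIFT == 0, depd,z deposits the low 31 bits of
	 * \spc into bit positions 32..62 of a zeroed \prot, i.e.
	 * roughly prot = spc << 1; otherwise extrd,u extracts 32 bits
	 * ending at bit 64 - SPACEID_SHIFT, i.e. roughly
	 * prot = spc >> (SPACEID_SHIFT - 1). */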
	/*
	 * The "get_stack" macros are responsible for determining the
	 * kernel stack value.
	 *
	 *      If sr7 == 0
	 *          Already using a kernel stack, so call the
	 *          get_stack_use_r30 macro to push a pt_regs structure
	 *          on the stack, and store registers there.
	 *      else
	 *          Need to set up a kernel stack, so call the
	 *          get_stack_use_cr30 macro to set up a pointer
	 *          to the pt_regs structure contained within the
	 *          task pointer pointed to by cr30. Load the stack
	 *          pointer from the task structure.
	 *
	 * Note that we use shadowed registers for temps until
	 * we can save %r26 and %r29. %r26 is used to preserve
	 * %r8 (a shadowed register) which temporarily contained
	 * either the fault type ("code") or the eirr. We need
	 * to use a non-shadowed register to carry the value over
	 * the rfir in virt_map. We use %r26 since this value winds
	 * up being passed as the argument to either do_cpu_irq_mask
	 * or handle_interruption. %r29 is used to hold a pointer
	 * to the register save area, and once again, it needs to
	 * be a non-shadowed register so that it survives the rfir.
	 */
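	/* A rough C model of the decision above (added, illustrative
	 * only):
	 *
	 *   if (sr7 == 0)                  // already on a kernel stack
	 *           get_stack_use_r30();   // push pt_regs on %r30
	 *   else                           // came from user space
	 *           get_stack_use_cr30();  // pt_regs lives in the task
	 */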

	.macro  get_stack_use_cr30

	/* we save the registers in the task struct */

	copy	%r30, %r17
	mfctl   %cr30, %r1
	tophys  %r1,%r9		/* task_struct */
	LDREG	TASK_STACK(%r9),%r30
	ldo	PT_SZ_ALGN(%r30),%r30
	mtsp	%r0,%sr7	/* clear sr7 after kernel stack was set! */
	mtsp	%r16,%sr3
	ldo     TASK_REGS(%r9),%r9
	STREG   %r17,PT_GR30(%r9)
	STREG   %r29,PT_GR29(%r9)
	STREG   %r26,PT_GR26(%r9)
	STREG	%r16,PT_SR7(%r9)
	copy    %r9,%r29
	.endm

	.macro  get_stack_use_r30

	/* we put a struct pt_regs on the stack and save the registers there */

	tophys  %r30,%r9
	copy	%r30,%r1
	ldo	PT_SZ_ALGN(%r30),%r30
	STREG   %r1,PT_GR30(%r9)
	STREG   %r29,PT_GR29(%r9)
	STREG   %r26,PT_GR26(%r9)
	STREG	%r16,PT_SR7(%r9)
	copy    %r9,%r29
	.endm

	.macro  rest_stack
	LDREG   PT_GR1(%r29), %r1
	LDREG   PT_GR30(%r29),%r30
	LDREG   PT_GR29(%r29),%r29
	.endm

	/* default interruption handler
	 * (calls traps.c:handle_interruption) */
	.macro	def code
	b	intr_save
	ldi     \code, %r8
	.align	32
	.endm

	/* Interrupt interruption handler
	 * (calls irq.c:do_cpu_irq_mask) */
	.macro	extint code
	b	intr_extint
	mfsp    %sr7,%r16
	.align	32
	.endm

	.import	os_hpmc, code

	/* HPMC handler */
	.macro	hpmc code
	nop			/* must be a NOP, will be patched later */
	load32	PA(os_hpmc), %r3
	bv,n	0(%r3)
	nop
	.word	0		/* checksum (will be patched) */
	.word	0		/* address of handler */
	.word	0		/* length of handler */
	.endm

	/*
	 * Performance Note: Instructions will be moved up into
	 * this part of the code later on, once we are sure
	 * that the tlb miss handlers are close to final form.
	 */

	/* Register definitions for tlb miss handler macros */

	va  = r8	/* virtual address for which the trap occurred */
	spc = r24	/* space for which the trap occurred */

#ifndef CONFIG_64BIT

	/*
	 * itlb miss interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	itlb_11 code

	mfctl	%pcsq, spc
	b	itlb_miss_11
	mfctl	%pcoq, va

	.align		32
	.endm
#endif

	/*
	 * itlb miss interruption handler (parisc 2.0)
	 */

	.macro	itlb_20 code
	mfctl	%pcsq, spc
#ifdef CONFIG_64BIT
	b       itlb_miss_20w
#else
	b	itlb_miss_20
#endif
	mfctl	%pcoq, va

	.align		32
	.endm

#ifndef CONFIG_64BIT
	/*
	 * naitlb miss interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	naitlb_11 code

	mfctl	%isr,spc
	b	naitlb_miss_11
	mfctl 	%ior,va

	.align		32
	.endm
#endif

	/*
	 * naitlb miss interruption handler (parisc 2.0)
	 */

	.macro	naitlb_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b       naitlb_miss_20w
#else
	b	naitlb_miss_20
#endif
	mfctl 	%ior,va

	.align		32
	.endm

#ifndef CONFIG_64BIT
	/*
	 * dtlb miss interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	dtlb_11 code

	mfctl	%isr, spc
	b	dtlb_miss_11
	mfctl	%ior, va

	.align		32
	.endm
#endif

	/*
	 * dtlb miss interruption handler (parisc 2.0)
	 */

	.macro	dtlb_20 code

	mfctl	%isr, spc
#ifdef CONFIG_64BIT
	b       dtlb_miss_20w
#else
	b	dtlb_miss_20
#endif
	mfctl	%ior, va

	.align		32
	.endm

#ifndef CONFIG_64BIT
	/* nadtlb miss interruption handler (parisc 1.1 - 32 bit) */

	.macro	nadtlb_11 code

	mfctl	%isr,spc
	b       nadtlb_miss_11
	mfctl	%ior,va

	.align		32
	.endm
#endif

	/* nadtlb miss interruption handler (parisc 2.0) */

	.macro	nadtlb_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b       nadtlb_miss_20w
#else
	b       nadtlb_miss_20
#endif
	mfctl	%ior,va

	.align		32
	.endm

#ifndef CONFIG_64BIT
	/*
	 * dirty bit trap interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	dbit_11 code

	mfctl	%isr,spc
	b	dbit_trap_11
	mfctl	%ior,va

	.align		32
	.endm
#endif

	/*
	 * dirty bit trap interruption handler (parisc 2.0)
	 */

	.macro	dbit_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b       dbit_trap_20w
#else
	b	dbit_trap_20
#endif
	mfctl	%ior,va

	.align		32
	.endm

	/* In LP64, the space contains part of the upper 32 bits of the
	 * fault.  We have to extract this and place it in the va,
	 * zeroing the corresponding bits in the space register */
	.macro		space_adjust	spc,va,tmp
#ifdef CONFIG_64BIT
	extrd,u		\spc,63,SPACEID_SHIFT,\tmp
	depd		%r0,63,SPACEID_SHIFT,\spc
	depd		\tmp,31,SPACEID_SHIFT,\va
#endif
	.endm
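	/* Added illustrative example: the low SPACEID_SHIFT bits of the
	 * space id are really the high bits of the 64-bit address.
	 * Roughly, in C:
	 *
	 *   tmp = spc & ((1UL << SPACEID_SHIFT) - 1);  // low spc bits
	 *   spc &= ~((1UL << SPACEID_SHIFT) - 1);      // clear them
	 *   va  |= tmp << 32;                          // into va bits 32+
	 */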

	.import		swapper_pg_dir,code

	/* Get the pgd.  For faults on space zero (kernel space), this
	 * is simply swapper_pg_dir.  For user space faults, the
	 * pgd is stored in %cr25 */
	.macro		get_pgd		spc,reg
	ldil		L%PA(swapper_pg_dir),\reg
	ldo		R%PA(swapper_pg_dir)(\reg),\reg
	or,COND(=)	%r0,\spc,%r0
	mfctl		%cr25,\reg
	.endm
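	/* Note (added for clarity): the or,COND(=) above nullifies the
	 * following mfctl when \spc == 0, so kernel-space faults keep
	 * swapper_pg_dir while user-space faults overwrite \reg with
	 * the user pgd from %cr25 -- a branchless if/else. */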

	/*
		space_check(spc,tmp,fault)

		spc - The space we saw the fault with.
		tmp - The place to store the current space.
		fault - Function to call on failure.

		Only allow faults on different spaces from the
		currently active one if we're the kernel

	*/
	.macro		space_check	spc,tmp,fault
	mfsp		%sr7,\tmp
	/* check against %r0 which is same value as LINUX_GATEWAY_SPACE */
	or,COND(<>)	%r0,\spc,%r0	/* user may execute gateway page
					 * as kernel, so defeat the space
					 * check if it is */
	copy		\spc,\tmp
	or,COND(=)	%r0,\tmp,%r0	/* nullify if executing as kernel */
	cmpb,COND(<>),n	\tmp,\spc,\fault
	.endm
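	/* In rough C (added, illustrative only):
	 *
	 *   tmp = mfsp(sr7);
	 *   if (spc == 0)             // fault on the gateway page
	 *           tmp = spc;        // defeat the check below
	 *   if (tmp != 0              // tmp == 0: running as kernel
	 *       && tmp != spc)
	 *           goto fault;
	 */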

	/* Look up a PTE in a 2-Level scheme (faulting at each
	 * level if the entry isn't present)
	 *
	 * NOTE: we use ldw even for LP64, since the short pointers
	 * can address up to 1TB
	 */
	.macro		L2_ptep	pmd,pte,index,va,fault
#if CONFIG_PGTABLE_LEVELS == 3
	extru_safe	\va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
#else
	extru_safe	\va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
#endif
	dep             %r0,31,PAGE_SHIFT,\pmd  /* clear offset */
#if CONFIG_PGTABLE_LEVELS < 3
	copy		%r0,\pte
#endif
	ldw,s		\index(\pmd),\pmd
	bb,>=,n		\pmd,_PxD_PRESENT_BIT,\fault
	dep		%r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
	SHLREG		\pmd,PxD_VALUE_SHIFT,\pmd
	extru_safe	\va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
	dep		%r0,31,PAGE_SHIFT,\pmd  /* clear offset */
	shladd		\index,BITS_PER_PTE_ENTRY,\pmd,\pmd /* pmd is now pte */
	.endm

	/* Look up PTE in a 3-Level scheme. */
	.macro		L3_ptep pgd,pte,index,va,fault
#if CONFIG_PGTABLE_LEVELS == 3
	copy		%r0,\pte
	extrd,u		\va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
	ldw,s		\index(\pgd),\pgd
	bb,>=,n		\pgd,_PxD_PRESENT_BIT,\fault
	shld		\pgd,PxD_VALUE_SHIFT,\pgd
#endif
	L2_ptep		\pgd,\pte,\index,\va,\fault
	.endm
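	/* The walk above, sketched in C (added; identifiers are the
	 * generic kernel ones, not literal symbols from this file):
	 *
	 *   pgd = pgd_base[pgd_index(va)];          -- L3 step, 3-level only
	 *   if (!(pgd & _PxD_PRESENT)) goto fault;
	 *   pmd = pgd_page(pgd)[pmd_index(va)];     -- L2 step
	 *   if (!(pmd & _PxD_PRESENT)) goto fault;
	 *   pte_p = &pmd_page(pmd)[pte_index(va)];  -- pointer into pte table
	 */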

	/* Acquire page_table_lock and check page is present. */
	.macro		ptl_lock	spc,ptp,pte,tmp,tmp1,fault
#ifdef CONFIG_TLB_PTLOCK
98:	cmpib,COND(=),n	0,\spc,2f
	get_ptl		\tmp
1:	LDCW		0(\tmp),\tmp1
	cmpib,COND(=)	0,\tmp1,1b
	nop
	LDREG		0(\ptp),\pte
	bb,<,n		\pte,_PAGE_PRESENT_BIT,3f
	b		\fault
	stw		\spc,0(\tmp)
99:	ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
#endif
2:	LDREG		0(\ptp),\pte
	bb,>=,n		\pte,_PAGE_PRESENT_BIT,\fault
3:
	.endm
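	/* Added note: LDCW is PA-RISC's atomic load-and-clear primitive,
	 * so the lock word is nonzero when free and zero when held; the
	 * 1b loop above spins until LDCW returns nonzero.  The
	 * ALTERNATIVE() patches the whole sequence to NOPs on non-SMP
	 * kernels, where the lock is unnecessary. */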

	/* Release page_table_lock without reloading lock address.
	   Note that the values in the register spc are limited to
	   NR_SPACE_IDS (262144). Thus, the stw instruction always
	   stores a nonzero value even when register spc is 64 bits.
	   We use an ordered store to ensure all prior accesses are
	   performed prior to releasing the lock. */
	.macro		ptl_unlock0	spc,tmp
#ifdef CONFIG_TLB_PTLOCK
98:	or,COND(=)	%r0,\spc,%r0
	stw,ma		\spc,0(\tmp)
99:	ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
#endif
	.endm

	/* Release page_table_lock. */
	.macro		ptl_unlock1	spc,tmp
#ifdef CONFIG_TLB_PTLOCK
98:	get_ptl		\tmp
	ptl_unlock0	\spc,\tmp
99:	ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
#endif
	.endm

	/* Set the _PAGE_ACCESSED bit of the PTE.  Be clever and
	 * don't needlessly dirty the cache line if it was already set */
	.macro		update_accessed	ptp,pte,tmp,tmp1
	ldi		_PAGE_ACCESSED,\tmp1
	or		\tmp1,\pte,\tmp
	and,COND(<>)	\tmp1,\pte,%r0
	STREG		\tmp,0(\ptp)
	.endm
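	/* Equivalent C (added, illustrative): skip the store, and hence
	 * the dirtied cache line, when the bit is already set:
	 *
	 *   if (!(pte & _PAGE_ACCESSED))
	 *           *ptp = pte | _PAGE_ACCESSED;
	 */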

	/* Set the dirty bit (and accessed bit).  No need to be
	 * clever, this is only used from the dirty fault */
	.macro		update_dirty	ptp,pte,tmp
	ldi		_PAGE_ACCESSED|_PAGE_DIRTY,\tmp
	or		\tmp,\pte,\pte
	STREG		\pte,0(\ptp)
	.endm

	/* We have (depending on the page size):
	 * - 38 to 52-bit Physical Page Number
	 * - 12 to 26-bit page offset
	 */
	/* bitshift difference between a PFN (based on the kernel's PAGE_SIZE)
	 * and a CPU TLB 4k PFN (4k => 12 bits to shift) */
	#define PAGE_ADD_SHIFT		(PAGE_SHIFT-12)
	#define PAGE_ADD_HUGE_SHIFT	(REAL_HPAGE_SHIFT-12)

	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
	.macro		convert_for_tlb_insert20 pte,tmp
#ifdef CONFIG_HUGETLB_PAGE
	copy		\pte,\tmp
	extrd,u		\tmp,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
				64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte

	depdi		_PAGE_SIZE_ENCODING_DEFAULT,63,\
				(63-58)+PAGE_ADD_SHIFT,\pte
	extrd,u,*=	\tmp,_PAGE_HPAGE_BIT+32,1,%r0
	depdi		_HUGE_PAGE_SIZE_ENCODING_DEFAULT,63,\
				(63-58)+PAGE_ADD_HUGE_SHIFT,\pte
#else /* Huge pages disabled */
	extrd,u		\pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
				64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
	depdi		_PAGE_SIZE_ENCODING_DEFAULT,63,\
				(63-58)+PAGE_ADD_SHIFT,\pte
#endif
	.endm

	/* Convert the pte and prot to tlb insertion values.  How
	 * this happens is quite subtle, read below */
	.macro		make_insert_tlb	spc,pte,prot,tmp
	space_to_prot   \spc \prot        /* create prot id from space */
	/* The following is the real subtlety.  This is depositing
	 * T <-> _PAGE_REFTRAP
	 * D <-> _PAGE_DIRTY
	 * B <-> _PAGE_DMB (memory break)
	 *
	 * Then incredible subtlety: The access rights are
	 * _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE
	 * See 3-14 of the parisc 2.0 manual
	 *
	 * Finally, _PAGE_READ goes in the top bit of PL1 (so we
	 * trigger an access rights trap in user space if the user
	 * tries to read an unreadable page) */
#if _PAGE_SPECIAL_BIT == _PAGE_DMB_BIT
	/* need to drop DMB bit, as it's used as SPECIAL flag */
	depi		0,_PAGE_SPECIAL_BIT,1,\pte
#endif
	depd            \pte,8,7,\prot

	/* PAGE_USER indicates the page can be read with user privileges,
	 * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1
	 * contains _PAGE_READ) */
	extrd,u,*=      \pte,_PAGE_USER_BIT+32,1,%r0
	depdi		7,11,3,\prot
	/* If we're a gateway page, drop PL2 back to zero for promotion
	 * to kernel privilege (so we can execute the page as kernel).
	 * Any privilege promotion page always denies read and write */
	extrd,u,*= 	\pte,_PAGE_GATEWAY_BIT+32,1,%r0
	depd		%r0,11,2,\prot	/* If Gateway, Set PL2 to 0 */

	/* Enforce uncacheable pages.
	 * This should ONLY be used for MMIO on PA 2.0 machines.
	 * Memory/DMA is cache coherent on all PA2.0 machines we support
	 * (that means T-class is NOT supported) and the memory controllers
	 * on most of those machines only handle cache transactions.
	 */
	extrd,u,*=	\pte,_PAGE_NO_CACHE_BIT+32,1,%r0
	depdi		1,12,1,\prot

	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
	convert_for_tlb_insert20 \pte \tmp
	.endm

	/* Identical macro to make_insert_tlb above, except it
	 * makes the tlb entry for the differently formatted pa11
	 * insertion instructions */
	.macro		make_insert_tlb_11	spc,pte,prot
#if _PAGE_SPECIAL_BIT == _PAGE_DMB_BIT
	/* need to drop DMB bit, as it's used as SPECIAL flag */
	depi		0,_PAGE_SPECIAL_BIT,1,\pte
#endif
	zdep		\spc,30,15,\prot
	dep		\pte,8,7,\prot
	extru,=		\pte,_PAGE_NO_CACHE_BIT,1,%r0
	depi		1,12,1,\prot
	extru,=         \pte,_PAGE_USER_BIT,1,%r0
	depi		7,11,3,\prot   /* Set for user space (1 rsvd for read) */
	extru,= 	\pte,_PAGE_GATEWAY_BIT,1,%r0
	depi		0,11,2,\prot	/* If Gateway, Set PL2 to 0 */

	/* Get rid of prot bits and convert to page addr for iitlba */

	depi		0,31,ASM_PFN_PTE_SHIFT,\pte
	SHRREG		\pte,(ASM_PFN_PTE_SHIFT-(31-26)),\pte
	.endm

	/* This is for ILP32 PA2.0 only.  The TLB insertion needs
	 * to extend into I/O space if the address is 0xfXXXXXXX
	 * so we extend the f's into the top word of the pte in
	 * this case */
	.macro		f_extend	pte,tmp
	extrd,s		\pte,42,4,\tmp
	addi,<>		1,\tmp,%r0
	extrd,s		\pte,63,25,\pte
	.endm
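	/* Added note: extrd,s pulls a 4-bit signed field from the pte;
	 * addi,<> nullifies the final sign-extension unless that field
	 * is all ones (an 0xfXXXXXXX address after shifting), in which
	 * case the low 25 bits are sign-extended so the f's propagate
	 * into the upper word, mapping the entry into I/O space. */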

	/* The alias region is comprised of a pair of 4 MB regions
	 * aligned to 8 MB. It is used to clear/copy/flush user pages
	 * using kernel virtual addresses congruent with the user
	 * virtual address.
	 *
	 * To use the alias page, you set %r26 up with the "to" TLB
	 * entry (identifying the physical page) and %r23 up with
	 * the "from" TLB entry (or nothing if only a "to" entry---for
	 * clear_user_page_asm) */
	.macro		do_alias	spc,tmp,tmp1,va,pte,prot,fault,patype
	cmpib,COND(<>),n 0,\spc,\fault
	ldil		L%(TMPALIAS_MAP_START),\tmp
	copy		\va,\tmp1
	depi_safe	0,31,TMPALIAS_SIZE_BITS+1,\tmp1
	cmpb,COND(<>),n	\tmp,\tmp1,\fault
	mfctl		%cr19,\tmp	/* iir */
	/* get the opcode (first six bits) into \tmp */
	extrw,u		\tmp,5,6,\tmp
	/*
	 * Only setting the T bit prevents data cache movein
	 * Setting access rights to zero prevents instruction cache movein
	 *
	 * Note subtlety here: _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE go
	 * to type field and _PAGE_READ goes to top bit of PL1
	 */
	ldi		(_PAGE_REFTRAP|_PAGE_READ|_PAGE_WRITE),\prot
	/*
	 * so if the opcode is one (i.e. this is a memory management
	 * instruction) nullify the next load so \prot is only T.
	 * Otherwise this is a normal data operation
	 */
	cmpiclr,=	0x01,\tmp,%r0
	ldi		(_PAGE_DIRTY|_PAGE_READ|_PAGE_WRITE),\prot
.ifc \patype,20
	depd,z		\prot,8,7,\prot
.else
.ifc \patype,11
	depw,z		\prot,8,7,\prot
.else
	.error "undefined PA type to do_alias"
.endif
.endif
	/*
	 * OK, it is in the temp alias region, check whether "from" or "to".
	 * Check "subtle" note in pacache.S re: r23/r26.
	 */
	extrw,u,=	\va,31-TMPALIAS_SIZE_BITS,1,%r0
	or,COND(tr)	%r23,%r0,\pte
	or		%r26,%r0,\pte

	/* convert phys addr in \pte (from r23 or r26) to tlb insert format */
	SHRREG		\pte,PAGE_SHIFT+PAGE_ADD_SHIFT-5, \pte
	depi_safe	_PAGE_SIZE_ENCODING_DEFAULT, 31,5, \pte
	.endm


	/*
	 * Fault_vectors are architecturally required to be aligned on a 2K
	 * boundary
	 */

	.section .text.hot
	.align 2048

ENTRY(fault_vector_20)
	/* First vector is invalid (0) */
	.ascii	"cows can fly"
	.byte 0
	.align 32

	hpmc		 1
	def		 2
	def		 3
	extint		 4
	def		 5
	itlb_20		 PARISC_ITLB_TRAP
	def		 7
	def		 8
	def              9
	def		10
	def		11
	def		12
	def		13
	def		14
	dtlb_20		15
	naitlb_20	16
	nadtlb_20	17
	def		18
	def		19
	dbit_20		20
	def		21
	def		22
	def		23
	def		24
	def		25
	def		26
	def		27
	def		28
	def		29
	def		30
	def		31
END(fault_vector_20)

#ifndef CONFIG_64BIT

	.align 2048

ENTRY(fault_vector_11)
	/* First vector is invalid (0) */
	.ascii	"cows can fly"
	.byte 0
	.align 32

	hpmc		 1
	def		 2
	def		 3
	extint		 4
	def		 5
	itlb_11		 PARISC_ITLB_TRAP
	def		 7
	def		 8
	def              9
	def		10
	def		11
	def		12
	def		13
	def		14
	dtlb_11		15
	naitlb_11	16
	nadtlb_11	17
	def		18
	def		19
	dbit_11		20
	def		21
	def		22
	def		23
	def		24
	def		25
	def		26
	def		27
	def		28
	def		29
	def		30
	def		31
END(fault_vector_11)

#endif
	/* Fault vector is separately protected and *must* be on its own page */
	.align		PAGE_SIZE

	.import		handle_interruption,code
	.import		do_cpu_irq_mask,code

	/*
	 * Child Returns here
	 *
	 * copy_thread moved args into task save area.
	 */

ENTRY(ret_from_kernel_thread)
	/* Call schedule_tail first though */
	BL	schedule_tail, %r2
	nop

	mfctl	%cr30,%r1	/* task_struct */
	LDREG	TASK_PT_GR25(%r1), %r26
#ifdef CONFIG_64BIT
	LDREG	TASK_PT_GR27(%r1), %r27
#endif
	LDREG	TASK_PT_GR26(%r1), %r1
	ble	0(%sr7, %r1)
	copy	%r31, %r2
	b	finish_child_return
	nop
END(ret_from_kernel_thread)


	/*
	 * struct task_struct *_switch_to(struct task_struct *prev,
	 *	struct task_struct *next)
	 *
	 * switch kernel stacks and return prev */
ENTRY_CFI(_switch_to)
	STREG	 %r2, -RP_OFFSET(%r30)

	callee_save_float
	callee_save

	load32	_switch_to_ret, %r2

	STREG	%r2, TASK_PT_KPC(%r26)
	LDREG	TASK_PT_KPC(%r25), %r2

	STREG	%r30, TASK_PT_KSP(%r26)
	LDREG	TASK_PT_KSP(%r25), %r30
	bv	%r0(%r2)
	mtctl   %r25,%cr30

ENTRY(_switch_to_ret)
	mtctl	%r0, %cr0		/* Needed for single stepping */
	callee_rest
	callee_rest_float

	LDREG	-RP_OFFSET(%r30), %r2
	bv	%r0(%r2)
	copy	%r26, %r28
ENDPROC_CFI(_switch_to)
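	/* Added note: the switch works by saving the return point and
	 * kernel stack pointer of "prev" (%r26) into its thread struct,
	 * then loading "next"'s (%r25) saved KPC/KSP and branching there;
	 * %cr30 is updated so interrupt entry finds the new task. */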

	/*
	 * Common rfi return path for interruptions, kernel execve, and
	 * sys_rt_sigreturn (sometimes).  The sys_rt_sigreturn syscall will
	 * return via this path if the signal was received when the process
	 * was running; if the process was blocked on a syscall then the
	 * normal syscall_exit path is used.  All syscalls for traced
	 * processes exit via intr_restore.
	 *
	 * XXX If any syscalls that change a process's space id ever exit
	 * this way, then we will need to copy %sr3 into PT_SR[3..7], and
	 * adjust IASQ[0..1].
	 *
	 */

	.align	PAGE_SIZE

ENTRY_CFI(syscall_exit_rfi)
	mfctl	%cr30,%r16		/* task_struct */
	ldo	TASK_REGS(%r16),%r16
	/* Force iaoq to userspace, as the user has had access to our current
	 * context via sigcontext. Also filter the PSW for the same reason.
	 */
	LDREG	PT_IAOQ0(%r16),%r19
	depi	PRIV_USER,31,2,%r19
	STREG	%r19,PT_IAOQ0(%r16)
	LDREG	PT_IAOQ1(%r16),%r19
	depi	PRIV_USER,31,2,%r19
	STREG	%r19,PT_IAOQ1(%r16)
	LDREG   PT_PSW(%r16),%r19
	load32	USER_PSW_MASK,%r1
#ifdef CONFIG_64BIT
	load32	USER_PSW_HI_MASK,%r20
	depd    %r20,31,32,%r1
#endif
	and     %r19,%r1,%r19 /* Mask out bits that user shouldn't play with */
	load32	USER_PSW,%r1
	or      %r19,%r1,%r19 /* Make sure default USER_PSW bits are set */
	STREG   %r19,PT_PSW(%r16)

	/*
	 * If we aren't being traced, we never saved space registers
	 * (we don't store them in the sigcontext), so set them
	 * to "proper" values now (otherwise we'll wind up restoring
	 * whatever was last stored in the task structure, which might
	 * be inconsistent if an interrupt occurred while on the gateway
	 * page). Note that we may be "trashing" values the user put in
	 * them, but we don't support the user changing them.
	 */

	STREG   %r0,PT_SR2(%r16)
	mfsp    %sr3,%r19
	STREG   %r19,PT_SR0(%r16)
	STREG   %r19,PT_SR1(%r16)
	STREG   %r19,PT_SR3(%r16)
	STREG   %r19,PT_SR4(%r16)
	STREG   %r19,PT_SR5(%r16)
	STREG   %r19,PT_SR6(%r16)
	STREG   %r19,PT_SR7(%r16)

ENTRY(intr_return)
	/* check for reschedule */
	mfctl   %cr30,%r1
	LDREG   TASK_TI_FLAGS(%r1),%r19	/* sched.h: TIF_NEED_RESCHED */
	bb,<,n	%r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */

	.import do_notify_resume,code
intr_check_sig:
	/* As above */
	mfctl   %cr30,%r1
	LDREG	TASK_TI_FLAGS(%r1),%r19
	ldi	(_TIF_USER_WORK_MASK & ~_TIF_NEED_RESCHED), %r20
	and,COND(<>)	%r19, %r20, %r0
	b,n	intr_restore	/* skip past if we've nothing to do */

	/* This check is critical to having LWS
	 * working. The IASQ is zero on the gateway
	 * page and we cannot deliver any signals until
	 * we get off the gateway page.
	 *
	 * Only do signals if we are returning to user space
	 */
	LDREG	PT_IASQ0(%r16), %r20
	cmpib,COND(=),n LINUX_GATEWAY_SPACE, %r20, intr_restore /* forward */
	LDREG	PT_IASQ1(%r16), %r20
	cmpib,COND(=),n LINUX_GATEWAY_SPACE, %r20, intr_restore /* forward */

	copy	%r0, %r25			/* long in_syscall = 0 */
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29			/* Reference param save area */
#endif

	/* NOTE: We need to enable interrupts if we have to deliver
	 * signals. We used to do this earlier but it caused kernel
	 * stack overflows. */
	ssm	PSW_SM_I, %r0

	BL	do_notify_resume,%r2
	copy	%r16, %r26			/* struct pt_regs *regs */

	b,n	intr_check_sig

intr_restore:
	copy            %r16,%r29
	ldo             PT_FR31(%r29),%r1
	rest_fp         %r1
	rest_general    %r29

	/* inverse of virt_map */
	pcxt_ssm_bug
	rsm             PSW_SM_QUIET,%r0	/* prepare for rfi */
	tophys_r1       %r29

	/* Restore space id's and special cr's from PT_REGS
	 * structure pointed to by r29
	 */
	rest_specials	%r29

	/* IMPORTANT: rest_stack restores r29 last (we are using it)!
	 * It also restores r1 and r30.
	 */
	rest_stack

	rfi
	nop

#ifndef CONFIG_PREEMPTION
# define intr_do_preempt	intr_restore
#endif /* !CONFIG_PREEMPTION */

	.import schedule,code
intr_do_resched:
	/* Only call schedule on return to userspace. If we're returning
	 * to kernel space, we may schedule if CONFIG_PREEMPTION, otherwise
	 * we jump back to intr_restore.
	 */
	LDREG	PT_IASQ0(%r16), %r20
	cmpib,COND(=)	0, %r20, intr_do_preempt
	nop
	LDREG	PT_IASQ1(%r16), %r20
	cmpib,COND(=)	0, %r20, intr_do_preempt
	nop

	/* NOTE: We need to enable interrupts if we schedule.  We used
	 * to do this earlier but it caused kernel stack overflows. */
	ssm     PSW_SM_I, %r0

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif

	ldil	L%intr_check_sig, %r2
#ifndef CONFIG_64BIT
	b	schedule
#else
	load32	schedule, %r20
	bv	%r0(%r20)
#endif
	ldo	R%intr_check_sig(%r2), %r2

	/* preempt the current task on returning to kernel
	 * mode from an interrupt, iff need_resched is set,
	 * and preempt_count is 0. otherwise, we continue on
	 * our merry way back to the current running task.
	 */
#ifdef CONFIG_PREEMPTION
	.import preempt_schedule_irq,code
intr_do_preempt:
	rsm	PSW_SM_I, %r0		/* disable interrupts */

	/* current_thread_info()->preempt_count */
	mfctl	%cr30, %r1
	ldw	TI_PRE_COUNT(%r1), %r19
	cmpib,<>	0, %r19, intr_restore	/* if preempt_count > 0 */
	nop				/* prev insn branched backwards */

	/* check if we interrupted a critical path */
	LDREG	PT_PSW(%r16), %r20
	bb,<,n	%r20, 31 - PSW_SM_I, intr_restore
	nop

	/* ssm PSW_SM_I done later in intr_restore */
#ifdef CONFIG_MLONGCALLS
	ldil	L%intr_restore, %r2
	load32	preempt_schedule_irq, %r1
	bv	%r0(%r1)
	ldo	R%intr_restore(%r2), %r2
#else
	ldil	L%intr_restore, %r1
	BL	preempt_schedule_irq, %r2
	ldo	R%intr_restore(%r1), %r2
#endif
#endif /* CONFIG_PREEMPTION */

	/*
	 * External interrupts.
	 */

intr_extint:
	cmpib,COND(=),n 0,%r16,1f

	get_stack_use_cr30
	b,n 2f

1:
	get_stack_use_r30
2:
	save_specials	%r29
	virt_map
	save_general	%r29

	ldo	PT_FR0(%r29), %r24
	save_fp	%r24

	loadgp

	copy	%r29, %r26	/* arg0 is pt_regs */
	copy	%r29, %r16	/* save pt_regs */

	ldil	L%intr_return, %r2

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29	/* Reference param save area */
#endif

	b	do_cpu_irq_mask
	ldo	R%intr_return(%r2), %r2	/* return to intr_return, not here */
ENDPROC_CFI(syscall_exit_rfi)


	/* Generic interruptions (illegal insn, unaligned, page fault, etc) */

ENTRY_CFI(intr_save)		/* for os_hpmc */
	mfsp    %sr7,%r16
	cmpib,COND(=),n 0,%r16,1f
	get_stack_use_cr30
	b	2f
	copy    %r8,%r26

1:
	get_stack_use_r30
	copy    %r8,%r26

2:
	save_specials	%r29

	/* If this trap is an itlb miss, skip saving/adjusting isr/ior */
	cmpib,COND(=),n        PARISC_ITLB_TRAP,%r26,skip_save_ior


	mfctl           %isr, %r16
	nop		/* serialize mfctl on PA 2.0 to avoid 4 cycle penalty */
	mfctl           %ior, %r17


#ifdef CONFIG_64BIT
	/*
	 * If the interrupted code was running with W bit off (32 bit),
	 * clear the b bits (bits 0 & 1) in the ior.
	 * save_specials left ipsw value in r8 for us to test.
	 */
	extrd,u,*<>     %r8,PSW_W_BIT,1,%r0
	depdi           0,1,2,%r17

	/* adjust isr/ior: get high bits from isr and deposit in ior */
	space_adjust	%r16,%r17,%r1
#endif
	STREG           %r16, PT_ISR(%r29)
	STREG           %r17, PT_IOR(%r29)

#if 0 && defined(CONFIG_64BIT)
	/* Revisit when we have 64-bit code above 4Gb */
	b,n		intr_save2

skip_save_ior:
	/* We have an itlb miss, and when executing code above 4 Gb on ILP64, we
	 * need to adjust iasq/iaoq here in the same way we adjusted isr/ior
	 * above.
	 */
	extrd,u,*	%r8,PSW_W_BIT,1,%r1
	cmpib,COND(=),n	1,%r1,intr_save2
	LDREG		PT_IASQ0(%r29), %r16
	LDREG		PT_IAOQ0(%r29), %r17
	/* adjust iasq/iaoq */
	space_adjust	%r16,%r17,%r1
	STREG           %r16, PT_IASQ0(%r29)
	STREG           %r17, PT_IAOQ0(%r29)
#else
skip_save_ior:
#endif

intr_save2:
	virt_map
	save_general	%r29

	ldo		PT_FR0(%r29), %r25
	save_fp		%r25

	loadgp

	copy		%r29, %r25	/* arg1 is pt_regs */
#ifdef CONFIG_64BIT
	ldo		-16(%r30),%r29	/* Reference param save area */
#endif

	ldil		L%intr_check_sig, %r2
	copy		%r25, %r16	/* save pt_regs */

	b		handle_interruption
	ldo		R%intr_check_sig(%r2), %r2
ENDPROC_CFI(intr_save)


	/*
	 * Note for all tlb miss handlers:
	 *
	 * cr24 contains a pointer to the kernel address space
	 * page directory.
	 *
	 * cr25 contains a pointer to the current user address
	 * space page directory.
	 *
	 * sr3 will contain the space id of the user address space
	 * of the current running thread while that thread is
	 * running in the kernel.
	 */

	/*
	 * register number allocations.  Note that these are all
	 * in the shadowed registers
	 */

	t0 = r1		/* temporary register 0 */
	va = r8		/* virtual address for which the trap occurred */
	t1 = r9		/* temporary register 1 */
	pte  = r16	/* pte/phys page # */
	prot = r17	/* prot bits */
	spc  = r24	/* space for which the trap occurred */
	ptp = r25	/* page directory/page table pointer */
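
	/* Added overview: every handler below follows the same shape --
	 * walk the page table (L2_ptep/L3_ptep), take the PTE lock,
	 * mark the PTE accessed (or dirty), convert pte+space into the
	 * hardware insert format, issue idtlbt/iitlbt (or the split
	 * iitlba/iitlbp and idtlba/idtlbp pairs on PA 1.1), drop the
	 * lock, and rfir back to retry the faulting access. */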

#ifdef CONFIG_64BIT

dtlb_miss_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dtlb_fault

	L3_ptep		ptp,pte,t0,va,dtlb_check_alias_20w

	ptl_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_20w
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	idtlbt          pte,prot

	ptl_unlock1	spc,t0
	rfir
	nop

dtlb_check_alias_20w:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,20

	idtlbt          pte,prot

	rfir
	nop

nadtlb_miss_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,nadtlb_fault

	L3_ptep		ptp,pte,t0,va,nadtlb_check_alias_20w

	ptl_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_20w
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	idtlbt          pte,prot

	ptl_unlock1	spc,t0
	rfir
	nop

nadtlb_check_alias_20w:
	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,20

	idtlbt          pte,prot

	rfir
	nop

#else

dtlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,dtlb_fault

	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_11

	ptl_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_11
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	ptl_unlock1	spc,t0
	rfir
	nop

dtlb_check_alias_11:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,11

	idtlba          pte,(va)
	idtlbp          prot,(va)

	rfir
	nop

nadtlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,nadtlb_fault

	L2_ptep		ptp,pte,t0,va,nadtlb_check_alias_11

	ptl_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_11
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	ptl_unlock1	spc,t0
	rfir
	nop

nadtlb_check_alias_11:
	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,11

	idtlba          pte,(va)
	idtlbp          prot,(va)

	rfir
	nop

dtlb_miss_20:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dtlb_fault

	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_20

	ptl_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_20
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	idtlbt          pte,prot

	ptl_unlock1	spc,t0
	rfir
	nop

dtlb_check_alias_20:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,20

	idtlbt          pte,prot

	rfir
	nop

nadtlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,nadtlb_fault

	L2_ptep		ptp,pte,t0,va,nadtlb_check_alias_20

	ptl_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_20
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	idtlbt		pte,prot

	ptl_unlock1	spc,t0
	rfir
	nop

nadtlb_check_alias_20:
	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,20

	idtlbt          pte,prot

	rfir
	nop

#endif

nadtlb_emulate:

	/*
	 * Non-access misses can be caused by fdc,fic,pdc,lpa,probe and
	 * probei instructions. The kernel no longer faults doing flushes.
	 * Use of lpa and probe instructions is rare. Given the issue
	 * with shadow registers, we defer everything to the "slow" path.
	 */
	b,n		nadtlb_fault

#ifdef CONFIG_64BIT
itlb_miss_20w:

	/*
	 * I miss is a little different, since we allow users to fault
	 * on the gateway page which is in the kernel address space.
	 */

	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,itlb_fault

	L3_ptep		ptp,pte,t0,va,itlb_fault

	ptl_lock	spc,ptp,pte,t0,t1,itlb_fault
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	iitlbt          pte,prot

	ptl_unlock1	spc,t0
	rfir
	nop

naitlb_miss_20w:

	/*
	 * I miss is a little different, since we allow users to fault
	 * on the gateway page which is in the kernel address space.
	 */

	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,naitlb_fault

	L3_ptep		ptp,pte,t0,va,naitlb_check_alias_20w

	ptl_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_20w
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	iitlbt          pte,prot

	ptl_unlock1	spc,t0
	rfir
	nop

naitlb_check_alias_20w:
	do_alias	spc,t0,t1,va,pte,prot,naitlb_fault,20

	iitlbt		pte,prot

	rfir
	nop

#else

itlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,itlb_fault

	L2_ptep		ptp,pte,t0,va,itlb_fault

	ptl_lock	spc,ptp,pte,t0,t1,itlb_fault
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	iitlba		pte,(%sr1,va)
	iitlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	ptl_unlock1	spc,t0
	rfir
	nop

naitlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,naitlb_fault

	L2_ptep		ptp,pte,t0,va,naitlb_check_alias_11

	ptl_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_11
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	iitlba		pte,(%sr1,va)
	iitlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	ptl_unlock1	spc,t0
	rfir
	nop

naitlb_check_alias_11:
	do_alias	spc,t0,t1,va,pte,prot,itlb_fault,11

	iitlba          pte,(%sr0, va)
	iitlbp          prot,(%sr0, va)

	rfir
	nop


itlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,itlb_fault

	L2_ptep		ptp,pte,t0,va,itlb_fault

	ptl_lock	spc,ptp,pte,t0,t1,itlb_fault
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	iitlbt          pte,prot

	ptl_unlock1	spc,t0
	rfir
	nop

naitlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,naitlb_fault

	L2_ptep		ptp,pte,t0,va,naitlb_check_alias_20

	ptl_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_20
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	iitlbt          pte,prot

	ptl_unlock1	spc,t0
	rfir
	nop

naitlb_check_alias_20:
	do_alias	spc,t0,t1,va,pte,prot,naitlb_fault,20

	iitlbt          pte,prot

	rfir
	nop

#endif

#ifdef CONFIG_64BIT

dbit_trap_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dbit_fault

	L3_ptep		ptp,pte,t0,va,dbit_fault

	ptl_lock	spc,ptp,pte,t0,t1,dbit_fault
	update_dirty	ptp,pte,t1

	make_insert_tlb	spc,pte,prot,t1

	idtlbt          pte,prot

	ptl_unlock0	spc,t0
	rfir
	nop
#else

dbit_trap_11:

	get_pgd		spc,ptp

	space_check	spc,t0,dbit_fault

	L2_ptep		ptp,pte,t0,va,dbit_fault

	ptl_lock	spc,ptp,pte,t0,t1,dbit_fault
	update_dirty	ptp,pte,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp            %sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp            t1, %sr1     /* Restore sr1 */

	ptl_unlock0	spc,t0
	rfir
	nop

dbit_trap_20:
	get_pgd		spc,ptp

	space_check	spc,t0,dbit_fault

	L2_ptep		ptp,pte,t0,va,dbit_fault

	ptl_lock	spc,ptp,pte,t0,t1,dbit_fault
	update_dirty	ptp,pte,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	idtlbt		pte,prot

	ptl_unlock0	spc,t0
	rfir
	nop
#endif

	.import handle_interruption,code

kernel_bad_space:
	b               intr_save
	ldi             31,%r8  /* Use an unused code */

dbit_fault:
	b               intr_save
	ldi             20,%r8

itlb_fault:
	b               intr_save
	ldi             PARISC_ITLB_TRAP,%r8

nadtlb_fault:
	b               intr_save
	ldi             17,%r8

naitlb_fault:
	b               intr_save
	ldi             16,%r8

dtlb_fault:
	b               intr_save
	ldi             15,%r8

	/* Register saving semantics for system calls:

	   %r1		   clobbered by system call macro in userspace
	   %r2		   saved in PT_REGS by gateway page
	   %r3  - %r18	   preserved by C code (saved by signal code)
	   %r19 - %r20	   saved in PT_REGS by gateway page
	   %r21 - %r22	   non-standard syscall args
			   stored in kernel stack by gateway page
	   %r23 - %r26	   arg3-arg0, saved in PT_REGS by gateway page
	   %r27 - %r30	   saved in PT_REGS by gateway page
	   %r31		   syscall return pointer
	 */

	/* Floating point registers (FIXME: what do we do with these?)

	   %fr0  - %fr3	   status/exception, not preserved
	   %fr4  - %fr7	   arguments
	   %fr8	 - %fr11   not preserved by C code
	   %fr12 - %fr21   preserved by C code
	   %fr22 - %fr31   not preserved by C code
	 */

	.macro	reg_save regs
	STREG	%r3, PT_GR3(\regs)
	STREG	%r4, PT_GR4(\regs)
	STREG	%r5, PT_GR5(\regs)
	STREG	%r6, PT_GR6(\regs)
	STREG	%r7, PT_GR7(\regs)
	STREG	%r8, PT_GR8(\regs)
	STREG	%r9, PT_GR9(\regs)
	STREG   %r10,PT_GR10(\regs)
	STREG   %r11,PT_GR11(\regs)
	STREG   %r12,PT_GR12(\regs)
	STREG   %r13,PT_GR13(\regs)
	STREG   %r14,PT_GR14(\regs)
	STREG   %r15,PT_GR15(\regs)
	STREG   %r16,PT_GR16(\regs)
	STREG   %r17,PT_GR17(\regs)
	STREG   %r18,PT_GR18(\regs)
	.endm

	.macro	reg_restore regs
	LDREG	PT_GR3(\regs), %r3
	LDREG	PT_GR4(\regs), %r4
	LDREG	PT_GR5(\regs), %r5
	LDREG	PT_GR6(\regs), %r6
	LDREG	PT_GR7(\regs), %r7
	LDREG	PT_GR8(\regs), %r8
	LDREG	PT_GR9(\regs), %r9
	LDREG   PT_GR10(\regs),%r10
	LDREG   PT_GR11(\regs),%r11
	LDREG   PT_GR12(\regs),%r12
	LDREG   PT_GR13(\regs),%r13
	LDREG   PT_GR14(\regs),%r14
	LDREG   PT_GR15(\regs),%r15
	LDREG   PT_GR16(\regs),%r16
	LDREG   PT_GR17(\regs),%r17
	LDREG   PT_GR18(\regs),%r18
	.endm

	.macro	fork_like name
ENTRY_CFI(sys_\name\()_wrapper)
	mfctl	%cr30,%r1
	ldo	TASK_REGS(%r1),%r1
	reg_save %r1
	mfctl	%cr27, %r28
	ldil	L%sys_\name, %r31
	be	R%sys_\name(%sr4,%r31)
	STREG	%r28, PT_CR27(%r1)
ENDPROC_CFI(sys_\name\()_wrapper)
	.endm

fork_like clone
fork_like clone3
fork_like fork
fork_like vfork

	/* Set the return value for the child */
ENTRY(child_return)
	BL	schedule_tail, %r2
	nop
finish_child_return:
	mfctl	%cr30,%r1
	ldo	TASK_REGS(%r1),%r1	 /* get pt regs */

	LDREG	PT_CR27(%r1), %r3
	mtctl	%r3, %cr27
	reg_restore %r1
	b	syscall_exit
	copy	%r0,%r28
END(child_return)

ENTRY_CFI(sys_rt_sigreturn_wrapper)
	mfctl	%cr30,%r26
	ldo	TASK_REGS(%r26),%r26	/* get pt regs */
	/* Don't save regs, we are going to restore them from sigcontext. */
	STREG	%r2, -RP_OFFSET(%r30)
#ifdef CONFIG_64BIT
	ldo	FRAME_SIZE(%r30), %r30
	BL	sys_rt_sigreturn,%r2
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	BL	sys_rt_sigreturn,%r2
	ldo	FRAME_SIZE(%r30), %r30
#endif

	ldo	-FRAME_SIZE(%r30), %r30
	LDREG	-RP_OFFSET(%r30), %r2

	/* FIXME: I think we need to restore a few more things here. */
	mfctl	%cr30,%r1
	ldo	TASK_REGS(%r1),%r1	/* get pt regs */
	reg_restore %r1

	/* If the signal was received while the process was blocked on a
	 * syscall, then r2 will take us to syscall_exit; otherwise r2 will
	 * take us to syscall_exit_rfi and on to intr_return.
	 */
	bv	%r0(%r2)
	LDREG	PT_GR28(%r1),%r28  /* reload original r28 for syscall_exit */
ENDPROC_CFI(sys_rt_sigreturn_wrapper)

ENTRY(syscall_exit)
	/* NOTE: Not all syscalls exit this way.  rt_sigreturn will exit
	 * via syscall_exit_rfi if the signal was received while the process
	 * was running.
	 */

	/* save return value now */
	mfctl     %cr30, %r1
	STREG     %r28,TASK_PT_GR28(%r1)

	/* Seems to me that dp could be wrong here, if the syscall involved
	 * calling a module, and nothing got round to restoring dp on return.
	 */
	loadgp

syscall_check_resched:

	/* check for reschedule */
	mfctl	%cr30,%r19
	LDREG	TASK_TI_FLAGS(%r19),%r19	/* long */
	bb,<,n	%r19, 31-TIF_NEED_RESCHED, syscall_do_resched /* forward */

	.import do_signal,code
syscall_check_sig:
	mfctl	%cr30,%r19
	LDREG	TASK_TI_FLAGS(%r19),%r19
	ldi	(_TIF_USER_WORK_MASK & ~_TIF_NEED_RESCHED), %r26
	and,COND(<>)	%r19, %r26, %r0
	b,n	syscall_restore	/* skip past if we've nothing to do */

syscall_do_signal:
	/* Save callee-save registers (for sigcontext).
	 * FIXME: After this point the process structure should be
	 * consistent with all the relevant state of the process
	 * before the syscall.  We need to verify this.
	 */
	mfctl	%cr30,%r1
	ldo	TASK_REGS(%r1), %r26		/* struct pt_regs *regs */
	reg_save %r26

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29			/* Reference param save area */
#endif

	BL	do_notify_resume,%r2
	ldi	1, %r25				/* long in_syscall = 1 */

	mfctl	%cr30,%r1
	ldo	TASK_REGS(%r1), %r20		/* reload pt_regs */
	reg_restore %r20

	b,n     syscall_check_sig

syscall_restore:
	mfctl	%cr30,%r1

	/* Are we being ptraced? */
	LDREG	TASK_TI_FLAGS(%r1),%r19
	ldi	_TIF_SINGLESTEP|_TIF_BLOCKSTEP,%r2
	and,COND(=)	%r19,%r2,%r0
	b,n	syscall_restore_rfi

	ldo	TASK_PT_FR31(%r1),%r19		   /* reload fpregs */
	rest_fp	%r19

	LDREG	TASK_PT_SAR(%r1),%r19		   /* restore SAR */
	mtsar	%r19

	LDREG	TASK_PT_GR2(%r1),%r2		   /* restore user rp */
	LDREG	TASK_PT_GR19(%r1),%r19
	LDREG   TASK_PT_GR20(%r1),%r20
	LDREG	TASK_PT_GR21(%r1),%r21
	LDREG	TASK_PT_GR22(%r1),%r22
	LDREG	TASK_PT_GR23(%r1),%r23
	LDREG	TASK_PT_GR24(%r1),%r24
	LDREG	TASK_PT_GR25(%r1),%r25
	LDREG	TASK_PT_GR26(%r1),%r26
	LDREG	TASK_PT_GR27(%r1),%r27	   /* restore user dp */
	LDREG	TASK_PT_GR28(%r1),%r28	   /* syscall return value */
	LDREG	TASK_PT_GR29(%r1),%r29
	LDREG	TASK_PT_GR31(%r1),%r31	   /* restore syscall rp */

	/* NOTE: We use rsm/ssm pair to make this operation atomic */
	LDREG   TASK_PT_GR30(%r1),%r1              /* Get user sp */
	rsm     PSW_SM_I, %r0
	copy    %r1,%r30                           /* Restore user sp */
	mfsp    %sr3,%r1                           /* Get user space id */
	mtsp    %r1,%sr7                           /* Restore sr7 */
	ssm     PSW_SM_I, %r0

	/* Set sr2 to zero for userspace syscalls to work. */
	mtsp	%r0,%sr2
	mtsp	%r1,%sr4			   /* Restore sr4 */
	mtsp	%r1,%sr5			   /* Restore sr5 */
	mtsp	%r1,%sr6			   /* Restore sr6 */

	depi	PRIV_USER,31,2,%r31	/* ensure return to user mode. */

#ifdef CONFIG_64BIT
	/* decide whether to reset the wide mode bit
	 *
	 * For a syscall, the W bit is stored in the lowest bit
	 * of sp.  Extract it and reset W if it is zero */
	extrd,u,*<>	%r30,63,1,%r1
	rsm	PSW_SM_W, %r0
	/* now reset the lowest bit of sp if it was set */
	xor	%r30,%r1,%r30
#endif
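	/* The W-bit trick above, in C (added, illustrative only):
	 *
	 *   wbit = sp & 1;          // syscall entry stashed W here
	 *   if (!wbit)
	 *           psw &= ~PSW_W;  // drop wide mode
	 *   sp ^= wbit;             // clear the flag bit from sp
	 */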
	be,n    0(%sr3,%r31)                       /* return to user space */

	/* We have to return via an RFI, so that PSW T and R bits can be set
	 * appropriately.
	 * This sets up pt_regs so we can return via intr_restore, which is not
	 * the most efficient way of doing things, but it works.
	 */
syscall_restore_rfi:
	ldo	-1(%r0),%r2			   /* Set recovery cntr to -1 */
	mtctl	%r2,%cr0			   /*   for immediate trap */
	LDREG	TASK_PT_PSW(%r1),%r2		   /* Get old PSW */
	ldi	0x0b,%r20			   /* Create new PSW */
	depi	-1,13,1,%r20			   /* C, Q, D, and I bits */

	/* The values of SINGLESTEP_BIT and BLOCKSTEP_BIT are
	 * set in thread_info.h and converted to PA bitmap
	 * numbers in asm-offsets.c */

	/* if ((%r19.SINGLESTEP_BIT)) { %r20.27=1} */
	extru,=	%r19,TIF_SINGLESTEP_PA_BIT,1,%r0
	depi	-1,27,1,%r20			   /* R bit */

	/* if ((%r19.BLOCKSTEP_BIT)) { %r20.7=1} */
	extru,= %r19,TIF_BLOCKSTEP_PA_BIT,1,%r0
	depi	-1,7,1,%r20			   /* T bit */

	STREG	%r20,TASK_PT_PSW(%r1)

	/* Always store space registers, since sr3 can be changed (e.g. fork) */

	mfsp    %sr3,%r25
	STREG   %r25,TASK_PT_SR3(%r1)
	STREG   %r25,TASK_PT_SR4(%r1)
	STREG   %r25,TASK_PT_SR5(%r1)
	STREG   %r25,TASK_PT_SR6(%r1)
	STREG   %r25,TASK_PT_SR7(%r1)
	STREG   %r25,TASK_PT_IASQ0(%r1)
	STREG   %r25,TASK_PT_IASQ1(%r1)

	/* XXX W bit??? */
	/* Now if old D bit is clear, it means we didn't save all registers
	 * on syscall entry, so do that now.  This only happens on TRACEME
	 * calls, or if someone attached to us while we were on a syscall.
	 * We could make this more efficient by not saving r3-r18, but
	 * then we wouldn't be able to use the common intr_restore path.
	 * It is only for traced processes anyway, so performance is not
	 * an issue.
	 */
	bb,<	%r2,30,pt_regs_ok		   /* Branch if D set */
	ldo	TASK_REGS(%r1),%r25
	reg_save %r25				   /* Save r3 to r18 */

	/* Save the current sr */
	mfsp	%sr0,%r2
	STREG	%r2,TASK_PT_SR0(%r1)

	/* Save the scratch sr */
	mfsp	%sr1,%r2
	STREG	%r2,TASK_PT_SR1(%r1)

	/* sr2 should be set to zero for userspace syscalls */
	STREG	%r0,TASK_PT_SR2(%r1)

	LDREG	TASK_PT_GR31(%r1),%r2
	depi	PRIV_USER,31,2,%r2	/* ensure return to user mode. */
	STREG   %r2,TASK_PT_IAOQ0(%r1)
	ldo	4(%r2),%r2
	STREG	%r2,TASK_PT_IAOQ1(%r1)
	b	intr_restore
	copy	%r25,%r16

pt_regs_ok:
	LDREG	TASK_PT_IAOQ0(%r1),%r2
	depi	PRIV_USER,31,2,%r2	/* ensure return to user mode. */
	STREG	%r2,TASK_PT_IAOQ0(%r1)
	LDREG	TASK_PT_IAOQ1(%r1),%r2
	depi	PRIV_USER,31,2,%r2
	STREG	%r2,TASK_PT_IAOQ1(%r1)
	b	intr_restore
	copy	%r25,%r16

syscall_do_resched:
	load32	syscall_check_resched,%r2 /* if resched, we start over again */
	load32	schedule,%r19
	bv	%r0(%r19)		/* jumps to schedule() */
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	nop
#endif
END(syscall_exit)


#ifdef CONFIG_FUNCTION_TRACER

	.import ftrace_function_trampoline,code
	.align L1_CACHE_BYTES
ENTRY_CFI(mcount, caller)
_mcount:
	.export _mcount,data
	/*
	 * The 64bit mcount() function pointer needs 4 dwords, of which the
	 * first two are free.  We optimize it here and put 2 instructions for
	 * calling mcount(), and 2 instructions for ftrace_stub().  That way
	 * everything fits in one L1 cacheline.
	 */
	ldi	0, %arg3
	b	ftrace_function_trampoline
	copy	%r3, %arg2	/* caller original %sp */
ftrace_stub:
	.globl ftrace_stub
        .type  ftrace_stub, @function
#ifdef CONFIG_64BIT
	bve	(%rp)
#else
	bv	%r0(%rp)
#endif
	nop
#ifdef CONFIG_64BIT
	.dword mcount
	.dword 0 /* code in head.S puts value of global gp here */
#endif
ENDPROC_CFI(mcount)

#ifdef CONFIG_DYNAMIC_FTRACE

#ifdef CONFIG_64BIT
#define FTRACE_FRAME_SIZE (2*FRAME_SIZE)
#else
#define FTRACE_FRAME_SIZE FRAME_SIZE
#endif
ENTRY_CFI(ftrace_caller, caller,frame=FTRACE_FRAME_SIZE,CALLS,SAVE_RP,SAVE_SP)
ftrace_caller:
	.global ftrace_caller

	STREG	%r3, -FTRACE_FRAME_SIZE+1*REG_SZ(%sp)
	ldo	-FTRACE_FRAME_SIZE(%sp), %r3
	STREG	%rp, -RP_OFFSET(%r3)

	/* Offset 0 is already allocated for %r1 */
	STREG	%r23, 2*REG_SZ(%r3)
	STREG	%r24, 3*REG_SZ(%r3)
	STREG	%r25, 4*REG_SZ(%r3)
	STREG	%r26, 5*REG_SZ(%r3)
	STREG	%r28, 6*REG_SZ(%r3)
	STREG	%r29, 7*REG_SZ(%r3)
#ifdef CONFIG_64BIT
	STREG	%r19, 8*REG_SZ(%r3)
	STREG	%r20, 9*REG_SZ(%r3)
	STREG	%r21, 10*REG_SZ(%r3)
	STREG	%r22, 11*REG_SZ(%r3)
	STREG	%r27, 12*REG_SZ(%r3)
	STREG	%r31, 13*REG_SZ(%r3)
	loadgp
	ldo	-16(%sp),%r29
#endif
	LDREG	0(%r3), %r25
	copy	%rp, %r26
	ldo	-8(%r25), %r25
	ldi	0, %r23		/* no pt_regs */
	b,l	ftrace_function_trampoline, %rp
	copy	%r3, %r24

	LDREG	-RP_OFFSET(%r3), %rp
	LDREG	2*REG_SZ(%r3), %r23
	LDREG	3*REG_SZ(%r3), %r24
	LDREG	4*REG_SZ(%r3), %r25
	LDREG	5*REG_SZ(%r3), %r26
	LDREG	6*REG_SZ(%r3), %r28
	LDREG	7*REG_SZ(%r3), %r29
#ifdef CONFIG_64BIT
	LDREG	8*REG_SZ(%r3), %r19
	LDREG	9*REG_SZ(%r3), %r20
	LDREG	10*REG_SZ(%r3), %r21
	LDREG	11*REG_SZ(%r3), %r22
	LDREG	12*REG_SZ(%r3), %r27
	LDREG	13*REG_SZ(%r3), %r31
#endif
	LDREG	1*REG_SZ(%r3), %r3

	LDREGM	-FTRACE_FRAME_SIZE(%sp), %r1
	/* Adjust return point to jump back to beginning of traced function */
	ldo	-4(%r1), %r1
	bv,n	(%r1)

ENDPROC_CFI(ftrace_caller)

#ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS
ENTRY_CFI(ftrace_regs_caller,caller,frame=FTRACE_FRAME_SIZE+PT_SZ_ALGN,
	CALLS,SAVE_RP,SAVE_SP)
ftrace_regs_caller:
	.global ftrace_regs_caller

	ldo	-FTRACE_FRAME_SIZE(%sp), %r1
	STREG	%rp, -RP_OFFSET(%r1)

	copy	%sp, %r1
	ldo	PT_SZ_ALGN(%sp), %sp

	STREG	%rp, PT_GR2(%r1)
	STREG	%r3, PT_GR3(%r1)
	STREG	%r4, PT_GR4(%r1)
	STREG	%r5, PT_GR5(%r1)
	STREG	%r6, PT_GR6(%r1)
	STREG	%r7, PT_GR7(%r1)
	STREG	%r8, PT_GR8(%r1)
	STREG	%r9, PT_GR9(%r1)
	STREG   %r10, PT_GR10(%r1)
	STREG   %r11, PT_GR11(%r1)
	STREG   %r12, PT_GR12(%r1)
	STREG   %r13, PT_GR13(%r1)
	STREG   %r14, PT_GR14(%r1)
	STREG   %r15, PT_GR15(%r1)
	STREG   %r16, PT_GR16(%r1)
	STREG   %r17, PT_GR17(%r1)
	STREG   %r18, PT_GR18(%r1)
	STREG	%r19, PT_GR19(%r1)
	STREG	%r20, PT_GR20(%r1)
	STREG	%r21, PT_GR21(%r1)
	STREG	%r22, PT_GR22(%r1)
	STREG	%r23, PT_GR23(%r1)
	STREG	%r24, PT_GR24(%r1)
	STREG	%r25, PT_GR25(%r1)
	STREG	%r26, PT_GR26(%r1)
	STREG	%r27, PT_GR27(%r1)
	STREG	%r28, PT_GR28(%r1)
	STREG	%r29, PT_GR29(%r1)
	STREG	%r30, PT_GR30(%r1)
	STREG	%r31, PT_GR31(%r1)
	mfctl	%cr11, %r26
	STREG	%r26, PT_SAR(%r1)

	copy	%rp, %r26
	LDREG	-FTRACE_FRAME_SIZE-PT_SZ_ALGN(%sp), %r25
	ldo	-8(%r25), %r25
	ldo	-FTRACE_FRAME_SIZE(%r1), %arg2
	b,l	ftrace_function_trampoline, %rp
	copy	%r1, %arg3 /* struct pt_regs */

	ldo	-PT_SZ_ALGN(%sp), %r1

	LDREG	PT_SAR(%r1), %rp
	mtctl	%rp, %cr11

	LDREG	PT_GR2(%r1), %rp
	LDREG	PT_GR3(%r1), %r3
	LDREG	PT_GR4(%r1), %r4
	LDREG	PT_GR5(%r1), %r5
	LDREG	PT_GR6(%r1), %r6
	LDREG	PT_GR7(%r1), %r7
	LDREG	PT_GR8(%r1), %r8
	LDREG	PT_GR9(%r1), %r9
	LDREG   PT_GR10(%r1),%r10
	LDREG   PT_GR11(%r1),%r11
	LDREG   PT_GR12(%r1),%r12
	LDREG   PT_GR13(%r1),%r13
	LDREG   PT_GR14(%r1),%r14
	LDREG   PT_GR15(%r1),%r15
	LDREG   PT_GR16(%r1),%r16
	LDREG   PT_GR17(%r1),%r17
	LDREG   PT_GR18(%r1),%r18
	LDREG   PT_GR19(%r1),%r19
	LDREG   PT_GR20(%r1),%r20
	LDREG   PT_GR21(%r1),%r21
	LDREG   PT_GR22(%r1),%r22
	LDREG   PT_GR23(%r1),%r23
	LDREG   PT_GR24(%r1),%r24
	LDREG   PT_GR25(%r1),%r25
	LDREG   PT_GR26(%r1),%r26
	LDREG   PT_GR27(%r1),%r27
	LDREG   PT_GR28(%r1),%r28
	LDREG   PT_GR29(%r1),%r29
	LDREG   PT_GR30(%r1),%r30
	LDREG   PT_GR31(%r1),%r31

	ldo	-PT_SZ_ALGN(%sp), %sp
	LDREGM	-FTRACE_FRAME_SIZE(%sp), %r1
	/* Adjust return point to jump back to beginning of traced function */
	ldo	-4(%r1), %r1
	bv,n	(%r1)

ENDPROC_CFI(ftrace_regs_caller)

#endif
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	.align 8
ENTRY_CFI(return_to_handler, caller,frame=FRAME_SIZE)
	.export parisc_return_to_handler,data
parisc_return_to_handler:
	copy %r3,%r1
	STREG %r0,-RP_OFFSET(%sp)	/* store 0 as %rp */
	copy %sp,%r3
	STREGM %r1,FRAME_SIZE(%sp)
	STREG %ret0,8(%r3)
	STREG %ret1,16(%r3)

#ifdef CONFIG_64BIT
	loadgp
#endif

	/* call ftrace_return_to_handler(0) */
	.import ftrace_return_to_handler,code
	load32 ftrace_return_to_handler,%ret0
	load32 .Lftrace_ret,%r2
#ifdef CONFIG_64BIT
	ldo -16(%sp),%ret1		/* Reference param save area */
	bve	(%ret0)
#else
	bv	%r0(%ret0)
#endif
	ldi 0,%r26
.Lftrace_ret:
	copy %ret0,%rp

	/* restore original return values */
	LDREG 8(%r3),%ret0
	LDREG 16(%r3),%ret1

	/* return from function */
#ifdef CONFIG_64BIT
	bve	(%rp)
#else
	bv	%r0(%rp)
#endif
	LDREGM -FRAME_SIZE(%sp),%r3
ENDPROC_CFI(return_to_handler)

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#endif	/* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_IRQSTACKS
/* void call_on_stack(unsigned long param1, void *func,
		      unsigned long new_stack) */
ENTRY_CFI(call_on_stack, FRAME=2*FRAME_SIZE,CALLS,SAVE_RP,SAVE_SP)
ENTRY(_call_on_stack)
	copy	%sp, %r1

	/* Regarding the HPPA calling conventions for function pointers,
	   we assume the PIC register is not changed across the call.  For
	   CONFIG_64BIT, the argument pointer is left to point at the
	   argument region allocated for the call to call_on_stack. */
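
	/* Background note (added): on 32-bit PA-RISC a function pointer
	   with the PLABEL bit (bit 30) set points at a descriptor whose
	   first word holds the real entry address; on 64-bit, calls
	   always go through a descriptor with the entry address at
	   offset 16.  The branches below implement exactly that
	   dereference before switching stacks back. */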

	/* Switch to new stack.  We allocate two frames.  */
	ldo	2*FRAME_SIZE(%arg2), %sp
# ifdef CONFIG_64BIT
	/* Save previous stack pointer and return pointer in frame marker */
	STREG	%rp, -FRAME_SIZE-RP_OFFSET(%sp)
	/* Calls always use function descriptor */
	LDREG	16(%arg1), %arg1
	bve,l	(%arg1), %rp
	STREG	%r1, -FRAME_SIZE-REG_SZ(%sp)
	LDREG	-FRAME_SIZE-RP_OFFSET(%sp), %rp
	bve	(%rp)
	LDREG	-FRAME_SIZE-REG_SZ(%sp), %sp
# else
	/* Save previous stack pointer and return pointer in frame marker */
	STREG	%r1, -FRAME_SIZE-REG_SZ(%sp)
	STREG	%rp, -FRAME_SIZE-RP_OFFSET(%sp)
	/* Calls use function descriptor if PLABEL bit is set */
	bb,>=,n	%arg1, 30, 1f
	depwi	0,31,2, %arg1
	LDREG	0(%arg1), %arg1
1:
	be,l	0(%sr4,%arg1), %sr0, %r31
	copy	%r31, %rp
	LDREG	-FRAME_SIZE-RP_OFFSET(%sp), %rp
	bv	(%rp)
	LDREG	-FRAME_SIZE-REG_SZ(%sp), %sp
# endif /* CONFIG_64BIT */
ENDPROC_CFI(call_on_stack)
#endif /* CONFIG_IRQSTACKS */

ENTRY_CFI(get_register)
	/*
	 * get_register is used by the non access tlb miss handlers to
	 * copy the value of the general register specified in r8 into
	 * r1. This routine can't be used for shadowed registers, since
	 * the rfir will restore the original value. So, for the shadowed
	 * registers we put a -1 into r1 to indicate that the register
	 * should not be used (the register being copied could also have
	 * a -1 in it, but that is OK, it just means that we will have
	 * to use the slow path instead).
	 */
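	/* Added note on the dispatch: blr %r8,%r0 branches a multiple
	 * of 8 bytes (one two-instruction slot per register number)
	 * into the table below, so each bv/copy pair handles exactly
	 * one general register. */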
	blr     %r8,%r0
	nop
	bv      %r0(%r25)    /* r0 */
	copy    %r0,%r1
	bv      %r0(%r25)    /* r1 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r2 */
	copy    %r2,%r1
	bv      %r0(%r25)    /* r3 */
	copy    %r3,%r1
	bv      %r0(%r25)    /* r4 */
	copy    %r4,%r1
	bv      %r0(%r25)    /* r5 */
	copy    %r5,%r1
	bv      %r0(%r25)    /* r6 */
	copy    %r6,%r1
	bv      %r0(%r25)    /* r7 */
	copy    %r7,%r1
	bv      %r0(%r25)    /* r8 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r9 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r10 */
	copy    %r10,%r1
	bv      %r0(%r25)    /* r11 */
	copy    %r11,%r1
	bv      %r0(%r25)    /* r12 */
	copy    %r12,%r1
	bv      %r0(%r25)    /* r13 */
	copy    %r13,%r1
	bv      %r0(%r25)    /* r14 */
	copy    %r14,%r1
	bv      %r0(%r25)    /* r15 */
	copy    %r15,%r1
	bv      %r0(%r25)    /* r16 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r17 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r18 */
	copy    %r18,%r1
	bv      %r0(%r25)    /* r19 */
	copy    %r19,%r1
	bv      %r0(%r25)    /* r20 */
	copy    %r20,%r1
	bv      %r0(%r25)    /* r21 */
	copy    %r21,%r1
	bv      %r0(%r25)    /* r22 */
	copy    %r22,%r1
	bv      %r0(%r25)    /* r23 */
	copy    %r23,%r1
	bv      %r0(%r25)    /* r24 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r25 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r26 */
	copy    %r26,%r1
	bv      %r0(%r25)    /* r27 */
	copy    %r27,%r1
	bv      %r0(%r25)    /* r28 */
	copy    %r28,%r1
	bv      %r0(%r25)    /* r29 */
	copy    %r29,%r1
	bv      %r0(%r25)    /* r30 */
	copy    %r30,%r1
	bv      %r0(%r25)    /* r31 */
	copy    %r31,%r1
ENDPROC_CFI(get_register)


ENTRY_CFI(set_register)
	/*
	 * set_register is used by the non access tlb miss handlers to
	 * copy the value of r1 into the general register specified in
	 * r8.
	 */
	blr     %r8,%r0
	nop
	bv      %r0(%r25)    /* r0 (silly, but it is a place holder) */
	copy    %r1,%r0
	bv      %r0(%r25)    /* r1 */
	copy    %r1,%r1
	bv      %r0(%r25)    /* r2 */
	copy    %r1,%r2
	bv      %r0(%r25)    /* r3 */
	copy    %r1,%r3
	bv      %r0(%r25)    /* r4 */
	copy    %r1,%r4
	bv      %r0(%r25)    /* r5 */
	copy    %r1,%r5
	bv      %r0(%r25)    /* r6 */
	copy    %r1,%r6
	bv      %r0(%r25)    /* r7 */
	copy    %r1,%r7
	bv      %r0(%r25)    /* r8 */
	copy    %r1,%r8
	bv      %r0(%r25)    /* r9 */
	copy    %r1,%r9
	bv      %r0(%r25)    /* r10 */
	copy    %r1,%r10
	bv      %r0(%r25)    /* r11 */
	copy    %r1,%r11
	bv      %r0(%r25)    /* r12 */
	copy    %r1,%r12
	bv      %r0(%r25)    /* r13 */
	copy    %r1,%r13
	bv      %r0(%r25)    /* r14 */
	copy    %r1,%r14
	bv      %r0(%r25)    /* r15 */
	copy    %r1,%r15
	bv      %r0(%r25)    /* r16 */
	copy    %r1,%r16
	bv      %r0(%r25)    /* r17 */
	copy    %r1,%r17
	bv      %r0(%r25)    /* r18 */
	copy    %r1,%r18
	bv      %r0(%r25)    /* r19 */
	copy    %r1,%r19
	bv      %r0(%r25)    /* r20 */
	copy    %r1,%r20
	bv      %r0(%r25)    /* r21 */
	copy    %r1,%r21
	bv      %r0(%r25)    /* r22 */
	copy    %r1,%r22
	bv      %r0(%r25)    /* r23 */
	copy    %r1,%r23
	bv      %r0(%r25)    /* r24 */
	copy    %r1,%r24
	bv      %r0(%r25)    /* r25 */
	copy    %r1,%r25
	bv      %r0(%r25)    /* r26 */
	copy    %r1,%r26
	bv      %r0(%r25)    /* r27 */
	copy    %r1,%r27
	bv      %r0(%r25)    /* r28 */
	copy    %r1,%r28
	bv      %r0(%r25)    /* r29 */
	copy    %r1,%r29
	bv      %r0(%r25)    /* r30 */
	copy    %r1,%r30
	bv      %r0(%r25)    /* r31 */
	copy    %r1,%r31
ENDPROC_CFI(set_register)
