xref: /openbmc/linux/arch/parisc/kernel/entry.S (revision 6ee73861)
1/*
2 * Linux/PA-RISC Project (http://www.parisc-linux.org/)
3 *
4 * kernel entry points (interruptions, system call wrappers)
5 *  Copyright (C) 1999,2000 Philipp Rumpf
6 *  Copyright (C) 1999 SuSE GmbH Nuernberg
7 *  Copyright (C) 2000 Hewlett-Packard (John Marvin)
8 *  Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
9 *
10 *    This program is free software; you can redistribute it and/or modify
11 *    it under the terms of the GNU General Public License as published by
12 *    the Free Software Foundation; either version 2, or (at your option)
13 *    any later version.
14 *
15 *    This program is distributed in the hope that it will be useful,
16 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
17 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18 *    GNU General Public License for more details.
19 *
20 *    You should have received a copy of the GNU General Public License
21 *    along with this program; if not, write to the Free Software
22 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 */
24
25#include <asm/asm-offsets.h>
26
27/* we have the following possibilities to act on an interruption:
28 *  - handle in assembly and use shadowed registers only
29 *  - save registers to kernel stack and handle in assembly or C */
30
31
32#include <asm/psw.h>
33#include <asm/cache.h>		/* for L1_CACHE_SHIFT */
34#include <asm/assembly.h>	/* for LDREG/STREG defines */
35#include <asm/pgtable.h>
36#include <asm/signal.h>
37#include <asm/unistd.h>
38#include <asm/thread_info.h>
39
40#include <linux/linkage.h>
41
42#ifdef CONFIG_64BIT
43	.level 2.0w
44#else
45	.level 2.0
46#endif
47
48	.import         pa_dbit_lock,data
49
50	/* space_to_prot macro creates a prot id from a space id */
51
#if (SPACEID_SHIFT) == 0
	/* No space-id shift: deposit the space id into bits 32..62 of
	 * \prot (zeroing everything else), i.e. prot = spc << 1. */
	.macro  space_to_prot spc prot
	depd,z  \spc,62,31,\prot
	.endm
#else
	/* Non-zero SPACEID_SHIFT: the upper bits of the space id form
	 * the protection id; extract them (unsigned) into \prot. */
	.macro  space_to_prot spc prot
	extrd,u \spc,(64 - (SPACEID_SHIFT)),32,\prot
	.endm
#endif
61
	/* Switch to virtual mapping, trashing only %r1.
	 *
	 * Zeroes the kernel space registers (sr4-sr7), preserving a
	 * non-zero user sr7 in sr3, then performs an rfir with the
	 * kernel PSW and an IIAOQ pointing at local label 4 below, so
	 * execution resumes there with translation enabled. */
	.macro  virt_map
	/* pcxt_ssm_bug */
	rsm	PSW_SM_I, %r0	/* barrier for "Relied upon Translation" */
	mtsp	%r0, %sr4
	mtsp	%r0, %sr5
	mfsp	%sr7, %r1
	or,=    %r0,%r1,%r0	/* Only save sr7 in sr3 if sr7 != 0 */
	mtsp	%r1, %sr3
	tovirt_r1 %r29
	load32	KERNEL_PSW, %r1

	rsm     PSW_SM_QUIET,%r0	/* second "heavy weight" ctl op */
	mtsp	%r0, %sr6
	mtsp	%r0, %sr7
	/* cr17/cr18 are two-deep queues; write each twice (tail, head) */
	mtctl	%r0, %cr17	/* Clear IIASQ tail */
	mtctl	%r0, %cr17	/* Clear IIASQ head */
	mtctl	%r1, %ipsw
	load32	4f, %r1
	mtctl	%r1, %cr18	/* Set IIAOQ tail */
	ldo	4(%r1), %r1
	mtctl	%r1, %cr18	/* Set IIAOQ head */
	rfir
	nop
4:
	.endm
88
89	/*
90	 * The "get_stack" macros are responsible for determining the
91	 * kernel stack value.
92	 *
93	 *      If sr7 == 0
94	 *          Already using a kernel stack, so call the
95	 *          get_stack_use_r30 macro to push a pt_regs structure
96	 *          on the stack, and store registers there.
97	 *      else
98	 *          Need to set up a kernel stack, so call the
99	 *          get_stack_use_cr30 macro to set up a pointer
100	 *          to the pt_regs structure contained within the
101	 *          task pointer pointed to by cr30. Set the stack
102	 *          pointer to point to the end of the task structure.
103	 *
104	 * Note that we use shadowed registers for temps until
105	 * we can save %r26 and %r29. %r26 is used to preserve
106	 * %r8 (a shadowed register) which temporarily contained
107	 * either the fault type ("code") or the eirr. We need
108	 * to use a non-shadowed register to carry the value over
109	 * the rfir in virt_map. We use %r26 since this value winds
110	 * up being passed as the argument to either do_cpu_irq_mask
111	 * or handle_interruption. %r29 is used to hold a pointer
112	 * the register save area, and once again, it needs to
113	 * be a non-shadowed register so that it survives the rfir.
114	 *
115	 * N.B. TASK_SZ_ALGN and PT_SZ_ALGN include space for a stack frame.
116	 */
117
	.macro  get_stack_use_cr30

	/* we save the registers in the task struct */

	/* On entry sr7 != 0, i.e. we came from user space: locate the
	 * pt_regs area inside the task struct (via cr30 = thread_info)
	 * and switch %r30 to the top of the kernel thread stack. */
	mfctl   %cr30, %r1
	tophys  %r1,%r9			/* %r9 = phys addr of thread_info */
	LDREG	TI_TASK(%r9), %r1	/* thread_info -> task_struct */
	tophys  %r1,%r9
	ldo     TASK_REGS(%r9),%r9	/* %r9 = phys &task->thread.regs */
	STREG   %r30, PT_GR30(%r9)
	STREG   %r29,PT_GR29(%r9)
	STREG   %r26,PT_GR26(%r9)
	copy    %r9,%r29		/* %r29 = pt_regs ptr; non-shadowed,
					 * survives the rfir in virt_map */
	mfctl   %cr30, %r1
	ldo	THREAD_SZ_ALGN(%r1), %r30	/* kernel stack pointer */
	.endm
134
	.macro  get_stack_use_r30

	/* we put a struct pt_regs on the stack and save the registers there */

	/* Already on a kernel stack (sr7 == 0): push a pt_regs frame on
	 * the current stack and record its (physical) address in %r9/%r29. */
	tophys  %r30,%r9
	STREG   %r30,PT_GR30(%r9)
	ldo	PT_SZ_ALGN(%r30),%r30	/* advance sp past the pt_regs */
	STREG   %r29,PT_GR29(%r9)
	STREG   %r26,PT_GR26(%r9)
	copy    %r9,%r29		/* %r29 = pt_regs ptr (survives rfir) */
	.endm
146
	/* Restore %r1, %r30 and finally %r29 from the pt_regs area that
	 * %r29 points to.  %r29 must be restored last since it is the
	 * base register for these loads. */
	.macro  rest_stack
	LDREG   PT_GR1(%r29), %r1
	LDREG   PT_GR30(%r29),%r30
	LDREG   PT_GR29(%r29),%r29
	.endm
152
	/* default interruption handler
	 * (calls traps.c:handle_interruption) */
	.macro	def code
	b	intr_save
	ldi     \code, %r8	/* delay slot: trap number for intr_save */
	.align	32		/* each vector entry is 32 bytes */
	.endm

	/* Interrupt interruption handler
	 * (calls irq.c:do_cpu_irq_mask) */
	.macro	extint code
	b	intr_extint
	mfsp    %sr7,%r16	/* delay slot: sr7 tells us user vs kernel */
	.align	32
	.endm

	.import	os_hpmc, code

	/* HPMC (High Priority Machine Check) handler */
	.macro	hpmc code
	nop			/* must be a NOP, will be patched later */
	load32	PA(os_hpmc), %r3
	bv,n	0(%r3)
	nop
	.word	0		/* checksum (will be patched) */
	.word	PA(os_hpmc)	/* address of handler */
	.word	0		/* length of handler */
	.endm
181
182	/*
183	 * Performance Note: Instructions will be moved up into
184	 * this part of the code later on, once we are sure
185	 * that the tlb miss handlers are close to final form.
186	 */
187
188	/* Register definitions for tlb miss handler macros */
189
	va  = r8	/* virtual address for which the trap occurred */
	spc = r24	/* space for which the trap occurred */
192
#ifndef CONFIG_64BIT

	/*
	 * itlb miss interruption handler (parisc 1.1 - 32 bit)
	 *
	 * Grabs the faulting space/offset from the front of the
	 * instruction queue (pcsq/pcoq); the pcoq read is in the
	 * branch delay slot.  \code is unused here.
	 */

	.macro	itlb_11 code

	mfctl	%pcsq, spc
	b	itlb_miss_11
	mfctl	%pcoq, va

	.align		32
	.endm
#endif

	/*
	 * itlb miss interruption handler (parisc 2.0)
	 * Same scheme; dispatches to the wide handler on 64-bit kernels.
	 */

	.macro	itlb_20 code
	mfctl	%pcsq, spc
#ifdef CONFIG_64BIT
	b       itlb_miss_20w
#else
	b	itlb_miss_20
#endif
	mfctl	%pcoq, va

	.align		32
	.endm
224
#ifndef CONFIG_64BIT
	/*
	 * naitlb miss interruption handler (parisc 1.1 - 32 bit)
	 *
	 * Note: naitlb misses will be treated
	 * as an ordinary itlb miss for now.
	 * However, note that naitlb misses
	 * have the faulting address in the
	 * IOR/ISR (not pcsq/pcoq).
	 */

	.macro	naitlb_11 code

	mfctl	%isr,spc
	b	itlb_miss_11
	mfctl 	%ior,va		/* delay slot */
	/* FIXME: If user causes a naitlb miss, the priv level may not be in
	 * lower bits of va, where the itlb miss handler is expecting them
	 */

	.align		32
	.endm
#endif

	/*
	 * naitlb miss interruption handler (parisc 2.0)
	 *
	 * Note: naitlb misses will be treated
	 * as an ordinary itlb miss for now.
	 * However, note that naitlb misses
	 * have the faulting address in the
	 * IOR/ISR.
	 */

	.macro	naitlb_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b       itlb_miss_20w
#else
	b	itlb_miss_20
#endif
	mfctl 	%ior,va		/* delay slot */
	/* FIXME: If user causes a naitlb miss, the priv level may not be in
	 * lower bits of va, where the itlb miss handler is expecting them
	 */

	.align		32
	.endm
274
#ifndef CONFIG_64BIT
	/*
	 * dtlb miss interruption handler (parisc 1.1 - 32 bit)
	 * Faulting space/address come from isr/ior; the ior read is
	 * in the branch delay slot.  \code is unused.
	 */

	.macro	dtlb_11 code

	mfctl	%isr, spc
	b	dtlb_miss_11
	mfctl	%ior, va

	.align		32
	.endm
#endif

	/*
	 * dtlb miss interruption handler (parisc 2.0)
	 */

	.macro	dtlb_20 code

	mfctl	%isr, spc
#ifdef CONFIG_64BIT
	b       dtlb_miss_20w
#else
	b	dtlb_miss_20
#endif
	mfctl	%ior, va

	.align		32
	.endm

#ifndef CONFIG_64BIT
	/* nadtlb miss interruption handler (parisc 1.1 - 32 bit) */

	.macro	nadtlb_11 code

	mfctl	%isr,spc
	b       nadtlb_miss_11
	mfctl	%ior,va

	.align		32
	.endm
#endif

	/* nadtlb miss interruption handler (parisc 2.0) */

	.macro	nadtlb_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b       nadtlb_miss_20w
#else
	b       nadtlb_miss_20
#endif
	mfctl	%ior,va

	.align		32
	.endm
334
#ifndef CONFIG_64BIT
	/*
	 * dirty bit trap interruption handler (parisc 1.1 - 32 bit)
	 * Faulting space/address from isr/ior; ior read in delay slot.
	 */

	.macro	dbit_11 code

	mfctl	%isr,spc
	b	dbit_trap_11
	mfctl	%ior,va

	.align		32
	.endm
#endif

	/*
	 * dirty bit trap interruption handler (parisc 2.0)
	 */

	.macro	dbit_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b       dbit_trap_20w
#else
	b	dbit_trap_20
#endif
	mfctl	%ior,va

	.align		32
	.endm
366
	/* The following are simple 32 vs 64 bit instruction
	 * abstractions for the macros.  Bit positions are given in
	 * 32-bit numbering; the 64-bit variants add 32 to address the
	 * same bits in the low word of a doubleword register. */
	.macro		EXTR	reg1,start,length,reg2
#ifdef CONFIG_64BIT
	extrd,u		\reg1,32+(\start),\length,\reg2
#else
	extrw,u		\reg1,\start,\length,\reg2
#endif
	.endm

	.macro		DEP	reg1,start,length,reg2
#ifdef CONFIG_64BIT
	depd		\reg1,32+(\start),\length,\reg2
#else
	depw		\reg1,\start,\length,\reg2
#endif
	.endm

	.macro		DEPI	val,start,length,reg
#ifdef CONFIG_64BIT
	depdi		\val,32+(\start),\length,\reg
#else
	depwi		\val,\start,\length,\reg
#endif
	.endm
392
	/* In LP64, the space contains part of the upper 32 bits of the
	 * fault.  We have to extract this and place it in the va,
	 * zeroing the corresponding bits in the space register.
	 * No-op on 32-bit kernels. */
	.macro		space_adjust	spc,va,tmp
#ifdef CONFIG_64BIT
	extrd,u		\spc,63,SPACEID_SHIFT,\tmp	/* low spc bits */
	depd		%r0,63,SPACEID_SHIFT,\spc	/* clear them in spc */
	depd		\tmp,31,SPACEID_SHIFT,\va	/* move into upper va */
#endif
	.endm
403
	.import		swapper_pg_dir,code

	/* Get the pgd.  For faults on space zero (kernel space), this
	 * is simply swapper_pg_dir.  For user space faults, the
	 * pgd is stored in %cr25 */
	.macro		get_pgd		spc,reg
	ldil		L%PA(swapper_pg_dir),\reg
	ldo		R%PA(swapper_pg_dir)(\reg),\reg
	or,COND(=)	%r0,\spc,%r0	/* spc == 0: nullify the mfctl,
					 * keeping swapper_pg_dir */
	mfctl		%cr25,\reg
	.endm
415
	/*
		space_check(spc,tmp,fault)

		spc - The space we saw the fault with.
		tmp - The place to store the current space.
		fault - Function to call on failure.

		Only allow faults on different spaces from the
		currently active one if we're the kernel

	*/
	.macro		space_check	spc,tmp,fault
	mfsp		%sr7,\tmp
	or,COND(<>)	%r0,\spc,%r0	/* user may execute gateway page
					 * as kernel, so defeat the space
					 * check if it is */
	copy		\spc,\tmp	/* nullified unless spc == 0 */
	or,COND(=)	%r0,\tmp,%r0	/* nullify if executing as kernel */
	cmpb,COND(<>),n	\tmp,\spc,\fault
	.endm
436
	/* Look up a PTE in a 2-Level scheme (faulting at each
	 * level if the entry isn't present
	 *
	 * NOTE: we use ldw even for LP64, since the short pointers
	 * can address up to 1TB
	 *
	 * Clobbers %r9 as a scratch register.
	 */
	.macro		L2_ptep	pmd,pte,index,va,fault
#if PT_NLEVELS == 3
	EXTR		\va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
#else
	EXTR		\va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
#endif
	DEP             %r0,31,PAGE_SHIFT,\pmd  /* clear offset */
	copy		%r0,\pte		/* default pte = 0 */
	ldw,s		\index(\pmd),\pmd	/* fetch pmd entry */
	bb,>=,n		\pmd,_PxD_PRESENT_BIT,\fault
	DEP		%r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
	copy		\pmd,%r9
	SHLREG		%r9,PxD_VALUE_SHIFT,\pmd	/* pmd -> page table addr */
	EXTR		\va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
	DEP		%r0,31,PAGE_SHIFT,\pmd  /* clear offset */
	shladd		\index,BITS_PER_PTE_ENTRY,\pmd,\pmd
	LDREG		%r0(\pmd),\pte		/* pmd is now pte */
	bb,>=,n		\pte,_PAGE_PRESENT_BIT,\fault
	.endm
462
	/* Look up PTE in a 3-Level scheme.
	 *
	 * Here we implement a Hybrid L2/L3 scheme: we allocate the
	 * first pmd adjacent to the pgd.  This means that we can
	 * subtract a constant offset to get to it.  The pmd and pgd
	 * sizes are arranged so that a single pmd covers 4GB (giving
	 * a full LP64 process access to 8TB) so our lookups are
	 * effectively L2 for the first 4GB of the kernel (i.e. for
	 * all ILP32 processes and all the kernel for machines with
	 * under 4GB of memory)
	 *
	 * The repeated extrd,u,*= tests nullify the following insn
	 * whenever the upper va bits are zero, so low addresses skip
	 * the pgd walk; the final *<> test makes only low addresses
	 * take the constant pgd->pmd offset instead. */
	.macro		L3_ptep pgd,pte,index,va,fault
#if PT_NLEVELS == 3 /* we might have a 2-Level scheme, e.g. with 16kb page size */
	extrd,u		\va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
	copy		%r0,\pte
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	ldw,s		\index(\pgd),\pgd
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	bb,>=,n		\pgd,_PxD_PRESENT_BIT,\fault
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	shld		\pgd,PxD_VALUE_SHIFT,\index
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	copy		\index,\pgd
	extrd,u,*<>	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	ldo		ASM_PGD_PMD_OFFSET(\pgd),\pgd
#endif
	L2_ptep		\pgd,\pte,\index,\va,\fault
	.endm
490
	/* Set the _PAGE_ACCESSED bit of the PTE.  Be clever and
	 * don't needlessly dirty the cache line if it was already set:
	 * the and,COND(<>) nullifies the store when the bit is set. */
	.macro		update_ptep	ptep,pte,tmp,tmp1
	ldi		_PAGE_ACCESSED,\tmp1
	or		\tmp1,\pte,\tmp
	and,COND(<>)	\tmp1,\pte,%r0
	STREG		\tmp,0(\ptep)
	.endm

	/* Set the dirty bit (and accessed bit).  No need to be
	 * clever, this is only used from the dirty fault */
	.macro		update_dirty	ptep,pte,tmp
	ldi		_PAGE_ACCESSED|_PAGE_DIRTY,\tmp
	or		\tmp,\pte,\pte
	STREG		\pte,0(\ptep)
	.endm
507
	/* bitshift difference between a PFN (based on kernel's PAGE_SIZE)
	 * to a CPU TLB 4k PFN (4k => 12 bits to shift) */
	#define PAGE_ADD_SHIFT  (PAGE_SHIFT-12)

	/* Drop prot bits and convert to page addr for iitlbt and idtlbt,
	 * then deposit the default page-size encoding in the low bits. */
	.macro		convert_for_tlb_insert20 pte
	extrd,u		\pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
				64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
	depdi		_PAGE_SIZE_ENCODING_DEFAULT,63,\
				(63-58)+PAGE_ADD_SHIFT,\pte
	.endm
519
	/* Convert the pte and prot to tlb insertion values.  How
	 * this happens is quite subtle, read below */
	.macro		make_insert_tlb	spc,pte,prot
	space_to_prot   \spc \prot        /* create prot id from space */
	/* The following is the real subtlety.  This is depositing
	 * T <-> _PAGE_REFTRAP
	 * D <-> _PAGE_DIRTY
	 * B <-> _PAGE_DMB (memory break)
	 *
	 * Then incredible subtlety: The access rights are
	 * _PAGE_GATEWAY _PAGE_EXEC _PAGE_READ
	 * See 3-14 of the parisc 2.0 manual
	 *
	 * Finally, _PAGE_READ goes in the top bit of PL1 (so we
	 * trigger an access rights trap in user space if the user
	 * tries to read an unreadable page */
	depd            \pte,8,7,\prot

	/* PAGE_USER indicates the page can be read with user privileges,
	 * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1
	 * contains _PAGE_READ */
	extrd,u,*=      \pte,_PAGE_USER_BIT+32,1,%r0
	depdi		7,11,3,\prot
	/* If we're a gateway page, drop PL2 back to zero for promotion
	 * to kernel privilege (so we can execute the page as kernel).
	 * Any privilege promotion page always denies read and write */
	extrd,u,*= 	\pte,_PAGE_GATEWAY_BIT+32,1,%r0
	depd		%r0,11,2,\prot	/* If Gateway, Set PL2 to 0 */

	/* Enforce uncacheable pages.
	 * This should ONLY be used for MMIO on PA 2.0 machines.
	 * Memory/DMA is cache coherent on all PA2.0 machines we support
	 * (that means T-class is NOT supported) and the memory controllers
	 * on most of those machines only handles cache transactions.
	 */
	extrd,u,*=	\pte,_PAGE_NO_CACHE_BIT+32,1,%r0
	depdi		1,12,1,\prot

	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
	convert_for_tlb_insert20 \pte
	.endm
561
	/* Identical macro to make_insert_tlb above, except it
	 * makes the tlb entry for the differently formatted pa11
	 * insertion instructions.  Each extru,= nullifies the
	 * following depi unless the tested pte bit is clear/set
	 * as required. */
	.macro		make_insert_tlb_11	spc,pte,prot
	zdep		\spc,30,15,\prot	/* prot id from space id */
	dep		\pte,8,7,\prot		/* T/D/B + access rights */
	extru,=		\pte,_PAGE_NO_CACHE_BIT,1,%r0
	depi		1,12,1,\prot		/* mark uncacheable */
	extru,=         \pte,_PAGE_USER_BIT,1,%r0
	depi		7,11,3,\prot   /* Set for user space (1 rsvd for read) */
	extru,= 	\pte,_PAGE_GATEWAY_BIT,1,%r0
	depi		0,11,2,\prot	/* If Gateway, Set PL2 to 0 */

	/* Get rid of prot bits and convert to page addr for iitlba */

	depi		0,31,ASM_PFN_PTE_SHIFT,\pte
	SHRREG		\pte,(ASM_PFN_PTE_SHIFT-(31-26)),\pte
	.endm
580
	/* This is for ILP32 PA2.0 only.  The TLB insertion needs
	 * to extend into I/O space if the address is 0xfXXXXXXX
	 * so we extend the f's into the top word of the pte in
	 * this case.  The addi,<> nullifies the sign-extension
	 * unless the extracted nibble was all ones (-1). */
	.macro		f_extend	pte,tmp
	extrd,s		\pte,42,4,\tmp
	addi,<>		1,\tmp,%r0	/* skip next insn unless tmp == -1 */
	extrd,s		\pte,63,25,\pte
	.endm
590
	/* The alias region is an 8MB aligned 16MB to do clear and
	 * copy user pages at addresses congruent with the user
	 * virtual address.
	 *
	 * To use the alias page, you set %r26 up with the to TLB
	 * entry (identifying the physical page) and %r23 up with
	 * the from tlb entry (or nothing if only a to entry---for
	 * clear_user_page_asm).
	 *
	 * Branches to \fault unless spc == 0 (kernel) and the va lies
	 * inside the tmpalias window; otherwise builds a fixed
	 * DIRTY|WRITE|READ prot and picks the pte from %r23 or %r26. */
	.macro		do_alias	spc,tmp,tmp1,va,pte,prot,fault
	cmpib,COND(<>),n 0,\spc,\fault
	ldil		L%(TMPALIAS_MAP_START),\tmp
#if defined(CONFIG_64BIT) && (TMPALIAS_MAP_START >= 0x80000000)
	/* on LP64, ldi will sign extend into the upper 32 bits,
	 * which is behaviour we don't want */
	depdi		0,31,32,\tmp
#endif
	copy		\va,\tmp1
	DEPI		0,31,23,\tmp1	/* mask off the low 23 bits of va */
	cmpb,COND(<>),n	\tmp,\tmp1,\fault
	ldi		(_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ),\prot
	depd,z		\prot,8,7,\prot
	/*
	 * OK, it is in the temp alias region, check whether "from" or "to".
	 * Check "subtle" note in pacache.S re: r23/r26.
	 */
#ifdef CONFIG_64BIT
	extrd,u,*=	\va,41,1,%r0
#else
	extrw,u,=	\va,9,1,%r0
#endif
	or,COND(tr)	%r23,%r0,\pte	/* "from" page; ,tr nullifies next */
	or		%r26,%r0,\pte	/* "to" page */
	.endm
624
625
626	/*
627	 * Align fault_vector_20 on 4K boundary so that both
628	 * fault_vector_11 and fault_vector_20 are on the
629	 * same page. This is only necessary as long as we
630	 * write protect the kernel text, which we may stop
631	 * doing once we use large page translations to cover
632	 * the static part of the kernel address space.
633	 */
634
635	.text
636
637	.align	PAGE_SIZE
638
/* PA 2.0 interruption vector table: one 32-byte stub per interruption
 * number (1 = HPMC, 4 = external interrupt, 6 = itlb miss, 15 = dtlb
 * miss, 17 = non-access dtlb miss, 20 = dirty-bit trap; everything
 * else goes through the default "def" handler). */
ENTRY(fault_vector_20)
	/* First vector is invalid (0) */
	.ascii	"cows can fly"
	.byte 0
	.align 32

	hpmc		 1
	def		 2
	def		 3
	extint		 4
	def		 5
	itlb_20		 6
	def		 7
	def		 8
	def              9
	def		10
	def		11
	def		12
	def		13
	def		14
	dtlb_20		15
#if 0
	naitlb_20	16
#else
	def             16
#endif
	nadtlb_20	17
	def		18
	def		19
	dbit_20		20
	def		21
	def		22
	def		23
	def		24
	def		25
	def		26
	def		27
	def		28
	def		29
	def		30
	def		31
END(fault_vector_20)
681
#ifndef CONFIG_64BIT

	.align 2048

/* PA 1.1 interruption vector table; same layout as fault_vector_20
 * but using the 32-bit (parisc 1.1) handler macros. */
ENTRY(fault_vector_11)
	/* First vector is invalid (0) */
	.ascii	"cows can fly"
	.byte 0
	.align 32

	hpmc		 1
	def		 2
	def		 3
	extint		 4
	def		 5
	itlb_11		 6
	def		 7
	def		 8
	def              9
	def		10
	def		11
	def		12
	def		13
	def		14
	dtlb_11		15
#if 0
	naitlb_11	16
#else
	def             16
#endif
	nadtlb_11	17
	def		18
	def		19
	dbit_11		20
	def		21
	def		22
	def		23
	def		24
	def		25
	def		26
	def		27
	def		28
	def		29
	def		30
	def		31
END(fault_vector_11)

#endif
730
731	.import		handle_interruption,code
732	.import		do_cpu_irq_mask,code
733
734	/*
735	 * r26 = function to be called
736	 * r25 = argument to pass in
737	 * r24 = flags for do_fork()
738	 *
739	 * Kernel threads don't ever return, so they don't need
740	 * a true register context. We just save away the arguments
741	 * for copy_thread/ret_ to properly set up the child.
742	 */
743
#define CLONE_VM 0x100	/* Must agree with <linux/sched.h> */
#define CLONE_UNTRACED 0x00800000

	.import do_fork
/* Create a kernel thread: stash the thread function (%r26) and its
 * argument (%r25) in a pt_regs frame on the stack and call do_fork
 * with CLONE_VM|CLONE_UNTRACED or'd into the caller's flags (%r24). */
ENTRY(__kernel_thread)
	STREG	%r2, -RP_OFFSET(%r30)

	copy	%r30, %r1		/* %r1 = base of new pt_regs frame */
	ldo	PT_SZ_ALGN(%r30),%r30
#ifdef CONFIG_64BIT
	/* Yo, function pointers in wide mode are little structs... -PB */
	ldd	24(%r26), %r2
	STREG	%r2, PT_GR27(%r1)	/* Store child's %dp */
	ldd	16(%r26), %r26

	STREG	%r22, PT_GR22(%r1)	/* save r22 (arg5) */
	copy	%r0, %r22		/* user_tid */
#endif
	STREG	%r26, PT_GR26(%r1)  /* Store function & argument for child */
	STREG	%r25, PT_GR25(%r1)
	ldil	L%CLONE_UNTRACED, %r26
	ldo	CLONE_VM(%r26), %r26   /* Force CLONE_VM since only init_mm */
	or	%r26, %r24, %r26      /* will have kernel mappings.	 */
	ldi	1, %r25			/* stack_start, signals kernel thread */
	stw	%r0, -52(%r30)	     	/* user_tid */
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif
	BL	do_fork, %r2
	copy	%r1, %r24		/* pt_regs */

	/* Parent Returns here */

	LDREG	-PT_SZ_ALGN-RP_OFFSET(%r30), %r2
	ldo	-PT_SZ_ALGN(%r30), %r30
	bv	%r0(%r2)
	nop
ENDPROC(__kernel_thread)
782
783	/*
784	 * Child Returns here
785	 *
786	 * copy_thread moved args from temp save area set up above
787	 * into task save area.
788	 */
789
/* Entry point for the child of __kernel_thread: fetch the function
 * and argument copy_thread stashed in the task's pt_regs, call the
 * function, then exit with status 0 if it ever returns. */
ENTRY(ret_from_kernel_thread)

	/* Call schedule_tail first though */
	BL	schedule_tail, %r2
	nop

	LDREG	TI_TASK-THREAD_SZ_ALGN(%r30), %r1
	LDREG	TASK_PT_GR25(%r1), %r26	/* thread fn argument */
#ifdef CONFIG_64BIT
	LDREG	TASK_PT_GR27(%r1), %r27	/* child's %dp */
	LDREG	TASK_PT_GR22(%r1), %r22
#endif
	LDREG	TASK_PT_GR26(%r1), %r1	/* thread function */
	ble	0(%sr7, %r1)		/* call it... */
	copy	%r31, %r2		/* ...with return link in %r2 */

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
	loadgp				/* Thread could have been in a module */
#endif
#ifndef CONFIG_64BIT
	b	sys_exit
#else
	load32	sys_exit, %r1
	bv	%r0(%r1)
#endif
	ldi	0, %r26			/* delay slot: exit code 0 */
ENDPROC(ret_from_kernel_thread)
818
	.import	sys_execve, code
/* Kernel-side execve: push a pt_regs frame, save the three execve
 * args into it and call sys_execve.  On success we leave via
 * intr_return; on failure we restore the old frame and return to
 * the caller. */
ENTRY(__execve)
	copy	%r2, %r15		/* save return pointer */
	copy	%r30, %r16		/* %r16 = pt_regs frame base */
	ldo	PT_SZ_ALGN(%r30), %r30
	STREG	%r26, PT_GR26(%r16)
	STREG	%r25, PT_GR25(%r16)
	STREG	%r24, PT_GR24(%r16)
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif
	BL	sys_execve, %r2
	copy	%r16, %r26		/* delay slot: arg0 = pt_regs */

	cmpib,=,n 0,%r28,intr_return    /* forward */

	/* yes, this will trap and die. */
	copy	%r15, %r2
	copy	%r16, %r30
	bv	%r0(%r2)
	nop
ENDPROC(__execve)
841
842
843	/*
844	 * struct task_struct *_switch_to(struct task_struct *prev,
845	 *	struct task_struct *next)
846	 *
847	 * switch kernel stacks and return prev */
/* struct task_struct *_switch_to(prev = %r26, next = %r25)
 * Save prev's callee-saved state and kernel sp/pc, load next's,
 * point cr30 at next's thread_info and resume next at its saved
 * KPC.  Returns prev in %r28 (via _switch_to_ret in next's
 * context). */
ENTRY(_switch_to)
	STREG	 %r2, -RP_OFFSET(%r30)

	callee_save_float
	callee_save

	load32	_switch_to_ret, %r2

	STREG	%r2, TASK_PT_KPC(%r26)	/* prev resumes at _switch_to_ret */
	LDREG	TASK_PT_KPC(%r25), %r2

	STREG	%r30, TASK_PT_KSP(%r26)
	LDREG	TASK_PT_KSP(%r25), %r30
	LDREG	TASK_THREAD_INFO(%r25), %r25
	bv	%r0(%r2)
	mtctl   %r25,%cr30		/* delay slot: cr30 = next thread_info */

_switch_to_ret:
	mtctl	%r0, %cr0		/* Needed for single stepping */
	callee_rest
	callee_rest_float

	LDREG	-RP_OFFSET(%r30), %r2
	bv	%r0(%r2)
	copy	%r26, %r28		/* delay slot: return prev */
ENDPROC(_switch_to)
874
	/*
	 * Common rfi return path for interruptions, kernel execve, and
	 * sys_rt_sigreturn (sometimes).  The sys_rt_sigreturn syscall will
	 * return via this path if the signal was received when the process
	 * was running; if the process was blocked on a syscall then the
	 * normal syscall_exit path is used.  All syscalls for traced
	 * processes exit via intr_restore.
	 *
	 * XXX If any syscalls that change a process's space id ever exit
	 * this way, then we will need to copy %sr3 into PT_SR[3..7], and
	 * adjust IASQ[0..1].
	 *
	 */
888
	.align	PAGE_SIZE

ENTRY(syscall_exit_rfi)
	mfctl   %cr30,%r16
	LDREG	TI_TASK(%r16), %r16	/* thread_info -> task_struct */
	ldo	TASK_REGS(%r16),%r16	/* %r16 = task's pt_regs */
	/* Force iaoq to userspace, as the user has had access to our current
	 * context via sigcontext. Also Filter the PSW for the same reason.
	 */
	LDREG	PT_IAOQ0(%r16),%r19
	depi	3,31,2,%r19		/* force privilege level 3 (user) */
	STREG	%r19,PT_IAOQ0(%r16)
	LDREG	PT_IAOQ1(%r16),%r19
	depi	3,31,2,%r19
	STREG	%r19,PT_IAOQ1(%r16)
	LDREG   PT_PSW(%r16),%r19
	load32	USER_PSW_MASK,%r1
#ifdef CONFIG_64BIT
	load32	USER_PSW_HI_MASK,%r20
	depd    %r20,31,32,%r1
#endif
	and     %r19,%r1,%r19 /* Mask out bits that user shouldn't play with */
	load32	USER_PSW,%r1
	or      %r19,%r1,%r19 /* Make sure default USER_PSW bits are set */
	STREG   %r19,PT_PSW(%r16)

	/*
	 * If we aren't being traced, we never saved space registers
	 * (we don't store them in the sigcontext), so set them
	 * to "proper" values now (otherwise we'll wind up restoring
	 * whatever was last stored in the task structure, which might
	 * be inconsistent if an interrupt occurred while on the gateway
	 * page). Note that we may be "trashing" values the user put in
	 * them, but we don't support the user changing them.
	 */

	STREG   %r0,PT_SR2(%r16)	/* sr2 stays 0 (gateway space) */
	mfsp    %sr3,%r19		/* user space id saved by virt_map */
	STREG   %r19,PT_SR0(%r16)
	STREG   %r19,PT_SR1(%r16)
	STREG   %r19,PT_SR3(%r16)
	STREG   %r19,PT_SR4(%r16)
	STREG   %r19,PT_SR5(%r16)
	STREG   %r19,PT_SR6(%r16)
	STREG   %r19,PT_SR7(%r16)
934
intr_return:
	/* NOTE: Need to enable interrupts in case we schedule. */
	ssm     PSW_SM_I, %r0

intr_check_resched:

	/* check for reschedule */
	mfctl   %cr30,%r1
	LDREG   TI_FLAGS(%r1),%r19	/* sched.h: TIF_NEED_RESCHED */
	bb,<,n	%r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */

	.import do_notify_resume,code
intr_check_sig:
	/* As above */
	mfctl   %cr30,%r1
	LDREG	TI_FLAGS(%r1),%r19
	ldi	(_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK|_TIF_NOTIFY_RESUME), %r20
	and,COND(<>)	%r19, %r20, %r0	/* any work bits set? nullify branch */
	b,n	intr_restore	/* skip past if we've nothing to do */

	/* This check is critical to having LWS
	 * working. The IASQ is zero on the gateway
	 * page and we cannot deliver any signals until
	 * we get off the gateway page.
	 *
	 * Only do signals if we are returning to user space
	 */
	LDREG	PT_IASQ0(%r16), %r20
	cmpib,COND(=),n 0,%r20,intr_restore /* backward */
	LDREG	PT_IASQ1(%r16), %r20
	cmpib,COND(=),n 0,%r20,intr_restore /* backward */

	copy	%r0, %r25			/* long in_syscall = 0 */
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29			/* Reference param save area */
#endif

	BL	do_notify_resume,%r2
	copy	%r16, %r26			/* struct pt_regs *regs */

	b,n	intr_check_sig			/* re-check for more work */
976
intr_restore:
	/* Restore everything from the pt_regs pointed to by %r16 and
	 * return with rfi to the interrupted context. */
	copy            %r16,%r29
	ldo             PT_FR31(%r29),%r1
	rest_fp         %r1
	rest_general    %r29

	/* inverse of virt_map */
	pcxt_ssm_bug
	rsm             PSW_SM_QUIET,%r0	/* prepare for rfi */
	tophys_r1       %r29

	/* Restore space id's and special cr's from PT_REGS
	 * structure pointed to by r29
	 */
	rest_specials	%r29

	/* IMPORTANT: rest_stack restores r29 last (we are using it)!
	 * It also restores r1 and r30.
	 */
	rest_stack

	rfi
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

#ifndef CONFIG_PREEMPT
# define intr_do_preempt	intr_restore
#endif /* !CONFIG_PREEMPT */
1011
	.import schedule,code
intr_do_resched:
	/* Only call schedule on return to userspace. If we're returning
	 * to kernel space, we may schedule if CONFIG_PREEMPT, otherwise
	 * we jump back to intr_restore.
	 */
	LDREG	PT_IASQ0(%r16), %r20
	cmpib,COND(=)	0, %r20, intr_do_preempt
	nop
	LDREG	PT_IASQ1(%r16), %r20
	cmpib,COND(=)	0, %r20, intr_do_preempt
	nop

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif

	/* call schedule, returning to intr_check_sig */
	ldil	L%intr_check_sig, %r2
#ifndef CONFIG_64BIT
	b	schedule
#else
	load32	schedule, %r20
	bv	%r0(%r20)
#endif
	ldo	R%intr_check_sig(%r2), %r2	/* delay slot */

	/* preempt the current task on returning to kernel
	 * mode from an interrupt, iff need_resched is set,
	 * and preempt_count is 0. otherwise, we continue on
	 * our merry way back to the current running task.
	 */
#ifdef CONFIG_PREEMPT
	.import preempt_schedule_irq,code
intr_do_preempt:
	rsm	PSW_SM_I, %r0		/* disable interrupts */

	/* current_thread_info()->preempt_count */
	mfctl	%cr30, %r1
	LDREG	TI_PRE_COUNT(%r1), %r19
	cmpib,COND(<>)	0, %r19, intr_restore	/* if preempt_count > 0 */
	nop				/* prev insn branched backwards */

	/* check if we interrupted a critical path */
	LDREG	PT_PSW(%r16), %r20
	bb,<,n	%r20, 31 - PSW_SM_I, intr_restore
	nop

	BL	preempt_schedule_irq, %r2
	nop

	b,n	intr_restore		/* ssm PSW_SM_I done by intr_restore */
#endif /* CONFIG_PREEMPT */
1064
	/*
	 * External interrupts.  %r16 holds sr7 (loaded in the extint
	 * macro's delay slot): zero means we were already on a kernel
	 * stack, non-zero means we must switch to one via cr30.
	 */

intr_extint:
	cmpib,COND(=),n 0,%r16,1f

	get_stack_use_cr30
	b,n 2f

1:
	get_stack_use_r30
2:
	save_specials	%r29
	virt_map
	save_general	%r29

	ldo	PT_FR0(%r29), %r24
	save_fp	%r24
	
	loadgp

	copy	%r29, %r26	/* arg0 is pt_regs */
	copy	%r29, %r16	/* save pt_regs */

	ldil	L%intr_return, %r2

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29	/* Reference param save area */
#endif

	b	do_cpu_irq_mask
	ldo	R%intr_return(%r2), %r2	/* return to intr_return, not here */
ENDPROC(syscall_exit_rfi)
1099
1100
1101	/* Generic interruptions (illegal insn, unaligned, page fault, etc) */
1102
/* Generic interruption entry: save full context and call
 * traps.c:handle_interruption(code=%r8/%r26, regs).  Skips the
 * isr/ior save for itlb misses (trap 6), which don't set them. */
ENTRY(intr_save)		/* for os_hpmc */
	mfsp    %sr7,%r16
	cmpib,COND(=),n 0,%r16,1f	/* sr7 == 0: already on kernel stack */
	get_stack_use_cr30
	b	2f
	copy    %r8,%r26	/* delay slot: carry trap code over rfir */

1:
	get_stack_use_r30
	copy    %r8,%r26

2:
	save_specials	%r29

	/* If this trap is an itlb miss, skip saving/adjusting isr/ior */

	/*
	 * FIXME: 1) Use a #define for the hardwired "6" below (and in
	 *           traps.c.
	 *        2) Once we start executing code above 4 Gb, we need
	 *           to adjust iasq/iaoq here in the same way we
	 *           adjust isr/ior below.
	 */

	cmpib,COND(=),n        6,%r26,skip_save_ior


	mfctl           %cr20, %r16 /* isr */
	nop		/* serialize mfctl on PA 2.0 to avoid 4 cycle penalty */
	mfctl           %cr21, %r17 /* ior */


#ifdef CONFIG_64BIT
	/*
	 * If the interrupted code was running with W bit off (32 bit),
	 * clear the b bits (bits 0 & 1) in the ior.
	 * save_specials left ipsw value in r8 for us to test.
	 */
	extrd,u,*<>     %r8,PSW_W_BIT,1,%r0
	depdi           0,1,2,%r17

	/*
	 * FIXME: This code has hardwired assumptions about the split
	 *        between space bits and offset bits. This will change
	 *        when we allow alternate page sizes.
	 */

	/* adjust isr/ior. */
	extrd,u         %r16,63,SPACEID_SHIFT,%r1	/* get high bits from isr for ior */
	depd            %r1,31,SPACEID_SHIFT,%r17	/* deposit them into ior */
	depdi           0,63,SPACEID_SHIFT,%r16		/* clear them from isr */
#endif
	STREG           %r16, PT_ISR(%r29)
	STREG           %r17, PT_IOR(%r29)


skip_save_ior:
	virt_map
	save_general	%r29

	ldo		PT_FR0(%r29), %r25
	save_fp		%r25

	loadgp

	copy		%r29, %r25	/* arg1 is pt_regs */
#ifdef CONFIG_64BIT
	ldo		-16(%r30),%r29	/* Reference param save area */
#endif

	ldil		L%intr_check_sig, %r2
	copy		%r25, %r16	/* save pt_regs */

	b		handle_interruption
	ldo		R%intr_check_sig(%r2), %r2	/* return addr */
ENDPROC(intr_save)
1179
1180
1181	/*
1182	 * Note for all tlb miss handlers:
1183	 *
1184	 * cr24 contains a pointer to the kernel address space
1185	 * page directory.
1186	 *
1187	 * cr25 contains a pointer to the current user address
1188	 * space page directory.
1189	 *
1190	 * sr3 will contain the space id of the user address space
1191	 * of the current running thread while that thread is
1192	 * running in the kernel.
1193	 */
1194
1195	/*
1196	 * register number allocations.  Note that these are all
1197	 * in the shadowed registers
1198	 */
1199
	t0 = r1		/* temporary register 0 */
	va = r8		/* virtual address for which the trap occurred */
	t1 = r9		/* temporary register 1 */
	pte  = r16	/* pte/phys page # */
	prot = r17	/* prot bits */
	spc  = r24	/* space for which the trap occurred */
	ptp = r25	/* page directory/page table pointer */
1207
1208#ifdef CONFIG_64BIT
1209
dtlb_miss_20w:
	/*
	 * 64-bit (wide) data TLB miss handler: walk the 3-level page
	 * table and insert the translation with a single idtlbt.
	 * Uses only shadowed registers (see aliases above), so no state
	 * needs to be saved.
	 */
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dtlb_fault	/* wrong space -> full fault path */

	L3_ptep		ptp,pte,t0,va,dtlb_check_alias_20w	/* no pte -> alias check */

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot	/* build the prot bits / insert format */

	idtlbt          pte,prot

	rfir
	nop

dtlb_check_alias_20w:
	/* Not a normal mapping: try the kernel tmpalias window, else fault. */
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault

	idtlbt          pte,prot

	rfir
	nop
1233
nadtlb_miss_20w:
	/*
	 * 64-bit non-access data TLB miss handler (fdc/fic/pdc/lpa/probe
	 * class instructions).  Same walk as dtlb_miss_20w, but a missing
	 * pte leads to the flush-only / emulation paths instead of a fault.
	 */
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,nadtlb_fault

	L3_ptep		ptp,pte,t0,va,nadtlb_check_flush_20w

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	idtlbt          pte,prot

	rfir
	nop

nadtlb_check_flush_20w:
	bb,>=,n          pte,_PAGE_FLUSH_BIT,nadtlb_emulate	/* flush bit clear -> emulate insn */

	/* Insert a "flush only" translation */

	depdi,z         7,7,3,prot
	depdi           1,10,1,prot

	/* Drop prot bits from pte and convert to page addr for idtlbt */
	convert_for_tlb_insert20 pte

	idtlbt          pte,prot

	rfir
	nop
1265
1266#else
1267
dtlb_miss_11:
	/*
	 * PA 1.1 data TLB miss handler: two-level page table walk,
	 * then a split idtlba/idtlbp insertion done through %sr1
	 * (PA 1.1 has no combined idtlbt).
	 */
	get_pgd		spc,ptp

	space_check	spc,t0,dtlb_fault

	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_11

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t0  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t0, %sr1	/* Restore sr1 */

	rfir
	nop

dtlb_check_alias_11:

	/* Check to see if fault is in the temporary alias region */

	cmpib,<>,n      0,spc,dtlb_fault /* forward */
	ldil            L%(TMPALIAS_MAP_START),t0
	copy            va,t1
	depwi           0,31,23,t1	/* mask va down to its 8MB region base */
	cmpb,<>,n       t0,t1,dtlb_fault /* forward */
	ldi             (_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ),prot
	depw,z          prot,8,7,prot

	/*
	 * OK, it is in the temp alias region, check whether "from" or "to".
	 * Check "subtle" note in pacache.S re: r23/r26.
	 */

	extrw,u,=       va,9,1,r0	/* nullify next insn if "to" half */
	or,tr           %r23,%r0,pte    /* If "from" use "from" page */
	or              %r26,%r0,pte    /* else "to", use "to" page  */

	idtlba          pte,(va)
	idtlbp          prot,(va)

	rfir
	nop
1316
nadtlb_miss_11:
	/*
	 * PA 1.1 non-access data TLB miss handler; see nadtlb_emulate
	 * below for the class of instructions that get here.
	 */
	get_pgd		spc,ptp

	space_check	spc,t0,nadtlb_fault

	L2_ptep		ptp,pte,t0,va,nadtlb_check_flush_11

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot


	mfsp		%sr1,t0  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t0, %sr1	/* Restore sr1 */

	rfir
	nop

nadtlb_check_flush_11:
	bb,>=,n          pte,_PAGE_FLUSH_BIT,nadtlb_emulate	/* flush bit clear -> emulate insn */

	/* Insert a "flush only" translation */

	zdepi           7,7,3,prot
	depi            1,10,1,prot

	/* Get rid of prot bits and convert to page addr for idtlba */

	depi		0,31,ASM_PFN_PTE_SHIFT,pte
	SHRREG		pte,(ASM_PFN_PTE_SHIFT-(31-26)),pte

	mfsp		%sr1,t0  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t0, %sr1	/* Restore sr1 */

	rfir
	nop
1363
dtlb_miss_20:
	/*
	 * PA 2.0 narrow-mode data TLB miss handler: two-level walk,
	 * f_extend widens the pte before the combined idtlbt insert.
	 */
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dtlb_fault

	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_20

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	f_extend	pte,t0

	idtlbt          pte,prot

	rfir
	nop

dtlb_check_alias_20:
	/* Not a normal mapping: try the kernel tmpalias window, else fault. */
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault

	idtlbt          pte,prot

	rfir
	nop
1389
nadtlb_miss_20:
	/*
	 * PA 2.0 narrow-mode non-access data TLB miss handler
	 * (fdc/fic/pdc/lpa/probe class instructions).  A missing pte
	 * leads to the flush-only / emulation paths, not a fault.
	 */
	get_pgd		spc,ptp

	space_check	spc,t0,nadtlb_fault

	L2_ptep		ptp,pte,t0,va,nadtlb_check_flush_20

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	f_extend	pte,t0

	idtlbt          pte,prot

	rfir
	nop

nadtlb_check_flush_20:
	bb,>=,n          pte,_PAGE_FLUSH_BIT,nadtlb_emulate	/* flush bit clear -> emulate insn */

	/* Insert a "flush only" translation */

	depdi,z         7,7,3,prot
	depdi           1,10,1,prot

	/* Drop prot bits from pte and convert to page addr for idtlbt */
	convert_for_tlb_insert20 pte

	idtlbt          pte,prot

	rfir
	nop
1423#endif
1424
nadtlb_emulate:

	/*
	 * Non access misses can be caused by fdc,fic,pdc,lpa,probe and
	 * probei instructions. We don't want to fault for these
	 * instructions (not only does it not make sense, it can cause
	 * deadlocks, since some flushes are done with the mmap
	 * semaphore held). If the translation doesn't exist, we can't
	 * insert a translation, so have to emulate the side effects
	 * of the instruction. Since we don't insert a translation
	 * we can get a lot of faults during a flush loop, so it makes
	 * sense to try to do it here with minimum overhead. We only
	 * emulate fdc,fic,pdc,probew,prober instructions whose base
	 * and index registers are not shadowed. We defer everything
	 * else to the "slow" path.
	 */

	mfctl           %cr19,%r9 /* Get iir */

	/* PA 2.0 Arch Ref. Book pg 382 has a good description of the insn bits.
	   Checks for fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw */

	/* Checks for fdc,fdce,pdc,"fic,4f" only */
	ldi             0x280,%r16
	and             %r9,%r16,%r17
	cmpb,<>,n       %r16,%r17,nadtlb_probe_check
	bb,>=,n         %r9,26,nadtlb_nullify  /* m bit not set, just nullify */
	BL		get_register,%r25	/* get_register returns value in %r1, -1 if shadowed */
	extrw,u         %r9,15,5,%r8           /* Get index register # */
	cmpib,COND(=),n        -1,%r1,nadtlb_fault    /* have to use slow path */
	copy            %r1,%r24
	BL		get_register,%r25
	extrw,u         %r9,10,5,%r8           /* Get base register # */
	cmpib,COND(=),n        -1,%r1,nadtlb_fault    /* have to use slow path */
	BL		set_register,%r25
	add,l           %r1,%r24,%r1           /* doesn't affect c/b bits */

nadtlb_nullify:
	/* Set the PSW N bit so the faulting instruction is nullified on rfir. */
	mfctl           %ipsw,%r8
	ldil            L%PSW_N,%r9
	or              %r8,%r9,%r8            /* Set PSW_N */
	mtctl           %r8,%ipsw

	rfir
	nop

	/*
		When there is no translation for the probe address then we
		must nullify the insn and return zero in the target register.
		This will indicate to the calling code that it does not have
		write/read privileges to this address.

		This should technically work for prober and probew in PA 1.1,
		and also probe,r and probe,w in PA 2.0

		WARNING: USE ONLY NON-SHADOW REGISTERS WITH PROBE INSN!
		THE SLOW-PATH EMULATION HAS NOT BEEN WRITTEN YET.

	*/
nadtlb_probe_check:
	ldi             0x80,%r16
	and             %r9,%r16,%r17
	cmpb,<>,n       %r16,%r17,nadtlb_fault /* Must be probe,[rw]*/
	BL              get_register,%r25      /* Find the target register */
	extrw,u         %r9,31,5,%r8           /* Get target register */
	cmpib,COND(=),n        -1,%r1,nadtlb_fault    /* have to use slow path */
	BL		set_register,%r25
	copy            %r0,%r1                /* Write zero to target register */
	b nadtlb_nullify                       /* Nullify return insn */
	nop
1495
1496
1497#ifdef CONFIG_64BIT
itlb_miss_20w:

	/*
	 * I miss is a little different, since we allow users to fault
	 * on the gateway page which is in the kernel address space.
	 */

	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,itlb_fault

	L3_ptep		ptp,pte,t0,va,itlb_fault	/* no alias path for I-side */

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	iitlbt          pte,prot

	rfir
	nop
1519
1520#else
1521
itlb_miss_11:
	/*
	 * PA 1.1 instruction TLB miss: split iitlba/iitlbp insertion
	 * performed through %sr1 (no combined insert on PA 1.1).
	 */
	get_pgd		spc,ptp

	space_check	spc,t0,itlb_fault

	L2_ptep		ptp,pte,t0,va,itlb_fault

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t0  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	iitlba		pte,(%sr1,va)
	iitlbp		prot,(%sr1,va)

	mtsp		t0, %sr1	/* Restore sr1 */

	rfir
	nop
1543
itlb_miss_20:
	/*
	 * PA 2.0 narrow-mode instruction TLB miss: f_extend widens the
	 * pte, then a combined iitlbt insert.
	 */
	get_pgd		spc,ptp

	space_check	spc,t0,itlb_fault

	L2_ptep		ptp,pte,t0,va,itlb_fault

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	f_extend	pte,t0

	iitlbt          pte,prot

	rfir
	nop
1561
1562#endif
1563
1564#ifdef CONFIG_64BIT
1565
dbit_trap_20w:
	/*
	 * 64-bit TLB dirty-bit trap: set the dirty bit in the pte and
	 * re-insert the translation.  On SMP the pte update is guarded
	 * by pa_dbit_lock (skipped for space 0, i.e. kernel faults).
	 */
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dbit_fault

	L3_ptep		ptp,pte,t0,va,dbit_fault

#ifdef CONFIG_SMP
	cmpib,COND(=),n        0,spc,dbit_nolock_20w	/* kernel space: no lock needed */
	load32		PA(pa_dbit_lock),t0

dbit_spin_20w:
	LDCW		0(t0),t1	/* ldcw: 0 means lock held, spin */
	cmpib,COND(=)         0,t1,dbit_spin_20w
	nop

dbit_nolock_20w:
#endif
	update_dirty	ptp,pte,t1

	make_insert_tlb	spc,pte,prot

	idtlbt          pte,prot
#ifdef CONFIG_SMP
	cmpib,COND(=),n        0,spc,dbit_nounlock_20w
	ldi             1,t1
	stw             t1,0(t0)	/* release pa_dbit_lock */

dbit_nounlock_20w:
#endif

	rfir
	nop
1599#else
1600
dbit_trap_11:
	/*
	 * PA 1.1 TLB dirty-bit trap: set the dirty bit in the pte and
	 * re-insert via idtlba/idtlbp through %sr1.  On SMP the pte
	 * update is guarded by pa_dbit_lock (skipped for space 0).
	 * Note: uses COND(=) for consistency with dbit_spin_20w; this
	 * branch only builds for !CONFIG_64BIT, where COND(x) == x.
	 */

	get_pgd		spc,ptp

	space_check	spc,t0,dbit_fault

	L2_ptep		ptp,pte,t0,va,dbit_fault

#ifdef CONFIG_SMP
	cmpib,COND(=),n        0,spc,dbit_nolock_11	/* kernel space: no lock needed */
	load32		PA(pa_dbit_lock),t0

dbit_spin_11:
	LDCW		0(t0),t1	/* ldcw: 0 means lock held, spin */
	cmpib,COND(=)   0,t1,dbit_spin_11
	nop

dbit_nolock_11:
#endif
	update_dirty	ptp,pte,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp            %sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp            t1, %sr1     /* Restore sr1 */
#ifdef CONFIG_SMP
	cmpib,COND(=),n        0,spc,dbit_nounlock_11
	ldi             1,t1
	stw             t1,0(t0)	/* release pa_dbit_lock */

dbit_nounlock_11:
#endif

	rfir
	nop
1641
dbit_trap_20:
	/*
	 * PA 2.0 narrow-mode TLB dirty-bit trap: set the dirty bit and
	 * re-insert with a combined idtlbt (pte widened by f_extend).
	 * Note: uses COND(=) for consistency with dbit_spin_20w; this
	 * branch only builds for !CONFIG_64BIT, where COND(x) == x.
	 */
	get_pgd		spc,ptp

	space_check	spc,t0,dbit_fault

	L2_ptep		ptp,pte,t0,va,dbit_fault

#ifdef CONFIG_SMP
	cmpib,COND(=),n        0,spc,dbit_nolock_20	/* kernel space: no lock needed */
	load32		PA(pa_dbit_lock),t0

dbit_spin_20:
	LDCW		0(t0),t1	/* ldcw: 0 means lock held, spin */
	cmpib,COND(=)   0,t1,dbit_spin_20
	nop

dbit_nolock_20:
#endif
	update_dirty	ptp,pte,t1

	make_insert_tlb	spc,pte,prot

	f_extend	pte,t1

	idtlbt          pte,prot

#ifdef CONFIG_SMP
	cmpib,COND(=),n        0,spc,dbit_nounlock_20
	ldi             1,t1
	stw             t1,0(t0)	/* release pa_dbit_lock */

dbit_nounlock_20:
#endif

	rfir
	nop
1678#endif
1679
1680	.import handle_interruption,code
1681
/*
 * Slow-path fault stubs: each loads the PA-RISC interruption code
 * into %r8 (in the branch delay slot) and enters the common
 * intr_save path, which forwards it to handle_interruption().
 */
kernel_bad_space:
	b               intr_save
	ldi             31,%r8  /* Use an unused code */

dbit_fault:
	b               intr_save
	ldi             20,%r8	/* 20 = TLB dirty bit trap */

itlb_fault:
	b               intr_save
	ldi             6,%r8	/* 6 = instruction TLB miss */

nadtlb_fault:
	b               intr_save
	ldi             17,%r8	/* 17 = non-access data TLB miss */

dtlb_fault:
	b               intr_save
	ldi             15,%r8	/* 15 = data TLB miss */
1701
1702	/* Register saving semantics for system calls:
1703
1704	   %r1		   clobbered by system call macro in userspace
1705	   %r2		   saved in PT_REGS by gateway page
1706	   %r3  - %r18	   preserved by C code (saved by signal code)
1707	   %r19 - %r20	   saved in PT_REGS by gateway page
1708	   %r21 - %r22	   non-standard syscall args
1709			   stored in kernel stack by gateway page
1710	   %r23 - %r26	   arg3-arg0, saved in PT_REGS by gateway page
1711	   %r27 - %r30	   saved in PT_REGS by gateway page
1712	   %r31		   syscall return pointer
1713	 */
1714
1715	/* Floating point registers (FIXME: what do we do with these?)
1716
1717	   %fr0  - %fr3	   status/exception, not preserved
1718	   %fr4  - %fr7	   arguments
1719	   %fr8	 - %fr11   not preserved by C code
1720	   %fr12 - %fr21   preserved by C code
1721	   %fr22 - %fr31   not preserved by C code
1722	 */
1723
	/* Save the callee-saved registers %r3-%r18 into the pt_regs
	 * block pointed to by \regs (e.g. before signal delivery, so
	 * the full register set is available for sigcontext). */
	.macro	reg_save regs
	STREG	%r3, PT_GR3(\regs)
	STREG	%r4, PT_GR4(\regs)
	STREG	%r5, PT_GR5(\regs)
	STREG	%r6, PT_GR6(\regs)
	STREG	%r7, PT_GR7(\regs)
	STREG	%r8, PT_GR8(\regs)
	STREG	%r9, PT_GR9(\regs)
	STREG   %r10,PT_GR10(\regs)
	STREG   %r11,PT_GR11(\regs)
	STREG   %r12,PT_GR12(\regs)
	STREG   %r13,PT_GR13(\regs)
	STREG   %r14,PT_GR14(\regs)
	STREG   %r15,PT_GR15(\regs)
	STREG   %r16,PT_GR16(\regs)
	STREG   %r17,PT_GR17(\regs)
	STREG   %r18,PT_GR18(\regs)
	.endm
1742
	/* Restore the callee-saved registers %r3-%r18 from the pt_regs
	 * block pointed to by \regs (inverse of reg_save). */
	.macro	reg_restore regs
	LDREG	PT_GR3(\regs), %r3
	LDREG	PT_GR4(\regs), %r4
	LDREG	PT_GR5(\regs), %r5
	LDREG	PT_GR6(\regs), %r6
	LDREG	PT_GR7(\regs), %r7
	LDREG	PT_GR8(\regs), %r8
	LDREG	PT_GR9(\regs), %r9
	LDREG   PT_GR10(\regs),%r10
	LDREG   PT_GR11(\regs),%r11
	LDREG   PT_GR12(\regs),%r12
	LDREG   PT_GR13(\regs),%r13
	LDREG   PT_GR14(\regs),%r14
	LDREG   PT_GR15(\regs),%r15
	LDREG   PT_GR16(\regs),%r16
	LDREG   PT_GR17(\regs),%r17
	LDREG   PT_GR18(\regs),%r18
	.endm
1761
ENTRY(sys_fork_wrapper)
	/*
	 * fork() wrapper: stashes callee-saves and %cr27 in pt_regs so
	 * the child can restore them, then implements fork as
	 * sys_clone(SIGCHLD, parent_usp, pt_regs).
	 */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
	ldo	TASK_REGS(%r1),%r1	/* %r1 = task's pt_regs */
	reg_save %r1
	mfctl	%cr27, %r3
	STREG	%r3, PT_CR27(%r1)

	STREG	%r2,-RP_OFFSET(%r30)	/* save return pointer */
	ldo	FRAME_SIZE(%r30),%r30
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif

	/* These are call-clobbered registers and therefore
	   also syscall-clobbered (we hope). */
	STREG	%r2,PT_GR19(%r1)	/* save for child */
	STREG	%r30,PT_GR21(%r1)

	LDREG	PT_GR30(%r1),%r25	/* arg1: child stack = parent's user sp */
	copy	%r1,%r24		/* arg2: pt_regs */
	BL	sys_clone,%r2
	ldi	SIGCHLD,%r26		/* delay slot: arg0 = SIGCHLD */

	LDREG	-RP_OFFSET-FRAME_SIZE(%r30),%r2
wrapper_exit:
	/* Common exit for the fork/clone/vfork wrappers and child_return. */
	ldo	-FRAME_SIZE(%r30),%r30		/* get the stackframe */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r1	 /* get pt regs */

	LDREG	PT_CR27(%r1), %r3
	mtctl	%r3, %cr27
	reg_restore %r1

	/* strace expects syscall # to be preserved in r20 */
	ldi	__NR_fork,%r20
	bv %r0(%r2)
	STREG	%r20,PT_GR20(%r1)
ENDPROC(sys_fork_wrapper)
1800
1801	/* Set the return value for the child */
ENTRY(child_return)
	BL	schedule_tail, %r2	/* finish scheduler bookkeeping for the new task */
	nop

	/* NOTE(review): offset subtracts FRAME_SIZE twice — presumably the
	   child starts one frame deeper than the wrapper; confirm against
	   copy_thread()/ret_from_kernel_thread setup. */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE-FRAME_SIZE(%r30), %r1
	LDREG	TASK_PT_GR19(%r1),%r2	/* return address saved by the wrapper */
	b	wrapper_exit
	copy	%r0,%r28		/* delay slot: child's syscall return value = 0 */
ENDPROC(child_return)
1811
1812
ENTRY(sys_clone_wrapper)
	/*
	 * clone() wrapper: userspace args are already in place; stash
	 * callee-saves and %cr27 in pt_regs, pass pt_regs as an extra
	 * argument in %r24, and return through wrapper_exit.
	 */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r1	/* get pt regs */
	reg_save %r1
	mfctl	%cr27, %r3
	STREG	%r3, PT_CR27(%r1)

	STREG	%r2,-RP_OFFSET(%r30)	/* save return pointer */
	ldo	FRAME_SIZE(%r30),%r30
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif

	/* WARNING - Clobbers r19 and r21, userspace must save these! */
	STREG	%r2,PT_GR19(%r1)	/* save for child */
	STREG	%r30,PT_GR21(%r1)
	BL	sys_clone,%r2
	copy	%r1,%r24		/* delay slot: extra arg = pt_regs */

	b	wrapper_exit
	LDREG	-RP_OFFSET-FRAME_SIZE(%r30),%r2
ENDPROC(sys_clone_wrapper)
1835
1836
ENTRY(sys_vfork_wrapper)
	/*
	 * vfork() wrapper: like sys_fork_wrapper but calls sys_vfork
	 * with pt_regs as its only argument (%r26).
	 */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r1	/* get pt regs */
	reg_save %r1
	mfctl	%cr27, %r3
	STREG	%r3, PT_CR27(%r1)

	STREG	%r2,-RP_OFFSET(%r30)	/* save return pointer */
	ldo	FRAME_SIZE(%r30),%r30
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif

	STREG	%r2,PT_GR19(%r1)	/* save for child */
	STREG	%r30,PT_GR21(%r1)

	BL	sys_vfork,%r2
	copy	%r1,%r26		/* delay slot: arg0 = pt_regs */

	b	wrapper_exit
	LDREG	-RP_OFFSET-FRAME_SIZE(%r30),%r2
ENDPROC(sys_vfork_wrapper)
1859
1860
	/* Common body for the execve wrappers: call \execve with
	 * arg0 = pt_regs (the user's arg registers were already saved
	 * at syscall entry), then return via the saved return pointer. */
	.macro  execve_wrapper execve
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r1	/* get pt regs */

	/*
	 * Do we need to save/restore r3-r18 here?
	 * I don't think so. why would new thread need old
	 * threads registers?
	 */

	/* %arg0 - %arg3 are already saved for us. */

	STREG %r2,-RP_OFFSET(%r30)
	ldo FRAME_SIZE(%r30),%r30
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif
	BL \execve,%r2
	copy %r1,%arg0			/* delay slot: arg0 = pt_regs */

	ldo -FRAME_SIZE(%r30),%r30
	LDREG -RP_OFFSET(%r30),%r2

	/* If exec succeeded we need to load the args */

	ldo -1024(%r0),%r1
	cmpb,>>= %r28,%r1,error_\execve	/* unsigned %r28 >= -1024: error return */
	copy %r2,%r19			/* delay slot (both paths): %r19 = return addr */

error_\execve:
	/* Success falls through to here as well; both paths return via %r19. */
	bv %r0(%r19)
	nop
	.endm
1894
	.import sys_execve
ENTRY(sys_execve_wrapper)
	/* Native execve entry; body is the shared execve_wrapper macro. */
	execve_wrapper sys_execve
ENDPROC(sys_execve_wrapper)
1899
#ifdef CONFIG_64BIT
	.import sys32_execve
ENTRY(sys32_execve_wrapper)
	/* 32-bit-compat execve entry on a 64-bit kernel. */
	execve_wrapper sys32_execve
ENDPROC(sys32_execve_wrapper)
#endif
1906
ENTRY(sys_rt_sigreturn_wrapper)
	/*
	 * rt_sigreturn wrapper: pass pt_regs to sys_rt_sigreturn, then
	 * restore callee-saves from the (now sigcontext-updated) pt_regs.
	 */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26
	ldo	TASK_REGS(%r26),%r26	/* get pt regs */
	/* Don't save regs, we are going to restore them from sigcontext. */
	STREG	%r2, -RP_OFFSET(%r30)
#ifdef CONFIG_64BIT
	ldo	FRAME_SIZE(%r30), %r30
	BL	sys_rt_sigreturn,%r2
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	BL	sys_rt_sigreturn,%r2
	ldo	FRAME_SIZE(%r30), %r30	/* delay slot: push frame */
#endif

	ldo	-FRAME_SIZE(%r30), %r30
	LDREG	-RP_OFFSET(%r30), %r2

	/* FIXME: I think we need to restore a few more things here. */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r1	/* get pt regs */
	reg_restore %r1

	/* If the signal was received while the process was blocked on a
	 * syscall, then r2 will take us to syscall_exit; otherwise r2 will
	 * take us to syscall_exit_rfi and on to intr_return.
	 */
	bv	%r0(%r2)
	LDREG	PT_GR28(%r1),%r28  /* reload original r28 for syscall_exit */
ENDPROC(sys_rt_sigreturn_wrapper)
1936
ENTRY(sys_sigaltstack_wrapper)
	/*
	 * sigaltstack wrapper: the kernel helper needs the user stack
	 * pointer as an extra argument, fetched from pt_regs into %r24.
	 */
	/* Get the user stack pointer */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r24	/* get pt regs */
	LDREG	TASK_PT_GR30(%r24),%r24	/* arg2 = saved user sp */
	STREG	%r2, -RP_OFFSET(%r30)
#ifdef CONFIG_64BIT
	ldo	FRAME_SIZE(%r30), %r30
	BL	do_sigaltstack,%r2
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	BL	do_sigaltstack,%r2
	ldo	FRAME_SIZE(%r30), %r30	/* delay slot: push frame */
#endif

	ldo	-FRAME_SIZE(%r30), %r30
	LDREG	-RP_OFFSET(%r30), %r2
	bv	%r0(%r2)
	nop
ENDPROC(sys_sigaltstack_wrapper)
1957
#ifdef CONFIG_64BIT
ENTRY(sys32_sigaltstack_wrapper)
	/* 32-bit-compat sigaltstack on a 64-bit kernel; same shape as
	 * sys_sigaltstack_wrapper but calls do_sigaltstack32. */
	/* Get the user stack pointer */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r24
	LDREG	TASK_PT_GR30(%r24),%r24	/* arg2 = saved user sp */
	STREG	%r2, -RP_OFFSET(%r30)
	ldo	FRAME_SIZE(%r30), %r30
	BL	do_sigaltstack32,%r2
	ldo	-16(%r30),%r29		/* Reference param save area */

	ldo	-FRAME_SIZE(%r30), %r30
	LDREG	-RP_OFFSET(%r30), %r2
	bv	%r0(%r2)
	nop
ENDPROC(sys32_sigaltstack_wrapper)
#endif
1974
ENTRY(syscall_exit)
	/* NOTE: HP-UX syscalls also come through here
	 * after hpux_syscall_exit fixes up return
	 * values. */

	/* NOTE: Not all syscalls exit this way.  rt_sigreturn will exit
	 * via syscall_exit_rfi if the signal was received while the process
	 * was running.
	 */

	/* save return value now */

	mfctl     %cr30, %r1
	LDREG     TI_TASK(%r1),%r1	/* %r1 = current task_struct */
	STREG     %r28,TASK_PT_GR28(%r1)

#ifdef CONFIG_HPUX
/* <linux/personality.h> cannot be easily included */
#define PER_HPUX 0x10
	ldw	TASK_PERSONALITY(%r1),%r19

	/* We can't use "CMPIB<> PER_HPUX" since "im5" field is sign extended */
	ldo	  -PER_HPUX(%r19), %r19
	cmpib,COND(<>),n 0,%r19,1f

	/* Save other hpux returns if personality is PER_HPUX */
	STREG     %r22,TASK_PT_GR22(%r1)
	STREG     %r29,TASK_PT_GR29(%r1)
1:

#endif /* CONFIG_HPUX */

	/* Seems to me that dp could be wrong here, if the syscall involved
	 * calling a module, and nothing got round to restoring dp on return.
	 */
	loadgp

syscall_check_resched:

	/* check for reschedule */

	LDREG	TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19	/* long */
	bb,<,n	%r19, 31-TIF_NEED_RESCHED, syscall_do_resched /* forward */

	.import do_signal,code
syscall_check_sig:
	LDREG	TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19
	ldi	(_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK), %r26
	/* if any signal flag is set, the <> condition nullifies the
	 * branch below and we fall through to syscall_do_signal */
	and,COND(<>)	%r19, %r26, %r0
	b,n	syscall_restore	/* skip past if we've nothing to do */

syscall_do_signal:
	/* Save callee-save registers (for sigcontext).
	 * FIXME: After this point the process structure should be
	 * consistent with all the relevant state of the process
	 * before the syscall.  We need to verify this.
	 */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1), %r26		/* struct pt_regs *regs */
	reg_save %r26

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29			/* Reference param save area */
#endif

	BL	do_notify_resume,%r2
	ldi	1, %r25				/* long in_syscall = 1 */

	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1), %r20		/* reload pt_regs */
	reg_restore %r20

	b,n     syscall_check_sig		/* re-check; signals may re-raise */

syscall_restore:
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1

	/* Are we being ptraced? */
	ldw	TASK_FLAGS(%r1),%r19
	ldi	(_TIF_SINGLESTEP|_TIF_BLOCKSTEP),%r2
	/* if neither step flag is set, "=" nullifies the branch below
	 * and we continue with the fast restore path */
	and,COND(=)	%r19,%r2,%r0
	b,n	syscall_restore_rfi

	ldo	TASK_PT_FR31(%r1),%r19		   /* reload fpregs */
	rest_fp	%r19

	LDREG	TASK_PT_SAR(%r1),%r19		   /* restore SAR */
	mtsar	%r19

	LDREG	TASK_PT_GR2(%r1),%r2		   /* restore user rp */
	LDREG	TASK_PT_GR19(%r1),%r19
	LDREG   TASK_PT_GR20(%r1),%r20
	LDREG	TASK_PT_GR21(%r1),%r21
	LDREG	TASK_PT_GR22(%r1),%r22
	LDREG	TASK_PT_GR23(%r1),%r23
	LDREG	TASK_PT_GR24(%r1),%r24
	LDREG	TASK_PT_GR25(%r1),%r25
	LDREG	TASK_PT_GR26(%r1),%r26
	LDREG	TASK_PT_GR27(%r1),%r27	   /* restore user dp */
	LDREG	TASK_PT_GR28(%r1),%r28	   /* syscall return value */
	LDREG	TASK_PT_GR29(%r1),%r29
	LDREG	TASK_PT_GR31(%r1),%r31	   /* restore syscall rp */

	/* NOTE: We use rsm/ssm pair to make this operation atomic */
	rsm     PSW_SM_I, %r0			   /* interrupts off */
	LDREG   TASK_PT_GR30(%r1),%r30             /* restore user sp */
	mfsp	%sr3,%r1			   /* Get users space id */
	mtsp    %r1,%sr7                           /* Restore sr7 */
	ssm     PSW_SM_I, %r0			   /* interrupts back on */

	/* Set sr2 to zero for userspace syscalls to work. */
	mtsp	%r0,%sr2
	mtsp	%r1,%sr4			   /* Restore sr4 */
	mtsp	%r1,%sr5			   /* Restore sr5 */
	mtsp	%r1,%sr6			   /* Restore sr6 */

	depi	3,31,2,%r31			   /* ensure return to user mode. */

#ifdef CONFIG_64BIT
	/* decide whether to reset the wide mode bit
	 *
	 * For a syscall, the W bit is stored in the lowest bit
	 * of sp.  Extract it and reset W if it is zero */
	extrd,u,*<>	%r30,63,1,%r1
	rsm	PSW_SM_W, %r0
	/* now reset the lowest bit of sp if it was set */
	xor	%r30,%r1,%r30
#endif
	be,n    0(%sr3,%r31)                       /* return to user space */

	/* We have to return via an RFI, so that PSW T and R bits can be set
	 * appropriately.
	 * This sets up pt_regs so we can return via intr_restore, which is not
	 * the most efficient way of doing things, but it works.
	 */
syscall_restore_rfi:
	ldo	-1(%r0),%r2			   /* Set recovery cntr to -1 */
	mtctl	%r2,%cr0			   /*   for immediate trap */
	LDREG	TASK_PT_PSW(%r1),%r2		   /* Get old PSW */
	ldi	0x0b,%r20			   /* Create new PSW */
	depi	-1,13,1,%r20			   /* C, Q, D, and I bits */

	/* The values of SINGLESTEP_BIT and BLOCKSTEP_BIT are
	 * set in thread_info.h and converted to PA bitmap
	 * numbers in asm-offsets.c */

	/* if ((%r19.SINGLESTEP_BIT)) { %r20.27=1} */
	extru,=	%r19,TIF_SINGLESTEP_PA_BIT,1,%r0
	depi	-1,27,1,%r20			   /* R bit */

	/* if ((%r19.BLOCKSTEP_BIT)) { %r20.7=1} */
	extru,= %r19,TIF_BLOCKSTEP_PA_BIT,1,%r0
	depi	-1,7,1,%r20			   /* T bit */

	STREG	%r20,TASK_PT_PSW(%r1)

	/* Always store space registers, since sr3 can be changed (e.g. fork) */

	mfsp    %sr3,%r25
	STREG   %r25,TASK_PT_SR3(%r1)
	STREG   %r25,TASK_PT_SR4(%r1)
	STREG   %r25,TASK_PT_SR5(%r1)
	STREG   %r25,TASK_PT_SR6(%r1)
	STREG   %r25,TASK_PT_SR7(%r1)
	STREG   %r25,TASK_PT_IASQ0(%r1)
	STREG   %r25,TASK_PT_IASQ1(%r1)

	/* XXX W bit??? */
	/* Now if old D bit is clear, it means we didn't save all registers
	 * on syscall entry, so do that now.  This only happens on TRACEME
	 * calls, or if someone attached to us while we were on a syscall.
	 * We could make this more efficient by not saving r3-r18, but
	 * then we wouldn't be able to use the common intr_restore path.
	 * It is only for traced processes anyway, so performance is not
	 * an issue.
	 */
	bb,<	%r2,30,pt_regs_ok		   /* Branch if D set */
	ldo	TASK_REGS(%r1),%r25		   /* delay slot: %r25 = pt_regs */
	reg_save %r25				   /* Save r3 to r18 */

	/* Save the current sr */
	mfsp	%sr0,%r2
	STREG	%r2,TASK_PT_SR0(%r1)

	/* Save the scratch sr */
	mfsp	%sr1,%r2
	STREG	%r2,TASK_PT_SR1(%r1)

	/* sr2 should be set to zero for userspace syscalls */
	STREG	%r0,TASK_PT_SR2(%r1)

pt_regs_ok:
	LDREG	TASK_PT_GR31(%r1),%r2
	depi	3,31,2,%r2			   /* ensure return to user mode. */
	STREG	%r2,TASK_PT_IAOQ0(%r1)
	ldo	4(%r2),%r2
	STREG	%r2,TASK_PT_IAOQ1(%r1)
	copy	%r25,%r16			   /* intr_restore expects pt_regs in %r16 */
	b	intr_restore
	nop

	.import schedule,code
syscall_do_resched:
	BL	schedule,%r2
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	nop
#endif
	b	syscall_check_resched	/* if resched, we start over again */
	nop
ENDPROC(syscall_exit)
2187
2188
#ifdef CONFIG_FUNCTION_TRACER
	.import ftrace_function_trampoline,code
ENTRY(_mcount)
	/* Compiler-inserted profiling hook: tail-call into the C
	 * trampoline, forwarding %r3 as the third argument. */
	copy	%r3, %arg2
	b	ftrace_function_trampoline
	nop
ENDPROC(_mcount)

ENTRY(return_to_handler)
	/* Function-graph return thunk: preserve the traced function's
	 * return values while asking the C code for the original
	 * return address, then jump back through it. */
	load32	return_trampoline, %rp
	copy	%ret0, %arg0
	copy	%ret1, %arg1
	b	ftrace_return_to_handler	/* returns original %rp in %ret0 (presumably — see C side) */
	nop
return_trampoline:
	copy	%ret0, %rp		/* restore the real return address */
	copy	%r23, %ret0		/* restore the traced function's return values */
	copy	%r24, %ret1

.globl ftrace_stub
ftrace_stub:
	bv	%r0(%rp)
	nop
ENDPROC(return_to_handler)
#endif	/* CONFIG_FUNCTION_TRACER */
2214
2215
get_register:
	/*
	 * get_register is used by the non access tlb miss handlers to
	 * copy the value of the general register specified in r8 into
	 * r1. This routine can't be used for shadowed registers, since
	 * the rfir will restore the original value. So, for the shadowed
	 * registers we put a -1 into r1 to indicate that the register
	 * should not be used (the register being copied could also have
	 * a -1 in it, but that is OK, it just means that we will have
	 * to use the slow path instead).
	 *
	 * Called with BL, return address in %r25.  blr indexes into the
	 * table below; each entry is exactly two instructions (bv plus
	 * its delay slot), so entry ordering and size are load-bearing.
	 */
	blr     %r8,%r0
	nop
	bv      %r0(%r25)    /* r0 */
	copy    %r0,%r1
	bv      %r0(%r25)    /* r1 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r2 */
	copy    %r2,%r1
	bv      %r0(%r25)    /* r3 */
	copy    %r3,%r1
	bv      %r0(%r25)    /* r4 */
	copy    %r4,%r1
	bv      %r0(%r25)    /* r5 */
	copy    %r5,%r1
	bv      %r0(%r25)    /* r6 */
	copy    %r6,%r1
	bv      %r0(%r25)    /* r7 */
	copy    %r7,%r1
	bv      %r0(%r25)    /* r8 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r9 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r10 */
	copy    %r10,%r1
	bv      %r0(%r25)    /* r11 */
	copy    %r11,%r1
	bv      %r0(%r25)    /* r12 */
	copy    %r12,%r1
	bv      %r0(%r25)    /* r13 */
	copy    %r13,%r1
	bv      %r0(%r25)    /* r14 */
	copy    %r14,%r1
	bv      %r0(%r25)    /* r15 */
	copy    %r15,%r1
	bv      %r0(%r25)    /* r16 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r17 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r18 */
	copy    %r18,%r1
	bv      %r0(%r25)    /* r19 */
	copy    %r19,%r1
	bv      %r0(%r25)    /* r20 */
	copy    %r20,%r1
	bv      %r0(%r25)    /* r21 */
	copy    %r21,%r1
	bv      %r0(%r25)    /* r22 */
	copy    %r22,%r1
	bv      %r0(%r25)    /* r23 */
	copy    %r23,%r1
	bv      %r0(%r25)    /* r24 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r25 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r26 */
	copy    %r26,%r1
	bv      %r0(%r25)    /* r27 */
	copy    %r27,%r1
	bv      %r0(%r25)    /* r28 */
	copy    %r28,%r1
	bv      %r0(%r25)    /* r29 */
	copy    %r29,%r1
	bv      %r0(%r25)    /* r30 */
	copy    %r30,%r1
	bv      %r0(%r25)    /* r31 */
	copy    %r31,%r1
2293
2294
set_register:
	/*
	 * set_register is used by the non access tlb miss handlers to
	 * copy the value of r1 into the general register specified in
	 * r8.
	 *
	 * Called with BL, return address in %r25.  blr indexes into the
	 * table below; each entry is exactly two instructions (bv plus
	 * its delay slot), so entry ordering and size are load-bearing.
	 * Note: unlike get_register, shadowed registers are not guarded
	 * here — callers must not request them.
	 */
	blr     %r8,%r0
	nop
	bv      %r0(%r25)    /* r0 (silly, but it is a place holder) */
	copy    %r1,%r0
	bv      %r0(%r25)    /* r1 */
	copy    %r1,%r1
	bv      %r0(%r25)    /* r2 */
	copy    %r1,%r2
	bv      %r0(%r25)    /* r3 */
	copy    %r1,%r3
	bv      %r0(%r25)    /* r4 */
	copy    %r1,%r4
	bv      %r0(%r25)    /* r5 */
	copy    %r1,%r5
	bv      %r0(%r25)    /* r6 */
	copy    %r1,%r6
	bv      %r0(%r25)    /* r7 */
	copy    %r1,%r7
	bv      %r0(%r25)    /* r8 */
	copy    %r1,%r8
	bv      %r0(%r25)    /* r9 */
	copy    %r1,%r9
	bv      %r0(%r25)    /* r10 */
	copy    %r1,%r10
	bv      %r0(%r25)    /* r11 */
	copy    %r1,%r11
	bv      %r0(%r25)    /* r12 */
	copy    %r1,%r12
	bv      %r0(%r25)    /* r13 */
	copy    %r1,%r13
	bv      %r0(%r25)    /* r14 */
	copy    %r1,%r14
	bv      %r0(%r25)    /* r15 */
	copy    %r1,%r15
	bv      %r0(%r25)    /* r16 */
	copy    %r1,%r16
	bv      %r0(%r25)    /* r17 */
	copy    %r1,%r17
	bv      %r0(%r25)    /* r18 */
	copy    %r1,%r18
	bv      %r0(%r25)    /* r19 */
	copy    %r1,%r19
	bv      %r0(%r25)    /* r20 */
	copy    %r1,%r20
	bv      %r0(%r25)    /* r21 */
	copy    %r1,%r21
	bv      %r0(%r25)    /* r22 */
	copy    %r1,%r22
	bv      %r0(%r25)    /* r23 */
	copy    %r1,%r23
	bv      %r0(%r25)    /* r24 */
	copy    %r1,%r24
	bv      %r0(%r25)    /* r25 */
	copy    %r1,%r25
	bv      %r0(%r25)    /* r26 */
	copy    %r1,%r26
	bv      %r0(%r25)    /* r27 */
	copy    %r1,%r27
	bv      %r0(%r25)    /* r28 */
	copy    %r1,%r28
	bv      %r0(%r25)    /* r29 */
	copy    %r1,%r29
	bv      %r0(%r25)    /* r30 */
	copy    %r1,%r30
	bv      %r0(%r25)    /* r31 */
	copy    %r1,%r31
2367
2368