/*
 * Linux/PA-RISC Project (http://www.parisc-linux.org/)
 *
 * kernel entry points (interruptions, system call wrappers)
 *  Copyright (C) 1999,2000 Philipp Rumpf
 *  Copyright (C) 1999 SuSE GmbH Nuernberg
 *  Copyright (C) 2000 Hewlett-Packard (John Marvin)
 *  Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; either version 2, or (at your option)
 *    any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <asm/asm-offsets.h>

/* we have the following possibilities to act on an interruption:
 *  - handle in assembly and use shadowed registers only
 *  - save registers to kernel stack and handle in assembly or C */


#include <asm/psw.h>
#include <asm/cache.h>		/* for L1_CACHE_SHIFT */
#include <asm/assembly.h>	/* for LDREG/STREG defines */
#include <asm/pgtable.h>
#include <asm/signal.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>

#include <linux/linkage.h>
#include <linux/init.h>

#ifdef CONFIG_64BIT
	.level 2.0w
#else
	.level 2.0
#endif

	.import         pa_dbit_lock,data

	/* space_to_prot macro creates a prot id from a space id */

#if (SPACEID_SHIFT) == 0
	.macro  space_to_prot spc prot
	depd,z  \spc,62,31,\prot
	.endm
#else
	.macro  space_to_prot spc prot
	extrd,u \spc,(64 - (SPACEID_SHIFT)),32,\prot
	.endm
#endif
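
	/* Rough C sketch of space_to_prot, for illustration only (PA
	 * numbers bits from the MSB, so a field "ending at bit 62" is
	 * the value shifted left by one):
	 *
	 *	unsigned long space_to_prot(unsigned long spc)
	 *	{
	 *	#if SPACEID_SHIFT == 0
	 *		return (spc & 0x7fffffffUL) << 1;
	 *	#else
	 *		return (spc >> (SPACEID_SHIFT - 1)) & 0xffffffffUL;
	 *	#endif
	 *	}
	 */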

	/* Switch to virtual mapping, trashing only %r1 */
	.macro  virt_map
	/* pcxt_ssm_bug */
	rsm	PSW_SM_I, %r0	/* barrier for "Relied upon Translation" */
	mtsp	%r0, %sr4
	mtsp	%r0, %sr5
	mfsp	%sr7, %r1
	or,=    %r0,%r1,%r0	/* Only save sr7 in sr3 if sr7 != 0 */
	mtsp	%r1, %sr3
	tovirt_r1 %r29
	load32	KERNEL_PSW, %r1

	rsm     PSW_SM_QUIET,%r0	/* second "heavy weight" ctl op */
	mtsp	%r0, %sr6
	mtsp	%r0, %sr7
	mtctl	%r0, %cr17	/* Clear IIASQ tail */
	mtctl	%r0, %cr17	/* Clear IIASQ head */
	mtctl	%r1, %ipsw
	load32	4f, %r1
	mtctl	%r1, %cr18	/* Set IIAOQ tail */
	ldo	4(%r1), %r1
	mtctl	%r1, %cr18	/* Set IIAOQ head */
	rfir
	nop
4:
	.endm

	/*
	 * The "get_stack" macros are responsible for determining the
	 * kernel stack value.
	 *
	 *      If sr7 == 0
	 *          Already using a kernel stack, so call the
	 *          get_stack_use_r30 macro to push a pt_regs structure
	 *          on the stack, and store registers there.
	 *      else
	 *          Need to set up a kernel stack, so call the
	 *          get_stack_use_cr30 macro to set up a pointer
	 *          to the pt_regs structure contained within the
	 *          task pointer pointed to by cr30. Set the stack
	 *          pointer to point to the end of the task structure.
	 *
	 * Note that we use shadowed registers for temps until
	 * we can save %r26 and %r29. %r26 is used to preserve
	 * %r8 (a shadowed register) which temporarily contained
	 * either the fault type ("code") or the eirr. We need
	 * to use a non-shadowed register to carry the value over
	 * the rfir in virt_map. We use %r26 since this value winds
	 * up being passed as the argument to either do_cpu_irq_mask
	 * or handle_interruption. %r29 is used to hold a pointer to
	 * the register save area, and once again, it needs to
	 * be a non-shadowed register so that it survives the rfir.
	 *
	 * N.B. TASK_SZ_ALGN and PT_SZ_ALGN include space for a stack frame.
	 */
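
	/* As a hedged C sketch of the above (task_pt_regs() and
	 * task_stack_page() stand in for the TASK_REGS/THREAD_SZ_ALGN
	 * offset arithmetic done in the macros below):
	 *
	 *	if (mfsp(7) == 0) {		// already on a kernel stack
	 *		regs = (struct pt_regs *)sp;
	 *		sp += PT_SZ_ALGN;
	 *	} else {			// coming from user space
	 *		regs = task_pt_regs(current);
	 *		sp = (unsigned long)task_stack_page(current)
	 *			+ THREAD_SZ_ALGN;
	 *	}
	 */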

	.macro  get_stack_use_cr30

	/* we save the registers in the task struct */

	mfctl   %cr30, %r1
	tophys  %r1,%r9
	LDREG	TI_TASK(%r9), %r1	/* thread_info -> task_struct */
	tophys  %r1,%r9
	ldo     TASK_REGS(%r9),%r9
	STREG   %r30, PT_GR30(%r9)
	STREG   %r29,PT_GR29(%r9)
	STREG   %r26,PT_GR26(%r9)
	copy    %r9,%r29
	mfctl   %cr30, %r1
	ldo	THREAD_SZ_ALGN(%r1), %r30
	.endm

	.macro  get_stack_use_r30

	/* we put a struct pt_regs on the stack and save the registers there */

	tophys  %r30,%r9
	STREG   %r30,PT_GR30(%r9)
	ldo	PT_SZ_ALGN(%r30),%r30
	STREG   %r29,PT_GR29(%r9)
	STREG   %r26,PT_GR26(%r9)
	copy    %r9,%r29
	.endm

	.macro  rest_stack
	LDREG   PT_GR1(%r29), %r1
	LDREG   PT_GR30(%r29),%r30
	LDREG   PT_GR29(%r29),%r29
	.endm

	/* default interruption handler
	 * (calls traps.c:handle_interruption) */
	.macro	def code
	b	intr_save
	ldi     \code, %r8
	.align	32
	.endm

	/* Interrupt interruption handler
	 * (calls irq.c:do_cpu_irq_mask) */
	.macro	extint code
	b	intr_extint
	mfsp    %sr7,%r16
	.align	32
	.endm

	.import	os_hpmc, code

	/* HPMC handler */
	.macro	hpmc code
	nop			/* must be a NOP, will be patched later */
	load32	PA(os_hpmc), %r3
	bv,n	0(%r3)
	nop
	.word	0		/* checksum (will be patched) */
	.word	PA(os_hpmc)	/* address of handler */
	.word	0		/* length of handler */
	.endm

	/*
	 * Performance Note: Instructions will be moved up into
	 * this part of the code later on, once we are sure
	 * that the tlb miss handlers are close to final form.
	 */

	/* Register definitions for tlb miss handler macros */

	va  = r8	/* virtual address for which the trap occurred */
	spc = r24	/* space for which the trap occurred */

#ifndef CONFIG_64BIT

	/*
	 * itlb miss interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	itlb_11 code

	mfctl	%pcsq, spc
	b	itlb_miss_11
	mfctl	%pcoq, va

	.align		32
	.endm
#endif

	/*
	 * itlb miss interruption handler (parisc 2.0)
	 */

	.macro	itlb_20 code
	mfctl	%pcsq, spc
#ifdef CONFIG_64BIT
	b       itlb_miss_20w
#else
	b	itlb_miss_20
#endif
	mfctl	%pcoq, va

	.align		32
	.endm

#ifndef CONFIG_64BIT
	/*
	 * naitlb miss interruption handler (parisc 1.1 - 32 bit)
	 *
	 * Note: naitlb misses will be treated
	 * as an ordinary itlb miss for now.
	 * However, note that naitlb misses
	 * have the faulting address in the
	 * IOR/ISR.
	 */

	.macro	naitlb_11 code

	mfctl	%isr,spc
	b	itlb_miss_11
	mfctl	%ior,va
	/* FIXME: If user causes a naitlb miss, the priv level may not be in
	 * lower bits of va, where the itlb miss handler is expecting them
	 */

	.align		32
	.endm
#endif

	/*
	 * naitlb miss interruption handler (parisc 2.0)
	 *
	 * Note: naitlb misses will be treated
	 * as an ordinary itlb miss for now.
	 * However, note that naitlb misses
	 * have the faulting address in the
	 * IOR/ISR.
	 */

	.macro	naitlb_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b       itlb_miss_20w
#else
	b	itlb_miss_20
#endif
	mfctl	%ior,va
	/* FIXME: If user causes a naitlb miss, the priv level may not be in
	 * lower bits of va, where the itlb miss handler is expecting them
	 */

	.align		32
	.endm

#ifndef CONFIG_64BIT
	/*
	 * dtlb miss interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	dtlb_11 code

	mfctl	%isr, spc
	b	dtlb_miss_11
	mfctl	%ior, va

	.align		32
	.endm
#endif

	/*
	 * dtlb miss interruption handler (parisc 2.0)
	 */

	.macro	dtlb_20 code

	mfctl	%isr, spc
#ifdef CONFIG_64BIT
	b       dtlb_miss_20w
#else
	b	dtlb_miss_20
#endif
	mfctl	%ior, va

	.align		32
	.endm

#ifndef CONFIG_64BIT
	/* nadtlb miss interruption handler (parisc 1.1 - 32 bit) */

	.macro	nadtlb_11 code

	mfctl	%isr,spc
	b       nadtlb_miss_11
	mfctl	%ior,va

	.align		32
	.endm
#endif

	/* nadtlb miss interruption handler (parisc 2.0) */

	.macro	nadtlb_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b       nadtlb_miss_20w
#else
	b       nadtlb_miss_20
#endif
	mfctl	%ior,va

	.align		32
	.endm

#ifndef CONFIG_64BIT
	/*
	 * dirty bit trap interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	dbit_11 code

	mfctl	%isr,spc
	b	dbit_trap_11
	mfctl	%ior,va

	.align		32
	.endm
#endif

	/*
	 * dirty bit trap interruption handler (parisc 2.0)
	 */

	.macro	dbit_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b       dbit_trap_20w
#else
	b	dbit_trap_20
#endif
	mfctl	%ior,va

	.align		32
	.endm

	/* The following are simple 32 vs 64 bit instruction
	 * abstractions for the macros */
	.macro		EXTR	reg1,start,length,reg2
#ifdef CONFIG_64BIT
	extrd,u		\reg1,32+\start,\length,\reg2
#else
	extrw,u		\reg1,\start,\length,\reg2
#endif
	.endm

	.macro		DEP	reg1,start,length,reg2
#ifdef CONFIG_64BIT
	depd		\reg1,32+\start,\length,\reg2
#else
	depw		\reg1,\start,\length,\reg2
#endif
	.endm

	.macro		DEPI	val,start,length,reg
#ifdef CONFIG_64BIT
	depdi		\val,32+\start,\length,\reg
#else
	depwi		\val,\start,\length,\reg
#endif
	.endm

	/* In LP64, the space register contains part of the upper 32 bits
	 * of the faulting address.  We have to extract this and place it
	 * in the va, zeroing the corresponding bits in the space register */
	.macro		space_adjust	spc,va,tmp
#ifdef CONFIG_64BIT
	extrd,u		\spc,63,SPACEID_SHIFT,\tmp
	depd		%r0,63,SPACEID_SHIFT,\spc
	depd		\tmp,31,SPACEID_SHIFT,\va
#endif
	.endm
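
	/* Sketch in C (illustrative only): the low SPACEID_SHIFT bits of
	 * the space register carry bits 32..(32+SPACEID_SHIFT-1) of the
	 * faulting address:
	 *
	 *	tmp = spc & ((1UL << SPACEID_SHIFT) - 1);
	 *	spc &= ~((1UL << SPACEID_SHIFT) - 1);
	 *	va  = (va & ~(((1UL << SPACEID_SHIFT) - 1) << 32))
	 *		| (tmp << 32);
	 */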

	.import		swapper_pg_dir,code

	/* Get the pgd.  For faults on space zero (kernel space), this
	 * is simply swapper_pg_dir.  For user space faults, the
	 * pgd is stored in %cr25 */
	.macro		get_pgd		spc,reg
	ldil		L%PA(swapper_pg_dir),\reg
	ldo		R%PA(swapper_pg_dir)(\reg),\reg
	or,COND(=)	%r0,\spc,%r0
	mfctl		%cr25,\reg
	.endm

	/*
		space_check(spc,tmp,fault)

		spc - The space we saw the fault with.
		tmp - The place to store the current space.
		fault - Function to call on failure.

		Only allow faults on different spaces from the
		currently active one if we're the kernel.

	*/
	.macro		space_check	spc,tmp,fault
	mfsp		%sr7,\tmp
	or,COND(<>)	%r0,\spc,%r0	/* user may execute gateway page
					 * as kernel, so defeat the space
					 * check if it is */
	copy		\spc,\tmp
	or,COND(=)	%r0,\tmp,%r0	/* nullify if executing as kernel */
	cmpb,COND(<>),n	\tmp,\spc,\fault
	.endm
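
	/* The nullification dance above, written out as a C sketch
	 * (mfsp(7) stands for reading %sr7, the current space):
	 *
	 *	tmp = mfsp(7);
	 *	if (spc == 0)			// kernel-space fault: OK
	 *		tmp = 0;
	 *	if (tmp != 0 && tmp != spc)	// user fault in a foreign
	 *		goto fault;		// space is refused
	 */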

	/* Look up a PTE in a 2-Level scheme (faulting at each
	 * level if the entry isn't present)
	 *
	 * NOTE: we use ldw even for LP64, since the short pointers
	 * can address up to 1TB
	 */
	.macro		L2_ptep	pmd,pte,index,va,fault
#if PT_NLEVELS == 3
	EXTR		\va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
#else
	EXTR		\va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
#endif
	DEP             %r0,31,PAGE_SHIFT,\pmd  /* clear offset */
	copy		%r0,\pte
	ldw,s		\index(\pmd),\pmd
	bb,>=,n		\pmd,_PxD_PRESENT_BIT,\fault
	DEP		%r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
	copy		\pmd,%r9
	SHLREG		%r9,PxD_VALUE_SHIFT,\pmd
	EXTR		\va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
	DEP		%r0,31,PAGE_SHIFT,\pmd  /* clear offset */
	shladd		\index,BITS_PER_PTE_ENTRY,\pmd,\pmd
	LDREG		%r0(\pmd),\pte		/* pmd is now pte */
	bb,>=,n		\pte,_PAGE_PRESENT_BIT,\fault
	.endm
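
	/* Hedged C equivalent of the two-level walk (the helper names
	 * here are illustrative, not the kernel's):
	 *
	 *	u32 pmd = ((u32 *)pmd_base)[pmd_index(va)]; // ldw, even LP64
	 *	if (!pmd_present(pmd))
	 *		goto fault;
	 *	pte_t *ptab = (pte_t *)pmd_to_phys(pmd);    // clear flags,
	 *	pte = ptab[pte_index(va)];	// shift by PxD_VALUE_SHIFT
	 *	if (!pte_present(pte))
	 *		goto fault;
	 */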

	/* Look up PTE in a 3-Level scheme.
	 *
	 * Here we implement a Hybrid L2/L3 scheme: we allocate the
	 * first pmd adjacent to the pgd.  This means that we can
	 * subtract a constant offset to get to it.  The pmd and pgd
	 * sizes are arranged so that a single pmd covers 4GB (giving
	 * a full LP64 process access to 8TB) so our lookups are
	 * effectively L2 for the first 4GB of the kernel (i.e. for
	 * all ILP32 processes and all the kernel for machines with
	 * under 4GB of memory) */
	.macro		L3_ptep pgd,pte,index,va,fault
#if PT_NLEVELS == 3 /* we might have a 2-Level scheme, e.g. with 16kb page size */
	extrd,u		\va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
	copy		%r0,\pte
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	ldw,s		\index(\pgd),\pgd
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	bb,>=,n		\pgd,_PxD_PRESENT_BIT,\fault
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	shld		\pgd,PxD_VALUE_SHIFT,\index
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	copy		\index,\pgd
	extrd,u,*<>	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	ldo		ASM_PGD_PMD_OFFSET(\pgd),\pgd
#endif
	L2_ptep		\pgd,\pte,\index,\va,\fault
	.endm

	/* Set the _PAGE_ACCESSED bit of the PTE.  Be clever and
	 * don't needlessly dirty the cache line if it was already set */
	.macro		update_ptep	ptep,pte,tmp,tmp1
	ldi		_PAGE_ACCESSED,\tmp1
	or		\tmp1,\pte,\tmp
	and,COND(<>)	\tmp1,\pte,%r0
	STREG		\tmp,0(\ptep)
	.endm
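
	/* In C (sketch): the store is skipped when the bit is already
	 * set, so a hot PTE's cache line is never needlessly dirtied:
	 *
	 *	if (!(pte & _PAGE_ACCESSED))
	 *		*ptep = pte | _PAGE_ACCESSED;
	 */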

	/* Set the dirty bit (and accessed bit).  No need to be
	 * clever, this is only used from the dirty fault */
	.macro		update_dirty	ptep,pte,tmp
	ldi		_PAGE_ACCESSED|_PAGE_DIRTY,\tmp
	or		\tmp,\pte,\pte
	STREG		\pte,0(\ptep)
	.endm

	/* Convert the pte and prot to tlb insertion values.  How
	 * this happens is quite subtle, read below */
	.macro		make_insert_tlb	spc,pte,prot
	space_to_prot   \spc \prot        /* create prot id from space */
	/* The following is the real subtlety.  This is depositing
	 * T <-> _PAGE_REFTRAP
	 * D <-> _PAGE_DIRTY
	 * B <-> _PAGE_DMB (memory break)
	 *
	 * Then incredible subtlety: The access rights are
	 * _PAGE_GATEWAY _PAGE_EXEC _PAGE_READ
	 * See 3-14 of the parisc 2.0 manual
	 *
	 * Finally, _PAGE_READ goes in the top bit of PL1 (so we
	 * trigger an access rights trap in user space if the user
	 * tries to read an unreadable page) */
	depd            \pte,8,7,\prot

	/* PAGE_USER indicates the page can be read with user privileges,
	 * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1
	 * contains _PAGE_READ) */
	extrd,u,*=      \pte,_PAGE_USER_BIT+32,1,%r0
	depdi		7,11,3,\prot
	/* If we're a gateway page, drop PL2 back to zero for promotion
	 * to kernel privilege (so we can execute the page as kernel).
	 * Any privilege promotion page always denies read and write */
	extrd,u,*= 	\pte,_PAGE_GATEWAY_BIT+32,1,%r0
	depd		%r0,11,2,\prot	/* If Gateway, Set PL2 to 0 */

	/* Enforce uncacheable pages.
	 * This should ONLY be used for MMIO on PA 2.0 machines.
	 * Memory/DMA is cache coherent on all PA2.0 machines we support
	 * (that means T-class is NOT supported) and the memory controllers
	 * on most of those machines only handle cache transactions.
	 */
	extrd,u,*=	\pte,_PAGE_NO_CACHE_BIT+32,1,%r0
	depi		1,12,1,\prot

	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
	extrd,u		\pte,(63-ASM_PFN_PTE_SHIFT)+(63-58),64-PAGE_SHIFT,\pte
	depdi		_PAGE_SIZE_ENCODING_DEFAULT,63,63-58,\pte
	.endm
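
	/* A loose C approximation of the prot word assembled above
	 * (the helper names are illustrative; the real bit positions
	 * are those of the PA 2.0 TLB insert format):
	 *
	 *	prot  = space_to_prot(spc);
	 *	prot |= tdb_bits(pte);		// T, D and B from the pte
	 *	if (pte & _PAGE_USER)
	 *		prot |= user_plevels;	// X1|11 into PL1|PL2
	 *	if (pte & _PAGE_GATEWAY)
	 *		prot &= ~pl2_mask;	// gateway: PL2 back to 0
	 *	if (pte & _PAGE_NO_CACHE)
	 *		prot |= uncacheable_bit;
	 */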

	/* Identical macro to make_insert_tlb above, except it
	 * makes the tlb entry for the differently formatted pa11
	 * insertion instructions */
	.macro		make_insert_tlb_11	spc,pte,prot
	zdep		\spc,30,15,\prot
	dep		\pte,8,7,\prot
	extru,=		\pte,_PAGE_NO_CACHE_BIT,1,%r0
	depi		1,12,1,\prot
	extru,=         \pte,_PAGE_USER_BIT,1,%r0
	depi		7,11,3,\prot   /* Set for user space (1 rsvd for read) */
	extru,= 	\pte,_PAGE_GATEWAY_BIT,1,%r0
	depi		0,11,2,\prot	/* If Gateway, Set PL2 to 0 */

	/* Get rid of prot bits and convert to page addr for iitlba */

	depi		_PAGE_SIZE_ENCODING_DEFAULT,31,ASM_PFN_PTE_SHIFT,\pte
	extru		\pte,24,25,\pte
	.endm

	/* This is for ILP32 PA2.0 only.  The TLB insertion needs
	 * to extend into I/O space if the address is 0xfXXXXXXX
	 * so we extend the f's into the top word of the pte in
	 * this case */
	.macro		f_extend	pte,tmp
	extrd,s		\pte,42,4,\tmp
	addi,<>		1,\tmp,%r0
	extrd,s		\pte,63,25,\pte
	.endm
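
	/* Intent of f_extend, as a C sketch (the asm works on the
	 * already-shifted TLB word, so its bit positions differ):
	 *
	 *	if ((addr >> 28) == 0xf)	// 0xfXXXXXXX is I/O space
	 *		addr |= 0xffffffff00000000UL;
	 */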

	/* The alias region is an 8MB-aligned 16MB region used to clear
	 * and copy user pages at addresses congruent with the user
	 * virtual address.
	 *
	 * To use the alias page, you set %r26 up with the "to" TLB
	 * entry (identifying the physical page) and %r23 up with
	 * the "from" TLB entry (or nothing if only a "to" entry---for
	 * clear_user_page_asm) */
	.macro		do_alias	spc,tmp,tmp1,va,pte,prot,fault
	cmpib,COND(<>),n 0,\spc,\fault
	ldil		L%(TMPALIAS_MAP_START),\tmp
#if defined(CONFIG_64BIT) && (TMPALIAS_MAP_START >= 0x80000000)
	/* on LP64, ldi will sign extend into the upper 32 bits,
	 * which is behaviour we don't want */
	depdi		0,31,32,\tmp
#endif
	copy		\va,\tmp1
	DEPI		0,31,23,\tmp1
	cmpb,COND(<>),n	\tmp,\tmp1,\fault
	ldi		(_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ),\prot
	depd,z		\prot,8,7,\prot
	/*
	 * OK, it is in the temp alias region, check whether "from" or "to".
	 * Check "subtle" note in pacache.S re: r23/r26.
	 */
#ifdef CONFIG_64BIT
	extrd,u,*=	\va,41,1,%r0
#else
	extrw,u,=	\va,9,1,%r0
#endif
	or,COND(tr)	%r23,%r0,\pte
	or		%r26,%r0,\pte
	.endm
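
	/* The checks in do_alias, restated as a hedged C sketch (the
	 * 8MB mask follows from the DEPI 0,31,23 above; helper names
	 * are illustrative):
	 *
	 *	if (spc != 0)				// kernel-only region
	 *		goto fault;
	 *	if ((va & ~0x7fffffUL) != TMPALIAS_MAP_START)
	 *		goto fault;			// outside the window
	 *	prot = kernel_rw_prot();		// DIRTY|WRITE|READ
	 *	pte  = va_selects_from_half(va) ? r23 : r26;
	 */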


	/*
	 * Align fault_vector_20 on 4K boundary so that both
	 * fault_vector_11 and fault_vector_20 are on the
	 * same page. This is only necessary as long as we
	 * write protect the kernel text, which we may stop
	 * doing once we use large page translations to cover
	 * the static part of the kernel address space.
	 */

	__HEAD

	.align	PAGE_SIZE

ENTRY(fault_vector_20)
	/* First vector is invalid (0) */
	.ascii	"cows can fly"
	.byte 0
	.align 32

	hpmc		 1
	def		 2
	def		 3
	extint		 4
	def		 5
	itlb_20		 6
	def		 7
	def		 8
	def              9
	def		10
	def		11
	def		12
	def		13
	def		14
	dtlb_20		15
#if 0
	naitlb_20	16
#else
	def             16
#endif
	nadtlb_20	17
	def		18
	def		19
	dbit_20		20
	def		21
	def		22
	def		23
	def		24
	def		25
	def		26
	def		27
	def		28
	def		29
	def		30
	def		31
END(fault_vector_20)

#ifndef CONFIG_64BIT

	.align 2048

ENTRY(fault_vector_11)
	/* First vector is invalid (0) */
	.ascii	"cows can fly"
	.byte 0
	.align 32

	hpmc		 1
	def		 2
	def		 3
	extint		 4
	def		 5
	itlb_11		 6
	def		 7
	def		 8
	def              9
	def		10
	def		11
	def		12
	def		13
	def		14
	dtlb_11		15
#if 0
	naitlb_11	16
#else
	def             16
#endif
	nadtlb_11	17
	def		18
	def		19
	dbit_11		20
	def		21
	def		22
	def		23
	def		24
	def		25
	def		26
	def		27
	def		28
	def		29
	def		30
	def		31
END(fault_vector_11)

#endif

	.import		handle_interruption,code
	.import		do_cpu_irq_mask,code

	/*
	 * r26 = function to be called
	 * r25 = argument to pass in
	 * r24 = flags for do_fork()
	 *
	 * Kernel threads don't ever return, so they don't need
	 * a true register context. We just save away the arguments
	 * for copy_thread/ret_from_kernel_thread to properly set up
	 * the child.
	 */

#define CLONE_VM 0x100	/* Must agree with <linux/sched.h> */
#define CLONE_UNTRACED 0x00800000

	.import do_fork
ENTRY(__kernel_thread)
	STREG	%r2, -RP_OFFSET(%r30)

	copy	%r30, %r1
	ldo	PT_SZ_ALGN(%r30),%r30
#ifdef CONFIG_64BIT
	/* Yo, function pointers in wide mode are little structs... -PB */
	ldd	24(%r26), %r2
	STREG	%r2, PT_GR27(%r1)	/* Store child's %dp */
	ldd	16(%r26), %r26

	STREG	%r22, PT_GR22(%r1)	/* save r22 (arg5) */
	copy	%r0, %r22		/* user_tid */
#endif
	STREG	%r26, PT_GR26(%r1)  /* Store function & argument for child */
	STREG	%r25, PT_GR25(%r1)
	ldil	L%CLONE_UNTRACED, %r26
	ldo	CLONE_VM(%r26), %r26   /* Force CLONE_VM since only init_mm */
	or	%r26, %r24, %r26      /* will have kernel mappings.	 */
	ldi	1, %r25			/* stack_start, signals kernel thread */
	stw	%r0, -52(%r30)	     	/* user_tid */
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif
	BL	do_fork, %r2
	copy	%r1, %r24		/* pt_regs */

	/* Parent Returns here */

	LDREG	-PT_SZ_ALGN-RP_OFFSET(%r30), %r2
	ldo	-PT_SZ_ALGN(%r30), %r30
	bv	%r0(%r2)
	nop
ENDPROC(__kernel_thread)

	/*
	 * Child Returns here
	 *
	 * copy_thread moved args from temp save area set up above
	 * into task save area.
	 */

ENTRY(ret_from_kernel_thread)

	/* Call schedule_tail first though */
	BL	schedule_tail, %r2
	nop

	LDREG	TI_TASK-THREAD_SZ_ALGN(%r30), %r1
	LDREG	TASK_PT_GR25(%r1), %r26
#ifdef CONFIG_64BIT
	LDREG	TASK_PT_GR27(%r1), %r27
	LDREG	TASK_PT_GR22(%r1), %r22
#endif
	LDREG	TASK_PT_GR26(%r1), %r1
	ble	0(%sr7, %r1)
	copy	%r31, %r2

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
	loadgp				/* Thread could have been in a module */
#endif
#ifndef CONFIG_64BIT
	b	sys_exit
#else
	load32	sys_exit, %r1
	bv	%r0(%r1)
#endif
	ldi	0, %r26
ENDPROC(ret_from_kernel_thread)

	.import	sys_execve, code
ENTRY(__execve)
	copy	%r2, %r15
	copy	%r30, %r16
	ldo	PT_SZ_ALGN(%r30), %r30
	STREG	%r26, PT_GR26(%r16)
	STREG	%r25, PT_GR25(%r16)
	STREG	%r24, PT_GR24(%r16)
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif
	BL	sys_execve, %r2
	copy	%r16, %r26

	cmpib,=,n 0,%r28,intr_return    /* forward */

	/* yes, this will trap and die. */
	copy	%r15, %r2
	copy	%r16, %r30
	bv	%r0(%r2)
	nop
ENDPROC(__execve)


	/*
	 * struct task_struct *_switch_to(struct task_struct *prev,
	 *	struct task_struct *next)
	 *
	 * switch kernel stacks and return prev */
ENTRY(_switch_to)
	STREG	 %r2, -RP_OFFSET(%r30)

	callee_save_float
	callee_save

	load32	_switch_to_ret, %r2

	STREG	%r2, TASK_PT_KPC(%r26)
	LDREG	TASK_PT_KPC(%r25), %r2

	STREG	%r30, TASK_PT_KSP(%r26)
	LDREG	TASK_PT_KSP(%r25), %r30
	LDREG	TASK_THREAD_INFO(%r25), %r25
	bv	%r0(%r2)
	mtctl   %r25,%cr30

_switch_to_ret:
	mtctl	%r0, %cr0		/* Needed for single stepping */
	callee_rest
	callee_rest_float

	LDREG	-RP_OFFSET(%r30), %r2
	bv	%r0(%r2)
	copy	%r26, %r28
ENDPROC(_switch_to)

	/*
	 * Common rfi return path for interruptions, kernel execve, and
	 * sys_rt_sigreturn (sometimes).  The sys_rt_sigreturn syscall will
	 * return via this path if the signal was received when the process
	 * was running; if the process was blocked on a syscall then the
	 * normal syscall_exit path is used.  All syscalls for traced
	 * processes exit via intr_restore.
	 *
	 * XXX If any syscalls that change a process's space id ever exit
	 * this way, then we will need to copy %sr3 in to PT_SR[3..7], and
	 * adjust IASQ[0..1].
	 *
	 */

	.align	PAGE_SIZE

ENTRY(syscall_exit_rfi)
	mfctl   %cr30,%r16
	LDREG	TI_TASK(%r16), %r16	/* thread_info -> task_struct */
	ldo	TASK_REGS(%r16),%r16
	/* Force iaoq to userspace, as the user has had access to our current
	 * context via sigcontext. Also filter the PSW for the same reason.
	 */
	LDREG	PT_IAOQ0(%r16),%r19
	depi	3,31,2,%r19
	STREG	%r19,PT_IAOQ0(%r16)
	LDREG	PT_IAOQ1(%r16),%r19
	depi	3,31,2,%r19
	STREG	%r19,PT_IAOQ1(%r16)
	LDREG   PT_PSW(%r16),%r19
	load32	USER_PSW_MASK,%r1
#ifdef CONFIG_64BIT
	load32	USER_PSW_HI_MASK,%r20
	depd    %r20,31,32,%r1
#endif
	and     %r19,%r1,%r19 /* Mask out bits that user shouldn't play with */
	load32	USER_PSW,%r1
	or      %r19,%r1,%r19 /* Make sure default USER_PSW bits are set */
	STREG   %r19,PT_PSW(%r16)

	/*
	 * If we aren't being traced, we never saved space registers
	 * (we don't store them in the sigcontext), so set them
	 * to "proper" values now (otherwise we'll wind up restoring
	 * whatever was last stored in the task structure, which might
	 * be inconsistent if an interrupt occurred while on the gateway
	 * page). Note that we may be "trashing" values the user put in
	 * them, but we don't support the user changing them.
	 */

	STREG   %r0,PT_SR2(%r16)
	mfsp    %sr3,%r19
	STREG   %r19,PT_SR0(%r16)
	STREG   %r19,PT_SR1(%r16)
	STREG   %r19,PT_SR3(%r16)
	STREG   %r19,PT_SR4(%r16)
	STREG   %r19,PT_SR5(%r16)
	STREG   %r19,PT_SR6(%r16)
	STREG   %r19,PT_SR7(%r16)

intr_return:
	/* NOTE: Need to enable interrupts in case we schedule. */
	ssm     PSW_SM_I, %r0

intr_check_resched:

	/* check for reschedule */
	mfctl   %cr30,%r1
	LDREG   TI_FLAGS(%r1),%r19	/* sched.h: TIF_NEED_RESCHED */
	bb,<,n	%r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */

	.import do_notify_resume,code
intr_check_sig:
	/* As above */
	mfctl   %cr30,%r1
	LDREG	TI_FLAGS(%r1),%r19
	ldi	(_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK), %r20
	and,COND(<>)	%r19, %r20, %r0
	b,n	intr_restore	/* skip past if we've nothing to do */

	/* This check is critical to having LWS
	 * working. The IASQ is zero on the gateway
	 * page and we cannot deliver any signals until
	 * we get off the gateway page.
	 *
	 * Only do signals if we are returning to user space
	 */
	LDREG	PT_IASQ0(%r16), %r20
	cmpib,COND(=),n 0,%r20,intr_restore /* backward */
	LDREG	PT_IASQ1(%r16), %r20
	cmpib,COND(=),n 0,%r20,intr_restore /* backward */

	copy	%r0, %r25			/* long in_syscall = 0 */
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29			/* Reference param save area */
#endif

	BL	do_notify_resume,%r2
	copy	%r16, %r26			/* struct pt_regs *regs */

	b,n	intr_check_sig

intr_restore:
	copy            %r16,%r29
	ldo             PT_FR31(%r29),%r1
	rest_fp         %r1
	rest_general    %r29

	/* inverse of virt_map */
	pcxt_ssm_bug
	rsm             PSW_SM_QUIET,%r0	/* prepare for rfi */
	tophys_r1       %r29

	/* Restore space id's and special cr's from PT_REGS
	 * structure pointed to by r29
	 */
	rest_specials	%r29

	/* IMPORTANT: rest_stack restores r29 last (we are using it)!
	 * It also restores r1 and r30.
	 */
	rest_stack

	rfi
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

#ifndef CONFIG_PREEMPT
# define intr_do_preempt	intr_restore
#endif /* !CONFIG_PREEMPT */

	.import schedule,code
intr_do_resched:
	/* Only call schedule on return to userspace. If we're returning
	 * to kernel space, we may schedule if CONFIG_PREEMPT, otherwise
	 * we jump back to intr_restore.
	 */
	LDREG	PT_IASQ0(%r16), %r20
	cmpib,COND(=)	0, %r20, intr_do_preempt
	nop
	LDREG	PT_IASQ1(%r16), %r20
	cmpib,COND(=)	0, %r20, intr_do_preempt
	nop

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif

	ldil	L%intr_check_sig, %r2
#ifndef CONFIG_64BIT
	b	schedule
#else
	load32	schedule, %r20
	bv	%r0(%r20)
#endif
	ldo	R%intr_check_sig(%r2), %r2

	/* preempt the current task on returning to kernel
	 * mode from an interrupt, iff need_resched is set,
	 * and preempt_count is 0.  Otherwise, we continue on
	 * our merry way back to the current running task.
	 */
#ifdef CONFIG_PREEMPT
	.import preempt_schedule_irq,code
intr_do_preempt:
	rsm	PSW_SM_I, %r0		/* disable interrupts */

	/* current_thread_info()->preempt_count */
	mfctl	%cr30, %r1
	LDREG	TI_PRE_COUNT(%r1), %r19
	cmpib,COND(<>)	0, %r19, intr_restore	/* if preempt_count > 0 */
	nop				/* prev insn branched backwards */

	/* check if we interrupted a critical path */
	LDREG	PT_PSW(%r16), %r20
	bb,<,n	%r20, 31 - PSW_SM_I, intr_restore
	nop

	BL	preempt_schedule_irq, %r2
	nop

	b,n	intr_restore		/* ssm PSW_SM_I done by intr_restore */
#endif /* CONFIG_PREEMPT */

	/*
	 * External interrupts.
	 */

intr_extint:
	cmpib,COND(=),n 0,%r16,1f

	get_stack_use_cr30
	b,n 2f

1:
	get_stack_use_r30
2:
	save_specials	%r29
	virt_map
	save_general	%r29

	ldo	PT_FR0(%r29), %r24
	save_fp	%r24

	loadgp

	copy	%r29, %r26	/* arg0 is pt_regs */
	copy	%r29, %r16	/* save pt_regs */

	ldil	L%intr_return, %r2

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29	/* Reference param save area */
#endif

	b	do_cpu_irq_mask
	ldo	R%intr_return(%r2), %r2	/* return to intr_return, not here */
ENDPROC(syscall_exit_rfi)


	/* Generic interruptions (illegal insn, unaligned, page fault, etc) */

ENTRY(intr_save)		/* for os_hpmc */
	mfsp    %sr7,%r16
	cmpib,COND(=),n 0,%r16,1f
	get_stack_use_cr30
	b	2f
	copy    %r8,%r26

1:
	get_stack_use_r30
	copy    %r8,%r26

2:
	save_specials	%r29

	/* If this trap is an itlb miss, skip saving/adjusting isr/ior */

	/*
	 * FIXME: 1) Use a #define for the hardwired "6" below (and in
	 *           traps.c).
	 *        2) Once we start executing code above 4 GB, we need
	 *           to adjust iasq/iaoq here in the same way we
	 *           adjust isr/ior below.
	 */

	cmpib,COND(=),n        6,%r26,skip_save_ior


	mfctl           %cr20, %r16 /* isr */
	nop		/* serialize mfctl on PA 2.0 to avoid 4 cycle penalty */
	mfctl           %cr21, %r17 /* ior */


#ifdef CONFIG_64BIT
	/*
	 * If the interrupted code was running with W bit off (32 bit),
	 * clear the b bits (bits 0 & 1) in the ior.
	 * save_specials left ipsw value in r8 for us to test.
	 */
	extrd,u,*<>     %r8,PSW_W_BIT,1,%r0
	depdi           0,1,2,%r17

	/*
	 * FIXME: This code has hardwired assumptions about the split
	 *        between space bits and offset bits. This will change
	 *        when we allow alternate page sizes.
	 */

	/* adjust isr/ior. */
	extrd,u         %r16,63,SPACEID_SHIFT,%r1	/* get high bits from isr for ior */
	depd            %r1,31,SPACEID_SHIFT,%r17	/* deposit them into ior */
	depdi           0,63,SPACEID_SHIFT,%r16		/* clear them from isr */
#endif
	STREG           %r16, PT_ISR(%r29)
	STREG           %r17, PT_IOR(%r29)


skip_save_ior:
	virt_map
	save_general	%r29

	ldo		PT_FR0(%r29), %r25
	save_fp		%r25

	loadgp

	copy		%r29, %r25	/* arg1 is pt_regs */
#ifdef CONFIG_64BIT
	ldo		-16(%r30),%r29	/* Reference param save area */
#endif

	ldil		L%intr_check_sig, %r2
	copy		%r25, %r16	/* save pt_regs */

	b		handle_interruption
	ldo		R%intr_check_sig(%r2), %r2
ENDPROC(intr_save)


	/*
	 * Note for all tlb miss handlers:
	 *
	 * cr24 contains a pointer to the kernel address space
	 * page directory.
	 *
	 * cr25 contains a pointer to the current user address
	 * space page directory.
	 *
	 * sr3 will contain the space id of the user address space
	 * of the current running thread while that thread is
	 * running in the kernel.
	 */

	/*
	 * register number allocations.  Note that these are all
	 * in the shadowed registers
	 */

	t0 = r1		/* temporary register 0 */
	va = r8		/* virtual address for which the trap occurred */
	t1 = r9		/* temporary register 1 */
	pte  = r16	/* pte/phys page # */
	prot = r17	/* prot bits */
	spc  = r24	/* space for which the trap occurred */
	ptp = r25	/* page directory/page table pointer */

#ifdef CONFIG_64BIT

dtlb_miss_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dtlb_fault

	L3_ptep		ptp,pte,t0,va,dtlb_check_alias_20w

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	idtlbt          pte,prot

	rfir
	nop

dtlb_check_alias_20w:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault

	idtlbt          pte,prot

	rfir
	nop

nadtlb_miss_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,nadtlb_fault

	L3_ptep		ptp,pte,t0,va,nadtlb_check_flush_20w

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	idtlbt          pte,prot

	rfir
	nop

nadtlb_check_flush_20w:
	bb,>=,n          pte,_PAGE_FLUSH_BIT,nadtlb_emulate

	/* Insert a "flush only" translation */

	depdi,z         7,7,3,prot
	depdi           1,10,1,prot

	/* Get rid of prot bits and convert to page addr for idtlbt */

	depdi		0,63,12,pte
	extrd,u         pte,56,52,pte
	idtlbt          pte,prot

	rfir
	nop

#else

dtlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,dtlb_fault

	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_11

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t0  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t0, %sr1	/* Restore sr1 */

	rfir
	nop

dtlb_check_alias_11:

	/* Check to see if fault is in the temporary alias region */

	cmpib,<>,n      0,spc,dtlb_fault /* forward */
	ldil            L%(TMPALIAS_MAP_START),t0
	copy            va,t1
	depwi           0,31,23,t1
	cmpb,<>,n       t0,t1,dtlb_fault /* forward */
	ldi             (_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ),prot
	depw,z          prot,8,7,prot

	/*
	 * OK, it is in the temp alias region, check whether "from" or "to".
	 * Check "subtle" note in pacache.S re: r23/r26.
	 */

	extrw,u,=       va,9,1,r0
	or,tr           %r23,%r0,pte    /* If "from" use "from" page */
	or              %r26,%r0,pte    /* else "to", use "to" page  */

	idtlba          pte,(va)
	idtlbp          prot,(va)

	rfir
	nop

nadtlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,nadtlb_fault

	L2_ptep		ptp,pte,t0,va,nadtlb_check_flush_11

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot


	mfsp		%sr1,t0  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t0, %sr1	/* Restore sr1 */

	rfir
	nop

nadtlb_check_flush_11:
	bb,>=,n          pte,_PAGE_FLUSH_BIT,nadtlb_emulate

	/* Insert a "flush only" translation */

	zdepi           7,7,3,prot
	depi            1,10,1,prot

	/* Get rid of prot bits and convert to page addr for idtlba */

	depi		0,31,12,pte
	extru		pte,24,25,pte

	mfsp		%sr1,t0  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t0, %sr1	/* Restore sr1 */

	rfir
	nop

dtlb_miss_20:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dtlb_fault

	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_20

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	f_extend	pte,t0

	idtlbt          pte,prot

	rfir
	nop

dtlb_check_alias_20:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault

	idtlbt          pte,prot

	rfir
	nop

nadtlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,nadtlb_fault

	L2_ptep		ptp,pte,t0,va,nadtlb_check_flush_20

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	f_extend	pte,t0

	idtlbt          pte,prot

	rfir
	nop

nadtlb_check_flush_20:
	bb,>=,n          pte,_PAGE_FLUSH_BIT,nadtlb_emulate

	/* Insert a "flush only" translation */

	depdi,z         7,7,3,prot
	depdi           1,10,1,prot

	/* Get rid of prot bits and convert to page addr for idtlbt */

	depdi		0,63,12,pte
	extrd,u         pte,56,32,pte
	idtlbt          pte,prot

	rfir
	nop
#endif

nadtlb_emulate:

	/*
	 * Non-access misses can be caused by fdc,fic,pdc,lpa,probe and
	 * probei instructions. We don't want to fault for these
	 * instructions (not only does it not make sense, it can cause
	 * deadlocks, since some flushes are done with the mmap
	 * semaphore held). If the translation doesn't exist, we can't
	 * insert a translation, so have to emulate the side effects
	 * of the instruction. Since we don't insert a translation
	 * we can get a lot of faults during a flush loop, so it makes
	 * sense to try to do it here with minimum overhead. We only
	 * emulate fdc,fic,pdc,probew,prober instructions whose base
	 * and index registers are not shadowed. We defer everything
	 * else to the "slow" path.
	 */
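
	/* Decode sketch in C (field positions per the checks below;
	 * get_register()/set_register() are the jump tables at the end
	 * of this file, with -1 standing for a shadowed register):
	 *
	 *	u32 iir = mfctl(19);
	 *	if ((iir & 0x280) == 0x280) {	// fdc/fdce/pdc/"fic,4f"
	 *		if (!m_bit(iir))	// no base modification
	 *			goto nullify;
	 *		x = get_register((iir >> 16) & 0x1f);	// index
	 *		b = get_register((iir >> 21) & 0x1f);	// base
	 *		if (x == -1 || b == -1)
	 *			goto slow_path;
	 *		set_register(base_regno, b + x);	// emulate ,m
	 *	}
	 */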

	mfctl           %cr19,%r9 /* Get iir */

	/* PA 2.0 Arch Ref. Book pg 382 has a good description of the insn bits.
	   Checks for fdc,fdce,pdc,"fic,4f",prober,probeir,probew,probeiw */

	/* Checks for fdc,fdce,pdc,"fic,4f" only */
	ldi             0x280,%r16
	and             %r9,%r16,%r17
	cmpb,<>,n       %r16,%r17,nadtlb_probe_check
	bb,>=,n         %r9,26,nadtlb_nullify  /* m bit not set, just nullify */
	BL		get_register,%r25
	extrw,u         %r9,15,5,%r8           /* Get index register # */
	cmpib,COND(=),n        -1,%r1,nadtlb_fault    /* have to use slow path */
	copy            %r1,%r24
	BL		get_register,%r25
	extrw,u         %r9,10,5,%r8           /* Get base register # */
	cmpib,COND(=),n        -1,%r1,nadtlb_fault    /* have to use slow path */
	BL		set_register,%r25
	add,l           %r1,%r24,%r1           /* doesn't affect c/b bits */

nadtlb_nullify:
	mfctl           %ipsw,%r8
	ldil            L%PSW_N,%r9
	or              %r8,%r9,%r8            /* Set PSW_N */
	mtctl           %r8,%ipsw

	rfir
	nop

	/*
		When there is no translation for the probe address then we
		must nullify the insn and return zero in the target register.
		This will indicate to the calling code that it does not have
		write/read privileges to this address.

		This should technically work for prober and probew in PA 1.1,
		and also probe,r and probe,w in PA 2.0.

		WARNING: USE ONLY NON-SHADOW REGISTERS WITH PROBE INSN!
		THE SLOW-PATH EMULATION HAS NOT BEEN WRITTEN YET.

	*/
nadtlb_probe_check:
	ldi             0x80,%r16
	and             %r9,%r16,%r17
	cmpb,<>,n       %r16,%r17,nadtlb_fault /* Must be probe,[rw] */
	BL              get_register,%r25      /* Find the target register */
	extrw,u         %r9,31,5,%r8           /* Get target register */
	cmpib,COND(=),n        -1,%r1,nadtlb_fault    /* have to use slow path */
	BL		set_register,%r25
	copy            %r0,%r1                /* Write zero to target register */
	b nadtlb_nullify                       /* Nullify return insn */
	nop

#ifdef CONFIG_64BIT
itlb_miss_20w:

	/*
	 * An itlb miss is a little different, since we allow users to
	 * fault on the gateway page which is in the kernel address space.
	 */

	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,itlb_fault

	L3_ptep		ptp,pte,t0,va,itlb_fault

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	iitlbt          pte,prot

	rfir
	nop

#else

itlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,itlb_fault

	L2_ptep		ptp,pte,t0,va,itlb_fault

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t0  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	iitlba		pte,(%sr1,va)
	iitlbp		prot,(%sr1,va)

	mtsp		t0, %sr1	/* Restore sr1 */

	rfir
	nop

itlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,itlb_fault

	L2_ptep		ptp,pte,t0,va,itlb_fault

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	f_extend	pte,t0

	iitlbt          pte,prot

	rfir
	nop

#endif

#ifdef CONFIG_64BIT

dbit_trap_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dbit_fault

	L3_ptep		ptp,pte,t0,va,dbit_fault

#ifdef CONFIG_SMP
	cmpib,COND(=),n        0,spc,dbit_nolock_20w
	load32		PA(pa_dbit_lock),t0

dbit_spin_20w:
	LDCW		0(t0),t1
	cmpib,COND(=)         0,t1,dbit_spin_20w
	nop

dbit_nolock_20w:
#endif
	update_dirty	ptp,pte,t1

	make_insert_tlb	spc,pte,prot

	idtlbt          pte,prot
#ifdef CONFIG_SMP
	cmpib,COND(=),n        0,spc,dbit_nounlock_20w
	ldi             1,t1
	stw             t1,0(t0)

dbit_nounlock_20w:
#endif

	rfir
	nop
#else

dbit_trap_11:

	get_pgd		spc,ptp

	space_check	spc,t0,dbit_fault

	L2_ptep		ptp,pte,t0,va,dbit_fault

#ifdef CONFIG_SMP
	cmpib,COND(=),n        0,spc,dbit_nolock_11
	load32		PA(pa_dbit_lock),t0

dbit_spin_11:
	LDCW		0(t0),t1
	cmpib,=         0,t1,dbit_spin_11
	nop

dbit_nolock_11:
#endif
	update_dirty	ptp,pte,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp            %sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp            t1, %sr1     /* Restore sr1 */
#ifdef CONFIG_SMP
	cmpib,COND(=),n        0,spc,dbit_nounlock_11
	ldi             1,t1
	stw             t1,0(t0)

dbit_nounlock_11:
#endif

	rfir
	nop

dbit_trap_20:
	get_pgd		spc,ptp

	space_check	spc,t0,dbit_fault

	L2_ptep		ptp,pte,t0,va,dbit_fault

#ifdef CONFIG_SMP
	cmpib,COND(=),n        0,spc,dbit_nolock_20
	load32		PA(pa_dbit_lock),t0

dbit_spin_20:
	LDCW		0(t0),t1
	cmpib,=         0,t1,dbit_spin_20
	nop

dbit_nolock_20:
#endif
	update_dirty	ptp,pte,t1

	make_insert_tlb	spc,pte,prot

	f_extend	pte,t1

	idtlbt          pte,prot

#ifdef CONFIG_SMP
	cmpib,COND(=),n        0,spc,dbit_nounlock_20
	ldi             1,t1
	stw             t1,0(t0)

dbit_nounlock_20:
#endif

	rfir
	nop
#endif

	.import handle_interruption,code

kernel_bad_space:
	b               intr_save
	ldi             31,%r8  /* Use an unused code */

dbit_fault:
	b               intr_save
	ldi             20,%r8

itlb_fault:
	b               intr_save
	ldi             6,%r8

nadtlb_fault:
	b               intr_save
	ldi             17,%r8

dtlb_fault:
	b               intr_save
	ldi             15,%r8

	/* Register saving semantics for system calls:

	   %r1		   clobbered by system call macro in userspace
	   %r2		   saved in PT_REGS by gateway page
	   %r3  - %r18	   preserved by C code (saved by signal code)
	   %r19 - %r20	   saved in PT_REGS by gateway page
	   %r21 - %r22	   non-standard syscall args
			   stored in kernel stack by gateway page
	   %r23 - %r26	   arg3-arg0, saved in PT_REGS by gateway page
	   %r27 - %r30	   saved in PT_REGS by gateway page
	   %r31		   syscall return pointer
	 */

	/* Floating point registers (FIXME: what do we do with these?)

	   %fr0  - %fr3	   status/exception, not preserved
	   %fr4  - %fr7	   arguments
	   %fr8	 - %fr11   not preserved by C code
	   %fr12 - %fr21   preserved by C code
	   %fr22 - %fr31   not preserved by C code
	 */

	.macro	reg_save regs
	STREG	%r3, PT_GR3(\regs)
	STREG	%r4, PT_GR4(\regs)
	STREG	%r5, PT_GR5(\regs)
	STREG	%r6, PT_GR6(\regs)
	STREG	%r7, PT_GR7(\regs)
	STREG	%r8, PT_GR8(\regs)
	STREG	%r9, PT_GR9(\regs)
	STREG   %r10,PT_GR10(\regs)
	STREG   %r11,PT_GR11(\regs)
	STREG   %r12,PT_GR12(\regs)
	STREG   %r13,PT_GR13(\regs)
	STREG   %r14,PT_GR14(\regs)
	STREG   %r15,PT_GR15(\regs)
	STREG   %r16,PT_GR16(\regs)
	STREG   %r17,PT_GR17(\regs)
	STREG   %r18,PT_GR18(\regs)
	.endm

	.macro	reg_restore regs
	LDREG	PT_GR3(\regs), %r3
	LDREG	PT_GR4(\regs), %r4
	LDREG	PT_GR5(\regs), %r5
	LDREG	PT_GR6(\regs), %r6
	LDREG	PT_GR7(\regs), %r7
	LDREG	PT_GR8(\regs), %r8
	LDREG	PT_GR9(\regs), %r9
	LDREG   PT_GR10(\regs),%r10
	LDREG   PT_GR11(\regs),%r11
	LDREG   PT_GR12(\regs),%r12
	LDREG   PT_GR13(\regs),%r13
	LDREG   PT_GR14(\regs),%r14
	LDREG   PT_GR15(\regs),%r15
	LDREG   PT_GR16(\regs),%r16
	LDREG   PT_GR17(\regs),%r17
	LDREG   PT_GR18(\regs),%r18
	.endm

ENTRY(sys_fork_wrapper)
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
	ldo	TASK_REGS(%r1),%r1
	reg_save %r1
	mfctl	%cr27, %r3
	STREG	%r3, PT_CR27(%r1)

	STREG	%r2,-RP_OFFSET(%r30)
	ldo	FRAME_SIZE(%r30),%r30
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif

	/* These are call-clobbered registers and therefore
	   also syscall-clobbered (we hope). */
	STREG	%r2,PT_GR19(%r1)	/* save for child */
	STREG	%r30,PT_GR21(%r1)

	LDREG	PT_GR30(%r1),%r25
	copy	%r1,%r24
	BL	sys_clone,%r2
	ldi	SIGCHLD,%r26

	LDREG	-RP_OFFSET-FRAME_SIZE(%r30),%r2
wrapper_exit:
	ldo	-FRAME_SIZE(%r30),%r30		/* get the stackframe */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r1	 /* get pt regs */

	LDREG	PT_CR27(%r1), %r3
	mtctl	%r3, %cr27
	reg_restore %r1

	/* strace expects syscall # to be preserved in r20 */
	ldi	__NR_fork,%r20
	bv %r0(%r2)
	STREG	%r20,PT_GR20(%r1)
ENDPROC(sys_fork_wrapper)

	/* Set the return value for the child */
ENTRY(child_return)
	BL	schedule_tail, %r2
	nop

	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE-FRAME_SIZE(%r30), %r1
	LDREG	TASK_PT_GR19(%r1),%r2
	b	wrapper_exit
	copy	%r0,%r28
ENDPROC(child_return)


ENTRY(sys_clone_wrapper)
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r1	/* get pt regs */
	reg_save %r1
	mfctl	%cr27, %r3
	STREG	%r3, PT_CR27(%r1)

	STREG	%r2,-RP_OFFSET(%r30)
	ldo	FRAME_SIZE(%r30),%r30
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif

	/* WARNING - Clobbers r19 and r21, userspace must save these! */
	STREG	%r2,PT_GR19(%r1)	/* save for child */
	STREG	%r30,PT_GR21(%r1)
	BL	sys_clone,%r2
	copy	%r1,%r24

	b	wrapper_exit
	LDREG	-RP_OFFSET-FRAME_SIZE(%r30),%r2
ENDPROC(sys_clone_wrapper)


ENTRY(sys_vfork_wrapper)
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r1	/* get pt regs */
	reg_save %r1
	mfctl	%cr27, %r3
	STREG	%r3, PT_CR27(%r1)

	STREG	%r2,-RP_OFFSET(%r30)
	ldo	FRAME_SIZE(%r30),%r30
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif

	STREG	%r2,PT_GR19(%r1)	/* save for child */
	STREG	%r30,PT_GR21(%r1)

	BL	sys_vfork,%r2
	copy	%r1,%r26

	b	wrapper_exit
	LDREG	-RP_OFFSET-FRAME_SIZE(%r30),%r2
ENDPROC(sys_vfork_wrapper)


	.macro  execve_wrapper execve
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r1	/* get pt regs */

	/*
	 * Do we need to save/restore r3-r18 here?
	 * I don't think so. Why would the new thread need the
	 * old thread's registers?
	 */

	/* %arg0 - %arg3 are already saved for us. */

	STREG %r2,-RP_OFFSET(%r30)
	ldo FRAME_SIZE(%r30),%r30
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif
	BL \execve,%r2
	copy %r1,%arg0

	ldo -FRAME_SIZE(%r30),%r30
	LDREG -RP_OFFSET(%r30),%r2

	/* If exec succeeded we need to load the args */

	ldo -1024(%r0),%r1
	cmpb,>>= %r28,%r1,error_\execve
	copy %r2,%r19

error_\execve:
	bv %r0(%r19)
	nop
	.endm

	.import sys_execve
ENTRY(sys_execve_wrapper)
	execve_wrapper sys_execve
ENDPROC(sys_execve_wrapper)

#ifdef CONFIG_64BIT
	.import sys32_execve
ENTRY(sys32_execve_wrapper)
	execve_wrapper sys32_execve
ENDPROC(sys32_execve_wrapper)
#endif

ENTRY(sys_rt_sigreturn_wrapper)
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26
	ldo	TASK_REGS(%r26),%r26	/* get pt regs */
	/* Don't save regs, we are going to restore them from sigcontext. */
	STREG	%r2, -RP_OFFSET(%r30)
#ifdef CONFIG_64BIT
	ldo	FRAME_SIZE(%r30), %r30
	BL	sys_rt_sigreturn,%r2
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	BL	sys_rt_sigreturn,%r2
	ldo	FRAME_SIZE(%r30), %r30
#endif

	ldo	-FRAME_SIZE(%r30), %r30
	LDREG	-RP_OFFSET(%r30), %r2

	/* FIXME: I think we need to restore a few more things here. */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r1	/* get pt regs */
	reg_restore %r1

	/* If the signal was received while the process was blocked on a
	 * syscall, then r2 will take us to syscall_exit; otherwise r2 will
	 * take us to syscall_exit_rfi and on to intr_return.
	 */
	bv	%r0(%r2)
	LDREG	PT_GR28(%r1),%r28  /* reload original r28 for syscall_exit */
ENDPROC(sys_rt_sigreturn_wrapper)

ENTRY(sys_sigaltstack_wrapper)
	/* Get the user stack pointer */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r24	/* get pt regs */
	LDREG	TASK_PT_GR30(%r24),%r24
	STREG	%r2, -RP_OFFSET(%r30)
#ifdef CONFIG_64BIT
	ldo	FRAME_SIZE(%r30), %r30
	BL	do_sigaltstack,%r2
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	BL	do_sigaltstack,%r2
	ldo	FRAME_SIZE(%r30), %r30
#endif

	ldo	-FRAME_SIZE(%r30), %r30
	LDREG	-RP_OFFSET(%r30), %r2
	bv	%r0(%r2)
	nop
ENDPROC(sys_sigaltstack_wrapper)

#ifdef CONFIG_64BIT
ENTRY(sys32_sigaltstack_wrapper)
	/* Get the user stack pointer */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r24
	LDREG	TASK_PT_GR30(%r24),%r24
	STREG	%r2, -RP_OFFSET(%r30)
	ldo	FRAME_SIZE(%r30), %r30
	BL	do_sigaltstack32,%r2
	ldo	-16(%r30),%r29		/* Reference param save area */

	ldo	-FRAME_SIZE(%r30), %r30
	LDREG	-RP_OFFSET(%r30), %r2
	bv	%r0(%r2)
	nop
ENDPROC(sys32_sigaltstack_wrapper)
#endif

ENTRY(syscall_exit)
	/* NOTE: HP-UX syscalls also come through here
	 * after hpux_syscall_exit fixes up return
	 * values. */

	/* NOTE: Not all syscalls exit this way.  rt_sigreturn will exit
	 * via syscall_exit_rfi if the signal was received while the process
	 * was running.
	 */

	/* save return value now */

	mfctl     %cr30, %r1
	LDREG     TI_TASK(%r1),%r1
	STREG     %r28,TASK_PT_GR28(%r1)

#ifdef CONFIG_HPUX
/* <linux/personality.h> cannot be easily included */
#define PER_HPUX 0x10
	ldw	TASK_PERSONALITY(%r1),%r19

	/* We can't use "CMPIB<> PER_HPUX" since "im5" field is sign extended */
	ldo	  -PER_HPUX(%r19), %r19
	cmpib,COND(<>),n 0,%r19,1f

	/* Save other hpux returns if personality is PER_HPUX */
	STREG     %r22,TASK_PT_GR22(%r1)
	STREG     %r29,TASK_PT_GR29(%r1)
1:

#endif /* CONFIG_HPUX */

	/* Seems to me that dp could be wrong here, if the syscall involved
	 * calling a module, and nothing got round to restoring dp on return.
	 */
	loadgp

syscall_check_resched:

	/* check for reschedule */

	LDREG	TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19	/* long */
	bb,<,n	%r19, 31-TIF_NEED_RESCHED, syscall_do_resched /* forward */

	.import do_signal,code
syscall_check_sig:
	LDREG	TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19
	ldi	(_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK), %r26
	and,COND(<>)	%r19, %r26, %r0
	b,n	syscall_restore	/* skip past if we've nothing to do */

syscall_do_signal:
	/* Save callee-save registers (for sigcontext).
	 * FIXME: After this point the process structure should be
	 * consistent with all the relevant state of the process
	 * before the syscall.  We need to verify this.
	 */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1), %r26		/* struct pt_regs *regs */
	reg_save %r26

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29			/* Reference param save area */
#endif

	BL	do_notify_resume,%r2
	ldi	1, %r25				/* long in_syscall = 1 */

	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1), %r20		/* reload pt_regs */
	reg_restore %r20

	b,n     syscall_check_sig

syscall_restore:
	/* Are we being ptraced? */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1

	ldw	TASK_PTRACE(%r1), %r19
	bb,<	%r19,31,syscall_restore_rfi
	nop

	ldo	TASK_PT_FR31(%r1),%r19		   /* reload fpregs */
	rest_fp	%r19

	LDREG	TASK_PT_SAR(%r1),%r19		   /* restore SAR */
	mtsar	%r19

	LDREG	TASK_PT_GR2(%r1),%r2		   /* restore user rp */
	LDREG	TASK_PT_GR19(%r1),%r19
	LDREG   TASK_PT_GR20(%r1),%r20
	LDREG	TASK_PT_GR21(%r1),%r21
	LDREG	TASK_PT_GR22(%r1),%r22
	LDREG	TASK_PT_GR23(%r1),%r23
	LDREG	TASK_PT_GR24(%r1),%r24
	LDREG	TASK_PT_GR25(%r1),%r25
	LDREG	TASK_PT_GR26(%r1),%r26
	LDREG	TASK_PT_GR27(%r1),%r27	   /* restore user dp */
	LDREG	TASK_PT_GR28(%r1),%r28	   /* syscall return value */
	LDREG	TASK_PT_GR29(%r1),%r29
	LDREG	TASK_PT_GR31(%r1),%r31	   /* restore syscall rp */

	/* NOTE: We use rsm/ssm pair to make this operation atomic */
	rsm     PSW_SM_I, %r0
	LDREG   TASK_PT_GR30(%r1),%r30             /* restore user sp */
	mfsp	%sr3,%r1			   /* Get user's space id */
	mtsp    %r1,%sr7                           /* Restore sr7 */
	ssm     PSW_SM_I, %r0

	/* Set sr2 to zero for userspace syscalls to work. */
	mtsp	%r0,%sr2
	mtsp	%r1,%sr4			   /* Restore sr4 */
	mtsp	%r1,%sr5			   /* Restore sr5 */
	mtsp	%r1,%sr6			   /* Restore sr6 */

	depi	3,31,2,%r31			   /* ensure return to user mode. */

#ifdef CONFIG_64BIT
	/* decide whether to reset the wide mode bit
	 *
	 * For a syscall, the W bit is stored in the lowest bit
	 * of sp.  Extract it and reset W if it is zero */
	extrd,u,*<>	%r30,63,1,%r1
	rsm	PSW_SM_W, %r0
	/* now reset the lowest bit of sp if it was set */
	xor	%r30,%r1,%r30
#endif
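
	/* In C terms (sketch): the syscall entry path borrowed sp's
	 * lowest bit to remember the caller's W (wide) state:
	 *
	 *	if (!(sp & 1))
	 *		psw &= ~PSW_W;	// narrow caller: leave wide mode
	 *	sp &= ~1UL;		// scrub the flag bit
	 */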
	be,n    0(%sr3,%r31)                       /* return to user space */

	/* We have to return via an RFI, so that PSW T and R bits can be set
	 * appropriately.
	 * This sets up pt_regs so we can return via intr_restore, which is not
	 * the most efficient way of doing things, but it works.
	 */
syscall_restore_rfi:
	ldo	-1(%r0),%r2			   /* Set recovery cntr to -1 */
	mtctl	%r2,%cr0			   /*   for immediate trap */
	LDREG	TASK_PT_PSW(%r1),%r2		   /* Get old PSW */
	ldi	0x0b,%r20			   /* Create new PSW */
	depi	-1,13,1,%r20			   /* C, Q, D, and I bits */

	/* The values of PA_SINGLESTEP_BIT and PA_BLOCKSTEP_BIT are
	 * set in include/linux/ptrace.h and converted to PA bitmap
	 * numbers in asm-offsets.c */

	/* if ((%r19.PA_SINGLESTEP_BIT)) { %r20.27=1} */
	extru,=	%r19,PA_SINGLESTEP_BIT,1,%r0
	depi	-1,27,1,%r20			   /* R bit */

	/* if ((%r19.PA_BLOCKSTEP_BIT)) { %r20.7=1} */
	extru,= %r19,PA_BLOCKSTEP_BIT,1,%r0
	depi	-1,7,1,%r20			   /* T bit */

	STREG	%r20,TASK_PT_PSW(%r1)

	/* Always store space registers, since sr3 can be changed (e.g. fork) */

	mfsp    %sr3,%r25
	STREG   %r25,TASK_PT_SR3(%r1)
	STREG   %r25,TASK_PT_SR4(%r1)
	STREG   %r25,TASK_PT_SR5(%r1)
	STREG   %r25,TASK_PT_SR6(%r1)
	STREG   %r25,TASK_PT_SR7(%r1)
	STREG   %r25,TASK_PT_IASQ0(%r1)
	STREG   %r25,TASK_PT_IASQ1(%r1)

	/* XXX W bit??? */
	/* Now if old D bit is clear, it means we didn't save all registers
	 * on syscall entry, so do that now.  This only happens on TRACEME
	 * calls, or if someone attached to us while we were on a syscall.
	 * We could make this more efficient by not saving r3-r18, but
	 * then we wouldn't be able to use the common intr_restore path.
	 * It is only for traced processes anyway, so performance is not
	 * an issue.
	 */
	bb,<	%r2,30,pt_regs_ok		   /* Branch if D set */
	ldo	TASK_REGS(%r1),%r25
	reg_save %r25				   /* Save r3 to r18 */

	/* Save the current sr */
	mfsp	%sr0,%r2
	STREG	%r2,TASK_PT_SR0(%r1)

	/* Save the scratch sr */
	mfsp	%sr1,%r2
	STREG	%r2,TASK_PT_SR1(%r1)

	/* sr2 should be set to zero for userspace syscalls */
	STREG	%r0,TASK_PT_SR2(%r1)

pt_regs_ok:
	LDREG	TASK_PT_GR31(%r1),%r2
	depi	3,31,2,%r2			   /* ensure return to user mode. */
	STREG	%r2,TASK_PT_IAOQ0(%r1)
	ldo	4(%r2),%r2
	STREG	%r2,TASK_PT_IAOQ1(%r1)
	copy	%r25,%r16
	b	intr_restore
	nop

	.import schedule,code
syscall_do_resched:
	BL	schedule,%r2
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	nop
#endif
	b	syscall_check_resched	/* if resched, we start over again */
	nop
ENDPROC(syscall_exit)


get_register:
	/*
	 * get_register is used by the non-access tlb miss handlers to
	 * copy the value of the general register specified in r8 into
	 * r1. This routine can't be used for shadowed registers, since
	 * the rfir will restore the original value. So, for the shadowed
	 * registers we put a -1 into r1 to indicate that the register
	 * should not be used (the register being copied could also have
	 * a -1 in it, but that is OK, it just means that we will have
	 * to use the slow path instead).
	 */
	blr     %r8,%r0
	nop
	bv      %r0(%r25)    /* r0 */
	copy    %r0,%r1
	bv      %r0(%r25)    /* r1 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r2 */
	copy    %r2,%r1
	bv      %r0(%r25)    /* r3 */
	copy    %r3,%r1
	bv      %r0(%r25)    /* r4 */
	copy    %r4,%r1
	bv      %r0(%r25)    /* r5 */
	copy    %r5,%r1
	bv      %r0(%r25)    /* r6 */
	copy    %r6,%r1
	bv      %r0(%r25)    /* r7 */
	copy    %r7,%r1
	bv      %r0(%r25)    /* r8 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r9 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r10 */
	copy    %r10,%r1
	bv      %r0(%r25)    /* r11 */
	copy    %r11,%r1
	bv      %r0(%r25)    /* r12 */
	copy    %r12,%r1
	bv      %r0(%r25)    /* r13 */
	copy    %r13,%r1
	bv      %r0(%r25)    /* r14 */
	copy    %r14,%r1
	bv      %r0(%r25)    /* r15 */
	copy    %r15,%r1
	bv      %r0(%r25)    /* r16 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r17 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r18 */
	copy    %r18,%r1
	bv      %r0(%r25)    /* r19 */
	copy    %r19,%r1
	bv      %r0(%r25)    /* r20 */
	copy    %r20,%r1
	bv      %r0(%r25)    /* r21 */
	copy    %r21,%r1
	bv      %r0(%r25)    /* r22 */
	copy    %r22,%r1
	bv      %r0(%r25)    /* r23 */
	copy    %r23,%r1
	bv      %r0(%r25)    /* r24 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r25 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r26 */
	copy    %r26,%r1
	bv      %r0(%r25)    /* r27 */
	copy    %r27,%r1
	bv      %r0(%r25)    /* r28 */
	copy    %r28,%r1
	bv      %r0(%r25)    /* r29 */
	copy    %r29,%r1
	bv      %r0(%r25)    /* r30 */
	copy    %r30,%r1
	bv      %r0(%r25)    /* r31 */
	copy    %r31,%r1


set_register:
	/*
	 * set_register is used by the non-access tlb miss handlers to
	 * copy the value of r1 into the general register specified in
	 * r8.
	 */
	blr     %r8,%r0
	nop
	bv      %r0(%r25)    /* r0 (silly, but it is a place holder) */
	copy    %r1,%r0
	bv      %r0(%r25)    /* r1 */
	copy    %r1,%r1
	bv      %r0(%r25)    /* r2 */
	copy    %r1,%r2
	bv      %r0(%r25)    /* r3 */
	copy    %r1,%r3
	bv      %r0(%r25)    /* r4 */
	copy    %r1,%r4
	bv      %r0(%r25)    /* r5 */
	copy    %r1,%r5
	bv      %r0(%r25)    /* r6 */
	copy    %r1,%r6
	bv      %r0(%r25)    /* r7 */
	copy    %r1,%r7
	bv      %r0(%r25)    /* r8 */
	copy    %r1,%r8
	bv      %r0(%r25)    /* r9 */
	copy    %r1,%r9
	bv      %r0(%r25)    /* r10 */
	copy    %r1,%r10
	bv      %r0(%r25)    /* r11 */
	copy    %r1,%r11
	bv      %r0(%r25)    /* r12 */
	copy    %r1,%r12
	bv      %r0(%r25)    /* r13 */
	copy    %r1,%r13
	bv      %r0(%r25)    /* r14 */
	copy    %r1,%r14
	bv      %r0(%r25)    /* r15 */
	copy    %r1,%r15
	bv      %r0(%r25)    /* r16 */
	copy    %r1,%r16
	bv      %r0(%r25)    /* r17 */
	copy    %r1,%r17
	bv      %r0(%r25)    /* r18 */
	copy    %r1,%r18
	bv      %r0(%r25)    /* r19 */
	copy    %r1,%r19
	bv      %r0(%r25)    /* r20 */
	copy    %r1,%r20
	bv      %r0(%r25)    /* r21 */
	copy    %r1,%r21
	bv      %r0(%r25)    /* r22 */
	copy    %r1,%r22
	bv      %r0(%r25)    /* r23 */
	copy    %r1,%r23
	bv      %r0(%r25)    /* r24 */
	copy    %r1,%r24
	bv      %r0(%r25)    /* r25 */
	copy    %r1,%r25
	bv      %r0(%r25)    /* r26 */
	copy    %r1,%r26
	bv      %r0(%r25)    /* r27 */
	copy    %r1,%r27
	bv      %r0(%r25)    /* r28 */
	copy    %r1,%r28
	bv      %r0(%r25)    /* r29 */
	copy    %r1,%r29
	bv      %r0(%r25)    /* r30 */
	copy    %r1,%r30
	bv      %r0(%r25)    /* r31 */
	copy    %r1,%r31