xref: /openbmc/linux/arch/parisc/kernel/entry.S (revision 22246614)
1/*
2 * Linux/PA-RISC Project (http://www.parisc-linux.org/)
3 *
4 * kernel entry points (interruptions, system call wrappers)
5 *  Copyright (C) 1999,2000 Philipp Rumpf
6 *  Copyright (C) 1999 SuSE GmbH Nuernberg
7 *  Copyright (C) 2000 Hewlett-Packard (John Marvin)
8 *  Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
9 *
10 *    This program is free software; you can redistribute it and/or modify
11 *    it under the terms of the GNU General Public License as published by
12 *    the Free Software Foundation; either version 2, or (at your option)
13 *    any later version.
14 *
15 *    This program is distributed in the hope that it will be useful,
16 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
17 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18 *    GNU General Public License for more details.
19 *
20 *    You should have received a copy of the GNU General Public License
21 *    along with this program; if not, write to the Free Software
22 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 */
24
25#include <asm/asm-offsets.h>
26
27/* we have the following possibilities to act on an interruption:
28 *  - handle in assembly and use shadowed registers only
29 *  - save registers to kernel stack and handle in assembly or C */
30
31
32#include <asm/psw.h>
33#include <asm/cache.h>		/* for L1_CACHE_SHIFT */
34#include <asm/assembly.h>	/* for LDREG/STREG defines */
35#include <asm/pgtable.h>
36#include <asm/signal.h>
37#include <asm/unistd.h>
38#include <asm/thread_info.h>
39
40#include <linux/linkage.h>
41
42#ifdef CONFIG_64BIT
43#define CMPIB           cmpib,*
44#define CMPB            cmpb,*
45#define COND(x)		*x
46
47	.level 2.0w
48#else
49#define CMPIB           cmpib,
50#define CMPB            cmpb,
51#define COND(x)		x
52
53	.level 2.0
54#endif
55
56	.import         pa_dbit_lock,data
57
58	/* space_to_prot macro creates a prot id from a space id */
59
60#if (SPACEID_SHIFT) == 0
	/* No space-id shift: form the protection id by depositing the
	 * space id high in the register, zeroing the remaining bits. */
61	.macro  space_to_prot spc prot
62	depd,z  \spc,62,31,\prot
63	.endm
64#else
	/* Space ids are shifted: extract the upper SPACEID_SHIFT'd bits
	 * of \spc (zero-extended) to form the protection id. */
65	.macro  space_to_prot spc prot
66	extrd,u \spc,(64 - (SPACEID_SHIFT)),32,\prot
67	.endm
68#endif
69
70	/* Switch to virtual mapping, trashing only %r1 */
71	.macro  virt_map
	/* Switch to virtual mode: zero the kernel space registers,
	 * stash sr7 in sr3 (only if non-zero, i.e. we came from user
	 * space), queue KERNEL_PSW and the address of local label 4:
	 * into the interruption registers, then "return" through rfir
	 * so the PSW/translation change takes effect atomically. */
72	/* pcxt_ssm_bug */
73	rsm	PSW_SM_I, %r0	/* barrier for "Relied upon Translation */
74	mtsp	%r0, %sr4
75	mtsp	%r0, %sr5
76	mfsp	%sr7, %r1
77	or,=    %r0,%r1,%r0	/* Only save sr7 in sr3 if sr7 != 0 */
78	mtsp	%r1, %sr3
79	tovirt_r1 %r29
80	load32	KERNEL_PSW, %r1

81
82	rsm     PSW_SM_QUIET,%r0	/* second "heavy weight" ctl op */
83	mtsp	%r0, %sr6
84	mtsp	%r0, %sr7
85	mtctl	%r0, %cr17	/* Clear IIASQ tail */
86	mtctl	%r0, %cr17	/* Clear IIASQ head */
87	mtctl	%r1, %ipsw
88	load32	4f, %r1
89	mtctl	%r1, %cr18	/* Set IIAOQ tail */
90	ldo	4(%r1), %r1
91	mtctl	%r1, %cr18	/* Set IIAOQ head */
92	rfir			/* resume at 4: below with KERNEL_PSW in effect */
93	nop
944:
95	.endm
96
97	/*
98	 * The "get_stack" macros are responsible for determining the
99	 * kernel stack value.
100	 *
101	 *      If sr7 == 0
102	 *          Already using a kernel stack, so call the
103	 *          get_stack_use_r30 macro to push a pt_regs structure
104	 *          on the stack, and store registers there.
105	 *      else
106	 *          Need to set up a kernel stack, so call the
107	 *          get_stack_use_cr30 macro to set up a pointer
108	 *          to the pt_regs structure contained within the
109	 *          task pointer pointed to by cr30. Set the stack
110	 *          pointer to point to the end of the task structure.
111	 *
112	 * Note that we use shadowed registers for temps until
113	 * we can save %r26 and %r29. %r26 is used to preserve
114	 * %r8 (a shadowed register) which temporarily contained
115	 * either the fault type ("code") or the eirr. We need
116	 * to use a non-shadowed register to carry the value over
117	 * the rfir in virt_map. We use %r26 since this value winds
118	 * up being passed as the argument to either do_cpu_irq_mask
119	 * or handle_interruption. %r29 is used to hold a pointer
120	 * the register save area, and once again, it needs to
121	 * be a non-shadowed register so that it survives the rfir.
122	 *
123	 * N.B. TASK_SZ_ALGN and PT_SZ_ALGN include space for a stack frame.
124	 */
125
126	.macro  get_stack_use_cr30
127
128	/* we save the registers in the task struct */
	/* On exit: %r29 = physical address of the pt_regs area inside the
	 * task struct (with %r30/%r29/%r26 already saved there), and
	 * %r30 = new kernel stack pointer (thread_info + THREAD_SZ_ALGN).
	 * Trashes shadowed temps %r1/%r9 only. */
129
130	mfctl   %cr30, %r1
131	tophys  %r1,%r9
132	LDREG	TI_TASK(%r9), %r1	/* thread_info -> task_struct */
133	tophys  %r1,%r9
134	ldo     TASK_REGS(%r9),%r9
135	STREG   %r30, PT_GR30(%r9)
136	STREG   %r29,PT_GR29(%r9)
137	STREG   %r26,PT_GR26(%r9)
138	copy    %r9,%r29
139	mfctl   %cr30, %r1
140	ldo	THREAD_SZ_ALGN(%r1), %r30
141	.endm
142
143	.macro  get_stack_use_r30
144
145	/* we put a struct pt_regs on the stack and save the registers there */
	/* Already on a kernel stack: carve a pt_regs frame above %r30.
	 * On exit: %r29 = physical address of that frame (old %r30, %r29,
	 * %r26 saved there) and %r30 is advanced by PT_SZ_ALGN. */
146
147	tophys  %r30,%r9
148	STREG   %r30,PT_GR30(%r9)
149	ldo	PT_SZ_ALGN(%r30),%r30
150	STREG   %r29,PT_GR29(%r9)
151	STREG   %r26,PT_GR26(%r9)
152	copy    %r9,%r29
153	.endm
154
155	.macro  rest_stack
	/* Undo get_stack_*: reload %r1 and %r30 from the pt_regs pointed
	 * to by %r29, restoring %r29 itself last since it is the base. */
156	LDREG   PT_GR1(%r29), %r1
157	LDREG   PT_GR30(%r29),%r30
158	LDREG   PT_GR29(%r29),%r29
159	.endm
160
161	/* default interruption handler
162	 * (calls traps.c:handle_interruption) */
163	.macro	def code
	/* The ldi executes in the branch delay slot, so %r8 (a shadowed
	 * register) carries the trap code into intr_save.  Each vector
	 * entry is padded to a 32-byte slot by the .align. */
164	b	intr_save
165	ldi     \code, %r8
166	.align	32
167	.endm
168
169	/* Interrupt interruption handler
170	 * (calls irq.c:do_cpu_irq_mask) */
171	.macro	extint code
	/* Delay slot captures %sr7 into %r16; intr_extint tests it to
	 * decide whether we were already on a kernel stack (sr7 == 0). */
172	b	intr_extint
173	mfsp    %sr7,%r16
174	.align	32
175	.endm
176
177	.import	os_hpmc, code
178
179	/* HPMC handler */
180	.macro	hpmc code
	/* High Priority Machine Check vector slot.  The leading NOP and
	 * the checksum/length words below are fixed up later (per the
	 * existing comments); the slot branches to os_hpmc by its
	 * physical address since translation may be off. */
181	nop			/* must be a NOP, will be patched later */
182	load32	PA(os_hpmc), %r3
183	bv,n	0(%r3)
184	nop
185	.word	0		/* checksum (will be patched) */
186	.word	PA(os_hpmc)	/* address of handler */
187	.word	0		/* length of handler */
188	.endm
189
190	/*
191	 * Performance Note: Instructions will be moved up into
192	 * this part of the code later on, once we are sure
193	 * that the tlb miss handlers are close to final form.
194	 */
195
196	/* Register definitions for tlb miss handler macros */
197
198	va  = r8	/* virtual address for which the trap occurred */
199	spc = r24	/* space for which the trap occurred */
200
201#ifndef CONFIG_64BIT
202
203	/*
204	 * itlb miss interruption handler (parisc 1.1 - 32 bit)
205	 */
206
207	.macro	itlb_11 code
	/* Faulting space/offset come from the instruction address queues;
	 * the second mfctl runs in the branch delay slot. */
208
209	mfctl	%pcsq, spc
210	b	itlb_miss_11
211	mfctl	%pcoq, va
212
213	.align		32
214	.endm
215#endif
216
217	/*
218	 * itlb miss interruption handler (parisc 2.0)
219	 */
220
221	.macro	itlb_20 code
	/* As itlb_11, but dispatching to the wide (20w) handler on 64-bit
	 * kernels; the mfctl of %pcoq sits in the branch delay slot. */
222	mfctl	%pcsq, spc
223#ifdef CONFIG_64BIT
224	b       itlb_miss_20w
225#else
226	b	itlb_miss_20
227#endif
228	mfctl	%pcoq, va
229
230	.align		32
231	.endm
232
233#ifndef CONFIG_64BIT
234	/*
235	 * naitlb miss interruption handler (parisc 1.1 - 32 bit)
236	 *
237	 * Note: naitlb misses will be treated
238	 * as an ordinary itlb miss for now.
239	 * However, note that naitlb misses
240	 * have the faulting address in the
241	 * IOR/ISR.
242	 */
243
244	.macro	naitlb_11 code
	/* Non-access itlb miss: fault address comes from ISR/IOR rather
	 * than the IA queues; otherwise handled as a plain itlb miss. */
245
246	mfctl	%isr,spc
247	b	itlb_miss_11
248	mfctl 	%ior,va
249	/* FIXME: If user causes a naitlb miss, the priv level may not be in
250	 * lower bits of va, where the itlb miss handler is expecting them
251	 */
252
253	.align		32
254	.endm
255#endif
256
257	/*
258	 * naitlb miss interruption handler (parisc 2.0)
259	 *
260	 * Note: naitlb misses will be treated
261	 * as an ordinary itlb miss for now.
262	 * However, note that naitlb misses
263	 * have the faulting address in the
264	 * IOR/ISR.
265	 */
266
267	.macro	naitlb_20 code
	/* Non-access itlb miss (PA 2.0): fault address from ISR/IOR;
	 * dispatched to the same handler as an ordinary itlb miss. */
268
269	mfctl	%isr,spc
270#ifdef CONFIG_64BIT
271	b       itlb_miss_20w
272#else
273	b	itlb_miss_20
274#endif
275	mfctl 	%ior,va
276	/* FIXME: If user causes a naitlb miss, the priv level may not be in
277	 * lower bits of va, where the itlb miss handler is expecting them
278	 */
279
280	.align		32
281	.endm
282
283#ifndef CONFIG_64BIT
284	/*
285	 * dtlb miss interruption handler (parisc 1.1 - 32 bit)
286	 */
287
288	.macro	dtlb_11 code
	/* Data TLB miss: faulting space/address from ISR/IOR; the second
	 * mfctl executes in the branch delay slot. */
289
290	mfctl	%isr, spc
291	b	dtlb_miss_11
292	mfctl	%ior, va
293
294	.align		32
295	.endm
296#endif
297
298	/*
299	 * dtlb miss interruption handler (parisc 2.0)
300	 */
301
302	.macro	dtlb_20 code
	/* Data TLB miss (PA 2.0); wide handler on 64-bit kernels. */
303
304	mfctl	%isr, spc
305#ifdef CONFIG_64BIT
306	b       dtlb_miss_20w
307#else
308	b	dtlb_miss_20
309#endif
310	mfctl	%ior, va
311
312	.align		32
313	.endm
314
315#ifndef CONFIG_64BIT
316	/* nadtlb miss interruption handler (parisc 1.1 - 32 bit) */
317
318	.macro	nadtlb_11 code
	/* Non-access data TLB miss: has its own handler (unlike naitlb,
	 * which is folded into the itlb path). */
319
320	mfctl	%isr,spc
321	b       nadtlb_miss_11
322	mfctl	%ior,va
323
324	.align		32
325	.endm
326#endif
327
328	/* nadtlb miss interruption handler (parisc 2.0) */
329
330	.macro	nadtlb_20 code
	/* Non-access data TLB miss (PA 2.0); wide handler on 64-bit. */
331
332	mfctl	%isr,spc
333#ifdef CONFIG_64BIT
334	b       nadtlb_miss_20w
335#else
336	b       nadtlb_miss_20
337#endif
338	mfctl	%ior,va
339
340	.align		32
341	.endm
342
343#ifndef CONFIG_64BIT
344	/*
345	 * dirty bit trap interruption handler (parisc 1.1 - 32 bit)
346	 */
347
348	.macro	dbit_11 code
	/* TLB dirty-bit trap: taken on a store to a page whose D bit is
	 * not yet set; fault address from ISR/IOR. */
349
350	mfctl	%isr,spc
351	b	dbit_trap_11
352	mfctl	%ior,va
353
354	.align		32
355	.endm
356#endif
357
358	/*
359	 * dirty bit trap interruption handler (parisc 2.0)
360	 */
361
362	.macro	dbit_20 code
	/* TLB dirty-bit trap (PA 2.0); wide handler on 64-bit kernels. */
363
364	mfctl	%isr,spc
365#ifdef CONFIG_64BIT
366	b       dbit_trap_20w
367#else
368	b	dbit_trap_20
369#endif
370	mfctl	%ior,va
371
372	.align		32
373	.endm
374
375	/* The following are simple 32 vs 64 bit instruction
376	 * abstractions for the macros */
377	.macro		EXTR	reg1,start,length,reg2
	/* Unsigned bit-field extract using 32-bit bit positions; on
	 * 64-bit the position is offset by 32 so \start addresses the
	 * same bit of the low word. */
378#ifdef CONFIG_64BIT
379	extrd,u		\reg1,32+\start,\length,\reg2
380#else
381	extrw,u		\reg1,\start,\length,\reg2
382#endif
	.endm
383	.endm
384
385	.macro		DEP	reg1,start,length,reg2
	/* Bit-field deposit using 32-bit bit positions; +32 offset on
	 * 64-bit, mirroring EXTR above. */
386#ifdef CONFIG_64BIT
387	depd		\reg1,32+\start,\length,\reg2
388#else
389	depw		\reg1,\start,\length,\reg2
390#endif
391	.endm
392
393	.macro		DEPI	val,start,length,reg
	/* Immediate-value deposit using 32-bit bit positions; +32 offset
	 * on 64-bit, mirroring EXTR/DEP above. */
394#ifdef CONFIG_64BIT
395	depdi		\val,32+\start,\length,\reg
396#else
397	depwi		\val,\start,\length,\reg
398#endif
399	.endm
400
401	/* In LP64, the space contains part of the upper 32 bits of the
402	 * fault.  We have to extract this and place it in the va,
403	 * zeroing the corresponding bits in the space register */
404	.macro		space_adjust	spc,va,tmp
	/* LP64 only: move the low SPACEID_SHIFT bits of the space id into
	 * the upper part of the va and clear them from \spc (no-op on
	 * 32-bit builds). */
405#ifdef CONFIG_64BIT
406	extrd,u		\spc,63,SPACEID_SHIFT,\tmp
407	depd		%r0,63,SPACEID_SHIFT,\spc
408	depd		\tmp,31,SPACEID_SHIFT,\va
409#endif
410	.endm
411
412	.import		swapper_pg_dir,code
413
414	/* Get the pgd.  For faults on space zero (kernel space), this
415	 * is simply swapper_pg_dir.  For user space faults, the
416	 * pgd is stored in %cr25 */
417	.macro		get_pgd		spc,reg
	/* Load swapper_pg_dir (physical), then the or,COND(=) nullifies
	 * the mfctl when \spc == 0 (kernel space) so the kernel keeps
	 * swapper_pg_dir; user faults overwrite \reg with %cr25. */
418	ldil		L%PA(swapper_pg_dir),\reg
419	ldo		R%PA(swapper_pg_dir)(\reg),\reg
420	or,COND(=)	%r0,\spc,%r0
421	mfctl		%cr25,\reg
422	.endm
423
424	/*
425		space_check(spc,tmp,fault)
426
427		spc - The space we saw the fault with.
428		tmp - The place to store the current space.
429		fault - Function to call on failure.
430
431		Only allow faults on different spaces from the
432		currently active one if we're the kernel
433
434	*/
435	.macro		space_check	spc,tmp,fault
	/* Branch to \fault if \spc differs from the current space (sr7),
	 * except when running as kernel (sr7 == 0): then \tmp is forced
	 * equal to \spc via the nullification chain so the final compare
	 * never takes the fault. */
436	mfsp		%sr7,\tmp
437	or,COND(<>)	%r0,\spc,%r0	/* user may execute gateway page
438					 * as kernel, so defeat the space
439					 * check if it is */
440	copy		\spc,\tmp
441	or,COND(=)	%r0,\tmp,%r0	/* nullify if executing as kernel */
442	cmpb,COND(<>),n	\tmp,\spc,\fault
443	.endm
444
445	/* Look up a PTE in a 2-Level scheme (faulting at each
446	 * level if the entry isn't present
447	 *
448	 * NOTE: we use ldw even for LP64, since the short pointers
449	 * can address up to 1TB
450	 */
451	.macro		L2_ptep	pmd,pte,index,va,fault
	/* Two-level walk: index the pmd/pgd by the va, branch to \fault
	 * if the entry is absent, then index the pte table.  \pte is
	 * pre-zeroed so the fault path sees 0 if we bail early. */
452#if PT_NLEVELS == 3
453	EXTR		\va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
454#else
455	EXTR		\va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
456#endif
457	DEP             %r0,31,PAGE_SHIFT,\pmd  /* clear offset */
458	copy		%r0,\pte
459	ldw,s		\index(\pmd),\pmd
460	bb,>=,n		\pmd,_PxD_PRESENT_BIT,\fault
461	DEP		%r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
462	copy		\pmd,%r9
463	SHLREG		%r9,PxD_VALUE_SHIFT,\pmd
464	EXTR		\va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
465	DEP		%r0,31,PAGE_SHIFT,\pmd  /* clear offset */
466	shladd		\index,BITS_PER_PTE_ENTRY,\pmd,\pmd
467	LDREG		%r0(\pmd),\pte		/* pmd is now pte */
468	bb,>=,n		\pte,_PAGE_PRESENT_BIT,\fault
469	.endm
470
471	/* Look up PTE in a 3-Level scheme.
472	 *
473	 * Here we implement a Hybrid L2/L3 scheme: we allocate the
474	 * first pmd adjacent to the pgd.  This means that we can
475	 * subtract a constant offset to get to it.  The pmd and pgd
476	 * sizes are arranged so that a single pmd covers 4GB (giving
477	 * a full LP64 process access to 8TB) so our lookups are
478	 * effectively L2 for the first 4GB of the kernel (i.e. for
479	 * all ILP32 processes and all the kernel for machines with
480	 * under 4GB of memory) */
481	.macro		L3_ptep pgd,pte,index,va,fault
	/* Hybrid L2/L3 walk (see header comment above).  Each repeated
	 * extrd,u,*= tests the upper va bits and nullifies the following
	 * instruction when they are zero, so for addresses below 4GB the
	 * pgd-level lookup is skipped and the final *<> instead applies
	 * the constant pgd->pmd offset; then fall into the L2 walk. */
482#if PT_NLEVELS == 3 /* we might have a 2-Level scheme, e.g. with 16kb page size */
483	extrd,u		\va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
484	copy		%r0,\pte
485	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
486	ldw,s		\index(\pgd),\pgd
487	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
488	bb,>=,n		\pgd,_PxD_PRESENT_BIT,\fault
489	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
490	shld		\pgd,PxD_VALUE_SHIFT,\index
491	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
492	copy		\index,\pgd
493	extrd,u,*<>	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
494	ldo		ASM_PGD_PMD_OFFSET(\pgd),\pgd
495#endif
496	L2_ptep		\pgd,\pte,\index,\va,\fault
497	.endm
498
499	/* Set the _PAGE_ACCESSED bit of the PTE.  Be clever and
500	 * don't needlessly dirty the cache line if it was already set */
501	.macro		update_ptep	ptep,pte,tmp,tmp1
	/* The and,COND(<>) nullifies the store when _PAGE_ACCESSED is
	 * already set, avoiding a needless cache-line dirty (see the
	 * header comment above). */
502	ldi		_PAGE_ACCESSED,\tmp1
503	or		\tmp1,\pte,\tmp
504	and,COND(<>)	\tmp1,\pte,%r0
505	STREG		\tmp,0(\ptep)
506	.endm
507
508	/* Set the dirty bit (and accessed bit).  No need to be
509	 * clever, this is only used from the dirty fault */
510	.macro		update_dirty	ptep,pte,tmp
	/* Unconditionally set accessed+dirty and write the pte back. */
511	ldi		_PAGE_ACCESSED|_PAGE_DIRTY,\tmp
512	or		\tmp,\pte,\pte
513	STREG		\pte,0(\ptep)
514	.endm
515
516	/* Convert the pte and prot to tlb insertion values.  How
517	 * this happens is quite subtle, read below */
518	.macro		make_insert_tlb	spc,pte,prot
	/* Note on the conditional deposits below: each extrd,u,*= tests a
	 * single PTE bit and nullifies the following deposit when that
	 * bit is 0, so the deposit applies only when the bit is set. */
519	space_to_prot   \spc \prot        /* create prot id from space */
520	/* The following is the real subtlety.  This is depositing
521	 * T <-> _PAGE_REFTRAP
522	 * D <-> _PAGE_DIRTY
523	 * B <-> _PAGE_DMB (memory break)
524	 *
525	 * Then incredible subtlety: The access rights are
526	 * _PAGE_GATEWAY _PAGE_EXEC _PAGE_READ
527	 * See 3-14 of the parisc 2.0 manual
528	 *
529	 * Finally, _PAGE_READ goes in the top bit of PL1 (so we
530	 * trigger an access rights trap in user space if the user
531	 * tries to read an unreadable page */
532	depd            \pte,8,7,\prot
533
534	/* PAGE_USER indicates the page can be read with user privileges,
535	 * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1
536	 * contains _PAGE_READ */
537	extrd,u,*=      \pte,_PAGE_USER_BIT+32,1,%r0
538	depdi		7,11,3,\prot
539	/* If we're a gateway page, drop PL2 back to zero for promotion
540	 * to kernel privilege (so we can execute the page as kernel).
541	 * Any privilege promotion page always denies read and write */
542	extrd,u,*= 	\pte,_PAGE_GATEWAY_BIT+32,1,%r0
543	depd		%r0,11,2,\prot	/* If Gateway, Set PL2 to 0 */
544
545	/* Enforce uncacheable pages.
546	 * This should ONLY be used for MMIO on PA 2.0 machines.
547	 * Memory/DMA is cache coherent on all PA2.0 machines we support
548	 * (that means T-class is NOT supported) and the memory controllers
549	 * on most of those machines only handles cache transactions.
550	 */
551	extrd,u,*=	\pte,_PAGE_NO_CACHE_BIT+32,1,%r0
552	depi		1,12,1,\prot
553
554	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
555	extrd,u		\pte,(63-ASM_PFN_PTE_SHIFT)+(63-58),64-PAGE_SHIFT,\pte
556	depdi		_PAGE_SIZE_ENCODING_DEFAULT,63,63-58,\pte
557	.endm
558
559	/* Identical macro to make_insert_tlb above, except it
560	 * makes the tlb entry for the differently formatted pa11
561	 * insertion instructions */
562	.macro		make_insert_tlb_11	spc,pte,prot
	/* PA 1.1 variant of make_insert_tlb (see above): same bit tests
	 * via extru,= nullification, but 32-bit deposits and the pa11
	 * iitlba-format page address at the end. */
563	zdep		\spc,30,15,\prot
564	dep		\pte,8,7,\prot
565	extru,=		\pte,_PAGE_NO_CACHE_BIT,1,%r0
566	depi		1,12,1,\prot
567	extru,=         \pte,_PAGE_USER_BIT,1,%r0
568	depi		7,11,3,\prot   /* Set for user space (1 rsvd for read) */
569	extru,= 	\pte,_PAGE_GATEWAY_BIT,1,%r0
570	depi		0,11,2,\prot	/* If Gateway, Set PL2 to 0 */
571
572	/* Get rid of prot bits and convert to page addr for iitlba */
573
574	depi		_PAGE_SIZE_ENCODING_DEFAULT,31,ASM_PFN_PTE_SHIFT,\pte
575	extru		\pte,24,25,\pte
576	.endm
577
578	/* This is for ILP32 PA2.0 only.  The TLB insertion needs
579	 * to extend into I/O space if the address is 0xfXXXXXXX
580	 * so we extend the f's into the top word of the pte in
581	 * this case */
582	.macro		f_extend	pte,tmp
	/* The addi,<> nullifies the sign-extension unless the extracted
	 * nibble is 0xf (sign-extends to -1, so -1 + 1 == 0): only
	 * 0xfXXXXXXX addresses get widened into I/O space. */
583	extrd,s		\pte,42,4,\tmp
584	addi,<>		1,\tmp,%r0
585	extrd,s		\pte,63,25,\pte
586	.endm
587
588	/* The alias region is an 8MB aligned 16MB to do clear and
589	 * copy user pages at addresses congruent with the user
590	 * virtual address.
591	 *
592	 * To use the alias page, you set %r26 up with the to TLB
593	 * entry (identifying the physical page) and %r23 up with
594	 * the from tlb entry (or nothing if only a to entry---for
595	 * clear_user_page_asm) */
596	.macro		do_alias	spc,tmp,tmp1,va,pte,prot,fault
	/* Kernel-only (spc must be 0).  Verify \va lies in the tmpalias
	 * window, build a fixed dirty+write+read prot, then pick the
	 * "from" (%r23) or "to" (%r26) TLB entry based on the selector
	 * bit in \va; the or,COND(tr) / or pair acts as a conditional
	 * select paired with the preceding nullifying extract. */
597	cmpib,COND(<>),n 0,\spc,\fault
598	ldil		L%(TMPALIAS_MAP_START),\tmp
599#if defined(CONFIG_64BIT) && (TMPALIAS_MAP_START >= 0x80000000)
600	/* on LP64, ldi will sign extend into the upper 32 bits,
601	 * which is behaviour we don't want */
602	depdi		0,31,32,\tmp
603#endif
604	copy		\va,\tmp1
605	DEPI		0,31,23,\tmp1
606	cmpb,COND(<>),n	\tmp,\tmp1,\fault
607	ldi		(_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ),\prot
608	depd,z		\prot,8,7,\prot
609	/*
610	 * OK, it is in the temp alias region, check whether "from" or "to".
611	 * Check "subtle" note in pacache.S re: r23/r26.
612	 */
613#ifdef CONFIG_64BIT
614	extrd,u,*=	\va,41,1,%r0
615#else
616	extrw,u,=	\va,9,1,%r0
617#endif
618	or,COND(tr)	%r23,%r0,\pte
619	or		%r26,%r0,\pte
620	.endm
621
622
623	/*
624	 * Align fault_vector_20 on 4K boundary so that both
625	 * fault_vector_11 and fault_vector_20 are on the
626	 * same page. This is only necessary as long as we
627	 * write protect the kernel text, which we may stop
628	 * doing once we use large page translations to cover
629	 * the static part of the kernel address space.
630	 */
631
632	.text
633
634	.align	PAGE_SIZE
635
636ENTRY(fault_vector_20)
	/* PA 2.0 interruption vector table: 32 slots, 32 bytes apart.
	 * Named slots: 1 = HPMC, 4 = external interrupt, 6 = ITLB miss,
	 * 15 = DTLB miss, 17 = non-access DTLB miss, 20 = TLB dirty-bit
	 * trap.  All other slots take the generic `def' path into
	 * intr_save (and from there handle_interruption). */
637	/* First vector is invalid (0) */
638	.ascii	"cows can fly"
639	.byte 0
640	.align 32
641
642	hpmc		 1
643	def		 2
644	def		 3
645	extint		 4
646	def		 5
647	itlb_20		 6
648	def		 7
649	def		 8
650	def              9
651	def		10
652	def		11
653	def		12
654	def		13
655	def		14
656	dtlb_20		15
657#if 0
658	naitlb_20	16
659#else
660	def             16
661#endif
662	nadtlb_20	17
663	def		18
664	def		19
665	dbit_20		20
666	def		21
667	def		22
668	def		23
669	def		24
670	def		25
671	def		26
672	def		27
673	def		28
674	def		29
675	def		30
676	def		31
677END(fault_vector_20)
678
679#ifndef CONFIG_64BIT
680
681	.align 2048
682
683ENTRY(fault_vector_11)
	/* PA 1.1 (32-bit only) interruption vector table; same slot
	 * layout as fault_vector_20 above but using the _11 handlers. */
684	/* First vector is invalid (0) */
685	.ascii	"cows can fly"
686	.byte 0
687	.align 32
688
689	hpmc		 1
690	def		 2
691	def		 3
692	extint		 4
693	def		 5
694	itlb_11		 6
695	def		 7
696	def		 8
697	def              9
698	def		10
699	def		11
700	def		12
701	def		13
702	def		14
703	dtlb_11		15
704#if 0
705	naitlb_11	16
706#else
707	def             16
708#endif
709	nadtlb_11	17
710	def		18
711	def		19
712	dbit_11		20
713	def		21
714	def		22
715	def		23
716	def		24
717	def		25
718	def		26
719	def		27
720	def		28
721	def		29
722	def		30
723	def		31
724END(fault_vector_11)
725
726#endif
727
728	.import		handle_interruption,code
729	.import		do_cpu_irq_mask,code
730
731	/*
732	 * r26 = function to be called
733	 * r25 = argument to pass in
734	 * r24 = flags for do_fork()
735	 *
736	 * Kernel threads don't ever return, so they don't need
737	 * a true register context. We just save away the arguments
738	 * for copy_thread/ret_ to properly set up the child.
739	 */
740
741#define CLONE_VM 0x100	/* Must agree with <linux/sched.h> */
742#define CLONE_UNTRACED 0x00800000
743
744	.import do_fork
745ENTRY(__kernel_thread)
	/* Create a kernel thread: r26 = function, r25 = argument,
	 * r24 = extra clone flags (see header comment above).  Stashes
	 * the function/arg in a pt_regs frame on the stack for
	 * copy_thread, forces CLONE_VM|CLONE_UNTRACED, then calls
	 * do_fork; parent returns here, child via ret_from_kernel_thread. */
746	STREG	%r2, -RP_OFFSET(%r30)
747
748	copy	%r30, %r1
749	ldo	PT_SZ_ALGN(%r30),%r30
750#ifdef CONFIG_64BIT
751	/* Yo, function pointers in wide mode are little structs... -PB */
752	ldd	24(%r26), %r2
753	STREG	%r2, PT_GR27(%r1)	/* Store child's %dp */
754	ldd	16(%r26), %r26
755
756	STREG	%r22, PT_GR22(%r1)	/* save r22 (arg5) */
757	copy	%r0, %r22		/* user_tid */
758#endif
759	STREG	%r26, PT_GR26(%r1)  /* Store function & argument for child */
760	STREG	%r25, PT_GR25(%r1)
761	ldil	L%CLONE_UNTRACED, %r26
762	ldo	CLONE_VM(%r26), %r26   /* Force CLONE_VM since only init_mm */
763	or	%r26, %r24, %r26      /* will have kernel mappings.	 */
764	ldi	1, %r25			/* stack_start, signals kernel thread */
765	stw	%r0, -52(%r30)	     	/* user_tid */
766#ifdef CONFIG_64BIT
767	ldo	-16(%r30),%r29		/* Reference param save area */
768#endif
769	BL	do_fork, %r2
770	copy	%r1, %r24		/* pt_regs */
771
772	/* Parent Returns here */
773
774	LDREG	-PT_SZ_ALGN-RP_OFFSET(%r30), %r2
775	ldo	-PT_SZ_ALGN(%r30), %r30
776	bv	%r0(%r2)
777	nop
778ENDPROC(__kernel_thread)
779
780	/*
781	 * Child Returns here
782	 *
783	 * copy_thread moved args from temp save area set up above
784	 * into task save area.
785	 */
786
787ENTRY(ret_from_kernel_thread)
	/* Child side of __kernel_thread: after schedule_tail, reload the
	 * thread function (PT_GR26) and its argument (PT_GR25) from the
	 * task save area, call the function via ble, and if it ever
	 * returns, fall into sys_exit(0). */
788
789	/* Call schedule_tail first though */
790	BL	schedule_tail, %r2
791	nop
792
793	LDREG	TI_TASK-THREAD_SZ_ALGN(%r30), %r1
794	LDREG	TASK_PT_GR25(%r1), %r26
795#ifdef CONFIG_64BIT
796	LDREG	TASK_PT_GR27(%r1), %r27
797	LDREG	TASK_PT_GR22(%r1), %r22
798#endif
799	LDREG	TASK_PT_GR26(%r1), %r1
800	ble	0(%sr7, %r1)
801	copy	%r31, %r2
802
803#ifdef CONFIG_64BIT
804	ldo	-16(%r30),%r29		/* Reference param save area */
805	loadgp				/* Thread could have been in a module */
806#endif
807#ifndef CONFIG_64BIT
808	b	sys_exit
809#else
810	load32	sys_exit, %r1
811	bv	%r0(%r1)
812#endif
813	ldi	0, %r26			/* exit code 0, set in the delay slot */
814ENDPROC(ret_from_kernel_thread)
815
816	.import	sys_execve, code
817ENTRY(__execve)
	/* Kernel-internal execve wrapper: build a pt_regs frame on the
	 * stack (r16 = frame base, r15 = saved rp survive the call),
	 * call sys_execve with the frame as arg0; on success (r28 == 0)
	 * leave via intr_return with the new register context. */
818	copy	%r2, %r15
819	copy	%r30, %r16
820	ldo	PT_SZ_ALGN(%r30), %r30
821	STREG	%r26, PT_GR26(%r16)
822	STREG	%r25, PT_GR25(%r16)
823	STREG	%r24, PT_GR24(%r16)
824#ifdef CONFIG_64BIT
825	ldo	-16(%r30),%r29		/* Reference param save area */
826#endif
827	BL	sys_execve, %r2
828	copy	%r16, %r26
829
830	cmpib,=,n 0,%r28,intr_return    /* forward */
831
832	/* yes, this will trap and die. */
833	copy	%r15, %r2
834	copy	%r16, %r30
835	bv	%r0(%r2)
836	nop
837ENDPROC(__execve)
838
839
840	/*
841	 * struct task_struct *_switch_to(struct task_struct *prev,
842	 *	struct task_struct *next)
843	 *
844	 * switch kernel stacks and return prev */
845ENTRY(_switch_to)
	/* Context switch: r26 = prev task, r25 = next task.  Save
	 * callee-saves and the resume PC/SP into prev, load next's
	 * KPC/KSP, and set cr30 to next's thread_info in the branch
	 * delay slot.  Execution resumes (for next) at _switch_to_ret,
	 * which returns prev in r28. */
846	STREG	 %r2, -RP_OFFSET(%r30)
847
848	callee_save_float
849	callee_save
850
851	load32	_switch_to_ret, %r2
852
853	STREG	%r2, TASK_PT_KPC(%r26)
854	LDREG	TASK_PT_KPC(%r25), %r2
855
856	STREG	%r30, TASK_PT_KSP(%r26)
857	LDREG	TASK_PT_KSP(%r25), %r30
858	LDREG	TASK_THREAD_INFO(%r25), %r25
859	bv	%r0(%r2)
860	mtctl   %r25,%cr30
861
862_switch_to_ret:
863	mtctl	%r0, %cr0		/* Needed for single stepping */
864	callee_rest
865	callee_rest_float
866
867	LDREG	-RP_OFFSET(%r30), %r2
868	bv	%r0(%r2)
869	copy	%r26, %r28		/* return prev (in the delay slot) */
870ENDPROC(_switch_to)
871
872	/*
873	 * Common rfi return path for interruptions, kernel execve, and
874	 * sys_rt_sigreturn (sometimes).  The sys_rt_sigreturn syscall will
875	 * return via this path if the signal was received when the process
876	 * was running; if the process was blocked on a syscall then the
877	 * normal syscall_exit path is used.  All syscalls for traced
878	 * proceses exit via intr_restore.
879	 *
880	 * XXX If any syscalls that change a processes space id ever exit
881	 * this way, then we will need to copy %sr3 in to PT_SR[3..7], and
882	 * adjust IASQ[0..1].
883	 *
884	 */
885
886	.align	PAGE_SIZE
887
888ENTRY(syscall_exit_rfi)
	/* Sanitize the saved context before returning to user space:
	 * r16 = task's pt_regs for the rest of this path. */
889	mfctl   %cr30,%r16
890	LDREG	TI_TASK(%r16), %r16	/* thread_info -> task_struct */
891	ldo	TASK_REGS(%r16),%r16
892	/* Force iaoq to userspace, as the user has had access to our current
893	 * context via sigcontext. Also Filter the PSW for the same reason.
894	 */
	/* depi 3,31,2 forces privilege level 3 (user) into the low two
	 * bits of both IAOQ words. */
895	LDREG	PT_IAOQ0(%r16),%r19
896	depi	3,31,2,%r19
897	STREG	%r19,PT_IAOQ0(%r16)
898	LDREG	PT_IAOQ1(%r16),%r19
899	depi	3,31,2,%r19
900	STREG	%r19,PT_IAOQ1(%r16)
901	LDREG   PT_PSW(%r16),%r19
902	load32	USER_PSW_MASK,%r1
903#ifdef CONFIG_64BIT
904	load32	USER_PSW_HI_MASK,%r20
905	depd    %r20,31,32,%r1
906#endif
907	and     %r19,%r1,%r19 /* Mask out bits that user shouldn't play with */
908	load32	USER_PSW,%r1
909	or      %r19,%r1,%r19 /* Make sure default USER_PSW bits are set */
910	STREG   %r19,PT_PSW(%r16)
911
912	/*
913	 * If we aren't being traced, we never saved space registers
914	 * (we don't store them in the sigcontext), so set them
915	 * to "proper" values now (otherwise we'll wind up restoring
916	 * whatever was last stored in the task structure, which might
917	 * be inconsistent if an interrupt occurred while on the gateway
918	 * page). Note that we may be "trashing" values the user put in
919	 * them, but we don't support the user changing them.
920	 */
	/* sr3 holds the user space id while in the kernel (see the tlb
	 * miss notes below), so it seeds sr0/1/3-7; sr2 is zeroed. */
921
922	STREG   %r0,PT_SR2(%r16)
923	mfsp    %sr3,%r19
924	STREG   %r19,PT_SR0(%r16)
925	STREG   %r19,PT_SR1(%r16)
926	STREG   %r19,PT_SR3(%r16)
927	STREG   %r19,PT_SR4(%r16)
928	STREG   %r19,PT_SR5(%r16)
929	STREG   %r19,PT_SR6(%r16)
930	STREG   %r19,PT_SR7(%r16)
931
932intr_return:
933	/* NOTE: Need to enable interrupts in case we schedule. */
934	ssm     PSW_SM_I, %r0

935
936intr_check_resched:
937
938	/* check for reschedule */
939	mfctl   %cr30,%r1
940	LDREG   TI_FLAGS(%r1),%r19	/* sched.h: TIF_NEED_RESCHED */
941	bb,<,n	%r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */
942
943	.import do_notify_resume,code
944intr_check_sig:
945	/* As above */
946	mfctl   %cr30,%r1
947	LDREG	TI_FLAGS(%r1),%r19
	/* and,COND(<>) nullifies the branch when a signal-related flag
	 * is set, so we only take intr_restore when nothing is pending. */
948	ldi	(_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK), %r20
949	and,COND(<>)	%r19, %r20, %r0
950	b,n	intr_restore	/* skip past if we've nothing to do */
951
952	/* This check is critical to having LWS
953	 * working. The IASQ is zero on the gateway
954	 * page and we cannot deliver any signals until
955	 * we get off the gateway page.
956	 *
957	 * Only do signals if we are returning to user space
958	 */
959	LDREG	PT_IASQ0(%r16), %r20
960	CMPIB=,n 0,%r20,intr_restore /* backward */
961	LDREG	PT_IASQ1(%r16), %r20
962	CMPIB=,n 0,%r20,intr_restore /* backward */
963
964	copy	%r0, %r25			/* long in_syscall = 0 */
965#ifdef CONFIG_64BIT
966	ldo	-16(%r30),%r29			/* Reference param save area */
967#endif
968
969	BL	do_notify_resume,%r2
970	copy	%r16, %r26			/* struct pt_regs *regs */
971
972	b,n	intr_check_sig
973
974intr_restore:
	/* Restore the full register context from the pt_regs in r16 and
	 * return from the interruption via rfi (the reverse of the
	 * virt_map / save_* sequence used on entry). */
975	copy            %r16,%r29
976	ldo             PT_FR31(%r29),%r1
977	rest_fp         %r1
978	rest_general    %r29
979
980	/* inverse of virt_map */
981	pcxt_ssm_bug
982	rsm             PSW_SM_QUIET,%r0	/* prepare for rfi */
983	tophys_r1       %r29
984
985	/* Restore space id's and special cr's from PT_REGS
986	 * structure pointed to by r29
987	 */
988	rest_specials	%r29
989
990	/* IMPORTANT: rest_stack restores r29 last (we are using it)!
991	 * It also restores r1 and r30.
992	 */
993	rest_stack
994
995	rfi
996	nop
997	nop
998	nop
999	nop
1000	nop
1001	nop
1002	nop
1003	nop
1004
1005#ifndef CONFIG_PREEMPT
1006# define intr_do_preempt	intr_restore
1007#endif /* !CONFIG_PREEMPT */
1008
1009	.import schedule,code
1010intr_do_resched:
1011	/* Only call schedule on return to userspace. If we're returning
1012	 * to kernel space, we may schedule if CONFIG_PREEMPT, otherwise
1013	 * we jump back to intr_restore.
1014	 */
	/* IASQ == 0 means we interrupted kernel code (the gateway/kernel
	 * space); route that case to intr_do_preempt, which aliases to
	 * intr_restore when preemption is compiled out. */
1015	LDREG	PT_IASQ0(%r16), %r20
1016	CMPIB=	0, %r20, intr_do_preempt
1017	nop
1018	LDREG	PT_IASQ1(%r16), %r20
1019	CMPIB=	0, %r20, intr_do_preempt
1020	nop
1021
1022#ifdef CONFIG_64BIT
1023	ldo	-16(%r30),%r29		/* Reference param save area */
1024#endif
1025
	/* Call schedule with %r2 aimed at intr_check_sig so it "returns"
	 * there; the final ldo fills the branch delay slot. */
1026	ldil	L%intr_check_sig, %r2
1027#ifndef CONFIG_64BIT
1028	b	schedule
1029#else
1030	load32	schedule, %r20
1031	bv	%r0(%r20)
1032#endif
1033	ldo	R%intr_check_sig(%r2), %r2
1034
1035	/* preempt the current task on returning to kernel
1036	 * mode from an interrupt, iff need_resched is set,
1037	 * and preempt_count is 0. otherwise, we continue on
1038	 * our merry way back to the current running task.
1039	 */
1040#ifdef CONFIG_PREEMPT
1041	.import preempt_schedule_irq,code
1042intr_do_preempt:
1043	rsm	PSW_SM_I, %r0		/* disable interrupts */
1044
1045	/* current_thread_info()->preempt_count */
1046	mfctl	%cr30, %r1
1047	LDREG	TI_PRE_COUNT(%r1), %r19
1048	CMPIB<>	0, %r19, intr_restore	/* if preempt_count > 0 */
1049	nop				/* prev insn branched backwards */
1050
1051	/* check if we interrupted a critical path */
1052	LDREG	PT_PSW(%r16), %r20
1053	bb,<,n	%r20, 31 - PSW_SM_I, intr_restore
1054	nop
1055
1056	BL	preempt_schedule_irq, %r2
1057	nop
1058
1059	b,n	intr_restore		/* ssm PSW_SM_I done by intr_restore */
1060#endif /* CONFIG_PREEMPT */
1061
1062	/*
1063	 * External interrupts.
1064	 */
1065
1066intr_extint:
	/* External interrupt entry (from the extint vector macro, which
	 * left %sr7 in %r16): pick the kernel stack, save state, switch
	 * to virtual mode, then tail-branch to do_cpu_irq_mask with %r2
	 * preloaded so it returns to intr_return. */
1067	CMPIB=,n 0,%r16,1f

1068
1069	get_stack_use_cr30
1070	b,n 2f

1071
10721:
1073	get_stack_use_r30
10742:
1075	save_specials	%r29
1076	virt_map
1077	save_general	%r29

1078
1079	ldo	PT_FR0(%r29), %r24
1080	save_fp	%r24

1081
1082	loadgp

1083
1084	copy	%r29, %r26	/* arg0 is pt_regs */
1085	copy	%r29, %r16	/* save pt_regs */

1086
1087	ldil	L%intr_return, %r2

1088
1089#ifdef CONFIG_64BIT
1090	ldo	-16(%r30),%r29	/* Reference param save area */
1091#endif

1092
1093	b	do_cpu_irq_mask
1094	ldo	R%intr_return(%r2), %r2	/* return to intr_return, not here */
1095ENDPROC(syscall_exit_rfi)
1096
1097
1098	/* Generic interruptions (illegal insn, unaligned, page fault, etc) */
1099
1100ENTRY(intr_save)		/* for os_hpmc */
	/* Generic interruption entry (from the `def' vector macro, which
	 * left the trap code in %r8): choose the kernel stack based on
	 * %sr7, save state (including ISR/IOR unless this is an itlb
	 * miss, code 6), switch to virtual mode, and tail-branch to
	 * handle_interruption(code=%r26, regs=%r25) with %r2 preloaded
	 * to return through intr_check_sig. */
1101	mfsp    %sr7,%r16
1102	CMPIB=,n 0,%r16,1f
1103	get_stack_use_cr30
1104	b	2f
1105	copy    %r8,%r26

1106
11071:
1108	get_stack_use_r30
1109	copy    %r8,%r26

1110
11112:
1112	save_specials	%r29

1113
1114	/* If this trap is a itlb miss, skip saving/adjusting isr/ior */

1115
1116	/*
1117	 * FIXME: 1) Use a #define for the hardwired "6" below (and in
1118	 *           traps.c.
1119	 *        2) Once we start executing code above 4 Gb, we need
1120	 *           to adjust iasq/iaoq here in the same way we
1121	 *           adjust isr/ior below.
1122	 */

1123
1124	CMPIB=,n        6,%r26,skip_save_ior


1125
1126
1127	mfctl           %cr20, %r16 /* isr */
1128	nop		/* serialize mfctl on PA 2.0 to avoid 4 cycle penalty */
1129	mfctl           %cr21, %r17 /* ior */


1130
1131
1132#ifdef CONFIG_64BIT
1133	/*
1134	 * If the interrupted code was running with W bit off (32 bit),
1135	 * clear the b bits (bits 0 & 1) in the ior.
1136	 * save_specials left ipsw value in r8 for us to test.
1137	 */
1138	extrd,u,*<>     %r8,PSW_W_BIT,1,%r0
1139	depdi           0,1,2,%r17

1140
1141	/*
1142	 * FIXME: This code has hardwired assumptions about the split
1143	 *        between space bits and offset bits. This will change
1144	 *        when we allow alternate page sizes.
1145	 */

1146
1147	/* adjust isr/ior. */
1148	extrd,u         %r16,63,SPACEID_SHIFT,%r1	/* get high bits from isr for ior */
1149	depd            %r1,31,SPACEID_SHIFT,%r17	/* deposit them into ior */
1150	depdi           0,63,SPACEID_SHIFT,%r16		/* clear them from isr */
1151#endif
1152	STREG           %r16, PT_ISR(%r29)
1153	STREG           %r17, PT_IOR(%r29)


1154
1155
1156skip_save_ior:
1157	virt_map
1158	save_general	%r29

1159
1160	ldo		PT_FR0(%r29), %r25
1161	save_fp		%r25

1162
1163	loadgp

1164
1165	copy		%r29, %r25	/* arg1 is pt_regs */
1166#ifdef CONFIG_64BIT
1167	ldo		-16(%r30),%r29	/* Reference param save area */
1168#endif

1169
1170	ldil		L%intr_check_sig, %r2
1171	copy		%r25, %r16	/* save pt_regs */

1172
1173	b		handle_interruption
1174	ldo		R%intr_check_sig(%r2), %r2
1175ENDPROC(intr_save)
1176
1177
1178	/*
1179	 * Note for all tlb miss handlers:
1180	 *
1181	 * cr24 contains a pointer to the kernel address space
1182	 * page directory.
1183	 *
1184	 * cr25 contains a pointer to the current user address
1185	 * space page directory.
1186	 *
1187	 * sr3 will contain the space id of the user address space
1188	 * of the current running thread while that thread is
1189	 * running in the kernel.
1190	 */
1191
	/*
	 * register number allocations for the TLB miss handlers below.
	 * Note that these are all in the shadowed registers, so their
	 * values survive the rfir at the end of each fast path.
	 */

	t0 = r1		/* temporary register 0 */
	va = r8		/* virtual address for which the trap occurred */
	t1 = r9		/* temporary register 1 */
	pte  = r16	/* pte/phys page # */
	prot = r17	/* prot bits */
	spc  = r24	/* space for which the trap occurred */
	ptp = r25	/* page directory/page table pointer */
1205#ifdef CONFIG_64BIT
1206
/*
 * Data TLB miss fast path, 64-bit (PA 2.0 wide) kernels.
 * Walks the three-level page table for (spc,va) and inserts the
 * translation with idtlbt, then returns from interruption (rfir).
 * Register roles (t0, va, spc, pte, prot, ptp) are the shadowed
 * register allocations defined above.  On any failure the walker
 * macros branch to dtlb_fault or dtlb_check_alias_20w.
 */
dtlb_miss_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp			/* ptp = page directory for this space */
	space_check	spc,t0,dtlb_fault

	L3_ptep		ptp,pte,t0,va,dtlb_check_alias_20w

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	idtlbt          pte,prot		/* insert data translation (PA 2.0) */

	rfir
	nop
1222
/*
 * Data TLB miss in the temporary-alias region, wide kernels.
 * do_alias builds pte/prot for the alias mapping (falling back to
 * dtlb_fault when the address is not an alias); the translation is
 * then inserted and we return from interruption.
 */
dtlb_check_alias_20w:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault

	idtlbt          pte,prot

	rfir
	nop
1230
/*
 * Non-access data TLB miss fast path, 64-bit (wide) kernels.
 * Same page-table walk as dtlb_miss_20w, but a missing translation
 * goes to nadtlb_check_flush_20w / nadtlb_emulate instead of
 * faulting, since non-access misses come from fdc/fic/pdc/probe-
 * style instructions (see the comment at nadtlb_emulate).
 */
nadtlb_miss_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,nadtlb_fault

	L3_ptep		ptp,pte,t0,va,nadtlb_check_flush_20w

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	idtlbt          pte,prot

	rfir
	nop
1246
/*
 * Non-access miss, wide kernels: if the pte has _PAGE_FLUSH set,
 * insert a "flush only" translation so the flushing instruction can
 * complete; otherwise fall through to instruction emulation.
 */
nadtlb_check_flush_20w:
	bb,>=,n          pte,_PAGE_FLUSH_BIT,nadtlb_emulate

	/* Insert a "flush only" translation */

	depdi,z         7,7,3,prot
	depdi           1,10,1,prot

	/* Get rid of prot bits and convert to page addr for idtlbt */

	depdi		0,63,12,pte
	extrd,u         pte,56,52,pte
	idtlbt          pte,prot

	rfir
	nop
1263
1264#else
1265
/*
 * Data TLB miss fast path, PA 1.1 (32-bit, split TLB).
 * Two-level page-table walk, then a separate address/protection
 * insert (idtlba/idtlbp) performed through %sr1, which is saved and
 * restored around the inserts.
 */
dtlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,dtlb_fault

	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_11

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t0  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t0, %sr1	/* Restore sr1 */

	rfir
	nop
1287
/*
 * PA 1.1 alias check: only kernel space (spc == 0) addresses inside
 * TMPALIAS_MAP_START qualify; anything else takes the normal
 * dtlb_fault slow path.  For alias hits, pick the "from" or "to"
 * page (r23/r26, see pacache.S) based on a va bit and insert a
 * read/write/dirty translation directly.
 */
dtlb_check_alias_11:

	/* Check to see if fault is in the temporary alias region */

	cmpib,<>,n      0,spc,dtlb_fault /* forward */
	ldil            L%(TMPALIAS_MAP_START),t0
	copy            va,t1
	depwi           0,31,23,t1	/* mask off low bits to compare region base */
	cmpb,<>,n       t0,t1,dtlb_fault /* forward */
	ldi             (_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ),prot
	depw,z          prot,8,7,prot

	/*
	 * OK, it is in the temp alias region, check whether "from" or "to".
	 * Check "subtle" note in pacache.S re: r23/r26.
	 */

	extrw,u,=       va,9,1,r0
	or,tr           %r23,%r0,pte    /* If "from" use "from" page */
	or              %r26,%r0,pte    /* else "to", use "to" page  */

	idtlba          pte,(va)
	idtlbp          prot,(va)

	rfir
	nop
1314
/*
 * Non-access data TLB miss fast path, PA 1.1.  Mirrors dtlb_miss_11
 * but a missing translation goes to nadtlb_check_flush_11 rather
 * than faulting (see nadtlb_emulate for why).
 */
nadtlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,nadtlb_fault

	L2_ptep		ptp,pte,t0,va,nadtlb_check_flush_11

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot


	mfsp		%sr1,t0  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t0, %sr1	/* Restore sr1 */

	rfir
	nop
1337
/*
 * Non-access miss, PA 1.1: insert a "flush only" translation when
 * the pte has _PAGE_FLUSH set, otherwise emulate the instruction.
 * 32-bit variant of nadtlb_check_flush_20w (zdepi/depi/extru and
 * idtlba/idtlbp instead of the 64-bit forms).
 */
nadtlb_check_flush_11:
	bb,>=,n          pte,_PAGE_FLUSH_BIT,nadtlb_emulate

	/* Insert a "flush only" translation */

	zdepi           7,7,3,prot
	depi            1,10,1,prot

	/* Get rid of prot bits and convert to page addr for idtlba */

	depi		0,31,12,pte
	extru		pte,24,25,pte

	mfsp		%sr1,t0  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t0, %sr1	/* Restore sr1 */

	rfir
	nop
1361
/*
 * Data TLB miss fast path, PA 2.0 narrow (32-bit kernel on 2.0 CPU).
 * Two-level walk like the 1.1 path, but uses the combined idtlbt
 * insert; f_extend widens the pte before the insert.
 */
dtlb_miss_20:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dtlb_fault

	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_20

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	f_extend	pte,t0

	idtlbt          pte,prot

	rfir
	nop
1379
/*
 * Temporary-alias handling for the PA 2.0 narrow data TLB miss path;
 * same structure as dtlb_check_alias_20w.
 */
dtlb_check_alias_20:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault

	idtlbt          pte,prot

	rfir
	nop
1387
/*
 * Non-access data TLB miss fast path, PA 2.0 narrow.  Missing
 * translations divert to nadtlb_check_flush_20 / nadtlb_emulate
 * instead of faulting.
 */
nadtlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,nadtlb_fault

	L2_ptep		ptp,pte,t0,va,nadtlb_check_flush_20

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	f_extend	pte,t0

        idtlbt          pte,prot

	rfir
	nop
1405
/*
 * Non-access miss, PA 2.0 narrow: insert a "flush only" translation
 * when _PAGE_FLUSH is set, otherwise emulate (nadtlb_emulate).
 */
nadtlb_check_flush_20:
	bb,>=,n          pte,_PAGE_FLUSH_BIT,nadtlb_emulate

	/* Insert a "flush only" translation */

	depdi,z         7,7,3,prot
	depdi           1,10,1,prot

	/* Get rid of prot bits and convert to page addr for idtlbt */

	depdi		0,63,12,pte
	extrd,u         pte,56,32,pte
	idtlbt          pte,prot

	rfir
	nop
1422#endif
1423
nadtlb_emulate:

	/*
	 * Non access misses can be caused by fdc,fic,pdc,lpa,probe and
	 * probei instructions. We don't want to fault for these
	 * instructions (not only does it not make sense, it can cause
	 * deadlocks, since some flushes are done with the mmap
	 * semaphore held). If the translation doesn't exist, we can't
	 * insert a translation, so have to emulate the side effects
	 * of the instruction. Since we don't insert a translation
	 * we can get a lot of faults during a flush loop, so it makes
	 * sense to try to do it here with minimum overhead. We only
	 * emulate fdc,fic,pdc,probew,prober instructions whose base
	 * and index registers are not shadowed. We defer everything
	 * else to the "slow" path.
	 */

	mfctl           %cr19,%r9 /* Get iir (the faulting instruction word) */

	/* PA 2.0 Arch Ref. Book pg 382 has a good description of the insn bits.
	   Checks for fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw */

	/* Checks for fdc,fdce,pdc,"fic,4f" only */
	ldi             0x280,%r16
	and             %r9,%r16,%r17
	cmpb,<>,n       %r16,%r17,nadtlb_probe_check
	bb,>=,n         %r9,26,nadtlb_nullify  /* m bit not set, just nullify */
	/* Emulate base-register modification: base += index.  The BLs
	 * below use %r25 as the return link; -1 from get_register means
	 * the register is shadowed and we must take the slow path. */
	BL		get_register,%r25
	extrw,u         %r9,15,5,%r8           /* Get index register # */
	CMPIB=,n        -1,%r1,nadtlb_fault    /* have to use slow path */
	copy            %r1,%r24
	BL		get_register,%r25
	extrw,u         %r9,10,5,%r8           /* Get base register # */
	CMPIB=,n        -1,%r1,nadtlb_fault    /* have to use slow path */
	BL		set_register,%r25
	add,l           %r1,%r24,%r1           /* doesn't affect c/b bits */

/* Set the PSW N (nullify) bit so the faulting instruction is skipped
 * when we return from the interruption. */
nadtlb_nullify:
	mfctl           %ipsw,%r8
	ldil            L%PSW_N,%r9
	or              %r8,%r9,%r8            /* Set PSW_N */
	mtctl           %r8,%ipsw

	rfir
	nop
1469
1470	/*
1471		When there is no translation for the probe address then we
		must nullify the insn and return zero in the target register.
1473		This will indicate to the calling code that it does not have
1474		write/read privileges to this address.
1475
1476		This should technically work for prober and probew in PA 1.1,
1477		and also probe,r and probe,w in PA 2.0
1478
1479		WARNING: USE ONLY NON-SHADOW REGISTERS WITH PROBE INSN!
1480		THE SLOW-PATH EMULATION HAS NOT BEEN WRITTEN YET.
1481
1482	*/
/*
 * Emulate probe,[rw] with no translation: write zero to the target
 * register (meaning "no access") and nullify the probe instruction.
 */
nadtlb_probe_check:
	ldi             0x80,%r16
	and             %r9,%r16,%r17
	cmpb,<>,n       %r16,%r17,nadtlb_fault /* Must be probe,[rw]*/
	BL              get_register,%r25      /* Find the target register */
	extrw,u         %r9,31,5,%r8           /* Get target register */
	CMPIB=,n        -1,%r1,nadtlb_fault    /* have to use slow path */
	BL		set_register,%r25
	copy            %r0,%r1                /* Write zero to target register */
	b nadtlb_nullify                       /* Nullify return insn */
	nop
1494
1495
1496#ifdef CONFIG_64BIT
/* Instruction TLB miss fast path, 64-bit (wide) kernels. */
itlb_miss_20w:

	/*
	 * I miss is a little different, since we allow users to fault
	 * on the gateway page which is in the kernel address space.
	 */

	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,itlb_fault

	L3_ptep		ptp,pte,t0,va,itlb_fault

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	iitlbt          pte,prot	/* insert instruction translation */

	rfir
	nop
1518
1519#else
1520
/*
 * Instruction TLB miss fast path, PA 1.1: two-level walk, split
 * iitlba/iitlbp insert through %sr1 (saved/restored around it).
 */
itlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,itlb_fault

	L2_ptep		ptp,pte,t0,va,itlb_fault

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t0  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	iitlba		pte,(%sr1,va)
	iitlbp		prot,(%sr1,va)

	mtsp		t0, %sr1	/* Restore sr1 */

	rfir
	nop
1542
/*
 * Instruction TLB miss fast path, PA 2.0 narrow: two-level walk,
 * f_extend the pte, combined iitlbt insert.
 */
itlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,itlb_fault

	L2_ptep		ptp,pte,t0,va,itlb_fault

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	f_extend	pte,t0

	iitlbt          pte,prot

	rfir
	nop
1560
1561#endif
1562
1563#ifdef CONFIG_64BIT
1564
/*
 * TLB dirty-bit trap, 64-bit (wide) kernels: mark the pte dirty
 * (update_dirty) and re-insert the translation.  On SMP, user-space
 * updates (spc != 0) are serialized with pa_dbit_lock, taken with a
 * spin on LDCW and released by storing 1 back into the lock word.
 */
dbit_trap_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dbit_fault

	L3_ptep		ptp,pte,t0,va,dbit_fault

#ifdef CONFIG_SMP
	CMPIB=,n        0,spc,dbit_nolock_20w	/* kernel space: no lock needed */
	load32		PA(pa_dbit_lock),t0

dbit_spin_20w:
	LDCW		0(t0),t1
	cmpib,=         0,t1,dbit_spin_20w	/* 0 = locked, keep spinning */
	nop

dbit_nolock_20w:
#endif
	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	idtlbt          pte,prot
#ifdef CONFIG_SMP
	CMPIB=,n        0,spc,dbit_nounlock_20w
	ldi             1,t1
	stw             t1,0(t0)		/* release pa_dbit_lock */

dbit_nounlock_20w:
#endif

	rfir
	nop
1598#else
1599
/*
 * TLB dirty-bit trap, PA 1.1: same logic as dbit_trap_20w but with
 * a two-level walk and split idtlba/idtlbp insert through %sr1.
 * Note t1 (not t0) is used to save %sr1 here because t0 may hold
 * the pa_dbit_lock address on SMP.
 */
dbit_trap_11:

	get_pgd		spc,ptp

	space_check	spc,t0,dbit_fault

	L2_ptep		ptp,pte,t0,va,dbit_fault

#ifdef CONFIG_SMP
	CMPIB=,n        0,spc,dbit_nolock_11	/* kernel space: no lock needed */
	load32		PA(pa_dbit_lock),t0

dbit_spin_11:
	LDCW		0(t0),t1
	cmpib,=         0,t1,dbit_spin_11	/* 0 = locked, keep spinning */
	nop

dbit_nolock_11:
#endif
	update_dirty	ptp,pte,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp            %sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp            t1, %sr1     /* Restore sr1 */
#ifdef CONFIG_SMP
	CMPIB=,n        0,spc,dbit_nounlock_11
	ldi             1,t1
	stw             t1,0(t0)		/* release pa_dbit_lock */

dbit_nounlock_11:
#endif

	rfir
	nop
1640
/*
 * TLB dirty-bit trap, PA 2.0 narrow: mark pte dirty, f_extend and
 * re-insert with idtlbt.  SMP locking identical to dbit_trap_20w.
 */
dbit_trap_20:
	get_pgd		spc,ptp

	space_check	spc,t0,dbit_fault

	L2_ptep		ptp,pte,t0,va,dbit_fault

#ifdef CONFIG_SMP
	CMPIB=,n        0,spc,dbit_nolock_20	/* kernel space: no lock needed */
	load32		PA(pa_dbit_lock),t0

dbit_spin_20:
	LDCW		0(t0),t1
	cmpib,=         0,t1,dbit_spin_20	/* 0 = locked, keep spinning */
	nop

dbit_nolock_20:
#endif
	update_dirty	ptp,pte,t1

	make_insert_tlb	spc,pte,prot

	f_extend	pte,t1

        idtlbt          pte,prot

#ifdef CONFIG_SMP
	CMPIB=,n        0,spc,dbit_nounlock_20
	ldi             1,t1
	stw             t1,0(t0)		/* release pa_dbit_lock */

dbit_nounlock_20:
#endif

	rfir
	nop
1677#endif
1678
1679	.import handle_interruption,code
1680
/*
 * Slow-path entry points.  Each loads an interruption code into %r8
 * (in the branch delay slot) and enters the common interruption path
 * at intr_save.  The codes must agree with those used in traps.c.
 */
kernel_bad_space:
	b               intr_save
	ldi             31,%r8  /* Use an unused code */

dbit_fault:
	b               intr_save
	ldi             20,%r8	/* TLB dirty bit trap */

itlb_fault:
	b               intr_save
	ldi             6,%r8	/* instruction TLB miss */

nadtlb_fault:
	b               intr_save
	ldi             17,%r8	/* non-access data TLB miss */

dtlb_fault:
	b               intr_save
	ldi             15,%r8	/* data TLB miss */
1700
1701	/* Register saving semantics for system calls:
1702
1703	   %r1		   clobbered by system call macro in userspace
1704	   %r2		   saved in PT_REGS by gateway page
1705	   %r3  - %r18	   preserved by C code (saved by signal code)
1706	   %r19 - %r20	   saved in PT_REGS by gateway page
1707	   %r21 - %r22	   non-standard syscall args
1708			   stored in kernel stack by gateway page
1709	   %r23 - %r26	   arg3-arg0, saved in PT_REGS by gateway page
1710	   %r27 - %r30	   saved in PT_REGS by gateway page
1711	   %r31		   syscall return pointer
1712	 */
1713
1714	/* Floating point registers (FIXME: what do we do with these?)
1715
1716	   %fr0  - %fr3	   status/exception, not preserved
1717	   %fr4  - %fr7	   arguments
1718	   %fr8	 - %fr11   not preserved by C code
1719	   %fr12 - %fr21   preserved by C code
1720	   %fr22 - %fr31   not preserved by C code
1721	 */
1722
	/*
	 * reg_save: store the callee-saved registers %r3-%r18 into the
	 * pt_regs area pointed to by \regs.  Used before calling into C
	 * code that may need a complete register set (e.g. for signal
	 * delivery).  Counterpart of reg_restore below.
	 */
	.macro	reg_save regs
	STREG	%r3, PT_GR3(\regs)
	STREG	%r4, PT_GR4(\regs)
	STREG	%r5, PT_GR5(\regs)
	STREG	%r6, PT_GR6(\regs)
	STREG	%r7, PT_GR7(\regs)
	STREG	%r8, PT_GR8(\regs)
	STREG	%r9, PT_GR9(\regs)
	STREG   %r10,PT_GR10(\regs)
	STREG   %r11,PT_GR11(\regs)
	STREG   %r12,PT_GR12(\regs)
	STREG   %r13,PT_GR13(\regs)
	STREG   %r14,PT_GR14(\regs)
	STREG   %r15,PT_GR15(\regs)
	STREG   %r16,PT_GR16(\regs)
	STREG   %r17,PT_GR17(\regs)
	STREG   %r18,PT_GR18(\regs)
	.endm
1741
	/*
	 * reg_restore: reload %r3-%r18 from the pt_regs area pointed to
	 * by \regs.  Inverse of reg_save above.
	 */
	.macro	reg_restore regs
	LDREG	PT_GR3(\regs), %r3
	LDREG	PT_GR4(\regs), %r4
	LDREG	PT_GR5(\regs), %r5
	LDREG	PT_GR6(\regs), %r6
	LDREG	PT_GR7(\regs), %r7
	LDREG	PT_GR8(\regs), %r8
	LDREG	PT_GR9(\regs), %r9
	LDREG   PT_GR10(\regs),%r10
	LDREG   PT_GR11(\regs),%r11
	LDREG   PT_GR12(\regs),%r12
	LDREG   PT_GR13(\regs),%r13
	LDREG   PT_GR14(\regs),%r14
	LDREG   PT_GR15(\regs),%r15
	LDREG   PT_GR16(\regs),%r16
	LDREG   PT_GR17(\regs),%r17
	LDREG   PT_GR18(\regs),%r18
	.endm
1760
/*
 * fork() syscall wrapper: saves callee-saved registers and %cr27
 * into the task's pt_regs, then implements fork as
 * sys_clone(SIGCHLD, child_sp = parent's PT_GR30, regs).
 * Return path (wrapper_exit) is shared with the clone/vfork wrappers.
 */
ENTRY(sys_fork_wrapper)
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
	ldo	TASK_REGS(%r1),%r1
	reg_save %r1
	mfctl	%cr27, %r3
	STREG	%r3, PT_CR27(%r1)

	STREG	%r2,-RP_OFFSET(%r30)
	ldo	FRAME_SIZE(%r30),%r30
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif

	/* These are call-clobbered registers and therefore
	   also syscall-clobbered (we hope). */
	STREG	%r2,PT_GR19(%r1)	/* save for child */
	STREG	%r30,PT_GR21(%r1)

	LDREG	PT_GR30(%r1),%r25
	copy	%r1,%r24
	BL	sys_clone,%r2
	ldi	SIGCHLD,%r26		/* arg0 = SIGCHLD (delay slot) */

	LDREG	-RP_OFFSET-FRAME_SIZE(%r30),%r2
wrapper_exit:
	ldo	-FRAME_SIZE(%r30),%r30		/* get the stackframe */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r1	 /* get pt regs */

	LDREG	PT_CR27(%r1), %r3
	mtctl	%r3, %cr27
	reg_restore %r1

	/* strace expects syscall # to be preserved in r20 */
	ldi	__NR_fork,%r20
	bv %r0(%r2)
	STREG	%r20,PT_GR20(%r1)
ENDPROC(sys_fork_wrapper)
1799
1800	/* Set the return value for the child */
/*
 * First code run by a newly forked child: finish the context switch
 * (schedule_tail), reload the return pointer the parent stashed in
 * PT_GR19, and exit through wrapper_exit with a zero return value
 * in %r28 (placed in the branch delay slot).
 */
ENTRY(child_return)
	BL	schedule_tail, %r2
	nop

	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE-FRAME_SIZE(%r30), %r1
	LDREG	TASK_PT_GR19(%r1),%r2
	b	wrapper_exit
	copy	%r0,%r28		/* child returns 0 */
ENDPROC(child_return)
1810
1811
/*
 * clone() syscall wrapper: save callee-saved registers and %cr27 to
 * pt_regs, pass the pt_regs pointer as an extra argument (%r24) to
 * sys_clone, then return through the common wrapper_exit path.
 */
ENTRY(sys_clone_wrapper)
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r1	/* get pt regs */
	reg_save %r1
	mfctl	%cr27, %r3
	STREG	%r3, PT_CR27(%r1)

	STREG	%r2,-RP_OFFSET(%r30)
	ldo	FRAME_SIZE(%r30),%r30
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif

	/* WARNING - Clobbers r19 and r21, userspace must save these! */
	STREG	%r2,PT_GR19(%r1)	/* save for child */
	STREG	%r30,PT_GR21(%r1)
	BL	sys_clone,%r2
	copy	%r1,%r24		/* extra arg: pt_regs (delay slot) */

	b	wrapper_exit
	LDREG	-RP_OFFSET-FRAME_SIZE(%r30),%r2
ENDPROC(sys_clone_wrapper)
1834
1835
/*
 * vfork() syscall wrapper: like the fork wrapper but calls sys_vfork
 * with the pt_regs pointer as its first argument (%r26); exits
 * through the common wrapper_exit path.
 */
ENTRY(sys_vfork_wrapper)
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r1	/* get pt regs */
	reg_save %r1
	mfctl	%cr27, %r3
	STREG	%r3, PT_CR27(%r1)

	STREG	%r2,-RP_OFFSET(%r30)
	ldo	FRAME_SIZE(%r30),%r30
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif

	STREG	%r2,PT_GR19(%r1)	/* save for child */
	STREG	%r30,PT_GR21(%r1)

	BL	sys_vfork,%r2
	copy	%r1,%r26		/* arg0 = pt_regs (delay slot) */

	b	wrapper_exit
	LDREG	-RP_OFFSET-FRAME_SIZE(%r30),%r2
ENDPROC(sys_vfork_wrapper)
1858
1859
	/*
	 * execve_wrapper: common body for the sys_execve / sys32_execve
	 * wrappers.  Passes the task's pt_regs as the first argument to
	 * \execve and returns via the saved rp.  Error detection follows
	 * the usual "return value in [-1024, -1] means -errno" convention.
	 */
	.macro  execve_wrapper execve
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r1	/* get pt regs */

	/*
	 * Do we need to save/restore r3-r18 here?
	 * I don't think so. why would new thread need old
	 * threads registers?
	 */

	/* %arg0 - %arg3 are already saved for us. */

	STREG %r2,-RP_OFFSET(%r30)
	ldo FRAME_SIZE(%r30),%r30
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif
	BL \execve,%r2
	copy %r1,%arg0			/* arg0 = pt_regs (delay slot) */

	ldo -FRAME_SIZE(%r30),%r30
	LDREG -RP_OFFSET(%r30),%r2

	/* If exec succeeded we need to load the args */

	ldo -1024(%r0),%r1
	cmpb,>>= %r28,%r1,error_\execve
	copy %r2,%r19			/* delay slot: %r19 = return address */

error_\execve:
	bv %r0(%r19)
	nop
	.endm
1893
1894	.import sys_execve
/* execve() syscall entry point; see execve_wrapper above. */
ENTRY(sys_execve_wrapper)
	execve_wrapper sys_execve
ENDPROC(sys_execve_wrapper)
1898
1899#ifdef CONFIG_64BIT
1900	.import sys32_execve
/* 32-bit-compat execve() entry point for 64-bit kernels. */
ENTRY(sys32_execve_wrapper)
	execve_wrapper sys32_execve
ENDPROC(sys32_execve_wrapper)
1904#endif
1905
/*
 * rt_sigreturn() wrapper: calls sys_rt_sigreturn(regs, in_syscall)
 * with the task's pt_regs, then restores the callee-saved registers
 * from the (now signal-context-updated) pt_regs before returning.
 */
ENTRY(sys_rt_sigreturn_wrapper)
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26
	ldo	TASK_REGS(%r26),%r26	/* get pt regs */
	/* Don't save regs, we are going to restore them from sigcontext. */
	STREG	%r2, -RP_OFFSET(%r30)
#ifdef CONFIG_64BIT
	ldo	FRAME_SIZE(%r30), %r30
	BL	sys_rt_sigreturn,%r2
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	BL	sys_rt_sigreturn,%r2
	ldo	FRAME_SIZE(%r30), %r30
#endif

	ldo	-FRAME_SIZE(%r30), %r30
	LDREG	-RP_OFFSET(%r30), %r2

	/* FIXME: I think we need to restore a few more things here. */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r1	/* get pt regs */
	reg_restore %r1

	/* If the signal was received while the process was blocked on a
	 * syscall, then r2 will take us to syscall_exit; otherwise r2 will
	 * take us to syscall_exit_rfi and on to intr_return.
	 */
	bv	%r0(%r2)
	LDREG	PT_GR28(%r1),%r28  /* reload original r28 for syscall_exit */
ENDPROC(sys_rt_sigreturn_wrapper)
1935
/*
 * sigaltstack() wrapper: fetches the user stack pointer from the
 * task's pt_regs (PT_GR30) and passes it as the third argument to
 * do_sigaltstack(uss, uoss, usp).
 */
ENTRY(sys_sigaltstack_wrapper)
	/* Get the user stack pointer */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r24	/* get pt regs */
	LDREG	TASK_PT_GR30(%r24),%r24
	STREG	%r2, -RP_OFFSET(%r30)
#ifdef CONFIG_64BIT
	ldo	FRAME_SIZE(%r30), %r30
	BL	do_sigaltstack,%r2
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	BL	do_sigaltstack,%r2
	ldo	FRAME_SIZE(%r30), %r30
#endif

	ldo	-FRAME_SIZE(%r30), %r30
	LDREG	-RP_OFFSET(%r30), %r2
	bv	%r0(%r2)
	nop
ENDPROC(sys_sigaltstack_wrapper)
1956
1957#ifdef CONFIG_64BIT
/*
 * 32-bit-compat sigaltstack() wrapper for 64-bit kernels; passes the
 * user stack pointer (PT_GR30) to do_sigaltstack32.
 */
ENTRY(sys32_sigaltstack_wrapper)
	/* Get the user stack pointer */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r24
	LDREG	TASK_PT_GR30(%r24),%r24
	STREG	%r2, -RP_OFFSET(%r30)
	ldo	FRAME_SIZE(%r30), %r30
	BL	do_sigaltstack32,%r2
	ldo	-16(%r30),%r29		/* Reference param save area */

	ldo	-FRAME_SIZE(%r30), %r30
	LDREG	-RP_OFFSET(%r30), %r2
	bv	%r0(%r2)
	nop
ENDPROC(sys32_sigaltstack_wrapper)
1972#endif
1973
/*
 * Common syscall exit path: store the return value, check for
 * reschedule and pending signals, then restore user state and return
 * either with a fast branch-external (untraced) or via an RFI through
 * intr_restore (traced, so PSW T/R bits can be set).
 */
ENTRY(syscall_exit)
	/* NOTE: HP-UX syscalls also come through here
	 * after hpux_syscall_exit fixes up return
	 * values. */

	/* NOTE: Not all syscalls exit this way.  rt_sigreturn will exit
	 * via syscall_exit_rfi if the signal was received while the process
	 * was running.
	 */

	/* save return value now */

	mfctl     %cr30, %r1
	LDREG     TI_TASK(%r1),%r1
	STREG     %r28,TASK_PT_GR28(%r1)

#ifdef CONFIG_HPUX
/* <linux/personality.h> cannot be easily included */
#define PER_HPUX 0x10
	ldw	TASK_PERSONALITY(%r1),%r19

	/* We can't use "CMPIB<> PER_HPUX" since "im5" field is sign extended */
	ldo	  -PER_HPUX(%r19), %r19
	CMPIB<>,n 0,%r19,1f

	/* Save other hpux returns if personality is PER_HPUX */
	STREG     %r22,TASK_PT_GR22(%r1)
	STREG     %r29,TASK_PT_GR29(%r1)
1:

#endif /* CONFIG_HPUX */

	/* Seems to me that dp could be wrong here, if the syscall involved
	 * calling a module, and nothing got round to restoring dp on return.
	 */
	loadgp

syscall_check_resched:

	/* check for reschedule */

	LDREG	TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19	/* long */
	bb,<,n	%r19, 31-TIF_NEED_RESCHED, syscall_do_resched /* forward */

	.import do_signal,code
syscall_check_sig:
	LDREG	TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19
	ldi	(_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK), %r26
	and,COND(<>)	%r19, %r26, %r0
	b,n	syscall_restore	/* skip past if we've nothing to do */

syscall_do_signal:
	/* Save callee-save registers (for sigcontext).
	 * FIXME: After this point the process structure should be
	 * consistent with all the relevant state of the process
	 * before the syscall.  We need to verify this.
	 */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1), %r26		/* struct pt_regs *regs */
	reg_save %r26

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29			/* Reference param save area */
#endif

	BL	do_notify_resume,%r2
	ldi	1, %r25				/* long in_syscall = 1 */

	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1), %r20		/* reload pt_regs */
	reg_restore %r20

	b,n     syscall_check_sig		/* recheck for more pending signals */

syscall_restore:
	/* Are we being ptraced? */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1

	ldw	TASK_PTRACE(%r1), %r19
	bb,<	%r19,31,syscall_restore_rfi	/* traced: must exit via RFI */
	nop

	ldo	TASK_PT_FR31(%r1),%r19		   /* reload fpregs */
	rest_fp	%r19

	LDREG	TASK_PT_SAR(%r1),%r19		   /* restore SAR */
	mtsar	%r19

	LDREG	TASK_PT_GR2(%r1),%r2		   /* restore user rp */
	LDREG	TASK_PT_GR19(%r1),%r19
	LDREG   TASK_PT_GR20(%r1),%r20
	LDREG	TASK_PT_GR21(%r1),%r21
	LDREG	TASK_PT_GR22(%r1),%r22
	LDREG	TASK_PT_GR23(%r1),%r23
	LDREG	TASK_PT_GR24(%r1),%r24
	LDREG	TASK_PT_GR25(%r1),%r25
	LDREG	TASK_PT_GR26(%r1),%r26
	LDREG	TASK_PT_GR27(%r1),%r27	   /* restore user dp */
	LDREG	TASK_PT_GR28(%r1),%r28	   /* syscall return value */
	LDREG	TASK_PT_GR29(%r1),%r29
	LDREG	TASK_PT_GR31(%r1),%r31	   /* restore syscall rp */

	/* NOTE: We use rsm/ssm pair to make this operation atomic */
	rsm     PSW_SM_I, %r0
	LDREG   TASK_PT_GR30(%r1),%r30             /* restore user sp */
	mfsp	%sr3,%r1			   /* Get users space id */
	mtsp    %r1,%sr7                           /* Restore sr7 */
	ssm     PSW_SM_I, %r0

	/* Set sr2 to zero for userspace syscalls to work. */
	mtsp	%r0,%sr2
	mtsp	%r1,%sr4			   /* Restore sr4 */
	mtsp	%r1,%sr5			   /* Restore sr5 */
	mtsp	%r1,%sr6			   /* Restore sr6 */

	depi	3,31,2,%r31			   /* ensure return to user mode. */

#ifdef CONFIG_64BIT
	/* decide whether to reset the wide mode bit
	 *
	 * For a syscall, the W bit is stored in the lowest bit
	 * of sp.  Extract it and reset W if it is zero */
	extrd,u,*<>	%r30,63,1,%r1
	rsm	PSW_SM_W, %r0
	/* now reset the lowest bit of sp if it was set */
	xor	%r30,%r1,%r30
#endif
	be,n    0(%sr3,%r31)                       /* return to user space */

	/* We have to return via an RFI, so that PSW T and R bits can be set
	 * appropriately.
	 * This sets up pt_regs so we can return via intr_restore, which is not
	 * the most efficient way of doing things, but it works.
	 */
syscall_restore_rfi:
	ldo	-1(%r0),%r2			   /* Set recovery cntr to -1 */
	mtctl	%r2,%cr0			   /*   for immediate trap */
	LDREG	TASK_PT_PSW(%r1),%r2		   /* Get old PSW */
	ldi	0x0b,%r20			   /* Create new PSW */
	depi	-1,13,1,%r20			   /* C, Q, D, and I bits */

	/* The values of PA_SINGLESTEP_BIT and PA_BLOCKSTEP_BIT are
	 * set in include/linux/ptrace.h and converted to PA bitmap
	 * numbers in asm-offsets.c */

	/* if ((%r19.PA_SINGLESTEP_BIT)) { %r20.27=1} */
	extru,=	%r19,PA_SINGLESTEP_BIT,1,%r0
	depi	-1,27,1,%r20			   /* R bit */

	/* if ((%r19.PA_BLOCKSTEP_BIT)) { %r20.7=1} */
	extru,= %r19,PA_BLOCKSTEP_BIT,1,%r0
	depi	-1,7,1,%r20			   /* T bit */

	STREG	%r20,TASK_PT_PSW(%r1)

	/* Always store space registers, since sr3 can be changed (e.g. fork) */

	mfsp    %sr3,%r25
	STREG   %r25,TASK_PT_SR3(%r1)
	STREG   %r25,TASK_PT_SR4(%r1)
	STREG   %r25,TASK_PT_SR5(%r1)
	STREG   %r25,TASK_PT_SR6(%r1)
	STREG   %r25,TASK_PT_SR7(%r1)
	STREG   %r25,TASK_PT_IASQ0(%r1)
	STREG   %r25,TASK_PT_IASQ1(%r1)

	/* XXX W bit??? */
	/* Now if old D bit is clear, it means we didn't save all registers
	 * on syscall entry, so do that now.  This only happens on TRACEME
	 * calls, or if someone attached to us while we were on a syscall.
	 * We could make this more efficient by not saving r3-r18, but
	 * then we wouldn't be able to use the common intr_restore path.
	 * It is only for traced processes anyway, so performance is not
	 * an issue.
	 */
	bb,<	%r2,30,pt_regs_ok		   /* Branch if D set */
	ldo	TASK_REGS(%r1),%r25
	reg_save %r25				   /* Save r3 to r18 */

	/* Save the current sr */
	mfsp	%sr0,%r2
	STREG	%r2,TASK_PT_SR0(%r1)

	/* Save the scratch sr */
	mfsp	%sr1,%r2
	STREG	%r2,TASK_PT_SR1(%r1)

	/* sr2 should be set to zero for userspace syscalls */
	STREG	%r0,TASK_PT_SR2(%r1)

pt_regs_ok:
	LDREG	TASK_PT_GR31(%r1),%r2
	depi	3,31,2,%r2			   /* ensure return to user mode. */
	STREG	%r2,TASK_PT_IAOQ0(%r1)
	ldo	4(%r2),%r2
	STREG	%r2,TASK_PT_IAOQ1(%r1)
	copy	%r25,%r16
	b	intr_restore
	nop

	.import schedule,code
syscall_do_resched:
	BL	schedule,%r2
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	nop
#endif
	b	syscall_check_resched	/* if resched, we start over again */
	nop
ENDPROC(syscall_exit)
2185
2186
get_register:
	/*
	 * get_register is used by the non access tlb miss handlers to
	 * copy the value of the general register specified in r8 into
	 * r1. This routine can't be used for shadowed registers, since
	 * the rfir will restore the original value. So, for the shadowed
	 * registers we put a -1 into r1 to indicate that the register
	 * should not be used (the register being copied could also have
	 * a -1 in it, but that is OK, it just means that we will have
	 * to use the slow path instead).
	 *
	 * Dispatch: blr branches into the table below indexed by %r8
	 * (two instructions per register); each entry returns through
	 * the link register %r25 set up by the caller's BL.
	 */
	blr     %r8,%r0
	nop
	bv      %r0(%r25)    /* r0 */
	copy    %r0,%r1
	bv      %r0(%r25)    /* r1 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r2 */
	copy    %r2,%r1
	bv      %r0(%r25)    /* r3 */
	copy    %r3,%r1
	bv      %r0(%r25)    /* r4 */
	copy    %r4,%r1
	bv      %r0(%r25)    /* r5 */
	copy    %r5,%r1
	bv      %r0(%r25)    /* r6 */
	copy    %r6,%r1
	bv      %r0(%r25)    /* r7 */
	copy    %r7,%r1
	bv      %r0(%r25)    /* r8 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r9 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r10 */
	copy    %r10,%r1
	bv      %r0(%r25)    /* r11 */
	copy    %r11,%r1
	bv      %r0(%r25)    /* r12 */
	copy    %r12,%r1
	bv      %r0(%r25)    /* r13 */
	copy    %r13,%r1
	bv      %r0(%r25)    /* r14 */
	copy    %r14,%r1
	bv      %r0(%r25)    /* r15 */
	copy    %r15,%r1
	bv      %r0(%r25)    /* r16 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r17 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r18 */
	copy    %r18,%r1
	bv      %r0(%r25)    /* r19 */
	copy    %r19,%r1
	bv      %r0(%r25)    /* r20 */
	copy    %r20,%r1
	bv      %r0(%r25)    /* r21 */
	copy    %r21,%r1
	bv      %r0(%r25)    /* r22 */
	copy    %r22,%r1
	bv      %r0(%r25)    /* r23 */
	copy    %r23,%r1
	bv      %r0(%r25)    /* r24 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r25 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r26 */
	copy    %r26,%r1
	bv      %r0(%r25)    /* r27 */
	copy    %r27,%r1
	bv      %r0(%r25)    /* r28 */
	copy    %r28,%r1
	bv      %r0(%r25)    /* r29 */
	copy    %r29,%r1
	bv      %r0(%r25)    /* r30 */
	copy    %r30,%r1
	bv      %r0(%r25)    /* r31 */
	copy    %r31,%r1
2264
2265
set_register:
	/*
	 * set_register is used by the non access tlb miss handlers to
	 * copy the value of r1 into the general register specified in
	 * r8.
	 *
	 * Same blr-dispatch structure as get_register above: two
	 * instructions per register, returning through %r25.  Callers
	 * are expected to have rejected shadowed registers already via
	 * get_register's -1 result.
	 */
	blr     %r8,%r0
	nop
	bv      %r0(%r25)    /* r0 (silly, but it is a place holder) */
	copy    %r1,%r0
	bv      %r0(%r25)    /* r1 */
	copy    %r1,%r1
	bv      %r0(%r25)    /* r2 */
	copy    %r1,%r2
	bv      %r0(%r25)    /* r3 */
	copy    %r1,%r3
	bv      %r0(%r25)    /* r4 */
	copy    %r1,%r4
	bv      %r0(%r25)    /* r5 */
	copy    %r1,%r5
	bv      %r0(%r25)    /* r6 */
	copy    %r1,%r6
	bv      %r0(%r25)    /* r7 */
	copy    %r1,%r7
	bv      %r0(%r25)    /* r8 */
	copy    %r1,%r8
	bv      %r0(%r25)    /* r9 */
	copy    %r1,%r9
	bv      %r0(%r25)    /* r10 */
	copy    %r1,%r10
	bv      %r0(%r25)    /* r11 */
	copy    %r1,%r11
	bv      %r0(%r25)    /* r12 */
	copy    %r1,%r12
	bv      %r0(%r25)    /* r13 */
	copy    %r1,%r13
	bv      %r0(%r25)    /* r14 */
	copy    %r1,%r14
	bv      %r0(%r25)    /* r15 */
	copy    %r1,%r15
	bv      %r0(%r25)    /* r16 */
	copy    %r1,%r16
	bv      %r0(%r25)    /* r17 */
	copy    %r1,%r17
	bv      %r0(%r25)    /* r18 */
	copy    %r1,%r18
	bv      %r0(%r25)    /* r19 */
	copy    %r1,%r19
	bv      %r0(%r25)    /* r20 */
	copy    %r1,%r20
	bv      %r0(%r25)    /* r21 */
	copy    %r1,%r21
	bv      %r0(%r25)    /* r22 */
	copy    %r1,%r22
	bv      %r0(%r25)    /* r23 */
	copy    %r1,%r23
	bv      %r0(%r25)    /* r24 */
	copy    %r1,%r24
	bv      %r0(%r25)    /* r25 */
	copy    %r1,%r25
	bv      %r0(%r25)    /* r26 */
	copy    %r1,%r26
	bv      %r0(%r25)    /* r27 */
	copy    %r1,%r27
	bv      %r0(%r25)    /* r28 */
	copy    %r1,%r28
	bv      %r0(%r25)    /* r29 */
	copy    %r1,%r29
	bv      %r0(%r25)    /* r30 */
	copy    %r1,%r30
	bv      %r0(%r25)    /* r31 */
	copy    %r1,%r31
2338
2339