#include <asm/asm-offsets.h>
#include <asm/bug.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/exception-64s.h>
#else
#include <asm/exception-64e.h>
#endif
#include <asm/feature-fixups.h>
#include <asm/head-64.h>
#include <asm/hw_irq.h>
#include <asm/kup.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/ptrace.h>

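/*
 * TOC entries for the syscall tables, so handlers can reach
 * sys_call_table (and the compat table) with r2-relative loads.
 */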
	.section	".toc","aw"
SYS_CALL_TABLE:
	.tc sys_call_table[TC],sys_call_table

#ifdef CONFIG_COMPAT
COMPAT_SYS_CALL_TABLE:
	.tc compat_sys_call_table[TC],compat_sys_call_table
#endif
	.previous

	.align 7

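/*
 * Sanity-check that the SRR (or HSRR) pair still matches the NIP/MSR
 * saved in the interrupt frame before it is used to return, trapping
 * with a one-shot WARN on mismatch. Compiled in only with
 * CONFIG_PPC_RFI_SRR_DEBUG.
 */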
.macro DEBUG_SRR_VALID srr
#ifdef CONFIG_PPC_RFI_SRR_DEBUG
	.ifc \srr,srr
	mfspr	r11,SPRN_SRR0
	ld	r12,_NIP(r1)
	clrrdi  r11,r11,2
	clrrdi  r12,r12,2
100:	tdne	r11,r12
	EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
	mfspr	r11,SPRN_SRR1
	ld	r12,_MSR(r1)
100:	tdne	r11,r12
	EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
	.else
	mfspr	r11,SPRN_HSRR0
	ld	r12,_NIP(r1)
	clrrdi  r11,r11,2
	clrrdi  r12,r12,2
100:	tdne	r11,r12
	EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
	mfspr	r11,SPRN_HSRR1
	ld	r12,_MSR(r1)
100:	tdne	r11,r12
	EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
	.endif
#endif
.endm

#ifdef CONFIG_PPC_BOOK3S
.macro system_call_vectored name trapnr
	.globl system_call_vectored_\name
system_call_vectored_\name:
_ASM_NOKPROBE_SYMBOL(system_call_vectored_\name)
	SCV_INTERRUPT_TO_KERNEL
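	/*
	 * At this point the scv entry vector (exceptions-64s.S) has r13
	 * pointing at the PACA, r9 holding the user r13, and r11/r12
	 * holding the user NIP/MSR (scv delivers those in LR/CTR).
	 */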
	mr	r10,r1
	ld	r1,PACAKSAVE(r13)
	std	r10,0(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	std	r0,GPR0(r1)
	std	r10,GPR1(r1)
	std	r2,GPR2(r1)
	ld	r2,PACATOC(r13)
	mfcr	r12
	li	r11,0
	/* Can we avoid saving r3-r8 in the common case? */
	std	r3,GPR3(r1)
	std	r4,GPR4(r1)
	std	r5,GPR5(r1)
	std	r6,GPR6(r1)
	std	r7,GPR7(r1)
	std	r8,GPR8(r1)
	/* Zero r9-r12; this should only be required when restoring all GPRs */
	std	r11,GPR9(r1)
	std	r11,GPR10(r1)
	std	r11,GPR11(r1)
	std	r11,GPR12(r1)
	std	r9,GPR13(r1)
	SAVE_NVGPRS(r1)
	std	r11,_XER(r1)
	std	r11,_LINK(r1)
	std	r11,_CTR(r1)

	li	r11,\trapnr
	std	r11,_TRAP(r1)
	std	r12,_CCR(r1)
	addi	r10,r1,STACK_FRAME_OVERHEAD
	ld	r11,exception_marker@toc(r2)
	std	r11,-16(r10)		/* "regshere" marker */
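	/*
	 * The marker lets stack unwinders recognise that a pt_regs frame
	 * sits immediately above this stack frame.
	 */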

BEGIN_FTR_SECTION
	HMT_MEDIUM
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/*
	 * scv enters with MSR[EE]=1 and is immediately considered soft-masked.
	 * The entry vector already sets PACAIRQSOFTMASK to IRQS_ALL_DISABLED,
	 * and interrupts may be masked and pending already.
	 * system_call_exception() will call trace_hardirqs_off(), which means
	 * interrupts could already have been blocked before
	 * trace_hardirqs_off() runs, but this is the best we can do.
	 */

	/* Calling convention has r9 = orig r0, r10 = regs */
	mr	r9,r0
	bl	system_call_exception

.Lsyscall_vectored_\name\()_exit:
	addi	r4,r1,STACK_FRAME_OVERHEAD
	li	r5,1 /* scv */
	bl	syscall_exit_prepare
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
.Lsyscall_vectored_\name\()_rst_start:
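	/*
	 * From here to _rst_end the exit code runs soft-masked. If an
	 * interrupt becomes pending in this window (anything in
	 * PACAIRQHAPPENED other than the hard-disable flag), bail out to
	 * the _restart path, which reconciles state and retries the exit.
	 */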
	lbz	r11,PACAIRQHAPPENED(r13)
	andi.	r11,r11,(~PACA_IRQ_HARD_DIS)@l
	bne-	syscall_vectored_\name\()_restart
	li	r11,IRQS_ENABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	li	r11,0
	stb	r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS

	ld	r2,_CCR(r1)
	ld	r4,_NIP(r1)
	ld	r5,_MSR(r1)

BEGIN_FTR_SECTION
	stdcx.	r0,0,r1			/* to clear the reservation */
END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

BEGIN_FTR_SECTION
	HMT_MEDIUM_LOW
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	cmpdi	r3,0
	bne	.Lsyscall_vectored_\name\()_restore_regs

	/* rfscv returns with LR->NIA and CTR->MSR */
	mtlr	r4
	mtctr	r5

	/*
	 * Could zero these as per ABI, but we may consider a stricter ABI
	 * which preserves these if libc implementations can benefit, so
	 * restore them for now until further measurement is done.
	 */
	ld	r0,GPR0(r1)
	ld	r4,GPR4(r1)
	ld	r5,GPR5(r1)
	ld	r6,GPR6(r1)
	ld	r7,GPR7(r1)
	ld	r8,GPR8(r1)
	/* Zero volatile regs that may contain sensitive kernel data */
	li	r9,0
	li	r10,0
	li	r11,0
	li	r12,0
	mtspr	SPRN_XER,r0

	/*
	 * We don't need to restore AMR on the way back to userspace for KUAP.
	 * The value of AMR only matters while we're in the kernel.
	 */
	mtcr	r2
	REST_GPRS(2, 3, r1)
	REST_GPR(13, r1)
	REST_GPR(1, r1)
	RFSCV_TO_USER
	b	.	/* prevent speculative execution */

.Lsyscall_vectored_\name\()_restore_regs:
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r5

	ld	r3,_CTR(r1)
	ld	r4,_LINK(r1)
	ld	r5,_XER(r1)

	REST_NVGPRS(r1)
	ld	r0,GPR0(r1)
	mtcr	r2
	mtctr	r3
	mtlr	r4
	mtspr	SPRN_XER,r5
	REST_GPRS(2, 13, r1)
	REST_GPR(1, r1)
	RFI_TO_USER
.Lsyscall_vectored_\name\()_rst_end:

syscall_vectored_\name\()_restart:
_ASM_NOKPROBE_SYMBOL(syscall_vectored_\name\()_restart)
	GET_PACA(r13)
	ld	r1,PACA_EXIT_SAVE_R1(r13)
	ld	r2,PACATOC(r13)
	ld	r3,RESULT(r1)
	addi	r4,r1,STACK_FRAME_OVERHEAD
	li	r11,IRQS_ALL_DISABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	bl	syscall_exit_restart
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
	b	.Lsyscall_vectored_\name\()_rst_start
1:

SOFT_MASK_TABLE(.Lsyscall_vectored_\name\()_rst_start, 1b)
RESTART_TABLE(.Lsyscall_vectored_\name\()_rst_start, .Lsyscall_vectored_\name\()_rst_end, syscall_vectored_\name\()_restart)
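/*
 * SOFT_MASK_TABLE records the exit window as implicitly soft-masked, and
 * RESTART_TABLE maps any interrupt NIP inside that window to the
 * _restart entry point, so a masked interrupt taken mid-exit replays by
 * restarting the sequence rather than resuming it.
 */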

.endm

system_call_vectored common 0x3000

/*
 * We instantiate another entry copy for the SIGILL variant, with TRAP=0x7ff0,
 * which is tested by system_call_exception() when r0 is -1 (as set by the
 * vector entry code).
 */
system_call_vectored sigill 0x7ff0


/*
 * Entered via a kernel return set up by kernel/sstep.c; register state must
 * match the real scv entry.
 */
	.globl system_call_vectored_emulate
system_call_vectored_emulate:
_ASM_NOKPROBE_SYMBOL(system_call_vectored_emulate)
	li	r10,IRQS_ALL_DISABLED
	stb	r10,PACAIRQSOFTMASK(r13)
	b	system_call_vectored_common
#endif /* CONFIG_PPC_BOOK3S */

	.balign IFETCH_ALIGN_BYTES
	.globl system_call_common_real
system_call_common_real:
_ASM_NOKPROBE_SYMBOL(system_call_common_real)
	ld	r10,PACAKMSR(r13)	/* get MSR value for kernel */
	mtmsrd	r10
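	/*
	 * Writing PACAKMSR switches to the kernel's virtual-mode MSR
	 * (MSR[IR/DR] on, among others); execution then falls through to
	 * the common handler below.
	 */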

	.balign IFETCH_ALIGN_BYTES
	.globl system_call_common
system_call_common:
_ASM_NOKPROBE_SYMBOL(system_call_common)
	mr	r10,r1
	ld	r1,PACAKSAVE(r13)
	std	r10,0(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	std	r0,GPR0(r1)
	std	r10,GPR1(r1)
	std	r2,GPR2(r1)
#ifdef CONFIG_PPC_FSL_BOOK3E
START_BTB_FLUSH_SECTION
	BTB_FLUSH(r10)
END_BTB_FLUSH_SECTION
#endif
	ld	r2,PACATOC(r13)
	mfcr	r12
	li	r11,0
	/* Can we avoid saving r3-r8 in the common case? */
	std	r3,GPR3(r1)
	std	r4,GPR4(r1)
	std	r5,GPR5(r1)
	std	r6,GPR6(r1)
	std	r7,GPR7(r1)
	std	r8,GPR8(r1)
	/* Zero r9-r12; this should only be required when restoring all GPRs */
	std	r11,GPR9(r1)
	std	r11,GPR10(r1)
	std	r11,GPR11(r1)
	std	r11,GPR12(r1)
	std	r9,GPR13(r1)
	SAVE_NVGPRS(r1)
	std	r11,_XER(r1)
	std	r11,_CTR(r1)
	mflr	r10

	/*
	 * This clears CR0.SO (bit 28), which is the error indication on
	 * return from this system call.
	 */
	rldimi	r12,r11,28,(63-28)
	li	r11,0xc00
	std	r10,_LINK(r1)
	std	r11,_TRAP(r1)
	std	r12,_CCR(r1)
	addi	r10,r1,STACK_FRAME_OVERHEAD
	ld	r11,exception_marker@toc(r2)
	std	r11,-16(r10)		/* "regshere" marker */

#ifdef CONFIG_PPC_BOOK3S
	li	r11,1
	stb	r11,PACASRR_VALID(r13)
#endif

	/*
	 * We always enter the kernel from userspace with the irq soft-mask
	 * enabled and nothing pending. system_call_exception() will call
	 * trace_hardirqs_off().
	 */
	li	r11,IRQS_ALL_DISABLED
	stb	r11,PACAIRQSOFTMASK(r13)
#ifdef CONFIG_PPC_BOOK3S
	li	r12,-1 /* Set MSR_EE and MSR_RI */
	mtmsrd	r12,1
#else
	wrteei	1
#endif
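	/*
	 * Hard interrupts are enabled again here, but the soft mask set
	 * above means anything that fires is recorded in PACAIRQHAPPENED
	 * and replayed once the mask is lifted.
	 */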

	/* Calling convention has r9 = orig r0, r10 = regs */
	mr	r9,r0
	bl	system_call_exception

.Lsyscall_exit:
	addi	r4,r1,STACK_FRAME_OVERHEAD
	li	r5,0 /* !scv */
	bl	syscall_exit_prepare
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
#ifdef CONFIG_PPC_BOOK3S
.Lsyscall_rst_start:
	lbz	r11,PACAIRQHAPPENED(r13)
	andi.	r11,r11,(~PACA_IRQ_HARD_DIS)@l
	bne-	syscall_restart
#endif
	li	r11,IRQS_ENABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	li	r11,0
	stb	r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS

	ld	r2,_CCR(r1)
	ld	r6,_LINK(r1)
	mtlr	r6

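	/*
	 * PACASRR_VALID set means SRR0/SRR1 have not been clobbered since
	 * they were filled at entry, so the (comparatively expensive) mtspr
	 * reload below can be skipped.
	 */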
#ifdef CONFIG_PPC_BOOK3S
	lbz	r4,PACASRR_VALID(r13)
	cmpdi	r4,0
	bne	1f
	li	r4,0
	stb	r4,PACASRR_VALID(r13)
#endif
	ld	r4,_NIP(r1)
	ld	r5,_MSR(r1)
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r5
1:
	DEBUG_SRR_VALID srr

BEGIN_FTR_SECTION
	stdcx.	r0,0,r1			/* to clear the reservation */
END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	cmpdi	r3,0
	bne	.Lsyscall_restore_regs
	/* Zero volatile regs that may contain sensitive kernel data */
	li	r0,0
	li	r4,0
	li	r5,0
	li	r6,0
	li	r7,0
	li	r8,0
	li	r9,0
	li	r10,0
	li	r11,0
	li	r12,0
	mtctr	r0
	mtspr	SPRN_XER,r0
.Lsyscall_restore_regs_cont:

BEGIN_FTR_SECTION
	HMT_MEDIUM_LOW
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/*
	 * We don't need to restore AMR on the way back to userspace for KUAP.
	 * The value of AMR only matters while we're in the kernel.
	 */
	mtcr	r2
	REST_GPRS(2, 3, r1)
	REST_GPR(13, r1)
	REST_GPR(1, r1)
	RFI_TO_USER
	b	.	/* prevent speculative execution */

.Lsyscall_restore_regs:
	ld	r3,_CTR(r1)
	ld	r4,_XER(r1)
	REST_NVGPRS(r1)
	mtctr	r3
	mtspr	SPRN_XER,r4
	ld	r0,GPR0(r1)
	REST_GPRS(4, 12, r1)
	b	.Lsyscall_restore_regs_cont
.Lsyscall_rst_end:

#ifdef CONFIG_PPC_BOOK3S
syscall_restart:
_ASM_NOKPROBE_SYMBOL(syscall_restart)
	GET_PACA(r13)
	ld	r1,PACA_EXIT_SAVE_R1(r13)
	ld	r2,PACATOC(r13)
	ld	r3,RESULT(r1)
	addi	r4,r1,STACK_FRAME_OVERHEAD
	li	r11,IRQS_ALL_DISABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	bl	syscall_exit_restart
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
	b	.Lsyscall_rst_start
1:

SOFT_MASK_TABLE(.Lsyscall_rst_start, 1b)
RESTART_TABLE(.Lsyscall_rst_start, .Lsyscall_rst_end, syscall_restart)
#endif

	/*
	 * If MSR EE/RI was never enabled, IRQs were not reconciled, NVGPRs
	 * were not touched, and no exit work was created, then this can be
	 * used.
	 */
	.balign IFETCH_ALIGN_BYTES
	.globl fast_interrupt_return_srr
fast_interrupt_return_srr:
_ASM_NOKPROBE_SYMBOL(fast_interrupt_return_srr)
	kuap_check_amr r3, r4
	ld	r5,_MSR(r1)
	andi.	r0,r5,MSR_PR
#ifdef CONFIG_PPC_BOOK3S
	beq	1f
	kuap_user_restore r3, r4
	b	.Lfast_user_interrupt_return_srr
1:	kuap_kernel_restore r3, r4
	andi.	r0,r5,MSR_RI
	li	r3,0 /* 0 return value, no EMULATE_STACK_STORE */
	bne+	.Lfast_kernel_interrupt_return_srr
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unrecoverable_exception
	b	. /* should not get here */
#else
	bne	.Lfast_user_interrupt_return_srr
	b	.Lfast_kernel_interrupt_return_srr
#endif

.macro interrupt_return_macro srr
	.balign IFETCH_ALIGN_BYTES
	.globl interrupt_return_\srr
interrupt_return_\srr\():
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\())
	ld	r4,_MSR(r1)
	andi.	r0,r4,MSR_PR
	beq	interrupt_return_\srr\()_kernel
interrupt_return_\srr\()_user: /* make backtraces match the _kernel variant */
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_user)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	interrupt_exit_user_prepare
	cmpdi	r3,0
	bne-	.Lrestore_nvgprs_\srr
.Lrestore_nvgprs_\srr\()_cont:
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
#ifdef CONFIG_PPC_BOOK3S
.Linterrupt_return_\srr\()_user_rst_start:
	lbz	r11,PACAIRQHAPPENED(r13)
	andi.	r11,r11,(~PACA_IRQ_HARD_DIS)@l
	bne-	interrupt_return_\srr\()_user_restart
#endif
	li	r11,IRQS_ENABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	li	r11,0
	stb	r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS

.Lfast_user_interrupt_return_\srr\():
#ifdef CONFIG_PPC_BOOK3S
	.ifc \srr,srr
	lbz	r4,PACASRR_VALID(r13)
	.else
	lbz	r4,PACAHSRR_VALID(r13)
	.endif
	cmpdi	r4,0
	li	r4,0
	bne	1f
#endif
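	/*
	 * Reload (H)SRR only when the cached copy is stale; either way r4
	 * is zero here, and the stb after the mtspr pair marks the pair
	 * invalid, since the rf(s)id below consumes it.
	 */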
	ld	r11,_NIP(r1)
	ld	r12,_MSR(r1)
	.ifc \srr,srr
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
1:
#ifdef CONFIG_PPC_BOOK3S
	stb	r4,PACASRR_VALID(r13)
#endif
	.else
	mtspr	SPRN_HSRR0,r11
	mtspr	SPRN_HSRR1,r12
1:
#ifdef CONFIG_PPC_BOOK3S
	stb	r4,PACAHSRR_VALID(r13)
#endif
	.endif
	DEBUG_SRR_VALID \srr

#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
	lbz	r4,PACAIRQSOFTMASK(r13)
	tdnei	r4,IRQS_ENABLED
#endif

BEGIN_FTR_SECTION
	ld	r10,_PPR(r1)
	mtspr	SPRN_PPR,r10
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

BEGIN_FTR_SECTION
	stdcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	ldarx	r0,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	ld	r3,_CCR(r1)
	ld	r4,_LINK(r1)
	ld	r5,_CTR(r1)
	ld	r6,_XER(r1)
	li	r0,0

	REST_GPRS(7, 13, r1)

	mtcr	r3
	mtlr	r4
	mtctr	r5
	mtspr	SPRN_XER,r6

	REST_GPRS(2, 6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	.ifc \srr,srr
	RFI_TO_USER
	.else
	HRFI_TO_USER
	.endif
	b	.	/* prevent speculative execution */
.Linterrupt_return_\srr\()_user_rst_end:

.Lrestore_nvgprs_\srr\():
	REST_NVGPRS(r1)
	b	.Lrestore_nvgprs_\srr\()_cont

#ifdef CONFIG_PPC_BOOK3S
interrupt_return_\srr\()_user_restart:
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_user_restart)
	GET_PACA(r13)
	ld	r1,PACA_EXIT_SAVE_R1(r13)
	ld	r2,PACATOC(r13)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	li	r11,IRQS_ALL_DISABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	bl	interrupt_exit_user_restart
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
	b	.Linterrupt_return_\srr\()_user_rst_start
1:

SOFT_MASK_TABLE(.Linterrupt_return_\srr\()_user_rst_start, 1b)
RESTART_TABLE(.Linterrupt_return_\srr\()_user_rst_start, .Linterrupt_return_\srr\()_user_rst_end, interrupt_return_\srr\()_user_restart)
#endif

	.balign IFETCH_ALIGN_BYTES
interrupt_return_\srr\()_kernel:
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_kernel)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	interrupt_exit_kernel_prepare

	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
.Linterrupt_return_\srr\()_kernel_rst_start:
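	/*
	 * Unlike the user path, the interrupted kernel context may itself
	 * have been running soft-masked, so restore whatever soft-mask
	 * state SOFTE(r1) recorded, and only fold away HARD_DIS when
	 * returning to an irqs-enabled context.
	 */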
	ld	r11,SOFTE(r1)
	cmpwi	r11,IRQS_ENABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	bne	1f
#ifdef CONFIG_PPC_BOOK3S
	lbz	r11,PACAIRQHAPPENED(r13)
	andi.	r11,r11,(~PACA_IRQ_HARD_DIS)@l
	bne-	interrupt_return_\srr\()_kernel_restart
#endif
	li	r11,0
	stb	r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS
1:

.Lfast_kernel_interrupt_return_\srr\():
	cmpdi	cr1,r3,0
#ifdef CONFIG_PPC_BOOK3S
	.ifc \srr,srr
	lbz	r4,PACASRR_VALID(r13)
	.else
	lbz	r4,PACAHSRR_VALID(r13)
	.endif
	cmpdi	r4,0
	li	r4,0
	bne	1f
#endif
	ld	r11,_NIP(r1)
	ld	r12,_MSR(r1)
	.ifc \srr,srr
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
1:
#ifdef CONFIG_PPC_BOOK3S
	stb	r4,PACASRR_VALID(r13)
#endif
	.else
	mtspr	SPRN_HSRR0,r11
	mtspr	SPRN_HSRR1,r12
1:
#ifdef CONFIG_PPC_BOOK3S
	stb	r4,PACAHSRR_VALID(r13)
#endif
	.endif
	DEBUG_SRR_VALID \srr

BEGIN_FTR_SECTION
	stdcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	ldarx	r0,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	ld	r3,_LINK(r1)
	ld	r4,_CTR(r1)
	ld	r5,_XER(r1)
	ld	r6,_CCR(r1)
	li	r0,0

	REST_GPRS(7, 12, r1)

	mtlr	r3
	mtctr	r4
	mtspr	SPRN_XER,r5

	/*
	 * Leaving a stale exception_marker on the stack can confuse
	 * the reliable stack unwinder later on. Clear it.
	 */
	std	r0,STACK_FRAME_OVERHEAD-16(r1)

	REST_GPRS(2, 5, r1)

	bne-	cr1,1f /* emulate stack store */
	mtcr	r6
	REST_GPR(6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	.ifc \srr,srr
	RFI_TO_KERNEL
	.else
	HRFI_TO_KERNEL
	.endif
	b	.	/* prevent speculative execution */

1:	/*
	 * Emulate stack store with update. New r1 value was already calculated
	 * and updated in our interrupt regs by emulate_loadstore, but we can't
	 * store the previous value of r1 to the stack before re-loading our
	 * registers from it, otherwise they could be clobbered.  Use
	 * PACA_EXGEN as temporary storage to hold the store data, as
	 * interrupts are disabled here so it won't be clobbered.
	 */
	mtcr	r6
	std	r9,PACA_EXGEN+0(r13)
	addi	r9,r1,INT_FRAME_SIZE /* get original r1 */
	REST_GPR(6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	std	r9,0(r1) /* perform store component of stdu */
	ld	r9,PACA_EXGEN+0(r13)

	.ifc \srr,srr
	RFI_TO_KERNEL
	.else
	HRFI_TO_KERNEL
	.endif
	b	.	/* prevent speculative execution */
.Linterrupt_return_\srr\()_kernel_rst_end:

#ifdef CONFIG_PPC_BOOK3S
interrupt_return_\srr\()_kernel_restart:
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_kernel_restart)
	GET_PACA(r13)
	ld	r1,PACA_EXIT_SAVE_R1(r13)
	ld	r2,PACATOC(r13)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	li	r11,IRQS_ALL_DISABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	bl	interrupt_exit_kernel_restart
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
	b	.Linterrupt_return_\srr\()_kernel_rst_start
1:

SOFT_MASK_TABLE(.Linterrupt_return_\srr\()_kernel_rst_start, 1b)
RESTART_TABLE(.Linterrupt_return_\srr\()_kernel_rst_start, .Linterrupt_return_\srr\()_kernel_rst_end, interrupt_return_\srr\()_kernel_restart)
#endif

.endm

interrupt_return_macro srr
#ifdef CONFIG_PPC_BOOK3S
interrupt_return_macro hsrr

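/*
 * End of the text that may run implicitly soft-masked; interrupt entry
 * checks against this bound before searching the soft-mask tables.
 */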
	.globl __end_soft_masked
__end_soft_masked:
DEFINE_FIXED_SYMBOL(__end_soft_masked, text)
#endif /* CONFIG_PPC_BOOK3S */

#ifdef CONFIG_PPC_BOOK3S
_GLOBAL(ret_from_fork_scv)
	bl	schedule_tail
	REST_NVGPRS(r1)
	li	r3,0	/* fork() return value */
	b	.Lsyscall_vectored_common_exit
#endif

_GLOBAL(ret_from_fork)
	bl	schedule_tail
	REST_NVGPRS(r1)
	li	r3,0	/* fork() return value */
	b	.Lsyscall_exit

_GLOBAL(ret_from_kernel_thread)
	bl	schedule_tail
	REST_NVGPRS(r1)
	mtctr	r14
	mr	r3,r15
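	/*
	 * r14/r15 hold the thread function and its argument, as set up by
	 * copy_thread(). ELFv2 callees expect their own entry address in
	 * r12 at the global entry point, so it must match CTR.
	 */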
#ifdef PPC64_ELF_ABI_v2
	mr	r12,r14
#endif
	bctrl
	li	r3,0
	b	.Lsyscall_exit
