#include <asm/asm-offsets.h>
#include <asm/bug.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/exception-64s.h>
#else
#include <asm/exception-64e.h>
#endif
#include <asm/feature-fixups.h>
#include <asm/head-64.h>
#include <asm/hw_irq.h>
#include <asm/kup.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/ptrace.h>

	.section	".toc","aw"
SYS_CALL_TABLE:
	.tc sys_call_table[TC],sys_call_table

#ifdef CONFIG_COMPAT
COMPAT_SYS_CALL_TABLE:
	.tc compat_sys_call_table[TC],compat_sys_call_table
#endif
	.previous

	.align 7

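/*
 * DEBUG_SRR_VALID compares SRR0/SRR1 (or HSRR0/HSRR1) against the _NIP/_MSR
 * values saved in the interrupt frame and emits a WARN-once trap on mismatch.
 * The low two bits of the return addresses are cleared before comparing
 * because rfid ignores them. Only built with CONFIG_PPC_RFI_SRR_DEBUG.
 */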
.macro DEBUG_SRR_VALID srr
#ifdef CONFIG_PPC_RFI_SRR_DEBUG
	.ifc \srr,srr
	mfspr	r11,SPRN_SRR0
	ld	r12,_NIP(r1)
	clrrdi  r11,r11,2
	clrrdi  r12,r12,2
100:	tdne	r11,r12
	EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
	mfspr	r11,SPRN_SRR1
	ld	r12,_MSR(r1)
100:	tdne	r11,r12
	EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
	.else
	mfspr	r11,SPRN_HSRR0
	ld	r12,_NIP(r1)
	clrrdi  r11,r11,2
	clrrdi  r12,r12,2
100:	tdne	r11,r12
	EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
	mfspr	r11,SPRN_HSRR1
	ld	r12,_MSR(r1)
100:	tdne	r11,r12
	EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
	.endif
#endif
.endm

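/*
 * On entry from the scv vector, r9 holds the user's r13, r11 the return
 * NIP (scv puts it in LR) and r12 the return MSR (scv puts it in CTR);
 * r13 points at the PACA. These are stored into the pt_regs frame built
 * below.
 */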
#ifdef CONFIG_PPC_BOOK3S
.macro system_call_vectored name trapnr
	.globl system_call_vectored_\name
system_call_vectored_\name:
_ASM_NOKPROBE_SYMBOL(system_call_vectored_\name)
	SCV_INTERRUPT_TO_KERNEL
	mr	r10,r1
	ld	r1,PACAKSAVE(r13)
	std	r10,0(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	std	r0,GPR0(r1)
	std	r10,GPR1(r1)
	std	r2,GPR2(r1)
	ld	r2,PACATOC(r13)
	mfcr	r12
	li	r11,0
	/* Can we avoid saving r3-r8 in the common case? */
	std	r3,GPR3(r1)
	std	r4,GPR4(r1)
	std	r5,GPR5(r1)
	std	r6,GPR6(r1)
	std	r7,GPR7(r1)
	std	r8,GPR8(r1)
	/* Zero r9-r12; this should only be required when restoring all GPRs */
	std	r11,GPR9(r1)
	std	r11,GPR10(r1)
	std	r11,GPR11(r1)
	std	r11,GPR12(r1)
	std	r9,GPR13(r1)
	SAVE_NVGPRS(r1)
	std	r11,_XER(r1)
	std	r11,_LINK(r1)
	std	r11,_CTR(r1)

	li	r11,\trapnr
	std	r11,_TRAP(r1)
	std	r12,_CCR(r1)
	addi	r10,r1,STACK_FRAME_OVERHEAD
	ld	r11,exception_marker@toc(r2)
	std	r11,-16(r10)		/* "regshere" marker */

BEGIN_FTR_SECTION
	HMT_MEDIUM
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/*
	 * scv enters with MSR[EE]=1 and is immediately considered soft-masked.
	 * The entry vector already sets PACAIRQSOFTMASK to IRQS_ALL_DISABLED,
	 * and interrupts may be masked and pending already.
	 * system_call_exception() will call trace_hardirqs_off() which means
	 * interrupts could already have been blocked before trace_hardirqs_off,
	 * but this is the best we can do.
	 */

	/* Calling convention has r9 = orig r0, r10 = regs */
	mr	r9,r0
	bl	system_call_exception

.Lsyscall_vectored_\name\()_exit:
	addi	r4,r1,STACK_FRAME_OVERHEAD
	li	r5,1 /* scv */
	bl	syscall_exit_prepare
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
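	/*
	 * From here to _rst_end the exit state is being torn down under us.
	 * SOFT_MASK_TABLE below marks this window as soft-masked for any
	 * interrupt that hits it, and RESTART_TABLE sends that interrupt's
	 * return to the _restart label, which re-runs the exit sequence.
	 */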
.Lsyscall_vectored_\name\()_rst_start:
	lbz	r11,PACAIRQHAPPENED(r13)
	andi.	r11,r11,(~PACA_IRQ_HARD_DIS)@l
	bne-	syscall_vectored_\name\()_restart
	li	r11,IRQS_ENABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	li	r11,0
	stb	r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS

	ld	r2,_CCR(r1)
	ld	r4,_NIP(r1)
	ld	r5,_MSR(r1)

BEGIN_FTR_SECTION
	stdcx.	r0,0,r1			/* to clear the reservation */
END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

BEGIN_FTR_SECTION
	HMT_MEDIUM_LOW
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	cmpdi	r3,0
	bne	.Lsyscall_vectored_\name\()_restore_regs

	/* rfscv returns with LR->NIA and CTR->MSR */
	mtlr	r4
	mtctr	r5

	/*
	 * Could zero these as per ABI, but we may consider a stricter ABI
	 * which preserves these if libc implementations can benefit, so
	 * restore them for now until further measurement is done.
	 */
	ld	r0,GPR0(r1)
	ld	r4,GPR4(r1)
	ld	r5,GPR5(r1)
	ld	r6,GPR6(r1)
	ld	r7,GPR7(r1)
	ld	r8,GPR8(r1)
	/* Zero volatile regs that may contain sensitive kernel data */
	li	r9,0
	li	r10,0
	li	r11,0
	li	r12,0
	mtspr	SPRN_XER,r0

	/*
	 * We don't need to restore AMR on the way back to userspace for KUAP.
	 * The value of AMR only matters while we're in the kernel.
	 */
	mtcr	r2
	REST_GPRS(2, 3, r1)
	REST_GPR(13, r1)
	REST_GPR(1, r1)
	RFSCV_TO_USER
	b	.	/* prevent speculative execution */

.Lsyscall_vectored_\name\()_restore_regs:
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r5

	ld	r3,_CTR(r1)
	ld	r4,_LINK(r1)
	ld	r5,_XER(r1)

	REST_NVGPRS(r1)
	ld	r0,GPR0(r1)
	mtcr	r2
	mtctr	r3
	mtlr	r4
	mtspr	SPRN_XER,r5
	REST_GPRS(2, 13, r1)
	REST_GPR(1, r1)
	RFI_TO_USER
.Lsyscall_vectored_\name\()_rst_end:

syscall_vectored_\name\()_restart:
_ASM_NOKPROBE_SYMBOL(syscall_vectored_\name\()_restart)
	GET_PACA(r13)
	ld	r1,PACA_EXIT_SAVE_R1(r13)
	ld	r2,PACATOC(r13)
	ld	r3,RESULT(r1)
	addi	r4,r1,STACK_FRAME_OVERHEAD
	li	r11,IRQS_ALL_DISABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	bl	syscall_exit_restart
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
	b	.Lsyscall_vectored_\name\()_rst_start
1:

SOFT_MASK_TABLE(.Lsyscall_vectored_\name\()_rst_start, 1b)
RESTART_TABLE(.Lsyscall_vectored_\name\()_rst_start, .Lsyscall_vectored_\name\()_rst_end, syscall_vectored_\name\()_restart)

.endm

system_call_vectored common 0x3000

/*
 * We instantiate another entry copy for the SIGILL variant, with TRAP=0x7ff0
 * which is tested by system_call_exception when r0 is -1 (as set by vector
 * entry code).
 */
system_call_vectored sigill 0x7ff0

#endif /* CONFIG_PPC_BOOK3S */

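/*
 * system_call_common_real is entered when the MMU is still off; it switches
 * to the kernel's virtual-mode MSR (PACAKMSR) and falls through to
 * system_call_common.
 */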
	.balign IFETCH_ALIGN_BYTES
	.globl system_call_common_real
system_call_common_real:
_ASM_NOKPROBE_SYMBOL(system_call_common_real)
	ld	r10,PACAKMSR(r13)	/* get MSR value for kernel */
	mtmsrd	r10

	.balign IFETCH_ALIGN_BYTES
	.globl system_call_common
system_call_common:
_ASM_NOKPROBE_SYMBOL(system_call_common)
	mr	r10,r1
	ld	r1,PACAKSAVE(r13)
	std	r10,0(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	std	r0,GPR0(r1)
	std	r10,GPR1(r1)
	std	r2,GPR2(r1)
#ifdef CONFIG_PPC_FSL_BOOK3E
START_BTB_FLUSH_SECTION
	BTB_FLUSH(r10)
END_BTB_FLUSH_SECTION
#endif
	ld	r2,PACATOC(r13)
	mfcr	r12
	li	r11,0
	/* Can we avoid saving r3-r8 in the common case? */
	std	r3,GPR3(r1)
	std	r4,GPR4(r1)
	std	r5,GPR5(r1)
	std	r6,GPR6(r1)
	std	r7,GPR7(r1)
	std	r8,GPR8(r1)
	/* Zero r9-r12; this should only be required when restoring all GPRs */
	std	r11,GPR9(r1)
	std	r11,GPR10(r1)
	std	r11,GPR11(r1)
	std	r11,GPR12(r1)
	std	r9,GPR13(r1)
	SAVE_NVGPRS(r1)
	std	r11,_XER(r1)
	std	r11,_CTR(r1)
	mflr	r10

	/*
	 * This clears CR0.SO (bit 28), which is the error indication on
	 * return from this system call.
	 */
	rldimi	r12,r11,28,(63-28)
	li	r11,0xc00
	std	r10,_LINK(r1)
	std	r11,_TRAP(r1)
	std	r12,_CCR(r1)
	addi	r10,r1,STACK_FRAME_OVERHEAD
	ld	r11,exception_marker@toc(r2)
	std	r11,-16(r10)		/* "regshere" marker */

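	/*
	 * SRR0/SRR1 still hold the syscall NIP/MSR and so match the _NIP/_MSR
	 * saved above; record that so the exit path can skip reloading them.
	 */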
#ifdef CONFIG_PPC_BOOK3S
	li	r11,1
	stb	r11,PACASRR_VALID(r13)
#endif

	/*
	 * We always enter the kernel from userspace with the irq soft-mask
	 * enabled and nothing pending. system_call_exception() will call
	 * trace_hardirqs_off().
	 */
	li	r11,IRQS_ALL_DISABLED
	stb	r11,PACAIRQSOFTMASK(r13)
#ifdef CONFIG_PPC_BOOK3S
	li	r12,-1 /* Set MSR_EE and MSR_RI */
	mtmsrd	r12,1
#else
	wrteei	1
#endif

	/* Calling convention has r9 = orig r0, r10 = regs */
	mr	r9,r0
	bl	system_call_exception

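/*
 * This is also reached from ret_from_fork and ret_from_kernel_thread below.
 */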
.Lsyscall_exit:
	addi	r4,r1,STACK_FRAME_OVERHEAD
	li	r5,0 /* !scv */
	bl	syscall_exit_prepare
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
#ifdef CONFIG_PPC_BOOK3S
.Lsyscall_rst_start:
	lbz	r11,PACAIRQHAPPENED(r13)
	andi.	r11,r11,(~PACA_IRQ_HARD_DIS)@l
	bne-	syscall_restart
#endif
	li	r11,IRQS_ENABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	li	r11,0
	stb	r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS

	ld	r2,_CCR(r1)
	ld	r6,_LINK(r1)
	mtlr	r6

#ifdef CONFIG_PPC_BOOK3S
	lbz	r4,PACASRR_VALID(r13)
	cmpdi	r4,0
	bne	1f
	li	r4,0
	stb	r4,PACASRR_VALID(r13)
#endif
	ld	r4,_NIP(r1)
	ld	r5,_MSR(r1)
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r5
1:
	DEBUG_SRR_VALID srr

BEGIN_FTR_SECTION
	stdcx.	r0,0,r1			/* to clear the reservation */
END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	cmpdi	r3,0
	bne	.Lsyscall_restore_regs
	/* Zero volatile regs that may contain sensitive kernel data */
	li	r0,0
	li	r4,0
	li	r5,0
	li	r6,0
	li	r7,0
	li	r8,0
	li	r9,0
	li	r10,0
	li	r11,0
	li	r12,0
	mtctr	r0
	mtspr	SPRN_XER,r0
.Lsyscall_restore_regs_cont:

BEGIN_FTR_SECTION
	HMT_MEDIUM_LOW
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/*
	 * We don't need to restore AMR on the way back to userspace for KUAP.
	 * The value of AMR only matters while we're in the kernel.
	 */
	mtcr	r2
	REST_GPRS(2, 3, r1)
	REST_GPR(13, r1)
	REST_GPR(1, r1)
	RFI_TO_USER
	b	.	/* prevent speculative execution */

.Lsyscall_restore_regs:
	ld	r3,_CTR(r1)
	ld	r4,_XER(r1)
	REST_NVGPRS(r1)
	mtctr	r3
	mtspr	SPRN_XER,r4
	ld	r0,GPR0(r1)
	REST_GPRS(4, 12, r1)
	b	.Lsyscall_restore_regs_cont
.Lsyscall_rst_end:

#ifdef CONFIG_PPC_BOOK3S
syscall_restart:
_ASM_NOKPROBE_SYMBOL(syscall_restart)
	GET_PACA(r13)
	ld	r1,PACA_EXIT_SAVE_R1(r13)
	ld	r2,PACATOC(r13)
	ld	r3,RESULT(r1)
	addi	r4,r1,STACK_FRAME_OVERHEAD
	li	r11,IRQS_ALL_DISABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	bl	syscall_exit_restart
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
	b	.Lsyscall_rst_start
1:

SOFT_MASK_TABLE(.Lsyscall_rst_start, 1b)
RESTART_TABLE(.Lsyscall_rst_start, .Lsyscall_rst_end, syscall_restart)
#endif

	/*
	 * This can be used if MSR EE/RI were never enabled, IRQs were not
	 * reconciled, the NVGPRs were not touched, and no exit work was
	 * created.
	 */
	.balign IFETCH_ALIGN_BYTES
	.globl fast_interrupt_return_srr
fast_interrupt_return_srr:
_ASM_NOKPROBE_SYMBOL(fast_interrupt_return_srr)
	kuap_check_amr r3, r4
	ld	r5,_MSR(r1)
	andi.	r0,r5,MSR_PR
#ifdef CONFIG_PPC_BOOK3S
	beq	1f
	kuap_user_restore r3, r4
	b	.Lfast_user_interrupt_return_srr
1:	kuap_kernel_restore r3, r4
	andi.	r0,r5,MSR_RI
	li	r3,0 /* 0 return value, no EMULATE_STACK_STORE */
	bne+	.Lfast_kernel_interrupt_return_srr
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unrecoverable_exception
	b	. /* should not get here */
#else
	bne	.Lfast_user_interrupt_return_srr
	b	.Lfast_kernel_interrupt_return_srr
#endif

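/*
 * interrupt_return_macro generates the user and kernel interrupt exit
 * paths; it is instantiated below for SRR (most interrupts) and, on Book3S,
 * HSRR (hypervisor interrupts).
 */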
.macro interrupt_return_macro srr
	.balign IFETCH_ALIGN_BYTES
	.globl interrupt_return_\srr
interrupt_return_\srr\():
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\())
	ld	r4,_MSR(r1)
	andi.	r0,r4,MSR_PR
	beq	interrupt_return_\srr\()_kernel
interrupt_return_\srr\()_user: /* make backtraces match the _kernel variant */
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_user)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	interrupt_exit_user_prepare
	cmpdi	r3,0
	bne-	.Lrestore_nvgprs_\srr
.Lrestore_nvgprs_\srr\()_cont:
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
#ifdef CONFIG_PPC_BOOK3S
.Linterrupt_return_\srr\()_user_rst_start:
	lbz	r11,PACAIRQHAPPENED(r13)
	andi.	r11,r11,(~PACA_IRQ_HARD_DIS)@l
	bne-	interrupt_return_\srr\()_user_restart
#endif
	li	r11,IRQS_ENABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	li	r11,0
	stb	r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS

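/*
 * fast_interrupt_return_srr branches here after its KUAP handling when
 * returning to userspace.
 */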
.Lfast_user_interrupt_return_\srr\():
#ifdef CONFIG_PPC_BOOK3S
	.ifc \srr,srr
	lbz	r4,PACASRR_VALID(r13)
	.else
	lbz	r4,PACAHSRR_VALID(r13)
	.endif
	cmpdi	r4,0
	li	r4,0
	bne	1f
#endif
	ld	r11,_NIP(r1)
	ld	r12,_MSR(r1)
	.ifc \srr,srr
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
1:
#ifdef CONFIG_PPC_BOOK3S
	stb	r4,PACASRR_VALID(r13)
#endif
	.else
	mtspr	SPRN_HSRR0,r11
	mtspr	SPRN_HSRR1,r12
1:
#ifdef CONFIG_PPC_BOOK3S
	stb	r4,PACAHSRR_VALID(r13)
#endif
	.endif
	DEBUG_SRR_VALID \srr

#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
	lbz	r4,PACAIRQSOFTMASK(r13)
	tdnei	r4,IRQS_ENABLED
#endif

BEGIN_FTR_SECTION
	ld	r10,_PPR(r1)
	mtspr	SPRN_PPR,r10
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

BEGIN_FTR_SECTION
	stdcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	ldarx	r0,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	ld	r3,_CCR(r1)
	ld	r4,_LINK(r1)
	ld	r5,_CTR(r1)
	ld	r6,_XER(r1)
	li	r0,0

	REST_GPRS(7, 13, r1)

	mtcr	r3
	mtlr	r4
	mtctr	r5
	mtspr	SPRN_XER,r6

	REST_GPRS(2, 6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	.ifc \srr,srr
	RFI_TO_USER
	.else
	HRFI_TO_USER
	.endif
	b	.	/* prevent speculative execution */
.Linterrupt_return_\srr\()_user_rst_end:

.Lrestore_nvgprs_\srr\():
	REST_NVGPRS(r1)
	b	.Lrestore_nvgprs_\srr\()_cont

#ifdef CONFIG_PPC_BOOK3S
interrupt_return_\srr\()_user_restart:
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_user_restart)
	GET_PACA(r13)
	ld	r1,PACA_EXIT_SAVE_R1(r13)
	ld	r2,PACATOC(r13)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	li	r11,IRQS_ALL_DISABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	bl	interrupt_exit_user_restart
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
	b	.Linterrupt_return_\srr\()_user_rst_start
1:

SOFT_MASK_TABLE(.Linterrupt_return_\srr\()_user_rst_start, 1b)
RESTART_TABLE(.Linterrupt_return_\srr\()_user_rst_start, .Linterrupt_return_\srr\()_user_rst_end, interrupt_return_\srr\()_user_restart)
#endif

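/*
 * Unlike the user path, returning to the kernel must restore the
 * interrupted context's soft-mask state from the SOFTE value saved in the
 * frame.
 */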
	.balign IFETCH_ALIGN_BYTES
interrupt_return_\srr\()_kernel:
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_kernel)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	interrupt_exit_kernel_prepare

	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
.Linterrupt_return_\srr\()_kernel_rst_start:
	ld	r11,SOFTE(r1)
	cmpwi	r11,IRQS_ENABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	bne	1f
#ifdef CONFIG_PPC_BOOK3S
	lbz	r11,PACAIRQHAPPENED(r13)
	andi.	r11,r11,(~PACA_IRQ_HARD_DIS)@l
	bne-	interrupt_return_\srr\()_kernel_restart
#endif
	li	r11,0
	stb	r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS
1:

.Lfast_kernel_interrupt_return_\srr\():
	cmpdi	cr1,r3,0
#ifdef CONFIG_PPC_BOOK3S
	.ifc \srr,srr
	lbz	r4,PACASRR_VALID(r13)
	.else
	lbz	r4,PACAHSRR_VALID(r13)
	.endif
	cmpdi	r4,0
	li	r4,0
	bne	1f
#endif
	ld	r11,_NIP(r1)
	ld	r12,_MSR(r1)
	.ifc \srr,srr
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
1:
#ifdef CONFIG_PPC_BOOK3S
	stb	r4,PACASRR_VALID(r13)
#endif
	.else
	mtspr	SPRN_HSRR0,r11
	mtspr	SPRN_HSRR1,r12
1:
#ifdef CONFIG_PPC_BOOK3S
	stb	r4,PACAHSRR_VALID(r13)
#endif
	.endif
	DEBUG_SRR_VALID \srr

BEGIN_FTR_SECTION
	stdcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	ldarx	r0,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	ld	r3,_LINK(r1)
	ld	r4,_CTR(r1)
	ld	r5,_XER(r1)
	ld	r6,_CCR(r1)
	li	r0,0

	REST_GPRS(7, 12, r1)

	mtlr	r3
	mtctr	r4
	mtspr	SPRN_XER,r5

	/*
	 * Leaving a stale exception_marker on the stack can confuse
	 * the reliable stack unwinder later on. Clear it.
	 */
	std	r0,STACK_FRAME_OVERHEAD-16(r1)

	REST_GPRS(2, 5, r1)

	bne-	cr1,1f /* emulate stack store */
	mtcr	r6
	REST_GPR(6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	.ifc \srr,srr
	RFI_TO_KERNEL
	.else
	HRFI_TO_KERNEL
	.endif
	b	.	/* prevent speculative execution */

1:	/*
	 * Emulate stack store with update. New r1 value was already calculated
	 * and updated in our interrupt regs by emulate_loadstore, but we can't
	 * store the previous value of r1 to the stack before re-loading our
	 * registers from it, otherwise they could be clobbered.  Use
	 * PACA_EXGEN as temporary storage to hold the store data, as
	 * interrupts are disabled here so it won't be clobbered.
	 */
	mtcr	r6
	std	r9,PACA_EXGEN+0(r13)
	addi	r9,r1,INT_FRAME_SIZE /* get original r1 */
	REST_GPR(6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	std	r9,0(r1) /* perform store component of stdu */
	ld	r9,PACA_EXGEN+0(r13)

	.ifc \srr,srr
	RFI_TO_KERNEL
	.else
	HRFI_TO_KERNEL
	.endif
	b	.	/* prevent speculative execution */
.Linterrupt_return_\srr\()_kernel_rst_end:

#ifdef CONFIG_PPC_BOOK3S
interrupt_return_\srr\()_kernel_restart:
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_kernel_restart)
	GET_PACA(r13)
	ld	r1,PACA_EXIT_SAVE_R1(r13)
	ld	r2,PACATOC(r13)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	li	r11,IRQS_ALL_DISABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	bl	interrupt_exit_kernel_restart
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
	b	.Linterrupt_return_\srr\()_kernel_rst_start
1:

SOFT_MASK_TABLE(.Linterrupt_return_\srr\()_kernel_rst_start, 1b)
RESTART_TABLE(.Linterrupt_return_\srr\()_kernel_rst_start, .Linterrupt_return_\srr\()_kernel_rst_end, interrupt_return_\srr\()_kernel_restart)
#endif

.endm

interrupt_return_macro srr
#ifdef CONFIG_PPC_BOOK3S
interrupt_return_macro hsrr

	.globl __end_soft_masked
__end_soft_masked:
DEFINE_FIXED_SYMBOL(__end_soft_masked, text)
#endif /* CONFIG_PPC_BOOK3S */

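/*
 * The ret_from_fork* entries are the first code run by a newly created
 * task: schedule_tail() finishes the context switch, and the syscall exit
 * paths above complete the return (r3 = 0 is the child's fork() return
 * value).
 */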
#ifdef CONFIG_PPC_BOOK3S
_GLOBAL(ret_from_fork_scv)
	bl	schedule_tail
	REST_NVGPRS(r1)
	li	r3,0	/* fork() return value */
	b	.Lsyscall_vectored_common_exit
#endif

_GLOBAL(ret_from_fork)
	bl	schedule_tail
	REST_NVGPRS(r1)
	li	r3,0	/* fork() return value */
	b	.Lsyscall_exit

_GLOBAL(ret_from_kernel_thread)
	bl	schedule_tail
	REST_NVGPRS(r1)
	mtctr	r14	/* r14 holds the thread function, set by copy_thread() */
	mr	r3,r15	/* r15 holds its argument */
#ifdef CONFIG_PPC64_ELF_ABI_V2
	mr	r12,r14	/* ELFv2 indirect calls expect the entry address in r12 */
#endif
	bctrl
	li	r3,0
	b	.Lsyscall_exit
