#include <asm/asm-offsets.h>
#include <asm/bug.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/exception-64s.h>
#else
#include <asm/exception-64e.h>
#endif
#include <asm/feature-fixups.h>
#include <asm/head-64.h>
#include <asm/hw_irq.h>
#include <asm/kup.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/ptrace.h>

	.section	".toc","aw"
SYS_CALL_TABLE:
	.tc sys_call_table[TC],sys_call_table

#ifdef CONFIG_COMPAT
COMPAT_SYS_CALL_TABLE:
	.tc compat_sys_call_table[TC],compat_sys_call_table
#endif
	.previous

	.align 7

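/*
 * Debug helper (CONFIG_PPC_RFI_SRR_DEBUG): check that (H)SRR0/(H)SRR1
 * still match the NIP/MSR saved in the interrupt frame before we return
 * with them, and emit a one-time warning if they have been clobbered.
 * The saved NIP is aligned down with clrrdi because the low two bits of
 * (H)SRR0 are not part of the return address.
 */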
.macro DEBUG_SRR_VALID srr
#ifdef CONFIG_PPC_RFI_SRR_DEBUG
	.ifc \srr,srr
	mfspr	r11,SPRN_SRR0
	ld	r12,_NIP(r1)
	clrrdi  r12,r12,2
100:	tdne	r11,r12
	EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
	mfspr	r11,SPRN_SRR1
	ld	r12,_MSR(r1)
100:	tdne	r11,r12
	EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
	.else
	mfspr	r11,SPRN_HSRR0
	ld	r12,_NIP(r1)
	clrrdi  r12,r12,2
100:	tdne	r11,r12
	EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
	mfspr	r11,SPRN_HSRR1
	ld	r12,_MSR(r1)
100:	tdne	r11,r12
	EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
	.endif
#endif
.endm

#ifdef CONFIG_PPC_BOOK3S
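/*
 * System call entry via the scv instruction. Unlike sc, scv does not set
 * SRR0/SRR1: the return NIA is placed in LR and the old MSR in CTR, which
 * the entry vector has already moved into r11/r12 for the _NIP/_MSR saves
 * below.
 */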
.macro system_call_vectored name trapnr
	.globl system_call_vectored_\name
system_call_vectored_\name:
_ASM_NOKPROBE_SYMBOL(system_call_vectored_\name)
	SCV_INTERRUPT_TO_KERNEL
	mr	r10,r1
	ld	r1,PACAKSAVE(r13)
	std	r10,0(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	std	r0,GPR0(r1)
	std	r10,GPR1(r1)
	std	r2,GPR2(r1)
	ld	r2,PACATOC(r13)
	mfcr	r12
	li	r11,0
	/* Can we avoid saving r3-r8 in common case? */
	std	r3,GPR3(r1)
	std	r4,GPR4(r1)
	std	r5,GPR5(r1)
	std	r6,GPR6(r1)
	std	r7,GPR7(r1)
	std	r8,GPR8(r1)
	/* Zero r9-r12, this should only be required when restoring all GPRs */
	std	r11,GPR9(r1)
	std	r11,GPR10(r1)
	std	r11,GPR11(r1)
	std	r11,GPR12(r1)
	std	r9,GPR13(r1)
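	/* Save the non-volatile GPRs too, so C code always sees a complete pt_regs */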
	SAVE_NVGPRS(r1)
	std	r11,_XER(r1)
	std	r11,_LINK(r1)
	std	r11,_CTR(r1)

	li	r11,\trapnr
	std	r11,_TRAP(r1)
	std	r12,_CCR(r1)
	addi	r10,r1,STACK_FRAME_OVERHEAD
	ld	r11,exception_marker@toc(r2)
	std	r11,-16(r10)		/* "regshere" marker */

BEGIN_FTR_SECTION
	HMT_MEDIUM
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/*
	 * scv enters with MSR[EE]=1 and is immediately considered soft-masked.
	 * The entry vector already sets PACAIRQSOFTMASK to IRQS_ALL_DISABLED,
	 * and interrupts may be masked and pending already.
	 * system_call_exception() will call trace_hardirqs_off() which means
	 * interrupts could already have been blocked before trace_hardirqs_off,
	 * but this is the best we can do.
	 */

	/* Calling convention has r9 = orig r0, r10 = regs */
	mr	r9,r0
	bl	system_call_exception

.Lsyscall_vectored_\name\()_exit:
	addi	r4,r1,STACK_FRAME_OVERHEAD
	li	r5,1 /* scv */
	bl	syscall_exit_prepare
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
.Lsyscall_vectored_\name\()_rst_start:
	lbz	r11,PACAIRQHAPPENED(r13)
	andi.	r11,r11,(~PACA_IRQ_HARD_DIS)@l
	bne-	syscall_vectored_\name\()_restart
	li	r11,IRQS_ENABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	li	r11,0
	stb	r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS

	ld	r2,_CCR(r1)
	ld	r4,_NIP(r1)
	ld	r5,_MSR(r1)

BEGIN_FTR_SECTION
	stdcx.	r0,0,r1			/* to clear the reservation */
END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

BEGIN_FTR_SECTION
	HMT_MEDIUM_LOW
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

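	/*
	 * syscall_exit_prepare() placed the user return value in the saved
	 * GPR3 slot and returns non-zero in r3 when the full register state
	 * must be restored instead of taking the fast path below.
	 */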
	cmpdi	r3,0
	bne	.Lsyscall_vectored_\name\()_restore_regs

	/* rfscv returns with LR->NIA and CTR->MSR */
	mtlr	r4
	mtctr	r5

	/*
	 * Could zero these as per ABI, but we may consider a stricter ABI
	 * which preserves these if libc implementations can benefit, so
	 * restore them for now until further measurement is done.
	 */
	ld	r0,GPR0(r1)
	ld	r4,GPR4(r1)
	ld	r5,GPR5(r1)
	ld	r6,GPR6(r1)
	ld	r7,GPR7(r1)
	ld	r8,GPR8(r1)
	/* Zero volatile regs that may contain sensitive kernel data */
	li	r9,0
	li	r10,0
	li	r11,0
	li	r12,0
	mtspr	SPRN_XER,r0

	/*
	 * We don't need to restore AMR on the way back to userspace for KUAP.
	 * The value of AMR only matters while we're in the kernel.
	 */
	mtcr	r2
	REST_GPRS(2, 3, r1)
	REST_GPR(13, r1)
	REST_GPR(1, r1)
	RFSCV_TO_USER
	b	.	/* prevent speculative execution */

.Lsyscall_vectored_\name\()_restore_regs:
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r5

	ld	r3,_CTR(r1)
	ld	r4,_LINK(r1)
	ld	r5,_XER(r1)

	REST_NVGPRS(r1)
	ld	r0,GPR0(r1)
	mtcr	r2
	mtctr	r3
	mtlr	r4
	mtspr	SPRN_XER,r5
	REST_GPRS(2, 13, r1)
	REST_GPR(1, r1)
	RFI_TO_USER
.Lsyscall_vectored_\name\()_rst_end:

syscall_vectored_\name\()_restart:
_ASM_NOKPROBE_SYMBOL(syscall_vectored_\name\()_restart)
	GET_PACA(r13)
	ld	r1,PACA_EXIT_SAVE_R1(r13)
	ld	r2,PACATOC(r13)
	ld	r3,RESULT(r1)
	addi	r4,r1,STACK_FRAME_OVERHEAD
	li	r11,IRQS_ALL_DISABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	bl	syscall_exit_restart
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
	b	.Lsyscall_vectored_\name\()_rst_start
1:

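/*
 * The SOFT_MASK_TABLE entry makes interrupts treat the exit window above
 * as soft-masked, and the RESTART_TABLE entry sends any interrupt that
 * does hit that window back to the _restart stub, so the exit sequence
 * is re-run with a consistent irq state.
 */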
SOFT_MASK_TABLE(.Lsyscall_vectored_\name\()_rst_start, 1b)
RESTART_TABLE(.Lsyscall_vectored_\name\()_rst_start, .Lsyscall_vectored_\name\()_rst_end, syscall_vectored_\name\()_restart)

.endm

system_call_vectored common 0x3000

/*
 * We instantiate another entry copy for the SIGILL variant, with TRAP=0x7ff0
 * which is tested by system_call_exception when r0 is -1 (as set by vector
 * entry code).
 */
system_call_vectored sigill 0x7ff0


/*
 * Entered via kernel return set up by kernel/sstep.c, must match entry regs
 */
	.globl system_call_vectored_emulate
system_call_vectored_emulate:
_ASM_NOKPROBE_SYMBOL(system_call_vectored_emulate)
	li	r10,IRQS_ALL_DISABLED
	stb	r10,PACAIRQSOFTMASK(r13)
	b	system_call_vectored_common
#endif /* CONFIG_PPC_BOOK3S */

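/*
 * Real-mode entry point: the system call vector branches here when the
 * MMU is off, so translation is switched on with the kernel MSR before
 * falling through to system_call_common.
 */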
	.balign IFETCH_ALIGN_BYTES
	.globl system_call_common_real
system_call_common_real:
_ASM_NOKPROBE_SYMBOL(system_call_common_real)
	ld	r10,PACAKMSR(r13)	/* get MSR value for kernel */
	mtmsrd	r10

	.balign IFETCH_ALIGN_BYTES
	.globl system_call_common
system_call_common:
_ASM_NOKPROBE_SYMBOL(system_call_common)
	mr	r10,r1
	ld	r1,PACAKSAVE(r13)
	std	r10,0(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	std	r0,GPR0(r1)
	std	r10,GPR1(r1)
	std	r2,GPR2(r1)
#ifdef CONFIG_PPC_FSL_BOOK3E
START_BTB_FLUSH_SECTION
	BTB_FLUSH(r10)
END_BTB_FLUSH_SECTION
#endif
	ld	r2,PACATOC(r13)
	mfcr	r12
	li	r11,0
	/* Can we avoid saving r3-r8 in common case? */
	std	r3,GPR3(r1)
	std	r4,GPR4(r1)
	std	r5,GPR5(r1)
	std	r6,GPR6(r1)
	std	r7,GPR7(r1)
	std	r8,GPR8(r1)
	/* Zero r9-r12, this should only be required when restoring all GPRs */
	std	r11,GPR9(r1)
	std	r11,GPR10(r1)
	std	r11,GPR11(r1)
	std	r11,GPR12(r1)
	std	r9,GPR13(r1)
	SAVE_NVGPRS(r1)
	std	r11,_XER(r1)
	std	r11,_CTR(r1)
	mflr	r10

	/*
	 * This clears CR0.SO (bit 28), which is the error indication on
	 * return from this system call.
	 */
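	/* r11 still holds zero here, so this is regs->ccr &= ~(1 << 28) */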
	rldimi	r12,r11,28,(63-28)
	li	r11,0xc00
	std	r10,_LINK(r1)
	std	r11,_TRAP(r1)
	std	r12,_CCR(r1)
	addi	r10,r1,STACK_FRAME_OVERHEAD
	ld	r11,exception_marker@toc(r2)
	std	r11,-16(r10)		/* "regshere" marker */

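	/*
	 * SRR0/SRR1 still hold this syscall's NIP/MSR, so note that in the
	 * PACA; the exit path can then skip the slow mtspr SPRN_SRR0/1 when
	 * nothing has overwritten them.
	 */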
#ifdef CONFIG_PPC_BOOK3S
	li	r11,1
	stb	r11,PACASRR_VALID(r13)
#endif

	/*
	 * We always enter kernel from userspace with irq soft-mask enabled and
	 * nothing pending. system_call_exception() will call
	 * trace_hardirqs_off().
	 */
	li	r11,IRQS_ALL_DISABLED
	stb	r11,PACAIRQSOFTMASK(r13)
#ifdef CONFIG_PPC_BOOK3S
	li	r12,-1 /* Set MSR_EE and MSR_RI */
	mtmsrd	r12,1
#else
	wrteei	1
#endif

	/* Calling convention has r9 = orig r0, r10 = regs */
	mr	r9,r0
	bl	system_call_exception

.Lsyscall_exit:
	addi	r4,r1,STACK_FRAME_OVERHEAD
	li	r5,0 /* !scv */
	bl	syscall_exit_prepare
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
#ifdef CONFIG_PPC_BOOK3S
.Lsyscall_rst_start:
	lbz	r11,PACAIRQHAPPENED(r13)
	andi.	r11,r11,(~PACA_IRQ_HARD_DIS)@l
	bne-	syscall_restart
#endif
	li	r11,IRQS_ENABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	li	r11,0
	stb	r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS

	ld	r2,_CCR(r1)
	ld	r6,_LINK(r1)
	mtlr	r6

#ifdef CONFIG_PPC_BOOK3S
	lbz	r4,PACASRR_VALID(r13)
	cmpdi	r4,0
	bne	1f
	li	r4,0
	stb	r4,PACASRR_VALID(r13)
#endif
	ld	r4,_NIP(r1)
	ld	r5,_MSR(r1)
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r5
1:
	DEBUG_SRR_VALID srr

BEGIN_FTR_SECTION
	stdcx.	r0,0,r1			/* to clear the reservation */
END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	cmpdi	r3,0
	bne	.Lsyscall_restore_regs
	/* Zero volatile regs that may contain sensitive kernel data */
	li	r0,0
	li	r4,0
	li	r5,0
	li	r6,0
	li	r7,0
	li	r8,0
	li	r9,0
	li	r10,0
	li	r11,0
	li	r12,0
	mtctr	r0
	mtspr	SPRN_XER,r0
.Lsyscall_restore_regs_cont:

BEGIN_FTR_SECTION
	HMT_MEDIUM_LOW
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/*
	 * We don't need to restore AMR on the way back to userspace for KUAP.
	 * The value of AMR only matters while we're in the kernel.
	 */
	mtcr	r2
	REST_GPRS(2, 3, r1)
	REST_GPR(13, r1)
	REST_GPR(1, r1)
	RFI_TO_USER
	b	.	/* prevent speculative execution */

.Lsyscall_restore_regs:
	ld	r3,_CTR(r1)
	ld	r4,_XER(r1)
	REST_NVGPRS(r1)
	mtctr	r3
	mtspr	SPRN_XER,r4
	ld	r0,GPR0(r1)
	REST_GPRS(4, 12, r1)
	b	.Lsyscall_restore_regs_cont
.Lsyscall_rst_end:

#ifdef CONFIG_PPC_BOOK3S
syscall_restart:
_ASM_NOKPROBE_SYMBOL(syscall_restart)
	GET_PACA(r13)
	ld	r1,PACA_EXIT_SAVE_R1(r13)
	ld	r2,PACATOC(r13)
	ld	r3,RESULT(r1)
	addi	r4,r1,STACK_FRAME_OVERHEAD
	li	r11,IRQS_ALL_DISABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	bl	syscall_exit_restart
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
	b	.Lsyscall_rst_start
1:

SOFT_MASK_TABLE(.Lsyscall_rst_start, 1b)
RESTART_TABLE(.Lsyscall_rst_start, .Lsyscall_rst_end, syscall_restart)
#endif

	/*
	 * If MSR EE/RI was never enabled, IRQs not reconciled, NVGPRs not
	 * touched, no exit work created, then this can be used.
	 */
	.balign IFETCH_ALIGN_BYTES
	.globl fast_interrupt_return_srr
fast_interrupt_return_srr:
_ASM_NOKPROBE_SYMBOL(fast_interrupt_return_srr)
	kuap_check_amr r3, r4
	ld	r5,_MSR(r1)
	andi.	r0,r5,MSR_PR
#ifdef CONFIG_PPC_BOOK3S
	beq	1f
	kuap_user_restore r3, r4
	b	.Lfast_user_interrupt_return_srr
1:	kuap_kernel_restore r3, r4
	andi.	r0,r5,MSR_RI
	li	r3,0 /* 0 return value, no EMULATE_STACK_STORE */
	bne+	.Lfast_kernel_interrupt_return_srr
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unrecoverable_exception
	b	. /* should not get here */
#else
	bne	.Lfast_user_interrupt_return_srr
	b	.Lfast_kernel_interrupt_return_srr
#endif

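/*
 * Interrupt return. Expanded below for the SRR flavour and, on Book3S,
 * the HSRR flavour; \srr selects which save/restore SPRs and which
 * return instruction (RFI vs HRFI variants) the expansion uses.
 */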
.macro interrupt_return_macro srr
	.balign IFETCH_ALIGN_BYTES
	.globl interrupt_return_\srr
interrupt_return_\srr\():
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\())
	ld	r4,_MSR(r1)
	andi.	r0,r4,MSR_PR
	beq	interrupt_return_\srr\()_kernel
interrupt_return_\srr\()_user: /* make backtraces match the _kernel variant */
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_user)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	interrupt_exit_user_prepare
	cmpdi	r3,0
	bne-	.Lrestore_nvgprs_\srr
.Lrestore_nvgprs_\srr\()_cont:
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
#ifdef CONFIG_PPC_BOOK3S
.Linterrupt_return_\srr\()_user_rst_start:
	lbz	r11,PACAIRQHAPPENED(r13)
	andi.	r11,r11,(~PACA_IRQ_HARD_DIS)@l
	bne-	interrupt_return_\srr\()_user_restart
#endif
	li	r11,IRQS_ENABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	li	r11,0
	stb	r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS

.Lfast_user_interrupt_return_\srr\():
#ifdef CONFIG_PPC_BOOK3S
	.ifc \srr,srr
	lbz	r4,PACASRR_VALID(r13)
	.else
	lbz	r4,PACAHSRR_VALID(r13)
	.endif
	cmpdi	r4,0
	li	r4,0
	bne	1f
#endif
	ld	r11,_NIP(r1)
	ld	r12,_MSR(r1)
	.ifc \srr,srr
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
1:
#ifdef CONFIG_PPC_BOOK3S
	stb	r4,PACASRR_VALID(r13)
#endif
	.else
	mtspr	SPRN_HSRR0,r11
	mtspr	SPRN_HSRR1,r12
1:
#ifdef CONFIG_PPC_BOOK3S
	stb	r4,PACAHSRR_VALID(r13)
#endif
	.endif
	DEBUG_SRR_VALID \srr

#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
	lbz	r4,PACAIRQSOFTMASK(r13)
	tdnei	r4,IRQS_ENABLED
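	/* trap if the soft-mask state is not IRQS_ENABLED on return to user */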
#endif

BEGIN_FTR_SECTION
	ld	r10,_PPR(r1)
	mtspr	SPRN_PPR,r10
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

BEGIN_FTR_SECTION
	stdcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	ldarx	r0,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
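	/*
	 * Where stcx. checks the reservation address, the stdcx. above could
	 * not reliably clear a reservation held on another address, so a
	 * dummy larx is used to move the reservation to the kernel stack
	 * instead.
	 */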

	ld	r3,_CCR(r1)
	ld	r4,_LINK(r1)
	ld	r5,_CTR(r1)
	ld	r6,_XER(r1)
	li	r0,0

	REST_GPRS(7, 13, r1)

	mtcr	r3
	mtlr	r4
	mtctr	r5
	mtspr	SPRN_XER,r6

	REST_GPRS(2, 6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	.ifc \srr,srr
	RFI_TO_USER
	.else
	HRFI_TO_USER
	.endif
	b	.	/* prevent speculative execution */
.Linterrupt_return_\srr\()_user_rst_end:

.Lrestore_nvgprs_\srr\():
	REST_NVGPRS(r1)
	b	.Lrestore_nvgprs_\srr\()_cont

#ifdef CONFIG_PPC_BOOK3S
interrupt_return_\srr\()_user_restart:
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_user_restart)
	GET_PACA(r13)
	ld	r1,PACA_EXIT_SAVE_R1(r13)
	ld	r2,PACATOC(r13)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	li	r11,IRQS_ALL_DISABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	bl	interrupt_exit_user_restart
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
	b	.Linterrupt_return_\srr\()_user_rst_start
1:

SOFT_MASK_TABLE(.Linterrupt_return_\srr\()_user_rst_start, 1b)
RESTART_TABLE(.Linterrupt_return_\srr\()_user_rst_start, .Linterrupt_return_\srr\()_user_rst_end, interrupt_return_\srr\()_user_restart)
#endif

	.balign IFETCH_ALIGN_BYTES
interrupt_return_\srr\()_kernel:
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_kernel)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	interrupt_exit_kernel_prepare

	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
.Linterrupt_return_\srr\()_kernel_rst_start:
	ld	r11,SOFTE(r1)
	cmpwi	r11,IRQS_ENABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	bne	1f
#ifdef CONFIG_PPC_BOOK3S
	lbz	r11,PACAIRQHAPPENED(r13)
	andi.	r11,r11,(~PACA_IRQ_HARD_DIS)@l
	bne-	interrupt_return_\srr\()_kernel_restart
#endif
	li	r11,0
	stb	r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS
1:

.Lfast_kernel_interrupt_return_\srr\():
	cmpdi	cr1,r3,0
#ifdef CONFIG_PPC_BOOK3S
	.ifc \srr,srr
	lbz	r4,PACASRR_VALID(r13)
	.else
	lbz	r4,PACAHSRR_VALID(r13)
	.endif
	cmpdi	r4,0
	li	r4,0
	bne	1f
#endif
	ld	r11,_NIP(r1)
	ld	r12,_MSR(r1)
	.ifc \srr,srr
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
1:
#ifdef CONFIG_PPC_BOOK3S
	stb	r4,PACASRR_VALID(r13)
#endif
	.else
	mtspr	SPRN_HSRR0,r11
	mtspr	SPRN_HSRR1,r12
1:
#ifdef CONFIG_PPC_BOOK3S
	stb	r4,PACAHSRR_VALID(r13)
#endif
	.endif
	DEBUG_SRR_VALID \srr

BEGIN_FTR_SECTION
	stdcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	ldarx	r0,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	ld	r3,_LINK(r1)
	ld	r4,_CTR(r1)
	ld	r5,_XER(r1)
	ld	r6,_CCR(r1)
	li	r0,0

	REST_GPRS(7, 12, r1)

	mtlr	r3
	mtctr	r4
	mtspr	SPRN_XER,r5

	/*
	 * Leaving a stale exception_marker on the stack can confuse
	 * the reliable stack unwinder later on. Clear it.
	 */
	std	r0,STACK_FRAME_OVERHEAD-16(r1)

	REST_GPRS(2, 5, r1)

	bne-	cr1,1f /* emulate stack store */
	mtcr	r6
	REST_GPR(6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	.ifc \srr,srr
	RFI_TO_KERNEL
	.else
	HRFI_TO_KERNEL
	.endif
	b	.	/* prevent speculative execution */

1:	/*
	 * Emulate stack store with update. New r1 value was already calculated
	 * and updated in our interrupt regs by emulate_loadstore, but we can't
	 * store the previous value of r1 to the stack before re-loading our
	 * registers from it, otherwise they could be clobbered.  Use
	 * PACA_EXGEN as temporary storage to hold the store data, as
	 * interrupts are disabled here so it won't be clobbered.
	 */
	mtcr	r6
	std	r9,PACA_EXGEN+0(r13)
	addi	r9,r1,INT_FRAME_SIZE /* get original r1 */
	REST_GPR(6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	std	r9,0(r1) /* perform store component of stdu */
	ld	r9,PACA_EXGEN+0(r13)

	.ifc \srr,srr
	RFI_TO_KERNEL
	.else
	HRFI_TO_KERNEL
	.endif
	b	.	/* prevent speculative execution */
.Linterrupt_return_\srr\()_kernel_rst_end:

#ifdef CONFIG_PPC_BOOK3S
interrupt_return_\srr\()_kernel_restart:
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_kernel_restart)
	GET_PACA(r13)
	ld	r1,PACA_EXIT_SAVE_R1(r13)
	ld	r2,PACATOC(r13)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	li	r11,IRQS_ALL_DISABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	bl	interrupt_exit_kernel_restart
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
	b	.Linterrupt_return_\srr\()_kernel_rst_start
1:

SOFT_MASK_TABLE(.Linterrupt_return_\srr\()_kernel_rst_start, 1b)
RESTART_TABLE(.Linterrupt_return_\srr\()_kernel_rst_start, .Linterrupt_return_\srr\()_kernel_rst_end, interrupt_return_\srr\()_kernel_restart)
#endif

.endm

interrupt_return_macro srr
#ifdef CONFIG_PPC_BOOK3S
interrupt_return_macro hsrr

	.globl __end_soft_masked
__end_soft_masked:
DEFINE_FIXED_SYMBOL(__end_soft_masked, text)
#endif /* CONFIG_PPC_BOOK3S */

#ifdef CONFIG_PPC_BOOK3S
_GLOBAL(ret_from_fork_scv)
	bl	schedule_tail
	REST_NVGPRS(r1)
	li	r3,0	/* fork() return value */
	b	.Lsyscall_vectored_common_exit
#endif

_GLOBAL(ret_from_fork)
	bl	schedule_tail
	REST_NVGPRS(r1)
	li	r3,0	/* fork() return value */
	b	.Lsyscall_exit

_GLOBAL(ret_from_kernel_thread)
	bl	schedule_tail
	REST_NVGPRS(r1)
	mtctr	r14
	mr	r3,r15
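	/* ELFv2 callees may expect their entry address in r12 to derive the TOC */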
#ifdef PPC64_ELF_ABI_v2
	mr	r12,r14
#endif
	bctrl
	li	r3,0
	b	.Lsyscall_exit
