#include <asm/asm-offsets.h>
#include <asm/bug.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/exception-64s.h>
#else
#include <asm/exception-64e.h>
#endif
#include <asm/feature-fixups.h>
#include <asm/head-64.h>
#include <asm/hw_irq.h>
#include <asm/kup.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/ptrace.h>
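/*
 * 64-bit system call (sc and scv) entry and exit, and interrupt return
 * paths.
 */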

	.align 7

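/*
 * Check that the (H)SRR0/1 registers still match the NIP and MSR saved in
 * the pt_regs frame, and WARN (once) if they diverge. The low two bits of
 * the NIP are masked off before comparing, since SRR0 holds a 4-byte
 * aligned instruction address.
 */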
.macro DEBUG_SRR_VALID srr
#ifdef CONFIG_PPC_RFI_SRR_DEBUG
	.ifc \srr,srr
	mfspr	r11,SPRN_SRR0
	ld	r12,_NIP(r1)
	clrrdi  r11,r11,2
	clrrdi  r12,r12,2
100:	tdne	r11,r12
	EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
	mfspr	r11,SPRN_SRR1
	ld	r12,_MSR(r1)
100:	tdne	r11,r12
	EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
	.else
	mfspr	r11,SPRN_HSRR0
	ld	r12,_NIP(r1)
	clrrdi  r11,r11,2
	clrrdi  r12,r12,2
100:	tdne	r11,r12
	EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
	mfspr	r11,SPRN_HSRR1
	ld	r12,_MSR(r1)
100:	tdne	r11,r12
	EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
	.endif
#endif
.endm

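/*
 * scv (system call vectored) entry. By this point the vector code has
 * placed the user NIP in r11, the user MSR in r12 and the user r13 in r9
 * (an assumption inferred from the stores below), and r13 holds the PACA.
 * Switch to the kernel stack from PACAKSAVE and build the pt_regs frame.
 */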
#ifdef CONFIG_PPC_BOOK3S
.macro system_call_vectored name trapnr
	.globl system_call_vectored_\name
system_call_vectored_\name:
_ASM_NOKPROBE_SYMBOL(system_call_vectored_\name)
	SCV_INTERRUPT_TO_KERNEL
	mr	r10,r1
	ld	r1,PACAKSAVE(r13)
	std	r10,0(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	std	r0,GPR0(r1)
	std	r10,GPR1(r1)
	std	r2,GPR2(r1)
	LOAD_PACA_TOC()
	mfcr	r12
	li	r11,0
	/* Save syscall parameters in r3-r8 */
	SAVE_GPRS(3, 8, r1)
	/* Zero r9-r12; this should only be required when restoring all GPRs */
	std	r11,GPR9(r1)
	std	r11,GPR10(r1)
	std	r11,GPR11(r1)
	std	r11,GPR12(r1)
	std	r9,GPR13(r1)
	SAVE_NVGPRS(r1)
	std	r11,_XER(r1)
	std	r11,_LINK(r1)
	std	r11,_CTR(r1)

	li	r11,\trapnr
	std	r11,_TRAP(r1)
	std	r12,_CCR(r1)
	std	r3,ORIG_GPR3(r1)
	/* Calling convention has r3 = regs, r4 = orig r0 */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	mr	r4,r0
	LOAD_REG_IMMEDIATE(r11, STACK_FRAME_REGS_MARKER)
	std	r11,-16(r3)		/* "regshere" marker */

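	/* Boost SMT thread priority to medium while handling the syscall. */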
BEGIN_FTR_SECTION
	HMT_MEDIUM
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/*
	 * scv enters with MSR[EE]=1 and is immediately considered soft-masked.
	 * The entry vector already sets PACAIRQSOFTMASK to IRQS_ALL_DISABLED,
	 * and interrupts may be masked and pending already.
	 * system_call_exception() will call trace_hardirqs_off(), which means
	 * interrupts could already have been blocked before the tracing
	 * records them as off, but this is the best we can do.
	 */

	bl	system_call_exception

.Lsyscall_vectored_\name\()_exit:
	addi	r4,r1,STACK_FRAME_OVERHEAD
	li	r5,1 /* scv */
	bl	syscall_exit_prepare
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
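	/*
	 * From _rst_start to _rst_end the exit sequence is restartable: if
	 * an interrupt arrives in this window, the SOFT_MASK_TABLE and
	 * RESTART_TABLE entries below send it to the _restart stub, which
	 * re-runs the exit with IRQS_ALL_DISABLED.
	 */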
.Lsyscall_vectored_\name\()_rst_start:
	lbz	r11,PACAIRQHAPPENED(r13)
	andi.	r11,r11,(~PACA_IRQ_HARD_DIS)@l
	bne-	syscall_vectored_\name\()_restart
	li	r11,IRQS_ENABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	li	r11,0
	stb	r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS

	ld	r2,_CCR(r1)
	ld	r4,_NIP(r1)
	ld	r5,_MSR(r1)

BEGIN_FTR_SECTION
	stdcx.	r0,0,r1			/* to clear the reservation */
END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

BEGIN_FTR_SECTION
	HMT_MEDIUM_LOW
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

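	/*
	 * syscall_exit_prepare() returned nonzero in r3 if the full register
	 * set must be restored (the slower path below).
	 */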
	cmpdi	r3,0
	bne	.Lsyscall_vectored_\name\()_restore_regs

	/* rfscv returns with LR->NIA and CTR->MSR */
	mtlr	r4
	mtctr	r5

	/* Could zero these as per ABI, but we may consider a stricter ABI
	 * which preserves these if libc implementations can benefit, so
	 * restore them for now until further measurement is done. */
	REST_GPR(0, r1)
	REST_GPRS(4, 8, r1)
	/* Zero volatile regs that may contain sensitive kernel data */
	ZEROIZE_GPRS(9, 12)
	mtspr	SPRN_XER,r0

	/*
	 * We don't need to restore AMR on the way back to userspace for KUAP.
	 * The value of AMR only matters while we're in the kernel.
	 */
	mtcr	r2
	REST_GPRS(2, 3, r1)
	REST_GPR(13, r1)
	REST_GPR(1, r1)
	RFSCV_TO_USER
	b	.	/* prevent speculative execution */

.Lsyscall_vectored_\name\()_restore_regs:
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r5

	ld	r3,_CTR(r1)
	ld	r4,_LINK(r1)
	ld	r5,_XER(r1)

	REST_NVGPRS(r1)
	REST_GPR(0, r1)
	mtcr	r2
	mtctr	r3
	mtlr	r4
	mtspr	SPRN_XER,r5
	REST_GPRS(2, 13, r1)
	REST_GPR(1, r1)
	RFI_TO_USER
.Lsyscall_vectored_\name\()_rst_end:

syscall_vectored_\name\()_restart:
_ASM_NOKPROBE_SYMBOL(syscall_vectored_\name\()_restart)
	GET_PACA(r13)
	ld	r1,PACA_EXIT_SAVE_R1(r13)
	LOAD_PACA_TOC()
	ld	r3,RESULT(r1)
	addi	r4,r1,STACK_FRAME_OVERHEAD
	li	r11,IRQS_ALL_DISABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	bl	syscall_exit_restart
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
	b	.Lsyscall_vectored_\name\()_rst_start
1:

SOFT_MASK_TABLE(.Lsyscall_vectored_\name\()_rst_start, 1b)
RESTART_TABLE(.Lsyscall_vectored_\name\()_rst_start, .Lsyscall_vectored_\name\()_rst_end, syscall_vectored_\name\()_restart)

.endm

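/* The standard scv entry, using trap number 0x3000. */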
system_call_vectored common 0x3000

/*
 * We instantiate another entry copy for the SIGILL variant, with TRAP=0x7ff0
 * which is tested by system_call_exception when r0 is -1 (as set by vector
 * entry code).
 */
system_call_vectored sigill 0x7ff0

#endif /* CONFIG_PPC_BOOK3S */

	.balign IFETCH_ALIGN_BYTES
	.globl system_call_common_real
system_call_common_real:
_ASM_NOKPROBE_SYMBOL(system_call_common_real)
	ld	r10,PACAKMSR(r13)	/* get MSR value for kernel */
	mtmsrd	r10
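	/* Kernel MSR is now in effect; fall through to the common entry. */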

	.balign IFETCH_ALIGN_BYTES
	.globl system_call_common
system_call_common:
_ASM_NOKPROBE_SYMBOL(system_call_common)
	mr	r10,r1
	ld	r1,PACAKSAVE(r13)
	std	r10,0(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	std	r0,GPR0(r1)
	std	r10,GPR1(r1)
	std	r2,GPR2(r1)
#ifdef CONFIG_PPC_E500
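	/*
	 * Flush the branch target buffer on kernel entry; branch-predictor
	 * hardening on e500 cores (see the BTB_FLUSH macro).
	 */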
START_BTB_FLUSH_SECTION
	BTB_FLUSH(r10)
END_BTB_FLUSH_SECTION
#endif
	LOAD_PACA_TOC()
	mfcr	r12
	li	r11,0
	/* Save syscall parameters in r3-r8 */
	SAVE_GPRS(3, 8, r1)
	/* Zero r9-r12; this should only be required when restoring all GPRs */
	std	r11,GPR9(r1)
	std	r11,GPR10(r1)
	std	r11,GPR11(r1)
	std	r11,GPR12(r1)
	std	r9,GPR13(r1)
	SAVE_NVGPRS(r1)
	std	r11,_XER(r1)
	std	r11,_CTR(r1)
	mflr	r10

	/*
	 * This clears CR0.SO (bit 28), which is the error indication on
	 * return from this system call.
	 */
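	/* r11 is zero here, so rldimi inserts a single 0 at CR image bit 28 */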
	rldimi	r12,r11,28,(63-28)
	li	r11,0xc00
	std	r10,_LINK(r1)
	std	r11,_TRAP(r1)
	std	r12,_CCR(r1)
	std	r3,ORIG_GPR3(r1)
	/* Calling convention has r3 = regs, r4 = orig r0 */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	mr	r4,r0
	LOAD_REG_IMMEDIATE(r11, STACK_FRAME_REGS_MARKER)
	std	r11,-16(r3)		/* "regshere" marker */

#ifdef CONFIG_PPC_BOOK3S
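	/*
	 * SRR0/1 still match the NIP/MSR saved above, so mark them valid;
	 * the exit path can then avoid reloading them.
	 */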
	li	r11,1
	stb	r11,PACASRR_VALID(r13)
#endif

	/*
	 * We always enter kernel from userspace with irq soft-mask enabled and
	 * nothing pending. system_call_exception() will call
	 * trace_hardirqs_off().
	 */
	li	r11,IRQS_ALL_DISABLED
	stb	r11,PACAIRQSOFTMASK(r13)
#ifdef CONFIG_PPC_BOOK3S
	li	r12,-1 /* Set MSR_EE and MSR_RI */
	mtmsrd	r12,1
#else
	wrteei	1
#endif

	bl	system_call_exception

.Lsyscall_exit:
	addi	r4,r1,STACK_FRAME_OVERHEAD
	li	r5,0 /* !scv */
	bl	syscall_exit_prepare
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
#ifdef CONFIG_PPC_BOOK3S
.Lsyscall_rst_start:
	lbz	r11,PACAIRQHAPPENED(r13)
	andi.	r11,r11,(~PACA_IRQ_HARD_DIS)@l
	bne-	syscall_restart
#endif
	li	r11,IRQS_ENABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	li	r11,0
	stb	r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS

	ld	r2,_CCR(r1)
	ld	r6,_LINK(r1)
	mtlr	r6

#ifdef CONFIG_PPC_BOOK3S
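	/*
	 * If PACASRR_VALID is still set, SRR0/1 were not clobbered while in
	 * the kernel, and the reload below can be skipped.
	 */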
	lbz	r4,PACASRR_VALID(r13)
	cmpdi	r4,0
	bne	1f
	li	r4,0
	stb	r4,PACASRR_VALID(r13)
#endif
	ld	r4,_NIP(r1)
	ld	r5,_MSR(r1)
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r5
1:
	DEBUG_SRR_VALID srr

BEGIN_FTR_SECTION
	stdcx.	r0,0,r1			/* to clear the reservation */
END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	cmpdi	r3,0
	bne	.Lsyscall_restore_regs
	/* Zero volatile regs that may contain sensitive kernel data */
	ZEROIZE_GPR(0)
	ZEROIZE_GPRS(4, 12)
	mtctr	r0
	mtspr	SPRN_XER,r0
.Lsyscall_restore_regs_cont:

BEGIN_FTR_SECTION
	HMT_MEDIUM_LOW
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/*
	 * We don't need to restore AMR on the way back to userspace for KUAP.
	 * The value of AMR only matters while we're in the kernel.
	 */
	mtcr	r2
	REST_GPRS(2, 3, r1)
	REST_GPR(13, r1)
	REST_GPR(1, r1)
	RFI_TO_USER
	b	.	/* prevent speculative execution */

.Lsyscall_restore_regs:
	ld	r3,_CTR(r1)
	ld	r4,_XER(r1)
	REST_NVGPRS(r1)
	mtctr	r3
	mtspr	SPRN_XER,r4
	REST_GPR(0, r1)
	REST_GPRS(4, 12, r1)
	b	.Lsyscall_restore_regs_cont
.Lsyscall_rst_end:

#ifdef CONFIG_PPC_BOOK3S
syscall_restart:
_ASM_NOKPROBE_SYMBOL(syscall_restart)
	GET_PACA(r13)
	ld	r1,PACA_EXIT_SAVE_R1(r13)
	LOAD_PACA_TOC()
	ld	r3,RESULT(r1)
	addi	r4,r1,STACK_FRAME_OVERHEAD
	li	r11,IRQS_ALL_DISABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	bl	syscall_exit_restart
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
	b	.Lsyscall_rst_start
1:

SOFT_MASK_TABLE(.Lsyscall_rst_start, 1b)
RESTART_TABLE(.Lsyscall_rst_start, .Lsyscall_rst_end, syscall_restart)
#endif

	/*
	 * If MSR EE/RI was never enabled, IRQs not reconciled, NVGPRs not
	 * touched, no exit work created, then this can be used.
	 */
	.balign IFETCH_ALIGN_BYTES
	.globl fast_interrupt_return_srr
fast_interrupt_return_srr:
_ASM_NOKPROBE_SYMBOL(fast_interrupt_return_srr)
	kuap_check_amr r3, r4
	ld	r5,_MSR(r1)
	andi.	r0,r5,MSR_PR
#ifdef CONFIG_PPC_BOOK3S
	beq	1f
	kuap_user_restore r3, r4
	b	.Lfast_user_interrupt_return_srr
1:	kuap_kernel_restore r3, r4
	andi.	r0,r5,MSR_RI
	li	r3,0 /* 0 return value, no EMULATE_STACK_STORE */
	bne+	.Lfast_kernel_interrupt_return_srr
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unrecoverable_exception
	b	. /* should not get here */
#else
	bne	.Lfast_user_interrupt_return_srr
	b	.Lfast_kernel_interrupt_return_srr
#endif

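/*
 * Interrupt return. Instantiated below for the SRR and (on Book3S) HSRR
 * flavours; \srr selects which save/restore register pair and which RFI
 * variant is used.
 */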
.macro interrupt_return_macro srr
	.balign IFETCH_ALIGN_BYTES
	.globl interrupt_return_\srr
interrupt_return_\srr\():
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\())
	ld	r4,_MSR(r1)
	andi.	r0,r4,MSR_PR
	beq	interrupt_return_\srr\()_kernel
interrupt_return_\srr\()_user: /* make backtraces match the _kernel variant */
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_user)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	interrupt_exit_user_prepare
	cmpdi	r3,0
	bne-	.Lrestore_nvgprs_\srr
.Lrestore_nvgprs_\srr\()_cont:
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
#ifdef CONFIG_PPC_BOOK3S
.Linterrupt_return_\srr\()_user_rst_start:
	lbz	r11,PACAIRQHAPPENED(r13)
	andi.	r11,r11,(~PACA_IRQ_HARD_DIS)@l
	bne-	interrupt_return_\srr\()_user_restart
#endif
	li	r11,IRQS_ENABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	li	r11,0
	stb	r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS

.Lfast_user_interrupt_return_\srr\():
#ifdef CONFIG_PPC_BOOK3S
	.ifc \srr,srr
	lbz	r4,PACASRR_VALID(r13)
	.else
	lbz	r4,PACAHSRR_VALID(r13)
	.endif
	cmpdi	r4,0
	li	r4,0
	bne	1f
#endif
	ld	r11,_NIP(r1)
	ld	r12,_MSR(r1)
	.ifc \srr,srr
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
1:
#ifdef CONFIG_PPC_BOOK3S
	stb	r4,PACASRR_VALID(r13)
#endif
	.else
	mtspr	SPRN_HSRR0,r11
	mtspr	SPRN_HSRR1,r12
1:
#ifdef CONFIG_PPC_BOOK3S
	stb	r4,PACAHSRR_VALID(r13)
#endif
	.endif
	DEBUG_SRR_VALID \srr

#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
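	/* Trap if returning to user with the irq soft-mask still disabled. */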
	lbz	r4,PACAIRQSOFTMASK(r13)
	tdnei	r4,IRQS_ENABLED
#endif

BEGIN_FTR_SECTION
	ld	r10,_PPR(r1)
	mtspr	SPRN_PPR,r10
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

BEGIN_FTR_SECTION
	stdcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	ldarx	r0,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	ld	r3,_CCR(r1)
	ld	r4,_LINK(r1)
	ld	r5,_CTR(r1)
	ld	r6,_XER(r1)
	li	r0,0

	REST_GPRS(7, 13, r1)

	mtcr	r3
	mtlr	r4
	mtctr	r5
	mtspr	SPRN_XER,r6

	REST_GPRS(2, 6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	.ifc \srr,srr
	RFI_TO_USER
	.else
	HRFI_TO_USER
	.endif
	b	.	/* prevent speculative execution */
.Linterrupt_return_\srr\()_user_rst_end:

.Lrestore_nvgprs_\srr\():
	REST_NVGPRS(r1)
	b	.Lrestore_nvgprs_\srr\()_cont

#ifdef CONFIG_PPC_BOOK3S
interrupt_return_\srr\()_user_restart:
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_user_restart)
	GET_PACA(r13)
	ld	r1,PACA_EXIT_SAVE_R1(r13)
	LOAD_PACA_TOC()
	addi	r3,r1,STACK_FRAME_OVERHEAD
	li	r11,IRQS_ALL_DISABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	bl	interrupt_exit_user_restart
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
	b	.Linterrupt_return_\srr\()_user_rst_start
1:

SOFT_MASK_TABLE(.Linterrupt_return_\srr\()_user_rst_start, 1b)
RESTART_TABLE(.Linterrupt_return_\srr\()_user_rst_start, .Linterrupt_return_\srr\()_user_rst_end, interrupt_return_\srr\()_user_restart)
#endif

	.balign IFETCH_ALIGN_BYTES
interrupt_return_\srr\()_kernel:
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_kernel)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	interrupt_exit_kernel_prepare

	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
.Linterrupt_return_\srr\()_kernel_rst_start:
	ld	r11,SOFTE(r1)
	cmpwi	r11,IRQS_ENABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	beq	.Linterrupt_return_\srr\()_soft_enabled

	/*
	 * Returning to soft-disabled context.
	 * Check if a MUST_HARD_MASK interrupt has become pending, in which
	 * case we need to disable MSR[EE] in the return context.
	 *
	 * The MSR[EE] check catches among other things the short incoherency
	 * in hard_irq_disable() between clearing MSR[EE] and setting
	 * PACA_IRQ_HARD_DIS.
	 */
	ld	r12,_MSR(r1)
	andi.	r10,r12,MSR_EE
	beq	.Lfast_kernel_interrupt_return_\srr\() // EE already disabled
	lbz	r11,PACAIRQHAPPENED(r13)
	andi.	r10,r11,PACA_IRQ_MUST_HARD_MASK
	bne	1f // HARD_MASK is pending
	// No HARD_MASK pending, clear possible HARD_DIS set by interrupt
	andi.	r11,r11,(~PACA_IRQ_HARD_DIS)@l
	stb	r11,PACAIRQHAPPENED(r13)
	b	.Lfast_kernel_interrupt_return_\srr\()

1:	/* Must clear MSR_EE from _MSR */
#ifdef CONFIG_PPC_BOOK3S
	li	r10,0
	/* Clear valid before changing _MSR */
	.ifc \srr,srr
	stb	r10,PACASRR_VALID(r13)
	.else
	stb	r10,PACAHSRR_VALID(r13)
	.endif
#endif
	xori	r12,r12,MSR_EE
	std	r12,_MSR(r1)
	b	.Lfast_kernel_interrupt_return_\srr\()

.Linterrupt_return_\srr\()_soft_enabled:
	/*
	 * In the soft-enabled case, need to double-check that we have no
	 * pending interrupts that might have come in before we reached the
	 * restart section of code, and restart the exit so those can be
	 * handled.
	 *
	 * If there are none, it is possible that the interrupt still
	 * has PACA_IRQ_HARD_DIS set, which needs to be cleared for the
	 * interrupted context. This clear will not clobber a new pending
	 * interrupt coming in, because we're in the restart section, so
	 * any such interrupt would return to the restart location.
	 */
#ifdef CONFIG_PPC_BOOK3S
	lbz	r11,PACAIRQHAPPENED(r13)
	andi.	r11,r11,(~PACA_IRQ_HARD_DIS)@l
	bne-	interrupt_return_\srr\()_kernel_restart
#endif
	li	r11,0
	stb	r11,PACAIRQHAPPENED(r13) // clear the possible HARD_DIS

.Lfast_kernel_interrupt_return_\srr\():
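	/*
	 * cr1 records whether interrupt_exit_kernel_prepare() returned
	 * nonzero in r3, i.e. whether a stack store must be emulated; it is
	 * tested by the bne- cr1 below.
	 */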
	cmpdi	cr1,r3,0
#ifdef CONFIG_PPC_BOOK3S
	.ifc \srr,srr
	lbz	r4,PACASRR_VALID(r13)
	.else
	lbz	r4,PACAHSRR_VALID(r13)
	.endif
	cmpdi	r4,0
	li	r4,0
	bne	1f
#endif
	ld	r11,_NIP(r1)
	ld	r12,_MSR(r1)
	.ifc \srr,srr
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
1:
#ifdef CONFIG_PPC_BOOK3S
	stb	r4,PACASRR_VALID(r13)
#endif
	.else
	mtspr	SPRN_HSRR0,r11
	mtspr	SPRN_HSRR1,r12
1:
#ifdef CONFIG_PPC_BOOK3S
	stb	r4,PACAHSRR_VALID(r13)
#endif
	.endif
	DEBUG_SRR_VALID \srr

BEGIN_FTR_SECTION
	stdcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	ldarx	r0,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	ld	r3,_LINK(r1)
	ld	r4,_CTR(r1)
	ld	r5,_XER(r1)
	ld	r6,_CCR(r1)
	li	r0,0

	REST_GPRS(7, 12, r1)

	mtlr	r3
	mtctr	r4
	mtspr	SPRN_XER,r5

	/*
	 * Leaving a stale STACK_FRAME_REGS_MARKER on the stack can confuse
	 * the reliable stack unwinder later on. Clear it.
	 */
	std	r0,STACK_FRAME_OVERHEAD-16(r1)

	REST_GPRS(2, 5, r1)

	bne-	cr1,1f /* emulate stack store */
	mtcr	r6
	REST_GPR(6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	.ifc \srr,srr
	RFI_TO_KERNEL
	.else
	HRFI_TO_KERNEL
	.endif
	b	.	/* prevent speculative execution */

1:	/*
	 * Emulate stack store with update. New r1 value was already calculated
	 * and updated in our interrupt regs by emulate_loadstore, but we can't
	 * store the previous value of r1 to the stack before re-loading our
	 * registers from it, otherwise they could be clobbered.  Use
	 * PACA_EXGEN as temporary storage to hold the store data, as
	 * interrupts are disabled here so it won't be clobbered.
	 */
	mtcr	r6
	std	r9,PACA_EXGEN+0(r13)
	addi	r9,r1,INT_FRAME_SIZE /* get original r1 */
	REST_GPR(6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	std	r9,0(r1) /* perform store component of stdu */
	ld	r9,PACA_EXGEN+0(r13)

	.ifc \srr,srr
	RFI_TO_KERNEL
	.else
	HRFI_TO_KERNEL
	.endif
	b	.	/* prevent speculative execution */
.Linterrupt_return_\srr\()_kernel_rst_end:

#ifdef CONFIG_PPC_BOOK3S
interrupt_return_\srr\()_kernel_restart:
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_kernel_restart)
	GET_PACA(r13)
	ld	r1,PACA_EXIT_SAVE_R1(r13)
	LOAD_PACA_TOC()
	addi	r3,r1,STACK_FRAME_OVERHEAD
	li	r11,IRQS_ALL_DISABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	bl	interrupt_exit_kernel_restart
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
	b	.Linterrupt_return_\srr\()_kernel_rst_start
1:

SOFT_MASK_TABLE(.Linterrupt_return_\srr\()_kernel_rst_start, 1b)
RESTART_TABLE(.Linterrupt_return_\srr\()_kernel_rst_start, .Linterrupt_return_\srr\()_kernel_rst_end, interrupt_return_\srr\()_kernel_restart)
#endif

.endm

interrupt_return_macro srr
#ifdef CONFIG_PPC_BOOK3S
interrupt_return_macro hsrr

	.globl __end_soft_masked
__end_soft_masked:
DEFINE_FIXED_SYMBOL(__end_soft_masked, text)
#endif /* CONFIG_PPC_BOOK3S */

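/*
 * Child-return paths. A newly forked task first runs schedule_tail() to
 * finish the context switch begun by its parent, then unwinds through the
 * normal syscall exit with a return value of 0.
 */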
#ifdef CONFIG_PPC_BOOK3S
_GLOBAL(ret_from_fork_scv)
	bl	schedule_tail
	REST_NVGPRS(r1)
	li	r3,0	/* fork() return value */
	b	.Lsyscall_vectored_common_exit
#endif

_GLOBAL(ret_from_fork)
	bl	schedule_tail
	REST_NVGPRS(r1)
	li	r3,0	/* fork() return value */
	b	.Lsyscall_exit

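/*
 * Kernel threads: the function to call and its argument arrive in the
 * non-volatile registers r14 and r15 (set up by copy_thread; an assumption
 * inferred from the register usage below). ELF ABI v2 callees expect their
 * own entry address in r12 for TOC setup.
 */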
_GLOBAL(ret_from_kernel_thread)
	bl	schedule_tail
	REST_NVGPRS(r1)
	mtctr	r14
	mr	r3,r15
#ifdef CONFIG_PPC64_ELF_ABI_V2
	mr	r12,r14
#endif
	bctrl
	li	r3,0
	b	.Lsyscall_exit
