/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the system call entry code, context switch
 *  code, and exception/interrupt return code for PowerPC.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/sys.h>
#include <linux/threads.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/ptrace.h>
#include <asm/export.h>
#include <asm/feature-fixups.h>
#include <asm/barrier.h>
#include <asm/kup.h>
#include <asm/bug.h>

#include "head_32.h"

/*
 * powerpc relies on return from interrupt/syscall being context synchronising
 * (which rfi is) to support ARCH_HAS_MEMBARRIER_SYNC_CORE without additional
 * synchronisation instructions.
 */

/*
 * Align to 4k to ensure that all functions modifying srr0/srr1 fit into
 * a single page, so that no TLB miss can occur between the modification
 * of srr0/srr1 and the associated rfi.
 */
	.align	12

#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500)
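/*
 * Called from the exception prologues (see head_32.h) when an interrupt is
 * taken from kernel mode.  If the CPU was napping or sleeping in the idle
 * loop, the corresponding TLF_ flag is cleared and control resumes the
 * power-save exit path instead of the regular handler.
 */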
	.globl	prepare_transfer_to_handler
prepare_transfer_to_handler:
	/* if from kernel, check interrupted DOZE/NAP mode */
	lwz	r12,TI_LOCAL_FLAGS(r2)
	mtcrf	0x01,r12
	bt-	31-TLF_NAPPING,4f
	bt-	31-TLF_SLEEPING,7f
	blr

4:	rlwinm	r12,r12,0,~_TLF_NAPPING
	stw	r12,TI_LOCAL_FLAGS(r2)
	b	power_save_ppc32_restore

7:	rlwinm	r12,r12,0,~_TLF_SLEEPING
	stw	r12,TI_LOCAL_FLAGS(r2)
	lwz	r9,_MSR(r11)		/* if sleeping, clear MSR.EE */
	rlwinm	r9,r9,0,~MSR_EE
	lwz	r12,_LINK(r11)		/* and return to address in LR */
	lwz	r2, GPR2(r11)
	b	fast_exception_return
_ASM_NOKPROBE_SYMBOL(prepare_transfer_to_handler)
#endif /* CONFIG_PPC_BOOK3S_32 || CONFIG_E500 */

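/*
 * System call entry.  The prologue in head_32.h has already saved the
 * volatile GPRs into the pt_regs frame; on arrival r1 points to that
 * frame and r0 still holds the syscall number.
 */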
	.globl	transfer_to_syscall
transfer_to_syscall:
	SAVE_NVGPRS(r1)

	/* Calling convention has r9 = orig r0, r10 = regs */
	addi	r10,r1,STACK_FRAME_OVERHEAD
	mr	r9,r0
	bl	system_call_exception

ret_from_syscall:
	addi    r4,r1,STACK_FRAME_OVERHEAD
	li	r5,0
	bl	syscall_exit_prepare
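	/*
	 * syscall_exit_prepare() returns a non-zero value in r3 when the
	 * full register set has to be restored (e.g. after ptrace or
	 * signal handling); that result is tested below and sends us to
	 * the slow restore path at 3f.
	 */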
#ifdef CONFIG_PPC_47x
	lis	r4,icache_44x_need_flush@ha
	lwz	r5,icache_44x_need_flush@l(r4)
	cmplwi	cr0,r5,0
	bne-	2f
#endif /* CONFIG_PPC_47x */
	lwz	r4,_LINK(r1)
	lwz	r5,_CCR(r1)
	mtlr	r4
	lwz	r7,_NIP(r1)
	lwz	r8,_MSR(r1)
	cmpwi	r3,0
	lwz	r3,GPR3(r1)
syscall_exit_finish:
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8

	bne	3f
	mtcr	r5

1:	lwz	r2,GPR2(r1)
	lwz	r1,GPR1(r1)
	rfi
#ifdef CONFIG_40x
	b .	/* Prevent prefetch past rfi */
#endif

3:	mtcr	r5
	lwz	r4,_CTR(r1)
	lwz	r5,_XER(r1)
	REST_NVGPRS(r1)
	mtctr	r4
	mtxer	r5
	lwz	r0,GPR0(r1)
	lwz	r3,GPR3(r1)
	REST_8GPRS(4,r1)
	lwz	r12,GPR12(r1)
	b	1b

#ifdef CONFIG_44x
2:	li	r7,0
	iccci	r0,r0
	stw	r7,icache_44x_need_flush@l(r4)
	b	1b
#endif  /* CONFIG_44x */

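/*
 * Both ret_from_fork and ret_from_kernel_thread are reached through the
 * switch frame that copy_thread() builds for a new task.  schedule_tail()
 * finishes off the previous task; ret_from_fork then returns 0 to the
 * child via the normal syscall exit path, while ret_from_kernel_thread
 * first calls the thread function held in r14 with the argument in r15.
 */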
	.globl	ret_from_fork
ret_from_fork:
	REST_NVGPRS(r1)
	bl	schedule_tail
	li	r3,0
	b	ret_from_syscall

	.globl	ret_from_kernel_thread
ret_from_kernel_thread:
	REST_NVGPRS(r1)
	bl	schedule_tail
	mtlr	r14
	mr	r3,r15
	PPC440EP_ERR42
	blrl
	li	r3,0
	b	ret_from_syscall

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * This routine is always called with interrupts disabled.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this, you'll have to
 * change the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
_GLOBAL(_switch)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	/* r3-r12 are caller saved -- Cort */
	SAVE_NVGPRS(r1)
	stw	r0,_NIP(r1)	/* Return to switch caller */
	mfmsr	r11
	li	r0,MSR_FP	/* Disable floating-point */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r12,SPRN_VRSAVE	/* save vrsave register value */
	stw	r12,THREAD+THREAD_VRSAVE(r2)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_SPE@h	 /* Disable SPE */
	mfspr	r12,SPRN_SPEFSCR /* save spefscr register value */
	stw	r12,THREAD+THREAD_SPEFSCR(r2)
END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */
	and.	r0,r0,r11	/* FP or altivec or SPE enabled? */
	beq+	1f
	andc	r11,r11,r0
	mtmsr	r11
	isync
1:	stw	r11,_MSR(r1)
	mfcr	r10
	stw	r10,_CCR(r1)
	stw	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	tophys(r0,r4)
	mtspr	SPRN_SPRG_THREAD,r0	/* Update current THREAD phys addr */
	lwz	r1,KSP(r4)	/* Load new stack pointer */

	/* save the old current 'last' for return value */
	mr	r3,r2
	addi	r2,r4,-THREAD	/* Update current */

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_VRSAVE(r2)
	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_SPEFSCR(r2)
	mtspr	SPRN_SPEFSCR,r0		/* restore SPEFSCR reg */
END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */

	lwz	r0,_CCR(r1)
	mtcrf	0xFF,r0
	/* r3-r12 are destroyed -- Cort */
	REST_NVGPRS(r1)

	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r4
	addi	r1,r1,INT_FRAME_SIZE
	blr

	.globl	fast_exception_return
fast_exception_return:
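	/*
	 * On entry r11 points to the exception frame, r9 holds the MSR to
	 * be restored (destined for SRR1) and r12 the return address
	 * (destined for SRR0); everything else is reloaded from the frame
	 * before the rfi.
	 */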
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
	beq	3f			/* if not, we've got problems */
#endif

2:	REST_4GPRS(3, r11)
	lwz	r10,_CCR(r11)
	REST_2GPRS(1, r11)
	mtcr	r10
	lwz	r10,_LINK(r11)
	mtlr	r10
	/* Clear the exception_marker on the stack to avoid confusing stacktrace */
	li	r10, 0
	stw	r10, 8(r11)
	REST_GPR(10, r11)
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
	mtspr	SPRN_NRI, r0
#endif
	mtspr	SPRN_SRR1,r9
	mtspr	SPRN_SRR0,r12
	REST_GPR(9, r11)
	REST_GPR(12, r11)
	lwz	r11,GPR11(r11)
	rfi
#ifdef CONFIG_40x
	b .	/* Prevent prefetch past rfi */
#endif
_ASM_NOKPROBE_SYMBOL(fast_exception_return)

/* aargh, a nonrecoverable interrupt, panic */
/* aargh, we don't know which trap this is */
3:
	li	r10,-1
	stw	r10,_TRAP(r11)
	prepare_transfer_to_handler
	bl	unrecoverable_exception
	trap	/* should not get here */

	.globl interrupt_return
interrupt_return:
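	/*
	 * Common interrupt exit: the saved MSR tells us whether we are
	 * returning to user or kernel context.  User returns go through
	 * interrupt_exit_user_prepare(); a non-zero result in r3 means the
	 * non-volatile GPRs must be restored too (.Lrestore_nvgprs).
	 * Kernel returns use interrupt_exit_kernel_prepare() below.
	 */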
	lwz	r4,_MSR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	andi.	r0,r4,MSR_PR
	beq	.Lkernel_interrupt_return
	bl	interrupt_exit_user_prepare
	cmpwi	r3,0
	bne-	.Lrestore_nvgprs

.Lfast_user_interrupt_return:
	lwz	r11,_NIP(r1)
	lwz	r12,_MSR(r1)
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12

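	/*
	 * Clear any outstanding reservation so that a stwcx. in the
	 * interrupted context cannot succeed by accident: CPUs that do not
	 * check the reservation address need a dummy stwcx., while on CPUs
	 * with CPU_FTR_STCX_CHECKS_ADDRESS a lwarx on the stack is enough
	 * to invalidate it.
	 */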
BEGIN_FTR_SECTION
	stwcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	lwarx	r0,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	lwz	r3,_CCR(r1)
	lwz	r4,_LINK(r1)
	lwz	r5,_CTR(r1)
	lwz	r6,_XER(r1)
	li	r0,0

	/*
	 * Leaving a stale exception_marker on the stack can confuse
	 * the reliable stack unwinder later on. Clear it.
	 */
	stw	r0,8(r1)
	REST_4GPRS(7, r1)
	REST_2GPRS(11, r1)

	mtcr	r3
	mtlr	r4
	mtctr	r5
	mtspr	SPRN_XER,r6

	REST_4GPRS(2, r1)
	REST_GPR(6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	rfi
#ifdef CONFIG_40x
	b .	/* Prevent prefetch past rfi */
#endif

.Lrestore_nvgprs:
	REST_NVGPRS(r1)
	b	.Lfast_user_interrupt_return

.Lkernel_interrupt_return:
	bl	interrupt_exit_kernel_prepare
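	/*
	 * A non-zero result in r3 means the interrupted instruction was an
	 * emulated stwu to the stack; cr1 remembers that so the store can
	 * be replayed at 1: below once the registers have been restored.
	 */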

.Lfast_kernel_interrupt_return:
	cmpwi	cr1,r3,0
	lwz	r11,_NIP(r1)
	lwz	r12,_MSR(r1)
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12

BEGIN_FTR_SECTION
	stwcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	lwarx	r0,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	lwz	r3,_LINK(r1)
	lwz	r4,_CTR(r1)
	lwz	r5,_XER(r1)
	lwz	r6,_CCR(r1)
	li	r0,0

	REST_4GPRS(7, r1)
	REST_2GPRS(11, r1)

	mtlr	r3
	mtctr	r4
	mtspr	SPRN_XER,r5

	/*
	 * Leaving a stale exception_marker on the stack can confuse
	 * the reliable stack unwinder later on. Clear it.
	 */
	stw	r0,8(r1)

	REST_4GPRS(2, r1)

	bne-	cr1,1f /* emulate stack store */
	mtcr	r6
	REST_GPR(6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	rfi
#ifdef CONFIG_40x
	b .	/* Prevent prefetch past rfi */
#endif

1:	/*
	 * Emulate stack store with update. New r1 value was already calculated
	 * and updated in our interrupt regs by emulate_loadstore, but we can't
	 * store the previous value of r1 to the stack before re-loading our
	 * registers from it, otherwise they could be clobbered.  Use
	 * SPRG Scratch0 as temporary storage to hold the store
	 * data, as interrupts are disabled here so it won't be clobbered.
	 */
	mtcr	r6
#ifdef CONFIG_BOOKE
	mtspr	SPRN_SPRG_WSCRATCH0, r9
#else
	mtspr	SPRN_SPRG_SCRATCH0, r9
#endif
	addi	r9,r1,INT_FRAME_SIZE /* get original r1 */
	REST_GPR(6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	stw	r9,0(r1) /* perform store component of stwu */
#ifdef CONFIG_BOOKE
	mfspr	r9, SPRN_SPRG_RSCRATCH0
#else
	mfspr	r9, SPRN_SPRG_SCRATCH0
#endif
	rfi
#ifdef CONFIG_40x
	b .	/* Prevent prefetch past rfi */
#endif
_ASM_NOKPROBE_SYMBOL(interrupt_return)

#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)

/*
 * Returning from a critical interrupt in user mode doesn't need
 * to be any different from a normal exception.  For a critical
 * interrupt in the kernel, we just return (without checking for
 * preemption) since the interrupt may have happened at some crucial
 * place (e.g. inside the TLB miss handler), and because we will be
 * running with r1 pointing into critical_stack, not the current
 * process's kernel stack (and therefore current_thread_info() will
 * give the wrong answer).
 * We have to restore various SPRs that may have been in use at the
 * time of the critical interrupt.
 *
 */
#ifdef CONFIG_40x
#define PPC_40x_TURN_OFF_MSR_DR						    \
	/* avoid any possible TLB misses here by turning off MSR.DR, we	    \
	 * assume the instructions here are mapped by a pinned TLB entry */ \
	li	r10,MSR_IR;						    \
	mtmsr	r10;							    \
	isync;								    \
	tophys(r1, r1);
#else
#define PPC_40x_TURN_OFF_MSR_DR
#endif

#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
	REST_NVGPRS(r1);						\
	lwz	r3,_MSR(r1);						\
	andi.	r3,r3,MSR_PR;						\
	bne	interrupt_return;					\
	lwz	r0,GPR0(r1);						\
	lwz	r2,GPR2(r1);						\
	REST_4GPRS(3, r1);						\
	REST_2GPRS(7, r1);						\
	lwz	r10,_XER(r1);						\
	lwz	r11,_CTR(r1);						\
	mtspr	SPRN_XER,r10;						\
	mtctr	r11;							\
	stwcx.	r0,0,r1;		/* to clear the reservation */	\
	lwz	r11,_LINK(r1);						\
	mtlr	r11;							\
	lwz	r10,_CCR(r1);						\
	mtcrf	0xff,r10;						\
	PPC_40x_TURN_OFF_MSR_DR;					\
	lwz	r9,_DEAR(r1);						\
	lwz	r10,_ESR(r1);						\
	mtspr	SPRN_DEAR,r9;						\
	mtspr	SPRN_ESR,r10;						\
	lwz	r11,_NIP(r1);						\
	lwz	r12,_MSR(r1);						\
	mtspr	exc_lvl_srr0,r11;					\
	mtspr	exc_lvl_srr1,r12;					\
	lwz	r9,GPR9(r1);						\
	lwz	r12,GPR12(r1);						\
	lwz	r10,GPR10(r1);						\
	lwz	r11,GPR11(r1);						\
	lwz	r1,GPR1(r1);						\
	exc_lvl_rfi;							\
	b	.;		/* prevent prefetch past exc_lvl_rfi */

#define	RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1)			\
	lwz	r9,_##exc_lvl_srr0(r1);					\
	lwz	r10,_##exc_lvl_srr1(r1);				\
	mtspr	SPRN_##exc_lvl_srr0,r9;					\
	mtspr	SPRN_##exc_lvl_srr1,r10;
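
/*
 * RESTORE_xSRR reloads a saved SRR pair from the exception frame and puts
 * it back into the corresponding SPRs, so that state which was live when
 * the higher-level exception hit (for instance SRR0/SRR1 of an interrupt
 * that had not yet done its rfi) is intact again before we return.
 */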

#if defined(CONFIG_PPC_BOOK3E_MMU)
#ifdef CONFIG_PHYS_64BIT
#define	RESTORE_MAS7							\
	lwz	r11,MAS7(r1);						\
	mtspr	SPRN_MAS7,r11;
#else
#define	RESTORE_MAS7
#endif /* CONFIG_PHYS_64BIT */
#define RESTORE_MMU_REGS						\
	lwz	r9,MAS0(r1);						\
	lwz	r10,MAS1(r1);						\
	lwz	r11,MAS2(r1);						\
	mtspr	SPRN_MAS0,r9;						\
	lwz	r9,MAS3(r1);						\
	mtspr	SPRN_MAS1,r10;						\
	lwz	r10,MAS6(r1);						\
	mtspr	SPRN_MAS2,r11;						\
	mtspr	SPRN_MAS3,r9;						\
	mtspr	SPRN_MAS6,r10;						\
	RESTORE_MAS7;
#elif defined(CONFIG_44x)
#define RESTORE_MMU_REGS						\
	lwz	r9,MMUCR(r1);						\
	mtspr	SPRN_MMUCR,r9;
#else
#define RESTORE_MMU_REGS
#endif
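
/*
 * RESTORE_MMU_REGS puts back the MMU assist state (the MAS registers or
 * MMUCR) saved by the exception prologue, in case the critical/debug/
 * machine check interrupt arrived in the middle of a TLB miss handler
 * that was still using it.
 */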

#ifdef CONFIG_40x
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	lis	r9,crit_srr0@ha;
	lwz	r9,crit_srr0@l(r9);
	lis	r10,crit_srr1@ha;
	lwz	r10,crit_srr1@l(r10);
	mtspr	SPRN_SRR0,r9;
	mtspr	SPRN_SRR1,r10;
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
_ASM_NOKPROBE_SYMBOL(ret_from_crit_exc)
#endif /* CONFIG_40x */

#ifdef CONFIG_BOOKE
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
_ASM_NOKPROBE_SYMBOL(ret_from_crit_exc)

	.globl	ret_from_debug_exc
ret_from_debug_exc:
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)
_ASM_NOKPROBE_SYMBOL(ret_from_debug_exc)

	.globl	ret_from_mcheck_exc
ret_from_mcheck_exc:
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_xSRR(DSRR0,DSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
_ASM_NOKPROBE_SYMBOL(ret_from_mcheck_exc)
#endif /* CONFIG_BOOKE */
#endif /* CONFIG_4xx || CONFIG_BOOKE */

/*
 * PROM code for specific machines follows.  Put it
 * here so it's easy to add arch-specific sections later.
 * -- Cort
 */
#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 */
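/*
 * The sequence below saves the caller's MSR, switches to real mode by
 * loading SRR0/SRR1 with the RTAS entry point and an MSR with IR/DR
 * cleared, and rfi's into RTAS.  RTAS returns to the physical address in
 * LR (label 1 below), where a second rfi turns address translation back
 * on before the saved MSR is restored and we return to the caller.
 */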
_GLOBAL(enter_rtas)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	LOAD_REG_ADDR(r4, rtas)
	lis	r6,1f@ha	/* physical return address for rtas */
	addi	r6,r6,1f@l
	tophys(r6,r6)
	lwz	r8,RTASENTRY(r4)
	lwz	r4,RTASBASE(r4)
	mfmsr	r9
	stw	r9,8(r1)
	LOAD_REG_IMMEDIATE(r0,MSR_KERNEL)
	mtmsr	r0	/* disable interrupts so SRR0/1 don't get trashed */
	li	r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
	mtlr	r6
	stw	r1, THREAD + RTAS_SP(r2)
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	rfi
1:
	lis	r8, 1f@h
	ori	r8, r8, 1f@l
	LOAD_REG_IMMEDIATE(r9,MSR_KERNEL)
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	rfi			/* Reactivate MMU translation */
1:
	lwz	r8,INT_FRAME_SIZE+4(r1)	/* get return address */
	lwz	r9,8(r1)	/* original msr value */
	addi	r1,r1,INT_FRAME_SIZE
	li	r0,0
	stw	r0, THREAD + RTAS_SP(r2)
	mtlr	r8
	mtmsr	r9
	blr			/* return to caller */
_ASM_NOKPROBE_SYMBOL(enter_rtas)
#endif /* CONFIG_PPC_RTAS */
599