/* xref: /openbmc/linux/arch/s390/kernel/entry.S (revision 3d3337de) */
/*
 *    S390 low-level entry points.
 *
 *    Copyright IBM Corp. 1999, 2012
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *		 Hartmut Penner (hp@de.ibm.com),
 *		 Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *		 Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/page.h>
#include <asm/sigp.h>
#include <asm/irq.h>

__PT_R0      =	__PT_GPRS
__PT_R1      =	__PT_GPRS + 8
__PT_R2      =	__PT_GPRS + 16
__PT_R3      =	__PT_GPRS + 24
__PT_R4      =	__PT_GPRS + 32
__PT_R5      =	__PT_GPRS + 40
__PT_R6      =	__PT_GPRS + 48
__PT_R7      =	__PT_GPRS + 56
__PT_R8      =	__PT_GPRS + 64
__PT_R9      =	__PT_GPRS + 72
__PT_R10     =	__PT_GPRS + 80
__PT_R11     =	__PT_GPRS + 88
__PT_R12     =	__PT_GPRS + 96
__PT_R13     =	__PT_GPRS + 104
__PT_R14     =	__PT_GPRS + 112
__PT_R15     =	__PT_GPRS + 120

STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
STACK_SIZE  = 1 << STACK_SHIFT
STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE

_TIF_WORK	= (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
		   _TIF_UPROBE)
_TIF_TRACE	= (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
		   _TIF_SYSCALL_TRACEPOINT)
_CIF_WORK	= (_CIF_MCCK_PENDING | _CIF_ASCE)
_PIF_WORK	= (_PIF_PER_TRAP)
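# Note on the three flag name spaces used throughout this file:
# _TIF_* bits live in thread_info->flags, _CIF_* bits in the lowcore
# CPU flags (__LC_CPU_FLAGS), and _PIF_* bits in pt_regs->flags.  The
# *_WORK masks above collect the bits that force a detour through the
# work-handling paths before returning to user space.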

#define BASED(name) name-system_call(%r13)

	.macro	TRACE_IRQS_ON
#ifdef CONFIG_TRACE_IRQFLAGS
	basr	%r2,%r0
	brasl	%r14,trace_hardirqs_on_caller
#endif
	.endm

	.macro	TRACE_IRQS_OFF
#ifdef CONFIG_TRACE_IRQFLAGS
	basr	%r2,%r0
	brasl	%r14,trace_hardirqs_off_caller
#endif
	.endm

	.macro	LOCKDEP_SYS_EXIT
#ifdef CONFIG_LOCKDEP
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	jz	.+10
	brasl	%r14,lockdep_sys_exit
#endif
	.endm

	.macro LPP newpp
#if IS_ENABLED(CONFIG_KVM)
	tm	__LC_MACHINE_FLAGS+6,0x20	# MACHINE_FLAG_LPP
	jz	.+8
	.insn	s,0xb2800000,\newpp
#endif
	.endm

	.macro	HANDLE_SIE_INTERCEPT scratch,reason
#if IS_ENABLED(CONFIG_KVM)
	tmhh	%r8,0x0001		# interrupting from user ?
	jnz	.+62
	lgr	\scratch,%r9
	slg	\scratch,BASED(.Lsie_critical)
	clg	\scratch,BASED(.Lsie_critical_length)
	.if	\reason==1
	# Some program interrupts are suppressing (e.g. protection).
	# We must also check the instruction after SIE in that case.
	# do_protection_exception will rewind to .Lrewind_pad
	jh	.+42
	.else
	jhe	.+42
	.endif
	lg	%r14,__SF_EMPTY(%r15)		# get control block pointer
	LPP	__SF_EMPTY+16(%r15)		# set host id
	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
	larl	%r9,sie_exit			# skip forward to sie_exit
	mvi	__SF_EMPTY+31(%r15),\reason	# set exit reason
#endif
	.endm

	.macro	CHECK_STACK stacksize,savearea
#ifdef CONFIG_CHECK_STACK
	tml	%r15,\stacksize - CONFIG_STACK_GUARD
	lghi	%r14,\savearea
	jz	stack_overflow
#endif
	.endm
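# CHECK_STACK in rough C terms (illustrative sketch only, relying on the
# stack size being a power of two with CONFIG_STACK_GUARD bytes reserved
# at the low end):
#	if ((sp & (stacksize - CONFIG_STACK_GUARD)) == 0)
#		goto stack_overflow;	/* sp is inside the guard area */
# %r14 is preloaded with the save area address for stack_overflow's use.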

	.macro	SWITCH_ASYNC savearea,stack,shift
	tmhh	%r8,0x0001		# interrupting from user ?
	jnz	1f
	lgr	%r14,%r9
	slg	%r14,BASED(.Lcritical_start)
	clg	%r14,BASED(.Lcritical_length)
	jhe	0f
	lghi	%r11,\savearea		# inside critical section, do cleanup
	brasl	%r14,cleanup_critical
	tmhh	%r8,0x0001		# retest problem state after cleanup
	jnz	1f
0:	lg	%r14,\stack		# are we already on the target stack?
	slgr	%r14,%r15
	srag	%r14,%r14,\shift
	jnz	1f
	CHECK_STACK 1<<\shift,\savearea
	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	j	2f
1:	lg	%r15,\stack		# load target stack
2:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	.endm
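# SWITCH_ASYNC selects the stack for an asynchronous interrupt.  Roughly:
# interrupts from user space always switch to \stack; interrupts from
# inside the .L__critical_start/.L__critical_end window first let
# cleanup_critical repair the half-finished state; interrupts that are
# already running on the target stack keep %r15 and only carve out room
# for a pt_regs area.  In all cases %r11 ends up pointing to pt_regs.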

	.macro UPDATE_VTIME scratch,enter_timer
	lg	\scratch,__LC_EXIT_TIMER
	slg	\scratch,\enter_timer
	alg	\scratch,__LC_USER_TIMER
	stg	\scratch,__LC_USER_TIMER
	lg	\scratch,__LC_LAST_UPDATE_TIMER
	slg	\scratch,__LC_EXIT_TIMER
	alg	\scratch,__LC_SYSTEM_TIMER
	stg	\scratch,__LC_SYSTEM_TIMER
	mvc	__LC_LAST_UPDATE_TIMER(8),\enter_timer
	.endm
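# UPDATE_VTIME does the CPU time accounting for an entry from user space.
# In rough C terms (illustrative only):
#	user_timer   += exit_timer - enter_timer;
#	system_timer += last_update_timer - exit_timer;
#	last_update_timer = enter_timer;
# where enter_timer is the CPU timer value saved by stpt on entry.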

	.macro	LAST_BREAK scratch
	srag	\scratch,%r10,23
	jz	.+10
	stg	%r10,__TI_last_break(%r12)
	.endm

	.macro REENABLE_IRQS
	stg	%r8,__LC_RETURN_PSW
	ni	__LC_RETURN_PSW,0xbf
	ssm	__LC_RETURN_PSW
	.endm
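# REENABLE_IRQS restores the interrupt mask of the interrupted context:
# %r8 still holds the mask half of the old PSW, so ssm on the stored
# byte re-enables exactly the interrupts that were enabled before, with
# the PER bit (0x40) cleared to avoid recursive PER events.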

	.macro STCK savearea
#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
	.insn	s,0xb27c0000,\savearea		# store clock fast
#else
	.insn	s,0xb2050000,\savearea		# store clock
#endif
	.endm

	.section .kprobes.text, "ax"

/*
 * Scheduler resume function, called by switch_to
 *  gpr2 = (task_struct *) prev
 *  gpr3 = (task_struct *) next
 * Returns:
 *  gpr2 = prev
 */
ENTRY(__switch_to)
	stmg	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
	stg	%r15,__THREAD_ksp(%r2)		# store kernel stack of prev
	lg	%r4,__THREAD_info(%r2)		# get thread_info of prev
	lg	%r5,__THREAD_info(%r3)		# get thread_info of next
	lgr	%r15,%r5
	aghi	%r15,STACK_INIT			# end of kernel stack of next
	stg	%r3,__LC_CURRENT		# store task struct of next
	stg	%r5,__LC_THREAD_INFO		# store thread info of next
	stg	%r15,__LC_KERNEL_STACK		# store end of kernel stack
	lctl	%c4,%c4,__TASK_pid(%r3)		# load pid to control reg. 4
	mvc	__LC_CURRENT_PID+4(4,%r0),__TASK_pid(%r3) # store pid of next
	lg	%r15,__THREAD_ksp(%r3)		# load kernel stack of next
	lmg	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
	br	%r14
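# In rough C terms the task switch above does (illustrative only, lowcore
# field names assumed):
#	save callee-saved gprs of prev on its stack;
#	prev->thread.ksp      = current stack pointer;
#	lowcore->current_task = next;
#	lowcore->thread_info  = task_thread_info(next);
#	lowcore->kernel_stack = (unsigned long) task_thread_info(next) + STACK_INIT;
#	lowcore->current_pid  = next->pid;	/* also loaded into %cr4 */
#	current stack pointer = next->thread.ksp;
#	restore callee-saved gprs of next and return.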

.L__critical_start:
/*
 * SVC interrupt handler routine. System calls are synchronous events and
 * are executed with interrupts enabled.
 */

ENTRY(system_call)
	stpt	__LC_SYNC_ENTER_TIMER
.Lsysc_stmg:
	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
	lg	%r10,__LC_LAST_BREAK
	lg	%r12,__LC_THREAD_INFO
	lghi	%r14,_PIF_SYSCALL
.Lsysc_per:
	lg	%r15,__LC_KERNEL_STACK
	la	%r11,STACK_FRAME_OVERHEAD(%r15)	# pointer to pt_regs
.Lsysc_vtime:
	UPDATE_VTIME %r13,__LC_SYNC_ENTER_TIMER
	LAST_BREAK %r13
	stmg	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
	mvc	__PT_PSW(16,%r11),__LC_SVC_OLD_PSW
	mvc	__PT_INT_CODE(4,%r11),__LC_SVC_ILC
	stg	%r14,__PT_FLAGS(%r11)
.Lsysc_do_svc:
	lg	%r10,__TI_sysc_table(%r12)	# address of system call table
	llgh	%r8,__PT_INT_CODE+2(%r11)
	slag	%r8,%r8,2			# shift and test for svc 0
	jnz	.Lsysc_nr_ok
	# svc 0: system call number in %r1
	llgfr	%r1,%r1				# clear high word in r1
	cghi	%r1,NR_syscalls
	jnl	.Lsysc_nr_ok
	sth	%r1,__PT_INT_CODE+2(%r11)
	slag	%r8,%r1,2
.Lsysc_nr_ok:
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	stg	%r2,__PT_ORIG_GPR2(%r11)
	stg	%r7,STACK_FRAME_OVERHEAD(%r15)
	lgf	%r9,0(%r8,%r10)			# get system call address
	tm	__TI_flags+7(%r12),_TIF_TRACE
	jnz	.Lsysc_tracesys
	basr	%r14,%r9			# call sys_xxxx
	stg	%r2,__PT_R2(%r11)		# store return value
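# The fast path above corresponds roughly to (illustrative sketch only):
#	regs->orig_gpr2 = regs->gprs[2];
#	regs->gprs[2] = sys_call_table[nr](arg1, ..., arg6);
# i.e. the original second argument is kept for system call restart and
# the return value replaces it in pt_regs.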

.Lsysc_return:
	LOCKDEP_SYS_EXIT
.Lsysc_tif:
	tm	__PT_PSW+1(%r11),0x01		# returning to user ?
	jno	.Lsysc_restore
	tm	__PT_FLAGS+7(%r11),_PIF_WORK
	jnz	.Lsysc_work
	tm	__TI_flags+7(%r12),_TIF_WORK
	jnz	.Lsysc_work			# check for work
	tm	__LC_CPU_FLAGS+7,_CIF_WORK
	jnz	.Lsysc_work
.Lsysc_restore:
	lg	%r14,__LC_VDSO_PER_CPU
	lmg	%r0,%r10,__PT_R0(%r11)
	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
	stpt	__LC_EXIT_TIMER
	mvc	__VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
	lmg	%r11,%r15,__PT_R11(%r11)
	lpswe	__LC_RETURN_PSW
.Lsysc_done:

#
# One of the work bits is on. Find out which one.
#
.Lsysc_work:
	tm	__LC_CPU_FLAGS+7,_CIF_MCCK_PENDING
	jo	.Lsysc_mcck_pending
	tm	__TI_flags+7(%r12),_TIF_NEED_RESCHED
	jo	.Lsysc_reschedule
#ifdef CONFIG_UPROBES
	tm	__TI_flags+7(%r12),_TIF_UPROBE
	jo	.Lsysc_uprobe_notify
#endif
	tm	__PT_FLAGS+7(%r11),_PIF_PER_TRAP
	jo	.Lsysc_singlestep
	tm	__TI_flags+7(%r12),_TIF_SIGPENDING
	jo	.Lsysc_sigpending
	tm	__TI_flags+7(%r12),_TIF_NOTIFY_RESUME
	jo	.Lsysc_notify_resume
	tm	__LC_CPU_FLAGS+7,_CIF_ASCE
	jo	.Lsysc_uaccess
	j	.Lsysc_return		# beware of critical section cleanup

#
# _TIF_NEED_RESCHED is set, call schedule
#
.Lsysc_reschedule:
	larl	%r14,.Lsysc_return
	jg	schedule

#
# _CIF_MCCK_PENDING is set, call handler
#
.Lsysc_mcck_pending:
	larl	%r14,.Lsysc_return
	jg	s390_handle_mcck	# TIF bit will be cleared by handler

#
# _CIF_ASCE is set, load user space asce
#
.Lsysc_uaccess:
	ni	__LC_CPU_FLAGS+7,255-_CIF_ASCE
	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
	j	.Lsysc_return

#
# _TIF_SIGPENDING is set, call do_signal
#
.Lsysc_sigpending:
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,do_signal
	tm	__PT_FLAGS+7(%r11),_PIF_SYSCALL
	jno	.Lsysc_return
	lmg	%r2,%r7,__PT_R2(%r11)	# load svc arguments
	lg	%r10,__TI_sysc_table(%r12)	# address of system call table
	lghi	%r8,0			# svc 0 returns -ENOSYS
	llgh	%r1,__PT_INT_CODE+2(%r11)	# load new svc number
	cghi	%r1,NR_syscalls
	jnl	.Lsysc_nr_ok		# invalid svc number -> do svc 0
	slag	%r8,%r1,2
	j	.Lsysc_nr_ok		# restart svc

#
# _TIF_NOTIFY_RESUME is set, call do_notify_resume
#
.Lsysc_notify_resume:
	lgr	%r2,%r11		# pass pointer to pt_regs
	larl	%r14,.Lsysc_return
	jg	do_notify_resume

#
# _TIF_UPROBE is set, call uprobe_notify_resume
#
#ifdef CONFIG_UPROBES
.Lsysc_uprobe_notify:
	lgr	%r2,%r11		# pass pointer to pt_regs
	larl	%r14,.Lsysc_return
	jg	uprobe_notify_resume
#endif

#
# _PIF_PER_TRAP is set, call do_per_trap
#
.Lsysc_singlestep:
	ni	__PT_FLAGS+7(%r11),255-_PIF_PER_TRAP
	lgr	%r2,%r11		# pass pointer to pt_regs
	larl	%r14,.Lsysc_return
	jg	do_per_trap

#
# call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
# and after the system call
#
.Lsysc_tracesys:
	lgr	%r2,%r11		# pass pointer to pt_regs
	la	%r3,0
	llgh	%r0,__PT_INT_CODE+2(%r11)
	stg	%r0,__PT_R2(%r11)
	brasl	%r14,do_syscall_trace_enter
	lghi	%r0,NR_syscalls
	clgr	%r0,%r2
	jnh	.Lsysc_tracenogo
	sllg	%r8,%r2,2
	lgf	%r9,0(%r8,%r10)
.Lsysc_tracego:
	lmg	%r3,%r7,__PT_R3(%r11)
	stg	%r7,STACK_FRAME_OVERHEAD(%r15)
	lg	%r2,__PT_ORIG_GPR2(%r11)
	basr	%r14,%r9		# call sys_xxx
	stg	%r2,__PT_R2(%r11)	# store return value
.Lsysc_tracenogo:
	tm	__TI_flags+7(%r12),_TIF_TRACE
	jz	.Lsysc_return
	lgr	%r2,%r11		# pass pointer to pt_regs
	larl	%r14,.Lsysc_return
	jg	do_syscall_trace_exit
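# Note: do_syscall_trace_enter returns the (possibly rewritten) system
# call number in %r2; if it is out of range the system call itself is
# skipped and only do_syscall_trace_exit runs on the way out.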

#
# a new process exits the kernel with ret_from_fork
#
ENTRY(ret_from_fork)
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	lg	%r12,__LC_THREAD_INFO
	brasl	%r14,schedule_tail
	TRACE_IRQS_ON
	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
	tm	__PT_PSW+1(%r11),0x01	# forking a kernel thread ?
	jne	.Lsysc_tracenogo
	# it's a kernel thread
	lmg	%r9,%r10,__PT_R9(%r11)	# load gprs
ENTRY(kernel_thread_starter)
	la	%r2,0(%r10)
	basr	%r14,%r9
	j	.Lsysc_tracenogo

/*
 * Program check handler routine
 */

ENTRY(pgm_check_handler)
	stpt	__LC_SYNC_ENTER_TIMER
	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
	lg	%r10,__LC_LAST_BREAK
	lg	%r12,__LC_THREAD_INFO
	larl	%r13,system_call
	lmg	%r8,%r9,__LC_PGM_OLD_PSW
	HANDLE_SIE_INTERCEPT %r14,1
	tmhh	%r8,0x0001		# test problem state bit
	jnz	1f			# -> fault in user space
	tmhh	%r8,0x4000		# PER bit set in old PSW ?
	jnz	0f			# -> enabled, can't be a double fault
	tm	__LC_PGM_ILC+3,0x80	# check for per exception
	jnz	.Lpgm_svcper		# -> single stepped svc
0:	CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC
	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	j	2f
1:	UPDATE_VTIME %r14,__LC_SYNC_ENTER_TIMER
	LAST_BREAK %r14
	lg	%r15,__LC_KERNEL_STACK
	lg	%r14,__TI_task(%r12)
	lghi	%r13,__LC_PGM_TDB
	tm	__LC_PGM_ILC+2,0x02	# check for transaction abort
	jz	2f
	mvc	__THREAD_trap_tdb(256,%r14),0(%r13)
2:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
	stmg	%r8,%r9,__PT_PSW(%r11)
	mvc	__PT_INT_CODE(4,%r11),__LC_PGM_ILC
	mvc	__PT_INT_PARM_LONG(8,%r11),__LC_TRANS_EXC_CODE
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	stg	%r10,__PT_ARGS(%r11)
	tm	__LC_PGM_ILC+3,0x80	# check for per exception
	jz	0f
	tmhh	%r8,0x0001		# kernel per event ?
	jz	.Lpgm_kprobe
	oi	__PT_FLAGS+7(%r11),_PIF_PER_TRAP
	mvc	__THREAD_per_address(8,%r14),__LC_PER_ADDRESS
	mvc	__THREAD_per_cause(2,%r14),__LC_PER_CODE
	mvc	__THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID
0:	REENABLE_IRQS
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	larl	%r1,pgm_check_table
	llgh	%r10,__PT_INT_CODE+2(%r11)
	nill	%r10,0x007f
	sll	%r10,2
	je	.Lsysc_return
	lgf	%r1,0(%r10,%r1)		# load address of handler routine
	lgr	%r2,%r11		# pass pointer to pt_regs
	basr	%r14,%r1		# branch to interrupt-handler
	j	.Lsysc_return

#
# PER event in supervisor state, must be kprobes
#
.Lpgm_kprobe:
	REENABLE_IRQS
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,do_per_trap
	j	.Lsysc_return

#
# single stepped system call
#
.Lpgm_svcper:
	mvc	__LC_RETURN_PSW(8),__LC_SVC_NEW_PSW
	larl	%r14,.Lsysc_per
	stg	%r14,__LC_RETURN_PSW+8
	lghi	%r14,_PIF_SYSCALL | _PIF_PER_TRAP
	lpswe	__LC_RETURN_PSW		# branch to .Lsysc_per and enable irqs
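# A PER event on an svc instruction is folded back into the normal
# system call path: the return PSW built above combines the svc new PSW
# mask with the address of .Lsysc_per, so the lpswe re-enters system
# call processing with _PIF_SYSCALL and _PIF_PER_TRAP already in %r14.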

/*
 * IO interrupt handler routine
 */
ENTRY(io_int_handler)
	STCK	__LC_INT_CLOCK
	stpt	__LC_ASYNC_ENTER_TIMER
	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
	lg	%r10,__LC_LAST_BREAK
	lg	%r12,__LC_THREAD_INFO
	larl	%r13,system_call
	lmg	%r8,%r9,__LC_IO_OLD_PSW
	HANDLE_SIE_INTERCEPT %r14,2
	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
	tmhh	%r8,0x0001		# interrupting from user?
	jz	.Lio_skip
	UPDATE_VTIME %r14,__LC_ASYNC_ENTER_TIMER
	LAST_BREAK %r14
.Lio_skip:
	stmg	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
	stmg	%r8,%r9,__PT_PSW(%r11)
	mvc	__PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	TRACE_IRQS_OFF
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
.Lio_loop:
	lgr	%r2,%r11		# pass pointer to pt_regs
	lghi	%r3,IO_INTERRUPT
	tm	__PT_INT_CODE+8(%r11),0x80	# adapter interrupt ?
	jz	.Lio_call
	lghi	%r3,THIN_INTERRUPT
.Lio_call:
	brasl	%r14,do_IRQ
	tm	__LC_MACHINE_FLAGS+6,0x10	# MACHINE_FLAG_LPAR
	jz	.Lio_return
	tpi	0
	jz	.Lio_return
	mvc	__PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
	j	.Lio_loop
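# When running in LPAR mode the handler polls with tpi and processes any
# further pending I/O interruption in this loop instead of taking a
# separate interrupt for each one; under other hypervisors the loop is
# skipped (MACHINE_FLAG_LPAR test above).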
.Lio_return:
	LOCKDEP_SYS_EXIT
	TRACE_IRQS_ON
.Lio_tif:
	tm	__TI_flags+7(%r12),_TIF_WORK
	jnz	.Lio_work		# there is work to do (signals etc.)
	tm	__LC_CPU_FLAGS+7,_CIF_WORK
	jnz	.Lio_work
.Lio_restore:
	lg	%r14,__LC_VDSO_PER_CPU
	lmg	%r0,%r10,__PT_R0(%r11)
	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
	stpt	__LC_EXIT_TIMER
	mvc	__VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
	lmg	%r11,%r15,__PT_R11(%r11)
	lpswe	__LC_RETURN_PSW
.Lio_done:

#
# There is work to do, find out in which context we have been interrupted:
# 1) if we return to user space we can do all _TIF_WORK work
# 2) if we return to kernel code and kvm is enabled check if we need to
#    modify the psw to leave SIE
# 3) if we return to kernel code and preemptive scheduling is enabled check
#    the preemption counter and if it is zero call preempt_schedule_irq
# Before any work can be done, a switch to the kernel stack is required.
#
.Lio_work:
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	jo	.Lio_work_user		# yes -> do resched & signal
#ifdef CONFIG_PREEMPT
	# check for preemptive scheduling
	icm	%r0,15,__TI_precount(%r12)
	jnz	.Lio_restore		# preemption is disabled
	tm	__TI_flags+7(%r12),_TIF_NEED_RESCHED
	jno	.Lio_restore
	# switch to kernel stack
	lg	%r1,__PT_R15(%r11)
	aghi	%r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
	la	%r11,STACK_FRAME_OVERHEAD(%r1)
	lgr	%r15,%r1
	# TRACE_IRQS_ON already done at .Lio_return, call
	# TRACE_IRQS_OFF to keep things symmetrical
	TRACE_IRQS_OFF
	brasl	%r14,preempt_schedule_irq
	j	.Lio_return
#else
	j	.Lio_restore
#endif

#
# Need to do work before returning to userspace, switch to kernel stack
#
.Lio_work_user:
	lg	%r1,__LC_KERNEL_STACK
	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
	la	%r11,STACK_FRAME_OVERHEAD(%r1)
	lgr	%r15,%r1

#
# One of the work bits is on. Find out which one.
#
.Lio_work_tif:
	tm	__LC_CPU_FLAGS+7,_CIF_MCCK_PENDING
	jo	.Lio_mcck_pending
	tm	__TI_flags+7(%r12),_TIF_NEED_RESCHED
	jo	.Lio_reschedule
	tm	__TI_flags+7(%r12),_TIF_SIGPENDING
	jo	.Lio_sigpending
	tm	__TI_flags+7(%r12),_TIF_NOTIFY_RESUME
	jo	.Lio_notify_resume
	tm	__LC_CPU_FLAGS+7,_CIF_ASCE
	jo	.Lio_uaccess
	j	.Lio_return		# beware of critical section cleanup

#
# _CIF_MCCK_PENDING is set, call handler
#
.Lio_mcck_pending:
	# TRACE_IRQS_ON already done at .Lio_return
	brasl	%r14,s390_handle_mcck	# TIF bit will be cleared by handler
	TRACE_IRQS_OFF
	j	.Lio_return

#
# _CIF_ASCE is set, load user space asce
#
.Lio_uaccess:
	ni	__LC_CPU_FLAGS+7,255-_CIF_ASCE
	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
	j	.Lio_return

#
# _TIF_NEED_RESCHED is set, call schedule
#
.Lio_reschedule:
	# TRACE_IRQS_ON already done at .Lio_return
	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
	brasl	%r14,schedule		# call scheduler
	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
	TRACE_IRQS_OFF
	j	.Lio_return

#
# _TIF_SIGPENDING is set, call do_signal
#
.Lio_sigpending:
	# TRACE_IRQS_ON already done at .Lio_return
	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,do_signal
	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
	TRACE_IRQS_OFF
	j	.Lio_return

#
# _TIF_NOTIFY_RESUME is set, call do_notify_resume
#
.Lio_notify_resume:
	# TRACE_IRQS_ON already done at .Lio_return
	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,do_notify_resume
	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
	TRACE_IRQS_OFF
	j	.Lio_return

/*
 * External interrupt handler routine
 */
ENTRY(ext_int_handler)
	STCK	__LC_INT_CLOCK
	stpt	__LC_ASYNC_ENTER_TIMER
	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
	lg	%r10,__LC_LAST_BREAK
	lg	%r12,__LC_THREAD_INFO
	larl	%r13,system_call
	lmg	%r8,%r9,__LC_EXT_OLD_PSW
	HANDLE_SIE_INTERCEPT %r14,3
	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
	tmhh	%r8,0x0001		# interrupting from user ?
	jz	.Lext_skip
	UPDATE_VTIME %r14,__LC_ASYNC_ENTER_TIMER
	LAST_BREAK %r14
.Lext_skip:
	stmg	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
	stmg	%r8,%r9,__PT_PSW(%r11)
	lghi	%r1,__LC_EXT_PARAMS2
	mvc	__PT_INT_CODE(4,%r11),__LC_EXT_CPU_ADDR
	mvc	__PT_INT_PARM(4,%r11),__LC_EXT_PARAMS
	mvc	__PT_INT_PARM_LONG(8,%r11),0(%r1)
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	TRACE_IRQS_OFF
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	lghi	%r3,EXT_INTERRUPT
	brasl	%r14,do_IRQ
	j	.Lio_return

/*
 * Load idle PSW. The second "half" of this function is in .Lcleanup_idle.
 */
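# Called from C with %r2 pointing to the per-cpu idle data structure
# (read again by .Lcleanup_idle) and %r3 holding the enabled-wait PSW
# mask.  The address stored at __SF_EMPTY+8 becomes the instruction
# address of the idle PSW, i.e. the br %r14 following the lpswe.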
ENTRY(psw_idle)
	stg	%r3,__SF_EMPTY(%r15)
	larl	%r1,.Lpsw_idle_lpsw+4
	stg	%r1,__SF_EMPTY+8(%r15)
	STCK	__CLOCK_IDLE_ENTER(%r2)
	stpt	__TIMER_IDLE_ENTER(%r2)
.Lpsw_idle_lpsw:
	lpswe	__SF_EMPTY(%r15)
	br	%r14
.Lpsw_idle_end:

.L__critical_end:

/*
 * Machine check handler routines
 */
ENTRY(mcck_int_handler)
	STCK	__LC_MCCK_CLOCK
	la	%r1,4095		# revalidate r1
	spt	__LC_CPU_TIMER_SAVE_AREA-4095(%r1)	# revalidate cpu timer
	lmg	%r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs
	lg	%r10,__LC_LAST_BREAK
	lg	%r12,__LC_THREAD_INFO
	larl	%r13,system_call
	lmg	%r8,%r9,__LC_MCK_OLD_PSW
	HANDLE_SIE_INTERCEPT %r14,4
	tm	__LC_MCCK_CODE,0x80	# system damage?
	jo	.Lmcck_panic		# yes -> rest of mcck code invalid
	lghi	%r14,__LC_CPU_TIMER_SAVE_AREA
	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
	tm	__LC_MCCK_CODE+5,0x02	# stored cpu timer value valid?
	jo	3f
	la	%r14,__LC_SYNC_ENTER_TIMER
	clc	0(8,%r14),__LC_ASYNC_ENTER_TIMER
	jl	0f
	la	%r14,__LC_ASYNC_ENTER_TIMER
0:	clc	0(8,%r14),__LC_EXIT_TIMER
	jl	1f
	la	%r14,__LC_EXIT_TIMER
1:	clc	0(8,%r14),__LC_LAST_UPDATE_TIMER
	jl	2f
	la	%r14,__LC_LAST_UPDATE_TIMER
2:	spt	0(%r14)
	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
3:	tm	__LC_MCCK_CODE+2,0x09	# mwp + ia of old psw valid?
	jno	.Lmcck_panic		# no -> skip cleanup critical
	SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_PANIC_STACK,PAGE_SHIFT
	tm	%r8,0x0001		# interrupting from user ?
	jz	.Lmcck_skip
	UPDATE_VTIME %r14,__LC_MCCK_ENTER_TIMER
	LAST_BREAK %r14
.Lmcck_skip:
	lghi	%r14,__LC_GPREGS_SAVE_AREA+64
	stmg	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_R8(64,%r11),0(%r14)
	stmg	%r8,%r9,__PT_PSW(%r11)
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,s390_do_machine_check
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	jno	.Lmcck_return
	lg	%r1,__LC_KERNEL_STACK	# switch to kernel stack
	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
	la	%r11,STACK_FRAME_OVERHEAD(%r1)
	lgr	%r15,%r1
	ssm	__LC_PGM_NEW_PSW	# turn dat on, keep irqs off
	tm	__LC_CPU_FLAGS+7,_CIF_MCCK_PENDING
	jno	.Lmcck_return
	TRACE_IRQS_OFF
	brasl	%r14,s390_handle_mcck
	TRACE_IRQS_ON
.Lmcck_return:
	lg	%r14,__LC_VDSO_PER_CPU
	lmg	%r0,%r10,__PT_R0(%r11)
	mvc	__LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
	tm	__LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
	jno	0f
	stpt	__LC_EXIT_TIMER
	mvc	__VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
0:	lmg	%r11,%r15,__PT_R11(%r11)
	lpswe	__LC_RETURN_MCCK_PSW

.Lmcck_panic:
	lg	%r14,__LC_PANIC_STACK
	slgr	%r14,%r15
	srag	%r14,%r14,PAGE_SHIFT
	jz	0f
	lg	%r15,__LC_PANIC_STACK
0:	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	j	.Lmcck_skip

#
# PSW restart interrupt handler
#
ENTRY(restart_int_handler)
	stg	%r15,__LC_SAVE_AREA_RESTART
	lg	%r15,__LC_RESTART_STACK
	aghi	%r15,-__PT_SIZE			# create pt_regs on stack
	xc	0(__PT_SIZE,%r15),0(%r15)
	stmg	%r0,%r14,__PT_R0(%r15)
	mvc	__PT_R15(8,%r15),__LC_SAVE_AREA_RESTART
	mvc	__PT_PSW(16,%r15),__LC_RST_OLD_PSW # store restart old psw
	aghi	%r15,-STACK_FRAME_OVERHEAD	# create stack frame on stack
	xc	0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
	lg	%r1,__LC_RESTART_FN		# load fn, parm & source cpu
	lg	%r2,__LC_RESTART_DATA
	lg	%r3,__LC_RESTART_SOURCE
	ltgr	%r3,%r3				# test source cpu address
	jm	1f				# negative -> skip source stop
0:	sigp	%r4,%r3,SIGP_SENSE		# sigp sense to source cpu
	brc	10,0b				# wait for status stored
1:	basr	%r14,%r1			# call function
	stap	__SF_EMPTY(%r15)		# store cpu address
	llgh	%r3,__SF_EMPTY(%r15)
2:	sigp	%r4,%r3,SIGP_STOP		# sigp stop to current cpu
	brc	2,2b
3:	j	3b

	.section .kprobes.text, "ax"

#ifdef CONFIG_CHECK_STACK
/*
 * The synchronous or the asynchronous stack overflowed. We are dead.
 * No need to properly save the registers, we are going to panic anyway.
 * Setup a pt_regs so that show_trace can provide a good call trace.
 */
stack_overflow:
	lg	%r15,__LC_PANIC_STACK	# change to panic stack
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	stmg	%r8,%r9,__PT_PSW(%r11)
	mvc	__PT_R8(64,%r11),0(%r14)
	stg	%r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	jg	kernel_stack_overflow
#endif

	.align	8
.Lcleanup_table:
	.quad	system_call
	.quad	.Lsysc_do_svc
	.quad	.Lsysc_tif
	.quad	.Lsysc_restore
	.quad	.Lsysc_done
	.quad	.Lio_tif
	.quad	.Lio_restore
	.quad	.Lio_done
	.quad	psw_idle
	.quad	.Lpsw_idle_end

cleanup_critical:
	clg	%r9,BASED(.Lcleanup_table)	# system_call
	jl	0f
	clg	%r9,BASED(.Lcleanup_table+8)	# .Lsysc_do_svc
	jl	.Lcleanup_system_call
	clg	%r9,BASED(.Lcleanup_table+16)	# .Lsysc_tif
	jl	0f
	clg	%r9,BASED(.Lcleanup_table+24)	# .Lsysc_restore
	jl	.Lcleanup_sysc_tif
	clg	%r9,BASED(.Lcleanup_table+32)	# .Lsysc_done
	jl	.Lcleanup_sysc_restore
	clg	%r9,BASED(.Lcleanup_table+40)	# .Lio_tif
	jl	0f
	clg	%r9,BASED(.Lcleanup_table+48)	# .Lio_restore
	jl	.Lcleanup_io_tif
	clg	%r9,BASED(.Lcleanup_table+56)	# .Lio_done
	jl	.Lcleanup_io_restore
	clg	%r9,BASED(.Lcleanup_table+64)	# psw_idle
	jl	0f
	clg	%r9,BASED(.Lcleanup_table+72)	# .Lpsw_idle_end
	jl	.Lcleanup_idle
0:	br	%r14
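# cleanup_critical is entered with %r9 holding the address at which the
# critical section was interrupted.  The compare ladder above maps that
# address onto the half-open ranges formed by consecutive
# .Lcleanup_table entries and branches to the routine that rolls the
# interrupted code forward to a consistent state, returning the new
# resume address in %r9.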


.Lcleanup_system_call:
	# check if stpt has been executed
	clg	%r9,BASED(.Lcleanup_system_call_insn)
	jh	0f
	mvc	__LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
	cghi	%r11,__LC_SAVE_AREA_ASYNC
	je	0f
	mvc	__LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER
0:	# check if stmg has been executed
	clg	%r9,BASED(.Lcleanup_system_call_insn+8)
	jh	0f
	mvc	__LC_SAVE_AREA_SYNC(64),0(%r11)
0:	# check if base register setup + TIF bit load has been done
	clg	%r9,BASED(.Lcleanup_system_call_insn+16)
	jhe	0f
	# set up saved registers r10 and r12
	stg	%r10,16(%r11)		# r10 last break
	stg	%r12,32(%r11)		# r12 thread-info pointer
0:	# check if the user time update has been done
	clg	%r9,BASED(.Lcleanup_system_call_insn+24)
	jh	0f
	lg	%r15,__LC_EXIT_TIMER
	slg	%r15,__LC_SYNC_ENTER_TIMER
	alg	%r15,__LC_USER_TIMER
	stg	%r15,__LC_USER_TIMER
0:	# check if the system time update has been done
	clg	%r9,BASED(.Lcleanup_system_call_insn+32)
	jh	0f
	lg	%r15,__LC_LAST_UPDATE_TIMER
	slg	%r15,__LC_EXIT_TIMER
	alg	%r15,__LC_SYSTEM_TIMER
	stg	%r15,__LC_SYSTEM_TIMER
0:	# update accounting time stamp
	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
	# do LAST_BREAK
	lg	%r9,16(%r11)
	srag	%r9,%r9,23
	jz	0f
	mvc	__TI_last_break(8,%r12),16(%r11)
0:	# set up saved register r11
	lg	%r15,__LC_KERNEL_STACK
	la	%r9,STACK_FRAME_OVERHEAD(%r15)
	stg	%r9,24(%r11)		# r11 pt_regs pointer
	# fill pt_regs
	mvc	__PT_R8(64,%r9),__LC_SAVE_AREA_SYNC
	stmg	%r0,%r7,__PT_R0(%r9)
	mvc	__PT_PSW(16,%r9),__LC_SVC_OLD_PSW
	mvc	__PT_INT_CODE(4,%r9),__LC_SVC_ILC
	xc	__PT_FLAGS(8,%r9),__PT_FLAGS(%r9)
	mvi	__PT_FLAGS+7(%r9),_PIF_SYSCALL
	# setup saved register r15
	stg	%r15,56(%r11)		# r15 stack pointer
	# set new psw address and exit
	larl	%r9,.Lsysc_do_svc
	br	%r14
.Lcleanup_system_call_insn:
	.quad	system_call
	.quad	.Lsysc_stmg
	.quad	.Lsysc_per
	.quad	.Lsysc_vtime+18
	.quad	.Lsysc_vtime+42

.Lcleanup_sysc_tif:
	larl	%r9,.Lsysc_tif
	br	%r14

.Lcleanup_sysc_restore:
	clg	%r9,BASED(.Lcleanup_sysc_restore_insn)
	je	0f
	lg	%r9,24(%r11)		# get saved pointer to pt_regs
	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r9)
	mvc	0(64,%r11),__PT_R8(%r9)
	lmg	%r0,%r7,__PT_R0(%r9)
0:	lmg	%r8,%r9,__LC_RETURN_PSW
	br	%r14
.Lcleanup_sysc_restore_insn:
	.quad	.Lsysc_done - 4

.Lcleanup_io_tif:
	larl	%r9,.Lio_tif
	br	%r14

.Lcleanup_io_restore:
	clg	%r9,BASED(.Lcleanup_io_restore_insn)
	je	0f
	lg	%r9,24(%r11)		# get saved r11 pointer to pt_regs
	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r9)
	mvc	0(64,%r11),__PT_R8(%r9)
	lmg	%r0,%r7,__PT_R0(%r9)
0:	lmg	%r8,%r9,__LC_RETURN_PSW
	br	%r14
.Lcleanup_io_restore_insn:
	.quad	.Lio_done - 4

.Lcleanup_idle:
	# copy interrupt clock & cpu timer
	mvc	__CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK
	mvc	__TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER
	cghi	%r11,__LC_SAVE_AREA_ASYNC
	je	0f
	mvc	__CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
	mvc	__TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER
0:	# check if stck & stpt have been executed
	clg	%r9,BASED(.Lcleanup_idle_insn)
	jhe	1f
	mvc	__CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2)
	mvc	__TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2)
1:	# account system time going idle
	lg	%r9,__LC_STEAL_TIMER
	alg	%r9,__CLOCK_IDLE_ENTER(%r2)
	slg	%r9,__LC_LAST_UPDATE_CLOCK
	stg	%r9,__LC_STEAL_TIMER
	mvc	__LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2)
	lg	%r9,__LC_SYSTEM_TIMER
	alg	%r9,__LC_LAST_UPDATE_TIMER
	slg	%r9,__TIMER_IDLE_ENTER(%r2)
	stg	%r9,__LC_SYSTEM_TIMER
	mvc	__LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2)
	# prepare return psw
	nihh	%r8,0xfcfd		# clear irq & wait state bits
	lg	%r9,48(%r11)		# return from psw_idle
	br	%r14
.Lcleanup_idle_insn:
	.quad	.Lpsw_idle_lpsw

/*
 * Integer constants
 */
	.align	8
.Lcritical_start:
	.quad	.L__critical_start
.Lcritical_length:
	.quad	.L__critical_end - .L__critical_start


#if IS_ENABLED(CONFIG_KVM)
/*
 * sie64a calling convention:
 * %r2 pointer to sie control block
 * %r3 guest register save area
 */
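# C prototype, as used by the s390 kvm code (shown here for reference):
#	int sie64a(struct kvm_s390_sie_block *sie_block, u64 *guest_gprs);
# The return value is the exit reason stored at __SF_EMPTY+24, or
# -EFAULT if a host fault was trapped via the exception table entries
# below.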
ENTRY(sie64a)
	stmg	%r6,%r14,__SF_GPRS(%r15)	# save kernel registers
	stg	%r2,__SF_EMPTY(%r15)		# save control block pointer
	stg	%r3,__SF_EMPTY+8(%r15)		# save guest register save area
	xc	__SF_EMPTY+16(16,%r15),__SF_EMPTY+16(%r15) # host id & reason
	lmg	%r0,%r13,0(%r3)			# load guest gprs 0-13
	lg	%r14,__LC_GMAP			# get gmap pointer
	ltgr	%r14,%r14
	jz	.Lsie_gmap
	lctlg	%c1,%c1,__GMAP_ASCE(%r14)	# load primary asce
.Lsie_gmap:
	lg	%r14,__SF_EMPTY(%r15)		# get control block pointer
	oi	__SIE_PROG0C+3(%r14),1		# we are going into SIE now
	tm	__SIE_PROG20+3(%r14),1		# last exit...
	jnz	.Lsie_done
	LPP	__SF_EMPTY(%r15)		# set guest id
	sie	0(%r14)
.Lsie_done:
	LPP	__SF_EMPTY+16(%r15)		# set host id
	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
# some program checks are suppressing. C code (e.g. do_protection_exception)
# will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other
# instructions between sie64a and .Lsie_done should not cause program
# interrupts. So let's use a nop (47 00 00 00) as a landing pad.
# See also HANDLE_SIE_INTERCEPT
.Lrewind_pad:
	nop	0
	.globl sie_exit
sie_exit:
	lg	%r14,__SF_EMPTY+8(%r15)		# load guest register save area
	stmg	%r0,%r13,0(%r14)		# save guest gprs 0-13
	lmg	%r6,%r14,__SF_GPRS(%r15)	# restore kernel registers
	lg	%r2,__SF_EMPTY+24(%r15)		# return exit reason code
	br	%r14
.Lsie_fault:
	lghi	%r14,-EFAULT
	stg	%r14,__SF_EMPTY+24(%r15)	# set exit reason code
	j	sie_exit

	.align	8
.Lsie_critical:
	.quad	.Lsie_gmap
.Lsie_critical_length:
	.quad	.Lsie_done - .Lsie_gmap

	EX_TABLE(.Lrewind_pad,.Lsie_fault)
	EX_TABLE(sie_exit,.Lsie_fault)
#endif

	.section .rodata, "a"
#define SYSCALL(esame,emu)	.long esame
	.globl	sys_call_table
sys_call_table:
#include "syscalls.S"
#undef SYSCALL
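# The SYSCALL macro turns each entry of syscalls.S into one table slot.
# As a purely illustrative example, a (hypothetical) line
#	SYSCALL(sys_foo,compat_sys_foo)
# expands to ".long sys_foo" here and, with the redefinition below, to
# ".long compat_sys_foo" in sys_call_table_emu.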

#ifdef CONFIG_COMPAT

#define SYSCALL(esame,emu)	.long emu
	.globl	sys_call_table_emu
sys_call_table_emu:
#include "syscalls.S"
#undef SYSCALL
#endif