/* SPDX-License-Identifier: GPL-2.0 */
/*
 *    S390 low-level entry points.
 *
 *    Copyright IBM Corp. 1999, 2012
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *		 Hartmut Penner (hp@de.ibm.com),
 *		 Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *		 Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/alternative-asm.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/ctl_reg.h>
#include <asm/dwarf.h>
#include <asm/errno.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/page.h>
#include <asm/sigp.h>
#include <asm/irq.h>
#include <asm/vx-insn.h>
#include <asm/setup.h>
#include <asm/nmi.h>
#include <asm/export.h>
#include <asm/nospec-insn.h>

__PT_R0      =	__PT_GPRS
__PT_R1      =	__PT_GPRS + 8
__PT_R2      =	__PT_GPRS + 16
__PT_R3      =	__PT_GPRS + 24
__PT_R4      =	__PT_GPRS + 32
__PT_R5      =	__PT_GPRS + 40
__PT_R6      =	__PT_GPRS + 48
__PT_R7      =	__PT_GPRS + 56
__PT_R8      =	__PT_GPRS + 64
__PT_R9      =	__PT_GPRS + 72
__PT_R10     =	__PT_GPRS + 80
__PT_R11     =	__PT_GPRS + 88
__PT_R12     =	__PT_GPRS + 96
__PT_R13     =	__PT_GPRS + 104
__PT_R14     =	__PT_GPRS + 112
__PT_R15     =	__PT_GPRS + 120

STACK_SHIFT = PAGE_SHIFT + THREAD_SIZE_ORDER
STACK_SIZE  = 1 << STACK_SHIFT
STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
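# STACK_INIT is the offset of the topmost usable stack location: the stack
# size minus the space reserved for the initial stack frame and pt_regs.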

_TIF_WORK	= (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
		   _TIF_UPROBE | _TIF_GUARDED_STORAGE | _TIF_PATCH_PENDING)
_TIF_TRACE	= (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
		   _TIF_SYSCALL_TRACEPOINT)
_CIF_WORK	= (_CIF_MCCK_PENDING | _CIF_ASCE_PRIMARY | \
		   _CIF_ASCE_SECONDARY | _CIF_FPU)
_PIF_WORK	= (_PIF_PER_TRAP | _PIF_SYSCALL_RESTART)

_LPP_OFFSET	= __LC_LPP

#define BASED(name) name-cleanup_critical(%r13)

	.macro	TRACE_IRQS_ON
#ifdef CONFIG_TRACE_IRQFLAGS
	basr	%r2,%r0
	brasl	%r14,trace_hardirqs_on_caller
#endif
	.endm

	.macro	TRACE_IRQS_OFF
#ifdef CONFIG_TRACE_IRQFLAGS
	basr	%r2,%r0
	brasl	%r14,trace_hardirqs_off_caller
#endif
	.endm

	.macro	LOCKDEP_SYS_EXIT
#ifdef CONFIG_LOCKDEP
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	jz	.+10
	brasl	%r14,lockdep_sys_exit
#endif
	.endm

	.macro	CHECK_STACK savearea
#ifdef CONFIG_CHECK_STACK
	tml	%r15,STACK_SIZE - CONFIG_STACK_GUARD
	lghi	%r14,\savearea
	jz	stack_overflow
#endif
	.endm

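	# CHECK_VMAP_STACK masks off the offset of %r15 within its stack,
	# rebuilds the corresponding STACK_INIT address and compares it
	# against all known kernel stacks. If none matches, the stack
	# pointer is bad and we branch to stack_overflow.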
	.macro	CHECK_VMAP_STACK savearea,oklabel
#ifdef CONFIG_VMAP_STACK
	lgr	%r14,%r15
	nill	%r14,0x10000 - STACK_SIZE
	oill	%r14,STACK_INIT
	clg	%r14,__LC_KERNEL_STACK
	je	\oklabel
	clg	%r14,__LC_ASYNC_STACK
	je	\oklabel
	clg	%r14,__LC_NODAT_STACK
	je	\oklabel
	clg	%r14,__LC_RESTART_STACK
	je	\oklabel
	lghi	%r14,\savearea
	j	stack_overflow
#else
	j	\oklabel
#endif
	.endm

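	# SWITCH_ASYNC: if the interrupt hit kernel code inside the critical
	# section, call cleanup_critical first so the machine state is
	# consistent, then switch to the async stack unless we are already
	# running on it. For interrupts from user space only the cpu time
	# accounting (UPDATE_VTIME) and BPENTER are done before the switch.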
	.macro	SWITCH_ASYNC savearea,timer
	tmhh	%r8,0x0001		# interrupting from user ?
	jnz	1f
	lgr	%r14,%r9
	slg	%r14,BASED(.Lcritical_start)
	clg	%r14,BASED(.Lcritical_length)
	jhe	0f
	lghi	%r11,\savearea		# inside critical section, do cleanup
	brasl	%r14,cleanup_critical
	tmhh	%r8,0x0001		# retest problem state after cleanup
	jnz	1f
0:	lg	%r14,__LC_ASYNC_STACK	# are we already on the target stack?
	slgr	%r14,%r15
	srag	%r14,%r14,STACK_SHIFT
	jnz	2f
	CHECK_STACK \savearea
	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	j	3f
1:	UPDATE_VTIME %r14,%r15,\timer
	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
2:	lg	%r15,__LC_ASYNC_STACK	# load async stack
3:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	.endm

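	# UPDATE_VTIME: the CPU timer counts down, so an elapsed interval is
	# "older value - newer value". \w1 accumulates the user time slice
	# (last kernel exit -> this entry), \w2 the system time slice
	# (last update -> last kernel exit); both are added to the
	# corresponding lowcore accounting fields.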
	.macro UPDATE_VTIME w1,w2,enter_timer
	lg	\w1,__LC_EXIT_TIMER
	lg	\w2,__LC_LAST_UPDATE_TIMER
	slg	\w1,\enter_timer
	slg	\w2,__LC_EXIT_TIMER
	alg	\w1,__LC_USER_TIMER
	alg	\w2,__LC_SYSTEM_TIMER
	stg	\w1,__LC_USER_TIMER
	stg	\w2,__LC_SYSTEM_TIMER
	mvc	__LC_LAST_UPDATE_TIMER(8),\enter_timer
	.endm

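	# REENABLE_IRQS restores the system mask of the interrupted context
	# from the old PSW in %r8, with the PER mask bit (0x40) cleared.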
	.macro REENABLE_IRQS
	stg	%r8,__LC_RETURN_PSW
	ni	__LC_RETURN_PSW,0xbf
	ssm	__LC_RETURN_PSW
	.endm

	.macro STCK savearea
#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
	.insn	s,0xb27c0000,\savearea		# store clock fast
#else
	.insn	s,0xb2050000,\savearea		# store clock
#endif
	.endm

	/*
	 * The TSTMSK macro generates a test-under-mask instruction by
	 * calculating the memory offset for the specified mask value.
	 * Mask value can be any constant.  The macro shifts the mask
	 * value to calculate the memory offset for the test-under-mask
	 * instruction.
	 */
	.macro TSTMSK addr, mask, size=8, bytepos=0
		.if (\bytepos < \size) && (\mask >> 8)
			.if (\mask & 0xff)
				.error "Mask exceeds byte boundary"
			.endif
			TSTMSK \addr, "(\mask >> 8)", \size, "(\bytepos + 1)"
			.exitm
		.endif
		.ifeq \mask
			.error "Mask must not be zero"
		.endif
		off = \size - \bytepos - 1
		tm	off+\addr, \mask
	.endm
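	# Example: "TSTMSK __LC_CPU_FLAGS,_CIF_FPU" expands to a single tm
	# instruction on the one byte of the 8-byte field that contains the
	# mask bit; a mask spanning a byte boundary is rejected at build time.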

	.macro BPOFF
	ALTERNATIVE "", ".long 0xb2e8c000", 82
	.endm

	.macro BPON
	ALTERNATIVE "", ".long 0xb2e8d000", 82
	.endm

	.macro BPENTER tif_ptr,tif_mask
	ALTERNATIVE "TSTMSK \tif_ptr,\tif_mask; jz .+8; .long 0xb2e8d000", \
		    "", 82
	.endm

	.macro BPEXIT tif_ptr,tif_mask
	TSTMSK	\tif_ptr,\tif_mask
	ALTERNATIVE "jz .+8;  .long 0xb2e8c000", \
		    "jnz .+8; .long 0xb2e8d000", 82
	.endm
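	# The four macros above toggle the branch prediction mode as a
	# spectre mitigation. The hard-coded 0xb2e8xxxx words appear to be
	# "ppa" (perform processor assist) opcodes, emitted raw so that
	# older assemblers accept them; alternative 82 patches them in or
	# out depending on the nospectre/nobp settings.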

	GEN_BR_THUNK %r9
	GEN_BR_THUNK %r14
	GEN_BR_THUNK %r14,%r11

	.section .kprobes.text, "ax"
.Ldummy:
	/*
	 * This nop exists only to prevent __switch_to from starting at
	 * the beginning of the kprobes text section. Otherwise there would
	 * be several symbols at the same address; e.g. objdump would pick
	 * an arbitrary symbol name when disassembling this code.
	 * With the nop in between, the __switch_to symbol is unique again.
	 */
	nop	0

ENTRY(__bpon)
	.globl __bpon
	BPON
	BR_EX	%r14

/*
 * Scheduler resume function, called by switch_to
 *  gpr2 = (task_struct *) prev
 *  gpr3 = (task_struct *) next
 * Returns:
 *  gpr2 = prev
 */
ENTRY(__switch_to)
	stmg	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
	lghi	%r4,__TASK_stack
	lghi	%r1,__TASK_thread
	llill	%r5,STACK_INIT
	stg	%r15,__THREAD_ksp(%r1,%r2)	# store kernel stack of prev
	lg	%r15,0(%r4,%r3)			# start of kernel stack of next
	agr	%r15,%r5			# end of kernel stack of next
	stg	%r3,__LC_CURRENT		# store task struct of next
	stg	%r15,__LC_KERNEL_STACK		# store end of kernel stack
	lg	%r15,__THREAD_ksp(%r1,%r3)	# load kernel stack of next
	aghi	%r3,__TASK_pid
	mvc	__LC_CURRENT_PID(4,%r0),0(%r3)	# store pid of next
	lmg	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
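	# The raw opcode 0xb2800000 below appears to be lpp (load program
	# parameter); with facility 40 installed, the alternative reloads
	# the LPP value from lowcore so CPU-measurement samples can be
	# attributed to the new task.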
	ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40
	BR_EX	%r14

.L__critical_start:

#if IS_ENABLED(CONFIG_KVM)
/*
 * sie64a calling convention:
 * %r2 pointer to sie control block
 * %r3 guest register save area
 */
ENTRY(sie64a)
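	# On return, %r2 holds the SIE exit reason: 0 for a regular exit,
	# -EFAULT if a program check hit the critical section (.Lsie_fault)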
	stmg	%r6,%r14,__SF_GPRS(%r15)	# save kernel registers
	lg	%r12,__LC_CURRENT
	stg	%r2,__SF_SIE_CONTROL(%r15)	# save control block pointer
	stg	%r3,__SF_SIE_SAVEAREA(%r15)	# save guest register save area
	xc	__SF_SIE_REASON(8,%r15),__SF_SIE_REASON(%r15) # reason code = 0
	mvc	__SF_SIE_FLAGS(8,%r15),__TI_flags(%r12) # copy thread flags
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU		# load guest fp/vx registers ?
	jno	.Lsie_load_guest_gprs
	brasl	%r14,load_fpu_regs		# load guest fp/vx regs
.Lsie_load_guest_gprs:
	lmg	%r0,%r13,0(%r3)			# load guest gprs 0-13
	lg	%r14,__LC_GMAP			# get gmap pointer
	ltgr	%r14,%r14
	jz	.Lsie_gmap
	lctlg	%c1,%c1,__GMAP_ASCE(%r14)	# load primary asce
.Lsie_gmap:
	lg	%r14,__SF_SIE_CONTROL(%r15)	# get control block pointer
	oi	__SIE_PROG0C+3(%r14),1		# we are going into SIE now
	tm	__SIE_PROG20+3(%r14),3		# last exit...
	jnz	.Lsie_skip
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	jo	.Lsie_skip			# exit if fp/vx regs changed
	BPEXIT	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
.Lsie_entry:
	sie	0(%r14)
.Lsie_exit:
	BPOFF
	BPENTER	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
.Lsie_skip:
	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
.Lsie_done:
# Some program checks are suppressing, i.e. the old PSW already points past
# the instruction. C code (e.g. do_protection_exception) will rewind the PSW
# by the ILC, which is often 4 bytes in case of SIE. There are corner cases
# (e.g. runtime instrumentation) where the ILC is unpredictable. Other
# instructions between sie64a and .Lsie_done should not cause program
# interrupts, so let's use 3 nops as a landing pad for all possible rewinds.
# See also .Lcleanup_sie
.Lrewind_pad6:
	nopr	7
.Lrewind_pad4:
	nopr	7
.Lrewind_pad2:
	nopr	7
	.globl sie_exit
sie_exit:
	lg	%r14,__SF_SIE_SAVEAREA(%r15)	# load guest register save area
	stmg	%r0,%r13,0(%r14)		# save guest gprs 0-13
	xgr	%r0,%r0				# clear guest registers to
	xgr	%r1,%r1				# prevent speculative use
	xgr	%r2,%r2
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	lmg	%r6,%r14,__SF_GPRS(%r15)	# restore kernel registers
	lg	%r2,__SF_SIE_REASON(%r15)	# return exit reason code
	BR_EX	%r14
.Lsie_fault:
	lghi	%r14,-EFAULT
	stg	%r14,__SF_SIE_REASON(%r15)	# set exit reason code
	j	sie_exit

	EX_TABLE(.Lrewind_pad6,.Lsie_fault)
	EX_TABLE(.Lrewind_pad4,.Lsie_fault)
	EX_TABLE(.Lrewind_pad2,.Lsie_fault)
	EX_TABLE(sie_exit,.Lsie_fault)
EXPORT_SYMBOL(sie64a)
EXPORT_SYMBOL(sie_exit)
#endif

/*
 * SVC interrupt handler routine. System calls are synchronous events and
 * are executed with interrupts enabled.
 */

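#
# Register usage in the syscall path (established by the code below):
#  %r8:  system call number shifted left by 2 (table offset)
#  %r9:  address of the system call handler
#  %r10: address of the system call table
#  %r11: pointer to pt_regs
#  %r12: pointer to the current task struct
#  %r13: offset of thread_struct within the task struct (__TASK_thread)
#  %r15: kernel stack pointer
#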
ENTRY(system_call)
	stpt	__LC_SYNC_ENTER_TIMER
.Lsysc_stmg:
	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
	BPOFF
	lg	%r12,__LC_CURRENT
	lghi	%r13,__TASK_thread
	lghi	%r14,_PIF_SYSCALL
.Lsysc_per:
	lg	%r15,__LC_KERNEL_STACK
	la	%r11,STACK_FRAME_OVERHEAD(%r15)	# pointer to pt_regs
.Lsysc_vtime:
	UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER
	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
	stmg	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
	mvc	__PT_PSW(16,%r11),__LC_SVC_OLD_PSW
	mvc	__PT_INT_CODE(4,%r11),__LC_SVC_ILC
	stg	%r14,__PT_FLAGS(%r11)
.Lsysc_do_svc:
	# clear user controlled register to prevent speculative use
	xgr	%r0,%r0
	# load address of system call table
	lg	%r10,__THREAD_sysc_table(%r13,%r12)
	llgh	%r8,__PT_INT_CODE+2(%r11)
	slag	%r8,%r8,2			# shift and test for svc 0
	jnz	.Lsysc_nr_ok
	# svc 0: system call number in %r1
	llgfr	%r1,%r1				# clear high word in r1
	cghi	%r1,NR_syscalls
	jnl	.Lsysc_nr_ok
	sth	%r1,__PT_INT_CODE+2(%r11)
	slag	%r8,%r1,2
.Lsysc_nr_ok:
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	stg	%r2,__PT_ORIG_GPR2(%r11)
	stg	%r7,STACK_FRAME_OVERHEAD(%r15)
	lgf	%r9,0(%r8,%r10)			# get system call address
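	# each sys_call_table entry is 4 bytes (see the SYSCALL define at
	# the end of this file), hence the shift by 2 and the sign-extending
	# lgf load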
	TSTMSK	__TI_flags(%r12),_TIF_TRACE
	jnz	.Lsysc_tracesys
	BASR_EX	%r14,%r9			# call sys_xxxx
	stg	%r2,__PT_R2(%r11)		# store return value

.Lsysc_return:
#ifdef CONFIG_DEBUG_RSEQ
	lgr	%r2,%r11
	brasl	%r14,rseq_syscall
#endif
	LOCKDEP_SYS_EXIT
.Lsysc_tif:
	TSTMSK	__PT_FLAGS(%r11),_PIF_WORK
	jnz	.Lsysc_work
	TSTMSK	__TI_flags(%r12),_TIF_WORK
	jnz	.Lsysc_work			# check for work
	TSTMSK	__LC_CPU_FLAGS,_CIF_WORK
	jnz	.Lsysc_work
	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
.Lsysc_restore:
	lg	%r14,__LC_VDSO_PER_CPU
	lmg	%r0,%r10,__PT_R0(%r11)
	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
.Lsysc_exit_timer:
	stpt	__LC_EXIT_TIMER
	mvc	__VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
	lmg	%r11,%r15,__PT_R11(%r11)
	lpswe	__LC_RETURN_PSW
.Lsysc_done:

#
# One of the work bits is on. Find out which one.
#
.Lsysc_work:
	TSTMSK	__LC_CPU_FLAGS,_CIF_MCCK_PENDING
	jo	.Lsysc_mcck_pending
	TSTMSK	__TI_flags(%r12),_TIF_NEED_RESCHED
	jo	.Lsysc_reschedule
	TSTMSK	__PT_FLAGS(%r11),_PIF_SYSCALL_RESTART
	jo	.Lsysc_syscall_restart
#ifdef CONFIG_UPROBES
	TSTMSK	__TI_flags(%r12),_TIF_UPROBE
	jo	.Lsysc_uprobe_notify
#endif
	TSTMSK	__TI_flags(%r12),_TIF_GUARDED_STORAGE
	jo	.Lsysc_guarded_storage
	TSTMSK	__PT_FLAGS(%r11),_PIF_PER_TRAP
	jo	.Lsysc_singlestep
#ifdef CONFIG_LIVEPATCH
	TSTMSK	__TI_flags(%r12),_TIF_PATCH_PENDING
	jo	.Lsysc_patch_pending	# handle live patching just before
					# signals and possible syscall restart
#endif
	TSTMSK	__PT_FLAGS(%r11),_PIF_SYSCALL_RESTART
	jo	.Lsysc_syscall_restart
	TSTMSK	__TI_flags(%r12),_TIF_SIGPENDING
	jo	.Lsysc_sigpending
	TSTMSK	__TI_flags(%r12),_TIF_NOTIFY_RESUME
	jo	.Lsysc_notify_resume
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	jo	.Lsysc_vxrs
	TSTMSK	__LC_CPU_FLAGS,(_CIF_ASCE_PRIMARY|_CIF_ASCE_SECONDARY)
	jnz	.Lsysc_asce
	j	.Lsysc_return		# beware of critical section cleanup

#
# _TIF_NEED_RESCHED is set, call schedule
#
.Lsysc_reschedule:
	larl	%r14,.Lsysc_return
	jg	schedule

#
# _CIF_MCCK_PENDING is set, call handler
#
.Lsysc_mcck_pending:
	larl	%r14,.Lsysc_return
	jg	s390_handle_mcck	# TIF bit will be cleared by handler

#
# _CIF_ASCE_PRIMARY and/or _CIF_ASCE_SECONDARY set, load user space asce
#
.Lsysc_asce:
	ni	__LC_CPU_FLAGS+7,255-_CIF_ASCE_SECONDARY
	lctlg	%c7,%c7,__LC_VDSO_ASCE		# load secondary asce
	TSTMSK	__LC_CPU_FLAGS,_CIF_ASCE_PRIMARY
	jz	.Lsysc_return
#ifndef CONFIG_HAVE_MARCH_Z10_FEATURES
	tm	__LC_STFLE_FAC_LIST+3,0x10	# has MVCOS ?
	jnz	.Lsysc_set_fs_fixup
	ni	__LC_CPU_FLAGS+7,255-_CIF_ASCE_PRIMARY
	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
	j	.Lsysc_return
.Lsysc_set_fs_fixup:
#endif
	larl	%r14,.Lsysc_return
	jg	set_fs_fixup

#
# CIF_FPU is set, restore floating-point controls and floating-point registers.
#
.Lsysc_vxrs:
	larl	%r14,.Lsysc_return
	jg	load_fpu_regs

#
# _TIF_SIGPENDING is set, call do_signal
#
.Lsysc_sigpending:
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,do_signal
	TSTMSK	__PT_FLAGS(%r11),_PIF_SYSCALL
	jno	.Lsysc_return
.Lsysc_do_syscall:
	lghi	%r13,__TASK_thread
	lmg	%r2,%r7,__PT_R2(%r11)	# load svc arguments
	lghi	%r1,0			# svc 0 returns -ENOSYS
	j	.Lsysc_do_svc

#
# _TIF_NOTIFY_RESUME is set, call do_notify_resume
#
.Lsysc_notify_resume:
	lgr	%r2,%r11		# pass pointer to pt_regs
	larl	%r14,.Lsysc_return
	jg	do_notify_resume

#
# _TIF_UPROBE is set, call uprobe_notify_resume
#
#ifdef CONFIG_UPROBES
.Lsysc_uprobe_notify:
	lgr	%r2,%r11		# pass pointer to pt_regs
	larl	%r14,.Lsysc_return
	jg	uprobe_notify_resume
#endif

#
# _TIF_GUARDED_STORAGE is set, call guarded_storage_load
#
.Lsysc_guarded_storage:
	lgr	%r2,%r11		# pass pointer to pt_regs
	larl	%r14,.Lsysc_return
	jg	gs_load_bc_cb
#
# _TIF_PATCH_PENDING is set, call klp_update_patch_state
#
#ifdef CONFIG_LIVEPATCH
.Lsysc_patch_pending:
	lg	%r2,__LC_CURRENT	# pass pointer to task struct
	larl	%r14,.Lsysc_return
	jg	klp_update_patch_state
#endif

#
# _PIF_PER_TRAP is set, call do_per_trap
#
.Lsysc_singlestep:
	ni	__PT_FLAGS+7(%r11),255-_PIF_PER_TRAP
	lgr	%r2,%r11		# pass pointer to pt_regs
	larl	%r14,.Lsysc_return
	jg	do_per_trap

#
# _PIF_SYSCALL_RESTART is set, repeat the current system call
#
.Lsysc_syscall_restart:
	ni	__PT_FLAGS+7(%r11),255-_PIF_SYSCALL_RESTART
	lmg	%r1,%r7,__PT_R1(%r11)	# load svc arguments
	lg	%r2,__PT_ORIG_GPR2(%r11)
	j	.Lsysc_do_svc

#
# call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
# and after the system call
#
.Lsysc_tracesys:
	lgr	%r2,%r11		# pass pointer to pt_regs
	la	%r3,0
	llgh	%r0,__PT_INT_CODE+2(%r11)
	stg	%r0,__PT_R2(%r11)
	brasl	%r14,do_syscall_trace_enter
	lghi	%r0,NR_syscalls
	clgr	%r0,%r2
	jnh	.Lsysc_tracenogo
	sllg	%r8,%r2,2
	lgf	%r9,0(%r8,%r10)
.Lsysc_tracego:
	lmg	%r3,%r7,__PT_R3(%r11)
	stg	%r7,STACK_FRAME_OVERHEAD(%r15)
	lg	%r2,__PT_ORIG_GPR2(%r11)
	BASR_EX	%r14,%r9		# call sys_xxx
	stg	%r2,__PT_R2(%r11)	# store return value
.Lsysc_tracenogo:
	TSTMSK	__TI_flags(%r12),_TIF_TRACE
	jz	.Lsysc_return
	lgr	%r2,%r11		# pass pointer to pt_regs
	larl	%r14,.Lsysc_return
	jg	do_syscall_trace_exit

#
# a new process exits the kernel with ret_from_fork
#
ENTRY(ret_from_fork)
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	lg	%r12,__LC_CURRENT
	brasl	%r14,schedule_tail
	TRACE_IRQS_ON
	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
	tm	__PT_PSW+1(%r11),0x01	# forking a kernel thread ?
	jne	.Lsysc_tracenogo
	# it's a kernel thread
	lmg	%r9,%r10,__PT_R9(%r11)	# load gprs
ENTRY(kernel_thread_starter)
	la	%r2,0(%r10)
	BASR_EX	%r14,%r9
	j	.Lsysc_tracenogo

/*
 * Program check handler routine
 */

ENTRY(pgm_check_handler)
	stpt	__LC_SYNC_ENTER_TIMER
	BPOFF
	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
	lg	%r10,__LC_LAST_BREAK
	lg	%r12,__LC_CURRENT
	lghi	%r11,0
	larl	%r13,cleanup_critical
	lmg	%r8,%r9,__LC_PGM_OLD_PSW
	tmhh	%r8,0x0001		# test problem state bit
	jnz	2f			# -> fault in user space
#if IS_ENABLED(CONFIG_KVM)
	# cleanup critical section for program checks in sie64a
	lgr	%r14,%r9
	slg	%r14,BASED(.Lsie_critical_start)
	clg	%r14,BASED(.Lsie_critical_length)
	jhe	0f
	lg	%r14,__SF_SIE_CONTROL(%r15)	# get control block pointer
	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
	larl	%r9,sie_exit			# skip forward to sie_exit
	lghi	%r11,_PIF_GUEST_FAULT
#endif
0:	tmhh	%r8,0x4000		# PER bit set in old PSW ?
	jnz	1f			# -> enabled, can't be a double fault
	tm	__LC_PGM_ILC+3,0x80	# check for per exception
	jnz	.Lpgm_svcper		# -> single stepped svc
1:	CHECK_STACK __LC_SAVE_AREA_SYNC
	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	# CHECK_VMAP_STACK branches to stack_overflow or 4f
	CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,4f
2:	UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
	lg	%r15,__LC_KERNEL_STACK
	lgr	%r14,%r12
	aghi	%r14,__TASK_thread	# pointer to thread_struct
	lghi	%r13,__LC_PGM_TDB
	tm	__LC_PGM_ILC+2,0x02	# check for transaction abort
	jz	3f
	mvc	__THREAD_trap_tdb(256,%r14),0(%r13)
3:	stg	%r10,__THREAD_last_break(%r14)
4:	lgr	%r13,%r11
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r2,%r2
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
	stmg	%r8,%r9,__PT_PSW(%r11)
	mvc	__PT_INT_CODE(4,%r11),__LC_PGM_ILC
	mvc	__PT_INT_PARM_LONG(8,%r11),__LC_TRANS_EXC_CODE
	stg	%r13,__PT_FLAGS(%r11)
	stg	%r10,__PT_ARGS(%r11)
	tm	__LC_PGM_ILC+3,0x80	# check for per exception
	jz	5f
	tmhh	%r8,0x0001		# kernel per event ?
	jz	.Lpgm_kprobe
	oi	__PT_FLAGS+7(%r11),_PIF_PER_TRAP
	mvc	__THREAD_per_address(8,%r14),__LC_PER_ADDRESS
	mvc	__THREAD_per_cause(2,%r14),__LC_PER_CODE
	mvc	__THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID
5:	REENABLE_IRQS
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	larl	%r1,pgm_check_table
	llgh	%r10,__PT_INT_CODE+2(%r11)
	nill	%r10,0x007f
	sll	%r10,2
	je	.Lpgm_return
	lgf	%r9,0(%r10,%r1)		# load address of handler routine
	lgr	%r2,%r11		# pass pointer to pt_regs
	BASR_EX	%r14,%r9		# branch to interrupt-handler
.Lpgm_return:
	LOCKDEP_SYS_EXIT
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	jno	.Lsysc_restore
	TSTMSK	__PT_FLAGS(%r11),_PIF_SYSCALL
	jo	.Lsysc_do_syscall
	j	.Lsysc_tif

#
# PER event in supervisor state, must be kprobes
#
.Lpgm_kprobe:
	REENABLE_IRQS
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,do_per_trap
	j	.Lpgm_return

#
# single stepped system call
#
.Lpgm_svcper:
	mvc	__LC_RETURN_PSW(8),__LC_SVC_NEW_PSW
	lghi	%r13,__TASK_thread
	larl	%r14,.Lsysc_per
	stg	%r14,__LC_RETURN_PSW+8
	lghi	%r14,_PIF_SYSCALL | _PIF_PER_TRAP
	lpswe	__LC_RETURN_PSW		# branch to .Lsysc_per and enable irqs

/*
 * IO interrupt handler routine
 */
ENTRY(io_int_handler)
	STCK	__LC_INT_CLOCK
	stpt	__LC_ASYNC_ENTER_TIMER
	BPOFF
	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
	lg	%r12,__LC_CURRENT
	larl	%r13,cleanup_critical
	lmg	%r8,%r9,__LC_IO_OLD_PSW
	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
	stmg	%r0,%r7,__PT_R0(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r2,%r2
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r10,%r10
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
	stmg	%r8,%r9,__PT_PSW(%r11)
	mvc	__PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	TSTMSK	__LC_CPU_FLAGS,_CIF_IGNORE_IRQ
	jo	.Lio_restore
	TRACE_IRQS_OFF
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
.Lio_loop:
	lgr	%r2,%r11		# pass pointer to pt_regs
	lghi	%r3,IO_INTERRUPT
	tm	__PT_INT_CODE+8(%r11),0x80	# adapter interrupt ?
	jz	.Lio_call
	lghi	%r3,THIN_INTERRUPT
.Lio_call:
	brasl	%r14,do_IRQ
	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_LPAR
	jz	.Lio_return
	tpi	0
	jz	.Lio_return
	mvc	__PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
	j	.Lio_loop
.Lio_return:
	LOCKDEP_SYS_EXIT
	TRACE_IRQS_ON
.Lio_tif:
	TSTMSK	__TI_flags(%r12),_TIF_WORK
	jnz	.Lio_work		# there is work to do (signals etc.)
	TSTMSK	__LC_CPU_FLAGS,_CIF_WORK
	jnz	.Lio_work
.Lio_restore:
	lg	%r14,__LC_VDSO_PER_CPU
	lmg	%r0,%r10,__PT_R0(%r11)
	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	jno	.Lio_exit_kernel
	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
.Lio_exit_timer:
	stpt	__LC_EXIT_TIMER
	mvc	__VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
.Lio_exit_kernel:
	lmg	%r11,%r15,__PT_R11(%r11)
	lpswe	__LC_RETURN_PSW
.Lio_done:

#
# There is work to do, find out in which context we have been interrupted:
# 1) if we return to user space we can do all _TIF_WORK work
# 2) if we return to kernel code and kvm is enabled check if we need to
#    modify the psw to leave SIE
# 3) if we return to kernel code and preemptive scheduling is enabled check
#    the preemption counter and if it is zero call preempt_schedule_irq
# Before any work can be done, a switch to the kernel stack is required.
#
.Lio_work:
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	jo	.Lio_work_user		# yes -> do resched & signal
#ifdef CONFIG_PREEMPT
	# check for preemptive scheduling
	icm	%r0,15,__LC_PREEMPT_COUNT
	jnz	.Lio_restore		# preemption is disabled
	TSTMSK	__TI_flags(%r12),_TIF_NEED_RESCHED
	jno	.Lio_restore
	# switch to kernel stack
	lg	%r1,__PT_R15(%r11)
	aghi	%r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
	la	%r11,STACK_FRAME_OVERHEAD(%r1)
	lgr	%r15,%r1
	# TRACE_IRQS_ON already done at .Lio_return, call
	# TRACE_IRQS_OFF to keep things symmetrical
	TRACE_IRQS_OFF
	brasl	%r14,preempt_schedule_irq
	j	.Lio_return
#else
	j	.Lio_restore
#endif

#
# Need to do work before returning to userspace, switch to kernel stack
#
.Lio_work_user:
	lg	%r1,__LC_KERNEL_STACK
	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
	la	%r11,STACK_FRAME_OVERHEAD(%r1)
	lgr	%r15,%r1

#
# One of the work bits is on. Find out which one.
#
.Lio_work_tif:
	TSTMSK	__LC_CPU_FLAGS,_CIF_MCCK_PENDING
	jo	.Lio_mcck_pending
	TSTMSK	__TI_flags(%r12),_TIF_NEED_RESCHED
	jo	.Lio_reschedule
#ifdef CONFIG_LIVEPATCH
	TSTMSK	__TI_flags(%r12),_TIF_PATCH_PENDING
	jo	.Lio_patch_pending
#endif
	TSTMSK	__TI_flags(%r12),_TIF_SIGPENDING
	jo	.Lio_sigpending
	TSTMSK	__TI_flags(%r12),_TIF_NOTIFY_RESUME
	jo	.Lio_notify_resume
	TSTMSK	__TI_flags(%r12),_TIF_GUARDED_STORAGE
	jo	.Lio_guarded_storage
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	jo	.Lio_vxrs
	TSTMSK	__LC_CPU_FLAGS,(_CIF_ASCE_PRIMARY|_CIF_ASCE_SECONDARY)
	jnz	.Lio_asce
	j	.Lio_return		# beware of critical section cleanup

#
# _CIF_MCCK_PENDING is set, call handler
#
.Lio_mcck_pending:
	# TRACE_IRQS_ON already done at .Lio_return
	brasl	%r14,s390_handle_mcck	# TIF bit will be cleared by handler
	TRACE_IRQS_OFF
	j	.Lio_return

#
# _CIF_ASCE_PRIMARY and/or _CIF_ASCE_SECONDARY set, load user space asce
#
.Lio_asce:
	ni	__LC_CPU_FLAGS+7,255-_CIF_ASCE_SECONDARY
	lctlg	%c7,%c7,__LC_VDSO_ASCE		# load secondary asce
	TSTMSK	__LC_CPU_FLAGS,_CIF_ASCE_PRIMARY
	jz	.Lio_return
#ifndef CONFIG_HAVE_MARCH_Z10_FEATURES
	tm	__LC_STFLE_FAC_LIST+3,0x10	# has MVCOS ?
	jnz	.Lio_set_fs_fixup
	ni	__LC_CPU_FLAGS+7,255-_CIF_ASCE_PRIMARY
	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
	j	.Lio_return
.Lio_set_fs_fixup:
#endif
	larl	%r14,.Lio_return
	jg	set_fs_fixup

#
# CIF_FPU is set, restore floating-point controls and floating-point registers.
#
.Lio_vxrs:
	larl	%r14,.Lio_return
	jg	load_fpu_regs

#
# _TIF_GUARDED_STORAGE is set, call guarded_storage_load
#
.Lio_guarded_storage:
	# TRACE_IRQS_ON already done at .Lio_return
	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,gs_load_bc_cb
	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
	TRACE_IRQS_OFF
	j	.Lio_return

#
# _TIF_NEED_RESCHED is set, call schedule
#
.Lio_reschedule:
	# TRACE_IRQS_ON already done at .Lio_return
	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
	brasl	%r14,schedule		# call scheduler
	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
	TRACE_IRQS_OFF
	j	.Lio_return

#
# _TIF_PATCH_PENDING is set, call klp_update_patch_state
#
#ifdef CONFIG_LIVEPATCH
.Lio_patch_pending:
	lg	%r2,__LC_CURRENT	# pass pointer to task struct
	larl	%r14,.Lio_return
	jg	klp_update_patch_state
#endif

#
# _TIF_SIGPENDING is set, call do_signal
#
.Lio_sigpending:
	# TRACE_IRQS_ON already done at .Lio_return
	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,do_signal
	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
	TRACE_IRQS_OFF
	j	.Lio_return

#
# _TIF_NOTIFY_RESUME is set, call do_notify_resume
#
.Lio_notify_resume:
	# TRACE_IRQS_ON already done at .Lio_return
	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,do_notify_resume
	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
	TRACE_IRQS_OFF
	j	.Lio_return

/*
 * External interrupt handler routine
 */
ENTRY(ext_int_handler)
	STCK	__LC_INT_CLOCK
	stpt	__LC_ASYNC_ENTER_TIMER
	BPOFF
	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
	lg	%r12,__LC_CURRENT
	larl	%r13,cleanup_critical
	lmg	%r8,%r9,__LC_EXT_OLD_PSW
	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
	stmg	%r0,%r7,__PT_R0(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r2,%r2
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r10,%r10
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
	stmg	%r8,%r9,__PT_PSW(%r11)
	lghi	%r1,__LC_EXT_PARAMS2
	mvc	__PT_INT_CODE(4,%r11),__LC_EXT_CPU_ADDR
	mvc	__PT_INT_PARM(4,%r11),__LC_EXT_PARAMS
	mvc	__PT_INT_PARM_LONG(8,%r11),0(%r1)
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	TSTMSK	__LC_CPU_FLAGS,_CIF_IGNORE_IRQ
	jo	.Lio_restore
	TRACE_IRQS_OFF
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	lghi	%r3,EXT_INTERRUPT
	brasl	%r14,do_IRQ
	j	.Lio_return

/*
 * Load idle PSW. The second "half" of this function is in .Lcleanup_idle.
 */
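#
# %r2 is expected to point to the per-cpu idle data (addressed via the
# __CLOCK_IDLE_ENTER/__TIMER_IDLE_ENTER offsets below), %r3 holds the mask
# of the wait PSW. The full wait PSW is assembled on the stack: mask from
# %r3, address of the instruction following the lpswe. With SMT enabled,
# the raw .insn below appears to be stcctm, saving the MT utilization
# counters before going idle.
#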
ENTRY(psw_idle)
	stg	%r3,__SF_EMPTY(%r15)
	larl	%r1,.Lpsw_idle_lpsw+4
	stg	%r1,__SF_EMPTY+8(%r15)
#ifdef CONFIG_SMP
	larl	%r1,smp_cpu_mtid
	llgf	%r1,0(%r1)
	ltgr	%r1,%r1
	jz	.Lpsw_idle_stcctm
	.insn	rsy,0xeb0000000017,%r1,5,__SF_EMPTY+16(%r15)
.Lpsw_idle_stcctm:
#endif
	oi	__LC_CPU_FLAGS+7,_CIF_ENABLED_WAIT
	BPON
	STCK	__CLOCK_IDLE_ENTER(%r2)
	stpt	__TIMER_IDLE_ENTER(%r2)
.Lpsw_idle_lpsw:
	lpswe	__SF_EMPTY(%r15)
	BR_EX	%r14
.Lpsw_idle_end:

/*
 * Store floating-point controls and floating-point or vector registers,
 * depending on whether the vector facility is available. A critical section
 * cleanup assures that the registers are stored even if interrupted for
 * some other work.  The CIF_FPU flag is set to trigger a lazy restore
 * of the register contents at return from io or a system call.
 */
ENTRY(save_fpu_regs)
	lg	%r2,__LC_CURRENT
	aghi	%r2,__TASK_thread
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	jo	.Lsave_fpu_regs_exit
	stfpc	__THREAD_FPU_fpc(%r2)
	lg	%r3,__THREAD_FPU_regs(%r2)
	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_VX
	jz	.Lsave_fpu_regs_fp	  # no -> store FP regs
	VSTM	%v0,%v15,0,%r3		  # vstm 0,15,0(3)
	VSTM	%v16,%v31,256,%r3	  # vstm 16,31,256(3)
	j	.Lsave_fpu_regs_done	  # -> set CIF_FPU flag
.Lsave_fpu_regs_fp:
	std	0,0(%r3)
	std	1,8(%r3)
	std	2,16(%r3)
	std	3,24(%r3)
	std	4,32(%r3)
	std	5,40(%r3)
	std	6,48(%r3)
	std	7,56(%r3)
	std	8,64(%r3)
	std	9,72(%r3)
	std	10,80(%r3)
	std	11,88(%r3)
	std	12,96(%r3)
	std	13,104(%r3)
	std	14,112(%r3)
	std	15,120(%r3)
.Lsave_fpu_regs_done:
	oi	__LC_CPU_FLAGS+7,_CIF_FPU
.Lsave_fpu_regs_exit:
	BR_EX	%r14
.Lsave_fpu_regs_end:
EXPORT_SYMBOL(save_fpu_regs)

/*
 * Load floating-point controls and floating-point or vector registers.
 * A critical section cleanup assures that the register contents are
 * loaded even if interrupted for some other work.
 *
 * There are special calling conventions to fit into sysc and io return work:
 *	%r15:	<kernel stack>
 * The function uses %r4 as a scratch register.
 */
load_fpu_regs:
	lg	%r4,__LC_CURRENT
	aghi	%r4,__TASK_thread
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	jno	.Lload_fpu_regs_exit
	lfpc	__THREAD_FPU_fpc(%r4)
	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_VX
	lg	%r4,__THREAD_FPU_regs(%r4)	# %r4 <- reg save area
	jz	.Lload_fpu_regs_fp		# -> no VX, load FP regs
	VLM	%v0,%v15,0,%r4
	VLM	%v16,%v31,256,%r4
	j	.Lload_fpu_regs_done
.Lload_fpu_regs_fp:
	ld	0,0(%r4)
	ld	1,8(%r4)
	ld	2,16(%r4)
	ld	3,24(%r4)
	ld	4,32(%r4)
	ld	5,40(%r4)
	ld	6,48(%r4)
	ld	7,56(%r4)
	ld	8,64(%r4)
	ld	9,72(%r4)
	ld	10,80(%r4)
	ld	11,88(%r4)
	ld	12,96(%r4)
	ld	13,104(%r4)
	ld	14,112(%r4)
	ld	15,120(%r4)
.Lload_fpu_regs_done:
	ni	__LC_CPU_FLAGS+7,255-_CIF_FPU
.Lload_fpu_regs_exit:
	BR_EX	%r14
.Lload_fpu_regs_end:

.L__critical_end:

/*
 * Machine check handler routines
 */
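#
# On entry the machine state is unreliable. The registers are revalidated
# from the lowcore save areas first, and the machine check interruption
# code is inspected to decide which parts of the saved state can be
# trusted; unrecoverable damage leads to .Lmcck_panic.
#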
ENTRY(mcck_int_handler)
	STCK	__LC_MCCK_CLOCK
	BPOFF
	la	%r1,4095		# validate r1
	spt	__LC_CPU_TIMER_SAVE_AREA-4095(%r1)	# validate cpu timer
	sckc	__LC_CLOCK_COMPARATOR			# validate comparator
	lam	%a0,%a15,__LC_AREGS_SAVE_AREA-4095(%r1) # validate acrs
	lmg	%r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# validate gprs
	lg	%r12,__LC_CURRENT
	larl	%r13,cleanup_critical
	lmg	%r8,%r9,__LC_MCK_OLD_PSW
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_SYSTEM_DAMAGE
	jo	.Lmcck_panic		# yes -> rest of mcck code invalid
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_CR_VALID
	jno	.Lmcck_panic		# control registers invalid -> panic
	la	%r14,4095
	lctlg	%c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r14) # validate ctl regs
	ptlb
	lg	%r11,__LC_MCESAD-4095(%r14) # extended machine check save area
	nill	%r11,0xfc00		# MCESA_ORIGIN_MASK
	TSTMSK	__LC_CREGS_SAVE_AREA+16-4095(%r14),CR2_GUARDED_STORAGE
	jno	0f
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_GS_VALID
	jno	0f
	.insn	 rxy,0xe3000000004d,0,__MCESA_GS_SAVE_AREA(%r11) # LGSC
0:	l	%r14,__LC_FP_CREG_SAVE_AREA-4095(%r14)
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_FC_VALID
	jo	0f
	sr	%r14,%r14
0:	sfpc	%r14
	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_VX
	jo	0f
	lghi	%r14,__LC_FPREGS_SAVE_AREA
	ld	%f0,0(%r14)
	ld	%f1,8(%r14)
	ld	%f2,16(%r14)
	ld	%f3,24(%r14)
	ld	%f4,32(%r14)
	ld	%f5,40(%r14)
	ld	%f6,48(%r14)
	ld	%f7,56(%r14)
	ld	%f8,64(%r14)
	ld	%f9,72(%r14)
	ld	%f10,80(%r14)
	ld	%f11,88(%r14)
	ld	%f12,96(%r14)
	ld	%f13,104(%r14)
	ld	%f14,112(%r14)
	ld	%f15,120(%r14)
	j	1f
0:	VLM	%v0,%v15,0,%r11
	VLM	%v16,%v31,256,%r11
1:	lghi	%r14,__LC_CPU_TIMER_SAVE_AREA
	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_CPU_TIMER_VALID
	jo	3f
	la	%r14,__LC_SYNC_ENTER_TIMER
	clc	0(8,%r14),__LC_ASYNC_ENTER_TIMER
	jl	0f
	la	%r14,__LC_ASYNC_ENTER_TIMER
0:	clc	0(8,%r14),__LC_EXIT_TIMER
	jl	1f
	la	%r14,__LC_EXIT_TIMER
1:	clc	0(8,%r14),__LC_LAST_UPDATE_TIMER
	jl	2f
	la	%r14,__LC_LAST_UPDATE_TIMER
2:	spt	0(%r14)
	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
3:	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_PSW_MWP_VALID
	jno	.Lmcck_panic
	tmhh	%r8,0x0001		# interrupting from user ?
	jnz	4f
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_PSW_IA_VALID
	jno	.Lmcck_panic
4:	ssm	__LC_PGM_NEW_PSW	# turn dat on, keep irqs off
	SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_MCCK_ENTER_TIMER
.Lmcck_skip:
	lghi	%r14,__LC_GPREGS_SAVE_AREA+64
	stmg	%r0,%r7,__PT_R0(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r2,%r2
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r10,%r10
	mvc	__PT_R8(64,%r11),0(%r14)
	stmg	%r8,%r9,__PT_PSW(%r11)
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,s390_do_machine_check
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	jno	.Lmcck_return
	lg	%r1,__LC_KERNEL_STACK	# switch to kernel stack
	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
	la	%r11,STACK_FRAME_OVERHEAD(%r1)
	lgr	%r15,%r1
	TSTMSK	__LC_CPU_FLAGS,_CIF_MCCK_PENDING
	jno	.Lmcck_return
	TRACE_IRQS_OFF
	brasl	%r14,s390_handle_mcck
	TRACE_IRQS_ON
.Lmcck_return:
	lg	%r14,__LC_VDSO_PER_CPU
	lmg	%r0,%r10,__PT_R0(%r11)
	mvc	__LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
	tm	__LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
	jno	0f
	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
	stpt	__LC_EXIT_TIMER
	mvc	__VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
0:	lmg	%r11,%r15,__PT_R11(%r11)
	lpswe	__LC_RETURN_MCCK_PSW

.Lmcck_panic:
	lg	%r15,__LC_NODAT_STACK
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	j	.Lmcck_skip

#
# PSW restart interrupt handler
#
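# The restart function, its parameter and the requesting ("source") cpu
# address are taken from the lowcore. If a valid source cpu is given, the
# code loops on sigp sense until that cpu reports status, then calls the
# function and finally stops the current cpu via sigp stop.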
ENTRY(restart_int_handler)
	ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40
	stg	%r15,__LC_SAVE_AREA_RESTART
	lg	%r15,__LC_RESTART_STACK
	xc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r15),STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r14,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	mvc	STACK_FRAME_OVERHEAD+__PT_R15(8,%r15),__LC_SAVE_AREA_RESTART
	mvc	STACK_FRAME_OVERHEAD+__PT_PSW(16,%r15),__LC_RST_OLD_PSW
	xc	0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
	lg	%r1,__LC_RESTART_FN		# load fn, parm & source cpu
	lg	%r2,__LC_RESTART_DATA
	lg	%r3,__LC_RESTART_SOURCE
	ltgr	%r3,%r3				# test source cpu address
	jm	1f				# negative -> skip source stop
0:	sigp	%r4,%r3,SIGP_SENSE		# sigp sense to source cpu
	brc	10,0b				# wait for status stored
1:	basr	%r14,%r1			# call function
	stap	__SF_EMPTY(%r15)		# store cpu address
	llgh	%r3,__SF_EMPTY(%r15)
2:	sigp	%r4,%r3,SIGP_STOP		# sigp stop to current cpu
	brc	2,2b
3:	j	3b

	.section .kprobes.text, "ax"

#if defined(CONFIG_CHECK_STACK) || defined(CONFIG_VMAP_STACK)
/*
 * The synchronous or the asynchronous stack overflowed. We are dead.
 * No need to properly save the registers, we are going to panic anyway.
 * Set up a pt_regs so that show_trace can provide a good call trace.
 */
stack_overflow:
	lg	%r15,__LC_NODAT_STACK	# change to panic stack
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	stmg	%r8,%r9,__PT_PSW(%r11)
	mvc	__PT_R8(64,%r11),0(%r14)
	stg	%r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	jg	kernel_stack_overflow
#endif

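#
# cleanup_critical is entered with %r9 = address of the interrupted
# instruction. It determines which critical section was hit and rolls the
# state forward to a safe restart point; the (possibly adjusted) resume
# address is returned in %r9. BR_EX uses %r11 as scratch register for the
# expoline thunk.
#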
cleanup_critical:
#if IS_ENABLED(CONFIG_KVM)
	clg	%r9,BASED(.Lcleanup_table_sie)	# .Lsie_gmap
	jl	0f
	clg	%r9,BASED(.Lcleanup_table_sie+8)# .Lsie_done
	jl	.Lcleanup_sie
#endif
	clg	%r9,BASED(.Lcleanup_table)	# system_call
	jl	0f
	clg	%r9,BASED(.Lcleanup_table+8)	# .Lsysc_do_svc
	jl	.Lcleanup_system_call
	clg	%r9,BASED(.Lcleanup_table+16)	# .Lsysc_tif
	jl	0f
	clg	%r9,BASED(.Lcleanup_table+24)	# .Lsysc_restore
	jl	.Lcleanup_sysc_tif
	clg	%r9,BASED(.Lcleanup_table+32)	# .Lsysc_done
	jl	.Lcleanup_sysc_restore
	clg	%r9,BASED(.Lcleanup_table+40)	# .Lio_tif
	jl	0f
	clg	%r9,BASED(.Lcleanup_table+48)	# .Lio_restore
	jl	.Lcleanup_io_tif
	clg	%r9,BASED(.Lcleanup_table+56)	# .Lio_done
	jl	.Lcleanup_io_restore
	clg	%r9,BASED(.Lcleanup_table+64)	# psw_idle
	jl	0f
	clg	%r9,BASED(.Lcleanup_table+72)	# .Lpsw_idle_end
	jl	.Lcleanup_idle
	clg	%r9,BASED(.Lcleanup_table+80)	# save_fpu_regs
	jl	0f
	clg	%r9,BASED(.Lcleanup_table+88)	# .Lsave_fpu_regs_end
	jl	.Lcleanup_save_fpu_regs
	clg	%r9,BASED(.Lcleanup_table+96)	# load_fpu_regs
	jl	0f
	clg	%r9,BASED(.Lcleanup_table+104)	# .Lload_fpu_regs_end
	jl	.Lcleanup_load_fpu_regs
0:	BR_EX	%r14,%r11

	.align	8
.Lcleanup_table:
	.quad	system_call
	.quad	.Lsysc_do_svc
	.quad	.Lsysc_tif
	.quad	.Lsysc_restore
	.quad	.Lsysc_done
	.quad	.Lio_tif
	.quad	.Lio_restore
	.quad	.Lio_done
	.quad	psw_idle
	.quad	.Lpsw_idle_end
	.quad	save_fpu_regs
	.quad	.Lsave_fpu_regs_end
	.quad	load_fpu_regs
	.quad	.Lload_fpu_regs_end

#if IS_ENABLED(CONFIG_KVM)
.Lcleanup_table_sie:
	.quad	.Lsie_gmap
	.quad	.Lsie_done

.Lcleanup_sie:
	cghi	%r11,__LC_SAVE_AREA_ASYNC	# is this a normal interrupt?
	je	1f
	slg	%r9,BASED(.Lsie_crit_mcck_start)
	clg	%r9,BASED(.Lsie_crit_mcck_length)
	jh	1f
	oi	__LC_CPU_FLAGS+7,_CIF_MCCK_GUEST
1:	BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
	lg	%r9,__SF_SIE_CONTROL(%r15)	# get control block pointer
	ni	__SIE_PROG0C+3(%r9),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
	larl	%r9,sie_exit			# skip forward to sie_exit
	BR_EX	%r14,%r11
#endif

.Lcleanup_system_call:
	# check if stpt has been executed
	clg	%r9,BASED(.Lcleanup_system_call_insn)
	jh	0f
	mvc	__LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
	cghi	%r11,__LC_SAVE_AREA_ASYNC
	je	0f
	mvc	__LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER
0:	# check if stmg has been executed
	clg	%r9,BASED(.Lcleanup_system_call_insn+8)
	jh	0f
	mvc	__LC_SAVE_AREA_SYNC(64),0(%r11)
0:	# check if base register setup + TIF bit load has been done
	clg	%r9,BASED(.Lcleanup_system_call_insn+16)
	jhe	0f
	# set up saved register r12 task struct pointer
	stg	%r12,32(%r11)
	# set up saved register r13 __TASK_thread offset
	mvc	40(8,%r11),BASED(.Lcleanup_system_call_const)
0:	# check if the user time update has been done
	clg	%r9,BASED(.Lcleanup_system_call_insn+24)
	jh	0f
	lg	%r15,__LC_EXIT_TIMER
	slg	%r15,__LC_SYNC_ENTER_TIMER
	alg	%r15,__LC_USER_TIMER
	stg	%r15,__LC_USER_TIMER
0:	# check if the system time update has been done
	clg	%r9,BASED(.Lcleanup_system_call_insn+32)
	jh	0f
	lg	%r15,__LC_LAST_UPDATE_TIMER
	slg	%r15,__LC_EXIT_TIMER
	alg	%r15,__LC_SYSTEM_TIMER
	stg	%r15,__LC_SYSTEM_TIMER
0:	# update accounting time stamp
	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
	# set up saved register r11
	lg	%r15,__LC_KERNEL_STACK
	la	%r9,STACK_FRAME_OVERHEAD(%r15)
	stg	%r9,24(%r11)		# r11 pt_regs pointer
	# fill pt_regs
	mvc	__PT_R8(64,%r9),__LC_SAVE_AREA_SYNC
	stmg	%r0,%r7,__PT_R0(%r9)
	mvc	__PT_PSW(16,%r9),__LC_SVC_OLD_PSW
	mvc	__PT_INT_CODE(4,%r9),__LC_SVC_ILC
	xc	__PT_FLAGS(8,%r9),__PT_FLAGS(%r9)
	mvi	__PT_FLAGS+7(%r9),_PIF_SYSCALL
	# setup saved register r15
	stg	%r15,56(%r11)		# r15 stack pointer
	# set new psw address and exit
	larl	%r9,.Lsysc_do_svc
	BR_EX	%r14,%r11
.Lcleanup_system_call_insn:
	.quad	system_call
	.quad	.Lsysc_stmg
	.quad	.Lsysc_per
	.quad	.Lsysc_vtime+36
	.quad	.Lsysc_vtime+42
.Lcleanup_system_call_const:
	.quad	__TASK_thread

.Lcleanup_sysc_tif:
	larl	%r9,.Lsysc_tif
	BR_EX	%r14,%r11

.Lcleanup_sysc_restore:
	# check if stpt has been executed
	clg	%r9,BASED(.Lcleanup_sysc_restore_insn)
	jh	0f
	mvc	__LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
	cghi	%r11,__LC_SAVE_AREA_ASYNC
	je	0f
	mvc	__LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
0:	clg	%r9,BASED(.Lcleanup_sysc_restore_insn+8)
	je	1f
	lg	%r9,24(%r11)		# get saved pointer to pt_regs
	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r9)
	mvc	0(64,%r11),__PT_R8(%r9)
	lmg	%r0,%r7,__PT_R0(%r9)
1:	lmg	%r8,%r9,__LC_RETURN_PSW
	BR_EX	%r14,%r11
.Lcleanup_sysc_restore_insn:
	.quad	.Lsysc_exit_timer
	.quad	.Lsysc_done - 4

.Lcleanup_io_tif:
	larl	%r9,.Lio_tif
	BR_EX	%r14,%r11

.Lcleanup_io_restore:
	# check if stpt has been executed
	clg	%r9,BASED(.Lcleanup_io_restore_insn)
	jh	0f
	mvc	__LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
0:	clg	%r9,BASED(.Lcleanup_io_restore_insn+8)
	je	1f
	lg	%r9,24(%r11)		# get saved r11 pointer to pt_regs
	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r9)
	mvc	0(64,%r11),__PT_R8(%r9)
	lmg	%r0,%r7,__PT_R0(%r9)
1:	lmg	%r8,%r9,__LC_RETURN_PSW
	BR_EX	%r14,%r11
.Lcleanup_io_restore_insn:
	.quad	.Lio_exit_timer
	.quad	.Lio_done - 4

.Lcleanup_idle:
	ni	__LC_CPU_FLAGS+7,255-_CIF_ENABLED_WAIT
	# copy interrupt clock & cpu timer
	mvc	__CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK
	mvc	__TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER
	cghi	%r11,__LC_SAVE_AREA_ASYNC
	je	0f
	mvc	__CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
	mvc	__TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER
0:	# check if stck & stpt have been executed
	clg	%r9,BASED(.Lcleanup_idle_insn)
	jhe	1f
	mvc	__CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2)
	mvc	__TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2)
1:	# calculate idle cycles
#ifdef CONFIG_SMP
	clg	%r9,BASED(.Lcleanup_idle_insn)
	jl	3f
	larl	%r1,smp_cpu_mtid
	llgf	%r1,0(%r1)
	ltgr	%r1,%r1
	jz	3f
	.insn	rsy,0xeb0000000017,%r1,5,__SF_EMPTY+80(%r15)
	larl	%r3,mt_cycles
	ag	%r3,__LC_PERCPU_OFFSET
	la	%r4,__SF_EMPTY+16(%r15)
2:	lg	%r0,0(%r3)
	slg	%r0,0(%r4)
	alg	%r0,64(%r4)
	stg	%r0,0(%r3)
	la	%r3,8(%r3)
	la	%r4,8(%r4)
	brct	%r1,2b
#endif
3:	# account system time going idle
	lg	%r9,__LC_STEAL_TIMER
	alg	%r9,__CLOCK_IDLE_ENTER(%r2)
	slg	%r9,__LC_LAST_UPDATE_CLOCK
	stg	%r9,__LC_STEAL_TIMER
	mvc	__LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2)
	lg	%r9,__LC_SYSTEM_TIMER
	alg	%r9,__LC_LAST_UPDATE_TIMER
	slg	%r9,__TIMER_IDLE_ENTER(%r2)
	stg	%r9,__LC_SYSTEM_TIMER
	mvc	__LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2)
	# prepare return psw
	nihh	%r8,0xfcfd		# clear irq & wait state bits
	lg	%r9,48(%r11)		# return from psw_idle
	BR_EX	%r14,%r11
.Lcleanup_idle_insn:
	.quad	.Lpsw_idle_lpsw

.Lcleanup_save_fpu_regs:
	larl	%r9,save_fpu_regs
	BR_EX	%r14,%r11

.Lcleanup_load_fpu_regs:
	larl	%r9,load_fpu_regs
	BR_EX	%r14,%r11

/*
 * Integer constants
 */
	.align	8
.Lcritical_start:
	.quad	.L__critical_start
.Lcritical_length:
	.quad	.L__critical_end - .L__critical_start
#if IS_ENABLED(CONFIG_KVM)
.Lsie_critical_start:
	.quad	.Lsie_gmap
.Lsie_critical_length:
	.quad	.Lsie_done - .Lsie_gmap
.Lsie_crit_mcck_start:
	.quad	.Lsie_entry
.Lsie_crit_mcck_length:
	.quad	.Lsie_skip - .Lsie_entry
#endif
	.section .rodata, "a"
#define SYSCALL(esame,emu)	.long __s390x_ ## esame
	.globl	sys_call_table
sys_call_table:
#include "asm/syscall_table.h"
#undef SYSCALL

#ifdef CONFIG_COMPAT

#define SYSCALL(esame,emu)	.long __s390_ ## emu
	.globl	sys_call_table_emu
sys_call_table_emu:
#include "asm/syscall_table.h"
#undef SYSCALL
#endif