/* SPDX-License-Identifier: GPL-2.0 */
/*
 *    S390 low-level entry points.
 *
 *    Copyright IBM Corp. 1999, 2012
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *		 Hartmut Penner (hp@de.ibm.com),
 *		 Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *		 Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/alternative-asm.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/ctl_reg.h>
#include <asm/dwarf.h>
#include <asm/errno.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/page.h>
#include <asm/sigp.h>
#include <asm/irq.h>
#include <asm/vx-insn.h>
#include <asm/setup.h>
#include <asm/nmi.h>
#include <asm/export.h>

__PT_R0      =	__PT_GPRS
__PT_R1      =	__PT_GPRS + 8
__PT_R2      =	__PT_GPRS + 16
__PT_R3      =	__PT_GPRS + 24
__PT_R4      =	__PT_GPRS + 32
__PT_R5      =	__PT_GPRS + 40
__PT_R6      =	__PT_GPRS + 48
__PT_R7      =	__PT_GPRS + 56
__PT_R8      =	__PT_GPRS + 64
__PT_R9      =	__PT_GPRS + 72
__PT_R10     =	__PT_GPRS + 80
__PT_R11     =	__PT_GPRS + 88
__PT_R12     =	__PT_GPRS + 96
__PT_R13     =	__PT_GPRS + 104
__PT_R14     =	__PT_GPRS + 112
__PT_R15     =	__PT_GPRS + 120

STACK_SHIFT = PAGE_SHIFT + THREAD_SIZE_ORDER
STACK_SIZE  = 1 << STACK_SHIFT
STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
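# STACK_INIT is the initial stack pointer of a fresh kernel stack: it
# leaves room for a struct pt_regs plus the standard frame at the top.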

_TIF_WORK	= (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
		   _TIF_UPROBE | _TIF_GUARDED_STORAGE | _TIF_PATCH_PENDING)
_TIF_TRACE	= (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
		   _TIF_SYSCALL_TRACEPOINT)
_CIF_WORK	= (_CIF_MCCK_PENDING | _CIF_ASCE_PRIMARY | \
		   _CIF_ASCE_SECONDARY | _CIF_FPU)
_PIF_WORK	= (_PIF_PER_TRAP | _PIF_SYSCALL_RESTART)
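# The *_WORK masks bundle the per-thread (TIF), per-cpu (CIF) and
# per-pt_regs (PIF) bits that the exit paths below poll for work.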

_LPP_OFFSET	= __LC_LPP

#define BASED(name) name-cleanup_critical(%r13)
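/*
 * The entry paths load %r13 with the address of cleanup_critical, so
 * BASED(name) turns "name" into a %r13-relative memory operand.
 */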

	.macro	TRACE_IRQS_ON
#ifdef CONFIG_TRACE_IRQFLAGS
	basr	%r2,%r0
	brasl	%r14,trace_hardirqs_on_caller
#endif
	.endm

	.macro	TRACE_IRQS_OFF
#ifdef CONFIG_TRACE_IRQFLAGS
	basr	%r2,%r0
	brasl	%r14,trace_hardirqs_off_caller
#endif
	.endm

	.macro	LOCKDEP_SYS_EXIT
#ifdef CONFIG_LOCKDEP
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	jz	.+10
	brasl	%r14,lockdep_sys_exit
#endif
	.endm

	.macro	CHECK_STACK stacksize,savearea
#ifdef CONFIG_CHECK_STACK
	tml	%r15,\stacksize - CONFIG_STACK_GUARD
	lghi	%r14,\savearea
	jz	stack_overflow
#endif
	.endm

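/*
 * SWITCH_ASYNC: establish %r11 (pt_regs) and %r15 (stack) for an
 * asynchronous interrupt. Interrupts from user space get their CPU
 * time accounted and switch to the async stack; interrupts from
 * kernel mode first run the critical section cleanup if needed and
 * keep the current stack when it already is the async stack.
 */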
	.macro	SWITCH_ASYNC savearea,timer
	tmhh	%r8,0x0001		# interrupting from user ?
	jnz	1f
	lgr	%r14,%r9
	slg	%r14,BASED(.Lcritical_start)
	clg	%r14,BASED(.Lcritical_length)
	jhe	0f
	lghi	%r11,\savearea		# inside critical section, do cleanup
	brasl	%r14,cleanup_critical
	tmhh	%r8,0x0001		# retest problem state after cleanup
	jnz	1f
0:	lg	%r14,__LC_ASYNC_STACK	# are we already on the async stack?
	slgr	%r14,%r15
	srag	%r14,%r14,STACK_SHIFT
	jnz	2f
	CHECK_STACK 1<<STACK_SHIFT,\savearea
	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	j	3f
1:	UPDATE_VTIME %r14,%r15,\timer
	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
2:	lg	%r15,__LC_ASYNC_STACK	# load async stack
3:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	.endm

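/*
 * UPDATE_VTIME: the CPU timer counts down, so the user time consumed
 * since the last exit to user space is EXIT_TIMER - \enter_timer, and
 * the system time since the last update is LAST_UPDATE_TIMER -
 * EXIT_TIMER; both deltas are accumulated into the lowcore timers.
 */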
	.macro UPDATE_VTIME w1,w2,enter_timer
	lg	\w1,__LC_EXIT_TIMER
	lg	\w2,__LC_LAST_UPDATE_TIMER
	slg	\w1,\enter_timer
	slg	\w2,__LC_EXIT_TIMER
	alg	\w1,__LC_USER_TIMER
	alg	\w2,__LC_SYSTEM_TIMER
	stg	\w1,__LC_USER_TIMER
	stg	\w2,__LC_SYSTEM_TIMER
	mvc	__LC_LAST_UPDATE_TIMER(8),\enter_timer
	.endm

	.macro REENABLE_IRQS
	stg	%r8,__LC_RETURN_PSW
	ni	__LC_RETURN_PSW,0xbf
	ssm	__LC_RETURN_PSW
	.endm

	.macro STCK savearea
#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
	.insn	s,0xb27c0000,\savearea		# store clock fast
#else
	.insn	s,0xb2050000,\savearea		# store clock
#endif
	.endm

	/*
	 * The TSTMSK macro generates a test-under-mask instruction by
	 * calculating the memory offset for the specified mask value.
	 * Mask value can be any constant.  The macro shifts the mask
	 * value to calculate the memory offset for the test-under-mask
	 * instruction.
	 */
	.macro TSTMSK addr, mask, size=8, bytepos=0
		.if (\bytepos < \size) && (\mask >> 8)
			.if (\mask & 0xff)
				.error "Mask exceeds byte boundary"
			.endif
			TSTMSK \addr, "(\mask >> 8)", \size, "(\bytepos + 1)"
			.exitm
		.endif
		.ifeq \mask
			.error "Mask must not be zero"
		.endif
		off = \size - \bytepos - 1
		tm	off+\addr, \mask
	.endm
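	/*
	 * Illustrative expansion (a sketch, not assembler output): with
	 * the default size of 8, a single-byte mask such as
	 *	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	 * does not recurse and reduces to one test-under-mask on the
	 * low-order byte of the eight-byte field, roughly
	 *	tm	7+__LC_CPU_FLAGS,_CIF_FPU
	 */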

	.macro BPOFF
	ALTERNATIVE "", ".long 0xb2e8c000", 82
	.endm

	.macro BPON
	ALTERNATIVE "", ".long 0xb2e8d000", 82
	.endm

	.macro BPENTER tif_ptr,tif_mask
	ALTERNATIVE "TSTMSK \tif_ptr,\tif_mask; jz .+8; .long 0xb2e8d000", \
		    "", 82
	.endm

	.macro BPEXIT tif_ptr,tif_mask
	TSTMSK	\tif_ptr,\tif_mask
	ALTERNATIVE "jz .+8;  .long 0xb2e8c000", \
		    "jnz .+8; .long 0xb2e8d000", 82
	.endm
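	/*
	 * Editorial note (best-effort reading of the opcodes): 0xb2e8c000
	 * and 0xb2e8d000 encode PPA (perform processor assist) with
	 * function codes 12 and 13, which toggle the CPU's alternate
	 * branch-prediction mode as a Spectre mitigation; the
	 * ALTERNATIVEs are only patched in when facility 82 is installed.
	 */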

#ifdef CONFIG_EXPOLINE

	.macro GEN_BR_THUNK name,reg,tmp
	.section .text.\name,"axG",@progbits,\name,comdat
	.globl \name
	.hidden \name
	.type \name,@function
\name:
	CFI_STARTPROC
#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
	exrl	0,0f
#else
	larl	\tmp,0f
	ex	0,0(\tmp)
#endif
	j	.
0:	br	\reg
	CFI_ENDPROC
	.endm
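	/*
	 * How the thunk works (editorial note): the indirect "br \reg"
	 * at label 0 is performed only as the target of an execute
	 * instruction (exrl/ex), which keeps it out of the ordinary
	 * branch-prediction path; the "j ." self-loop acts as a landing
	 * pad that traps any speculative fall-through.
	 */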

	GEN_BR_THUNK __s390x_indirect_jump_r1use_r9,%r9,%r1
	GEN_BR_THUNK __s390x_indirect_jump_r1use_r14,%r14,%r1
	GEN_BR_THUNK __s390x_indirect_jump_r11use_r14,%r14,%r11

	.macro BASR_R14_R9
0:	brasl	%r14,__s390x_indirect_jump_r1use_r9
	.pushsection .s390_indirect_branches,"a",@progbits
	.long	0b-.
	.popsection
	.endm

	.macro BR_R1USE_R14
0:	jg	__s390x_indirect_jump_r1use_r14
	.pushsection .s390_indirect_branches,"a",@progbits
	.long	0b-.
	.popsection
	.endm

	.macro BR_R11USE_R14
0:	jg	__s390x_indirect_jump_r11use_r14
	.pushsection .s390_indirect_branches,"a",@progbits
	.long	0b-.
	.popsection
	.endm

#else	/* CONFIG_EXPOLINE */

	.macro BASR_R14_R9
	basr	%r14,%r9
	.endm

	.macro BR_R1USE_R14
	br	%r14
	.endm

	.macro BR_R11USE_R14
	br	%r14
	.endm

#endif /* CONFIG_EXPOLINE */


	.section .kprobes.text, "ax"
.Ldummy:
	/*
	 * This nop exists only to keep __switch_to from starting right at
	 * the beginning of the kprobes text section; otherwise several
	 * symbols would share the same address and, e.g., objdump would
	 * pick an arbitrary one of them when disassembling this code.
	 * With the nop in between, the __switch_to symbol is unique again.
	 */
	nop	0

ENTRY(__bpon)
	.globl __bpon
	BPON
	BR_R1USE_R14

/*
 * Scheduler resume function, called by switch_to
 *  gpr2 = (task_struct *) prev
 *  gpr3 = (task_struct *) next
 * Returns:
 *  gpr2 = prev
 */
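/*
 * C-level view (an assumption from the convention above; see the
 * switch_to macro in asm/switch_to.h):
 *	struct task_struct *__switch_to(struct task_struct *prev,
 *					struct task_struct *next);
 */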
ENTRY(__switch_to)
	stmg	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
	lghi	%r4,__TASK_stack
	lghi	%r1,__TASK_thread
	lg	%r5,0(%r4,%r3)			# start of kernel stack of next
	stg	%r15,__THREAD_ksp(%r1,%r2)	# store kernel stack of prev
	lgr	%r15,%r5
	aghi	%r15,STACK_INIT			# end of kernel stack of next
	stg	%r3,__LC_CURRENT		# store task struct of next
	stg	%r15,__LC_KERNEL_STACK		# store end of kernel stack
	lg	%r15,__THREAD_ksp(%r1,%r3)	# load kernel stack of next
	aghi	%r3,__TASK_pid
	mvc	__LC_CURRENT_PID(4,%r0),0(%r3)	# store pid of next
	lmg	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
	ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40
	BR_R1USE_R14

.L__critical_start:

#if IS_ENABLED(CONFIG_KVM)
/*
 * sie64a calling convention:
 * %r2 pointer to sie control block
 * %r3 guest register save area
 */
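/*
 * Seen from C this is (an assumption; the declaration lives in the
 * KVM headers):
 *	int sie64a(struct kvm_s390_sie_block *scb, u64 *guest_gprs);
 * The reason code stored in __SF_SIE_REASON below becomes the return
 * value in %r2 at sie_exit.
 */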
ENTRY(sie64a)
	stmg	%r6,%r14,__SF_GPRS(%r15)	# save kernel registers
	lg	%r12,__LC_CURRENT
	stg	%r2,__SF_SIE_CONTROL(%r15)	# save control block pointer
	stg	%r3,__SF_SIE_SAVEAREA(%r15)	# save guest register save area
	xc	__SF_SIE_REASON(8,%r15),__SF_SIE_REASON(%r15) # reason code = 0
	mvc	__SF_SIE_FLAGS(8,%r15),__TI_flags(%r12) # copy thread flags
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU		# load guest fp/vx registers ?
	jno	.Lsie_load_guest_gprs
	brasl	%r14,load_fpu_regs		# load guest fp/vx regs
.Lsie_load_guest_gprs:
	lmg	%r0,%r13,0(%r3)			# load guest gprs 0-13
	lg	%r14,__LC_GMAP			# get gmap pointer
	ltgr	%r14,%r14
	jz	.Lsie_gmap
	lctlg	%c1,%c1,__GMAP_ASCE(%r14)	# load primary asce
.Lsie_gmap:
	lg	%r14,__SF_SIE_CONTROL(%r15)	# get control block pointer
	oi	__SIE_PROG0C+3(%r14),1		# we are going into SIE now
	tm	__SIE_PROG20+3(%r14),3		# last exit...
	jnz	.Lsie_skip
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	jo	.Lsie_skip			# exit if fp/vx regs changed
	BPEXIT	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
.Lsie_entry:
	sie	0(%r14)
.Lsie_exit:
	BPOFF
	BPENTER	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
.Lsie_skip:
	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
.Lsie_done:
# Some program checks are suppressing, i.e. the PSW already points past
# the instruction. C code (e.g. do_protection_exception) will rewind the
# PSW by the ILC, which is often 4 bytes in case of SIE. There are some
# corner cases (e.g. runtime instrumentation) where the ILC is
# unpredictable. Other instructions between sie64a and .Lsie_done should
# not cause program interrupts. So let's use 3 nops as a landing pad for
# all possible rewinds.
# See also .Lcleanup_sie
.Lrewind_pad6:
	nopr	7
.Lrewind_pad4:
	nopr	7
.Lrewind_pad2:
	nopr	7
	.globl sie_exit
sie_exit:
	lg	%r14,__SF_SIE_SAVEAREA(%r15)	# load guest register save area
	stmg	%r0,%r13,0(%r14)		# save guest gprs 0-13
	xgr	%r0,%r0				# clear guest registers to
	xgr	%r1,%r1				# prevent speculative use
	xgr	%r2,%r2
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	lmg	%r6,%r14,__SF_GPRS(%r15)	# restore kernel registers
	lg	%r2,__SF_SIE_REASON(%r15)	# return exit reason code
	BR_R1USE_R14
.Lsie_fault:
	lghi	%r14,-EFAULT
	stg	%r14,__SF_SIE_REASON(%r15)	# set exit reason code
	j	sie_exit

	EX_TABLE(.Lrewind_pad6,.Lsie_fault)
	EX_TABLE(.Lrewind_pad4,.Lsie_fault)
	EX_TABLE(.Lrewind_pad2,.Lsie_fault)
	EX_TABLE(sie_exit,.Lsie_fault)
EXPORT_SYMBOL(sie64a)
EXPORT_SYMBOL(sie_exit)
#endif

/*
 * SVC interrupt handler routine. System calls are synchronous events and
 * are executed with interrupts enabled.
 */

ENTRY(system_call)
	stpt	__LC_SYNC_ENTER_TIMER
.Lsysc_stmg:
	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
	BPOFF
	lg	%r12,__LC_CURRENT
	lghi	%r13,__TASK_thread
	lghi	%r14,_PIF_SYSCALL
.Lsysc_per:
	lg	%r15,__LC_KERNEL_STACK
	la	%r11,STACK_FRAME_OVERHEAD(%r15)	# pointer to pt_regs
.Lsysc_vtime:
	UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER
	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
	stmg	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
	mvc	__PT_PSW(16,%r11),__LC_SVC_OLD_PSW
	mvc	__PT_INT_CODE(4,%r11),__LC_SVC_ILC
	stg	%r14,__PT_FLAGS(%r11)
.Lsysc_do_svc:
	# clear user controlled register to prevent speculative use
	xgr	%r0,%r0
	# load address of system call table
	lg	%r10,__THREAD_sysc_table(%r13,%r12)
	llgh	%r8,__PT_INT_CODE+2(%r11)
	slag	%r8,%r8,2			# shift and test for svc 0
	jnz	.Lsysc_nr_ok
	# svc 0: system call number in %r1
	llgfr	%r1,%r1				# clear high word in r1
	cghi	%r1,NR_syscalls
	jnl	.Lsysc_nr_ok
	sth	%r1,__PT_INT_CODE+2(%r11)
	slag	%r8,%r1,2
.Lsysc_nr_ok:
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	stg	%r2,__PT_ORIG_GPR2(%r11)
	stg	%r7,STACK_FRAME_OVERHEAD(%r15)
	lgf	%r9,0(%r8,%r10)			# get system call address
	TSTMSK	__TI_flags(%r12),_TIF_TRACE
	jnz	.Lsysc_tracesys
	BASR_R14_R9				# call sys_xxxx
	stg	%r2,__PT_R2(%r11)		# store return value

.Lsysc_return:
	LOCKDEP_SYS_EXIT
.Lsysc_tif:
	TSTMSK	__PT_FLAGS(%r11),_PIF_WORK
	jnz	.Lsysc_work
	TSTMSK	__TI_flags(%r12),_TIF_WORK
	jnz	.Lsysc_work			# check for work
	TSTMSK	__LC_CPU_FLAGS,_CIF_WORK
	jnz	.Lsysc_work
	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
.Lsysc_restore:
	lg	%r14,__LC_VDSO_PER_CPU
	lmg	%r0,%r10,__PT_R0(%r11)
	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
.Lsysc_exit_timer:
	stpt	__LC_EXIT_TIMER
	mvc	__VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
	lmg	%r11,%r15,__PT_R11(%r11)
	lpswe	__LC_RETURN_PSW
.Lsysc_done:

#
# One of the work bits is on. Find out which one.
#
.Lsysc_work:
	TSTMSK	__LC_CPU_FLAGS,_CIF_MCCK_PENDING
	jo	.Lsysc_mcck_pending
	TSTMSK	__TI_flags(%r12),_TIF_NEED_RESCHED
	jo	.Lsysc_reschedule
	TSTMSK	__PT_FLAGS(%r11),_PIF_SYSCALL_RESTART
	jo	.Lsysc_syscall_restart
#ifdef CONFIG_UPROBES
	TSTMSK	__TI_flags(%r12),_TIF_UPROBE
	jo	.Lsysc_uprobe_notify
#endif
	TSTMSK	__TI_flags(%r12),_TIF_GUARDED_STORAGE
	jo	.Lsysc_guarded_storage
	TSTMSK	__PT_FLAGS(%r11),_PIF_PER_TRAP
	jo	.Lsysc_singlestep
#ifdef CONFIG_LIVEPATCH
	TSTMSK	__TI_flags(%r12),_TIF_PATCH_PENDING
	jo	.Lsysc_patch_pending	# handle live patching just before
					# signals and possible syscall restart
#endif
	TSTMSK	__PT_FLAGS(%r11),_PIF_SYSCALL_RESTART
	jo	.Lsysc_syscall_restart
	TSTMSK	__TI_flags(%r12),_TIF_SIGPENDING
	jo	.Lsysc_sigpending
	TSTMSK	__TI_flags(%r12),_TIF_NOTIFY_RESUME
	jo	.Lsysc_notify_resume
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	jo	.Lsysc_vxrs
	TSTMSK	__LC_CPU_FLAGS,(_CIF_ASCE_PRIMARY|_CIF_ASCE_SECONDARY)
	jnz	.Lsysc_asce
	j	.Lsysc_return		# beware of critical section cleanup

#
# _TIF_NEED_RESCHED is set, call schedule
#
.Lsysc_reschedule:
	larl	%r14,.Lsysc_return
	jg	schedule

#
# _CIF_MCCK_PENDING is set, call handler
#
.Lsysc_mcck_pending:
	larl	%r14,.Lsysc_return
	jg	s390_handle_mcck	# TIF bit will be cleared by handler

#
# _CIF_ASCE_PRIMARY and/or _CIF_ASCE_SECONDARY set, load user space asce
#
.Lsysc_asce:
	ni	__LC_CPU_FLAGS+7,255-_CIF_ASCE_SECONDARY
	lctlg	%c7,%c7,__LC_VDSO_ASCE		# load secondary asce
	TSTMSK	__LC_CPU_FLAGS,_CIF_ASCE_PRIMARY
	jz	.Lsysc_return
#ifndef CONFIG_HAVE_MARCH_Z10_FEATURES
	tm	__LC_STFLE_FAC_LIST+3,0x10	# has MVCOS ?
	jnz	.Lsysc_set_fs_fixup
	ni	__LC_CPU_FLAGS+7,255-_CIF_ASCE_PRIMARY
	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
	j	.Lsysc_return
.Lsysc_set_fs_fixup:
#endif
	larl	%r14,.Lsysc_return
	jg	set_fs_fixup

#
# CIF_FPU is set, restore floating-point controls and floating-point registers.
#
.Lsysc_vxrs:
	larl	%r14,.Lsysc_return
	jg	load_fpu_regs

#
# _TIF_SIGPENDING is set, call do_signal
#
.Lsysc_sigpending:
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,do_signal
	TSTMSK	__PT_FLAGS(%r11),_PIF_SYSCALL
	jno	.Lsysc_return
.Lsysc_do_syscall:
	lghi	%r13,__TASK_thread
	lmg	%r2,%r7,__PT_R2(%r11)	# load svc arguments
	lghi	%r1,0			# svc 0 returns -ENOSYS
	j	.Lsysc_do_svc

#
# _TIF_NOTIFY_RESUME is set, call do_notify_resume
#
.Lsysc_notify_resume:
	lgr	%r2,%r11		# pass pointer to pt_regs
	larl	%r14,.Lsysc_return
	jg	do_notify_resume

#
# _TIF_UPROBE is set, call uprobe_notify_resume
#
#ifdef CONFIG_UPROBES
.Lsysc_uprobe_notify:
	lgr	%r2,%r11		# pass pointer to pt_regs
	larl	%r14,.Lsysc_return
	jg	uprobe_notify_resume
#endif

#
# _TIF_GUARDED_STORAGE is set, call guarded_storage_load
#
.Lsysc_guarded_storage:
	lgr	%r2,%r11		# pass pointer to pt_regs
	larl	%r14,.Lsysc_return
	jg	gs_load_bc_cb
#
# _TIF_PATCH_PENDING is set, call klp_update_patch_state
#
#ifdef CONFIG_LIVEPATCH
.Lsysc_patch_pending:
	lg	%r2,__LC_CURRENT	# pass pointer to task struct
	larl	%r14,.Lsysc_return
	jg	klp_update_patch_state
#endif

#
# _PIF_PER_TRAP is set, call do_per_trap
#
.Lsysc_singlestep:
	ni	__PT_FLAGS+7(%r11),255-_PIF_PER_TRAP
	lgr	%r2,%r11		# pass pointer to pt_regs
	larl	%r14,.Lsysc_return
	jg	do_per_trap

#
# _PIF_SYSCALL_RESTART is set, repeat the current system call
#
.Lsysc_syscall_restart:
	ni	__PT_FLAGS+7(%r11),255-_PIF_SYSCALL_RESTART
	lmg	%r1,%r7,__PT_R1(%r11)	# load svc arguments
	lg	%r2,__PT_ORIG_GPR2(%r11)
	j	.Lsysc_do_svc

#
# call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
# and after the system call
#
.Lsysc_tracesys:
	lgr	%r2,%r11		# pass pointer to pt_regs
	la	%r3,0
	llgh	%r0,__PT_INT_CODE+2(%r11)
	stg	%r0,__PT_R2(%r11)
	brasl	%r14,do_syscall_trace_enter
	lghi	%r0,NR_syscalls
	clgr	%r0,%r2
	jnh	.Lsysc_tracenogo
	sllg	%r8,%r2,2
	lgf	%r9,0(%r8,%r10)
.Lsysc_tracego:
	lmg	%r3,%r7,__PT_R3(%r11)
	stg	%r7,STACK_FRAME_OVERHEAD(%r15)
	lg	%r2,__PT_ORIG_GPR2(%r11)
	BASR_R14_R9			# call sys_xxx
	stg	%r2,__PT_R2(%r11)	# store return value
.Lsysc_tracenogo:
	TSTMSK	__TI_flags(%r12),_TIF_TRACE
	jz	.Lsysc_return
	lgr	%r2,%r11		# pass pointer to pt_regs
	larl	%r14,.Lsysc_return
	jg	do_syscall_trace_exit

#
# a new process exits the kernel with ret_from_fork
#
ENTRY(ret_from_fork)
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	lg	%r12,__LC_CURRENT
	brasl	%r14,schedule_tail
	TRACE_IRQS_ON
	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
	tm	__PT_PSW+1(%r11),0x01	# forking a kernel thread ?
	jne	.Lsysc_tracenogo
	# it's a kernel thread
	lmg	%r9,%r10,__PT_R9(%r11)	# load gprs
ENTRY(kernel_thread_starter)
	la	%r2,0(%r10)
	BASR_R14_R9
	j	.Lsysc_tracenogo

/*
 * Program check handler routine
 */

ENTRY(pgm_check_handler)
	stpt	__LC_SYNC_ENTER_TIMER
	BPOFF
	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
	lg	%r10,__LC_LAST_BREAK
	lg	%r12,__LC_CURRENT
	lghi	%r11,0
	larl	%r13,cleanup_critical
	lmg	%r8,%r9,__LC_PGM_OLD_PSW
	tmhh	%r8,0x0001		# test problem state bit
	jnz	2f			# -> fault in user space
#if IS_ENABLED(CONFIG_KVM)
	# cleanup critical section for program checks in sie64a
	lgr	%r14,%r9
	slg	%r14,BASED(.Lsie_critical_start)
	clg	%r14,BASED(.Lsie_critical_length)
	jhe	0f
	lg	%r14,__SF_SIE_CONTROL(%r15)	# get control block pointer
	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
	larl	%r9,sie_exit			# skip forward to sie_exit
	lghi	%r11,_PIF_GUEST_FAULT
#endif
0:	tmhh	%r8,0x4000		# PER bit set in old PSW ?
	jnz	1f			# -> enabled, can't be a double fault
	tm	__LC_PGM_ILC+3,0x80	# check for per exception
	jnz	.Lpgm_svcper		# -> single stepped svc
1:	CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC
	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	j	4f
2:	UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
	lg	%r15,__LC_KERNEL_STACK
	lgr	%r14,%r12
	aghi	%r14,__TASK_thread	# pointer to thread_struct
	lghi	%r13,__LC_PGM_TDB
	tm	__LC_PGM_ILC+2,0x02	# check for transaction abort
	jz	3f
	mvc	__THREAD_trap_tdb(256,%r14),0(%r13)
3:	stg	%r10,__THREAD_last_break(%r14)
4:	lgr	%r13,%r11
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r2,%r2
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
	stmg	%r8,%r9,__PT_PSW(%r11)
	mvc	__PT_INT_CODE(4,%r11),__LC_PGM_ILC
	mvc	__PT_INT_PARM_LONG(8,%r11),__LC_TRANS_EXC_CODE
	stg	%r13,__PT_FLAGS(%r11)
	stg	%r10,__PT_ARGS(%r11)
	tm	__LC_PGM_ILC+3,0x80	# check for per exception
	jz	5f
	tmhh	%r8,0x0001		# kernel per event ?
	jz	.Lpgm_kprobe
	oi	__PT_FLAGS+7(%r11),_PIF_PER_TRAP
	mvc	__THREAD_per_address(8,%r14),__LC_PER_ADDRESS
	mvc	__THREAD_per_cause(2,%r14),__LC_PER_CODE
	mvc	__THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID
5:	REENABLE_IRQS
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	larl	%r1,pgm_check_table
	llgh	%r10,__PT_INT_CODE+2(%r11)
	nill	%r10,0x007f
	sll	%r10,2
	je	.Lpgm_return
	lgf	%r9,0(%r10,%r1)		# load address of handler routine
	lgr	%r2,%r11		# pass pointer to pt_regs
	BASR_R14_R9			# branch to interrupt-handler
.Lpgm_return:
	LOCKDEP_SYS_EXIT
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	jno	.Lsysc_restore
	TSTMSK	__PT_FLAGS(%r11),_PIF_SYSCALL
	jo	.Lsysc_do_syscall
	j	.Lsysc_tif

#
# PER event in supervisor state, must be kprobes
#
.Lpgm_kprobe:
	REENABLE_IRQS
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,do_per_trap
	j	.Lpgm_return

#
# single stepped system call
#
.Lpgm_svcper:
	mvc	__LC_RETURN_PSW(8),__LC_SVC_NEW_PSW
	lghi	%r13,__TASK_thread
	larl	%r14,.Lsysc_per
	stg	%r14,__LC_RETURN_PSW+8
	lghi	%r14,_PIF_SYSCALL | _PIF_PER_TRAP
	lpswe	__LC_RETURN_PSW		# branch to .Lsysc_per and enable irqs

/*
 * IO interrupt handler routine
 */
ENTRY(io_int_handler)
	STCK	__LC_INT_CLOCK
	stpt	__LC_ASYNC_ENTER_TIMER
	BPOFF
	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
	lg	%r12,__LC_CURRENT
	larl	%r13,cleanup_critical
	lmg	%r8,%r9,__LC_IO_OLD_PSW
	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
	stmg	%r0,%r7,__PT_R0(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r2,%r2
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r10,%r10
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
	stmg	%r8,%r9,__PT_PSW(%r11)
	mvc	__PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	TSTMSK	__LC_CPU_FLAGS,_CIF_IGNORE_IRQ
	jo	.Lio_restore
	TRACE_IRQS_OFF
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
.Lio_loop:
	lgr	%r2,%r11		# pass pointer to pt_regs
	lghi	%r3,IO_INTERRUPT
	tm	__PT_INT_CODE+8(%r11),0x80	# adapter interrupt ?
	jz	.Lio_call
	lghi	%r3,THIN_INTERRUPT
.Lio_call:
	brasl	%r14,do_IRQ
	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_LPAR
	jz	.Lio_return
	tpi	0
	jz	.Lio_return
	mvc	__PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
	j	.Lio_loop
.Lio_return:
	LOCKDEP_SYS_EXIT
	TRACE_IRQS_ON
.Lio_tif:
	TSTMSK	__TI_flags(%r12),_TIF_WORK
	jnz	.Lio_work		# there is work to do (signals etc.)
	TSTMSK	__LC_CPU_FLAGS,_CIF_WORK
	jnz	.Lio_work
.Lio_restore:
	lg	%r14,__LC_VDSO_PER_CPU
	lmg	%r0,%r10,__PT_R0(%r11)
	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	jno	.Lio_exit_kernel
	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
.Lio_exit_timer:
	stpt	__LC_EXIT_TIMER
	mvc	__VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
.Lio_exit_kernel:
	lmg	%r11,%r15,__PT_R11(%r11)
	lpswe	__LC_RETURN_PSW
.Lio_done:

#
# There is work to do; find out in which context we have been interrupted:
# 1) if we return to user space we can do all _TIF_WORK work
# 2) if we return to kernel code and kvm is enabled check if we need to
#    modify the psw to leave SIE
# 3) if we return to kernel code and preemptive scheduling is enabled,
#    check the preemption counter and, if it is zero, call
#    preempt_schedule_irq
# Before any work can be done, a switch to the kernel stack is required.
#
.Lio_work:
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	jo	.Lio_work_user		# yes -> do resched & signal
#ifdef CONFIG_PREEMPT
	# check for preemptive scheduling
	icm	%r0,15,__LC_PREEMPT_COUNT
	jnz	.Lio_restore		# preemption is disabled
	TSTMSK	__TI_flags(%r12),_TIF_NEED_RESCHED
	jno	.Lio_restore
	# switch to kernel stack
	lg	%r1,__PT_R15(%r11)
	aghi	%r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
	la	%r11,STACK_FRAME_OVERHEAD(%r1)
	lgr	%r15,%r1
	# TRACE_IRQS_ON already done at .Lio_return, call
	# TRACE_IRQS_OFF to keep things symmetrical
	TRACE_IRQS_OFF
	brasl	%r14,preempt_schedule_irq
	j	.Lio_return
#else
	j	.Lio_restore
#endif

#
# Need to do work before returning to userspace, switch to kernel stack
#
.Lio_work_user:
	lg	%r1,__LC_KERNEL_STACK
	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
	la	%r11,STACK_FRAME_OVERHEAD(%r1)
	lgr	%r15,%r1

#
# One of the work bits is on. Find out which one.
#
.Lio_work_tif:
	TSTMSK	__LC_CPU_FLAGS,_CIF_MCCK_PENDING
	jo	.Lio_mcck_pending
	TSTMSK	__TI_flags(%r12),_TIF_NEED_RESCHED
	jo	.Lio_reschedule
#ifdef CONFIG_LIVEPATCH
	TSTMSK	__TI_flags(%r12),_TIF_PATCH_PENDING
	jo	.Lio_patch_pending
#endif
	TSTMSK	__TI_flags(%r12),_TIF_SIGPENDING
	jo	.Lio_sigpending
	TSTMSK	__TI_flags(%r12),_TIF_NOTIFY_RESUME
	jo	.Lio_notify_resume
	TSTMSK	__TI_flags(%r12),_TIF_GUARDED_STORAGE
	jo	.Lio_guarded_storage
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	jo	.Lio_vxrs
	TSTMSK	__LC_CPU_FLAGS,(_CIF_ASCE_PRIMARY|_CIF_ASCE_SECONDARY)
	jnz	.Lio_asce
	j	.Lio_return		# beware of critical section cleanup

#
# _CIF_MCCK_PENDING is set, call handler
#
.Lio_mcck_pending:
	# TRACE_IRQS_ON already done at .Lio_return
	brasl	%r14,s390_handle_mcck	# TIF bit will be cleared by handler
	TRACE_IRQS_OFF
	j	.Lio_return

#
# _CIF_ASCE_PRIMARY and/or _CIF_ASCE_SECONDARY set, load user space asce
#
.Lio_asce:
	ni	__LC_CPU_FLAGS+7,255-_CIF_ASCE_SECONDARY
	lctlg	%c7,%c7,__LC_VDSO_ASCE		# load secondary asce
	TSTMSK	__LC_CPU_FLAGS,_CIF_ASCE_PRIMARY
	jz	.Lio_return
#ifndef CONFIG_HAVE_MARCH_Z10_FEATURES
	tm	__LC_STFLE_FAC_LIST+3,0x10	# has MVCOS ?
	jnz	.Lio_set_fs_fixup
	ni	__LC_CPU_FLAGS+7,255-_CIF_ASCE_PRIMARY
	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
	j	.Lio_return
.Lio_set_fs_fixup:
#endif
	larl	%r14,.Lio_return
	jg	set_fs_fixup

#
# CIF_FPU is set, restore floating-point controls and floating-point registers.
#
.Lio_vxrs:
	larl	%r14,.Lio_return
	jg	load_fpu_regs

#
# _TIF_GUARDED_STORAGE is set, call guarded_storage_load
#
.Lio_guarded_storage:
	# TRACE_IRQS_ON already done at .Lio_return
	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,gs_load_bc_cb
	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
	TRACE_IRQS_OFF
	j	.Lio_return

#
# _TIF_NEED_RESCHED is set, call schedule
#
.Lio_reschedule:
	# TRACE_IRQS_ON already done at .Lio_return
	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
	brasl	%r14,schedule		# call scheduler
	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
	TRACE_IRQS_OFF
	j	.Lio_return

#
# _TIF_PATCH_PENDING is set, call klp_update_patch_state
#
#ifdef CONFIG_LIVEPATCH
.Lio_patch_pending:
	lg	%r2,__LC_CURRENT	# pass pointer to task struct
	larl	%r14,.Lio_return
	jg	klp_update_patch_state
#endif

#
# _TIF_SIGPENDING is set, call do_signal
#
.Lio_sigpending:
	# TRACE_IRQS_ON already done at .Lio_return
	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,do_signal
	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
	TRACE_IRQS_OFF
	j	.Lio_return

#
# _TIF_NOTIFY_RESUME is set, call do_notify_resume
#
.Lio_notify_resume:
	# TRACE_IRQS_ON already done at .Lio_return
	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,do_notify_resume
	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
	TRACE_IRQS_OFF
	j	.Lio_return

/*
 * External interrupt handler routine
 */
ENTRY(ext_int_handler)
	STCK	__LC_INT_CLOCK
	stpt	__LC_ASYNC_ENTER_TIMER
	BPOFF
	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
	lg	%r12,__LC_CURRENT
	larl	%r13,cleanup_critical
	lmg	%r8,%r9,__LC_EXT_OLD_PSW
	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
	stmg	%r0,%r7,__PT_R0(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r2,%r2
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r10,%r10
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
	stmg	%r8,%r9,__PT_PSW(%r11)
	lghi	%r1,__LC_EXT_PARAMS2
	mvc	__PT_INT_CODE(4,%r11),__LC_EXT_CPU_ADDR
	mvc	__PT_INT_PARM(4,%r11),__LC_EXT_PARAMS
	mvc	__PT_INT_PARM_LONG(8,%r11),0(%r1)
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	TSTMSK	__LC_CPU_FLAGS,_CIF_IGNORE_IRQ
	jo	.Lio_restore
	TRACE_IRQS_OFF
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	lghi	%r3,EXT_INTERRUPT
	brasl	%r14,do_IRQ
	j	.Lio_return

/*
 * Load idle PSW. The second "half" of this function is in .Lcleanup_idle.
 */
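/*
 * C view (an assumption from the register usage below):
 *	void psw_idle(struct s390_idle_data *data, unsigned long psw_mask);
 * %r2 = per-cpu idle data, %r3 = mask half of the idle PSW.
 */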
ENTRY(psw_idle)
	stg	%r3,__SF_EMPTY(%r15)
	larl	%r1,.Lpsw_idle_lpsw+4
	stg	%r1,__SF_EMPTY+8(%r15)
#ifdef CONFIG_SMP
	larl	%r1,smp_cpu_mtid
	llgf	%r1,0(%r1)
	ltgr	%r1,%r1
	jz	.Lpsw_idle_stcctm
	.insn	rsy,0xeb0000000017,%r1,5,__SF_EMPTY+16(%r15)
.Lpsw_idle_stcctm:
#endif
	oi	__LC_CPU_FLAGS+7,_CIF_ENABLED_WAIT
	BPON
	STCK	__CLOCK_IDLE_ENTER(%r2)
	stpt	__TIMER_IDLE_ENTER(%r2)
.Lpsw_idle_lpsw:
	lpswe	__SF_EMPTY(%r15)
	BR_R1USE_R14
.Lpsw_idle_end:

/*
 * Store floating-point controls and floating-point or vector registers,
 * depending on whether the vector facility is available. A critical
 * section cleanup ensures that the registers are stored even if
 * interrupted for some other work. The CIF_FPU flag is set to trigger
 * a lazy restore of the register contents at return from io or a
 * system call.
 */
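/*
 * Callable from C as (an assumption; see the fpu api header):
 *	void save_fpu_regs(void);
 * Only %r2 and %r3 are clobbered here.
 */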
ENTRY(save_fpu_regs)
	lg	%r2,__LC_CURRENT
	aghi	%r2,__TASK_thread
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	jo	.Lsave_fpu_regs_exit
	stfpc	__THREAD_FPU_fpc(%r2)
	lg	%r3,__THREAD_FPU_regs(%r2)
	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_VX
	jz	.Lsave_fpu_regs_fp	  # no -> store FP regs
	VSTM	%v0,%v15,0,%r3		  # vstm 0,15,0(3)
	VSTM	%v16,%v31,256,%r3	  # vstm 16,31,256(3)
	j	.Lsave_fpu_regs_done	  # -> set CIF_FPU flag
.Lsave_fpu_regs_fp:
	std	0,0(%r3)
	std	1,8(%r3)
	std	2,16(%r3)
	std	3,24(%r3)
	std	4,32(%r3)
	std	5,40(%r3)
	std	6,48(%r3)
	std	7,56(%r3)
	std	8,64(%r3)
	std	9,72(%r3)
	std	10,80(%r3)
	std	11,88(%r3)
	std	12,96(%r3)
	std	13,104(%r3)
	std	14,112(%r3)
	std	15,120(%r3)
.Lsave_fpu_regs_done:
	oi	__LC_CPU_FLAGS+7,_CIF_FPU
.Lsave_fpu_regs_exit:
	BR_R1USE_R14
.Lsave_fpu_regs_end:
EXPORT_SYMBOL(save_fpu_regs)

/*
 * Load floating-point controls and floating-point or vector registers.
 * A critical section cleanup ensures that the register contents are
 * loaded even if interrupted for some other work.
 *
 * There are special calling conventions to fit into sysc and io return work:
 *	%r15:	<kernel stack>
 * The function requires:
 *	%r4	(used as a scratch register, clobbered)
 */
load_fpu_regs:
	lg	%r4,__LC_CURRENT
	aghi	%r4,__TASK_thread
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	jno	.Lload_fpu_regs_exit
	lfpc	__THREAD_FPU_fpc(%r4)
	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_VX
	lg	%r4,__THREAD_FPU_regs(%r4)	# %r4 <- reg save area
	jz	.Lload_fpu_regs_fp		# -> no VX, load FP regs
	VLM	%v0,%v15,0,%r4
	VLM	%v16,%v31,256,%r4
	j	.Lload_fpu_regs_done
.Lload_fpu_regs_fp:
	ld	0,0(%r4)
	ld	1,8(%r4)
	ld	2,16(%r4)
	ld	3,24(%r4)
	ld	4,32(%r4)
	ld	5,40(%r4)
	ld	6,48(%r4)
	ld	7,56(%r4)
	ld	8,64(%r4)
	ld	9,72(%r4)
	ld	10,80(%r4)
	ld	11,88(%r4)
	ld	12,96(%r4)
	ld	13,104(%r4)
	ld	14,112(%r4)
	ld	15,120(%r4)
.Lload_fpu_regs_done:
	ni	__LC_CPU_FLAGS+7,255-_CIF_FPU
.Lload_fpu_regs_exit:
	BR_R1USE_R14
.Lload_fpu_regs_end:

.L__critical_end:

/*
 * Machine check handler routines
 */
ENTRY(mcck_int_handler)
	STCK	__LC_MCCK_CLOCK
	BPOFF
	la	%r1,4095		# validate r1
	spt	__LC_CPU_TIMER_SAVE_AREA-4095(%r1)	# validate cpu timer
	sckc	__LC_CLOCK_COMPARATOR			# validate comparator
	lam	%a0,%a15,__LC_AREGS_SAVE_AREA-4095(%r1) # validate acrs
	lmg	%r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# validate gprs
	lg	%r12,__LC_CURRENT
	larl	%r13,cleanup_critical
	lmg	%r8,%r9,__LC_MCK_OLD_PSW
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_SYSTEM_DAMAGE
	jo	.Lmcck_panic		# yes -> rest of mcck code invalid
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_CR_VALID
	jno	.Lmcck_panic		# control registers invalid -> panic
	la	%r14,4095
	lctlg	%c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r14) # validate ctl regs
	ptlb
	lg	%r11,__LC_MCESAD-4095(%r14) # extended machine check save area
	nill	%r11,0xfc00		# MCESA_ORIGIN_MASK
	TSTMSK	__LC_CREGS_SAVE_AREA+16-4095(%r14),CR2_GUARDED_STORAGE
	jno	0f
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_GS_VALID
	jno	0f
	.insn	 rxy,0xe3000000004d,0,__MCESA_GS_SAVE_AREA(%r11) # LGSC
0:	l	%r14,__LC_FP_CREG_SAVE_AREA-4095(%r14)
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_FC_VALID
	jo	0f
	sr	%r14,%r14
0:	sfpc	%r14
	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_VX
	jo	0f
	lghi	%r14,__LC_FPREGS_SAVE_AREA
	ld	%f0,0(%r14)
	ld	%f1,8(%r14)
	ld	%f2,16(%r14)
	ld	%f3,24(%r14)
	ld	%f4,32(%r14)
	ld	%f5,40(%r14)
	ld	%f6,48(%r14)
	ld	%f7,56(%r14)
	ld	%f8,64(%r14)
	ld	%f9,72(%r14)
	ld	%f10,80(%r14)
	ld	%f11,88(%r14)
	ld	%f12,96(%r14)
	ld	%f13,104(%r14)
	ld	%f14,112(%r14)
	ld	%f15,120(%r14)
	j	1f
0:	VLM	%v0,%v15,0,%r11
	VLM	%v16,%v31,256,%r11
1:	lghi	%r14,__LC_CPU_TIMER_SAVE_AREA
	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_CPU_TIMER_VALID
	jo	3f
	la	%r14,__LC_SYNC_ENTER_TIMER
	clc	0(8,%r14),__LC_ASYNC_ENTER_TIMER
	jl	0f
	la	%r14,__LC_ASYNC_ENTER_TIMER
0:	clc	0(8,%r14),__LC_EXIT_TIMER
	jl	1f
	la	%r14,__LC_EXIT_TIMER
1:	clc	0(8,%r14),__LC_LAST_UPDATE_TIMER
	jl	2f
	la	%r14,__LC_LAST_UPDATE_TIMER
2:	spt	0(%r14)
	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
3:	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_PSW_MWP_VALID
	jno	.Lmcck_panic
	tmhh	%r8,0x0001		# interrupting from user ?
	jnz	4f
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_PSW_IA_VALID
	jno	.Lmcck_panic
4:	SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_MCCK_ENTER_TIMER
.Lmcck_skip:
	lghi	%r14,__LC_GPREGS_SAVE_AREA+64
	stmg	%r0,%r7,__PT_R0(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r2,%r2
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r10,%r10
	mvc	__PT_R8(64,%r11),0(%r14)
	stmg	%r8,%r9,__PT_PSW(%r11)
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,s390_do_machine_check
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	jno	.Lmcck_return
	lg	%r1,__LC_KERNEL_STACK	# switch to kernel stack
	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
	la	%r11,STACK_FRAME_OVERHEAD(%r1)
	lgr	%r15,%r1
	ssm	__LC_PGM_NEW_PSW	# turn dat on, keep irqs off
	TSTMSK	__LC_CPU_FLAGS,_CIF_MCCK_PENDING
	jno	.Lmcck_return
	TRACE_IRQS_OFF
	brasl	%r14,s390_handle_mcck
	TRACE_IRQS_ON
.Lmcck_return:
	lg	%r14,__LC_VDSO_PER_CPU
	lmg	%r0,%r10,__PT_R0(%r11)
	mvc	__LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
	tm	__LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
	jno	0f
	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
	stpt	__LC_EXIT_TIMER
	mvc	__VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
0:	lmg	%r11,%r15,__PT_R11(%r11)
	lpswe	__LC_RETURN_MCCK_PSW

.Lmcck_panic:
	lg	%r15,__LC_PANIC_STACK
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	j	.Lmcck_skip

#
# PSW restart interrupt handler
#
ENTRY(restart_int_handler)
	ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40
	stg	%r15,__LC_SAVE_AREA_RESTART
	lg	%r15,__LC_RESTART_STACK
	aghi	%r15,-__PT_SIZE			# create pt_regs on stack
	xc	0(__PT_SIZE,%r15),0(%r15)
	stmg	%r0,%r14,__PT_R0(%r15)
	mvc	__PT_R15(8,%r15),__LC_SAVE_AREA_RESTART
	mvc	__PT_PSW(16,%r15),__LC_RST_OLD_PSW # store restart old psw
	aghi	%r15,-STACK_FRAME_OVERHEAD	# create stack frame on stack
	xc	0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
	lg	%r1,__LC_RESTART_FN		# load fn, parm & source cpu
	lg	%r2,__LC_RESTART_DATA
	lg	%r3,__LC_RESTART_SOURCE
	ltgr	%r3,%r3				# test source cpu address
	jm	1f				# negative -> skip source stop
0:	sigp	%r4,%r3,SIGP_SENSE		# sigp sense to source cpu
	brc	10,0b				# wait for status stored
1:	basr	%r14,%r1			# call function
	stap	__SF_EMPTY(%r15)		# store cpu address
	llgh	%r3,__SF_EMPTY(%r15)
2:	sigp	%r4,%r3,SIGP_STOP		# sigp stop to current cpu
	brc	2,2b
3:	j	3b

	.section .kprobes.text, "ax"

#ifdef CONFIG_CHECK_STACK
/*
 * The synchronous or the asynchronous stack overflowed. We are dead.
 * No need to properly save the registers, we are going to panic anyway.
 * Set up a pt_regs so that show_trace can provide a good call trace.
 */
stack_overflow:
	lg	%r15,__LC_PANIC_STACK	# change to panic stack
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	stmg	%r8,%r9,__PT_PSW(%r11)
	mvc	__PT_R8(64,%r11),0(%r14)
	stg	%r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	jg	kernel_stack_overflow
#endif

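/*
 * cleanup_critical is entered with %r9 = address at which the critical
 * section was interrupted. If that address falls into one of the ranges
 * listed in .Lcleanup_table (or the SIE table below), the handlers roll
 * the interrupted state forward or backward to a safe restart point and
 * return the adjusted address in %r9.
 */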
cleanup_critical:
#if IS_ENABLED(CONFIG_KVM)
	clg	%r9,BASED(.Lcleanup_table_sie)	# .Lsie_gmap
	jl	0f
	clg	%r9,BASED(.Lcleanup_table_sie+8)# .Lsie_done
	jl	.Lcleanup_sie
#endif
	clg	%r9,BASED(.Lcleanup_table)	# system_call
	jl	0f
	clg	%r9,BASED(.Lcleanup_table+8)	# .Lsysc_do_svc
	jl	.Lcleanup_system_call
	clg	%r9,BASED(.Lcleanup_table+16)	# .Lsysc_tif
	jl	0f
	clg	%r9,BASED(.Lcleanup_table+24)	# .Lsysc_restore
	jl	.Lcleanup_sysc_tif
	clg	%r9,BASED(.Lcleanup_table+32)	# .Lsysc_done
	jl	.Lcleanup_sysc_restore
	clg	%r9,BASED(.Lcleanup_table+40)	# .Lio_tif
	jl	0f
	clg	%r9,BASED(.Lcleanup_table+48)	# .Lio_restore
	jl	.Lcleanup_io_tif
	clg	%r9,BASED(.Lcleanup_table+56)	# .Lio_done
	jl	.Lcleanup_io_restore
	clg	%r9,BASED(.Lcleanup_table+64)	# psw_idle
	jl	0f
	clg	%r9,BASED(.Lcleanup_table+72)	# .Lpsw_idle_end
	jl	.Lcleanup_idle
	clg	%r9,BASED(.Lcleanup_table+80)	# save_fpu_regs
	jl	0f
	clg	%r9,BASED(.Lcleanup_table+88)	# .Lsave_fpu_regs_end
	jl	.Lcleanup_save_fpu_regs
	clg	%r9,BASED(.Lcleanup_table+96)	# load_fpu_regs
	jl	0f
	clg	%r9,BASED(.Lcleanup_table+104)	# .Lload_fpu_regs_end
	jl	.Lcleanup_load_fpu_regs
0:	BR_R11USE_R14

	.align	8
.Lcleanup_table:
	.quad	system_call
	.quad	.Lsysc_do_svc
	.quad	.Lsysc_tif
	.quad	.Lsysc_restore
	.quad	.Lsysc_done
	.quad	.Lio_tif
	.quad	.Lio_restore
	.quad	.Lio_done
	.quad	psw_idle
	.quad	.Lpsw_idle_end
	.quad	save_fpu_regs
	.quad	.Lsave_fpu_regs_end
	.quad	load_fpu_regs
	.quad	.Lload_fpu_regs_end

#if IS_ENABLED(CONFIG_KVM)
.Lcleanup_table_sie:
	.quad	.Lsie_gmap
	.quad	.Lsie_done

.Lcleanup_sie:
	cghi	%r11,__LC_SAVE_AREA_ASYNC	# is this in normal interrupt ?
	je	1f
	slg	%r9,BASED(.Lsie_crit_mcck_start)
	clg	%r9,BASED(.Lsie_crit_mcck_length)
	jh	1f
	oi	__LC_CPU_FLAGS+7,_CIF_MCCK_GUEST
1:	BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
	lg	%r9,__SF_SIE_CONTROL(%r15)	# get control block pointer
	ni	__SIE_PROG0C+3(%r9),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
	larl	%r9,sie_exit			# skip forward to sie_exit
	BR_R11USE_R14
#endif

.Lcleanup_system_call:
	# check if stpt has been executed
	clg	%r9,BASED(.Lcleanup_system_call_insn)
	jh	0f
	mvc	__LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
	cghi	%r11,__LC_SAVE_AREA_ASYNC
	je	0f
	mvc	__LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER
0:	# check if stmg has been executed
	clg	%r9,BASED(.Lcleanup_system_call_insn+8)
	jh	0f
	mvc	__LC_SAVE_AREA_SYNC(64),0(%r11)
0:	# check if base register setup + TIF bit load has been done
	clg	%r9,BASED(.Lcleanup_system_call_insn+16)
	jhe	0f
	# set up saved register r12 task struct pointer
	stg	%r12,32(%r11)
	# set up saved register r13 __TASK_thread offset
	mvc	40(8,%r11),BASED(.Lcleanup_system_call_const)
0:	# check if the user time update has been done
	clg	%r9,BASED(.Lcleanup_system_call_insn+24)
	jh	0f
	lg	%r15,__LC_EXIT_TIMER
	slg	%r15,__LC_SYNC_ENTER_TIMER
	alg	%r15,__LC_USER_TIMER
	stg	%r15,__LC_USER_TIMER
0:	# check if the system time update has been done
	clg	%r9,BASED(.Lcleanup_system_call_insn+32)
	jh	0f
	lg	%r15,__LC_LAST_UPDATE_TIMER
	slg	%r15,__LC_EXIT_TIMER
	alg	%r15,__LC_SYSTEM_TIMER
	stg	%r15,__LC_SYSTEM_TIMER
0:	# update accounting time stamp
	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
	# set up saved register r11
	lg	%r15,__LC_KERNEL_STACK
	la	%r9,STACK_FRAME_OVERHEAD(%r15)
	stg	%r9,24(%r11)		# r11 pt_regs pointer
	# fill pt_regs
	mvc	__PT_R8(64,%r9),__LC_SAVE_AREA_SYNC
	stmg	%r0,%r7,__PT_R0(%r9)
	mvc	__PT_PSW(16,%r9),__LC_SVC_OLD_PSW
	mvc	__PT_INT_CODE(4,%r9),__LC_SVC_ILC
	xc	__PT_FLAGS(8,%r9),__PT_FLAGS(%r9)
	mvi	__PT_FLAGS+7(%r9),_PIF_SYSCALL
	# set up saved register r15
	stg	%r15,56(%r11)		# r15 stack pointer
	# set new psw address and exit
	larl	%r9,.Lsysc_do_svc
	BR_R11USE_R14
.Lcleanup_system_call_insn:
	.quad	system_call
	.quad	.Lsysc_stmg
	.quad	.Lsysc_per
	.quad	.Lsysc_vtime+36
	.quad	.Lsysc_vtime+42
.Lcleanup_system_call_const:
	.quad	__TASK_thread

.Lcleanup_sysc_tif:
	larl	%r9,.Lsysc_tif
	BR_R11USE_R14

.Lcleanup_sysc_restore:
	# check if stpt has been executed
	clg	%r9,BASED(.Lcleanup_sysc_restore_insn)
	jh	0f
	mvc	__LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
	cghi	%r11,__LC_SAVE_AREA_ASYNC
	je	0f
	mvc	__LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
0:	clg	%r9,BASED(.Lcleanup_sysc_restore_insn+8)
	je	1f
	lg	%r9,24(%r11)		# get saved pointer to pt_regs
	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r9)
	mvc	0(64,%r11),__PT_R8(%r9)
	lmg	%r0,%r7,__PT_R0(%r9)
1:	lmg	%r8,%r9,__LC_RETURN_PSW
	BR_R11USE_R14
.Lcleanup_sysc_restore_insn:
	.quad	.Lsysc_exit_timer
	.quad	.Lsysc_done - 4

.Lcleanup_io_tif:
	larl	%r9,.Lio_tif
	BR_R11USE_R14

.Lcleanup_io_restore:
	# check if stpt has been executed
	clg	%r9,BASED(.Lcleanup_io_restore_insn)
	jh	0f
	mvc	__LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
0:	clg	%r9,BASED(.Lcleanup_io_restore_insn+8)
	je	1f
	lg	%r9,24(%r11)		# get saved r11 pointer to pt_regs
	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r9)
	mvc	0(64,%r11),__PT_R8(%r9)
	lmg	%r0,%r7,__PT_R0(%r9)
1:	lmg	%r8,%r9,__LC_RETURN_PSW
	BR_R11USE_R14
.Lcleanup_io_restore_insn:
	.quad	.Lio_exit_timer
	.quad	.Lio_done - 4

.Lcleanup_idle:
	ni	__LC_CPU_FLAGS+7,255-_CIF_ENABLED_WAIT
	# copy interrupt clock & cpu timer
	mvc	__CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK
	mvc	__TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER
	cghi	%r11,__LC_SAVE_AREA_ASYNC
	je	0f
	mvc	__CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
	mvc	__TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER
0:	# check if stck & stpt have been executed
	clg	%r9,BASED(.Lcleanup_idle_insn)
	jhe	1f
	mvc	__CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2)
	mvc	__TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2)
1:	# calculate idle cycles
#ifdef CONFIG_SMP
	clg	%r9,BASED(.Lcleanup_idle_insn)
	jl	3f
	larl	%r1,smp_cpu_mtid
	llgf	%r1,0(%r1)
	ltgr	%r1,%r1
	jz	3f
	.insn	rsy,0xeb0000000017,%r1,5,__SF_EMPTY+80(%r15)
	larl	%r3,mt_cycles
	ag	%r3,__LC_PERCPU_OFFSET
	la	%r4,__SF_EMPTY+16(%r15)
2:	lg	%r0,0(%r3)
	slg	%r0,0(%r4)
	alg	%r0,64(%r4)
	stg	%r0,0(%r3)
	la	%r3,8(%r3)
	la	%r4,8(%r4)
	brct	%r1,2b
#endif
3:	# account system time going idle
	lg	%r9,__LC_STEAL_TIMER
	alg	%r9,__CLOCK_IDLE_ENTER(%r2)
	slg	%r9,__LC_LAST_UPDATE_CLOCK
	stg	%r9,__LC_STEAL_TIMER
	mvc	__LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2)
	lg	%r9,__LC_SYSTEM_TIMER
	alg	%r9,__LC_LAST_UPDATE_TIMER
	slg	%r9,__TIMER_IDLE_ENTER(%r2)
	stg	%r9,__LC_SYSTEM_TIMER
	mvc	__LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2)
	# prepare return psw
	nihh	%r8,0xfcfd		# clear irq & wait state bits
	lg	%r9,48(%r11)		# return from psw_idle
	BR_R11USE_R14
.Lcleanup_idle_insn:
	.quad	.Lpsw_idle_lpsw

.Lcleanup_save_fpu_regs:
	larl	%r9,save_fpu_regs
	BR_R11USE_R14

.Lcleanup_load_fpu_regs:
	larl	%r9,load_fpu_regs
	BR_R11USE_R14

/*
 * Integer constants
 */
	.align	8
.Lcritical_start:
	.quad	.L__critical_start
.Lcritical_length:
	.quad	.L__critical_end - .L__critical_start
#if IS_ENABLED(CONFIG_KVM)
.Lsie_critical_start:
	.quad	.Lsie_gmap
.Lsie_critical_length:
	.quad	.Lsie_done - .Lsie_gmap
.Lsie_crit_mcck_start:
	.quad	.Lsie_entry
.Lsie_crit_mcck_length:
	.quad	.Lsie_skip - .Lsie_entry
#endif
	.section .rodata, "a"
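/*
 * The syscall tables are generated by including asm/syscall_table.h
 * with SYSCALL() selecting either the 64-bit entry point (esame) or,
 * for CONFIG_COMPAT, the 31-bit emulation wrapper (emu).
 */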
#define SYSCALL(esame,emu)	.long esame
	.globl	sys_call_table
sys_call_table:
#include "asm/syscall_table.h"
#undef SYSCALL

#ifdef CONFIG_COMPAT

#define SYSCALL(esame,emu)	.long emu
	.globl	sys_call_table_emu
sys_call_table_emu:
#include "asm/syscall_table.h"
#undef SYSCALL
#endif
