/* SPDX-License-Identifier: GPL-2.0 */
/*
 *    S390 low-level entry points.
 *
 *    Copyright IBM Corp. 1999, 2012
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *		 Hartmut Penner (hp@de.ibm.com),
 *		 Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *		 Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/alternative-asm.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/ctl_reg.h>
#include <asm/dwarf.h>
#include <asm/errno.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/page.h>
#include <asm/sigp.h>
#include <asm/irq.h>
#include <asm/vx-insn.h>
#include <asm/setup.h>
#include <asm/nmi.h>
#include <asm/export.h>
#include <asm/nospec-insn.h>

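# Offsets of the saved general purpose registers %r0-%r15 within the
# pt_regs area that the entry code builds on the kernel stack
# (__PT_GPRS is generated by asm-offsets).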
__PT_R0      =	__PT_GPRS
__PT_R1      =	__PT_GPRS + 8
__PT_R2      =	__PT_GPRS + 16
__PT_R3      =	__PT_GPRS + 24
__PT_R4      =	__PT_GPRS + 32
__PT_R5      =	__PT_GPRS + 40
__PT_R6      =	__PT_GPRS + 48
__PT_R7      =	__PT_GPRS + 56
__PT_R8      =	__PT_GPRS + 64
__PT_R9      =	__PT_GPRS + 72
__PT_R10     =	__PT_GPRS + 80
__PT_R11     =	__PT_GPRS + 88
__PT_R12     =	__PT_GPRS + 96
__PT_R13     =	__PT_GPRS + 104
__PT_R14     =	__PT_GPRS + 112
__PT_R15     =	__PT_GPRS + 120

STACK_SHIFT = PAGE_SHIFT + THREAD_SIZE_ORDER
STACK_SIZE  = 1 << STACK_SHIFT
STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE

_TIF_WORK	= (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
		   _TIF_UPROBE | _TIF_GUARDED_STORAGE | _TIF_PATCH_PENDING)
_TIF_TRACE	= (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
		   _TIF_SYSCALL_TRACEPOINT)
_CIF_WORK	= (_CIF_FPU)
_PIF_WORK	= (_PIF_PER_TRAP | _PIF_SYSCALL_RESTART)

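# Lowcore location of the LPP (load program parameter) value, used by the
# ALTERNATIVE-patched ".insn s,0xb2800000,_LPP_OFFSET" sequences below
# (LPP, patched in only when facility 40 is installed).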
_LPP_OFFSET	= __LC_LPP

	.macro	TRACE_IRQS_ON
#ifdef CONFIG_TRACE_IRQFLAGS
	basr	%r2,%r0
	brasl	%r14,trace_hardirqs_on_caller
#endif
	.endm

	.macro	TRACE_IRQS_OFF
#ifdef CONFIG_TRACE_IRQFLAGS
	basr	%r2,%r0
	brasl	%r14,trace_hardirqs_off_caller
#endif
	.endm

	.macro	LOCKDEP_SYS_EXIT
#ifdef CONFIG_LOCKDEP
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	jz	.+10
	brasl	%r14,lockdep_sys_exit
#endif
	.endm

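# CHECK_STACK branches to stack_overflow if %r15 points into the guard
# area at the bottom of the stack: under the mask below, the low-order
# stack bits of a valid stack pointer are never all zero.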
	.macro	CHECK_STACK savearea
#ifdef CONFIG_CHECK_STACK
	tml	%r15,STACK_SIZE - CONFIG_STACK_GUARD
	lghi	%r14,\savearea
	jz	stack_overflow
#endif
	.endm

	.macro	DEBUG_USER_ASCE
#ifdef CONFIG_DEBUG_USER_ASCE
	brasl	%r14,debug_user_asce
#endif
	.endm

	.macro	CHECK_VMAP_STACK savearea,oklabel
#ifdef CONFIG_VMAP_STACK
	lgr	%r14,%r15
	nill	%r14,0x10000 - STACK_SIZE
	oill	%r14,STACK_INIT
	clg	%r14,__LC_KERNEL_STACK
	je	\oklabel
	clg	%r14,__LC_ASYNC_STACK
	je	\oklabel
	clg	%r14,__LC_NODAT_STACK
	je	\oklabel
	clg	%r14,__LC_RESTART_STACK
	je	\oklabel
	lghi	%r14,\savearea
	j	stack_overflow
#else
	j	\oklabel
#endif
	.endm

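# SWITCH_ASYNC: set up %r11/%r15 for an asynchronous interrupt. Interrupts
# from user space update the virtual timers and load the async stack.
# Interrupts from kernel space first clean up a possible SIE critical
# section, account idle time if the interrupt hit .Lpsw_idle_exit, and
# switch stacks only if we are not already running on the async stack.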
	.macro	SWITCH_ASYNC savearea,timer,clock
	tmhh	%r8,0x0001		# interrupting from user ?
	jnz	4f
#if IS_ENABLED(CONFIG_KVM)
	lgr	%r14,%r9
	larl	%r13,.Lsie_gmap
	slgr	%r14,%r13
	lghi	%r13,.Lsie_done - .Lsie_gmap
	clgr	%r14,%r13
	jhe	0f
	lghi	%r11,\savearea		# inside critical section, do cleanup
	brasl	%r14,.Lcleanup_sie
#endif
0:	larl	%r13,.Lpsw_idle_exit
	cgr	%r13,%r9
	jne	3f

	larl	%r1,smp_cpu_mtid
	llgf	%r1,0(%r1)
	ltgr	%r1,%r1
	jz	2f			# no SMT, skip mt_cycles calculation
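	# stcctm: store the current MT counter set (set 5) next to the values
	# saved at idle entry and accumulate the deltas into mt_cycles below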
	.insn	rsy,0xeb0000000017,%r1,5,__SF_EMPTY+80(%r15)
	larl	%r3,mt_cycles
	ag	%r3,__LC_PERCPU_OFFSET
	la	%r4,__SF_EMPTY+16(%r15)
1:	lg	%r0,0(%r3)
	slg	%r0,0(%r4)
	alg	%r0,64(%r4)
	stg	%r0,0(%r3)
	la	%r3,8(%r3)
	la	%r4,8(%r4)
	brct	%r1,1b

2:	mvc	__CLOCK_IDLE_EXIT(8,%r2), \clock
	mvc	__TIMER_IDLE_EXIT(8,%r2), \timer
	# account system time going idle
	ni	__LC_CPU_FLAGS+7,255-_CIF_ENABLED_WAIT

	lg	%r13,__LC_STEAL_TIMER
	alg	%r13,__CLOCK_IDLE_ENTER(%r2)
	slg	%r13,__LC_LAST_UPDATE_CLOCK
	stg	%r13,__LC_STEAL_TIMER

	mvc	__LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2)

	lg	%r13,__LC_SYSTEM_TIMER
	alg	%r13,__LC_LAST_UPDATE_TIMER
	slg	%r13,__TIMER_IDLE_ENTER(%r2)
	stg	%r13,__LC_SYSTEM_TIMER
	mvc	__LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2)

	nihh	%r8,0xfcfd		# clear wait state and irq bits
3:	lg	%r14,__LC_ASYNC_STACK	# are we already on the target stack?
	slgr	%r14,%r15
	srag	%r14,%r14,STACK_SHIFT
	jnz	5f
	CHECK_STACK \savearea
	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	j	6f
4:	UPDATE_VTIME %r14,%r15,\timer
	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
5:	lg	%r15,__LC_ASYNC_STACK	# load async stack
6:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	.endm

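# UPDATE_VTIME: account the time since the last update as user time (from
# __LC_EXIT_TIMER to \enter_timer) and system time (from
# __LC_LAST_UPDATE_TIMER to __LC_EXIT_TIMER). The CPU timer counts down,
# so each delta is computed as old value minus new value.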
	.macro UPDATE_VTIME w1,w2,enter_timer
	lg	\w1,__LC_EXIT_TIMER
	lg	\w2,__LC_LAST_UPDATE_TIMER
	slg	\w1,\enter_timer
	slg	\w2,__LC_EXIT_TIMER
	alg	\w1,__LC_USER_TIMER
	alg	\w2,__LC_SYSTEM_TIMER
	stg	\w1,__LC_USER_TIMER
	stg	\w2,__LC_SYSTEM_TIMER
	mvc	__LC_LAST_UPDATE_TIMER(8),\enter_timer
	.endm

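# RESTORE_SM_CLEAR_PER: reload the system mask from the saved PSW with the
# PER mask bit (0x40 in PSW byte 0) cleared, keeping PER events disabled.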
	.macro RESTORE_SM_CLEAR_PER
	stg	%r8,__LC_RETURN_PSW
	ni	__LC_RETURN_PSW,0xbf
	ssm	__LC_RETURN_PSW
	.endm

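# ENABLE_INTS/DISABLE_INTS set resp. clear the external and I/O interrupt
# mask bits (0x03) in the PSW system mask via stosm/stnsm.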
	.macro ENABLE_INTS
	stosm	__SF_EMPTY(%r15),3
	.endm

	.macro ENABLE_INTS_TRACE
	TRACE_IRQS_ON
	ENABLE_INTS
	.endm

	.macro DISABLE_INTS
	stnsm	__SF_EMPTY(%r15),0xfc
	.endm

	.macro DISABLE_INTS_TRACE
	DISABLE_INTS
	TRACE_IRQS_OFF
	.endm

	.macro	STCK savearea
#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
	.insn	s,0xb27c0000,\savearea		# store clock fast
#else
	.insn	s,0xb2050000,\savearea		# store clock
#endif
	.endm

	/*
	 * The TSTMSK macro generates a test-under-mask instruction by
	 * calculating the memory offset for the specified mask value.
	 * Mask value can be any constant.  The macro shifts the mask
	 * value to calculate the memory offset for the test-under-mask
	 * instruction.
	 */
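	/* Example: TSTMSK __LC_CPU_FLAGS,_CIF_FPU expands to a single tm
	 * instruction testing the _CIF_FPU bit in the correct byte of the
	 * 8-byte CPU flags field. */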
	.macro TSTMSK addr, mask, size=8, bytepos=0
		.if (\bytepos < \size) && (\mask >> 8)
			.if (\mask & 0xff)
				.error "Mask exceeds byte boundary"
			.endif
			TSTMSK \addr, "(\mask >> 8)", \size, "(\bytepos + 1)"
			.exitm
		.endif
		.ifeq \mask
			.error "Mask must not be zero"
		.endif
		off = \size - \bytepos - 1
		tm	off+\addr, \mask
	.endm

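# Branch prediction control, used for Spectre mitigation: the .long
# constants encode PPA (perform processor assist, opcode 0xb2e8) with
# function code 12 (0x...c..., prediction off) resp. 13 (0x...d..., on),
# patched in via ALTERNATIVE only if facility 82 is installed.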
	.macro BPOFF
	ALTERNATIVE "", ".long 0xb2e8c000", 82
	.endm

	.macro BPON
	ALTERNATIVE "", ".long 0xb2e8d000", 82
	.endm

	.macro BPENTER tif_ptr,tif_mask
	ALTERNATIVE "TSTMSK \tif_ptr,\tif_mask; jz .+8; .long 0xb2e8d000", \
		    "", 82
	.endm

	.macro BPEXIT tif_ptr,tif_mask
	TSTMSK	\tif_ptr,\tif_mask
	ALTERNATIVE "jz .+8;  .long 0xb2e8c000", \
		    "jnz .+8; .long 0xb2e8d000", 82
	.endm

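# Generate expoline thunks for the indirect branches done via BR_EX and
# BASR_EX below (the s390 retpoline-style Spectre v2 mitigation from
# asm/nospec-insn.h).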
	GEN_BR_THUNK %r9
	GEN_BR_THUNK %r14
	GEN_BR_THUNK %r14,%r11

	.section .kprobes.text, "ax"
.Ldummy:
	/*
	 * This nop exists only to avoid having __switch_to start at the
	 * beginning of the kprobes text section. In that case we would
	 * have several symbols at the same address and e.g. objdump would
	 * pick an arbitrary symbol name when disassembling this code.
	 * With the added nop in between, the __switch_to symbol is
	 * unique again.
	 */
	nop	0

ENTRY(__bpon)
	.globl __bpon
	BPON
	BR_EX	%r14
ENDPROC(__bpon)

/*
 * Scheduler resume function, called by switch_to
 *  gpr2 = (task_struct *) prev
 *  gpr3 = (task_struct *) next
 * Returns:
 *  gpr2 = prev
 */
ENTRY(__switch_to)
	stmg	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
	lghi	%r4,__TASK_stack
	lghi	%r1,__TASK_thread
	llill	%r5,STACK_INIT
	stg	%r15,__THREAD_ksp(%r1,%r2)	# store kernel stack of prev
	lg	%r15,0(%r4,%r3)			# start of kernel stack of next
	agr	%r15,%r5			# end of kernel stack of next
	stg	%r3,__LC_CURRENT		# store task struct of next
	stg	%r15,__LC_KERNEL_STACK		# store end of kernel stack
	lg	%r15,__THREAD_ksp(%r1,%r3)	# load kernel stack of next
	aghi	%r3,__TASK_pid
	mvc	__LC_CURRENT_PID(4,%r0),0(%r3)	# store pid of next
	lmg	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
	ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40
	BR_EX	%r14
ENDPROC(__switch_to)

#if IS_ENABLED(CONFIG_KVM)
/*
 * sie64a calling convention:
 * %r2 pointer to sie control block
 * %r3 guest register save area
 */
ENTRY(sie64a)
	stmg	%r6,%r14,__SF_GPRS(%r15)	# save kernel registers
	lg	%r12,__LC_CURRENT
	stg	%r2,__SF_SIE_CONTROL(%r15)	# save control block pointer
	stg	%r3,__SF_SIE_SAVEAREA(%r15)	# save guest register save area
	xc	__SF_SIE_REASON(8,%r15),__SF_SIE_REASON(%r15) # reason code = 0
	mvc	__SF_SIE_FLAGS(8,%r15),__TI_flags(%r12) # copy thread flags
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU		# load guest fp/vx registers ?
	jno	.Lsie_load_guest_gprs
	brasl	%r14,load_fpu_regs		# load guest fp/vx regs
.Lsie_load_guest_gprs:
	lmg	%r0,%r13,0(%r3)			# load guest gprs 0-13
	lg	%r14,__LC_GMAP			# get gmap pointer
	ltgr	%r14,%r14
	jz	.Lsie_gmap
	lctlg	%c1,%c1,__GMAP_ASCE(%r14)	# load primary asce
.Lsie_gmap:
	lg	%r14,__SF_SIE_CONTROL(%r15)	# get control block pointer
	oi	__SIE_PROG0C+3(%r14),1		# we are going into SIE now
	tm	__SIE_PROG20+3(%r14),3		# last exit...
	jnz	.Lsie_skip
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	jo	.Lsie_skip			# exit if fp/vx regs changed
	BPEXIT	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
.Lsie_entry:
	sie	0(%r14)
	BPOFF
	BPENTER	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
.Lsie_skip:
	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_KERNEL_ASCE	# load primary asce
.Lsie_done:
# some program checks are suppressing. C code (e.g. do_protection_exception)
# will rewind the PSW by the ILC, which is often 4 bytes in case of SIE. There
# are some corner cases (e.g. runtime instrumentation) where ILC is unpredictable.
# Other instructions between sie64a and .Lsie_done should not cause program
# interrupts. So let's use 3 nops as a landing pad for all possible rewinds.
# See also .Lcleanup_sie
.Lrewind_pad6:
	nopr	7
.Lrewind_pad4:
	nopr	7
.Lrewind_pad2:
	nopr	7
	.globl sie_exit
sie_exit:
	lg	%r14,__SF_SIE_SAVEAREA(%r15)	# load guest register save area
	stmg	%r0,%r13,0(%r14)		# save guest gprs 0-13
	xgr	%r0,%r0				# clear guest registers to
	xgr	%r1,%r1				# prevent speculative use
	xgr	%r2,%r2
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	lmg	%r6,%r14,__SF_GPRS(%r15)	# restore kernel registers
	lg	%r2,__SF_SIE_REASON(%r15)	# return exit reason code
	BR_EX	%r14
.Lsie_fault:
	lghi	%r14,-EFAULT
	stg	%r14,__SF_SIE_REASON(%r15)	# set exit reason code
	j	sie_exit

	EX_TABLE(.Lrewind_pad6,.Lsie_fault)
	EX_TABLE(.Lrewind_pad4,.Lsie_fault)
	EX_TABLE(.Lrewind_pad2,.Lsie_fault)
	EX_TABLE(sie_exit,.Lsie_fault)
ENDPROC(sie64a)
EXPORT_SYMBOL(sie64a)
EXPORT_SYMBOL(sie_exit)
#endif

/*
 * SVC interrupt handler routine. System calls are synchronous events and
 * are entered with interrupts disabled.
 */

ENTRY(system_call)
	stpt	__LC_SYNC_ENTER_TIMER
	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
	BPOFF
	lg	%r12,__LC_CURRENT
	lghi	%r14,_PIF_SYSCALL
.Lsysc_per:
	lctlg	%c1,%c1,__LC_KERNEL_ASCE
	lghi	%r13,__TASK_thread
	lg	%r15,__LC_KERNEL_STACK
	la	%r11,STACK_FRAME_OVERHEAD(%r15)	# pointer to pt_regs
	UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER
	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
	stmg	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
	mvc	__PT_PSW(16,%r11),__LC_SVC_OLD_PSW
	mvc	__PT_INT_CODE(4,%r11),__LC_SVC_ILC
	stg	%r14,__PT_FLAGS(%r11)
	ENABLE_INTS
.Lsysc_do_svc:
	# clear user controlled register to prevent speculative use
	xgr	%r0,%r0
	# load address of system call table
	lg	%r10,__THREAD_sysc_table(%r13,%r12)
	llgh	%r8,__PT_INT_CODE+2(%r11)
	slag	%r8,%r8,3			# shift and test for svc 0
	jnz	.Lsysc_nr_ok
	# svc 0: system call number in %r1
	llgfr	%r1,%r1				# clear high word in r1
	sth	%r1,__PT_INT_CODE+2(%r11)
	cghi	%r1,NR_syscalls
	jnl	.Lsysc_nr_ok
	slag	%r8,%r1,3
.Lsysc_nr_ok:
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	stg	%r2,__PT_ORIG_GPR2(%r11)
	stg	%r7,STACK_FRAME_OVERHEAD(%r15)
	lg	%r9,0(%r8,%r10)			# get system call address
	TSTMSK	__TI_flags(%r12),_TIF_TRACE
	jnz	.Lsysc_tracesys
	BASR_EX	%r14,%r9			# call sys_xxxx
	stg	%r2,__PT_R2(%r11)		# store return value

.Lsysc_return:
#ifdef CONFIG_DEBUG_RSEQ
	lgr	%r2,%r11
	brasl	%r14,rseq_syscall
#endif
	LOCKDEP_SYS_EXIT
.Lsysc_tif:
	DISABLE_INTS
	TSTMSK	__PT_FLAGS(%r11),_PIF_WORK
	jnz	.Lsysc_work
	TSTMSK	__TI_flags(%r12),_TIF_WORK
	jnz	.Lsysc_work			# check for work
	DEBUG_USER_ASCE
	lctlg	%c1,%c1,__LC_USER_ASCE
	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
	TSTMSK	__LC_CPU_FLAGS, _CIF_FPU
	jz	.Lsysc_skip_fpu
	brasl	%r14,load_fpu_regs
.Lsysc_skip_fpu:
	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
	stpt	__LC_EXIT_TIMER
	lmg	%r0,%r15,__PT_R0(%r11)
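	# branch to the lpswe instruction that the kernel placed in lowcore;
	# it loads __LC_RETURN_PSW and returns to the interrupted context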
	b	__LC_RETURN_LPSWE

#
# One of the work bits is on. Find out which one.
#
.Lsysc_work:
	ENABLE_INTS
	TSTMSK	__TI_flags(%r12),_TIF_NEED_RESCHED
	jo	.Lsysc_reschedule
	TSTMSK	__PT_FLAGS(%r11),_PIF_SYSCALL_RESTART
	jo	.Lsysc_syscall_restart
#ifdef CONFIG_UPROBES
	TSTMSK	__TI_flags(%r12),_TIF_UPROBE
	jo	.Lsysc_uprobe_notify
#endif
	TSTMSK	__TI_flags(%r12),_TIF_GUARDED_STORAGE
	jo	.Lsysc_guarded_storage
	TSTMSK	__PT_FLAGS(%r11),_PIF_PER_TRAP
	jo	.Lsysc_singlestep
#ifdef CONFIG_LIVEPATCH
	TSTMSK	__TI_flags(%r12),_TIF_PATCH_PENDING
	jo	.Lsysc_patch_pending	# handle live patching just before
					# signals and possible syscall restart
#endif
	TSTMSK	__PT_FLAGS(%r11),_PIF_SYSCALL_RESTART
	jo	.Lsysc_syscall_restart
	TSTMSK	__TI_flags(%r12),_TIF_SIGPENDING
	jo	.Lsysc_sigpending
	TSTMSK	__TI_flags(%r12),_TIF_NOTIFY_RESUME
	jo	.Lsysc_notify_resume
	j	.Lsysc_return

#
# _TIF_NEED_RESCHED is set, call schedule
#
.Lsysc_reschedule:
	larl	%r14,.Lsysc_return
	jg	schedule

#
# _TIF_SIGPENDING is set, call do_signal
#
.Lsysc_sigpending:
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,do_signal
	TSTMSK	__PT_FLAGS(%r11),_PIF_SYSCALL
	jno	.Lsysc_return
.Lsysc_do_syscall:
	lghi	%r13,__TASK_thread
	lmg	%r2,%r7,__PT_R2(%r11)	# load svc arguments
	lghi	%r1,0			# svc 0 returns -ENOSYS
	j	.Lsysc_do_svc

#
# _TIF_NOTIFY_RESUME is set, call do_notify_resume
#
.Lsysc_notify_resume:
	lgr	%r2,%r11		# pass pointer to pt_regs
	larl	%r14,.Lsysc_return
	jg	do_notify_resume

#
# _TIF_UPROBE is set, call uprobe_notify_resume
#
#ifdef CONFIG_UPROBES
.Lsysc_uprobe_notify:
	lgr	%r2,%r11		# pass pointer to pt_regs
	larl	%r14,.Lsysc_return
	jg	uprobe_notify_resume
#endif

#
# _TIF_GUARDED_STORAGE is set, call guarded_storage_load
#
.Lsysc_guarded_storage:
	lgr	%r2,%r11		# pass pointer to pt_regs
	larl	%r14,.Lsysc_return
	jg	gs_load_bc_cb
#
# _TIF_PATCH_PENDING is set, call klp_update_patch_state
#
#ifdef CONFIG_LIVEPATCH
.Lsysc_patch_pending:
	lg	%r2,__LC_CURRENT	# pass pointer to task struct
	larl	%r14,.Lsysc_return
	jg	klp_update_patch_state
#endif

#
# _PIF_PER_TRAP is set, call do_per_trap
#
.Lsysc_singlestep:
	ni	__PT_FLAGS+7(%r11),255-_PIF_PER_TRAP
	lgr	%r2,%r11		# pass pointer to pt_regs
	larl	%r14,.Lsysc_return
	jg	do_per_trap

#
# _PIF_SYSCALL_RESTART is set, repeat the current system call
#
.Lsysc_syscall_restart:
	ni	__PT_FLAGS+7(%r11),255-_PIF_SYSCALL_RESTART
	lmg	%r1,%r7,__PT_R1(%r11)	# load svc arguments
	lg	%r2,__PT_ORIG_GPR2(%r11)
	j	.Lsysc_do_svc

#
# call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
# and after the system call
#
.Lsysc_tracesys:
	lgr	%r2,%r11		# pass pointer to pt_regs
	la	%r3,0
	llgh	%r0,__PT_INT_CODE+2(%r11)
	stg	%r0,__PT_R2(%r11)
	brasl	%r14,do_syscall_trace_enter
	lghi	%r0,NR_syscalls
	clgr	%r0,%r2
	jnh	.Lsysc_tracenogo
	sllg	%r8,%r2,3
	lg	%r9,0(%r8,%r10)
	lmg	%r3,%r7,__PT_R3(%r11)
	stg	%r7,STACK_FRAME_OVERHEAD(%r15)
	lg	%r2,__PT_ORIG_GPR2(%r11)
	BASR_EX	%r14,%r9		# call sys_xxx
	stg	%r2,__PT_R2(%r11)	# store return value
.Lsysc_tracenogo:
	TSTMSK	__TI_flags(%r12),_TIF_TRACE
	jz	.Lsysc_return
	lgr	%r2,%r11		# pass pointer to pt_regs
	larl	%r14,.Lsysc_return
	jg	do_syscall_trace_exit
ENDPROC(system_call)

#
# a new process exits the kernel with ret_from_fork
#
ENTRY(ret_from_fork)
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	lg	%r12,__LC_CURRENT
	brasl	%r14,schedule_tail
	tm	__PT_PSW+1(%r11),0x01	# forking a kernel thread ?
	jne	.Lsysc_tracenogo
	# it's a kernel thread
	lmg	%r9,%r10,__PT_R9(%r11)	# load gprs
	la	%r2,0(%r10)
	BASR_EX	%r14,%r9
	j	.Lsysc_tracenogo
ENDPROC(ret_from_fork)

ENTRY(kernel_thread_starter)
	la	%r2,0(%r10)
	BASR_EX	%r14,%r9
	j	.Lsysc_tracenogo
ENDPROC(kernel_thread_starter)

/*
 * Program check handler routine
 */

ENTRY(pgm_check_handler)
	stpt	__LC_SYNC_ENTER_TIMER
	BPOFF
	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
	lg	%r10,__LC_LAST_BREAK
	srag	%r11,%r10,12
	jnz	0f
	/* if __LC_LAST_BREAK is < 4096, it contains one of
	 * the lpswe addresses in lowcore. Set it to 1 (initial state)
	 * to prevent leaking that address to userspace.
	 */
	lghi	%r10,1
0:	lg	%r12,__LC_CURRENT
	lghi	%r11,0
	lmg	%r8,%r9,__LC_PGM_OLD_PSW
	tmhh	%r8,0x0001		# coming from user space?
	jno	.Lpgm_skip_asce
	lctlg	%c1,%c1,__LC_KERNEL_ASCE
	j	3f
.Lpgm_skip_asce:
#if IS_ENABLED(CONFIG_KVM)
	# cleanup critical section for program checks in sie64a
	lgr	%r14,%r9
	larl	%r13,.Lsie_gmap
	slgr	%r14,%r13
	lghi	%r13,.Lsie_done - .Lsie_gmap
	clgr	%r14,%r13
	jhe	1f
	lg	%r14,__SF_SIE_CONTROL(%r15)	# get control block pointer
	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_KERNEL_ASCE	# load primary asce
	larl	%r9,sie_exit			# skip forward to sie_exit
	lghi	%r11,_PIF_GUEST_FAULT
#endif
1:	tmhh	%r8,0x4000		# PER bit set in old PSW ?
	jnz	2f			# -> enabled, can't be a double fault
	tm	__LC_PGM_ILC+3,0x80	# check for per exception
	jnz	.Lpgm_svcper		# -> single stepped svc
2:	CHECK_STACK __LC_SAVE_AREA_SYNC
	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	# CHECK_VMAP_STACK branches to stack_overflow or 5f
	CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,5f
3:	UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
	lg	%r15,__LC_KERNEL_STACK
	lgr	%r14,%r12
	aghi	%r14,__TASK_thread	# pointer to thread_struct
	lghi	%r13,__LC_PGM_TDB
	tm	__LC_PGM_ILC+2,0x02	# check for transaction abort
	jz	4f
	mvc	__THREAD_trap_tdb(256,%r14),0(%r13)
4:	stg	%r10,__THREAD_last_break(%r14)
5:	lgr	%r13,%r11
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r2,%r2
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
	stmg	%r8,%r9,__PT_PSW(%r11)
	mvc	__PT_INT_CODE(4,%r11),__LC_PGM_ILC
	mvc	__PT_INT_PARM_LONG(8,%r11),__LC_TRANS_EXC_CODE
	stg	%r13,__PT_FLAGS(%r11)
	stg	%r10,__PT_ARGS(%r11)
	tm	__LC_PGM_ILC+3,0x80	# check for per exception
	jz	6f
	tmhh	%r8,0x0001		# kernel per event ?
	jz	.Lpgm_kprobe
	oi	__PT_FLAGS+7(%r11),_PIF_PER_TRAP
	mvc	__THREAD_per_address(8,%r14),__LC_PER_ADDRESS
	mvc	__THREAD_per_cause(2,%r14),__LC_PER_CODE
	mvc	__THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID
6:	RESTORE_SM_CLEAR_PER
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	larl	%r1,pgm_check_table
	llgh	%r10,__PT_INT_CODE+2(%r11)
	nill	%r10,0x007f
	sll	%r10,3
	je	.Lpgm_return
	lg	%r9,0(%r10,%r1)		# load address of handler routine
	lgr	%r2,%r11		# pass pointer to pt_regs
	BASR_EX	%r14,%r9		# branch to interrupt-handler
.Lpgm_return:
	LOCKDEP_SYS_EXIT
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	jno	.Lpgm_restore
	TSTMSK	__PT_FLAGS(%r11),_PIF_SYSCALL
	jo	.Lsysc_do_syscall
	j	.Lsysc_tif
.Lpgm_restore:
	DISABLE_INTS
	TSTMSK	__LC_CPU_FLAGS, _CIF_FPU
	jz	.Lpgm_skip_fpu
	brasl	%r14,load_fpu_regs
.Lpgm_skip_fpu:
	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
	stpt	__LC_EXIT_TIMER
	lmg	%r0,%r15,__PT_R0(%r11)
	b	__LC_RETURN_LPSWE

#
# PER event in supervisor state, must be kprobes
#
.Lpgm_kprobe:
	RESTORE_SM_CLEAR_PER
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,do_per_trap
	j	.Lpgm_return

#
# single stepped system call
#
.Lpgm_svcper:
	mvc	__LC_RETURN_PSW(8),__LC_SVC_NEW_PSW
	larl	%r14,.Lsysc_per
	stg	%r14,__LC_RETURN_PSW+8
	lghi	%r14,_PIF_SYSCALL | _PIF_PER_TRAP
	lpswe	__LC_RETURN_PSW		# branch to .Lsysc_per
ENDPROC(pgm_check_handler)

/*
 * IO interrupt handler routine
 */
ENTRY(io_int_handler)
	STCK	__LC_INT_CLOCK
	stpt	__LC_ASYNC_ENTER_TIMER
	BPOFF
	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
	lg	%r12,__LC_CURRENT
	lmg	%r8,%r9,__LC_IO_OLD_PSW
	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER,__LC_INT_CLOCK
	stmg	%r0,%r7,__PT_R0(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r2,%r2
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r10,%r10
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
	stmg	%r8,%r9,__PT_PSW(%r11)
	tm	__PT_PSW+1(%r11),0x01	# coming from user space?
	jno	.Lio_skip_asce
	lctlg	%c1,%c1,__LC_KERNEL_ASCE
.Lio_skip_asce:
	mvc	__PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	TSTMSK	__LC_CPU_FLAGS,_CIF_IGNORE_IRQ
	jo	.Lio_restore
	TRACE_IRQS_OFF
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
.Lio_loop:
	lgr	%r2,%r11		# pass pointer to pt_regs
	lghi	%r3,IO_INTERRUPT
	tm	__PT_INT_CODE+8(%r11),0x80	# adapter interrupt ?
	jz	.Lio_call
	lghi	%r3,THIN_INTERRUPT
.Lio_call:
	brasl	%r14,do_IRQ
	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_LPAR
	jz	.Lio_return
	tpi	0
	jz	.Lio_return
	mvc	__PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
	j	.Lio_loop
.Lio_return:
	LOCKDEP_SYS_EXIT
	TSTMSK	__TI_flags(%r12),_TIF_WORK
	jnz	.Lio_work		# there is work to do (signals etc.)
	TSTMSK	__LC_CPU_FLAGS,_CIF_WORK
	jnz	.Lio_work
.Lio_restore:
	TRACE_IRQS_ON
	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	jno	.Lio_exit_kernel
	DEBUG_USER_ASCE
	lctlg	%c1,%c1,__LC_USER_ASCE
	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
	stpt	__LC_EXIT_TIMER
.Lio_exit_kernel:
	lmg	%r0,%r15,__PT_R0(%r11)
	b	__LC_RETURN_LPSWE
.Lio_done:

#
# There is work to do, find out in which context we have been interrupted:
# 1) if we return to user space we can do all _TIF_WORK work
# 2) if we return to kernel code and kvm is enabled, check if we need to
#    modify the psw to leave SIE
# 3) if we return to kernel code and preemptive scheduling is enabled, check
#    the preemption counter and if it is zero call preempt_schedule_irq
# Before any work can be done, a switch to the kernel stack is required.
#
.Lio_work:
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	jo	.Lio_work_user		# yes -> do resched & signal
#ifdef CONFIG_PREEMPTION
	# check for preemptive scheduling
	icm	%r0,15,__LC_PREEMPT_COUNT
	jnz	.Lio_restore		# preemption is disabled
	TSTMSK	__TI_flags(%r12),_TIF_NEED_RESCHED
	jno	.Lio_restore
	# switch to kernel stack
	lg	%r1,__PT_R15(%r11)
	aghi	%r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
	la	%r11,STACK_FRAME_OVERHEAD(%r1)
	lgr	%r15,%r1
	brasl	%r14,preempt_schedule_irq
	j	.Lio_return
#else
	j	.Lio_restore
#endif

#
# Need to do work before returning to userspace, switch to kernel stack
#
.Lio_work_user:
	lg	%r1,__LC_KERNEL_STACK
	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
	la	%r11,STACK_FRAME_OVERHEAD(%r1)
	lgr	%r15,%r1

#
# One of the work bits is on. Find out which one.
#
	TSTMSK	__TI_flags(%r12),_TIF_NEED_RESCHED
	jo	.Lio_reschedule
#ifdef CONFIG_LIVEPATCH
	TSTMSK	__TI_flags(%r12),_TIF_PATCH_PENDING
	jo	.Lio_patch_pending
#endif
	TSTMSK	__TI_flags(%r12),_TIF_SIGPENDING
	jo	.Lio_sigpending
	TSTMSK	__TI_flags(%r12),_TIF_NOTIFY_RESUME
	jo	.Lio_notify_resume
	TSTMSK	__TI_flags(%r12),_TIF_GUARDED_STORAGE
	jo	.Lio_guarded_storage
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	jo	.Lio_vxrs
	j	.Lio_return

#
# CIF_FPU is set, restore floating-point controls and floating-point registers.
#
.Lio_vxrs:
	larl	%r14,.Lio_return
	jg	load_fpu_regs

#
# _TIF_GUARDED_STORAGE is set, call guarded_storage_load
#
.Lio_guarded_storage:
	ENABLE_INTS_TRACE
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,gs_load_bc_cb
	DISABLE_INTS_TRACE
	j	.Lio_return

#
# _TIF_NEED_RESCHED is set, call schedule
#
.Lio_reschedule:
	ENABLE_INTS_TRACE
	brasl	%r14,schedule		# call scheduler
	DISABLE_INTS_TRACE
	j	.Lio_return

#
# _TIF_PATCH_PENDING is set, call klp_update_patch_state
#
#ifdef CONFIG_LIVEPATCH
.Lio_patch_pending:
	lg	%r2,__LC_CURRENT	# pass pointer to task struct
	larl	%r14,.Lio_return
	jg	klp_update_patch_state
#endif

#
# _TIF_SIGPENDING is set, call do_signal
#
.Lio_sigpending:
	ENABLE_INTS_TRACE
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,do_signal
	DISABLE_INTS_TRACE
	j	.Lio_return

#
# _TIF_NOTIFY_RESUME is set, call do_notify_resume
#
.Lio_notify_resume:
	ENABLE_INTS_TRACE
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,do_notify_resume
	DISABLE_INTS_TRACE
	j	.Lio_return
ENDPROC(io_int_handler)

/*
 * External interrupt handler routine
 */
ENTRY(ext_int_handler)
	STCK	__LC_INT_CLOCK
	stpt	__LC_ASYNC_ENTER_TIMER
	BPOFF
	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
	lg	%r12,__LC_CURRENT
	lmg	%r8,%r9,__LC_EXT_OLD_PSW
	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER,__LC_INT_CLOCK
	stmg	%r0,%r7,__PT_R0(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r2,%r2
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r10,%r10
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
	stmg	%r8,%r9,__PT_PSW(%r11)
	tm	__PT_PSW+1(%r11),0x01	# coming from user space?
	jno	.Lext_skip_asce
	lctlg	%c1,%c1,__LC_KERNEL_ASCE
.Lext_skip_asce:
	lghi	%r1,__LC_EXT_PARAMS2
	mvc	__PT_INT_CODE(4,%r11),__LC_EXT_CPU_ADDR
	mvc	__PT_INT_PARM(4,%r11),__LC_EXT_PARAMS
	mvc	__PT_INT_PARM_LONG(8,%r11),0(%r1)
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	TSTMSK	__LC_CPU_FLAGS,_CIF_IGNORE_IRQ
	jo	.Lio_restore
	TRACE_IRQS_OFF
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	lghi	%r3,EXT_INTERRUPT
	brasl	%r14,do_IRQ
	j	.Lio_return
ENDPROC(ext_int_handler)

/*
 * Load idle PSW.
 */
ENTRY(psw_idle)
	stg	%r3,__SF_EMPTY(%r15)
	larl	%r1,.Lpsw_idle_exit
	stg	%r1,__SF_EMPTY+8(%r15)
	larl	%r1,smp_cpu_mtid
	llgf	%r1,0(%r1)
	ltgr	%r1,%r1
	jz	.Lpsw_idle_stcctm
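	# stcctm: save the current MT counter set (set 5) at idle entry;
	# SWITCH_ASYNC uses these values for the mt_cycles update on idle exit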
	.insn	rsy,0xeb0000000017,%r1,5,__SF_EMPTY+16(%r15)
.Lpsw_idle_stcctm:
	oi	__LC_CPU_FLAGS+7,_CIF_ENABLED_WAIT
	BPON
	STCK	__CLOCK_IDLE_ENTER(%r2)
	stpt	__TIMER_IDLE_ENTER(%r2)
	lpswe	__SF_EMPTY(%r15)
.Lpsw_idle_exit:
	BR_EX	%r14
ENDPROC(psw_idle)

/*
 * Store floating-point controls and floating-point or vector registers,
 * depending on whether the vector facility is available. A critical
 * section cleanup ensures that the registers are stored even if
 * interrupted for some other work. The CIF_FPU flag is set to trigger
 * a lazy restore of the register contents at return from io or a
 * system call.
 */
ENTRY(save_fpu_regs)
	stnsm	__SF_EMPTY(%r15),0xfc
	lg	%r2,__LC_CURRENT
	aghi	%r2,__TASK_thread
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	jo	.Lsave_fpu_regs_exit
	stfpc	__THREAD_FPU_fpc(%r2)
	lg	%r3,__THREAD_FPU_regs(%r2)
	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_VX
	jz	.Lsave_fpu_regs_fp	  # no -> store FP regs
	VSTM	%v0,%v15,0,%r3		  # vstm 0,15,0(3)
	VSTM	%v16,%v31,256,%r3	  # vstm 16,31,256(3)
	j	.Lsave_fpu_regs_done	  # -> set CIF_FPU flag
.Lsave_fpu_regs_fp:
	std	0,0(%r3)
	std	1,8(%r3)
	std	2,16(%r3)
	std	3,24(%r3)
	std	4,32(%r3)
	std	5,40(%r3)
	std	6,48(%r3)
	std	7,56(%r3)
	std	8,64(%r3)
	std	9,72(%r3)
	std	10,80(%r3)
	std	11,88(%r3)
	std	12,96(%r3)
	std	13,104(%r3)
	std	14,112(%r3)
	std	15,120(%r3)
.Lsave_fpu_regs_done:
	oi	__LC_CPU_FLAGS+7,_CIF_FPU
.Lsave_fpu_regs_exit:
	ssm	__SF_EMPTY(%r15)
	BR_EX	%r14
.Lsave_fpu_regs_end:
ENDPROC(save_fpu_regs)
EXPORT_SYMBOL(save_fpu_regs)

/*
 * Load floating-point controls and floating-point or vector registers.
 * A critical section cleanup ensures that the register contents are
 * loaded even if interrupted for some other work.
 *
 * There are special calling conventions to fit into sysc and io return work:
 *	%r15:	<kernel stack>
 * The function requires:
 *	%r4	(clobbered, used as scratch register)
 */
load_fpu_regs:
	stnsm	__SF_EMPTY(%r15),0xfc
	lg	%r4,__LC_CURRENT
	aghi	%r4,__TASK_thread
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	jno	.Lload_fpu_regs_exit
	lfpc	__THREAD_FPU_fpc(%r4)
	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_VX
	lg	%r4,__THREAD_FPU_regs(%r4)	# %r4 <- reg save area
	jz	.Lload_fpu_regs_fp		# -> no VX, load FP regs
	VLM	%v0,%v15,0,%r4
	VLM	%v16,%v31,256,%r4
	j	.Lload_fpu_regs_done
.Lload_fpu_regs_fp:
	ld	0,0(%r4)
	ld	1,8(%r4)
	ld	2,16(%r4)
	ld	3,24(%r4)
	ld	4,32(%r4)
	ld	5,40(%r4)
	ld	6,48(%r4)
	ld	7,56(%r4)
	ld	8,64(%r4)
	ld	9,72(%r4)
	ld	10,80(%r4)
	ld	11,88(%r4)
	ld	12,96(%r4)
	ld	13,104(%r4)
	ld	14,112(%r4)
	ld	15,120(%r4)
.Lload_fpu_regs_done:
	ni	__LC_CPU_FLAGS+7,255-_CIF_FPU
.Lload_fpu_regs_exit:
	ssm	__SF_EMPTY(%r15)
	BR_EX	%r14
.Lload_fpu_regs_end:
ENDPROC(load_fpu_regs)

/*
 * Machine check handler routines
 */
ENTRY(mcck_int_handler)
	STCK	__LC_MCCK_CLOCK
	BPOFF
	la	%r1,4095		# validate r1
	spt	__LC_CPU_TIMER_SAVE_AREA-4095(%r1)	# validate cpu timer
	sckc	__LC_CLOCK_COMPARATOR			# validate comparator
	lam	%a0,%a15,__LC_AREGS_SAVE_AREA-4095(%r1) # validate acrs
	lmg	%r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# validate gprs
	lg	%r12,__LC_CURRENT
	lmg	%r8,%r9,__LC_MCK_OLD_PSW
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_SYSTEM_DAMAGE
	jo	.Lmcck_panic		# yes -> rest of mcck code invalid
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_CR_VALID
	jno	.Lmcck_panic		# control registers invalid -> panic
	la	%r14,4095
	lctlg	%c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r14) # validate ctl regs
	ptlb
	lg	%r11,__LC_MCESAD-4095(%r14) # extended machine check save area
	nill	%r11,0xfc00		# MCESA_ORIGIN_MASK
	TSTMSK	__LC_CREGS_SAVE_AREA+16-4095(%r14),CR2_GUARDED_STORAGE
	jno	0f
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_GS_VALID
	jno	0f
	.insn	rxy,0xe3000000004d,0,__MCESA_GS_SAVE_AREA(%r11) # LGSC
0:	l	%r14,__LC_FP_CREG_SAVE_AREA-4095(%r14)
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_FC_VALID
	jo	0f
	sr	%r14,%r14
0:	sfpc	%r14
	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_VX
	jo	0f
	lghi	%r14,__LC_FPREGS_SAVE_AREA
	ld	%f0,0(%r14)
	ld	%f1,8(%r14)
	ld	%f2,16(%r14)
	ld	%f3,24(%r14)
	ld	%f4,32(%r14)
	ld	%f5,40(%r14)
	ld	%f6,48(%r14)
	ld	%f7,56(%r14)
	ld	%f8,64(%r14)
	ld	%f9,72(%r14)
	ld	%f10,80(%r14)
	ld	%f11,88(%r14)
	ld	%f12,96(%r14)
	ld	%f13,104(%r14)
	ld	%f14,112(%r14)
	ld	%f15,120(%r14)
	j	1f
0:	VLM	%v0,%v15,0,%r11
	VLM	%v16,%v31,256,%r11
1:	lghi	%r14,__LC_CPU_TIMER_SAVE_AREA
	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_CPU_TIMER_VALID
	jo	3f
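	# CPU timer value is invalid: reload the timer from the most recently
	# saved candidate below (the CPU timer counts down, so the smallest
	# of the saved values is the most recent one)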
	la	%r14,__LC_SYNC_ENTER_TIMER
	clc	0(8,%r14),__LC_ASYNC_ENTER_TIMER
	jl	0f
	la	%r14,__LC_ASYNC_ENTER_TIMER
0:	clc	0(8,%r14),__LC_EXIT_TIMER
	jl	1f
	la	%r14,__LC_EXIT_TIMER
1:	clc	0(8,%r14),__LC_LAST_UPDATE_TIMER
	jl	2f
	la	%r14,__LC_LAST_UPDATE_TIMER
2:	spt	0(%r14)
	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
3:	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_PSW_MWP_VALID
	jno	.Lmcck_panic
	tmhh	%r8,0x0001		# interrupting from user ?
	jnz	4f
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_PSW_IA_VALID
	jno	.Lmcck_panic
4:	ssm	__LC_PGM_NEW_PSW	# turn dat on, keep irqs off
	SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_MCCK_ENTER_TIMER,__LC_MCCK_CLOCK
.Lmcck_skip:
	lghi	%r14,__LC_GPREGS_SAVE_AREA+64
	stmg	%r0,%r7,__PT_R0(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r2,%r2
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r10,%r10
	mvc	__PT_R8(64,%r11),0(%r14)
	stmg	%r8,%r9,__PT_PSW(%r11)
	la	%r14,4095
	mvc	__PT_CR1(8,%r11),__LC_CREGS_SAVE_AREA-4095+8(%r14)
	lctlg	%c1,%c1,__LC_KERNEL_ASCE
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,s390_do_machine_check
	cghi	%r2,0
	je	.Lmcck_return
	lg	%r1,__LC_KERNEL_STACK	# switch to kernel stack
	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
	la	%r11,STACK_FRAME_OVERHEAD(%r1)
	lgr	%r15,%r1
	TRACE_IRQS_OFF
	brasl	%r14,s390_handle_mcck
	TRACE_IRQS_ON
.Lmcck_return:
	lctlg	%c1,%c1,__PT_CR1(%r11)
	lmg	%r0,%r10,__PT_R0(%r11)
	mvc	__LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
	tm	__LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
	jno	0f
	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
	stpt	__LC_EXIT_TIMER
0:	lmg	%r11,%r15,__PT_R11(%r11)
	b	__LC_RETURN_MCCK_LPSWE

.Lmcck_panic:
	lg	%r15,__LC_NODAT_STACK
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	j	.Lmcck_skip
ENDPROC(mcck_int_handler)

#
# PSW restart interrupt handler
#
ENTRY(restart_int_handler)
	ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40
	stg	%r15,__LC_SAVE_AREA_RESTART
	lg	%r15,__LC_RESTART_STACK
	xc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r15),STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r14,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	mvc	STACK_FRAME_OVERHEAD+__PT_R15(8,%r15),__LC_SAVE_AREA_RESTART
	mvc	STACK_FRAME_OVERHEAD+__PT_PSW(16,%r15),__LC_RST_OLD_PSW
	xc	0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
	lg	%r1,__LC_RESTART_FN		# load fn, parm & source cpu
	lg	%r2,__LC_RESTART_DATA
	lg	%r3,__LC_RESTART_SOURCE
	ltgr	%r3,%r3				# test source cpu address
	jm	1f				# negative -> skip source stop
0:	sigp	%r4,%r3,SIGP_SENSE		# sigp sense to source cpu
	brc	10,0b				# wait for status stored
1:	basr	%r14,%r1			# call function
	stap	__SF_EMPTY(%r15)		# store cpu address
	llgh	%r3,__SF_EMPTY(%r15)
2:	sigp	%r4,%r3,SIGP_STOP		# sigp stop to current cpu
	brc	2,2b
3:	j	3b
ENDPROC(restart_int_handler)

	.section .kprobes.text, "ax"

#if defined(CONFIG_CHECK_STACK) || defined(CONFIG_VMAP_STACK)
/*
 * The synchronous or the asynchronous stack overflowed. We are dead.
 * No need to properly save the registers, we are going to panic anyway.
 * Setup a pt_regs so that show_trace can provide a good call trace.
 */
ENTRY(stack_overflow)
	lg	%r15,__LC_NODAT_STACK	# change to panic stack
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	stmg	%r8,%r9,__PT_PSW(%r11)
	mvc	__PT_R8(64,%r11),0(%r14)
	stg	%r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	jg	kernel_stack_overflow
ENDPROC(stack_overflow)
#endif

#if IS_ENABLED(CONFIG_KVM)
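# Cleanup for interrupts and machine checks that hit inside the sie64a
# critical section: flag a machine check for the guest if it arrived
# between .Lsie_entry and .Lsie_skip, leave SIE, restore the kernel ASCE
# and continue at sie_exit.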
.Lcleanup_sie:
	cghi	%r11,__LC_SAVE_AREA_ASYNC	# is this a normal interrupt?
	je	1f
	larl	%r13,.Lsie_entry
	slgr	%r9,%r13
	lghi	%r13,.Lsie_skip - .Lsie_entry
	clgr	%r9,%r13
	jh	1f
	oi	__LC_CPU_FLAGS+7, _CIF_MCCK_GUEST
1:	BPENTER	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
	lg	%r9,__SF_SIE_CONTROL(%r15)	# get control block pointer
	ni	__SIE_PROG0C+3(%r9),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_KERNEL_ASCE
	larl	%r9,sie_exit			# skip forward to sie_exit
	BR_EX	%r14,%r11

#endif
	.section .rodata, "a"
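# The syscall tables are built by including asm/syscall_table.h with the
# SYSCALL() macro defined accordingly: once with the 64-bit entry points
# and, for CONFIG_COMPAT, once more with the 31-bit emulation entries.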
#define SYSCALL(esame,emu)	.quad __s390x_ ## esame
	.globl	sys_call_table
sys_call_table:
#include "asm/syscall_table.h"
#undef SYSCALL

#ifdef CONFIG_COMPAT

#define SYSCALL(esame,emu)	.quad __s390_ ## emu
	.globl	sys_call_table_emu
sys_call_table_emu:
#include "asm/syscall_table.h"
#undef SYSCALL
#endif
1302