xref: /openbmc/linux/arch/s390/kernel/entry.S (revision dfeab63a)
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 *    S390 low-level entry points.
4 *
5 *    Copyright IBM Corp. 1999, 2012
6 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
7 *		 Hartmut Penner (hp@de.ibm.com),
8 *		 Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
9 */
10
11#include <linux/init.h>
12#include <linux/linkage.h>
13#include <asm/asm-extable.h>
14#include <asm/alternative-asm.h>
15#include <asm/processor.h>
16#include <asm/cache.h>
17#include <asm/dwarf.h>
18#include <asm/errno.h>
19#include <asm/ptrace.h>
20#include <asm/thread_info.h>
21#include <asm/asm-offsets.h>
22#include <asm/unistd.h>
23#include <asm/page.h>
24#include <asm/sigp.h>
25#include <asm/irq.h>
26#include <asm/vx-insn.h>
27#include <asm/setup.h>
28#include <asm/nmi.h>
29#include <asm/export.h>
30#include <asm/nospec-insn.h>
31
# Offsets of the individual saved gprs 0-15 within the pt_regs gpr
# save area (eight bytes per register).
32__PT_R0      =	__PT_GPRS
33__PT_R1      =	__PT_GPRS + 8
34__PT_R2      =	__PT_GPRS + 16
35__PT_R3      =	__PT_GPRS + 24
36__PT_R4      =	__PT_GPRS + 32
37__PT_R5      =	__PT_GPRS + 40
38__PT_R6      =	__PT_GPRS + 48
39__PT_R7      =	__PT_GPRS + 56
40__PT_R8      =	__PT_GPRS + 64
41__PT_R9      =	__PT_GPRS + 72
42__PT_R10     =	__PT_GPRS + 80
43__PT_R11     =	__PT_GPRS + 88
44__PT_R12     =	__PT_GPRS + 96
45__PT_R13     =	__PT_GPRS + 104
46__PT_R14     =	__PT_GPRS + 112
47__PT_R15     =	__PT_GPRS + 120
48
# Kernel stack geometry. STACK_INIT is the initial stack pointer offset
# within a newly set up stack: top of stack minus one register save
# area (STACK_FRAME_OVERHEAD) and one struct pt_regs.
49STACK_SHIFT = PAGE_SHIFT + THREAD_SIZE_ORDER
50STACK_SIZE  = 1 << STACK_SHIFT
51STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
52
53_LPP_OFFSET	= __LC_LPP
54
	/*
	 * STBEAR/LBEAR store resp. load the breaking-event-address
	 * register to/from \address, LPSWEY loads a PSW from \address
	 * with lpswey. All three are patched in via ALTERNATIVE when
	 * facility 193 is installed; otherwise the fallback is used
	 * (nop, resp. a plain branch to the \lpswe fallback label).
	 */
55	.macro STBEAR address
56	ALTERNATIVE "nop", ".insn s,0xb2010000,\address", 193
57	.endm
58
59	.macro LBEAR address
60	ALTERNATIVE "nop", ".insn s,0xb2000000,\address", 193
61	.endm
62
63	.macro LPSWEY address,lpswe
64	ALTERNATIVE "b \lpswe; nopr", ".insn siy,0xeb0000000071,\address,0", 193
65	.endm
66
	/* Copy the last-breaking-event address from lowcore into the
	 * pt_regs addressed by \reg (facility 193 only, nop otherwise). */
67	.macro MBEAR reg
68	ALTERNATIVE "brcl 0,0", __stringify(mvc __PT_LAST_BREAK(8,\reg),__LC_LAST_BREAK), 193
69	.endm
70
	/*
	 * Jump to stack_overflow with the lowcore save area offset
	 * \savearea in %r14 if %r15 points into the stack guard area.
	 * Compiled away unless CONFIG_CHECK_STACK is set.
	 */
71	.macro	CHECK_STACK savearea
72#ifdef CONFIG_CHECK_STACK
73	tml	%r15,STACK_SIZE - CONFIG_STACK_GUARD
74	lghi	%r14,\savearea
75	jz	stack_overflow
76#endif
77	.endm
78
	/*
	 * Verify that %r15 points into one of the known kernel stacks
	 * (kernel, async, mcck, nodat or restart stack) and jump to
	 * \oklabel if so; otherwise jump to stack_overflow with the
	 * lowcore save area offset \savearea in %r14.
	 * Without CONFIG_VMAP_STACK this is an unconditional jump to
	 * \oklabel.
	 */
79	.macro	CHECK_VMAP_STACK savearea,oklabel
80#ifdef CONFIG_VMAP_STACK
81	lgr	%r14,%r15
82	nill	%r14,0x10000 - STACK_SIZE
83	oill	%r14,STACK_INIT
84	clg	%r14,__LC_KERNEL_STACK
85	je	\oklabel
86	clg	%r14,__LC_ASYNC_STACK
87	je	\oklabel
88	clg	%r14,__LC_MCCK_STACK
89	je	\oklabel
90	clg	%r14,__LC_NODAT_STACK
91	je	\oklabel
92	clg	%r14,__LC_RESTART_STACK
93	je	\oklabel
94	lghi	%r14,\savearea
95	j	stack_overflow
96#else
97	j	\oklabel
98#endif
99	.endm
100
101	/*
102	 * The TSTMSK macro generates a test-under-mask instruction by
103	 * calculating the memory offset for the specified mask value.
104	 * Mask value can be any constant.  The macro shifts the mask
105	 * value to calculate the memory offset for the test-under-mask
106	 * instruction.
107	 */
108	.macro TSTMSK addr, mask, size=8, bytepos=0
109		.if (\bytepos < \size) && (\mask >> 8)
110			.if (\mask & 0xff)
111				.error "Mask exceeds byte boundary"
112			.endif
113			TSTMSK \addr, "(\mask >> 8)", \size, "(\bytepos + 1)"
114			.exitm
115		.endif
116		.ifeq \mask
117			.error "Mask must not be zero"
118		.endif
119		off = \size - \bytepos - 1
120		tm	off+\addr, \mask
121	.endm
122
	/*
	 * BPOFF/BPON switch the branch-prediction mode off/on via an
	 * alternatively patched instruction (feature 82; used together
	 * with the _TIF_ISOLATE_BP* bits for speculation isolation).
	 * BPENTER/BPEXIT do the equivalent switch conditionally on the
	 * thread flag bits \tif_mask at \tif_ptr.
	 */
123	.macro BPOFF
124	ALTERNATIVE "nop", ".insn rrf,0xb2e80000,0,0,12,0", 82
125	.endm
126
127	.macro BPON
128	ALTERNATIVE "nop", ".insn rrf,0xb2e80000,0,0,13,0", 82
129	.endm
130
131	.macro BPENTER tif_ptr,tif_mask
132	ALTERNATIVE "TSTMSK \tif_ptr,\tif_mask; jz .+8; .insn rrf,0xb2e80000,0,0,13,0", \
133		    "j .+12; nop; nop", 82
134	.endm
135
136	.macro BPEXIT tif_ptr,tif_mask
137	TSTMSK	\tif_ptr,\tif_mask
138	ALTERNATIVE "jz .+8;  .insn rrf,0xb2e80000,0,0,12,0", \
139		    "jnz .+8; .insn rrf,0xb2e80000,0,0,13,0", 82
140	.endm
141
142	/*
143	 * The CHKSTG macro jumps to the provided label in case the
144	 * machine check interruption code reports one of unrecoverable
145	 * storage errors:
146	 * - Storage error uncorrected
147	 * - Storage key error uncorrected
148	 * - Storage degradation with Failing-storage-address validity
149	 */
150	.macro CHKSTG errlabel
151	TSTMSK	__LC_MCCK_CODE,(MCCK_CODE_STG_ERROR|MCCK_CODE_STG_KEY_ERROR)
152	jnz	\errlabel
153	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_STG_DEGRAD
154	jz	.Loklabel\@
155	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_STG_FAIL_ADDR
156	jnz	\errlabel
157.Loklabel\@:
158	.endm
159
160#if IS_ENABLED(CONFIG_KVM)
161	/*
162	 * The OUTSIDE macro jumps to the provided label in case the value
163	 * in the provided register is outside of the provided range. The
164	 * macro is useful for checking whether a PSW stored in a register
165	 * pair points inside or outside of a block of instructions.
166	 * @reg: register to check
167	 * @start: start of the range
168	 * @end: end of the range
169	 * @outside_label: jump here if @reg is outside of [@start..@end)
170	 */
171	.macro OUTSIDE reg,start,end,outside_label
172	lgr	%r14,\reg
173	larl	%r13,\start
174	slgr	%r14,%r13
	/* LLVM's IAS cannot evaluate \end - \start as a clgfi immediate,
	 * so the range size is placed in .rodata and compared via clgfrl. */
175#ifdef CONFIG_AS_IS_LLVM
176	clgfrl	%r14,.Lrange_size\@
177#else
178	clgfi	%r14,\end - \start
179#endif
180	jhe	\outside_label
181#ifdef CONFIG_AS_IS_LLVM
182	.section .rodata, "a"
183	.align 4
184.Lrange_size\@:
185	.long	\end - \start
186	.previous
187#endif
188	.endm
189
	/*
	 * Leave SIE: clear the in-SIE indication in the SIE control block,
	 * switch %c1 back to the kernel address space and point %r9 at
	 * sie_exit so the interrupted context continues there.
	 */
190	.macro SIEEXIT
191	lg	%r9,__SF_SIE_CONTROL(%r15)	# get control block pointer
192	ni	__SIE_PROG0C+3(%r9),0xfe	# no longer in SIE
193	lctlg	%c1,%c1,__LC_KERNEL_ASCE	# load primary asce
194	larl	%r9,sie_exit			# skip forward to sie_exit
195	.endm
196#endif
197
198	GEN_BR_THUNK %r14
199
200	.section .kprobes.text, "ax"
201.Ldummy:
202	/*
203	 * This nop exists only in order to avoid that __bpon starts at
204	 * the beginning of the kprobes text section. In that case we would
205	 * have several symbols at the same address. E.g. objdump would take
206	 * an arbitrary symbol name when disassembling this code.
207	 * With the added nop in between the __bpon symbol is unique
208	 * again.
209	 */
210	nop	0
211
	/* Turn branch prediction back on (BPON) and return via the
	 * expolined branch thunk. */
212ENTRY(__bpon)
213	.globl __bpon
214	BPON
215	BR_EX	%r14
216ENDPROC(__bpon)
217
218/*
219 * Scheduler resume function, called by switch_to
220 *  gpr2 = (task_struct *) prev
221 *  gpr3 = (task_struct *) next
222 * Returns:
223 *  gpr2 = prev
224 */
225ENTRY(__switch_to)
226	stmg	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
227	lghi	%r4,__TASK_stack
228	lghi	%r1,__TASK_thread
229	llill	%r5,STACK_INIT
230	stg	%r15,__THREAD_ksp(%r1,%r2)	# store kernel stack of prev
231	lg	%r15,0(%r4,%r3)			# start of kernel stack of next
232	agr	%r15,%r5			# end of kernel stack of next
233	stg	%r3,__LC_CURRENT		# store task struct of next
234	stg	%r15,__LC_KERNEL_STACK		# store end of kernel stack
235	lg	%r15,__THREAD_ksp(%r1,%r3)	# load kernel stack of next
236	aghi	%r3,__TASK_pid
237	mvc	__LC_CURRENT_PID(4,%r0),0(%r3)	# store pid of next
238	lmg	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
239	ALTERNATIVE "nop", "lpp _LPP_OFFSET", 40	# load program parameter
240	BR_EX	%r14
241ENDPROC(__switch_to)
242
243#if IS_ENABLED(CONFIG_KVM)
244/*
245 * sie64a calling convention:
246 * %r2 pointer to sie control block
247 * %r3 guest register save area
 *
 * Returns the exit reason code from __SF_SIE_REASON in %r2
 * (0 for a regular exit, -EFAULT when a fault hit the sie critical
 * section, see the EX_TABLE entries below).
248 */
249ENTRY(sie64a)
250	stmg	%r6,%r14,__SF_GPRS(%r15)	# save kernel registers
251	lg	%r12,__LC_CURRENT
252	stg	%r2,__SF_SIE_CONTROL(%r15)	# save control block pointer
253	stg	%r3,__SF_SIE_SAVEAREA(%r15)	# save guest register save area
254	xc	__SF_SIE_REASON(8,%r15),__SF_SIE_REASON(%r15) # reason code = 0
255	mvc	__SF_SIE_FLAGS(8,%r15),__TI_flags(%r12) # copy thread flags
256	lmg	%r0,%r13,0(%r3)			# load guest gprs 0-13
257	lg	%r14,__LC_GMAP			# get gmap pointer
258	ltgr	%r14,%r14
259	jz	.Lsie_gmap			# no gmap -> keep current asce
260	lctlg	%c1,%c1,__GMAP_ASCE(%r14)	# load primary asce
261.Lsie_gmap:
262	lg	%r14,__SF_SIE_CONTROL(%r15)	# get control block pointer
263	oi	__SIE_PROG0C+3(%r14),1		# we are going into SIE now
264	tm	__SIE_PROG20+3(%r14),3		# last exit...
265	jnz	.Lsie_skip
266	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
267	jo	.Lsie_skip			# exit if fp/vx regs changed
268	BPEXIT	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
269.Lsie_entry:
270	sie	0(%r14)
271	BPOFF
272	BPENTER	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
273.Lsie_skip:
274	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
275	lctlg	%c1,%c1,__LC_KERNEL_ASCE	# load primary asce
276.Lsie_done:
277# some program checks are suppressing. C code (e.g. do_protection_exception)
278# will rewind the PSW by the ILC, which is often 4 bytes in case of SIE. There
279# are some corner cases (e.g. runtime instrumentation) where ILC is unpredictable.
280# Other instructions between sie64a and .Lsie_done should not cause program
281# interrupts. So lets use 3 nops as a landing pad for all possible rewinds.
282.Lrewind_pad6:
283	nopr	7
284.Lrewind_pad4:
285	nopr	7
286.Lrewind_pad2:
287	nopr	7
288	.globl sie_exit
289sie_exit:
290	lg	%r14,__SF_SIE_SAVEAREA(%r15)	# load guest register save area
291	stmg	%r0,%r13,0(%r14)		# save guest gprs 0-13
292	xgr	%r0,%r0				# clear guest registers to
293	xgr	%r1,%r1				# prevent speculative use
294	xgr	%r3,%r3
295	xgr	%r4,%r4
296	xgr	%r5,%r5
297	lmg	%r6,%r14,__SF_GPRS(%r15)	# restore kernel registers
298	lg	%r2,__SF_SIE_REASON(%r15)	# return exit reason code
299	BR_EX	%r14
300.Lsie_fault:
301	lghi	%r14,-EFAULT
302	stg	%r14,__SF_SIE_REASON(%r15)	# set exit reason code
303	j	sie_exit
304
	# Map faults on the rewind landing pads and sie_exit itself to
	# .Lsie_fault, which reports -EFAULT to the caller.
305	EX_TABLE(.Lrewind_pad6,.Lsie_fault)
306	EX_TABLE(.Lrewind_pad4,.Lsie_fault)
307	EX_TABLE(.Lrewind_pad2,.Lsie_fault)
308	EX_TABLE(sie_exit,.Lsie_fault)
309ENDPROC(sie64a)
310EXPORT_SYMBOL(sie64a)
311EXPORT_SYMBOL(sie_exit)
312#endif
313
314/*
315 * SVC interrupt handler routine. System calls are synchronous events and
316 * are entered with interrupts disabled.
317 */
318
319ENTRY(system_call)
320	stpt	__LC_SYS_ENTER_TIMER
321	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
322	BPOFF
323	lghi	%r14,0			# %r14 = PER flag, set to 1 by .Lpgm_svcper
324.Lsysc_per:
325	STBEAR	__LC_LAST_BREAK
326	lctlg	%c1,%c1,__LC_KERNEL_ASCE
327	lg	%r12,__LC_CURRENT
328	lg	%r15,__LC_KERNEL_STACK
329	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
330	stmg	%r0,%r7,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
331	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
332	# clear user controlled register to prevent speculative use
333	xgr	%r0,%r0
334	xgr	%r1,%r1
335	xgr	%r4,%r4
336	xgr	%r5,%r5
337	xgr	%r6,%r6
338	xgr	%r7,%r7
339	xgr	%r8,%r8
340	xgr	%r9,%r9
341	xgr	%r10,%r10
342	xgr	%r11,%r11
343	la	%r2,STACK_FRAME_OVERHEAD(%r15)	# pointer to pt_regs
344	mvc	__PT_R8(64,%r2),__LC_SAVE_AREA_SYNC
345	MBEAR	%r2
346	lgr	%r3,%r14		# 2nd argument: PER flag
347	brasl	%r14,__do_syscall
348	lctlg	%c1,%c1,__LC_USER_ASCE
349	mvc	__LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
350	BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
351	LBEAR	STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
352	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
353	stpt	__LC_EXIT_TIMER
354	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
355ENDPROC(system_call)
356
357#
358# a new process exits the kernel with ret_from_fork
359#
360ENTRY(ret_from_fork)
	# NOTE(review): %r11 appears to be handed in by the fork/copy_thread
	# path as second argument to __ret_from_fork - confirm against
	# process.c before relying on this.
361	lgr	%r3,%r11
362	brasl	%r14,__ret_from_fork
	# Return to user space: restore the user address space, re-enable
	# branch prediction and exit via lpswe(y), like system_call does.
363	lctlg	%c1,%c1,__LC_USER_ASCE
364	mvc	__LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
365	BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
366	LBEAR	STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
367	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
368	stpt	__LC_EXIT_TIMER
369	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
370ENDPROC(ret_from_fork)
371
372/*
373 * Program check handler routine
374 */
375
376ENTRY(pgm_check_handler)
377	stpt	__LC_SYS_ENTER_TIMER
378	BPOFF
379	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
380	lg	%r12,__LC_CURRENT
381	lghi	%r10,0			# %r10 = pt_regs flags, see __PT_FLAGS below
382	lmg	%r8,%r9,__LC_PGM_OLD_PSW
383	tmhh	%r8,0x0001		# coming from user space?
384	jno	.Lpgm_skip_asce
385	lctlg	%c1,%c1,__LC_KERNEL_ASCE
386	j	3f			# -> fault in user space
387.Lpgm_skip_asce:
388#if IS_ENABLED(CONFIG_KVM)
389	# cleanup critical section for program checks in sie64a
390	OUTSIDE	%r9,.Lsie_gmap,.Lsie_done,1f
391	SIEEXIT
392	lghi	%r10,_PIF_GUEST_FAULT
393#endif
3941:	tmhh	%r8,0x4000		# PER bit set in old PSW ?
395	jnz	2f			# -> enabled, can't be a double fault
396	tm	__LC_PGM_ILC+3,0x80	# check for per exception
397	jnz	.Lpgm_svcper		# -> single stepped svc
3982:	CHECK_STACK __LC_SAVE_AREA_SYNC
399	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
400	# CHECK_VMAP_STACK branches to stack_overflow or 4f
401	CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,4f
4023:	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
403	lg	%r15,__LC_KERNEL_STACK
4044:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
405	stg	%r10,__PT_FLAGS(%r11)
406	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
407	stmg	%r0,%r7,__PT_R0(%r11)
408	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
409	mvc	__PT_LAST_BREAK(8,%r11),__LC_PGM_LAST_BREAK
410	stmg	%r8,%r9,__PT_PSW(%r11)
411
412	# clear user controlled registers to prevent speculative use
413	xgr	%r0,%r0
414	xgr	%r1,%r1
415	xgr	%r3,%r3
416	xgr	%r4,%r4
417	xgr	%r5,%r5
418	xgr	%r6,%r6
419	xgr	%r7,%r7
420	lgr	%r2,%r11		# pass pointer to pt_regs
421	brasl	%r14,__do_pgm_check
422	tmhh	%r8,0x0001		# returning to user space?
423	jno	.Lpgm_exit_kernel
424	lctlg	%c1,%c1,__LC_USER_ASCE
425	BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
426	stpt	__LC_EXIT_TIMER
427.Lpgm_exit_kernel:
428	mvc	__LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
429	LBEAR	STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
430	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
431	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
432
433#
434# single stepped system call
435#
436.Lpgm_svcper:
437	mvc	__LC_RETURN_PSW(8),__LC_SVC_NEW_PSW
438	larl	%r14,.Lsysc_per
439	stg	%r14,__LC_RETURN_PSW+8
440	lghi	%r14,1			# PER flag for .Lsysc_per
441	LBEAR	__LC_PGM_LAST_BREAK
442	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE # branch to .Lsysc_per
443ENDPROC(pgm_check_handler)
444
445/*
446 * Interrupt handler macro used for external and IO interrupts.
 * @name:       symbol name of the generated handler entry point
 * @lc_old_psw: lowcore location of the old PSW for this interrupt class
 * @handler:    C function called with a pt_regs pointer in %r2
447 */
448.macro INT_HANDLER name,lc_old_psw,handler
449ENTRY(\name)
450	stckf	__LC_INT_CLOCK
451	stpt	__LC_SYS_ENTER_TIMER
452	STBEAR	__LC_LAST_BREAK
453	BPOFF
454	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
455	lg	%r12,__LC_CURRENT
456	lmg	%r8,%r9,\lc_old_psw
457	tmhh	%r8,0x0001			# interrupting from user ?
458	jnz	1f
459#if IS_ENABLED(CONFIG_KVM)
	# interrupt hit inside the sie64a critical section -> clean up
460	OUTSIDE	%r9,.Lsie_gmap,.Lsie_done,0f
461	BPENTER	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
462	SIEEXIT
463#endif
4640:	CHECK_STACK __LC_SAVE_AREA_ASYNC
465	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
466	j	2f
4671:	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
468	lctlg	%c1,%c1,__LC_KERNEL_ASCE
469	lg	%r15,__LC_KERNEL_STACK
4702:	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
471	la	%r11,STACK_FRAME_OVERHEAD(%r15)
472	stmg	%r0,%r7,__PT_R0(%r11)
473	# clear user controlled registers to prevent speculative use
474	xgr	%r0,%r0
475	xgr	%r1,%r1
476	xgr	%r3,%r3
477	xgr	%r4,%r4
478	xgr	%r5,%r5
479	xgr	%r6,%r6
480	xgr	%r7,%r7
481	xgr	%r10,%r10
482	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
483	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
484	MBEAR	%r11
485	stmg	%r8,%r9,__PT_PSW(%r11)
486	lgr	%r2,%r11		# pass pointer to pt_regs
487	brasl	%r14,\handler
488	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
489	tmhh	%r8,0x0001		# returning to user ?
490	jno	2f
491	lctlg	%c1,%c1,__LC_USER_ASCE
492	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
493	stpt	__LC_EXIT_TIMER
4942:	LBEAR	__PT_LAST_BREAK(%r11)
495	lmg	%r0,%r15,__PT_R0(%r11)
496	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
497ENDPROC(\name)
498.endm
499
500INT_HANDLER ext_int_handler,__LC_EXT_OLD_PSW,do_ext_irq
501INT_HANDLER io_int_handler,__LC_IO_OLD_PSW,do_io_irq
502
503/*
504 * Load idle PSW.
 *
 * NOTE(review): by the later lpswe, __SF_EMPTY appears to hold the idle
 * PSW: first half passed in by the caller in %r3, second half (the
 * instruction address) set to psw_idle_exit - confirm against the
 * C caller in idle code.
505 */
506ENTRY(psw_idle)
507	stg	%r14,(__SF_GPRS+8*8)(%r15)
508	stg	%r3,__SF_EMPTY(%r15)
509	larl	%r1,psw_idle_exit
510	stg	%r1,__SF_EMPTY+8(%r15)
511	larl	%r1,smp_cpu_mtid
512	llgf	%r1,0(%r1)
513	ltgr	%r1,%r1
514	jz	.Lpsw_idle_stcctm	# no SMT -> skip counter store
	# store multithreading cycle counters on idle entry (stcctm)
515	.insn	rsy,0xeb0000000017,%r1,5,__MT_CYCLES_ENTER(%r2)
516.Lpsw_idle_stcctm:
517	oi	__LC_CPU_FLAGS+7,_CIF_ENABLED_WAIT
518	BPON
519	stckf	__CLOCK_IDLE_ENTER(%r2)
520	stpt	__TIMER_IDLE_ENTER(%r2)
521	lpswe	__SF_EMPTY(%r15)
522.globl psw_idle_exit
523psw_idle_exit:
524	BR_EX	%r14
525ENDPROC(psw_idle)
526
527/*
528 * Machine check handler routines
529 */
530ENTRY(mcck_int_handler)
531	stckf	__LC_MCCK_CLOCK
532	BPOFF
533	la	%r1,4095		# validate r1
534	spt	__LC_CPU_TIMER_SAVE_AREA-4095(%r1)	# validate cpu timer
535	LBEAR	__LC_LAST_BREAK_SAVE_AREA-4095(%r1)		# validate bear
536	lmg	%r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# validate gprs
537	lg	%r12,__LC_CURRENT
538	lmg	%r8,%r9,__LC_MCK_OLD_PSW
539	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_SYSTEM_DAMAGE
540	jo	.Lmcck_panic		# yes -> rest of mcck code invalid
541	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_CR_VALID
542	jno	.Lmcck_panic		# control registers invalid -> panic
543	la	%r14,4095
544	lctlg	%c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r14) # validate ctl regs
545	ptlb
546	lghi	%r14,__LC_CPU_TIMER_SAVE_AREA
547	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
548	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_CPU_TIMER_VALID
549	jo	3f
	# cpu timer invalid: pick the most recent of sys_enter/exit/
	# last_update timer as replacement value
550	la	%r14,__LC_SYS_ENTER_TIMER
551	clc	0(8,%r14),__LC_EXIT_TIMER
552	jl	1f
553	la	%r14,__LC_EXIT_TIMER
5541:	clc	0(8,%r14),__LC_LAST_UPDATE_TIMER
555	jl	2f
556	la	%r14,__LC_LAST_UPDATE_TIMER
5572:	spt	0(%r14)
558	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
5593:	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_PSW_MWP_VALID
560	jno	.Lmcck_panic
561	tmhh	%r8,0x0001		# interrupting from user ?
562	jnz	6f
563	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_PSW_IA_VALID
564	jno	.Lmcck_panic
565#if IS_ENABLED(CONFIG_KVM)
566	OUTSIDE	%r9,.Lsie_gmap,.Lsie_done,6f
567	OUTSIDE	%r9,.Lsie_entry,.Lsie_skip,4f
568	oi	__LC_CPU_FLAGS+7, _CIF_MCCK_GUEST
569	j	5f
5704:	CHKSTG	.Lmcck_panic
5715:	larl	%r14,.Lstosm_tmp
572	stosm	0(%r14),0x04		# turn dat on, keep irqs off
573	BPENTER	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
574	SIEEXIT
575	j	.Lmcck_stack
576#endif
5776:	CHKSTG	.Lmcck_panic
578	larl	%r14,.Lstosm_tmp
579	stosm	0(%r14),0x04		# turn dat on, keep irqs off
580	tmhh	%r8,0x0001		# interrupting from user ?
581	jz	.Lmcck_stack
582	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
583.Lmcck_stack:
584	lg	%r15,__LC_MCCK_STACK
585	la	%r11,STACK_FRAME_OVERHEAD(%r15)
586	stctg	%c1,%c1,__PT_CR1(%r11)
587	lctlg	%c1,%c1,__LC_KERNEL_ASCE
588	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
589	lghi	%r14,__LC_GPREGS_SAVE_AREA+64
590	stmg	%r0,%r7,__PT_R0(%r11)
591	# clear user controlled registers to prevent speculative use
592	xgr	%r0,%r0
593	xgr	%r1,%r1
594	xgr	%r3,%r3
595	xgr	%r4,%r4
596	xgr	%r5,%r5
597	xgr	%r6,%r6
598	xgr	%r7,%r7
599	xgr	%r10,%r10
600	mvc	__PT_R8(64,%r11),0(%r14)
601	stmg	%r8,%r9,__PT_PSW(%r11)
602	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
603	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
604	lgr	%r2,%r11		# pass pointer to pt_regs
605	brasl	%r14,s390_do_machine_check
606	cghi	%r2,0
607	je	.Lmcck_return		# no further handling requested
	# s390_do_machine_check returned non-zero: call s390_handle_mcck
	# on the kernel stack with a copy of the pt_regs
608	lg	%r1,__LC_KERNEL_STACK	# switch to kernel stack
609	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
610	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
611	la	%r11,STACK_FRAME_OVERHEAD(%r1)
612	lgr	%r2,%r11
613	lgr	%r15,%r1
614	brasl	%r14,s390_handle_mcck
615.Lmcck_return:
616	lctlg	%c1,%c1,__PT_CR1(%r11)
617	lmg	%r0,%r10,__PT_R0(%r11)
618	mvc	__LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
619	tm	__LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
620	jno	0f
621	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
622	stpt	__LC_EXIT_TIMER
6230:	ALTERNATIVE "nop", __stringify(lghi %r12,__LC_LAST_BREAK_SAVE_AREA),193
624	LBEAR	0(%r12)
625	lmg	%r11,%r15,__PT_R11(%r11)
626	LPSWEY	__LC_RETURN_MCCK_PSW,__LC_RETURN_MCCK_LPSWE
627
628.Lmcck_panic:
629	/*
630	 * Iterate over all possible CPU addresses in the range 0..0xffff
631	 * and stop each CPU using signal processor. Use compare and swap
632	 * to allow just one CPU-stopper and prevent concurrent CPUs from
633	 * stopping each other while leaving the others running.
634	 */
635	lhi	%r5,0
636	lhi	%r6,1
637	larl	%r7,.Lstop_lock
638	cs	%r5,%r6,0(%r7)		# single CPU-stopper only
639	jnz	4f
640	larl	%r7,.Lthis_cpu
641	stap	0(%r7)			# this CPU address
642	lh	%r4,0(%r7)
643	nilh	%r4,0
644	lhi	%r0,1
645	sll	%r0,16			# CPU counter
646	lhi	%r3,0			# next CPU address
6470:	cr	%r3,%r4
648	je	2f
6491:	sigp	%r1,%r3,SIGP_STOP	# stop next CPU
650	brc	SIGP_CC_BUSY,1b
6512:	ahi	%r3,1
652	brct	%r0,0b
6533:	sigp	%r1,%r4,SIGP_STOP	# stop this CPU
654	brc	SIGP_CC_BUSY,3b
6554:	j	4b
ENDPROC-marker-fix
656ENDPROC(mcck_int_handler)
657
	/*
	 * Restart interrupt handler: optionally restore the control
	 * registers, switch to the restart stack, build a pt_regs there
	 * and call the function from __LC_RESTART_FN with the data and
	 * source cpu from lowcore; afterwards stop this CPU via sigp.
	 */
658ENTRY(restart_int_handler)
659	ALTERNATIVE "nop", "lpp _LPP_OFFSET", 40
660	stg	%r15,__LC_SAVE_AREA_RESTART
661	TSTMSK	__LC_RESTART_FLAGS,RESTART_FLAG_CTLREGS,4
662	jz	0f
663	la	%r15,4095
664	lctlg	%c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r15)
6650:	larl	%r15,.Lstosm_tmp
666	stosm	0(%r15),0x04			# turn dat on, keep irqs off
667	lg	%r15,__LC_RESTART_STACK
668	xc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r15),STACK_FRAME_OVERHEAD(%r15)
669	stmg	%r0,%r14,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
670	mvc	STACK_FRAME_OVERHEAD+__PT_R15(8,%r15),__LC_SAVE_AREA_RESTART
671	mvc	STACK_FRAME_OVERHEAD+__PT_PSW(16,%r15),__LC_RST_OLD_PSW
672	xc	0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
673	lg	%r1,__LC_RESTART_FN		# load fn, parm & source cpu
674	lg	%r2,__LC_RESTART_DATA
675	lgf	%r3,__LC_RESTART_SOURCE
676	ltgr	%r3,%r3				# test source cpu address
677	jm	1f				# negative -> skip source stop
6780:	sigp	%r4,%r3,SIGP_SENSE		# sigp sense to source cpu
679	brc	10,0b				# wait for status stored
6801:	basr	%r14,%r1			# call function
681	stap	__SF_EMPTY(%r15)		# store cpu address
682	llgh	%r3,__SF_EMPTY(%r15)
6832:	sigp	%r4,%r3,SIGP_STOP		# sigp stop to current cpu
684	brc	2,2b
6853:	j	3b
686ENDPROC(restart_int_handler)
687
688	.section .kprobes.text, "ax"
689
690#if defined(CONFIG_CHECK_STACK) || defined(CONFIG_VMAP_STACK)
691/*
692 * The synchronous or the asynchronous stack overflowed. We are dead.
693 * No need to properly save the registers, we are going to panic anyway.
694 * Setup a pt_regs so that show_trace can provide a good call trace.
 *
 * On entry %r14 holds the lowcore offset of the register save area,
 * set up by CHECK_STACK resp. CHECK_VMAP_STACK before jumping here.
695 */
696ENTRY(stack_overflow)
697	lg	%r15,__LC_NODAT_STACK	# change to panic stack
698	la	%r11,STACK_FRAME_OVERHEAD(%r15)
699	stmg	%r0,%r7,__PT_R0(%r11)
700	stmg	%r8,%r9,__PT_PSW(%r11)
701	mvc	__PT_R8(64,%r11),0(%r14)	# copy gprs 8-15 from save area
702	stg	%r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
703	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
704	lgr	%r2,%r11		# pass pointer to pt_regs
705	jg	kernel_stack_overflow
706ENDPROC(stack_overflow)
707#endif
708
	/* Scratch data used by the machine-check CPU stopper and the
	 * restart handler (stosm needs a writable byte). */
709	.section .data, "aw"
710		.align	4
711.Lstop_lock:	.long	0
712.Lthis_cpu:	.short	0
713.Lstosm_tmp:	.byte	0
	/* System call tables: one .quad entry per syscall, generated from
	 * the syscall table header; sys_call_table_emu holds the compat
	 * (31-bit emulation) entry points. */
714	.section .rodata, "a"
715#define SYSCALL(esame,emu)	.quad __s390x_ ## esame
716	.globl	sys_call_table
717sys_call_table:
718#include "asm/syscall_table.h"
719#undef SYSCALL
720
721#ifdef CONFIG_COMPAT
722
723#define SYSCALL(esame,emu)	.quad __s390_ ## emu
724	.globl	sys_call_table_emu
725sys_call_table_emu:
726#include "asm/syscall_table.h"
727#undef SYSCALL
728#endif
729
729