/* SPDX-License-Identifier: GPL-2.0 */
/*
 * S390 low-level entry points.
 *
 * Copyright IBM Corp. 1999, 2012
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *		 Hartmut Penner (hp@de.ibm.com),
 *		 Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 */

#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/asm-extable.h>
#include <asm/alternative-asm.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/dwarf.h>
#include <asm/errno.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/page.h>
#include <asm/sigp.h>
#include <asm/irq.h>
#include <asm/vx-insn.h>
#include <asm/setup.h>
#include <asm/nmi.h>
#include <asm/export.h>
#include <asm/nospec-insn.h>

# Kernel stack geometry: STACK_INIT is the initial stack pointer offset within
# a stack page block - top of the stack minus one standard frame and one
# pt_regs area.
STACK_SHIFT = PAGE_SHIFT + THREAD_SIZE_ORDER
STACK_SIZE = 1 << STACK_SHIFT
STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE

_LPP_OFFSET = __LC_LPP

	# Store the breaking-event address to \address. Patched in by the
	# ALTERNATIVE mechanism only when facility 193 is installed
	# (NOTE(review): hand-coded opcode; presumably STBEAR - confirm
	# against the Principles of Operation), otherwise stays a nop.
	.macro STBEAR address
	ALTERNATIVE "nop", ".insn s,0xb2010000,\address", 193
	.endm

	# Load the breaking-event address from \address; nop without
	# facility 193 (counterpart of STBEAR).
	.macro LBEAR address
	ALTERNATIVE "nop", ".insn s,0xb2000000,\address", 193
	.endm

	# Leave the kernel: load the PSW at \address. With facility 193 the
	# siy-format instruction is used directly; otherwise branch to the
	# out-of-line \lpswe code.
	.macro LPSWEY address,lpswe
	ALTERNATIVE "b \lpswe; nopr", ".insn siy,0xeb0000000071,\address,0", 193
	.endm

	# Copy the last-break address from the lowcore into the pt_regs
	# pointed to by \reg; nop without facility 193.
	.macro MBEAR reg
	ALTERNATIVE "brcl 0,0", __stringify(mvc __PT_LAST_BREAK(8,\reg),__LC_LAST_BREAK), 193
	.endm

	# Verify that %r15 still points inside the guard area of the current
	# stack; on overflow load \savearea into %r14 (so stack_overflow can
	# recover the saved registers) and jump to stack_overflow.
	.macro CHECK_STACK savearea
#ifdef CONFIG_CHECK_STACK
	tml	%r15,STACK_SIZE - CONFIG_STACK_GUARD
	lghi	%r14,\savearea
	jz	stack_overflow
#endif
	.endm

	# With vmap stacks, check that %r15 points into one of the known
	# per-cpu stacks (kernel, async, mcck, nodat, restart); jump to
	# \oklabel if so, otherwise fall through to stack_overflow with
	# \savearea in %r14. Clobbers %r14.
	.macro CHECK_VMAP_STACK savearea,oklabel
#ifdef CONFIG_VMAP_STACK
	lgr	%r14,%r15
	nill	%r14,0x10000 - STACK_SIZE
	oill	%r14,STACK_INIT
	clg	%r14,__LC_KERNEL_STACK
	je	\oklabel
	clg	%r14,__LC_ASYNC_STACK
	je	\oklabel
	clg	%r14,__LC_MCCK_STACK
	je	\oklabel
	clg	%r14,__LC_NODAT_STACK
	je	\oklabel
	clg	%r14,__LC_RESTART_STACK
	je	\oklabel
	lghi	%r14,\savearea
	j	stack_overflow
#else
	j	\oklabel
#endif
	.endm

	/*
	 * The TSTMSK macro generates a test-under-mask instruction by
	 * calculating the memory offset for the specified mask value.
	 * Mask value can be any constant. The macro shifts the mask
	 * value to calculate the memory offset for the test-under-mask
	 * instruction.
	 */
	.macro TSTMSK addr, mask, size=8, bytepos=0
	.if (\bytepos < \size) && (\mask >> 8)
		.if (\mask & 0xff)
		.error "Mask exceeds byte boundary"
		.endif
		# recurse with the mask shifted down one byte
		TSTMSK \addr, "(\mask >> 8)", \size, "(\bytepos + 1)"
		.exitm
	.endif
	.ifeq \mask
	.error "Mask must not be zero"
	.endif
	off = \size - \bytepos - 1
	tm	off+\addr, \mask
	.endm

	# Branch-prediction OFF/ON; the rrf opcode with function codes 12/13
	# is only patched in via alternative 82 (spectre mitigation -
	# NOTE(review): presumably PPA, perform-processor-assist; confirm).
	.macro BPOFF
	ALTERNATIVE "nop", ".insn rrf,0xb2e80000,0,0,12,0", 82
	.endm

	.macro BPON
	ALTERNATIVE "nop", ".insn rrf,0xb2e80000,0,0,13,0", 82
	.endm

	# Re-enable branch prediction on kernel entry, but only if one of
	# the \tif_mask bits is set in \tif_ptr.
	.macro BPENTER tif_ptr,tif_mask
	ALTERNATIVE "TSTMSK \tif_ptr,\tif_mask; jz .+8; .insn rrf,0xb2e80000,0,0,13,0", \
		    "j .+12; nop; nop", 82
	.endm

	# Disable (or, in the second alternative, enable) branch prediction
	# on kernel exit depending on the \tif_mask bits.
	.macro BPEXIT tif_ptr,tif_mask
	TSTMSK	\tif_ptr,\tif_mask
	ALTERNATIVE "jz .+8; .insn rrf,0xb2e80000,0,0,12,0", \
		    "jnz .+8; .insn rrf,0xb2e80000,0,0,13,0", 82
	.endm

#if IS_ENABLED(CONFIG_KVM)
	/*
	 * The OUTSIDE macro jumps to the provided label in case the value
	 * in the provided register is outside of the provided range. The
	 * macro is useful for checking whether a PSW stored in a register
	 * pair points inside or outside of a block of instructions.
	 * @reg: register to check
	 * @start: start of the range
	 * @end: end of the range
	 * @outside_label: jump here if @reg is outside of [@start..@end)
	 */
	.macro OUTSIDE reg,start,end,outside_label
	lgr	%r14,\reg
	larl	%r13,\start
	slgr	%r14,%r13
	# unsigned compare against the range size handles "below start"
	# (huge unsigned value after the subtract) and "at/past end" alike
	clgfrl	%r14,.Lrange_size\@
	jhe	\outside_label
	.section .rodata, "a"
	.align 4
.Lrange_size\@:
	.long	\end - \start
	.previous
	.endm

	# Leave SIE: clear the "in SIE" bit in the control block, restore
	# the kernel address space and point %r9 at sie_exit.
	.macro SIEEXIT
	lg	%r9,__SF_SIE_CONTROL(%r15)	# get control block pointer
	ni	__SIE_PROG0C+3(%r9),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_KERNEL_ASCE	# load primary asce
	larl	%r9,sie_exit			# skip forward to sie_exit
	.endm
#endif

	GEN_BR_THUNK %r14

	.section .kprobes.text, "ax"
.Ldummy:
	/*
	 * This nop exists only in order to avoid that __bpon starts at
	 * the beginning of the kprobes text section. In that case we would
	 * have several symbols at the same address. E.g. objdump would take
	 * an arbitrary symbol name when disassembling this code.
	 * With the added nop in between the __bpon symbol is unique
	 * again.
	 */
	nop	0

# Re-enable branch prediction; callable from C.
ENTRY(__bpon)
	.globl __bpon
	BPON
	BR_EX	%r14
ENDPROC(__bpon)

/*
 * Scheduler resume function, called by switch_to
 *  gpr2 = (task_struct *) prev
 *  gpr3 = (task_struct *) next
 * Returns:
 *  gpr2 = prev
 */
ENTRY(__switch_to)
	stmg	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
	lghi	%r4,__TASK_stack
	lghi	%r1,__TASK_thread
	llill	%r5,STACK_INIT
	stg	%r15,__THREAD_ksp(%r1,%r2)	# store kernel stack of prev
	lg	%r15,0(%r4,%r3)			# start of kernel stack of next
	agr	%r15,%r5			# end of kernel stack of next
	stg	%r3,__LC_CURRENT		# store task struct of next
	stg	%r15,__LC_KERNEL_STACK		# store end of kernel stack
	lg	%r15,__THREAD_ksp(%r1,%r3)	# load kernel stack of next
	aghi	%r3,__TASK_pid
	mvc	__LC_CURRENT_PID(4,%r0),0(%r3)	# store pid of next
	lmg	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
	ALTERNATIVE "nop", "lpp _LPP_OFFSET", 40
	BR_EX	%r14
ENDPROC(__switch_to)

#if IS_ENABLED(CONFIG_KVM)
/*
 * __sie64a calling convention:
 * %r2 pointer to sie control block phys
 * %r3 pointer to sie control block virt
 * %r4 guest register save area
 */
ENTRY(__sie64a)
	stmg	%r6,%r14,__SF_GPRS(%r15)	# save kernel registers
	lg	%r12,__LC_CURRENT
	stg	%r2,__SF_SIE_CONTROL_PHYS(%r15)	# save sie block physical..
	stg	%r3,__SF_SIE_CONTROL(%r15)	# ...and virtual addresses
	stg	%r4,__SF_SIE_SAVEAREA(%r15)	# save guest register save area
	xc	__SF_SIE_REASON(8,%r15),__SF_SIE_REASON(%r15) # reason code = 0
	mvc	__SF_SIE_FLAGS(8,%r15),__TI_flags(%r12) # copy thread flags
	lmg	%r0,%r13,0(%r4)			# load guest gprs 0-13
	lg	%r14,__LC_GMAP			# get gmap pointer
	ltgr	%r14,%r14
	jz	.Lsie_gmap			# no gmap -> keep current asce
	lctlg	%c1,%c1,__GMAP_ASCE(%r14)	# load primary asce
.Lsie_gmap:
	lg	%r14,__SF_SIE_CONTROL(%r15)	# get control block pointer
	oi	__SIE_PROG0C+3(%r14),1		# we are going into SIE now
	tm	__SIE_PROG20+3(%r14),3		# last exit...
	jnz	.Lsie_skip
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	jo	.Lsie_skip			# exit if fp/vx regs changed
	lg	%r14,__SF_SIE_CONTROL_PHYS(%r15)	# get sie block phys addr
	BPEXIT	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
.Lsie_entry:
	sie	0(%r14)
# Let the next instruction be NOP to avoid triggering a machine check
# and handling it in a guest as result of the instruction execution.
	nopr	7
.Lsie_leave:
	BPOFF
	BPENTER	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
.Lsie_skip:
	lg	%r14,__SF_SIE_CONTROL(%r15)	# get control block pointer
	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_KERNEL_ASCE	# load primary asce
.Lsie_done:
# some program checks are suppressing. C code (e.g. do_protection_exception)
# will rewind the PSW by the ILC, which is often 4 bytes in case of SIE. There
# are some corner cases (e.g. runtime instrumentation) where ILC is unpredictable.
# Other instructions between __sie64a and .Lsie_done should not cause program
# interrupts. So lets use 3 nops as a landing pad for all possible rewinds.
.Lrewind_pad6:
	nopr	7
.Lrewind_pad4:
	nopr	7
.Lrewind_pad2:
	nopr	7
	.globl sie_exit
sie_exit:
	lg	%r14,__SF_SIE_SAVEAREA(%r15)	# load guest register save area
	stmg	%r0,%r13,0(%r14)		# save guest gprs 0-13
	xgr	%r0,%r0				# clear guest registers to
	xgr	%r1,%r1				# prevent speculative use
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	lmg	%r6,%r14,__SF_GPRS(%r15)	# restore kernel registers
	lg	%r2,__SF_SIE_REASON(%r15)	# return exit reason code
	BR_EX	%r14
.Lsie_fault:
	lghi	%r14,-EFAULT
	stg	%r14,__SF_SIE_REASON(%r15)	# set exit reason code
	j	sie_exit

	# a fault on the rewind pads or at sie_exit itself is reported
	# to the caller as -EFAULT
	EX_TABLE(.Lrewind_pad6,.Lsie_fault)
	EX_TABLE(.Lrewind_pad4,.Lsie_fault)
	EX_TABLE(.Lrewind_pad2,.Lsie_fault)
	EX_TABLE(sie_exit,.Lsie_fault)
ENDPROC(__sie64a)
EXPORT_SYMBOL(__sie64a)
EXPORT_SYMBOL(sie_exit)
#endif

/*
 * SVC interrupt handler routine. System calls are synchronous events and
 * are entered with interrupts disabled.
 */

ENTRY(system_call)
	stpt	__LC_SYS_ENTER_TIMER
	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
	BPOFF
	lghi	%r14,0				# %r14 = 0: regular svc entry
.Lsysc_per:					# re-entry point for single-stepped svc
	STBEAR	__LC_LAST_BREAK
	lctlg	%c1,%c1,__LC_KERNEL_ASCE
	lg	%r12,__LC_CURRENT
	lg	%r15,__LC_KERNEL_STACK
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	stmg	%r0,%r7,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	BPENTER	__TI_flags(%r12),_TIF_ISOLATE_BP
	# clear user controlled register to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r8,%r8
	xgr	%r9,%r9
	xgr	%r10,%r10
	xgr	%r11,%r11
	la	%r2,STACK_FRAME_OVERHEAD(%r15)	# pointer to pt_regs
	mvc	__PT_R8(64,%r2),__LC_SAVE_AREA_SYNC
	MBEAR	%r2
	lgr	%r3,%r14			# second arg: per-trap flag
	brasl	%r14,__do_syscall
	lctlg	%c1,%c1,__LC_USER_ASCE
	mvc	__LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
	LBEAR	STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	stpt	__LC_EXIT_TIMER
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
ENDPROC(system_call)

#
# a new process exits the kernel with ret_from_fork
#
ENTRY(ret_from_fork)
	lgr	%r3,%r11
	brasl	%r14,__ret_from_fork
	# exit path mirrors the tail of system_call
	lctlg	%c1,%c1,__LC_USER_ASCE
	mvc	__LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
	LBEAR	STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	stpt	__LC_EXIT_TIMER
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
ENDPROC(ret_from_fork)

/*
 * Program check handler routine
 */

ENTRY(pgm_check_handler)
	stpt	__LC_SYS_ENTER_TIMER
	BPOFF
	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
	lg	%r12,__LC_CURRENT
	lghi	%r10,0				# %r10 = pt_regs flags, default 0
	lmg	%r8,%r9,__LC_PGM_OLD_PSW
	tmhh	%r8,0x0001			# coming from user space?
	jno	.Lpgm_skip_asce
	lctlg	%c1,%c1,__LC_KERNEL_ASCE
	j	3f				# -> fault in user space
.Lpgm_skip_asce:
#if IS_ENABLED(CONFIG_KVM)
	# cleanup critical section for program checks in __sie64a
	OUTSIDE	%r9,.Lsie_gmap,.Lsie_done,1f
	SIEEXIT
	lghi	%r10,_PIF_GUEST_FAULT
#endif
1:	tmhh	%r8,0x4000			# PER bit set in old PSW ?
	jnz	2f				# -> enabled, can't be a double fault
	tm	__LC_PGM_ILC+3,0x80		# check for per exception
	jnz	.Lpgm_svcper			# -> single stepped svc
2:	CHECK_STACK __LC_SAVE_AREA_SYNC
	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	# CHECK_VMAP_STACK branches to stack_overflow or 4f
	CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,4f
3:	BPENTER	__TI_flags(%r12),_TIF_ISOLATE_BP
	lg	%r15,__LC_KERNEL_STACK
4:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stg	%r10,__PT_FLAGS(%r11)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
	mvc	__PT_LAST_BREAK(8,%r11),__LC_PGM_LAST_BREAK
	stmg	%r8,%r9,__PT_PSW(%r11)

	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	lgr	%r2,%r11			# pass pointer to pt_regs
	brasl	%r14,__do_pgm_check
	tmhh	%r8,0x0001			# returning to user space?
	jno	.Lpgm_exit_kernel
	lctlg	%c1,%c1,__LC_USER_ASCE
	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
	stpt	__LC_EXIT_TIMER
.Lpgm_exit_kernel:
	mvc	__LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
	LBEAR	STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE

#
# single stepped system call
#
.Lpgm_svcper:
	mvc	__LC_RETURN_PSW(8),__LC_SVC_NEW_PSW
	larl	%r14,.Lsysc_per
	stg	%r14,__LC_RETURN_PSW+8
	lghi	%r14,1				# %r14 = 1: svc was single stepped
	LBEAR	__LC_PGM_LAST_BREAK
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE # branch to .Lsysc_per
ENDPROC(pgm_check_handler)

/*
 * Interrupt handler macro used for external and IO interrupts.
 */
.macro INT_HANDLER name,lc_old_psw,handler
ENTRY(\name)
	stckf	__LC_INT_CLOCK
	stpt	__LC_SYS_ENTER_TIMER
	STBEAR	__LC_LAST_BREAK
	BPOFF
	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
	lg	%r12,__LC_CURRENT
	lmg	%r8,%r9,\lc_old_psw
	tmhh	%r8,0x0001			# interrupting from user ?
	jnz	1f
#if IS_ENABLED(CONFIG_KVM)
	# interrupt may have hit inside the SIE critical section
	OUTSIDE	%r9,.Lsie_gmap,.Lsie_done,0f
	BPENTER	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
	SIEEXIT
#endif
0:	CHECK_STACK __LC_SAVE_AREA_ASYNC
	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	j	2f
1:	BPENTER	__TI_flags(%r12),_TIF_ISOLATE_BP
	lctlg	%c1,%c1,__LC_KERNEL_ASCE
	lg	%r15,__LC_KERNEL_STACK
2:	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r10,%r10
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
	MBEAR	%r11
	stmg	%r8,%r9,__PT_PSW(%r11)
	lgr	%r2,%r11			# pass pointer to pt_regs
	brasl	%r14,\handler
	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
	tmhh	%r8,0x0001			# returning to user ?
	jno	2f
	lctlg	%c1,%c1,__LC_USER_ASCE
	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
	stpt	__LC_EXIT_TIMER
2:	LBEAR	__PT_LAST_BREAK(%r11)
	lmg	%r0,%r15,__PT_R0(%r11)
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
ENDPROC(\name)
.endm

INT_HANDLER ext_int_handler,__LC_EXT_OLD_PSW,do_ext_irq
INT_HANDLER io_int_handler,__LC_IO_OLD_PSW,do_io_irq

/*
 * Load idle PSW.
 */
ENTRY(psw_idle)
	stg	%r14,(__SF_GPRS+8*8)(%r15)
	stg	%r3,__SF_EMPTY(%r15)
	larl	%r1,psw_idle_exit
	stg	%r1,__SF_EMPTY+8(%r15)		# resume address after wakeup
	larl	%r1,smp_cpu_mtid
	llgf	%r1,0(%r1)
	ltgr	%r1,%r1
	jz	.Lpsw_idle_stcctm		# no multithreading counters
	.insn	rsy,0xeb0000000017,%r1,5,__MT_CYCLES_ENTER(%r2)
.Lpsw_idle_stcctm:
	oi	__LC_CPU_FLAGS+7,_CIF_ENABLED_WAIT
	BPON
	stckf	__CLOCK_IDLE_ENTER(%r2)
	stpt	__TIMER_IDLE_ENTER(%r2)
	lpswe	__SF_EMPTY(%r15)
.globl psw_idle_exit
psw_idle_exit:
	BR_EX	%r14
ENDPROC(psw_idle)

/*
 * Machine check handler routines
 */
ENTRY(mcck_int_handler)
	stckf	__LC_MCCK_CLOCK
	BPOFF
	la	%r1,4095			# validate r1
	spt	__LC_CPU_TIMER_SAVE_AREA-4095(%r1)	# validate cpu timer
	LBEAR	__LC_LAST_BREAK_SAVE_AREA-4095(%r1)	# validate bear
	lmg	%r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# validate gprs
	lg	%r12,__LC_CURRENT
	lmg	%r8,%r9,__LC_MCK_OLD_PSW
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_SYSTEM_DAMAGE
	jo	.Lmcck_panic			# yes -> rest of mcck code invalid
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_CR_VALID
	jno	.Lmcck_panic			# control registers invalid -> panic
	la	%r14,4095
	lctlg	%c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r14) # validate ctl regs
	ptlb
	lghi	%r14,__LC_CPU_TIMER_SAVE_AREA
	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_CPU_TIMER_VALID
	jo	3f
	# cpu timer invalid: pick the most plausible saved timer value
	la	%r14,__LC_SYS_ENTER_TIMER
	clc	0(8,%r14),__LC_EXIT_TIMER
	jl	1f
	la	%r14,__LC_EXIT_TIMER
1:	clc	0(8,%r14),__LC_LAST_UPDATE_TIMER
	jl	2f
	la	%r14,__LC_LAST_UPDATE_TIMER
2:	spt	0(%r14)
	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
3:	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_PSW_MWP_VALID
	jno	.Lmcck_panic
	tmhh	%r8,0x0001			# interrupting from user ?
	jnz	.Lmcck_user
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_PSW_IA_VALID
	jno	.Lmcck_panic
#if IS_ENABLED(CONFIG_KVM)
	OUTSIDE	%r9,.Lsie_gmap,.Lsie_done,.Lmcck_stack
	OUTSIDE	%r9,.Lsie_entry,.Lsie_leave,4f
	oi	__LC_CPU_FLAGS+7, _CIF_MCCK_GUEST
4:	BPENTER	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
	SIEEXIT
	j	.Lmcck_stack
#endif
.Lmcck_user:
	BPENTER	__TI_flags(%r12),_TIF_ISOLATE_BP
.Lmcck_stack:
	lg	%r15,__LC_MCCK_STACK
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stctg	%c1,%c1,__PT_CR1(%r11)		# remember interrupted asce
	lctlg	%c1,%c1,__LC_KERNEL_ASCE
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lghi	%r14,__LC_GPREGS_SAVE_AREA+64
	stmg	%r0,%r7,__PT_R0(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r10,%r10
	mvc	__PT_R8(64,%r11),0(%r14)
	stmg	%r8,%r9,__PT_PSW(%r11)
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11			# pass pointer to pt_regs
	brasl	%r14,s390_do_machine_check
	cghi	%r2,0
	je	.Lmcck_return
	# nonzero return: handle mcck on the regular kernel stack
	lg	%r1,__LC_KERNEL_STACK		# switch to kernel stack
	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
	la	%r11,STACK_FRAME_OVERHEAD(%r1)
	lgr	%r2,%r11
	lgr	%r15,%r1
	brasl	%r14,s390_handle_mcck
.Lmcck_return:
	lctlg	%c1,%c1,__PT_CR1(%r11)		# restore interrupted asce
	lmg	%r0,%r10,__PT_R0(%r11)
	mvc	__LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
	tm	__LC_RETURN_MCCK_PSW+1,0x01	# returning to user ?
	jno	0f
	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
	stpt	__LC_EXIT_TIMER
0:	ALTERNATIVE "nop", __stringify(lghi %r12,__LC_LAST_BREAK_SAVE_AREA),193
	LBEAR	0(%r12)
	lmg	%r11,%r15,__PT_R11(%r11)
	LPSWEY	__LC_RETURN_MCCK_PSW,__LC_RETURN_MCCK_LPSWE

.Lmcck_panic:
	/*
	 * Iterate over all possible CPU addresses in the range 0..0xffff
	 * and stop each CPU using signal processor. Use compare and swap
	 * to allow just one CPU-stopper and prevent concurrent CPUs from
	 * stopping each other while leaving the others running.
	 */
	lhi	%r5,0
	lhi	%r6,1
	larl	%r7,.Lstop_lock
	cs	%r5,%r6,0(%r7)			# single CPU-stopper only
	jnz	4f
	larl	%r7,.Lthis_cpu
	stap	0(%r7)				# this CPU address
	lh	%r4,0(%r7)
	nilh	%r4,0
	lhi	%r0,1
	sll	%r0,16				# CPU counter
	lhi	%r3,0				# next CPU address
0:	cr	%r3,%r4
	je	2f				# skip stopping ourselves here
1:	sigp	%r1,%r3,SIGP_STOP		# stop next CPU
	brc	SIGP_CC_BUSY,1b
2:	ahi	%r3,1
	brct	%r0,0b
3:	sigp	%r1,%r4,SIGP_STOP		# stop this CPU
	brc	SIGP_CC_BUSY,3b
4:	j	4b

ENDPROC(mcck_int_handler)

ENTRY(restart_int_handler)
	ALTERNATIVE "nop", "lpp _LPP_OFFSET", 40
	stg	%r15,__LC_SAVE_AREA_RESTART
	TSTMSK	__LC_RESTART_FLAGS,RESTART_FLAG_CTLREGS,4
	jz	0f
	la	%r15,4095
	lctlg	%c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r15)
0:	larl	%r15,.Lstosm_tmp
	stosm	0(%r15),0x04			# turn dat on, keep irqs off
	lg	%r15,__LC_RESTART_STACK
	xc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r15),STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r14,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	mvc	STACK_FRAME_OVERHEAD+__PT_R15(8,%r15),__LC_SAVE_AREA_RESTART
	mvc	STACK_FRAME_OVERHEAD+__PT_PSW(16,%r15),__LC_RST_OLD_PSW
	xc	0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
	lg	%r1,__LC_RESTART_FN		# load fn, parm & source cpu
	lg	%r2,__LC_RESTART_DATA
	lgf	%r3,__LC_RESTART_SOURCE
	ltgr	%r3,%r3				# test source cpu address
	jm	1f				# negative -> skip source stop
0:	sigp	%r4,%r3,SIGP_SENSE		# sigp sense to source cpu
	brc	10,0b				# wait for status stored
1:	basr	%r14,%r1			# call function
	stap	__SF_EMPTY(%r15)		# store cpu address
	llgh	%r3,__SF_EMPTY(%r15)
2:	sigp	%r4,%r3,SIGP_STOP		# sigp stop to current cpu
	brc	2,2b
3:	j	3b
ENDPROC(restart_int_handler)

	.section .kprobes.text, "ax"

#if defined(CONFIG_CHECK_STACK) || defined(CONFIG_VMAP_STACK)
/*
 * The synchronous or the asynchronous stack overflowed. We are dead.
 * No need to properly save the registers, we are going to panic anyway.
 * Setup a pt_regs so that show_trace can provide a good call trace.
 * On entry %r14 holds the save area address set up by CHECK_STACK /
 * CHECK_VMAP_STACK.
 */
ENTRY(stack_overflow)
	lg	%r15,__LC_NODAT_STACK	# change to panic stack
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	stmg	%r8,%r9,__PT_PSW(%r11)
	mvc	__PT_R8(64,%r11),0(%r14)
	stg	%r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	jg	kernel_stack_overflow
ENDPROC(stack_overflow)
#endif

	.section .data, "aw"
	.align	4
.Lstop_lock:	.long	0
.Lthis_cpu:	.short	0
.Lstosm_tmp:	.byte	0
	.section .rodata, "a"
#define SYSCALL(esame,emu)	.quad __s390x_ ## esame
	.globl	sys_call_table
sys_call_table:
#include "asm/syscall_table.h"
#undef SYSCALL

#ifdef CONFIG_COMPAT

#define SYSCALL(esame,emu)	.quad __s390_ ## emu
	.globl	sys_call_table_emu
sys_call_table_emu:
#include "asm/syscall_table.h"
#undef SYSCALL
#endif