/* SPDX-License-Identifier: GPL-2.0 */
/*
 * S390 low-level entry points.
 *
 * Copyright IBM Corp. 1999, 2012
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *	      Hartmut Penner (hp@de.ibm.com),
 *	      Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 */

#include <linux/export.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/asm-extable.h>
#include <asm/alternative-asm.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/dwarf.h>
#include <asm/errno.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/page.h>
#include <asm/sigp.h>
#include <asm/irq.h>
#include <asm/vx-insn.h>
#include <asm/setup.h>
#include <asm/nmi.h>
#include <asm/nospec-insn.h>

_LPP_OFFSET	= __LC_LPP

	.macro STBEAR address
	ALTERNATIVE "nop", ".insn s,0xb2010000,\address", 193
	.endm

	.macro LBEAR address
	ALTERNATIVE "nop", ".insn s,0xb2000000,\address", 193
	.endm

	.macro LPSWEY address,lpswe
	ALTERNATIVE "b \lpswe; nopr", ".insn siy,0xeb0000000071,\address,0", 193
	.endm

	.macro MBEAR reg
	ALTERNATIVE "brcl 0,0", __stringify(mvc __PT_LAST_BREAK(8,\reg),__LC_LAST_BREAK), 193
	.endm

	.macro	CHECK_STACK savearea
#ifdef CONFIG_CHECK_STACK
	tml	%r15,THREAD_SIZE - CONFIG_STACK_GUARD
	lghi	%r14,\savearea
	jz	stack_overflow
#endif
	.endm

	.macro	CHECK_VMAP_STACK savearea,oklabel
#ifdef CONFIG_VMAP_STACK
	lgr	%r14,%r15
	nill	%r14,0x10000 - THREAD_SIZE
	oill	%r14,STACK_INIT_OFFSET
	clg	%r14,__LC_KERNEL_STACK
	je	\oklabel
	clg	%r14,__LC_ASYNC_STACK
	je	\oklabel
	clg	%r14,__LC_MCCK_STACK
	je	\oklabel
	clg	%r14,__LC_NODAT_STACK
	je	\oklabel
	clg	%r14,__LC_RESTART_STACK
	je	\oklabel
	lghi	%r14,\savearea
	j	stack_overflow
#else
	j	\oklabel
#endif
	.endm

	/*
	 * The TSTMSK macro generates a test-under-mask instruction by
	 * calculating the memory offset for the specified mask value.
	 * Mask value can be any constant. The macro shifts the mask
	 * value to calculate the memory offset for the test-under-mask
	 * instruction.
	 */
	.macro TSTMSK addr, mask, size=8, bytepos=0
	.if (\bytepos < \size) && (\mask >> 8)
		.if (\mask & 0xff)
			.error "Mask exceeds byte boundary"
		.endif
		TSTMSK \addr, "(\mask >> 8)", \size, "(\bytepos + 1)"
		.exitm
	.endif
	.ifeq \mask
		.error "Mask must not be zero"
	.endif
	off = \size - \bytepos - 1
	tm	off+\addr, \mask
	.endm

	.macro BPOFF
	ALTERNATIVE "nop", ".insn rrf,0xb2e80000,0,0,12,0", 82
	.endm

	.macro BPON
	ALTERNATIVE "nop", ".insn rrf,0xb2e80000,0,0,13,0", 82
	.endm

	.macro BPENTER tif_ptr,tif_mask
	ALTERNATIVE "TSTMSK \tif_ptr,\tif_mask; jz .+8; .insn rrf,0xb2e80000,0,0,13,0", \
		    "j .+12; nop; nop", 82
	.endm

	.macro BPEXIT tif_ptr,tif_mask
	TSTMSK	\tif_ptr,\tif_mask
	ALTERNATIVE "jz .+8; .insn rrf,0xb2e80000,0,0,12,0", \
		    "jnz .+8; .insn rrf,0xb2e80000,0,0,13,0", 82
	.endm
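
	/*
	 * Illustrative TSTMSK expansion (a sketch, not generated code):
	 * a mask confined to a single byte of an 8 byte field, e.g.
	 *	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	 * makes the recursion above locate the byte containing the mask
	 * bit and emit a single test-under-mask instruction:
	 *	tm	7+__LC_CPU_FLAGS,_CIF_FPU
	 * A mask with bits in more than one byte is rejected at assembly
	 * time with "Mask exceeds byte boundary".
	 */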

#if IS_ENABLED(CONFIG_KVM)
	/*
	 * The OUTSIDE macro jumps to the provided label in case the value
	 * in the provided register is outside of the provided range. The
	 * macro is useful for checking whether a PSW stored in a register
	 * pair points inside or outside of a block of instructions.
	 * @reg: register to check
	 * @start: start of the range
	 * @end: end of the range
	 * @outside_label: jump here if @reg is outside of [@start..@end)
	 */
	.macro OUTSIDE reg,start,end,outside_label
	lgr	%r14,\reg
	larl	%r13,\start
	slgr	%r14,%r13
	clgfrl	%r14,.Lrange_size\@
	jhe	\outside_label
	.section .rodata, "a"
	.balign	4
.Lrange_size\@:
	.long	\end - \start
	.previous
	.endm

	.macro SIEEXIT
	lg	%r9,__SF_SIE_CONTROL(%r15)	# get control block pointer
	ni	__SIE_PROG0C+3(%r9),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_KERNEL_ASCE	# load primary asce
	larl	%r9,sie_exit			# skip forward to sie_exit
	.endm
#endif
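
	/*
	 * Illustrative use of OUTSIDE/SIEEXIT (a sketch mirroring the
	 * interrupt handlers below): with the old PSW loaded into
	 * %r8,%r9, the sequence
	 *	OUTSIDE	%r9,.Lsie_gmap,.Lsie_done,0f
	 *	SIEEXIT
	 * branches to the local label 0f if the interruption address in
	 * %r9 lies outside the [.Lsie_gmap,.Lsie_done) SIE critical
	 * section and otherwise falls through into the SIEEXIT cleanup.
	 * Note that OUTSIDE clobbers %r13 and %r14, and SIEEXIT
	 * clobbers %r9.
	 */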

	.macro STACKLEAK_ERASE
#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
	brasl	%r14,stackleak_erase_on_task_stack
#endif
	.endm

	GEN_BR_THUNK %r14

	.section .kprobes.text, "ax"
.Ldummy:
	/*
	 * The following nop exists only to prevent the next symbol from
	 * starting at the beginning of the kprobes text section. Otherwise
	 * several symbols would share the same address, and e.g. objdump
	 * would pick an arbitrary one of them when disassembling the code.
	 * With the added nop in between this cannot happen.
	 */
	nop	0

/*
 * Scheduler resume function, called by switch_to
 *  gpr2 = (task_struct *) prev
 *  gpr3 = (task_struct *) next
 * Returns:
 *  gpr2 = prev
 */
SYM_FUNC_START(__switch_to)
	stmg	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
	lghi	%r4,__TASK_stack
	lghi	%r1,__TASK_thread
	llill	%r5,STACK_INIT_OFFSET
	stg	%r15,__THREAD_ksp(%r1,%r2)	# store kernel stack of prev
	lg	%r15,0(%r4,%r3)			# start of kernel stack of next
	agr	%r15,%r5			# end of kernel stack of next
	stg	%r3,__LC_CURRENT		# store task struct of next
	stg	%r15,__LC_KERNEL_STACK		# store end of kernel stack
	lg	%r15,__THREAD_ksp(%r1,%r3)	# load kernel stack of next
	aghi	%r3,__TASK_pid
	mvc	__LC_CURRENT_PID(4,%r0),0(%r3)	# store pid of next
	lmg	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
	ALTERNATIVE "nop", "lpp _LPP_OFFSET", 40
	BR_EX	%r14
SYM_FUNC_END(__switch_to)

#if IS_ENABLED(CONFIG_KVM)
/*
 * __sie64a calling convention:
 * %r2 pointer to sie control block phys
 * %r3 pointer to sie control block virt
 * %r4 guest register save area
 */
SYM_FUNC_START(__sie64a)
	stmg	%r6,%r14,__SF_GPRS(%r15)	# save kernel registers
	lg	%r12,__LC_CURRENT
	stg	%r2,__SF_SIE_CONTROL_PHYS(%r15)	# save sie block physical..
	stg	%r3,__SF_SIE_CONTROL(%r15)	# ...and virtual addresses
	stg	%r4,__SF_SIE_SAVEAREA(%r15)	# save guest register save area
	xc	__SF_SIE_REASON(8,%r15),__SF_SIE_REASON(%r15) # reason code = 0
	mvc	__SF_SIE_FLAGS(8,%r15),__TI_flags(%r12) # copy thread flags
	lmg	%r0,%r13,0(%r4)			# load guest gprs 0-13
	lg	%r14,__LC_GMAP			# get gmap pointer
	ltgr	%r14,%r14
	jz	.Lsie_gmap
	lctlg	%c1,%c1,__GMAP_ASCE(%r14)	# load primary asce
.Lsie_gmap:
	lg	%r14,__SF_SIE_CONTROL(%r15)	# get control block pointer
	oi	__SIE_PROG0C+3(%r14),1		# we are going into SIE now
	tm	__SIE_PROG20+3(%r14),3		# last exit...
	jnz	.Lsie_skip
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	jo	.Lsie_skip			# exit if fp/vx regs changed
	lg	%r14,__SF_SIE_CONTROL_PHYS(%r15)	# get sie block phys addr
	BPEXIT	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
.Lsie_entry:
	sie	0(%r14)
# Let the next instruction be NOP to avoid triggering a machine check
# and handling it in a guest as a result of the instruction execution.
	nopr	7
.Lsie_leave:
	BPOFF
	BPENTER	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
.Lsie_skip:
	lg	%r14,__SF_SIE_CONTROL(%r15)	# get control block pointer
	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_KERNEL_ASCE	# load primary asce
.Lsie_done:
# Some program checks are suppressing. C code (e.g. do_protection_exception)
# will rewind the PSW by the ILC, which is often 4 bytes in case of SIE. There
# are some corner cases (e.g. runtime instrumentation) where ILC is unpredictable.
# Other instructions between __sie64a and .Lsie_done should not cause program
# interrupts. So let's use 3 nops as a landing pad for all possible rewinds.
.Lrewind_pad6:
	nopr	7
.Lrewind_pad4:
	nopr	7
.Lrewind_pad2:
	nopr	7
SYM_INNER_LABEL(sie_exit, SYM_L_GLOBAL)
	lg	%r14,__SF_SIE_SAVEAREA(%r15)	# load guest register save area
	stmg	%r0,%r13,0(%r14)		# save guest gprs 0-13
	xgr	%r0,%r0				# clear guest registers to
	xgr	%r1,%r1				# prevent speculative use
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	lmg	%r6,%r14,__SF_GPRS(%r15)	# restore kernel registers
	lg	%r2,__SF_SIE_REASON(%r15)	# return exit reason code
	BR_EX	%r14
.Lsie_fault:
	lghi	%r14,-EFAULT
	stg	%r14,__SF_SIE_REASON(%r15)	# set exit reason code
	j	sie_exit

	EX_TABLE(.Lrewind_pad6,.Lsie_fault)
	EX_TABLE(.Lrewind_pad4,.Lsie_fault)
	EX_TABLE(.Lrewind_pad2,.Lsie_fault)
	EX_TABLE(sie_exit,.Lsie_fault)
SYM_FUNC_END(__sie64a)
EXPORT_SYMBOL(__sie64a)
EXPORT_SYMBOL(sie_exit)
#endif
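
/*
 * Worked example for the landing pad above (a sketch): when a program
 * check hits the critical section, pgm_check_handler runs SIEEXIT,
 * which reports sie_exit as the interruption address. A suppressing
 * program check makes C code rewind the PSW by the ILC, so a 4 byte
 * ILC lands on .Lrewind_pad4; its EX_TABLE entry then redirects
 * execution to .Lsie_fault, which returns -EFAULT as the reason code.
 */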

/*
 * SVC interrupt handler routine. System calls are synchronous events and
 * are entered with interrupts disabled.
 */

SYM_CODE_START(system_call)
	stpt	__LC_SYS_ENTER_TIMER
	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
	BPOFF
	lghi	%r14,0
.Lsysc_per:
	STBEAR	__LC_LAST_BREAK
	lctlg	%c1,%c1,__LC_KERNEL_ASCE
	lg	%r15,__LC_KERNEL_STACK
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	stmg	%r0,%r7,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r8,%r8
	xgr	%r9,%r9
	xgr	%r10,%r10
	xgr	%r11,%r11
	la	%r2,STACK_FRAME_OVERHEAD(%r15)	# pointer to pt_regs
	mvc	__PT_R8(64,%r2),__LC_SAVE_AREA_SYNC
	MBEAR	%r2
	lgr	%r3,%r14
	brasl	%r14,__do_syscall
	STACKLEAK_ERASE
	lctlg	%c1,%c1,__LC_USER_ASCE
	mvc	__LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
	BPON
	LBEAR	STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	stpt	__LC_EXIT_TIMER
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
SYM_CODE_END(system_call)

#
# a new process exits the kernel with ret_from_fork
#
SYM_CODE_START(ret_from_fork)
	lgr	%r3,%r11
	brasl	%r14,__ret_from_fork
	STACKLEAK_ERASE
	lctlg	%c1,%c1,__LC_USER_ASCE
	mvc	__LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
	BPON
	LBEAR	STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	stpt	__LC_EXIT_TIMER
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
SYM_CODE_END(ret_from_fork)
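
/*
 * A note on the %r14 convention above (a sketch of the contract with
 * __do_syscall): system_call enters with %r14 = 0, while the single
 * stepped svc path (.Lpgm_svcper below) re-enters at .Lsysc_per with
 * %r14 = 1. The value is copied to %r3 and passed as the second
 * argument to __do_syscall, telling C code whether a pending PER event
 * has to be handled on top of the system call.
 */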

/*
 * Program check handler routine
 */

SYM_CODE_START(pgm_check_handler)
	stpt	__LC_SYS_ENTER_TIMER
	BPOFF
	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
	lghi	%r10,0
	lmg	%r8,%r9,__LC_PGM_OLD_PSW
	tmhh	%r8,0x0001		# coming from user space?
	jno	.Lpgm_skip_asce
	lctlg	%c1,%c1,__LC_KERNEL_ASCE
	j	3f			# -> fault in user space
.Lpgm_skip_asce:
#if IS_ENABLED(CONFIG_KVM)
	# cleanup critical section for program checks in __sie64a
	OUTSIDE	%r9,.Lsie_gmap,.Lsie_done,1f
	BPENTER	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
	SIEEXIT
	lghi	%r10,_PIF_GUEST_FAULT
#endif
1:	tmhh	%r8,0x4000		# PER bit set in old PSW ?
	jnz	2f			# -> enabled, can't be a double fault
	tm	__LC_PGM_ILC+3,0x80	# check for per exception
	jnz	.Lpgm_svcper		# -> single stepped svc
2:	CHECK_STACK __LC_SAVE_AREA_SYNC
	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	# CHECK_VMAP_STACK branches to stack_overflow or 4f
	CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,4f
3:	lg	%r15,__LC_KERNEL_STACK
4:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stg	%r10,__PT_FLAGS(%r11)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
	mvc	__PT_LAST_BREAK(8,%r11),__LC_PGM_LAST_BREAK
	stmg	%r8,%r9,__PT_PSW(%r11)

	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	lgr	%r2,%r11
	brasl	%r14,__do_pgm_check
	tmhh	%r8,0x0001		# returning to user space?
	jno	.Lpgm_exit_kernel
	STACKLEAK_ERASE
	lctlg	%c1,%c1,__LC_USER_ASCE
	BPON
	stpt	__LC_EXIT_TIMER
.Lpgm_exit_kernel:
	mvc	__LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
	LBEAR	STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE

#
# single stepped system call
#
.Lpgm_svcper:
	mvc	__LC_RETURN_PSW(8),__LC_SVC_NEW_PSW
	larl	%r14,.Lsysc_per
	stg	%r14,__LC_RETURN_PSW+8
	lghi	%r14,1
	LBEAR	__LC_PGM_LAST_BREAK
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE # branch to .Lsysc_per
SYM_CODE_END(pgm_check_handler)
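
/*
 * A note on the tmhh masks used above and below (PSW layout sketch):
 * tmhh tests bits 0-15 of %r8, i.e. the first halfword of the old PSW
 * mask. Mask 0x0001 therefore tests PSW bit 15 (problem state, set
 * when coming from user space) and mask 0x4000 tests PSW bit 1 (the
 * PER mask).
 */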

/*
 * Interrupt handler macro used for external and IO interrupts.
 */
.macro INT_HANDLER name,lc_old_psw,handler
SYM_CODE_START(\name)
	stckf	__LC_INT_CLOCK
	stpt	__LC_SYS_ENTER_TIMER
	STBEAR	__LC_LAST_BREAK
	BPOFF
	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
	lmg	%r8,%r9,\lc_old_psw
	tmhh	%r8,0x0001			# interrupting from user ?
	jnz	1f
#if IS_ENABLED(CONFIG_KVM)
	OUTSIDE	%r9,.Lsie_gmap,.Lsie_done,0f
	BPENTER	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
	SIEEXIT
#endif
0:	CHECK_STACK __LC_SAVE_AREA_ASYNC
	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	j	2f
1:	lctlg	%c1,%c1,__LC_KERNEL_ASCE
	lg	%r15,__LC_KERNEL_STACK
2:	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r10,%r10
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
	MBEAR	%r11
	stmg	%r8,%r9,__PT_PSW(%r11)
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,\handler
	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
	tmhh	%r8,0x0001		# returning to user ?
	jno	2f
	STACKLEAK_ERASE
	lctlg	%c1,%c1,__LC_USER_ASCE
	BPON
	stpt	__LC_EXIT_TIMER
2:	LBEAR	__PT_LAST_BREAK(%r11)
	lmg	%r0,%r15,__PT_R0(%r11)
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
SYM_CODE_END(\name)
.endm

	.section .irqentry.text, "ax"

INT_HANDLER ext_int_handler,__LC_EXT_OLD_PSW,do_ext_irq
INT_HANDLER io_int_handler,__LC_IO_OLD_PSW,do_io_irq

	.section .kprobes.text, "ax"

/*
 * Load idle PSW.
 */
SYM_FUNC_START(psw_idle)
	stg	%r14,(__SF_GPRS+8*8)(%r15)
	stg	%r3,__SF_EMPTY(%r15)
	larl	%r1,psw_idle_exit
	stg	%r1,__SF_EMPTY+8(%r15)
	larl	%r1,smp_cpu_mtid
	llgf	%r1,0(%r1)
	ltgr	%r1,%r1
	jz	.Lpsw_idle_stcctm
	.insn	rsy,0xeb0000000017,%r1,5,__MT_CYCLES_ENTER(%r2)
.Lpsw_idle_stcctm:
	oi	__LC_CPU_FLAGS+7,_CIF_ENABLED_WAIT
	BPON
	stckf	__CLOCK_IDLE_ENTER(%r2)
	stpt	__TIMER_IDLE_ENTER(%r2)
	lpswe	__SF_EMPTY(%r15)
SYM_INNER_LABEL(psw_idle_exit, SYM_L_GLOBAL)
	BR_EX	%r14
SYM_FUNC_END(psw_idle)
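
/*
 * How psw_idle builds its wait PSW (a sketch of the convention with
 * its C caller): %r2 points to the per-cpu idle data and %r3 holds the
 * PSW mask chosen by C code. The mask is stored at __SF_EMPTY(%r15)
 * and the address of psw_idle_exit right behind it, so the 16 bytes at
 * __SF_EMPTY form a complete PSW for the final lpswe: the CPU enters
 * the wait state and, once woken up by an interrupt, continues at
 * psw_idle_exit.
 */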

/*
 * Machine check handler routines
 */
SYM_CODE_START(mcck_int_handler)
	BPOFF
	la	%r1,4095		# validate r1
	spt	__LC_CPU_TIMER_SAVE_AREA-4095(%r1)	# validate cpu timer
	LBEAR	__LC_LAST_BREAK_SAVE_AREA-4095(%r1)	# validate bear
	lmg	%r0,%r15,__LC_GPREGS_SAVE_AREA	# validate gprs
	lmg	%r8,%r9,__LC_MCK_OLD_PSW
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_SYSTEM_DAMAGE
	jo	.Lmcck_panic		# yes -> rest of mcck code invalid
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_CR_VALID
	jno	.Lmcck_panic		# control registers invalid -> panic
	lctlg	%c0,%c15,__LC_CREGS_SAVE_AREA	# validate ctl regs
	ptlb
	lghi	%r14,__LC_CPU_TIMER_SAVE_AREA
	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_CPU_TIMER_VALID
	jo	3f
	la	%r14,__LC_SYS_ENTER_TIMER
	clc	0(8,%r14),__LC_EXIT_TIMER
	jl	1f
	la	%r14,__LC_EXIT_TIMER
1:	clc	0(8,%r14),__LC_LAST_UPDATE_TIMER
	jl	2f
	la	%r14,__LC_LAST_UPDATE_TIMER
2:	spt	0(%r14)
	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
3:	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_PSW_MWP_VALID
	jno	.Lmcck_panic
	tmhh	%r8,0x0001		# interrupting from user ?
	jnz	.Lmcck_user
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_PSW_IA_VALID
	jno	.Lmcck_panic
#if IS_ENABLED(CONFIG_KVM)
	OUTSIDE	%r9,.Lsie_gmap,.Lsie_done,.Lmcck_user
	OUTSIDE	%r9,.Lsie_entry,.Lsie_leave,4f
	oi	__LC_CPU_FLAGS+7,_CIF_MCCK_GUEST
4:	BPENTER	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
	SIEEXIT
#endif
.Lmcck_user:
	lg	%r15,__LC_MCCK_STACK
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stctg	%c1,%c1,__PT_CR1(%r11)
	lctlg	%c1,%c1,__LC_KERNEL_ASCE
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lghi	%r14,__LC_GPREGS_SAVE_AREA+64
	stmg	%r0,%r7,__PT_R0(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r10,%r10
	mvc	__PT_R8(64,%r11),0(%r14)
	stmg	%r8,%r9,__PT_PSW(%r11)
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,s390_do_machine_check
	lctlg	%c1,%c1,__PT_CR1(%r11)
	lmg	%r0,%r10,__PT_R0(%r11)
	mvc	__LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
	tm	__LC_RETURN_MCCK_PSW+1,0x01	# returning to user ?
	jno	0f
	BPON
	stpt	__LC_EXIT_TIMER
0:	ALTERNATIVE "nop", __stringify(lghi %r12,__LC_LAST_BREAK_SAVE_AREA),193
	LBEAR	0(%r12)
	lmg	%r11,%r15,__PT_R11(%r11)
	LPSWEY	__LC_RETURN_MCCK_PSW,__LC_RETURN_MCCK_LPSWE

.Lmcck_panic:
	/*
	 * Iterate over all possible CPU addresses in the range 0..0xffff
	 * and stop each CPU using signal processor. Use compare and swap
	 * to allow just one CPU-stopper and prevent concurrent CPUs from
	 * stopping each other while leaving the others running.
	 */
	lhi	%r5,0
	lhi	%r6,1
	larl	%r7,stop_lock
	cs	%r5,%r6,0(%r7)		# single CPU-stopper only
	jnz	4f
	larl	%r7,this_cpu
	stap	0(%r7)			# this CPU address
	lh	%r4,0(%r7)
	nilh	%r4,0
	lhi	%r0,1
	sll	%r0,16			# CPU counter
	lhi	%r3,0			# next CPU address
0:	cr	%r3,%r4
	je	2f
1:	sigp	%r1,%r3,SIGP_STOP	# stop next CPU
	brc	SIGP_CC_BUSY,1b
2:	ahi	%r3,1
	brct	%r0,0b
3:	sigp	%r1,%r4,SIGP_STOP	# stop this CPU
	brc	SIGP_CC_BUSY,3b
4:	j	4b
SYM_CODE_END(mcck_int_handler)

SYM_CODE_START(restart_int_handler)
	ALTERNATIVE "nop", "lpp _LPP_OFFSET", 40
	stg	%r15,__LC_SAVE_AREA_RESTART
	TSTMSK	__LC_RESTART_FLAGS,RESTART_FLAG_CTLREGS,4
	jz	0f
	lctlg	%c0,%c15,__LC_CREGS_SAVE_AREA
0:	larl	%r15,daton_psw
	lpswe	0(%r15)				# turn dat on, keep irqs off
.Ldaton:
	lg	%r15,__LC_RESTART_STACK
	xc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r15),STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r14,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	mvc	STACK_FRAME_OVERHEAD+__PT_R15(8,%r15),__LC_SAVE_AREA_RESTART
	mvc	STACK_FRAME_OVERHEAD+__PT_PSW(16,%r15),__LC_RST_OLD_PSW
	xc	0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
	lg	%r1,__LC_RESTART_FN		# load fn, parm & source cpu
	lg	%r2,__LC_RESTART_DATA
	lgf	%r3,__LC_RESTART_SOURCE
	ltgr	%r3,%r3				# test source cpu address
	jm	1f				# negative -> skip source stop
0:	sigp	%r4,%r3,SIGP_SENSE		# sigp sense to source cpu
	brc	10,0b				# wait for status stored
1:	basr	%r14,%r1			# call function
	stap	__SF_EMPTY(%r15)		# store cpu address
	llgh	%r3,__SF_EMPTY(%r15)
2:	sigp	%r4,%r3,SIGP_STOP		# sigp stop to current cpu
	brc	2,2b
3:	j	3b
SYM_CODE_END(restart_int_handler)

	.section .kprobes.text, "ax"

#if defined(CONFIG_CHECK_STACK) || defined(CONFIG_VMAP_STACK)
/*
 * The synchronous or the asynchronous stack overflowed. We are dead.
 * No need to properly save the registers; we are going to panic anyway.
 * Set up a pt_regs so that show_trace can provide a good call trace.
 */
SYM_CODE_START(stack_overflow)
	lg	%r15,__LC_NODAT_STACK	# change to panic stack
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	stmg	%r8,%r9,__PT_PSW(%r11)
	mvc	__PT_R8(64,%r11),0(%r14)
	stg	%r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	jg	kernel_stack_overflow
SYM_CODE_END(stack_overflow)
#endif

	.section .data, "aw"
	.balign	4
SYM_DATA_LOCAL(stop_lock,	.long 0)
SYM_DATA_LOCAL(this_cpu,	.short 0)
	.balign	8
SYM_DATA_START_LOCAL(daton_psw)
	.quad	PSW_KERNEL_BITS
	.quad	.Ldaton
SYM_DATA_END(daton_psw)

	.section .rodata, "a"
	.balign	8
#define SYSCALL(esame,emu)	.quad __s390x_ ## esame
SYM_DATA_START(sys_call_table)
#include "asm/syscall_table.h"
SYM_DATA_END(sys_call_table)
#undef SYSCALL

#ifdef CONFIG_COMPAT

#define SYSCALL(esame,emu)	.quad __s390_ ## emu
SYM_DATA_START(sys_call_table_emu)
#include "asm/syscall_table.h"
SYM_DATA_END(sys_call_table_emu)
#undef SYSCALL
#endif
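
/*
 * Illustrative expansion (a sketch; the actual entries come from
 * asm/syscall_table.h): a hypothetical table line such as
 *	SYSCALL(sys_read,compat_sys_read)
 * emits ".quad __s390x_sys_read" into sys_call_table above, and, with
 * CONFIG_COMPAT, ".quad __s390_compat_sys_read" into
 * sys_call_table_emu.
 */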