1/* SPDX-License-Identifier: GPL-2.0 */ 2/* 3 * S390 low-level entry points. 4 * 5 * Copyright IBM Corp. 1999, 2012 6 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), 7 * Hartmut Penner (hp@de.ibm.com), 8 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), 9 * Heiko Carstens <heiko.carstens@de.ibm.com> 10 */ 11 12#include <linux/init.h> 13#include <linux/linkage.h> 14#include <asm/alternative-asm.h> 15#include <asm/processor.h> 16#include <asm/cache.h> 17#include <asm/ctl_reg.h> 18#include <asm/dwarf.h> 19#include <asm/errno.h> 20#include <asm/ptrace.h> 21#include <asm/thread_info.h> 22#include <asm/asm-offsets.h> 23#include <asm/unistd.h> 24#include <asm/page.h> 25#include <asm/sigp.h> 26#include <asm/irq.h> 27#include <asm/vx-insn.h> 28#include <asm/setup.h> 29#include <asm/nmi.h> 30#include <asm/export.h> 31#include <asm/nospec-insn.h> 32 33__PT_R0 = __PT_GPRS 34__PT_R1 = __PT_GPRS + 8 35__PT_R2 = __PT_GPRS + 16 36__PT_R3 = __PT_GPRS + 24 37__PT_R4 = __PT_GPRS + 32 38__PT_R5 = __PT_GPRS + 40 39__PT_R6 = __PT_GPRS + 48 40__PT_R7 = __PT_GPRS + 56 41__PT_R8 = __PT_GPRS + 64 42__PT_R9 = __PT_GPRS + 72 43__PT_R10 = __PT_GPRS + 80 44__PT_R11 = __PT_GPRS + 88 45__PT_R12 = __PT_GPRS + 96 46__PT_R13 = __PT_GPRS + 104 47__PT_R14 = __PT_GPRS + 112 48__PT_R15 = __PT_GPRS + 120 49 50STACK_SHIFT = PAGE_SHIFT + THREAD_SIZE_ORDER 51STACK_SIZE = 1 << STACK_SHIFT 52STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE 53 54_TIF_WORK = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ 55 _TIF_UPROBE | _TIF_GUARDED_STORAGE | _TIF_PATCH_PENDING) 56_TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \ 57 _TIF_SYSCALL_TRACEPOINT) 58_CIF_WORK = (_CIF_ASCE_PRIMARY | _CIF_ASCE_SECONDARY | _CIF_FPU) 59_PIF_WORK = (_PIF_PER_TRAP | _PIF_SYSCALL_RESTART) 60 61_LPP_OFFSET = __LC_LPP 62 63 .macro TRACE_IRQS_ON 64#ifdef CONFIG_TRACE_IRQFLAGS 65 basr %r2,%r0 66 brasl %r14,trace_hardirqs_on_caller 67#endif 68 .endm 69 70 .macro TRACE_IRQS_OFF 71#ifdef CONFIG_TRACE_IRQFLAGS 72 basr %r2,%r0 73 brasl %r14,trace_hardirqs_off_caller 74#endif 75 .endm 76 77 .macro LOCKDEP_SYS_EXIT 78#ifdef CONFIG_LOCKDEP 79 tm __PT_PSW+1(%r11),0x01 # returning to user ? 80 jz .+10 81 brasl %r14,lockdep_sys_exit 82#endif 83 .endm 84 85 .macro CHECK_STACK savearea 86#ifdef CONFIG_CHECK_STACK 87 tml %r15,STACK_SIZE - CONFIG_STACK_GUARD 88 lghi %r14,\savearea 89 jz stack_overflow 90#endif 91 .endm 92 93 .macro CHECK_VMAP_STACK savearea,oklabel 94#ifdef CONFIG_VMAP_STACK 95 lgr %r14,%r15 96 nill %r14,0x10000 - STACK_SIZE 97 oill %r14,STACK_INIT 98 clg %r14,__LC_KERNEL_STACK 99 je \oklabel 100 clg %r14,__LC_ASYNC_STACK 101 je \oklabel 102 clg %r14,__LC_NODAT_STACK 103 je \oklabel 104 clg %r14,__LC_RESTART_STACK 105 je \oklabel 106 lghi %r14,\savearea 107 j stack_overflow 108#else 109 j \oklabel 110#endif 111 .endm 112 113 .macro SWITCH_ASYNC savearea,timer 114 tmhh %r8,0x0001 # interrupting from user ? 115 jnz 2f 116#if IS_ENABLED(CONFIG_KVM) 117 lgr %r14,%r9 118 larl %r13,.Lsie_gmap 119 slgr %r14,%r13 120 lghi %r13,.Lsie_done - .Lsie_gmap 121 clgr %r14,%r13 122 jhe 0f 123 lghi %r11,\savearea # inside critical section, do cleanup 124 brasl %r14,.Lcleanup_sie 125#endif 1260: larl %r13,.Lpsw_idle_exit 127 cgr %r13,%r9 128 jne 1f 129 130 mvc __CLOCK_IDLE_EXIT(8,%r2), __LC_INT_CLOCK 131 mvc __TIMER_IDLE_EXIT(8,%r2), __LC_ASYNC_ENTER_TIMER 132 # account system time going idle 133 ni __LC_CPU_FLAGS+7,255-_CIF_ENABLED_WAIT 134 135 lg %r13,__LC_STEAL_TIMER 136 alg %r13,__CLOCK_IDLE_ENTER(%r2) 137 slg %r13,__LC_LAST_UPDATE_CLOCK 138 stg %r13,__LC_STEAL_TIMER 139 140 mvc __LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2) 141 142 lg %r13,__LC_SYSTEM_TIMER 143 alg %r13,__LC_LAST_UPDATE_TIMER 144 slg %r13,__TIMER_IDLE_ENTER(%r2) 145 stg %r13,__LC_SYSTEM_TIMER 146 mvc __LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2) 147 148 nihh %r8,0xfcfd # clear wait state and irq bits 1491: lg %r14,__LC_ASYNC_STACK # are we already on the target stack? 150 slgr %r14,%r15 151 srag %r14,%r14,STACK_SHIFT 152 jnz 3f 153 CHECK_STACK \savearea 154 aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) 155 j 4f 1562: UPDATE_VTIME %r14,%r15,\timer 157 BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP 1583: lg %r15,__LC_ASYNC_STACK # load async stack 1594: la %r11,STACK_FRAME_OVERHEAD(%r15) 160 .endm 161 162 .macro UPDATE_VTIME w1,w2,enter_timer 163 lg \w1,__LC_EXIT_TIMER 164 lg \w2,__LC_LAST_UPDATE_TIMER 165 slg \w1,\enter_timer 166 slg \w2,__LC_EXIT_TIMER 167 alg \w1,__LC_USER_TIMER 168 alg \w2,__LC_SYSTEM_TIMER 169 stg \w1,__LC_USER_TIMER 170 stg \w2,__LC_SYSTEM_TIMER 171 mvc __LC_LAST_UPDATE_TIMER(8),\enter_timer 172 .endm 173 174 .macro RESTORE_SM_CLEAR_PER 175 stg %r8,__LC_RETURN_PSW 176 ni __LC_RETURN_PSW,0xbf 177 ssm __LC_RETURN_PSW 178 .endm 179 180 .macro ENABLE_INTS 181 stosm __SF_EMPTY(%r15),3 182 .endm 183 184 .macro ENABLE_INTS_TRACE 185 TRACE_IRQS_ON 186 ENABLE_INTS 187 .endm 188 189 .macro DISABLE_INTS 190 stnsm __SF_EMPTY(%r15),0xfc 191 .endm 192 193 .macro DISABLE_INTS_TRACE 194 DISABLE_INTS 195 TRACE_IRQS_OFF 196 .endm 197 198 .macro STCK savearea 199#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES 200 .insn s,0xb27c0000,\savearea # store clock fast 201#else 202 .insn s,0xb2050000,\savearea # store clock 203#endif 204 .endm 205 206 /* 207 * The TSTMSK macro generates a test-under-mask instruction by 208 * calculating the memory offset for the specified mask value. 209 * Mask value can be any constant. The macro shifts the mask 210 * value to calculate the memory offset for the test-under-mask 211 * instruction. 212 */ 213 .macro TSTMSK addr, mask, size=8, bytepos=0 214 .if (\bytepos < \size) && (\mask >> 8) 215 .if (\mask & 0xff) 216 .error "Mask exceeds byte boundary" 217 .endif 218 TSTMSK \addr, "(\mask >> 8)", \size, "(\bytepos + 1)" 219 .exitm 220 .endif 221 .ifeq \mask 222 .error "Mask must not be zero" 223 .endif 224 off = \size - \bytepos - 1 225 tm off+\addr, \mask 226 .endm 227 228 .macro BPOFF 229 ALTERNATIVE "", ".long 0xb2e8c000", 82 230 .endm 231 232 .macro BPON 233 ALTERNATIVE "", ".long 0xb2e8d000", 82 234 .endm 235 236 .macro BPENTER tif_ptr,tif_mask 237 ALTERNATIVE "TSTMSK \tif_ptr,\tif_mask; jz .+8; .long 0xb2e8d000", \ 238 "", 82 239 .endm 240 241 .macro BPEXIT tif_ptr,tif_mask 242 TSTMSK \tif_ptr,\tif_mask 243 ALTERNATIVE "jz .+8; .long 0xb2e8c000", \ 244 "jnz .+8; .long 0xb2e8d000", 82 245 .endm 246 247 GEN_BR_THUNK %r9 248 GEN_BR_THUNK %r14 249 GEN_BR_THUNK %r14,%r11 250 251 .section .kprobes.text, "ax" 252.Ldummy: 253 /* 254 * This nop exists only in order to avoid that __switch_to starts at 255 * the beginning of the kprobes text section. In that case we would 256 * have several symbols at the same address. E.g. objdump would take 257 * an arbitrary symbol name when disassembling this code. 258 * With the added nop in between the __switch_to symbol is unique 259 * again. 260 */ 261 nop 0 262 263ENTRY(__bpon) 264 .globl __bpon 265 BPON 266 BR_EX %r14 267ENDPROC(__bpon) 268 269/* 270 * Scheduler resume function, called by switch_to 271 * gpr2 = (task_struct *) prev 272 * gpr3 = (task_struct *) next 273 * Returns: 274 * gpr2 = prev 275 */ 276ENTRY(__switch_to) 277 stmg %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task 278 lghi %r4,__TASK_stack 279 lghi %r1,__TASK_thread 280 llill %r5,STACK_INIT 281 stg %r15,__THREAD_ksp(%r1,%r2) # store kernel stack of prev 282 lg %r15,0(%r4,%r3) # start of kernel stack of next 283 agr %r15,%r5 # end of kernel stack of next 284 stg %r3,__LC_CURRENT # store task struct of next 285 stg %r15,__LC_KERNEL_STACK # store end of kernel stack 286 lg %r15,__THREAD_ksp(%r1,%r3) # load kernel stack of next 287 aghi %r3,__TASK_pid 288 mvc __LC_CURRENT_PID(4,%r0),0(%r3) # store pid of next 289 lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task 290 ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40 291 BR_EX %r14 292ENDPROC(__switch_to) 293 294#if IS_ENABLED(CONFIG_KVM) 295/* 296 * sie64a calling convention: 297 * %r2 pointer to sie control block 298 * %r3 guest register save area 299 */ 300ENTRY(sie64a) 301 stmg %r6,%r14,__SF_GPRS(%r15) # save kernel registers 302 lg %r12,__LC_CURRENT 303 stg %r2,__SF_SIE_CONTROL(%r15) # save control block pointer 304 stg %r3,__SF_SIE_SAVEAREA(%r15) # save guest register save area 305 xc __SF_SIE_REASON(8,%r15),__SF_SIE_REASON(%r15) # reason code = 0 306 mvc __SF_SIE_FLAGS(8,%r15),__TI_flags(%r12) # copy thread flags 307 TSTMSK __LC_CPU_FLAGS,_CIF_FPU # load guest fp/vx registers ? 308 jno .Lsie_load_guest_gprs 309 brasl %r14,load_fpu_regs # load guest fp/vx regs 310.Lsie_load_guest_gprs: 311 lmg %r0,%r13,0(%r3) # load guest gprs 0-13 312 lg %r14,__LC_GMAP # get gmap pointer 313 ltgr %r14,%r14 314 jz .Lsie_gmap 315 lctlg %c1,%c1,__GMAP_ASCE(%r14) # load primary asce 316.Lsie_gmap: 317 lg %r14,__SF_SIE_CONTROL(%r15) # get control block pointer 318 oi __SIE_PROG0C+3(%r14),1 # we are going into SIE now 319 tm __SIE_PROG20+3(%r14),3 # last exit... 320 jnz .Lsie_skip 321 TSTMSK __LC_CPU_FLAGS,_CIF_FPU 322 jo .Lsie_skip # exit if fp/vx regs changed 323 BPEXIT __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST) 324.Lsie_entry: 325 sie 0(%r14) 326 BPOFF 327 BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST) 328.Lsie_skip: 329 ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE 330 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce 331.Lsie_done: 332# some program checks are suppressing. C code (e.g. do_protection_exception) 333# will rewind the PSW by the ILC, which is often 4 bytes in case of SIE. There 334# are some corner cases (e.g. runtime instrumentation) where ILC is unpredictable. 335# Other instructions between sie64a and .Lsie_done should not cause program 336# interrupts. So lets use 3 nops as a landing pad for all possible rewinds. 337# See also .Lcleanup_sie 338.Lrewind_pad6: 339 nopr 7 340.Lrewind_pad4: 341 nopr 7 342.Lrewind_pad2: 343 nopr 7 344 .globl sie_exit 345sie_exit: 346 lg %r14,__SF_SIE_SAVEAREA(%r15) # load guest register save area 347 stmg %r0,%r13,0(%r14) # save guest gprs 0-13 348 xgr %r0,%r0 # clear guest registers to 349 xgr %r1,%r1 # prevent speculative use 350 xgr %r2,%r2 351 xgr %r3,%r3 352 xgr %r4,%r4 353 xgr %r5,%r5 354 lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers 355 lg %r2,__SF_SIE_REASON(%r15) # return exit reason code 356 BR_EX %r14 357.Lsie_fault: 358 lghi %r14,-EFAULT 359 stg %r14,__SF_SIE_REASON(%r15) # set exit reason code 360 j sie_exit 361 362 EX_TABLE(.Lrewind_pad6,.Lsie_fault) 363 EX_TABLE(.Lrewind_pad4,.Lsie_fault) 364 EX_TABLE(.Lrewind_pad2,.Lsie_fault) 365 EX_TABLE(sie_exit,.Lsie_fault) 366ENDPROC(sie64a) 367EXPORT_SYMBOL(sie64a) 368EXPORT_SYMBOL(sie_exit) 369#endif 370 371/* 372 * SVC interrupt handler routine. System calls are synchronous events and 373 * are entered with interrupts disabled. 374 */ 375 376ENTRY(system_call) 377 stpt __LC_SYNC_ENTER_TIMER 378 stmg %r8,%r15,__LC_SAVE_AREA_SYNC 379 BPOFF 380 lg %r12,__LC_CURRENT 381 lghi %r14,_PIF_SYSCALL 382.Lsysc_per: 383 lghi %r13,__TASK_thread 384 lg %r15,__LC_KERNEL_STACK 385 la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs 386 UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER 387 BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP 388 stmg %r0,%r7,__PT_R0(%r11) 389 mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC 390 mvc __PT_PSW(16,%r11),__LC_SVC_OLD_PSW 391 mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC 392 stg %r14,__PT_FLAGS(%r11) 393 ENABLE_INTS 394.Lsysc_do_svc: 395 # clear user controlled register to prevent speculative use 396 xgr %r0,%r0 397 # load address of system call table 398 lg %r10,__THREAD_sysc_table(%r13,%r12) 399 llgh %r8,__PT_INT_CODE+2(%r11) 400 slag %r8,%r8,3 # shift and test for svc 0 401 jnz .Lsysc_nr_ok 402 # svc 0: system call number in %r1 403 llgfr %r1,%r1 # clear high word in r1 404 sth %r1,__PT_INT_CODE+2(%r11) 405 cghi %r1,NR_syscalls 406 jnl .Lsysc_nr_ok 407 slag %r8,%r1,3 408.Lsysc_nr_ok: 409 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 410 stg %r2,__PT_ORIG_GPR2(%r11) 411 stg %r7,STACK_FRAME_OVERHEAD(%r15) 412 lg %r9,0(%r8,%r10) # get system call add. 413 TSTMSK __TI_flags(%r12),_TIF_TRACE 414 jnz .Lsysc_tracesys 415 BASR_EX %r14,%r9 # call sys_xxxx 416 stg %r2,__PT_R2(%r11) # store return value 417 418.Lsysc_return: 419#ifdef CONFIG_DEBUG_RSEQ 420 lgr %r2,%r11 421 brasl %r14,rseq_syscall 422#endif 423 LOCKDEP_SYS_EXIT 424.Lsysc_tif: 425 DISABLE_INTS 426 TSTMSK __PT_FLAGS(%r11),_PIF_WORK 427 jnz .Lsysc_work 428 TSTMSK __TI_flags(%r12),_TIF_WORK 429 jnz .Lsysc_work # check for work 430 TSTMSK __LC_CPU_FLAGS,(_CIF_WORK-_CIF_FPU) 431 jnz .Lsysc_work 432 BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP 433.Lsysc_restore: 434 DISABLE_INTS 435 TSTMSK __LC_CPU_FLAGS, _CIF_FPU 436 jz .Lsysc_skip_fpu 437 brasl %r14,load_fpu_regs 438.Lsysc_skip_fpu: 439 mvc __LC_RETURN_PSW(16),__PT_PSW(%r11) 440 stpt __LC_EXIT_TIMER 441 lmg %r0,%r15,__PT_R0(%r11) 442 b __LC_RETURN_LPSWE 443 444# 445# One of the work bits is on. Find out which one. 446# 447.Lsysc_work: 448 ENABLE_INTS 449 TSTMSK __TI_flags(%r12),_TIF_NEED_RESCHED 450 jo .Lsysc_reschedule 451 TSTMSK __PT_FLAGS(%r11),_PIF_SYSCALL_RESTART 452 jo .Lsysc_syscall_restart 453#ifdef CONFIG_UPROBES 454 TSTMSK __TI_flags(%r12),_TIF_UPROBE 455 jo .Lsysc_uprobe_notify 456#endif 457 TSTMSK __TI_flags(%r12),_TIF_GUARDED_STORAGE 458 jo .Lsysc_guarded_storage 459 TSTMSK __PT_FLAGS(%r11),_PIF_PER_TRAP 460 jo .Lsysc_singlestep 461#ifdef CONFIG_LIVEPATCH 462 TSTMSK __TI_flags(%r12),_TIF_PATCH_PENDING 463 jo .Lsysc_patch_pending # handle live patching just before 464 # signals and possible syscall restart 465#endif 466 TSTMSK __PT_FLAGS(%r11),_PIF_SYSCALL_RESTART 467 jo .Lsysc_syscall_restart 468 TSTMSK __TI_flags(%r12),_TIF_SIGPENDING 469 jo .Lsysc_sigpending 470 TSTMSK __TI_flags(%r12),_TIF_NOTIFY_RESUME 471 jo .Lsysc_notify_resume 472 TSTMSK __LC_CPU_FLAGS,(_CIF_ASCE_PRIMARY|_CIF_ASCE_SECONDARY) 473 jnz .Lsysc_asce 474 j .Lsysc_return 475 476# 477# _TIF_NEED_RESCHED is set, call schedule 478# 479.Lsysc_reschedule: 480 larl %r14,.Lsysc_return 481 jg schedule 482 483# 484# _CIF_ASCE_PRIMARY and/or _CIF_ASCE_SECONDARY set, load user space asce 485# 486.Lsysc_asce: 487 ni __LC_CPU_FLAGS+7,255-_CIF_ASCE_SECONDARY 488 lctlg %c7,%c7,__LC_VDSO_ASCE # load secondary asce 489 TSTMSK __LC_CPU_FLAGS,_CIF_ASCE_PRIMARY 490 jz .Lsysc_return 491#ifndef CONFIG_HAVE_MARCH_Z10_FEATURES 492 tm __LC_STFLE_FAC_LIST+3,0x10 # has MVCOS ? 493 jnz .Lsysc_set_fs_fixup 494 ni __LC_CPU_FLAGS+7,255-_CIF_ASCE_PRIMARY 495 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce 496 j .Lsysc_return 497.Lsysc_set_fs_fixup: 498#endif 499 larl %r14,.Lsysc_return 500 jg set_fs_fixup 501 502 503# 504# _TIF_SIGPENDING is set, call do_signal 505# 506.Lsysc_sigpending: 507 lgr %r2,%r11 # pass pointer to pt_regs 508 brasl %r14,do_signal 509 TSTMSK __PT_FLAGS(%r11),_PIF_SYSCALL 510 jno .Lsysc_return 511.Lsysc_do_syscall: 512 lghi %r13,__TASK_thread 513 lmg %r2,%r7,__PT_R2(%r11) # load svc arguments 514 lghi %r1,0 # svc 0 returns -ENOSYS 515 j .Lsysc_do_svc 516 517# 518# _TIF_NOTIFY_RESUME is set, call do_notify_resume 519# 520.Lsysc_notify_resume: 521 lgr %r2,%r11 # pass pointer to pt_regs 522 larl %r14,.Lsysc_return 523 jg do_notify_resume 524 525# 526# _TIF_UPROBE is set, call uprobe_notify_resume 527# 528#ifdef CONFIG_UPROBES 529.Lsysc_uprobe_notify: 530 lgr %r2,%r11 # pass pointer to pt_regs 531 larl %r14,.Lsysc_return 532 jg uprobe_notify_resume 533#endif 534 535# 536# _TIF_GUARDED_STORAGE is set, call guarded_storage_load 537# 538.Lsysc_guarded_storage: 539 lgr %r2,%r11 # pass pointer to pt_regs 540 larl %r14,.Lsysc_return 541 jg gs_load_bc_cb 542# 543# _TIF_PATCH_PENDING is set, call klp_update_patch_state 544# 545#ifdef CONFIG_LIVEPATCH 546.Lsysc_patch_pending: 547 lg %r2,__LC_CURRENT # pass pointer to task struct 548 larl %r14,.Lsysc_return 549 jg klp_update_patch_state 550#endif 551 552# 553# _PIF_PER_TRAP is set, call do_per_trap 554# 555.Lsysc_singlestep: 556 ni __PT_FLAGS+7(%r11),255-_PIF_PER_TRAP 557 lgr %r2,%r11 # pass pointer to pt_regs 558 larl %r14,.Lsysc_return 559 jg do_per_trap 560 561# 562# _PIF_SYSCALL_RESTART is set, repeat the current system call 563# 564.Lsysc_syscall_restart: 565 ni __PT_FLAGS+7(%r11),255-_PIF_SYSCALL_RESTART 566 lmg %r1,%r7,__PT_R1(%r11) # load svc arguments 567 lg %r2,__PT_ORIG_GPR2(%r11) 568 j .Lsysc_do_svc 569 570# 571# call tracehook_report_syscall_entry/tracehook_report_syscall_exit before 572# and after the system call 573# 574.Lsysc_tracesys: 575 lgr %r2,%r11 # pass pointer to pt_regs 576 la %r3,0 577 llgh %r0,__PT_INT_CODE+2(%r11) 578 stg %r0,__PT_R2(%r11) 579 brasl %r14,do_syscall_trace_enter 580 lghi %r0,NR_syscalls 581 clgr %r0,%r2 582 jnh .Lsysc_tracenogo 583 sllg %r8,%r2,3 584 lg %r9,0(%r8,%r10) 585 lmg %r3,%r7,__PT_R3(%r11) 586 stg %r7,STACK_FRAME_OVERHEAD(%r15) 587 lg %r2,__PT_ORIG_GPR2(%r11) 588 BASR_EX %r14,%r9 # call sys_xxx 589 stg %r2,__PT_R2(%r11) # store return value 590.Lsysc_tracenogo: 591 TSTMSK __TI_flags(%r12),_TIF_TRACE 592 jz .Lsysc_return 593 lgr %r2,%r11 # pass pointer to pt_regs 594 larl %r14,.Lsysc_return 595 jg do_syscall_trace_exit 596ENDPROC(system_call) 597 598# 599# a new process exits the kernel with ret_from_fork 600# 601ENTRY(ret_from_fork) 602 la %r11,STACK_FRAME_OVERHEAD(%r15) 603 lg %r12,__LC_CURRENT 604 brasl %r14,schedule_tail 605 tm __PT_PSW+1(%r11),0x01 # forking a kernel thread ? 606 jne .Lsysc_tracenogo 607 # it's a kernel thread 608 lmg %r9,%r10,__PT_R9(%r11) # load gprs 609 la %r2,0(%r10) 610 BASR_EX %r14,%r9 611 j .Lsysc_tracenogo 612ENDPROC(ret_from_fork) 613 614ENTRY(kernel_thread_starter) 615 la %r2,0(%r10) 616 BASR_EX %r14,%r9 617 j .Lsysc_tracenogo 618ENDPROC(kernel_thread_starter) 619 620/* 621 * Program check handler routine 622 */ 623 624ENTRY(pgm_check_handler) 625 stpt __LC_SYNC_ENTER_TIMER 626 BPOFF 627 stmg %r8,%r15,__LC_SAVE_AREA_SYNC 628 lg %r10,__LC_LAST_BREAK 629 srag %r11,%r10,12 630 jnz 0f 631 /* if __LC_LAST_BREAK is < 4096, it contains one of 632 * the lpswe addresses in lowcore. Set it to 1 (initial state) 633 * to prevent leaking that address to userspace. 634 */ 635 lghi %r10,1 6360: lg %r12,__LC_CURRENT 637 lghi %r11,0 638 lmg %r8,%r9,__LC_PGM_OLD_PSW 639 tmhh %r8,0x0001 # test problem state bit 640 jnz 3f # -> fault in user space 641#if IS_ENABLED(CONFIG_KVM) 642 # cleanup critical section for program checks in sie64a 643 lgr %r14,%r9 644 larl %r13,.Lsie_gmap 645 slgr %r14,%r13 646 lghi %r13,.Lsie_done - .Lsie_gmap 647 clgr %r14,%r13 648 jhe 1f 649 lg %r14,__SF_SIE_CONTROL(%r15) # get control block pointer 650 ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE 651 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce 652 larl %r9,sie_exit # skip forward to sie_exit 653 lghi %r11,_PIF_GUEST_FAULT 654#endif 6551: tmhh %r8,0x4000 # PER bit set in old PSW ? 656 jnz 2f # -> enabled, can't be a double fault 657 tm __LC_PGM_ILC+3,0x80 # check for per exception 658 jnz .Lpgm_svcper # -> single stepped svc 6592: CHECK_STACK __LC_SAVE_AREA_SYNC 660 aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) 661 # CHECK_VMAP_STACK branches to stack_overflow or 5f 662 CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,5f 6633: UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER 664 BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP 665 lg %r15,__LC_KERNEL_STACK 666 lgr %r14,%r12 667 aghi %r14,__TASK_thread # pointer to thread_struct 668 lghi %r13,__LC_PGM_TDB 669 tm __LC_PGM_ILC+2,0x02 # check for transaction abort 670 jz 4f 671 mvc __THREAD_trap_tdb(256,%r14),0(%r13) 6724: stg %r10,__THREAD_last_break(%r14) 6735: lgr %r13,%r11 674 la %r11,STACK_FRAME_OVERHEAD(%r15) 675 stmg %r0,%r7,__PT_R0(%r11) 676 # clear user controlled registers to prevent speculative use 677 xgr %r0,%r0 678 xgr %r1,%r1 679 xgr %r2,%r2 680 xgr %r3,%r3 681 xgr %r4,%r4 682 xgr %r5,%r5 683 xgr %r6,%r6 684 xgr %r7,%r7 685 mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC 686 stmg %r8,%r9,__PT_PSW(%r11) 687 mvc __PT_INT_CODE(4,%r11),__LC_PGM_ILC 688 mvc __PT_INT_PARM_LONG(8,%r11),__LC_TRANS_EXC_CODE 689 stg %r13,__PT_FLAGS(%r11) 690 stg %r10,__PT_ARGS(%r11) 691 tm __LC_PGM_ILC+3,0x80 # check for per exception 692 jz 6f 693 tmhh %r8,0x0001 # kernel per event ? 694 jz .Lpgm_kprobe 695 oi __PT_FLAGS+7(%r11),_PIF_PER_TRAP 696 mvc __THREAD_per_address(8,%r14),__LC_PER_ADDRESS 697 mvc __THREAD_per_cause(2,%r14),__LC_PER_CODE 698 mvc __THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID 6996: RESTORE_SM_CLEAR_PER 700 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 701 larl %r1,pgm_check_table 702 llgh %r10,__PT_INT_CODE+2(%r11) 703 nill %r10,0x007f 704 sll %r10,3 705 je .Lpgm_return 706 lg %r9,0(%r10,%r1) # load address of handler routine 707 lgr %r2,%r11 # pass pointer to pt_regs 708 BASR_EX %r14,%r9 # branch to interrupt-handler 709.Lpgm_return: 710 LOCKDEP_SYS_EXIT 711 tm __PT_PSW+1(%r11),0x01 # returning to user ? 712 jno .Lsysc_restore 713 TSTMSK __PT_FLAGS(%r11),_PIF_SYSCALL 714 jo .Lsysc_do_syscall 715 j .Lsysc_tif 716 717# 718# PER event in supervisor state, must be kprobes 719# 720.Lpgm_kprobe: 721 RESTORE_SM_CLEAR_PER 722 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 723 lgr %r2,%r11 # pass pointer to pt_regs 724 brasl %r14,do_per_trap 725 j .Lpgm_return 726 727# 728# single stepped system call 729# 730.Lpgm_svcper: 731 mvc __LC_RETURN_PSW(8),__LC_SVC_NEW_PSW 732 larl %r14,.Lsysc_per 733 stg %r14,__LC_RETURN_PSW+8 734 lghi %r14,_PIF_SYSCALL | _PIF_PER_TRAP 735 lpswe __LC_RETURN_PSW # branch to .Lsysc_per 736ENDPROC(pgm_check_handler) 737 738/* 739 * IO interrupt handler routine 740 */ 741ENTRY(io_int_handler) 742 STCK __LC_INT_CLOCK 743 stpt __LC_ASYNC_ENTER_TIMER 744 BPOFF 745 stmg %r8,%r15,__LC_SAVE_AREA_ASYNC 746 lg %r12,__LC_CURRENT 747 lmg %r8,%r9,__LC_IO_OLD_PSW 748 SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER 749 stmg %r0,%r7,__PT_R0(%r11) 750 # clear user controlled registers to prevent speculative use 751 xgr %r0,%r0 752 xgr %r1,%r1 753 xgr %r2,%r2 754 xgr %r3,%r3 755 xgr %r4,%r4 756 xgr %r5,%r5 757 xgr %r6,%r6 758 xgr %r7,%r7 759 xgr %r10,%r10 760 mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC 761 stmg %r8,%r9,__PT_PSW(%r11) 762 mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID 763 xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) 764 TSTMSK __LC_CPU_FLAGS,_CIF_IGNORE_IRQ 765 jo .Lio_restore 766 TRACE_IRQS_OFF 767 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 768.Lio_loop: 769 lgr %r2,%r11 # pass pointer to pt_regs 770 lghi %r3,IO_INTERRUPT 771 tm __PT_INT_CODE+8(%r11),0x80 # adapter interrupt ? 772 jz .Lio_call 773 lghi %r3,THIN_INTERRUPT 774.Lio_call: 775 brasl %r14,do_IRQ 776 TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_LPAR 777 jz .Lio_return 778 tpi 0 779 jz .Lio_return 780 mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID 781 j .Lio_loop 782.Lio_return: 783 LOCKDEP_SYS_EXIT 784 TSTMSK __TI_flags(%r12),_TIF_WORK 785 jnz .Lio_work # there is work to do (signals etc.) 786 TSTMSK __LC_CPU_FLAGS,_CIF_WORK 787 jnz .Lio_work 788.Lio_restore: 789 TRACE_IRQS_ON 790 mvc __LC_RETURN_PSW(16),__PT_PSW(%r11) 791 tm __PT_PSW+1(%r11),0x01 # returning to user ? 792 jno .Lio_exit_kernel 793 BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP 794 stpt __LC_EXIT_TIMER 795.Lio_exit_kernel: 796 lmg %r0,%r15,__PT_R0(%r11) 797 b __LC_RETURN_LPSWE 798.Lio_done: 799 800# 801# There is work todo, find out in which context we have been interrupted: 802# 1) if we return to user space we can do all _TIF_WORK work 803# 2) if we return to kernel code and kvm is enabled check if we need to 804# modify the psw to leave SIE 805# 3) if we return to kernel code and preemptive scheduling is enabled check 806# the preemption counter and if it is zero call preempt_schedule_irq 807# Before any work can be done, a switch to the kernel stack is required. 808# 809.Lio_work: 810 tm __PT_PSW+1(%r11),0x01 # returning to user ? 811 jo .Lio_work_user # yes -> do resched & signal 812#ifdef CONFIG_PREEMPTION 813 # check for preemptive scheduling 814 icm %r0,15,__LC_PREEMPT_COUNT 815 jnz .Lio_restore # preemption is disabled 816 TSTMSK __TI_flags(%r12),_TIF_NEED_RESCHED 817 jno .Lio_restore 818 # switch to kernel stack 819 lg %r1,__PT_R15(%r11) 820 aghi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE) 821 mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) 822 xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) 823 la %r11,STACK_FRAME_OVERHEAD(%r1) 824 lgr %r15,%r1 825 brasl %r14,preempt_schedule_irq 826 j .Lio_return 827#else 828 j .Lio_restore 829#endif 830 831# 832# Need to do work before returning to userspace, switch to kernel stack 833# 834.Lio_work_user: 835 lg %r1,__LC_KERNEL_STACK 836 mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) 837 xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) 838 la %r11,STACK_FRAME_OVERHEAD(%r1) 839 lgr %r15,%r1 840 841# 842# One of the work bits is on. Find out which one. 843# 844 TSTMSK __TI_flags(%r12),_TIF_NEED_RESCHED 845 jo .Lio_reschedule 846#ifdef CONFIG_LIVEPATCH 847 TSTMSK __TI_flags(%r12),_TIF_PATCH_PENDING 848 jo .Lio_patch_pending 849#endif 850 TSTMSK __TI_flags(%r12),_TIF_SIGPENDING 851 jo .Lio_sigpending 852 TSTMSK __TI_flags(%r12),_TIF_NOTIFY_RESUME 853 jo .Lio_notify_resume 854 TSTMSK __TI_flags(%r12),_TIF_GUARDED_STORAGE 855 jo .Lio_guarded_storage 856 TSTMSK __LC_CPU_FLAGS,_CIF_FPU 857 jo .Lio_vxrs 858 TSTMSK __LC_CPU_FLAGS,(_CIF_ASCE_PRIMARY|_CIF_ASCE_SECONDARY) 859 jnz .Lio_asce 860 j .Lio_return 861 862# 863# _CIF_ASCE_PRIMARY and/or CIF_ASCE_SECONDARY set, load user space asce 864# 865.Lio_asce: 866 ni __LC_CPU_FLAGS+7,255-_CIF_ASCE_SECONDARY 867 lctlg %c7,%c7,__LC_VDSO_ASCE # load secondary asce 868 TSTMSK __LC_CPU_FLAGS,_CIF_ASCE_PRIMARY 869 jz .Lio_return 870#ifndef CONFIG_HAVE_MARCH_Z10_FEATURES 871 tm __LC_STFLE_FAC_LIST+3,0x10 # has MVCOS ? 872 jnz .Lio_set_fs_fixup 873 ni __LC_CPU_FLAGS+7,255-_CIF_ASCE_PRIMARY 874 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce 875 j .Lio_return 876.Lio_set_fs_fixup: 877#endif 878 larl %r14,.Lio_return 879 jg set_fs_fixup 880 881# 882# CIF_FPU is set, restore floating-point controls and floating-point registers. 883# 884.Lio_vxrs: 885 larl %r14,.Lio_return 886 jg load_fpu_regs 887 888# 889# _TIF_GUARDED_STORAGE is set, call guarded_storage_load 890# 891.Lio_guarded_storage: 892 ENABLE_INTS_TRACE 893 lgr %r2,%r11 # pass pointer to pt_regs 894 brasl %r14,gs_load_bc_cb 895 DISABLE_INTS_TRACE 896 j .Lio_return 897 898# 899# _TIF_NEED_RESCHED is set, call schedule 900# 901.Lio_reschedule: 902 ENABLE_INTS_TRACE 903 brasl %r14,schedule # call scheduler 904 DISABLE_INTS_TRACE 905 j .Lio_return 906 907# 908# _TIF_PATCH_PENDING is set, call klp_update_patch_state 909# 910#ifdef CONFIG_LIVEPATCH 911.Lio_patch_pending: 912 lg %r2,__LC_CURRENT # pass pointer to task struct 913 larl %r14,.Lio_return 914 jg klp_update_patch_state 915#endif 916 917# 918# _TIF_SIGPENDING or is set, call do_signal 919# 920.Lio_sigpending: 921 ENABLE_INTS_TRACE 922 lgr %r2,%r11 # pass pointer to pt_regs 923 brasl %r14,do_signal 924 DISABLE_INTS_TRACE 925 j .Lio_return 926 927# 928# _TIF_NOTIFY_RESUME or is set, call do_notify_resume 929# 930.Lio_notify_resume: 931 ENABLE_INTS_TRACE 932 lgr %r2,%r11 # pass pointer to pt_regs 933 brasl %r14,do_notify_resume 934 DISABLE_INTS_TRACE 935 j .Lio_return 936ENDPROC(io_int_handler) 937 938/* 939 * External interrupt handler routine 940 */ 941ENTRY(ext_int_handler) 942 STCK __LC_INT_CLOCK 943 stpt __LC_ASYNC_ENTER_TIMER 944 BPOFF 945 stmg %r8,%r15,__LC_SAVE_AREA_ASYNC 946 lg %r12,__LC_CURRENT 947 lmg %r8,%r9,__LC_EXT_OLD_PSW 948 SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER 949 stmg %r0,%r7,__PT_R0(%r11) 950 # clear user controlled registers to prevent speculative use 951 xgr %r0,%r0 952 xgr %r1,%r1 953 xgr %r2,%r2 954 xgr %r3,%r3 955 xgr %r4,%r4 956 xgr %r5,%r5 957 xgr %r6,%r6 958 xgr %r7,%r7 959 xgr %r10,%r10 960 mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC 961 stmg %r8,%r9,__PT_PSW(%r11) 962 lghi %r1,__LC_EXT_PARAMS2 963 mvc __PT_INT_CODE(4,%r11),__LC_EXT_CPU_ADDR 964 mvc __PT_INT_PARM(4,%r11),__LC_EXT_PARAMS 965 mvc __PT_INT_PARM_LONG(8,%r11),0(%r1) 966 xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) 967 TSTMSK __LC_CPU_FLAGS,_CIF_IGNORE_IRQ 968 jo .Lio_restore 969 TRACE_IRQS_OFF 970 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 971 lgr %r2,%r11 # pass pointer to pt_regs 972 lghi %r3,EXT_INTERRUPT 973 brasl %r14,do_IRQ 974 j .Lio_return 975ENDPROC(ext_int_handler) 976 977/* 978 * Load idle PSW. 979 */ 980ENTRY(psw_idle) 981 stg %r3,__SF_EMPTY(%r15) 982 larl %r1,.Lpsw_idle_exit 983 stg %r1,__SF_EMPTY+8(%r15) 984 larl %r1,smp_cpu_mtid 985 llgf %r1,0(%r1) 986 ltgr %r1,%r1 987 jz .Lpsw_idle_stcctm 988 .insn rsy,0xeb0000000017,%r1,5,__SF_EMPTY+16(%r15) 989.Lpsw_idle_stcctm: 990 oi __LC_CPU_FLAGS+7,_CIF_ENABLED_WAIT 991 BPON 992 STCK __CLOCK_IDLE_ENTER(%r2) 993 stpt __TIMER_IDLE_ENTER(%r2) 994 lpswe __SF_EMPTY(%r15) 995.Lpsw_idle_exit: 996 BR_EX %r14 997ENDPROC(psw_idle) 998 999/* 1000 * Store floating-point controls and floating-point or vector register 1001 * depending whether the vector facility is available. A critical section 1002 * cleanup assures that the registers are stored even if interrupted for 1003 * some other work. The CIF_FPU flag is set to trigger a lazy restore 1004 * of the register contents at return from io or a system call. 1005 */ 1006ENTRY(save_fpu_regs) 1007 stnsm __SF_EMPTY(%r15),0xfc 1008 lg %r2,__LC_CURRENT 1009 aghi %r2,__TASK_thread 1010 TSTMSK __LC_CPU_FLAGS,_CIF_FPU 1011 jo .Lsave_fpu_regs_exit 1012 stfpc __THREAD_FPU_fpc(%r2) 1013 lg %r3,__THREAD_FPU_regs(%r2) 1014 TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX 1015 jz .Lsave_fpu_regs_fp # no -> store FP regs 1016 VSTM %v0,%v15,0,%r3 # vstm 0,15,0(3) 1017 VSTM %v16,%v31,256,%r3 # vstm 16,31,256(3) 1018 j .Lsave_fpu_regs_done # -> set CIF_FPU flag 1019.Lsave_fpu_regs_fp: 1020 std 0,0(%r3) 1021 std 1,8(%r3) 1022 std 2,16(%r3) 1023 std 3,24(%r3) 1024 std 4,32(%r3) 1025 std 5,40(%r3) 1026 std 6,48(%r3) 1027 std 7,56(%r3) 1028 std 8,64(%r3) 1029 std 9,72(%r3) 1030 std 10,80(%r3) 1031 std 11,88(%r3) 1032 std 12,96(%r3) 1033 std 13,104(%r3) 1034 std 14,112(%r3) 1035 std 15,120(%r3) 1036.Lsave_fpu_regs_done: 1037 oi __LC_CPU_FLAGS+7,_CIF_FPU 1038.Lsave_fpu_regs_exit: 1039 ssm __SF_EMPTY(%r15) 1040 BR_EX %r14 1041.Lsave_fpu_regs_end: 1042ENDPROC(save_fpu_regs) 1043EXPORT_SYMBOL(save_fpu_regs) 1044 1045/* 1046 * Load floating-point controls and floating-point or vector registers. 1047 * A critical section cleanup assures that the register contents are 1048 * loaded even if interrupted for some other work. 1049 * 1050 * There are special calling conventions to fit into sysc and io return work: 1051 * %r15: <kernel stack> 1052 * The function requires: 1053 * %r4 1054 */ 1055load_fpu_regs: 1056 stnsm __SF_EMPTY(%r15),0xfc 1057 lg %r4,__LC_CURRENT 1058 aghi %r4,__TASK_thread 1059 TSTMSK __LC_CPU_FLAGS,_CIF_FPU 1060 jno .Lload_fpu_regs_exit 1061 lfpc __THREAD_FPU_fpc(%r4) 1062 TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX 1063 lg %r4,__THREAD_FPU_regs(%r4) # %r4 <- reg save area 1064 jz .Lload_fpu_regs_fp # -> no VX, load FP regs 1065 VLM %v0,%v15,0,%r4 1066 VLM %v16,%v31,256,%r4 1067 j .Lload_fpu_regs_done 1068.Lload_fpu_regs_fp: 1069 ld 0,0(%r4) 1070 ld 1,8(%r4) 1071 ld 2,16(%r4) 1072 ld 3,24(%r4) 1073 ld 4,32(%r4) 1074 ld 5,40(%r4) 1075 ld 6,48(%r4) 1076 ld 7,56(%r4) 1077 ld 8,64(%r4) 1078 ld 9,72(%r4) 1079 ld 10,80(%r4) 1080 ld 11,88(%r4) 1081 ld 12,96(%r4) 1082 ld 13,104(%r4) 1083 ld 14,112(%r4) 1084 ld 15,120(%r4) 1085.Lload_fpu_regs_done: 1086 ni __LC_CPU_FLAGS+7,255-_CIF_FPU 1087.Lload_fpu_regs_exit: 1088 ssm __SF_EMPTY(%r15) 1089 BR_EX %r14 1090.Lload_fpu_regs_end: 1091ENDPROC(load_fpu_regs) 1092 1093/* 1094 * Machine check handler routines 1095 */ 1096ENTRY(mcck_int_handler) 1097 STCK __LC_MCCK_CLOCK 1098 BPOFF 1099 la %r1,4095 # validate r1 1100 spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # validate cpu timer 1101 sckc __LC_CLOCK_COMPARATOR # validate comparator 1102 lam %a0,%a15,__LC_AREGS_SAVE_AREA-4095(%r1) # validate acrs 1103 lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# validate gprs 1104 lg %r12,__LC_CURRENT 1105 lmg %r8,%r9,__LC_MCK_OLD_PSW 1106 TSTMSK __LC_MCCK_CODE,MCCK_CODE_SYSTEM_DAMAGE 1107 jo .Lmcck_panic # yes -> rest of mcck code invalid 1108 TSTMSK __LC_MCCK_CODE,MCCK_CODE_CR_VALID 1109 jno .Lmcck_panic # control registers invalid -> panic 1110 la %r14,4095 1111 lctlg %c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r14) # validate ctl regs 1112 ptlb 1113 lg %r11,__LC_MCESAD-4095(%r14) # extended machine check save area 1114 nill %r11,0xfc00 # MCESA_ORIGIN_MASK 1115 TSTMSK __LC_CREGS_SAVE_AREA+16-4095(%r14),CR2_GUARDED_STORAGE 1116 jno 0f 1117 TSTMSK __LC_MCCK_CODE,MCCK_CODE_GS_VALID 1118 jno 0f 1119 .insn rxy,0xe3000000004d,0,__MCESA_GS_SAVE_AREA(%r11) # LGSC 11200: l %r14,__LC_FP_CREG_SAVE_AREA-4095(%r14) 1121 TSTMSK __LC_MCCK_CODE,MCCK_CODE_FC_VALID 1122 jo 0f 1123 sr %r14,%r14 11240: sfpc %r14 1125 TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX 1126 jo 0f 1127 lghi %r14,__LC_FPREGS_SAVE_AREA 1128 ld %f0,0(%r14) 1129 ld %f1,8(%r14) 1130 ld %f2,16(%r14) 1131 ld %f3,24(%r14) 1132 ld %f4,32(%r14) 1133 ld %f5,40(%r14) 1134 ld %f6,48(%r14) 1135 ld %f7,56(%r14) 1136 ld %f8,64(%r14) 1137 ld %f9,72(%r14) 1138 ld %f10,80(%r14) 1139 ld %f11,88(%r14) 1140 ld %f12,96(%r14) 1141 ld %f13,104(%r14) 1142 ld %f14,112(%r14) 1143 ld %f15,120(%r14) 1144 j 1f 11450: VLM %v0,%v15,0,%r11 1146 VLM %v16,%v31,256,%r11 11471: lghi %r14,__LC_CPU_TIMER_SAVE_AREA 1148 mvc __LC_MCCK_ENTER_TIMER(8),0(%r14) 1149 TSTMSK __LC_MCCK_CODE,MCCK_CODE_CPU_TIMER_VALID 1150 jo 3f 1151 la %r14,__LC_SYNC_ENTER_TIMER 1152 clc 0(8,%r14),__LC_ASYNC_ENTER_TIMER 1153 jl 0f 1154 la %r14,__LC_ASYNC_ENTER_TIMER 11550: clc 0(8,%r14),__LC_EXIT_TIMER 1156 jl 1f 1157 la %r14,__LC_EXIT_TIMER 11581: clc 0(8,%r14),__LC_LAST_UPDATE_TIMER 1159 jl 2f 1160 la %r14,__LC_LAST_UPDATE_TIMER 11612: spt 0(%r14) 1162 mvc __LC_MCCK_ENTER_TIMER(8),0(%r14) 11633: TSTMSK __LC_MCCK_CODE,MCCK_CODE_PSW_MWP_VALID 1164 jno .Lmcck_panic 1165 tmhh %r8,0x0001 # interrupting from user ? 1166 jnz 4f 1167 TSTMSK __LC_MCCK_CODE,MCCK_CODE_PSW_IA_VALID 1168 jno .Lmcck_panic 11694: ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off 1170 SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_MCCK_ENTER_TIMER 1171.Lmcck_skip: 1172 lghi %r14,__LC_GPREGS_SAVE_AREA+64 1173 stmg %r0,%r7,__PT_R0(%r11) 1174 # clear user controlled registers to prevent speculative use 1175 xgr %r0,%r0 1176 xgr %r1,%r1 1177 xgr %r2,%r2 1178 xgr %r3,%r3 1179 xgr %r4,%r4 1180 xgr %r5,%r5 1181 xgr %r6,%r6 1182 xgr %r7,%r7 1183 xgr %r10,%r10 1184 mvc __PT_R8(64,%r11),0(%r14) 1185 stmg %r8,%r9,__PT_PSW(%r11) 1186 xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) 1187 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 1188 lgr %r2,%r11 # pass pointer to pt_regs 1189 brasl %r14,s390_do_machine_check 1190 cghi %r2,0 1191 je .Lmcck_return 1192 lg %r1,__LC_KERNEL_STACK # switch to kernel stack 1193 mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) 1194 xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) 1195 la %r11,STACK_FRAME_OVERHEAD(%r1) 1196 lgr %r15,%r1 1197 TRACE_IRQS_OFF 1198 brasl %r14,s390_handle_mcck 1199 TRACE_IRQS_ON 1200.Lmcck_return: 1201 lmg %r0,%r10,__PT_R0(%r11) 1202 mvc __LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW 1203 tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ? 1204 jno 0f 1205 BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP 1206 stpt __LC_EXIT_TIMER 12070: lmg %r11,%r15,__PT_R11(%r11) 1208 b __LC_RETURN_MCCK_LPSWE 1209 1210.Lmcck_panic: 1211 lg %r15,__LC_NODAT_STACK 1212 la %r11,STACK_FRAME_OVERHEAD(%r15) 1213 j .Lmcck_skip 1214ENDPROC(mcck_int_handler) 1215 1216# 1217# PSW restart interrupt handler 1218# 1219ENTRY(restart_int_handler) 1220 ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40 1221 stg %r15,__LC_SAVE_AREA_RESTART 1222 lg %r15,__LC_RESTART_STACK 1223 xc STACK_FRAME_OVERHEAD(__PT_SIZE,%r15),STACK_FRAME_OVERHEAD(%r15) 1224 stmg %r0,%r14,STACK_FRAME_OVERHEAD+__PT_R0(%r15) 1225 mvc STACK_FRAME_OVERHEAD+__PT_R15(8,%r15),__LC_SAVE_AREA_RESTART 1226 mvc STACK_FRAME_OVERHEAD+__PT_PSW(16,%r15),__LC_RST_OLD_PSW 1227 xc 0(STACK_FRAME_OVERHEAD,%r15),0(%r15) 1228 lg %r1,__LC_RESTART_FN # load fn, parm & source cpu 1229 lg %r2,__LC_RESTART_DATA 1230 lg %r3,__LC_RESTART_SOURCE 1231 ltgr %r3,%r3 # test source cpu address 1232 jm 1f # negative -> skip source stop 12330: sigp %r4,%r3,SIGP_SENSE # sigp sense to source cpu 1234 brc 10,0b # wait for status stored 12351: basr %r14,%r1 # call function 1236 stap __SF_EMPTY(%r15) # store cpu address 1237 llgh %r3,__SF_EMPTY(%r15) 12382: sigp %r4,%r3,SIGP_STOP # sigp stop to current cpu 1239 brc 2,2b 12403: j 3b 1241ENDPROC(restart_int_handler) 1242 1243 .section .kprobes.text, "ax" 1244 1245#if defined(CONFIG_CHECK_STACK) || defined(CONFIG_VMAP_STACK) 1246/* 1247 * The synchronous or the asynchronous stack overflowed. We are dead. 1248 * No need to properly save the registers, we are going to panic anyway. 1249 * Setup a pt_regs so that show_trace can provide a good call trace. 1250 */ 1251ENTRY(stack_overflow) 1252 lg %r15,__LC_NODAT_STACK # change to panic stack 1253 la %r11,STACK_FRAME_OVERHEAD(%r15) 1254 stmg %r0,%r7,__PT_R0(%r11) 1255 stmg %r8,%r9,__PT_PSW(%r11) 1256 mvc __PT_R8(64,%r11),0(%r14) 1257 stg %r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2 1258 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 1259 lgr %r2,%r11 # pass pointer to pt_regs 1260 jg kernel_stack_overflow 1261ENDPROC(stack_overflow) 1262#endif 1263 1264#if IS_ENABLED(CONFIG_KVM) 1265.Lcleanup_sie: 1266 cghi %r11,__LC_SAVE_AREA_ASYNC #Is this in normal interrupt? 1267 je 1f 1268 larl %r13,.Lsie_entry 1269 slgr %r9,%r13 1270 larl %r13,.Lsie_skip 1271 clgr %r9,%r13 1272 jh 1f 1273 oi __LC_CPU_FLAGS+7, _CIF_MCCK_GUEST 12741: BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST) 1275 lg %r9,__SF_SIE_CONTROL(%r15) # get control block pointer 1276 ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE 1277 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce 1278 larl %r9,sie_exit # skip forward to sie_exit 1279 BR_EX %r14,%r11 1280 1281#endif 1282 .section .rodata, "a" 1283#define SYSCALL(esame,emu) .quad __s390x_ ## esame 1284 .globl sys_call_table 1285sys_call_table: 1286#include "asm/syscall_table.h" 1287#undef SYSCALL 1288 1289#ifdef CONFIG_COMPAT 1290 1291#define SYSCALL(esame,emu) .quad __s390_ ## emu 1292 .globl sys_call_table_emu 1293sys_call_table_emu: 1294#include "asm/syscall_table.h" 1295#undef SYSCALL 1296#endif 1297