1/* SPDX-License-Identifier: GPL-2.0 */ 2/* 3 * S390 low-level entry points. 4 * 5 * Copyright IBM Corp. 1999, 2012 6 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), 7 * Hartmut Penner (hp@de.ibm.com), 8 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), 9 * Heiko Carstens <heiko.carstens@de.ibm.com> 10 */ 11 12#include <linux/init.h> 13#include <linux/linkage.h> 14#include <asm/alternative-asm.h> 15#include <asm/processor.h> 16#include <asm/cache.h> 17#include <asm/ctl_reg.h> 18#include <asm/dwarf.h> 19#include <asm/errno.h> 20#include <asm/ptrace.h> 21#include <asm/thread_info.h> 22#include <asm/asm-offsets.h> 23#include <asm/unistd.h> 24#include <asm/page.h> 25#include <asm/sigp.h> 26#include <asm/irq.h> 27#include <asm/vx-insn.h> 28#include <asm/setup.h> 29#include <asm/nmi.h> 30#include <asm/export.h> 31#include <asm/nospec-insn.h> 32 33__PT_R0 = __PT_GPRS 34__PT_R1 = __PT_GPRS + 8 35__PT_R2 = __PT_GPRS + 16 36__PT_R3 = __PT_GPRS + 24 37__PT_R4 = __PT_GPRS + 32 38__PT_R5 = __PT_GPRS + 40 39__PT_R6 = __PT_GPRS + 48 40__PT_R7 = __PT_GPRS + 56 41__PT_R8 = __PT_GPRS + 64 42__PT_R9 = __PT_GPRS + 72 43__PT_R10 = __PT_GPRS + 80 44__PT_R11 = __PT_GPRS + 88 45__PT_R12 = __PT_GPRS + 96 46__PT_R13 = __PT_GPRS + 104 47__PT_R14 = __PT_GPRS + 112 48__PT_R15 = __PT_GPRS + 120 49 50STACK_SHIFT = PAGE_SHIFT + THREAD_SIZE_ORDER 51STACK_SIZE = 1 << STACK_SHIFT 52STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE 53 54_TIF_WORK = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ 55 _TIF_UPROBE | _TIF_GUARDED_STORAGE | _TIF_PATCH_PENDING) 56_TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \ 57 _TIF_SYSCALL_TRACEPOINT) 58_CIF_WORK = (_CIF_MCCK_PENDING | _CIF_ASCE_PRIMARY | \ 59 _CIF_ASCE_SECONDARY | _CIF_FPU) 60_PIF_WORK = (_PIF_PER_TRAP | _PIF_SYSCALL_RESTART) 61 62_LPP_OFFSET = __LC_LPP 63 64#define BASED(name) name-cleanup_critical(%r13) 65 66 .macro TRACE_IRQS_ON 67#ifdef CONFIG_TRACE_IRQFLAGS 68 basr %r2,%r0 69 brasl %r14,trace_hardirqs_on_caller 70#endif 71 .endm 72 73 .macro TRACE_IRQS_OFF 74#ifdef CONFIG_TRACE_IRQFLAGS 75 basr %r2,%r0 76 brasl %r14,trace_hardirqs_off_caller 77#endif 78 .endm 79 80 .macro LOCKDEP_SYS_EXIT 81#ifdef CONFIG_LOCKDEP 82 tm __PT_PSW+1(%r11),0x01 # returning to user ? 83 jz .+10 84 brasl %r14,lockdep_sys_exit 85#endif 86 .endm 87 88 .macro CHECK_STACK savearea 89#ifdef CONFIG_CHECK_STACK 90 tml %r15,STACK_SIZE - CONFIG_STACK_GUARD 91 lghi %r14,\savearea 92 jz stack_overflow 93#endif 94 .endm 95 96 .macro CHECK_VMAP_STACK savearea,oklabel 97#ifdef CONFIG_VMAP_STACK 98 lgr %r14,%r15 99 nill %r14,0x10000 - STACK_SIZE 100 oill %r14,STACK_INIT 101 clg %r14,__LC_KERNEL_STACK 102 je \oklabel 103 clg %r14,__LC_ASYNC_STACK 104 je \oklabel 105 clg %r14,__LC_NODAT_STACK 106 je \oklabel 107 clg %r14,__LC_RESTART_STACK 108 je \oklabel 109 lghi %r14,\savearea 110 j stack_overflow 111#else 112 j \oklabel 113#endif 114 .endm 115 116 .macro SWITCH_ASYNC savearea,timer 117 tmhh %r8,0x0001 # interrupting from user ? 118 jnz 1f 119 lgr %r14,%r9 120 slg %r14,BASED(.Lcritical_start) 121 clg %r14,BASED(.Lcritical_length) 122 jhe 0f 123 lghi %r11,\savearea # inside critical section, do cleanup 124 brasl %r14,cleanup_critical 125 tmhh %r8,0x0001 # retest problem state after cleanup 126 jnz 1f 1270: lg %r14,__LC_ASYNC_STACK # are we already on the target stack? 128 slgr %r14,%r15 129 srag %r14,%r14,STACK_SHIFT 130 jnz 2f 131 CHECK_STACK \savearea 132 aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) 133 j 3f 1341: UPDATE_VTIME %r14,%r15,\timer 135 BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP 1362: lg %r15,__LC_ASYNC_STACK # load async stack 1373: la %r11,STACK_FRAME_OVERHEAD(%r15) 138 .endm 139 140 .macro UPDATE_VTIME w1,w2,enter_timer 141 lg \w1,__LC_EXIT_TIMER 142 lg \w2,__LC_LAST_UPDATE_TIMER 143 slg \w1,\enter_timer 144 slg \w2,__LC_EXIT_TIMER 145 alg \w1,__LC_USER_TIMER 146 alg \w2,__LC_SYSTEM_TIMER 147 stg \w1,__LC_USER_TIMER 148 stg \w2,__LC_SYSTEM_TIMER 149 mvc __LC_LAST_UPDATE_TIMER(8),\enter_timer 150 .endm 151 152 .macro REENABLE_IRQS 153 stg %r8,__LC_RETURN_PSW 154 ni __LC_RETURN_PSW,0xbf 155 ssm __LC_RETURN_PSW 156 .endm 157 158 .macro STCK savearea 159#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES 160 .insn s,0xb27c0000,\savearea # store clock fast 161#else 162 .insn s,0xb2050000,\savearea # store clock 163#endif 164 .endm 165 166 /* 167 * The TSTMSK macro generates a test-under-mask instruction by 168 * calculating the memory offset for the specified mask value. 169 * Mask value can be any constant. The macro shifts the mask 170 * value to calculate the memory offset for the test-under-mask 171 * instruction. 172 */ 173 .macro TSTMSK addr, mask, size=8, bytepos=0 174 .if (\bytepos < \size) && (\mask >> 8) 175 .if (\mask & 0xff) 176 .error "Mask exceeds byte boundary" 177 .endif 178 TSTMSK \addr, "(\mask >> 8)", \size, "(\bytepos + 1)" 179 .exitm 180 .endif 181 .ifeq \mask 182 .error "Mask must not be zero" 183 .endif 184 off = \size - \bytepos - 1 185 tm off+\addr, \mask 186 .endm 187 188 .macro BPOFF 189 ALTERNATIVE "", ".long 0xb2e8c000", 82 190 .endm 191 192 .macro BPON 193 ALTERNATIVE "", ".long 0xb2e8d000", 82 194 .endm 195 196 .macro BPENTER tif_ptr,tif_mask 197 ALTERNATIVE "TSTMSK \tif_ptr,\tif_mask; jz .+8; .long 0xb2e8d000", \ 198 "", 82 199 .endm 200 201 .macro BPEXIT tif_ptr,tif_mask 202 TSTMSK \tif_ptr,\tif_mask 203 ALTERNATIVE "jz .+8; .long 0xb2e8c000", \ 204 "jnz .+8; .long 0xb2e8d000", 82 205 .endm 206 207 GEN_BR_THUNK %r9 208 GEN_BR_THUNK %r14 209 GEN_BR_THUNK %r14,%r11 210 211 .section .kprobes.text, "ax" 212.Ldummy: 213 /* 214 * This nop exists only in order to avoid that __switch_to starts at 215 * the beginning of the kprobes text section. In that case we would 216 * have several symbols at the same address. E.g. objdump would take 217 * an arbitrary symbol name when disassembling this code. 218 * With the added nop in between the __switch_to symbol is unique 219 * again. 220 */ 221 nop 0 222 223ENTRY(__bpon) 224 .globl __bpon 225 BPON 226 BR_EX %r14 227ENDPROC(__bpon) 228 229/* 230 * Scheduler resume function, called by switch_to 231 * gpr2 = (task_struct *) prev 232 * gpr3 = (task_struct *) next 233 * Returns: 234 * gpr2 = prev 235 */ 236ENTRY(__switch_to) 237 stmg %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task 238 lghi %r4,__TASK_stack 239 lghi %r1,__TASK_thread 240 llill %r5,STACK_INIT 241 stg %r15,__THREAD_ksp(%r1,%r2) # store kernel stack of prev 242 lg %r15,0(%r4,%r3) # start of kernel stack of next 243 agr %r15,%r5 # end of kernel stack of next 244 stg %r3,__LC_CURRENT # store task struct of next 245 stg %r15,__LC_KERNEL_STACK # store end of kernel stack 246 lg %r15,__THREAD_ksp(%r1,%r3) # load kernel stack of next 247 aghi %r3,__TASK_pid 248 mvc __LC_CURRENT_PID(4,%r0),0(%r3) # store pid of next 249 lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task 250 ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40 251 BR_EX %r14 252ENDPROC(__switch_to) 253 254.L__critical_start: 255 256#if IS_ENABLED(CONFIG_KVM) 257/* 258 * sie64a calling convention: 259 * %r2 pointer to sie control block 260 * %r3 guest register save area 261 */ 262ENTRY(sie64a) 263 stmg %r6,%r14,__SF_GPRS(%r15) # save kernel registers 264 lg %r12,__LC_CURRENT 265 stg %r2,__SF_SIE_CONTROL(%r15) # save control block pointer 266 stg %r3,__SF_SIE_SAVEAREA(%r15) # save guest register save area 267 xc __SF_SIE_REASON(8,%r15),__SF_SIE_REASON(%r15) # reason code = 0 268 mvc __SF_SIE_FLAGS(8,%r15),__TI_flags(%r12) # copy thread flags 269 TSTMSK __LC_CPU_FLAGS,_CIF_FPU # load guest fp/vx registers ? 270 jno .Lsie_load_guest_gprs 271 brasl %r14,load_fpu_regs # load guest fp/vx regs 272.Lsie_load_guest_gprs: 273 lmg %r0,%r13,0(%r3) # load guest gprs 0-13 274 lg %r14,__LC_GMAP # get gmap pointer 275 ltgr %r14,%r14 276 jz .Lsie_gmap 277 lctlg %c1,%c1,__GMAP_ASCE(%r14) # load primary asce 278.Lsie_gmap: 279 lg %r14,__SF_SIE_CONTROL(%r15) # get control block pointer 280 oi __SIE_PROG0C+3(%r14),1 # we are going into SIE now 281 tm __SIE_PROG20+3(%r14),3 # last exit... 282 jnz .Lsie_skip 283 TSTMSK __LC_CPU_FLAGS,_CIF_FPU 284 jo .Lsie_skip # exit if fp/vx regs changed 285 BPEXIT __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST) 286.Lsie_entry: 287 sie 0(%r14) 288.Lsie_exit: 289 BPOFF 290 BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST) 291.Lsie_skip: 292 ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE 293 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce 294.Lsie_done: 295# some program checks are suppressing. C code (e.g. do_protection_exception) 296# will rewind the PSW by the ILC, which is often 4 bytes in case of SIE. There 297# are some corner cases (e.g. runtime instrumentation) where ILC is unpredictable. 298# Other instructions between sie64a and .Lsie_done should not cause program 299# interrupts. So lets use 3 nops as a landing pad for all possible rewinds. 300# See also .Lcleanup_sie 301.Lrewind_pad6: 302 nopr 7 303.Lrewind_pad4: 304 nopr 7 305.Lrewind_pad2: 306 nopr 7 307 .globl sie_exit 308sie_exit: 309 lg %r14,__SF_SIE_SAVEAREA(%r15) # load guest register save area 310 stmg %r0,%r13,0(%r14) # save guest gprs 0-13 311 xgr %r0,%r0 # clear guest registers to 312 xgr %r1,%r1 # prevent speculative use 313 xgr %r2,%r2 314 xgr %r3,%r3 315 xgr %r4,%r4 316 xgr %r5,%r5 317 lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers 318 lg %r2,__SF_SIE_REASON(%r15) # return exit reason code 319 BR_EX %r14 320.Lsie_fault: 321 lghi %r14,-EFAULT 322 stg %r14,__SF_SIE_REASON(%r15) # set exit reason code 323 j sie_exit 324 325 EX_TABLE(.Lrewind_pad6,.Lsie_fault) 326 EX_TABLE(.Lrewind_pad4,.Lsie_fault) 327 EX_TABLE(.Lrewind_pad2,.Lsie_fault) 328 EX_TABLE(sie_exit,.Lsie_fault) 329ENDPROC(sie64a) 330EXPORT_SYMBOL(sie64a) 331EXPORT_SYMBOL(sie_exit) 332#endif 333 334/* 335 * SVC interrupt handler routine. System calls are synchronous events and 336 * are executed with interrupts enabled. 337 */ 338 339ENTRY(system_call) 340 stpt __LC_SYNC_ENTER_TIMER 341.Lsysc_stmg: 342 stmg %r8,%r15,__LC_SAVE_AREA_SYNC 343 BPOFF 344 lg %r12,__LC_CURRENT 345 lghi %r13,__TASK_thread 346 lghi %r14,_PIF_SYSCALL 347.Lsysc_per: 348 lg %r15,__LC_KERNEL_STACK 349 la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs 350.Lsysc_vtime: 351 UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER 352 BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP 353 stmg %r0,%r7,__PT_R0(%r11) 354 mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC 355 mvc __PT_PSW(16,%r11),__LC_SVC_OLD_PSW 356 mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC 357 stg %r14,__PT_FLAGS(%r11) 358.Lsysc_do_svc: 359 # clear user controlled register to prevent speculative use 360 xgr %r0,%r0 361 # load address of system call table 362 lg %r10,__THREAD_sysc_table(%r13,%r12) 363 llgh %r8,__PT_INT_CODE+2(%r11) 364 slag %r8,%r8,3 # shift and test for svc 0 365 jnz .Lsysc_nr_ok 366 # svc 0: system call number in %r1 367 llgfr %r1,%r1 # clear high word in r1 368 cghi %r1,NR_syscalls 369 jnl .Lsysc_nr_ok 370 sth %r1,__PT_INT_CODE+2(%r11) 371 slag %r8,%r1,3 372.Lsysc_nr_ok: 373 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 374 stg %r2,__PT_ORIG_GPR2(%r11) 375 stg %r7,STACK_FRAME_OVERHEAD(%r15) 376 lg %r9,0(%r8,%r10) # get system call add. 377 TSTMSK __TI_flags(%r12),_TIF_TRACE 378 jnz .Lsysc_tracesys 379 BASR_EX %r14,%r9 # call sys_xxxx 380 stg %r2,__PT_R2(%r11) # store return value 381 382.Lsysc_return: 383#ifdef CONFIG_DEBUG_RSEQ 384 lgr %r2,%r11 385 brasl %r14,rseq_syscall 386#endif 387 LOCKDEP_SYS_EXIT 388.Lsysc_tif: 389 TSTMSK __PT_FLAGS(%r11),_PIF_WORK 390 jnz .Lsysc_work 391 TSTMSK __TI_flags(%r12),_TIF_WORK 392 jnz .Lsysc_work # check for work 393 TSTMSK __LC_CPU_FLAGS,_CIF_WORK 394 jnz .Lsysc_work 395 BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP 396.Lsysc_restore: 397 lg %r14,__LC_VDSO_PER_CPU 398 lmg %r0,%r10,__PT_R0(%r11) 399 mvc __LC_RETURN_PSW(16),__PT_PSW(%r11) 400.Lsysc_exit_timer: 401 stpt __LC_EXIT_TIMER 402 mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER 403 lmg %r11,%r15,__PT_R11(%r11) 404 lpswe __LC_RETURN_PSW 405.Lsysc_done: 406 407# 408# One of the work bits is on. Find out which one. 409# 410.Lsysc_work: 411 TSTMSK __LC_CPU_FLAGS,_CIF_MCCK_PENDING 412 jo .Lsysc_mcck_pending 413 TSTMSK __TI_flags(%r12),_TIF_NEED_RESCHED 414 jo .Lsysc_reschedule 415 TSTMSK __PT_FLAGS(%r11),_PIF_SYSCALL_RESTART 416 jo .Lsysc_syscall_restart 417#ifdef CONFIG_UPROBES 418 TSTMSK __TI_flags(%r12),_TIF_UPROBE 419 jo .Lsysc_uprobe_notify 420#endif 421 TSTMSK __TI_flags(%r12),_TIF_GUARDED_STORAGE 422 jo .Lsysc_guarded_storage 423 TSTMSK __PT_FLAGS(%r11),_PIF_PER_TRAP 424 jo .Lsysc_singlestep 425#ifdef CONFIG_LIVEPATCH 426 TSTMSK __TI_flags(%r12),_TIF_PATCH_PENDING 427 jo .Lsysc_patch_pending # handle live patching just before 428 # signals and possible syscall restart 429#endif 430 TSTMSK __PT_FLAGS(%r11),_PIF_SYSCALL_RESTART 431 jo .Lsysc_syscall_restart 432 TSTMSK __TI_flags(%r12),_TIF_SIGPENDING 433 jo .Lsysc_sigpending 434 TSTMSK __TI_flags(%r12),_TIF_NOTIFY_RESUME 435 jo .Lsysc_notify_resume 436 TSTMSK __LC_CPU_FLAGS,_CIF_FPU 437 jo .Lsysc_vxrs 438 TSTMSK __LC_CPU_FLAGS,(_CIF_ASCE_PRIMARY|_CIF_ASCE_SECONDARY) 439 jnz .Lsysc_asce 440 j .Lsysc_return # beware of critical section cleanup 441 442# 443# _TIF_NEED_RESCHED is set, call schedule 444# 445.Lsysc_reschedule: 446 larl %r14,.Lsysc_return 447 jg schedule 448 449# 450# _CIF_MCCK_PENDING is set, call handler 451# 452.Lsysc_mcck_pending: 453 larl %r14,.Lsysc_return 454 jg s390_handle_mcck # TIF bit will be cleared by handler 455 456# 457# _CIF_ASCE_PRIMARY and/or _CIF_ASCE_SECONDARY set, load user space asce 458# 459.Lsysc_asce: 460 ni __LC_CPU_FLAGS+7,255-_CIF_ASCE_SECONDARY 461 lctlg %c7,%c7,__LC_VDSO_ASCE # load secondary asce 462 TSTMSK __LC_CPU_FLAGS,_CIF_ASCE_PRIMARY 463 jz .Lsysc_return 464#ifndef CONFIG_HAVE_MARCH_Z10_FEATURES 465 tm __LC_STFLE_FAC_LIST+3,0x10 # has MVCOS ? 466 jnz .Lsysc_set_fs_fixup 467 ni __LC_CPU_FLAGS+7,255-_CIF_ASCE_PRIMARY 468 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce 469 j .Lsysc_return 470.Lsysc_set_fs_fixup: 471#endif 472 larl %r14,.Lsysc_return 473 jg set_fs_fixup 474 475# 476# CIF_FPU is set, restore floating-point controls and floating-point registers. 477# 478.Lsysc_vxrs: 479 larl %r14,.Lsysc_return 480 jg load_fpu_regs 481 482# 483# _TIF_SIGPENDING is set, call do_signal 484# 485.Lsysc_sigpending: 486 lgr %r2,%r11 # pass pointer to pt_regs 487 brasl %r14,do_signal 488 TSTMSK __PT_FLAGS(%r11),_PIF_SYSCALL 489 jno .Lsysc_return 490.Lsysc_do_syscall: 491 lghi %r13,__TASK_thread 492 lmg %r2,%r7,__PT_R2(%r11) # load svc arguments 493 lghi %r1,0 # svc 0 returns -ENOSYS 494 j .Lsysc_do_svc 495 496# 497# _TIF_NOTIFY_RESUME is set, call do_notify_resume 498# 499.Lsysc_notify_resume: 500 lgr %r2,%r11 # pass pointer to pt_regs 501 larl %r14,.Lsysc_return 502 jg do_notify_resume 503 504# 505# _TIF_UPROBE is set, call uprobe_notify_resume 506# 507#ifdef CONFIG_UPROBES 508.Lsysc_uprobe_notify: 509 lgr %r2,%r11 # pass pointer to pt_regs 510 larl %r14,.Lsysc_return 511 jg uprobe_notify_resume 512#endif 513 514# 515# _TIF_GUARDED_STORAGE is set, call guarded_storage_load 516# 517.Lsysc_guarded_storage: 518 lgr %r2,%r11 # pass pointer to pt_regs 519 larl %r14,.Lsysc_return 520 jg gs_load_bc_cb 521# 522# _TIF_PATCH_PENDING is set, call klp_update_patch_state 523# 524#ifdef CONFIG_LIVEPATCH 525.Lsysc_patch_pending: 526 lg %r2,__LC_CURRENT # pass pointer to task struct 527 larl %r14,.Lsysc_return 528 jg klp_update_patch_state 529#endif 530 531# 532# _PIF_PER_TRAP is set, call do_per_trap 533# 534.Lsysc_singlestep: 535 ni __PT_FLAGS+7(%r11),255-_PIF_PER_TRAP 536 lgr %r2,%r11 # pass pointer to pt_regs 537 larl %r14,.Lsysc_return 538 jg do_per_trap 539 540# 541# _PIF_SYSCALL_RESTART is set, repeat the current system call 542# 543.Lsysc_syscall_restart: 544 ni __PT_FLAGS+7(%r11),255-_PIF_SYSCALL_RESTART 545 lmg %r1,%r7,__PT_R1(%r11) # load svc arguments 546 lg %r2,__PT_ORIG_GPR2(%r11) 547 j .Lsysc_do_svc 548 549# 550# call tracehook_report_syscall_entry/tracehook_report_syscall_exit before 551# and after the system call 552# 553.Lsysc_tracesys: 554 lgr %r2,%r11 # pass pointer to pt_regs 555 la %r3,0 556 llgh %r0,__PT_INT_CODE+2(%r11) 557 stg %r0,__PT_R2(%r11) 558 brasl %r14,do_syscall_trace_enter 559 lghi %r0,NR_syscalls 560 clgr %r0,%r2 561 jnh .Lsysc_tracenogo 562 sllg %r8,%r2,3 563 lg %r9,0(%r8,%r10) 564.Lsysc_tracego: 565 lmg %r3,%r7,__PT_R3(%r11) 566 stg %r7,STACK_FRAME_OVERHEAD(%r15) 567 lg %r2,__PT_ORIG_GPR2(%r11) 568 BASR_EX %r14,%r9 # call sys_xxx 569 stg %r2,__PT_R2(%r11) # store return value 570.Lsysc_tracenogo: 571 TSTMSK __TI_flags(%r12),_TIF_TRACE 572 jz .Lsysc_return 573 lgr %r2,%r11 # pass pointer to pt_regs 574 larl %r14,.Lsysc_return 575 jg do_syscall_trace_exit 576ENDPROC(system_call) 577 578# 579# a new process exits the kernel with ret_from_fork 580# 581ENTRY(ret_from_fork) 582 la %r11,STACK_FRAME_OVERHEAD(%r15) 583 lg %r12,__LC_CURRENT 584 brasl %r14,schedule_tail 585 TRACE_IRQS_ON 586 ssm __LC_SVC_NEW_PSW # reenable interrupts 587 tm __PT_PSW+1(%r11),0x01 # forking a kernel thread ? 588 jne .Lsysc_tracenogo 589 # it's a kernel thread 590 lmg %r9,%r10,__PT_R9(%r11) # load gprs 591 la %r2,0(%r10) 592 BASR_EX %r14,%r9 593 j .Lsysc_tracenogo 594ENDPROC(ret_from_fork) 595 596ENTRY(kernel_thread_starter) 597 la %r2,0(%r10) 598 BASR_EX %r14,%r9 599 j .Lsysc_tracenogo 600ENDPROC(kernel_thread_starter) 601 602/* 603 * Program check handler routine 604 */ 605 606ENTRY(pgm_check_handler) 607 stpt __LC_SYNC_ENTER_TIMER 608 BPOFF 609 stmg %r8,%r15,__LC_SAVE_AREA_SYNC 610 lg %r10,__LC_LAST_BREAK 611 lg %r12,__LC_CURRENT 612 lghi %r11,0 613 larl %r13,cleanup_critical 614 lmg %r8,%r9,__LC_PGM_OLD_PSW 615 tmhh %r8,0x0001 # test problem state bit 616 jnz 2f # -> fault in user space 617#if IS_ENABLED(CONFIG_KVM) 618 # cleanup critical section for program checks in sie64a 619 lgr %r14,%r9 620 slg %r14,BASED(.Lsie_critical_start) 621 clg %r14,BASED(.Lsie_critical_length) 622 jhe 0f 623 lg %r14,__SF_SIE_CONTROL(%r15) # get control block pointer 624 ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE 625 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce 626 larl %r9,sie_exit # skip forward to sie_exit 627 lghi %r11,_PIF_GUEST_FAULT 628#endif 6290: tmhh %r8,0x4000 # PER bit set in old PSW ? 630 jnz 1f # -> enabled, can't be a double fault 631 tm __LC_PGM_ILC+3,0x80 # check for per exception 632 jnz .Lpgm_svcper # -> single stepped svc 6331: CHECK_STACK __LC_SAVE_AREA_SYNC 634 aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) 635 # CHECK_VMAP_STACK branches to stack_overflow or 4f 636 CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,4f 6372: UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER 638 BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP 639 lg %r15,__LC_KERNEL_STACK 640 lgr %r14,%r12 641 aghi %r14,__TASK_thread # pointer to thread_struct 642 lghi %r13,__LC_PGM_TDB 643 tm __LC_PGM_ILC+2,0x02 # check for transaction abort 644 jz 3f 645 mvc __THREAD_trap_tdb(256,%r14),0(%r13) 6463: stg %r10,__THREAD_last_break(%r14) 6474: lgr %r13,%r11 648 la %r11,STACK_FRAME_OVERHEAD(%r15) 649 stmg %r0,%r7,__PT_R0(%r11) 650 # clear user controlled registers to prevent speculative use 651 xgr %r0,%r0 652 xgr %r1,%r1 653 xgr %r2,%r2 654 xgr %r3,%r3 655 xgr %r4,%r4 656 xgr %r5,%r5 657 xgr %r6,%r6 658 xgr %r7,%r7 659 mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC 660 stmg %r8,%r9,__PT_PSW(%r11) 661 mvc __PT_INT_CODE(4,%r11),__LC_PGM_ILC 662 mvc __PT_INT_PARM_LONG(8,%r11),__LC_TRANS_EXC_CODE 663 stg %r13,__PT_FLAGS(%r11) 664 stg %r10,__PT_ARGS(%r11) 665 tm __LC_PGM_ILC+3,0x80 # check for per exception 666 jz 5f 667 tmhh %r8,0x0001 # kernel per event ? 668 jz .Lpgm_kprobe 669 oi __PT_FLAGS+7(%r11),_PIF_PER_TRAP 670 mvc __THREAD_per_address(8,%r14),__LC_PER_ADDRESS 671 mvc __THREAD_per_cause(2,%r14),__LC_PER_CODE 672 mvc __THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID 6735: REENABLE_IRQS 674 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 675 larl %r1,pgm_check_table 676 llgh %r10,__PT_INT_CODE+2(%r11) 677 nill %r10,0x007f 678 sll %r10,3 679 je .Lpgm_return 680 lg %r9,0(%r10,%r1) # load address of handler routine 681 lgr %r2,%r11 # pass pointer to pt_regs 682 BASR_EX %r14,%r9 # branch to interrupt-handler 683.Lpgm_return: 684 LOCKDEP_SYS_EXIT 685 tm __PT_PSW+1(%r11),0x01 # returning to user ? 686 jno .Lsysc_restore 687 TSTMSK __PT_FLAGS(%r11),_PIF_SYSCALL 688 jo .Lsysc_do_syscall 689 j .Lsysc_tif 690 691# 692# PER event in supervisor state, must be kprobes 693# 694.Lpgm_kprobe: 695 REENABLE_IRQS 696 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 697 lgr %r2,%r11 # pass pointer to pt_regs 698 brasl %r14,do_per_trap 699 j .Lpgm_return 700 701# 702# single stepped system call 703# 704.Lpgm_svcper: 705 mvc __LC_RETURN_PSW(8),__LC_SVC_NEW_PSW 706 lghi %r13,__TASK_thread 707 larl %r14,.Lsysc_per 708 stg %r14,__LC_RETURN_PSW+8 709 lghi %r14,_PIF_SYSCALL | _PIF_PER_TRAP 710 lpswe __LC_RETURN_PSW # branch to .Lsysc_per and enable irqs 711ENDPROC(pgm_check_handler) 712 713/* 714 * IO interrupt handler routine 715 */ 716ENTRY(io_int_handler) 717 STCK __LC_INT_CLOCK 718 stpt __LC_ASYNC_ENTER_TIMER 719 BPOFF 720 stmg %r8,%r15,__LC_SAVE_AREA_ASYNC 721 lg %r12,__LC_CURRENT 722 larl %r13,cleanup_critical 723 lmg %r8,%r9,__LC_IO_OLD_PSW 724 SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER 725 stmg %r0,%r7,__PT_R0(%r11) 726 # clear user controlled registers to prevent speculative use 727 xgr %r0,%r0 728 xgr %r1,%r1 729 xgr %r2,%r2 730 xgr %r3,%r3 731 xgr %r4,%r4 732 xgr %r5,%r5 733 xgr %r6,%r6 734 xgr %r7,%r7 735 xgr %r10,%r10 736 mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC 737 stmg %r8,%r9,__PT_PSW(%r11) 738 mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID 739 xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) 740 TSTMSK __LC_CPU_FLAGS,_CIF_IGNORE_IRQ 741 jo .Lio_restore 742 TRACE_IRQS_OFF 743 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 744.Lio_loop: 745 lgr %r2,%r11 # pass pointer to pt_regs 746 lghi %r3,IO_INTERRUPT 747 tm __PT_INT_CODE+8(%r11),0x80 # adapter interrupt ? 748 jz .Lio_call 749 lghi %r3,THIN_INTERRUPT 750.Lio_call: 751 brasl %r14,do_IRQ 752 TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_LPAR 753 jz .Lio_return 754 tpi 0 755 jz .Lio_return 756 mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID 757 j .Lio_loop 758.Lio_return: 759 LOCKDEP_SYS_EXIT 760 TRACE_IRQS_ON 761.Lio_tif: 762 TSTMSK __TI_flags(%r12),_TIF_WORK 763 jnz .Lio_work # there is work to do (signals etc.) 764 TSTMSK __LC_CPU_FLAGS,_CIF_WORK 765 jnz .Lio_work 766.Lio_restore: 767 lg %r14,__LC_VDSO_PER_CPU 768 lmg %r0,%r10,__PT_R0(%r11) 769 mvc __LC_RETURN_PSW(16),__PT_PSW(%r11) 770 tm __PT_PSW+1(%r11),0x01 # returning to user ? 771 jno .Lio_exit_kernel 772 BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP 773.Lio_exit_timer: 774 stpt __LC_EXIT_TIMER 775 mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER 776.Lio_exit_kernel: 777 lmg %r11,%r15,__PT_R11(%r11) 778 lpswe __LC_RETURN_PSW 779.Lio_done: 780 781# 782# There is work todo, find out in which context we have been interrupted: 783# 1) if we return to user space we can do all _TIF_WORK work 784# 2) if we return to kernel code and kvm is enabled check if we need to 785# modify the psw to leave SIE 786# 3) if we return to kernel code and preemptive scheduling is enabled check 787# the preemption counter and if it is zero call preempt_schedule_irq 788# Before any work can be done, a switch to the kernel stack is required. 789# 790.Lio_work: 791 tm __PT_PSW+1(%r11),0x01 # returning to user ? 792 jo .Lio_work_user # yes -> do resched & signal 793#ifdef CONFIG_PREEMPT 794 # check for preemptive scheduling 795 icm %r0,15,__LC_PREEMPT_COUNT 796 jnz .Lio_restore # preemption is disabled 797 TSTMSK __TI_flags(%r12),_TIF_NEED_RESCHED 798 jno .Lio_restore 799 # switch to kernel stack 800 lg %r1,__PT_R15(%r11) 801 aghi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE) 802 mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) 803 xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) 804 la %r11,STACK_FRAME_OVERHEAD(%r1) 805 lgr %r15,%r1 806 # TRACE_IRQS_ON already done at .Lio_return, call 807 # TRACE_IRQS_OFF to keep things symmetrical 808 TRACE_IRQS_OFF 809 brasl %r14,preempt_schedule_irq 810 j .Lio_return 811#else 812 j .Lio_restore 813#endif 814 815# 816# Need to do work before returning to userspace, switch to kernel stack 817# 818.Lio_work_user: 819 lg %r1,__LC_KERNEL_STACK 820 mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) 821 xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) 822 la %r11,STACK_FRAME_OVERHEAD(%r1) 823 lgr %r15,%r1 824 825# 826# One of the work bits is on. Find out which one. 827# 828.Lio_work_tif: 829 TSTMSK __LC_CPU_FLAGS,_CIF_MCCK_PENDING 830 jo .Lio_mcck_pending 831 TSTMSK __TI_flags(%r12),_TIF_NEED_RESCHED 832 jo .Lio_reschedule 833#ifdef CONFIG_LIVEPATCH 834 TSTMSK __TI_flags(%r12),_TIF_PATCH_PENDING 835 jo .Lio_patch_pending 836#endif 837 TSTMSK __TI_flags(%r12),_TIF_SIGPENDING 838 jo .Lio_sigpending 839 TSTMSK __TI_flags(%r12),_TIF_NOTIFY_RESUME 840 jo .Lio_notify_resume 841 TSTMSK __TI_flags(%r12),_TIF_GUARDED_STORAGE 842 jo .Lio_guarded_storage 843 TSTMSK __LC_CPU_FLAGS,_CIF_FPU 844 jo .Lio_vxrs 845 TSTMSK __LC_CPU_FLAGS,(_CIF_ASCE_PRIMARY|_CIF_ASCE_SECONDARY) 846 jnz .Lio_asce 847 j .Lio_return # beware of critical section cleanup 848 849# 850# _CIF_MCCK_PENDING is set, call handler 851# 852.Lio_mcck_pending: 853 # TRACE_IRQS_ON already done at .Lio_return 854 brasl %r14,s390_handle_mcck # TIF bit will be cleared by handler 855 TRACE_IRQS_OFF 856 j .Lio_return 857 858# 859# _CIF_ASCE_PRIMARY and/or CIF_ASCE_SECONDARY set, load user space asce 860# 861.Lio_asce: 862 ni __LC_CPU_FLAGS+7,255-_CIF_ASCE_SECONDARY 863 lctlg %c7,%c7,__LC_VDSO_ASCE # load secondary asce 864 TSTMSK __LC_CPU_FLAGS,_CIF_ASCE_PRIMARY 865 jz .Lio_return 866#ifndef CONFIG_HAVE_MARCH_Z10_FEATURES 867 tm __LC_STFLE_FAC_LIST+3,0x10 # has MVCOS ? 868 jnz .Lio_set_fs_fixup 869 ni __LC_CPU_FLAGS+7,255-_CIF_ASCE_PRIMARY 870 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce 871 j .Lio_return 872.Lio_set_fs_fixup: 873#endif 874 larl %r14,.Lio_return 875 jg set_fs_fixup 876 877# 878# CIF_FPU is set, restore floating-point controls and floating-point registers. 879# 880.Lio_vxrs: 881 larl %r14,.Lio_return 882 jg load_fpu_regs 883 884# 885# _TIF_GUARDED_STORAGE is set, call guarded_storage_load 886# 887.Lio_guarded_storage: 888 # TRACE_IRQS_ON already done at .Lio_return 889 ssm __LC_SVC_NEW_PSW # reenable interrupts 890 lgr %r2,%r11 # pass pointer to pt_regs 891 brasl %r14,gs_load_bc_cb 892 ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts 893 TRACE_IRQS_OFF 894 j .Lio_return 895 896# 897# _TIF_NEED_RESCHED is set, call schedule 898# 899.Lio_reschedule: 900 # TRACE_IRQS_ON already done at .Lio_return 901 ssm __LC_SVC_NEW_PSW # reenable interrupts 902 brasl %r14,schedule # call scheduler 903 ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts 904 TRACE_IRQS_OFF 905 j .Lio_return 906 907# 908# _TIF_PATCH_PENDING is set, call klp_update_patch_state 909# 910#ifdef CONFIG_LIVEPATCH 911.Lio_patch_pending: 912 lg %r2,__LC_CURRENT # pass pointer to task struct 913 larl %r14,.Lio_return 914 jg klp_update_patch_state 915#endif 916 917# 918# _TIF_SIGPENDING or is set, call do_signal 919# 920.Lio_sigpending: 921 # TRACE_IRQS_ON already done at .Lio_return 922 ssm __LC_SVC_NEW_PSW # reenable interrupts 923 lgr %r2,%r11 # pass pointer to pt_regs 924 brasl %r14,do_signal 925 ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts 926 TRACE_IRQS_OFF 927 j .Lio_return 928 929# 930# _TIF_NOTIFY_RESUME or is set, call do_notify_resume 931# 932.Lio_notify_resume: 933 # TRACE_IRQS_ON already done at .Lio_return 934 ssm __LC_SVC_NEW_PSW # reenable interrupts 935 lgr %r2,%r11 # pass pointer to pt_regs 936 brasl %r14,do_notify_resume 937 ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts 938 TRACE_IRQS_OFF 939 j .Lio_return 940ENDPROC(io_int_handler) 941 942/* 943 * External interrupt handler routine 944 */ 945ENTRY(ext_int_handler) 946 STCK __LC_INT_CLOCK 947 stpt __LC_ASYNC_ENTER_TIMER 948 BPOFF 949 stmg %r8,%r15,__LC_SAVE_AREA_ASYNC 950 lg %r12,__LC_CURRENT 951 larl %r13,cleanup_critical 952 lmg %r8,%r9,__LC_EXT_OLD_PSW 953 SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER 954 stmg %r0,%r7,__PT_R0(%r11) 955 # clear user controlled registers to prevent speculative use 956 xgr %r0,%r0 957 xgr %r1,%r1 958 xgr %r2,%r2 959 xgr %r3,%r3 960 xgr %r4,%r4 961 xgr %r5,%r5 962 xgr %r6,%r6 963 xgr %r7,%r7 964 xgr %r10,%r10 965 mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC 966 stmg %r8,%r9,__PT_PSW(%r11) 967 lghi %r1,__LC_EXT_PARAMS2 968 mvc __PT_INT_CODE(4,%r11),__LC_EXT_CPU_ADDR 969 mvc __PT_INT_PARM(4,%r11),__LC_EXT_PARAMS 970 mvc __PT_INT_PARM_LONG(8,%r11),0(%r1) 971 xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) 972 TSTMSK __LC_CPU_FLAGS,_CIF_IGNORE_IRQ 973 jo .Lio_restore 974 TRACE_IRQS_OFF 975 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 976 lgr %r2,%r11 # pass pointer to pt_regs 977 lghi %r3,EXT_INTERRUPT 978 brasl %r14,do_IRQ 979 j .Lio_return 980ENDPROC(ext_int_handler) 981 982/* 983 * Load idle PSW. The second "half" of this function is in .Lcleanup_idle. 984 */ 985ENTRY(psw_idle) 986 stg %r3,__SF_EMPTY(%r15) 987 larl %r1,.Lpsw_idle_lpsw+4 988 stg %r1,__SF_EMPTY+8(%r15) 989#ifdef CONFIG_SMP 990 larl %r1,smp_cpu_mtid 991 llgf %r1,0(%r1) 992 ltgr %r1,%r1 993 jz .Lpsw_idle_stcctm 994 .insn rsy,0xeb0000000017,%r1,5,__SF_EMPTY+16(%r15) 995.Lpsw_idle_stcctm: 996#endif 997 oi __LC_CPU_FLAGS+7,_CIF_ENABLED_WAIT 998 BPON 999 STCK __CLOCK_IDLE_ENTER(%r2) 1000 stpt __TIMER_IDLE_ENTER(%r2) 1001.Lpsw_idle_lpsw: 1002 lpswe __SF_EMPTY(%r15) 1003 BR_EX %r14 1004.Lpsw_idle_end: 1005ENDPROC(psw_idle) 1006 1007/* 1008 * Store floating-point controls and floating-point or vector register 1009 * depending whether the vector facility is available. A critical section 1010 * cleanup assures that the registers are stored even if interrupted for 1011 * some other work. The CIF_FPU flag is set to trigger a lazy restore 1012 * of the register contents at return from io or a system call. 1013 */ 1014ENTRY(save_fpu_regs) 1015 lg %r2,__LC_CURRENT 1016 aghi %r2,__TASK_thread 1017 TSTMSK __LC_CPU_FLAGS,_CIF_FPU 1018 jo .Lsave_fpu_regs_exit 1019 stfpc __THREAD_FPU_fpc(%r2) 1020 lg %r3,__THREAD_FPU_regs(%r2) 1021 TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX 1022 jz .Lsave_fpu_regs_fp # no -> store FP regs 1023 VSTM %v0,%v15,0,%r3 # vstm 0,15,0(3) 1024 VSTM %v16,%v31,256,%r3 # vstm 16,31,256(3) 1025 j .Lsave_fpu_regs_done # -> set CIF_FPU flag 1026.Lsave_fpu_regs_fp: 1027 std 0,0(%r3) 1028 std 1,8(%r3) 1029 std 2,16(%r3) 1030 std 3,24(%r3) 1031 std 4,32(%r3) 1032 std 5,40(%r3) 1033 std 6,48(%r3) 1034 std 7,56(%r3) 1035 std 8,64(%r3) 1036 std 9,72(%r3) 1037 std 10,80(%r3) 1038 std 11,88(%r3) 1039 std 12,96(%r3) 1040 std 13,104(%r3) 1041 std 14,112(%r3) 1042 std 15,120(%r3) 1043.Lsave_fpu_regs_done: 1044 oi __LC_CPU_FLAGS+7,_CIF_FPU 1045.Lsave_fpu_regs_exit: 1046 BR_EX %r14 1047.Lsave_fpu_regs_end: 1048ENDPROC(save_fpu_regs) 1049EXPORT_SYMBOL(save_fpu_regs) 1050 1051/* 1052 * Load floating-point controls and floating-point or vector registers. 1053 * A critical section cleanup assures that the register contents are 1054 * loaded even if interrupted for some other work. 1055 * 1056 * There are special calling conventions to fit into sysc and io return work: 1057 * %r15: <kernel stack> 1058 * The function requires: 1059 * %r4 1060 */ 1061load_fpu_regs: 1062 lg %r4,__LC_CURRENT 1063 aghi %r4,__TASK_thread 1064 TSTMSK __LC_CPU_FLAGS,_CIF_FPU 1065 jno .Lload_fpu_regs_exit 1066 lfpc __THREAD_FPU_fpc(%r4) 1067 TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX 1068 lg %r4,__THREAD_FPU_regs(%r4) # %r4 <- reg save area 1069 jz .Lload_fpu_regs_fp # -> no VX, load FP regs 1070 VLM %v0,%v15,0,%r4 1071 VLM %v16,%v31,256,%r4 1072 j .Lload_fpu_regs_done 1073.Lload_fpu_regs_fp: 1074 ld 0,0(%r4) 1075 ld 1,8(%r4) 1076 ld 2,16(%r4) 1077 ld 3,24(%r4) 1078 ld 4,32(%r4) 1079 ld 5,40(%r4) 1080 ld 6,48(%r4) 1081 ld 7,56(%r4) 1082 ld 8,64(%r4) 1083 ld 9,72(%r4) 1084 ld 10,80(%r4) 1085 ld 11,88(%r4) 1086 ld 12,96(%r4) 1087 ld 13,104(%r4) 1088 ld 14,112(%r4) 1089 ld 15,120(%r4) 1090.Lload_fpu_regs_done: 1091 ni __LC_CPU_FLAGS+7,255-_CIF_FPU 1092.Lload_fpu_regs_exit: 1093 BR_EX %r14 1094.Lload_fpu_regs_end: 1095ENDPROC(load_fpu_regs) 1096 1097.L__critical_end: 1098 1099/* 1100 * Machine check handler routines 1101 */ 1102ENTRY(mcck_int_handler) 1103 STCK __LC_MCCK_CLOCK 1104 BPOFF 1105 la %r1,4095 # validate r1 1106 spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # validate cpu timer 1107 sckc __LC_CLOCK_COMPARATOR # validate comparator 1108 lam %a0,%a15,__LC_AREGS_SAVE_AREA-4095(%r1) # validate acrs 1109 lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# validate gprs 1110 lg %r12,__LC_CURRENT 1111 larl %r13,cleanup_critical 1112 lmg %r8,%r9,__LC_MCK_OLD_PSW 1113 TSTMSK __LC_MCCK_CODE,MCCK_CODE_SYSTEM_DAMAGE 1114 jo .Lmcck_panic # yes -> rest of mcck code invalid 1115 TSTMSK __LC_MCCK_CODE,MCCK_CODE_CR_VALID 1116 jno .Lmcck_panic # control registers invalid -> panic 1117 la %r14,4095 1118 lctlg %c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r14) # validate ctl regs 1119 ptlb 1120 lg %r11,__LC_MCESAD-4095(%r14) # extended machine check save area 1121 nill %r11,0xfc00 # MCESA_ORIGIN_MASK 1122 TSTMSK __LC_CREGS_SAVE_AREA+16-4095(%r14),CR2_GUARDED_STORAGE 1123 jno 0f 1124 TSTMSK __LC_MCCK_CODE,MCCK_CODE_GS_VALID 1125 jno 0f 1126 .insn rxy,0xe3000000004d,0,__MCESA_GS_SAVE_AREA(%r11) # LGSC 11270: l %r14,__LC_FP_CREG_SAVE_AREA-4095(%r14) 1128 TSTMSK __LC_MCCK_CODE,MCCK_CODE_FC_VALID 1129 jo 0f 1130 sr %r14,%r14 11310: sfpc %r14 1132 TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX 1133 jo 0f 1134 lghi %r14,__LC_FPREGS_SAVE_AREA 1135 ld %f0,0(%r14) 1136 ld %f1,8(%r14) 1137 ld %f2,16(%r14) 1138 ld %f3,24(%r14) 1139 ld %f4,32(%r14) 1140 ld %f5,40(%r14) 1141 ld %f6,48(%r14) 1142 ld %f7,56(%r14) 1143 ld %f8,64(%r14) 1144 ld %f9,72(%r14) 1145 ld %f10,80(%r14) 1146 ld %f11,88(%r14) 1147 ld %f12,96(%r14) 1148 ld %f13,104(%r14) 1149 ld %f14,112(%r14) 1150 ld %f15,120(%r14) 1151 j 1f 11520: VLM %v0,%v15,0,%r11 1153 VLM %v16,%v31,256,%r11 11541: lghi %r14,__LC_CPU_TIMER_SAVE_AREA 1155 mvc __LC_MCCK_ENTER_TIMER(8),0(%r14) 1156 TSTMSK __LC_MCCK_CODE,MCCK_CODE_CPU_TIMER_VALID 1157 jo 3f 1158 la %r14,__LC_SYNC_ENTER_TIMER 1159 clc 0(8,%r14),__LC_ASYNC_ENTER_TIMER 1160 jl 0f 1161 la %r14,__LC_ASYNC_ENTER_TIMER 11620: clc 0(8,%r14),__LC_EXIT_TIMER 1163 jl 1f 1164 la %r14,__LC_EXIT_TIMER 11651: clc 0(8,%r14),__LC_LAST_UPDATE_TIMER 1166 jl 2f 1167 la %r14,__LC_LAST_UPDATE_TIMER 11682: spt 0(%r14) 1169 mvc __LC_MCCK_ENTER_TIMER(8),0(%r14) 11703: TSTMSK __LC_MCCK_CODE,MCCK_CODE_PSW_MWP_VALID 1171 jno .Lmcck_panic 1172 tmhh %r8,0x0001 # interrupting from user ? 1173 jnz 4f 1174 TSTMSK __LC_MCCK_CODE,MCCK_CODE_PSW_IA_VALID 1175 jno .Lmcck_panic 11764: ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off 1177 SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_MCCK_ENTER_TIMER 1178.Lmcck_skip: 1179 lghi %r14,__LC_GPREGS_SAVE_AREA+64 1180 stmg %r0,%r7,__PT_R0(%r11) 1181 # clear user controlled registers to prevent speculative use 1182 xgr %r0,%r0 1183 xgr %r1,%r1 1184 xgr %r2,%r2 1185 xgr %r3,%r3 1186 xgr %r4,%r4 1187 xgr %r5,%r5 1188 xgr %r6,%r6 1189 xgr %r7,%r7 1190 xgr %r10,%r10 1191 mvc __PT_R8(64,%r11),0(%r14) 1192 stmg %r8,%r9,__PT_PSW(%r11) 1193 xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) 1194 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 1195 lgr %r2,%r11 # pass pointer to pt_regs 1196 brasl %r14,s390_do_machine_check 1197 tm __PT_PSW+1(%r11),0x01 # returning to user ? 1198 jno .Lmcck_return 1199 lg %r1,__LC_KERNEL_STACK # switch to kernel stack 1200 mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) 1201 xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) 1202 la %r11,STACK_FRAME_OVERHEAD(%r1) 1203 lgr %r15,%r1 1204 TSTMSK __LC_CPU_FLAGS,_CIF_MCCK_PENDING 1205 jno .Lmcck_return 1206 TRACE_IRQS_OFF 1207 brasl %r14,s390_handle_mcck 1208 TRACE_IRQS_ON 1209.Lmcck_return: 1210 lg %r14,__LC_VDSO_PER_CPU 1211 lmg %r0,%r10,__PT_R0(%r11) 1212 mvc __LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW 1213 tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ? 1214 jno 0f 1215 BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP 1216 stpt __LC_EXIT_TIMER 1217 mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER 12180: lmg %r11,%r15,__PT_R11(%r11) 1219 lpswe __LC_RETURN_MCCK_PSW 1220 1221.Lmcck_panic: 1222 lg %r15,__LC_NODAT_STACK 1223 la %r11,STACK_FRAME_OVERHEAD(%r15) 1224 j .Lmcck_skip 1225ENDPROC(mcck_int_handler) 1226 1227# 1228# PSW restart interrupt handler 1229# 1230ENTRY(restart_int_handler) 1231 ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40 1232 stg %r15,__LC_SAVE_AREA_RESTART 1233 lg %r15,__LC_RESTART_STACK 1234 xc STACK_FRAME_OVERHEAD(__PT_SIZE,%r15),STACK_FRAME_OVERHEAD(%r15) 1235 stmg %r0,%r14,STACK_FRAME_OVERHEAD+__PT_R0(%r15) 1236 mvc STACK_FRAME_OVERHEAD+__PT_R15(8,%r15),__LC_SAVE_AREA_RESTART 1237 mvc STACK_FRAME_OVERHEAD+__PT_PSW(16,%r15),__LC_RST_OLD_PSW 1238 xc 0(STACK_FRAME_OVERHEAD,%r15),0(%r15) 1239 lg %r1,__LC_RESTART_FN # load fn, parm & source cpu 1240 lg %r2,__LC_RESTART_DATA 1241 lg %r3,__LC_RESTART_SOURCE 1242 ltgr %r3,%r3 # test source cpu address 1243 jm 1f # negative -> skip source stop 12440: sigp %r4,%r3,SIGP_SENSE # sigp sense to source cpu 1245 brc 10,0b # wait for status stored 12461: basr %r14,%r1 # call function 1247 stap __SF_EMPTY(%r15) # store cpu address 1248 llgh %r3,__SF_EMPTY(%r15) 12492: sigp %r4,%r3,SIGP_STOP # sigp stop to current cpu 1250 brc 2,2b 12513: j 3b 1252ENDPROC(restart_int_handler) 1253 1254 .section .kprobes.text, "ax" 1255 1256#if defined(CONFIG_CHECK_STACK) || defined(CONFIG_VMAP_STACK) 1257/* 1258 * The synchronous or the asynchronous stack overflowed. We are dead. 1259 * No need to properly save the registers, we are going to panic anyway. 1260 * Setup a pt_regs so that show_trace can provide a good call trace. 1261 */ 1262ENTRY(stack_overflow) 1263 lg %r15,__LC_NODAT_STACK # change to panic stack 1264 la %r11,STACK_FRAME_OVERHEAD(%r15) 1265 stmg %r0,%r7,__PT_R0(%r11) 1266 stmg %r8,%r9,__PT_PSW(%r11) 1267 mvc __PT_R8(64,%r11),0(%r14) 1268 stg %r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2 1269 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 1270 lgr %r2,%r11 # pass pointer to pt_regs 1271 jg kernel_stack_overflow 1272ENDPROC(stack_overflow) 1273#endif 1274 1275ENTRY(cleanup_critical) 1276#if IS_ENABLED(CONFIG_KVM) 1277 clg %r9,BASED(.Lcleanup_table_sie) # .Lsie_gmap 1278 jl 0f 1279 clg %r9,BASED(.Lcleanup_table_sie+8)# .Lsie_done 1280 jl .Lcleanup_sie 1281#endif 1282 clg %r9,BASED(.Lcleanup_table) # system_call 1283 jl 0f 1284 clg %r9,BASED(.Lcleanup_table+8) # .Lsysc_do_svc 1285 jl .Lcleanup_system_call 1286 clg %r9,BASED(.Lcleanup_table+16) # .Lsysc_tif 1287 jl 0f 1288 clg %r9,BASED(.Lcleanup_table+24) # .Lsysc_restore 1289 jl .Lcleanup_sysc_tif 1290 clg %r9,BASED(.Lcleanup_table+32) # .Lsysc_done 1291 jl .Lcleanup_sysc_restore 1292 clg %r9,BASED(.Lcleanup_table+40) # .Lio_tif 1293 jl 0f 1294 clg %r9,BASED(.Lcleanup_table+48) # .Lio_restore 1295 jl .Lcleanup_io_tif 1296 clg %r9,BASED(.Lcleanup_table+56) # .Lio_done 1297 jl .Lcleanup_io_restore 1298 clg %r9,BASED(.Lcleanup_table+64) # psw_idle 1299 jl 0f 1300 clg %r9,BASED(.Lcleanup_table+72) # .Lpsw_idle_end 1301 jl .Lcleanup_idle 1302 clg %r9,BASED(.Lcleanup_table+80) # save_fpu_regs 1303 jl 0f 1304 clg %r9,BASED(.Lcleanup_table+88) # .Lsave_fpu_regs_end 1305 jl .Lcleanup_save_fpu_regs 1306 clg %r9,BASED(.Lcleanup_table+96) # load_fpu_regs 1307 jl 0f 1308 clg %r9,BASED(.Lcleanup_table+104) # .Lload_fpu_regs_end 1309 jl .Lcleanup_load_fpu_regs 13100: BR_EX %r14,%r11 1311ENDPROC(cleanup_critical) 1312 1313 .align 8 1314.Lcleanup_table: 1315 .quad system_call 1316 .quad .Lsysc_do_svc 1317 .quad .Lsysc_tif 1318 .quad .Lsysc_restore 1319 .quad .Lsysc_done 1320 .quad .Lio_tif 1321 .quad .Lio_restore 1322 .quad .Lio_done 1323 .quad psw_idle 1324 .quad .Lpsw_idle_end 1325 .quad save_fpu_regs 1326 .quad .Lsave_fpu_regs_end 1327 .quad load_fpu_regs 1328 .quad .Lload_fpu_regs_end 1329 1330#if IS_ENABLED(CONFIG_KVM) 1331.Lcleanup_table_sie: 1332 .quad .Lsie_gmap 1333 .quad .Lsie_done 1334 1335.Lcleanup_sie: 1336 cghi %r11,__LC_SAVE_AREA_ASYNC #Is this in normal interrupt? 1337 je 1f 1338 slg %r9,BASED(.Lsie_crit_mcck_start) 1339 clg %r9,BASED(.Lsie_crit_mcck_length) 1340 jh 1f 1341 oi __LC_CPU_FLAGS+7, _CIF_MCCK_GUEST 13421: BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST) 1343 lg %r9,__SF_SIE_CONTROL(%r15) # get control block pointer 1344 ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE 1345 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce 1346 larl %r9,sie_exit # skip forward to sie_exit 1347 BR_EX %r14,%r11 1348#endif 1349 1350.Lcleanup_system_call: 1351 # check if stpt has been executed 1352 clg %r9,BASED(.Lcleanup_system_call_insn) 1353 jh 0f 1354 mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER 1355 cghi %r11,__LC_SAVE_AREA_ASYNC 1356 je 0f 1357 mvc __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER 13580: # check if stmg has been executed 1359 clg %r9,BASED(.Lcleanup_system_call_insn+8) 1360 jh 0f 1361 mvc __LC_SAVE_AREA_SYNC(64),0(%r11) 13620: # check if base register setup + TIF bit load has been done 1363 clg %r9,BASED(.Lcleanup_system_call_insn+16) 1364 jhe 0f 1365 # set up saved register r12 task struct pointer 1366 stg %r12,32(%r11) 1367 # set up saved register r13 __TASK_thread offset 1368 mvc 40(8,%r11),BASED(.Lcleanup_system_call_const) 13690: # check if the user time update has been done 1370 clg %r9,BASED(.Lcleanup_system_call_insn+24) 1371 jh 0f 1372 lg %r15,__LC_EXIT_TIMER 1373 slg %r15,__LC_SYNC_ENTER_TIMER 1374 alg %r15,__LC_USER_TIMER 1375 stg %r15,__LC_USER_TIMER 13760: # check if the system time update has been done 1377 clg %r9,BASED(.Lcleanup_system_call_insn+32) 1378 jh 0f 1379 lg %r15,__LC_LAST_UPDATE_TIMER 1380 slg %r15,__LC_EXIT_TIMER 1381 alg %r15,__LC_SYSTEM_TIMER 1382 stg %r15,__LC_SYSTEM_TIMER 13830: # update accounting time stamp 1384 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER 1385 BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP 1386 # set up saved register r11 1387 lg %r15,__LC_KERNEL_STACK 1388 la %r9,STACK_FRAME_OVERHEAD(%r15) 1389 stg %r9,24(%r11) # r11 pt_regs pointer 1390 # fill pt_regs 1391 mvc __PT_R8(64,%r9),__LC_SAVE_AREA_SYNC 1392 stmg %r0,%r7,__PT_R0(%r9) 1393 mvc __PT_PSW(16,%r9),__LC_SVC_OLD_PSW 1394 mvc __PT_INT_CODE(4,%r9),__LC_SVC_ILC 1395 xc __PT_FLAGS(8,%r9),__PT_FLAGS(%r9) 1396 mvi __PT_FLAGS+7(%r9),_PIF_SYSCALL 1397 # setup saved register r15 1398 stg %r15,56(%r11) # r15 stack pointer 1399 # set new psw address and exit 1400 larl %r9,.Lsysc_do_svc 1401 BR_EX %r14,%r11 1402.Lcleanup_system_call_insn: 1403 .quad system_call 1404 .quad .Lsysc_stmg 1405 .quad .Lsysc_per 1406 .quad .Lsysc_vtime+36 1407 .quad .Lsysc_vtime+42 1408.Lcleanup_system_call_const: 1409 .quad __TASK_thread 1410 1411.Lcleanup_sysc_tif: 1412 larl %r9,.Lsysc_tif 1413 BR_EX %r14,%r11 1414 1415.Lcleanup_sysc_restore: 1416 # check if stpt has been executed 1417 clg %r9,BASED(.Lcleanup_sysc_restore_insn) 1418 jh 0f 1419 mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER 1420 cghi %r11,__LC_SAVE_AREA_ASYNC 1421 je 0f 1422 mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER 14230: clg %r9,BASED(.Lcleanup_sysc_restore_insn+8) 1424 je 1f 1425 lg %r9,24(%r11) # get saved pointer to pt_regs 1426 mvc __LC_RETURN_PSW(16),__PT_PSW(%r9) 1427 mvc 0(64,%r11),__PT_R8(%r9) 1428 lmg %r0,%r7,__PT_R0(%r9) 14291: lmg %r8,%r9,__LC_RETURN_PSW 1430 BR_EX %r14,%r11 1431.Lcleanup_sysc_restore_insn: 1432 .quad .Lsysc_exit_timer 1433 .quad .Lsysc_done - 4 1434 1435.Lcleanup_io_tif: 1436 larl %r9,.Lio_tif 1437 BR_EX %r14,%r11 1438 1439.Lcleanup_io_restore: 1440 # check if stpt has been executed 1441 clg %r9,BASED(.Lcleanup_io_restore_insn) 1442 jh 0f 1443 mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER 14440: clg %r9,BASED(.Lcleanup_io_restore_insn+8) 1445 je 1f 1446 lg %r9,24(%r11) # get saved r11 pointer to pt_regs 1447 mvc __LC_RETURN_PSW(16),__PT_PSW(%r9) 1448 mvc 0(64,%r11),__PT_R8(%r9) 1449 lmg %r0,%r7,__PT_R0(%r9) 14501: lmg %r8,%r9,__LC_RETURN_PSW 1451 BR_EX %r14,%r11 1452.Lcleanup_io_restore_insn: 1453 .quad .Lio_exit_timer 1454 .quad .Lio_done - 4 1455 1456.Lcleanup_idle: 1457 ni __LC_CPU_FLAGS+7,255-_CIF_ENABLED_WAIT 1458 # copy interrupt clock & cpu timer 1459 mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK 1460 mvc __TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER 1461 cghi %r11,__LC_SAVE_AREA_ASYNC 1462 je 0f 1463 mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK 1464 mvc __TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER 14650: # check if stck & stpt have been executed 1466 clg %r9,BASED(.Lcleanup_idle_insn) 1467 jhe 1f 1468 mvc __CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2) 1469 mvc __TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2) 14701: # calculate idle cycles 1471#ifdef CONFIG_SMP 1472 clg %r9,BASED(.Lcleanup_idle_insn) 1473 jl 3f 1474 larl %r1,smp_cpu_mtid 1475 llgf %r1,0(%r1) 1476 ltgr %r1,%r1 1477 jz 3f 1478 .insn rsy,0xeb0000000017,%r1,5,__SF_EMPTY+80(%r15) 1479 larl %r3,mt_cycles 1480 ag %r3,__LC_PERCPU_OFFSET 1481 la %r4,__SF_EMPTY+16(%r15) 14822: lg %r0,0(%r3) 1483 slg %r0,0(%r4) 1484 alg %r0,64(%r4) 1485 stg %r0,0(%r3) 1486 la %r3,8(%r3) 1487 la %r4,8(%r4) 1488 brct %r1,2b 1489#endif 14903: # account system time going idle 1491 lg %r9,__LC_STEAL_TIMER 1492 alg %r9,__CLOCK_IDLE_ENTER(%r2) 1493 slg %r9,__LC_LAST_UPDATE_CLOCK 1494 stg %r9,__LC_STEAL_TIMER 1495 mvc __LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2) 1496 lg %r9,__LC_SYSTEM_TIMER 1497 alg %r9,__LC_LAST_UPDATE_TIMER 1498 slg %r9,__TIMER_IDLE_ENTER(%r2) 1499 stg %r9,__LC_SYSTEM_TIMER 1500 mvc __LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2) 1501 # prepare return psw 1502 nihh %r8,0xfcfd # clear irq & wait state bits 1503 lg %r9,48(%r11) # return from psw_idle 1504 BR_EX %r14,%r11 1505.Lcleanup_idle_insn: 1506 .quad .Lpsw_idle_lpsw 1507 1508.Lcleanup_save_fpu_regs: 1509 larl %r9,save_fpu_regs 1510 BR_EX %r14,%r11 1511 1512.Lcleanup_load_fpu_regs: 1513 larl %r9,load_fpu_regs 1514 BR_EX %r14,%r11 1515 1516/* 1517 * Integer constants 1518 */ 1519 .align 8 1520.Lcritical_start: 1521 .quad .L__critical_start 1522.Lcritical_length: 1523 .quad .L__critical_end - .L__critical_start 1524#if IS_ENABLED(CONFIG_KVM) 1525.Lsie_critical_start: 1526 .quad .Lsie_gmap 1527.Lsie_critical_length: 1528 .quad .Lsie_done - .Lsie_gmap 1529.Lsie_crit_mcck_start: 1530 .quad .Lsie_entry 1531.Lsie_crit_mcck_length: 1532 .quad .Lsie_skip - .Lsie_entry 1533#endif 1534 .section .rodata, "a" 1535#define SYSCALL(esame,emu) .quad __s390x_ ## esame 1536 .globl sys_call_table 1537sys_call_table: 1538#include "asm/syscall_table.h" 1539#undef SYSCALL 1540 1541#ifdef CONFIG_COMPAT 1542 1543#define SYSCALL(esame,emu) .quad __s390_ ## emu 1544 .globl sys_call_table_emu 1545sys_call_table_emu: 1546#include "asm/syscall_table.h" 1547#undef SYSCALL 1548#endif 1549