/* SPDX-License-Identifier: GPL-2.0 */
/*
 *    S390 low-level entry points.
 *
 *    Copyright IBM Corp. 1999, 2012
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *               Hartmut Penner (hp@de.ibm.com),
 *               Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/alternative-asm.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/ctl_reg.h>
#include <asm/dwarf.h>
#include <asm/errno.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/page.h>
#include <asm/sigp.h>
#include <asm/irq.h>
#include <asm/vx-insn.h>
#include <asm/setup.h>
#include <asm/nmi.h>
#include <asm/export.h>
#include <asm/nospec-insn.h>

__PT_R0  = __PT_GPRS
__PT_R1  = __PT_GPRS + 8
__PT_R2  = __PT_GPRS + 16
__PT_R3  = __PT_GPRS + 24
__PT_R4  = __PT_GPRS + 32
__PT_R5  = __PT_GPRS + 40
__PT_R6  = __PT_GPRS + 48
__PT_R7  = __PT_GPRS + 56
__PT_R8  = __PT_GPRS + 64
__PT_R9  = __PT_GPRS + 72
__PT_R10 = __PT_GPRS + 80
__PT_R11 = __PT_GPRS + 88
__PT_R12 = __PT_GPRS + 96
__PT_R13 = __PT_GPRS + 104
__PT_R14 = __PT_GPRS + 112
__PT_R15 = __PT_GPRS + 120

STACK_SHIFT = PAGE_SHIFT + THREAD_SIZE_ORDER
STACK_SIZE  = 1 << STACK_SHIFT
STACK_INIT  = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE

_TIF_WORK  = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
              _TIF_UPROBE | _TIF_GUARDED_STORAGE | _TIF_PATCH_PENDING)
_TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
              _TIF_SYSCALL_TRACEPOINT)
_CIF_WORK  = (_CIF_ASCE_PRIMARY | _CIF_ASCE_SECONDARY | _CIF_FPU)
_PIF_WORK  = (_PIF_PER_TRAP | _PIF_SYSCALL_RESTART)

_LPP_OFFSET = __LC_LPP

        .macro  TRACE_IRQS_ON
#ifdef CONFIG_TRACE_IRQFLAGS
        basr    %r2,%r0
        brasl   %r14,trace_hardirqs_on_caller
#endif
        .endm

        .macro  TRACE_IRQS_OFF
#ifdef CONFIG_TRACE_IRQFLAGS
        basr    %r2,%r0
        brasl   %r14,trace_hardirqs_off_caller
#endif
        .endm

        .macro  LOCKDEP_SYS_EXIT
#ifdef CONFIG_LOCKDEP
        tm      __PT_PSW+1(%r11),0x01   # returning to user ?
        jz      .+10
        brasl   %r14,lockdep_sys_exit
#endif
        .endm

        .macro  CHECK_STACK savearea
#ifdef CONFIG_CHECK_STACK
        tml     %r15,STACK_SIZE - CONFIG_STACK_GUARD
        lghi    %r14,\savearea
        jz      stack_overflow
#endif
        .endm

        .macro  CHECK_VMAP_STACK savearea,oklabel
#ifdef CONFIG_VMAP_STACK
        lgr     %r14,%r15
        nill    %r14,0x10000 - STACK_SIZE
        oill    %r14,STACK_INIT
        clg     %r14,__LC_KERNEL_STACK
        je      \oklabel
        clg     %r14,__LC_ASYNC_STACK
        je      \oklabel
        clg     %r14,__LC_NODAT_STACK
        je      \oklabel
        clg     %r14,__LC_RESTART_STACK
        je      \oklabel
        lghi    %r14,\savearea
        j       stack_overflow
#else
        j       \oklabel
#endif
        .endm

        .macro  SWITCH_ASYNC savearea,timer
        tmhh    %r8,0x0001              # interrupting from user ?
        jnz     2f
#if IS_ENABLED(CONFIG_KVM)
        lgr     %r14,%r9
        larl    %r13,.Lsie_gmap
        slgr    %r14,%r13
        lghi    %r13,.Lsie_done - .Lsie_gmap
        clgr    %r14,%r13
        jhe     0f
        lghi    %r11,\savearea          # inside critical section, do cleanup
        brasl   %r14,.Lcleanup_sie
#endif
0:      larl    %r13,.Lpsw_idle_exit
        cgr     %r13,%r9
        jne     1f

        mvc     __CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK
        mvc     __TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER
        # account system time going idle
        ni      __LC_CPU_FLAGS+7,255-_CIF_ENABLED_WAIT

        lg      %r13,__LC_STEAL_TIMER
        alg     %r13,__CLOCK_IDLE_ENTER(%r2)
        slg     %r13,__LC_LAST_UPDATE_CLOCK
        stg     %r13,__LC_STEAL_TIMER

        mvc     __LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2)

        lg      %r13,__LC_SYSTEM_TIMER
        alg     %r13,__LC_LAST_UPDATE_TIMER
        slg     %r13,__TIMER_IDLE_ENTER(%r2)
        stg     %r13,__LC_SYSTEM_TIMER
        mvc     __LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2)

        nihh    %r8,0xfcfd              # clear wait state and irq bits
1:      lg      %r14,__LC_ASYNC_STACK   # are we already on the target stack?
        slgr    %r14,%r15
        srag    %r14,%r14,STACK_SHIFT
        jnz     3f
        CHECK_STACK \savearea
        aghi    %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
        j       4f
2:      UPDATE_VTIME %r14,%r15,\timer
        BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
3:      lg      %r15,__LC_ASYNC_STACK   # load async stack
4:      la      %r11,STACK_FRAME_OVERHEAD(%r15)
        .endm

        .macro  UPDATE_VTIME w1,w2,enter_timer
        lg      \w1,__LC_EXIT_TIMER
        lg      \w2,__LC_LAST_UPDATE_TIMER
        slg     \w1,\enter_timer
        slg     \w2,__LC_EXIT_TIMER
        alg     \w1,__LC_USER_TIMER
        alg     \w2,__LC_SYSTEM_TIMER
        stg     \w1,__LC_USER_TIMER
        stg     \w2,__LC_SYSTEM_TIMER
        mvc     __LC_LAST_UPDATE_TIMER(8),\enter_timer
        .endm

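/*
 * UPDATE_VTIME in formula form (illustrative only; the s390 CPU timer
 * counts down, so earlier snapshots hold larger values):
 *      user_timer   += exit_timer - enter_timer
 *      system_timer += last_update_timer - exit_timer
 *      last_update_timer = enter_timer
 * i.e. the time between the last kernel exit and this entry is charged
 * to the user, the remainder since the last update to the system.
 */
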
        .macro  RESTORE_SM_CLEAR_PER
        stg     %r8,__LC_RETURN_PSW
        ni      __LC_RETURN_PSW,0xbf
        ssm     __LC_RETURN_PSW
        .endm

        .macro  ENABLE_INTS
        stosm   __SF_EMPTY(%r15),3
        .endm

        .macro  ENABLE_INTS_TRACE
        TRACE_IRQS_ON
        ENABLE_INTS
        .endm

        .macro  DISABLE_INTS
        stnsm   __SF_EMPTY(%r15),0xfc
        .endm

        .macro  DISABLE_INTS_TRACE
        DISABLE_INTS
        TRACE_IRQS_OFF
        .endm

        .macro  STCK savearea
#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
        .insn   s,0xb27c0000,\savearea  # store clock fast
#else
        .insn   s,0xb2050000,\savearea  # store clock
#endif
        .endm

        /*
         * The TSTMSK macro generates a test-under-mask instruction by
         * calculating the memory offset for the specified mask value.
         * Mask value can be any constant. The macro shifts the mask
         * value to calculate the memory offset for the test-under-mask
         * instruction.
         */
        .macro  TSTMSK addr, mask, size=8, bytepos=0
        .if (\bytepos < \size) && (\mask >> 8)
        .if (\mask & 0xff)
        .error "Mask exceeds byte boundary"
        .endif
        TSTMSK  \addr, "(\mask >> 8)", \size, "(\bytepos + 1)"
        .exitm
        .endif
        .ifeq \mask
        .error "Mask must not be zero"
        .endif
        off = \size - \bytepos - 1
        tm      off+\addr, \mask
        .endm

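/*
 * Usage sketch (illustrative, not referenced elsewhere in this file):
 * a mask that fits into the low byte emits a single test under mask, e.g.
 *      TSTMSK  __LC_CPU_FLAGS,_CIF_FPU
 * expands to "tm 7+__LC_CPU_FLAGS,_CIF_FPU" for the default size of 8.
 * A mask in a higher byte, e.g. 0x0100, recurses once and ends up as a
 * tm on offset 6 with mask 0x01.
 */
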
        .macro BPOFF
        ALTERNATIVE "", ".long 0xb2e8c000", 82
        .endm

        .macro BPON
        ALTERNATIVE "", ".long 0xb2e8d000", 82
        .endm

        .macro BPENTER tif_ptr,tif_mask
        ALTERNATIVE "TSTMSK \tif_ptr,\tif_mask; jz .+8; .long 0xb2e8d000", \
                    "", 82
        .endm

        .macro BPEXIT tif_ptr,tif_mask
        TSTMSK  \tif_ptr,\tif_mask
        ALTERNATIVE "jz .+8; .long 0xb2e8c000", \
                    "jnz .+8; .long 0xb2e8d000", 82
        .endm

        GEN_BR_THUNK %r9
        GEN_BR_THUNK %r14
        GEN_BR_THUNK %r14,%r11

        .section .kprobes.text, "ax"
.Ldummy:
        /*
         * This nop exists only to avoid having __switch_to start at the
         * beginning of the kprobes text section. Otherwise there would be
         * several symbols at the same address and e.g. objdump would pick
         * an arbitrary one when disassembling this code. With the added
         * nop in between, the __switch_to symbol is unique again.
         */
        nop     0

ENTRY(__bpon)
        .globl __bpon
        BPON
        BR_EX   %r14
ENDPROC(__bpon)

/*
 * Scheduler resume function, called by switch_to
 *  gpr2 = (task_struct *) prev
 *  gpr3 = (task_struct *) next
 * Returns:
 *  gpr2 = prev
 */
ENTRY(__switch_to)
        stmg    %r6,%r15,__SF_GPRS(%r15)        # store gprs of prev task
        lghi    %r4,__TASK_stack
        lghi    %r1,__TASK_thread
        llill   %r5,STACK_INIT
        stg     %r15,__THREAD_ksp(%r1,%r2)      # store kernel stack of prev
        lg      %r15,0(%r4,%r3)                 # start of kernel stack of next
        agr     %r15,%r5                        # end of kernel stack of next
        stg     %r3,__LC_CURRENT                # store task struct of next
        stg     %r15,__LC_KERNEL_STACK          # store end of kernel stack
        lg      %r15,__THREAD_ksp(%r1,%r3)      # load kernel stack of next
        aghi    %r3,__TASK_pid
        mvc     __LC_CURRENT_PID(4,%r0),0(%r3)  # store pid of next
        lmg     %r6,%r15,__SF_GPRS(%r15)        # load gprs of next task
        ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40
        BR_EX   %r14
ENDPROC(__switch_to)

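/*
 * Note on the ALTERNATIVE above (a sketch of the intent): the number 40
 * refers to facility bit 40, the load-program-parameter facility; when
 * it is installed, the raw .insn is "lpp _LPP_OFFSET", which reloads
 * the LPP value of the incoming task so that CPU-measurement sampling
 * attributes samples to the right context.
 */
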
#if IS_ENABLED(CONFIG_KVM)
/*
 * sie64a calling convention:
 * %r2 pointer to sie control block
 * %r3 guest register save area
 */
ENTRY(sie64a)
        stmg    %r6,%r14,__SF_GPRS(%r15)        # save kernel registers
        lg      %r12,__LC_CURRENT
        stg     %r2,__SF_SIE_CONTROL(%r15)      # save control block pointer
        stg     %r3,__SF_SIE_SAVEAREA(%r15)     # save guest register save area
        xc      __SF_SIE_REASON(8,%r15),__SF_SIE_REASON(%r15) # reason code = 0
        mvc     __SF_SIE_FLAGS(8,%r15),__TI_flags(%r12) # copy thread flags
        TSTMSK  __LC_CPU_FLAGS,_CIF_FPU         # load guest fp/vx registers ?
        jno     .Lsie_load_guest_gprs
        brasl   %r14,load_fpu_regs              # load guest fp/vx regs
.Lsie_load_guest_gprs:
        lmg     %r0,%r13,0(%r3)                 # load guest gprs 0-13
        lg      %r14,__LC_GMAP                  # get gmap pointer
        ltgr    %r14,%r14
        jz      .Lsie_gmap
        lctlg   %c1,%c1,__GMAP_ASCE(%r14)       # load primary asce
.Lsie_gmap:
        lg      %r14,__SF_SIE_CONTROL(%r15)     # get control block pointer
        oi      __SIE_PROG0C+3(%r14),1          # we are going into SIE now
        tm      __SIE_PROG20+3(%r14),3          # last exit...
        jnz     .Lsie_skip
        TSTMSK  __LC_CPU_FLAGS,_CIF_FPU
        jo      .Lsie_skip                      # exit if fp/vx regs changed
        BPEXIT  __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
.Lsie_entry:
        sie     0(%r14)
        BPOFF
        BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
.Lsie_skip:
        ni      __SIE_PROG0C+3(%r14),0xfe       # no longer in SIE
        lctlg   %c1,%c1,__LC_USER_ASCE          # load primary asce
.Lsie_done:
# some program checks are suppressing. C code (e.g. do_protection_exception)
# will rewind the PSW by the ILC, which is often 4 bytes in case of SIE. There
# are some corner cases (e.g. runtime instrumentation) where ILC is unpredictable.
# Other instructions between sie64a and .Lsie_done should not cause program
# interrupts. So let's use 3 nops as a landing pad for all possible rewinds.
# See also .Lcleanup_sie
.Lrewind_pad6:
        nopr    7
.Lrewind_pad4:
        nopr    7
.Lrewind_pad2:
        nopr    7
        .globl sie_exit
sie_exit:
        lg      %r14,__SF_SIE_SAVEAREA(%r15)    # load guest register save area
        stmg    %r0,%r13,0(%r14)                # save guest gprs 0-13
        xgr     %r0,%r0                         # clear guest registers to
        xgr     %r1,%r1                         # prevent speculative use
        xgr     %r2,%r2
        xgr     %r3,%r3
        xgr     %r4,%r4
        xgr     %r5,%r5
        lmg     %r6,%r14,__SF_GPRS(%r15)        # restore kernel registers
        lg      %r2,__SF_SIE_REASON(%r15)       # return exit reason code
        BR_EX   %r14
.Lsie_fault:
        lghi    %r14,-EFAULT
        stg     %r14,__SF_SIE_REASON(%r15)      # set exit reason code
        j       sie_exit

        EX_TABLE(.Lrewind_pad6,.Lsie_fault)
        EX_TABLE(.Lrewind_pad4,.Lsie_fault)
        EX_TABLE(.Lrewind_pad2,.Lsie_fault)
        EX_TABLE(sie_exit,.Lsie_fault)
ENDPROC(sie64a)
EXPORT_SYMBOL(sie64a)
EXPORT_SYMBOL(sie_exit)
#endif

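/*
 * Rough C-side view of the convention above (a sketch; the authoritative
 * prototype lives in the KVM/s390 headers):
 *      int sie64a(struct kvm_s390_sie_block *scb, u64 *guest_gprs);
 * %r2 and %r3 carry the two arguments, and the reason code stored in
 * __SF_SIE_REASON comes back as the return value in %r2.
 */
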
/*
 * SVC interrupt handler routine. System calls are synchronous events and
 * are executed with interrupts enabled.
 */

ENTRY(system_call)
        stpt    __LC_SYNC_ENTER_TIMER
        stmg    %r8,%r15,__LC_SAVE_AREA_SYNC
        BPOFF
        lg      %r12,__LC_CURRENT
        lghi    %r13,__TASK_thread
        lghi    %r14,_PIF_SYSCALL
.Lsysc_per:
        lg      %r15,__LC_KERNEL_STACK
        la      %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs
        UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER
        BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
        stmg    %r0,%r7,__PT_R0(%r11)
        mvc     __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
        mvc     __PT_PSW(16,%r11),__LC_SVC_OLD_PSW
        mvc     __PT_INT_CODE(4,%r11),__LC_SVC_ILC
        stg     %r14,__PT_FLAGS(%r11)
        ENABLE_INTS
.Lsysc_do_svc:
        # clear user controlled register to prevent speculative use
        xgr     %r0,%r0
        # load address of system call table
        lg      %r10,__THREAD_sysc_table(%r13,%r12)
        llgh    %r8,__PT_INT_CODE+2(%r11)
        slag    %r8,%r8,3                       # shift and test for svc 0
        jnz     .Lsysc_nr_ok
        # svc 0: system call number in %r1
        llgfr   %r1,%r1                         # clear high word in r1
        cghi    %r1,NR_syscalls
        jnl     .Lsysc_nr_ok
        sth     %r1,__PT_INT_CODE+2(%r11)
        slag    %r8,%r1,3
.Lsysc_nr_ok:
        xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
        stg     %r2,__PT_ORIG_GPR2(%r11)
        stg     %r7,STACK_FRAME_OVERHEAD(%r15)
        lg      %r9,0(%r8,%r10)                 # get system call address
        TSTMSK  __TI_flags(%r12),_TIF_TRACE
        jnz     .Lsysc_tracesys
        BASR_EX %r14,%r9                        # call sys_xxxx
        stg     %r2,__PT_R2(%r11)               # store return value

.Lsysc_return:
#ifdef CONFIG_DEBUG_RSEQ
        lgr     %r2,%r11
        brasl   %r14,rseq_syscall
#endif
        LOCKDEP_SYS_EXIT
.Lsysc_tif:
        TSTMSK  __PT_FLAGS(%r11),_PIF_WORK
        jnz     .Lsysc_work
        TSTMSK  __TI_flags(%r12),_TIF_WORK
        jnz     .Lsysc_work                     # check for work
        TSTMSK  __LC_CPU_FLAGS,(_CIF_WORK-_CIF_FPU)
        jnz     .Lsysc_work
        BPEXIT  __TI_flags(%r12),_TIF_ISOLATE_BP
.Lsysc_restore:
        DISABLE_INTS
        TSTMSK  __LC_CPU_FLAGS,_CIF_FPU
        jz      .Lsysc_skip_fpu
        brasl   %r14,load_fpu_regs
.Lsysc_skip_fpu:
        lg      %r14,__LC_VDSO_PER_CPU
        mvc     __LC_RETURN_PSW(16),__PT_PSW(%r11)
        stpt    __LC_EXIT_TIMER
        mvc     __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
        lmg     %r0,%r15,__PT_R0(%r11)
        b       __LC_RETURN_LPSWE

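/*
 * Return-path note (a sketch of the intent): __LC_EXIT_TIMER is
 * mirrored into the vdso per-cpu area for the vdso's ECTG-based time
 * calculation, and the final branch goes through an lpswe that lives in
 * lowcore (__LC_RETURN_LPSWE) so that the address recorded by the
 * breaking-event facility is a fixed lowcore address instead of a
 * kernel text address; see the __LC_LAST_BREAK handling in
 * pgm_check_handler below.
 */
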
#
# One of the work bits is on. Find out which one.
#
.Lsysc_work:
        TSTMSK  __TI_flags(%r12),_TIF_NEED_RESCHED
        jo      .Lsysc_reschedule
        TSTMSK  __PT_FLAGS(%r11),_PIF_SYSCALL_RESTART
        jo      .Lsysc_syscall_restart
#ifdef CONFIG_UPROBES
        TSTMSK  __TI_flags(%r12),_TIF_UPROBE
        jo      .Lsysc_uprobe_notify
#endif
        TSTMSK  __TI_flags(%r12),_TIF_GUARDED_STORAGE
        jo      .Lsysc_guarded_storage
        TSTMSK  __PT_FLAGS(%r11),_PIF_PER_TRAP
        jo      .Lsysc_singlestep
#ifdef CONFIG_LIVEPATCH
        TSTMSK  __TI_flags(%r12),_TIF_PATCH_PENDING
        jo      .Lsysc_patch_pending    # handle live patching just before
                                        # signals and possible syscall restart
#endif
        TSTMSK  __PT_FLAGS(%r11),_PIF_SYSCALL_RESTART
        jo      .Lsysc_syscall_restart
        TSTMSK  __TI_flags(%r12),_TIF_SIGPENDING
        jo      .Lsysc_sigpending
        TSTMSK  __TI_flags(%r12),_TIF_NOTIFY_RESUME
        jo      .Lsysc_notify_resume
        TSTMSK  __LC_CPU_FLAGS,(_CIF_ASCE_PRIMARY|_CIF_ASCE_SECONDARY)
        jnz     .Lsysc_asce
        j       .Lsysc_return

#
# _TIF_NEED_RESCHED is set, call schedule
#
.Lsysc_reschedule:
        larl    %r14,.Lsysc_return
        jg      schedule

#
# _CIF_ASCE_PRIMARY and/or _CIF_ASCE_SECONDARY set, load user space asce
#
.Lsysc_asce:
        ni      __LC_CPU_FLAGS+7,255-_CIF_ASCE_SECONDARY
        lctlg   %c7,%c7,__LC_VDSO_ASCE          # load secondary asce
        TSTMSK  __LC_CPU_FLAGS,_CIF_ASCE_PRIMARY
        jz      .Lsysc_return
#ifndef CONFIG_HAVE_MARCH_Z10_FEATURES
        tm      __LC_STFLE_FAC_LIST+3,0x10      # has MVCOS ?
        jnz     .Lsysc_set_fs_fixup
        ni      __LC_CPU_FLAGS+7,255-_CIF_ASCE_PRIMARY
        lctlg   %c1,%c1,__LC_USER_ASCE          # load primary asce
        j       .Lsysc_return
.Lsysc_set_fs_fixup:
#endif
        larl    %r14,.Lsysc_return
        jg      set_fs_fixup

#
# _TIF_SIGPENDING is set, call do_signal
#
.Lsysc_sigpending:
        lgr     %r2,%r11                # pass pointer to pt_regs
        brasl   %r14,do_signal
        TSTMSK  __PT_FLAGS(%r11),_PIF_SYSCALL
        jno     .Lsysc_return
.Lsysc_do_syscall:
        lghi    %r13,__TASK_thread
        lmg     %r2,%r7,__PT_R2(%r11)   # load svc arguments
        lghi    %r1,0                   # svc 0 returns -ENOSYS
        j       .Lsysc_do_svc

#
# _TIF_NOTIFY_RESUME is set, call do_notify_resume
#
.Lsysc_notify_resume:
        lgr     %r2,%r11                # pass pointer to pt_regs
        larl    %r14,.Lsysc_return
        jg      do_notify_resume

#
# _TIF_UPROBE is set, call uprobe_notify_resume
#
#ifdef CONFIG_UPROBES
.Lsysc_uprobe_notify:
        lgr     %r2,%r11                # pass pointer to pt_regs
        larl    %r14,.Lsysc_return
        jg      uprobe_notify_resume
#endif

#
# _TIF_GUARDED_STORAGE is set, call guarded_storage_load
#
.Lsysc_guarded_storage:
        lgr     %r2,%r11                # pass pointer to pt_regs
        larl    %r14,.Lsysc_return
        jg      gs_load_bc_cb
#
# _TIF_PATCH_PENDING is set, call klp_update_patch_state
#
#ifdef CONFIG_LIVEPATCH
.Lsysc_patch_pending:
        lg      %r2,__LC_CURRENT        # pass pointer to task struct
        larl    %r14,.Lsysc_return
        jg      klp_update_patch_state
#endif

#
# _PIF_PER_TRAP is set, call do_per_trap
#
.Lsysc_singlestep:
        ni      __PT_FLAGS+7(%r11),255-_PIF_PER_TRAP
        lgr     %r2,%r11                # pass pointer to pt_regs
        larl    %r14,.Lsysc_return
        jg      do_per_trap

#
# _PIF_SYSCALL_RESTART is set, repeat the current system call
#
.Lsysc_syscall_restart:
        ni      __PT_FLAGS+7(%r11),255-_PIF_SYSCALL_RESTART
        lmg     %r1,%r7,__PT_R1(%r11)   # load svc arguments
        lg      %r2,__PT_ORIG_GPR2(%r11)
        j       .Lsysc_do_svc

#
# call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
# and after the system call
#
.Lsysc_tracesys:
        lgr     %r2,%r11                # pass pointer to pt_regs
        la      %r3,0
        llgh    %r0,__PT_INT_CODE+2(%r11)
        stg     %r0,__PT_R2(%r11)
        brasl   %r14,do_syscall_trace_enter
        lghi    %r0,NR_syscalls
        clgr    %r0,%r2
        jnh     .Lsysc_tracenogo
        sllg    %r8,%r2,3
        lg      %r9,0(%r8,%r10)
        lmg     %r3,%r7,__PT_R3(%r11)
        stg     %r7,STACK_FRAME_OVERHEAD(%r15)
        lg      %r2,__PT_ORIG_GPR2(%r11)
        BASR_EX %r14,%r9                # call sys_xxx
        stg     %r2,__PT_R2(%r11)       # store return value
.Lsysc_tracenogo:
        TSTMSK  __TI_flags(%r12),_TIF_TRACE
        jz      .Lsysc_return
        lgr     %r2,%r11                # pass pointer to pt_regs
        larl    %r14,.Lsysc_return
        jg      do_syscall_trace_exit
ENDPROC(system_call)

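/*
 * Note on the trace path above (sketch): do_syscall_trace_enter returns
 * the possibly rewritten system call number; any value of NR_syscalls
 * or above makes the code skip the actual call, which is how a tracer
 * or seccomp can veto a system call.
 */
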
#
# a new process exits the kernel with ret_from_fork
#
ENTRY(ret_from_fork)
        la      %r11,STACK_FRAME_OVERHEAD(%r15)
        lg      %r12,__LC_CURRENT
        brasl   %r14,schedule_tail
        tm      __PT_PSW+1(%r11),0x01   # forking a kernel thread ?
        jne     .Lsysc_tracenogo
        # it's a kernel thread
        lmg     %r9,%r10,__PT_R9(%r11)  # load gprs
        la      %r2,0(%r10)
        BASR_EX %r14,%r9
        j       .Lsysc_tracenogo
ENDPROC(ret_from_fork)

ENTRY(kernel_thread_starter)
        la      %r2,0(%r10)
        BASR_EX %r14,%r9
        j       .Lsysc_tracenogo
ENDPROC(kernel_thread_starter)

/*
 * Program check handler routine
 */

ENTRY(pgm_check_handler)
        stpt    __LC_SYNC_ENTER_TIMER
        BPOFF
        stmg    %r8,%r15,__LC_SAVE_AREA_SYNC
        lg      %r10,__LC_LAST_BREAK
        srag    %r11,%r10,12
        jnz     0f
        /* if __LC_LAST_BREAK is < 4096, it contains one of
         * the lpswe addresses in lowcore. Set it to 1 (initial state)
         * to prevent leaking that address to userspace.
         */
        lghi    %r10,1
0:      lg      %r12,__LC_CURRENT
        lghi    %r11,0
        lmg     %r8,%r9,__LC_PGM_OLD_PSW
        tmhh    %r8,0x0001              # test problem state bit
        jnz     3f                      # -> fault in user space
#if IS_ENABLED(CONFIG_KVM)
        # cleanup critical section for program checks in sie64a
        lgr     %r14,%r9
        larl    %r13,.Lsie_gmap
        slgr    %r14,%r13
        lghi    %r13,.Lsie_done - .Lsie_gmap
        clgr    %r14,%r13
        jhe     1f
        lg      %r14,__SF_SIE_CONTROL(%r15)    # get control block pointer
        ni      __SIE_PROG0C+3(%r14),0xfe      # no longer in SIE
        lctlg   %c1,%c1,__LC_USER_ASCE         # load primary asce
        larl    %r9,sie_exit                   # skip forward to sie_exit
        lghi    %r11,_PIF_GUEST_FAULT
#endif
1:      tmhh    %r8,0x4000              # PER bit set in old PSW ?
        jnz     2f                      # -> enabled, can't be a double fault
        tm      __LC_PGM_ILC+3,0x80     # check for per exception
        jnz     .Lpgm_svcper            # -> single stepped svc
2:      CHECK_STACK __LC_SAVE_AREA_SYNC
        aghi    %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
        # CHECK_VMAP_STACK branches to stack_overflow or 5f
        CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,5f
3:      UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
        BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
        lg      %r15,__LC_KERNEL_STACK
        lgr     %r14,%r12
        aghi    %r14,__TASK_thread      # pointer to thread_struct
        lghi    %r13,__LC_PGM_TDB
        tm      __LC_PGM_ILC+2,0x02     # check for transaction abort
        jz      4f
        mvc     __THREAD_trap_tdb(256,%r14),0(%r13)
4:      stg     %r10,__THREAD_last_break(%r14)
5:      lgr     %r13,%r11
        la      %r11,STACK_FRAME_OVERHEAD(%r15)
        stmg    %r0,%r7,__PT_R0(%r11)
        # clear user controlled registers to prevent speculative use
        xgr     %r0,%r0
        xgr     %r1,%r1
        xgr     %r2,%r2
        xgr     %r3,%r3
        xgr     %r4,%r4
        xgr     %r5,%r5
        xgr     %r6,%r6
        xgr     %r7,%r7
        mvc     __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
        stmg    %r8,%r9,__PT_PSW(%r11)
        mvc     __PT_INT_CODE(4,%r11),__LC_PGM_ILC
        mvc     __PT_INT_PARM_LONG(8,%r11),__LC_TRANS_EXC_CODE
        stg     %r13,__PT_FLAGS(%r11)
        stg     %r10,__PT_ARGS(%r11)
        tm      __LC_PGM_ILC+3,0x80     # check for per exception
        jz      6f
        tmhh    %r8,0x0001              # kernel per event ?
        jz      .Lpgm_kprobe
        oi      __PT_FLAGS+7(%r11),_PIF_PER_TRAP
        mvc     __THREAD_per_address(8,%r14),__LC_PER_ADDRESS
        mvc     __THREAD_per_cause(2,%r14),__LC_PER_CODE
        mvc     __THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID
6:      RESTORE_SM_CLEAR_PER
        xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
        larl    %r1,pgm_check_table
        llgh    %r10,__PT_INT_CODE+2(%r11)
        nill    %r10,0x007f
        sll     %r10,3
        je      .Lpgm_return
        lg      %r9,0(%r10,%r1)         # load address of handler routine
        lgr     %r2,%r11                # pass pointer to pt_regs
        BASR_EX %r14,%r9                # branch to interrupt-handler
.Lpgm_return:
        LOCKDEP_SYS_EXIT
        tm      __PT_PSW+1(%r11),0x01   # returning to user ?
        jno     .Lsysc_restore
        TSTMSK  __PT_FLAGS(%r11),_PIF_SYSCALL
        jo      .Lsysc_do_syscall
        j       .Lsysc_tif

#
# PER event in supervisor state, must be kprobes
#
.Lpgm_kprobe:
        RESTORE_SM_CLEAR_PER
        xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
        lgr     %r2,%r11                # pass pointer to pt_regs
        brasl   %r14,do_per_trap
        j       .Lpgm_return

#
# single stepped system call
#
.Lpgm_svcper:
        mvc     __LC_RETURN_PSW(8),__LC_SVC_NEW_PSW
        larl    %r14,.Lsysc_per
        stg     %r14,__LC_RETURN_PSW+8
        lghi    %r14,_PIF_SYSCALL | _PIF_PER_TRAP
        lpswe   __LC_RETURN_PSW         # branch to .Lsysc_per
ENDPROC(pgm_check_handler)

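/*
 * Note on .Lpgm_svcper above (sketch): a PER event on the svc
 * instruction arrives as a program check before the system call has
 * been handled, so the code re-enters system_call at .Lsysc_per via
 * lpswe with _PIF_SYSCALL and _PIF_PER_TRAP preset in %r14; the PER
 * trap is then delivered through the normal system call work path.
 */
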
/*
 * IO interrupt handler routine
 */
ENTRY(io_int_handler)
        STCK    __LC_INT_CLOCK
        stpt    __LC_ASYNC_ENTER_TIMER
        BPOFF
        stmg    %r8,%r15,__LC_SAVE_AREA_ASYNC
        lg      %r12,__LC_CURRENT
        lmg     %r8,%r9,__LC_IO_OLD_PSW
        SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
        stmg    %r0,%r7,__PT_R0(%r11)
        # clear user controlled registers to prevent speculative use
        xgr     %r0,%r0
        xgr     %r1,%r1
        xgr     %r2,%r2
        xgr     %r3,%r3
        xgr     %r4,%r4
        xgr     %r5,%r5
        xgr     %r6,%r6
        xgr     %r7,%r7
        xgr     %r10,%r10
        mvc     __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
        stmg    %r8,%r9,__PT_PSW(%r11)
        mvc     __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
        xc      __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
        TSTMSK  __LC_CPU_FLAGS,_CIF_IGNORE_IRQ
        jo      .Lio_restore
#if IS_ENABLED(CONFIG_TRACE_IRQFLAGS)
        tmhh    %r8,0x300
        jz      1f
        TRACE_IRQS_OFF
1:
#endif
        xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
.Lio_loop:
        lgr     %r2,%r11                # pass pointer to pt_regs
        lghi    %r3,IO_INTERRUPT
        tm      __PT_INT_CODE+8(%r11),0x80      # adapter interrupt ?
        jz      .Lio_call
        lghi    %r3,THIN_INTERRUPT
.Lio_call:
        brasl   %r14,do_IRQ
        TSTMSK  __LC_MACHINE_FLAGS,MACHINE_FLAG_LPAR
        jz      .Lio_return
        tpi     0
        jz      .Lio_return
        mvc     __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
        j       .Lio_loop
.Lio_return:
        LOCKDEP_SYS_EXIT
        TSTMSK  __TI_flags(%r12),_TIF_WORK
        jnz     .Lio_work               # there is work to do (signals etc.)
        TSTMSK  __LC_CPU_FLAGS,_CIF_WORK
        jnz     .Lio_work
.Lio_restore:
#if IS_ENABLED(CONFIG_TRACE_IRQFLAGS)
        tm      __PT_PSW(%r11),3
        jno     0f
        TRACE_IRQS_ON
0:
#endif
        lg      %r14,__LC_VDSO_PER_CPU
        mvc     __LC_RETURN_PSW(16),__PT_PSW(%r11)
        tm      __PT_PSW+1(%r11),0x01   # returning to user ?
        jno     .Lio_exit_kernel
        BPEXIT  __TI_flags(%r12),_TIF_ISOLATE_BP
        stpt    __LC_EXIT_TIMER
        mvc     __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
.Lio_exit_kernel:
        lmg     %r0,%r15,__PT_R0(%r11)
        b       __LC_RETURN_LPSWE
.Lio_done:

#
# There is work to do, find out in which context we have been interrupted:
# 1) if we return to user space we can do all _TIF_WORK work
# 2) if we return to kernel code and kvm is enabled check if we need to
#    modify the psw to leave SIE
# 3) if we return to kernel code and preemptive scheduling is enabled check
#    the preemption counter and if it is zero call preempt_schedule_irq
# Before any work can be done, a switch to the kernel stack is required.
#
.Lio_work:
        tm      __PT_PSW+1(%r11),0x01   # returning to user ?
        jo      .Lio_work_user          # yes -> do resched & signal
#ifdef CONFIG_PREEMPTION
        # check for preemptive scheduling
        icm     %r0,15,__LC_PREEMPT_COUNT
        jnz     .Lio_restore            # preemption is disabled
        TSTMSK  __TI_flags(%r12),_TIF_NEED_RESCHED
        jno     .Lio_restore
        # switch to kernel stack
        lg      %r1,__PT_R15(%r11)
        aghi    %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
        mvc     STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
        xc      __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
        la      %r11,STACK_FRAME_OVERHEAD(%r1)
        lgr     %r15,%r1
        brasl   %r14,preempt_schedule_irq
        j       .Lio_return
#else
        j       .Lio_restore
#endif

#
# Need to do work before returning to userspace, switch to kernel stack
#
.Lio_work_user:
        lg      %r1,__LC_KERNEL_STACK
        mvc     STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
        xc      __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
        la      %r11,STACK_FRAME_OVERHEAD(%r1)
        lgr     %r15,%r1

#
# One of the work bits is on. Find out which one.
#
        TSTMSK  __TI_flags(%r12),_TIF_NEED_RESCHED
        jo      .Lio_reschedule
#ifdef CONFIG_LIVEPATCH
        TSTMSK  __TI_flags(%r12),_TIF_PATCH_PENDING
        jo      .Lio_patch_pending
#endif
        TSTMSK  __TI_flags(%r12),_TIF_SIGPENDING
        jo      .Lio_sigpending
        TSTMSK  __TI_flags(%r12),_TIF_NOTIFY_RESUME
        jo      .Lio_notify_resume
        TSTMSK  __TI_flags(%r12),_TIF_GUARDED_STORAGE
        jo      .Lio_guarded_storage
        TSTMSK  __LC_CPU_FLAGS,_CIF_FPU
        jo      .Lio_vxrs
        TSTMSK  __LC_CPU_FLAGS,(_CIF_ASCE_PRIMARY|_CIF_ASCE_SECONDARY)
        jnz     .Lio_asce
        j       .Lio_return

#
# _CIF_ASCE_PRIMARY and/or _CIF_ASCE_SECONDARY set, load user space asce
#
.Lio_asce:
        ni      __LC_CPU_FLAGS+7,255-_CIF_ASCE_SECONDARY
        lctlg   %c7,%c7,__LC_VDSO_ASCE          # load secondary asce
        TSTMSK  __LC_CPU_FLAGS,_CIF_ASCE_PRIMARY
        jz      .Lio_return
#ifndef CONFIG_HAVE_MARCH_Z10_FEATURES
        tm      __LC_STFLE_FAC_LIST+3,0x10      # has MVCOS ?
        jnz     .Lio_set_fs_fixup
        ni      __LC_CPU_FLAGS+7,255-_CIF_ASCE_PRIMARY
        lctlg   %c1,%c1,__LC_USER_ASCE          # load primary asce
        j       .Lio_return
.Lio_set_fs_fixup:
#endif
        larl    %r14,.Lio_return
        jg      set_fs_fixup

#
# _CIF_FPU is set, restore floating-point controls and floating-point registers.
#
.Lio_vxrs:
        larl    %r14,.Lio_return
        jg      load_fpu_regs

#
# _TIF_GUARDED_STORAGE is set, call guarded_storage_load
#
.Lio_guarded_storage:
        ENABLE_INTS_TRACE
        lgr     %r2,%r11                # pass pointer to pt_regs
        brasl   %r14,gs_load_bc_cb
        DISABLE_INTS_TRACE
        j       .Lio_return

#
# _TIF_NEED_RESCHED is set, call schedule
#
.Lio_reschedule:
        ENABLE_INTS_TRACE
        brasl   %r14,schedule           # call scheduler
        DISABLE_INTS_TRACE
        j       .Lio_return

#
# _TIF_PATCH_PENDING is set, call klp_update_patch_state
#
#ifdef CONFIG_LIVEPATCH
.Lio_patch_pending:
        lg      %r2,__LC_CURRENT        # pass pointer to task struct
        larl    %r14,.Lio_return
        jg      klp_update_patch_state
#endif

#
# _TIF_SIGPENDING is set, call do_signal
#
.Lio_sigpending:
        ENABLE_INTS_TRACE
        lgr     %r2,%r11                # pass pointer to pt_regs
        brasl   %r14,do_signal
        DISABLE_INTS_TRACE
        j       .Lio_return

#
# _TIF_NOTIFY_RESUME is set, call do_notify_resume
#
.Lio_notify_resume:
        ENABLE_INTS_TRACE
        lgr     %r2,%r11                # pass pointer to pt_regs
        brasl   %r14,do_notify_resume
        DISABLE_INTS_TRACE
        j       .Lio_return
ENDPROC(io_int_handler)

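/*
 * Note on the tpi loop in .Lio_loop above (sketch): when running on
 * LPAR the handler polls for further pending I/O interrupts with
 * "tpi 0", which stores the interruption code in lowcore, and loops
 * instead of paying the full interrupt round trip for each one; on
 * other hypervisors the loop is skipped.
 */
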
/*
 * External interrupt handler routine
 */
ENTRY(ext_int_handler)
        STCK    __LC_INT_CLOCK
        stpt    __LC_ASYNC_ENTER_TIMER
        BPOFF
        stmg    %r8,%r15,__LC_SAVE_AREA_ASYNC
        lg      %r12,__LC_CURRENT
        lmg     %r8,%r9,__LC_EXT_OLD_PSW
        SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
        stmg    %r0,%r7,__PT_R0(%r11)
        # clear user controlled registers to prevent speculative use
        xgr     %r0,%r0
        xgr     %r1,%r1
        xgr     %r2,%r2
        xgr     %r3,%r3
        xgr     %r4,%r4
        xgr     %r5,%r5
        xgr     %r6,%r6
        xgr     %r7,%r7
        xgr     %r10,%r10
        mvc     __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
        stmg    %r8,%r9,__PT_PSW(%r11)
        lghi    %r1,__LC_EXT_PARAMS2
        mvc     __PT_INT_CODE(4,%r11),__LC_EXT_CPU_ADDR
        mvc     __PT_INT_PARM(4,%r11),__LC_EXT_PARAMS
        mvc     __PT_INT_PARM_LONG(8,%r11),0(%r1)
        xc      __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
        TSTMSK  __LC_CPU_FLAGS,_CIF_IGNORE_IRQ
        jo      .Lio_restore
#if IS_ENABLED(CONFIG_TRACE_IRQFLAGS)
        tmhh    %r8,0x300
        jz      1f
        TRACE_IRQS_OFF
1:
#endif
        xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
        lgr     %r2,%r11                # pass pointer to pt_regs
        lghi    %r3,EXT_INTERRUPT
        brasl   %r14,do_IRQ
        j       .Lio_return
ENDPROC(ext_int_handler)

/*
 * Load idle PSW.
 */
ENTRY(psw_idle)
        stg     %r3,__SF_EMPTY(%r15)
        larl    %r1,.Lpsw_idle_exit
        stg     %r1,__SF_EMPTY+8(%r15)
        larl    %r1,smp_cpu_mtid
        llgf    %r1,0(%r1)
        ltgr    %r1,%r1
        jz      .Lpsw_idle_stcctm
        .insn   rsy,0xeb0000000017,%r1,5,__SF_EMPTY+16(%r15)
.Lpsw_idle_stcctm:
        oi      __LC_CPU_FLAGS+7,_CIF_ENABLED_WAIT
        BPON
        STCK    __CLOCK_IDLE_ENTER(%r2)
        stpt    __TIMER_IDLE_ENTER(%r2)
        lpswe   __SF_EMPTY(%r15)
.Lpsw_idle_exit:
        BR_EX   %r14
ENDPROC(psw_idle)

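/*
 * Note on the raw .insn in psw_idle above (an assumption based on the
 * operands): it appears to encode stcctm, storing counter set 5 (the
 * MT-diagnostic counter set) into the save area before going idle
 * whenever SMT is enabled (smp_cpu_mtid != 0); spelling it as .insn
 * lets the file assemble with toolchains that lack the mnemonic.
 */
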
/*
 * Store floating-point controls and floating-point or vector registers,
 * depending on whether the vector facility is available. A critical
 * section cleanup assures that the registers are stored even if
 * interrupted for some other work. The CIF_FPU flag is set to trigger
 * a lazy restore of the register contents at return from io or a
 * system call.
 */
ENTRY(save_fpu_regs)
        stnsm   __SF_EMPTY(%r15),0xfc
        lg      %r2,__LC_CURRENT
        aghi    %r2,__TASK_thread
        TSTMSK  __LC_CPU_FLAGS,_CIF_FPU
        jo      .Lsave_fpu_regs_exit
        stfpc   __THREAD_FPU_fpc(%r2)
        lg      %r3,__THREAD_FPU_regs(%r2)
        TSTMSK  __LC_MACHINE_FLAGS,MACHINE_FLAG_VX
        jz      .Lsave_fpu_regs_fp        # no -> store FP regs
        VSTM    %v0,%v15,0,%r3            # vstm 0,15,0(3)
        VSTM    %v16,%v31,256,%r3         # vstm 16,31,256(3)
        j       .Lsave_fpu_regs_done      # -> set CIF_FPU flag
.Lsave_fpu_regs_fp:
        std     0,0(%r3)
        std     1,8(%r3)
        std     2,16(%r3)
        std     3,24(%r3)
        std     4,32(%r3)
        std     5,40(%r3)
        std     6,48(%r3)
        std     7,56(%r3)
        std     8,64(%r3)
        std     9,72(%r3)
        std     10,80(%r3)
        std     11,88(%r3)
        std     12,96(%r3)
        std     13,104(%r3)
        std     14,112(%r3)
        std     15,120(%r3)
.Lsave_fpu_regs_done:
        oi      __LC_CPU_FLAGS+7,_CIF_FPU
.Lsave_fpu_regs_exit:
        ssm     __SF_EMPTY(%r15)
        BR_EX   %r14
.Lsave_fpu_regs_end:
ENDPROC(save_fpu_regs)
EXPORT_SYMBOL(save_fpu_regs)

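/*
 * Note: VSTM/VLM used here are macros from asm/vx-insn.h that emit the
 * vector store/load-multiple instructions, so this file assembles even
 * with binutils versions that lack vector instruction support.
 */
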
/*
 * Load floating-point controls and floating-point or vector registers.
 * A critical section cleanup assures that the register contents are
 * loaded even if interrupted for some other work.
 *
 * There are special calling conventions to fit into sysc and io return work:
 *      %r15:   <kernel stack>
 * The function requires:
 *      %r4     (as a scratch register)
 */
load_fpu_regs:
        lg      %r4,__LC_CURRENT
        aghi    %r4,__TASK_thread
        TSTMSK  __LC_CPU_FLAGS,_CIF_FPU
        jno     .Lload_fpu_regs_exit
        lfpc    __THREAD_FPU_fpc(%r4)
        TSTMSK  __LC_MACHINE_FLAGS,MACHINE_FLAG_VX
        lg      %r4,__THREAD_FPU_regs(%r4)      # %r4 <- reg save area
        jz      .Lload_fpu_regs_fp              # -> no VX, load FP regs
        VLM     %v0,%v15,0,%r4
        VLM     %v16,%v31,256,%r4
        j       .Lload_fpu_regs_done
.Lload_fpu_regs_fp:
        ld      0,0(%r4)
        ld      1,8(%r4)
        ld      2,16(%r4)
        ld      3,24(%r4)
        ld      4,32(%r4)
        ld      5,40(%r4)
        ld      6,48(%r4)
        ld      7,56(%r4)
        ld      8,64(%r4)
        ld      9,72(%r4)
        ld      10,80(%r4)
        ld      11,88(%r4)
        ld      12,96(%r4)
        ld      13,104(%r4)
        ld      14,112(%r4)
        ld      15,120(%r4)
.Lload_fpu_regs_done:
        ni      __LC_CPU_FLAGS+7,255-_CIF_FPU
.Lload_fpu_regs_exit:
        BR_EX   %r14
.Lload_fpu_regs_end:
ENDPROC(load_fpu_regs)

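/*
 * Usage note (sketch): the return paths enter load_fpu_regs as a tail
 * call with the resume address preset, e.g.
 *      larl    %r14,.Lio_return
 *      jg      load_fpu_regs
 * so the routine may only clobber %r4 and must leave the pt_regs
 * pointer in %r11 and the stack pointer in %r15 untouched.
 */
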
/*
 * Machine check handler routines
 */
ENTRY(mcck_int_handler)
        STCK    __LC_MCCK_CLOCK
        BPOFF
        la      %r1,4095                # validate r1
        spt     __LC_CPU_TIMER_SAVE_AREA-4095(%r1)      # validate cpu timer
        sckc    __LC_CLOCK_COMPARATOR                   # validate comparator
        lam     %a0,%a15,__LC_AREGS_SAVE_AREA-4095(%r1) # validate acrs
        lmg     %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# validate gprs
        lg      %r12,__LC_CURRENT
        lmg     %r8,%r9,__LC_MCK_OLD_PSW
        TSTMSK  __LC_MCCK_CODE,MCCK_CODE_SYSTEM_DAMAGE
        jo      .Lmcck_panic            # yes -> rest of mcck code invalid
        TSTMSK  __LC_MCCK_CODE,MCCK_CODE_CR_VALID
        jno     .Lmcck_panic            # control registers invalid -> panic
        la      %r14,4095
        lctlg   %c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r14) # validate ctl regs
        ptlb
        lg      %r11,__LC_MCESAD-4095(%r14) # extended machine check save area
        nill    %r11,0xfc00             # MCESA_ORIGIN_MASK
        TSTMSK  __LC_CREGS_SAVE_AREA+16-4095(%r14),CR2_GUARDED_STORAGE
        jno     0f
        TSTMSK  __LC_MCCK_CODE,MCCK_CODE_GS_VALID
        jno     0f
        .insn   rxy,0xe3000000004d,0,__MCESA_GS_SAVE_AREA(%r11) # LGSC
0:      l       %r14,__LC_FP_CREG_SAVE_AREA-4095(%r14)
        TSTMSK  __LC_MCCK_CODE,MCCK_CODE_FC_VALID
        jo      0f
        sr      %r14,%r14
0:      sfpc    %r14
        TSTMSK  __LC_MACHINE_FLAGS,MACHINE_FLAG_VX
        jo      0f
        lghi    %r14,__LC_FPREGS_SAVE_AREA
        ld      %f0,0(%r14)
        ld      %f1,8(%r14)
        ld      %f2,16(%r14)
        ld      %f3,24(%r14)
        ld      %f4,32(%r14)
        ld      %f5,40(%r14)
        ld      %f6,48(%r14)
        ld      %f7,56(%r14)
        ld      %f8,64(%r14)
        ld      %f9,72(%r14)
        ld      %f10,80(%r14)
        ld      %f11,88(%r14)
        ld      %f12,96(%r14)
        ld      %f13,104(%r14)
        ld      %f14,112(%r14)
        ld      %f15,120(%r14)
        j       1f
0:      VLM     %v0,%v15,0,%r11
        VLM     %v16,%v31,256,%r11
1:      lghi    %r14,__LC_CPU_TIMER_SAVE_AREA
        mvc     __LC_MCCK_ENTER_TIMER(8),0(%r14)
        TSTMSK  __LC_MCCK_CODE,MCCK_CODE_CPU_TIMER_VALID
        jo      3f
        la      %r14,__LC_SYNC_ENTER_TIMER
        clc     0(8,%r14),__LC_ASYNC_ENTER_TIMER
        jl      0f
        la      %r14,__LC_ASYNC_ENTER_TIMER
0:      clc     0(8,%r14),__LC_EXIT_TIMER
        jl      1f
        la      %r14,__LC_EXIT_TIMER
1:      clc     0(8,%r14),__LC_LAST_UPDATE_TIMER
        jl      2f
        la      %r14,__LC_LAST_UPDATE_TIMER
2:      spt     0(%r14)
        mvc     __LC_MCCK_ENTER_TIMER(8),0(%r14)
3:      TSTMSK  __LC_MCCK_CODE,MCCK_CODE_PSW_MWP_VALID
        jno     .Lmcck_panic
        tmhh    %r8,0x0001              # interrupting from user ?
        jnz     4f
        TSTMSK  __LC_MCCK_CODE,MCCK_CODE_PSW_IA_VALID
        jno     .Lmcck_panic
4:      ssm     __LC_PGM_NEW_PSW        # turn dat on, keep irqs off
        SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_MCCK_ENTER_TIMER
.Lmcck_skip:
        lghi    %r14,__LC_GPREGS_SAVE_AREA+64
        stmg    %r0,%r7,__PT_R0(%r11)
        # clear user controlled registers to prevent speculative use
        xgr     %r0,%r0
        xgr     %r1,%r1
        xgr     %r2,%r2
        xgr     %r3,%r3
        xgr     %r4,%r4
        xgr     %r5,%r5
        xgr     %r6,%r6
        xgr     %r7,%r7
        xgr     %r10,%r10
        mvc     __PT_R8(64,%r11),0(%r14)
        stmg    %r8,%r9,__PT_PSW(%r11)
        xc      __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
        xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
        lgr     %r2,%r11                # pass pointer to pt_regs
        brasl   %r14,s390_do_machine_check
        cghi    %r2,0
        je      .Lmcck_return
        lg      %r1,__LC_KERNEL_STACK   # switch to kernel stack
        mvc     STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
        xc      __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
        la      %r11,STACK_FRAME_OVERHEAD(%r1)
        lgr     %r15,%r1
        TRACE_IRQS_OFF
        brasl   %r14,s390_handle_mcck
        TRACE_IRQS_ON
.Lmcck_return:
        lg      %r14,__LC_VDSO_PER_CPU
        lmg     %r0,%r10,__PT_R0(%r11)
        mvc     __LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
        tm      __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
        jno     0f
        BPEXIT  __TI_flags(%r12),_TIF_ISOLATE_BP
        stpt    __LC_EXIT_TIMER
        mvc     __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
0:      lmg     %r11,%r15,__PT_R11(%r11)
        b       __LC_RETURN_MCCK_LPSWE

.Lmcck_panic:
        lg      %r15,__LC_NODAT_STACK
        la      %r11,STACK_FRAME_OVERHEAD(%r15)
        j       .Lmcck_skip
ENDPROC(mcck_int_handler)

#
# PSW restart interrupt handler
#
ENTRY(restart_int_handler)
        ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40
        stg     %r15,__LC_SAVE_AREA_RESTART
        lg      %r15,__LC_RESTART_STACK
        xc      STACK_FRAME_OVERHEAD(__PT_SIZE,%r15),STACK_FRAME_OVERHEAD(%r15)
        stmg    %r0,%r14,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
        mvc     STACK_FRAME_OVERHEAD+__PT_R15(8,%r15),__LC_SAVE_AREA_RESTART
        mvc     STACK_FRAME_OVERHEAD+__PT_PSW(16,%r15),__LC_RST_OLD_PSW
        xc      0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
        lg      %r1,__LC_RESTART_FN     # load fn, parm & source cpu
        lg      %r2,__LC_RESTART_DATA
        lg      %r3,__LC_RESTART_SOURCE
        ltgr    %r3,%r3                 # test source cpu address
        jm      1f                      # negative -> skip source stop
0:      sigp    %r4,%r3,SIGP_SENSE      # sigp sense to source cpu
        brc     10,0b                   # wait for status stored
1:      basr    %r14,%r1                # call function
        stap    __SF_EMPTY(%r15)        # store cpu address
        llgh    %r3,__SF_EMPTY(%r15)
2:      sigp    %r4,%r3,SIGP_STOP       # sigp stop to current cpu
        brc     2,2b
3:      j       3b
ENDPROC(restart_int_handler)

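/*
 * Note on the sigp sequence above (sketch): if __LC_RESTART_SOURCE
 * holds a valid cpu address, the handler loops on "sigp ... SIGP_SENSE"
 * until the source cpu has stopped (condition code 1, status stored),
 * then calls the requested function; afterwards it stops the current
 * cpu with SIGP_STOP, looping while the order is busy.
 */
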
        .section .kprobes.text, "ax"

#if defined(CONFIG_CHECK_STACK) || defined(CONFIG_VMAP_STACK)
/*
 * The synchronous or the asynchronous stack overflowed. We are dead.
 * No need to properly save the registers, we are going to panic anyway.
 * Setup a pt_regs so that show_trace can provide a good call trace.
 */
ENTRY(stack_overflow)
        lg      %r15,__LC_NODAT_STACK   # change to panic stack
        la      %r11,STACK_FRAME_OVERHEAD(%r15)
        stmg    %r0,%r7,__PT_R0(%r11)
        stmg    %r8,%r9,__PT_PSW(%r11)
        mvc     __PT_R8(64,%r11),0(%r14)
        stg     %r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
        xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
        lgr     %r2,%r11                # pass pointer to pt_regs
        jg      kernel_stack_overflow
ENDPROC(stack_overflow)
#endif

#if IS_ENABLED(CONFIG_KVM)
.Lcleanup_sie:
        cghi    %r11,__LC_SAVE_AREA_ASYNC       # Is this in normal interrupt?
        je      1f
        larl    %r13,.Lsie_entry
        slgr    %r9,%r13
        larl    %r13,.Lsie_skip
        clgr    %r9,%r13
        jh      1f
        oi      __LC_CPU_FLAGS+7,_CIF_MCCK_GUEST
1:      BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
        lg      %r9,__SF_SIE_CONTROL(%r15)      # get control block pointer
        ni      __SIE_PROG0C+3(%r9),0xfe        # no longer in SIE
        lctlg   %c1,%c1,__LC_USER_ASCE          # load primary asce
        larl    %r9,sie_exit                    # skip forward to sie_exit
        BR_EX   %r14,%r11
#endif

        .section .rodata, "a"
#define SYSCALL(esame,emu)      .quad __s390x_ ## esame
        .globl  sys_call_table
sys_call_table:
#include "asm/syscall_table.h"
#undef SYSCALL

#ifdef CONFIG_COMPAT

#define SYSCALL(esame,emu)      .quad __s390_ ## emu
        .globl  sys_call_table_emu
sys_call_table_emu:
#include "asm/syscall_table.h"
#undef SYSCALL
#endif

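/*
 * Sketch of the SYSCALL() expansion above: syscall_table.h consists of
 * generated lines roughly like "SYSCALL(sys_read, compat_sys_read)",
 * so the first include emits ".quad __s390x_sys_read" for the 64-bit
 * table and the second, under CONFIG_COMPAT, ".quad __s390_compat_sys_read"
 * for the compat table.
 */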