/*
 * S390 low-level entry points.
 *
 * Copyright IBM Corp. 1999, 2012
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *	      Hartmut Penner (hp@de.ibm.com),
 *	      Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *	      Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/page.h>
#include <asm/sigp.h>
#include <asm/irq.h>
#include <asm/vx-insn.h>
#include <asm/setup.h>
#include <asm/nmi.h>
#include <asm/export.h>

__PT_R0      =	__PT_GPRS
__PT_R1      =	__PT_GPRS + 8
__PT_R2      =	__PT_GPRS + 16
__PT_R3      =	__PT_GPRS + 24
__PT_R4      =	__PT_GPRS + 32
__PT_R5      =	__PT_GPRS + 40
__PT_R6      =	__PT_GPRS + 48
__PT_R7      =	__PT_GPRS + 56
__PT_R8      =	__PT_GPRS + 64
__PT_R9      =	__PT_GPRS + 72
__PT_R10     =	__PT_GPRS + 80
__PT_R11     =	__PT_GPRS + 88
__PT_R12     =	__PT_GPRS + 96
__PT_R13     =	__PT_GPRS + 104
__PT_R14     =	__PT_GPRS + 112
__PT_R15     =	__PT_GPRS + 120

STACK_SHIFT = PAGE_SHIFT + THREAD_SIZE_ORDER
STACK_SIZE  = 1 << STACK_SHIFT
STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE

_TIF_WORK	= (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
		   _TIF_UPROBE | _TIF_GUARDED_STORAGE | _TIF_PATCH_PENDING)
_TIF_TRACE	= (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
		   _TIF_SYSCALL_TRACEPOINT)
_CIF_WORK	= (_CIF_MCCK_PENDING | _CIF_ASCE_PRIMARY | \
		   _CIF_ASCE_SECONDARY | _CIF_FPU)
_PIF_WORK	= (_PIF_PER_TRAP | _PIF_SYSCALL_RESTART)

#define BASED(name) name-cleanup_critical(%r13)

	.macro	TRACE_IRQS_ON
#ifdef CONFIG_TRACE_IRQFLAGS
	basr	%r2,%r0
	brasl	%r14,trace_hardirqs_on_caller
#endif
	.endm

	.macro	TRACE_IRQS_OFF
#ifdef CONFIG_TRACE_IRQFLAGS
	basr	%r2,%r0
	brasl	%r14,trace_hardirqs_off_caller
#endif
	.endm

	.macro	LOCKDEP_SYS_EXIT
#ifdef CONFIG_LOCKDEP
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	jz	.+10
	brasl	%r14,lockdep_sys_exit
#endif
	.endm

	.macro	CHECK_STACK stacksize,savearea
#ifdef CONFIG_CHECK_STACK
	tml	%r15,\stacksize - CONFIG_STACK_GUARD
	lghi	%r14,\savearea
	jz	stack_overflow
#endif
	.endm

	.macro	SWITCH_ASYNC savearea,timer
	tmhh	%r8,0x0001		# interrupting from user ?
	jnz	1f
	lgr	%r14,%r9
	slg	%r14,BASED(.Lcritical_start)
	clg	%r14,BASED(.Lcritical_length)
	jhe	0f
	lghi	%r11,\savearea		# inside critical section, do cleanup
	brasl	%r14,cleanup_critical
	tmhh	%r8,0x0001		# retest problem state after cleanup
	jnz	1f
0:	lg	%r14,__LC_ASYNC_STACK	# are we already on the async stack?
	slgr	%r14,%r15
	srag	%r14,%r14,STACK_SHIFT
	jnz	2f
	CHECK_STACK 1<<STACK_SHIFT,\savearea
	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	j	3f
1:	UPDATE_VTIME %r14,%r15,\timer
2:	lg	%r15,__LC_ASYNC_STACK	# load async stack
3:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	.endm
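
	/*
	 * A sketch of the UPDATE_VTIME arithmetic (added comment; the s390
	 * CPU timer counts down, so older time stamps are larger):
	 *	USER_TIMER   += EXIT_TIMER - \enter_timer
	 *	SYSTEM_TIMER += LAST_UPDATE_TIMER - EXIT_TIMER
	 * i.e. the interval since the last update is split at the point
	 * where the CPU last left the kernel; LAST_UPDATE_TIMER is then
	 * reset to the current entry time stamp.
	 */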
	.macro UPDATE_VTIME w1,w2,enter_timer
	lg	\w1,__LC_EXIT_TIMER
	lg	\w2,__LC_LAST_UPDATE_TIMER
	slg	\w1,\enter_timer
	slg	\w2,__LC_EXIT_TIMER
	alg	\w1,__LC_USER_TIMER
	alg	\w2,__LC_SYSTEM_TIMER
	stg	\w1,__LC_USER_TIMER
	stg	\w2,__LC_SYSTEM_TIMER
	mvc	__LC_LAST_UPDATE_TIMER(8),\enter_timer
	.endm

	.macro	REENABLE_IRQS
	stg	%r8,__LC_RETURN_PSW
	ni	__LC_RETURN_PSW,0xbf
	ssm	__LC_RETURN_PSW
	.endm

	.macro STCK savearea
#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
	.insn	s,0xb27c0000,\savearea		# store clock fast
#else
	.insn	s,0xb2050000,\savearea		# store clock
#endif
	.endm

	/*
	 * The TSTMSK macro generates a test-under-mask instruction by
	 * calculating the memory offset for the specified mask value.
	 * Mask value can be any constant. The macro shifts the mask
	 * value to calculate the memory offset for the test-under-mask
	 * instruction.
	 */
	.macro TSTMSK addr, mask, size=8, bytepos=0
		.if (\bytepos < \size) && (\mask >> 8)
			.if (\mask & 0xff)
				.error "Mask exceeds byte boundary"
			.endif
			TSTMSK \addr, "(\mask >> 8)", \size, "(\bytepos + 1)"
			.exitm
		.endif
		.ifeq \mask
			.error "Mask must not be zero"
		.endif
		off = \size - \bytepos - 1
		tm	off+\addr, \mask
	.endm
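
	/*
	 * Example expansion (added comment): for an 8 byte field such as
	 * __LC_CPU_FLAGS and a mask that fits into the lowest byte, e.g.
	 *	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	 * no recursion happens; off = 8 - 0 - 1 = 7 and the macro emits
	 *	tm	7+__LC_CPU_FLAGS,_CIF_FPU
	 * which matches the "ni __LC_CPU_FLAGS+7,..." updates used in this
	 * file. A mask with a bit in a higher byte recurses with the mask
	 * shifted right by 8 and bytepos increased by one.
	 */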
	.section .kprobes.text, "ax"
.Ldummy:
	/*
	 * This nop exists only in order to avoid that __switch_to starts at
	 * the beginning of the kprobes text section. In that case we would
	 * have several symbols at the same address. E.g. objdump would take
	 * an arbitrary symbol name when disassembling this code.
	 * With the added nop in between the __switch_to symbol is unique
	 * again.
	 */
	nop	0

/*
 * Scheduler resume function, called by switch_to
 *  gpr2 = (task_struct *) prev
 *  gpr3 = (task_struct *) next
 * Returns:
 *  gpr2 = prev
 */
ENTRY(__switch_to)
	stmg	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
	lgr	%r1,%r2
	aghi	%r1,__TASK_thread		# thread_struct of prev task
	lg	%r5,__TASK_stack(%r3)		# start of kernel stack of next
	stg	%r15,__THREAD_ksp(%r1)		# store kernel stack of prev
	lgr	%r1,%r3
	aghi	%r1,__TASK_thread		# thread_struct of next task
	lgr	%r15,%r5
	aghi	%r15,STACK_INIT			# end of kernel stack of next
	stg	%r3,__LC_CURRENT		# store task struct of next
	stg	%r15,__LC_KERNEL_STACK		# store end of kernel stack
	lg	%r15,__THREAD_ksp(%r1)		# load kernel stack of next
	mvc	__LC_CURRENT_PID(4,%r0),__TASK_pid(%r3) # store pid of next
	lmg	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_LPP
	bzr	%r14
	.insn	s,0xb2800000,__LC_LPP		# set program parameter
	br	%r14

.L__critical_start:

#if IS_ENABLED(CONFIG_KVM)
/*
 * sie64a calling convention:
 * %r2 pointer to sie control block
 * %r3 guest register save area
 */
ENTRY(sie64a)
	stmg	%r6,%r14,__SF_GPRS(%r15)	# save kernel registers
	stg	%r2,__SF_EMPTY(%r15)		# save control block pointer
	stg	%r3,__SF_EMPTY+8(%r15)		# save guest register save area
	xc	__SF_EMPTY+16(8,%r15),__SF_EMPTY+16(%r15) # reason code = 0
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU		# load guest fp/vx registers ?
	jno	.Lsie_load_guest_gprs
	brasl	%r14,load_fpu_regs		# load guest fp/vx regs
.Lsie_load_guest_gprs:
	lmg	%r0,%r13,0(%r3)			# load guest gprs 0-13
	lg	%r14,__LC_GMAP			# get gmap pointer
	ltgr	%r14,%r14
	jz	.Lsie_gmap
	lctlg	%c1,%c1,__GMAP_ASCE(%r14)	# load primary asce
.Lsie_gmap:
	lg	%r14,__SF_EMPTY(%r15)		# get control block pointer
	oi	__SIE_PROG0C+3(%r14),1		# we are going into SIE now
	tm	__SIE_PROG20+3(%r14),3		# last exit...
	jnz	.Lsie_skip
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	jo	.Lsie_skip			# exit if fp/vx regs changed
.Lsie_entry:
	sie	0(%r14)
.Lsie_skip:
	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
.Lsie_done:
# some program checks are suppressing. C code (e.g. do_protection_exception)
# will rewind the PSW by the ILC, which is often 4 bytes in case of SIE. There
# are some corner cases (e.g. runtime instrumentation) where ILC is unpredictable.
# Other instructions between sie64a and .Lsie_done should not cause program
# interrupts. So let's use 3 nops as a landing pad for all possible rewinds.
# See also .Lcleanup_sie
.Lrewind_pad6:
	nopr	7
.Lrewind_pad4:
	nopr	7
.Lrewind_pad2:
	nopr	7
	.globl sie_exit
sie_exit:
	lg	%r14,__SF_EMPTY+8(%r15)		# load guest register save area
	stmg	%r0,%r13,0(%r14)		# save guest gprs 0-13
	lmg	%r6,%r14,__SF_GPRS(%r15)	# restore kernel registers
	lg	%r2,__SF_EMPTY+16(%r15)		# return exit reason code
	br	%r14
.Lsie_fault:
	lghi	%r14,-EFAULT
	stg	%r14,__SF_EMPTY+16(%r15)	# set exit reason code
	j	sie_exit

	EX_TABLE(.Lrewind_pad6,.Lsie_fault)
	EX_TABLE(.Lrewind_pad4,.Lsie_fault)
	EX_TABLE(.Lrewind_pad2,.Lsie_fault)
	EX_TABLE(sie_exit,.Lsie_fault)
EXPORT_SYMBOL(sie64a)
EXPORT_SYMBOL(sie_exit)
#endif
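
/*
 * A minimal sketch of a C caller (added note, assuming the declaration
 * "int sie64a(struct kvm_s390_sie_block *, u64 *);" used by KVM):
 *
 *	rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
 *
 * rc is the reason code from __SF_EMPTY+16: 0 for a regular SIE exit,
 * -EFAULT if a fault rewound the PSW into the landing pad above.
 */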
/*
 * SVC interrupt handler routine. System calls are synchronous events and
 * are executed with interrupts enabled.
 */

ENTRY(system_call)
	stpt	__LC_SYNC_ENTER_TIMER
.Lsysc_stmg:
	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
	lg	%r12,__LC_CURRENT
	lghi	%r13,__TASK_thread
	lghi	%r14,_PIF_SYSCALL
.Lsysc_per:
	lg	%r15,__LC_KERNEL_STACK
	la	%r11,STACK_FRAME_OVERHEAD(%r15)	# pointer to pt_regs
.Lsysc_vtime:
	UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER
	stmg	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
	mvc	__PT_PSW(16,%r11),__LC_SVC_OLD_PSW
	mvc	__PT_INT_CODE(4,%r11),__LC_SVC_ILC
	stg	%r14,__PT_FLAGS(%r11)
.Lsysc_do_svc:
	# load address of system call table
	lg	%r10,__THREAD_sysc_table(%r13,%r12)
	llgh	%r8,__PT_INT_CODE+2(%r11)
	slag	%r8,%r8,2			# shift and test for svc 0
	jnz	.Lsysc_nr_ok
	# svc 0: system call number in %r1
	llgfr	%r1,%r1				# clear high word in r1
	cghi	%r1,NR_syscalls
	jnl	.Lsysc_nr_ok
	sth	%r1,__PT_INT_CODE+2(%r11)
	slag	%r8,%r1,2
.Lsysc_nr_ok:
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	stg	%r2,__PT_ORIG_GPR2(%r11)
	stg	%r7,STACK_FRAME_OVERHEAD(%r15)
	lgf	%r9,0(%r8,%r10)			# get system call address
	TSTMSK	__TI_flags(%r12),_TIF_TRACE
	jnz	.Lsysc_tracesys
	basr	%r14,%r9			# call sys_xxxx
	stg	%r2,__PT_R2(%r11)		# store return value

.Lsysc_return:
	LOCKDEP_SYS_EXIT
.Lsysc_tif:
	TSTMSK	__PT_FLAGS(%r11),_PIF_WORK
	jnz	.Lsysc_work
	TSTMSK	__TI_flags(%r12),_TIF_WORK
	jnz	.Lsysc_work			# check for work
	TSTMSK	__LC_CPU_FLAGS,_CIF_WORK
	jnz	.Lsysc_work
.Lsysc_restore:
	lg	%r14,__LC_VDSO_PER_CPU
	lmg	%r0,%r10,__PT_R0(%r11)
	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
.Lsysc_exit_timer:
	stpt	__LC_EXIT_TIMER
	mvc	__VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
	lmg	%r11,%r15,__PT_R11(%r11)
	lpswe	__LC_RETURN_PSW
.Lsysc_done:
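
#
# Dispatch arithmetic, as a worked example (added comment): the table
# entries are 32-bit (.long, see sys_call_table below), so the svc number
# is shifted left by 2. For svc number 5 the offset in %r8 is 5 << 2 = 20
# and "lgf %r9,0(%r8,%r10)" loads the entry at sys_call_table+20, sign-
# extending the 31-bit address to 64 bits. slag also sets the condition
# code, which is how svc 0 (number passed in %r1 instead) is detected.
#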
332# 333.Lsysc_work: 334 TSTMSK __LC_CPU_FLAGS,_CIF_MCCK_PENDING 335 jo .Lsysc_mcck_pending 336 TSTMSK __TI_flags(%r12),_TIF_NEED_RESCHED 337 jo .Lsysc_reschedule 338 TSTMSK __PT_FLAGS(%r11),_PIF_SYSCALL_RESTART 339 jo .Lsysc_syscall_restart 340#ifdef CONFIG_UPROBES 341 TSTMSK __TI_flags(%r12),_TIF_UPROBE 342 jo .Lsysc_uprobe_notify 343#endif 344 TSTMSK __TI_flags(%r12),_TIF_GUARDED_STORAGE 345 jo .Lsysc_guarded_storage 346 TSTMSK __PT_FLAGS(%r11),_PIF_PER_TRAP 347 jo .Lsysc_singlestep 348#ifdef CONFIG_LIVEPATCH 349 TSTMSK __TI_flags(%r12),_TIF_PATCH_PENDING 350 jo .Lsysc_patch_pending # handle live patching just before 351 # signals and possible syscall restart 352#endif 353 TSTMSK __PT_FLAGS(%r11),_PIF_SYSCALL_RESTART 354 jo .Lsysc_syscall_restart 355 TSTMSK __TI_flags(%r12),_TIF_SIGPENDING 356 jo .Lsysc_sigpending 357 TSTMSK __TI_flags(%r12),_TIF_NOTIFY_RESUME 358 jo .Lsysc_notify_resume 359 TSTMSK __LC_CPU_FLAGS,_CIF_FPU 360 jo .Lsysc_vxrs 361 TSTMSK __LC_CPU_FLAGS,(_CIF_ASCE_PRIMARY|_CIF_ASCE_SECONDARY) 362 jnz .Lsysc_asce 363 j .Lsysc_return # beware of critical section cleanup 364 365# 366# _TIF_NEED_RESCHED is set, call schedule 367# 368.Lsysc_reschedule: 369 larl %r14,.Lsysc_return 370 jg schedule 371 372# 373# _CIF_MCCK_PENDING is set, call handler 374# 375.Lsysc_mcck_pending: 376 larl %r14,.Lsysc_return 377 jg s390_handle_mcck # TIF bit will be cleared by handler 378 379# 380# _CIF_ASCE_PRIMARY and/or CIF_ASCE_SECONDARY set, load user space asce 381# 382.Lsysc_asce: 383 ni __LC_CPU_FLAGS+7,255-_CIF_ASCE_PRIMARY 384 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce 385 TSTMSK __LC_CPU_FLAGS,_CIF_ASCE_SECONDARY 386 jz .Lsysc_return 387 larl %r14,.Lsysc_return 388 jg set_fs_fixup 389 390# 391# CIF_FPU is set, restore floating-point controls and floating-point registers. 
392# 393.Lsysc_vxrs: 394 larl %r14,.Lsysc_return 395 jg load_fpu_regs 396 397# 398# _TIF_SIGPENDING is set, call do_signal 399# 400.Lsysc_sigpending: 401 lgr %r2,%r11 # pass pointer to pt_regs 402 brasl %r14,do_signal 403 TSTMSK __PT_FLAGS(%r11),_PIF_SYSCALL 404 jno .Lsysc_return 405.Lsysc_do_syscall: 406 lghi %r13,__TASK_thread 407 lmg %r2,%r7,__PT_R2(%r11) # load svc arguments 408 lghi %r1,0 # svc 0 returns -ENOSYS 409 j .Lsysc_do_svc 410 411# 412# _TIF_NOTIFY_RESUME is set, call do_notify_resume 413# 414.Lsysc_notify_resume: 415 lgr %r2,%r11 # pass pointer to pt_regs 416 larl %r14,.Lsysc_return 417 jg do_notify_resume 418 419# 420# _TIF_UPROBE is set, call uprobe_notify_resume 421# 422#ifdef CONFIG_UPROBES 423.Lsysc_uprobe_notify: 424 lgr %r2,%r11 # pass pointer to pt_regs 425 larl %r14,.Lsysc_return 426 jg uprobe_notify_resume 427#endif 428 429# 430# _TIF_GUARDED_STORAGE is set, call guarded_storage_load 431# 432.Lsysc_guarded_storage: 433 lgr %r2,%r11 # pass pointer to pt_regs 434 larl %r14,.Lsysc_return 435 jg gs_load_bc_cb 436# 437# _TIF_PATCH_PENDING is set, call klp_update_patch_state 438# 439#ifdef CONFIG_LIVEPATCH 440.Lsysc_patch_pending: 441 lg %r2,__LC_CURRENT # pass pointer to task struct 442 larl %r14,.Lsysc_return 443 jg klp_update_patch_state 444#endif 445 446# 447# _PIF_PER_TRAP is set, call do_per_trap 448# 449.Lsysc_singlestep: 450 ni __PT_FLAGS+7(%r11),255-_PIF_PER_TRAP 451 lgr %r2,%r11 # pass pointer to pt_regs 452 larl %r14,.Lsysc_return 453 jg do_per_trap 454 455# 456# _PIF_SYSCALL_RESTART is set, repeat the current system call 457# 458.Lsysc_syscall_restart: 459 ni __PT_FLAGS+7(%r11),255-_PIF_SYSCALL_RESTART 460 lmg %r1,%r7,__PT_R1(%r11) # load svc arguments 461 lg %r2,__PT_ORIG_GPR2(%r11) 462 j .Lsysc_do_svc 463 464# 465# call tracehook_report_syscall_entry/tracehook_report_syscall_exit before 466# and after the system call 467# 468.Lsysc_tracesys: 469 lgr %r2,%r11 # pass pointer to pt_regs 470 la %r3,0 471 llgh %r0,__PT_INT_CODE+2(%r11) 472 stg %r0,__PT_R2(%r11) 473 brasl %r14,do_syscall_trace_enter 474 lghi %r0,NR_syscalls 475 clgr %r0,%r2 476 jnh .Lsysc_tracenogo 477 sllg %r8,%r2,2 478 lgf %r9,0(%r8,%r10) 479.Lsysc_tracego: 480 lmg %r3,%r7,__PT_R3(%r11) 481 stg %r7,STACK_FRAME_OVERHEAD(%r15) 482 lg %r2,__PT_ORIG_GPR2(%r11) 483 basr %r14,%r9 # call sys_xxx 484 stg %r2,__PT_R2(%r11) # store return value 485.Lsysc_tracenogo: 486 TSTMSK __TI_flags(%r12),_TIF_TRACE 487 jz .Lsysc_return 488 lgr %r2,%r11 # pass pointer to pt_regs 489 larl %r14,.Lsysc_return 490 jg do_syscall_trace_exit 491 492# 493# a new process exits the kernel with ret_from_fork 494# 495ENTRY(ret_from_fork) 496 la %r11,STACK_FRAME_OVERHEAD(%r15) 497 lg %r12,__LC_CURRENT 498 brasl %r14,schedule_tail 499 TRACE_IRQS_ON 500 ssm __LC_SVC_NEW_PSW # reenable interrupts 501 tm __PT_PSW+1(%r11),0x01 # forking a kernel thread ? 
#
# a new process exits the kernel with ret_from_fork
#
ENTRY(ret_from_fork)
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	lg	%r12,__LC_CURRENT
	brasl	%r14,schedule_tail
	TRACE_IRQS_ON
	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
	tm	__PT_PSW+1(%r11),0x01	# forking a kernel thread ?
	jne	.Lsysc_tracenogo
	# it's a kernel thread
	lmg	%r9,%r10,__PT_R9(%r11)	# load gprs
ENTRY(kernel_thread_starter)
	la	%r2,0(%r10)
	basr	%r14,%r9
	j	.Lsysc_tracenogo

/*
 * Program check handler routine
 */

ENTRY(pgm_check_handler)
	stpt	__LC_SYNC_ENTER_TIMER
	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
	lg	%r10,__LC_LAST_BREAK
	lg	%r12,__LC_CURRENT
	larl	%r13,cleanup_critical
	lmg	%r8,%r9,__LC_PGM_OLD_PSW
	tmhh	%r8,0x0001		# test problem state bit
	jnz	2f			# -> fault in user space
#if IS_ENABLED(CONFIG_KVM)
	# cleanup critical section for program checks in sie64a
	lgr	%r14,%r9
	slg	%r14,BASED(.Lsie_critical_start)
	clg	%r14,BASED(.Lsie_critical_length)
	jhe	0f
	lg	%r14,__SF_EMPTY(%r15)		# get control block pointer
	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
	larl	%r9,sie_exit			# skip forward to sie_exit
#endif
0:	tmhh	%r8,0x4000		# PER bit set in old PSW ?
	jnz	1f			# -> enabled, can't be a double fault
	tm	__LC_PGM_ILC+3,0x80	# check for per exception
	jnz	.Lpgm_svcper		# -> single stepped svc
1:	CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC
	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	j	4f
2:	UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
	lg	%r15,__LC_KERNEL_STACK
	lgr	%r14,%r12
	aghi	%r14,__TASK_thread	# pointer to thread_struct
	lghi	%r13,__LC_PGM_TDB
	tm	__LC_PGM_ILC+2,0x02	# check for transaction abort
	jz	3f
	mvc	__THREAD_trap_tdb(256,%r14),0(%r13)
3:	stg	%r10,__THREAD_last_break(%r14)
4:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
	stmg	%r8,%r9,__PT_PSW(%r11)
	mvc	__PT_INT_CODE(4,%r11),__LC_PGM_ILC
	mvc	__PT_INT_PARM_LONG(8,%r11),__LC_TRANS_EXC_CODE
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	stg	%r10,__PT_ARGS(%r11)
	tm	__LC_PGM_ILC+3,0x80	# check for per exception
	jz	5f
	tmhh	%r8,0x0001		# kernel per event ?
	jz	.Lpgm_kprobe
	oi	__PT_FLAGS+7(%r11),_PIF_PER_TRAP
	mvc	__THREAD_per_address(8,%r14),__LC_PER_ADDRESS
	mvc	__THREAD_per_cause(2,%r14),__LC_PER_CODE
	mvc	__THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID
5:	REENABLE_IRQS
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	larl	%r1,pgm_check_table
	llgh	%r10,__PT_INT_CODE+2(%r11)
	nill	%r10,0x007f
	sll	%r10,2
	je	.Lpgm_return
	lgf	%r1,0(%r10,%r1)		# load address of handler routine
	lgr	%r2,%r11		# pass pointer to pt_regs
	basr	%r14,%r1		# branch to interrupt-handler
.Lpgm_return:
	LOCKDEP_SYS_EXIT
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	jno	.Lsysc_restore
	TSTMSK	__PT_FLAGS(%r11),_PIF_SYSCALL
	jo	.Lsysc_do_syscall
	j	.Lsysc_tif

#
# PER event in supervisor state, must be kprobes
#
.Lpgm_kprobe:
	REENABLE_IRQS
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,do_per_trap
	j	.Lpgm_return

#
# single stepped system call
#
.Lpgm_svcper:
	mvc	__LC_RETURN_PSW(8),__LC_SVC_NEW_PSW
	lghi	%r13,__TASK_thread
	larl	%r14,.Lsysc_per
	stg	%r14,__LC_RETURN_PSW+8
	lghi	%r14,_PIF_SYSCALL | _PIF_PER_TRAP
	lpswe	__LC_RETURN_PSW		# branch to .Lsysc_per and enable irqs
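
#
# How .Lpgm_svcper works (added note): __LC_RETURN_PSW is assembled from
# the mask half of the svc new PSW and .Lsysc_per as the address, so the
# lpswe above continues in the system call path with interrupts enabled
# and %r14 preloaded with _PIF_SYSCALL | _PIF_PER_TRAP, which .Lsysc_per
# stores into the pt_regs flags. The PER trap is then delivered on the
# way out of the system call.
#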
/*
 * IO interrupt handler routine
 */
ENTRY(io_int_handler)
	STCK	__LC_INT_CLOCK
	stpt	__LC_ASYNC_ENTER_TIMER
	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
	lg	%r12,__LC_CURRENT
	larl	%r13,cleanup_critical
	lmg	%r8,%r9,__LC_IO_OLD_PSW
	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
	stmg	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
	stmg	%r8,%r9,__PT_PSW(%r11)
	mvc	__PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	TSTMSK	__LC_CPU_FLAGS,_CIF_IGNORE_IRQ
	jo	.Lio_restore
	TRACE_IRQS_OFF
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
.Lio_loop:
	lgr	%r2,%r11		# pass pointer to pt_regs
	lghi	%r3,IO_INTERRUPT
	tm	__PT_INT_CODE+8(%r11),0x80	# adapter interrupt ?
	jz	.Lio_call
	lghi	%r3,THIN_INTERRUPT
.Lio_call:
	brasl	%r14,do_IRQ
	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_LPAR
	jz	.Lio_return
	tpi	0
	jz	.Lio_return
	mvc	__PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
	j	.Lio_loop
.Lio_return:
	LOCKDEP_SYS_EXIT
	TRACE_IRQS_ON
.Lio_tif:
	TSTMSK	__TI_flags(%r12),_TIF_WORK
	jnz	.Lio_work		# there is work to do (signals etc.)
	TSTMSK	__LC_CPU_FLAGS,_CIF_WORK
	jnz	.Lio_work
.Lio_restore:
	lg	%r14,__LC_VDSO_PER_CPU
	lmg	%r0,%r10,__PT_R0(%r11)
	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
.Lio_exit_timer:
	stpt	__LC_EXIT_TIMER
	mvc	__VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
	lmg	%r11,%r15,__PT_R11(%r11)
	lpswe	__LC_RETURN_PSW
.Lio_done:
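
#
# Note on .Lio_loop above (added comment): on LPAR, "tpi 0" polls for a
# further pending I/O interrupt after do_IRQ returns. If one is pending
# its interruption code replaces __PT_INT_CODE and the loop handles it
# directly, saving a full exit and re-entry through the interrupt PSW
# swap.
#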
#
# There is work to do, find out in which context we have been interrupted:
# 1) if we return to user space we can do all _TIF_WORK work
# 2) if we return to kernel code and kvm is enabled check if we need to
#    modify the psw to leave SIE
# 3) if we return to kernel code and preemptive scheduling is enabled check
#    the preemption counter and if it is zero call preempt_schedule_irq
# Before any work can be done, a switch to the kernel stack is required.
#
.Lio_work:
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	jo	.Lio_work_user		# yes -> do resched & signal
#ifdef CONFIG_PREEMPT
	# check for preemptive scheduling
	icm	%r0,15,__LC_PREEMPT_COUNT
	jnz	.Lio_restore		# preemption is disabled
	TSTMSK	__TI_flags(%r12),_TIF_NEED_RESCHED
	jno	.Lio_restore
	# switch to kernel stack
	lg	%r1,__PT_R15(%r11)
	aghi	%r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
	la	%r11,STACK_FRAME_OVERHEAD(%r1)
	lgr	%r15,%r1
	# TRACE_IRQS_ON already done at .Lio_return, call
	# TRACE_IRQS_OFF to keep things symmetrical
	TRACE_IRQS_OFF
	brasl	%r14,preempt_schedule_irq
	j	.Lio_return
#else
	j	.Lio_restore
#endif

#
# Need to do work before returning to userspace, switch to kernel stack
#
.Lio_work_user:
	lg	%r1,__LC_KERNEL_STACK
	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
	la	%r11,STACK_FRAME_OVERHEAD(%r1)
	lgr	%r15,%r1

#
# One of the work bits is on. Find out which one.
#
.Lio_work_tif:
	TSTMSK	__LC_CPU_FLAGS,_CIF_MCCK_PENDING
	jo	.Lio_mcck_pending
	TSTMSK	__TI_flags(%r12),_TIF_NEED_RESCHED
	jo	.Lio_reschedule
#ifdef CONFIG_LIVEPATCH
	TSTMSK	__TI_flags(%r12),_TIF_PATCH_PENDING
	jo	.Lio_patch_pending
#endif
	TSTMSK	__TI_flags(%r12),_TIF_SIGPENDING
	jo	.Lio_sigpending
	TSTMSK	__TI_flags(%r12),_TIF_NOTIFY_RESUME
	jo	.Lio_notify_resume
	TSTMSK	__TI_flags(%r12),_TIF_GUARDED_STORAGE
	jo	.Lio_guarded_storage
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	jo	.Lio_vxrs
	TSTMSK	__LC_CPU_FLAGS,(_CIF_ASCE_PRIMARY|_CIF_ASCE_SECONDARY)
	jnz	.Lio_asce
	j	.Lio_return		# beware of critical section cleanup

#
# _CIF_MCCK_PENDING is set, call handler
#
.Lio_mcck_pending:
	# TRACE_IRQS_ON already done at .Lio_return
	brasl	%r14,s390_handle_mcck	# TIF bit will be cleared by handler
	TRACE_IRQS_OFF
	j	.Lio_return

#
# _CIF_ASCE_PRIMARY and/or _CIF_ASCE_SECONDARY set, load user space asce
#
.Lio_asce:
	ni	__LC_CPU_FLAGS+7,255-_CIF_ASCE_PRIMARY
	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
	TSTMSK	__LC_CPU_FLAGS,_CIF_ASCE_SECONDARY
	jz	.Lio_return
	larl	%r14,.Lio_return
	jg	set_fs_fixup

#
# CIF_FPU is set, restore floating-point controls and floating-point registers.
#
.Lio_vxrs:
	larl	%r14,.Lio_return
	jg	load_fpu_regs

#
# _TIF_GUARDED_STORAGE is set, call guarded_storage_load
#
.Lio_guarded_storage:
	# TRACE_IRQS_ON already done at .Lio_return
	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,gs_load_bc_cb
	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
	TRACE_IRQS_OFF
	j	.Lio_return

#
# _TIF_NEED_RESCHED is set, call schedule
#
.Lio_reschedule:
	# TRACE_IRQS_ON already done at .Lio_return
	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
	brasl	%r14,schedule		# call scheduler
	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
	TRACE_IRQS_OFF
	j	.Lio_return
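
#
# The ssm pairs above (added note): __LC_SVC_NEW_PSW carries a system
# mask with I/O and external interrupts enabled, __LC_PGM_NEW_PSW one
# with them disabled, so loading the system mask from these PSWs
# brackets the C call with an enabled-interrupts window. The same
# pattern is used for the signal and notify-resume work below.
#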
#
# _TIF_PATCH_PENDING is set, call klp_update_patch_state
#
#ifdef CONFIG_LIVEPATCH
.Lio_patch_pending:
	lg	%r2,__LC_CURRENT	# pass pointer to task struct
	larl	%r14,.Lio_return
	jg	klp_update_patch_state
#endif

#
# _TIF_SIGPENDING is set, call do_signal
#
.Lio_sigpending:
	# TRACE_IRQS_ON already done at .Lio_return
	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,do_signal
	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
	TRACE_IRQS_OFF
	j	.Lio_return

#
# _TIF_NOTIFY_RESUME is set, call do_notify_resume
#
.Lio_notify_resume:
	# TRACE_IRQS_ON already done at .Lio_return
	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,do_notify_resume
	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
	TRACE_IRQS_OFF
	j	.Lio_return

/*
 * External interrupt handler routine
 */
ENTRY(ext_int_handler)
	STCK	__LC_INT_CLOCK
	stpt	__LC_ASYNC_ENTER_TIMER
	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
	lg	%r12,__LC_CURRENT
	larl	%r13,cleanup_critical
	lmg	%r8,%r9,__LC_EXT_OLD_PSW
	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
	stmg	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
	stmg	%r8,%r9,__PT_PSW(%r11)
	lghi	%r1,__LC_EXT_PARAMS2
	mvc	__PT_INT_CODE(4,%r11),__LC_EXT_CPU_ADDR
	mvc	__PT_INT_PARM(4,%r11),__LC_EXT_PARAMS
	mvc	__PT_INT_PARM_LONG(8,%r11),0(%r1)
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	TSTMSK	__LC_CPU_FLAGS,_CIF_IGNORE_IRQ
	jo	.Lio_restore
	TRACE_IRQS_OFF
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	lghi	%r3,EXT_INTERRUPT
	brasl	%r14,do_IRQ
	j	.Lio_return

/*
 * Load idle PSW. The second "half" of this function is in .Lcleanup_idle.
 */
ENTRY(psw_idle)
	stg	%r3,__SF_EMPTY(%r15)
	larl	%r1,.Lpsw_idle_lpsw+4
	stg	%r1,__SF_EMPTY+8(%r15)
#ifdef CONFIG_SMP
	larl	%r1,smp_cpu_mtid
	llgf	%r1,0(%r1)
	ltgr	%r1,%r1
	jz	.Lpsw_idle_stcctm
	.insn	rsy,0xeb0000000017,%r1,5,__SF_EMPTY+16(%r15)
.Lpsw_idle_stcctm:
#endif
	oi	__LC_CPU_FLAGS+7,_CIF_ENABLED_WAIT
	STCK	__CLOCK_IDLE_ENTER(%r2)
	stpt	__TIMER_IDLE_ENTER(%r2)
.Lpsw_idle_lpsw:
	lpswe	__SF_EMPTY(%r15)
	br	%r14
.Lpsw_idle_end:
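
/*
 * psw_idle in a nutshell (added note, assuming the C declaration
 * "void psw_idle(struct s390_idle_data *, unsigned long)" so that %r2
 * is the idle data and %r3 the wait PSW mask): %r3 and the address
 * .Lpsw_idle_lpsw+4 are stored as a 16 byte PSW at __SF_EMPTY(%r15),
 * so the lpswe enters an enabled wait whose continuation is the
 * instruction right after the lpswe (the br %r14). .Lcleanup_idle
 * completes the clock and timer bookkeeping if an interrupt arrives
 * before the lpswe is reached.
 */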
/*
 * Store floating-point controls and floating-point or vector register
 * depending whether the vector facility is available. A critical section
 * cleanup assures that the registers are stored even if interrupted for
 * some other work. The CIF_FPU flag is set to trigger a lazy restore
 * of the register contents at return from io or a system call.
 */
ENTRY(save_fpu_regs)
	lg	%r2,__LC_CURRENT
	aghi	%r2,__TASK_thread
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	bor	%r14
	stfpc	__THREAD_FPU_fpc(%r2)
	lg	%r3,__THREAD_FPU_regs(%r2)
	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_VX
	jz	.Lsave_fpu_regs_fp	  # no -> store FP regs
	VSTM	%v0,%v15,0,%r3		  # vstm 0,15,0(3)
	VSTM	%v16,%v31,256,%r3	  # vstm 16,31,256(3)
	j	.Lsave_fpu_regs_done	  # -> set CIF_FPU flag
.Lsave_fpu_regs_fp:
	std	0,0(%r3)
	std	1,8(%r3)
	std	2,16(%r3)
	std	3,24(%r3)
	std	4,32(%r3)
	std	5,40(%r3)
	std	6,48(%r3)
	std	7,56(%r3)
	std	8,64(%r3)
	std	9,72(%r3)
	std	10,80(%r3)
	std	11,88(%r3)
	std	12,96(%r3)
	std	13,104(%r3)
	std	14,112(%r3)
	std	15,120(%r3)
.Lsave_fpu_regs_done:
	oi	__LC_CPU_FLAGS+7,_CIF_FPU
	br	%r14
.Lsave_fpu_regs_end:
EXPORT_SYMBOL(save_fpu_regs)

/*
 * Load floating-point controls and floating-point or vector registers.
 * A critical section cleanup assures that the register contents are
 * loaded even if interrupted for some other work.
 *
 * There are special calling conventions to fit into sysc and io return
 * work:
 *	%r15:	<kernel stack>
 * The function requires:
 *	%r4
 */
load_fpu_regs:
	lg	%r4,__LC_CURRENT
	aghi	%r4,__TASK_thread
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	bnor	%r14
	lfpc	__THREAD_FPU_fpc(%r4)
	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_VX
	lg	%r4,__THREAD_FPU_regs(%r4)	# %r4 <- reg save area
	jz	.Lload_fpu_regs_fp		# -> no VX, load FP regs
	VLM	%v0,%v15,0,%r4
	VLM	%v16,%v31,256,%r4
	j	.Lload_fpu_regs_done
.Lload_fpu_regs_fp:
	ld	0,0(%r4)
	ld	1,8(%r4)
	ld	2,16(%r4)
	ld	3,24(%r4)
	ld	4,32(%r4)
	ld	5,40(%r4)
	ld	6,48(%r4)
	ld	7,56(%r4)
	ld	8,64(%r4)
	ld	9,72(%r4)
	ld	10,80(%r4)
	ld	11,88(%r4)
	ld	12,96(%r4)
	ld	13,104(%r4)
	ld	14,112(%r4)
	ld	15,120(%r4)
.Lload_fpu_regs_done:
	ni	__LC_CPU_FLAGS+7,255-_CIF_FPU
	br	%r14
.Lload_fpu_regs_end:

.L__critical_end:
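
/*
 * Lazy FPU protocol, summarized (added note): save_fpu_regs stores the
 * current user FP/VX state into the thread structure and sets CIF_FPU;
 * kernel code may then clobber the registers freely. The sysc/io return
 * paths (.Lsysc_vxrs/.Lio_vxrs) see CIF_FPU and call load_fpu_regs to
 * reload the saved state, so the restore happens at most once per
 * return to user space instead of at every in-kernel FPU use.
 */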
/*
 * Machine check handler routines
 */
ENTRY(mcck_int_handler)
	STCK	__LC_MCCK_CLOCK
	la	%r1,4095		# revalidate r1
	spt	__LC_CPU_TIMER_SAVE_AREA-4095(%r1)	# revalidate cpu timer
	lmg	%r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs
	lg	%r12,__LC_CURRENT
	larl	%r13,cleanup_critical
	lmg	%r8,%r9,__LC_MCK_OLD_PSW
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_SYSTEM_DAMAGE
	jo	.Lmcck_panic		# yes -> rest of mcck code invalid
	lghi	%r14,__LC_CPU_TIMER_SAVE_AREA
	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_CPU_TIMER_VALID
	jo	3f
	la	%r14,__LC_SYNC_ENTER_TIMER
	clc	0(8,%r14),__LC_ASYNC_ENTER_TIMER
	jl	0f
	la	%r14,__LC_ASYNC_ENTER_TIMER
0:	clc	0(8,%r14),__LC_EXIT_TIMER
	jl	1f
	la	%r14,__LC_EXIT_TIMER
1:	clc	0(8,%r14),__LC_LAST_UPDATE_TIMER
	jl	2f
	la	%r14,__LC_LAST_UPDATE_TIMER
2:	spt	0(%r14)
	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
3:	TSTMSK	__LC_MCCK_CODE,(MCCK_CODE_PSW_MWP_VALID|MCCK_CODE_PSW_IA_VALID)
	jno	.Lmcck_panic		# no -> skip cleanup critical
	SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_MCCK_ENTER_TIMER
.Lmcck_skip:
	lghi	%r14,__LC_GPREGS_SAVE_AREA+64
	stmg	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_R8(64,%r11),0(%r14)
	stmg	%r8,%r9,__PT_PSW(%r11)
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,s390_do_machine_check
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	jno	.Lmcck_return
	lg	%r1,__LC_KERNEL_STACK	# switch to kernel stack
	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
	la	%r11,STACK_FRAME_OVERHEAD(%r1)
	lgr	%r15,%r1
	ssm	__LC_PGM_NEW_PSW	# turn dat on, keep irqs off
	TSTMSK	__LC_CPU_FLAGS,_CIF_MCCK_PENDING
	jno	.Lmcck_return
	TRACE_IRQS_OFF
	brasl	%r14,s390_handle_mcck
	TRACE_IRQS_ON
.Lmcck_return:
	lg	%r14,__LC_VDSO_PER_CPU
	lmg	%r0,%r10,__PT_R0(%r11)
	mvc	__LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
	tm	__LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
	jno	0f
	stpt	__LC_EXIT_TIMER
	mvc	__VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
0:	lmg	%r11,%r15,__PT_R11(%r11)
	lpswe	__LC_RETURN_MCCK_PSW

.Lmcck_panic:
	lg	%r15,__LC_PANIC_STACK
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	j	.Lmcck_skip

#
# PSW restart interrupt handler
#
ENTRY(restart_int_handler)
	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_LPP
	jz	0f
	.insn	s,0xb2800000,__LC_LPP
0:	stg	%r15,__LC_SAVE_AREA_RESTART
	lg	%r15,__LC_RESTART_STACK
	aghi	%r15,-__PT_SIZE			# create pt_regs on stack
	xc	0(__PT_SIZE,%r15),0(%r15)
	stmg	%r0,%r14,__PT_R0(%r15)
	mvc	__PT_R15(8,%r15),__LC_SAVE_AREA_RESTART
	mvc	__PT_PSW(16,%r15),__LC_RST_OLD_PSW # store restart old psw
	aghi	%r15,-STACK_FRAME_OVERHEAD	# create stack frame on stack
	xc	0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
	lg	%r1,__LC_RESTART_FN		# load fn, parm & source cpu
	lg	%r2,__LC_RESTART_DATA
	lg	%r3,__LC_RESTART_SOURCE
	ltgr	%r3,%r3				# test source cpu address
	jm	1f				# negative -> skip source stop
0:	sigp	%r4,%r3,SIGP_SENSE		# sigp sense to source cpu
	brc	10,0b				# wait for status stored
1:	basr	%r14,%r1			# call function
	stap	__SF_EMPTY(%r15)		# store cpu address
	llgh	%r3,__SF_EMPTY(%r15)
2:	sigp	%r4,%r3,SIGP_STOP		# sigp stop to current cpu
	brc	2,2b
3:	j	3b
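
#
# Usage note (added): this handler runs the function scheduled via a
# restart interrupt - __LC_RESTART_FN is called with __LC_RESTART_DATA
# in %r2 on __LC_RESTART_STACK. If __LC_RESTART_SOURCE holds a valid
# (non-negative) CPU address, the sigp sense loop first waits until that
# source CPU has entered stopped state; should the function ever return,
# the current CPU stops itself.
#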
	.section .kprobes.text, "ax"

#ifdef CONFIG_CHECK_STACK
/*
 * The synchronous or the asynchronous stack overflowed. We are dead.
 * No need to properly save the registers, we are going to panic anyway.
 * Setup a pt_regs so that show_trace can provide a good call trace.
 */
stack_overflow:
	lg	%r15,__LC_PANIC_STACK	# change to panic stack
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	stmg	%r8,%r9,__PT_PSW(%r11)
	mvc	__PT_R8(64,%r11),0(%r14)
	stg	%r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	jg	kernel_stack_overflow
#endif

cleanup_critical:
#if IS_ENABLED(CONFIG_KVM)
	clg	%r9,BASED(.Lcleanup_table_sie)	# .Lsie_gmap
	jl	0f
	clg	%r9,BASED(.Lcleanup_table_sie+8)# .Lsie_done
	jl	.Lcleanup_sie
#endif
	clg	%r9,BASED(.Lcleanup_table)	# system_call
	jl	0f
	clg	%r9,BASED(.Lcleanup_table+8)	# .Lsysc_do_svc
	jl	.Lcleanup_system_call
	clg	%r9,BASED(.Lcleanup_table+16)	# .Lsysc_tif
	jl	0f
	clg	%r9,BASED(.Lcleanup_table+24)	# .Lsysc_restore
	jl	.Lcleanup_sysc_tif
	clg	%r9,BASED(.Lcleanup_table+32)	# .Lsysc_done
	jl	.Lcleanup_sysc_restore
	clg	%r9,BASED(.Lcleanup_table+40)	# .Lio_tif
	jl	0f
	clg	%r9,BASED(.Lcleanup_table+48)	# .Lio_restore
	jl	.Lcleanup_io_tif
	clg	%r9,BASED(.Lcleanup_table+56)	# .Lio_done
	jl	.Lcleanup_io_restore
	clg	%r9,BASED(.Lcleanup_table+64)	# psw_idle
	jl	0f
	clg	%r9,BASED(.Lcleanup_table+72)	# .Lpsw_idle_end
	jl	.Lcleanup_idle
	clg	%r9,BASED(.Lcleanup_table+80)	# save_fpu_regs
	jl	0f
	clg	%r9,BASED(.Lcleanup_table+88)	# .Lsave_fpu_regs_end
	jl	.Lcleanup_save_fpu_regs
	clg	%r9,BASED(.Lcleanup_table+96)	# load_fpu_regs
	jl	0f
	clg	%r9,BASED(.Lcleanup_table+104)	# .Lload_fpu_regs_end
	jl	.Lcleanup_load_fpu_regs
0:	br	%r14

	.align	8
.Lcleanup_table:
	.quad	system_call
	.quad	.Lsysc_do_svc
	.quad	.Lsysc_tif
	.quad	.Lsysc_restore
	.quad	.Lsysc_done
	.quad	.Lio_tif
	.quad	.Lio_restore
	.quad	.Lio_done
	.quad	psw_idle
	.quad	.Lpsw_idle_end
	.quad	save_fpu_regs
	.quad	.Lsave_fpu_regs_end
	.quad	load_fpu_regs
	.quad	.Lload_fpu_regs_end
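
#
# How the table is used, by example (added note): the .Lcleanup_table
# entries are consecutive range boundaries. cleanup_critical compares the
# interrupted address in %r9 against them; if, say, %r9 lies in
# [system_call, .Lsysc_do_svc) the system call entry sequence was hit,
# and .Lcleanup_system_call finishes whatever part of it (timer update,
# register save, stack switch) had not completed, then points %r9 at a
# safe resume address before the interrupt handler continues.
#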
#if IS_ENABLED(CONFIG_KVM)
.Lcleanup_table_sie:
	.quad	.Lsie_gmap
	.quad	.Lsie_done

.Lcleanup_sie:
	cghi	%r11,__LC_SAVE_AREA_ASYNC	# is this in a normal interrupt?
	je	1f
	slg	%r9,BASED(.Lsie_crit_mcck_start)
	clg	%r9,BASED(.Lsie_crit_mcck_length)
	jh	1f
	oi	__LC_CPU_FLAGS+7,_CIF_MCCK_GUEST
1:	lg	%r9,__SF_EMPTY(%r15)		# get control block pointer
	ni	__SIE_PROG0C+3(%r9),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
	larl	%r9,sie_exit			# skip forward to sie_exit
	br	%r14
#endif

.Lcleanup_system_call:
	# check if stpt has been executed
	clg	%r9,BASED(.Lcleanup_system_call_insn)
	jh	0f
	mvc	__LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
	cghi	%r11,__LC_SAVE_AREA_ASYNC
	je	0f
	mvc	__LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER
0:	# check if stmg has been executed
	clg	%r9,BASED(.Lcleanup_system_call_insn+8)
	jh	0f
	mvc	__LC_SAVE_AREA_SYNC(64),0(%r11)
0:	# check if base register setup + TIF bit load has been done
	clg	%r9,BASED(.Lcleanup_system_call_insn+16)
	jhe	0f
	# set up saved register r12 task struct pointer
	stg	%r12,32(%r11)
	# set up saved register r13 __TASK_thread offset
	mvc	40(8,%r11),BASED(.Lcleanup_system_call_const)
0:	# check if the user time update has been done
	clg	%r9,BASED(.Lcleanup_system_call_insn+24)
	jh	0f
	lg	%r15,__LC_EXIT_TIMER
	slg	%r15,__LC_SYNC_ENTER_TIMER
	alg	%r15,__LC_USER_TIMER
	stg	%r15,__LC_USER_TIMER
0:	# check if the system time update has been done
	clg	%r9,BASED(.Lcleanup_system_call_insn+32)
	jh	0f
	lg	%r15,__LC_LAST_UPDATE_TIMER
	slg	%r15,__LC_EXIT_TIMER
	alg	%r15,__LC_SYSTEM_TIMER
	stg	%r15,__LC_SYSTEM_TIMER
0:	# update accounting time stamp
	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
	# set up saved register r11
	lg	%r15,__LC_KERNEL_STACK
	la	%r9,STACK_FRAME_OVERHEAD(%r15)
	stg	%r9,24(%r11)		# r11 pt_regs pointer
	# fill pt_regs
	mvc	__PT_R8(64,%r9),__LC_SAVE_AREA_SYNC
	stmg	%r0,%r7,__PT_R0(%r9)
	mvc	__PT_PSW(16,%r9),__LC_SVC_OLD_PSW
	mvc	__PT_INT_CODE(4,%r9),__LC_SVC_ILC
	xc	__PT_FLAGS(8,%r9),__PT_FLAGS(%r9)
	mvi	__PT_FLAGS+7(%r9),_PIF_SYSCALL
	# setup saved register r15
	stg	%r15,56(%r11)		# r15 stack pointer
	# set new psw address and exit
	larl	%r9,.Lsysc_do_svc
	br	%r14
.Lcleanup_system_call_insn:
	.quad	system_call
	.quad	.Lsysc_stmg
	.quad	.Lsysc_per
	.quad	.Lsysc_vtime+36
	.quad	.Lsysc_vtime+42
.Lcleanup_system_call_const:
	.quad	__TASK_thread

.Lcleanup_sysc_tif:
	larl	%r9,.Lsysc_tif
	br	%r14

.Lcleanup_sysc_restore:
	# check if stpt has been executed
	clg	%r9,BASED(.Lcleanup_sysc_restore_insn)
	jh	0f
	mvc	__LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
	cghi	%r11,__LC_SAVE_AREA_ASYNC
	je	0f
	mvc	__LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
0:	clg	%r9,BASED(.Lcleanup_sysc_restore_insn+8)
	je	1f
	lg	%r9,24(%r11)		# get saved pointer to pt_regs
	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r9)
	mvc	0(64,%r11),__PT_R8(%r9)
	lmg	%r0,%r7,__PT_R0(%r9)
1:	lmg	%r8,%r9,__LC_RETURN_PSW
	br	%r14
.Lcleanup_sysc_restore_insn:
	.quad	.Lsysc_exit_timer
	.quad	.Lsysc_done - 4

.Lcleanup_io_tif:
	larl	%r9,.Lio_tif
	br	%r14

.Lcleanup_io_restore:
	# check if stpt has been executed
	clg	%r9,BASED(.Lcleanup_io_restore_insn)
	jh	0f
	mvc	__LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
0:	clg	%r9,BASED(.Lcleanup_io_restore_insn+8)
	je	1f
	lg	%r9,24(%r11)		# get saved r11 pointer to pt_regs
	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r9)
	mvc	0(64,%r11),__PT_R8(%r9)
	lmg	%r0,%r7,__PT_R0(%r9)
1:	lmg	%r8,%r9,__LC_RETURN_PSW
	br	%r14
.Lcleanup_io_restore_insn:
	.quad	.Lio_exit_timer
	.quad	.Lio_done - 4
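
#
# About the "- 4" entries above (added note): .Lsysc_done - 4 and
# .Lio_done - 4 are the addresses of the final 4 byte lpswe
# instructions. If the interrupt hit exactly there, the exit was
# complete apart from the PSW load, so the cleanup only has to reload
# __LC_RETURN_PSW into %r8/%r9 instead of rebuilding the register
# window from pt_regs.
#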
.Lcleanup_idle:
	ni	__LC_CPU_FLAGS+7,255-_CIF_ENABLED_WAIT
	# copy interrupt clock & cpu timer
	mvc	__CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK
	mvc	__TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER
	cghi	%r11,__LC_SAVE_AREA_ASYNC
	je	0f
	mvc	__CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
	mvc	__TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER
0:	# check if stck & stpt have been executed
	clg	%r9,BASED(.Lcleanup_idle_insn)
	jhe	1f
	mvc	__CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2)
	mvc	__TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2)
1:	# calculate idle cycles
#ifdef CONFIG_SMP
	clg	%r9,BASED(.Lcleanup_idle_insn)
	jl	3f
	larl	%r1,smp_cpu_mtid
	llgf	%r1,0(%r1)
	ltgr	%r1,%r1
	jz	3f
	.insn	rsy,0xeb0000000017,%r1,5,__SF_EMPTY+80(%r15)
	larl	%r3,mt_cycles
	ag	%r3,__LC_PERCPU_OFFSET
	la	%r4,__SF_EMPTY+16(%r15)
2:	lg	%r0,0(%r3)
	slg	%r0,0(%r4)
	alg	%r0,64(%r4)
	stg	%r0,0(%r3)
	la	%r3,8(%r3)
	la	%r4,8(%r4)
	brct	%r1,2b
#endif
3:	# account system time going idle
	lg	%r9,__LC_STEAL_TIMER
	alg	%r9,__CLOCK_IDLE_ENTER(%r2)
	slg	%r9,__LC_LAST_UPDATE_CLOCK
	stg	%r9,__LC_STEAL_TIMER
	mvc	__LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2)
	lg	%r9,__LC_SYSTEM_TIMER
	alg	%r9,__LC_LAST_UPDATE_TIMER
	slg	%r9,__TIMER_IDLE_ENTER(%r2)
	stg	%r9,__LC_SYSTEM_TIMER
	mvc	__LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2)
	# prepare return psw
	nihh	%r8,0xfcfd		# clear irq & wait state bits
	lg	%r9,48(%r11)		# return from psw_idle
	br	%r14
.Lcleanup_idle_insn:
	.quad	.Lpsw_idle_lpsw

.Lcleanup_save_fpu_regs:
	larl	%r9,save_fpu_regs
	br	%r14

.Lcleanup_load_fpu_regs:
	larl	%r9,load_fpu_regs
	br	%r14

/*
 * Integer constants
 */
	.align	8
.Lcritical_start:
	.quad	.L__critical_start
.Lcritical_length:
	.quad	.L__critical_end - .L__critical_start
#if IS_ENABLED(CONFIG_KVM)
.Lsie_critical_start:
	.quad	.Lsie_gmap
.Lsie_critical_length:
	.quad	.Lsie_done - .Lsie_gmap
.Lsie_crit_mcck_start:
	.quad	.Lsie_entry
.Lsie_crit_mcck_length:
	.quad	.Lsie_skip - .Lsie_entry
#endif

	.section .rodata, "a"
#define SYSCALL(esame,emu)	.long esame
	.globl	sys_call_table
sys_call_table:
#include "syscalls.S"
#undef SYSCALL

#ifdef CONFIG_COMPAT

#define SYSCALL(esame,emu)	.long emu
	.globl	sys_call_table_emu
sys_call_table_emu:
#include "syscalls.S"
#undef SYSCALL
#endif