/*
 * S390 low-level entry points.
 *
 * Copyright IBM Corp. 1999, 2012
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *	      Hartmut Penner (hp@de.ibm.com),
 *	      Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *	      Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/page.h>
#include <asm/sigp.h>

__PT_R0	     = __PT_GPRS
__PT_R1	     = __PT_GPRS + 4
__PT_R2	     = __PT_GPRS + 8
__PT_R3	     = __PT_GPRS + 12
__PT_R4	     = __PT_GPRS + 16
__PT_R5	     = __PT_GPRS + 20
__PT_R6	     = __PT_GPRS + 24
__PT_R7	     = __PT_GPRS + 28
__PT_R8	     = __PT_GPRS + 32
__PT_R9	     = __PT_GPRS + 36
__PT_R10     = __PT_GPRS + 40
__PT_R11     = __PT_GPRS + 44
__PT_R12     = __PT_GPRS + 48
__PT_R13     = __PT_GPRS + 52
__PT_R14     = __PT_GPRS + 56
__PT_R15     = __PT_GPRS + 60

_TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
		 _TIF_MCCK_PENDING | _TIF_PER_TRAP)
_TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
		 _TIF_MCCK_PENDING)
_TIF_TRACE    = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
		 _TIF_SYSCALL_TRACEPOINT)

STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
STACK_SIZE  = 1 << STACK_SHIFT

#define BASED(name) name-system_call(%r13)

	.macro	TRACE_IRQS_ON
#ifdef CONFIG_TRACE_IRQFLAGS
	basr	%r2,%r0
	l	%r1,BASED(.Lhardirqs_on)
	basr	%r14,%r1		# call trace_hardirqs_on_caller
#endif
	.endm

	.macro	TRACE_IRQS_OFF
#ifdef CONFIG_TRACE_IRQFLAGS
	basr	%r2,%r0
	l	%r1,BASED(.Lhardirqs_off)
	basr	%r14,%r1		# call trace_hardirqs_off_caller
#endif
	.endm

	.macro	LOCKDEP_SYS_EXIT
#ifdef CONFIG_LOCKDEP
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	jz	.+10
	l	%r1,BASED(.Llockdep_sys_exit)
	basr	%r14,%r1		# call lockdep_sys_exit
#endif
	.endm

	.macro	CHECK_STACK stacksize,savearea
#ifdef CONFIG_CHECK_STACK
	tml	%r15,\stacksize - CONFIG_STACK_GUARD
	la	%r14,\savearea
	jz	stack_overflow
#endif
	.endm

	.macro	SWITCH_ASYNC savearea,stack,shift
	tmh	%r8,0x0001		# interrupting from user ?
	jnz	1f
	lr	%r14,%r9
	sl	%r14,BASED(.Lcritical_start)
	cl	%r14,BASED(.Lcritical_length)
	jhe	0f
	la	%r11,\savearea		# inside critical section, do cleanup
	bras	%r14,cleanup_critical
	tmh	%r8,0x0001		# retest problem state after cleanup
	jnz	1f
0:	l	%r14,\stack		# are we already on the target stack?
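	# the following slr/sra pair computes (\stack - %r15) >> \shift;
	# a zero result means %r15 already points into the target stack,
	# so only the stack overflow check is needed before reusing it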
	slr	%r14,%r15
	sra	%r14,\shift
	jnz	1f
	CHECK_STACK 1<<\shift,\savearea
	j	2f
1:	l	%r15,\stack		# load target stack
2:	ahi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	.endm

	.macro	ADD64 high,low,timer
	al	\high,\timer
	al	\low,4+\timer
	brc	12,.+8
	ahi	\high,1
	.endm

	.macro	SUB64 high,low,timer
	sl	\high,\timer
	sl	\low,4+\timer
	brc	3,.+8
	ahi	\high,-1
	.endm

	.macro	UPDATE_VTIME high,low,enter_timer
	lm	\high,\low,__LC_EXIT_TIMER
	SUB64	\high,\low,\enter_timer
	ADD64	\high,\low,__LC_USER_TIMER
	stm	\high,\low,__LC_USER_TIMER
	lm	\high,\low,__LC_LAST_UPDATE_TIMER
	SUB64	\high,\low,__LC_EXIT_TIMER
	ADD64	\high,\low,__LC_SYSTEM_TIMER
	stm	\high,\low,__LC_SYSTEM_TIMER
	mvc	__LC_LAST_UPDATE_TIMER(8),\enter_timer
	.endm

	.macro	REENABLE_IRQS
	st	%r8,__LC_RETURN_PSW
	ni	__LC_RETURN_PSW,0xbf
	ssm	__LC_RETURN_PSW
	.endm

	.section .kprobes.text, "ax"

/*
 * Scheduler resume function, called by switch_to
 *  gpr2 = (task_struct *) prev
 *  gpr3 = (task_struct *) next
 * Returns:
 *  gpr2 = prev
 */
ENTRY(__switch_to)
	stm	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
	st	%r15,__THREAD_ksp(%r2)		# store kernel stack of prev
	l	%r4,__THREAD_info(%r2)		# get thread_info of prev
	l	%r5,__THREAD_info(%r3)		# get thread_info of next
	lr	%r15,%r5
	ahi	%r15,STACK_SIZE			# end of kernel stack of next
	st	%r3,__LC_CURRENT		# store task struct of next
	st	%r5,__LC_THREAD_INFO		# store thread info of next
	st	%r15,__LC_KERNEL_STACK		# store end of kernel stack
	lctl	%c4,%c4,__TASK_pid(%r3)		# load pid to control reg. 4
	mvc	__LC_CURRENT_PID(4,%r0),__TASK_pid(%r3)	# store pid of next
	l	%r15,__THREAD_ksp(%r3)		# load kernel stack of next
	tm	__TI_flags+3(%r4),_TIF_MCCK_PENDING # machine check pending?
	jz	0f
	ni	__TI_flags+3(%r4),255-_TIF_MCCK_PENDING	# clear flag in prev
	oi	__TI_flags+3(%r5),_TIF_MCCK_PENDING	# set it in next
0:	lm	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
	br	%r14

__critical_start:
/*
 * SVC interrupt handler routine. System calls are synchronous events and
 * are executed with interrupts enabled.
 */

ENTRY(system_call)
	stpt	__LC_SYNC_ENTER_TIMER
sysc_stm:
	stm	%r8,%r15,__LC_SAVE_AREA_SYNC
	l	%r12,__LC_THREAD_INFO
	l	%r13,__LC_SVC_NEW_PSW+4
sysc_per:
	l	%r15,__LC_KERNEL_STACK
	ahi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	la	%r11,STACK_FRAME_OVERHEAD(%r15)	# pointer to pt_regs
sysc_vtime:
	UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER
	stm	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_R8(32,%r11),__LC_SAVE_AREA_SYNC
	mvc	__PT_PSW(8,%r11),__LC_SVC_OLD_PSW
	mvc	__PT_INT_CODE(4,%r11),__LC_SVC_ILC
sysc_do_svc:
	oi	__TI_flags+3(%r12),_TIF_SYSCALL
	lh	%r8,__PT_INT_CODE+2(%r11)
	sla	%r8,2				# shift and test for svc0
	jnz	sysc_nr_ok
	# svc 0: system call number in %r1
	cl	%r1,BASED(.Lnr_syscalls)
	jnl	sysc_nr_ok
	sth	%r1,__PT_INT_CODE+2(%r11)
	lr	%r8,%r1
	sla	%r8,2
sysc_nr_ok:
	l	%r10,BASED(.Lsys_call_table)	# 31 bit system call table
	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
	st	%r2,__PT_ORIG_GPR2(%r11)
	st	%r7,STACK_FRAME_OVERHEAD(%r15)
	l	%r9,0(%r8,%r10)			# get system call addr.
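	# take the tracing slow path if any of the _TIF_TRACE bits
	# (syscall trace, audit, seccomp, tracepoint) are set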
	tm	__TI_flags+2(%r12),_TIF_TRACE >> 8
	jnz	sysc_tracesys
	basr	%r14,%r9			# call sys_xxxx
	st	%r2,__PT_R2(%r11)		# store return value

sysc_return:
	LOCKDEP_SYS_EXIT
sysc_tif:
	tm	__PT_PSW+1(%r11),0x01		# returning to user ?
	jno	sysc_restore
	tm	__TI_flags+3(%r12),_TIF_WORK_SVC
	jnz	sysc_work			# check for work
	ni	__TI_flags+3(%r12),255-_TIF_SYSCALL
sysc_restore:
	mvc	__LC_RETURN_PSW(8),__PT_PSW(%r11)
	stpt	__LC_EXIT_TIMER
	lm	%r0,%r15,__PT_R0(%r11)
	lpsw	__LC_RETURN_PSW
sysc_done:

#
# One of the work bits is on. Find out which one.
#
sysc_work:
	tm	__TI_flags+3(%r12),_TIF_MCCK_PENDING
	jo	sysc_mcck_pending
	tm	__TI_flags+3(%r12),_TIF_NEED_RESCHED
	jo	sysc_reschedule
	tm	__TI_flags+3(%r12),_TIF_SIGPENDING
	jo	sysc_sigpending
	tm	__TI_flags+3(%r12),_TIF_NOTIFY_RESUME
	jo	sysc_notify_resume
	tm	__TI_flags+3(%r12),_TIF_PER_TRAP
	jo	sysc_singlestep
	j	sysc_return		# beware of critical section cleanup

#
# _TIF_NEED_RESCHED is set, call schedule
#
sysc_reschedule:
	l	%r1,BASED(.Lschedule)
	la	%r14,BASED(sysc_return)
	br	%r1			# call schedule

#
# _TIF_MCCK_PENDING is set, call handler
#
sysc_mcck_pending:
	l	%r1,BASED(.Lhandle_mcck)
	la	%r14,BASED(sysc_return)
	br	%r1			# TIF bit will be cleared by handler

#
# _TIF_SIGPENDING is set, call do_signal
#
sysc_sigpending:
	ni	__TI_flags+3(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP
	lr	%r2,%r11		# pass pointer to pt_regs
	l	%r1,BASED(.Ldo_signal)
	basr	%r14,%r1		# call do_signal
	tm	__TI_flags+3(%r12),_TIF_SYSCALL
	jno	sysc_return
	lm	%r2,%r7,__PT_R2(%r11)	# load svc arguments
	xr	%r8,%r8			# svc 0 returns -ENOSYS
	clc	__PT_INT_CODE+2(2,%r11),BASED(.Lnr_syscalls+2)
	jnl	sysc_nr_ok		# invalid svc number -> do svc 0
	lh	%r8,__PT_INT_CODE+2(%r11)	# load new svc number
	sla	%r8,2
	j	sysc_nr_ok		# restart svc

#
# _TIF_NOTIFY_RESUME is set, call do_notify_resume
#
sysc_notify_resume:
	lr	%r2,%r11		# pass pointer to pt_regs
	l	%r1,BASED(.Ldo_notify_resume)
	la	%r14,BASED(sysc_return)
	br	%r1			# call do_notify_resume

#
# _TIF_PER_TRAP is set, call do_per_trap
#
sysc_singlestep:
	ni	__TI_flags+3(%r12),255-(_TIF_SYSCALL | _TIF_PER_TRAP)
	lr	%r2,%r11		# pass pointer to pt_regs
	l	%r1,BASED(.Ldo_per_trap)
	la	%r14,BASED(sysc_return)
	br	%r1			# call do_per_trap

#
# call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
# and after the system call
#
sysc_tracesys:
	l	%r1,BASED(.Ltrace_enter)
	lr	%r2,%r11		# pass pointer to pt_regs
	la	%r3,0
	xr	%r0,%r0
	icm	%r0,3,__PT_INT_CODE+2(%r11)
	st	%r0,__PT_R2(%r11)
	basr	%r14,%r1		# call do_syscall_trace_enter
	cl	%r2,BASED(.Lnr_syscalls)
	jnl	sysc_tracenogo
	lr	%r8,%r2
	sll	%r8,2
	l	%r9,0(%r8,%r10)
sysc_tracego:
	lm	%r3,%r7,__PT_R3(%r11)
	st	%r7,STACK_FRAME_OVERHEAD(%r15)
	l	%r2,__PT_ORIG_GPR2(%r11)
	basr	%r14,%r9		# call sys_xxx
	st	%r2,__PT_R2(%r11)	# store return value
sysc_tracenogo:
	tm	__TI_flags+2(%r12),_TIF_TRACE >> 8
	jz	sysc_return
	l	%r1,BASED(.Ltrace_exit)
	lr	%r2,%r11		# pass pointer to pt_regs
	la	%r14,BASED(sysc_return)
	br	%r1			# call do_syscall_trace_exit

#
# a new process exits the kernel with ret_from_fork
#
ENTRY(ret_from_fork)
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	l	%r12,__LC_THREAD_INFO
	l	%r13,__LC_SVC_NEW_PSW+4
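	# for a kernel thread the problem state bit in the saved PSW is
	# clear; only in that case is the current stack pointer written
	# into pt_regs for the new kthread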
	tm	__PT_PSW+1(%r11),0x01	# forking a kernel thread ?
	jo	0f
	st	%r15,__PT_R15(%r11)	# store stack pointer for new kthread
0:	l	%r1,BASED(.Lschedule_tail)
	basr	%r14,%r1		# call schedule_tail
	TRACE_IRQS_ON
	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
	j	sysc_tracenogo

#
# kernel_execve function needs to deal with pt_regs that is not
# at the usual place
#
ENTRY(kernel_execve)
	stm	%r12,%r15,48(%r15)
	lr	%r14,%r15
	l	%r13,__LC_SVC_NEW_PSW+4
	ahi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	st	%r14,__SF_BACKCHAIN(%r15)
	la	%r12,STACK_FRAME_OVERHEAD(%r15)
	xc	0(__PT_SIZE,%r12),0(%r12)
	l	%r1,BASED(.Ldo_execve)
	lr	%r5,%r12
	basr	%r14,%r1		# call do_execve
	ltr	%r2,%r2
	je	0f
	ahi	%r15,(STACK_FRAME_OVERHEAD + __PT_SIZE)
	lm	%r12,%r15,48(%r15)
	br	%r14
	# execve succeeded.
0:	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
	l	%r15,__LC_KERNEL_STACK	# load ksp
	ahi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	mvc	0(__PT_SIZE,%r11),0(%r12)	# copy pt_regs
	l	%r12,__LC_THREAD_INFO
	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
	l	%r1,BASED(.Lexecve_tail)
	basr	%r14,%r1		# call execve_tail
	j	sysc_return

/*
 * Program check handler routine
 */

ENTRY(pgm_check_handler)
	stpt	__LC_SYNC_ENTER_TIMER
	stm	%r8,%r15,__LC_SAVE_AREA_SYNC
	l	%r12,__LC_THREAD_INFO
	l	%r13,__LC_SVC_NEW_PSW+4
	lm	%r8,%r9,__LC_PGM_OLD_PSW
	tmh	%r8,0x0001		# test problem state bit
	jnz	1f			# -> fault in user space
	tmh	%r8,0x4000		# PER bit set in old PSW ?
	jnz	0f			# -> enabled, can't be a svc per
	tm	__LC_PGM_ILC+3,0x80	# check for per exception
	jnz	pgm_svcper		# -> single stepped svc
0:	CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC
	j	2f
1:	UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
	l	%r15,__LC_KERNEL_STACK
2:	ahi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stm	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_R8(32,%r11),__LC_SAVE_AREA_SYNC
	stm	%r8,%r9,__PT_PSW(%r11)
	mvc	__PT_INT_CODE(4,%r11),__LC_PGM_ILC
	mvc	__PT_INT_PARM_LONG(4,%r11),__LC_TRANS_EXC_CODE
	tm	__LC_PGM_ILC+3,0x80	# check for per exception
	jz	0f
	l	%r1,__TI_task(%r12)
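	# PER event: a kernel PER event must come from kprobes, a user
	# space PER event is recorded in the thread structure and handled
	# on the way back to user space via _TIF_PER_TRAP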
	tmh	%r8,0x0001		# kernel per event ?
	jz	pgm_kprobe
	oi	__TI_flags+3(%r12),_TIF_PER_TRAP
	mvc	__THREAD_per_address(4,%r1),__LC_PER_ADDRESS
	mvc	__THREAD_per_cause(2,%r1),__LC_PER_CAUSE
	mvc	__THREAD_per_paid(1,%r1),__LC_PER_PAID
0:	REENABLE_IRQS
	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
	l	%r1,BASED(.Ljump_table)
	la	%r10,0x7f
	n	%r10,__PT_INT_CODE(%r11)
	je	sysc_return
	sll	%r10,2
	l	%r1,0(%r10,%r1)		# load address of handler routine
	lr	%r2,%r11		# pass pointer to pt_regs
	basr	%r14,%r1		# branch to interrupt-handler
	j	sysc_return

#
# PER event in supervisor state, must be kprobes
#
pgm_kprobe:
	REENABLE_IRQS
	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
	l	%r1,BASED(.Ldo_per_trap)
	lr	%r2,%r11		# pass pointer to pt_regs
	basr	%r14,%r1		# call do_per_trap
	j	sysc_return

#
# single stepped system call
#
pgm_svcper:
	oi	__TI_flags+3(%r12),_TIF_PER_TRAP
	mvc	__LC_RETURN_PSW(4),__LC_SVC_NEW_PSW
	mvc	__LC_RETURN_PSW+4(4),BASED(.Lsysc_per)
	lpsw	__LC_RETURN_PSW		# branch to sysc_per and enable irqs

/*
 * IO interrupt handler routine
 */

ENTRY(io_int_handler)
	stck	__LC_INT_CLOCK
	stpt	__LC_ASYNC_ENTER_TIMER
	stm	%r8,%r15,__LC_SAVE_AREA_ASYNC
	l	%r12,__LC_THREAD_INFO
	l	%r13,__LC_SVC_NEW_PSW+4
	lm	%r8,%r9,__LC_IO_OLD_PSW
	tmh	%r8,0x0001		# interrupting from user ?
	jz	io_skip
	UPDATE_VTIME %r14,%r15,__LC_ASYNC_ENTER_TIMER
io_skip:
	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
	stm	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_R8(32,%r11),__LC_SAVE_AREA_ASYNC
	stm	%r8,%r9,__PT_PSW(%r11)
	TRACE_IRQS_OFF
	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
	l	%r1,BASED(.Ldo_IRQ)
	lr	%r2,%r11		# pass pointer to pt_regs
	basr	%r14,%r1		# call do_IRQ
io_return:
	LOCKDEP_SYS_EXIT
	TRACE_IRQS_ON
io_tif:
	tm	__TI_flags+3(%r12),_TIF_WORK_INT
	jnz	io_work			# there is work to do (signals etc.)
io_restore:
	mvc	__LC_RETURN_PSW(8),__PT_PSW(%r11)
	stpt	__LC_EXIT_TIMER
	lm	%r0,%r15,__PT_R0(%r11)
	lpsw	__LC_RETURN_PSW
io_done:

#
# There is work to do, find out in which context we have been interrupted:
# 1) if we return to user space we can do all _TIF_WORK_INT work
# 2) if we return to kernel code and preemptive scheduling is enabled check
#    the preemption counter and if it is zero call preempt_schedule_irq
# Before any work can be done, a switch to the kernel stack is required.
#
io_work:
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	jo	io_work_user		# yes -> do resched & signal
#ifdef CONFIG_PREEMPT
	# check for preemptive scheduling
	icm	%r0,15,__TI_precount(%r12)
	jnz	io_restore		# preemption disabled
	tm	__TI_flags+3(%r12),_TIF_NEED_RESCHED
	jno	io_restore
	# switch to kernel stack
	l	%r1,__PT_R15(%r11)
	ahi	%r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
	xc	__SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1)
	la	%r11,STACK_FRAME_OVERHEAD(%r1)
	lr	%r15,%r1
	# TRACE_IRQS_ON already done at io_return, call
	# TRACE_IRQS_OFF to keep things symmetrical
	TRACE_IRQS_OFF
	l	%r1,BASED(.Lpreempt_irq)
	basr	%r14,%r1		# call preempt_schedule_irq
	j	io_return
#else
	j	io_restore
#endif

#
# Need to do work before returning to userspace, switch to kernel stack
#
io_work_user:
	l	%r1,__LC_KERNEL_STACK
	ahi	%r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
	xc	__SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1)
	la	%r11,STACK_FRAME_OVERHEAD(%r1)
	lr	%r15,%r1

#
# One of the work bits is on. Find out which one.
# Checked are: _TIF_SIGPENDING, _TIF_NOTIFY_RESUME, _TIF_NEED_RESCHED
#	       and _TIF_MCCK_PENDING
#
io_work_tif:
	tm	__TI_flags+3(%r12),_TIF_MCCK_PENDING
	jo	io_mcck_pending
	tm	__TI_flags+3(%r12),_TIF_NEED_RESCHED
	jo	io_reschedule
	tm	__TI_flags+3(%r12),_TIF_SIGPENDING
	jo	io_sigpending
	tm	__TI_flags+3(%r12),_TIF_NOTIFY_RESUME
	jo	io_notify_resume
	j	io_return		# beware of critical section cleanup

#
# _TIF_MCCK_PENDING is set, call handler
#
io_mcck_pending:
	# TRACE_IRQS_ON already done at io_return
	l	%r1,BASED(.Lhandle_mcck)
	basr	%r14,%r1		# TIF bit will be cleared by handler
	TRACE_IRQS_OFF
	j	io_return

#
# _TIF_NEED_RESCHED is set, call schedule
#
io_reschedule:
	# TRACE_IRQS_ON already done at io_return
	l	%r1,BASED(.Lschedule)
	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
	basr	%r14,%r1		# call scheduler
	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
	TRACE_IRQS_OFF
	j	io_return

#
# _TIF_SIGPENDING is set, call do_signal
#
io_sigpending:
	# TRACE_IRQS_ON already done at io_return
	l	%r1,BASED(.Ldo_signal)
	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
	lr	%r2,%r11		# pass pointer to pt_regs
	basr	%r14,%r1		# call do_signal
	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
	TRACE_IRQS_OFF
	j	io_return

#
# _TIF_NOTIFY_RESUME is set, call do_notify_resume
#
io_notify_resume:
	# TRACE_IRQS_ON already done at io_return
	l	%r1,BASED(.Ldo_notify_resume)
	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
	lr	%r2,%r11		# pass pointer to pt_regs
	basr	%r14,%r1		# call do_notify_resume
	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
	TRACE_IRQS_OFF
	j	io_return

/*
 * External interrupt handler routine
 */

ENTRY(ext_int_handler)
	stck	__LC_INT_CLOCK
	stpt	__LC_ASYNC_ENTER_TIMER
	stm	%r8,%r15,__LC_SAVE_AREA_ASYNC
	l	%r12,__LC_THREAD_INFO
	l	%r13,__LC_SVC_NEW_PSW+4
	lm	%r8,%r9,__LC_EXT_OLD_PSW
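	# user CPU time is only accounted if the interrupt hit user space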
	tmh	%r8,0x0001		# interrupting from user ?
	jz	ext_skip
	UPDATE_VTIME %r14,%r15,__LC_ASYNC_ENTER_TIMER
ext_skip:
	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
	stm	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_R8(32,%r11),__LC_SAVE_AREA_ASYNC
	stm	%r8,%r9,__PT_PSW(%r11)
	TRACE_IRQS_OFF
	lr	%r2,%r11		# pass pointer to pt_regs
	l	%r3,__LC_EXT_CPU_ADDR	# get cpu address + interruption code
	l	%r4,__LC_EXT_PARAMS	# get external parameters
	l	%r1,BASED(.Ldo_extint)
	basr	%r14,%r1		# call do_extint
	j	io_return

/*
 * Load idle PSW. The second "half" of this function is in cleanup_idle.
 */
ENTRY(psw_idle)
	st	%r3,__SF_EMPTY(%r15)
	basr	%r1,0
	la	%r1,psw_idle_lpsw+4-.(%r1)
	st	%r1,__SF_EMPTY+4(%r15)
	oi	__SF_EMPTY+4(%r15),0x80
	stck	__CLOCK_IDLE_ENTER(%r2)
	stpt	__TIMER_IDLE_ENTER(%r2)
psw_idle_lpsw:
	lpsw	__SF_EMPTY(%r15)
	br	%r14
psw_idle_end:

__critical_end:

/*
 * Machine check handler routines
 */

ENTRY(mcck_int_handler)
	stck	__LC_MCCK_CLOCK
	spt	__LC_CPU_TIMER_SAVE_AREA	# revalidate cpu timer
	lm	%r0,%r15,__LC_GPREGS_SAVE_AREA	# revalidate gprs
	l	%r12,__LC_THREAD_INFO
	l	%r13,__LC_SVC_NEW_PSW+4
	lm	%r8,%r9,__LC_MCK_OLD_PSW
	tm	__LC_MCCK_CODE,0x80	# system damage?
	jo	mcck_panic		# yes -> rest of mcck code invalid
	la	%r14,__LC_CPU_TIMER_SAVE_AREA
	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
	tm	__LC_MCCK_CODE+5,0x02	# stored cpu timer value valid?
	jo	3f
	la	%r14,__LC_SYNC_ENTER_TIMER
	clc	0(8,%r14),__LC_ASYNC_ENTER_TIMER
	jl	0f
	la	%r14,__LC_ASYNC_ENTER_TIMER
0:	clc	0(8,%r14),__LC_EXIT_TIMER
	jl	1f
	la	%r14,__LC_EXIT_TIMER
1:	clc	0(8,%r14),__LC_LAST_UPDATE_TIMER
	jl	2f
	la	%r14,__LC_LAST_UPDATE_TIMER
2:	spt	0(%r14)
	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
3:	tm	__LC_MCCK_CODE+2,0x09	# mwp + ia of old psw valid?
	jno	mcck_panic		# no -> skip cleanup critical
	tmh	%r8,0x0001		# interrupting from user ?
	jz	mcck_skip
	UPDATE_VTIME %r14,%r15,__LC_MCCK_ENTER_TIMER
mcck_skip:
	SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+32,__LC_PANIC_STACK,PAGE_SHIFT
	mvc	__PT_R0(64,%r11),__LC_GPREGS_SAVE_AREA
	stm	%r8,%r9,__PT_PSW(%r11)
	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
	l	%r1,BASED(.Ldo_machine_check)
	lr	%r2,%r11		# pass pointer to pt_regs
	basr	%r14,%r1		# call s390_do_machine_check
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	jno	mcck_return
	l	%r1,__LC_KERNEL_STACK	# switch to kernel stack
	ahi	%r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
	xc	__SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1)
	la	%r11,STACK_FRAME_OVERHEAD(%r1)
	lr	%r15,%r1
	ssm	__LC_PGM_NEW_PSW	# turn dat on, keep irqs off
	tm	__TI_flags+3(%r12),_TIF_MCCK_PENDING
	jno	mcck_return
	TRACE_IRQS_OFF
	l	%r1,BASED(.Lhandle_mcck)
	basr	%r14,%r1		# call s390_handle_mcck
	TRACE_IRQS_ON
mcck_return:
	mvc	__LC_RETURN_MCCK_PSW(8),__PT_PSW(%r11) # move return PSW
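	# the exit timer is only updated when returning to user space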
	tm	__LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
	jno	0f
	lm	%r0,%r15,__PT_R0(%r11)
	stpt	__LC_EXIT_TIMER
	lpsw	__LC_RETURN_MCCK_PSW
0:	lm	%r0,%r15,__PT_R0(%r11)
	lpsw	__LC_RETURN_MCCK_PSW

mcck_panic:
	l	%r14,__LC_PANIC_STACK
	slr	%r14,%r15
	sra	%r14,PAGE_SHIFT
	jz	0f
	l	%r15,__LC_PANIC_STACK
0:	ahi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	j	mcck_skip

#
# PSW restart interrupt handler
#
ENTRY(restart_int_handler)
	st	%r15,__LC_SAVE_AREA_RESTART
	l	%r15,__LC_RESTART_STACK
	ahi	%r15,-__PT_SIZE			# create pt_regs on stack
	xc	0(__PT_SIZE,%r15),0(%r15)
	stm	%r0,%r14,__PT_R0(%r15)
	mvc	__PT_R15(4,%r15),__LC_SAVE_AREA_RESTART
	mvc	__PT_PSW(8,%r15),__LC_RST_OLD_PSW # store restart old psw
	ahi	%r15,-STACK_FRAME_OVERHEAD	# create stack frame on stack
	xc	0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
	l	%r1,__LC_RESTART_FN		# load fn, parm & source cpu
	l	%r2,__LC_RESTART_DATA
	l	%r3,__LC_RESTART_SOURCE
	ltr	%r3,%r3				# test source cpu address
	jm	1f				# negative -> skip source stop
0:	sigp	%r4,%r3,SIGP_SENSE		# sigp sense to source cpu
	brc	10,0b				# wait for status stored
1:	basr	%r14,%r1			# call function
	stap	__SF_EMPTY(%r15)		# store cpu address
	lh	%r3,__SF_EMPTY(%r15)
2:	sigp	%r4,%r3,SIGP_STOP		# sigp stop to current cpu
	brc	2,2b
3:	j	3b

	.section .kprobes.text, "ax"

#ifdef CONFIG_CHECK_STACK
/*
 * The synchronous or the asynchronous stack overflowed. We are dead.
 * No need to properly save the registers, we are going to panic anyway.
 * Setup a pt_regs so that show_trace can provide a good call trace.
 */
stack_overflow:
	l	%r15,__LC_PANIC_STACK	# change to panic stack
	ahi	%r15,-__PT_SIZE		# create pt_regs
	stm	%r0,%r7,__PT_R0(%r15)
	stm	%r8,%r9,__PT_PSW(%r15)
	mvc	__PT_R8(32,%r15),0(%r14) # move %r8-%r15 to pt_regs
	lr	%r11,%r15
	ahi	%r15,-STACK_FRAME_OVERHEAD
	l	%r1,BASED(1f)
	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
	lr	%r2,%r11		# pass pointer to pt_regs
	br	%r1			# branch to kernel_stack_overflow
1:	.long	kernel_stack_overflow
#endif

cleanup_table:
	.long	system_call + 0x80000000
	.long	sysc_do_svc + 0x80000000
	.long	sysc_tif + 0x80000000
	.long	sysc_restore + 0x80000000
	.long	sysc_done + 0x80000000
	.long	io_tif + 0x80000000
	.long	io_restore + 0x80000000
	.long	io_done + 0x80000000
	.long	psw_idle + 0x80000000
	.long	psw_idle_end + 0x80000000

cleanup_critical:
	cl	%r9,BASED(cleanup_table)	# system_call
	jl	0f
	cl	%r9,BASED(cleanup_table+4)	# sysc_do_svc
	jl	cleanup_system_call
	cl	%r9,BASED(cleanup_table+8)	# sysc_tif
	jl	0f
	cl	%r9,BASED(cleanup_table+12)	# sysc_restore
	jl	cleanup_sysc_tif
	cl	%r9,BASED(cleanup_table+16)	# sysc_done
	jl	cleanup_sysc_restore
	cl	%r9,BASED(cleanup_table+20)	# io_tif
	jl	0f
	cl	%r9,BASED(cleanup_table+24)	# io_restore
	jl	cleanup_io_tif
	cl	%r9,BASED(cleanup_table+28)	# io_done
	jl	cleanup_io_restore
	cl	%r9,BASED(cleanup_table+32)	# psw_idle
	jl	0f
	cl	%r9,BASED(cleanup_table+36)	# psw_idle_end
	jl	cleanup_idle
0:	br	%r14

cleanup_system_call:
	# check if stpt has been executed
	cl	%r9,BASED(cleanup_system_call_insn)
	jh	0f
	mvc	__LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
	chi	%r11,__LC_SAVE_AREA_ASYNC
	je	0f
	mvc	__LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER
0:	# check if stm has been executed
	cl	%r9,BASED(cleanup_system_call_insn+4)
	jh	0f
	mvc	__LC_SAVE_AREA_SYNC(32),0(%r11)
0:	# set up saved registers r12 and r13
	st	%r12,16(%r11)		# r12 thread-info pointer
	st	%r13,20(%r11)		# r13 literal-pool pointer
	# check if the user time calculation has been done
	cl	%r9,BASED(cleanup_system_call_insn+8)
	jh	0f
	l	%r10,__LC_EXIT_TIMER
	l	%r15,__LC_EXIT_TIMER+4
	SUB64	%r10,%r15,__LC_SYNC_ENTER_TIMER
	ADD64	%r10,%r15,__LC_USER_TIMER
	st	%r10,__LC_USER_TIMER
	st	%r15,__LC_USER_TIMER+4
0:	# check if the system time calculation has been done
	cl	%r9,BASED(cleanup_system_call_insn+12)
	jh	0f
	l	%r10,__LC_LAST_UPDATE_TIMER
	l	%r15,__LC_LAST_UPDATE_TIMER+4
	SUB64	%r10,%r15,__LC_EXIT_TIMER
	ADD64	%r10,%r15,__LC_SYSTEM_TIMER
	st	%r10,__LC_SYSTEM_TIMER
	st	%r15,__LC_SYSTEM_TIMER+4
0:	# update accounting time stamp
	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
	# set up saved register 11
	l	%r15,__LC_KERNEL_STACK
	ahi	%r15,-__PT_SIZE
	st	%r15,12(%r11)		# r11 pt_regs pointer
	# fill pt_regs
	mvc	__PT_R8(32,%r15),__LC_SAVE_AREA_SYNC
	stm	%r0,%r7,__PT_R0(%r15)
	mvc	__PT_PSW(8,%r15),__LC_SVC_OLD_PSW
	mvc	__PT_INT_CODE(4,%r15),__LC_SVC_ILC
	# set up saved register 15
	ahi	%r15,-STACK_FRAME_OVERHEAD
	st	%r15,28(%r11)		# r15 stack pointer
	# set new psw address and exit
	l	%r9,BASED(cleanup_table+4)	# sysc_do_svc + 0x80000000
	br	%r14
cleanup_system_call_insn:
	.long	system_call + 0x80000000
	.long	sysc_stm + 0x80000000
	.long	sysc_vtime + 0x80000000 + 36
	.long	sysc_vtime + 0x80000000 + 76

cleanup_sysc_tif:
	l	%r9,BASED(cleanup_table+8)	# sysc_tif + 0x80000000
	br	%r14

cleanup_sysc_restore:
	cl	%r9,BASED(cleanup_sysc_restore_insn)
	jhe	0f
	l	%r9,12(%r11)		# get saved pointer to pt_regs
	mvc	__LC_RETURN_PSW(8),__PT_PSW(%r9)
	mvc	0(32,%r11),__PT_R8(%r9)
	lm	%r0,%r7,__PT_R0(%r9)
0:	lm	%r8,%r9,__LC_RETURN_PSW
	br	%r14
cleanup_sysc_restore_insn:
	.long	sysc_done - 4 + 0x80000000

cleanup_io_tif:
	l	%r9,BASED(cleanup_table+20)	# io_tif + 0x80000000
	br	%r14

cleanup_io_restore:
	cl	%r9,BASED(cleanup_io_restore_insn)
	jhe	0f
	l	%r9,12(%r11)		# get saved r11 pointer to pt_regs
	mvc	__LC_RETURN_PSW(8),__PT_PSW(%r9)
	mvc	0(32,%r11),__PT_R8(%r9)
	lm	%r0,%r7,__PT_R0(%r9)
0:	lm	%r8,%r9,__LC_RETURN_PSW
	br	%r14
cleanup_io_restore_insn:
	.long	io_done - 4 + 0x80000000

cleanup_idle:
	# copy interrupt clock & cpu timer
	mvc	__CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK
	mvc	__TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER
	chi	%r11,__LC_SAVE_AREA_ASYNC
	je	0f
	mvc	__CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
	mvc	__TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER
0:	# check if stck has been executed
	cl	%r9,BASED(cleanup_idle_insn)
	jhe	1f
	mvc	__CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2)
	mvc	__TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2)
1:	# account system time going idle
	lm	%r9,%r10,__LC_STEAL_TIMER
	ADD64	%r9,%r10,__CLOCK_IDLE_ENTER(%r2)
	SUB64	%r9,%r10,__LC_LAST_UPDATE_CLOCK
	stm	%r9,%r10,__LC_STEAL_TIMER
	mvc	__LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2)
	lm	%r9,%r10,__LC_SYSTEM_TIMER
	ADD64	%r9,%r10,__LC_LAST_UPDATE_TIMER
	SUB64	%r9,%r10,__TIMER_IDLE_ENTER(%r2)
	stm	%r9,%r10,__LC_SYSTEM_TIMER
	mvc	__LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2)
	# prepare return psw
	n	%r8,BASED(cleanup_idle_wait)	# clear wait state bit
	l	%r9,24(%r11)			# return from psw_idle
	br	%r14
cleanup_idle_insn:
	.long	psw_idle_lpsw + 0x80000000
cleanup_idle_wait:
	.long	0xfffdffff

/*
 * Integer constants
 */
	.align	4
.Lnr_syscalls:
	.long	NR_syscalls
.Lvtimer_max:
	.quad	0x7fffffffffffffff

/*
 * Symbol constants
 */
.Ldo_machine_check:	.long	s390_do_machine_check
.Lhandle_mcck:		.long	s390_handle_mcck
.Ldo_IRQ:		.long	do_IRQ
.Ldo_extint:		.long	do_extint
.Ldo_signal:		.long	do_signal
.Ldo_notify_resume:	.long	do_notify_resume
.Ldo_per_trap:		.long	do_per_trap
.Ldo_execve:		.long	do_execve
.Lexecve_tail:		.long	execve_tail
.Ljump_table:		.long	pgm_check_table
.Lschedule:		.long	schedule
#ifdef CONFIG_PREEMPT
.Lpreempt_irq:		.long	preempt_schedule_irq
#endif
.Ltrace_enter:		.long	do_syscall_trace_enter
.Ltrace_exit:		.long	do_syscall_trace_exit
.Lschedule_tail:	.long	schedule_tail
.Lsys_call_table:	.long	sys_call_table
.Lsysc_per:		.long	sysc_per + 0x80000000
#ifdef CONFIG_TRACE_IRQFLAGS
.Lhardirqs_on:		.long	trace_hardirqs_on_caller
.Lhardirqs_off:		.long	trace_hardirqs_off_caller
#endif
#ifdef CONFIG_LOCKDEP
.Llockdep_sys_exit:	.long	lockdep_sys_exit
#endif
.Lcritical_start:	.long	__critical_start + 0x80000000
.Lcritical_length:	.long	__critical_end - __critical_start

		.section .rodata, "a"
#define SYSCALL(esa,esame,emu)	.long esa
	.globl	sys_call_table
sys_call_table:
#include "syscalls.S"
#undef SYSCALL