/*
 *    S390 low-level entry points.
 *
 *    Copyright IBM Corp. 1999, 2012
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *		 Hartmut Penner (hp@de.ibm.com),
 *		 Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *		 Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/page.h>
#include <asm/sigp.h>

__PT_R0      =	__PT_GPRS
__PT_R1      =	__PT_GPRS + 4
__PT_R2      =	__PT_GPRS + 8
__PT_R3      =	__PT_GPRS + 12
__PT_R4      =	__PT_GPRS + 16
__PT_R5      =	__PT_GPRS + 20
__PT_R6      =	__PT_GPRS + 24
__PT_R7      =	__PT_GPRS + 28
__PT_R8      =	__PT_GPRS + 32
__PT_R9      =	__PT_GPRS + 36
__PT_R10     =	__PT_GPRS + 40
__PT_R11     =	__PT_GPRS + 44
__PT_R12     =	__PT_GPRS + 48
__PT_R13     =	__PT_GPRS + 52
__PT_R14     =	__PT_GPRS + 56
__PT_R15     =	__PT_GPRS + 60

_TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
		 _TIF_MCCK_PENDING | _TIF_PER_TRAP)
_TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
		 _TIF_MCCK_PENDING)
_TIF_TRACE    = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
		 _TIF_SYSCALL_TRACEPOINT)

STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
STACK_SIZE  = 1 << STACK_SHIFT
STACK_INIT  = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE

#define BASED(name) name-system_call(%r13)

	.macro	TRACE_IRQS_ON
#ifdef CONFIG_TRACE_IRQFLAGS
	basr	%r2,%r0
	l	%r1,BASED(.Lhardirqs_on)
	basr	%r14,%r1		# call trace_hardirqs_on_caller
#endif
	.endm

	.macro	TRACE_IRQS_OFF
#ifdef CONFIG_TRACE_IRQFLAGS
	basr	%r2,%r0
	l	%r1,BASED(.Lhardirqs_off)
	basr	%r14,%r1		# call trace_hardirqs_off_caller
#endif
	.endm

	.macro	LOCKDEP_SYS_EXIT
#ifdef CONFIG_LOCKDEP
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	jz	.+10
	l	%r1,BASED(.Llockdep_sys_exit)
	basr	%r14,%r1		# call lockdep_sys_exit
#endif
	.endm

	.macro	CHECK_STACK stacksize,savearea
#ifdef CONFIG_CHECK_STACK
	tml	%r15,\stacksize - CONFIG_STACK_GUARD
	la	%r14,\savearea
	jz	stack_overflow
#endif
	.endm

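#
# SWITCH_ASYNC is used by the asynchronous interrupt handlers (I/O, external
# and machine check) to get onto the right stack before a pt_regs area is
# built: if the old PSW in %r8/%r9 is a problem-state PSW the target stack
# is simply loaded; if the kernel was interrupted inside the critical
# section between __critical_start and __critical_end, cleanup_critical is
# called first to bring the interrupted code to a restartable point; if %r15
# already points into the target stack only a new frame is carved out.
# On exit %r11 points to the pt_regs area on the chosen stack.
#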
	.macro	SWITCH_ASYNC savearea,stack,shift
	tmh	%r8,0x0001		# interrupting from user ?
	jnz	1f
	lr	%r14,%r9
	sl	%r14,BASED(.Lcritical_start)
	cl	%r14,BASED(.Lcritical_length)
	jhe	0f
	la	%r11,\savearea		# inside critical section, do cleanup
	bras	%r14,cleanup_critical
	tmh	%r8,0x0001		# retest problem state after cleanup
	jnz	1f
0:	l	%r14,\stack		# are we already on the target stack?
	slr	%r14,%r15
	sra	%r14,\shift
	jnz	1f
	CHECK_STACK 1<<\shift,\savearea
	ahi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	j	2f
1:	l	%r15,\stack		# load target stack
2:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	.endm

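#
# The cpu timer values handled below are 64 bits wide, but only 32 bit
# registers are available, so ADD64/SUB64 do the arithmetic on a register
# pair and propagate the carry/borrow of the low word by hand.  Worked
# example (illustrative values only): adding 0x00000000 00000001 to
# \high:\low = 0x00000001 ffffffff lets "al \low" overflow (condition code
# 2 or 3), the "brc 12" does not branch, and "ahi \high,1" produces the
# expected 0x00000002 00000000.
#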
	.macro	ADD64 high,low,timer
	al	\high,\timer
	al	\low,4+\timer
	brc	12,.+8
	ahi	\high,1
	.endm

	.macro	SUB64 high,low,timer
	sl	\high,\timer
	sl	\low,4+\timer
	brc	3,.+8
	ahi	\high,-1
	.endm

	.macro	UPDATE_VTIME high,low,enter_timer
	lm	\high,\low,__LC_EXIT_TIMER
	SUB64	\high,\low,\enter_timer
	ADD64	\high,\low,__LC_USER_TIMER
	stm	\high,\low,__LC_USER_TIMER
	lm	\high,\low,__LC_LAST_UPDATE_TIMER
	SUB64	\high,\low,__LC_EXIT_TIMER
	ADD64	\high,\low,__LC_SYSTEM_TIMER
	stm	\high,\low,__LC_SYSTEM_TIMER
	mvc	__LC_LAST_UPDATE_TIMER(8),\enter_timer
	.endm

	.macro	REENABLE_IRQS
	st	%r8,__LC_RETURN_PSW
	ni	__LC_RETURN_PSW,0xbf
	ssm	__LC_RETURN_PSW
	.endm

	.section .kprobes.text, "ax"

/*
 * Scheduler resume function, called by switch_to
 *  gpr2 = (task_struct *) prev
 *  gpr3 = (task_struct *) next
 * Returns:
 *  gpr2 = prev
 */
ENTRY(__switch_to)
	stm	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
	st	%r15,__THREAD_ksp(%r2)		# store kernel stack of prev
	l	%r4,__THREAD_info(%r2)		# get thread_info of prev
	l	%r5,__THREAD_info(%r3)		# get thread_info of next
	lr	%r15,%r5
	ahi	%r15,STACK_INIT			# end of kernel stack of next
	st	%r3,__LC_CURRENT		# store task struct of next
	st	%r5,__LC_THREAD_INFO		# store thread info of next
	st	%r15,__LC_KERNEL_STACK		# store end of kernel stack
	lctl	%c4,%c4,__TASK_pid(%r3)		# load pid to control reg. 4
	mvc	__LC_CURRENT_PID(4,%r0),__TASK_pid(%r3)	# store pid of next
	l	%r15,__THREAD_ksp(%r3)		# load kernel stack of next
	tm	__TI_flags+3(%r4),_TIF_MCCK_PENDING	# machine check pending?
	jz	0f
	ni	__TI_flags+3(%r4),255-_TIF_MCCK_PENDING	# clear flag in prev
	oi	__TI_flags+3(%r5),_TIF_MCCK_PENDING	# set it in next
0:	lm	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
	br	%r14

__critical_start:
/*
 * SVC interrupt handler routine. System calls are synchronous events and
 * are executed with interrupts enabled.
 */

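#
# A system call enters here through the "svc N" instruction.  The 8 bit
# immediate of svc limits directly encoded call numbers to 0-255; larger
# numbers are requested with "svc 0" and the real call number in %r1, which
# is the case checked against .Lnr_syscalls below.  The call arguments are
# passed in %r2-%r7.  Purely illustrative user-space fragment (not part of
# this file, __NR_some_syscall being a hypothetical call number >= 256):
#	lhi	%r1,__NR_some_syscall
#	svc	0
# Note that %r13 is loaded with the address of system_call so that BASED()
# can reach the literals at the end of this file.
#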
ENTRY(system_call)
	stpt	__LC_SYNC_ENTER_TIMER
sysc_stm:
	stm	%r8,%r15,__LC_SAVE_AREA_SYNC
	l	%r12,__LC_THREAD_INFO
	l	%r13,__LC_SVC_NEW_PSW+4
sysc_per:
	l	%r15,__LC_KERNEL_STACK
	la	%r11,STACK_FRAME_OVERHEAD(%r15)	# pointer to pt_regs
sysc_vtime:
	UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER
	stm	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_R8(32,%r11),__LC_SAVE_AREA_SYNC
	mvc	__PT_PSW(8,%r11),__LC_SVC_OLD_PSW
	mvc	__PT_INT_CODE(4,%r11),__LC_SVC_ILC
sysc_do_svc:
	oi	__TI_flags+3(%r12),_TIF_SYSCALL
	l	%r10,__TI_sysc_table(%r12)	# 31 bit system call table
	lh	%r8,__PT_INT_CODE+2(%r11)
	sla	%r8,2				# shift and test for svc 0
	jnz	sysc_nr_ok
	# svc 0: system call number in %r1
	cl	%r1,BASED(.Lnr_syscalls)
	jnl	sysc_nr_ok
	sth	%r1,__PT_INT_CODE+2(%r11)
	lr	%r8,%r1
	sla	%r8,2
sysc_nr_ok:
	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
	st	%r2,__PT_ORIG_GPR2(%r11)
	st	%r7,STACK_FRAME_OVERHEAD(%r15)
	l	%r9,0(%r8,%r10)			# get system call addr.
	tm	__TI_flags+2(%r12),_TIF_TRACE >> 8
	jnz	sysc_tracesys
	basr	%r14,%r9			# call sys_xxxx
	st	%r2,__PT_R2(%r11)		# store return value

sysc_return:
	LOCKDEP_SYS_EXIT
sysc_tif:
	tm	__PT_PSW+1(%r11),0x01		# returning to user ?
	jno	sysc_restore
	tm	__TI_flags+3(%r12),_TIF_WORK_SVC
	jnz	sysc_work			# check for work
	ni	__TI_flags+3(%r12),255-_TIF_SYSCALL
sysc_restore:
	mvc	__LC_RETURN_PSW(8),__PT_PSW(%r11)
	stpt	__LC_EXIT_TIMER
	lm	%r0,%r15,__PT_R0(%r11)
	lpsw	__LC_RETURN_PSW
sysc_done:

#
# One of the work bits is on. Find out which one.
#
sysc_work:
	tm	__TI_flags+3(%r12),_TIF_MCCK_PENDING
	jo	sysc_mcck_pending
	tm	__TI_flags+3(%r12),_TIF_NEED_RESCHED
	jo	sysc_reschedule
	tm	__TI_flags+3(%r12),_TIF_PER_TRAP
	jo	sysc_singlestep
	tm	__TI_flags+3(%r12),_TIF_SIGPENDING
	jo	sysc_sigpending
	tm	__TI_flags+3(%r12),_TIF_NOTIFY_RESUME
	jo	sysc_notify_resume
	j	sysc_return		# beware of critical section cleanup

#
# _TIF_NEED_RESCHED is set, call schedule
#
sysc_reschedule:
	l	%r1,BASED(.Lschedule)
	la	%r14,BASED(sysc_return)
	br	%r1			# call schedule

#
# _TIF_MCCK_PENDING is set, call handler
#
sysc_mcck_pending:
	l	%r1,BASED(.Lhandle_mcck)
	la	%r14,BASED(sysc_return)
	br	%r1			# TIF bit will be cleared by handler

#
# _TIF_SIGPENDING is set, call do_signal
#
sysc_sigpending:
	lr	%r2,%r11		# pass pointer to pt_regs
	l	%r1,BASED(.Ldo_signal)
	basr	%r14,%r1		# call do_signal
	tm	__TI_flags+3(%r12),_TIF_SYSCALL
	jno	sysc_return
	lm	%r2,%r7,__PT_R2(%r11)	# load svc arguments
	xr	%r8,%r8			# svc 0 returns -ENOSYS
	clc	__PT_INT_CODE+2(2,%r11),BASED(.Lnr_syscalls+2)
	jnl	sysc_nr_ok		# invalid svc number -> do svc 0
	lh	%r8,__PT_INT_CODE+2(%r11)	# load new svc number
	sla	%r8,2
	j	sysc_nr_ok		# restart svc

#
# _TIF_NOTIFY_RESUME is set, call do_notify_resume
#
sysc_notify_resume:
	lr	%r2,%r11		# pass pointer to pt_regs
	l	%r1,BASED(.Ldo_notify_resume)
	la	%r14,BASED(sysc_return)
	br	%r1			# call do_notify_resume

#
# _TIF_PER_TRAP is set, call do_per_trap
#
sysc_singlestep:
	ni	__TI_flags+3(%r12),255-_TIF_PER_TRAP
	lr	%r2,%r11		# pass pointer to pt_regs
	l	%r1,BASED(.Ldo_per_trap)
	la	%r14,BASED(sysc_return)
	br	%r1			# call do_per_trap

#
# call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
# and after the system call
#
sysc_tracesys:
	l	%r1,BASED(.Ltrace_enter)
	lr	%r2,%r11		# pass pointer to pt_regs
	la	%r3,0
	xr	%r0,%r0
	icm	%r0,3,__PT_INT_CODE+2(%r11)
	st	%r0,__PT_R2(%r11)
	basr	%r14,%r1		# call do_syscall_trace_enter
	cl	%r2,BASED(.Lnr_syscalls)
	jnl	sysc_tracenogo
	lr	%r8,%r2
	sll	%r8,2
	l	%r9,0(%r8,%r10)
sysc_tracego:
	lm	%r3,%r7,__PT_R3(%r11)
	st	%r7,STACK_FRAME_OVERHEAD(%r15)
	l	%r2,__PT_ORIG_GPR2(%r11)
	basr	%r14,%r9		# call sys_xxx
	st	%r2,__PT_R2(%r11)	# store return value
sysc_tracenogo:
	tm	__TI_flags+2(%r12),_TIF_TRACE >> 8
	jz	sysc_return
	l	%r1,BASED(.Ltrace_exit)
	lr	%r2,%r11		# pass pointer to pt_regs
	la	%r14,BASED(sysc_return)
	br	%r1			# call do_syscall_trace_exit

#
# a new process exits the kernel with ret_from_fork
#
ENTRY(ret_from_fork)
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	l	%r12,__LC_THREAD_INFO
	l	%r13,__LC_SVC_NEW_PSW+4
	l	%r1,BASED(.Lschedule_tail)
	basr	%r14,%r1		# call schedule_tail
	TRACE_IRQS_ON
	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
	tm	__PT_PSW+1(%r11),0x01	# forking a kernel thread ?
	jne	sysc_tracenogo
	# it's a kernel thread
	lm	%r9,%r10,__PT_R9(%r11)	# load gprs
ENTRY(kernel_thread_starter)
	la	%r2,0(%r10)
	basr	%r14,%r9
	j	sysc_tracenogo

/*
 * Program check handler routine
 */

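#
# In outline: faults from user space (problem-state old PSW) get the vtime
# update and the kernel stack, faults in kernel mode stay on the current
# stack.  A PER event that is reported while the old PSW has PER disabled is
# taken to be a single-stepped svc instruction and is redirected to
# pgm_svcper, which re-enters the system call path at sysc_per with
# _TIF_PER_TRAP set; a PER event hitting kernel code goes to do_per_trap for
# kprobes.  Everything else is dispatched through pgm_check_table.
#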
ENTRY(pgm_check_handler)
	stpt	__LC_SYNC_ENTER_TIMER
	stm	%r8,%r15,__LC_SAVE_AREA_SYNC
	l	%r12,__LC_THREAD_INFO
	l	%r13,__LC_SVC_NEW_PSW+4
	lm	%r8,%r9,__LC_PGM_OLD_PSW
	tmh	%r8,0x0001		# test problem state bit
	jnz	1f			# -> fault in user space
	tmh	%r8,0x4000		# PER bit set in old PSW ?
	jnz	0f			# -> enabled, can't be a double fault
	tm	__LC_PGM_ILC+3,0x80	# check for per exception
	jnz	pgm_svcper		# -> single stepped svc
0:	CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC
	ahi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	j	2f
1:	UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
	l	%r15,__LC_KERNEL_STACK
2:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stm	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_R8(32,%r11),__LC_SAVE_AREA_SYNC
	stm	%r8,%r9,__PT_PSW(%r11)
	mvc	__PT_INT_CODE(4,%r11),__LC_PGM_ILC
	mvc	__PT_INT_PARM_LONG(4,%r11),__LC_TRANS_EXC_CODE
	tm	__LC_PGM_ILC+3,0x80	# check for per exception
	jz	0f
	l	%r1,__TI_task(%r12)
	tmh	%r8,0x0001		# kernel per event ?
	jz	pgm_kprobe
	oi	__TI_flags+3(%r12),_TIF_PER_TRAP
	mvc	__THREAD_per_address(4,%r1),__LC_PER_ADDRESS
	mvc	__THREAD_per_cause(2,%r1),__LC_PER_CAUSE
	mvc	__THREAD_per_paid(1,%r1),__LC_PER_PAID
0:	REENABLE_IRQS
	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
	l	%r1,BASED(.Ljump_table)
	la	%r10,0x7f
	n	%r10,__PT_INT_CODE(%r11)
	je	sysc_return
	sll	%r10,2
	l	%r1,0(%r10,%r1)		# load address of handler routine
	lr	%r2,%r11		# pass pointer to pt_regs
	basr	%r14,%r1		# branch to interrupt-handler
	j	sysc_return

#
# PER event in supervisor state, must be kprobes
#
pgm_kprobe:
	REENABLE_IRQS
	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
	l	%r1,BASED(.Ldo_per_trap)
	lr	%r2,%r11		# pass pointer to pt_regs
	basr	%r14,%r1		# call do_per_trap
	j	sysc_return

#
# single stepped system call
#
pgm_svcper:
	oi	__TI_flags+3(%r12),_TIF_PER_TRAP
	mvc	__LC_RETURN_PSW(4),__LC_SVC_NEW_PSW
	mvc	__LC_RETURN_PSW+4(4),BASED(.Lsysc_per)
	lpsw	__LC_RETURN_PSW		# branch to sysc_per and enable irqs

/*
 * IO interrupt handler routine
 */

ENTRY(io_int_handler)
	stck	__LC_INT_CLOCK
	stpt	__LC_ASYNC_ENTER_TIMER
	stm	%r8,%r15,__LC_SAVE_AREA_ASYNC
	l	%r12,__LC_THREAD_INFO
	l	%r13,__LC_SVC_NEW_PSW+4
	lm	%r8,%r9,__LC_IO_OLD_PSW
	tmh	%r8,0x0001		# interrupting from user ?
	jz	io_skip
	UPDATE_VTIME %r14,%r15,__LC_ASYNC_ENTER_TIMER
io_skip:
	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
	stm	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_R8(32,%r11),__LC_SAVE_AREA_ASYNC
	stm	%r8,%r9,__PT_PSW(%r11)
	TRACE_IRQS_OFF
	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
	l	%r1,BASED(.Ldo_IRQ)
	lr	%r2,%r11		# pass pointer to pt_regs
	basr	%r14,%r1		# call do_IRQ
io_return:
	LOCKDEP_SYS_EXIT
	TRACE_IRQS_ON
io_tif:
	tm	__TI_flags+3(%r12),_TIF_WORK_INT
	jnz	io_work			# there is work to do (signals etc.)
io_restore:
	mvc	__LC_RETURN_PSW(8),__PT_PSW(%r11)
	stpt	__LC_EXIT_TIMER
	lm	%r0,%r15,__PT_R0(%r11)
	lpsw	__LC_RETURN_PSW
io_done:

#
# There is work to do, find out in which context we have been interrupted:
# 1) if we return to user space we can do all _TIF_WORK_INT work
# 2) if we return to kernel code and preemptive scheduling is enabled check
#    the preemption counter and if it is zero call preempt_schedule_irq
# Before any work can be done, a switch to the kernel stack is required.
#
io_work:
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	jo	io_work_user		# yes -> do resched & signal
#ifdef CONFIG_PREEMPT
	# check for preemptive scheduling
	icm	%r0,15,__TI_precount(%r12)
	jnz	io_restore		# preemption disabled
	tm	__TI_flags+3(%r12),_TIF_NEED_RESCHED
	jno	io_restore
	# switch to kernel stack
	l	%r1,__PT_R15(%r11)
	ahi	%r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
	xc	__SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1)
	la	%r11,STACK_FRAME_OVERHEAD(%r1)
	lr	%r15,%r1
	# TRACE_IRQS_ON already done at io_return, call
	# TRACE_IRQS_OFF to keep things symmetrical
	TRACE_IRQS_OFF
	l	%r1,BASED(.Lpreempt_irq)
	basr	%r14,%r1		# call preempt_schedule_irq
	j	io_return
#else
	j	io_restore
#endif

#
# Need to do work before returning to userspace, switch to kernel stack
#
io_work_user:
	l	%r1,__LC_KERNEL_STACK
	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
	xc	__SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1)
	la	%r11,STACK_FRAME_OVERHEAD(%r1)
	lr	%r15,%r1

#
# One of the work bits is on. Find out which one.
# Checked are: _TIF_SIGPENDING, _TIF_NOTIFY_RESUME, _TIF_NEED_RESCHED
# and _TIF_MCCK_PENDING
#
io_work_tif:
	tm	__TI_flags+3(%r12),_TIF_MCCK_PENDING
	jo	io_mcck_pending
	tm	__TI_flags+3(%r12),_TIF_NEED_RESCHED
	jo	io_reschedule
	tm	__TI_flags+3(%r12),_TIF_SIGPENDING
	jo	io_sigpending
	tm	__TI_flags+3(%r12),_TIF_NOTIFY_RESUME
	jo	io_notify_resume
	j	io_return		# beware of critical section cleanup

#
# _TIF_MCCK_PENDING is set, call handler
#
io_mcck_pending:
	# TRACE_IRQS_ON already done at io_return
	l	%r1,BASED(.Lhandle_mcck)
	basr	%r14,%r1		# TIF bit will be cleared by handler
	TRACE_IRQS_OFF
	j	io_return

#
# _TIF_NEED_RESCHED is set, call schedule
#
io_reschedule:
	# TRACE_IRQS_ON already done at io_return
	l	%r1,BASED(.Lschedule)
	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
	basr	%r14,%r1		# call scheduler
	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
	TRACE_IRQS_OFF
	j	io_return

#
# _TIF_SIGPENDING is set, call do_signal
#
io_sigpending:
	# TRACE_IRQS_ON already done at io_return
	l	%r1,BASED(.Ldo_signal)
	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
	lr	%r2,%r11		# pass pointer to pt_regs
	basr	%r14,%r1		# call do_signal
	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
	TRACE_IRQS_OFF
	j	io_return

#
# _TIF_NOTIFY_RESUME is set, call do_notify_resume
#
io_notify_resume:
	# TRACE_IRQS_ON already done at io_return
	l	%r1,BASED(.Ldo_notify_resume)
	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
	lr	%r2,%r11		# pass pointer to pt_regs
	basr	%r14,%r1		# call do_notify_resume
	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
	TRACE_IRQS_OFF
	j	io_return

/*
 * External interrupt handler routine
 */

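# The external interrupt handler mirrors the I/O interrupt handler: it saves
# the registers, switches to the asynchronous stack via SWITCH_ASYNC, passes
# the cpu address/interruption code and the external parameters to do_extint
# and leaves through the common io_return path.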
ENTRY(ext_int_handler)
	stck	__LC_INT_CLOCK
	stpt	__LC_ASYNC_ENTER_TIMER
	stm	%r8,%r15,__LC_SAVE_AREA_ASYNC
	l	%r12,__LC_THREAD_INFO
	l	%r13,__LC_SVC_NEW_PSW+4
	lm	%r8,%r9,__LC_EXT_OLD_PSW
	tmh	%r8,0x0001		# interrupting from user ?
	jz	ext_skip
	UPDATE_VTIME %r14,%r15,__LC_ASYNC_ENTER_TIMER
ext_skip:
	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
	stm	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_R8(32,%r11),__LC_SAVE_AREA_ASYNC
	stm	%r8,%r9,__PT_PSW(%r11)
	TRACE_IRQS_OFF
	lr	%r2,%r11		# pass pointer to pt_regs
	l	%r3,__LC_EXT_CPU_ADDR	# get cpu address + interruption code
	l	%r4,__LC_EXT_PARAMS	# get external parameters
	l	%r1,BASED(.Ldo_extint)
	basr	%r14,%r1		# call do_extint
	j	io_return

/*
 * Load idle PSW. The second "half" of this function is in cleanup_idle.
 */
ENTRY(psw_idle)
	st	%r3,__SF_EMPTY(%r15)
	basr	%r1,0
	la	%r1,psw_idle_lpsw+4-.(%r1)
	st	%r1,__SF_EMPTY+4(%r15)
	oi	__SF_EMPTY+4(%r15),0x80
	stck	__CLOCK_IDLE_ENTER(%r2)
	stpt	__TIMER_IDLE_ENTER(%r2)
psw_idle_lpsw:
	lpsw	__SF_EMPTY(%r15)
	br	%r14
psw_idle_end:

__critical_end:

/*
 * Machine check handler routines
 */

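# Rough outline: the handler first revalidates the cpu timer and the general
# purpose registers from the save areas.  If the machine check did not store
# a valid cpu timer value, the smallest (i.e. most recently set, since the
# cpu timer counts down) of the sync/async enter, exit and last-update
# timers is used as a replacement.  If the machine check code says the old
# PSW is not usable the handler branches to mcck_panic, which forces the
# panic stack; otherwise it builds a pt_regs area like the other
# asynchronous handlers and calls s390_do_machine_check.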
ENTRY(mcck_int_handler)
	stck	__LC_MCCK_CLOCK
	spt	__LC_CPU_TIMER_SAVE_AREA	# revalidate cpu timer
	lm	%r0,%r15,__LC_GPREGS_SAVE_AREA	# revalidate gprs
	l	%r12,__LC_THREAD_INFO
	l	%r13,__LC_SVC_NEW_PSW+4
	lm	%r8,%r9,__LC_MCK_OLD_PSW
	tm	__LC_MCCK_CODE,0x80	# system damage?
	jo	mcck_panic		# yes -> rest of mcck code invalid
	la	%r14,__LC_CPU_TIMER_SAVE_AREA
	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
	tm	__LC_MCCK_CODE+5,0x02	# stored cpu timer value valid?
	jo	3f
	la	%r14,__LC_SYNC_ENTER_TIMER
	clc	0(8,%r14),__LC_ASYNC_ENTER_TIMER
	jl	0f
	la	%r14,__LC_ASYNC_ENTER_TIMER
0:	clc	0(8,%r14),__LC_EXIT_TIMER
	jl	1f
	la	%r14,__LC_EXIT_TIMER
1:	clc	0(8,%r14),__LC_LAST_UPDATE_TIMER
	jl	2f
	la	%r14,__LC_LAST_UPDATE_TIMER
2:	spt	0(%r14)
	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
3:	tm	__LC_MCCK_CODE+2,0x09	# mwp + ia of old psw valid?
	jno	mcck_panic		# no -> skip cleanup critical
	tmh	%r8,0x0001		# interrupting from user ?
	jz	mcck_skip
	UPDATE_VTIME %r14,%r15,__LC_MCCK_ENTER_TIMER
mcck_skip:
	SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+32,__LC_PANIC_STACK,PAGE_SHIFT
	stm	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_R8(32,%r11),__LC_GPREGS_SAVE_AREA+32
	stm	%r8,%r9,__PT_PSW(%r11)
	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
	l	%r1,BASED(.Ldo_machine_check)
	lr	%r2,%r11		# pass pointer to pt_regs
	basr	%r14,%r1		# call s390_do_machine_check
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	jno	mcck_return
	l	%r1,__LC_KERNEL_STACK	# switch to kernel stack
	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
	xc	__SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1)
	la	%r11,STACK_FRAME_OVERHEAD(%r1)
	lr	%r15,%r1
	ssm	__LC_PGM_NEW_PSW	# turn dat on, keep irqs off
	tm	__TI_flags+3(%r12),_TIF_MCCK_PENDING
	jno	mcck_return
	TRACE_IRQS_OFF
	l	%r1,BASED(.Lhandle_mcck)
	basr	%r14,%r1		# call s390_handle_mcck
	TRACE_IRQS_ON
mcck_return:
	mvc	__LC_RETURN_MCCK_PSW(8),__PT_PSW(%r11)	# move return PSW
	tm	__LC_RETURN_MCCK_PSW+1,0x01	# returning to user ?
	jno	0f
	lm	%r0,%r15,__PT_R0(%r11)
	stpt	__LC_EXIT_TIMER
	lpsw	__LC_RETURN_MCCK_PSW
0:	lm	%r0,%r15,__PT_R0(%r11)
	lpsw	__LC_RETURN_MCCK_PSW

mcck_panic:
	l	%r14,__LC_PANIC_STACK
	slr	%r14,%r15
	sra	%r14,PAGE_SHIFT
	jz	0f
	l	%r15,__LC_PANIC_STACK
	j	mcck_skip
0:	ahi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	j	mcck_skip

#
# PSW restart interrupt handler
#
ENTRY(restart_int_handler)
	st	%r15,__LC_SAVE_AREA_RESTART
	l	%r15,__LC_RESTART_STACK
	ahi	%r15,-__PT_SIZE			# create pt_regs on stack
	xc	0(__PT_SIZE,%r15),0(%r15)
	stm	%r0,%r14,__PT_R0(%r15)
	mvc	__PT_R15(4,%r15),__LC_SAVE_AREA_RESTART
	mvc	__PT_PSW(8,%r15),__LC_RST_OLD_PSW # store restart old psw
	ahi	%r15,-STACK_FRAME_OVERHEAD	# create stack frame on stack
	xc	0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
	l	%r1,__LC_RESTART_FN		# load fn, parm & source cpu
	l	%r2,__LC_RESTART_DATA
	l	%r3,__LC_RESTART_SOURCE
	ltr	%r3,%r3				# test source cpu address
	jm	1f				# negative -> skip source stop
0:	sigp	%r4,%r3,SIGP_SENSE		# sigp sense to source cpu
	brc	10,0b				# wait for status stored
1:	basr	%r14,%r1			# call function
	stap	__SF_EMPTY(%r15)		# store cpu address
	lh	%r3,__SF_EMPTY(%r15)
2:	sigp	%r4,%r3,SIGP_STOP		# sigp stop to current cpu
	brc	2,2b
3:	j	3b

	.section .kprobes.text, "ax"

#ifdef CONFIG_CHECK_STACK
/*
 * The synchronous or the asynchronous stack overflowed. We are dead.
 * No need to properly save the registers, we are going to panic anyway.
 * Setup a pt_regs so that show_trace can provide a good call trace.
 */
stack_overflow:
	l	%r15,__LC_PANIC_STACK	# change to panic stack
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stm	%r0,%r7,__PT_R0(%r11)
	stm	%r8,%r9,__PT_PSW(%r11)
	mvc	__PT_R8(32,%r11),0(%r14)
	l	%r1,BASED(1f)
	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
	lr	%r2,%r11		# pass pointer to pt_regs
	br	%r1			# branch to kernel_stack_overflow
1:	.long	kernel_stack_overflow
#endif

cleanup_table:
	.long	system_call + 0x80000000
	.long	sysc_do_svc + 0x80000000
	.long	sysc_tif + 0x80000000
	.long	sysc_restore + 0x80000000
	.long	sysc_done + 0x80000000
	.long	io_tif + 0x80000000
	.long	io_restore + 0x80000000
	.long	io_done + 0x80000000
	.long	psw_idle + 0x80000000
	.long	psw_idle_end + 0x80000000

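#
# cleanup_critical is called by SWITCH_ASYNC if an interrupt hit the critical
# section between __critical_start and __critical_end.  %r9 holds the address
# of the interrupted instruction; it is compared against the ranges listed in
# cleanup_table above, and the matching cleanup_* routine roughly completes
# or rewinds the interrupted code sequence so that the interrupt handler
# finds a consistent state and a usable resume address in %r9.
#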
cleanup_critical:
	cl	%r9,BASED(cleanup_table)	# system_call
	jl	0f
	cl	%r9,BASED(cleanup_table+4)	# sysc_do_svc
	jl	cleanup_system_call
	cl	%r9,BASED(cleanup_table+8)	# sysc_tif
	jl	0f
	cl	%r9,BASED(cleanup_table+12)	# sysc_restore
	jl	cleanup_sysc_tif
	cl	%r9,BASED(cleanup_table+16)	# sysc_done
	jl	cleanup_sysc_restore
	cl	%r9,BASED(cleanup_table+20)	# io_tif
	jl	0f
	cl	%r9,BASED(cleanup_table+24)	# io_restore
	jl	cleanup_io_tif
	cl	%r9,BASED(cleanup_table+28)	# io_done
	jl	cleanup_io_restore
	cl	%r9,BASED(cleanup_table+32)	# psw_idle
	jl	0f
	cl	%r9,BASED(cleanup_table+36)	# psw_idle_end
	jl	cleanup_idle
0:	br	%r14

cleanup_system_call:
	# check if stpt has been executed
	cl	%r9,BASED(cleanup_system_call_insn)
	jh	0f
	mvc	__LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
	chi	%r11,__LC_SAVE_AREA_ASYNC
	je	0f
	mvc	__LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER
0:	# check if stm has been executed
	cl	%r9,BASED(cleanup_system_call_insn+4)
	jh	0f
	mvc	__LC_SAVE_AREA_SYNC(32),0(%r11)
0:	# set up saved registers r12 and r13
	st	%r12,16(%r11)		# r12 thread-info pointer
	st	%r13,20(%r11)		# r13 literal-pool pointer
	# check if the user time calculation has been done
	cl	%r9,BASED(cleanup_system_call_insn+8)
	jh	0f
	l	%r10,__LC_EXIT_TIMER
	l	%r15,__LC_EXIT_TIMER+4
	SUB64	%r10,%r15,__LC_SYNC_ENTER_TIMER
	ADD64	%r10,%r15,__LC_USER_TIMER
	st	%r10,__LC_USER_TIMER
	st	%r15,__LC_USER_TIMER+4
0:	# check if the system time calculation has been done
	cl	%r9,BASED(cleanup_system_call_insn+12)
	jh	0f
	l	%r10,__LC_LAST_UPDATE_TIMER
	l	%r15,__LC_LAST_UPDATE_TIMER+4
	SUB64	%r10,%r15,__LC_EXIT_TIMER
	ADD64	%r10,%r15,__LC_SYSTEM_TIMER
	st	%r10,__LC_SYSTEM_TIMER
	st	%r15,__LC_SYSTEM_TIMER+4
0:	# update accounting time stamp
	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
	# set up saved register 11
	l	%r15,__LC_KERNEL_STACK
	la	%r9,STACK_FRAME_OVERHEAD(%r15)
	st	%r9,12(%r11)		# r11 pt_regs pointer
	# fill pt_regs
	mvc	__PT_R8(32,%r9),__LC_SAVE_AREA_SYNC
	stm	%r0,%r7,__PT_R0(%r9)
	mvc	__PT_PSW(8,%r9),__LC_SVC_OLD_PSW
	mvc	__PT_INT_CODE(4,%r9),__LC_SVC_ILC
	# set up saved register 15
	st	%r15,28(%r11)		# r15 stack pointer
	# set new psw address and exit
	l	%r9,BASED(cleanup_table+4)	# sysc_do_svc + 0x80000000
	br	%r14
cleanup_system_call_insn:
	.long	system_call + 0x80000000
	.long	sysc_stm + 0x80000000
	.long	sysc_vtime + 0x80000000 + 36
	.long	sysc_vtime + 0x80000000 + 76

cleanup_sysc_tif:
	l	%r9,BASED(cleanup_table+8)	# sysc_tif + 0x80000000
	br	%r14

cleanup_sysc_restore:
	cl	%r9,BASED(cleanup_sysc_restore_insn)
	jhe	0f
	l	%r9,12(%r11)		# get saved pointer to pt_regs
	mvc	__LC_RETURN_PSW(8),__PT_PSW(%r9)
	mvc	0(32,%r11),__PT_R8(%r9)
	lm	%r0,%r7,__PT_R0(%r9)
0:	lm	%r8,%r9,__LC_RETURN_PSW
	br	%r14
cleanup_sysc_restore_insn:
	.long	sysc_done - 4 + 0x80000000

cleanup_io_tif:
	l	%r9,BASED(cleanup_table+20)	# io_tif + 0x80000000
	br	%r14

cleanup_io_restore:
	cl	%r9,BASED(cleanup_io_restore_insn)
	jhe	0f
	l	%r9,12(%r11)		# get saved r11 pointer to pt_regs
	mvc	__LC_RETURN_PSW(8),__PT_PSW(%r9)
	mvc	0(32,%r11),__PT_R8(%r9)
	lm	%r0,%r7,__PT_R0(%r9)
0:	lm	%r8,%r9,__LC_RETURN_PSW
	br	%r14
cleanup_io_restore_insn:
	.long	io_done - 4 + 0x80000000

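#
# cleanup_idle is the second "half" of psw_idle: if an interrupt arrives
# before or while the wait PSW is loaded, it copies the interrupt clock and
# the cpu timer into the idle exit fields, accounts the elapsed time to the
# steal and system timers, refreshes the last-update time stamps and clears
# the wait bit of the return PSW so that execution resumes after psw_idle.
#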
cleanup_idle:
	# copy interrupt clock & cpu timer
	mvc	__CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK
	mvc	__TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER
	chi	%r11,__LC_SAVE_AREA_ASYNC
	je	0f
	mvc	__CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
	mvc	__TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER
0:	# check if stck has been executed
	cl	%r9,BASED(cleanup_idle_insn)
	jhe	1f
	mvc	__CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2)
	mvc	__TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2)
1:	# account system time going idle
	lm	%r9,%r10,__LC_STEAL_TIMER
	ADD64	%r9,%r10,__CLOCK_IDLE_ENTER(%r2)
	SUB64	%r9,%r10,__LC_LAST_UPDATE_CLOCK
	stm	%r9,%r10,__LC_STEAL_TIMER
	mvc	__LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2)
	lm	%r9,%r10,__LC_SYSTEM_TIMER
	ADD64	%r9,%r10,__LC_LAST_UPDATE_TIMER
	SUB64	%r9,%r10,__TIMER_IDLE_ENTER(%r2)
	stm	%r9,%r10,__LC_SYSTEM_TIMER
	mvc	__LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2)
	# prepare return psw
	n	%r8,BASED(cleanup_idle_wait)	# clear wait state bit
	l	%r9,24(%r11)			# return from psw_idle
	br	%r14
cleanup_idle_insn:
	.long	psw_idle_lpsw + 0x80000000
cleanup_idle_wait:
	.long	0xfffdffff

/*
 * Integer constants
 */
	.align	4
.Lnr_syscalls:
	.long	NR_syscalls
.Lvtimer_max:
	.quad	0x7fffffffffffffff

/*
 * Symbol constants
 */
.Ldo_machine_check:	.long	s390_do_machine_check
.Lhandle_mcck:		.long	s390_handle_mcck
.Ldo_IRQ:		.long	do_IRQ
.Ldo_extint:		.long	do_extint
.Ldo_signal:		.long	do_signal
.Ldo_notify_resume:	.long	do_notify_resume
.Ldo_per_trap:		.long	do_per_trap
.Ljump_table:		.long	pgm_check_table
.Lschedule:		.long	schedule
#ifdef CONFIG_PREEMPT
.Lpreempt_irq:		.long	preempt_schedule_irq
#endif
.Ltrace_enter:		.long	do_syscall_trace_enter
.Ltrace_exit:		.long	do_syscall_trace_exit
.Lschedule_tail:	.long	schedule_tail
.Lsysc_per:		.long	sysc_per + 0x80000000
#ifdef CONFIG_TRACE_IRQFLAGS
.Lhardirqs_on:		.long	trace_hardirqs_on_caller
.Lhardirqs_off:		.long	trace_hardirqs_off_caller
#endif
#ifdef CONFIG_LOCKDEP
.Llockdep_sys_exit:	.long	lockdep_sys_exit
#endif
.Lcritical_start:	.long	__critical_start + 0x80000000
.Lcritical_length:	.long	__critical_end - __critical_start

	.section .rodata, "a"
#define SYSCALL(esa,esame,emu)	.long esa
	.globl	sys_call_table
sys_call_table:
#include "syscalls.S"
#undef SYSCALL