/*
 *  arch/s390/kernel/entry.S
 *    S390 low-level entry points.
 *
 *    Copyright (C) IBM Corp. 1999,2006
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *		 Hartmut Penner (hp@de.ibm.com),
 *		 Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *		 Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/page.h>

/*
 * Stack layout for the system_call stack entry.
 * The first few entries are identical to the user_regs_struct.
 */
SP_PTREGS    =	STACK_FRAME_OVERHEAD
SP_ARGS      =	STACK_FRAME_OVERHEAD + __PT_ARGS
SP_PSW	     =	STACK_FRAME_OVERHEAD + __PT_PSW
SP_R0	     =	STACK_FRAME_OVERHEAD + __PT_GPRS
SP_R1	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 4
SP_R2	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 8
SP_R3	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 12
SP_R4	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 16
SP_R5	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 20
SP_R6	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 24
SP_R7	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 28
SP_R8	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 32
SP_R9	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 36
SP_R10	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 40
SP_R11	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 44
SP_R12	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 48
SP_R13	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 52
SP_R14	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 56
SP_R15	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 60
SP_ORIG_R2   =	STACK_FRAME_OVERHEAD + __PT_ORIG_GPR2
SP_ILC	     =	STACK_FRAME_OVERHEAD + __PT_ILC
SP_SVCNR     =	STACK_FRAME_OVERHEAD + __PT_SVCNR
SP_SIZE      =	STACK_FRAME_OVERHEAD + __PT_SIZE

_TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
		 _TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_PER_TRAP )
_TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
		 _TIF_MCCK_PENDING)
_TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
		_TIF_SECCOMP>>8 | _TIF_SYSCALL_TRACEPOINT>>8)

STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
STACK_SIZE  = 1 << STACK_SHIFT

#define BASED(name) name-system_call(%r13)
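
/*
 * %r13 is used as the literal pool pointer throughout this file: the
 * entry paths load the address of system_call into %r13 (taken from
 * __LC_SVC_NEW_PSW+4) and BASED(x) then turns the symbol x into a
 * base+displacement operand relative to it, e.g.
 *	l	%r1,BASED(.Lschedule)
 * picks up the address of schedule from the literal pool at the end of
 * this file. This only works as long as the referenced symbols stay
 * within the 4K displacement range of system_call.
 */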

#ifdef CONFIG_TRACE_IRQFLAGS
	.macro	TRACE_IRQS_ON
	basr	%r2,%r0
	l	%r1,BASED(.Ltrace_irq_on_caller)
	basr	%r14,%r1
	.endm

	.macro	TRACE_IRQS_OFF
	basr	%r2,%r0
	l	%r1,BASED(.Ltrace_irq_off_caller)
	basr	%r14,%r1
	.endm
#else
#define TRACE_IRQS_ON
#define TRACE_IRQS_OFF
#endif

#ifdef CONFIG_LOCKDEP
	.macro	LOCKDEP_SYS_EXIT
	tm	SP_PSW+1(%r15),0x01	# returning to user ?
	jz	0f
	l	%r1,BASED(.Llockdep_sys_exit)
	basr	%r14,%r1
0:
	.endm
#else
#define LOCKDEP_SYS_EXIT
#endif

/*
 * Register usage in interrupt handlers:
 *    R9  - pointer to current task structure
 *    R13 - pointer to literal pool
 *    R14 - return register for function calls
 *    R15 - kernel stack pointer
 */
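
/*
 * UPDATE_VTIME adds the difference of two 64 bit lowcore timer values
 * (\lc_from - \lc_to) to the 64 bit accumulator \lc_sum using 32 bit
 * register pairs: "bc 3" skips the borrow correction if the logical
 * subtract of the low words did not borrow, "bc 12" skips the carry
 * correction if the logical add of the low words did not carry.
 */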
	.macro	UPDATE_VTIME lc_from,lc_to,lc_sum
	lm	%r10,%r11,\lc_from
	sl	%r10,\lc_to
	sl	%r11,\lc_to+4
	bc	3,BASED(0f)
	sl	%r10,BASED(.Lc_1)
0:	al	%r10,\lc_sum
	al	%r11,\lc_sum+4
	bc	12,BASED(1f)
	al	%r10,BASED(.Lc_1)
1:	stm	%r10,%r11,\lc_sum
	.endm

	.macro	SAVE_ALL_SVC psworg,savearea
	stm	%r12,%r15,\savearea
	l	%r13,__LC_SVC_NEW_PSW+4	# load &system_call to %r13
	l	%r15,__LC_KERNEL_STACK	# problem state -> load ksp
	s	%r15,BASED(.Lc_spsize)	# make room for registers & psw
	.endm

	.macro	SAVE_ALL_BASE savearea
	stm	%r12,%r15,\savearea
	l	%r13,__LC_SVC_NEW_PSW+4	# load &system_call to %r13
	.endm

	.macro	SAVE_ALL_PGM psworg,savearea
	tm	\psworg+1,0x01		# test problem state bit
#ifdef CONFIG_CHECK_STACK
	bnz	BASED(1f)
	tml	%r15,STACK_SIZE - CONFIG_STACK_GUARD
	bnz	BASED(2f)
	la	%r12,\psworg
	b	BASED(stack_overflow)
#else
	bz	BASED(2f)
#endif
1:	l	%r15,__LC_KERNEL_STACK	# problem state -> load ksp
2:	s	%r15,BASED(.Lc_spsize)	# make room for registers & psw
	.endm

	.macro	SAVE_ALL_ASYNC psworg,savearea
	stm	%r12,%r15,\savearea
	l	%r13,__LC_SVC_NEW_PSW+4	# load &system_call to %r13
	la	%r12,\psworg
	tm	\psworg+1,0x01		# test problem state bit
	bnz	BASED(1f)		# from user -> load async stack
	clc	\psworg+4(4),BASED(.Lcritical_end)
	bhe	BASED(0f)
	clc	\psworg+4(4),BASED(.Lcritical_start)
	bl	BASED(0f)
	l	%r14,BASED(.Lcleanup_critical)
	basr	%r14,%r14
	tm	1(%r12),0x01		# retest problem state after cleanup
	bnz	BASED(1f)
0:	l	%r14,__LC_ASYNC_STACK	# are we already on the async stack ?
	slr	%r14,%r15
	sra	%r14,STACK_SHIFT
#ifdef CONFIG_CHECK_STACK
	bnz	BASED(1f)
	tml	%r15,STACK_SIZE - CONFIG_STACK_GUARD
	bnz	BASED(2f)
	b	BASED(stack_overflow)
#else
	bz	BASED(2f)
#endif
1:	l	%r15,__LC_ASYNC_STACK
2:	s	%r15,BASED(.Lc_spsize)	# make room for registers & psw
	.endm

	.macro	CREATE_STACK_FRAME savearea
	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
	st	%r2,SP_ORIG_R2(%r15)	# store original content of gpr 2
	mvc	SP_R12(16,%r15),\savearea # move %r12-%r15 to stack
	stm	%r0,%r11,SP_R0(%r15)	# store gprs %r0-%r11 to kernel stack
	.endm

	.macro	RESTORE_ALL psworg,sync
	mvc	\psworg(8),SP_PSW(%r15)	# move user PSW to lowcore
	.if !\sync
	ni	\psworg+1,0xfd		# clear wait state bit
	.endif
	lm	%r0,%r15,SP_R0(%r15)	# load gprs 0-15 of user
	stpt	__LC_EXIT_TIMER
	lpsw	\psworg			# back to caller
	.endm

	.macro	REENABLE_IRQS
	mvc	__SF_EMPTY(1,%r15),SP_PSW(%r15)
	ni	__SF_EMPTY(%r15),0xbf
	ssm	__SF_EMPTY(%r15)
	.endm

	.section .kprobes.text, "ax"

/*
 * Scheduler resume function, called by switch_to
 *  gpr2 = (task_struct *) prev
 *  gpr3 = (task_struct *) next
 * Returns:
 *  gpr2 = prev
 */

ENTRY(__switch_to)
	basr	%r1,0
0:	l	%r4,__THREAD_info(%r2)		# get thread_info of prev
	l	%r5,__THREAD_info(%r3)		# get thread_info of next
	tm	__TI_flags+3(%r4),_TIF_MCCK_PENDING # machine check pending?
	bz	1f-0b(%r1)
	ni	__TI_flags+3(%r4),255-_TIF_MCCK_PENDING	# clear flag in prev
	oi	__TI_flags+3(%r5),_TIF_MCCK_PENDING	# set it in next
1:	stm	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
	st	%r15,__THREAD_ksp(%r2)		# store kernel stack of prev
	l	%r15,__THREAD_ksp(%r3)		# load kernel stack of next
	lctl	%c4,%c4,__TASK_pid(%r3)		# load pid to control reg. 4
	lm	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
	st	%r3,__LC_CURRENT		# store task struct of next
	mvc	__LC_CURRENT_PID(4,%r0),__TASK_pid(%r3)	# store pid of next
	st	%r5,__LC_THREAD_INFO		# store thread info of next
	ahi	%r5,STACK_SIZE			# end of kernel stack of next
	st	%r5,__LC_KERNEL_STACK		# store end of kernel stack
	br	%r14

__critical_start:
/*
 * SVC interrupt handler routine. System calls are synchronous events and
 * are executed with interrupts enabled.
 */

ENTRY(system_call)
	stpt	__LC_SYNC_ENTER_TIMER
sysc_saveall:
	SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
	CREATE_STACK_FRAME __LC_SAVE_AREA
	mvc	SP_PSW(8,%r15),__LC_SVC_OLD_PSW
	mvc	SP_ILC(4,%r15),__LC_SVC_ILC
	l	%r12,__LC_THREAD_INFO	# load pointer to thread_info struct
sysc_vtime:
	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
sysc_stime:
	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
sysc_update:
	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
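
#
# The svc number sits in the halfword at SP_SVCNR (copied from the SVC
# interruption code). svc 0 passes the real number in %r1 instead, which
# allows system call numbers beyond the 8 bit immediate of the svc
# instruction. The number is scaled by 4 to index sys_call_table; if one
# of the _TIF_SYSCALL tracing/audit/seccomp bits is set the call is
# routed through sysc_tracesys instead.
#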
sysc_do_svc:
	xr	%r7,%r7
	icm	%r7,3,SP_SVCNR(%r15)	# load svc number and test for svc 0
	bnz	BASED(sysc_nr_ok)	# svc number > 0
	# svc 0: system call number in %r1
	cl	%r1,BASED(.Lnr_syscalls)
	bnl	BASED(sysc_nr_ok)
	sth	%r1,SP_SVCNR(%r15)
	lr	%r7,%r1			# copy svc number to %r7
sysc_nr_ok:
	sll	%r7,2			# svc number *4
	l	%r10,BASED(.Lsysc_table)
	tm	__TI_flags+2(%r12),_TIF_SYSCALL
	mvc	SP_ARGS(4,%r15),SP_R7(%r15)
	l	%r8,0(%r7,%r10)		# get system call addr.
	bnz	BASED(sysc_tracesys)
	basr	%r14,%r8		# call sys_xxxx
	st	%r2,SP_R2(%r15)		# store return value (change R2 on stack)

sysc_return:
	LOCKDEP_SYS_EXIT
sysc_tif:
	tm	__TI_flags+3(%r12),_TIF_WORK_SVC
	bnz	BASED(sysc_work)	# there is work to do (signals etc.)
sysc_restore:
	RESTORE_ALL __LC_RETURN_PSW,1
sysc_done:

#
# There is work to do, but first we need to check if we return to userspace.
#
sysc_work:
	tm	SP_PSW+1(%r15),0x01	# returning to user ?
	bno	BASED(sysc_restore)

#
# One of the work bits is on. Find out which one.
#
sysc_work_tif:
	tm	__TI_flags+3(%r12),_TIF_MCCK_PENDING
	bo	BASED(sysc_mcck_pending)
	tm	__TI_flags+3(%r12),_TIF_NEED_RESCHED
	bo	BASED(sysc_reschedule)
	tm	__TI_flags+3(%r12),_TIF_SIGPENDING
	bo	BASED(sysc_sigpending)
	tm	__TI_flags+3(%r12),_TIF_NOTIFY_RESUME
	bo	BASED(sysc_notify_resume)
	tm	__TI_flags+3(%r12),_TIF_RESTART_SVC
	bo	BASED(sysc_restart)
	tm	__TI_flags+3(%r12),_TIF_PER_TRAP
	bo	BASED(sysc_singlestep)
	b	BASED(sysc_return)	# beware of critical section cleanup

#
# _TIF_NEED_RESCHED is set, call schedule
#
sysc_reschedule:
	l	%r1,BASED(.Lschedule)
	la	%r14,BASED(sysc_return)
	br	%r1			# call scheduler

#
# _TIF_MCCK_PENDING is set, call handler
#
sysc_mcck_pending:
	l	%r1,BASED(.Ls390_handle_mcck)
	la	%r14,BASED(sysc_return)
	br	%r1			# TIF bit will be cleared by handler

#
# _TIF_SIGPENDING is set, call do_signal
#
sysc_sigpending:
	ni	__TI_flags+3(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP
	la	%r2,SP_PTREGS(%r15)	# load pt_regs
	l	%r1,BASED(.Ldo_signal)
	basr	%r14,%r1		# call do_signal
	tm	__TI_flags+3(%r12),_TIF_RESTART_SVC
	bo	BASED(sysc_restart)
	tm	__TI_flags+3(%r12),_TIF_PER_TRAP
	bo	BASED(sysc_singlestep)
	b	BASED(sysc_return)

#
# _TIF_NOTIFY_RESUME is set, call do_notify_resume
#
sysc_notify_resume:
	la	%r2,SP_PTREGS(%r15)	# load pt_regs
	l	%r1,BASED(.Ldo_notify_resume)
	la	%r14,BASED(sysc_return)
	br	%r1			# call do_notify_resume

#
# _TIF_RESTART_SVC is set, set up registers and restart svc
#
sysc_restart:
	ni	__TI_flags+3(%r12),255-_TIF_RESTART_SVC # clear TIF_RESTART_SVC
	l	%r7,SP_R2(%r15)		# load new svc number
	mvc	SP_R2(4,%r15),SP_ORIG_R2(%r15) # restore first argument
	lm	%r2,%r6,SP_R2(%r15)	# load svc arguments
	sth	%r7,SP_SVCNR(%r15)
	b	BASED(sysc_nr_ok)	# restart svc

#
# _TIF_PER_TRAP is set, call do_per_trap
#
sysc_singlestep:
	ni	__TI_flags+3(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP
	xc	SP_SVCNR(2,%r15),SP_SVCNR(%r15)	# clear svc number
	la	%r2,SP_PTREGS(%r15)	# address of register-save area
	l	%r1,BASED(.Lhandle_per)	# load adr. of per handler
	la	%r14,BASED(sysc_return)	# load adr. of system return
	br	%r1			# branch to do_per_trap

#
# call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
# and after the system call
#
sysc_tracesys:
	l	%r1,BASED(.Ltrace_entry)
	la	%r2,SP_PTREGS(%r15)	# load pt_regs
	la	%r3,0
	xr	%r0,%r0
	icm	%r0,3,SP_SVCNR(%r15)
	st	%r0,SP_R2(%r15)
	basr	%r14,%r1
	cl	%r2,BASED(.Lnr_syscalls)
	bnl	BASED(sysc_tracenogo)
	lr	%r7,%r2
	sll	%r7,2			# svc number *4
	l	%r8,0(%r7,%r10)
sysc_tracego:
	lm	%r3,%r6,SP_R3(%r15)
	mvc	SP_ARGS(4,%r15),SP_R7(%r15)
	l	%r2,SP_ORIG_R2(%r15)
	basr	%r14,%r8		# call sys_xxx
	st	%r2,SP_R2(%r15)		# store return value
sysc_tracenogo:
	tm	__TI_flags+2(%r12),_TIF_SYSCALL
	bz	BASED(sysc_return)
	l	%r1,BASED(.Ltrace_exit)
	la	%r2,SP_PTREGS(%r15)	# load pt_regs
	la	%r14,BASED(sysc_return)
	br	%r1

#
# a new process exits the kernel with ret_from_fork
#
ENTRY(ret_from_fork)
	l	%r13,__LC_SVC_NEW_PSW+4
	l	%r12,__LC_THREAD_INFO	# load pointer to thread_info struct
	tm	SP_PSW+1(%r15),0x01	# forking a kernel thread ?
	bo	BASED(0f)
	st	%r15,SP_R15(%r15)	# store stack pointer for new kthread
0:	l	%r1,BASED(.Lschedtail)
	basr	%r14,%r1
	TRACE_IRQS_ON
	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
	b	BASED(sysc_tracenogo)

#
# kernel_execve function needs to deal with pt_regs that is not
# at the usual place
#
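#
# kernel_execve builds a pt_regs area in its own stack frame, calls
# do_execve with it and, only if that succeeded, disables interrupts,
# switches to the kernel stack, copies the pt_regs there and leaves
# through execve_tail and the normal sysc_return path.
#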
ENTRY(kernel_execve)
	stm	%r12,%r15,48(%r15)
	lr	%r14,%r15
	l	%r13,__LC_SVC_NEW_PSW+4
	s	%r15,BASED(.Lc_spsize)
	st	%r14,__SF_BACKCHAIN(%r15)
	la	%r12,SP_PTREGS(%r15)
	xc	0(__PT_SIZE,%r12),0(%r12)
	l	%r1,BASED(.Ldo_execve)
	lr	%r5,%r12
	basr	%r14,%r1
	ltr	%r2,%r2
	be	BASED(0f)
	a	%r15,BASED(.Lc_spsize)
	lm	%r12,%r15,48(%r15)
	br	%r14
	# execve succeeded.
0:	stnsm	__SF_EMPTY(%r15),0xfc	# disable interrupts
	l	%r15,__LC_KERNEL_STACK	# load ksp
	s	%r15,BASED(.Lc_spsize)	# make room for registers & psw
	mvc	SP_PTREGS(__PT_SIZE,%r15),0(%r12)	# copy pt_regs
	l	%r12,__LC_THREAD_INFO
	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
	l	%r1,BASED(.Lexecve_tail)
	basr	%r14,%r1
	b	BASED(sysc_return)

/*
 * Program check handler routine
 */

ENTRY(pgm_check_handler)
/*
 * First we need to check for a special case:
 * Single stepping an instruction that disables the PER event mask will
 * cause a PER event AFTER the mask has been set. Example: SVC or LPSW.
 * For a single stepped SVC the program check handler gets control after
 * the SVC new PSW has been loaded. But we want to execute the SVC first and
 * then handle the PER event. Therefore we update the SVC old PSW to point
 * to the pgm_check_handler and branch to the SVC handler after we checked
 * if we have to load the kernel stack register.
 * For every other possible cause for PER event without the PER mask set
 * we just ignore the PER event (FIXME: is there anything we have to do
 * for LPSW?).
 */
	stpt	__LC_SYNC_ENTER_TIMER
	SAVE_ALL_BASE __LC_SAVE_AREA
	tm	__LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception
	bnz	BASED(pgm_per)		# got per exception -> special case
	SAVE_ALL_PGM __LC_PGM_OLD_PSW,__LC_SAVE_AREA
	CREATE_STACK_FRAME __LC_SAVE_AREA
	xc	SP_ILC(4,%r15),SP_ILC(%r15)
	mvc	SP_PSW(8,%r15),__LC_PGM_OLD_PSW
	l	%r12,__LC_THREAD_INFO	# load pointer to thread_info struct
	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
	bz	BASED(pgm_no_vtime)
	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
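
#
# The lower 7 bits of the program interruption code select the entry in
# pgm_check_table (loaded via .Ljump_table); each entry is a 4 byte
# pointer, hence the shift by 2. The handler is called with the pt_regs
# address in %r2, the interruption code in %r3 and the translation
# exception code in %r4.
#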
pgm_no_vtime:
	l	%r3,__LC_PGM_ILC	# load program interruption code
	l	%r4,__LC_TRANS_EXC_CODE
	REENABLE_IRQS
	la	%r8,0x7f
	nr	%r8,%r3
	sll	%r8,2
	l	%r1,BASED(.Ljump_table)
	l	%r1,0(%r8,%r1)		# load address of handler routine
	la	%r2,SP_PTREGS(%r15)	# address of register-save area
	basr	%r14,%r1		# branch to interrupt-handler
pgm_exit:
	b	BASED(sysc_return)

#
# handle per exception
#
pgm_per:
	tm	__LC_PGM_OLD_PSW,0x40	# test if per event recording is on
	bnz	BASED(pgm_per_std)	# ok, normal per event from user space
# ok, it's one of the special cases, now we need to find out which one
	clc	__LC_PGM_OLD_PSW(8),__LC_SVC_NEW_PSW
	be	BASED(pgm_svcper)
# no interesting special case, ignore PER event
	lm	%r12,%r15,__LC_SAVE_AREA
	lpsw	0x28

#
# Normal per exception
#
pgm_per_std:
	SAVE_ALL_PGM __LC_PGM_OLD_PSW,__LC_SAVE_AREA
	CREATE_STACK_FRAME __LC_SAVE_AREA
	mvc	SP_PSW(8,%r15),__LC_PGM_OLD_PSW
	l	%r12,__LC_THREAD_INFO	# load pointer to thread_info struct
	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
	bz	BASED(pgm_no_vtime2)
	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
pgm_no_vtime2:
	l	%r1,__TI_task(%r12)
	tm	SP_PSW+1(%r15),0x01	# kernel per event ?
	bz	BASED(kernel_per)
	mvc	__THREAD_per_cause(2,%r1),__LC_PER_CAUSE
	mvc	__THREAD_per_address(4,%r1),__LC_PER_ADDRESS
	mvc	__THREAD_per_paid(1,%r1),__LC_PER_PAID
	oi	__TI_flags+3(%r12),_TIF_PER_TRAP # set TIF_PER_TRAP
	l	%r3,__LC_PGM_ILC	# load program interruption code
	l	%r4,__LC_TRANS_EXC_CODE
	REENABLE_IRQS
	la	%r8,0x7f
	nr	%r8,%r3			# clear per-event-bit and ilc
	be	BASED(pgm_exit2)	# only per or per+check ?
	sll	%r8,2
	l	%r1,BASED(.Ljump_table)
	l	%r1,0(%r8,%r1)		# load address of handler routine
	la	%r2,SP_PTREGS(%r15)	# address of register-save area
	basr	%r14,%r1		# branch to interrupt-handler
pgm_exit2:
	b	BASED(sysc_return)

#
# it was a single stepped SVC that is causing all the trouble
#
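# The stack frame is built from the SVC old PSW so that the single
# stepped SVC executes first: the PER information is saved in the
# thread struct, TIF_PER_TRAP is set and control continues at
# sysc_do_svc. The trap itself is delivered on the way back to user
# space (see sysc_singlestep).
#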
pgm_svcper:
	SAVE_ALL_PGM __LC_SVC_OLD_PSW,__LC_SAVE_AREA
	CREATE_STACK_FRAME __LC_SAVE_AREA
	mvc	SP_PSW(8,%r15),__LC_SVC_OLD_PSW
	mvc	SP_ILC(4,%r15),__LC_SVC_ILC
	l	%r12,__LC_THREAD_INFO	# load pointer to thread_info struct
	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
	l	%r8,__TI_task(%r12)
	mvc	__THREAD_per_cause(2,%r8),__LC_PER_CAUSE
	mvc	__THREAD_per_address(4,%r8),__LC_PER_ADDRESS
	mvc	__THREAD_per_paid(1,%r8),__LC_PER_PAID
	oi	__TI_flags+3(%r12),_TIF_PER_TRAP # set TIF_PER_TRAP
	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
	lm	%r2,%r6,SP_R2(%r15)	# load svc arguments
	b	BASED(sysc_do_svc)

#
# per was called from kernel, must be kprobes
#
kernel_per:
	REENABLE_IRQS
	xc	SP_SVCNR(2,%r15),SP_SVCNR(%r15)
	la	%r2,SP_PTREGS(%r15)	# address of register-save area
	l	%r1,BASED(.Lhandle_per)	# load adr. of per handler
	basr	%r14,%r1		# branch to do_per_trap
	b	BASED(pgm_exit)

/*
 * IO interrupt handler routine
 */

ENTRY(io_int_handler)
	stck	__LC_INT_CLOCK
	stpt	__LC_ASYNC_ENTER_TIMER
	SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+16
	CREATE_STACK_FRAME __LC_SAVE_AREA+16
	mvc	SP_PSW(8,%r15),0(%r12)	# move user PSW to stack
	l	%r12,__LC_THREAD_INFO	# load pointer to thread_info struct
	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
	bz	BASED(io_no_vtime)
	UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
io_no_vtime:
	TRACE_IRQS_OFF
	l	%r1,BASED(.Ldo_IRQ)	# load address of do_IRQ
	la	%r2,SP_PTREGS(%r15)	# address of register-save area
	basr	%r14,%r1		# branch to standard irq handler
io_return:
	LOCKDEP_SYS_EXIT
	TRACE_IRQS_ON
io_tif:
	tm	__TI_flags+3(%r12),_TIF_WORK_INT
	bnz	BASED(io_work)		# there is work to do (signals etc.)
io_restore:
	RESTORE_ALL __LC_RETURN_PSW,0
io_done:

#
# There is work to do, find out in which context we have been interrupted:
# 1) if we return to user space we can do all _TIF_WORK_INT work
# 2) if we return to kernel code and preemptive scheduling is enabled check
#    the preemption counter and if it is zero call preempt_schedule_irq
# Before any work can be done, a switch to the kernel stack is required.
#
io_work:
	tm	SP_PSW+1(%r15),0x01	# returning to user ?
	bo	BASED(io_work_user)	# yes -> do resched & signal
#ifdef CONFIG_PREEMPT
	# check for preemptive scheduling
	icm	%r0,15,__TI_precount(%r12)
	bnz	BASED(io_restore)	# preemption disabled
	tm	__TI_flags+3(%r12),_TIF_NEED_RESCHED
	bno	BASED(io_restore)
	# switch to kernel stack
	l	%r1,SP_R15(%r15)
	s	%r1,BASED(.Lc_spsize)
	mvc	SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
	xc	__SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain
	lr	%r15,%r1
	# TRACE_IRQS_ON already done at io_return, call
	# TRACE_IRQS_OFF to keep things symmetrical
	TRACE_IRQS_OFF
	l	%r1,BASED(.Lpreempt_schedule_irq)
	basr	%r14,%r1		# call preempt_schedule_irq
	b	BASED(io_return)
#else
	b	BASED(io_restore)
#endif

#
# Need to do work before returning to userspace, switch to kernel stack
#
io_work_user:
	l	%r1,__LC_KERNEL_STACK
	s	%r1,BASED(.Lc_spsize)
	mvc	SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
	xc	__SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain
	lr	%r15,%r1

#
# One of the work bits is on. Find out which one.
# Checked are: _TIF_SIGPENDING, _TIF_NOTIFY_RESUME, _TIF_NEED_RESCHED
#	       and _TIF_MCCK_PENDING
#
io_work_tif:
	tm	__TI_flags+3(%r12),_TIF_MCCK_PENDING
	bo	BASED(io_mcck_pending)
	tm	__TI_flags+3(%r12),_TIF_NEED_RESCHED
	bo	BASED(io_reschedule)
	tm	__TI_flags+3(%r12),_TIF_SIGPENDING
	bo	BASED(io_sigpending)
	tm	__TI_flags+3(%r12),_TIF_NOTIFY_RESUME
	bo	BASED(io_notify_resume)
	b	BASED(io_return)	# beware of critical section cleanup

#
# _TIF_MCCK_PENDING is set, call handler
#
io_mcck_pending:
	# TRACE_IRQS_ON already done at io_return
	l	%r1,BASED(.Ls390_handle_mcck)
	basr	%r14,%r1		# TIF bit will be cleared by handler
	TRACE_IRQS_OFF
	b	BASED(io_return)

#
# _TIF_NEED_RESCHED is set, call schedule
#
io_reschedule:
	# TRACE_IRQS_ON already done at io_return
	l	%r1,BASED(.Lschedule)
	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
	basr	%r14,%r1		# call scheduler
	stnsm	__SF_EMPTY(%r15),0xfc	# disable I/O and ext. interrupts
	TRACE_IRQS_OFF
	b	BASED(io_return)

#
# _TIF_SIGPENDING is set, call do_signal
#
io_sigpending:
	# TRACE_IRQS_ON already done at io_return
	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
	la	%r2,SP_PTREGS(%r15)	# load pt_regs
	l	%r1,BASED(.Ldo_signal)
	basr	%r14,%r1		# call do_signal
	stnsm	__SF_EMPTY(%r15),0xfc	# disable I/O and ext. interrupts
	TRACE_IRQS_OFF
	b	BASED(io_return)

#
# _TIF_NOTIFY_RESUME is set, call do_notify_resume
#
io_notify_resume:
	# TRACE_IRQS_ON already done at io_return
	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
	la	%r2,SP_PTREGS(%r15)	# load pt_regs
	l	%r1,BASED(.Ldo_notify_resume)
	basr	%r14,%r1		# call do_notify_resume
	stnsm	__SF_EMPTY(%r15),0xfc	# disable I/O and ext. interrupts
	TRACE_IRQS_OFF
	b	BASED(io_return)

/*
 * External interrupt handler routine
 */

ENTRY(ext_int_handler)
	stck	__LC_INT_CLOCK
	stpt	__LC_ASYNC_ENTER_TIMER
	SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16
	CREATE_STACK_FRAME __LC_SAVE_AREA+16
	mvc	SP_PSW(8,%r15),0(%r12)	# move user PSW to stack
	l	%r12,__LC_THREAD_INFO	# load pointer to thread_info struct
	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
	bz	BASED(ext_no_vtime)
	UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
ext_no_vtime:
	TRACE_IRQS_OFF
	la	%r2,SP_PTREGS(%r15)	# address of register-save area
	l	%r3,__LC_CPU_ADDRESS	# get cpu address + interruption code
	l	%r4,__LC_EXT_PARAMS	# get external parameters
	l	%r1,BASED(.Ldo_extint)
	basr	%r14,%r1
	b	BASED(io_return)

__critical_end:

/*
 * Machine check handler routines
 */
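
#
# Machine checks can arrive at any point, including inside the critical
# section between __critical_start and __critical_end. The general
# registers are therefore revalidated from the lowcore save area, and
# cleanup_critical is called to bring a partially executed entry/exit
# sequence back into a consistent state before the C handler runs.
#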
ENTRY(mcck_int_handler)
	stck	__LC_MCCK_CLOCK
	spt	__LC_CPU_TIMER_SAVE_AREA	# revalidate cpu timer
	lm	%r0,%r15,__LC_GPREGS_SAVE_AREA	# revalidate gprs
	SAVE_ALL_BASE __LC_SAVE_AREA+32
	la	%r12,__LC_MCK_OLD_PSW
	tm	__LC_MCCK_CODE,0x80	# system damage?
	bo	BASED(mcck_int_main)	# yes -> rest of mcck code invalid
	mvc	__LC_MCCK_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA
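	# If the machine check did not store a valid CPU timer value, fall
	# back to one of the timer snapshots saved on entry/exit (the
	# comparison chain below picks the most recently stored one) and
	# reload the CPU timer from it.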
	tm	__LC_MCCK_CODE+5,0x02	# stored cpu timer value valid?
	bo	BASED(1f)
	la	%r14,__LC_SYNC_ENTER_TIMER
	clc	0(8,%r14),__LC_ASYNC_ENTER_TIMER
	bl	BASED(0f)
	la	%r14,__LC_ASYNC_ENTER_TIMER
0:	clc	0(8,%r14),__LC_EXIT_TIMER
	bl	BASED(0f)
	la	%r14,__LC_EXIT_TIMER
0:	clc	0(8,%r14),__LC_LAST_UPDATE_TIMER
	bl	BASED(0f)
	la	%r14,__LC_LAST_UPDATE_TIMER
0:	spt	0(%r14)
	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
1:	tm	__LC_MCCK_CODE+2,0x09	# mwp + ia of old psw valid?
	bno	BASED(mcck_int_main)	# no -> skip cleanup critical
	tm	__LC_MCK_OLD_PSW+1,0x01	# test problem state bit
	bnz	BASED(mcck_int_main)	# from user -> load async stack
	clc	__LC_MCK_OLD_PSW+4(4),BASED(.Lcritical_end)
	bhe	BASED(mcck_int_main)
	clc	__LC_MCK_OLD_PSW+4(4),BASED(.Lcritical_start)
	bl	BASED(mcck_int_main)
	l	%r14,BASED(.Lcleanup_critical)
	basr	%r14,%r14
mcck_int_main:
	l	%r14,__LC_PANIC_STACK	# are we already on the panic stack?
	slr	%r14,%r15
	sra	%r14,PAGE_SHIFT
	be	BASED(0f)
	l	%r15,__LC_PANIC_STACK	# load panic stack
0:	s	%r15,BASED(.Lc_spsize)	# make room for registers & psw
	CREATE_STACK_FRAME __LC_SAVE_AREA+32
	mvc	SP_PSW(8,%r15),0(%r12)
	l	%r12,__LC_THREAD_INFO	# load pointer to thread_info struct
	tm	__LC_MCCK_CODE+2,0x08	# mwp of old psw valid?
	bno	BASED(mcck_no_vtime)	# no -> skip vtime update
	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
	bz	BASED(mcck_no_vtime)
	UPDATE_VTIME __LC_EXIT_TIMER,__LC_MCCK_ENTER_TIMER,__LC_USER_TIMER
	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_MCCK_ENTER_TIMER
mcck_no_vtime:
	la	%r2,SP_PTREGS(%r15)	# load pt_regs
	l	%r1,BASED(.Ls390_mcck)
	basr	%r14,%r1		# call machine check handler
	tm	SP_PSW+1(%r15),0x01	# returning to user ?
	bno	BASED(mcck_return)
	l	%r1,__LC_KERNEL_STACK	# switch to kernel stack
	s	%r1,BASED(.Lc_spsize)
	mvc	SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
	xc	__SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain
	lr	%r15,%r1
	stosm	__SF_EMPTY(%r15),0x04	# turn dat on
	tm	__TI_flags+3(%r12),_TIF_MCCK_PENDING
	bno	BASED(mcck_return)
	TRACE_IRQS_OFF
	l	%r1,BASED(.Ls390_handle_mcck)
	basr	%r14,%r1		# call machine check handler
	TRACE_IRQS_ON
mcck_return:
	mvc	__LC_RETURN_MCCK_PSW(8),SP_PSW(%r15) # move return PSW
	ni	__LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit
	tm	__LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
	bno	BASED(0f)
	lm	%r0,%r15,SP_R0(%r15)	# load gprs 0-15
	stpt	__LC_EXIT_TIMER
	lpsw	__LC_RETURN_MCCK_PSW	# back to caller
0:	lm	%r0,%r15,SP_R0(%r15)	# load gprs 0-15
	lpsw	__LC_RETURN_MCCK_PSW	# back to caller

	RESTORE_ALL __LC_RETURN_MCCK_PSW,0

/*
 * Restart interruption handler, kick starter for additional CPUs
 */
#ifdef CONFIG_SMP
	__CPUINIT
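#
# The starting CPU finds its kernel stack pointer, control registers and
# access registers in the lowcore save areas (set up by the CPU that
# triggered the restart), reinitializes its CPU timer and vtime fields,
# turns DAT on and branches to start_secondary.
#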
ENTRY(restart_int_handler)
	basr	%r1,0
restart_base:
	spt	restart_vtime-restart_base(%r1)
	stck	__LC_LAST_UPDATE_CLOCK
	mvc	__LC_LAST_UPDATE_TIMER(8),restart_vtime-restart_base(%r1)
	mvc	__LC_EXIT_TIMER(8),restart_vtime-restart_base(%r1)
	l	%r15,__LC_SAVE_AREA+60	# load ksp
	lctl	%c0,%c15,__LC_CREGS_SAVE_AREA # get new ctl regs
	lam	%a0,%a15,__LC_AREGS_SAVE_AREA
	lm	%r6,%r15,__SF_GPRS(%r15) # load registers from clone
	l	%r1,__LC_THREAD_INFO
	mvc	__LC_USER_TIMER(8),__TI_user_timer(%r1)
	mvc	__LC_SYSTEM_TIMER(8),__TI_system_timer(%r1)
	xc	__LC_STEAL_TIMER(8),__LC_STEAL_TIMER
	stosm	__SF_EMPTY(%r15),0x04	# now we can turn dat on
	basr	%r14,0
	l	%r14,restart_addr-.(%r14)
	basr	%r14,%r14		# branch to start_secondary
restart_addr:
	.long	start_secondary
	.align	8
restart_vtime:
	.long	0x7fffffff,0xffffffff
	.previous
#else
/*
 * If we do not run with SMP enabled, let the new CPU crash ...
 */
ENTRY(restart_int_handler)
	basr	%r1,0
restart_base:
	lpsw	restart_crash-restart_base(%r1)
	.align	8
restart_crash:
	.long	0x000a0000,0x00000000
restart_go:
#endif

	.section .kprobes.text, "ax"

#ifdef CONFIG_CHECK_STACK
/*
 * The synchronous or the asynchronous stack overflowed. We are dead.
 * No need to properly save the registers, we are going to panic anyway.
 * Setup a pt_regs so that show_trace can provide a good call trace.
 */
stack_overflow:
	l	%r15,__LC_PANIC_STACK	# change to panic stack
	sl	%r15,BASED(.Lc_spsize)
	mvc	SP_PSW(8,%r15),0(%r12)	# move user PSW to stack
	stm	%r0,%r11,SP_R0(%r15)	# store gprs %r0-%r11 to kernel stack
	la	%r1,__LC_SAVE_AREA
	ch	%r12,BASED(.L0x020)	# old psw addr == __LC_SVC_OLD_PSW ?
	be	BASED(0f)
	ch	%r12,BASED(.L0x028)	# old psw addr == __LC_PGM_OLD_PSW ?
	be	BASED(0f)
	la	%r1,__LC_SAVE_AREA+16
0:	mvc	SP_R12(16,%r15),0(%r1)	# move %r12-%r15 to stack
	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear back chain
	l	%r1,BASED(1f)		# branch to kernel_stack_overflow
	la	%r2,SP_PTREGS(%r15)	# load pt_regs
	br	%r1
1:	.long	kernel_stack_overflow
#endif
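
#
# Each cleanup table below holds a pair of addresses delimiting a
# critical range of the entry/exit code. When an interrupt (e.g. a
# machine check) hits while the old PSW address, found at 4(%r12),
# lies inside one of these ranges, cleanup_critical branches to the
# matching cleanup routine, which completes or redirects the
# interrupted sequence so that the saved state is consistent before
# the interrupt is handled.
#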
cleanup_table_system_call:
	.long	system_call + 0x80000000, sysc_do_svc + 0x80000000
cleanup_table_sysc_tif:
	.long	sysc_tif + 0x80000000, sysc_restore + 0x80000000
cleanup_table_sysc_restore:
	.long	sysc_restore + 0x80000000, sysc_done + 0x80000000
cleanup_table_io_tif:
	.long	io_tif + 0x80000000, io_restore + 0x80000000
cleanup_table_io_restore:
	.long	io_restore + 0x80000000, io_done + 0x80000000

cleanup_critical:
	clc	4(4,%r12),BASED(cleanup_table_system_call)
	bl	BASED(0f)
	clc	4(4,%r12),BASED(cleanup_table_system_call+4)
	bl	BASED(cleanup_system_call)
0:
	clc	4(4,%r12),BASED(cleanup_table_sysc_tif)
	bl	BASED(0f)
	clc	4(4,%r12),BASED(cleanup_table_sysc_tif+4)
	bl	BASED(cleanup_sysc_tif)
0:
	clc	4(4,%r12),BASED(cleanup_table_sysc_restore)
	bl	BASED(0f)
	clc	4(4,%r12),BASED(cleanup_table_sysc_restore+4)
	bl	BASED(cleanup_sysc_restore)
0:
	clc	4(4,%r12),BASED(cleanup_table_io_tif)
	bl	BASED(0f)
	clc	4(4,%r12),BASED(cleanup_table_io_tif+4)
	bl	BASED(cleanup_io_tif)
0:
	clc	4(4,%r12),BASED(cleanup_table_io_restore)
	bl	BASED(0f)
	clc	4(4,%r12),BASED(cleanup_table_io_restore+4)
	bl	BASED(cleanup_io_restore)
0:
	br	%r14

cleanup_system_call:
	mvc	__LC_RETURN_PSW(8),0(%r12)
	clc	__LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+4)
	bh	BASED(0f)
	mvc	__LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER
	c	%r12,BASED(.Lmck_old_psw)
	be	BASED(0f)
	mvc	__LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
0:	c	%r12,BASED(.Lmck_old_psw)
	la	%r12,__LC_SAVE_AREA+32
	be	BASED(0f)
	la	%r12,__LC_SAVE_AREA+16
0:	clc	__LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+8)
	bhe	BASED(cleanup_vtime)
	clc	__LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn)
	bh	BASED(0f)
	mvc	__LC_SAVE_AREA(16),0(%r12)
0:	st	%r13,4(%r12)
	l	%r15,__LC_KERNEL_STACK	# problem state -> load ksp
	s	%r15,BASED(.Lc_spsize)	# make room for registers & psw
	st	%r15,12(%r12)
	CREATE_STACK_FRAME __LC_SAVE_AREA
	mvc	SP_PSW(8,%r15),__LC_SVC_OLD_PSW
	mvc	SP_ILC(4,%r15),__LC_SVC_ILC
	mvc	0(4,%r12),__LC_THREAD_INFO
cleanup_vtime:
	clc	__LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+12)
	bhe	BASED(cleanup_stime)
	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
cleanup_stime:
	clc	__LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+16)
	bh	BASED(cleanup_update)
	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
cleanup_update:
	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
	mvc	__LC_RETURN_PSW+4(4),BASED(cleanup_table_system_call+4)
	la	%r12,__LC_RETURN_PSW
	br	%r14
cleanup_system_call_insn:
	.long	sysc_saveall + 0x80000000
	.long	system_call + 0x80000000
	.long	sysc_vtime + 0x80000000
	.long	sysc_stime + 0x80000000
	.long	sysc_update + 0x80000000

cleanup_sysc_tif:
	mvc	__LC_RETURN_PSW(4),0(%r12)
	mvc	__LC_RETURN_PSW+4(4),BASED(cleanup_table_sysc_tif)
	la	%r12,__LC_RETURN_PSW
	br	%r14

cleanup_sysc_restore:
	clc	4(4,%r12),BASED(cleanup_sysc_restore_insn)
	be	BASED(2f)
	mvc	__LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
	c	%r12,BASED(.Lmck_old_psw)
	be	BASED(0f)
	mvc	__LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
0:	clc	4(4,%r12),BASED(cleanup_sysc_restore_insn+4)
	be	BASED(2f)
	mvc	__LC_RETURN_PSW(8),SP_PSW(%r15)
	c	%r12,BASED(.Lmck_old_psw)
	la	%r12,__LC_SAVE_AREA+32
	be	BASED(1f)
	la	%r12,__LC_SAVE_AREA+16
1:	mvc	0(16,%r12),SP_R12(%r15)
	lm	%r0,%r11,SP_R0(%r15)
	l	%r15,SP_R15(%r15)
2:	la	%r12,__LC_RETURN_PSW
	br	%r14
cleanup_sysc_restore_insn:
	.long	sysc_done - 4 + 0x80000000
	.long	sysc_done - 8 + 0x80000000

cleanup_io_tif:
	mvc	__LC_RETURN_PSW(4),0(%r12)
	mvc	__LC_RETURN_PSW+4(4),BASED(cleanup_table_io_tif)
	la	%r12,__LC_RETURN_PSW
	br	%r14

cleanup_io_restore:
	clc	4(4,%r12),BASED(cleanup_io_restore_insn)
	be	BASED(1f)
	mvc	__LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
	clc	4(4,%r12),BASED(cleanup_io_restore_insn+4)
	be	BASED(1f)
	mvc	__LC_RETURN_PSW(8),SP_PSW(%r15)
	mvc	__LC_SAVE_AREA+32(16),SP_R12(%r15)
	lm	%r0,%r11,SP_R0(%r15)
	l	%r15,SP_R15(%r15)
1:	la	%r12,__LC_RETURN_PSW
	br	%r14
cleanup_io_restore_insn:
	.long	io_done - 4 + 0x80000000
	.long	io_done - 8 + 0x80000000

/*
 * Integer constants
 */
		.align	4
.Lc_spsize:	.long	SP_SIZE
.Lc_overhead:	.long	STACK_FRAME_OVERHEAD
.Lnr_syscalls:	.long	NR_syscalls
.L0x018:	.short	0x018
.L0x020:	.short	0x020
.L0x028:	.short	0x028
.L0x030:	.short	0x030
.L0x038:	.short	0x038
.Lc_1:		.long	1

/*
 * Symbol constants
 */
.Ls390_mcck:	.long	s390_do_machine_check
.Ls390_handle_mcck:
		.long	s390_handle_mcck
.Lmck_old_psw:	.long	__LC_MCK_OLD_PSW
.Ldo_IRQ:	.long	do_IRQ
.Ldo_extint:	.long	do_extint
.Ldo_signal:	.long	do_signal
.Ldo_notify_resume:
		.long	do_notify_resume
.Lhandle_per:	.long	do_per_trap
.Ldo_execve:	.long	do_execve
.Lexecve_tail:	.long	execve_tail
.Ljump_table:	.long	pgm_check_table
.Lschedule:	.long	schedule
#ifdef CONFIG_PREEMPT
.Lpreempt_schedule_irq:
		.long	preempt_schedule_irq
#endif
.Ltrace_entry:	.long	do_syscall_trace_enter
.Ltrace_exit:	.long	do_syscall_trace_exit
.Lschedtail:	.long	schedule_tail
.Lsysc_table:	.long	sys_call_table
#ifdef CONFIG_TRACE_IRQFLAGS
.Ltrace_irq_on_caller:
		.long	trace_hardirqs_on_caller
.Ltrace_irq_off_caller:
		.long	trace_hardirqs_off_caller
#endif
#ifdef CONFIG_LOCKDEP
.Llockdep_sys_exit:
		.long	lockdep_sys_exit
#endif
.Lcritical_start:
		.long	__critical_start + 0x80000000
.Lcritical_end:
		.long	__critical_end + 0x80000000
.Lcleanup_critical:
		.long	cleanup_critical
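
#
# The system call table: each SYSCALL() line in syscalls.S names the
# 31 bit (esa), 64 bit (esame) and compat (emu) entry points; only the
# first one is used here, giving a table of 4 byte pointers indexed by
# system call number * 4 (see sysc_nr_ok).
#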
		.section .rodata, "a"
#define SYSCALL(esa,esame,emu)	.long esa
	.globl	sys_call_table
sys_call_table:
#include "syscalls.S"
#undef SYSCALL