/*
 *  arch/s390/kernel/entry.S
 *	S390 low-level entry points.
 *
 *    Copyright (C) IBM Corp. 1999,2006
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *		 Hartmut Penner (hp@de.ibm.com),
 *		 Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *		 Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/sys.h>
#include <linux/linkage.h>
#include <asm/cache.h>
#include <asm/lowcore.h>
#include <asm/errno.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/page.h>

/*
 * Stack layout for the system_call stack entry.
 * The first few entries are identical to the user_regs_struct.
 * All offsets are relative to the kernel stack pointer (%r15); the
 * pt_regs area sits just above the standard stack frame overhead.
 */
SP_PTREGS    =	STACK_FRAME_OVERHEAD
SP_ARGS      =	STACK_FRAME_OVERHEAD + __PT_ARGS
SP_PSW	     =	STACK_FRAME_OVERHEAD + __PT_PSW
SP_R0	     =	STACK_FRAME_OVERHEAD + __PT_GPRS
SP_R1	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 4
SP_R2	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 8
SP_R3	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 12
SP_R4	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 16
SP_R5	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 20
SP_R6	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 24
SP_R7	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 28
SP_R8	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 32
SP_R9	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 36
SP_R10	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 40
SP_R11	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 44
SP_R12	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 48
SP_R13	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 52
SP_R14	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 56
SP_R15	     =	STACK_FRAME_OVERHEAD + __PT_GPRS + 60
SP_ORIG_R2   =	STACK_FRAME_OVERHEAD + __PT_ORIG_GPR2
SP_ILC	     =	STACK_FRAME_OVERHEAD + __PT_ILC
SP_TRAP      =	STACK_FRAME_OVERHEAD + __PT_TRAP
SP_SIZE      =	STACK_FRAME_OVERHEAD + __PT_SIZE

# TIF work masks tested on the exit paths: _TIF_WORK_SVC on return from a
# system call, _TIF_WORK_INT on return from an interrupt.
_TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK | _TIF_NEED_RESCHED | \
		 _TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_SINGLE_STEP )
_TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK | _TIF_NEED_RESCHED | \
		 _TIF_MCCK_PENDING)

STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
STACK_SIZE  = 1 << STACK_SHIFT

# Literal-pool addressing: %r13 always holds the address of system_call,
# so BASED(name) is a base-displacement reference relative to it.
#define BASED(name) name-system_call(%r13)

#ifdef CONFIG_TRACE_IRQFLAGS
	.macro	TRACE_IRQS_ON
	l	%r1,BASED(.Ltrace_irq_on)
	basr	%r14,%r1
	.endm

	.macro	TRACE_IRQS_OFF
	l	%r1,BASED(.Ltrace_irq_off)
	basr	%r14,%r1
	.endm

	# Call trace_hardirqs_on/off depending on the interrupt-enable
	# bits of the PSW saved on the stack.
	.macro	TRACE_IRQS_CHECK
	tm	SP_PSW(%r15),0x03	# irqs enabled?
	jz	0f
	l	%r1,BASED(.Ltrace_irq_on)
	basr	%r14,%r1
	j	1f
0:	l	%r1,BASED(.Ltrace_irq_off)
	basr	%r14,%r1
1:
	.endm
#else
#define TRACE_IRQS_ON
#define TRACE_IRQS_OFF
#define TRACE_IRQS_CHECK
#endif

#ifdef CONFIG_LOCKDEP
	.macro	LOCKDEP_SYS_EXIT
	tm	SP_PSW+1(%r15),0x01	# returning to user ?
	jz	0f
	l	%r1,BASED(.Llockdep_sys_exit)
	basr	%r14,%r1
0:
	.endm
#else
#define LOCKDEP_SYS_EXIT
#endif

/*
 * Register usage in interrupt handlers:
 *    R9  - pointer to current task structure
 *    R13 - pointer to literal pool
 *    R14 - return register for function calls
 *    R15 - kernel stack pointer
 */

	# Store the cpu timer into the given lowcore field; compiled out
	# unless virtual cpu accounting is configured.
	.macro	STORE_TIMER lc_offset
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	stpt	\lc_offset
#endif
	.endm

#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	# 64-bit accounting update done with 32-bit register pairs:
	# \lc_sum += \lc_from - \lc_to, propagating borrow/carry between
	# the low (%r11) and high (%r10) words by hand.
	.macro	UPDATE_VTIME lc_from,lc_to,lc_sum
	lm	%r10,%r11,\lc_from
	sl	%r10,\lc_to
	sl	%r11,\lc_to+4
	bc	3,BASED(0f)		# borrow out of low word ?
	sl	%r10,BASED(.Lc_1)
0:	al	%r10,\lc_sum
	al	%r11,\lc_sum+4
	bc	12,BASED(1f)		# carry out of low word ?
	al	%r10,BASED(.Lc_1)
1:	stm	%r10,%r11,\lc_sum
	.endm
#endif

	# Save %r12-%r15 to the lowcore save area and establish the
	# literal-pool base register %r13.
	.macro	SAVE_ALL_BASE savearea
	stm	%r12,%r15,\savearea
	l	%r13,__LC_SVC_NEW_PSW+4	# load &system_call to %r13
	.endm

	# SVC always arrives from user space: switch straight to the
	# kernel stack. %r12 is pointed at the old PSW for later use.
	.macro	SAVE_ALL_SVC psworg,savearea
	la	%r12,\psworg
	l	%r15,__LC_KERNEL_STACK	# problem state -> load ksp
	.endm

	# Synchronous (program check) entry: only switch stacks when we
	# were interrupted in problem state; otherwise optionally verify
	# the current kernel stack against the stack guard.
	.macro	SAVE_ALL_SYNC psworg,savearea
	la	%r12,\psworg
	tm	\psworg+1,0x01		# test problem state bit
	bz	BASED(2f)		# skip stack setup save
	l	%r15,__LC_KERNEL_STACK	# problem state -> load ksp
#ifdef CONFIG_CHECK_STACK
	b	BASED(3f)
2:	tml	%r15,STACK_SIZE - CONFIG_STACK_GUARD
	bz	BASED(stack_overflow)
3:
#endif
2:
	.endm

	# Asynchronous (I/O, external) entry: pick the async stack unless
	# we are already running on it. If the old PSW lies inside the
	# critical section, let cleanup_critical roll the state forward
	# first so the stack decision is made on consistent state.
	.macro	SAVE_ALL_ASYNC psworg,savearea
	la	%r12,\psworg
	tm	\psworg+1,0x01		# test problem state bit
	bnz	BASED(1f)		# from user -> load async stack
	clc	\psworg+4(4),BASED(.Lcritical_end)
	bhe	BASED(0f)
	clc	\psworg+4(4),BASED(.Lcritical_start)
	bl	BASED(0f)
	l	%r14,BASED(.Lcleanup_critical)
	basr	%r14,%r14
	tm	1(%r12),0x01		# retest problem state after cleanup
	bnz	BASED(1f)		# from user -> load async stack
0:	l	%r14,__LC_ASYNC_STACK	# are we already on the async stack ?
	slr	%r14,%r15
	sra	%r14,STACK_SHIFT
	be	BASED(2f)
1:	l	%r15,__LC_ASYNC_STACK
#ifdef CONFIG_CHECK_STACK
	b	BASED(3f)
2:	tml	%r15,STACK_SIZE - CONFIG_STACK_GUARD
	bz	BASED(stack_overflow)
3:
#endif
2:
	.endm

	# Build the pt_regs area on the stack: old PSW, gprs 0-15,
	# original gpr2, svc ilc, and clear the back chain. On entry %r12
	# points to the old PSW; it is reloaded with \psworg and
	# clobbered as scratch afterwards.
	.macro	CREATE_STACK_FRAME psworg,savearea
	s	%r15,BASED(.Lc_spsize)	# make room for registers & psw
	mvc	SP_PSW(8,%r15),0(%r12)	# move user PSW to stack
	la	%r12,\psworg
	st	%r2,SP_ORIG_R2(%r15)	# store original content of gpr 2
	icm	%r12,12,__LC_SVC_ILC
	stm	%r0,%r11,SP_R0(%r15)	# store gprs %r0-%r11 to kernel stack
	st	%r12,SP_ILC(%r15)
	mvc	SP_R12(16,%r15),\savearea # move %r12-%r15 to stack
	la	%r12,0
	st	%r12,__SF_BACKCHAIN(%r15) # clear back chain
	.endm

	# Leave the kernel: copy the saved PSW back to lowcore, reload
	# all 16 gprs from pt_regs and lpsw out. \sync=0 additionally
	# clears a possibly-set wait state bit.
	.macro	RESTORE_ALL psworg,sync
	mvc	\psworg(8),SP_PSW(%r15) # move user PSW to lowcore
	.if !\sync
	ni	\psworg+1,0xfd		# clear wait state bit
	.endif
	lm	%r0,%r15,SP_R0(%r15)	# load gprs 0-15 of user
	STORE_TIMER __LC_EXIT_TIMER
	lpsw	\psworg			# back to caller
	.endm

/*
 * Scheduler resume function, called by switch_to
 *  gpr2 = (task_struct *) prev
 *  gpr3 = (task_struct *) next
 * Returns:
 *  gpr2 = prev
 */
	.globl	__switch_to
__switch_to:
	basr	%r1,0
__switch_to_base:
	tm	__THREAD_per(%r3),0xe8		# new process is using per ?
	bz	__switch_to_noper-__switch_to_base(%r1)	# if not we're fine
	stctl	%c9,%c11,__SF_EMPTY(%r15)	# We are using per stuff
	clc	__THREAD_per(12,%r3),__SF_EMPTY(%r15)
	be	__switch_to_noper-__switch_to_base(%r1)	# we got away w/o bashing TLB's
	lctl	%c9,%c11,__THREAD_per(%r3)	# Nope we didn't
__switch_to_noper:
	l	%r4,__THREAD_info(%r2)		# get thread_info of prev
	tm	__TI_flags+3(%r4),_TIF_MCCK_PENDING # machine check pending?
	bz	__switch_to_no_mcck-__switch_to_base(%r1)
	ni	__TI_flags+3(%r4),255-_TIF_MCCK_PENDING # clear flag in prev
	l	%r4,__THREAD_info(%r3)		# get thread_info of next
	oi	__TI_flags+3(%r4),_TIF_MCCK_PENDING # set it in next
__switch_to_no_mcck:
	stm	%r6,%r15,__SF_GPRS(%r15)# store __switch_to registers of prev task
	st	%r15,__THREAD_ksp(%r2)	# store kernel stack to prev->tss.ksp
	l	%r15,__THREAD_ksp(%r3)	# load kernel stack from next->tss.ksp
	lm	%r6,%r15,__SF_GPRS(%r15)# load __switch_to registers of next task
	st	%r3,__LC_CURRENT	# __LC_CURRENT = current task struct
	lctl	%c4,%c4,__TASK_pid(%r3)	# load pid to control reg. 4
	l	%r3,__THREAD_info(%r3)	# load thread_info from task struct
	st	%r3,__LC_THREAD_INFO
	ahi	%r3,STACK_SIZE
	st	%r3,__LC_KERNEL_STACK	# __LC_KERNEL_STACK = new kernel stack
	br	%r14

__critical_start:
/*
 * SVC interrupt handler routine. System calls are synchronous events and
 * are executed with interrupts enabled.
 */

	.globl	system_call
system_call:
	STORE_TIMER __LC_SYNC_ENTER_TIMER
sysc_saveall:
	SAVE_ALL_BASE __LC_SAVE_AREA
	SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
	CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA
	lh	%r7,0x8a		# get svc number from lowcore
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
sysc_vtime:
	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
sysc_stime:
	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
sysc_update:
	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
#endif
sysc_do_svc:
	l	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
	sla	%r7,2			# *4 and test for svc 0
	bnz	BASED(sysc_nr_ok)	# svc number > 0
	# svc 0: system call number in %r1
	cl	%r1,BASED(.Lnr_syscalls)
	bnl	BASED(sysc_nr_ok)	# out of range -> let the call fail
	lr	%r7,%r1 	# copy svc number to %r7
	sla	%r7,2		# *4
sysc_nr_ok:
	mvc	SP_ARGS(4,%r15),SP_R7(%r15)
sysc_do_restart:
	l	%r8,BASED(.Lsysc_table)
	tm	__TI_flags+3(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
	l	%r8,0(%r7,%r8)	# get system call addr.
	bnz	BASED(sysc_tracesys)	# tracing/audit -> slow path
	basr	%r14,%r8	# call sys_xxxx
	st	%r2,SP_R2(%r15) # store return value (change R2 on stack)

sysc_return:
	tm	SP_PSW+1(%r15),0x01	# returning to user ?
	bno	BASED(sysc_restore)
	tm	__TI_flags+3(%r9),_TIF_WORK_SVC
	bnz	BASED(sysc_work)  # there is work to do (signals etc.)
sysc_restore:
#ifdef CONFIG_TRACE_IRQFLAGS
	# switch off machine checks via the prebuilt PSW before tracing
	la	%r1,BASED(sysc_restore_trace_psw)
	lpsw	0(%r1)
sysc_restore_trace:
	TRACE_IRQS_CHECK
	LOCKDEP_SYS_EXIT
#endif
sysc_leave:
	RESTORE_ALL __LC_RETURN_PSW,1
sysc_done:

#ifdef CONFIG_TRACE_IRQFLAGS
	.align	8
	.globl	sysc_restore_trace_psw
sysc_restore_trace_psw:
	.long	0, sysc_restore_trace + 0x80000000
#endif

#
# recheck if there is more work to do
#
sysc_work_loop:
	tm	__TI_flags+3(%r9),_TIF_WORK_SVC
	bz	BASED(sysc_restore)	# there is no work to do
#
# One of the work bits is on. Find out which one.
# Priority: mcck pending > resched > signals > restart > single step.
#
sysc_work:
	tm	__TI_flags+3(%r9),_TIF_MCCK_PENDING
	bo	BASED(sysc_mcck_pending)
	tm	__TI_flags+3(%r9),_TIF_NEED_RESCHED
	bo	BASED(sysc_reschedule)
	tm	__TI_flags+3(%r9),(_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK)
	bnz	BASED(sysc_sigpending)
	tm	__TI_flags+3(%r9),_TIF_RESTART_SVC
	bo	BASED(sysc_restart)
	tm	__TI_flags+3(%r9),_TIF_SINGLE_STEP
	bo	BASED(sysc_singlestep)
	b	BASED(sysc_restore)
sysc_work_done:

#
# _TIF_NEED_RESCHED is set, call schedule
#
sysc_reschedule:
	l	%r1,BASED(.Lschedule)
	la	%r14,BASED(sysc_work_loop)	# schedule returns to the work loop
	br	%r1			# call scheduler

#
# _TIF_MCCK_PENDING is set, call handler
#
sysc_mcck_pending:
	l	%r1,BASED(.Ls390_handle_mcck)
	la	%r14,BASED(sysc_work_loop)
	br	%r1			# TIF bit will be cleared by handler

#
# _TIF_SIGPENDING or _TIF_RESTORE_SIGMASK is set, call do_signal
#
sysc_sigpending:
	ni	__TI_flags+3(%r9),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP
	la	%r2,SP_PTREGS(%r15)	# load pt_regs
	l	%r1,BASED(.Ldo_signal)
	basr	%r14,%r1		# call do_signal
	tm	__TI_flags+3(%r9),_TIF_RESTART_SVC
	bo	BASED(sysc_restart)
	tm	__TI_flags+3(%r9),_TIF_SINGLE_STEP
	bo	BASED(sysc_singlestep)
	b	BASED(sysc_work_loop)

#
# _TIF_RESTART_SVC is set, set up registers and restart svc
#
sysc_restart:
	ni	__TI_flags+3(%r9),255-_TIF_RESTART_SVC # clear TIF_RESTART_SVC
	l	%r7,SP_R2(%r15)		# load new svc number
	sla	%r7,2
	mvc	SP_R2(4,%r15),SP_ORIG_R2(%r15) # restore first argument
	lm	%r2,%r6,SP_R2(%r15)	# load svc arguments
	b	BASED(sysc_do_restart)	# restart svc

#
# _TIF_SINGLE_STEP is set, call do_single_step
#
sysc_singlestep:
	ni	__TI_flags+3(%r9),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP
	mvi	SP_TRAP+1(%r15),0x28	# set trap indication to pgm check
	la	%r2,SP_PTREGS(%r15)	# address of register-save area
	l	%r1,BASED(.Lhandle_per)	# load adr. of per handler
	la	%r14,BASED(sysc_return)	# load adr. of system return
	br	%r1			# branch to do_single_step

#
# call trace before and after sys_call
#
sysc_tracesys:
	l	%r1,BASED(.Ltrace)
	la	%r2,SP_PTREGS(%r15)	# load pt_regs
	la	%r3,0			# entry hook (arg3 = 0)
	srl	%r7,2
	st	%r7,SP_R2(%r15)
	basr	%r14,%r1
	clc	SP_R2(4,%r15),BASED(.Lnr_syscalls)
	bnl	BASED(sysc_tracenogo)
	l	%r8,BASED(.Lsysc_table)
	l	%r7,SP_R2(%r15)		# strace might have changed the
	sll	%r7,2			#  system call
	l	%r8,0(%r7,%r8)
sysc_tracego:
	lm	%r3,%r6,SP_R3(%r15)
	l	%r2,SP_ORIG_R2(%r15)
	basr	%r14,%r8		# call sys_xxx
	st	%r2,SP_R2(%r15)		# store return value
sysc_tracenogo:
	tm	__TI_flags+3(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
	bz	BASED(sysc_return)
	l	%r1,BASED(.Ltrace)
	la	%r2,SP_PTREGS(%r15)	# load pt_regs
	la	%r3,1			# exit hook (arg3 = 1)
	la	%r14,BASED(sysc_return)
	br	%r1

#
# a new process exits the kernel with ret_from_fork
#
	.globl	ret_from_fork
ret_from_fork:
	l	%r13,__LC_SVC_NEW_PSW+4	# re-establish literal pool base
	l	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
	tm	SP_PSW+1(%r15),0x01	# forking a kernel thread ?
	bo	BASED(0f)
	st	%r15,SP_R15(%r15)	# store stack pointer for new kthread
0:	l	%r1,BASED(.Lschedtail)
	basr	%r14,%r1
	TRACE_IRQS_ON
	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
	b	BASED(sysc_return)

#
# kernel_execve function needs to deal with pt_regs that is not
# at the usual place
#
	.globl	kernel_execve
kernel_execve:
	stm	%r12,%r15,48(%r15)
	lr	%r14,%r15
	l	%r13,__LC_SVC_NEW_PSW+4
	s	%r15,BASED(.Lc_spsize)
	st	%r14,__SF_BACKCHAIN(%r15)
	la	%r12,SP_PTREGS(%r15)
	xc	0(__PT_SIZE,%r12),0(%r12)	# zero-fill temporary pt_regs
	l	%r1,BASED(.Ldo_execve)
	lr	%r5,%r12
	basr	%r14,%r1
	ltr	%r2,%r2
	be	BASED(0f)
	# do_execve failed: unwind the temporary frame and return rc
	a	%r15,BASED(.Lc_spsize)
	lm	%r12,%r15,48(%r15)
	br	%r14
	# execve succeeded.
0:	stnsm	__SF_EMPTY(%r15),0xfc	# disable interrupts
	l	%r15,__LC_KERNEL_STACK	# load ksp
	s	%r15,BASED(.Lc_spsize)	# make room for registers & psw
	l	%r9,__LC_THREAD_INFO
	mvc	SP_PTREGS(__PT_SIZE,%r15),0(%r12)	# copy pt_regs
	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
	l	%r1,BASED(.Lexecve_tail)
	basr	%r14,%r1
	b	BASED(sysc_return)

/*
 * Program check handler routine
 */

	.globl	pgm_check_handler
pgm_check_handler:
/*
 * First we need to check for a special case:
 * Single stepping an instruction that disables the PER event mask will
 * cause a PER event AFTER the mask has been set. Example: SVC or LPSW.
 * For a single stepped SVC the program check handler gets control after
 * the SVC new PSW has been loaded. But we want to execute the SVC first and
 * then handle the PER event. Therefore we update the SVC old PSW to point
 * to the pgm_check_handler and branch to the SVC handler after we checked
 * if we have to load the kernel stack register.
 * For every other possible cause for PER event without the PER mask set
 * we just ignore the PER event (FIXME: is there anything we have to do
 * for LPSW?).
 */
	STORE_TIMER __LC_SYNC_ENTER_TIMER
	SAVE_ALL_BASE __LC_SAVE_AREA
	tm	__LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception
	bnz	BASED(pgm_per)		# got per exception -> special case
	SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA
	CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
	bz	BASED(pgm_no_vtime)
	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
pgm_no_vtime:
#endif
	l	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
	TRACE_IRQS_OFF
	l	%r3,__LC_PGM_ILC	# load program interruption code
	la	%r8,0x7f
	nr	%r8,%r3			# %r8 = interruption code & 0x7f
pgm_do_call:
	l	%r7,BASED(.Ljump_table)
	sll	%r8,2
	l	%r7,0(%r8,%r7)		# load address of handler routine
	la	%r2,SP_PTREGS(%r15)	# address of register-save area
	la	%r14,BASED(sysc_return)
	br	%r7			# branch to interrupt-handler

#
# handle per exception
#
pgm_per:
	tm	__LC_PGM_OLD_PSW,0x40	# test if per event recording is on
	bnz	BASED(pgm_per_std)	# ok, normal per event from user space
# ok its one of the special cases, now we need to find out which one
	clc	__LC_PGM_OLD_PSW(8),__LC_SVC_NEW_PSW
	be	BASED(pgm_svcper)
# no interesting special case, ignore PER event
	lm	%r12,%r15,__LC_SAVE_AREA
	lpsw	0x28			# resume via pgm old PSW in lowcore

#
# Normal per exception
#
pgm_per_std:
	SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA
	CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
	bz	BASED(pgm_no_vtime2)
	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
pgm_no_vtime2:
#endif
	l	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
	TRACE_IRQS_OFF
	# record the PER event details in the thread structure
	l	%r1,__TI_task(%r9)
	mvc	__THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
	mvc	__THREAD_per+__PER_address(4,%r1),__LC_PER_ADDRESS
	mvc	__THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
	oi	__TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
	tm	SP_PSW+1(%r15),0x01	# kernel per event ?
	bz	BASED(kernel_per)
	l	%r3,__LC_PGM_ILC	# load program interruption code
	la	%r8,0x7f
	nr	%r8,%r3			# clear per-event-bit and ilc
	be	BASED(sysc_return)	# only per or per+check ?
	b	BASED(pgm_do_call)

#
# it was a single stepped SVC that is causing all the trouble
#
pgm_svcper:
	SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
	CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
#endif
	lh	%r7,0x8a		# get svc number from lowcore
	l	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
	TRACE_IRQS_OFF
	# record the PER event, then run the SVC as usual; the PER event
	# is delivered via TIF_SINGLE_STEP on the way out.
	l	%r1,__TI_task(%r9)
	mvc	__THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
	mvc	__THREAD_per+__PER_address(4,%r1),__LC_PER_ADDRESS
	mvc	__THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
	oi	__TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
	TRACE_IRQS_ON
	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
	b	BASED(sysc_do_svc)

#
# per was called from kernel, must be kprobes
#
kernel_per:
	mvi	SP_TRAP+1(%r15),0x28	# set trap indication to pgm check
	la	%r2,SP_PTREGS(%r15)	# address of register-save area
	l	%r1,BASED(.Lhandle_per)	# load adr. of per handler
	la	%r14,BASED(sysc_restore)# load adr. of system return
	br	%r1			# branch to do_single_step

/*
 * IO interrupt handler routine
 */

	.globl io_int_handler
io_int_handler:
	STORE_TIMER __LC_ASYNC_ENTER_TIMER
	stck	__LC_INT_CLOCK
	SAVE_ALL_BASE __LC_SAVE_AREA+16
	SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+16
	CREATE_STACK_FRAME __LC_IO_OLD_PSW,__LC_SAVE_AREA+16
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
	bz	BASED(io_no_vtime)
	UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
io_no_vtime:
#endif
	l	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
	TRACE_IRQS_OFF
	l	%r1,BASED(.Ldo_IRQ)	# load address of do_IRQ
	la	%r2,SP_PTREGS(%r15)	# address of register-save area
	basr	%r14,%r1		# branch to standard irq handler
io_return:
	tm	SP_PSW+1(%r15),0x01	# returning to user ?
#ifdef CONFIG_PREEMPT
	bno	BASED(io_preempt)	# no -> check for preemptive scheduling
#else
	bno	BASED(io_restore)	# no-> skip resched & signal
#endif
	tm	__TI_flags+3(%r9),_TIF_WORK_INT
	bnz	BASED(io_work)		# there is work to do (signals etc.)
io_restore:
#ifdef CONFIG_TRACE_IRQFLAGS
	# switch off machine checks via the prebuilt PSW before tracing
	la	%r1,BASED(io_restore_trace_psw)
	lpsw	0(%r1)
io_restore_trace:
	TRACE_IRQS_CHECK
	LOCKDEP_SYS_EXIT
#endif
io_leave:
	RESTORE_ALL __LC_RETURN_PSW,0
io_done:

#ifdef CONFIG_TRACE_IRQFLAGS
	.align	8
	.globl	io_restore_trace_psw
io_restore_trace_psw:
	.long	0, io_restore_trace + 0x80000000
#endif

#ifdef CONFIG_PREEMPT
io_preempt:
	icm	%r0,15,__TI_precount(%r9)
	bnz	BASED(io_restore)	# preemption disabled -> just leave
	# switch from async to the interrupted kernel stack
	l	%r1,SP_R15(%r15)
	s	%r1,BASED(.Lc_spsize)
	mvc	SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
	xc	__SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain
	lr	%r15,%r1
io_resume_loop:
	tm	__TI_flags+3(%r9),_TIF_NEED_RESCHED
	bno	BASED(io_restore)
	l	%r1,BASED(.Lpreempt_schedule_irq)
	la	%r14,BASED(io_resume_loop)
	br	%r1			# call schedule
#endif

#
# switch to kernel stack, then check the TIF bits
#
io_work:
	l	%r1,__LC_KERNEL_STACK
	s	%r1,BASED(.Lc_spsize)
	mvc	SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
	xc	__SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain
	lr	%r15,%r1
#
# One of the work bits is on. Find out which one.
# Checked are: _TIF_SIGPENDING, _TIF_RESTORE_SIGMASK, _TIF_NEED_RESCHED
#	       and _TIF_MCCK_PENDING
#
io_work_loop:
	tm	__TI_flags+3(%r9),_TIF_MCCK_PENDING
	bo	BASED(io_mcck_pending)
	tm	__TI_flags+3(%r9),_TIF_NEED_RESCHED
	bo	BASED(io_reschedule)
	tm	__TI_flags+3(%r9),(_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK)
	bnz	BASED(io_sigpending)
	b	BASED(io_restore)
io_work_done:

#
# _TIF_MCCK_PENDING is set, call handler
#
io_mcck_pending:
	l	%r1,BASED(.Ls390_handle_mcck)
	basr	%r14,%r1		# TIF bit will be cleared by handler
	b	BASED(io_work_loop)

#
# _TIF_NEED_RESCHED is set, call schedule
#
io_reschedule:
	TRACE_IRQS_ON
	l	%r1,BASED(.Lschedule)
	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
	basr	%r14,%r1		# call scheduler
	stnsm	__SF_EMPTY(%r15),0xfc	# disable I/O and ext. interrupts
	TRACE_IRQS_OFF
	tm	__TI_flags+3(%r9),_TIF_WORK_INT
	bz	BASED(io_restore)	# there is no work to do
	b	BASED(io_work_loop)

#
# _TIF_SIGPENDING or _TIF_RESTORE_SIGMASK is set, call do_signal
#
io_sigpending:
	TRACE_IRQS_ON
	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
	la	%r2,SP_PTREGS(%r15)	# load pt_regs
	l	%r1,BASED(.Ldo_signal)
	basr	%r14,%r1		# call do_signal
	stnsm	__SF_EMPTY(%r15),0xfc	# disable I/O and ext. interrupts
	TRACE_IRQS_OFF
	b	BASED(io_work_loop)

/*
 * External interrupt handler routine
 */

	.globl	ext_int_handler
ext_int_handler:
	STORE_TIMER __LC_ASYNC_ENTER_TIMER
	stck	__LC_INT_CLOCK
	SAVE_ALL_BASE __LC_SAVE_AREA+16
	SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16
	CREATE_STACK_FRAME __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
	bz	BASED(ext_no_vtime)
	UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
ext_no_vtime:
#endif
	l	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
	TRACE_IRQS_OFF
	la	%r2,SP_PTREGS(%r15)	# address of register-save area
	lh	%r3,__LC_EXT_INT_CODE	# get interruption code
	l	%r1,BASED(.Ldo_extint)
	basr	%r14,%r1
	b	BASED(io_return)	# shared exit path with I/O interrupts

__critical_end:

/*
 * Machine check handler routines
 */

	.globl mcck_int_handler
mcck_int_handler:
	spt	__LC_CPU_TIMER_SAVE_AREA	# revalidate cpu timer
	lm	%r0,%r15,__LC_GPREGS_SAVE_AREA	# revalidate gprs
	SAVE_ALL_BASE __LC_SAVE_AREA+32
	la	%r12,__LC_MCK_OLD_PSW
	tm	__LC_MCCK_CODE,0x80	# system damage?
	bo	BASED(mcck_int_main)	# yes -> rest of mcck code invalid
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	mvc	__LC_SAVE_AREA+52(8),__LC_ASYNC_ENTER_TIMER
	mvc	__LC_ASYNC_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA
	tm	__LC_MCCK_CODE+5,0x02	# stored cpu timer value valid?
	bo	BASED(1f)
	# cpu timer is invalid: pick the most recent of the known-good
	# timer snapshots as replacement
	la	%r14,__LC_SYNC_ENTER_TIMER
	clc	0(8,%r14),__LC_ASYNC_ENTER_TIMER
	bl	BASED(0f)
	la	%r14,__LC_ASYNC_ENTER_TIMER
0:	clc	0(8,%r14),__LC_EXIT_TIMER
	bl	BASED(0f)
	la	%r14,__LC_EXIT_TIMER
0:	clc	0(8,%r14),__LC_LAST_UPDATE_TIMER
	bl	BASED(0f)
	la	%r14,__LC_LAST_UPDATE_TIMER
0:	spt	0(%r14)
	mvc	__LC_ASYNC_ENTER_TIMER(8),0(%r14)
1:
#endif
	tm	__LC_MCCK_CODE+2,0x09	# mwp + ia of old psw valid?
	bno	BASED(mcck_int_main)	# no -> skip cleanup critical
	tm	__LC_MCK_OLD_PSW+1,0x01	# test problem state bit
	bnz	BASED(mcck_int_main)	# from user -> load async stack
	clc	__LC_MCK_OLD_PSW+4(4),BASED(.Lcritical_end)
	bhe	BASED(mcck_int_main)
	clc	__LC_MCK_OLD_PSW+4(4),BASED(.Lcritical_start)
	bl	BASED(mcck_int_main)
	l	%r14,BASED(.Lcleanup_critical)
	basr	%r14,%r14
mcck_int_main:
	l	%r14,__LC_PANIC_STACK	# are we already on the panic stack?
	slr	%r14,%r15
	sra	%r14,PAGE_SHIFT
	be	BASED(0f)
	l	%r15,__LC_PANIC_STACK	# load panic stack
0:	CREATE_STACK_FRAME __LC_MCK_OLD_PSW,__LC_SAVE_AREA+32
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	tm	__LC_MCCK_CODE+2,0x08	# mwp of old psw valid?
	bno	BASED(mcck_no_vtime)	# no -> skip cleanup critical
	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
	bz	BASED(mcck_no_vtime)
	UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
mcck_no_vtime:
#endif
	l	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
	la	%r2,SP_PTREGS(%r15)	# load pt_regs
	l	%r1,BASED(.Ls390_mcck)
	basr	%r14,%r1		# call machine check handler
	tm	SP_PSW+1(%r15),0x01	# returning to user ?
	bno	BASED(mcck_return)
	l	%r1,__LC_KERNEL_STACK	# switch to kernel stack
	s	%r1,BASED(.Lc_spsize)
	mvc	SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
	xc	__SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain
	lr	%r15,%r1
	stosm	__SF_EMPTY(%r15),0x04	# turn dat on
	tm	__TI_flags+3(%r9),_TIF_MCCK_PENDING
	bno	BASED(mcck_return)
	TRACE_IRQS_OFF
	l	%r1,BASED(.Ls390_handle_mcck)
	basr	%r14,%r1		# call machine check handler
	TRACE_IRQS_ON
mcck_return:
	mvc	__LC_RETURN_MCCK_PSW(8),SP_PSW(%r15) # move return PSW
	ni	__LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	mvc	__LC_ASYNC_ENTER_TIMER(8),__LC_SAVE_AREA+52
	tm	__LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
	bno	BASED(0f)
	lm	%r0,%r15,SP_R0(%r15)	# load gprs 0-15
	stpt	__LC_EXIT_TIMER
	lpsw	__LC_RETURN_MCCK_PSW	# back to caller
0:
#endif
	lm	%r0,%r15,SP_R0(%r15)	# load gprs 0-15
	lpsw	__LC_RETURN_MCCK_PSW	# back to caller

	# NOTE(review): not reached -- the lpsw above leaves the handler;
	# this looks like a leftover of the pre-macro exit path.
	RESTORE_ALL __LC_RETURN_MCCK_PSW,0

/*
 * Restart interruption handler, kick starter for additional CPUs
 */
#ifdef CONFIG_SMP
#ifndef CONFIG_HOTPLUG_CPU
	.section .init.text,"ax"
#endif
	.globl restart_int_handler
restart_int_handler:
	l	%r15,__LC_SAVE_AREA+60	# load ksp
	lctl	%c0,%c15,__LC_CREGS_SAVE_AREA # get new ctl regs
	lam	%a0,%a15,__LC_AREGS_SAVE_AREA
	lm	%r6,%r15,__SF_GPRS(%r15) # load registers from clone
	stosm	__SF_EMPTY(%r15),0x04	# now we can turn dat on
	basr	%r14,0
	l	%r14,restart_addr-.(%r14)
	br	%r14			# branch to start_secondary
restart_addr:
	.long	start_secondary
#ifndef CONFIG_HOTPLUG_CPU
	.previous
#endif
#else
/*
 * If we do not run with SMP enabled, let the new CPU crash ...
 */
	.globl restart_int_handler
restart_int_handler:
	basr	%r1,0
restart_base:
	lpsw	restart_crash-restart_base(%r1)
	.align	8
restart_crash:
	.long	0x000a0000,0x00000000	# disabled-wait PSW, code 0x000a0000
restart_go:
#endif

#ifdef CONFIG_CHECK_STACK
/*
 * The synchronous or the asynchronous stack overflowed. We are dead.
 * No need to properly save the registers, we are going to panic anyway.
 * Setup a pt_regs so that show_trace can provide a good call trace.
 */
stack_overflow:
	l	%r15,__LC_PANIC_STACK	# change to panic stack
	sl	%r15,BASED(.Lc_spsize)
	mvc	SP_PSW(8,%r15),0(%r12)	# move user PSW to stack
	stm	%r0,%r11,SP_R0(%r15)	# store gprs %r0-%r11 to kernel stack
	la	%r1,__LC_SAVE_AREA
	ch	%r12,BASED(.L0x020)	# old psw addr == __LC_SVC_OLD_PSW ?
	be	BASED(0f)
	ch	%r12,BASED(.L0x028)	# old psw addr == __LC_PGM_OLD_PSW ?
	be	BASED(0f)
	la	%r1,__LC_SAVE_AREA+16	# async entry used save area +16
0:	mvc	SP_R12(16,%r15),0(%r1)	# move %r12-%r15 to stack
	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear back chain
	l	%r1,BASED(1f)		# branch to kernel_stack_overflow
	la	%r2,SP_PTREGS(%r15)	# load pt_regs
	br	%r1
1:	.long	kernel_stack_overflow
#endif

# Each table gives the [start, end) address range of one section of the
# critical region; cleanup_critical uses them to decide which fixup
# routine has to roll the interrupted state forward or backward.
cleanup_table_system_call:
	.long	system_call + 0x80000000, sysc_do_svc + 0x80000000
cleanup_table_sysc_return:
	.long	sysc_return + 0x80000000, sysc_leave + 0x80000000
cleanup_table_sysc_leave:
	.long	sysc_leave + 0x80000000, sysc_done + 0x80000000
cleanup_table_sysc_work_loop:
	.long	sysc_work_loop + 0x80000000, sysc_work_done + 0x80000000
cleanup_table_io_return:
	.long	io_return + 0x80000000, io_leave + 0x80000000
cleanup_table_io_leave:
	.long	io_leave + 0x80000000, io_done + 0x80000000
cleanup_table_io_work_loop:
	.long	io_work_loop + 0x80000000, io_work_done + 0x80000000

# Dispatch on the interrupted address (old PSW at 4(%r12)) to the
# matching cleanup routine. Called with %r14 = return address.
cleanup_critical:
	clc	4(4,%r12),BASED(cleanup_table_system_call)
	bl	BASED(0f)
	clc	4(4,%r12),BASED(cleanup_table_system_call+4)
	bl	BASED(cleanup_system_call)
0:
	clc	4(4,%r12),BASED(cleanup_table_sysc_return)
	bl	BASED(0f)
	clc	4(4,%r12),BASED(cleanup_table_sysc_return+4)
	bl	BASED(cleanup_sysc_return)
0:
	clc	4(4,%r12),BASED(cleanup_table_sysc_leave)
	bl	BASED(0f)
	clc	4(4,%r12),BASED(cleanup_table_sysc_leave+4)
	bl	BASED(cleanup_sysc_leave)
0:
	clc	4(4,%r12),BASED(cleanup_table_sysc_work_loop)
	bl	BASED(0f)
	clc	4(4,%r12),BASED(cleanup_table_sysc_work_loop+4)
	bl	BASED(cleanup_sysc_return)
0:
	clc	4(4,%r12),BASED(cleanup_table_io_return)
	bl	BASED(0f)
	clc	4(4,%r12),BASED(cleanup_table_io_return+4)
	bl	BASED(cleanup_io_return)
0:
	clc	4(4,%r12),BASED(cleanup_table_io_leave)
	bl	BASED(0f)
	clc	4(4,%r12),BASED(cleanup_table_io_leave+4)
	bl	BASED(cleanup_io_leave)
0:
	clc	4(4,%r12),BASED(cleanup_table_io_work_loop)
	bl	BASED(0f)
	clc	4(4,%r12),BASED(cleanup_table_io_work_loop+4)
	bl	BASED(cleanup_io_return)
0:
	br	%r14

# Interrupted between system_call and sysc_do_svc: redo the entry
# sequence up to (but excluding) the point already reached, then point
# the return PSW at sysc_do_svc.
cleanup_system_call:
	mvc	__LC_RETURN_PSW(8),0(%r12)
	c	%r12,BASED(.Lmck_old_psw)
	be	BASED(0f)
	la	%r12,__LC_SAVE_AREA+16
	b	BASED(1f)
0:	la	%r12,__LC_SAVE_AREA+32
1:
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	clc	__LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+4)
	bh	BASED(0f)
	mvc	__LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
0:	clc	__LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+8)
	bhe	BASED(cleanup_vtime)
#endif
	clc	__LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn)
	bh	BASED(0f)
	mvc	__LC_SAVE_AREA(16),0(%r12)
0:	st	%r13,4(%r12)
	st	%r12,__LC_SAVE_AREA+48	# argh
	SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
	CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA
	l	%r12,__LC_SAVE_AREA+48	# argh
	st	%r15,12(%r12)
	lh	%r7,0x8a
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
cleanup_vtime:
	clc	__LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+12)
	bhe	BASED(cleanup_stime)
	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
cleanup_stime:
	clc	__LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+16)
	bh	BASED(cleanup_update)
	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
cleanup_update:
	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
#endif
	mvc	__LC_RETURN_PSW+4(4),BASED(cleanup_table_system_call+4)
	la	%r12,__LC_RETURN_PSW
	br	%r14
cleanup_system_call_insn:
	.long	sysc_saveall + 0x80000000
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	.long	system_call + 0x80000000
	.long	sysc_vtime + 0x80000000
	.long	sysc_stime + 0x80000000
	.long	sysc_update + 0x80000000
#endif

# Interrupted in the work-check: restart it from sysc_return.
cleanup_sysc_return:
	mvc	__LC_RETURN_PSW(4),0(%r12)
	mvc	__LC_RETURN_PSW+4(4),BASED(cleanup_table_sysc_return)
	la	%r12,__LC_RETURN_PSW
	br	%r14

# Interrupted mid RESTORE_ALL: if the registers were not yet reloaded,
# replay the register restore so the interrupt exits as if it happened
# just before sysc_leave completed.
cleanup_sysc_leave:
	clc	4(4,%r12),BASED(cleanup_sysc_leave_insn)
	be	BASED(2f)
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	mvc	__LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
	clc	4(4,%r12),BASED(cleanup_sysc_leave_insn+4)
	be	BASED(2f)
#endif
	mvc	__LC_RETURN_PSW(8),SP_PSW(%r15)
	c	%r12,BASED(.Lmck_old_psw)
	bne	BASED(0f)
	mvc	__LC_SAVE_AREA+32(16),SP_R12(%r15)
	b	BASED(1f)
0:	mvc	__LC_SAVE_AREA+16(16),SP_R12(%r15)
1:	lm	%r0,%r11,SP_R0(%r15)
	l	%r15,SP_R15(%r15)
2:	la	%r12,__LC_RETURN_PSW
	br	%r14
cleanup_sysc_leave_insn:
	.long	sysc_done - 4 + 0x80000000
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	.long	sysc_done - 8 + 0x80000000
#endif

# Interrupted in the io work-check: restart it from io_work_loop.
cleanup_io_return:
	mvc	__LC_RETURN_PSW(4),0(%r12)
	mvc	__LC_RETURN_PSW+4(4),BASED(cleanup_table_io_work_loop)
	la	%r12,__LC_RETURN_PSW
	br	%r14

# Same as cleanup_sysc_leave, for the io exit path.
cleanup_io_leave:
	clc	4(4,%r12),BASED(cleanup_io_leave_insn)
	be	BASED(2f)
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	mvc	__LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
	clc	4(4,%r12),BASED(cleanup_io_leave_insn+4)
	be	BASED(2f)
#endif
	mvc	__LC_RETURN_PSW(8),SP_PSW(%r15)
	c	%r12,BASED(.Lmck_old_psw)
	bne	BASED(0f)
	mvc	__LC_SAVE_AREA+32(16),SP_R12(%r15)
	b	BASED(1f)
0:	mvc	__LC_SAVE_AREA+16(16),SP_R12(%r15)
1:	lm	%r0,%r11,SP_R0(%r15)
	l	%r15,SP_R15(%r15)
2:	la	%r12,__LC_RETURN_PSW
	br	%r14
cleanup_io_leave_insn:
	.long	io_done - 4 + 0x80000000
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	.long	io_done - 8 + 0x80000000
#endif

/*
 * Integer constants
 */
		.align	4
.Lc_spsize:	.long	SP_SIZE
.Lc_overhead:	.long	STACK_FRAME_OVERHEAD
.Lnr_syscalls:	.long	NR_syscalls
.L0x018:	.short	0x018
.L0x020:	.short	0x020
.L0x028:	.short	0x028
.L0x030:	.short	0x030
.L0x038:	.short	0x038
.Lc_1:		.long	1

/*
 * Symbol constants (literal pool, addressed via BASED)
 */
.Ls390_mcck:	.long	s390_do_machine_check
.Ls390_handle_mcck:
		.long	s390_handle_mcck
.Lmck_old_psw:	.long	__LC_MCK_OLD_PSW
.Ldo_IRQ:	.long	do_IRQ
.Ldo_extint:	.long	do_extint
.Ldo_signal:	.long	do_signal
.Lhandle_per:	.long	do_single_step
.Ldo_execve:	.long	do_execve
.Lexecve_tail:	.long	execve_tail
.Ljump_table:	.long	pgm_check_table
.Lschedule:	.long	schedule
#ifdef CONFIG_PREEMPT
.Lpreempt_schedule_irq:
		.long	preempt_schedule_irq
#endif
.Ltrace:	.long	syscall_trace
.Lschedtail:	.long	schedule_tail
.Lsysc_table:	.long	sys_call_table
#ifdef CONFIG_TRACE_IRQFLAGS
.Ltrace_irq_on: .long	trace_hardirqs_on
.Ltrace_irq_off:
		.long	trace_hardirqs_off
.Llockdep_sys_exit:
		.long	lockdep_sys_exit
#endif
.Lcritical_start:
		.long	__critical_start + 0x80000000
.Lcritical_end:
		.long	__critical_end + 0x80000000
.Lcleanup_critical:
		.long	cleanup_critical

		.section .rodata, "a"
#define SYSCALL(esa,esame,emu)	.long esa
sys_call_table:
#include "syscalls.S"
#undef SYSCALL