/*
 *  linux/arch/arm/kernel/entry-common.S
 *
 *  Copyright (C) 2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <asm/unistd.h>
#include <asm/ftrace.h>
#include <asm/unwind.h>

#ifdef CONFIG_NEED_RET_TO_USER
#include <mach/entry-macro.S>
#else
	.macro	arch_ret_to_user, tmp1, tmp2
	.endm
#endif

#include "entry-header.S"


	.align	5
/*
 * This is the fast syscall return path.  We do as little as
 * possible here, and this includes saving r0 back into the SVC
 * stack.
 */
ret_fast_syscall:
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	disable_irq				@ disable interrupts
	ldr	r1, [tsk, #TI_FLAGS]
	tst	r1, #_TIF_WORK_MASK
	bne	fast_work_pending
#if defined(CONFIG_IRQSOFF_TRACER)
	asm_trace_hardirqs_on
#endif

	/* perform architecture specific actions before user return */
	arch_ret_to_user r1, lr

	restore_user_regs fast = 1, offset = S_OFF
 UNWIND(.fnend		)

/*
 * Ok, we need to do extra processing, enter the slow path.
 */
fast_work_pending:
	str	r0, [sp, #S_R0+S_OFF]!		@ returned r0
work_pending:
	tst	r1, #_TIF_NEED_RESCHED
	bne	work_resched
	tst	r1, #_TIF_SIGPENDING|_TIF_NOTIFY_RESUME
	beq	no_work_pending
	mov	r0, sp				@ 'regs'
	mov	r2, why				@ 'syscall'
	tst	r1, #_TIF_SIGPENDING		@ delivering a signal?
	movne	why, #0				@ prevent further restarts
	bl	do_notify_resume
	b	ret_slow_syscall		@ Check work again

work_resched:
	bl	schedule
/*
 * "slow" syscall return path.  "why" tells us if this was a real syscall.
 */
ENTRY(ret_to_user)
ret_slow_syscall:
	disable_irq				@ disable interrupts
ENTRY(ret_to_user_from_irq)
	ldr	r1, [tsk, #TI_FLAGS]
	tst	r1, #_TIF_WORK_MASK
	bne	work_pending
no_work_pending:
#if defined(CONFIG_IRQSOFF_TRACER)
	asm_trace_hardirqs_on
#endif
	/* perform architecture specific actions before user return */
	arch_ret_to_user r1, lr

	restore_user_regs fast = 0, offset = 0
ENDPROC(ret_to_user_from_irq)
ENDPROC(ret_to_user)

/*
 * This is how we return from a fork.
 */
ENTRY(ret_from_fork)
	bl	schedule_tail
	get_thread_info tsk
	ldr	r1, [tsk, #TI_FLAGS]		@ check for syscall tracing
	mov	why, #1
	tst	r1, #_TIF_SYSCALL_WORK		@ are we tracing syscalls?
	beq	ret_slow_syscall
	mov	r1, sp
	mov	r0, #1				@ trace exit [IP = 1]
	bl	syscall_trace
	b	ret_slow_syscall
ENDPROC(ret_from_fork)

	.equ NR_syscalls,0
#define CALL(x) .equ NR_syscalls,NR_syscalls+1
#include "calls.S"
#undef CALL
#define CALL(x) .long x

#ifdef CONFIG_FUNCTION_TRACER
/*
 * When compiling with -pg, gcc inserts a call to the mcount routine at the
 * start of every function.  In mcount, apart from the function's address (in
 * lr), we need to get hold of the function's caller's address.
 *
 * Older GCCs (pre-4.4) inserted a call to a routine called mcount like this:
 *
 *	bl	mcount
 *
 * These versions have the limitation that in order for the mcount routine to
 * be able to determine the function's caller's address, an APCS-style frame
 * pointer (which is set up with something like the code below) is required.
 *
 *	mov	ip, sp
 *	push	{fp, ip, lr, pc}
 *	sub	fp, ip, #4
 *
 * With EABI, these frame pointers are not available unless -mapcs-frame is
 * specified, and if building as Thumb-2, not even then.
 *
 * Newer GCCs (4.4+) solve this problem by introducing a new version of mcount,
 * with call sites like:
 *
 *	push	{lr}
 *	bl	__gnu_mcount_nc
 *
 * With these compilers, frame pointers are not necessary.
 *
 * mcount can be thought of as a function called in the middle of a subroutine
 * call.  As such, it needs to be transparent for both the caller and the
 * callee: the original lr needs to be restored when leaving mcount, and no
 * registers should be clobbered.  (In the __gnu_mcount_nc implementation, we
 * clobber the ip register.  This is OK because the ARM calling convention
 * allows it to be clobbered in subroutines and doesn't use it to hold
 * parameters.)
 *
 * When using dynamic ftrace, we patch out the mcount call by a "mov r0, r0"
 * for the mcount case, and a "pop {lr}" for the __gnu_mcount_nc case (see
 * arch/arm/kernel/ftrace.c).
 */

#ifndef CONFIG_OLD_MCOUNT
#if (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 4))
#error Ftrace requires CONFIG_FRAME_POINTER=y with GCC older than 4.4.0.
#endif
#endif

.macro mcount_adjust_addr rd, rn
	bic	\rd, \rn, #1			@ clear the Thumb bit if present
	sub	\rd, \rd, #MCOUNT_INSN_SIZE
.endm

.macro __mcount suffix
	mcount_enter
	ldr	r0, =ftrace_trace_function
	ldr	r2, [r0]
	adr	r0, .Lftrace_stub
	cmp	r0, r2
	bne	1f

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	ldr	r1, =ftrace_graph_return
	ldr	r2, [r1]
	cmp	r0, r2
	bne	ftrace_graph_caller\suffix

	ldr	r1, =ftrace_graph_entry
	ldr	r2, [r1]
	ldr	r0, =ftrace_graph_entry_stub
	cmp	r0, r2
	bne	ftrace_graph_caller\suffix
#endif

	mcount_exit

1:	mcount_get_lr	r1			@ lr of instrumented func
	mcount_adjust_addr	r0, lr		@ instrumented function
	adr	lr, BSYM(2f)
	mov	pc, r2
2:	mcount_exit
.endm

.macro __ftrace_caller suffix
	mcount_enter

	mcount_get_lr	r1			@ lr of instrumented func
	mcount_adjust_addr	r0, lr		@ instrumented function

	.globl ftrace_call\suffix
ftrace_call\suffix:
	bl	ftrace_stub

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	.globl ftrace_graph_call\suffix
ftrace_graph_call\suffix:
	mov	r0, r0
#endif

	mcount_exit
.endm

.macro __ftrace_graph_caller
	sub	r0, fp, #4		@ &lr of instrumented routine (&parent)
#ifdef CONFIG_DYNAMIC_FTRACE
	@ called from __ftrace_caller, saved in mcount_enter
	ldr	r1, [sp, #16]		@ instrumented routine (func)
	mcount_adjust_addr	r1, r1
#else
	@ called from __mcount, untouched in lr
	mcount_adjust_addr	r1, lr	@ instrumented routine (func)
#endif
	mov	r2, fp			@ frame pointer
	bl	prepare_ftrace_return
	mcount_exit
.endm

#ifdef CONFIG_OLD_MCOUNT
/*
 * mcount
 */

.macro mcount_enter
	stmdb	sp!, {r0-r3, lr}
.endm

.macro mcount_get_lr reg
	ldr	\reg, [fp, #-4]
.endm

.macro mcount_exit
	ldr	lr, [fp, #-4]
	ldmia	sp!, {r0-r3, pc}
.endm

ENTRY(mcount)
#ifdef CONFIG_DYNAMIC_FTRACE
	stmdb	sp!, {lr}
	ldr	lr, [fp, #-4]
	ldmia	sp!, {pc}
#else
	__mcount _old
#endif
ENDPROC(mcount)

#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(ftrace_caller_old)
	__ftrace_caller _old
ENDPROC(ftrace_caller_old)
#endif
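
/*
 * Note: the old-ABI graph-tracer entry below reuses the same
 * __ftrace_graph_caller body as the EABI ftrace_graph_caller further down;
 * only the mcount_enter/mcount_get_lr/mcount_exit helpers differ.  The old
 * ABI reads the caller's lr from the APCS frame at [fp, #-4], while
 * __gnu_mcount_nc reads the lr word its call site pushed (at [sp, #20]
 * once mcount_enter has stacked r0-r3 and lr).
 */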

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller_old)
	__ftrace_graph_caller
ENDPROC(ftrace_graph_caller_old)
#endif

.purgem mcount_enter
.purgem mcount_get_lr
.purgem mcount_exit
#endif

/*
 * __gnu_mcount_nc
 */

.macro mcount_enter
	stmdb	sp!, {r0-r3, lr}
.endm

.macro mcount_get_lr reg
	ldr	\reg, [sp, #20]
.endm

.macro mcount_exit
	ldmia	sp!, {r0-r3, ip, lr}
	mov	pc, ip
.endm

ENTRY(__gnu_mcount_nc)
#ifdef CONFIG_DYNAMIC_FTRACE
	mov	ip, lr
	ldmia	sp!, {lr}
	mov	pc, ip
#else
	__mcount
#endif
ENDPROC(__gnu_mcount_nc)

#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(ftrace_caller)
	__ftrace_caller
ENDPROC(ftrace_caller)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
	__ftrace_graph_caller
ENDPROC(ftrace_graph_caller)
#endif

.purgem mcount_enter
.purgem mcount_get_lr
.purgem mcount_exit

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	.globl return_to_handler
return_to_handler:
	stmdb	sp!, {r0-r3}
	mov	r0, fp			@ frame pointer
	bl	ftrace_return_to_handler
	mov	lr, r0			@ r0 has real ret addr
	ldmia	sp!, {r0-r3}
	mov	pc, lr
#endif

ENTRY(ftrace_stub)
.Lftrace_stub:
	mov	pc, lr
ENDPROC(ftrace_stub)

#endif /* CONFIG_FUNCTION_TRACER */

/*=============================================================================
 * SWI handler
 *-----------------------------------------------------------------------------
 */

	/* If we're optimising for StrongARM the resulting code won't
	   run on an ARM7 and we can save a couple of instructions.
				--pb */
#ifdef CONFIG_CPU_ARM710
#define A710(code...) code
.Larm710bug:
	ldmia	sp, {r0 - lr}^			@ Get calling r0 - lr
	mov	r0, r0
	add	sp, sp, #S_FRAME_SIZE
	subs	pc, lr, #4
#else
#define A710(code...)
#endif

	.align	5
ENTRY(vector_swi)
	sub	sp, sp, #S_FRAME_SIZE
	stmia	sp, {r0 - r12}			@ Calling r0 - r12
 ARM(	add	r8, sp, #S_PC		)
 ARM(	stmdb	r8, {sp, lr}^		)	@ Calling sp, lr
 THUMB(	mov	r8, sp			)
 THUMB(	store_user_sp_lr r8, r10, S_SP	)	@ calling sp, lr
	mrs	r8, spsr			@ called from non-FIQ mode, so ok.
	str	lr, [sp, #S_PC]			@ Save calling PC
	str	r8, [sp, #S_PSR]		@ Save CPSR
	str	r0, [sp, #S_OLD_R0]		@ Save OLD_R0
	zero_fp

	/*
	 * Get the system call number.
	 */

#if defined(CONFIG_OABI_COMPAT)

	/*
	 * If we have CONFIG_OABI_COMPAT then we need to look at the swi
	 * value to determine if it is an EABI or an old ABI call.
	 */
#ifdef CONFIG_ARM_THUMB
	tst	r8, #PSR_T_BIT
	movne	r10, #0				@ no thumb OABI emulation
	ldreq	r10, [lr, #-4]			@ get SWI instruction
#else
	ldr	r10, [lr, #-4]			@ get SWI instruction
  A710(	and	ip, r10, #0x0f000000		@ check for SWI		)
  A710(	teq	ip, #0x0f000000						)
  A710(	bne	.Larm710bug						)
#endif
#ifdef CONFIG_CPU_ENDIAN_BE8
	rev	r10, r10			@ little endian instruction
#endif

#elif defined(CONFIG_AEABI)

	/*
	 * Pure EABI user space always puts the syscall number into scno (r7).
	 */
  A710(	ldr	ip, [lr, #-4]			@ get SWI instruction	)
  A710(	and	ip, ip, #0x0f000000		@ check for SWI		)
  A710(	teq	ip, #0x0f000000						)
  A710(	bne	.Larm710bug						)

#elif defined(CONFIG_ARM_THUMB)

	/* Legacy ABI only, possibly thumb mode. */
	tst	r8, #PSR_T_BIT			@ this is SPSR from save_user_regs
	addne	scno, r7, #__NR_SYSCALL_BASE	@ put OS number in
	ldreq	scno, [lr, #-4]

#else

	/* Legacy ABI only. */
	ldr	scno, [lr, #-4]			@ get SWI instruction
  A710(	and	ip, scno, #0x0f000000		@ check for SWI		)
  A710(	teq	ip, #0x0f000000						)
  A710(	bne	.Larm710bug						)

#endif

#ifdef CONFIG_ALIGNMENT_TRAP
	ldr	ip, __cr_alignment
	ldr	ip, [ip]
	mcr	p15, 0, ip, c1, c0		@ update control register
#endif
	enable_irq

	get_thread_info tsk
	adr	tbl, sys_call_table		@ load syscall table pointer

#if defined(CONFIG_OABI_COMPAT)
	/*
	 * If the swi argument is zero, this is an EABI call and we do nothing.
	 *
	 * If this is an old ABI call, get the syscall number into scno and
	 * get the old ABI syscall table address.
	 */
	bics	r10, r10, #0xff000000
	eorne	scno, r10, #__NR_OABI_SYSCALL_BASE
	ldrne	tbl, =sys_oabi_call_table
#elif !defined(CONFIG_AEABI)
	bic	scno, scno, #0xff000000		@ mask off SWI op-code
	eor	scno, scno, #__NR_SYSCALL_BASE	@ check OS number
#endif

	ldr	r10, [tsk, #TI_FLAGS]		@ check for syscall tracing
	stmdb	sp!, {r4, r5}			@ push fifth and sixth args

#ifdef CONFIG_SECCOMP
	tst	r10, #_TIF_SECCOMP
	beq	1f
	mov	r0, scno
	bl	__secure_computing
	add	r0, sp, #S_R0 + S_OFF		@ pointer to regs
	ldmia	r0, {r0 - r3}			@ have to reload r0 - r3
1:
#endif

	tst	r10, #_TIF_SYSCALL_WORK		@ are we tracing syscalls?
	bne	__sys_trace

	cmp	scno, #NR_syscalls		@ check upper syscall limit
	adr	lr, BSYM(ret_fast_syscall)	@ return address
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine

	add	r1, sp, #S_OFF
2:	mov	why, #0				@ no longer a real syscall
	cmp	scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
	eor	r0, scno, #__NR_SYSCALL_BASE	@ put OS number back
	bcs	arm_syscall
	b	sys_ni_syscall			@ not private func
ENDPROC(vector_swi)

	/*
	 * This is the really slow path.  We're going to be doing
	 * context switches, and waiting for our parent to respond.
	 */
__sys_trace:
	mov	r2, scno
	add	r1, sp, #S_OFF
	mov	r0, #0				@ trace entry [IP = 0]
	bl	syscall_trace

	adr	lr, BSYM(__sys_trace_return)	@ return address
	mov	scno, r0			@ syscall number (possibly new)
	add	r1, sp, #S_R0 + S_OFF		@ pointer to regs
	cmp	scno, #NR_syscalls		@ check upper syscall limit
	ldmccia	r1, {r0 - r3}			@ have to reload r0 - r3
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine
	b	2b

__sys_trace_return:
	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
	mov	r2, scno
	mov	r1, sp
	mov	r0, #1				@ trace exit [IP = 1]
	bl	syscall_trace
	b	ret_slow_syscall

	.align	5
#ifdef CONFIG_ALIGNMENT_TRAP
	.type	__cr_alignment, #object
__cr_alignment:
	.word	cr_alignment
#endif
	.ltorg

/*
 * This is the syscall table declaration for native ABI syscalls.
 * With EABI a couple of syscalls are obsolete and defined as sys_ni_syscall.
 */
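/*
 * For illustration only (the real entries live in calls.S and may differ):
 * with the macros below, a hypothetical entry such as
 *
 *	CALL(OBSOLETE(ABI(sys_time, sys_oabi_time)))
 *
 * assembles to ".long sys_time" in this table on a legacy-ABI kernel,
 * to ".long sys_ni_syscall" when CONFIG_AEABI marks it obsolete, and to
 * ".long sys_oabi_time" in the sys_oabi_call_table declared further down,
 * where ABI() selects the compat entry instead.
 */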
#define ABI(native, compat) native
#ifdef CONFIG_AEABI
#define OBSOLETE(syscall) sys_ni_syscall
#else
#define OBSOLETE(syscall) syscall
#endif

	.type	sys_call_table, #object
ENTRY(sys_call_table)
#include "calls.S"
#undef ABI
#undef OBSOLETE

/*============================================================================
 * Special system call wrappers
 */
@ r0 = syscall number
@ r8 = syscall table
sys_syscall:
	bic	scno, r0, #__NR_OABI_SYSCALL_BASE
	cmp	scno, #__NR_syscall - __NR_SYSCALL_BASE
	cmpne	scno, #NR_syscalls		@ check range
	stmloia	sp, {r5, r6}			@ shuffle args
	movlo	r0, r1
	movlo	r1, r2
	movlo	r2, r3
	movlo	r3, r4
	ldrlo	pc, [tbl, scno, lsl #2]
	b	sys_ni_syscall
ENDPROC(sys_syscall)

sys_fork_wrapper:
	add	r0, sp, #S_OFF
	b	sys_fork
ENDPROC(sys_fork_wrapper)

sys_vfork_wrapper:
	add	r0, sp, #S_OFF
	b	sys_vfork
ENDPROC(sys_vfork_wrapper)

sys_execve_wrapper:
	add	r3, sp, #S_OFF
	b	sys_execve
ENDPROC(sys_execve_wrapper)

sys_clone_wrapper:
	add	ip, sp, #S_OFF
	str	ip, [sp, #4]
	b	sys_clone
ENDPROC(sys_clone_wrapper)

sys_sigreturn_wrapper:
	add	r0, sp, #S_OFF
	mov	why, #0		@ prevent syscall restart handling
	b	sys_sigreturn
ENDPROC(sys_sigreturn_wrapper)

sys_rt_sigreturn_wrapper:
	add	r0, sp, #S_OFF
	mov	why, #0		@ prevent syscall restart handling
	b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)

sys_sigaltstack_wrapper:
	ldr	r2, [sp, #S_OFF + S_SP]
	b	do_sigaltstack
ENDPROC(sys_sigaltstack_wrapper)

sys_statfs64_wrapper:
	teq	r1, #88
	moveq	r1, #84
	b	sys_statfs64
ENDPROC(sys_statfs64_wrapper)

sys_fstatfs64_wrapper:
	teq	r1, #88
	moveq	r1, #84
	b	sys_fstatfs64
ENDPROC(sys_fstatfs64_wrapper)

/*
 * Note: off_4k (r5) is always in units of 4K.  If we can't do the requested
 * offset, we return EINVAL.
 */
sys_mmap2:
#if PAGE_SHIFT > 12
	tst	r5, #PGOFF_MASK
	moveq	r5, r5, lsr #PAGE_SHIFT - 12
	streq	r5, [sp, #4]
	beq	sys_mmap_pgoff
	mov	r0, #-EINVAL
	mov	pc, lr
#else
	str	r5, [sp, #4]
	b	sys_mmap_pgoff
#endif
ENDPROC(sys_mmap2)

#ifdef CONFIG_OABI_COMPAT

/*
 * These are syscalls with argument register differences
 */

sys_oabi_pread64:
	stmia	sp, {r3, r4}
	b	sys_pread64
ENDPROC(sys_oabi_pread64)

sys_oabi_pwrite64:
	stmia	sp, {r3, r4}
	b	sys_pwrite64
ENDPROC(sys_oabi_pwrite64)

sys_oabi_truncate64:
	mov	r3, r2
	mov	r2, r1
	b	sys_truncate64
ENDPROC(sys_oabi_truncate64)

sys_oabi_ftruncate64:
	mov	r3, r2
	mov	r2, r1
	b	sys_ftruncate64
ENDPROC(sys_oabi_ftruncate64)

sys_oabi_readahead:
	str	r3, [sp]
	mov	r3, r2
	mov	r2, r1
	b	sys_readahead
ENDPROC(sys_oabi_readahead)

/*
 * Let's declare a second syscall table for old ABI binaries
 * using the compatibility syscall entries.
 */
#define ABI(native, compat) compat
#define OBSOLETE(syscall) syscall

	.type	sys_oabi_call_table, #object
ENTRY(sys_oabi_call_table)
#include "calls.S"
#undef ABI
#undef OBSOLETE

#endif