/*
 *  linux/arch/arm/kernel/entry-common.S
 *
 *  Copyright (C) 2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <asm/unistd.h>
#include <asm/ftrace.h>
#include <mach/entry-macro.S>
#include <asm/unwind.h>

#include "entry-header.S"


        .align  5
/*
 * This is the fast syscall return path.  We do as little as
 * possible here, and this includes saving r0 back into the SVC
 * stack.
 */
ret_fast_syscall:
 UNWIND(.fnstart        )
 UNWIND(.cantunwind     )
        disable_irq                             @ disable interrupts
        ldr     r1, [tsk, #TI_FLAGS]
        tst     r1, #_TIF_WORK_MASK
        bne     fast_work_pending
#if defined(CONFIG_IRQSOFF_TRACER)
        asm_trace_hardirqs_on
#endif

        /* perform architecture specific actions before user return */
        arch_ret_to_user r1, lr

        restore_user_regs fast = 1, offset = S_OFF
 UNWIND(.fnend          )

/*
 * Ok, we need to do extra processing, enter the slow path.
 */
fast_work_pending:
        str     r0, [sp, #S_R0+S_OFF]!          @ returned r0
work_pending:
        tst     r1, #_TIF_NEED_RESCHED
        bne     work_resched
        tst     r1, #_TIF_SIGPENDING|_TIF_NOTIFY_RESUME
        beq     no_work_pending
        mov     r0, sp                          @ 'regs'
        mov     r2, why                         @ 'syscall'
        tst     r1, #_TIF_SIGPENDING            @ delivering a signal?
        movne   why, #0                         @ prevent further restarts
        bl      do_notify_resume
        b       ret_slow_syscall                @ Check work again

work_resched:
        bl      schedule
/*
 * "slow" syscall return path.  "why" tells us if this was a real syscall.
 */
ENTRY(ret_to_user)
ret_slow_syscall:
        disable_irq                             @ disable interrupts
ENTRY(ret_to_user_from_irq)
        ldr     r1, [tsk, #TI_FLAGS]
        tst     r1, #_TIF_WORK_MASK
        bne     work_pending
no_work_pending:
#if defined(CONFIG_IRQSOFF_TRACER)
        asm_trace_hardirqs_on
#endif
        /* perform architecture specific actions before user return */
        arch_ret_to_user r1, lr

        restore_user_regs fast = 0, offset = 0
ENDPROC(ret_to_user_from_irq)
ENDPROC(ret_to_user)

/*
 * This is how we return from a fork.
 */
ENTRY(ret_from_fork)
        bl      schedule_tail
        get_thread_info tsk
        ldr     r1, [tsk, #TI_FLAGS]            @ check for syscall tracing
        mov     why, #1
        tst     r1, #_TIF_SYSCALL_WORK          @ are we tracing syscalls?
        beq     ret_slow_syscall
        mov     r1, sp
        mov     r0, #1                          @ trace exit [IP = 1]
        bl      syscall_trace
        b       ret_slow_syscall
ENDPROC(ret_from_fork)

        .equ NR_syscalls,0
#define CALL(x) .equ NR_syscalls,NR_syscalls+1
#include "calls.S"
#undef CALL
#define CALL(x) .long x
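
/*
 * Illustration of the CALL() trick above, assuming a typical calls.S entry
 * of the form "CALL(sys_exit)": during this first #include every entry
 * expands to ".equ NR_syscalls,NR_syscalls+1", so NR_syscalls ends up equal
 * to the number of table entries.  CALL() is then redefined to ".long x",
 * and the same file is included again below (for sys_call_table and, with
 * CONFIG_OABI_COMPAT, sys_oabi_call_table) to emit the actual table words.
 */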

#ifdef CONFIG_FUNCTION_TRACER
/*
 * When compiling with -pg, gcc inserts a call to the mcount routine at the
 * start of every function.  In mcount, apart from the function's address (in
 * lr), we need to get hold of the function's caller's address.
 *
 * Older GCCs (pre-4.4) inserted a call to a routine called mcount like this:
 *
 *      bl      mcount
 *
 * These versions have the limitation that in order for the mcount routine to
 * be able to determine the function's caller's address, an APCS-style frame
 * pointer (which is set up with something like the code below) is required.
 *
 *      mov     ip, sp
 *      push    {fp, ip, lr, pc}
 *      sub     fp, ip, #4
 *
 * With EABI, these frame pointers are not available unless -mapcs-frame is
 * specified, and if building as Thumb-2, not even then.
 *
 * Newer GCCs (4.4+) solve this problem by introducing a new version of mcount,
 * with call sites like:
 *
 *      push    {lr}
 *      bl      __gnu_mcount_nc
 *
 * With these compilers, frame pointers are not necessary.
 *
 * mcount can be thought of as a function called in the middle of a subroutine
 * call.  As such, it needs to be transparent for both the caller and the
 * callee: the original lr needs to be restored when leaving mcount, and no
 * registers should be clobbered.  (In the __gnu_mcount_nc implementation, we
 * clobber the ip register.  This is OK because the ARM calling convention
 * allows it to be clobbered in subroutines and doesn't use it to hold
 * parameters.)
 *
 * When using dynamic ftrace, we patch out the mcount call by a "mov r0, r0"
 * for the mcount case, and a "pop {lr}" for the __gnu_mcount_nc case (see
 * arch/arm/kernel/ftrace.c).
 */

#ifndef CONFIG_OLD_MCOUNT
#if (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 4))
#error Ftrace requires CONFIG_FRAME_POINTER=y with GCC older than 4.4.0.
#endif
#endif

.macro mcount_adjust_addr rd, rn
        bic     \rd, \rn, #1            @ clear the Thumb bit if present
        sub     \rd, \rd, #MCOUNT_INSN_SIZE
.endm

.macro __mcount suffix
        mcount_enter
        ldr     r0, =ftrace_trace_function
        ldr     r2, [r0]
        adr     r0, .Lftrace_stub
        cmp     r0, r2
        bne     1f

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        ldr     r1, =ftrace_graph_return
        ldr     r2, [r1]
        cmp     r0, r2
        bne     ftrace_graph_caller\suffix

        ldr     r1, =ftrace_graph_entry
        ldr     r2, [r1]
        ldr     r0, =ftrace_graph_entry_stub
        cmp     r0, r2
        bne     ftrace_graph_caller\suffix
#endif

        mcount_exit

1:      mcount_get_lr   r1                      @ lr of instrumented func
        mcount_adjust_addr      r0, lr          @ instrumented function
        adr     lr, BSYM(2f)
        mov     pc, r2
2:      mcount_exit
.endm

.macro __ftrace_caller suffix
        mcount_enter

        mcount_get_lr   r1                      @ lr of instrumented func
        mcount_adjust_addr      r0, lr          @ instrumented function

        .globl ftrace_call\suffix
ftrace_call\suffix:
        bl      ftrace_stub

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        .globl ftrace_graph_call\suffix
ftrace_graph_call\suffix:
        mov     r0, r0
#endif

        mcount_exit
.endm

.macro __ftrace_graph_caller
        sub     r0, fp, #4              @ &lr of instrumented routine (&parent)
#ifdef CONFIG_DYNAMIC_FTRACE
        @ called from __ftrace_caller, saved in mcount_enter
        ldr     r1, [sp, #16]           @ instrumented routine (func)
        mcount_adjust_addr      r1, r1
#else
        @ called from __mcount, untouched in lr
        mcount_adjust_addr      r1, lr  @ instrumented routine (func)
#endif
        mov     r2, fp                  @ frame pointer
        bl      prepare_ftrace_return
        mcount_exit
.endm

#ifdef CONFIG_OLD_MCOUNT
/*
 * mcount
 */

.macro mcount_enter
        stmdb   sp!, {r0-r3, lr}
.endm

.macro mcount_get_lr reg
        ldr     \reg, [fp, #-4]
.endm

.macro mcount_exit
        ldr     lr, [fp, #-4]
        ldmia   sp!, {r0-r3, pc}
.endm
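
/*
 * A note on the [fp, #-4] offset used above, derived from the APCS frame
 * layout described at the top of this file: "mov ip, sp; push {fp, ip, lr,
 * pc}; sub fp, ip, #4" leaves fp pointing at the saved pc slot, so [fp, #-4]
 * holds the instrumented function's saved lr, i.e. its caller's address.
 * That is what mcount_get_lr fetches, and what mcount_exit reloads into lr,
 * since the "bl mcount" at the call site clobbered the original lr.
 */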

ENTRY(mcount)
#ifdef CONFIG_DYNAMIC_FTRACE
        stmdb   sp!, {lr}
        ldr     lr, [fp, #-4]
        ldmia   sp!, {pc}
#else
        __mcount _old
#endif
ENDPROC(mcount)

#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(ftrace_caller_old)
        __ftrace_caller _old
ENDPROC(ftrace_caller_old)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller_old)
        __ftrace_graph_caller
ENDPROC(ftrace_graph_caller_old)
#endif

.purgem mcount_enter
.purgem mcount_get_lr
.purgem mcount_exit
#endif

/*
 * __gnu_mcount_nc
 */

.macro mcount_enter
        stmdb   sp!, {r0-r3, lr}
.endm

.macro mcount_get_lr reg
        ldr     \reg, [sp, #20]
.endm

.macro mcount_exit
        ldmia   sp!, {r0-r3, ip, lr}
        mov     pc, ip
.endm

ENTRY(__gnu_mcount_nc)
#ifdef CONFIG_DYNAMIC_FTRACE
        mov     ip, lr
        ldmia   sp!, {lr}
        mov     pc, ip
#else
        __mcount
#endif
ENDPROC(__gnu_mcount_nc)

#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(ftrace_caller)
        __ftrace_caller
ENDPROC(ftrace_caller)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
        __ftrace_graph_caller
ENDPROC(ftrace_graph_caller)
#endif

.purgem mcount_enter
.purgem mcount_get_lr
.purgem mcount_exit

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        .globl return_to_handler
return_to_handler:
        stmdb   sp!, {r0-r3}
        mov     r0, fp                  @ frame pointer
        bl      ftrace_return_to_handler
        mov     lr, r0                  @ r0 has real ret addr
        ldmia   sp!, {r0-r3}
        mov     pc, lr
#endif

ENTRY(ftrace_stub)
.Lftrace_stub:
        mov     pc, lr
ENDPROC(ftrace_stub)

#endif /* CONFIG_FUNCTION_TRACER */

/*=============================================================================
 * SWI handler
 *-----------------------------------------------------------------------------
 */

        /* If we're optimising for StrongARM the resulting code won't
           run on an ARM7 and we can save a couple of instructions.
                                                                --pb */
#ifdef CONFIG_CPU_ARM710
#define A710(code...) code
.Larm710bug:
        ldmia   sp, {r0 - lr}^                  @ Get calling r0 - lr
        mov     r0, r0
        add     sp, sp, #S_FRAME_SIZE
        subs    pc, lr, #4
#else
#define A710(code...)
#endif

        .align  5
ENTRY(vector_swi)
        sub     sp, sp, #S_FRAME_SIZE
        stmia   sp, {r0 - r12}                  @ Calling r0 - r12
 ARM(   add     r8, sp, #S_PC           )
 ARM(   stmdb   r8, {sp, lr}^           )       @ Calling sp, lr
 THUMB( mov     r8, sp                  )
 THUMB( store_user_sp_lr r8, r10, S_SP  )       @ calling sp, lr
        mrs     r8, spsr                        @ called from non-FIQ mode, so ok.
        str     lr, [sp, #S_PC]                 @ Save calling PC
        str     r8, [sp, #S_PSR]                @ Save CPSR
        str     r0, [sp, #S_OLD_R0]             @ Save OLD_R0
        zero_fp
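
        /*
         * Background for the #ifdefs below: an EABI caller passes the
         * syscall number in r7 (scno) and issues "swi 0", whereas an OABI
         * caller encodes the number in the SWI instruction itself, as
         * swi #(__NR_OABI_SYSCALL_BASE + NR) (the base is historically
         * 0x900000).  The old-ABI paths therefore load the trapping
         * instruction from [lr, #-4] and strip the opcode bits, while a
         * pure EABI kernel only reads it for the ARM710 SWI workaround.
         */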

        /*
         * Get the system call number.
         */

#if defined(CONFIG_OABI_COMPAT)

        /*
         * If we have CONFIG_OABI_COMPAT then we need to look at the swi
         * value to determine if it is an EABI or an old ABI call.
         */
#ifdef CONFIG_ARM_THUMB
        tst     r8, #PSR_T_BIT
        movne   r10, #0                         @ no thumb OABI emulation
        ldreq   r10, [lr, #-4]                  @ get SWI instruction
#else
        ldr     r10, [lr, #-4]                  @ get SWI instruction
  A710( and     ip, r10, #0x0f000000            @ check for SWI         )
  A710( teq     ip, #0x0f000000                                         )
  A710( bne     .Larm710bug                                             )
#endif
#ifdef CONFIG_CPU_ENDIAN_BE8
        rev     r10, r10                        @ little endian instruction
#endif

#elif defined(CONFIG_AEABI)

        /*
         * Pure EABI user space always puts the syscall number into scno (r7).
         */
  A710( ldr     ip, [lr, #-4]                   @ get SWI instruction   )
  A710( and     ip, ip, #0x0f000000             @ check for SWI         )
  A710( teq     ip, #0x0f000000                                         )
  A710( bne     .Larm710bug                                             )

#elif defined(CONFIG_ARM_THUMB)

        /* Legacy ABI only, possibly thumb mode. */
        tst     r8, #PSR_T_BIT                  @ this is SPSR from save_user_regs
        addne   scno, r7, #__NR_SYSCALL_BASE    @ put OS number in
        ldreq   scno, [lr, #-4]

#else

        /* Legacy ABI only. */
        ldr     scno, [lr, #-4]                 @ get SWI instruction
  A710( and     ip, scno, #0x0f000000           @ check for SWI         )
  A710( teq     ip, #0x0f000000                                         )
  A710( bne     .Larm710bug                                             )

#endif

#ifdef CONFIG_ALIGNMENT_TRAP
        ldr     ip, __cr_alignment
        ldr     ip, [ip]
        mcr     p15, 0, ip, c1, c0              @ update control register
#endif
        enable_irq

        get_thread_info tsk
        adr     tbl, sys_call_table             @ load syscall table pointer

#if defined(CONFIG_OABI_COMPAT)
        /*
         * If the swi argument is zero, this is an EABI call and we do nothing.
         *
         * If this is an old ABI call, get the syscall number into scno and
         * get the old ABI syscall table address.
         */
        bics    r10, r10, #0xff000000
        eorne   scno, r10, #__NR_OABI_SYSCALL_BASE
        ldrne   tbl, =sys_oabi_call_table
#elif !defined(CONFIG_AEABI)
        bic     scno, scno, #0xff000000         @ mask off SWI op-code
        eor     scno, scno, #__NR_SYSCALL_BASE  @ check OS number
#endif

        ldr     r10, [tsk, #TI_FLAGS]           @ check for syscall tracing
        stmdb   sp!, {r4, r5}                   @ push fifth and sixth args

#ifdef CONFIG_SECCOMP
        tst     r10, #_TIF_SECCOMP
        beq     1f
        mov     r0, scno
        bl      __secure_computing
        add     r0, sp, #S_R0 + S_OFF           @ pointer to regs
        ldmia   r0, {r0 - r3}                   @ have to reload r0 - r3
1:
#endif

        tst     r10, #_TIF_SYSCALL_WORK         @ are we tracing syscalls?
        bne     __sys_trace

        cmp     scno, #NR_syscalls              @ check upper syscall limit
        adr     lr, BSYM(ret_fast_syscall)      @ return address
        ldrcc   pc, [tbl, scno, lsl #2]         @ call sys_* routine

        add     r1, sp, #S_OFF
2:      mov     why, #0                         @ no longer a real syscall
        cmp     scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
        eor     r0, scno, #__NR_SYSCALL_BASE    @ put OS number back
        bcs     arm_syscall
        b       sys_ni_syscall                  @ not private func
ENDPROC(vector_swi)

        /*
         * This is the really slow path.  We're going to be doing
         * context switches, and waiting for our parent to respond.
         */
__sys_trace:
        mov     r2, scno
        add     r1, sp, #S_OFF
        mov     r0, #0                          @ trace entry [IP = 0]
        bl      syscall_trace

        adr     lr, BSYM(__sys_trace_return)    @ return address
        mov     scno, r0                        @ syscall number (possibly new)
        add     r1, sp, #S_R0 + S_OFF           @ pointer to regs
        cmp     scno, #NR_syscalls              @ check upper syscall limit
        ldmccia r1, {r0 - r3}                   @ have to reload r0 - r3
        ldrcc   pc, [tbl, scno, lsl #2]         @ call sys_* routine
        b       2b

__sys_trace_return:
        str     r0, [sp, #S_R0 + S_OFF]!        @ save returned r0
        mov     r2, scno
        mov     r1, sp
        mov     r0, #1                          @ trace exit [IP = 1]
        bl      syscall_trace
        b       ret_slow_syscall

        .align  5
#ifdef CONFIG_ALIGNMENT_TRAP
        .type   __cr_alignment, #object
__cr_alignment:
        .word   cr_alignment
#endif
        .ltorg

/*
 * This is the syscall table declaration for native ABI syscalls.
 * With EABI a couple syscalls are obsolete and defined as sys_ni_syscall.
 */
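
/*
 * As an illustration of how the ABI()/OBSOLETE() selectors below work (the
 * real entries live in calls.S; the two quoted here are only examples of the
 * pattern):
 *
 *      CALL(OBSOLETE(sys_time))                @ sys_ni_syscall under EABI
 *      CALL(ABI(sys_pread64, sys_oabi_pread64))
 *
 * In this native table ABI() picks its first argument and, with CONFIG_AEABI,
 * OBSOLETE() collapses to sys_ni_syscall.  The OABI table at the end of this
 * file redefines both macros so the compat wrappers and the obsolete calls
 * remain reachable for old ABI binaries.
 */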
#define ABI(native, compat) native
#ifdef CONFIG_AEABI
#define OBSOLETE(syscall) sys_ni_syscall
#else
#define OBSOLETE(syscall) syscall
#endif

        .type   sys_call_table, #object
ENTRY(sys_call_table)
#include "calls.S"
#undef ABI
#undef OBSOLETE

/*============================================================================
 * Special system call wrappers
 */
@ r0 = syscall number
@ r8 = syscall table
sys_syscall:
        bic     scno, r0, #__NR_OABI_SYSCALL_BASE
        cmp     scno, #__NR_syscall - __NR_SYSCALL_BASE
        cmpne   scno, #NR_syscalls              @ check range
        stmloia sp, {r5, r6}                    @ shuffle args
        movlo   r0, r1
        movlo   r1, r2
        movlo   r2, r3
        movlo   r3, r4
        ldrlo   pc, [tbl, scno, lsl #2]
        b       sys_ni_syscall
ENDPROC(sys_syscall)

sys_fork_wrapper:
        add     r0, sp, #S_OFF
        b       sys_fork
ENDPROC(sys_fork_wrapper)

sys_vfork_wrapper:
        add     r0, sp, #S_OFF
        b       sys_vfork
ENDPROC(sys_vfork_wrapper)

sys_execve_wrapper:
        add     r3, sp, #S_OFF
        b       sys_execve
ENDPROC(sys_execve_wrapper)

sys_clone_wrapper:
        add     ip, sp, #S_OFF
        str     ip, [sp, #4]
        b       sys_clone
ENDPROC(sys_clone_wrapper)

sys_sigreturn_wrapper:
        add     r0, sp, #S_OFF
        mov     why, #0                 @ prevent syscall restart handling
        b       sys_sigreturn
ENDPROC(sys_sigreturn_wrapper)

sys_rt_sigreturn_wrapper:
        add     r0, sp, #S_OFF
        mov     why, #0                 @ prevent syscall restart handling
        b       sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)

sys_sigaltstack_wrapper:
        ldr     r2, [sp, #S_OFF + S_SP]
        b       do_sigaltstack
ENDPROC(sys_sigaltstack_wrapper)

sys_statfs64_wrapper:
        teq     r1, #88
        moveq   r1, #84
        b       sys_statfs64
ENDPROC(sys_statfs64_wrapper)

sys_fstatfs64_wrapper:
        teq     r1, #88
        moveq   r1, #84
        b       sys_fstatfs64
ENDPROC(sys_fstatfs64_wrapper)

/*
 * Note: off_4k (r5) is always units of 4K.  If we can't do the requested
 * offset, we return EINVAL.
 */
sys_mmap2:
#if PAGE_SHIFT > 12
        tst     r5, #PGOFF_MASK
        moveq   r5, r5, lsr #PAGE_SHIFT - 12
        streq   r5, [sp, #4]
        beq     sys_mmap_pgoff
        mov     r0, #-EINVAL
        mov     pc, lr
#else
        str     r5, [sp, #4]
        b       sys_mmap_pgoff
#endif
ENDPROC(sys_mmap2)

#ifdef CONFIG_OABI_COMPAT

/*
 * These are syscalls with argument register differences: EABI aligns 64-bit
 * arguments to an even/odd register pair while the old ABI packs them into
 * the next free registers, so these wrappers move the affected words to where
 * the native implementations expect them (including the stacked r4/r5 slots
 * pushed in vector_swi).
 */

sys_oabi_pread64:
        stmia   sp, {r3, r4}
        b       sys_pread64
ENDPROC(sys_oabi_pread64)

sys_oabi_pwrite64:
        stmia   sp, {r3, r4}
        b       sys_pwrite64
ENDPROC(sys_oabi_pwrite64)

sys_oabi_truncate64:
        mov     r3, r2
        mov     r2, r1
        b       sys_truncate64
ENDPROC(sys_oabi_truncate64)

sys_oabi_ftruncate64:
        mov     r3, r2
        mov     r2, r1
        b       sys_ftruncate64
ENDPROC(sys_oabi_ftruncate64)

sys_oabi_readahead:
        str     r3, [sp]
        mov     r3, r2
        mov     r2, r1
        b       sys_readahead
ENDPROC(sys_oabi_readahead)

/*
 * Let's declare a second syscall table for old ABI binaries
 * using the compatibility syscall entries.
 */
#define ABI(native, compat) compat
#define OBSOLETE(syscall) syscall

        .type   sys_oabi_call_table, #object
ENTRY(sys_oabi_call_table)
#include "calls.S"
#undef ABI
#undef OBSOLETE

#endif