/*
 *  linux/arch/arm/kernel/entry-common.S
 *
 *  Copyright (C) 2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <asm/assembler.h>
#include <asm/unistd.h>
#include <asm/ftrace.h>
#include <asm/unwind.h>
#ifdef CONFIG_AEABI
#include <asm/unistd-oabi.h>
#endif

	.equ	NR_syscalls, __NR_syscalls

#ifdef CONFIG_NEED_RET_TO_USER
#include <mach/entry-macro.S>
#else
	.macro	arch_ret_to_user, tmp1, tmp2
	.endm
#endif

#include "entry-header.S"


	.align	5
#if !(IS_ENABLED(CONFIG_TRACE_IRQFLAGS) || IS_ENABLED(CONFIG_CONTEXT_TRACKING))
/*
 * This is the fast syscall return path.  We do as little as possible here,
 * such as avoiding writing r0 to the stack.  We only use this path if we
 * have tracing and context tracking disabled - the overheads from those
 * features make this path too inefficient.
 */
ret_fast_syscall:
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	disable_irq_notrace			@ disable interrupts
	ldr	r1, [tsk, #TI_FLAGS]		@ re-check for syscall tracing
	tst	r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
	bne	fast_work_pending

	/* perform architecture specific actions before user return */
	arch_ret_to_user r1, lr

	restore_user_regs fast = 1, offset = S_OFF
 UNWIND(.fnend		)
ENDPROC(ret_fast_syscall)

	/* Ok, we need to do extra processing, enter the slow path. */
fast_work_pending:
	str	r0, [sp, #S_R0+S_OFF]!		@ returned r0
	/* fall through to work_pending */
#else
/*
 * The "replacement" ret_fast_syscall for when tracing or context tracking
 * is enabled.  As we will need to call out to some C functions, we save
 * r0 first to avoid needing to save registers around each C function call.
 */
ret_fast_syscall:
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
	disable_irq_notrace			@ disable interrupts
	ldr	r1, [tsk, #TI_FLAGS]		@ re-check for syscall tracing
	tst	r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
	beq	no_work_pending
 UNWIND(.fnend		)
ENDPROC(ret_fast_syscall)

	/* Slower path - fall through to work_pending */
#endif

	tst	r1, #_TIF_SYSCALL_WORK
	bne	__sys_trace_return_nosave
slow_work_pending:
	mov	r0, sp				@ 'regs'
	mov	r2, why				@ 'syscall'
	bl	do_work_pending
	cmp	r0, #0
	beq	no_work_pending
	movlt	scno, #(__NR_restart_syscall - __NR_SYSCALL_BASE)
	ldmia	sp, {r0 - r6}			@ have to reload r0 - r6
	b	local_restart			@ ... and off we go
ENDPROC(ret_fast_syscall)

/*
 * "slow" syscall return path.  "why" tells us if this was a real syscall.
 * IRQs may be enabled here, so always disable them.  Note that we use the
 * "notrace" version to avoid calling into the tracing code unnecessarily.
 * do_work_pending() will update this state if necessary.
 */
ENTRY(ret_to_user)
ret_slow_syscall:
	disable_irq_notrace			@ disable interrupts
ENTRY(ret_to_user_from_irq)
	ldr	r1, [tsk, #TI_FLAGS]
	tst	r1, #_TIF_WORK_MASK
	bne	slow_work_pending
no_work_pending:
	asm_trace_hardirqs_on save = 0

	/* perform architecture specific actions before user return */
	arch_ret_to_user r1, lr
	ct_user_enter save = 0

	restore_user_regs fast = 0, offset = 0
ENDPROC(ret_to_user_from_irq)
ENDPROC(ret_to_user)
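
/*
 * A sketch of the contract with do_work_pending() as the code above uses
 * it (the C side lives in arch/arm/kernel/signal.c): a return value of 0
 * means all pending work is done and we may continue to user space; a
 * positive value asks for the interrupted syscall to be replayed, so
 * r0-r6 are reloaded from the saved registers and we branch back to
 * local_restart; a negative value additionally rewrites scno so that
 * sys_restart_syscall is invoked instead.
 */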

/*
 * This is how we return from a fork.
 */
ENTRY(ret_from_fork)
	bl	schedule_tail
	cmp	r5, #0
	movne	r0, r4
	badrne	lr, 1f
	retne	r5
1:	get_thread_info tsk
	b	ret_slow_syscall
ENDPROC(ret_from_fork)

/*=============================================================================
 * SWI handler
 *-----------------------------------------------------------------------------
 */

	.align	5
ENTRY(vector_swi)
#ifdef CONFIG_CPU_V7M
	v7m_exception_entry
#else
	sub	sp, sp, #PT_REGS_SIZE
	stmia	sp, {r0 - r12}			@ Calling r0 - r12
 ARM(	add	r8, sp, #S_PC		)
 ARM(	stmdb	r8, {sp, lr}^		)	@ Calling sp, lr
 THUMB(	mov	r8, sp			)
 THUMB(	store_user_sp_lr r8, r10, S_SP	)	@ calling sp, lr
	mrs	r8, spsr			@ called from non-FIQ mode, so ok.
	str	lr, [sp, #S_PC]			@ Save calling PC
	str	r8, [sp, #S_PSR]		@ Save CPSR
	str	r0, [sp, #S_OLD_R0]		@ Save OLD_R0
#endif
	zero_fp
	alignment_trap r10, ip, __cr_alignment
	enable_irq
	ct_user_exit
	get_thread_info tsk

	/*
	 * Get the system call number.
	 */

#if defined(CONFIG_OABI_COMPAT)

	/*
	 * If we have CONFIG_OABI_COMPAT then we need to look at the swi
	 * value to determine if it is an EABI or an old ABI call.
	 */
#ifdef CONFIG_ARM_THUMB
	tst	r8, #PSR_T_BIT
	movne	r10, #0				@ no thumb OABI emulation
 USER(	ldreq	r10, [lr, #-4]		)	@ get SWI instruction
#else
 USER(	ldr	r10, [lr, #-4]		)	@ get SWI instruction
#endif
 ARM_BE8(rev	r10, r10)			@ little endian instruction

#elif defined(CONFIG_AEABI)

	/*
	 * Pure EABI user space always puts the syscall number into scno (r7).
	 */
#elif defined(CONFIG_ARM_THUMB)
	/* Legacy ABI only, possibly thumb mode. */
	tst	r8, #PSR_T_BIT			@ this is SPSR from save_user_regs
	addne	scno, r7, #__NR_SYSCALL_BASE	@ put OS number in
 USER(	ldreq	scno, [lr, #-4]		)

#else
	/* Legacy ABI only. */
 USER(	ldr	scno, [lr, #-4]		)	@ get SWI instruction
#endif

	uaccess_disable tbl

	adr	tbl, sys_call_table		@ load syscall table pointer

#if defined(CONFIG_OABI_COMPAT)
	/*
	 * If the swi argument is zero, this is an EABI call and we do nothing.
	 *
	 * If this is an old ABI call, get the syscall number into scno and
	 * get the old ABI syscall table address.
	 */
	bics	r10, r10, #0xff000000
	eorne	scno, r10, #__NR_OABI_SYSCALL_BASE
	ldrne	tbl, =sys_oabi_call_table
#elif !defined(CONFIG_AEABI)
	bic	scno, scno, #0xff000000		@ mask off SWI op-code
	eor	scno, scno, #__NR_SYSCALL_BASE	@ check OS number
#endif

local_restart:
	ldr	r10, [tsk, #TI_FLAGS]		@ check for syscall tracing
	stmdb	sp!, {r4, r5}			@ push fifth and sixth args

	tst	r10, #_TIF_SYSCALL_WORK		@ are we tracing syscalls?
	bne	__sys_trace

	cmp	scno, #NR_syscalls		@ check upper syscall limit
	badr	lr, ret_fast_syscall		@ return address
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine

	add	r1, sp, #S_OFF
2:	cmp	scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
	eor	r0, scno, #__NR_SYSCALL_BASE	@ put OS number back
	bcs	arm_syscall
	mov	why, #0				@ no longer a real syscall
	b	sys_ni_syscall			@ not private func
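
/*
 * A sketch of the dispatch above, for orientation: with scno in range,
 * "ldrcc pc, [tbl, scno, lsl #2]" is an indexed jump through the syscall
 * table (each entry is a 32-bit pointer, hence the "lsl #2"), and
 * "badr lr, ret_fast_syscall" has already set the return address, so the
 * sys_* routine returns straight into the fast return path.  Assuming the
 * usual EABI numbering (where __NR_write is 4), write(2) arrives with
 * scno == 4 and jumps via the word at sys_call_table + 16.  Out-of-range
 * numbers fall through: ARM-private calls (scno >= __ARM_NR_BASE) go to
 * arm_syscall(), anything else lands in sys_ni_syscall() and -ENOSYS.
 */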

#if defined(CONFIG_OABI_COMPAT) || !defined(CONFIG_AEABI)
	/*
	 * We failed to handle a fault trying to access the page
	 * containing the swi instruction, but we're not really in a
	 * position to return -EFAULT.  Instead, return back to the
	 * instruction and re-enter the user fault handling path trying
	 * to page it in.  This will likely result in sending SEGV to the
	 * current task.
	 */
9001:
	sub	lr, lr, #4
	str	lr, [sp, #S_PC]
	b	ret_fast_syscall
#endif
ENDPROC(vector_swi)

	/*
	 * This is the really slow path.  We're going to be doing
	 * context switches, and waiting for our parent to respond.
	 */
__sys_trace:
	mov	r1, scno
	add	r0, sp, #S_OFF
	bl	syscall_trace_enter

	badr	lr, __sys_trace_return		@ return address
	mov	scno, r0			@ syscall number (possibly new)
	add	r1, sp, #S_R0 + S_OFF		@ pointer to regs
	cmp	scno, #NR_syscalls		@ check upper syscall limit
	ldmccia	r1, {r0 - r6}			@ have to reload r0 - r6
	stmccia	sp, {r4, r5}			@ and update the stack args
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine
	cmp	scno, #-1			@ skip the syscall?
	bne	2b
	add	sp, sp, #S_OFF			@ restore stack
	b	ret_slow_syscall

__sys_trace_return:
	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
	mov	r0, sp
	bl	syscall_trace_exit
	b	ret_slow_syscall

__sys_trace_return_nosave:
	enable_irq_notrace
	mov	r0, sp
	bl	syscall_trace_exit
	b	ret_slow_syscall

	.align	5
#ifdef CONFIG_ALIGNMENT_TRAP
	.type	__cr_alignment, #object
__cr_alignment:
	.word	cr_alignment
#endif
	.ltorg

	.macro	syscall_table_start, sym
	.equ	__sys_nr, 0
	.type	\sym, #object
ENTRY(\sym)
	.endm

	.macro	syscall, nr, func
	.ifgt	__sys_nr - \nr
	.error	"Duplicated/unordered system call entry"
	.endif
	.rept	\nr - __sys_nr
	.long	sys_ni_syscall
	.endr
	.long	\func
	.equ	__sys_nr, \nr + 1
	.endm

	.macro	syscall_table_end, sym
	.ifgt	__sys_nr - __NR_syscalls
	.error	"System call table too big"
	.endif
	.rept	__NR_syscalls - __sys_nr
	.long	sys_ni_syscall
	.endr
	.size	\sym, . - \sym
	.endm

#define NATIVE(nr, func) syscall nr, func

/*
 * This is the syscall table declaration for native ABI syscalls.
 * With EABI a couple of syscalls are obsolete and defined as sys_ni_syscall.
 */
	syscall_table_start sys_call_table
#define COMPAT(nr, native, compat) syscall nr, native
#ifdef CONFIG_AEABI
#include <calls-eabi.S>
#else
#include <calls-oabi.S>
#endif
#undef COMPAT
	syscall_table_end sys_call_table
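
/*
 * An illustrative expansion of the table macros above, with hypothetical
 * entries (the real tables come from calls-eabi.S / calls-oabi.S):
 *
 *	syscall_table_start sys_demo_table
 *	syscall 0, sys_restart_syscall
 *	syscall 3, sys_read		@ slots 1-2 auto-fill with sys_ni_syscall
 *	syscall_table_end sys_demo_table
 *
 * The .rept in "syscall" pads any gap before entry \nr, the .ifgt rejects
 * entries that go backwards, and syscall_table_end pads the table out to
 * exactly __NR_syscalls words, so every in-range syscall number indexes a
 * valid entry.
 */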

/*=============================================================================
 * Special system call wrappers
 */
@ r0 = syscall number
@ r8 = syscall table
sys_syscall:
	bic	scno, r0, #__NR_OABI_SYSCALL_BASE
	cmp	scno, #__NR_syscall - __NR_SYSCALL_BASE
	cmpne	scno, #NR_syscalls		@ check range
	stmloia	sp, {r5, r6}			@ shuffle args
	movlo	r0, r1
	movlo	r1, r2
	movlo	r2, r3
	movlo	r3, r4
	ldrlo	pc, [tbl, scno, lsl #2]
	b	sys_ni_syscall
ENDPROC(sys_syscall)

sys_sigreturn_wrapper:
	add	r0, sp, #S_OFF
	mov	why, #0				@ prevent syscall restart handling
	b	sys_sigreturn
ENDPROC(sys_sigreturn_wrapper)

sys_rt_sigreturn_wrapper:
	add	r0, sp, #S_OFF
	mov	why, #0				@ prevent syscall restart handling
	b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)

sys_statfs64_wrapper:
	teq	r1, #88
	moveq	r1, #84
	b	sys_statfs64
ENDPROC(sys_statfs64_wrapper)

sys_fstatfs64_wrapper:
	teq	r1, #88
	moveq	r1, #84
	b	sys_fstatfs64
ENDPROC(sys_fstatfs64_wrapper)

/*
 * Note: off_4k (r5) is always in units of 4K.  If we can't do the requested
 * offset, we return EINVAL.
 */
sys_mmap2:
#if PAGE_SHIFT > 12
	tst	r5, #PGOFF_MASK
	moveq	r5, r5, lsr #PAGE_SHIFT - 12
	streq	r5, [sp, #4]
	beq	sys_mmap_pgoff
	mov	r0, #-EINVAL
	ret	lr
#else
	str	r5, [sp, #4]
	b	sys_mmap_pgoff
#endif
ENDPROC(sys_mmap2)

#ifdef CONFIG_OABI_COMPAT

/*
 * These are syscalls with argument register differences
 */

sys_oabi_pread64:
	stmia	sp, {r3, r4}
	b	sys_pread64
ENDPROC(sys_oabi_pread64)

sys_oabi_pwrite64:
	stmia	sp, {r3, r4}
	b	sys_pwrite64
ENDPROC(sys_oabi_pwrite64)

sys_oabi_truncate64:
	mov	r3, r2
	mov	r2, r1
	b	sys_truncate64
ENDPROC(sys_oabi_truncate64)

sys_oabi_ftruncate64:
	mov	r3, r2
	mov	r2, r1
	b	sys_ftruncate64
ENDPROC(sys_oabi_ftruncate64)

sys_oabi_readahead:
	str	r3, [sp]
	mov	r3, r2
	mov	r2, r1
	b	sys_readahead
ENDPROC(sys_oabi_readahead)

/*
 * Let's declare a second syscall table for old ABI binaries
 * using the compatibility syscall entries.
 */
	syscall_table_start sys_oabi_call_table
#define COMPAT(nr, native, compat) syscall nr, compat
#include <calls-oabi.S>
	syscall_table_end sys_oabi_call_table

#endif
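
/*
 * Note on the table generation above: COMPAT() is defined twice in this
 * file.  For the native sys_call_table it selects the "native" entry;
 * here it is redefined so that re-including calls-oabi.S fills
 * sys_oabi_call_table with the compat entries, i.e. the sys_oabi_*
 * wrappers.  The wrappers compensate for argument register differences
 * (see also arch/arm/kernel/sys_oabi-compat.c): under EABI, 64-bit
 * syscall arguments live in an even/odd register pair, so an old-ABI
 * pread64() delivers its loff_t in r3/r4 while the EABI sys_pread64
 * expects it where r4/r5 were pushed - hence "stmia sp, {r3, r4}" above.
 */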