/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/kernel/entry-common.S
 *
 *  Copyright (C) 2000 Russell King
 */

#include <asm/assembler.h>
#include <asm/unistd.h>
#include <asm/ftrace.h>
#include <asm/unwind.h>
#include <asm/memory.h>
#ifdef CONFIG_AEABI
#include <asm/unistd-oabi.h>
#endif

	.equ	NR_syscalls, __NR_syscalls

#ifdef CONFIG_NEED_RET_TO_USER
#include <mach/entry-macro.S>
#else
	.macro	arch_ret_to_user, tmp1, tmp2
	.endm
#endif

#include "entry-header.S"

saved_psr	.req	r8
#if defined(CONFIG_TRACE_IRQFLAGS) || defined(CONFIG_CONTEXT_TRACKING)
saved_pc	.req	r9
#define TRACE(x...) x
#else
saved_pc	.req	lr
#define TRACE(x...)
#endif
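
/*
 * Illustrative note (editorial, not in the original source): when
 * CONFIG_TRACE_IRQFLAGS or CONFIG_CONTEXT_TRACKING is set,
 * TRACE(mov saved_pc, lr) below assembles to "mov r9, lr", keeping the
 * calling PC in a register that survives the C calls made for tracing;
 * with both disabled the statement vanishes entirely and saved_pc is
 * simply an alias for lr.
 */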

	.section .entry.text,"ax",%progbits
	.align	5
#if !(IS_ENABLED(CONFIG_TRACE_IRQFLAGS) || IS_ENABLED(CONFIG_CONTEXT_TRACKING) || \
	IS_ENABLED(CONFIG_DEBUG_RSEQ))
/*
 * This is the fast syscall return path.  We do as little as possible here,
 * such as avoiding writing r0 to the stack.  We only use this path if we
 * have tracing, context tracking and rseq debug disabled - the overheads
 * from those features make this path too inefficient.
 */
ret_fast_syscall:
__ret_fast_syscall:
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	disable_irq_notrace			@ disable interrupts
	ldr	r2, [tsk, #TI_ADDR_LIMIT]
	cmp	r2, #TASK_SIZE
	blne	addr_limit_check_failed
	ldr	r1, [tsk, #TI_FLAGS]		@ re-check for syscall tracing
	tst	r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
	bne	fast_work_pending

	/* perform architecture specific actions before user return */
	arch_ret_to_user r1, lr

	restore_user_regs fast = 1, offset = S_OFF
 UNWIND(.fnend		)
ENDPROC(ret_fast_syscall)

	/* Ok, we need to do extra processing, enter the slow path. */
fast_work_pending:
	str	r0, [sp, #S_R0+S_OFF]!		@ returned r0
	/* fall through to work_pending */
#else
/*
 * The "replacement" ret_fast_syscall for when tracing, context tracking,
 * or rseq debug is enabled.  As we will need to call out to some C
 * functions, we save r0 first to avoid needing to save registers around
 * each C function call.
 */
ret_fast_syscall:
__ret_fast_syscall:
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
#if IS_ENABLED(CONFIG_DEBUG_RSEQ)
	/* do_rseq_syscall needs interrupts enabled. */
	mov	r0, sp				@ 'regs'
	bl	do_rseq_syscall
#endif
	disable_irq_notrace			@ disable interrupts
	ldr	r2, [tsk, #TI_ADDR_LIMIT]
	cmp	r2, #TASK_SIZE
	blne	addr_limit_check_failed
	ldr	r1, [tsk, #TI_FLAGS]		@ re-check for syscall tracing
	tst	r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
	beq	no_work_pending
 UNWIND(.fnend		)
ENDPROC(ret_fast_syscall)

	/* Slower path - fall through to work_pending */
#endif

	tst	r1, #_TIF_SYSCALL_WORK
	bne	__sys_trace_return_nosave
slow_work_pending:
	mov	r0, sp				@ 'regs'
	mov	r2, why				@ 'syscall'
	bl	do_work_pending
	cmp	r0, #0
	beq	no_work_pending
	movlt	scno, #(__NR_restart_syscall - __NR_SYSCALL_BASE)
	ldmia	sp, {r0 - r6}			@ have to reload r0 - r6
	b	local_restart			@ ... and off we go
ENDPROC(ret_fast_syscall)

/*
 * "slow" syscall return path.  "why" tells us if this was a real syscall.
 * IRQs may be enabled here, so always disable them.  Note that we use the
 * "notrace" version to avoid calling into the tracing code unnecessarily.
 * do_work_pending() will update this state if necessary.
 */
ENTRY(ret_to_user)
ret_slow_syscall:
#if IS_ENABLED(CONFIG_DEBUG_RSEQ)
	/* do_rseq_syscall needs interrupts enabled. */
	enable_irq_notrace		@ enable interrupts
	mov	r0, sp			@ 'regs'
	bl	do_rseq_syscall
#endif
	disable_irq_notrace		@ disable interrupts
ENTRY(ret_to_user_from_irq)
	ldr	r2, [tsk, #TI_ADDR_LIMIT]
	cmp	r2, #TASK_SIZE
	blne	addr_limit_check_failed
	ldr	r1, [tsk, #TI_FLAGS]
	tst	r1, #_TIF_WORK_MASK
	bne	slow_work_pending
no_work_pending:
	asm_trace_hardirqs_on save = 0

	/* perform architecture specific actions before user return */
	arch_ret_to_user r1, lr
	ct_user_enter save = 0

	restore_user_regs fast = 0, offset = 0
ENDPROC(ret_to_user_from_irq)
ENDPROC(ret_to_user)

/*
 * This is how we return from a fork.
 */
ENTRY(ret_from_fork)
	bl	schedule_tail
	cmp	r5, #0
	movne	r0, r4
	badrne	lr, 1f
	retne	r5
1:	get_thread_info tsk
	b	ret_slow_syscall
ENDPROC(ret_from_fork)

/*=============================================================================
 * SWI handler
 *-----------------------------------------------------------------------------
 */

	.align	5
ENTRY(vector_swi)
#ifdef CONFIG_CPU_V7M
	v7m_exception_entry
#else
	sub	sp, sp, #PT_REGS_SIZE
	stmia	sp, {r0 - r12}			@ Calling r0 - r12
 ARM(	add	r8, sp, #S_PC		)
 ARM(	stmdb	r8, {sp, lr}^		)	@ Calling sp, lr
 THUMB(	mov	r8, sp			)
 THUMB(	store_user_sp_lr r8, r10, S_SP	)	@ calling sp, lr
	mrs	saved_psr, spsr			@ called from non-FIQ mode, so ok.
 TRACE(	mov	saved_pc, lr		)
	str	saved_pc, [sp, #S_PC]		@ Save calling PC
	str	saved_psr, [sp, #S_PSR]		@ Save CPSR
	str	r0, [sp, #S_OLD_R0]		@ Save OLD_R0
#endif
	zero_fp
	alignment_trap r10, ip, __cr_alignment
	asm_trace_hardirqs_on save=0
	enable_irq_notrace
	ct_user_exit save=0

	/*
	 * Get the system call number.
	 */

#if defined(CONFIG_OABI_COMPAT)

	/*
	 * If we have CONFIG_OABI_COMPAT then we need to look at the swi
	 * value to determine whether it is an EABI or an old ABI call.
	 */
#ifdef CONFIG_ARM_THUMB
	tst	saved_psr, #PSR_T_BIT
	movne	r10, #0				@ no thumb OABI emulation
 USER(	ldreq	r10, [saved_pc, #-4]	)	@ get SWI instruction
#else
 USER(	ldr	r10, [saved_pc, #-4]	)	@ get SWI instruction
#endif
 ARM_BE8(rev	r10, r10)			@ little endian instruction

#elif defined(CONFIG_AEABI)

	/*
	 * Pure EABI user space always puts the syscall number into scno (r7).
	 */
#elif defined(CONFIG_ARM_THUMB)
	/* Legacy ABI only, possibly thumb mode. */
	tst	saved_psr, #PSR_T_BIT		@ this is SPSR from save_user_regs
	addne	scno, r7, #__NR_SYSCALL_BASE	@ put OS number in
 USER(	ldreq	scno, [saved_pc, #-4]	)

#else
	/* Legacy ABI only. */
 USER(	ldr	scno, [saved_pc, #-4]	)	@ get SWI instruction
#endif
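
/*
 * Worked example (editorial, not in the original source): an EABI
 * task invokes a system call as
 *
 *	mov	r7, #__NR_write		@ scno in r7
 *	swi	#0
 *
 * whereas an old-ABI task encodes the number in the instruction
 * itself, e.g. "swi #(__NR_SYSCALL_BASE + __NR_write)".  The
 * instruction word loaded above carries that immediate, which the
 * OABI_COMPAT code below uses to tell the two conventions apart.
 */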

	/* saved_psr and saved_pc are now dead */

	uaccess_disable tbl

	adr	tbl, sys_call_table		@ load syscall table pointer

#if defined(CONFIG_OABI_COMPAT)
	/*
	 * If the swi argument is zero, this is an EABI call and we do nothing.
	 *
	 * If this is an old ABI call, get the syscall number into scno and
	 * get the old ABI syscall table address.
	 */
	bics	r10, r10, #0xff000000
	eorne	scno, r10, #__NR_OABI_SYSCALL_BASE
	ldrne	tbl, =sys_oabi_call_table
#elif !defined(CONFIG_AEABI)
	bic	scno, scno, #0xff000000		@ mask off SWI op-code
	eor	scno, scno, #__NR_SYSCALL_BASE	@ check OS number
#endif
	get_thread_info tsk
	/*
	 * Reload the registers that may have been corrupted on entry to
	 * the syscall assembly (by tracing or context tracking).
	 */
 TRACE(	ldmia	sp, {r0 - r3}		)

local_restart:
	ldr	r10, [tsk, #TI_FLAGS]		@ check for syscall tracing
	stmdb	sp!, {r4, r5}			@ push fifth and sixth args

	tst	r10, #_TIF_SYSCALL_WORK		@ are we tracing syscalls?
	bne	__sys_trace

	invoke_syscall tbl, scno, r10, __ret_fast_syscall

	add	r1, sp, #S_OFF
2:	cmp	scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
	eor	r0, scno, #__NR_SYSCALL_BASE	@ put OS number back
	bcs	arm_syscall
	mov	why, #0				@ no longer a real syscall
	b	sys_ni_syscall			@ not private func

#if defined(CONFIG_OABI_COMPAT) || !defined(CONFIG_AEABI)
	/*
	 * We failed to handle a fault trying to access the page
	 * containing the swi instruction, but we're not really in a
	 * position to return -EFAULT.  Instead, return to the
	 * instruction and re-enter the user fault handling path trying
	 * to page it in.  This will likely result in sending SEGV to the
	 * current task.
	 */
9001:
	sub	lr, saved_pc, #4
	str	lr, [sp, #S_PC]
	get_thread_info tsk
	b	ret_fast_syscall
#endif
ENDPROC(vector_swi)

	/*
	 * This is the really slow path.  We're going to be doing
	 * context switches, and waiting for our parent to respond.
	 */
__sys_trace:
	mov	r1, scno
	add	r0, sp, #S_OFF
	bl	syscall_trace_enter
	mov	scno, r0
	invoke_syscall tbl, scno, r10, __sys_trace_return, reload=1
	cmp	scno, #-1			@ skip the syscall?
	bne	2b
	add	sp, sp, #S_OFF			@ restore stack

__sys_trace_return_nosave:
	enable_irq_notrace
	mov	r0, sp
	bl	syscall_trace_exit
	b	ret_slow_syscall

__sys_trace_return:
	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
	mov	r0, sp
	bl	syscall_trace_exit
	b	ret_slow_syscall

	.align	5
#ifdef CONFIG_ALIGNMENT_TRAP
	.type	__cr_alignment, #object
__cr_alignment:
	.word	cr_alignment
#endif
	.ltorg

	.macro	syscall_table_start, sym
	.equ	__sys_nr, 0
	.type	\sym, #object
ENTRY(\sym)
	.endm

	.macro	syscall, nr, func
	.ifgt	__sys_nr - \nr
	.error	"Duplicated/unordered system call entry"
	.endif
	.rept	\nr - __sys_nr
	.long	sys_ni_syscall
	.endr
	.long	\func
	.equ	__sys_nr, \nr + 1
	.endm

	.macro	syscall_table_end, sym
	.ifgt	__sys_nr - __NR_syscalls
	.error	"System call table too big"
	.endif
	.rept	__NR_syscalls - __sys_nr
	.long	sys_ni_syscall
	.endr
	.size	\sym, . - \sym
	.endm
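
/*
 * Worked example (editorial, not in the original source): assuming
 * __sys_nr is currently 1, "syscall 3, sys_read" expands to
 *
 *	.long	sys_ni_syscall		@ entry 1, hole padded
 *	.long	sys_ni_syscall		@ entry 2, hole padded
 *	.long	sys_read		@ entry 3
 *
 * and sets __sys_nr to 4: gaps in the numbering are filled with
 * sys_ni_syscall, and a duplicated or out-of-order entry trips the
 * .error directive above.
 */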

#define NATIVE(nr, func) syscall nr, func

/*
 * This is the syscall table declaration for native ABI syscalls.
 * With EABI a couple of syscalls are obsolete and defined as sys_ni_syscall.
 */
	syscall_table_start sys_call_table
#define COMPAT(nr, native, compat) syscall nr, native
#ifdef CONFIG_AEABI
#include <calls-eabi.S>
#else
#include <calls-oabi.S>
#endif
#undef COMPAT
	syscall_table_end sys_call_table

/*============================================================================
 * Special system call wrappers
 */
@ r0 = syscall number
@ r8 = syscall table
sys_syscall:
	bic	scno, r0, #__NR_OABI_SYSCALL_BASE
	cmp	scno, #__NR_syscall - __NR_SYSCALL_BASE
	cmpne	scno, #NR_syscalls		@ check range
#ifdef CONFIG_CPU_SPECTRE
	movhs	scno, #0
	csdb
#endif
	stmialo	sp, {r5, r6}			@ shuffle args
	movlo	r0, r1
	movlo	r1, r2
	movlo	r2, r3
	movlo	r3, r4
	ldrlo	pc, [tbl, scno, lsl #2]
	b	sys_ni_syscall
ENDPROC(sys_syscall)

sys_sigreturn_wrapper:
	add	r0, sp, #S_OFF
	mov	why, #0		@ prevent syscall restart handling
	b	sys_sigreturn
ENDPROC(sys_sigreturn_wrapper)

sys_rt_sigreturn_wrapper:
	add	r0, sp, #S_OFF
	mov	why, #0		@ prevent syscall restart handling
	b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)

sys_statfs64_wrapper:
	teq	r1, #88
	moveq	r1, #84
	b	sys_statfs64
ENDPROC(sys_statfs64_wrapper)

sys_fstatfs64_wrapper:
	teq	r1, #88
	moveq	r1, #84
	b	sys_fstatfs64
ENDPROC(sys_fstatfs64_wrapper)

/*
 * Note: off_4k (r5) is always in units of 4K.  If we can't do the
 * requested offset, we return -EINVAL.
 */
sys_mmap2:
	str	r5, [sp, #4]
	b	sys_mmap_pgoff
ENDPROC(sys_mmap2)

#ifdef CONFIG_OABI_COMPAT

/*
 * These are syscalls with argument register differences.
 */

sys_oabi_pread64:
	stmia	sp, {r3, r4}
	b	sys_pread64
ENDPROC(sys_oabi_pread64)

sys_oabi_pwrite64:
	stmia	sp, {r3, r4}
	b	sys_pwrite64
ENDPROC(sys_oabi_pwrite64)

sys_oabi_truncate64:
	mov	r3, r2
	mov	r2, r1
	b	sys_truncate64
ENDPROC(sys_oabi_truncate64)

sys_oabi_ftruncate64:
	mov	r3, r2
	mov	r2, r1
	b	sys_ftruncate64
ENDPROC(sys_oabi_ftruncate64)

sys_oabi_readahead:
	str	r3, [sp]
	mov	r3, r2
	mov	r2, r1
	b	sys_readahead
ENDPROC(sys_oabi_readahead)

/*
 * Let's declare a second syscall table for old ABI binaries
 * using the compatibility syscall entries.
 */
	syscall_table_start sys_oabi_call_table
#define COMPAT(nr, native, compat) syscall nr, compat
#include <calls-oabi.S>
	syscall_table_end sys_oabi_call_table

#endif
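
/*
 * Illustrative expansion (editorial, not in the original source; the
 * syscall number shown is for illustration): a generated calls-oabi.S
 * entry of the form
 *
 *	COMPAT(180, sys_pread64, sys_oabi_pread64)
 *
 * picks the native function when COMPAT is defined for sys_call_table
 * and the sys_oabi_pread64 wrapper above when it is redefined for
 * sys_oabi_call_table, so one include file feeds both tables.
 */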