// SPDX-License-Identifier: GPL-2.0

#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/errno.h>
#include <linux/nospec.h>
#include <linux/ptrace.h>
#include <linux/syscalls.h>

#include <asm/daifflags.h>
#include <asm/fpsimd.h>
#include <asm/syscall.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>

long compat_arm_syscall(struct pt_regs *regs);

long sys_ni_syscall(void);

/*
 * Handle an unimplemented syscall number. Compat tasks get a chance to hit
 * the ARM-private syscalls in compat_arm_syscall() first; anything else
 * falls through to sys_ni_syscall(), which returns -ENOSYS.
 */
asmlinkage long do_ni_syscall(struct pt_regs *regs)
{
#ifdef CONFIG_COMPAT
	long ret;
	if (is_compat_task()) {
		ret = compat_arm_syscall(regs);
		if (ret != -ENOSYS)
			return ret;
	}
#endif

	return sys_ni_syscall();
}

static long __invoke_syscall(struct pt_regs *regs, syscall_fn_t syscall_fn)
{
	return syscall_fn(regs);
}

/*
 * Look up and call the handler for scno, clamping the table index under
 * speculation with array_index_nospec(), and store the result in x0.
 */
static void invoke_syscall(struct pt_regs *regs, unsigned int scno,
			   unsigned int sc_nr,
			   const syscall_fn_t syscall_table[])
{
	long ret;

	if (scno < sc_nr) {
		syscall_fn_t syscall_fn;
		syscall_fn = syscall_table[array_index_nospec(scno, sc_nr)];
		ret = __invoke_syscall(regs, syscall_fn);
	} else {
		ret = do_ni_syscall(regs);
	}

	regs->regs[0] = ret;
}

static inline bool has_syscall_work(unsigned long flags)
{
	return unlikely(flags & _TIF_SYSCALL_WORK);
}

int syscall_trace_enter(struct pt_regs *regs);
void syscall_trace_exit(struct pt_regs *regs);

/*
 * Common EL0 SVC path shared by the native and compat handlers: record the
 * syscall state, re-enable interrupts, handle entry/exit tracing, and invoke
 * the syscall itself.
 */
static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
			   const syscall_fn_t syscall_table[])
{
	unsigned long flags = current_thread_info()->flags;

	regs->orig_x0 = regs->regs[0];
	regs->syscallno = scno;

	local_daif_restore(DAIF_PROCCTX);
	user_exit();

	if (has_syscall_work(flags)) {
		/* set default errno for user-issued syscall(-1) */
		if (scno == NO_SYSCALL)
			regs->regs[0] = -ENOSYS;
		scno = syscall_trace_enter(regs);
		if (scno == NO_SYSCALL)
			goto trace_exit;
	}

	invoke_syscall(regs, scno, sc_nr, syscall_table);

	/*
	 * The tracing status may have changed under our feet, so we have to
	 * check again. However, if we were tracing entry, then we always trace
	 * exit regardless, as the old entry assembly did.
	 */
	if (!has_syscall_work(flags) && !IS_ENABLED(CONFIG_DEBUG_RSEQ)) {
		local_daif_mask();
		flags = current_thread_info()->flags;
		if (!has_syscall_work(flags)) {
			/*
			 * We're off to userspace, where interrupts are
			 * always enabled after we restore the flags from
			 * the SPSR.
			 */
			trace_hardirqs_on();
			return;
		}
		local_daif_restore(DAIF_PROCCTX);
	}

trace_exit:
	syscall_trace_exit(regs);
}

static inline void sve_user_discard(void)
{
	if (!system_supports_sve())
		return;

	clear_thread_flag(TIF_SVE);

	/*
	 * task_fpsimd_load() won't be called to update CPACR_EL1 in
	 * ret_to_user unless TIF_FOREIGN_FPSTATE is still set, which only
	 * happens if a context switch or kernel_neon_begin() or context
	 * modification (sigreturn, ptrace) intervenes.
	 * So, ensure that CPACR_EL1 is already correct for the fast-path case.
	 */
	sve_user_disable();
}

/* Native AArch64 syscalls: the syscall number arrives in x8. */
asmlinkage void el0_svc_handler(struct pt_regs *regs)
{
	sve_user_discard();
	el0_svc_common(regs, regs->regs[8], __NR_syscalls, sys_call_table);
}

#ifdef CONFIG_COMPAT
/* Compat (AArch32) syscalls: the syscall number arrives in r7. */
asmlinkage void el0_svc_compat_handler(struct pt_regs *regs)
{
	el0_svc_common(regs, regs->regs[7], __NR_compat_syscalls,
		       compat_sys_call_table);
}
#endif