xref: /openbmc/linux/arch/arm64/kernel/syscall.c (revision 176f011b)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 #include <linux/compiler.h>
4 #include <linux/context_tracking.h>
5 #include <linux/errno.h>
6 #include <linux/nospec.h>
7 #include <linux/ptrace.h>
8 #include <linux/syscalls.h>
9 
10 #include <asm/daifflags.h>
11 #include <asm/fpsimd.h>
12 #include <asm/syscall.h>
13 #include <asm/thread_info.h>
14 #include <asm/unistd.h>
15 
16 long compat_arm_syscall(struct pt_regs *regs, int scno);
17 long sys_ni_syscall(void);
18 
/*
 * Fallback for an out-of-range syscall number: give the compat layer a
 * chance to service the ARM-private 32-bit calls first, then fail with
 * the standard -ENOSYS.
 */
static long do_ni_syscall(struct pt_regs *regs, int scno)
{
#ifdef CONFIG_COMPAT
	if (is_compat_task()) {
		long ret = compat_arm_syscall(regs, scno);

		/* Anything but -ENOSYS means the compat layer handled it. */
		if (ret != -ENOSYS)
			return ret;
	}
#endif

	return sys_ni_syscall();
}
32 
/*
 * Single indirection point for calling a syscall handler; all arguments
 * are passed via the saved user register state in pt_regs.
 */
static long __invoke_syscall(struct pt_regs *regs, syscall_fn_t syscall_fn)
{
	return syscall_fn(regs);
}
37 
38 static void invoke_syscall(struct pt_regs *regs, unsigned int scno,
39 			   unsigned int sc_nr,
40 			   const syscall_fn_t syscall_table[])
41 {
42 	long ret;
43 
44 	if (scno < sc_nr) {
45 		syscall_fn_t syscall_fn;
46 		syscall_fn = syscall_table[array_index_nospec(scno, sc_nr)];
47 		ret = __invoke_syscall(regs, syscall_fn);
48 	} else {
49 		ret = do_ni_syscall(regs, scno);
50 	}
51 
52 	regs->regs[0] = ret;
53 }
54 
55 static inline bool has_syscall_work(unsigned long flags)
56 {
57 	return unlikely(flags & _TIF_SYSCALL_WORK);
58 }
59 
60 int syscall_trace_enter(struct pt_regs *regs);
61 void syscall_trace_exit(struct pt_regs *regs);
62 
/*
 * Common EL0 SVC path shared by the native and compat entry handlers.
 *
 * Saves the original x0 and the syscall number into pt_regs, re-enables
 * interrupts (DAIF_PROCCTX) and exits the user context-tracking state,
 * then runs optional syscall-entry tracing before dispatching via
 * invoke_syscall(). On the no-tracing fast path it returns with IRQs
 * masked and hardirq tracing marked on, ready for ret_to_user; otherwise
 * it runs syscall-exit tracing.
 */
static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
			   const syscall_fn_t syscall_table[])
{
	unsigned long flags = current_thread_info()->flags;

	/* Preserve x0 so a traced/restarted syscall can see the original. */
	regs->orig_x0 = regs->regs[0];
	regs->syscallno = scno;

	local_daif_restore(DAIF_PROCCTX);
	user_exit();

	if (has_syscall_work(flags)) {
		/* set default errno for user-issued syscall(-1) */
		if (scno == NO_SYSCALL)
			regs->regs[0] = -ENOSYS;
		/* The tracer may rewrite the syscall number, or skip it. */
		scno = syscall_trace_enter(regs);
		if (scno == NO_SYSCALL)
			goto trace_exit;
	}

	invoke_syscall(regs, scno, sc_nr, syscall_table);

	/*
	 * The tracing status may have changed under our feet, so we have to
	 * check again. However, if we were tracing entry, then we always trace
	 * exit regardless, as the old entry assembly did.
	 */
	if (!has_syscall_work(flags) && !IS_ENABLED(CONFIG_DEBUG_RSEQ)) {
		/* Mask interrupts before re-reading flags to avoid a race. */
		local_daif_mask();
		flags = current_thread_info()->flags;
		if (!has_syscall_work(flags)) {
			/*
			 * We're off to userspace, where interrupts are
			 * always enabled after we restore the flags from
			 * the SPSR.
			 */
			trace_hardirqs_on();
			return;
		}
		/* Work appeared during the syscall: re-enable and trace exit. */
		local_daif_restore(DAIF_PROCCTX);
	}

trace_exit:
	syscall_trace_exit(regs);
}
108 
109 static inline void sve_user_discard(void)
110 {
111 	if (!system_supports_sve())
112 		return;
113 
114 	clear_thread_flag(TIF_SVE);
115 
116 	/*
117 	 * task_fpsimd_load() won't be called to update CPACR_EL1 in
118 	 * ret_to_user unless TIF_FOREIGN_FPSTATE is still set, which only
119 	 * happens if a context switch or kernel_neon_begin() or context
120 	 * modification (sigreturn, ptrace) intervenes.
121 	 * So, ensure that CPACR_EL1 is already correct for the fast-path case.
122 	 */
123 	sve_user_disable();
124 }
125 
/*
 * EL0 SVC entry point for native (AArch64) tasks, called from the entry
 * assembly; the syscall number is taken from x8 (regs->regs[8]).
 */
asmlinkage void el0_svc_handler(struct pt_regs *regs)
{
	sve_user_discard();
	el0_svc_common(regs, regs->regs[8], __NR_syscalls, sys_call_table);
}
131 
#ifdef CONFIG_COMPAT
/*
 * EL0 SVC entry point for compat (AArch32) tasks; the syscall number is
 * taken from r7 (regs->regs[7]) and dispatched through the compat table.
 */
asmlinkage void el0_svc_compat_handler(struct pt_regs *regs)
{
	el0_svc_common(regs, regs->regs[7], __NR_compat_syscalls,
		       compat_sys_call_table);
}
#endif
139