/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1995-99, 2000-02, 06 Ralf Baechle <ralf@linux-mips.org>
 * Copyright (C) 2001 MIPS Technologies, Inc.
 * Copyright (C) 2004 Thiemo Seufer
 * Copyright (C) 2014 Imagination Technologies Ltd.
 */

/*
 * o32 ABI system call entry for 32-bit MIPS kernels.
 *
 * handle_sys is the common syscall entry point: it saves state, copies
 * stack-passed arguments #5..#8 from the user stack to the kernel stack,
 * optionally runs syscall tracing, dispatches through sys_call_table and
 * stores the result/error flag back into the saved pt_regs.
 *
 * Register conventions on entry (o32): syscall number in v0, arguments
 * in a0-a3 plus the caller's stack; errors are reported to userspace via
 * a positive errno in v0 with the error flag in the saved a3 (PT_R7).
 */
#include <linux/errno.h>
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/irqflags.h>
#include <asm/mipsregs.h>
#include <asm/regdef.h>
#include <asm/stackframe.h>
#include <asm/isadep.h>
#include <asm/sysmips.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <asm/war.h>
#include <asm/asm-offsets.h>

	.align	5
NESTED(handle_sys, PT_SIZE, sp)
	.set	noat
	SAVE_SOME			# save caller-saved state into pt_regs
	TRACE_IRQS_ON_RELOAD
	STI				# re-enable interrupts
	.set	at

	lw	t1, PT_EPC(sp)		# skip syscall on return

	addiu	t1, 4			# skip to next instruction
	sw	t1, PT_EPC(sp)

	sw	a3, PT_R26(sp)		# save a3 for syscall restarting

	/*
	 * More than four arguments.  Try to deal with it by copying the
	 * stack arguments from the user stack to the kernel stack.
	 * This Sucks (TM).
	 */
	lw	t0, PT_R29(sp)		# get old user stack pointer

	/*
	 * We intentionally keep the kernel stack a little below the top of
	 * userspace so we don't have to do a slower byte accurate check here.
	 * If usp + 32 has the sign bit set, the 32-byte argument area would
	 * reach out of the user address range, so reject the stack pointer.
	 */
	addu	t4, t0, 32
	bltz	t4, bad_stack		# -> sp is bad

	/*
	 * Ok, copy the args from the luser stack to the kernel stack.
	 * The loads below may fault; the __ex_table entries further down
	 * redirect each faulting load to its bad_stack_a* fixup, which
	 * substitutes 0 and resumes with the next argument.
	 */

	.set	push
	.set	noreorder
	.set	nomacro

load_a4: user_lw(t5, 16(t0))		# argument #5 from usp
load_a5: user_lw(t6, 20(t0))		# argument #6 from usp
load_a6: user_lw(t7, 24(t0))		# argument #7 from usp
load_a7: user_lw(t8, 28(t0))		# argument #8 from usp
loads_done:

	sw	t5, 16(sp)		# argument #5 to ksp
	sw	t6, 20(sp)		# argument #6 to ksp
	sw	t7, 24(sp)		# argument #7 to ksp
	sw	t8, 28(sp)		# argument #8 to ksp
	.set	pop

	/* Exception table: faulting load address -> fixup handler. */
	.section __ex_table,"a"
	PTR_WD	load_a4, bad_stack_a4
	PTR_WD	load_a5, bad_stack_a5
	PTR_WD	load_a6, bad_stack_a6
	PTR_WD	load_a7, bad_stack_a7
	.previous

	lw	t0, TI_FLAGS($28)	# syscall tracing enabled?
	li	t1, _TIF_WORK_SYSCALL_ENTRY
	and	t0, t1
	bnez	t0, syscall_trace_entry	# -> yes
syscall_common:
	subu	v0, v0, __NR_O32_Linux	# check syscall number
	sltiu	t0, v0, __NR_O32_Linux_syscalls
	beqz	t0, illegal_syscall	# out of range -> ENOSYS

	sll	t0, v0, 2		# index = number * sizeof(ptr)
	la	t1, sys_call_table
	addu	t1, t0
	lw	t2, (t1)		# syscall routine

	beqz	t2, illegal_syscall	# NULL entry -> ENOSYS

	jalr	t2			# Do The Real Thing (TM)

	li	t0, -EMAXERRNO - 1	# error?
	sltu	t0, t0, v0		# t0 = 1 if v0 in [-EMAXERRNO, -1]
	sw	t0, PT_R7(sp)		# set error flag
	beqz	t0, 1f

	/* Error path: record the syscall number so the syscall can be
	 * restarted, and return the positive errno in v0. */
	lw	t1, PT_R2(sp)		# syscall number
	negu	v0			# error
	sw	t1, PT_R0(sp)		# save it for syscall restarting
1:	sw	v0, PT_R2(sp)		# result

o32_syscall_exit:
	j	syscall_exit_partial

/* ------------------------------------------------------------------------ */

syscall_trace_entry:
	SAVE_STATIC
	move	a0, sp			# arg: pt_regs

	/*
	 * syscall number is in v0 unless we called syscall(__NR_###)
	 * where the real syscall number is in a0
	 */
	move	a1, v0
	subu	t2, v0, __NR_O32_Linux
	bnez	t2, 1f			/* __NR_syscall at offset 0 */
	lw	a1, PT_R4(sp)		# indirect syscall: real number is in a0

1:	jal	syscall_trace_enter

	bltz	v0, 1f			# seccomp failed? Skip syscall

	/* Reload syscall number and arguments: the tracer may have
	 * modified them in the saved pt_regs. */
	RESTORE_STATIC
	lw	v0, PT_R2(sp)		# Restore syscall (maybe modified)
	lw	a0, PT_R4(sp)		# Restore argument registers
	lw	a1, PT_R5(sp)
	lw	a2, PT_R6(sp)
	lw	a3, PT_R7(sp)
	j	syscall_common

1:	j	syscall_exit

/* ------------------------------------------------------------------------ */

	/*
	 * Our open-coded access area sanity test for the stack pointer
	 * failed. We probably should handle this case a bit more drastic.
	 * Report EFAULT to the caller without running the syscall.
	 */
bad_stack:
	li	v0, EFAULT
	sw	v0, PT_R2(sp)		# return value = EFAULT
	li	t0, 1			# set error flag
	sw	t0, PT_R7(sp)
	j	o32_syscall_exit

	/*
	 * __ex_table fixups for the user_lw loads above: a faulting stack
	 * argument is replaced by 0 and loading continues with the next one.
	 */
bad_stack_a4:
	li	t5, 0			# argument #5 unreadable -> 0
	b	load_a5

bad_stack_a5:
	li	t6, 0			# argument #6 unreadable -> 0
	b	load_a6

bad_stack_a6:
	li	t7, 0			# argument #7 unreadable -> 0
	b	load_a7

bad_stack_a7:
	li	t8, 0			# argument #8 unreadable -> 0
	b	loads_done

	/*
	 * The system call does not exist in this kernel
	 */
illegal_syscall:
	li	v0, ENOSYS		# error
	sw	v0, PT_R2(sp)
	li	t0, 1			# set error flag
	sw	t0, PT_R7(sp)
	j	o32_syscall_exit
	END(handle_sys)

	/*
	 * sys_syscall(number, arg1, ...): indirect syscall.  Validates the
	 * number, shifts every argument down one slot (register and stack)
	 * and tail-calls the real handler.
	 */
	LEAF(sys_syscall)
	subu	t0, a0, __NR_O32_Linux	# check syscall number
	sltiu	v0, t0, __NR_O32_Linux_syscalls
	beqz	t0, einval		# do not recurse
	sll	t1, t0, 2
	beqz	v0, einval		# out of range -> ENOSYS
	lw	t2, sys_call_table(t1)	# syscall routine

	move	a0, a1			# shift argument registers
	move	a1, a2
	move	a2, a3
	lw	a3, 16(sp)		# stack args shift down one slot too
	lw	t4, 20(sp)
	lw	t5, 24(sp)
	lw	t6, 28(sp)
	sw	t4, 16(sp)
	sw	t5, 20(sp)
	sw	t6, 24(sp)
	jr	t2			# tail-call; handler returns to our ra
	/* Unreached */

einval:	li	v0, -ENOSYS
	jr	ra
	END(sys_syscall)

#ifdef CONFIG_MIPS_MT_FPAFF
	/*
	 * For FPU affinity scheduling on MIPS MT processors, we need to
	 * intercept sys_sched_xxxaffinity() calls until we get a proper hook
	 * in kernel/sched/core.c.  Considered only temporary we only support
	 * these hooks for the 32-bit kernel - there is no MIPS64 MT processor
	 * atm.
	 */
#define sys_sched_setaffinity	mipsmt_sys_sched_setaffinity
#define sys_sched_getaffinity	mipsmt_sys_sched_getaffinity
#endif /* CONFIG_MIPS_MT_FPAFF */

/* Emit the o32 syscall table: one pointer-sized entry per syscall. */
#define __SYSCALL_WITH_COMPAT(nr, native, compat)	__SYSCALL(nr, native)
#define __SYSCALL(nr, entry)	PTR_WD entry
	.align	2
	.type	sys_call_table, @object
EXPORT(sys_call_table)
#include <asm/syscall_table_o32.h>