/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1995, 1996, 1997, 2000, 2001, 05 by Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2001 MIPS Technologies, Inc.
 */
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/linkage.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/syscalls.h>
#include <linux/file.h>
#include <linux/utsname.h>
#include <linux/unistd.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/compiler.h>
#include <linux/ipc.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/elf.h>

#include <asm/asm.h>
#include <asm/branch.h>
#include <asm/cachectl.h>
#include <asm/cacheflush.h>
#include <asm/asm-offsets.h>
#include <asm/signal.h>
#include <asm/sim.h>
#include <asm/shmparam.h>
#include <asm/sysmips.h>
#include <asm/uaccess.h>
#include <asm/switch_to.h>

/*
 * For historic reasons the pipe(2) syscall on MIPS has an unusual calling
 * convention.  It returns results in registers $v0 / $v1 which means there
 * is no need for it to verify the validity of a userspace pointer
 * argument.  Historically that used to be expensive in Linux.  These days
 * the performance advantage is negligible.
 */
asmlinkage int sysm_pipe(nabi_no_regargs volatile struct pt_regs regs)
{
	int fd[2];
	int error, res;

	error = do_pipe_flags(fd, 0);
	if (error) {
		res = error;
		goto out;
	}
	regs.regs[3] = fd[1];
	res = fd[0];
out:
	return res;
}

SYSCALL_DEFINE6(mips_mmap, unsigned long, addr, unsigned long, len,
	unsigned long, prot, unsigned long, flags, unsigned long,
	fd, off_t, offset)
{
	unsigned long result;

	result = -EINVAL;
	/* The byte offset must be page aligned. */
	if (offset & ~PAGE_MASK)
		goto out;

	result = sys_mmap_pgoff(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);

out:
	return result;
}

SYSCALL_DEFINE6(mips_mmap2, unsigned long, addr, unsigned long, len,
	unsigned long, prot, unsigned long, flags, unsigned long, fd,
	unsigned long, pgoff)
{
	/* mmap2() takes its offset in units of 4096 bytes, independent of PAGE_SIZE. */
	if (pgoff & (~PAGE_MASK >> 12))
		return -EINVAL;

	return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff >> (PAGE_SHIFT-12));
}

save_static_function(sys_fork);
static int __used noinline
_sys_fork(nabi_no_regargs struct pt_regs regs)
{
	return do_fork(SIGCHLD, regs.regs[29], &regs, 0, NULL, NULL);
}

save_static_function(sys_clone);
static int __used noinline
_sys_clone(nabi_no_regargs struct pt_regs regs)
{
	unsigned long clone_flags;
	unsigned long newsp;
	int __user *parent_tidptr, *child_tidptr;

	clone_flags = regs.regs[4];
	newsp = regs.regs[5];
	if (!newsp)
		newsp = regs.regs[29];
	parent_tidptr = (int __user *) regs.regs[6];
#ifdef CONFIG_32BIT
	/* We need to fetch the fifth argument off the stack.  */
	child_tidptr = NULL;
	if (clone_flags & (CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)) {
		int __user *__user *usp = (int __user *__user *) regs.regs[29];
		if (regs.regs[2] == __NR_syscall) {
			if (get_user (child_tidptr, &usp[5]))
				return -EFAULT;
		}
		else if (get_user (child_tidptr, &usp[4]))
			return -EFAULT;
	}
#else
	child_tidptr = (int __user *) regs.regs[8];
#endif
	return do_fork(clone_flags, newsp, &regs, 0,
	               parent_tidptr, child_tidptr);
}

/*
 * sys_execve() executes a new program.
 */
asmlinkage int sys_execve(nabi_no_regargs struct pt_regs regs)
{
	int error;
	char * filename;

	filename = getname((const char __user *) (long)regs.regs[4]);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;
	error = do_execve(filename,
			  (const char __user *const __user *) (long)regs.regs[5],
			  (const char __user *const __user *) (long)regs.regs[6],
			  &regs);
	putname(filename);

out:
	return error;
}

SYSCALL_DEFINE1(set_thread_area, unsigned long, addr)
{
	struct thread_info *ti = task_thread_info(current);

	ti->tp_value = addr;
	if (cpu_has_userlocal)
		write_c0_userlocal(addr);

	return 0;
}

static inline int mips_atomic_set(struct pt_regs *regs,
	unsigned long addr, unsigned long new)
{
	unsigned long old, tmp;
	unsigned int err;

	if (unlikely(addr & 3))
		return -EINVAL;

	if (unlikely(!access_ok(VERIFY_WRITE, addr, 4)))
		return -EINVAL;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		/* ll/sc loop using branch-likely (R10000_LLSC_WAR variant). */
		__asm__ __volatile__ (
		"	.set	mips3				\n"
		"	li	%[err], 0			\n"
		"1:	ll	%[old], (%[addr])		\n"
		"	move	%[tmp], %[new]			\n"
		"2:	sc	%[tmp], (%[addr])		\n"
		"	beqzl	%[tmp], 1b			\n"
		"3:						\n"
		"	.section .fixup,\"ax\"			\n"
		"4:	li	%[err], %[efault]		\n"
		"	j	3b				\n"
		"	.previous				\n"
		"	.section __ex_table,\"a\"		\n"
		"	"STR(PTR)"	1b, 4b			\n"
		"	"STR(PTR)"	2b, 4b			\n"
		"	.previous				\n"
		"	.set	mips0				\n"
		: [old] "=&r" (old),
		  [err] "=&r" (err),
		  [tmp] "=&r" (tmp)
		: [addr] "r" (addr),
		  [new] "r" (new),
		  [efault] "i" (-EFAULT)
		: "memory");
	} else if (cpu_has_llsc) {
		/* Plain ll/sc loop; the retry branch is placed out of line in subsection 2. */
		__asm__ __volatile__ (
		"	.set	mips3				\n"
		"	li	%[err], 0			\n"
		"1:	ll	%[old], (%[addr])		\n"
		"	move	%[tmp], %[new]			\n"
		"2:	sc	%[tmp], (%[addr])		\n"
		"	bnez	%[tmp], 4f			\n"
		"3:						\n"
		"	.subsection 2				\n"
		"4:	b	1b				\n"
		"	.previous				\n"
		"						\n"
		"	.section .fixup,\"ax\"			\n"
		"5:	li	%[err], %[efault]		\n"
		"	j	3b				\n"
		"	.previous				\n"
		"	.section __ex_table,\"a\"		\n"
		"	"STR(PTR)"	1b, 5b			\n"
		"	"STR(PTR)"	2b, 5b			\n"
		"	.previous				\n"
		"	.set	mips0				\n"
		: [old] "=&r" (old),
		  [err] "=&r" (err),
		  [tmp] "=&r" (tmp)
		: [addr] "r" (addr),
		  [new] "r" (new),
		  [efault] "i" (-EFAULT)
		: "memory");
	} else {
		/*
		 * No ll/sc on this CPU: do the read-modify-write with
		 * get_user/put_user and retry if ll_bit was cleared
		 * underneath us before we finished.
		 */
		do {
			preempt_disable();
			ll_bit = 1;
			ll_task = current;
			preempt_enable();

			err = __get_user(old, (unsigned int *) addr);
			err |= __put_user(new, (unsigned int *) addr);
			if (err)
				break;
			rmb();
		} while (!ll_bit);
	}

	if (unlikely(err))
		return err;

	regs->regs[2] = old;
	regs->regs[7] = 0;	/* No error */

	/*
	 * Don't let your children do this ...
	 */
	__asm__ __volatile__(
	"	move	$29, %0				\n"
	"	j	syscall_exit			\n"
	: /* no outputs */
	: "r" (regs));

	/* unreached.  Honestly.  */
	while (1);
}

save_static_function(sys_sysmips);
static int __used noinline
_sys_sysmips(nabi_no_regargs struct pt_regs regs)
{
	long cmd, arg1, arg2;

	cmd = regs.regs[4];
	arg1 = regs.regs[5];
	arg2 = regs.regs[6];

	switch (cmd) {
	case MIPS_ATOMIC_SET:
		return mips_atomic_set(&regs, arg1, arg2);

	case MIPS_FIXADE:
		if (arg1 & ~3)
			return -EINVAL;

		if (arg1 & 1)
			set_thread_flag(TIF_FIXADE);
		else
			clear_thread_flag(TIF_FIXADE);
		if (arg1 & 2)
			set_thread_flag(TIF_LOGADE);
		else
			clear_thread_flag(TIF_LOGADE);

		return 0;

	case FLUSH_CACHE:
		__flush_cache_all();
		return 0;
	}

	return -EINVAL;
}

/*
 * Not implemented yet ...
 */
SYSCALL_DEFINE3(cachectl, char *, addr, int, nbytes, int, op)
{
	return -ENOSYS;
}

/*
 * If we ever come here the user sp is bad.  Zap the process right away.
 * Due to the bad stack signaling wouldn't work.
 */
asmlinkage void bad_stack(void)
{
	do_exit(SIGSEGV);
}

/*
 * Do a system call from kernel instead of calling sys_execve so we
 * end up with proper pt_regs.
 */
int kernel_execve(const char *filename,
		  const char *const argv[],
		  const char *const envp[])
{
	register unsigned long __a0 asm("$4") = (unsigned long) filename;
	register unsigned long __a1 asm("$5") = (unsigned long) argv;
	register unsigned long __a2 asm("$6") = (unsigned long) envp;
	register unsigned long __a3 asm("$7");
	unsigned long __v0;

	__asm__ volatile ("					\n"
	"	.set	noreorder				\n"
	"	li	$2, %5		# __NR_execve		\n"
	"	syscall						\n"
	"	move	%0, $2					\n"
	"	.set	reorder					\n"
	: "=&r" (__v0), "=r" (__a3)
	: "r" (__a0), "r" (__a1), "r" (__a2), "i" (__NR_execve)
	: "$2", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24",
	  "memory");

	if (__a3 == 0)
		return __v0;

	return -__v0;
}