xref: /openbmc/linux/arch/mips/kernel/syscall.c (revision 82ced6fd)
1 /*
2  * This file is subject to the terms and conditions of the GNU General Public
3  * License.  See the file "COPYING" in the main directory of this archive
4  * for more details.
5  *
6  * Copyright (C) 1995, 1996, 1997, 2000, 2001, 05 by Ralf Baechle
7  * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
8  * Copyright (C) 2001 MIPS Technologies, Inc.
9  */
10 #include <linux/capability.h>
11 #include <linux/errno.h>
12 #include <linux/linkage.h>
13 #include <linux/mm.h>
14 #include <linux/fs.h>
15 #include <linux/smp.h>
16 #include <linux/mman.h>
17 #include <linux/ptrace.h>
18 #include <linux/sched.h>
19 #include <linux/string.h>
20 #include <linux/syscalls.h>
21 #include <linux/file.h>
22 #include <linux/slab.h>
23 #include <linux/utsname.h>
24 #include <linux/unistd.h>
25 #include <linux/sem.h>
26 #include <linux/msg.h>
27 #include <linux/shm.h>
28 #include <linux/compiler.h>
29 #include <linux/module.h>
30 #include <linux/ipc.h>
31 
32 #include <asm/branch.h>
33 #include <asm/cachectl.h>
34 #include <asm/cacheflush.h>
35 #include <asm/asm-offsets.h>
36 #include <asm/signal.h>
37 #include <asm/sim.h>
38 #include <asm/shmparam.h>
39 #include <asm/sysmips.h>
40 #include <asm/uaccess.h>
41 
42 /*
43  * For historic reasons the pipe(2) syscall on MIPS has an unusual calling
44  * convention.  It returns results in registers $v0 / $v1 which means there
 * is no need for it to verify the validity of a userspace pointer
46  * argument.  Historically that used to be expensive in Linux.  These days
47  * the performance advantage is negligible.
48  */
49 asmlinkage int sysm_pipe(nabi_no_regargs volatile struct pt_regs regs)
50 {
51 	int fd[2];
52 	int error, res;
53 
54 	error = do_pipe_flags(fd, 0);
55 	if (error) {
56 		res = error;
57 		goto out;
58 	}
59 	regs.regs[3] = fd[1];
60 	res = fd[0];
61 out:
62 	return res;
63 }
64 
/*
 * Alignment mask applied to shared mappings to avoid cache aliasing.
 * PAGE_SIZE - 1 means no extra colouring is needed ("sane" caches);
 * presumably enlarged by platform/cpu setup for aliasing caches —
 * confirm against the cache initialization code.
 */
unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */

EXPORT_SYMBOL(shm_align_mask);

/* Round addr up to the next boundary carrying the colour implied by pgoff. */
#define COLOUR_ALIGN(addr,pgoff)				\
	((((addr) + shm_align_mask) & ~shm_align_mask) +	\
	 (((pgoff) << PAGE_SHIFT) & shm_align_mask))
72 
73 unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
74 	unsigned long len, unsigned long pgoff, unsigned long flags)
75 {
76 	struct vm_area_struct * vmm;
77 	int do_color_align;
78 	unsigned long task_size;
79 
80 	task_size = STACK_TOP;
81 
82 	if (len > task_size)
83 		return -ENOMEM;
84 
85 	if (flags & MAP_FIXED) {
86 		/* Even MAP_FIXED mappings must reside within task_size.  */
87 		if (task_size - len < addr)
88 			return -EINVAL;
89 
90 		/*
91 		 * We do not accept a shared mapping if it would violate
92 		 * cache aliasing constraints.
93 		 */
94 		if ((flags & MAP_SHARED) && (addr & shm_align_mask))
95 			return -EINVAL;
96 		return addr;
97 	}
98 
99 	do_color_align = 0;
100 	if (filp || (flags & MAP_SHARED))
101 		do_color_align = 1;
102 	if (addr) {
103 		if (do_color_align)
104 			addr = COLOUR_ALIGN(addr, pgoff);
105 		else
106 			addr = PAGE_ALIGN(addr);
107 		vmm = find_vma(current->mm, addr);
108 		if (task_size - len >= addr &&
109 		    (!vmm || addr + len <= vmm->vm_start))
110 			return addr;
111 	}
112 	addr = TASK_UNMAPPED_BASE;
113 	if (do_color_align)
114 		addr = COLOUR_ALIGN(addr, pgoff);
115 	else
116 		addr = PAGE_ALIGN(addr);
117 
118 	for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
119 		/* At this point:  (!vmm || addr < vmm->vm_end). */
120 		if (task_size - len < addr)
121 			return -ENOMEM;
122 		if (!vmm || addr + len <= vmm->vm_start)
123 			return addr;
124 		addr = vmm->vm_end;
125 		if (do_color_align)
126 			addr = COLOUR_ALIGN(addr, pgoff);
127 	}
128 }
129 
130 /* common code for old and new mmaps */
131 static inline unsigned long
132 do_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
133         unsigned long flags, unsigned long fd, unsigned long pgoff)
134 {
135 	unsigned long error = -EBADF;
136 	struct file * file = NULL;
137 
138 	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
139 	if (!(flags & MAP_ANONYMOUS)) {
140 		file = fget(fd);
141 		if (!file)
142 			goto out;
143 	}
144 
145 	down_write(&current->mm->mmap_sem);
146 	error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
147 	up_write(&current->mm->mmap_sem);
148 
149 	if (file)
150 		fput(file);
151 out:
152 	return error;
153 }
154 
155 SYSCALL_DEFINE6(mips_mmap, unsigned long, addr, unsigned long, len,
156 	unsigned long, prot, unsigned long, flags, unsigned long,
157 	fd, off_t, offset)
158 {
159 	unsigned long result;
160 
161 	result = -EINVAL;
162 	if (offset & ~PAGE_MASK)
163 		goto out;
164 
165 	result = do_mmap2(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);
166 
167 out:
168 	return result;
169 }
170 
171 SYSCALL_DEFINE6(mips_mmap2, unsigned long, addr, unsigned long, len,
172 	unsigned long, prot, unsigned long, flags, unsigned long, fd,
173 	unsigned long, pgoff)
174 {
175 	if (pgoff & (~PAGE_MASK >> 12))
176 		return -EINVAL;
177 
178 	return do_mmap2(addr, len, prot, flags, fd, pgoff >> (PAGE_SHIFT-12));
179 }
180 
/*
 * fork(2).  save_static_function() emits the real sys_fork entry point,
 * which presumably saves the static (callee-saved) registers into the
 * pt_regs frame before falling into _sys_fork — see asm/sim.h to confirm.
 */
save_static_function(sys_fork);
static int __used noinline
_sys_fork(nabi_no_regargs struct pt_regs regs)
{
	/* Child keeps the parent's stack pointer ($sp = regs[29]). */
	return do_fork(SIGCHLD, regs.regs[29], &regs, 0, NULL, NULL);
}
187 
/*
 * clone(2).  Arguments arrive in the MIPS argument registers saved in
 * pt_regs; on 32-bit the fifth argument lives on the user stack.
 */
save_static_function(sys_clone);
static int __used noinline
_sys_clone(nabi_no_regargs struct pt_regs regs)
{
	unsigned long clone_flags;
	unsigned long newsp;
	int __user *parent_tidptr, *child_tidptr;

	clone_flags = regs.regs[4];	/* $a0 */
	newsp = regs.regs[5];		/* $a1 */
	if (!newsp)
		newsp = regs.regs[29];	/* default: reuse parent's $sp */
	parent_tidptr = (int __user *) regs.regs[6];	/* $a2 */
#ifdef CONFIG_32BIT
	/* We need to fetch the fifth argument off the stack.  */
	child_tidptr = NULL;
	if (clone_flags & (CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)) {
		int __user *__user *usp = (int __user *__user *) regs.regs[29];
		if (regs.regs[2] == __NR_syscall) {
			/* Indirect syscall(2): arguments shifted one slot. */
			if (get_user (child_tidptr, &usp[5]))
				return -EFAULT;
		}
		else if (get_user (child_tidptr, &usp[4]))
			return -EFAULT;
	}
#else
	child_tidptr = (int __user *) regs.regs[8];
#endif
	return do_fork(clone_flags, newsp, &regs, 0,
	               parent_tidptr, child_tidptr);
}
219 
220 /*
221  * sys_execve() executes a new program.
222  */
223 asmlinkage int sys_execve(nabi_no_regargs struct pt_regs regs)
224 {
225 	int error;
226 	char * filename;
227 
228 	filename = getname((char __user *) (long)regs.regs[4]);
229 	error = PTR_ERR(filename);
230 	if (IS_ERR(filename))
231 		goto out;
232 	error = do_execve(filename, (char __user *__user *) (long)regs.regs[5],
233 	                  (char __user *__user *) (long)regs.regs[6], &regs);
234 	putname(filename);
235 
236 out:
237 	return error;
238 }
239 
240 /*
241  * Compacrapability ...
242  */
243 SYSCALL_DEFINE1(uname, struct old_utsname __user *, name)
244 {
245 	if (name && !copy_to_user(name, utsname(), sizeof (*name)))
246 		return 0;
247 	return -EFAULT;
248 }
249 
250 /*
251  * Compacrapability ...
252  */
253 SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
254 {
255 	int error;
256 
257 	if (!name)
258 		return -EFAULT;
259 	if (!access_ok(VERIFY_WRITE, name, sizeof(struct oldold_utsname)))
260 		return -EFAULT;
261 
262 	error = __copy_to_user(&name->sysname, &utsname()->sysname,
263 			       __OLD_UTS_LEN);
264 	error -= __put_user(0, name->sysname + __OLD_UTS_LEN);
265 	error -= __copy_to_user(&name->nodename, &utsname()->nodename,
266 				__OLD_UTS_LEN);
267 	error -= __put_user(0, name->nodename + __OLD_UTS_LEN);
268 	error -= __copy_to_user(&name->release, &utsname()->release,
269 				__OLD_UTS_LEN);
270 	error -= __put_user(0, name->release + __OLD_UTS_LEN);
271 	error -= __copy_to_user(&name->version, &utsname()->version,
272 				__OLD_UTS_LEN);
273 	error -= __put_user(0, name->version + __OLD_UTS_LEN);
274 	error -= __copy_to_user(&name->machine, &utsname()->machine,
275 				__OLD_UTS_LEN);
276 	error = __put_user(0, name->machine + __OLD_UTS_LEN);
277 	error = error ? -EFAULT : 0;
278 
279 	return error;
280 }
281 
282 SYSCALL_DEFINE1(set_thread_area, unsigned long, addr)
283 {
284 	struct thread_info *ti = task_thread_info(current);
285 
286 	ti->tp_value = addr;
287 	if (cpu_has_userlocal)
288 		write_c0_userlocal(addr);
289 
290 	return 0;
291 }
292 
293 asmlinkage int _sys_sysmips(long cmd, long arg1, long arg2, long arg3)
294 {
295 	switch (cmd) {
296 	case MIPS_ATOMIC_SET:
297 		printk(KERN_CRIT "How did I get here?\n");
298 		return -EINVAL;
299 
300 	case MIPS_FIXADE:
301 		if (arg1 & ~3)
302 			return -EINVAL;
303 
304 		if (arg1 & 1)
305 			set_thread_flag(TIF_FIXADE);
306 		else
307 			clear_thread_flag(TIF_FIXADE);
308 		if (arg1 & 2)
309 			set_thread_flag(TIF_LOGADE);
310 		else
311 			clear_thread_flag(TIF_FIXADE);
312 
313 		return 0;
314 
315 	case FLUSH_CACHE:
316 		__flush_cache_all();
317 		return 0;
318 	}
319 
320 	return -EINVAL;
321 }
322 
323 /*
324  * sys_ipc() is the de-multiplexer for the SysV IPC calls..
325  *
326  * This is really horribly ugly.
327  */
SYSCALL_DEFINE6(ipc, unsigned int, call, int, first, int, second,
	unsigned long, third, void __user *, ptr, long, fifth)
{
	int version, ret;

	version = call >> 16; /* hack for backward compatibility */
	call &= 0xffff;

	switch (call) {
	/* --- semaphore calls --- */
	case SEMOP:
		/* SEMOP is just SEMTIMEDOP with no timeout. */
		return sys_semtimedop(first, (struct sembuf __user *)ptr,
		                      second, NULL);
	case SEMTIMEDOP:
		return sys_semtimedop(first, (struct sembuf __user *)ptr,
				      second,
				      (const struct timespec __user *)fifth);
	case SEMGET:
		return sys_semget(first, second, third);
	case SEMCTL: {
		union semun fourth;
		if (!ptr)
			return -EINVAL;
		/* ptr points at the union semun argument in user space. */
		if (get_user(fourth.__pad, (void __user *__user *) ptr))
			return -EFAULT;
		return sys_semctl(first, second, third, fourth);
	}

	/* --- message queue calls --- */
	case MSGSND:
		return sys_msgsnd(first, (struct msgbuf __user *) ptr,
				  second, third);
	case MSGRCV:
		switch (version) {
		case 0: {
			/* Old ABI: msgp and msgtyp packed in ipc_kludge. */
			struct ipc_kludge tmp;
			if (!ptr)
				return -EINVAL;

			if (copy_from_user(&tmp,
					   (struct ipc_kludge __user *) ptr,
					   sizeof(tmp)))
				return -EFAULT;
			return sys_msgrcv(first, tmp.msgp, second,
					  tmp.msgtyp, third);
		}
		default:
			return sys_msgrcv(first,
					  (struct msgbuf __user *) ptr,
					  second, fifth, third);
		}
	case MSGGET:
		return sys_msgget((key_t) first, second);
	case MSGCTL:
		return sys_msgctl(first, second,
				  (struct msqid_ds __user *) ptr);

	/* --- shared memory calls --- */
	case SHMAT:
		switch (version) {
		default: {
			unsigned long raddr;
			ret = do_shmat(first, (char __user *) ptr, second,
				       &raddr);
			if (ret)
				return ret;
			/* Hand the attach address back through *third. */
			return put_user(raddr, (unsigned long __user *) third);
		}
		case 1:	/* iBCS2 emulator entry point */
			/* Only legal when running with a kernel segment. */
			if (!segment_eq(get_fs(), get_ds()))
				return -EINVAL;
			return do_shmat(first, (char __user *) ptr, second,
				        (unsigned long *) third);
		}
	case SHMDT:
		return sys_shmdt((char __user *)ptr);
	case SHMGET:
		return sys_shmget(first, second, third);
	case SHMCTL:
		return sys_shmctl(first, second,
				  (struct shmid_ds __user *) ptr);
	default:
		return -ENOSYS;
	}
}
410 
411 /*
 * Not implemented yet ...
413  */
SYSCALL_DEFINE3(cachectl, char *, addr, int, nbytes, int, op)
{
	/* Stub: cachectl(2) is unimplemented; always -ENOSYS. */
	return -ENOSYS;
}
418 
419 /*
420  * If we ever come here the user sp is bad.  Zap the process right away.
421  * Due to the bad stack signaling wouldn't work.
422  */
asmlinkage void bad_stack(void)
{
	/*
	 * Signal delivery would need the (bad) user stack, so just kill
	 * the process outright.
	 */
	do_exit(SIGSEGV);
}
427 
428 /*
429  * Do a system call from kernel instead of calling sys_execve so we
430  * end up with proper pt_regs.
431  */
/*
 * Issue the execve syscall from kernel context via a real `syscall`
 * instruction so the exception entry path builds a proper pt_regs frame.
 */
int kernel_execve(const char *filename, char *const argv[], char *const envp[])
{
	/* Marshal the three arguments into the MIPS argument registers. */
	register unsigned long __a0 asm("$4") = (unsigned long) filename;
	register unsigned long __a1 asm("$5") = (unsigned long) argv;
	register unsigned long __a2 asm("$6") = (unsigned long) envp;
	register unsigned long __a3 asm("$7");	/* error indicator on return */
	unsigned long __v0;

	__asm__ volatile ("					\n"
	"	.set	noreorder				\n"
	"	li	$2, %5		# __NR_execve		\n"
	"	syscall						\n"
	"	move	%0, $2					\n"
	"	.set	reorder					\n"
	: "=&r" (__v0), "=r" (__a3)
	: "r" (__a0), "r" (__a1), "r" (__a2), "i" (__NR_execve)
	: "$2", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24",
	  "memory");

	/*
	 * Syscall return convention as used here: $a3 == 0 means success
	 * ($v0 is the result); non-zero means $v0 holds a positive errno.
	 */
	if (__a3 == 0)
		return __v0;

	return -__v0;
}
456