xref: /openbmc/linux/arch/mips/kernel/syscall.c (revision f46a6804135795f77d096ab0128f27531c7d051c)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1995, 1996, 1997, 2000, 2001, 05 by Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2001 MIPS Technologies, Inc.
 */
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/linkage.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/mman.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/syscalls.h>
#include <linux/file.h>
#include <linux/utsname.h>
#include <linux/unistd.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/ipc.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/elf.h>

#include <asm/asm.h>
#include <asm/branch.h>
#include <asm/cachectl.h>
#include <asm/cacheflush.h>
#include <asm/asm-offsets.h>
#include <asm/signal.h>
#include <asm/sim.h>
#include <asm/shmparam.h>
#include <asm/sysmips.h>
#include <asm/uaccess.h>

/*
 * For historic reasons the pipe(2) syscall on MIPS has an unusual calling
 * convention.  It returns results in registers $v0 / $v1 which means there
 * is no need for it to verify the validity of a userspace pointer
 * argument.  Historically that used to be expensive in Linux.  These days
 * the performance advantage is negligible.
 */
asmlinkage int sysm_pipe(nabi_no_regargs volatile struct pt_regs regs)
{
	int fd[2];
	int error, res;

	error = do_pipe_flags(fd, 0);
	if (error) {
		res = error;
		goto out;
	}
	regs.regs[3] = fd[1];
	res = fd[0];
out:
	return res;
}
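
/*
 * Illustrative sketch (not part of this file): a userspace wrapper for the
 * two-register return described above might look roughly like this under
 * the o32 syscall ABI, where a non-zero $a3 signals an error and $v0 then
 * holds the errno value.  The wrapper and its clobber list are simplified
 * and purely hypothetical.
 *
 *	int my_pipe(int fd[2])
 *	{
 *		register long v0 asm("$2") = __NR_pipe;
 *		register long v1 asm("$3");
 *		register long a3 asm("$7");
 *
 *		asm volatile("syscall"
 *			     : "+r" (v0), "=r" (v1), "=r" (a3)
 *			     : : "memory");
 *		if (a3)
 *			return -(int) v0;
 *		fd[0] = v0;
 *		fd[1] = v1;
 *		return 0;
 *	}
 */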

unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */

EXPORT_SYMBOL(shm_align_mask);

#define COLOUR_ALIGN(addr,pgoff)				\
	((((addr) + shm_align_mask) & ~shm_align_mask) +	\
	 (((pgoff) << PAGE_SHIFT) & shm_align_mask))
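
/*
 * Worked example (figures are illustrative, not from this file): with 4K
 * pages and a 16K shm_align_mask of 0x3fff, COLOUR_ALIGN(0x20001234, 3)
 * first rounds the address up to the next 16K boundary, 0x20004000, then
 * adds the colour of the file offset, (3 << 12) & 0x3fff = 0x3000, giving
 * 0x20007000.  Mapping address and file offset then share a cache colour,
 * which is what avoids aliasing in a virtually indexed cache.
 */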

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct vm_area_struct * vmm;
	int do_color_align;
	unsigned long task_size;

#ifdef CONFIG_32BIT
	task_size = TASK_SIZE;
#else /* Must be CONFIG_64BIT */
	task_size = test_thread_flag(TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE;
#endif

	if (len > task_size)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		/* Even MAP_FIXED mappings must reside within task_size.  */
		if (task_size - len < addr)
			return -EINVAL;

		/*
		 * We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
			return -EINVAL;
		return addr;
	}

	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;
	if (addr) {
		if (do_color_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);
		vmm = find_vma(current->mm, addr);
		if (task_size - len >= addr &&
		    (!vmm || addr + len <= vmm->vm_start))
			return addr;
	}
	addr = current->mm->mmap_base;
	if (do_color_align)
		addr = COLOUR_ALIGN(addr, pgoff);
	else
		addr = PAGE_ALIGN(addr);

	for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
		/* At this point:  (!vmm || addr < vmm->vm_end). */
		if (task_size - len < addr)
			return -ENOMEM;
		if (!vmm || addr + len <= vmm->vm_start)
			return addr;
		addr = vmm->vm_end;
		if (do_color_align)
			addr = COLOUR_ALIGN(addr, pgoff);
	}
}
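
/*
 * Illustrative example (figures assumed, not from this file): with a 16K
 * shm_align_mask, a request such as
 *
 *	mmap((void *)0x20001000, len, prot, MAP_FIXED | MAP_SHARED, fd, 0x8000)
 *
 * is rejected with -EINVAL by the MAP_FIXED branch above, because
 * 0x20001000 - 0x8000 = 0x1fff9000 is not a multiple of 16K: the fixed
 * address and the file offset would land in different cache colours.
 * Requests without MAP_FIXED are instead moved to a colour-aligned address
 * by the search loop above.
 */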

void arch_pick_mmap_layout(struct mm_struct *mm)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE) {
		random_factor = get_random_int();
		random_factor = random_factor << PAGE_SHIFT;
		if (TASK_IS_32BIT_ADDR)
			random_factor &= 0xfffffful;
		else
			random_factor &= 0xffffffful;
	}

	mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
	mm->get_unmapped_area = arch_get_unmapped_area;
	mm->unmap_area = arch_unmap_area;
}
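
/*
 * Illustrative note: random_factor is shifted left by PAGE_SHIFT, so it is
 * always page aligned, and the masks above bound the mmap_base
 * randomisation to roughly 16MB (0xffffff) for 32-bit address spaces and
 * 256MB (0xfffffff) for 64-bit ones.
 */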

static inline unsigned long brk_rnd(void)
{
	unsigned long rnd = get_random_int();

	rnd = rnd << PAGE_SHIFT;
	/* 8MB for 32bit, 256MB for 64bit */
	if (TASK_IS_32BIT_ADDR)
		rnd = rnd & 0x7ffffful;
	else
		rnd = rnd & 0xffffffful;

	return rnd;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long base = mm->brk;
	unsigned long ret;

	ret = PAGE_ALIGN(base + brk_rnd());

	if (ret < mm->brk)
		return mm->brk;

	return ret;
}
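
/*
 * Note: the "ret < mm->brk" test above catches the case where adding the
 * random offset wrapped the address around; the unrandomised brk is then
 * kept rather than returning an address below the current break.
 */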

SYSCALL_DEFINE6(mips_mmap, unsigned long, addr, unsigned long, len,
	unsigned long, prot, unsigned long, flags, unsigned long,
	fd, off_t, offset)
{
	unsigned long result;

	result = -EINVAL;
	if (offset & ~PAGE_MASK)
		goto out;

	result = sys_mmap_pgoff(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);

out:
	return result;
}
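
/*
 * Note: this old-style mmap takes a byte offset, so any offset that is not
 * a multiple of the page size is rejected before it is converted to a page
 * number.  mips_mmap2 below counts its offset in fixed 4K units instead.
 */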

SYSCALL_DEFINE6(mips_mmap2, unsigned long, addr, unsigned long, len,
	unsigned long, prot, unsigned long, flags, unsigned long, fd,
	unsigned long, pgoff)
{
	if (pgoff & (~PAGE_MASK >> 12))
		return -EINVAL;

	return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff >> (PAGE_SHIFT-12));
}
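
/*
 * Worked example (illustrative): mmap2 takes its offset in fixed 4K units
 * regardless of the kernel page size.  With 16K pages (PAGE_SHIFT == 14),
 * ~PAGE_MASK >> 12 is 3, so the two low bits of pgoff must be clear, i.e.
 * the offset must be a whole multiple of the real page size; a valid pgoff
 * is then converted with pgoff >> (14 - 12) from 4K units into 16K pages.
 * With 4K pages both expressions collapse into a plain pass-through.
 */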

save_static_function(sys_fork);
static int __used noinline
_sys_fork(nabi_no_regargs struct pt_regs regs)
{
	return do_fork(SIGCHLD, regs.regs[29], &regs, 0, NULL, NULL);
}

save_static_function(sys_clone);
static int __used noinline
_sys_clone(nabi_no_regargs struct pt_regs regs)
{
	unsigned long clone_flags;
	unsigned long newsp;
	int __user *parent_tidptr, *child_tidptr;

	clone_flags = regs.regs[4];
	newsp = regs.regs[5];
	if (!newsp)
		newsp = regs.regs[29];
	parent_tidptr = (int __user *) regs.regs[6];
#ifdef CONFIG_32BIT
	/* We need to fetch the fifth argument off the stack.  */
	child_tidptr = NULL;
	if (clone_flags & (CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)) {
		int __user *__user *usp = (int __user *__user *) regs.regs[29];
		if (regs.regs[2] == __NR_syscall) {
			if (get_user (child_tidptr, &usp[5]))
				return -EFAULT;
		}
		else if (get_user (child_tidptr, &usp[4]))
			return -EFAULT;
	}
#else
	child_tidptr = (int __user *) regs.regs[8];
#endif
	return do_fork(clone_flags, newsp, &regs, 0,
	               parent_tidptr, child_tidptr);
}
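
/*
 * Note on the stack fetch above: under the o32 ABI only four arguments are
 * passed in registers, so the fifth clone() argument (child_tidptr) has to
 * be read from the user stack at usp[4].  When the call arrives through the
 * indirect __NR_syscall wrapper every argument is shifted along by one,
 * which is why that path reads usp[5] instead.
 */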

/*
 * sys_execve() executes a new program.
 */
asmlinkage int sys_execve(nabi_no_regargs struct pt_regs regs)
{
	int error;
	char * filename;

	filename = getname((char __user *) (long)regs.regs[4]);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;
	error = do_execve(filename, (char __user *__user *) (long)regs.regs[5],
	                  (char __user *__user *) (long)regs.regs[6], &regs);
	putname(filename);

out:
	return error;
}

SYSCALL_DEFINE1(set_thread_area, unsigned long, addr)
{
	struct thread_info *ti = task_thread_info(current);

	ti->tp_value = addr;
	if (cpu_has_userlocal)
		write_c0_userlocal(addr);

	return 0;
}
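
/*
 * Illustrative note: userspace normally reads the value set here with
 * "rdhwr reg, $29" (hardware register 29, UserLocal).  With
 * cpu_has_userlocal the write_c0_userlocal() above makes that a single
 * instruction; on older CPUs the rdhwr traps and the kernel emulates it
 * from the saved tp_value.  A hypothetical sketch of the userspace side:
 *
 *	static inline void *get_tls(void)
 *	{
 *		void *tp;
 *
 *		asm(".set push; .set mips32r2; rdhwr %0, $29; .set pop"
 *		    : "=r" (tp));
 *		return tp;
 *	}
 */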

static inline int mips_atomic_set(struct pt_regs *regs,
	unsigned long addr, unsigned long new)
{
	unsigned long old, tmp;
	unsigned int err;

	if (unlikely(addr & 3))
		return -EINVAL;

	if (unlikely(!access_ok(VERIFY_WRITE, addr, 4)))
		return -EINVAL;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__ (
		"	.set	mips3					\n"
		"	li	%[err], 0				\n"
		"1:	ll	%[old], (%[addr])			\n"
		"	move	%[tmp], %[new]				\n"
		"2:	sc	%[tmp], (%[addr])			\n"
		"	beqzl	%[tmp], 1b				\n"
		"3:							\n"
		"	.section .fixup,\"ax\"				\n"
		"4:	li	%[err], %[efault]			\n"
		"	j	3b					\n"
		"	.previous					\n"
		"	.section __ex_table,\"a\"			\n"
		"	"STR(PTR)"	1b, 4b				\n"
		"	"STR(PTR)"	2b, 4b				\n"
		"	.previous					\n"
		"	.set	mips0					\n"
		: [old] "=&r" (old),
		  [err] "=&r" (err),
		  [tmp] "=&r" (tmp)
		: [addr] "r" (addr),
		  [new] "r" (new),
		  [efault] "i" (-EFAULT)
		: "memory");
	} else if (cpu_has_llsc) {
		__asm__ __volatile__ (
		"	.set	mips3					\n"
		"	li	%[err], 0				\n"
		"1:	ll	%[old], (%[addr])			\n"
		"	move	%[tmp], %[new]				\n"
		"2:	sc	%[tmp], (%[addr])			\n"
		"	bnez	%[tmp], 4f				\n"
		"3:							\n"
		"	.subsection 2					\n"
		"4:	b	1b					\n"
		"	.previous					\n"
		"							\n"
		"	.section .fixup,\"ax\"				\n"
		"5:	li	%[err], %[efault]			\n"
		"	j	3b					\n"
		"	.previous					\n"
		"	.section __ex_table,\"a\"			\n"
		"	"STR(PTR)"	1b, 5b				\n"
		"	"STR(PTR)"	2b, 5b				\n"
		"	.previous					\n"
		"	.set	mips0					\n"
		: [old] "=&r" (old),
		  [err] "=&r" (err),
		  [tmp] "=&r" (tmp)
		: [addr] "r" (addr),
		  [new] "r" (new),
		  [efault] "i" (-EFAULT)
		: "memory");
	} else {
		do {
			preempt_disable();
			ll_bit = 1;
			ll_task = current;
			preempt_enable();

			err = __get_user(old, (unsigned int *) addr);
			err |= __put_user(new, (unsigned int *) addr);
			if (err)
				break;
			rmb();
		} while (!ll_bit);
	}

	if (unlikely(err))
		return err;

	regs->regs[2] = old;
	regs->regs[7] = 0;	/* No error */

	/*
	 * Don't let your children do this ...
	 */
	__asm__ __volatile__(
	"	move	$29, %0						\n"
	"	j	syscall_exit					\n"
	: /* no outputs */
	: "r" (regs));

	/* unreached.  Honestly.  */
	while (1);
}
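
/*
 * Note on the tail of mips_atomic_set(): instead of returning normally it
 * loads $sp with the saved pt_regs and jumps straight to syscall_exit, so
 * the values written into regs->regs[2] ($v0, the old word) and
 * regs->regs[7] ($a3, cleared to signal success) are exactly what userspace
 * sees, bypassing the usual syscall return-value handling.
 */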

save_static_function(sys_sysmips);
static int __used noinline
_sys_sysmips(nabi_no_regargs struct pt_regs regs)
{
	long cmd, arg1, arg2, arg3;

	cmd = regs.regs[4];
	arg1 = regs.regs[5];
	arg2 = regs.regs[6];
	arg3 = regs.regs[7];

	switch (cmd) {
	case MIPS_ATOMIC_SET:
		return mips_atomic_set(&regs, arg1, arg2);

	case MIPS_FIXADE:
		if (arg1 & ~3)
			return -EINVAL;

		if (arg1 & 1)
			set_thread_flag(TIF_FIXADE);
		else
			clear_thread_flag(TIF_FIXADE);
		if (arg1 & 2)
			set_thread_flag(TIF_LOGADE);
		else
			clear_thread_flag(TIF_LOGADE);

		return 0;

	case FLUSH_CACHE:
		__flush_cache_all();
		return 0;
	}

	return -EINVAL;
}
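
/*
 * Illustrative, hypothetical userspace sketch: MIPS_ATOMIC_SET gives CPUs
 * without LL/SC an atomic exchange that returns the old word, so a crude
 * test-and-set lock can be built on it (assuming <unistd.h>,
 * <sys/syscall.h> and <asm/sysmips.h> for syscall() and the command value):
 *
 *	static void lock(volatile int *l)
 *	{
 *		while (syscall(SYS_sysmips, MIPS_ATOMIC_SET, (long)l, 1) != 0)
 *			;
 *	}
 *
 * A non-zero return means the old value was already 1, i.e. the lock was
 * held.  The old LinuxThreads library reportedly used this operation for
 * locking on MIPS I class hardware.
 */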

/*
 * Not implemented yet ...
 */
SYSCALL_DEFINE3(cachectl, char *, addr, int, nbytes, int, op)
{
	return -ENOSYS;
}

/*
 * If we ever come here the user sp is bad.  Zap the process right away.
 * Because the stack is bad, signal delivery would not work either.
 */
asmlinkage void bad_stack(void)
{
	do_exit(SIGSEGV);
}

/*
 * Do a system call from kernel instead of calling sys_execve so we
 * end up with proper pt_regs.
 */
int kernel_execve(const char *filename, char *const argv[], char *const envp[])
{
	register unsigned long __a0 asm("$4") = (unsigned long) filename;
	register unsigned long __a1 asm("$5") = (unsigned long) argv;
	register unsigned long __a2 asm("$6") = (unsigned long) envp;
	register unsigned long __a3 asm("$7");
	unsigned long __v0;

	__asm__ volatile ("					\n"
	"	.set	noreorder				\n"
	"	li	$2, %5		# __NR_execve		\n"
	"	syscall						\n"
	"	move	%0, $2					\n"
	"	.set	reorder					\n"
	: "=&r" (__v0), "=r" (__a3)
	: "r" (__a0), "r" (__a1), "r" (__a2), "i" (__NR_execve)
	: "$2", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24",
	  "memory");

	if (__a3 == 0)
		return __v0;

	return -__v0;
}
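
/*
 * Note on the return handling above: the MIPS syscall convention reports
 * errors out of band, with $a3 (__a3) zero on success and non-zero on
 * failure while $v0 (__v0) holds either the result or the positive errno
 * value, hence the final "return -__v0" to get back to the kernel's usual
 * negative-errno convention.
 */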