xref: /openbmc/linux/arch/sparc/kernel/sys_sparc_64.c (revision 82ced6fd)
1 /* linux/arch/sparc64/kernel/sys_sparc.c
2  *
3  * This file contains various random system calls that
4  * have a non-standard calling sequence on the Linux/sparc
5  * platform.
6  */
7 
8 #include <linux/errno.h>
9 #include <linux/types.h>
10 #include <linux/sched.h>
11 #include <linux/fs.h>
12 #include <linux/file.h>
13 #include <linux/mm.h>
14 #include <linux/sem.h>
15 #include <linux/msg.h>
16 #include <linux/shm.h>
17 #include <linux/stat.h>
18 #include <linux/mman.h>
19 #include <linux/utsname.h>
20 #include <linux/smp.h>
21 #include <linux/slab.h>
22 #include <linux/syscalls.h>
23 #include <linux/ipc.h>
24 #include <linux/personality.h>
25 #include <linux/random.h>
26 #include <linux/module.h>
27 
28 #include <asm/uaccess.h>
29 #include <asm/utrap.h>
30 #include <asm/perfctr.h>
31 #include <asm/unistd.h>
32 
33 #include "entry.h"
34 #include "systbls.h"
35 
36 /* #define DEBUG_UNIMP_SYSCALL */
37 
38 asmlinkage unsigned long sys_getpagesize(void)
39 {
40 	return PAGE_SIZE;
41 }
42 
43 #define VA_EXCLUDE_START (0x0000080000000000UL - (1UL << 32UL))
44 #define VA_EXCLUDE_END   (0xfffff80000000000UL + (1UL << 32UL))
45 
46 /* Does addr --> addr+len fall within 4GB of the VA-space hole or
47  * overflow past the end of the 64-bit address space?
48  */
49 static inline int invalid_64bit_range(unsigned long addr, unsigned long len)
50 {
51 	unsigned long va_exclude_start, va_exclude_end;
52 
53 	va_exclude_start = VA_EXCLUDE_START;
54 	va_exclude_end   = VA_EXCLUDE_END;
55 
56 	if (unlikely(len >= va_exclude_start))
57 		return 1;
58 
59 	if (unlikely((addr + len) < addr))
60 		return 1;
61 
62 	if (unlikely((addr >= va_exclude_start && addr < va_exclude_end) ||
63 		     ((addr + len) >= va_exclude_start &&
64 		      (addr + len) < va_exclude_end)))
65 		return 1;
66 
67 	return 0;
68 }
69 
70 /* Does start,end straddle the VA-space hole?  */
71 static inline int straddles_64bit_va_hole(unsigned long start, unsigned long end)
72 {
73 	unsigned long va_exclude_start, va_exclude_end;
74 
75 	va_exclude_start = VA_EXCLUDE_START;
76 	va_exclude_end   = VA_EXCLUDE_END;
77 
78 	if (likely(start < va_exclude_start && end < va_exclude_start))
79 		return 0;
80 
81 	if (likely(start >= va_exclude_end && end >= va_exclude_end))
82 		return 0;
83 
84 	return 1;
85 }
86 
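/* Illustrative sketch (not built): with the 4GB guard bands above, the
 * window rejected by invalid_64bit_range() spans
 * [0x000007ff00000000, 0xfffff80100000000).  A minimal compile-time
 * self-check of that arithmetic, assuming BUILD_BUG_ON() from
 * <linux/kernel.h> is visible here:
 */
#if 0
static void example_va_exclude_constants(void)
{
	BUILD_BUG_ON(VA_EXCLUDE_START != 0x000007ff00000000UL);
	BUILD_BUG_ON(VA_EXCLUDE_END   != 0xfffff80100000000UL);
}
#endif
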
87 /* These functions differ from the default implementations in
88  * mm/mmap.c in two ways:
89  *
90  * 1) For file-backed or MAP_SHARED mmap()s we D-cache color align;
91  *    for MAP_FIXED such mappings we just validate what the user gave us.
92  * 2) For 64-bit tasks we avoid mapping anything within 4GB of
93  *    the spitfire/niagara VA-hole.
94  */
95 
96 static inline unsigned long COLOUR_ALIGN(unsigned long addr,
97 					 unsigned long pgoff)
98 {
99 	unsigned long base = (addr+SHMLBA-1)&~(SHMLBA-1);
100 	unsigned long off = (pgoff<<PAGE_SHIFT) & (SHMLBA-1);
101 
102 	return base + off;
103 }
104 
105 static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr,
106 					      unsigned long pgoff)
107 {
108 	unsigned long base = addr & ~(SHMLBA-1);
109 	unsigned long off = (pgoff<<PAGE_SHIFT) & (SHMLBA-1);
110 
111 	if (base + off <= addr)
112 		return base + off;
113 	return base - off;
114 }
115 
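/* Illustrative sketch (not built): COLOUR_ALIGN() returns the lowest
 * address at or above "addr" whose D-cache colour matches the colour
 * implied by the file offset, which is exactly the property the
 * MAP_SHARED aliasing checks below test for.  The helper name here is
 * hypothetical:
 */
#if 0
static void example_colour_align(void)
{
	unsigned long addr  = TASK_UNMAPPED_BASE + 123;	/* deliberately mis-coloured */
	unsigned long pgoff = 3;			/* file offset, in pages */
	unsigned long a = COLOUR_ALIGN(addr, pgoff);

	BUG_ON(a < addr);
	BUG_ON((a - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1));
}
#endif
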
116 unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
117 {
118 	struct mm_struct *mm = current->mm;
119 	struct vm_area_struct * vma;
120 	unsigned long task_size = TASK_SIZE;
121 	unsigned long start_addr;
122 	int do_color_align;
123 
124 	if (flags & MAP_FIXED) {
125 		/* We do not accept a shared mapping if it would violate
126 		 * cache aliasing constraints.
127 		 */
128 		if ((flags & MAP_SHARED) &&
129 		    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
130 			return -EINVAL;
131 		return addr;
132 	}
133 
134 	if (test_thread_flag(TIF_32BIT))
135 		task_size = STACK_TOP32;
136 	if (unlikely(len > task_size || len >= VA_EXCLUDE_START))
137 		return -ENOMEM;
138 
139 	do_color_align = 0;
140 	if (filp || (flags & MAP_SHARED))
141 		do_color_align = 1;
142 
143 	if (addr) {
144 		if (do_color_align)
145 			addr = COLOUR_ALIGN(addr, pgoff);
146 		else
147 			addr = PAGE_ALIGN(addr);
148 
149 		vma = find_vma(mm, addr);
150 		if (task_size - len >= addr &&
151 		    (!vma || addr + len <= vma->vm_start))
152 			return addr;
153 	}
154 
155 	if (len > mm->cached_hole_size) {
156 		start_addr = addr = mm->free_area_cache;
157 	} else {
158 		start_addr = addr = TASK_UNMAPPED_BASE;
159 		mm->cached_hole_size = 0;
160 	}
161 
162 	task_size -= len;
163 
164 full_search:
165 	if (do_color_align)
166 		addr = COLOUR_ALIGN(addr, pgoff);
167 	else
168 		addr = PAGE_ALIGN(addr);
169 
170 	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
171 		/* At this point:  (!vma || addr < vma->vm_end). */
172 		if (addr < VA_EXCLUDE_START &&
173 		    (addr + len) >= VA_EXCLUDE_START) {
174 			addr = VA_EXCLUDE_END;
175 			vma = find_vma(mm, VA_EXCLUDE_END);
176 		}
177 		if (unlikely(task_size < addr)) {
178 			if (start_addr != TASK_UNMAPPED_BASE) {
179 				start_addr = addr = TASK_UNMAPPED_BASE;
180 				mm->cached_hole_size = 0;
181 				goto full_search;
182 			}
183 			return -ENOMEM;
184 		}
185 		if (likely(!vma || addr + len <= vma->vm_start)) {
186 			/*
187 			 * Remember the place where we stopped the search:
188 			 */
189 			mm->free_area_cache = addr + len;
190 			return addr;
191 		}
192 		if (addr + mm->cached_hole_size < vma->vm_start)
193 			mm->cached_hole_size = vma->vm_start - addr;
194 
195 		addr = vma->vm_end;
196 		if (do_color_align)
197 			addr = COLOUR_ALIGN(addr, pgoff);
198 	}
199 }
200 
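/* For context (sketch of the generic caller, hedged): get_unmapped_area()
 * in mm/mmap.c dispatches through the per-mm hook, which
 * arch_pick_mmap_layout() below points at either this routine or the
 * topdown variant:
 */
#if 0
	addr = current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
#endif
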
201 unsigned long
202 arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
203 			  const unsigned long len, const unsigned long pgoff,
204 			  const unsigned long flags)
205 {
206 	struct vm_area_struct *vma;
207 	struct mm_struct *mm = current->mm;
208 	unsigned long task_size = STACK_TOP32;
209 	unsigned long addr = addr0;
210 	int do_color_align;
211 
212 	/* This should only ever run for 32-bit processes.  */
213 	BUG_ON(!test_thread_flag(TIF_32BIT));
214 
215 	if (flags & MAP_FIXED) {
216 		/* We do not accept a shared mapping if it would violate
217 		 * cache aliasing constraints.
218 		 */
219 		if ((flags & MAP_SHARED) &&
220 		    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
221 			return -EINVAL;
222 		return addr;
223 	}
224 
225 	if (unlikely(len > task_size))
226 		return -ENOMEM;
227 
228 	do_color_align = 0;
229 	if (filp || (flags & MAP_SHARED))
230 		do_color_align = 1;
231 
232 	/* requesting a specific address */
233 	if (addr) {
234 		if (do_color_align)
235 			addr = COLOUR_ALIGN(addr, pgoff);
236 		else
237 			addr = PAGE_ALIGN(addr);
238 
239 		vma = find_vma(mm, addr);
240 		if (task_size - len >= addr &&
241 		    (!vma || addr + len <= vma->vm_start))
242 			return addr;
243 	}
244 
245 	/* check if free_area_cache is useful for us */
246 	if (len <= mm->cached_hole_size) {
247 		mm->cached_hole_size = 0;
248 		mm->free_area_cache = mm->mmap_base;
249 	}
250 
251 	/* either no address requested or can't fit in requested address hole */
252 	addr = mm->free_area_cache;
253 	if (do_color_align) {
254 		unsigned long base = COLOUR_ALIGN_DOWN(addr-len, pgoff);
255 
256 		addr = base + len;
257 	}
258 
259 	/* make sure it can fit in the remaining address space */
260 	if (likely(addr > len)) {
261 		vma = find_vma(mm, addr-len);
262 		if (!vma || addr <= vma->vm_start) {
263 			/* remember the address as a hint for next time */
264 			return (mm->free_area_cache = addr-len);
265 		}
266 	}
267 
268 	if (unlikely(mm->mmap_base < len))
269 		goto bottomup;
270 
271 	addr = mm->mmap_base-len;
272 	if (do_color_align)
273 		addr = COLOUR_ALIGN_DOWN(addr, pgoff);
274 
275 	do {
276 		/*
277 		 * Lookup failure means no vma is above this address,
278 		 * else if new region fits below vma->vm_start,
279 		 * return with success:
280 		 */
281 		vma = find_vma(mm, addr);
282 		if (likely(!vma || addr+len <= vma->vm_start)) {
283 			/* remember the address as a hint for next time */
284 			return (mm->free_area_cache = addr);
285 		}
286 
287 		/* remember the largest hole we saw so far */
288 		if (addr + mm->cached_hole_size < vma->vm_start)
289 			mm->cached_hole_size = vma->vm_start - addr;
290 
291 		/* try just below the current vma->vm_start */
292 		addr = vma->vm_start-len;
293 		if (do_color_align)
294 			addr = COLOUR_ALIGN_DOWN(addr, pgoff);
295 	} while (likely(len < vma->vm_start));
296 
297 bottomup:
298 	/*
299 	 * A failed mmap() very likely causes application failure,
300 	 * so fall back to the bottom-up function here. This scenario
301 	 * can happen with large stack limits and large mmap()
302 	 * allocations.
303 	 */
304 	mm->cached_hole_size = ~0UL;
305 	mm->free_area_cache = TASK_UNMAPPED_BASE;
306 	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
307 	/*
308 	 * Restore the topdown base:
309 	 */
310 	mm->free_area_cache = mm->mmap_base;
311 	mm->cached_hole_size = ~0UL;
312 
313 	return addr;
314 }
315 
316 /* Try to align the mapping to as large a boundary as possible. */
317 unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, unsigned long len, unsigned long pgoff, unsigned long flags)
318 {
319 	unsigned long align_goal, addr = -ENOMEM;
320 
321 	if (flags & MAP_FIXED) {
322 		/* Ok, don't mess with it. */
323 		return get_unmapped_area(NULL, orig_addr, len, pgoff, flags);
324 	}
325 	flags &= ~MAP_SHARED;
326 
327 	align_goal = PAGE_SIZE;
328 	if (len >= (4UL * 1024 * 1024))
329 		align_goal = (4UL * 1024 * 1024);
330 	else if (len >= (512UL * 1024))
331 		align_goal = (512UL * 1024);
332 	else if (len >= (64UL * 1024))
333 		align_goal = (64UL * 1024);
334 
335 	do {
336 		addr = get_unmapped_area(NULL, orig_addr, len + (align_goal - PAGE_SIZE), pgoff, flags);
337 		if (!(addr & ~PAGE_MASK)) {
338 			addr = (addr + (align_goal - 1UL)) & ~(align_goal - 1UL);
339 			break;
340 		}
341 
342 		if (align_goal == (4UL * 1024 * 1024))
343 			align_goal = (512UL * 1024);
344 		else if (align_goal == (512UL * 1024))
345 			align_goal = (64UL * 1024);
346 		else
347 			align_goal = PAGE_SIZE;
348 	} while ((addr & ~PAGE_MASK) && align_goal > PAGE_SIZE);
349 
350 	/* Either the mapping is smaller than 64K, or a larger aligned
351 	 * area could not be obtained.
352 	 */
353 	if (addr & ~PAGE_MASK)
354 		addr = get_unmapped_area(NULL, orig_addr, len, pgoff, flags);
355 
356 	return addr;
357 }
358 EXPORT_SYMBOL(get_fb_unmapped_area);
359 
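/* Why over-allocating by (align_goal - PAGE_SIZE) is enough (illustrative
 * sketch, not built): the address handed back by get_unmapped_area() is
 * page aligned, so rounding it up to align_goal consumes at most
 * align_goal - PAGE_SIZE bytes of the padding and still leaves len usable
 * bytes inside the area we asked for:
 */
#if 0
static void example_fb_alignment(unsigned long addr, unsigned long len,
				 unsigned long align_goal)
{
	/* addr: page-aligned result of get_unmapped_area(NULL, ...,
	 * len + (align_goal - PAGE_SIZE), ...) as in the loop above.
	 */
	unsigned long aligned = (addr + (align_goal - 1UL)) & ~(align_goal - 1UL);

	BUG_ON(aligned + len > addr + len + (align_goal - PAGE_SIZE));
}
#endif
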
360 /* Essentially the same as PowerPC... */
361 void arch_pick_mmap_layout(struct mm_struct *mm)
362 {
363 	unsigned long random_factor = 0UL;
364 
365 	if (current->flags & PF_RANDOMIZE) {
366 		random_factor = get_random_int();
367 		if (test_thread_flag(TIF_32BIT))
368 			random_factor &= ((1 * 1024 * 1024) - 1);
369 		else
370 			random_factor = ((random_factor << PAGE_SHIFT) &
371 					 0xffffffffUL);
372 	}
373 
374 	/*
375 	 * Fall back to the standard layout if the personality
376 	 * bit is set, or if the expected stack growth is unlimited:
377 	 */
378 	if (!test_thread_flag(TIF_32BIT) ||
379 	    (current->personality & ADDR_COMPAT_LAYOUT) ||
380 	    current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY ||
381 	    sysctl_legacy_va_layout) {
382 		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
383 		mm->get_unmapped_area = arch_get_unmapped_area;
384 		mm->unmap_area = arch_unmap_area;
385 	} else {
386 		/* We know it's 32-bit */
387 		unsigned long task_size = STACK_TOP32;
388 		unsigned long gap;
389 
390 		gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
391 		if (gap < 128 * 1024 * 1024)
392 			gap = 128 * 1024 * 1024;
393 		if (gap > (task_size / 6 * 5))
394 			gap = (task_size / 6 * 5);
395 
396 		mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
397 		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
398 		mm->unmap_area = arch_unmap_area_topdown;
399 	}
400 }
401 
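/* Worked example (hedged sketch, not built): with the common 8MB stack
 * rlimit and randomization off, a 32-bit task's gap is raised to the
 * 128MB floor, so the topdown base ends up just below
 * STACK_TOP32 - 128MB:
 */
#if 0
static unsigned long example_topdown_mmap_base(void)
{
	unsigned long gap = 8UL * 1024 * 1024;		/* RLIMIT_STACK cur */

	if (gap < 128 * 1024 * 1024)
		gap = 128 * 1024 * 1024;
	if (gap > (STACK_TOP32 / 6 * 5))
		gap = (STACK_TOP32 / 6 * 5);
	return PAGE_ALIGN(STACK_TOP32 - gap);		/* mm->mmap_base */
}
#endif
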
402 SYSCALL_DEFINE1(sparc_brk, unsigned long, brk)
403 {
404 	/* People could try to be nasty and use ta 0x6d in 32-bit programs. */
405 	if (test_thread_flag(TIF_32BIT) && brk >= STACK_TOP32)
406 		return current->mm->brk;
407 
408 	if (unlikely(straddles_64bit_va_hole(current->mm->brk, brk)))
409 		return current->mm->brk;
410 
411 	return sys_brk(brk);
412 }
413 
414 /*
415  * sys_pipe() is the normal C calling convention for creating
416  * a pipe. It's not the way Unix traditionally does this, though.
417  */
418 SYSCALL_DEFINE1(sparc_pipe_real, struct pt_regs *, regs)
419 {
420 	int fd[2];
421 	int error;
422 
423 	error = do_pipe_flags(fd, 0);
424 	if (error)
425 		goto out;
426 	regs->u_regs[UREG_I1] = fd[1];
427 	error = fd[0];
428 out:
429 	return error;
430 }
431 
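/* Calling-convention note (hedged): the normal syscall return value (%o0)
 * carries fd[0] while the write end is handed back in %o1 via
 * regs->u_regs[UREG_I1] above; the C library's pipe() wrapper is expected
 * to store both registers into the caller's int fds[2] array, which is
 * why no user pointer is passed in.
 */
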
432 /*
433  * sys_ipc() is the de-multiplexer for the SysV IPC calls.
434  *
435  * This is really horribly ugly.
436  */
437 
438 SYSCALL_DEFINE6(ipc, unsigned int, call, int, first, unsigned long, second,
439 		unsigned long, third, void __user *, ptr, long, fifth)
440 {
441 	long err;
442 
443 	/* No need for backward compatibility. We can start fresh... */
444 	if (call <= SEMCTL) {
445 		switch (call) {
446 		case SEMOP:
447 			err = sys_semtimedop(first, ptr,
448 					     (unsigned)second, NULL);
449 			goto out;
450 		case SEMTIMEDOP:
451 			err = sys_semtimedop(first, ptr, (unsigned)second,
452 				(const struct timespec __user *)
453 					     (unsigned long) fifth);
454 			goto out;
455 		case SEMGET:
456 			err = sys_semget(first, (int)second, (int)third);
457 			goto out;
458 		case SEMCTL: {
459 			err = sys_semctl(first, second,
460 					 (int)third | IPC_64,
461 					 (union semun) ptr);
462 			goto out;
463 		}
464 		default:
465 			err = -ENOSYS;
466 			goto out;
467 		}
468 	}
469 	if (call <= MSGCTL) {
470 		switch (call) {
471 		case MSGSND:
472 			err = sys_msgsnd(first, ptr, (size_t)second,
473 					 (int)third);
474 			goto out;
475 		case MSGRCV:
476 			err = sys_msgrcv(first, ptr, (size_t)second, fifth,
477 					 (int)third);
478 			goto out;
479 		case MSGGET:
480 			err = sys_msgget((key_t)first, (int)second);
481 			goto out;
482 		case MSGCTL:
483 			err = sys_msgctl(first, (int)second | IPC_64, ptr);
484 			goto out;
485 		default:
486 			err = -ENOSYS;
487 			goto out;
488 		}
489 	}
490 	if (call <= SHMCTL) {
491 		switch (call) {
492 		case SHMAT: {
493 			ulong raddr;
494 			err = do_shmat(first, ptr, (int)second, &raddr);
495 			if (!err) {
496 				if (put_user(raddr,
497 					     (ulong __user *) third))
498 					err = -EFAULT;
499 			}
500 			goto out;
501 		}
502 		case SHMDT:
503 			err = sys_shmdt(ptr);
504 			goto out;
505 		case SHMGET:
506 			err = sys_shmget(first, (size_t)second, (int)third);
507 			goto out;
508 		case SHMCTL:
509 			err = sys_shmctl(first, (int)second | IPC_64, ptr);
510 			goto out;
511 		default:
512 			err = -ENOSYS;
513 			goto out;
514 		}
515 	} else {
516 		err = -ENOSYS;
517 	}
518 out:
519 	return err;
520 }
521 
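/* Usage sketch (hedged): the C library is expected to funnel the SysV IPC
 * calls through this single entry point, so a semget() arrives roughly as
 *
 *	syscall(__NR_ipc, SEMGET, key, nsems, semflg, NULL, 0);
 *
 * which lands in the SEMGET case above as
 * sys_semget(first = key, (int)second = nsems, (int)third = semflg).
 */
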
522 SYSCALL_DEFINE1(sparc64_newuname, struct new_utsname __user *, name)
523 {
524 	int ret = sys_newuname(name);
525 
526 	if (current->personality == PER_LINUX32 && !ret) {
527 		ret = (copy_to_user(name->machine, "sparc\0\0", 8)
528 		       ? -EFAULT : 0);
529 	}
530 	return ret;
531 }
532 
533 SYSCALL_DEFINE1(sparc64_personality, unsigned long, personality)
534 {
535 	int ret;
536 
537 	if (current->personality == PER_LINUX32 &&
538 	    personality == PER_LINUX)
539 		personality = PER_LINUX32;
540 	ret = sys_personality(personality);
541 	if (ret == PER_LINUX32)
542 		ret = PER_LINUX;
543 
544 	return ret;
545 }
546 
547 int sparc_mmap_check(unsigned long addr, unsigned long len)
548 {
549 	if (test_thread_flag(TIF_32BIT)) {
550 		if (len >= STACK_TOP32)
551 			return -EINVAL;
552 
553 		if (addr > STACK_TOP32 - len)
554 			return -EINVAL;
555 	} else {
556 		if (len >= VA_EXCLUDE_START)
557 			return -EINVAL;
558 
559 		if (invalid_64bit_range(addr, len))
560 			return -EINVAL;
561 	}
562 
563 	return 0;
564 }
565 
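/* Userspace-visible effect (hedged sketch): a 64-bit process asking for a
 * MAP_FIXED mapping inside the excluded window is refused, e.g.
 *
 *	mmap((void *) 0x0000080000000000UL, 8192, PROT_READ,
 *	     MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
 *
 * fails with EINVAL because the hint falls inside
 * [VA_EXCLUDE_START, VA_EXCLUDE_END).
 */
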
566 /* Linux version of mmap */
567 SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
568 		unsigned long, prot, unsigned long, flags, unsigned long, fd,
569 		unsigned long, off)
570 {
571 	struct file * file = NULL;
572 	unsigned long retval = -EBADF;
573 
574 	if (!(flags & MAP_ANONYMOUS)) {
575 		file = fget(fd);
576 		if (!file)
577 			goto out;
578 	}
579 	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
580 	len = PAGE_ALIGN(len);
581 
582 	down_write(&current->mm->mmap_sem);
583 	retval = do_mmap(file, addr, len, prot, flags, off);
584 	up_write(&current->mm->mmap_sem);
585 
586 	if (file)
587 		fput(file);
588 out:
589 	return retval;
590 }
591 
592 SYSCALL_DEFINE2(64_munmap, unsigned long, addr, size_t, len)
593 {
594 	long ret;
595 
596 	if (invalid_64bit_range(addr, len))
597 		return -EINVAL;
598 
599 	down_write(&current->mm->mmap_sem);
600 	ret = do_munmap(current->mm, addr, len);
601 	up_write(&current->mm->mmap_sem);
602 	return ret;
603 }
604 
605 extern unsigned long do_mremap(unsigned long addr,
606 	unsigned long old_len, unsigned long new_len,
607 	unsigned long flags, unsigned long new_addr);
608 
609 SYSCALL_DEFINE5(64_mremap, unsigned long, addr,	unsigned long, old_len,
610 		unsigned long, new_len, unsigned long, flags,
611 		unsigned long, new_addr)
612 {
613 	unsigned long ret = -EINVAL;
614 
615 	if (test_thread_flag(TIF_32BIT))
616 		goto out;
617 	if (unlikely(new_len >= VA_EXCLUDE_START))
618 		goto out;
619 	if (unlikely(sparc_mmap_check(addr, old_len)))
620 		goto out;
621 	if (unlikely(sparc_mmap_check(new_addr, new_len)))
622 		goto out;
623 
624 	down_write(&current->mm->mmap_sem);
625 	ret = do_mremap(addr, old_len, new_len, flags, new_addr);
626 	up_write(&current->mm->mmap_sem);
627 out:
628 	return ret;
629 }
630 
631 /* We get here via sys_nis_syscall so that it can set up the regs argument. */
632 asmlinkage unsigned long c_sys_nis_syscall(struct pt_regs *regs)
633 {
634 	static int count;
635 
636 	/* Don't make the system unusable if something gets stuck calling this repeatedly. */
637 	if (count++ > 5)
638 		return -ENOSYS;
639 
640 	printk("Unimplemented SPARC system call %ld\n", regs->u_regs[1]);
641 #ifdef DEBUG_UNIMP_SYSCALL
642 	show_regs(regs);
643 #endif
644 
645 	return -ENOSYS;
646 }
647 
648 /* #define DEBUG_SPARC_BREAKPOINT */
649 
650 asmlinkage void sparc_breakpoint(struct pt_regs *regs)
651 {
652 	siginfo_t info;
653 
654 	if (test_thread_flag(TIF_32BIT)) {
655 		regs->tpc &= 0xffffffff;
656 		regs->tnpc &= 0xffffffff;
657 	}
658 #ifdef DEBUG_SPARC_BREAKPOINT
659 	printk("TRAP: Entering kernel PC=%lx, nPC=%lx\n", regs->tpc, regs->tnpc);
660 #endif
661 	info.si_signo = SIGTRAP;
662 	info.si_errno = 0;
663 	info.si_code = TRAP_BRKPT;
664 	info.si_addr = (void __user *)regs->tpc;
665 	info.si_trapno = 0;
666 	force_sig_info(SIGTRAP, &info, current);
667 #ifdef DEBUG_SPARC_BREAKPOINT
668 	printk("TRAP: Returning to space: PC=%lx nPC=%lx\n", regs->tpc, regs->tnpc);
669 #endif
670 }
671 
672 extern void check_pending(int signum);
673 
674 SYSCALL_DEFINE2(getdomainname, char __user *, name, int, len)
675 {
676 	int nlen, err;
677 
678 	if (len < 0)
679 		return -EINVAL;
680 
681 	down_read(&uts_sem);
682 
683 	nlen = strlen(utsname()->domainname) + 1;
684 	err = -EINVAL;
685 	if (nlen > len)
686 		goto out;
687 
688 	err = -EFAULT;
689 	if (!copy_to_user(name, utsname()->domainname, nlen))
690 		err = 0;
691 
692 out:
693 	up_read(&uts_sem);
694 	return err;
695 }
696 
697 SYSCALL_DEFINE5(utrap_install, utrap_entry_t, type,
698 		utrap_handler_t, new_p, utrap_handler_t, new_d,
699 		utrap_handler_t __user *, old_p,
700 		utrap_handler_t __user *, old_d)
701 {
702 	if (type < UT_INSTRUCTION_EXCEPTION || type > UT_TRAP_INSTRUCTION_31)
703 		return -EINVAL;
704 	if (new_p == (utrap_handler_t)(long)UTH_NOCHANGE) {
705 		if (old_p) {
706 			if (!current_thread_info()->utraps) {
707 				if (put_user(NULL, old_p))
708 					return -EFAULT;
709 			} else {
710 				if (put_user((utrap_handler_t)(current_thread_info()->utraps[type]), old_p))
711 					return -EFAULT;
712 			}
713 		}
714 		if (old_d) {
715 			if (put_user(NULL, old_d))
716 				return -EFAULT;
717 		}
718 		return 0;
719 	}
720 	if (!current_thread_info()->utraps) {
721 		current_thread_info()->utraps =
722 			kzalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long), GFP_KERNEL);
723 		if (!current_thread_info()->utraps)
724 			return -ENOMEM;
725 		current_thread_info()->utraps[0] = 1;
726 	} else {
727 		if ((utrap_handler_t)current_thread_info()->utraps[type] != new_p &&
728 		    current_thread_info()->utraps[0] > 1) {
729 			unsigned long *p = current_thread_info()->utraps;
730 
731 			current_thread_info()->utraps =
732 				kmalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long),
733 					GFP_KERNEL);
734 			if (!current_thread_info()->utraps) {
735 				current_thread_info()->utraps = p;
736 				return -ENOMEM;
737 			}
738 			p[0]--;
739 			current_thread_info()->utraps[0] = 1;
740 			memcpy(current_thread_info()->utraps+1, p+1,
741 			       UT_TRAP_INSTRUCTION_31*sizeof(long));
742 		}
743 	}
744 	if (old_p) {
745 		if (put_user((utrap_handler_t)(current_thread_info()->utraps[type]), old_p))
746 			return -EFAULT;
747 	}
748 	if (old_d) {
749 		if (put_user(NULL, old_d))
750 			return -EFAULT;
751 	}
752 	current_thread_info()->utraps[type] = (long)new_p;
753 
754 	return 0;
755 }
756 
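/* Layout note (derived from the code above): ->utraps points at an array
 * of UT_TRAP_INSTRUCTION_31 + 1 longs.  Slot 0 is a reference count: it
 * starts at 1 here, is expected to be bumped when the table is shared at
 * thread creation (outside this file), and the kmalloc/memcpy path above
 * un-shares a table whose count is still above 1 before changing it.
 * Slots 1 .. UT_TRAP_INSTRUCTION_31 hold the handler addresses, indexed
 * by utrap_entry_t.
 */
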
757 asmlinkage long sparc_memory_ordering(unsigned long model,
758 				      struct pt_regs *regs)
759 {
760 	if (model >= 3)
761 		return -EINVAL;
762 	regs->tstate = (regs->tstate & ~TSTATE_MM) | (model << 14);
763 	return 0;
764 }
765 
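/* Background (hedged): "model" selects the SPARC V9 memory model for the
 * calling thread -- 0 = TSO, 1 = PSO, 2 = RMO.  The two-bit PSTATE.MM
 * field sits at bits 15:14 of TSTATE, hence the "model << 14" into the
 * TSTATE_MM mask above; the new model takes effect when the saved TSTATE
 * is restored on the way back to userspace.
 */
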
766 SYSCALL_DEFINE5(rt_sigaction, int, sig, const struct sigaction __user *, act,
767 		struct sigaction __user *, oact, void __user *, restorer,
768 		size_t, sigsetsize)
769 {
770 	struct k_sigaction new_ka, old_ka;
771 	int ret;
772 
773 	/* XXX: Don't preclude handling different sized sigset_t's.  */
774 	if (sigsetsize != sizeof(sigset_t))
775 		return -EINVAL;
776 
777 	if (act) {
778 		new_ka.ka_restorer = restorer;
779 		if (copy_from_user(&new_ka.sa, act, sizeof(*act)))
780 			return -EFAULT;
781 	}
782 
783 	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
784 
785 	if (!ret && oact) {
786 		if (copy_to_user(oact, &old_ka.sa, sizeof(*oact)))
787 			return -EFAULT;
788 	}
789 
790 	return ret;
791 }
792 
793 /* Invoked by rtrap code to update performance counters in
794  * user space.
795  */
796 asmlinkage void update_perfctrs(void)
797 {
798 	unsigned long pic, tmp;
799 
800 	read_pic(pic);
801 	tmp = (current_thread_info()->kernel_cntd0 += (unsigned int)pic);
802 	__put_user(tmp, current_thread_info()->user_cntd0);
803 	tmp = (current_thread_info()->kernel_cntd1 += (pic >> 32));
804 	__put_user(tmp, current_thread_info()->user_cntd1);
805 	reset_pic();
806 }
807 
808 SYSCALL_DEFINE4(perfctr, int, opcode, unsigned long, arg0,
809 		unsigned long, arg1, unsigned long, arg2)
810 {
811 	int err = 0;
812 
813 	switch (opcode) {
814 	case PERFCTR_ON:
815 		current_thread_info()->pcr_reg = arg2;
816 		current_thread_info()->user_cntd0 = (u64 __user *) arg0;
817 		current_thread_info()->user_cntd1 = (u64 __user *) arg1;
818 		current_thread_info()->kernel_cntd0 =
819 			current_thread_info()->kernel_cntd1 = 0;
820 		write_pcr(arg2);
821 		reset_pic();
822 		set_thread_flag(TIF_PERFCTR);
823 		break;
824 
825 	case PERFCTR_OFF:
826 		err = -EINVAL;
827 		if (test_thread_flag(TIF_PERFCTR)) {
828 			current_thread_info()->user_cntd0 =
829 				current_thread_info()->user_cntd1 = NULL;
830 			current_thread_info()->pcr_reg = 0;
831 			write_pcr(0);
832 			clear_thread_flag(TIF_PERFCTR);
833 			err = 0;
834 		}
835 		break;
836 
837 	case PERFCTR_READ: {
838 		unsigned long pic, tmp;
839 
840 		if (!test_thread_flag(TIF_PERFCTR)) {
841 			err = -EINVAL;
842 			break;
843 		}
844 		read_pic(pic);
845 		tmp = (current_thread_info()->kernel_cntd0 += (unsigned int)pic);
846 		err |= __put_user(tmp, current_thread_info()->user_cntd0);
847 		tmp = (current_thread_info()->kernel_cntd1 += (pic >> 32));
848 		err |= __put_user(tmp, current_thread_info()->user_cntd1);
849 		reset_pic();
850 		break;
851 	}
852 
853 	case PERFCTR_CLRPIC:
854 		if (!test_thread_flag(TIF_PERFCTR)) {
855 			err = -EINVAL;
856 			break;
857 		}
858 		current_thread_info()->kernel_cntd0 =
859 			current_thread_info()->kernel_cntd1 = 0;
860 		reset_pic();
861 		break;
862 
863 	case PERFCTR_SETPCR: {
864 		u64 __user *user_pcr = (u64 __user *)arg0;
865 
866 		if (!test_thread_flag(TIF_PERFCTR)) {
867 			err = -EINVAL;
868 			break;
869 		}
870 		err |= __get_user(current_thread_info()->pcr_reg, user_pcr);
871 		write_pcr(current_thread_info()->pcr_reg);
872 		current_thread_info()->kernel_cntd0 =
873 			current_thread_info()->kernel_cntd1 = 0;
874 		reset_pic();
875 		break;
876 	}
877 
878 	case PERFCTR_GETPCR: {
879 		u64 __user *user_pcr = (u64 __user *)arg0;
880 
881 		if (!test_thread_flag(TIF_PERFCTR)) {
882 			err = -EINVAL;
883 			break;
884 		}
885 		err |= __put_user(current_thread_info()->pcr_reg, user_pcr);
886 		break;
887 	}
888 
889 	default:
890 		err = -EINVAL;
891 		break;
892 	}
893 	return err;
894 }
895 
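/* Usage sketch (hedged; this is the old pre-perf_events %pcr/%pic
 * interface): a thread registers two user-space u64 slots and an initial
 * %pcr value, then asks the kernel to refresh them:
 *
 *	u64 cnt0, cnt1;
 *	syscall(__NR_perfctr, PERFCTR_ON, &cnt0, &cnt1, pcr_value);
 *	...
 *	syscall(__NR_perfctr, PERFCTR_READ, 0, 0, 0);	-- refreshes cnt0/cnt1
 *	syscall(__NR_perfctr, PERFCTR_OFF, 0, 0, 0);
 */
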
896 /*
897  * Do a system call from kernel space instead of calling sys_execve
898  * directly, so that we end up with proper pt_regs.
899  */
900 int kernel_execve(const char *filename, char *const argv[], char *const envp[])
901 {
902 	long __res;
903 	register long __g1 __asm__ ("g1") = __NR_execve;
904 	register long __o0 __asm__ ("o0") = (long)(filename);
905 	register long __o1 __asm__ ("o1") = (long)(argv);
906 	register long __o2 __asm__ ("o2") = (long)(envp);
907 	asm volatile ("t 0x6d\n\t"
908 		      "sub %%g0, %%o0, %0\n\t"
909 		      "movcc %%xcc, %%o0, %0\n\t"
910 		      : "=r" (__res), "=&r" (__o0)
911 		      : "1" (__o0), "r" (__o1), "r" (__o2), "r" (__g1)
912 		      : "cc");
913 	return __res;
914 }
915
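/* How the inline asm above reports errors (sketch of the trap convention):
 * "t 0x6d" enters the 64-bit syscall trap, which returns its result in
 * %o0 and flags failure by setting the carry bit in %xcc.  The
 * "sub %g0, %o0, %0" precomputes -%o0 (a negative errno) and
 * "movcc %xcc, %o0, %0" overwrites that with the plain value when carry
 * is clear, so __res follows the kernel's usual negative-errno
 * convention.  Typical in-kernel usage (hedged sketch):
 */
#if 0
	static char *argv[] = { "/sbin/init", NULL };
	static char *envp[] = { "HOME=/", "TERM=linux", NULL };

	kernel_execve("/sbin/init", argv, envp);
#endif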