/* linux/arch/sparc64/kernel/sys_sparc.c
 *
 * This file contains various random system calls that
 * have a non-standard calling sequence on the Linux/sparc
 * platform.
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/utsname.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/ipc.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/module.h>

#include <asm/uaccess.h>
#include <asm/utrap.h>
#include <asm/unistd.h>

#include "entry.h"
#include "systbls.h"

/* #define DEBUG_UNIMP_SYSCALL */

asmlinkage unsigned long sys_getpagesize(void)
{
	return PAGE_SIZE;
}

#define VA_EXCLUDE_START (0x0000080000000000UL - (1UL << 32UL))
#define VA_EXCLUDE_END   (0xfffff80000000000UL + (1UL << 32UL))

/* Does addr --> addr+len fall within 4GB of the VA-space hole or
 * overflow past the end of the 64-bit address space?
 */
static inline int invalid_64bit_range(unsigned long addr, unsigned long len)
{
	unsigned long va_exclude_start, va_exclude_end;

	va_exclude_start = VA_EXCLUDE_START;
	va_exclude_end = VA_EXCLUDE_END;

	if (unlikely(len >= va_exclude_start))
		return 1;

	if (unlikely((addr + len) < addr))
		return 1;

	if (unlikely((addr >= va_exclude_start && addr < va_exclude_end) ||
		     ((addr + len) >= va_exclude_start &&
		      (addr + len) < va_exclude_end)))
		return 1;

	return 0;
}
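
/* For concreteness (these values follow directly from the #defines
 * above): the hardware VA hole spans
 * [0x0000080000000000, 0xfffff80000000000), so the excluded window is
 *
 *   VA_EXCLUDE_START = 0x0000080000000000 - 2^32 = 0x000007ff00000000
 *   VA_EXCLUDE_END   = 0xfffff80000000000 + 2^32 = 0xfffff80100000000
 *
 * and any mapping that begins or ends inside
 * [0x000007ff00000000, 0xfffff80100000000) is rejected, keeping 64-bit
 * tasks at least 4GB away from either edge of the hole.
 */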
/* Does start,end straddle the VA-space hole? */
static inline int straddles_64bit_va_hole(unsigned long start, unsigned long end)
{
	unsigned long va_exclude_start, va_exclude_end;

	va_exclude_start = VA_EXCLUDE_START;
	va_exclude_end = VA_EXCLUDE_END;

	if (likely(start < va_exclude_start && end < va_exclude_start))
		return 0;

	if (likely(start >= va_exclude_end && end >= va_exclude_end))
		return 0;

	return 1;
}

/* These functions differ from the default implementations in
 * mm/mmap.c in two ways:
 *
 * 1) For file-backed MAP_SHARED mmap()s we D-cache color align;
 *    for fixed such mappings we just validate what the user gave us.
 * 2) For 64-bit tasks we avoid mapping anything within 4GB of
 *    the spitfire/niagara VA-hole.
 */

static inline unsigned long COLOUR_ALIGN(unsigned long addr,
					 unsigned long pgoff)
{
	unsigned long base = (addr + SHMLBA - 1) & ~(SHMLBA - 1);
	unsigned long off = (pgoff << PAGE_SHIFT) & (SHMLBA - 1);

	return base + off;
}

static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr,
					      unsigned long pgoff)
{
	unsigned long base = addr & ~(SHMLBA - 1);
	unsigned long off = (pgoff << PAGE_SHIFT) & (SHMLBA - 1);

	if (base + off <= addr)
		return base + off;
	return base - off;
}
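
/* A worked example of the coloring math above (assuming SHMLBA is
 * 4 * PAGE_SIZE = 0x8000 with 8KB pages, as on sparc64): for
 * addr = 0x12345 and pgoff = 3,
 *
 *   base = (0x12345 + 0x7fff) & ~0x7fff = 0x18000
 *   off  = (3 << 13) & 0x7fff           = 0x6000
 *
 * so COLOUR_ALIGN() returns 0x1e000.  The mapping then lands at the
 * same D-cache color (offset within an SHMLBA-sized span) as the file
 * offset, which is what keeps shared mappings alias-free.
 */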
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
				     unsigned long len, unsigned long pgoff,
				     unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long task_size = TASK_SIZE;
	unsigned long start_addr;
	int do_color_align;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
			return -EINVAL;
		return addr;
	}

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;
	if (unlikely(len > task_size || len >= VA_EXCLUDE_START))
		return -ENOMEM;

	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;

	if (addr) {
		if (do_color_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (task_size - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	if (len > mm->cached_hole_size) {
		start_addr = addr = mm->free_area_cache;
	} else {
		start_addr = addr = TASK_UNMAPPED_BASE;
		mm->cached_hole_size = 0;
	}

	task_size -= len;

full_search:
	if (do_color_align)
		addr = COLOUR_ALIGN(addr, pgoff);
	else
		addr = PAGE_ALIGN(addr);

	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point: (!vma || addr < vma->vm_end). */
		if (addr < VA_EXCLUDE_START &&
		    (addr + len) >= VA_EXCLUDE_START) {
			addr = VA_EXCLUDE_END;
			vma = find_vma(mm, VA_EXCLUDE_END);
		}
		if (unlikely(task_size < addr)) {
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = addr = TASK_UNMAPPED_BASE;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (likely(!vma || addr + len <= vma->vm_start)) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		addr = vma->vm_end;
		if (do_color_align)
			addr = COLOUR_ALIGN(addr, pgoff);
	}
}

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			       const unsigned long len, const unsigned long pgoff,
			       const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long task_size = STACK_TOP32;
	unsigned long addr = addr0;
	int do_color_align;

	/* This should only ever run for 32-bit processes. */
	BUG_ON(!test_thread_flag(TIF_32BIT));

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
			return -EINVAL;
		return addr;
	}

	if (unlikely(len > task_size))
		return -ENOMEM;

	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;

	/* requesting a specific address */
	if (addr) {
		if (do_color_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (task_size - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	/* check if free_area_cache is useful for us */
	if (len <= mm->cached_hole_size) {
		mm->cached_hole_size = 0;
		mm->free_area_cache = mm->mmap_base;
	}

	/* either no address requested or can't fit in requested address hole */
	addr = mm->free_area_cache;
	if (do_color_align) {
		unsigned long base = COLOUR_ALIGN_DOWN(addr - len, pgoff);

		addr = base + len;
	}

	/* make sure it can fit in the remaining address space */
	if (likely(addr > len)) {
		vma = find_vma(mm, addr - len);
		if (!vma || addr <= vma->vm_start) {
			/* remember the address as a hint for next time */
			return (mm->free_area_cache = addr - len);
		}
	}

	if (unlikely(mm->mmap_base < len))
		goto bottomup;

	addr = mm->mmap_base - len;
	if (do_color_align)
		addr = COLOUR_ALIGN_DOWN(addr, pgoff);

	do {
		/*
		 * Lookup failure means no vma is above this address,
		 * else if new region fits below vma->vm_start,
		 * return with success:
		 */
		vma = find_vma(mm, addr);
		if (likely(!vma || addr + len <= vma->vm_start)) {
			/* remember the address as a hint for next time */
			return (mm->free_area_cache = addr);
		}

		/* remember the largest hole we saw so far */
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		/* try just below the current vma->vm_start */
		addr = vma->vm_start - len;
		if (do_color_align)
			addr = COLOUR_ALIGN_DOWN(addr, pgoff);
	} while (likely(len < vma->vm_start));

bottomup:
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	mm->cached_hole_size = ~0UL;
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
	/*
	 * Restore the topdown base:
	 */
	mm->free_area_cache = mm->mmap_base;
	mm->cached_hole_size = ~0UL;

	return addr;
}
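
/* A note on the scheme below: get_fb_unmapped_area() over-allocates by
 * (align_goal - PAGE_SIZE) bytes and then rounds the result up, so an
 * aligned block of the requested size is guaranteed to fit inside the
 * area the allocator returned.  For example (values from the ladder
 * below), a 3MB request first tries align_goal = 512K: we ask for
 * 3MB + 512K - PAGE_SIZE, round the returned address up to a 512K
 * boundary, and still have 3MB available.  If a padded request cannot
 * be satisfied, each retry steps the goal down, 4MB -> 512K -> 64K,
 * and finally the unpadded length is tried.
 */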
/* Try to align mapping such that we align it as much as possible. */
unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr,
				   unsigned long len, unsigned long pgoff,
				   unsigned long flags)
{
	unsigned long align_goal, addr = -ENOMEM;
	unsigned long (*get_area)(struct file *, unsigned long,
				  unsigned long, unsigned long, unsigned long);

	get_area = current->mm->get_unmapped_area;

	if (flags & MAP_FIXED) {
		/* Ok, don't mess with it. */
		return get_area(NULL, orig_addr, len, pgoff, flags);
	}
	flags &= ~MAP_SHARED;

	align_goal = PAGE_SIZE;
	if (len >= (4UL * 1024 * 1024))
		align_goal = (4UL * 1024 * 1024);
	else if (len >= (512UL * 1024))
		align_goal = (512UL * 1024);
	else if (len >= (64UL * 1024))
		align_goal = (64UL * 1024);

	do {
		addr = get_area(NULL, orig_addr, len + (align_goal - PAGE_SIZE),
				pgoff, flags);
		if (!(addr & ~PAGE_MASK)) {
			addr = (addr + (align_goal - 1UL)) & ~(align_goal - 1UL);
			break;
		}

		if (align_goal == (4UL * 1024 * 1024))
			align_goal = (512UL * 1024);
		else if (align_goal == (512UL * 1024))
			align_goal = (64UL * 1024);
		else
			align_goal = PAGE_SIZE;
	} while ((addr & ~PAGE_MASK) && align_goal > PAGE_SIZE);

	/* Mapping is smaller than 64K or larger areas could not
	 * be obtained.
	 */
	if (addr & ~PAGE_MASK)
		addr = get_area(NULL, orig_addr, len, pgoff, flags);

	return addr;
}
EXPORT_SYMBOL(get_fb_unmapped_area);

/* Essentially the same as PowerPC. */
static unsigned long mmap_rnd(void)
{
	unsigned long rnd = 0UL;

	if (current->flags & PF_RANDOMIZE) {
		unsigned long val = get_random_int();
		if (test_thread_flag(TIF_32BIT))
			rnd = (val % (1UL << (22UL - PAGE_SHIFT)));
		else
			rnd = (val % (1UL << (29UL - PAGE_SHIFT)));
	}
	return (rnd << PAGE_SHIFT) * 2;
}
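
/* With 8KB pages (PAGE_SHIFT = 13, the base page size on sparc64) the
 * arithmetic above yields rnd < 2^9 pages for 32-bit tasks and
 * rnd < 2^16 pages for 64-bit tasks, so after the shift and doubling
 * the mmap base is offset by an even page count of up to 8MB or 1GB
 * respectively.
 */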
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	unsigned long random_factor = mmap_rnd();
	unsigned long gap;

	/*
	 * Fall back to the standard layout if the personality
	 * bit is set, or if the expected stack growth is unlimited:
	 */
	gap = rlimit(RLIMIT_STACK);
	if (!test_thread_flag(TIF_32BIT) ||
	    (current->personality & ADDR_COMPAT_LAYOUT) ||
	    gap == RLIM_INFINITY ||
	    sysctl_legacy_va_layout) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		mm->get_unmapped_area = arch_get_unmapped_area;
		mm->unmap_area = arch_unmap_area;
	} else {
		/* We know it's 32-bit */
		unsigned long task_size = STACK_TOP32;

		if (gap < 128 * 1024 * 1024)
			gap = 128 * 1024 * 1024;
		if (gap > (task_size / 6 * 5))
			gap = (task_size / 6 * 5);

		mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
		mm->unmap_area = arch_unmap_area_topdown;
	}
}

/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way unix traditionally does this, though.
 */
SYSCALL_DEFINE1(sparc_pipe_real, struct pt_regs *, regs)
{
	int fd[2];
	int error;

	error = do_pipe_flags(fd, 0);
	if (error)
		goto out;
	regs->u_regs[UREG_I1] = fd[1];
	error = fd[0];
out:
	return error;
}

/*
 * sys_ipc() is the de-multiplexer for the SysV IPC calls.
 *
 * This is really horribly ugly.
 */
SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second,
		unsigned long, third, void __user *, ptr, long, fifth)
{
	long err;

	/* No need for backward compatibility. We can start fresh... */
	if (call <= SEMCTL) {
		switch (call) {
		case SEMOP:
			err = sys_semtimedop(first, ptr,
					     (unsigned)second, NULL);
			goto out;
		case SEMTIMEDOP:
			err = sys_semtimedop(first, ptr, (unsigned)second,
				(const struct timespec __user *)
					(unsigned long) fifth);
			goto out;
		case SEMGET:
			err = sys_semget(first, (int)second, (int)third);
			goto out;
		case SEMCTL: {
			err = sys_semctl(first, second,
					 (int)third | IPC_64,
					 (union semun) ptr);
			goto out;
		}
		default:
			err = -ENOSYS;
			goto out;
		}
	}
	if (call <= MSGCTL) {
		switch (call) {
		case MSGSND:
			err = sys_msgsnd(first, ptr, (size_t)second,
					 (int)third);
			goto out;
		case MSGRCV:
			err = sys_msgrcv(first, ptr, (size_t)second, fifth,
					 (int)third);
			goto out;
		case MSGGET:
			err = sys_msgget((key_t)first, (int)second);
			goto out;
		case MSGCTL:
			err = sys_msgctl(first, (int)second | IPC_64, ptr);
			goto out;
		default:
			err = -ENOSYS;
			goto out;
		}
	}
	if (call <= SHMCTL) {
		switch (call) {
		case SHMAT: {
			ulong raddr;
			err = do_shmat(first, ptr, (int)second, &raddr);
			if (!err) {
				if (put_user(raddr,
					     (ulong __user *) third))
					err = -EFAULT;
			}
			goto out;
		}
		case SHMDT:
			err = sys_shmdt(ptr);
			goto out;
		case SHMGET:
			err = sys_shmget(first, (size_t)second, (int)third);
			goto out;
		case SHMCTL:
			err = sys_shmctl(first, (int)second | IPC_64, ptr);
			goto out;
		default:
			err = -ENOSYS;
			goto out;
		}
	} else {
		err = -ENOSYS;
	}
out:
	return err;
}

SYSCALL_DEFINE1(sparc64_personality, unsigned long, personality)
{
	int ret;

	if (current->personality == PER_LINUX32 &&
	    personality == PER_LINUX)
		personality = PER_LINUX32;
	ret = sys_personality(personality);
	if (ret == PER_LINUX32)
		ret = PER_LINUX;

	return ret;
}

int sparc_mmap_check(unsigned long addr, unsigned long len)
{
	if (test_thread_flag(TIF_32BIT)) {
		if (len >= STACK_TOP32)
			return -EINVAL;

		if (addr > STACK_TOP32 - len)
			return -EINVAL;
	} else {
		if (len >= VA_EXCLUDE_START)
			return -EINVAL;

		if (invalid_64bit_range(addr, len))
			return -EINVAL;
	}

	return 0;
}

/* Linux version of mmap */
SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags, unsigned long, fd,
		unsigned long, off)
{
	unsigned long retval = -EINVAL;

	if ((off + PAGE_ALIGN(len)) < off)
		goto out;
	if (off & ~PAGE_MASK)
		goto out;
	retval = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
out:
	return retval;
}

SYSCALL_DEFINE2(64_munmap, unsigned long, addr, size_t, len)
{
	long ret;

	if (invalid_64bit_range(addr, len))
		return -EINVAL;

	down_write(&current->mm->mmap_sem);
	ret = do_munmap(current->mm, addr, len);
	up_write(&current->mm->mmap_sem);
	return ret;
}

extern unsigned long do_mremap(unsigned long addr,
			       unsigned long old_len, unsigned long new_len,
			       unsigned long flags, unsigned long new_addr);

SYSCALL_DEFINE5(64_mremap, unsigned long, addr, unsigned long, old_len,
		unsigned long, new_len, unsigned long, flags,
		unsigned long, new_addr)
{
	unsigned long ret = -EINVAL;

	if (test_thread_flag(TIF_32BIT))
		goto out;

	down_write(&current->mm->mmap_sem);
	ret = do_mremap(addr, old_len, new_len, flags, new_addr);
	up_write(&current->mm->mmap_sem);
out:
	return ret;
}

/* We come here via sys_nis_syscall so it can set up the regs argument. */
asmlinkage unsigned long c_sys_nis_syscall(struct pt_regs *regs)
{
	static int count;

	/* Don't make the system unusable if someone gets stuck. */
	if (count++ > 5)
		return -ENOSYS;

	printk("Unimplemented SPARC system call %ld\n", regs->u_regs[1]);
#ifdef DEBUG_UNIMP_SYSCALL
	show_regs(regs);
#endif

	return -ENOSYS;
}

/* #define DEBUG_SPARC_BREAKPOINT */

asmlinkage void sparc_breakpoint(struct pt_regs *regs)
{
	siginfo_t info;

	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
#ifdef DEBUG_SPARC_BREAKPOINT
	printk("TRAP: Entering kernel PC=%lx, nPC=%lx\n", regs->tpc, regs->tnpc);
#endif
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_BRKPT;
	info.si_addr = (void __user *)regs->tpc;
	info.si_trapno = 0;
	force_sig_info(SIGTRAP, &info, current);
#ifdef DEBUG_SPARC_BREAKPOINT
	printk("TRAP: Returning to space: PC=%lx nPC=%lx\n", regs->tpc, regs->tnpc);
#endif
}

extern void check_pending(int signum);

SYSCALL_DEFINE2(getdomainname, char __user *, name, int, len)
{
	int nlen, err;

	if (len < 0)
		return -EINVAL;

	down_read(&uts_sem);

	nlen = strlen(utsname()->domainname) + 1;
	err = -EINVAL;
	if (nlen > len)
		goto out;

	err = -EFAULT;
	if (!copy_to_user(name, utsname()->domainname, nlen))
		err = 0;

out:
	up_read(&uts_sem);
	return err;
}

SYSCALL_DEFINE5(utrap_install, utrap_entry_t, type,
		utrap_handler_t, new_p, utrap_handler_t, new_d,
		utrap_handler_t __user *, old_p,
		utrap_handler_t __user *, old_d)
{
	if (type < UT_INSTRUCTION_EXCEPTION || type > UT_TRAP_INSTRUCTION_31)
		return -EINVAL;
	if (new_p == (utrap_handler_t)(long)UTH_NOCHANGE) {
		if (old_p) {
			if (!current_thread_info()->utraps) {
				if (put_user(NULL, old_p))
					return -EFAULT;
			} else {
				if (put_user((utrap_handler_t)(current_thread_info()->utraps[type]), old_p))
					return -EFAULT;
			}
		}
		if (old_d) {
			if (put_user(NULL, old_d))
				return -EFAULT;
		}
		return 0;
	}
	if (!current_thread_info()->utraps) {
		current_thread_info()->utraps =
			kzalloc((UT_TRAP_INSTRUCTION_31 + 1) * sizeof(long),
				GFP_KERNEL);
		if (!current_thread_info()->utraps)
			return -ENOMEM;
		current_thread_info()->utraps[0] = 1;
	} else {
		if ((utrap_handler_t)current_thread_info()->utraps[type] != new_p &&
		    current_thread_info()->utraps[0] > 1) {
			unsigned long *p = current_thread_info()->utraps;

			current_thread_info()->utraps =
				kmalloc((UT_TRAP_INSTRUCTION_31 + 1) * sizeof(long),
					GFP_KERNEL);
			if (!current_thread_info()->utraps) {
				current_thread_info()->utraps = p;
				return -ENOMEM;
			}
			p[0]--;
			current_thread_info()->utraps[0] = 1;
			memcpy(current_thread_info()->utraps + 1, p + 1,
			       UT_TRAP_INSTRUCTION_31 * sizeof(long));
		}
	}
	if (old_p) {
		if (put_user((utrap_handler_t)(current_thread_info()->utraps[type]), old_p))
			return -EFAULT;
	}
	if (old_d) {
		if (put_user(NULL, old_d))
			return -EFAULT;
	}
	current_thread_info()->utraps[type] = (long)new_p;

	return 0;
}

asmlinkage long sparc_memory_ordering(unsigned long model,
				      struct pt_regs *regs)
{
	if (model >= 3)
		return -EINVAL;
	regs->tstate = (regs->tstate & ~TSTATE_MM) | (model << 14);
	return 0;
}
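
/* The three valid values of "model" above are the SPARC V9 memory
 * models: 0 = TSO (total store order), 1 = PSO (partial store order),
 * 2 = RMO (relaxed memory order).  The shift by 14 places the value in
 * the TSTATE.MM field (bits 15:14) that ~TSTATE_MM just cleared, so
 * the new model takes effect when tstate is restored on trap return.
 */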
SYSCALL_DEFINE5(rt_sigaction, int, sig, const struct sigaction __user *, act,
		struct sigaction __user *, oact, void __user *, restorer,
		size_t, sigsetsize)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (act) {
		new_ka.ka_restorer = restorer;
		if (copy_from_user(&new_ka.sa, act, sizeof(*act)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_ka.sa, sizeof(*oact)))
			return -EFAULT;
	}

	return ret;
}
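
/* A note on the inline asm in kernel_execve() below: "t 0x6d" is the
 * 64-bit Linux system-call trap on sparc64.  On return, the kernel
 * signals failure by setting the %xcc carry bit and leaving the
 * positive errno in %o0, so the "sub %g0, %o0, %0" / "movcc" pair
 * yields -errno on failure and the plain result on success.
 */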
/*
 * Do a system call from kernel instead of calling sys_execve so we
 * end up with proper pt_regs.
 */
int kernel_execve(const char *filename,
		  const char *const argv[],
		  const char *const envp[])
{
	long __res;
	register long __g1 __asm__ ("g1") = __NR_execve;
	register long __o0 __asm__ ("o0") = (long)(filename);
	register long __o1 __asm__ ("o1") = (long)(argv);
	register long __o2 __asm__ ("o2") = (long)(envp);
	asm volatile ("t 0x6d\n\t"
		      "sub %%g0, %%o0, %0\n\t"
		      "movcc %%xcc, %%o0, %0\n\t"
		      : "=r" (__res), "=&r" (__o0)
		      : "1" (__o0), "r" (__o1), "r" (__o2), "r" (__g1)
		      : "cc");
	return __res;
}