/*
 *  linux/arch/m68k/kernel/sys_m68k.c
 *
 *  This file contains various random system calls that
 *  have a non-standard calling sequence on the Linux/m68k
 *  platform.
 */

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>
#include <linux/ipc.h>

#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/cachectl.h>
#include <asm/traps.h>
#include <asm/page.h>
#include <asm/unistd.h>

/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way unix traditionally does this, though.
 */
asmlinkage int sys_pipe(unsigned long __user * fildes)
{
	int fd[2];
	int error;

	error = do_pipe(fd);
	if (!error) {
		if (copy_to_user(fildes, fd, 2*sizeof(int)))
			error = -EFAULT;
	}
	return error;
}

/* common code for old and new mmaps */
static inline long do_mmap2(
	unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags,
	unsigned long fd, unsigned long pgoff)
{
	int error = -EBADF;
	struct file * file = NULL;

	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
	if (!(flags & MAP_ANONYMOUS)) {
		file = fget(fd);
		if (!file)
			goto out;
	}

	down_write(&current->mm->mmap_sem);
	error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
	up_write(&current->mm->mmap_sem);

	if (file)
		fput(file);
out:
	return error;
}

asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags,
	unsigned long fd, unsigned long pgoff)
{
	return do_mmap2(addr, len, prot, flags, fd, pgoff);
}

/*
 * Perform the select(nd, in, out, ex, tv) and mmap() system
 * calls. Linux/m68k cloned Linux/i386, which historically could not
 * handle more than 4 system call parameters, so these system calls
 * used a memory block for parameter passing.
 */
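/*
 * Illustration only (userspace side, not part of this file): a libc
 * wrapper for the old-style mmap() might pack its six arguments into
 * the mmap_arg_struct defined below and pass a single pointer, e.g.
 *
 *	struct mmap_arg_struct args = {
 *		.addr = 0, .len = length, .prot = PROT_READ,
 *		.flags = MAP_PRIVATE, .fd = fd, .offset = 0,
 *	};
 *	void *p = (void *)syscall(__NR_mmap, &args);
 *
 * This is a sketch: `length' and `fd' are hypothetical variables, and
 * old_mmap() below additionally requires a page-aligned offset.
 */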
struct mmap_arg_struct {
	unsigned long addr;
	unsigned long len;
	unsigned long prot;
	unsigned long flags;
	unsigned long fd;
	unsigned long offset;
};

asmlinkage int old_mmap(struct mmap_arg_struct __user *arg)
{
	struct mmap_arg_struct a;
	int error = -EFAULT;

	if (copy_from_user(&a, arg, sizeof(a)))
		goto out;

	error = -EINVAL;
	if (a.offset & ~PAGE_MASK)
		goto out;

	a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);

	error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT);
out:
	return error;
}

#if 0
struct mmap_arg_struct64 {
	__u32 addr;
	__u32 len;
	__u32 prot;
	__u32 flags;
	__u64 offset;	/* 64 bits */
	__u32 fd;
};

asmlinkage long sys_mmap64(struct mmap_arg_struct64 *arg)
{
	int error = -EFAULT;
	struct file * file = NULL;
	struct mmap_arg_struct64 a;
	unsigned long pgoff;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;

	if ((long)a.offset & ~PAGE_MASK)
		return -EINVAL;

	pgoff = a.offset >> PAGE_SHIFT;
	if ((a.offset >> PAGE_SHIFT) != pgoff)
		return -EINVAL;

	if (!(a.flags & MAP_ANONYMOUS)) {
		error = -EBADF;
		file = fget(a.fd);
		if (!file)
			goto out;
	}
	a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);

	down_write(&current->mm->mmap_sem);
	error = do_mmap_pgoff(file, a.addr, a.len, a.prot, a.flags, pgoff);
	up_write(&current->mm->mmap_sem);
	if (file)
		fput(file);
out:
	return error;
}
#endif

struct sel_arg_struct {
	unsigned long n;
	fd_set __user *inp, *outp, *exp;
	struct timeval __user *tvp;
};

asmlinkage int old_select(struct sel_arg_struct __user *arg)
{
	struct sel_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	/* sys_select() does the appropriate kernel locking */
	return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
}

/*
 * sys_ipc() is the de-multiplexer for the SysV IPC calls..
 *
 * This is really horribly ugly.
 */
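/*
 * Illustration only: given this demultiplexer, a userspace semop()
 * wrapper could reduce to a single ipc call whose first argument
 * selects the real operation, the rest being forwarded in the
 * (call, first, second, third, ptr, fifth) order used below:
 *
 *	syscall(__NR_ipc, SEMOP, semid, nsops, 0, sops, 0);
 *
 * Here `semid', `nsops' and `sops' are hypothetical variables; the
 * actual libc wrappers differ in detail.
 */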
asmlinkage int sys_ipc (uint call, int first, int second,
			int third, void __user *ptr, long fifth)
{
	int version, ret;

	version = call >> 16; /* hack for backward compatibility */
	call &= 0xffff;

	if (call <= SEMCTL)
		switch (call) {
		case SEMOP:
			return sys_semop (first, ptr, second);
		case SEMGET:
			return sys_semget (first, second, third);
		case SEMCTL: {
			union semun fourth;
			if (!ptr)
				return -EINVAL;
			if (get_user(fourth.__pad, (void __user *__user *) ptr))
				return -EFAULT;
			return sys_semctl (first, second, third, fourth);
		}
		default:
			return -ENOSYS;
		}
	if (call <= MSGCTL)
		switch (call) {
		case MSGSND:
			return sys_msgsnd (first, ptr, second, third);
		case MSGRCV:
			switch (version) {
			case 0: {
				struct ipc_kludge tmp;
				if (!ptr)
					return -EINVAL;
				if (copy_from_user (&tmp, ptr, sizeof (tmp)))
					return -EFAULT;
				return sys_msgrcv (first, tmp.msgp, second,
						   tmp.msgtyp, third);
			}
			default:
				return sys_msgrcv (first, ptr,
						   second, fifth, third);
			}
		case MSGGET:
			return sys_msgget ((key_t) first, second);
		case MSGCTL:
			return sys_msgctl (first, second, ptr);
		default:
			return -ENOSYS;
		}
	if (call <= SHMCTL)
		switch (call) {
		case SHMAT:
			switch (version) {
			default: {
				ulong raddr;
				ret = do_shmat (first, ptr, second, &raddr);
				if (ret)
					return ret;
				return put_user (raddr, (ulong __user *) third);
			}
			}
		case SHMDT:
			return sys_shmdt (ptr);
		case SHMGET:
			return sys_shmget (first, second, third);
		case SHMCTL:
			return sys_shmctl (first, second, ptr);
		default:
			return -ENOSYS;
		}

	return -EINVAL;
}

/* Convert virtual (user) address VADDR to physical address PADDR */
#define virt_to_phys_040(vaddr)						\
({									\
	unsigned long _mmusr, _paddr;					\
									\
	__asm__ __volatile__ (".chip 68040\n\t"				\
			      "ptestr (%1)\n\t"				\
			      "movec %%mmusr,%0\n\t"			\
			      ".chip 68k"				\
			      : "=r" (_mmusr)				\
			      : "a" (vaddr));				\
	_paddr = (_mmusr & MMU_R_040) ? (_mmusr & PAGE_MASK) : 0;	\
	_paddr;								\
})

static inline int
cache_flush_040 (unsigned long addr, int scope, int cache, unsigned long len)
{
	unsigned long paddr, i;

	switch (scope)
	{
	case FLUSH_SCOPE_ALL:
		switch (cache)
		{
		case FLUSH_CACHE_DATA:
			/* This nop is needed for some broken versions of the 68040. */
			__asm__ __volatile__ ("nop\n\t"
					      ".chip 68040\n\t"
					      "cpusha %dc\n\t"
					      ".chip 68k");
			break;
		case FLUSH_CACHE_INSN:
			__asm__ __volatile__ ("nop\n\t"
					      ".chip 68040\n\t"
					      "cpusha %ic\n\t"
					      ".chip 68k");
			break;
		default:
		case FLUSH_CACHE_BOTH:
			__asm__ __volatile__ ("nop\n\t"
					      ".chip 68040\n\t"
					      "cpusha %bc\n\t"
					      ".chip 68k");
			break;
		}
		break;

	case FLUSH_SCOPE_LINE:
		/* Find the physical address of the first mapped page in the
		   address range.  */
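		/*
		 * virt_to_phys_040() yields 0 for an unmapped page, so on a
		 * miss the loops below step forward a page at a time until a
		 * mapped page is found.  Byte counts are then converted to
		 * 16-byte cache lines: (len + (addr & 15) + 15) >> 4 rounds
		 * the range out to whole lines.
		 */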
		if ((paddr = virt_to_phys_040(addr))) {
			paddr += addr & ~(PAGE_MASK | 15);
			len = (len + (addr & 15) + 15) >> 4;
		} else {
			unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

			if (len <= tmp)
				return 0;
			addr += tmp;
			len -= tmp;
			tmp = PAGE_SIZE;
			for (;;)
			{
				if ((paddr = virt_to_phys_040(addr)))
					break;
				if (len <= tmp)
					return 0;
				addr += tmp;
				len -= tmp;
			}
			len = (len + 15) >> 4;
		}
		i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
		while (len--)
		{
			switch (cache)
			{
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushl %%dc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushl %%ic,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushl %%bc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			}
			if (!--i && len)
			{
				/*
				 * No need to page align here since it is done by
				 * virt_to_phys_040().
				 */
				addr += PAGE_SIZE;
				i = PAGE_SIZE / 16;
				/* Recompute physical address when crossing a page
				   boundary.  */
				for (;;)
				{
					if ((paddr = virt_to_phys_040(addr)))
						break;
					if (len <= i)
						return 0;
					len -= i;
					addr += PAGE_SIZE;
				}
			}
			else
				paddr += 16;
		}
		break;

	default:
	case FLUSH_SCOPE_PAGE:
		len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
		for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
		{
			if (!(paddr = virt_to_phys_040(addr)))
				continue;
			switch (cache)
			{
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushp %%dc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushp %%ic,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushp %%bc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			}
		}
		break;
	}
	return 0;
}

#define virt_to_phys_060(vaddr)				\
({							\
	unsigned long paddr;				\
	__asm__ __volatile__ (".chip 68060\n\t"		\
			      "plpar (%0)\n\t"		\
			      ".chip 68k"		\
			      : "=a" (paddr)		\
			      : "0" (vaddr));		\
	(paddr); /* XXX */				\
})

static inline int
cache_flush_060 (unsigned long addr, int scope, int cache, unsigned long len)
{
	unsigned long paddr, i;

	/*
	 * 68060 manual says:
	 *  cpush %dc : flush DC, remains valid (with our %cacr setup)
	 *  cpush %ic : invalidate IC
	 *  cpush %bc : flush DC + invalidate IC
	 */
	switch (scope)
	{
	case FLUSH_SCOPE_ALL:
		switch (cache)
		{
		case FLUSH_CACHE_DATA:
			__asm__ __volatile__ (".chip 68060\n\t"
					      "cpusha %dc\n\t"
					      ".chip 68k");
			break;
		case FLUSH_CACHE_INSN:
			__asm__ __volatile__ (".chip 68060\n\t"
					      "cpusha %ic\n\t"
					      ".chip 68k");
			break;
		default:
		case FLUSH_CACHE_BOTH:
			__asm__ __volatile__ (".chip 68060\n\t"
					      "cpusha %bc\n\t"
					      ".chip 68k");
			break;
		}
		break;

	case FLUSH_SCOPE_LINE:
		/* Find the physical address of the first mapped page in the
		   address range.  */
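		/*
		 * Unlike the '040 ptestr path, which yields only the page
		 * frame, plpar returns the full translated address, so the
		 * range is first rounded out to 16-byte cache-line
		 * boundaries here and paddr comes back already line-aligned.
		 */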
		len += addr & 15;
		addr &= -16;
		if (!(paddr = virt_to_phys_060(addr))) {
			unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

			if (len <= tmp)
				return 0;
			addr += tmp;
			len -= tmp;
			tmp = PAGE_SIZE;
			for (;;)
			{
				if ((paddr = virt_to_phys_060(addr)))
					break;
				if (len <= tmp)
					return 0;
				addr += tmp;
				len -= tmp;
			}
		}
		len = (len + 15) >> 4;
		i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
		while (len--)
		{
			switch (cache)
			{
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushl %%dc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushl %%ic,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushl %%bc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			}
			if (!--i && len)
			{
				/*
				 * We just want to jump to the first cache line
				 * in the next page.
				 */
				addr += PAGE_SIZE;
				addr &= PAGE_MASK;

				i = PAGE_SIZE / 16;
				/* Recompute physical address when crossing a page
				   boundary.  */
				for (;;)
				{
					if ((paddr = virt_to_phys_060(addr)))
						break;
					if (len <= i)
						return 0;
					len -= i;
					addr += PAGE_SIZE;
				}
			}
			else
				paddr += 16;
		}
		break;

	default:
	case FLUSH_SCOPE_PAGE:
		len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
		addr &= PAGE_MASK;	/* Workaround for bug in some
					   revisions of the 68060 */
		for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
		{
			if (!(paddr = virt_to_phys_060(addr)))
				continue;
			switch (cache)
			{
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushp %%dc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushp %%ic,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushp %%bc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			}
		}
		break;
	}
	return 0;
}

/* sys_cacheflush -- flush (part of) the processor cache. */
asmlinkage int
sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
{
	struct vm_area_struct *vma;
	int ret = -EINVAL;

	lock_kernel();
	if (scope < FLUSH_SCOPE_LINE || scope > FLUSH_SCOPE_ALL ||
	    cache & ~FLUSH_CACHE_BOTH)
		goto out;

	if (scope == FLUSH_SCOPE_ALL) {
		/* Only the superuser may explicitly flush the whole cache. */
		ret = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto out;
	} else {
		/*
		 * Verify that the specified address region actually belongs
		 * to this process.
		 */
		vma = find_vma (current->mm, addr);
		ret = -EINVAL;
		/* Check for overflow.  */
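		/*
		 * Together the two tests below confine the flush to a single
		 * VMA of the calling process: addr + len < addr catches a
		 * range that wraps past the top of the address space, and
		 * the bounds test rejects ranges not fully inside the
		 * mapping returned by find_vma().
		 */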
		if (addr + len < addr)
			goto out;
		if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end)
			goto out;
	}

	if (CPU_IS_020_OR_030) {
		if (scope == FLUSH_SCOPE_LINE && len < 256) {
			unsigned long cacr;
			__asm__ ("movec %%cacr, %0" : "=r" (cacr));
			if (cache & FLUSH_CACHE_INSN)
				cacr |= 4;
			if (cache & FLUSH_CACHE_DATA)
				cacr |= 0x400;
			len >>= 2;
			while (len--) {
				__asm__ __volatile__ ("movec %1, %%caar\n\t"
						      "movec %0, %%cacr"
						      : /* no outputs */
						      : "r" (cacr), "r" (addr));
				addr += 4;
			}
		} else {
			/* Flush the whole cache, even if page granularity requested. */
			unsigned long cacr;
			__asm__ ("movec %%cacr, %0" : "=r" (cacr));
			if (cache & FLUSH_CACHE_INSN)
				cacr |= 8;
			if (cache & FLUSH_CACHE_DATA)
				cacr |= 0x800;
			__asm__ __volatile__ ("movec %0, %%cacr" : : "r" (cacr));
		}
		ret = 0;
		goto out;
	} else {
		/*
		 * 040 or 060: don't blindly trust 'scope', someone could
		 * try to flush a few megs of memory.
		 */

		if (len >= 3 * PAGE_SIZE && scope < FLUSH_SCOPE_PAGE)
			scope = FLUSH_SCOPE_PAGE;
		if (len >= 10 * PAGE_SIZE && scope < FLUSH_SCOPE_ALL)
			scope = FLUSH_SCOPE_ALL;
		if (CPU_IS_040) {
			ret = cache_flush_040 (addr, scope, cache, len);
		} else if (CPU_IS_060) {
			ret = cache_flush_060 (addr, scope, cache, len);
		}
	}
out:
	unlock_kernel();
	return ret;
}

asmlinkage int sys_getpagesize(void)
{
	return PAGE_SIZE;
}

/*
 * Do a system call from kernel instead of calling sys_execve so we
 * end up with proper pt_regs.
 */
int kernel_execve(const char *filename, char *const argv[], char *const envp[])
{
	register long __res asm ("%d0") = __NR_execve;
	register long __a asm ("%d1") = (long)(filename);
	register long __b asm ("%d2") = (long)(argv);
	register long __c asm ("%d3") = (long)(envp);
	asm volatile ("trap #0" : "+d" (__res)
			: "d" (__a), "d" (__b), "d" (__c));
	return __res;
}
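/*
 * Illustration only (userspace, not part of this file): a JIT on m68k
 * could flush freshly written code with the cacheflush syscall defined
 * above, e.g.
 *
 *	#include <asm/cachectl.h>
 *	#include <unistd.h>
 *
 *	if (syscall(__NR_cacheflush, (unsigned long)buf,
 *		    FLUSH_SCOPE_LINE, FLUSH_CACHE_BOTH, size) < 0)
 *		perror("cacheflush");
 *
 * `buf' and `size' are hypothetical; the FLUSH_* constants come from
 * <asm/cachectl.h> and match sys_cacheflush()'s argument order.
 */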