/*
 * linux/arch/m68k/kernel/sys_m68k.c
 *
 * This file contains various random system calls that
 * have a non-standard calling sequence on the Linux/m68k
 * platform.
 */

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/ipc.h>

#include <asm/setup.h>
#include <linux/uaccess.h>
#include <asm/cachectl.h>
#include <asm/traps.h>
#include <asm/page.h>
#include <asm/unistd.h>
#include <asm/cacheflush.h>

#ifdef CONFIG_MMU

#include <asm/tlb.h>

asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address,
			     unsigned long error_code);

asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
			  unsigned long prot, unsigned long flags,
			  unsigned long fd, unsigned long pgoff)
{
	/*
	 * This is wrong for sun3 - there PAGE_SIZE is 8Kb,
	 * so we need to shift the argument down by 1; m68k mmap64(3)
	 * (in libc) expects the last argument of mmap2 in 4Kb units.
	 */
	return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
}

/* Convert virtual (user) address VADDR to physical address PADDR */
#define virt_to_phys_040(vaddr)						\
({									\
	unsigned long _mmusr, _paddr;					\
									\
	__asm__ __volatile__ (".chip 68040\n\t"				\
			      "ptestr (%1)\n\t"				\
			      "movec %%mmusr,%0\n\t"			\
			      ".chip 68k"				\
			      : "=r" (_mmusr)				\
			      : "a" (vaddr));				\
	_paddr = (_mmusr & MMU_R_040) ? (_mmusr & PAGE_MASK) : 0;	\
	_paddr;								\
})

static inline int
cache_flush_040 (unsigned long addr, int scope, int cache, unsigned long len)
{
	unsigned long paddr, i;

	switch (scope) {
	case FLUSH_SCOPE_ALL:
		switch (cache) {
		case FLUSH_CACHE_DATA:
			/* This nop is needed for some broken versions of the 68040. */
			__asm__ __volatile__ ("nop\n\t"
					      ".chip 68040\n\t"
					      "cpusha %dc\n\t"
					      ".chip 68k");
			break;
		case FLUSH_CACHE_INSN:
			__asm__ __volatile__ ("nop\n\t"
					      ".chip 68040\n\t"
					      "cpusha %ic\n\t"
					      ".chip 68k");
			break;
		default:
		case FLUSH_CACHE_BOTH:
			__asm__ __volatile__ ("nop\n\t"
					      ".chip 68040\n\t"
					      "cpusha %bc\n\t"
					      ".chip 68k");
			break;
		}
		break;

	case FLUSH_SCOPE_LINE:
		/* Find the physical address of the first mapped page in the
		   address range.  */
		if ((paddr = virt_to_phys_040(addr))) {
			paddr += addr & ~(PAGE_MASK | 15);
			len = (len + (addr & 15) + 15) >> 4;
		} else {
			unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

			if (len <= tmp)
				return 0;
			addr += tmp;
			len -= tmp;
			tmp = PAGE_SIZE;
			for (;;) {
				if ((paddr = virt_to_phys_040(addr)))
					break;
				if (len <= tmp)
					return 0;
				addr += tmp;
				len -= tmp;
			}
			len = (len + 15) >> 4;
		}
		i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
		while (len--) {
			switch (cache) {
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushl %%dc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushl %%ic,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushl %%bc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			}
			if (!--i && len) {
				/*
				 * No need to page align here since it is done by
				 * virt_to_phys_040().
				 */
				addr += PAGE_SIZE;
				i = PAGE_SIZE / 16;
				/* Recompute physical address when crossing a page
				   boundary. */
				for (;;) {
					if ((paddr = virt_to_phys_040(addr)))
						break;
					if (len <= i)
						return 0;
					len -= i;
					addr += PAGE_SIZE;
				}
			} else
				paddr += 16;
		}
		break;

	default:
	case FLUSH_SCOPE_PAGE:
		len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
		for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE) {
			if (!(paddr = virt_to_phys_040(addr)))
				continue;
			switch (cache) {
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushp %%dc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushp %%ic,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushp %%bc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			}
		}
		break;
	}
	return 0;
}

#define virt_to_phys_060(vaddr)				\
({							\
	unsigned long paddr;				\
	__asm__ __volatile__ (".chip 68060\n\t"		\
			      "plpar (%0)\n\t"		\
			      ".chip 68k"		\
			      : "=a" (paddr)		\
			      : "0" (vaddr));		\
	(paddr); /* XXX */				\
})

static inline int
cache_flush_060 (unsigned long addr, int scope, int cache, unsigned long len)
{
	unsigned long paddr, i;

	/*
	 * 68060 manual says:
	 *  cpush %dc : flush DC, remains valid (with our %cacr setup)
	 *  cpush %ic : invalidate IC
	 *  cpush %bc : flush DC + invalidate IC
	 */
	switch (scope) {
	case FLUSH_SCOPE_ALL:
		switch (cache) {
		case FLUSH_CACHE_DATA:
			__asm__ __volatile__ (".chip 68060\n\t"
					      "cpusha %dc\n\t"
					      ".chip 68k");
			break;
		case FLUSH_CACHE_INSN:
			__asm__ __volatile__ (".chip 68060\n\t"
					      "cpusha %ic\n\t"
					      ".chip 68k");
			break;
		default:
		case FLUSH_CACHE_BOTH:
			__asm__ __volatile__ (".chip 68060\n\t"
					      "cpusha %bc\n\t"
					      ".chip 68k");
			break;
		}
		break;

	case FLUSH_SCOPE_LINE:
		/* Find the physical address of the first mapped page in the
		   address range.  */
		len += addr & 15;
		addr &= -16;
		if (!(paddr = virt_to_phys_060(addr))) {
			unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

			if (len <= tmp)
				return 0;
			addr += tmp;
			len -= tmp;
			tmp = PAGE_SIZE;
			for (;;) {
				if ((paddr = virt_to_phys_060(addr)))
					break;
				if (len <= tmp)
					return 0;
				addr += tmp;
				len -= tmp;
			}
		}
		len = (len + 15) >> 4;
		i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
		while (len--) {
			switch (cache) {
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushl %%dc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushl %%ic,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushl %%bc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			}
			if (!--i && len) {
				/*
				 * We just want to jump to the first cache line
				 * in the next page.
				 */
				addr += PAGE_SIZE;
				addr &= PAGE_MASK;

				i = PAGE_SIZE / 16;
				/* Recompute physical address when crossing a page
				   boundary. */
				for (;;) {
					if ((paddr = virt_to_phys_060(addr)))
						break;
					if (len <= i)
						return 0;
					len -= i;
					addr += PAGE_SIZE;
				}
			} else
				paddr += 16;
		}
		break;

	default:
	case FLUSH_SCOPE_PAGE:
		len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
		addr &= PAGE_MASK;	/* Workaround for bug in some
					   revisions of the 68060 */
		for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE) {
			if (!(paddr = virt_to_phys_060(addr)))
				continue;
			switch (cache) {
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushp %%dc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushp %%ic,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushp %%bc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			}
		}
		break;
	}
	return 0;
}
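/*
 * Illustrative note (not part of the original file): sys_cacheflush() below
 * takes a 'scope' selector (FLUSH_SCOPE_LINE, FLUSH_SCOPE_PAGE or
 * FLUSH_SCOPE_ALL) and a 'cache' selector (FLUSH_CACHE_DATA, FLUSH_CACHE_INSN
 * or FLUSH_CACHE_BOTH), both from <asm/cachectl.h>.  A userspace caller that
 * has just written instructions into a buffer might invoke it roughly as in
 * the sketch below; the wrapper name, the use of the raw syscall(2) interface
 * and the omitted error handling are assumptions, not part of this file.
 *
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <asm/cachectl.h>
 *	#include <asm/unistd.h>
 *
 *	static int flush_code_buffer(void *buf, unsigned long len)
 *	{
 *		return syscall(__NR_cacheflush, (unsigned long)buf,
 *			       FLUSH_SCOPE_LINE, FLUSH_CACHE_BOTH, len);
 *	}
 */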
/* sys_cacheflush -- flush (part of) the processor cache.  */
asmlinkage int
sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
{
	int ret = -EINVAL;

	if (scope < FLUSH_SCOPE_LINE || scope > FLUSH_SCOPE_ALL ||
	    cache & ~FLUSH_CACHE_BOTH)
		goto out;

	if (scope == FLUSH_SCOPE_ALL) {
		/* Only the superuser may explicitly flush the whole cache. */
		ret = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto out;

		down_read(&current->mm->mmap_sem);
	} else {
		struct vm_area_struct *vma;

		/* Check for overflow. */
		if (addr + len < addr)
			goto out;

		/*
		 * Verify that the specified address region actually belongs
		 * to this process.
		 */
		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, addr);
		if (!vma || addr < vma->vm_start || addr + len > vma->vm_end)
			goto out_unlock;
	}

	if (CPU_IS_020_OR_030) {
		if (scope == FLUSH_SCOPE_LINE && len < 256) {
			unsigned long cacr;
			__asm__ ("movec %%cacr, %0" : "=r" (cacr));
			if (cache & FLUSH_CACHE_INSN)
				cacr |= 4;
			if (cache & FLUSH_CACHE_DATA)
				cacr |= 0x400;
			len >>= 2;
			while (len--) {
				__asm__ __volatile__ ("movec %1, %%caar\n\t"
						      "movec %0, %%cacr"
						      : /* no outputs */
						      : "r" (cacr), "r" (addr));
				addr += 4;
			}
		} else {
			/* Flush the whole cache, even if page granularity requested. */
			unsigned long cacr;
			__asm__ ("movec %%cacr, %0" : "=r" (cacr));
			if (cache & FLUSH_CACHE_INSN)
				cacr |= 8;
			if (cache & FLUSH_CACHE_DATA)
				cacr |= 0x800;
			__asm__ __volatile__ ("movec %0, %%cacr" : : "r" (cacr));
		}
		ret = 0;
		goto out_unlock;
	} else {
		/*
		 * 040 or 060: don't blindly trust 'scope', someone could
		 * try to flush a few megs of memory.
		 */

		if (len >= 3 * PAGE_SIZE && scope < FLUSH_SCOPE_PAGE)
			scope = FLUSH_SCOPE_PAGE;
		if (len >= 10 * PAGE_SIZE && scope < FLUSH_SCOPE_ALL)
			scope = FLUSH_SCOPE_ALL;
		if (CPU_IS_040) {
			ret = cache_flush_040 (addr, scope, cache, len);
		} else if (CPU_IS_060) {
			ret = cache_flush_060 (addr, scope, cache, len);
		}
	}
out_unlock:
	up_read(&current->mm->mmap_sem);
out:
	return ret;
}

/* This syscall gets its arguments in A0 (mem), D2 (oldval) and
   D1 (newval).  */
asmlinkage int
sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
		      unsigned long __user * mem)
{
	/* This was borrowed from ARM's implementation.  */
	for (;;) {
		struct mm_struct *mm = current->mm;
		pgd_t *pgd;
		pmd_t *pmd;
		pte_t *pte;
		spinlock_t *ptl;
		unsigned long mem_value;

		down_read(&mm->mmap_sem);
		pgd = pgd_offset(mm, (unsigned long)mem);
		if (!pgd_present(*pgd))
			goto bad_access;
		pmd = pmd_offset(pgd, (unsigned long)mem);
		if (!pmd_present(*pmd))
			goto bad_access;
		pte = pte_offset_map_lock(mm, pmd, (unsigned long)mem, &ptl);
		if (!pte_present(*pte) || !pte_dirty(*pte)
		    || !pte_write(*pte)) {
			pte_unmap_unlock(pte, ptl);
			goto bad_access;
		}

		/*
		 * No need to check for EFAULT; we know that the page is
		 * present and writable.
		 */
		__get_user(mem_value, mem);
		if (mem_value == oldval)
			__put_user(newval, mem);

		pte_unmap_unlock(pte, ptl);
		up_read(&mm->mmap_sem);
		return mem_value;

	bad_access:
		up_read(&mm->mmap_sem);
		/* This is not necessarily a bad access; we can get here if
		   the memory we're trying to write to should be copied-on-write.
		   Make the kernel do the necessary page handling, then retry.
		   Simulate a write access fault to do that.  */
		{
			/* The first argument of the function corresponds to
			   D1, which is the first field of struct pt_regs.  */
			struct pt_regs *fp = (struct pt_regs *)&newval;

			/* '3' is an RMW flag.  */
			if (do_page_fault(fp, (unsigned long)mem, 3))
				/* If do_page_fault() failed, we don't
				   have anything meaningful to return.
				   There should be a SIGSEGV pending for
				   the process.  */
				return 0xdeadbeef;
		}
	}
}
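/*
 * Illustrative note (an assumption, not part of the original file): userspace
 * reaches sys_atomic_cmpxchg_32() through the normal m68k "trap #0" syscall
 * entry, so the arguments land in the registers named above: the new value in
 * %d1, the expected old value in %d2 and the user address in %a0, with the
 * syscall number in %d0; the value found at *mem before the exchange comes
 * back in %d0.  A minimal sketch of such a caller (wrapper name and inline
 * asm form are assumptions):
 *
 *	static inline unsigned long
 *	user_cmpxchg_32(volatile unsigned long *mem,
 *			unsigned long oldval, unsigned long newval)
 *	{
 *		register unsigned long num asm("d0") = __NR_atomic_cmpxchg_32;
 *		register unsigned long nv  asm("d1") = newval;
 *		register unsigned long ov  asm("d2") = oldval;
 *		register volatile unsigned long *ptr asm("a0") = mem;
 *
 *		asm volatile ("trap #0"
 *			      : "+d" (num)
 *			      : "d" (nv), "d" (ov), "a" (ptr)
 *			      : "memory");
 *		return num;
 *	}
 */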
#else

/* sys_cacheflush -- flush (part of) the processor cache.  */
asmlinkage int
sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
{
	flush_cache_all();
	return 0;
}

/* This syscall gets its arguments in A0 (mem), D2 (oldval) and
   D1 (newval).  */
asmlinkage int
sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
		      unsigned long __user * mem)
{
	struct mm_struct *mm = current->mm;
	unsigned long mem_value;

	down_read(&mm->mmap_sem);

	mem_value = *mem;
	if (mem_value == oldval)
		*mem = newval;

	up_read(&mm->mmap_sem);
	return mem_value;
}

#endif /* CONFIG_MMU */

asmlinkage int sys_getpagesize(void)
{
	return PAGE_SIZE;
}

asmlinkage unsigned long sys_get_thread_area(void)
{
	return current_thread_info()->tp_value;
}

asmlinkage int sys_set_thread_area(unsigned long tp)
{
	current_thread_info()->tp_value = tp;
	return 0;
}

asmlinkage int sys_atomic_barrier(void)
{
	/* no code needed for uniprocs */
	return 0;
}
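/*
 * Illustrative note (an assumption, not part of the original file): the thread
 * pointer stored by sys_set_thread_area() is per-task, and a userspace
 * threading library would typically use it as its TLS base, since m68k has no
 * user-visible thread register.  A C library might wrap the two calls roughly
 * as below; the wrapper names and the use of the raw syscall(2) interface are
 * assumptions.
 *
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <asm/unistd.h>
 *
 *	static void set_tls_pointer(void *tp)
 *	{
 *		syscall(__NR_set_thread_area, (unsigned long)tp);
 *	}
 *
 *	static void *get_tls_pointer(void)
 *	{
 *		return (void *)syscall(__NR_get_thread_area);
 *	}
 */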