/*
 * linux/arch/m68k/kernel/sys_m68k.c
 *
 * This file contains various random system calls that
 * have a non-standard calling sequence on the Linux/m68k
 * platform.
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>

#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/cachectl.h>
#include <asm/traps.h>
#include <asm/ipc.h>
#include <asm/page.h>

/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way unix traditionally does this, though.
 */
asmlinkage int sys_pipe(unsigned long * fildes)
{
	int fd[2];
	int error;

	error = do_pipe(fd);
	if (!error) {
		if (copy_to_user(fildes, fd, 2*sizeof(int)))
			error = -EFAULT;
	}
	return error;
}
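
/*
 * Illustrative user-space counterpart (a minimal sketch, not part of
 * this file): the libc pipe() wrapper passes the address of a two-int
 * array, which sys_pipe() above fills through copy_to_user().
 *
 *	#include <unistd.h>
 *
 *	int fd[2];
 *	if (pipe(fd) == -1)
 *		perror("pipe");
 *
 * On success fd[0] is the read end and fd[1] the write end.
 */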

/* common code for old and new mmaps */
static inline long do_mmap2(
	unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags,
	unsigned long fd, unsigned long pgoff)
{
	int error = -EBADF;
	struct file * file = NULL;

	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
	if (!(flags & MAP_ANONYMOUS)) {
		file = fget(fd);
		if (!file)
			goto out;
	}

	down_write(&current->mm->mmap_sem);
	error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
	up_write(&current->mm->mmap_sem);

	if (file)
		fput(file);
out:
	return error;
}

asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags,
	unsigned long fd, unsigned long pgoff)
{
	return do_mmap2(addr, len, prot, flags, fd, pgoff);
}
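
/*
 * Illustrative invocation (a user-space sketch, not part of this file):
 * unlike old_mmap() below, which takes a byte offset, mmap2 expects the
 * offset already shifted into page units, as do_mmap_pgoff() requires.
 *
 *	#include <sys/mman.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	void *p = (void *)syscall(SYS_mmap2, 0, 8192, PROT_READ,
 *				  MAP_PRIVATE, fd, 1);
 *
 * Here the final argument is the offset in pages, i.e. one page into
 * the file.
 */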

/*
 * Perform the select(nd, in, out, ex, tv) and mmap() system
 * calls. Linux/m68k cloned Linux/i386, which historically could not
 * handle more than four system call parameters, so these system calls
 * pass their parameters through a memory block instead.
 */

struct mmap_arg_struct {
	unsigned long addr;
	unsigned long len;
	unsigned long prot;
	unsigned long flags;
	unsigned long fd;
	unsigned long offset;
};

asmlinkage int old_mmap(struct mmap_arg_struct *arg)
{
	struct mmap_arg_struct a;
	int error = -EFAULT;

	if (copy_from_user(&a, arg, sizeof(a)))
		goto out;

	error = -EINVAL;
	if (a.offset & ~PAGE_MASK)
		goto out;

	a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);

	error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT);
out:
	return error;
}
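
/*
 * Illustrative invocation (a user-space sketch, not part of this file;
 * SYS_mmap here names the old mmap entry point): the single syscall
 * argument is a pointer to a parameter block whose layout must match
 * mmap_arg_struct above.
 *
 *	struct mmap_arg_struct args = {
 *		.addr = 0, .len = 8192,
 *		.prot = PROT_READ | PROT_WRITE,
 *		.flags = MAP_PRIVATE | MAP_ANONYMOUS,
 *		.fd = -1, .offset = 0,
 *	};
 *	void *p = (void *)syscall(SYS_mmap, &args);
 */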

#if 0
struct mmap_arg_struct64 {
	__u32 addr;
	__u32 len;
	__u32 prot;
	__u32 flags;
	__u64 offset; /* 64 bits */
	__u32 fd;
};

asmlinkage long sys_mmap64(struct mmap_arg_struct64 *arg)
{
	int error = -EFAULT;
	struct file * file = NULL;
	struct mmap_arg_struct64 a;
	unsigned long pgoff;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;

	if ((long)a.offset & ~PAGE_MASK)
		return -EINVAL;

	pgoff = a.offset >> PAGE_SHIFT;
	if ((a.offset >> PAGE_SHIFT) != pgoff)
		return -EINVAL;

	if (!(a.flags & MAP_ANONYMOUS)) {
		error = -EBADF;
		file = fget(a.fd);
		if (!file)
			goto out;
	}
	a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);

	down_write(&current->mm->mmap_sem);
	error = do_mmap_pgoff(file, a.addr, a.len, a.prot, a.flags, pgoff);
	up_write(&current->mm->mmap_sem);
	if (file)
		fput(file);
out:
	return error;
}
#endif

struct sel_arg_struct {
	unsigned long n;
	fd_set *inp, *outp, *exp;
	struct timeval *tvp;
};

asmlinkage int old_select(struct sel_arg_struct *arg)
{
	struct sel_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	/* sys_select() does the appropriate kernel locking */
	return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
}
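
/*
 * Illustrative invocation (a user-space sketch, not part of this file;
 * SYS_select here names the old select entry point): as with old_mmap(),
 * all five select arguments travel through a single parameter block
 * matching sel_arg_struct above.
 *
 *	struct sel_arg_struct a = {
 *		.n = maxfd + 1, .inp = &readfds,
 *		.outp = NULL, .exp = NULL, .tvp = &timeout,
 *	};
 *	int ret = syscall(SYS_select, &a);
 */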

/*
 * sys_ipc() is the de-multiplexer for the SysV IPC calls.
 *
 * This is really horribly ugly.
 */
asmlinkage int sys_ipc (uint call, int first, int second,
			int third, void *ptr, long fifth)
{
	int version, ret;

	version = call >> 16; /* hack for backward compatibility */
	call &= 0xffff;

	if (call <= SEMCTL)
		switch (call) {
		case SEMOP:
			return sys_semop (first, (struct sembuf *)ptr, second);
		case SEMGET:
			return sys_semget (first, second, third);
		case SEMCTL: {
			union semun fourth;
			if (!ptr)
				return -EINVAL;
			if (get_user(fourth.__pad, (void **) ptr))
				return -EFAULT;
			return sys_semctl (first, second, third, fourth);
			}
		default:
			return -ENOSYS;
		}
	if (call <= MSGCTL)
		switch (call) {
		case MSGSND:
			return sys_msgsnd (first, (struct msgbuf *) ptr,
					  second, third);
		case MSGRCV:
			switch (version) {
			case 0: {
				struct ipc_kludge tmp;
				if (!ptr)
					return -EINVAL;
				if (copy_from_user (&tmp,
						    (struct ipc_kludge *)ptr,
						    sizeof (tmp)))
					return -EFAULT;
				return sys_msgrcv (first, tmp.msgp, second,
						   tmp.msgtyp, third);
				}
			default:
				return sys_msgrcv (first,
						   (struct msgbuf *) ptr,
						   second, fifth, third);
			}
		case MSGGET:
			return sys_msgget ((key_t) first, second);
		case MSGCTL:
			return sys_msgctl (first, second,
					   (struct msqid_ds *) ptr);
		default:
			return -ENOSYS;
		}
	if (call <= SHMCTL)
		switch (call) {
		case SHMAT:
			switch (version) {
			default: {
				ulong raddr;
				ret = do_shmat (first, (char *) ptr,
						 second, &raddr);
				if (ret)
					return ret;
				return put_user (raddr, (ulong *) third);
			}
			}
		case SHMDT:
			return sys_shmdt ((char *)ptr);
		case SHMGET:
			return sys_shmget (first, second, third);
		case SHMCTL:
			return sys_shmctl (first, second,
					   (struct shmid_ds *) ptr);
		default:
			return -ENOSYS;
		}

	return -EINVAL;
}
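
/*
 * Illustrative demultiplexing (a sketch, not part of this file): libc
 * wrappers such as semop() reach the kernel through this one entry
 * point.  For example,
 *
 *	semop(semid, sops, nsops);
 *
 * typically becomes
 *
 *	syscall(SYS_ipc, SEMOP, semid, nsops, 0, sops, 0);
 *
 * which the SEMOP case above forwards to sys_semop(semid, sops, nsops).
 * The upper 16 bits of 'call' carry a version number used to select
 * between old and new argument conventions (see the MSGRCV case).
 */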

/* Convert virtual (user) address VADDR to the physical address of
   its page, or 0 if the page is not currently resident.  */
#define virt_to_phys_040(vaddr)						\
({									\
  unsigned long _mmusr, _paddr;						\
									\
  __asm__ __volatile__ (".chip 68040\n\t"				\
			"ptestr (%1)\n\t"				\
			"movec %%mmusr,%0\n\t"				\
			".chip 68k"					\
			: "=r" (_mmusr)					\
			: "a" (vaddr));					\
  _paddr = (_mmusr & MMU_R_040) ? (_mmusr & PAGE_MASK) : 0;		\
  _paddr;								\
})

static inline int
cache_flush_040 (unsigned long addr, int scope, int cache, unsigned long len)
{
  unsigned long paddr, i;

  switch (scope)
    {
    case FLUSH_SCOPE_ALL:
      switch (cache)
	{
	case FLUSH_CACHE_DATA:
	  /* This nop is needed for some broken versions of the 68040.  */
	  __asm__ __volatile__ ("nop\n\t"
				".chip 68040\n\t"
				"cpusha %dc\n\t"
				".chip 68k");
	  break;
	case FLUSH_CACHE_INSN:
	  __asm__ __volatile__ ("nop\n\t"
				".chip 68040\n\t"
				"cpusha %ic\n\t"
				".chip 68k");
	  break;
	default:
	case FLUSH_CACHE_BOTH:
	  __asm__ __volatile__ ("nop\n\t"
				".chip 68040\n\t"
				"cpusha %bc\n\t"
				".chip 68k");
	  break;
	}
      break;

    case FLUSH_SCOPE_LINE:
      /* Find the physical address of the first mapped page in the
	 address range.  */
      if ((paddr = virt_to_phys_040(addr))) {
	paddr += addr & ~(PAGE_MASK | 15);
	len = (len + (addr & 15) + 15) >> 4;
      } else {
	unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

	if (len <= tmp)
	  return 0;
	addr += tmp;
	len -= tmp;
	tmp = PAGE_SIZE;
	for (;;)
	  {
	    if ((paddr = virt_to_phys_040(addr)))
	      break;
	    if (len <= tmp)
	      return 0;
	    addr += tmp;
	    len -= tmp;
	  }
	len = (len + 15) >> 4;
      }
      i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
      while (len--)
	{
	  switch (cache)
	    {
	    case FLUSH_CACHE_DATA:
	      __asm__ __volatile__ ("nop\n\t"
				    ".chip 68040\n\t"
				    "cpushl %%dc,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    case FLUSH_CACHE_INSN:
	      __asm__ __volatile__ ("nop\n\t"
				    ".chip 68040\n\t"
				    "cpushl %%ic,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    default:
	    case FLUSH_CACHE_BOTH:
	      __asm__ __volatile__ ("nop\n\t"
				    ".chip 68040\n\t"
				    "cpushl %%bc,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    }
	  if (!--i && len)
	    {
	      /*
	       * No need to page align here since it is done by
	       * virt_to_phys_040().
	       */
	      addr += PAGE_SIZE;
	      i = PAGE_SIZE / 16;
	      /* Recompute physical address when crossing a page
	         boundary. */
	      for (;;)
		{
		  if ((paddr = virt_to_phys_040(addr)))
		    break;
		  if (len <= i)
		    return 0;
		  len -= i;
		  addr += PAGE_SIZE;
		}
	    }
	  else
	    paddr += 16;
	}
      break;

    default:
    case FLUSH_SCOPE_PAGE:
      len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
      for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
	{
	  if (!(paddr = virt_to_phys_040(addr)))
	    continue;
	  switch (cache)
	    {
	    case FLUSH_CACHE_DATA:
	      __asm__ __volatile__ ("nop\n\t"
				    ".chip 68040\n\t"
				    "cpushp %%dc,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    case FLUSH_CACHE_INSN:
	      __asm__ __volatile__ ("nop\n\t"
				    ".chip 68040\n\t"
				    "cpushp %%ic,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    default:
	    case FLUSH_CACHE_BOTH:
	      __asm__ __volatile__ ("nop\n\t"
				    ".chip 68040\n\t"
				    "cpushp %%bc,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    }
	}
      break;
    }
  return 0;
}

#define virt_to_phys_060(vaddr)				\
({							\
  unsigned long paddr;					\
  __asm__ __volatile__ (".chip 68060\n\t"		\
			"plpar (%0)\n\t"		\
			".chip 68k"			\
			: "=a" (paddr)			\
			: "0" (vaddr));			\
  (paddr); /* XXX */					\
})

static inline int
cache_flush_060 (unsigned long addr, int scope, int cache, unsigned long len)
{
  unsigned long paddr, i;

  /*
   * 68060 manual says:
   *  cpush %dc : flush DC, remains valid (with our %cacr setup)
   *  cpush %ic : invalidate IC
   *  cpush %bc : flush DC + invalidate IC
   */
  switch (scope)
    {
    case FLUSH_SCOPE_ALL:
      switch (cache)
	{
	case FLUSH_CACHE_DATA:
	  __asm__ __volatile__ (".chip 68060\n\t"
				"cpusha %dc\n\t"
				".chip 68k");
	  break;
	case FLUSH_CACHE_INSN:
	  __asm__ __volatile__ (".chip 68060\n\t"
				"cpusha %ic\n\t"
				".chip 68k");
	  break;
	default:
	case FLUSH_CACHE_BOTH:
	  __asm__ __volatile__ (".chip 68060\n\t"
				"cpusha %bc\n\t"
				".chip 68k");
	  break;
	}
      break;

    case FLUSH_SCOPE_LINE:
      /* Find the physical address of the first mapped page in the
	 address range.  */
      len += addr & 15;
      addr &= -16;
      if (!(paddr = virt_to_phys_060(addr))) {
	unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

	if (len <= tmp)
	  return 0;
	addr += tmp;
	len -= tmp;
	tmp = PAGE_SIZE;
	for (;;)
	  {
	    if ((paddr = virt_to_phys_060(addr)))
	      break;
	    if (len <= tmp)
	      return 0;
	    addr += tmp;
	    len -= tmp;
	  }
      }
      len = (len + 15) >> 4;
      i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
      while (len--)
	{
	  switch (cache)
	    {
	    case FLUSH_CACHE_DATA:
	      __asm__ __volatile__ (".chip 68060\n\t"
				    "cpushl %%dc,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    case FLUSH_CACHE_INSN:
	      __asm__ __volatile__ (".chip 68060\n\t"
				    "cpushl %%ic,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    default:
	    case FLUSH_CACHE_BOTH:
	      __asm__ __volatile__ (".chip 68060\n\t"
				    "cpushl %%bc,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    }
	  if (!--i && len)
	    {
	      /*
	       * We just want to jump to the first cache line
	       * in the next page.
	       */
	      addr += PAGE_SIZE;
	      addr &= PAGE_MASK;

	      i = PAGE_SIZE / 16;
	      /* Recompute physical address when crossing a page
	         boundary. */
	      for (;;)
	        {
	          if ((paddr = virt_to_phys_060(addr)))
	            break;
	          if (len <= i)
	            return 0;
	          len -= i;
	          addr += PAGE_SIZE;
	        }
	    }
	  else
	    paddr += 16;
	}
      break;

    default:
    case FLUSH_SCOPE_PAGE:
      len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
      addr &= PAGE_MASK;	/* Workaround for bug in some
				   revisions of the 68060 */
      for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
	{
	  if (!(paddr = virt_to_phys_060(addr)))
	    continue;
	  switch (cache)
	    {
	    case FLUSH_CACHE_DATA:
	      __asm__ __volatile__ (".chip 68060\n\t"
				    "cpushp %%dc,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    case FLUSH_CACHE_INSN:
	      __asm__ __volatile__ (".chip 68060\n\t"
				    "cpushp %%ic,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    default:
	    case FLUSH_CACHE_BOTH:
	      __asm__ __volatile__ (".chip 68060\n\t"
				    "cpushp %%bc,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    }
	}
      break;
    }
  return 0;
}

/* sys_cacheflush -- flush (part of) the processor cache.  */
asmlinkage int
sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
{
	struct vm_area_struct *vma;
	int ret = -EINVAL;

	lock_kernel();
	if (scope < FLUSH_SCOPE_LINE || scope > FLUSH_SCOPE_ALL ||
	    cache & ~FLUSH_CACHE_BOTH)
		goto out;

	if (scope == FLUSH_SCOPE_ALL) {
		/* Only the superuser may explicitly flush the whole cache. */
		ret = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto out;
	} else {
		/*
		 * Verify that the specified address region actually belongs
		 * to this process.
		 */
		vma = find_vma (current->mm, addr);
		ret = -EINVAL;
		/* Check for overflow.  */
		if (addr + len < addr)
			goto out;
		if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end)
			goto out;
	}

	if (CPU_IS_020_OR_030) {
		if (scope == FLUSH_SCOPE_LINE && len < 256) {
			unsigned long cacr;
			__asm__ ("movec %%cacr, %0" : "=r" (cacr));
			if (cache & FLUSH_CACHE_INSN)
				cacr |= 4;
			if (cache & FLUSH_CACHE_DATA)
				cacr |= 0x400;
			len >>= 2;
			while (len--) {
				__asm__ __volatile__ ("movec %1, %%caar\n\t"
						      "movec %0, %%cacr"
						      : /* no outputs */
						      : "r" (cacr), "r" (addr));
				addr += 4;
			}
		} else {
			/* Flush the whole cache, even if page granularity requested. */
			unsigned long cacr;
			__asm__ ("movec %%cacr, %0" : "=r" (cacr));
			if (cache & FLUSH_CACHE_INSN)
				cacr |= 8;
			if (cache & FLUSH_CACHE_DATA)
				cacr |= 0x800;
			__asm__ __volatile__ ("movec %0, %%cacr" : : "r" (cacr));
		}
		ret = 0;
		goto out;
	} else {
		/*
		 * 040 or 060: don't blindly trust 'scope', someone could
		 * try to flush a few megs of memory.
		 */
		if (len >= 3 * PAGE_SIZE && scope < FLUSH_SCOPE_PAGE)
			scope = FLUSH_SCOPE_PAGE;
		if (len >= 10 * PAGE_SIZE && scope < FLUSH_SCOPE_ALL)
			scope = FLUSH_SCOPE_ALL;
		if (CPU_IS_040) {
			ret = cache_flush_040 (addr, scope, cache, len);
		} else if (CPU_IS_060) {
			ret = cache_flush_060 (addr, scope, cache, len);
		}
	}
out:
	unlock_kernel();
	return ret;
}
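
/*
 * Typical use (a user-space sketch, not part of this file): code that
 * generates instructions at run time, e.g. a JIT, must flush the data
 * cache and invalidate the instruction cache over the generated range
 * before jumping to it.  The scope and cache constants come from
 * <asm/cachectl.h>.
 *
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	int ret = syscall(SYS_cacheflush, (unsigned long)code,
 *			  FLUSH_SCOPE_LINE, FLUSH_CACHE_BOTH, code_len);
 */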

asmlinkage int sys_getpagesize(void)
{
	return PAGE_SIZE;
}
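
/*
 * User space normally reaches this through getpagesize() or
 * sysconf(_SC_PAGESIZE) (a sketch, not part of this file):
 *
 *	long pagesize = sysconf(_SC_PAGESIZE);
 */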