xref: /openbmc/linux/arch/m68k/kernel/sys_m68k.c (revision f8b72560)
/*
 * linux/arch/m68k/kernel/sys_m68k.c
 *
 * This file contains various random system calls that
 * have a non-standard calling sequence on the Linux/m68k
 * platform.
 */

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/ipc.h>

#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/cachectl.h>
#include <asm/traps.h>
#include <asm/page.h>
#include <asm/unistd.h>

asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags,
	unsigned long fd, unsigned long pgoff)
{
	/*
	 * This is wrong for sun3, where PAGE_SIZE is 8Kb: there we would
	 * need to shift the argument down by 1, since m68k mmap64(3)
	 * (in libc) passes the last argument of mmap2 in 4Kb units.
	 */
	return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
}
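
/*
 * Worked example of the units (illustrative, not from the original
 * source): to map file offset 0x10000, libc passes
 * pgoff == 0x10000 >> 12 == 0x10.  With 4Kb pages the kernel maps
 * offset 0x10 << 12 == 0x10000, as intended; with sun3's 8Kb pages it
 * would map 0x10 << 13 == 0x20000, so the correct value there would
 * be pgoff >> 1 == 0x8.
 */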

/*
 * Perform the select(nd, in, out, ex, tv) and mmap() system
 * calls. Linux/m68k cloned Linux/i386, which at the time could not
 * handle more than 4 system call parameters, so these system calls
 * used a memory block for parameter passing.
 */

struct mmap_arg_struct {
	unsigned long addr;
	unsigned long len;
	unsigned long prot;
	unsigned long flags;
	unsigned long fd;
	unsigned long offset;
};

asmlinkage int old_mmap(struct mmap_arg_struct __user *arg)
{
	struct mmap_arg_struct a;
	int error = -EFAULT;

	if (copy_from_user(&a, arg, sizeof(a)))
		goto out;

	error = -EINVAL;
	if (a.offset & ~PAGE_MASK)
		goto out;

	error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
			       a.offset >> PAGE_SHIFT);
out:
	return error;
}
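
/*
 * Hypothetical userspace sketch (not part of this file): the
 * one-argument convention packs everything into a struct and passes
 * only its address, e.g.
 *
 *	struct mmap_arg_struct a = {
 *		.addr = 0, .len = 8192,
 *		.prot = PROT_READ | PROT_WRITE,
 *		.flags = MAP_PRIVATE | MAP_ANONYMOUS,
 *		.fd = (unsigned long) -1, .offset = 0,
 *	};
 *	void *p = (void *) syscall(__NR_mmap, &a);
 *
 * Note that a.offset is in bytes and must be page aligned, unlike the
 * pgoff argument of sys_mmap2() above.
 */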

struct sel_arg_struct {
	unsigned long n;
	fd_set __user *inp, *outp, *exp;
	struct timeval __user *tvp;
};

asmlinkage int old_select(struct sel_arg_struct __user *arg)
{
	struct sel_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	/* sys_select() does the appropriate kernel locking */
	return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
}
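
/*
 * The same packing trick applies here: a hypothetical caller fills a
 * sel_arg_struct with { n, inp, outp, exp, tvp } and passes only its
 * address through the old select syscall.
 */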

/*
 * sys_ipc() is the de-multiplexer for the SysV IPC calls.
 *
 * This is really horribly ugly.
 */
asmlinkage int sys_ipc (uint call, int first, int second,
			int third, void __user *ptr, long fifth)
{
	int version, ret;

	version = call >> 16; /* hack for backward compatibility */
	call &= 0xffff;

	if (call <= SEMCTL)
		switch (call) {
		case SEMOP:
			return sys_semop (first, ptr, second);
		case SEMGET:
			return sys_semget (first, second, third);
		case SEMCTL: {
			union semun fourth;
			if (!ptr)
				return -EINVAL;
			if (get_user(fourth.__pad, (void __user *__user *) ptr))
				return -EFAULT;
			return sys_semctl (first, second, third, fourth);
			}
		default:
			return -ENOSYS;
		}
	if (call <= MSGCTL)
		switch (call) {
		case MSGSND:
			return sys_msgsnd (first, ptr, second, third);
		case MSGRCV:
			switch (version) {
			case 0: {
				struct ipc_kludge tmp;
				if (!ptr)
					return -EINVAL;
				if (copy_from_user (&tmp, ptr, sizeof (tmp)))
					return -EFAULT;
				return sys_msgrcv (first, tmp.msgp, second,
						   tmp.msgtyp, third);
				}
			default:
				return sys_msgrcv (first, ptr,
						   second, fifth, third);
			}
		case MSGGET:
			return sys_msgget ((key_t) first, second);
		case MSGCTL:
			return sys_msgctl (first, second, ptr);
		default:
			return -ENOSYS;
		}
	if (call <= SHMCTL)
		switch (call) {
		case SHMAT:
			switch (version) {
			default: {
				ulong raddr;
				ret = do_shmat (first, ptr, second, &raddr);
				if (ret)
					return ret;
				return put_user (raddr, (ulong __user *) third);
			}
			}
		case SHMDT:
			return sys_shmdt (ptr);
		case SHMGET:
			return sys_shmget (first, second, third);
		case SHMCTL:
			return sys_shmctl (first, second, ptr);
		default:
			return -ENOSYS;
		}

	return -EINVAL;
}
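
/*
 * Encoding sketch (illustrative, not from the original source): the
 * IPC "version" rides in the high 16 bits of the call number, so a
 * hypothetical caller issues the new-style MSGRCV as
 *
 *	ipc(MSGRCV | (1 << 16), msqid, msgsz, msgflg, msgp, msgtyp);
 *
 * whereas version 0 (the historical layout) passes msgp and msgtyp
 * through ptr in a struct ipc_kludge instead.
 */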

/*
 * Convert the virtual (user) address VADDR to a physical address, or
 * to 0 if the page is not resident.  "ptestr" searches the
 * translation tables for VADDR and leaves the result in the MMU
 * status register, whose MMU_R_040 bit flags a resident page.
 */
#define virt_to_phys_040(vaddr)						\
({									\
  unsigned long _mmusr, _paddr;						\
									\
  __asm__ __volatile__ (".chip 68040\n\t"				\
			"ptestr (%1)\n\t"				\
			"movec %%mmusr,%0\n\t"				\
			".chip 68k"					\
			: "=r" (_mmusr)					\
			: "a" (vaddr));					\
  _paddr = (_mmusr & MMU_R_040) ? (_mmusr & PAGE_MASK) : 0;		\
  _paddr;								\
})

static inline int
cache_flush_040 (unsigned long addr, int scope, int cache, unsigned long len)
{
  unsigned long paddr, i;

  switch (scope)
    {
    case FLUSH_SCOPE_ALL:
      switch (cache)
	{
	case FLUSH_CACHE_DATA:
	  /* This nop is needed for some broken versions of the 68040.  */
	  __asm__ __volatile__ ("nop\n\t"
				".chip 68040\n\t"
				"cpusha %dc\n\t"
				".chip 68k");
	  break;
	case FLUSH_CACHE_INSN:
	  __asm__ __volatile__ ("nop\n\t"
				".chip 68040\n\t"
				"cpusha %ic\n\t"
				".chip 68k");
	  break;
	default:
	case FLUSH_CACHE_BOTH:
	  __asm__ __volatile__ ("nop\n\t"
				".chip 68040\n\t"
				"cpusha %bc\n\t"
				".chip 68k");
	  break;
	}
      break;

    case FLUSH_SCOPE_LINE:
      /* Find the physical address of the first mapped page in the
	 address range.  */
      if ((paddr = virt_to_phys_040(addr))) {
        paddr += addr & ~(PAGE_MASK | 15);
        len = (len + (addr & 15) + 15) >> 4;
      } else {
	unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

	if (len <= tmp)
	  return 0;
	addr += tmp;
	len -= tmp;
	tmp = PAGE_SIZE;
	for (;;)
	  {
	    if ((paddr = virt_to_phys_040(addr)))
	      break;
	    if (len <= tmp)
	      return 0;
	    addr += tmp;
	    len -= tmp;
	  }
	len = (len + 15) >> 4;
      }
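      /* Number of 16-byte cache lines remaining in the current page.  */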
      i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
      while (len--)
	{
	  switch (cache)
	    {
	    case FLUSH_CACHE_DATA:
	      __asm__ __volatile__ ("nop\n\t"
				    ".chip 68040\n\t"
				    "cpushl %%dc,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    case FLUSH_CACHE_INSN:
	      __asm__ __volatile__ ("nop\n\t"
				    ".chip 68040\n\t"
				    "cpushl %%ic,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    default:
	    case FLUSH_CACHE_BOTH:
	      __asm__ __volatile__ ("nop\n\t"
				    ".chip 68040\n\t"
				    "cpushl %%bc,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    }
	  if (!--i && len)
	    {
	      /*
	       * No need to page align here since it is done by
	       * virt_to_phys_040().
	       */
	      addr += PAGE_SIZE;
	      i = PAGE_SIZE / 16;
	      /* Recompute physical address when crossing a page
	         boundary. */
	      for (;;)
		{
		  if ((paddr = virt_to_phys_040(addr)))
		    break;
		  if (len <= i)
		    return 0;
		  len -= i;
		  addr += PAGE_SIZE;
		}
	    }
	  else
	    paddr += 16;
	}
      break;

    default:
    case FLUSH_SCOPE_PAGE:
      len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
      for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
	{
	  if (!(paddr = virt_to_phys_040(addr)))
	    continue;
	  switch (cache)
	    {
	    case FLUSH_CACHE_DATA:
	      __asm__ __volatile__ ("nop\n\t"
				    ".chip 68040\n\t"
				    "cpushp %%dc,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    case FLUSH_CACHE_INSN:
	      __asm__ __volatile__ ("nop\n\t"
				    ".chip 68040\n\t"
				    "cpushp %%ic,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    default:
	    case FLUSH_CACHE_BOTH:
	      __asm__ __volatile__ ("nop\n\t"
				    ".chip 68040\n\t"
				    "cpushp %%bc,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    }
	}
      break;
    }
  return 0;
}

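/*
 * Convert the virtual (user) address VADDR to a physical address
 * using the '060 "plpar" instruction (PLPA, load physical address,
 * for read accesses).
 */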
#define virt_to_phys_060(vaddr)				\
({							\
  unsigned long paddr;					\
  __asm__ __volatile__ (".chip 68060\n\t"		\
			"plpar (%0)\n\t"		\
			".chip 68k"			\
			: "=a" (paddr)			\
			: "0" (vaddr));			\
  (paddr); /* XXX */					\
})

static inline int
cache_flush_060 (unsigned long addr, int scope, int cache, unsigned long len)
{
  unsigned long paddr, i;

  /*
   * 68060 manual says:
   *  cpush %dc : flush DC, remains valid (with our %cacr setup)
   *  cpush %ic : invalidate IC
   *  cpush %bc : flush DC + invalidate IC
   */
  switch (scope)
    {
    case FLUSH_SCOPE_ALL:
      switch (cache)
	{
	case FLUSH_CACHE_DATA:
	  __asm__ __volatile__ (".chip 68060\n\t"
				"cpusha %dc\n\t"
				".chip 68k");
	  break;
	case FLUSH_CACHE_INSN:
	  __asm__ __volatile__ (".chip 68060\n\t"
				"cpusha %ic\n\t"
				".chip 68k");
	  break;
	default:
	case FLUSH_CACHE_BOTH:
	  __asm__ __volatile__ (".chip 68060\n\t"
				"cpusha %bc\n\t"
				".chip 68k");
	  break;
	}
      break;

    case FLUSH_SCOPE_LINE:
      /* Find the physical address of the first mapped page in the
	 address range.  */
      len += addr & 15;
      addr &= -16;
      if (!(paddr = virt_to_phys_060(addr))) {
	unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

	if (len <= tmp)
	  return 0;
	addr += tmp;
	len -= tmp;
	tmp = PAGE_SIZE;
	for (;;)
	  {
	    if ((paddr = virt_to_phys_060(addr)))
	      break;
	    if (len <= tmp)
	      return 0;
	    addr += tmp;
	    len -= tmp;
	  }
      }
      len = (len + 15) >> 4;
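      /* Number of 16-byte cache lines remaining in the current page.  */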
      i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
      while (len--)
	{
	  switch (cache)
	    {
	    case FLUSH_CACHE_DATA:
	      __asm__ __volatile__ (".chip 68060\n\t"
				    "cpushl %%dc,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    case FLUSH_CACHE_INSN:
	      __asm__ __volatile__ (".chip 68060\n\t"
				    "cpushl %%ic,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    default:
	    case FLUSH_CACHE_BOTH:
	      __asm__ __volatile__ (".chip 68060\n\t"
				    "cpushl %%bc,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    }
	  if (!--i && len)
	    {
	      /*
	       * We just want to jump to the first cache line
	       * in the next page.
	       */
	      addr += PAGE_SIZE;
	      addr &= PAGE_MASK;

	      i = PAGE_SIZE / 16;
	      /* Recompute physical address when crossing a page
	         boundary. */
	      for (;;)
	        {
	          if ((paddr = virt_to_phys_060(addr)))
	            break;
	          if (len <= i)
	            return 0;
	          len -= i;
	          addr += PAGE_SIZE;
	        }
	    }
	  else
	    paddr += 16;
	}
      break;

    default:
    case FLUSH_SCOPE_PAGE:
      len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
      addr &= PAGE_MASK;	/* Workaround for bug in some
				   revisions of the 68060 */
      for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
	{
	  if (!(paddr = virt_to_phys_060(addr)))
	    continue;
	  switch (cache)
	    {
	    case FLUSH_CACHE_DATA:
	      __asm__ __volatile__ (".chip 68060\n\t"
				    "cpushp %%dc,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    case FLUSH_CACHE_INSN:
	      __asm__ __volatile__ (".chip 68060\n\t"
				    "cpushp %%ic,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    default:
	    case FLUSH_CACHE_BOTH:
	      __asm__ __volatile__ (".chip 68060\n\t"
				    "cpushp %%bc,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    }
	}
      break;
    }
  return 0;
}

/* sys_cacheflush -- flush (part of) the processor cache.  */
asmlinkage int
sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
{
	struct vm_area_struct *vma;
	int ret = -EINVAL;

	lock_kernel();
	if (scope < FLUSH_SCOPE_LINE || scope > FLUSH_SCOPE_ALL ||
	    cache & ~FLUSH_CACHE_BOTH)
		goto out;

	if (scope == FLUSH_SCOPE_ALL) {
		/* Only the superuser may explicitly flush the whole cache. */
		ret = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto out;
	} else {
		/*
		 * Verify that the specified address region actually belongs
		 * to this process.
		 */
		vma = find_vma (current->mm, addr);
		ret = -EINVAL;
		/* Check for overflow.  */
		if (addr + len < addr)
			goto out;
		if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end)
			goto out;
	}

	if (CPU_IS_020_OR_030) {
		if (scope == FLUSH_SCOPE_LINE && len < 256) {
			unsigned long cacr;
			__asm__ ("movec %%cacr, %0" : "=r" (cacr));
			if (cache & FLUSH_CACHE_INSN)
				cacr |= 4;
			if (cache & FLUSH_CACHE_DATA)
				cacr |= 0x400;
			len >>= 2;
			while (len--) {
				__asm__ __volatile__ ("movec %1, %%caar\n\t"
						      "movec %0, %%cacr"
						      : /* no outputs */
						      : "r" (cacr), "r" (addr));
				addr += 4;
			}
		} else {
			/* Flush the whole cache, even if page granularity requested. */
			unsigned long cacr;
			__asm__ ("movec %%cacr, %0" : "=r" (cacr));
			if (cache & FLUSH_CACHE_INSN)
				cacr |= 8;
			if (cache & FLUSH_CACHE_DATA)
				cacr |= 0x800;
			__asm__ __volatile__ ("movec %0, %%cacr" : : "r" (cacr));
		}
		ret = 0;
		goto out;
	} else {
		/*
		 * 040 or 060: don't blindly trust 'scope', someone could
		 * try to flush a few megs of memory.
		 */
		if (len >= 3 * PAGE_SIZE && scope < FLUSH_SCOPE_PAGE)
			scope = FLUSH_SCOPE_PAGE;
		if (len >= 10 * PAGE_SIZE && scope < FLUSH_SCOPE_ALL)
			scope = FLUSH_SCOPE_ALL;
		if (CPU_IS_040) {
			ret = cache_flush_040 (addr, scope, cache, len);
		} else if (CPU_IS_060) {
			ret = cache_flush_060 (addr, scope, cache, len);
		}
	}
out:
	unlock_kernel();
	return ret;
}
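
/*
 * Hypothetical userspace sketch (not part of this file): after a JIT
 * or self-modifying program writes instructions into a buffer, the
 * data cache must be pushed and the instruction cache invalidated
 * before jumping there:
 *
 *	char code[256];
 *	...emit instructions into code[]...
 *	if (cacheflush((unsigned long) code, FLUSH_SCOPE_LINE,
 *		       FLUSH_CACHE_BOTH, sizeof(code)) < 0)
 *		perror("cacheflush");
 *
 * FLUSH_SCOPE_ALL would additionally require CAP_SYS_ADMIN, as
 * enforced above.
 */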

asmlinkage int sys_getpagesize(void)
{
	return PAGE_SIZE;
}

/*
 * Do a system call from kernel instead of calling sys_execve so we
 * end up with proper pt_regs.
 */
int kernel_execve(const char *filename, char *const argv[], char *const envp[])
{
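	/*
	 * m68k syscall convention: trap #0 expects the syscall number
	 * in %d0 and the arguments in %d1-%d3; the result is returned
	 * in %d0, hence the "+d" constraint on __res.
	 */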
	register long __res asm ("%d0") = __NR_execve;
	register long __a asm ("%d1") = (long)(filename);
	register long __b asm ("%d2") = (long)(argv);
	register long __c asm ("%d3") = (long)(envp);
	asm volatile ("trap  #0" : "+d" (__res)
			: "d" (__a), "d" (__b), "d" (__c));
	return __res;
}