xref: /openbmc/linux/arch/sh/kernel/sys_sh.c (revision 87c2ce3b)
1 /*
2  * linux/arch/sh/kernel/sys_sh.c
3  *
4  * This file contains various random system calls that
5  * have a non-standard calling sequence on the Linux/SuperH
6  * platform.
7  *
8  * Taken from i386 version.
9  */
10 
11 #include <linux/errno.h>
12 #include <linux/sched.h>
13 #include <linux/mm.h>
14 #include <linux/smp.h>
15 #include <linux/smp_lock.h>
16 #include <linux/sem.h>
17 #include <linux/msg.h>
18 #include <linux/shm.h>
19 #include <linux/stat.h>
20 #include <linux/syscalls.h>
21 #include <linux/mman.h>
22 #include <linux/file.h>
23 #include <linux/utsname.h>
24 
25 #include <asm/uaccess.h>
26 #include <asm/ipc.h>
27 
28 /*
29  * sys_pipe() is the normal C calling standard for creating
30  * a pipe. It's not the way Unix traditionally does this, though.
31  */
32 asmlinkage int sys_pipe(unsigned long r4, unsigned long r5,
33 	unsigned long r6, unsigned long r7,
34 	struct pt_regs regs)
35 {
36 	int fd[2];
37 	int error;
38 
39 	error = do_pipe(fd);
40 	if (!error) {
41 		regs.regs[1] = fd[1];
42 		return fd[0];
43 	}
44 	return error;
45 }
46 
47 #if defined(HAVE_ARCH_UNMAPPED_AREA)
/*
 * To avoid cache aliases, we map the shared page with the same colour.
 */
51 #define COLOUR_ALIGN(addr)	(((addr)+SHMLBA-1)&~(SHMLBA-1))
52 
53 unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
54 	unsigned long len, unsigned long pgoff, unsigned long flags)
55 {
56 	struct mm_struct *mm = current->mm;
57 	struct vm_area_struct *vma;
58 	unsigned long start_addr;
59 
60 	if (flags & MAP_FIXED) {
61 		/* We do not accept a shared mapping if it would violate
62 		 * cache aliasing constraints.
63 		 */
64 		if ((flags & MAP_SHARED) && (addr & (SHMLBA - 1)))
65 			return -EINVAL;
66 		return addr;
67 	}
68 
69 	if (len > TASK_SIZE)
70 		return -ENOMEM;
71 
72 	if (addr) {
73 		if (flags & MAP_PRIVATE)
74 			addr = PAGE_ALIGN(addr);
75 		else
76 			addr = COLOUR_ALIGN(addr);
77 		vma = find_vma(mm, addr);
78 		if (TASK_SIZE - len >= addr &&
79 		    (!vma || addr + len <= vma->vm_start))
80 			return addr;
81 	}
82 	if (len <= mm->cached_hole_size) {
83 	        mm->cached_hole_size = 0;
84 		mm->free_area_cache = TASK_UNMAPPED_BASE;
85 	}
86 	if (flags & MAP_PRIVATE)
87 		addr = PAGE_ALIGN(mm->free_area_cache);
88 	else
89 		addr = COLOUR_ALIGN(mm->free_area_cache);
90 	start_addr = addr;
91 
92 full_search:
93 	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
94 		/* At this point:  (!vma || addr < vma->vm_end). */
95 		if (TASK_SIZE - len < addr) {
96 			/*
97 			 * Start a new search - just in case we missed
98 			 * some holes.
99 			 */
100 			if (start_addr != TASK_UNMAPPED_BASE) {
101 				start_addr = addr = TASK_UNMAPPED_BASE;
102 				mm->cached_hole_size = 0;
103 				goto full_search;
104 			}
105 			return -ENOMEM;
106 		}
107 		if (!vma || addr + len <= vma->vm_start) {
108 			/*
109 			 * Remember the place where we stopped the search:
110 			 */
111 			mm->free_area_cache = addr + len;
112 			return addr;
113 		}
114 		if (addr + mm->cached_hole_size < vma->vm_start)
115 		        mm->cached_hole_size = vma->vm_start - addr;
116 
117 		addr = vma->vm_end;
118 		if (!(flags & MAP_PRIVATE))
119 			addr = COLOUR_ALIGN(addr);
120 	}
121 }
122 #endif
123 
124 static inline long
125 do_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
126 	 unsigned long flags, int fd, unsigned long pgoff)
127 {
128 	int error = -EBADF;
129 	struct file *file = NULL;
130 
131 	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
132 	if (!(flags & MAP_ANONYMOUS)) {
133 		file = fget(fd);
134 		if (!file)
135 			goto out;
136 	}
137 
138 	down_write(&current->mm->mmap_sem);
139 	error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
140 	up_write(&current->mm->mmap_sem);
141 
142 	if (file)
143 		fput(file);
144 out:
145 	return error;
146 }
147 
148 asmlinkage int old_mmap(unsigned long addr, unsigned long len,
149 	unsigned long prot, unsigned long flags,
150 	int fd, unsigned long off)
151 {
152 	if (off & ~PAGE_MASK)
153 		return -EINVAL;
154 	return do_mmap2(addr, len, prot, flags, fd, off>>PAGE_SHIFT);
155 }
156 
157 asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
158 	unsigned long prot, unsigned long flags,
159 	unsigned long fd, unsigned long pgoff)
160 {
161 	return do_mmap2(addr, len, prot, flags, fd, pgoff);
162 }
163 
164 /*
165  * sys_ipc() is the de-multiplexer for the SysV IPC calls..
166  *
167  * This is really horribly ugly.
168  */
/*
 * De-multiplex a SysV IPC call: the low 16 bits of @call select the
 * operation (SEM*, MSG*, SHM* ranges), the high 16 bits carry the
 * interface version used by old libc/iBCS2 callers.  Arguments are
 * forwarded to the corresponding sys_* implementation, with @ptr
 * reinterpreted per operation.  Returns the callee's result, or
 * -EINVAL for an unknown call number.
 */
asmlinkage int sys_ipc(uint call, int first, int second,
		       int third, void __user *ptr, long fifth)
{
	int version, ret;

	version = call >> 16; /* hack for backward compatibility */
	call &= 0xffff;

	/* Semaphore calls: SEMOP..SEMCTL */
	if (call <= SEMCTL)
		switch (call) {
		case SEMOP:
			/* Plain semop() is semtimedop() with no timeout. */
			return sys_semtimedop(first, (struct sembuf __user *)ptr,
					      second, NULL);
		case SEMTIMEDOP:
			/* Timeout pointer is smuggled through 'fifth'. */
			return sys_semtimedop(first, (struct sembuf __user *)ptr,
					      second,
					      (const struct timespec __user *)fifth);
		case SEMGET:
			return sys_semget (first, second, third);
		case SEMCTL: {
			/* The semctl() fourth argument (union semun) is
			 * passed indirectly: 'ptr' points at it in user
			 * space, so fetch it before forwarding. */
			union semun fourth;
			if (!ptr)
				return -EINVAL;
			if (get_user(fourth.__pad, (void * __user *) ptr))
				return -EFAULT;
			return sys_semctl (first, second, third, fourth);
			}
		default:
			return -EINVAL;
		}

	/* Message-queue calls: MSGSND..MSGCTL */
	if (call <= MSGCTL)
		switch (call) {
		case MSGSND:
			return sys_msgsnd (first, (struct msgbuf __user *) ptr,
					  second, third);
		case MSGRCV:
			switch (version) {
			case 0: {
				/* Old-style callers pack msgp and msgtyp
				 * into a struct ipc_kludge at 'ptr'. */
				struct ipc_kludge tmp;
				if (!ptr)
					return -EINVAL;

				if (copy_from_user(&tmp,
						   (struct ipc_kludge __user *) ptr,
						   sizeof (tmp)))
					return -EFAULT;
				return sys_msgrcv (first, tmp.msgp, second,
						   tmp.msgtyp, third);
				}
			default:
				/* New-style: msgtyp arrives in 'fifth'. */
				return sys_msgrcv (first,
						   (struct msgbuf __user *) ptr,
						   second, fifth, third);
			}
		case MSGGET:
			return sys_msgget ((key_t) first, second);
		case MSGCTL:
			return sys_msgctl (first, second,
					   (struct msqid_ds __user *) ptr);
		default:
			return -EINVAL;
		}

	/* Shared-memory calls: SHMAT..SHMCTL */
	if (call <= SHMCTL)
		switch (call) {
		case SHMAT:
			switch (version) {
			default: {
				/* Attach, then write the resulting address
				 * back to user space via 'third' (treated
				 * as a user pointer here). */
				ulong raddr;
				ret = do_shmat (first, (char __user *) ptr,
						 second, &raddr);
				if (ret)
					return ret;
				return put_user (raddr, (ulong __user *) third);
			}
			case 1:	/* iBCS2 emulator entry point */
				/* Only valid with a kernel-space fs segment;
				 * 'third' is then a kernel pointer. */
				if (!segment_eq(get_fs(), get_ds()))
					return -EINVAL;
				return do_shmat (first, (char __user *) ptr,
						  second, (ulong *) third);
			}
		case SHMDT:
			return sys_shmdt ((char __user *)ptr);
		case SHMGET:
			return sys_shmget (first, second, third);
		case SHMCTL:
			return sys_shmctl (first, second,
					   (struct shmid_ds __user *) ptr);
		default:
			return -EINVAL;
		}

	return -EINVAL;
}
263 
264 asmlinkage int sys_uname(struct old_utsname * name)
265 {
266 	int err;
267 	if (!name)
268 		return -EFAULT;
269 	down_read(&uts_sem);
270 	err=copy_to_user(name, &system_utsname, sizeof (*name));
271 	up_read(&uts_sem);
272 	return err?-EFAULT:0;
273 }
274 
275 asmlinkage ssize_t sys_pread_wrapper(unsigned int fd, char * buf,
276 			     size_t count, long dummy, loff_t pos)
277 {
278 	return sys_pread64(fd, buf, count, pos);
279 }
280 
281 asmlinkage ssize_t sys_pwrite_wrapper(unsigned int fd, const char * buf,
282 			      size_t count, long dummy, loff_t pos)
283 {
284 	return sys_pwrite64(fd, buf, count, pos);
285 }
286 
287 asmlinkage int sys_fadvise64_64_wrapper(int fd, u32 offset0, u32 offset1,
288 				u32 len0, u32 len1, int advice)
289 {
290 #ifdef  __LITTLE_ENDIAN__
291 	return sys_fadvise64_64(fd, (u64)offset1 << 32 | offset0,
292 				(u64)len1 << 32 | len0,	advice);
293 #else
294 	return sys_fadvise64_64(fd, (u64)offset0 << 32 | offset1,
295 				(u64)len0 << 32 | len1,	advice);
296 #endif
297 }
298