/*
 * linux/arch/sh/kernel/sys_sh.c
 *
 * This file contains various random system calls that
 * have a non-standard calling sequence on the Linux/SuperH
 * platform.
 *
 * Taken from i386 version.
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/ipc.h>
#include <asm/cacheflush.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>

unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */
EXPORT_SYMBOL(shm_align_mask);

#ifdef CONFIG_MMU
/*
 * To avoid cache aliases, we map the shared page with same color.
 */
#define COLOUR_ALIGN(addr, pgoff)				\
	((((addr) + shm_align_mask) & ~shm_align_mask) +	\
	 (((pgoff) << PAGE_SHIFT) & shm_align_mask))
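/*
 * Worked example for COLOUR_ALIGN() (illustrative values only, assuming
 * PAGE_SHIFT == 12 and a hypothetical shm_align_mask of 0x3fff, as a CPU
 * with aliasing caches might set up): COLOUR_ALIGN(0x20001000, 3) first
 * rounds the address up to the mask boundary,
 * (0x20001000 + 0x3fff) & ~0x3fff == 0x20004000, then adds the colour of
 * the file offset, (3 << PAGE_SHIFT) & 0x3fff == 0x3000, giving
 * 0x20007000.  The low bits of the result match those of the backing
 * file offset, so shared mappings of the same page land on the same
 * cache colour.
 */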
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;
	int do_colour_align;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) && (addr & shm_align_mask))
			return -EINVAL;
		return addr;
	}

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	do_colour_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_colour_align = 1;

	if (addr) {
		if (do_colour_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	if (len > mm->cached_hole_size) {
		start_addr = addr = mm->free_area_cache;
	} else {
		mm->cached_hole_size = 0;
		start_addr = addr = TASK_UNMAPPED_BASE;
	}

full_search:
	if (do_colour_align)
		addr = COLOUR_ALIGN(addr, pgoff);
	else
		addr = PAGE_ALIGN(mm->free_area_cache);

	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point: (!vma || addr < vma->vm_end). */
		if (unlikely(TASK_SIZE - len < addr)) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = addr = TASK_UNMAPPED_BASE;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (likely(!vma || addr + len <= vma->vm_start)) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		addr = vma->vm_end;
		if (do_colour_align)
			addr = COLOUR_ALIGN(addr, pgoff);
	}
}
#endif /* CONFIG_MMU */

static inline long
do_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
	 unsigned long flags, int fd, unsigned long pgoff)
{
	int error = -EBADF;
	struct file *file = NULL;

	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
	if (!(flags & MAP_ANONYMOUS)) {
		file = fget(fd);
		if (!file)
			goto out;
	}

	down_write(&current->mm->mmap_sem);
	error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
	up_write(&current->mm->mmap_sem);

	if (file)
		fput(file);
out:
	return error;
}

asmlinkage int old_mmap(unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags,
	int fd, unsigned long off)
{
	if (off & ~PAGE_MASK)
		return -EINVAL;
	return do_mmap2(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
}

asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags,
	unsigned long fd, unsigned long pgoff)
{
	return do_mmap2(addr, len, prot, flags, fd, pgoff);
}

/*
 * sys_ipc() is the de-multiplexer for the SysV IPC calls..
 *
 * This is really horribly ugly.
 */
asmlinkage int sys_ipc(uint call, int first, int second,
		       int third, void __user *ptr, long fifth)
{
	int version, ret;

	version = call >> 16; /* hack for backward compatibility */
	call &= 0xffff;

	if (call <= SEMTIMEDOP)
		switch (call) {
		case SEMOP:
			return sys_semtimedop(first,
					      (struct sembuf __user *)ptr,
					      second, NULL);
		case SEMTIMEDOP:
			return sys_semtimedop(first,
				(struct sembuf __user *)ptr, second,
				(const struct timespec __user *)fifth);
		case SEMGET:
			return sys_semget (first, second, third);
		case SEMCTL: {
			union semun fourth;
			if (!ptr)
				return -EINVAL;
			if (get_user(fourth.__pad, (void * __user *) ptr))
				return -EFAULT;
			return sys_semctl (first, second, third, fourth);
			}
		default:
			return -EINVAL;
		}

	if (call <= MSGCTL)
		switch (call) {
		case MSGSND:
			return sys_msgsnd (first, (struct msgbuf __user *) ptr,
					   second, third);
		case MSGRCV:
			switch (version) {
			case 0:
			{
				struct ipc_kludge tmp;

				if (!ptr)
					return -EINVAL;

				if (copy_from_user(&tmp,
					(struct ipc_kludge __user *) ptr,
						   sizeof (tmp)))
					return -EFAULT;

				return sys_msgrcv (first, tmp.msgp, second,
						   tmp.msgtyp, third);
			}
			default:
				return sys_msgrcv (first,
						   (struct msgbuf __user *) ptr,
						   second, fifth, third);
			}
		case MSGGET:
			return sys_msgget ((key_t) first, second);
		case MSGCTL:
			return sys_msgctl (first, second,
					   (struct msqid_ds __user *) ptr);
		default:
			return -EINVAL;
		}
	if (call <= SHMCTL)
		switch (call) {
		case SHMAT:
			switch (version) {
			default: {
				ulong raddr;
				ret = do_shmat (first, (char __user *) ptr,
						second, &raddr);
				if (ret)
					return ret;
				return put_user (raddr, (ulong __user *) third);
			}
			case 1:	/* iBCS2 emulator entry point */
				if (!segment_eq(get_fs(), get_ds()))
					return -EINVAL;
				return do_shmat (first, (char __user *) ptr,
						 second, (ulong *) third);
			}
		case SHMDT:
			return sys_shmdt ((char __user *)ptr);
		case SHMGET:
			return sys_shmget (first, second, third);
		case SHMCTL:
			return sys_shmctl (first, second,
					   (struct shmid_ds __user *) ptr);
		default:
			return -EINVAL;
		}

	return -EINVAL;
}
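/*
 * Example decoding for sys_ipc() above (illustrative value only): a call
 * argument of 0x0001000c carries version 1 in the upper 16 bits and
 * MSGRCV (12) in the lower 16 bits, so it selects the raw five-argument
 * msgrcv layout rather than the struct ipc_kludge layout used by
 * version 0.
 */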
asmlinkage int sys_uname(struct old_utsname * name)
{
	int err;
	if (!name)
		return -EFAULT;
	down_read(&uts_sem);
	err = copy_to_user(name, utsname(), sizeof (*name));
	up_read(&uts_sem);
	return err ? -EFAULT : 0;
}