/*
 * PARISC specific syscalls
 *
 * Copyright (C) 1999-2003 Matthew Wilcox <willy at parisc-linux.org>
 * Copyright (C) 2000-2003 Paul Bame <bame at parisc-linux.org>
 * Copyright (C) 2001 Thomas Bogendoerfer <tsbogend at parisc-linux.org>
 * Copyright (C) 1999-2014 Helge Deller <deller@gmx.de>
 *
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; either version 2 of the License, or
 *    (at your option) any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/uaccess.h>
#include <asm/elf.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/linkage.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/shm.h>
#include <linux/syscalls.h>
#include <linux/utsname.h>
#include <linux/personality.h>
#include <linux/random.h>

/* we construct an artificial offset for the mapping based on the physical
 * address of the kernel mapping variable */
#define GET_LAST_MMAP(filp)		\
	(filp ? ((unsigned long) filp->f_mapping) >> 8 : 0UL)
#define SET_LAST_MMAP(filp, val)	\
	{ /* nothing */ }
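
/*
 * PA-RISC caches are virtually indexed, so all shared mappings of the
 * same object must live at addresses that are congruent modulo
 * SHM_COLOUR or they would alias in the cache.  The helpers below turn
 * the per-file value from GET_LAST_MMAP() into a page offset within the
 * colour region and align a requested address to a matching colour.
 */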

static int get_offset(unsigned int last_mmap)
{
	return (last_mmap & (SHM_COLOUR-1)) >> PAGE_SHIFT;
}

static unsigned long shared_align_offset(unsigned int last_mmap,
					 unsigned long pgoff)
{
	return (get_offset(last_mmap) + pgoff) << PAGE_SHIFT;
}

static inline unsigned long COLOR_ALIGN(unsigned long addr,
			 unsigned int last_mmap, unsigned long pgoff)
{
	unsigned long base = (addr+SHM_COLOUR-1) & ~(SHM_COLOUR-1);
	unsigned long off  = (SHM_COLOUR-1) &
		(shared_align_offset(last_mmap, pgoff) << PAGE_SHIFT);

	return base + off;
}

/*
 * Top of mmap area (just below the process stack).
 */

static unsigned long mmap_upper_limit(void)
{
	unsigned long stack_base;

	/* Limit stack size - see setup_arg_pages() in fs/exec.c */
	stack_base = rlimit_max(RLIMIT_STACK);
	if (stack_base > STACK_SIZE_MAX)
		stack_base = STACK_SIZE_MAX;

	/* Add space for stack randomization. */
	stack_base += (STACK_RND_MASK << PAGE_SHIFT);

	return PAGE_ALIGN(STACK_TOP - stack_base);
}


unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma, *prev;
	unsigned long task_size = TASK_SIZE;
	int do_color_align, last_mmap;
	struct vm_unmapped_area_info info;

	if (len > task_size)
		return -ENOMEM;

	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;
	last_mmap = GET_LAST_MMAP(filp);

	if (flags & MAP_FIXED) {
		if ((flags & MAP_SHARED) && last_mmap &&
		    (addr - shared_align_offset(last_mmap, pgoff))
				& (SHM_COLOUR - 1))
			return -EINVAL;
		goto found_addr;
	}

	if (addr) {
		if (do_color_align && last_mmap)
			addr = COLOR_ALIGN(addr, last_mmap, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma_prev(mm, addr, &prev);
		if (task_size - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)) &&
		    (!prev || addr >= vm_end_gap(prev)))
			goto found_addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = mm->mmap_legacy_base;
	info.high_limit = mmap_upper_limit();
	info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
	info.align_offset = shared_align_offset(last_mmap, pgoff);
	addr = vm_unmapped_area(&info);

found_addr:
	if (do_color_align && !last_mmap && !(addr & ~PAGE_MASK))
		SET_LAST_MMAP(filp, addr - (pgoff << PAGE_SHIFT));

	return addr;
}

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct vm_area_struct *vma, *prev;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	int do_color_align, last_mmap;
	struct vm_unmapped_area_info info;

#ifdef CONFIG_64BIT
	/* This should only ever run for 32-bit processes.  */
	BUG_ON(!test_thread_flag(TIF_32BIT));
#endif

	/* requested length too big for entire address space */
	if (len > TASK_SIZE)
		return -ENOMEM;

	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;
	last_mmap = GET_LAST_MMAP(filp);

	if (flags & MAP_FIXED) {
		if ((flags & MAP_SHARED) && last_mmap &&
		    (addr - shared_align_offset(last_mmap, pgoff))
			& (SHM_COLOUR - 1))
			return -EINVAL;
		goto found_addr;
	}

	/* requesting a specific address */
	if (addr) {
		if (do_color_align && last_mmap)
			addr = COLOR_ALIGN(addr, last_mmap, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma_prev(mm, addr, &prev);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)) &&
		    (!prev || addr >= vm_end_gap(prev)))
			goto found_addr;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = mm->mmap_base;
	info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
	info.align_offset = shared_align_offset(last_mmap, pgoff);
	addr = vm_unmapped_area(&info);
	if (!(addr & ~PAGE_MASK))
		goto found_addr;
	VM_BUG_ON(addr != -ENOMEM);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);

found_addr:
	if (do_color_align && !last_mmap && !(addr & ~PAGE_MASK))
		SET_LAST_MMAP(filp, addr - (pgoff << PAGE_SHIFT));

	return addr;
}

static int mmap_is_legacy(void)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;

	/* parisc stack always grows up - so an unlimited stack should
	 * not be an indicator to use the legacy memory layout.
	 * if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
	 *	return 1;
	 */

	return sysctl_legacy_va_layout;
}
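
/*
 * mmap base randomization: when the task has PF_RANDOMIZE set, the base
 * is moved by a random number of pages, bounded by MMAP_RND_MASK;
 * otherwise no random offset is applied.
 */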

static unsigned long mmap_rnd(void)
{
	unsigned long rnd = 0;

	if (current->flags & PF_RANDOMIZE)
		rnd = get_random_int() & MMAP_RND_MASK;

	return rnd << PAGE_SHIFT;
}

unsigned long arch_mmap_rnd(void)
{
	return (get_random_int() & MMAP_RND_MASK) << PAGE_SHIFT;
}

static unsigned long mmap_legacy_base(void)
{
	return TASK_UNMAPPED_BASE + mmap_rnd();
}

/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	mm->mmap_legacy_base = mmap_legacy_base();
	mm->mmap_base = mmap_upper_limit();

	if (mmap_is_legacy()) {
		mm->mmap_base = mm->mmap_legacy_base;
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}


asmlinkage unsigned long sys_mmap2(unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags, unsigned long fd,
	unsigned long pgoff)
{
	/* Make sure the shift for mmap2 is constant (12), no matter what
	   PAGE_SIZE we have. */
	return sys_mmap_pgoff(addr, len, prot, flags, fd,
			      pgoff >> (PAGE_SHIFT - 12));
}

asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
		unsigned long prot, unsigned long flags, unsigned long fd,
		unsigned long offset)
{
	if (!(offset & ~PAGE_MASK)) {
		return sys_mmap_pgoff(addr, len, prot, flags, fd,
					offset >> PAGE_SHIFT);
	} else {
		return -EINVAL;
	}
}

/* Fucking broken ABI */
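/*
 * Userspace hands 64-bit file offsets and lengths to these syscalls as
 * two 32-bit halves, so the parisc_* wrappers below reassemble
 * high << 32 | low into a single 64-bit value before calling the
 * generic implementation.
 */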

#ifdef CONFIG_64BIT
asmlinkage long parisc_truncate64(const char __user * path,
					unsigned int high, unsigned int low)
{
	return sys_truncate(path, (long)high << 32 | low);
}

asmlinkage long parisc_ftruncate64(unsigned int fd,
					unsigned int high, unsigned int low)
{
	return sys_ftruncate(fd, (long)high << 32 | low);
}

/* stubs for the benefit of the syscall_table since truncate64 and truncate
 * are identical on LP64 */
asmlinkage long sys_truncate64(const char __user * path, unsigned long length)
{
	return sys_truncate(path, length);
}
asmlinkage long sys_ftruncate64(unsigned int fd, unsigned long length)
{
	return sys_ftruncate(fd, length);
}
asmlinkage long sys_fcntl64(unsigned int fd, unsigned int cmd, unsigned long arg)
{
	return sys_fcntl(fd, cmd, arg);
}
#else

asmlinkage long parisc_truncate64(const char __user * path,
					unsigned int high, unsigned int low)
{
	return sys_truncate64(path, (loff_t)high << 32 | low);
}

asmlinkage long parisc_ftruncate64(unsigned int fd,
					unsigned int high, unsigned int low)
{
	return sys_ftruncate64(fd, (loff_t)high << 32 | low);
}
#endif

asmlinkage ssize_t parisc_pread64(unsigned int fd, char __user *buf, size_t count,
					unsigned int high, unsigned int low)
{
	return sys_pread64(fd, buf, count, (loff_t)high << 32 | low);
}

asmlinkage ssize_t parisc_pwrite64(unsigned int fd, const char __user *buf,
			size_t count, unsigned int high, unsigned int low)
{
	return sys_pwrite64(fd, buf, count, (loff_t)high << 32 | low);
}

asmlinkage ssize_t parisc_readahead(int fd, unsigned int high, unsigned int low,
				    size_t count)
{
	return sys_readahead(fd, (loff_t)high << 32 | low, count);
}

asmlinkage long parisc_fadvise64_64(int fd,
			unsigned int high_off, unsigned int low_off,
			unsigned int high_len, unsigned int low_len, int advice)
{
	return sys_fadvise64_64(fd, (loff_t)high_off << 32 | low_off,
			(loff_t)high_len << 32 | low_len, advice);
}

asmlinkage long parisc_sync_file_range(int fd,
			u32 hi_off, u32 lo_off, u32 hi_nbytes, u32 lo_nbytes,
			unsigned int flags)
{
	return sys_sync_file_range(fd, (loff_t)hi_off << 32 | lo_off,
			(loff_t)hi_nbytes << 32 | lo_nbytes, flags);
}

asmlinkage long parisc_fallocate(int fd, int mode, u32 offhi, u32 offlo,
				u32 lenhi, u32 lenlo)
{
	return sys_fallocate(fd, mode, ((u64)offhi << 32) | offlo,
			     ((u64)lenhi << 32) | lenlo);
}

long parisc_personality(unsigned long personality)
{
	long err;

	if (personality(current->personality) == PER_LINUX32
	    && personality(personality) == PER_LINUX)
		personality = (personality & ~PER_MASK) | PER_LINUX32;

	err = sys_personality(personality);
	if (personality(err) == PER_LINUX32)
		err = (err & ~PER_MASK) | PER_LINUX;

	return err;
}