// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/shm.h>
#include <linux/sched.h>
#include <linux/random.h>
#include <linux/io.h>

#define COLOUR_ALIGN(addr, pgoff)		\
	((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) +	\
	 (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))

/*
 * We need to ensure that shared mappings are correctly aligned to
 * avoid aliasing issues with VIPT caches.  We need to ensure that
 * a specific page of an object is always mapped at a multiple of
 * SHMLBA bytes.
 *
 * We unconditionally provide this function for all cases.
 */
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff,
		unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int do_align = 0;
	struct vm_unmapped_area_info info;

	/*
	 * We only need to do colour alignment if either the I or D
	 * caches alias.
	 */
	do_align = filp || (flags & MAP_SHARED);

	/*
	 * We enforce the MAP_FIXED case.
	 */
	if (flags & MAP_FIXED) {
		if (flags & MAP_SHARED &&
		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
			return -EINVAL;
		return addr;
	}

	if (len > TASK_SIZE)
		return -ENOMEM;

	if (addr) {
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	return vm_unmapped_area(&info);
}
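
/*
 * Standalone sketch, not part of the kernel file above: it only
 * illustrates the arithmetic that COLOUR_ALIGN() performs when a
 * non-fixed mapping hint is colour-aligned.  PAGE_SHIFT and SHMLBA
 * are assumed values chosen for the example (4 KiB pages, a colour
 * stride of four pages); the real constants come from the C-SKY
 * architecture headers, and the hint/pgoff values are hypothetical.
 */
#include <stdio.h>

#define EX_PAGE_SHIFT	12
#define EX_PAGE_SIZE	(1UL << EX_PAGE_SHIFT)
#define EX_SHMLBA	(4 * EX_PAGE_SIZE)	/* assumed colour stride */

/* Same expression as the kernel macro, using the example constants. */
#define EX_COLOUR_ALIGN(addr, pgoff)				\
	((((addr) + EX_SHMLBA - 1) & ~(EX_SHMLBA - 1)) +	\
	 (((pgoff) << EX_PAGE_SHIFT) & (EX_SHMLBA - 1)))

int main(void)
{
	unsigned long addr = 0x12345678UL;	/* hypothetical mmap() hint */
	unsigned long pgoff = 5;		/* hypothetical file page offset */
	unsigned long aligned = EX_COLOUR_ALIGN(addr, pgoff);

	/*
	 * The hint is first rounded up to an SHMLBA boundary, then offset
	 * by the colour of (pgoff << PAGE_SHIFT).  As a result,
	 * (aligned - (pgoff << PAGE_SHIFT)) is SHMLBA-aligned, which is
	 * exactly the condition the MAP_FIXED branch enforces with
	 * -EINVAL, so a given file page always maps at one cache colour.
	 */
	printf("hint    = %#lx\n", addr);
	printf("aligned = %#lx\n", aligned);
	printf("colour  = %#lx (pgoff colour %#lx)\n",
	       aligned & (EX_SHMLBA - 1),
	       (pgoff << EX_PAGE_SHIFT) & (EX_SHMLBA - 1));
	return 0;
}

/*
 * With the assumed constants, the hint 0x12345678 rounds up to
 * 0x12348000 and the colour offset 0x1000 is added, giving 0x12349000;
 * 0x12349000 - 0x5000 = 0x12344000 is SHMLBA-aligned, so this address
 * would also pass the MAP_FIXED check.
 */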