// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/shm.h>
#include <linux/sched.h>
#include <linux/random.h>
#include <linux/io.h>

#define COLOUR_ALIGN(addr,pgoff)		\
	((((addr)+SHMLBA-1)&~(SHMLBA-1)) +	\
	 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
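
/*
 * Illustrative example (an assumption for this sketch: 4 KiB pages and
 * SHMLBA == 4 * PAGE_SIZE == 0x4000, i.e. a four-colour VIPT cache):
 *
 *   COLOUR_ALIGN(0x20001234, 3)
 *     = ((0x20001234 + 0x3fff) & ~0x3fff) + ((3 << 12) & 0x3fff)
 *     = 0x20004000 + 0x3000
 *     = 0x20007000
 *
 * The result is page aligned and its offset within the SHMLBA window
 * (0x3000) matches the colour of file page 3, so every mapping of that
 * page lands on the same cache colour.
 */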

/*
 * We need to ensure that shared mappings are correctly aligned to
 * avoid aliasing issues with VIPT caches. We need to ensure that
 * a specific page of an object is always mapped at a multiple of
 * SHMLBA bytes.
 *
 * We unconditionally provide this function for all cases.
 */
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int do_align = 0;
	struct vm_unmapped_area_info info;

	/*
	 * We only need to do colour alignment if either the I or D
	 * caches alias.
	 */
	do_align = filp || (flags & MAP_SHARED);

	/*
	 * We enforce the MAP_FIXED case.
	 */
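	/*
	 * For a fixed shared mapping the caller has already chosen the
	 * address, so all we can do is verify that its cache colour
	 * matches the file offset's colour (addr and pgoff << PAGE_SHIFT
	 * agree modulo SHMLBA) and refuse with -EINVAL otherwise.
	 */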
	if (flags & MAP_FIXED) {
		if (flags & MAP_SHARED &&
		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
			return -EINVAL;
		return addr;
	}

	if (len > TASK_SIZE)
		return -ENOMEM;

	if (addr) {
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

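	/*
	 * Fall back to the generic search. align_mask keeps only the
	 * colour bits between PAGE_SHIFT and SHMLBA, and align_offset
	 * asks vm_unmapped_area() for an address whose colour matches
	 * the file offset, so the returned mapping obeys the same
	 * SHMLBA constraint as COLOUR_ALIGN() above.
	 */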
	info.flags = 0;
	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	return vm_unmapped_area(&info);
}