// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#include <linux/export.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/mman.h>

#define SHM_ALIGN_MASK	(SHMLBA - 1)

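/*
 * Round addr up to an SHMLBA boundary, then add the cache colour of the
 * given file offset (pgoff), so that all mappings of the same offset end
 * up at the same colour in a virtually indexed cache.
 */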
#define COLOUR_ALIGN(addr, pgoff)			\
	((((addr) + SHM_ALIGN_MASK) & ~SHM_ALIGN_MASK)	\
	 + (((pgoff) << PAGE_SHIFT) & SHM_ALIGN_MASK))

enum mmap_allocation_direction {UP, DOWN};
18

static unsigned long arch_get_unmapped_area_common(struct file *filp,
	unsigned long addr0, unsigned long len, unsigned long pgoff,
	unsigned long flags, enum mmap_allocation_direction dir)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr = addr0;
	int do_color_align;
	struct vm_unmapped_area_info info;

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		/* Even MAP_FIXED mappings must reside within TASK_SIZE */
		if (TASK_SIZE - len < addr)
			return -EINVAL;

		/*
		 * We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & SHM_ALIGN_MASK))
			return -EINVAL;
		return addr;
	}

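	/*
	 * Only file-backed and shared mappings can appear at several
	 * virtual addresses at once, so only those need colour (SHMLBA)
	 * alignment; private anonymous mappings can go anywhere.
	 */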
	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;

	/* requesting a specific address */
	if (addr) {
		if (do_color_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

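	/*
	 * No usable hint: let the generic allocator find a range. The
	 * align_mask/align_offset pair expresses the same colouring that
	 * COLOUR_ALIGN applies to explicit hints.
	 */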
	info.length = len;
	info.align_mask = do_color_align ? (PAGE_MASK & SHM_ALIGN_MASK) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;

	if (dir == DOWN) {
		info.flags = VM_UNMAPPED_AREA_TOPDOWN;
		info.low_limit = PAGE_SIZE;
		info.high_limit = mm->mmap_base;
		addr = vm_unmapped_area(&info);

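		/* Page-aligned means success; error codes are never page-aligned */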
		if (!(addr & ~PAGE_MASK))
			return addr;

		/*
		 * A failed mmap() very likely causes application failure,
		 * so fall back to the bottom-up function here. This scenario
		 * can happen with large stack limits and large mmap()
		 * allocations.
		 */
	}

	info.flags = 0;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	return vm_unmapped_area(&info);
}

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	return arch_get_unmapped_area_common(filp,
			addr0, len, pgoff, flags, UP);
}

/*
 * There is no need to export this but sched.h declares the function as
 * extern so making it static here results in an error.
 */
unsigned long arch_get_unmapped_area_topdown(struct file *filp,
	unsigned long addr0, unsigned long len, unsigned long pgoff,
	unsigned long flags)
{
	return arch_get_unmapped_area_common(filp,
			addr0, len, pgoff, flags, DOWN);
}

int __virt_addr_valid(volatile void *kaddr)
{
	unsigned long vaddr = (unsigned long)kaddr;

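	/*
	 * Only the linearly mapped window [PAGE_OFFSET, vm_map_base)
	 * corresponds to a physical address that PHYSADDR() can recover;
	 * anything outside it cannot be a valid direct-mapped kernel
	 * address.
	 */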
	if ((vaddr < PAGE_OFFSET) || (vaddr >= vm_map_base))
		return 0;

	return pfn_valid(PFN_DOWN(PHYSADDR(kaddr)));
}
EXPORT_SYMBOL_GPL(__virt_addr_valid);

/*
 * You really shouldn't be using read() or write() on /dev/mem. This might go
 * away in the future.
 */
int valid_phys_addr_range(phys_addr_t addr, size_t size)
{
	/*
	 * Check whether addr is covered by a memory region without the
	 * MEMBLOCK_NOMAP attribute, and whether that region covers the
	 * entire range. In theory, this could lead to false negatives
	 * if the range is covered by distinct but adjacent memory regions
	 * that only differ in other attributes. However, few such
	 * attributes have been defined, and it is debatable whether it
	 * follows that /dev/mem read() calls should be able to traverse
	 * such boundaries.
	 */
	return memblock_is_region_memory(addr, size) &&
	       memblock_is_map_memory(addr);
}

/*
 * Do not allow /dev/mem mappings beyond the supported physical range.
 */
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
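	/*
	 * GENMASK_ULL(cpu_pabits, 0) is the mask of all implemented
	 * physical address bits, so any end address with a bit set
	 * above it lies outside the supported physical range.
	 */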
	return !(((pfn << PAGE_SHIFT) + size) & ~(GENMASK_ULL(cpu_pabits, 0)));
}