// SPDX-License-Identifier: GPL-2.0+
/*
 *  flexible mmap layout support
 *
 * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
 * All Rights Reserved.
 *
 * Started by Ingo Molnar <mingo@elte.hu>
 */

#include <linux/elf-randomize.h>
#include <linux/personality.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/random.h>
#include <linux/compat.h>
#include <linux/security.h>
#include <asm/elf.h>

static unsigned long stack_maxrandom_size(void)
{
	if (!(current->flags & PF_RANDOMIZE))
		return 0;
	return STACK_RND_MASK << PAGE_SHIFT;
}

static inline int mmap_is_legacy(struct rlimit *rlim_stack)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;
	if (rlim_stack->rlim_cur == RLIM_INFINITY)
		return 1;
	return sysctl_legacy_va_layout;
}

unsigned long arch_mmap_rnd(void)
{
	return (get_random_u32() & MMAP_RND_MASK) << PAGE_SHIFT;
}

static unsigned long mmap_base_legacy(unsigned long rnd)
{
	return TASK_UNMAPPED_BASE + rnd;
}

static inline unsigned long mmap_base(unsigned long rnd,
				      struct rlimit *rlim_stack)
{
	unsigned long gap = rlim_stack->rlim_cur;
	unsigned long pad = stack_maxrandom_size() + stack_guard_gap;
	unsigned long gap_min, gap_max;

	/* Values close to RLIM_INFINITY can overflow. */
	if (gap + pad > gap)
		gap += pad;

	/*
	 * Top of mmap area (just below the process stack).
	 * Leave at least a ~128 MB hole.
	 */
	gap_min = SZ_128M;
	gap_max = (STACK_TOP / 6) * 5;

	if (gap < gap_min)
		gap = gap_min;
	else if (gap > gap_max)
		gap = gap_max;

	return PAGE_ALIGN(STACK_TOP - gap - rnd);
}

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
				     unsigned long len, unsigned long pgoff,
				     unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct vm_unmapped_area_info info;

	if (len > TASK_SIZE - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		goto check_asce_limit;

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			goto check_asce_limit;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	if (filp || (flags & MAP_SHARED))
		info.align_mask = MMAP_ALIGN_MASK << PAGE_SHIFT;
	else
		info.align_mask = 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);
	if (offset_in_page(addr))
		return addr;

check_asce_limit:
	return check_asce_limit(mm, addr, len);
}

unsigned long arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
					     unsigned long len, unsigned long pgoff,
					     unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	struct vm_unmapped_area_info info;

	/* requested length too big for entire address space */
	if (len > TASK_SIZE - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		goto check_asce_limit;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			goto check_asce_limit;
	}
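	/*
	 * Top-down search: between PAGE_SIZE and the randomized
	 * mmap base, VM_UNMAPPED_AREA_TOPDOWN picks the highest
	 * suitable gap first, so mappings fill the region just
	 * below mmap_base before descending toward low addresses.
	 */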
	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = mm->mmap_base;
	if (filp || (flags & MAP_SHARED))
		info.align_mask = MMAP_ALIGN_MASK << PAGE_SHIFT;
	else
		info.align_mask = 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (offset_in_page(addr)) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
		if (offset_in_page(addr))
			return addr;
	}

check_asce_limit:
	return check_asce_limit(mm, addr, len);
}

/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	/*
	 * Fall back to the standard layout if the personality
	 * bit is set, or if the expected stack growth is unlimited:
	 */
	if (mmap_is_legacy(rlim_stack)) {
		mm->mmap_base = mmap_base_legacy(random_factor);
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(random_factor, rlim_stack);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}

static const pgprot_t protection_map[16] = {
	[VM_NONE]					= PAGE_NONE,
	[VM_READ]					= PAGE_RO,
	[VM_WRITE]					= PAGE_RO,
	[VM_WRITE | VM_READ]				= PAGE_RO,
	[VM_EXEC]					= PAGE_RX,
	[VM_EXEC | VM_READ]				= PAGE_RX,
	[VM_EXEC | VM_WRITE]				= PAGE_RX,
	[VM_EXEC | VM_WRITE | VM_READ]			= PAGE_RX,
	[VM_SHARED]					= PAGE_NONE,
	[VM_SHARED | VM_READ]				= PAGE_RO,
	[VM_SHARED | VM_WRITE]				= PAGE_RW,
	[VM_SHARED | VM_WRITE | VM_READ]		= PAGE_RW,
	[VM_SHARED | VM_EXEC]				= PAGE_RX,
	[VM_SHARED | VM_EXEC | VM_READ]			= PAGE_RX,
	[VM_SHARED | VM_EXEC | VM_WRITE]		= PAGE_RWX,
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= PAGE_RWX
};
DECLARE_VM_GET_PAGE_PROT
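
/*
 * Illustration of the table above (hypothetical protection values):
 * a private PROT_READ|PROT_WRITE mapping resolves to PAGE_RO, so the
 * first write takes a protection fault that is handled as copy-on-write;
 * only VM_SHARED writable mappings get PAGE_RW/PAGE_RWX directly.
 */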