// SPDX-License-Identifier: GPL-2.0+
/*
 *  flexible mmap layout support
 *
 * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
 * All Rights Reserved.
 *
 * Started by Ingo Molnar <mingo@elte.hu>
 */

#include <linux/elf-randomize.h>
#include <linux/personality.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/random.h>
#include <linux/compat.h>
#include <linux/security.h>
#include <asm/pgalloc.h>
#include <asm/elf.h>

/* Maximum number of bytes the stack start may be shifted by randomization. */
static unsigned long stack_maxrandom_size(void)
{
	if (!(current->flags & PF_RANDOMIZE))
		return 0;
	if (current->personality & ADDR_NO_RANDOMIZE)
		return 0;
	return STACK_RND_MASK << PAGE_SHIFT;
}

/* Use the legacy bottom-up layout if requested or if the stack is unlimited. */
static inline int mmap_is_legacy(struct rlimit *rlim_stack)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;
	if (rlim_stack->rlim_cur == RLIM_INFINITY)
		return 1;
	return sysctl_legacy_va_layout;
}

/* Random offset, in bytes, applied to the mmap base. */
unsigned long arch_mmap_rnd(void)
{
	return (get_random_int() & MMAP_RND_MASK) << PAGE_SHIFT;
}

static unsigned long mmap_base_legacy(unsigned long rnd)
{
	return TASK_UNMAPPED_BASE + rnd;
}

static inline unsigned long mmap_base(unsigned long rnd,
				      struct rlimit *rlim_stack)
{
	unsigned long gap = rlim_stack->rlim_cur;
	unsigned long pad = stack_maxrandom_size() + stack_guard_gap;
	unsigned long gap_min, gap_max;

	/* Values close to RLIM_INFINITY can overflow. */
	if (gap + pad > gap)
		gap += pad;

	/*
	 * Top of mmap area (just below the process stack).
	 * Leave at least a ~32 MB hole.
	 */
	gap_min = 32 * 1024 * 1024UL;
	gap_max = (STACK_TOP / 6) * 5;

	if (gap < gap_min)
		gap = gap_min;
	else if (gap > gap_max)
		gap = gap_max;

	return PAGE_ALIGN(STACK_TOP - gap - rnd);
}

/* Bottom-up search for a free area, used for the legacy mmap layout. */
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct vm_unmapped_area_info info;
	int rc;

	if (len > TASK_SIZE - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		goto check_asce_limit;

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			goto check_asce_limit;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	if (filp || (flags & MAP_SHARED))
		info.align_mask = MMAP_ALIGN_MASK << PAGE_SHIFT;
	else
		info.align_mask = 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);
	if (addr & ~PAGE_MASK)
		return addr;

check_asce_limit:
	if (addr + len > current->mm->context.asce_limit &&
	    addr + len <= TASK_SIZE) {
		rc = crst_table_upgrade(mm, addr + len);
		if (rc)
			return (unsigned long) rc;
	}

	return addr;
}

/* Top-down search for a free area, used for the default mmap layout. */
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	struct vm_unmapped_area_info info;
	int rc;

	/* requested length too big for entire address space */
	if (len > TASK_SIZE - mmap_min_addr)
		return -ENOMEM;

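	/*
	 * MAP_FIXED means the caller insists on @addr: skip the free
	 * area search; only the ASCE limit still has to be checked below.
	 */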
	if (flags & MAP_FIXED)
		goto check_asce_limit;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			goto check_asce_limit;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
	info.high_limit = mm->mmap_base;
	if (filp || (flags & MAP_SHARED))
		info.align_mask = MMAP_ALIGN_MASK << PAGE_SHIFT;
	else
		info.align_mask = 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
		if (addr & ~PAGE_MASK)
			return addr;
	}

check_asce_limit:
	if (addr + len > current->mm->context.asce_limit &&
	    addr + len <= TASK_SIZE) {
		rc = crst_table_upgrade(mm, addr + len);
		if (rc)
			return (unsigned long) rc;
	}

	return addr;
}

/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	/*
	 * Fall back to the legacy bottom-up layout if the personality
	 * bit is set, or if the expected stack growth is unlimited:
	 */
	if (mmap_is_legacy(rlim_stack)) {
		mm->mmap_base = mmap_base_legacy(random_factor);
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(random_factor, rlim_stack);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}