// SPDX-License-Identifier: GPL-2.0+
/*
 *  flexible mmap layout support
 *
 * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
 * All Rights Reserved.
 *
 * Started by Ingo Molnar <mingo@elte.hu>
 */

#include <linux/elf-randomize.h>
#include <linux/personality.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/random.h>
#include <linux/compat.h>
#include <linux/security.h>
#include <asm/pgalloc.h>
#include <asm/elf.h>

/* Maximum number of bytes the stack start may be randomized by. */
static unsigned long stack_maxrandom_size(void)
{
	if (!(current->flags & PF_RANDOMIZE))
		return 0;
	return STACK_RND_MASK << PAGE_SHIFT;
}

/*
 * Use the legacy bottom-up layout if the compat personality bit is set,
 * the stack may grow without limit, or the sysctl forces it.
 */
static inline int mmap_is_legacy(struct rlimit *rlim_stack)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;
	if (rlim_stack->rlim_cur == RLIM_INFINITY)
		return 1;
	return sysctl_legacy_va_layout;
}

/* Random offset, in bytes, applied to the mmap base. */
unsigned long arch_mmap_rnd(void)
{
	return (get_random_int() & MMAP_RND_MASK) << PAGE_SHIFT;
}

/* Bottom-up layout: the mmap area grows up from TASK_UNMAPPED_BASE. */
static unsigned long mmap_base_legacy(unsigned long rnd)
{
	return TASK_UNMAPPED_BASE + rnd;
}

static inline unsigned long mmap_base(unsigned long rnd,
				      struct rlimit *rlim_stack)
{
	unsigned long gap = rlim_stack->rlim_cur;
	unsigned long pad = stack_maxrandom_size() + stack_guard_gap;
	unsigned long gap_min, gap_max;

	/* Values close to RLIM_INFINITY can overflow. */
	if (gap + pad > gap)
		gap += pad;

	/*
	 * Top of mmap area (just below the process stack).
	 * Leave at least a ~32 MB hole.
	 */
	gap_min = 32 * 1024 * 1024UL;
	gap_max = (STACK_TOP / 6) * 5;

	if (gap < gap_min)
		gap = gap_min;
	else if (gap > gap_max)
		gap = gap_max;

	return PAGE_ALIGN(STACK_TOP - gap - rnd);
}

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
				     unsigned long len, unsigned long pgoff,
				     unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct vm_unmapped_area_info info;

	if (len > TASK_SIZE - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		goto check_asce_limit;

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			goto check_asce_limit;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	if (filp || (flags & MAP_SHARED))
		info.align_mask = MMAP_ALIGN_MASK << PAGE_SHIFT;
	else
		info.align_mask = 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);
	if (offset_in_page(addr))
		return addr;

check_asce_limit:
	return check_asce_limit(mm, addr, len);
}

unsigned long arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
					     unsigned long len, unsigned long pgoff,
					     unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	struct vm_unmapped_area_info info;

	/* requested length too big for entire address space */
	if (len > TASK_SIZE - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		goto check_asce_limit;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			goto check_asce_limit;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
	info.high_limit = mm->mmap_base;
	if (filp || (flags & MAP_SHARED))
		info.align_mask = MMAP_ALIGN_MASK << PAGE_SHIFT;
	else
		info.align_mask = 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (offset_in_page(addr)) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
		if (offset_in_page(addr))
			return addr;
	}

check_asce_limit:
	return check_asce_limit(mm, addr, len);
}

/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	/*
	 * Fall back to the standard layout if the personality
	 * bit is set, or if the expected stack growth is unlimited:
	 */
	if (mmap_is_legacy(rlim_stack)) {
		mm->mmap_base = mmap_base_legacy(random_factor);
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(random_factor, rlim_stack);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}