/*
 *  flexible mmap layout support
 *
 * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 *
 * Started by Ingo Molnar <mingo@elte.hu>
 */

#include <linux/personality.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/compat.h>
#include <linux/security.h>
#include <asm/pgalloc.h>

static unsigned long stack_maxrandom_size(void)
{
	if (!(current->flags & PF_RANDOMIZE))
		return 0;
	if (current->personality & ADDR_NO_RANDOMIZE)
		return 0;
	return STACK_RND_MASK << PAGE_SHIFT;
}

/*
 * Top of mmap area (just below the process stack).
 *
 * Leave at least a ~32 MB hole.
 */
#define MIN_GAP (32*1024*1024)
#define MAX_GAP (STACK_TOP/6*5)

static inline int mmap_is_legacy(void)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;
	if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
		return 1;
	return sysctl_legacy_va_layout;
}

unsigned long arch_mmap_rnd(void)
{
	return (get_random_int() & MMAP_RND_MASK) << PAGE_SHIFT;
}

static unsigned long mmap_base_legacy(unsigned long rnd)
{
	return TASK_UNMAPPED_BASE + rnd;
}

static inline unsigned long mmap_base(unsigned long rnd)
{
	unsigned long gap = rlimit(RLIMIT_STACK);

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;
	gap &= PAGE_MASK;
	return STACK_TOP - stack_maxrandom_size() - rnd - gap;
}
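
/*
 * Worked example for mmap_base() above (added illustration, not part of
 * the original file): with the common 8 MB RLIMIT_STACK, the gap is
 * clamped up to MIN_GAP, so the mmap area starts 32 MB plus the stack
 * and mmap randomization below STACK_TOP. An unlimited stack never
 * reaches this path, because mmap_is_legacy() already selected the
 * bottom-up layout; MAX_GAP (5/6 of STACK_TOP) only caps very large
 * finite stack limits.
 */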

unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct vm_unmapped_area_info info;

	if (len > TASK_SIZE - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		return addr;

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	if (filp || (flags & MAP_SHARED))
		info.align_mask = MMAP_ALIGN_MASK << PAGE_SHIFT;
	else
		info.align_mask = 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	return vm_unmapped_area(&info);
}

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			       const unsigned long len, const unsigned long pgoff,
			       const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	struct vm_unmapped_area_info info;

	/* requested length too big for entire address space */
	if (len > TASK_SIZE - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		return addr;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
	info.high_limit = mm->mmap_base;
	if (filp || (flags & MAP_SHARED))
		info.align_mask = MMAP_ALIGN_MASK << PAGE_SHIFT;
	else
		info.align_mask = 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
{
	if (is_compat_task() || TASK_SIZE >= TASK_MAX_SIZE)
		return 0;
	if (!(flags & MAP_FIXED))
		addr = 0;
	if ((addr + len) >= TASK_SIZE)
		return crst_table_upgrade(current->mm, TASK_MAX_SIZE);
	return 0;
}

static unsigned long
s390_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	unsigned long area;
	int rc;

	area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
	if (!(area & ~PAGE_MASK))
		return area;
	if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < TASK_MAX_SIZE) {
		/* Upgrade the page table to 4 levels and retry. */
		rc = crst_table_upgrade(mm, TASK_MAX_SIZE);
		if (rc)
			return (unsigned long) rc;
		area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
	}
	return area;
}
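
/*
 * Note (added commentary, not part of the original file): the wrapper
 * above and the one below follow the same pattern. The generic search
 * is tried first within the current TASK_SIZE; only when it fails with
 * -ENOMEM for a 64-bit task is the address space upgraded to four
 * page-table levels (TASK_MAX_SIZE) via crst_table_upgrade() and the
 * search retried. The (area & ~PAGE_MASK) test works because a valid
 * result is page aligned, while error values such as -ENOMEM are not.
 */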

static unsigned long
s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
			       const unsigned long len, const unsigned long pgoff,
			       const unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	unsigned long area;
	int rc;

	area = arch_get_unmapped_area_topdown(filp, addr, len, pgoff, flags);
	if (!(area & ~PAGE_MASK))
		return area;
	if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < TASK_MAX_SIZE) {
		/* Upgrade the page table to 4 levels and retry. */
		rc = crst_table_upgrade(mm, TASK_MAX_SIZE);
		if (rc)
			return (unsigned long) rc;
		area = arch_get_unmapped_area_topdown(filp, addr, len,
						      pgoff, flags);
	}
	return area;
}

/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	/*
	 * Fall back to the standard layout if the personality
	 * bit is set, or if the expected stack growth is unlimited:
	 */
	if (mmap_is_legacy()) {
		mm->mmap_base = mmap_base_legacy(random_factor);
		mm->get_unmapped_area = s390_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(random_factor);
		mm->get_unmapped_area = s390_get_unmapped_area_topdown;
	}
}
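
/*
 * Summary of the two layouts picked above (added commentary, not part of
 * the original file):
 * - legacy: mmap_base = TASK_UNMAPPED_BASE + rnd, allocations grow
 *   bottom-up from there towards TASK_SIZE;
 * - default: mmap_base sits below the stack (STACK_TOP minus the clamped
 *   stack gap and the randomization), allocations grow top-down towards
 *   the heap and fall back to a bottom-up search on -ENOMEM.
 */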