/*
 *  flexible mmap layout support
 *
 * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 *
 * Started by Ingo Molnar <mingo@elte.hu>
 */

#include <linux/personality.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/compat.h>
#include <linux/security.h>
#include <asm/pgalloc.h>

/*
 * Randomization and alignment masks for the mmap base, set up by
 * setup_mmap_rnd() according to the machine type (64-bit builds only).
 */
unsigned long mmap_rnd_mask;
unsigned long mmap_align_mask;

static unsigned long stack_maxrandom_size(void)
{
	if (!(current->flags & PF_RANDOMIZE))
		return 0;
	if (current->personality & ADDR_NO_RANDOMIZE)
		return 0;
	return STACK_RND_MASK << PAGE_SHIFT;
}

/*
 * Top of mmap area (just below the process stack).
 *
 * Leave at least a ~32 MB hole.
 */
#define MIN_GAP (32*1024*1024)
#define MAX_GAP (STACK_TOP/6*5)

static inline int mmap_is_legacy(void)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;
	if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
		return 1;
	return sysctl_legacy_va_layout;
}

/* Random page-aligned offset added to the mmap base when randomization is enabled. */
static unsigned long mmap_rnd(void)
{
	if (!(current->flags & PF_RANDOMIZE))
		return 0;
	if (is_32bit_task())
		return (get_random_int() & 0x7ff) << PAGE_SHIFT;
	else
		return (get_random_int() & mmap_rnd_mask) << PAGE_SHIFT;
}

static unsigned long mmap_base_legacy(void)
{
	return TASK_UNMAPPED_BASE + mmap_rnd();
}

static inline unsigned long mmap_base(void)
{
	unsigned long gap = rlimit(RLIMIT_STACK);

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;
	gap &= PAGE_MASK;
	return STACK_TOP - stack_maxrandom_size() - mmap_rnd() - gap;
}

unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct vm_unmapped_area_info info;
	int do_color_align;

	if (len > TASK_SIZE - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		return addr;

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = !is_32bit_task();

	info.flags = 0;
	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	info.align_mask = do_color_align ? (mmap_align_mask << PAGE_SHIFT) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	return vm_unmapped_area(&info);
}

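/*
 * Top-down variant for the flexible layout: search for a free area just
 * below mm->mmap_base and fall back to a bottom-up search on failure.
 */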
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	struct vm_unmapped_area_info info;
	int do_color_align;

	/* requested length too big for entire address space */
	if (len > TASK_SIZE - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		return addr;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = !is_32bit_task();

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
	info.high_limit = mm->mmap_base;
	info.align_mask = do_color_align ? (mmap_align_mask << PAGE_SHIFT) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

unsigned long randomize_et_dyn(void)
{
	unsigned long base;

	base = STACK_TOP / 3 * 2;
	if (!is_32bit_task())
		/* Align to 4GB */
		base &= ~((1UL << 32) - 1);
	return base + mmap_rnd();
}

#ifndef CONFIG_64BIT

/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	/*
	 * Fall back to the standard layout if the personality
	 * bit is set, or if the expected stack growth is unlimited:
	 */
	if (mmap_is_legacy()) {
		mm->mmap_base = mmap_base_legacy();
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base();
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}

#else

/*
 * Upgrade the page table to four levels if a mapping request cannot fit
 * below the current address space limit.
 */
int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
{
	if (is_compat_task() || (TASK_SIZE >= (1UL << 53)))
		return 0;
	if (!(flags & MAP_FIXED))
		addr = 0;
	if ((addr + len) >= TASK_SIZE)
		return crst_table_upgrade(current->mm, 1UL << 53);
	return 0;
}

static unsigned long
s390_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	unsigned long area;
	int rc;

	area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
	if (!(area & ~PAGE_MASK))
		return area;
	if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < (1UL << 53)) {
		/* Upgrade the page table to 4 levels and retry. */
		rc = crst_table_upgrade(mm, 1UL << 53);
		if (rc)
			return (unsigned long) rc;
		area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
	}
	return area;
}

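/*
 * Top-down counterpart of s390_get_unmapped_area(), with the same
 * four level page table upgrade fallback.
 */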
static unsigned long
s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
				const unsigned long len, const unsigned long pgoff,
				const unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	unsigned long area;
	int rc;

	area = arch_get_unmapped_area_topdown(filp, addr, len, pgoff, flags);
	if (!(area & ~PAGE_MASK))
		return area;
	if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < (1UL << 53)) {
		/* Upgrade the page table to 4 levels and retry. */
		rc = crst_table_upgrade(mm, 1UL << 53);
		if (rc)
			return (unsigned long) rc;
		area = arch_get_unmapped_area_topdown(filp, addr, len,
						      pgoff, flags);
	}
	return area;
}
/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	/*
	 * Fall back to the standard layout if the personality
	 * bit is set, or if the expected stack growth is unlimited:
	 */
	if (mmap_is_legacy()) {
		mm->mmap_base = mmap_base_legacy();
		mm->get_unmapped_area = s390_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base();
		mm->get_unmapped_area = s390_get_unmapped_area_topdown;
	}
}

/*
 * Set the mmap randomization and alignment masks according to the
 * machine type.
 */
static int __init setup_mmap_rnd(void)
{
	struct cpuid cpu_id;

	get_cpu_id(&cpu_id);
	switch (cpu_id.machine) {
	case 0x9672:
	case 0x2064:
	case 0x2066:
	case 0x2084:
	case 0x2086:
	case 0x2094:
	case 0x2096:
	case 0x2097:
	case 0x2098:
	case 0x2817:
	case 0x2818:
	case 0x2827:
	case 0x2828:
		mmap_rnd_mask = 0x7ffUL;
		mmap_align_mask = 0UL;
		break;
	case 0x2964:	/* z13 */
	default:
		mmap_rnd_mask = 0x3ff80UL;
		mmap_align_mask = 0x7fUL;
		break;
	}
	return 0;
}
early_initcall(setup_mmap_rnd);

#endif