/*
 * flexible mmap layout support
 *
 * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 *
 * Started by Ingo Molnar <mingo@elte.hu>
 */

#include <linux/personality.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/compat.h>
#include <asm/pgalloc.h>

/*
 * Maximum number of bytes the stack top may be randomized by, in
 * page-granular units.  Zero when address-space randomization is
 * disabled either globally for the task (no PF_RANDOMIZE) or via the
 * ADDR_NO_RANDOMIZE personality bit.
 */
static unsigned long stack_maxrandom_size(void)
{
	if (!(current->flags & PF_RANDOMIZE))
		return 0;
	if (current->personality & ADDR_NO_RANDOMIZE)
		return 0;
	return STACK_RND_MASK << PAGE_SHIFT;
}

/*
 * Top of mmap area (just below the process stack).
 *
 * Leave at least a ~32 MB hole.
 */
#define MIN_GAP (32*1024*1024)
#define MAX_GAP (STACK_TOP/6*5)

/*
 * Decide whether this process should use the legacy bottom-up mmap
 * layout: requested explicitly via the ADDR_COMPAT_LAYOUT personality,
 * forced when the stack may grow without bound (an unlimited
 * RLIMIT_STACK would collide with a top-down mmap area), or selected
 * system-wide through the legacy_va_layout sysctl.
 */
static inline int mmap_is_legacy(void)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;
	if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
		return 1;
	return sysctl_legacy_va_layout;
}

/*
 * Random, page-aligned offset added to the mmap base.  With 4 KB pages
 * the 0x7ff page mask below gives up to ~8 MB of randomization.
 * Returns 0 when randomization is disabled for this task.
 */
static unsigned long mmap_rnd(void)
{
	if (!(current->flags & PF_RANDOMIZE))
		return 0;
	/* 8MB randomization for mmap_base */
	return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
}

/*
 * Bottom-up (legacy) mmap base: the traditional fixed start address
 * plus a random offset.
 */
static unsigned long mmap_base_legacy(void)
{
	return TASK_UNMAPPED_BASE + mmap_rnd();
}

/*
 * Top-down mmap base: placed below the stack, leaving a gap sized from
 * RLIMIT_STACK but clamped to [MIN_GAP, MAX_GAP] and page-aligned.
 * Room is also reserved for the randomized part of the stack top and
 * for the mmap-base randomization itself.
 */
static inline unsigned long mmap_base(void)
{
	unsigned long gap = rlimit(RLIMIT_STACK);

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;
	gap &= PAGE_MASK;
	return STACK_TOP - stack_maxrandom_size() - mmap_rnd() - gap;
}

#ifndef CONFIG_64BIT

/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	/*
	 * Fall back to the standard layout if the personality
	 * bit is set, or if the expected stack growth is unlimited:
	 */
	if (mmap_is_legacy()) {
		mm->mmap_base = mmap_base_legacy();
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base();
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}

#else

/*
 * Called before an mmap request is carried out: if a 64-bit task maps
 * at or above the current 3-level page table limit, upgrade its page
 * table to 4 levels (1UL << 53 appears to be the 4-level address-space
 * limit here — NOTE(review): confirm against asm/pgalloc.h).  Compat
 * (31-bit) tasks and tasks already upgraded need no action.  Without
 * MAP_FIXED the kernel chooses the address, so addr is ignored.
 */
int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
{
	if (is_compat_task() || (TASK_SIZE >= (1UL << 53)))
		return 0;
	if (!(flags & MAP_FIXED))
		addr = 0;
	if ((addr + len) >= TASK_SIZE)
		return crst_table_upgrade(current->mm, 1UL << 53);
	return 0;
}

/*
 * Bottom-up area lookup with transparent page table upgrade: if the
 * generic allocator runs out of space in the 3-level address space,
 * upgrade to 4 levels and retry once.  A page-aligned return value is
 * a valid address; a value with low bits set is a negative errno.
 */
static unsigned long
s390_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	unsigned long area;
	int rc;

	area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
	if (!(area & ~PAGE_MASK))
		return area;
	if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < (1UL << 53)) {
		/* Upgrade the page table to 4 levels and retry. */
		rc = crst_table_upgrade(mm, 1UL << 53);
		if (rc)
			return (unsigned long) rc;
		area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
	}
	return area;
}

/*
 * Top-down counterpart of s390_get_unmapped_area(): same retry-after-
 * upgrade logic, using the top-down generic allocator.
 */
static unsigned long
s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	unsigned long area;
	int rc;

	area = arch_get_unmapped_area_topdown(filp, addr, len, pgoff, flags);
	if (!(area & ~PAGE_MASK))
		return area;
	if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < (1UL << 53)) {
		/* Upgrade the page table to 4 levels and retry. */
		rc = crst_table_upgrade(mm, 1UL << 53);
		if (rc)
			return (unsigned long) rc;
		area = arch_get_unmapped_area_topdown(filp, addr, len,
						      pgoff, flags);
	}
	return area;
}
/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	/*
	 * Fall back to the standard layout if the personality
	 * bit is set, or if the expected stack growth is unlimited:
	 */
	if (mmap_is_legacy()) {
		mm->mmap_base = mmap_base_legacy();
		mm->get_unmapped_area = s390_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base();
		mm->get_unmapped_area = s390_get_unmapped_area_topdown;
	}
}

#endif