/*
 * flexible mmap layout support
 *
 * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 *
 * Started by Ingo Molnar <mingo@elte.hu>
 */

#include <linux/personality.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/compat.h>
#include <asm/pgalloc.h>

/*
 * Maximum number of bytes the stack top may be randomized by.
 * Returns 0 when randomization is disabled for this task, either
 * globally (PF_RANDOMIZE clear) or via the ADDR_NO_RANDOMIZE
 * personality bit.
 */
static unsigned long stack_maxrandom_size(void)
{
	if (!(current->flags & PF_RANDOMIZE))
		return 0;
	if (current->personality & ADDR_NO_RANDOMIZE)
		return 0;
	return STACK_RND_MASK << PAGE_SHIFT;
}

/*
 * Top of mmap area (just below the process stack).
 *
 * Leave at least a ~32 MB hole.
 */
#define MIN_GAP (32*1024*1024)
#define MAX_GAP (STACK_TOP/6*5)

/*
 * Decide whether this process gets the legacy (bottom-up) mmap layout:
 * forced by the ADDR_COMPAT_LAYOUT personality bit, by an unlimited
 * stack rlimit (the top-down layout needs a bounded stack gap), or by
 * the legacy_va_layout sysctl.
 */
static inline int mmap_is_legacy(void)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;
	if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
		return 1;
	return sysctl_legacy_va_layout;
}

/*
 * Random page-aligned offset subtracted from the mmap base, or 0 when
 * PF_RANDOMIZE is not set for the current task.
 */
static unsigned long mmap_rnd(void)
{
	if (!(current->flags & PF_RANDOMIZE))
		return 0;
	/* 8MB randomization for mmap_base */
	return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
}

/*
 * Compute the top-down mmap base: below STACK_TOP, leaving room for the
 * stack (its rlimit, clamped to [MIN_GAP, MAX_GAP] and page-aligned),
 * the stack randomization margin, and the mmap randomization offset.
 */
static inline unsigned long mmap_base(void)
{
	unsigned long gap = rlimit(RLIMIT_STACK);

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;
	gap &= PAGE_MASK;
	return STACK_TOP - stack_maxrandom_size() - mmap_rnd() - gap;
}

#ifndef CONFIG_64BIT

/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	/*
	 * Fall back to the standard layout if the personality
	 * bit is set, or if the expected stack growth is unlimited:
	 */
	if (mmap_is_legacy()) {
		mm->mmap_base = TASK_UNMAPPED_BASE;
		mm->get_unmapped_area = arch_get_unmapped_area;
		mm->unmap_area = arch_unmap_area;
	} else {
		mm->mmap_base = mmap_base();
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
		mm->unmap_area = arch_unmap_area_topdown;
	}
}

#else

/*
 * Called before an mmap request is honored: if a 64-bit (non-compat)
 * task asks for a mapping at least TASK_SIZE bytes long while the
 * address space is still below 1UL << 53, upgrade the page tables
 * (crst_table_upgrade) and reload the mm (update_mm) first.
 * Returns 0 on success or the negative error from the upgrade.
 * NOTE(review): only 'len' is checked here, not 'addr + len' — this
 * presumably matches how callers pre-validate addr; confirm against
 * the mmap path before changing.
 */
int s390_mmap_check(unsigned long addr, unsigned long len)
{
	int rc;

	if (!is_compat_task() &&
	    len >= TASK_SIZE && TASK_SIZE < (1UL << 53)) {
		rc = crst_table_upgrade(current->mm, 1UL << 53);
		if (rc)
			return rc;
		update_mm(current->mm, current);
	}
	return 0;
}

/*
 * Bottom-up get_unmapped_area with automatic address-space extension:
 * if the generic search fails with -ENOMEM for a 64-bit task whose
 * address space is still below 1UL << 53, upgrade the page table to
 * 4 levels and retry once.
 */
static unsigned long
s390_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	unsigned long area;
	int rc;

	area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
	/*
	 * A page-aligned result is a successful address; -errno values
	 * (e.g. -ENOMEM) always have low bits set, so they fail this test.
	 */
	if (!(area & ~PAGE_MASK))
		return area;
	if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < (1UL << 53)) {
		/* Upgrade the page table to 4 levels and retry. */
		rc = crst_table_upgrade(mm, 1UL << 53);
		if (rc)
			return (unsigned long) rc;
		update_mm(mm, current);
		area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
	}
	return area;
}

/*
 * Top-down variant of s390_get_unmapped_area(): identical upgrade-and-
 * retry logic, but delegates to arch_get_unmapped_area_topdown().
 */
static unsigned long
s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	unsigned long area;
	int rc;

	area = arch_get_unmapped_area_topdown(filp, addr, len, pgoff, flags);
	/* Page-aligned result == success; -errno values are never aligned. */
	if (!(area & ~PAGE_MASK))
		return area;
	if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < (1UL << 53)) {
		/* Upgrade the page table to 4 levels and retry. */
		rc = crst_table_upgrade(mm, 1UL << 53);
		if (rc)
			return (unsigned long) rc;
		update_mm(mm, current);
		area = arch_get_unmapped_area_topdown(filp, addr, len,
						      pgoff, flags);
	}
	return area;
}
/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	/*
	 * Fall back to the standard layout if the personality
	 * bit is set, or if the expected stack growth is unlimited:
	 */
	if (mmap_is_legacy()) {
		mm->mmap_base = TASK_UNMAPPED_BASE;
		mm->get_unmapped_area = s390_get_unmapped_area;
		mm->unmap_area = arch_unmap_area;
	} else {
		mm->mmap_base = mmap_base();
		mm->get_unmapped_area = s390_get_unmapped_area_topdown;
		mm->unmap_area = arch_unmap_area_topdown;
	}
}

#endif