/*
 *  linux/arch/s390/mm/mmap.c
 *
 *  flexible mmap layout support
 *
 * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 *
 * Started by Ingo Molnar <mingo@elte.hu>
 */

#include <linux/personality.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/compat.h>
#include <asm/pgalloc.h>

static unsigned long stack_maxrandom_size(void)
{
	if (!(current->flags & PF_RANDOMIZE))
		return 0;
	if (current->personality & ADDR_NO_RANDOMIZE)
		return 0;
	return STACK_RND_MASK << PAGE_SHIFT;
}

/*
 * Top of mmap area (just below the process stack).
 *
 * Leave at least a ~32 MB hole.
 */
#define MIN_GAP (32*1024*1024)
#define MAX_GAP (STACK_TOP/6*5)

static inline int mmap_is_legacy(void)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;
	if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
		return 1;
	return sysctl_legacy_va_layout;
}

static unsigned long mmap_rnd(void)
{
	if (!(current->flags & PF_RANDOMIZE))
		return 0;
	/* 8MB randomization for mmap_base */
	return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
}

static inline unsigned long mmap_base(void)
{
	unsigned long gap = rlimit(RLIMIT_STACK);

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;
	gap &= PAGE_MASK;
	return STACK_TOP - stack_maxrandom_size() - mmap_rnd() - gap;
}

#ifndef CONFIG_64BIT

/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	/*
	 * Fall back to the standard layout if the personality
	 * bit is set, or if the expected stack growth is unlimited:
	 */
	if (mmap_is_legacy()) {
		mm->mmap_base = TASK_UNMAPPED_BASE;
		mm->get_unmapped_area = arch_get_unmapped_area;
		mm->unmap_area = arch_unmap_area;
	} else {
		mm->mmap_base = mmap_base();
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
		mm->unmap_area = arch_unmap_area_topdown;
	}
}

#else

int s390_mmap_check(unsigned long addr, unsigned long len)
{
	if (!is_compat_task() &&
	    len >= TASK_SIZE && TASK_SIZE < (1UL << 53))
		return crst_table_upgrade(current->mm, 1UL << 53);
	return 0;
}

static unsigned long
s390_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	unsigned long area;
	int rc;

	area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
	if (!(area & ~PAGE_MASK))
		return area;
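	/*
	 * The first search failed. A 64-bit (non-compat) task whose
	 * address space is still below 2^53 bytes can gain more room
	 * by upgrading to a 4-level page table, so attempt the upgrade
	 * and search again.
	 */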
	if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < (1UL << 53)) {
		/* Upgrade the page table to 4 levels and retry. */
		rc = crst_table_upgrade(mm, 1UL << 53);
		if (rc)
			return (unsigned long) rc;
		area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
	}
	return area;
}

static unsigned long
s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	unsigned long area;
	int rc;

	area = arch_get_unmapped_area_topdown(filp, addr, len, pgoff, flags);
	if (!(area & ~PAGE_MASK))
		return area;
	if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < (1UL << 53)) {
		/* Upgrade the page table to 4 levels and retry. */
		rc = crst_table_upgrade(mm, 1UL << 53);
		if (rc)
			return (unsigned long) rc;
		area = arch_get_unmapped_area_topdown(filp, addr, len,
						      pgoff, flags);
	}
	return area;
}

/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	/*
	 * Fall back to the standard layout if the personality
	 * bit is set, or if the expected stack growth is unlimited:
	 */
	if (mmap_is_legacy()) {
		mm->mmap_base = TASK_UNMAPPED_BASE;
		mm->get_unmapped_area = s390_get_unmapped_area;
		mm->unmap_area = arch_unmap_area;
	} else {
		mm->mmap_base = mmap_base();
		mm->get_unmapped_area = s390_get_unmapped_area_topdown;
		mm->unmap_area = arch_unmap_area_topdown;
	}
}

#endif