/*
 *  linux/arch/s390/mm/mmap.c
 *
 *  flexible mmap layout support
 *
 * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 *
 * Started by Ingo Molnar <mingo@elte.hu>
 */

#include <linux/personality.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <asm/pgalloc.h>

/*
 * Top of mmap area (just below the process stack).
 *
 * Leave at least a ~128 MB hole.
 */
#define MIN_GAP (128*1024*1024)
#define MAX_GAP (STACK_TOP/6*5)

static inline unsigned long mmap_base(void)
{
	unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return STACK_TOP - (gap & PAGE_MASK);
}

static inline int mmap_is_legacy(void)
{
#ifdef CONFIG_64BIT
	/*
	 * Force standard allocation for 64 bit programs.
	 */
	if (!test_thread_flag(TIF_31BIT))
		return 1;
#endif
	return sysctl_legacy_va_layout ||
	    (current->personality & ADDR_COMPAT_LAYOUT) ||
	    current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY;
}

#ifndef CONFIG_64BIT

/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	/*
	 * Fall back to the standard layout if the personality
	 * bit is set, or if the expected stack growth is unlimited:
	 */
	if (mmap_is_legacy()) {
		mm->mmap_base = TASK_UNMAPPED_BASE;
		mm->get_unmapped_area = arch_get_unmapped_area;
		mm->unmap_area = arch_unmap_area;
	} else {
		mm->mmap_base = mmap_base();
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
		mm->unmap_area = arch_unmap_area_topdown;
	}
}
EXPORT_SYMBOL_GPL(arch_pick_mmap_layout);

#else

int s390_mmap_check(unsigned long addr, unsigned long len)
{
	if (!test_thread_flag(TIF_31BIT) &&
	    len >= TASK_SIZE && TASK_SIZE < (1UL << 53))
		return crst_table_upgrade(current->mm, 1UL << 53);
	return 0;
}

static unsigned long
s390_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	unsigned long area;
	int rc;

	area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
	if (!(area & ~PAGE_MASK))
		return area;
	if (area == -ENOMEM &&
	    !test_thread_flag(TIF_31BIT) && TASK_SIZE < (1UL << 53)) {
		/* Upgrade the page table to 4 levels and retry. */
		rc = crst_table_upgrade(mm, 1UL << 53);
		if (rc)
			return (unsigned long) rc;
		area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
	}
	return area;
}

static unsigned long
s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	unsigned long area;
	int rc;

	area = arch_get_unmapped_area_topdown(filp, addr, len, pgoff, flags);
	if (!(area & ~PAGE_MASK))
		return area;
	if (area == -ENOMEM &&
	    !test_thread_flag(TIF_31BIT) && TASK_SIZE < (1UL << 53)) {
		/* Upgrade the page table to 4 levels and retry. */
		rc = crst_table_upgrade(mm, 1UL << 53);
		if (rc)
			return (unsigned long) rc;
		area = arch_get_unmapped_area_topdown(filp, addr, len,
						      pgoff, flags);
	}
	return area;
}

/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	/*
	 * Fall back to the standard layout if the personality
	 * bit is set, or if the expected stack growth is unlimited:
	 */
	if (mmap_is_legacy()) {
		mm->mmap_base = TASK_UNMAPPED_BASE;
		mm->get_unmapped_area = s390_get_unmapped_area;
		mm->unmap_area = arch_unmap_area;
	} else {
		mm->mmap_base = mmap_base();
		mm->get_unmapped_area = s390_get_unmapped_area_topdown;
		mm->unmap_area = arch_unmap_area_topdown;
	}
}
EXPORT_SYMBOL_GPL(arch_pick_mmap_layout);

#endif