// SPDX-License-Identifier: GPL-2.0+
/*
 *  flexible mmap layout support
 *
 * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
 * All Rights Reserved.
 *
 * Started by Ingo Molnar <mingo@elte.hu>
 */

#include <linux/elf-randomize.h>
#include <linux/personality.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/random.h>
#include <linux/compat.h>
#include <linux/security.h>
#include <asm/pgalloc.h>
#include <asm/elf.h>

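/*
 * Maximum number of bytes by which the stack top may be shifted for
 * address space randomization: zero if randomization is disabled for
 * this task, either globally (PF_RANDOMIZE clear) or per process via
 * the ADDR_NO_RANDOMIZE personality flag.
 */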
static unsigned long stack_maxrandom_size(void)
{
	if (!(current->flags & PF_RANDOMIZE))
		return 0;
	if (current->personality & ADDR_NO_RANDOMIZE)
		return 0;
	return STACK_RND_MASK << PAGE_SHIFT;
}

/*
 * Top of mmap area (just below the process stack).
 *
 * Leave at least a ~32 MB hole.
 */
#define MIN_GAP (32*1024*1024)
#define MAX_GAP (STACK_TOP/6*5)

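/*
 * Use the legacy bottom-up layout if the ADDR_COMPAT_LAYOUT personality
 * is set, if the stack may grow without limit (an unbounded stack could
 * otherwise grow into a top-down mmap area), or if the legacy_va_layout
 * sysctl asks for it.
 */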
static inline int mmap_is_legacy(void)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;
	if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
		return 1;
	return sysctl_legacy_va_layout;
}

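/* Random, page-aligned offset to be applied to the mmap base. */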
unsigned long arch_mmap_rnd(void)
{
	return (get_random_int() & MMAP_RND_MASK) << PAGE_SHIFT;
}

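/* Legacy layout: mappings grow bottom-up from TASK_UNMAPPED_BASE. */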
static unsigned long mmap_base_legacy(unsigned long rnd)
{
	return TASK_UNMAPPED_BASE + rnd;
}

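/*
 * Flexible layout: place the mmap base below the stack, leaving room
 * for the stack rlimit (clamped to [MIN_GAP, MAX_GAP]) plus the margins
 * for stack and mmap randomization. With an 8 MB stack rlimit, for
 * example, the gap is bumped up to MIN_GAP (32 MB), so the mmap area
 * ends 32 MB plus the two randomization margins below STACK_TOP.
 */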
static inline unsigned long mmap_base(unsigned long rnd)
{
	unsigned long gap = rlimit(RLIMIT_STACK);

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;
	gap &= PAGE_MASK;
	return STACK_TOP - stack_maxrandom_size() - rnd - gap;
}

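/*
 * Bottom-up search for a free address range. A hint address is honoured
 * if it lies within the accessible range and does not collide with an
 * existing mapping or its stack guard gap.
 */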
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct vm_unmapped_area_info info;
	int rc;

	if (len > TASK_SIZE - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		goto check_asce_limit;

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			goto check_asce_limit;
	}

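	/*
	 * Search from mmap_base up to TASK_SIZE. File-backed and shared
	 * mappings are placed so that the address is congruent to the
	 * file offset modulo the MMAP_ALIGN_MASK alignment window.
	 */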
	info.flags = 0;
	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	if (filp || (flags & MAP_SHARED))
		info.align_mask = MMAP_ALIGN_MASK << PAGE_SHIFT;
	else
		info.align_mask = 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);
	if (addr & ~PAGE_MASK)
		return addr;

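	/*
	 * If the new mapping ends above the current ASCE limit, upgrade
	 * the page table: s390 grows the address space on demand by
	 * adding region table levels.
	 */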
check_asce_limit:
	if (addr + len > current->mm->context.asce_limit &&
	    addr + len <= TASK_SIZE) {
		rc = crst_table_upgrade(mm, addr + len);
		if (rc)
			return (unsigned long) rc;
	}

	return addr;
}

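/*
 * Top-down search for a free address range, used for the flexible
 * layout. The hint address is handled as in arch_get_unmapped_area().
 */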
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	struct vm_unmapped_area_info info;
	int rc;

	/* requested length too big for entire address space */
	if (len > TASK_SIZE - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		goto check_asce_limit;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
				(!vma || addr + len <= vm_start_gap(vma)))
			goto check_asce_limit;
	}

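	/* Search downwards from mmap_base; same alignment rules as above. */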
	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
	info.high_limit = mm->mmap_base;
	if (filp || (flags & MAP_SHARED))
		info.align_mask = MMAP_ALIGN_MASK << PAGE_SHIFT;
	else
		info.align_mask = 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
		if (addr & ~PAGE_MASK)
			return addr;
	}

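	/* Upgrade the page table if the mapping exceeds the ASCE limit. */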
check_asce_limit:
	if (addr + len > current->mm->context.asce_limit &&
	    addr + len <= TASK_SIZE) {
		rc = crst_table_upgrade(mm, addr + len);
		if (rc)
			return (unsigned long) rc;
	}

	return addr;
}

/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	/*
	 * Fall back to the standard layout if the personality
	 * bit is set, or if the expected stack growth is unlimited:
	 */
	if (mmap_is_legacy()) {
		mm->mmap_base = mmap_base_legacy(random_factor);
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(random_factor);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}