xref: /openbmc/linux/arch/s390/mm/mmap.c (revision 4cff79e9)
// SPDX-License-Identifier: GPL-2.0+
/*
 *  flexible mmap layout support
 *
 * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
 * All Rights Reserved.
 *
 * Started by Ingo Molnar <mingo@elte.hu>
 */

#include <linux/elf-randomize.h>
#include <linux/personality.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/random.h>
#include <linux/compat.h>
#include <linux/security.h>
#include <asm/pgalloc.h>
#include <asm/elf.h>

static unsigned long stack_maxrandom_size(void)
{
	if (!(current->flags & PF_RANDOMIZE))
		return 0;
	if (current->personality & ADDR_NO_RANDOMIZE)
		return 0;
	return STACK_RND_MASK << PAGE_SHIFT;
}
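
/*
 * Worked example (assumption: if STACK_RND_MASK were 0x3ff, a common
 * value on 4 KB page architectures; see asm/elf.h for the real one):
 * 0x3ff << 12 = 0x3ff000, i.e. the stack base could be shifted by up
 * to ~4 MB below STACK_TOP when randomization is enabled.
 */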

/*
 * Top of mmap area (just below the process stack).
 *
 * Leave at least a ~32 MB hole.
 */
#define MIN_GAP (32*1024*1024)
#define MAX_GAP (STACK_TOP/6*5)
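
/*
 * Illustration: the gap reserved between STACK_TOP and the top of
 * the mmap area is the stack rlimit clamped to [MIN_GAP, MAX_GAP],
 * i.e. to [32 MB, 5/6 of STACK_TOP]. A default 8 MB RLIMIT_STACK is
 * below MIN_GAP, so such tasks still get the full 32 MB hole.
 */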

static inline int mmap_is_legacy(struct rlimit *rlim_stack)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;
	if (rlim_stack->rlim_cur == RLIM_INFINITY)
		return 1;
	return sysctl_legacy_va_layout;
}
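
/*
 * Note on the RLIM_INFINITY case above: with an unlimited stack
 * rlimit the stack may grow arbitrarily far down, so no safe gap
 * below STACK_TOP can be computed for a top-down mmap base; the
 * bottom-up legacy layout is used instead.
 */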

unsigned long arch_mmap_rnd(void)
{
	return (get_random_int() & MMAP_RND_MASK) << PAGE_SHIFT;
}
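
/*
 * The amount of mmap randomization is architecture defined:
 * MMAP_RND_MASK (from asm/elf.h) selects how many page-granular
 * random bits are applied. The exact width is not assumed here and
 * may differ between 31-bit compat and 64-bit tasks.
 */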

static unsigned long mmap_base_legacy(unsigned long rnd)
{
	return TASK_UNMAPPED_BASE + rnd;
}

static inline unsigned long mmap_base(unsigned long rnd,
				      struct rlimit *rlim_stack)
{
	unsigned long gap = rlim_stack->rlim_cur;

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;
	gap &= PAGE_MASK;
	return STACK_TOP - stack_maxrandom_size() - rnd - gap;
}
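
/*
 * Example: with a 64 MB RLIMIT_STACK the gap stays at 64 MB (inside
 * the [MIN_GAP, MAX_GAP] window), so the top-down mmap base ends up
 * at STACK_TOP minus 64 MB, minus the maximum stack randomization,
 * minus this task's own random factor; everything below that base is
 * handed out to mmap() in descending order.
 */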

unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct vm_unmapped_area_info info;
	int rc;

	if (len > TASK_SIZE - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		goto check_asce_limit;

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			goto check_asce_limit;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
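	/*
	 * File-backed and shared mappings get an extra, architecture
	 * defined alignment (MMAP_ALIGN_MASK), presumably to improve
	 * cache and TLB behaviour of shared text and data; private
	 * anonymous mappings only need to be page aligned.
	 */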
	if (filp || (flags & MAP_SHARED))
		info.align_mask = MMAP_ALIGN_MASK << PAGE_SHIFT;
	else
		info.align_mask = 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);
	if (addr & ~PAGE_MASK)
		return addr;

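	/*
	 * s390 upgrades its page tables on demand: if the mapping
	 * would end above the current ASCE (address space control
	 * element) limit, crst_table_upgrade() adds another region
	 * table level before the address is returned.
	 */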
check_asce_limit:
	if (addr + len > current->mm->context.asce_limit &&
	    addr + len <= TASK_SIZE) {
		rc = crst_table_upgrade(mm, addr + len);
		if (rc)
			return (unsigned long) rc;
	}

	return addr;
}

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	struct vm_unmapped_area_info info;
	int rc;

	/* requested length too big for entire address space */
	if (len > TASK_SIZE - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		goto check_asce_limit;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
				(!vma || addr + len <= vm_start_gap(vma)))
			goto check_asce_limit;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
	info.high_limit = mm->mmap_base;
	if (filp || (flags & MAP_SHARED))
		info.align_mask = MMAP_ALIGN_MASK << PAGE_SHIFT;
	else
		info.align_mask = 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
		if (addr & ~PAGE_MASK)
			return addr;
	}

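	/* Same on-demand page table upgrade as in arch_get_unmapped_area(). */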
check_asce_limit:
	if (addr + len > current->mm->context.asce_limit &&
	    addr + len <= TASK_SIZE) {
		rc = crst_table_upgrade(mm, addr + len);
		if (rc)
			return (unsigned long) rc;
	}

	return addr;
}

/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	/*
	 * Fall back to the standard layout if the personality
	 * bit is set, or if the expected stack growth is unlimited:
	 */
	if (mmap_is_legacy(rlim_stack)) {
		mm->mmap_base = mmap_base_legacy(random_factor);
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(random_factor, rlim_stack);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}
207