xref: /openbmc/linux/arch/x86/kernel/sys_x86_64.c (revision 5b828263)
// SPDX-License-Identifier: GPL-2.0
#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/syscalls.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/uaccess.h>
#include <linux/elf.h>

#include <asm/elf.h>
#include <asm/ia32.h>

/*
 * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
 */
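/*
 * A note on the test below: va_align.flags stays negative on CPUs
 * without the F15h quirk, and "2 - mmap_is_ia32()" evaluates to
 * ALIGN_VA_64 (2) for a 64-bit task and ALIGN_VA_32 (1) for a 32-bit
 * one, so a single test covers the disabled, 32-bit and 64-bit cases.
 */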
static unsigned long get_align_mask(void)
{
	/* handle 32- and 64-bit case with a single conditional */
	if (va_align.flags < 0 || !(va_align.flags & (2 - mmap_is_ia32())))
		return 0;

	if (!(current->flags & PF_RANDOMIZE))
		return 0;

	return va_align.mask;
}

/*
 * To avoid aliasing in the I$ on AMD F15h, the bits defined by
 * va_align.bits, [12:upper_bit), are set to a random value instead of
 * being zeroed. This random value is computed once per boot; this form
 * of ASLR is known as "per-boot ASLR".
 *
 * To achieve this, the random value is added to the info.align_offset
 * value before calling vm_unmapped_area() or ORed directly into the
 * address.
 */
static unsigned long get_align_bits(void)
{
	return va_align.bits & get_align_mask();
}

unsigned long align_vdso_addr(unsigned long addr)
{
	unsigned long align_mask = get_align_mask();

	addr = (addr + align_mask) & ~align_mask;
	return addr | get_align_bits();
}
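
/*
 * A worked example, assuming hypothetical per-boot values
 * va_align.mask == 0x7000 and va_align.bits == 0x3000: addr
 * 0x7ffff7a9d000 is first rounded up to 0x7ffff7aa0000 (the next
 * 32K boundary) and then ORed with the random slice, giving
 * 0x7ffff7aa3000. Every vDSO mapped this boot shares the same
 * bits-[14:12] slice, so their code cannot alias in the I$.
 */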

static int __init control_va_addr_alignment(char *str)
{
	/* guard against enabling this on other CPU families */
	if (va_align.flags < 0)
		return 1;

	if (*str == 0)
		return 1;

	if (*str == '=')
		str++;

	if (!strcmp(str, "32"))
		va_align.flags = ALIGN_VA_32;
	else if (!strcmp(str, "64"))
		va_align.flags = ALIGN_VA_64;
	else if (!strcmp(str, "off"))
		va_align.flags = 0;
	else if (!strcmp(str, "on"))
		va_align.flags = ALIGN_VA_32 | ALIGN_VA_64;
	else
		return 0;

	return 1;
}
__setup("align_va_addr", control_va_addr_alignment);
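
/*
 * Kernel command-line usage, matching the parser above:
 *   align_va_addr=on   - align for both 32- and 64-bit tasks
 *   align_va_addr=64   - align for 64-bit tasks only
 *   align_va_addr=32   - align for 32-bit tasks only
 *   align_va_addr=off  - disable the extra alignment
 * The option is ignored (flags stay negative) on CPU families
 * without the quirk.
 */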

SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags,
		unsigned long, fd, unsigned long, off)
{
	if (off & ~PAGE_MASK)
		return -EINVAL;

	return ksys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
}
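
/*
 * Example of the offset check above, assuming 4K pages: off = 0x1800
 * fails with -EINVAL because it is not page-aligned, while off =
 * 0x2000 is passed on to ksys_mmap_pgoff() as pgoff 2.
 */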

static void find_start_end(unsigned long addr, unsigned long flags,
		unsigned long *begin, unsigned long *end)
{
	if (!in_32bit_syscall() && (flags & MAP_32BIT)) {
		/*
		 * This is usually needed to map code in the small code
		 * model, so it needs to be in the first 31 bits. Limit
		 * it to that. This means we need to move the unmapped
		 * base down for this case. This can give conflicts with
		 * the heap, but we assume that glibc malloc knows how
		 * to fall back to mmap. Give it 1GB of playground for
		 * now. -AK
		 */
		*begin = 0x40000000;
		*end = 0x80000000;
		if (current->flags & PF_RANDOMIZE)
			*begin = randomize_page(*begin, 0x02000000);
		return;
	}

	*begin = get_mmap_base(1);
	if (in_32bit_syscall())
		*end = task_size_32bit();
	else
		*end = task_size_64bit(addr > DEFAULT_MAP_WINDOW);
}
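
/*
 * Example of the MAP_32BIT window chosen by find_start_end(): the
 * fixed window is [0x40000000, 0x80000000), i.e. 1GB in the low 31
 * bits; with PF_RANDOMIZE the start is shifted up by as much as 32MB
 * (randomize_page(0x40000000, 0x02000000)), shrinking the window
 * accordingly.
 */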

unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct vm_unmapped_area_info info;
	unsigned long begin, end;

	if (flags & MAP_FIXED)
		return addr;

	find_start_end(addr, flags, &begin, &end);

	if (len > end)
		return -ENOMEM;

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (end - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = begin;
	info.high_limit = end;
	info.align_mask = 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	if (filp) {
		info.align_mask = get_align_mask();
		info.align_offset += get_align_bits();
	}
	return vm_unmapped_area(&info);
}
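
/*
 * vm_unmapped_area() returns an address whose low bits match
 * info.align_offset under info.align_mask, so file mappings above are
 * colored by file offset (pgoff << PAGE_SHIFT) plus the per-boot
 * slice: two mappings of the same file range come out congruent
 * modulo the aliasing window.
 */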

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	struct vm_unmapped_area_info info;

	/* requested length too big for entire address space */
	if (len > TASK_SIZE)
		return -ENOMEM;

	/* No address checking. See comment at mmap_address_hint_valid() */
	if (flags & MAP_FIXED)
		return addr;

	/* for MAP_32BIT mappings we force the legacy mmap base */
	if (!in_32bit_syscall() && (flags & MAP_32BIT))
		goto bottomup;

	/* requesting a specific address */
	if (addr) {
		addr &= PAGE_MASK;
		if (!mmap_address_hint_valid(addr, len))
			goto get_unmapped_area;

		vma = find_vma(mm, addr);
		if (!vma || addr + len <= vm_start_gap(vma))
			return addr;
	}
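
	/*
	 * The hint falls through to a full search when it straddles
	 * DEFAULT_MAP_WINDOW (see mmap_address_hint_valid()) or when it
	 * overlaps an existing mapping.
	 */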
get_unmapped_area:

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = get_mmap_base(0);

	/*
	 * If the hint address is above DEFAULT_MAP_WINDOW, look for an
	 * unmapped area in the full address space.
	 *
	 * The !in_32bit_syscall() check avoids high addresses for x32
	 * (and makes this a no-op on native i386).
	 */
	if (addr > DEFAULT_MAP_WINDOW && !in_32bit_syscall())
		info.high_limit += TASK_SIZE_MAX - DEFAULT_MAP_WINDOW;
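	/*
	 * Concretely: DEFAULT_MAP_WINDOW is 2^47 - 4096 on x86-64; with
	 * 5-level paging TASK_SIZE_MAX grows to 2^56 - 4096, so a hint
	 * above the window lifts high_limit into the 56-bit range.
	 * Without LA57 the two limits are equal and this adds nothing.
	 */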

	info.align_mask = 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	if (filp) {
		info.align_mask = get_align_mask();
		info.align_offset += get_align_bits();
	}
	addr = vm_unmapped_area(&info);
	if (!(addr & ~PAGE_MASK))
		return addr;
	VM_BUG_ON(addr != -ENOMEM);

bottomup:
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
}