#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/syscalls.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/uaccess.h>
#include <linux/elf.h>

#include <asm/elf.h>
#include <asm/compat.h>
#include <asm/ia32.h>
#include <asm/syscalls.h>
#include <asm/mpx.h>

/*
 * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
 */
static unsigned long get_align_mask(void)
{
	/* handle 32- and 64-bit case with a single conditional */
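	/*
	 * (2 - mmap_is_ia32()) evaluates to ALIGN_VA_64 (bit 1) for a
	 * native 64-bit task and to ALIGN_VA_32 (bit 0) for a 32-bit
	 * task, so the test below checks exactly the flag bit that
	 * applies to the current process.
	 */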
	if (va_align.flags < 0 || !(va_align.flags & (2 - mmap_is_ia32())))
		return 0;

	if (!(current->flags & PF_RANDOMIZE))
		return 0;

	return va_align.mask;
}

/*
 * To avoid aliasing in the I$ on AMD F15h, the bits defined by
 * va_align.bits, [12:upper_bit), are set to a random value instead of
 * being zeroed. This random value is computed once per boot. This form
 * of ASLR is known as "per-boot ASLR".
 *
 * To achieve this, the random value is added to info.align_offset
 * before calling vm_unmapped_area(), or ORed directly into the
 * address.
 */
static unsigned long get_align_bits(void)
{
	return va_align.bits & get_align_mask();
}

unsigned long align_vdso_addr(unsigned long addr)
{
	unsigned long align_mask = get_align_mask();

	addr = (addr + align_mask) & ~align_mask;
	return addr | get_align_bits();
}
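
/*
 * Worked example (hypothetical values; the real per-family settings
 * live in arch/x86/kernel/cpu/amd.c): with va_align.mask == 0x7000 and
 * va_align.bits == 0x3000, a page-aligned input address of
 * 0x7f1234569000 is first adjusted to 0x7f1234570000 and then ORed
 * with the per-boot bits, yielding 0x7f1234573000. Bits [12:15) are
 * thus identical for every vDSO mapping until the next reboot.
 */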

static int __init control_va_addr_alignment(char *str)
{
	/* guard against enabling this on other CPU families */
	if (va_align.flags < 0)
		return 1;

	if (*str == 0)
		return 1;

	if (*str == '=')
		str++;

	if (!strcmp(str, "32"))
		va_align.flags = ALIGN_VA_32;
	else if (!strcmp(str, "64"))
		va_align.flags = ALIGN_VA_64;
	else if (!strcmp(str, "off"))
		va_align.flags = 0;
	else if (!strcmp(str, "on"))
		va_align.flags = ALIGN_VA_32 | ALIGN_VA_64;
	else
		return 0;

	return 1;
}
__setup("align_va_addr", control_va_addr_alignment);
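
/*
 * Example kernel command line usage, matching the values parsed above:
 *
 *	align_va_addr=on	align both 32-bit and 64-bit mmap()s
 *	align_va_addr=off	disable the alignment
 *	align_va_addr=32	align only 32-bit mmap()s
 *	align_va_addr=64	align only 64-bit mmap()s
 */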

SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags,
		unsigned long, fd, unsigned long, off)
{
	long error;

	error = -EINVAL;
	if (off & ~PAGE_MASK)
		goto out;

	error = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
out:
	return error;
}
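
/*
 * Example (assuming 4K pages, PAGE_SHIFT == 12): a byte offset of
 * 0x2000 passes the alignment check and is converted to pgoff 2, while
 * an unaligned offset such as 0x2001 has bits set under ~PAGE_MASK and
 * makes the call fail with -EINVAL.
 */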

static void find_start_end(unsigned long addr, unsigned long flags,
		unsigned long *begin, unsigned long *end)
{
	if (!in_compat_syscall() && (flags & MAP_32BIT)) {
		/*
		 * This is usually needed to map code in the small code
		 * model, so it has to live in the first 31 bits. Limit
		 * it to that. This means we need to move the unmapped
		 * base down for this case, which can conflict with the
		 * heap, but we assume that glibc malloc knows how to
		 * fall back to mmap(). Give it 1GB of playground for
		 * now. -AK
		 */
		*begin = 0x40000000;
		*end = 0x80000000;
		if (current->flags & PF_RANDOMIZE)
			*begin = randomize_page(*begin, 0x02000000);
		return;
	}

	*begin = get_mmap_base(1);
	if (in_compat_syscall())
		*end = task_size_32bit();
	else
		*end = task_size_64bit(addr > DEFAULT_MAP_WINDOW);
}
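
/*
 * Example: for a 64-bit MAP_32BIT request with ASLR enabled, the search
 * window starts at 0x40000000 plus a random, page-aligned offset of up
 * to 32MB (0x02000000) and ends at 0x80000000, so the mapping stays in
 * the low 2GB.
 */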

unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct vm_unmapped_area_info info;
	unsigned long begin, end;

	addr = mpx_unmapped_area_check(addr, len, flags);
	if (IS_ERR_VALUE(addr))
		return addr;

	if (flags & MAP_FIXED)
		return addr;

	find_start_end(addr, flags, &begin, &end);

	if (len > end)
		return -ENOMEM;

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (end - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = begin;
	info.high_limit = end;
	info.align_mask = 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	if (filp) {
		info.align_mask = get_align_mask();
		info.align_offset += get_align_bits();
	}
	return vm_unmapped_area(&info);
}
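
/*
 * Note on the alignment fields above: vm_unmapped_area() returns an
 * address satisfying (addr & align_mask) == (align_offset & align_mask)
 * (see unmapped_area() in mm/mmap.c). With align_offset set to the file
 * offset plus the per-boot random bits, every mapping of a given file
 * page gets the same I$ colouring bits, which is what avoids the F15h
 * aliasing described at the top of this file.
 */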

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	struct vm_unmapped_area_info info;

	addr = mpx_unmapped_area_check(addr, len, flags);
	if (IS_ERR_VALUE(addr))
		return addr;

	/* requested length too big for entire address space */
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		return addr;

	/* for MAP_32BIT mappings we force the legacy mmap base */
	if (!in_compat_syscall() && (flags & MAP_32BIT))
		goto bottomup;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
				(!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = get_mmap_base(0);

	/*
	 * If the hint address is above DEFAULT_MAP_WINDOW, look for an
	 * unmapped area in the full address space.
	 *
	 * The !in_compat_syscall() check avoids handing out high
	 * addresses to x32 tasks.
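	 *
	 * (With CONFIG_X86_5LEVEL, this extends the limit from the
	 * 47-bit DEFAULT_MAP_WINDOW, roughly 128TiB of user VA, to the
	 * 56-bit TASK_SIZE_MAX, roughly 64PiB.)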
	 */
	if (addr > DEFAULT_MAP_WINDOW && !in_compat_syscall())
		info.high_limit += TASK_SIZE_MAX - DEFAULT_MAP_WINDOW;

	info.align_mask = 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	if (filp) {
		info.align_mask = get_align_mask();
		info.align_offset += get_align_bits();
	}
	addr = vm_unmapped_area(&info);
	if (!(addr & ~PAGE_MASK))
		return addr;
	VM_BUG_ON(addr != -ENOMEM);

bottomup:
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
}
240