xref: /openbmc/linux/arch/x86/kernel/sys_x86_64.c (revision 174cd4b1)
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/syscalls.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/uaccess.h>
#include <linux/elf.h>

#include <asm/ia32.h>
#include <asm/syscalls.h>

/*
 * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
 */
static unsigned long get_align_mask(void)
{
	/* handle 32- and 64-bit case with a single conditional */
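	/*
	 * mmap_is_ia32() returns 1 for a 32-bit task and 0 for a 64-bit
	 * one, so (2 - mmap_is_ia32()) selects ALIGN_VA_32 (bit 0) or
	 * ALIGN_VA_64 (bit 1), i.e. the flag that enables alignment for
	 * this kind of task. va_align.flags stays at -1 on CPUs that do
	 * not need the workaround.
	 */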
	if (va_align.flags < 0 || !(va_align.flags & (2 - mmap_is_ia32())))
		return 0;

	if (!(current->flags & PF_RANDOMIZE))
		return 0;

	return va_align.mask;
}

/*
 * To avoid aliasing in the I$ on AMD F15h, the bits defined by
 * va_align.bits, [12:upper_bit), are set to a random value instead of
 * zeroing them. This random value is computed once per boot. This form
 * of ASLR is known as "per-boot ASLR".
 *
 * To achieve this, the random value is added to the info.align_offset
 * value before calling vm_unmapped_area() or ORed directly into the
 * address.
 */
static unsigned long get_align_bits(void)
{
	return va_align.bits & get_align_mask();
}

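/*
 * Illustrative example (the actual mask and bits are CPU- and
 * boot-specific): assume va_align.mask is 0x7000 and the per-boot
 * va_align.bits value is 0x3000. A page-aligned addr of 0x7ffff7a05000
 * is first rounded up to the next 32 KiB boundary, 0x7ffff7a08000, and
 * the random slice is then ORed in, giving 0x7ffff7a0b000.
 */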
unsigned long align_vdso_addr(unsigned long addr)
{
	unsigned long align_mask = get_align_mask();

	addr = (addr + align_mask) & ~align_mask;
	return addr | get_align_bits();
}

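/*
 * Parse the "align_va_addr=" early parameter: "32" or "64" restricts
 * the alignment workaround to 32-bit or 64-bit tasks respectively,
 * "on" enables it for both and "off" disables it.
 */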
static int __init control_va_addr_alignment(char *str)
{
	/* guard against enabling this on other CPU families */
	if (va_align.flags < 0)
		return 1;

	if (*str == 0)
		return 1;

	if (*str == '=')
		str++;

	if (!strcmp(str, "32"))
		va_align.flags = ALIGN_VA_32;
	else if (!strcmp(str, "64"))
		va_align.flags = ALIGN_VA_64;
	else if (!strcmp(str, "off"))
		va_align.flags = 0;
	else if (!strcmp(str, "on"))
		va_align.flags = ALIGN_VA_32 | ALIGN_VA_64;
	else
		return 0;

	return 1;
}
__setup("align_va_addr", control_va_addr_alignment);

SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags,
		unsigned long, fd, unsigned long, off)
{
	long error;

	error = -EINVAL;
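	/*
	 * The byte offset must be page aligned: sys_mmap_pgoff() takes
	 * the offset in pages, so it is shifted down by PAGE_SHIFT below.
	 */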
	if (off & ~PAGE_MASK)
		goto out;

	error = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
out:
	return error;
}

static void find_start_end(unsigned long flags, unsigned long *begin,
			   unsigned long *end)
{
	if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
		/*
		 * This is usually used to map code in the small model,
		 * so it needs to be in the first 31 bits of the address
		 * space. Limit it to that. This means we need to move
		 * the unmapped base down for this case. This can give
		 * conflicts with the heap, but we assume that glibc
		 * malloc knows how to fall back to mmap. Give it 1GB
		 * of playground for now. -AK
		 */
		*begin = 0x40000000;
		*end = 0x80000000;
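		/*
		 * With ASLR enabled, shift the start of the 1GB window
		 * up by a page-aligned random amount of up to 32 MiB.
		 */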
		if (current->flags & PF_RANDOMIZE) {
			*begin = randomize_page(*begin, 0x02000000);
		}
	} else {
		*begin = current->mm->mmap_legacy_base;
		*end = TASK_SIZE;
	}
}

unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct vm_unmapped_area_info info;
	unsigned long begin, end;

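	/* MAP_FIXED means the caller wants exactly this address: no search. */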
	if (flags & MAP_FIXED)
		return addr;

	find_start_end(flags, &begin, &end);

	if (len > end)
		return -ENOMEM;

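	/* Honour the caller's hint if it fits below 'end' and is unmapped. */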
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (end - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

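	/*
	 * No usable hint: let vm_unmapped_area() search [begin, end)
	 * bottom-up. File mappings also get the AMD F15h I$-aliasing
	 * alignment via get_align_mask()/get_align_bits().
	 */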
	info.flags = 0;
	info.length = len;
	info.low_limit = begin;
	info.high_limit = end;
	info.align_mask = 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	if (filp) {
		info.align_mask = get_align_mask();
		info.align_offset += get_align_bits();
	}
	return vm_unmapped_area(&info);
}

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	struct vm_unmapped_area_info info;

	/* requested length too big for entire address space */
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		return addr;

	/* for MAP_32BIT mappings we force the legacy mmap base */
	if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
		goto bottomup;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
				(!vma || addr + len <= vma->vm_start))
			return addr;
	}

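	/*
	 * Search top-down in [PAGE_SIZE, mmap_base). As in the bottom-up
	 * path, file mappings get the extra I$-aliasing alignment.
	 */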
	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = mm->mmap_base;
	info.align_mask = 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	if (filp) {
		info.align_mask = get_align_mask();
		info.align_offset += get_align_bits();
	}
	addr = vm_unmapped_area(&info);
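	/*
	 * A page-aligned return value is a found address; error values
	 * such as -ENOMEM have bits set below PAGE_SHIFT.
	 */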
	if (!(addr & ~PAGE_MASK))
		return addr;
	VM_BUG_ON(addr != -ENOMEM);

bottomup:
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
}