xref: /openbmc/linux/arch/x86/kernel/sys_x86_64.c (revision 0ac676fb)
1250c2277SThomas Gleixner #include <linux/errno.h>
2250c2277SThomas Gleixner #include <linux/sched.h>
3250c2277SThomas Gleixner #include <linux/syscalls.h>
4250c2277SThomas Gleixner #include <linux/mm.h>
5250c2277SThomas Gleixner #include <linux/fs.h>
6250c2277SThomas Gleixner #include <linux/smp.h>
7250c2277SThomas Gleixner #include <linux/sem.h>
8250c2277SThomas Gleixner #include <linux/msg.h>
9250c2277SThomas Gleixner #include <linux/shm.h>
10250c2277SThomas Gleixner #include <linux/stat.h>
11250c2277SThomas Gleixner #include <linux/mman.h>
12250c2277SThomas Gleixner #include <linux/file.h>
13250c2277SThomas Gleixner #include <linux/utsname.h>
14250c2277SThomas Gleixner #include <linux/personality.h>
15cc503c1bSJiri Kosina #include <linux/random.h>
16e9c8abb6SGustavo F. Padovan #include <linux/uaccess.h>
17250c2277SThomas Gleixner 
18250c2277SThomas Gleixner #include <asm/ia32.h>
19bbc1f698SJaswinder Singh #include <asm/syscalls.h>
20250c2277SThomas Gleixner 
/*
 * sys_mmap(): the x86-64 mmap system call.  Unlike mmap_pgoff(), the
 * "off" argument here is a byte offset; it must be page aligned and is
 * converted to a page offset before being handed to do_mmap_pgoff().
 */
210ac676fbSJason Baron SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
220ac676fbSJason Baron 		unsigned long, prot, unsigned long, flags,
230ac676fbSJason Baron 		unsigned long, fd, unsigned long, off)
24250c2277SThomas Gleixner {
25250c2277SThomas Gleixner 	long error;
26250c2277SThomas Gleixner 	struct file *file;
27250c2277SThomas Gleixner 
28250c2277SThomas Gleixner 	error = -EINVAL;
	/* The byte offset must be page aligned to be expressible in pages. */
29250c2277SThomas Gleixner 	if (off & ~PAGE_MASK)
30250c2277SThomas Gleixner 		goto out;
31250c2277SThomas Gleixner 
32250c2277SThomas Gleixner 	error = -EBADF;
33250c2277SThomas Gleixner 	file = NULL;
	/* These flags are not honoured from userspace; clear them unconditionally. */
34250c2277SThomas Gleixner 	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
35250c2277SThomas Gleixner 	if (!(flags & MAP_ANONYMOUS)) {
36250c2277SThomas Gleixner 		file = fget(fd);
37250c2277SThomas Gleixner 		if (!file)
38250c2277SThomas Gleixner 			goto out;
39250c2277SThomas Gleixner 	}
	/* do_mmap_pgoff() must run with mmap_sem held for writing. */
40250c2277SThomas Gleixner 	down_write(&current->mm->mmap_sem);
41250c2277SThomas Gleixner 	error = do_mmap_pgoff(file, addr, len, prot, flags, off >> PAGE_SHIFT);
42250c2277SThomas Gleixner 	up_write(&current->mm->mmap_sem);
43250c2277SThomas Gleixner 
	/* Drop the reference taken by fget() above; the mapping holds its own. */
44250c2277SThomas Gleixner 	if (file)
45250c2277SThomas Gleixner 		fput(file);
46250c2277SThomas Gleixner out:
47250c2277SThomas Gleixner 	return error;
48250c2277SThomas Gleixner }
49250c2277SThomas Gleixner 
/*
 * Pick the [*begin, *end) address window for a bottom-up unmapped-area
 * search.  64-bit tasks that pass MAP_32BIT get a window inside the low
 * 2GB (optionally randomized); everyone else searches from
 * TASK_UNMAPPED_BASE up to TASK_SIZE.
 */
50250c2277SThomas Gleixner static void find_start_end(unsigned long flags, unsigned long *begin,
51250c2277SThomas Gleixner 			   unsigned long *end)
52250c2277SThomas Gleixner {
53250c2277SThomas Gleixner 	if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
54cc503c1bSJiri Kosina 		unsigned long new_begin;
55250c2277SThomas Gleixner 		/* This is usually needed to map code in small
56250c2277SThomas Gleixner 		   model, so it needs to be in the first 31bit. Limit
57250c2277SThomas Gleixner 		   it to that.  This means we need to move the
58250c2277SThomas Gleixner 		   unmapped base down for this case. This can give
59250c2277SThomas Gleixner 		   conflicts with the heap, but we assume that glibc
60250c2277SThomas Gleixner 		   malloc knows how to fall back to mmap. Give it 1GB
61250c2277SThomas Gleixner 		   of playground for now. -AK */
62250c2277SThomas Gleixner 		*begin = 0x40000000;
63250c2277SThomas Gleixner 		*end = 0x80000000;
		/* ASLR: shift the window base by up to 32MB when enabled. */
64cc503c1bSJiri Kosina 		if (current->flags & PF_RANDOMIZE) {
65cc503c1bSJiri Kosina 			new_begin = randomize_range(*begin, *begin + 0x02000000, 0);
66cc503c1bSJiri Kosina 			if (new_begin)
67cc503c1bSJiri Kosina 				*begin = new_begin;
68cc503c1bSJiri Kosina 		}
69250c2277SThomas Gleixner 	} else {
70250c2277SThomas Gleixner 		*begin = TASK_UNMAPPED_BASE;
71250c2277SThomas Gleixner 		*end = TASK_SIZE;
72250c2277SThomas Gleixner 	}
73250c2277SThomas Gleixner }
74250c2277SThomas Gleixner 
/*
 * Bottom-up first-fit search for a free virtual address range of "len"
 * bytes.  mm->free_area_cache is the hint of where to resume scanning;
 * mm->cached_hole_size records the largest hole seen below that point,
 * so searches that cannot possibly succeed are skipped.
 */
75250c2277SThomas Gleixner unsigned long
76250c2277SThomas Gleixner arch_get_unmapped_area(struct file *filp, unsigned long addr,
77250c2277SThomas Gleixner 		unsigned long len, unsigned long pgoff, unsigned long flags)
78250c2277SThomas Gleixner {
79250c2277SThomas Gleixner 	struct mm_struct *mm = current->mm;
80250c2277SThomas Gleixner 	struct vm_area_struct *vma;
81250c2277SThomas Gleixner 	unsigned long start_addr;
82250c2277SThomas Gleixner 	unsigned long begin, end;
83250c2277SThomas Gleixner 
	/* MAP_FIXED: the caller's address is returned verbatim here. */
84250c2277SThomas Gleixner 	if (flags & MAP_FIXED)
85250c2277SThomas Gleixner 		return addr;
86250c2277SThomas Gleixner 
87250c2277SThomas Gleixner 	find_start_end(flags, &begin, &end);
88250c2277SThomas Gleixner 
89250c2277SThomas Gleixner 	if (len > end)
90250c2277SThomas Gleixner 		return -ENOMEM;
91250c2277SThomas Gleixner 
	/* Honour a hint address if the range there is inside the window and free. */
92250c2277SThomas Gleixner 	if (addr) {
93250c2277SThomas Gleixner 		addr = PAGE_ALIGN(addr);
94250c2277SThomas Gleixner 		vma = find_vma(mm, addr);
95250c2277SThomas Gleixner 		if (end - len >= addr &&
96250c2277SThomas Gleixner 		    (!vma || addr + len <= vma->vm_start))
97250c2277SThomas Gleixner 			return addr;
98250c2277SThomas Gleixner 	}
	/*
	 * For 32-bit-limited windows: a hole at least this big was already
	 * seen below the cached position, so restart the scan from the
	 * bottom of the window to find it.
	 */
99250c2277SThomas Gleixner 	if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
100250c2277SThomas Gleixner 	    && len <= mm->cached_hole_size) {
101250c2277SThomas Gleixner 		mm->cached_hole_size = 0;
102250c2277SThomas Gleixner 		mm->free_area_cache = begin;
103250c2277SThomas Gleixner 	}
104250c2277SThomas Gleixner 	addr = mm->free_area_cache;
105250c2277SThomas Gleixner 	if (addr < begin)
106250c2277SThomas Gleixner 		addr = begin;
107250c2277SThomas Gleixner 	start_addr = addr;
108250c2277SThomas Gleixner 
109250c2277SThomas Gleixner full_search:
110250c2277SThomas Gleixner 	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
111250c2277SThomas Gleixner 		/* At this point:  (!vma || addr < vma->vm_end). */
112250c2277SThomas Gleixner 		if (end - len < addr) {
113250c2277SThomas Gleixner 			/*
114250c2277SThomas Gleixner 			 * Start a new search - just in case we missed
115250c2277SThomas Gleixner 			 * some holes.
116250c2277SThomas Gleixner 			 */
117250c2277SThomas Gleixner 			if (start_addr != begin) {
118250c2277SThomas Gleixner 				start_addr = addr = begin;
119250c2277SThomas Gleixner 				mm->cached_hole_size = 0;
120250c2277SThomas Gleixner 				goto full_search;
121250c2277SThomas Gleixner 			}
122250c2277SThomas Gleixner 			return -ENOMEM;
123250c2277SThomas Gleixner 		}
124250c2277SThomas Gleixner 		if (!vma || addr + len <= vma->vm_start) {
125250c2277SThomas Gleixner 			/*
126250c2277SThomas Gleixner 			 * Remember the place where we stopped the search:
127250c2277SThomas Gleixner 			 */
128250c2277SThomas Gleixner 			mm->free_area_cache = addr + len;
129250c2277SThomas Gleixner 			return addr;
130250c2277SThomas Gleixner 		}
		/* Track the largest hole encountered so far during this walk. */
131250c2277SThomas Gleixner 		if (addr + mm->cached_hole_size < vma->vm_start)
132250c2277SThomas Gleixner 			mm->cached_hole_size = vma->vm_start - addr;
133250c2277SThomas Gleixner 
134250c2277SThomas Gleixner 		addr = vma->vm_end;
135250c2277SThomas Gleixner 	}
136250c2277SThomas Gleixner }
137250c2277SThomas Gleixner 
138cc503c1bSJiri Kosina 
/*
 * Top-down unmapped-area search: start just below mm->mmap_base and
 * walk downwards through the vma list, falling back to the bottom-up
 * allocator (arch_get_unmapped_area) if nothing fits.
 */
139cc503c1bSJiri Kosina unsigned long
140cc503c1bSJiri Kosina arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
141cc503c1bSJiri Kosina 			  const unsigned long len, const unsigned long pgoff,
142cc503c1bSJiri Kosina 			  const unsigned long flags)
143cc503c1bSJiri Kosina {
144cc503c1bSJiri Kosina 	struct vm_area_struct *vma;
145cc503c1bSJiri Kosina 	struct mm_struct *mm = current->mm;
146cc503c1bSJiri Kosina 	unsigned long addr = addr0;
147cc503c1bSJiri Kosina 
148cc503c1bSJiri Kosina 	/* requested length too big for entire address space */
149cc503c1bSJiri Kosina 	if (len > TASK_SIZE)
150cc503c1bSJiri Kosina 		return -ENOMEM;
151cc503c1bSJiri Kosina 
152cc503c1bSJiri Kosina 	if (flags & MAP_FIXED)
153cc503c1bSJiri Kosina 		return addr;
154cc503c1bSJiri Kosina 
155cc503c1bSJiri Kosina 	/* for MAP_32BIT mappings we force the legacy mmap base */
156cc503c1bSJiri Kosina 	if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
157cc503c1bSJiri Kosina 		goto bottomup;
158cc503c1bSJiri Kosina 
159cc503c1bSJiri Kosina 	/* requesting a specific address */
160cc503c1bSJiri Kosina 	if (addr) {
161cc503c1bSJiri Kosina 		addr = PAGE_ALIGN(addr);
162cc503c1bSJiri Kosina 		vma = find_vma(mm, addr);
163cc503c1bSJiri Kosina 		if (TASK_SIZE - len >= addr &&
164cc503c1bSJiri Kosina 				(!vma || addr + len <= vma->vm_start))
165cc503c1bSJiri Kosina 			return addr;
166cc503c1bSJiri Kosina 	}
167cc503c1bSJiri Kosina 
168cc503c1bSJiri Kosina 	/* check if free_area_cache is useful for us */
169cc503c1bSJiri Kosina 	if (len <= mm->cached_hole_size) {
170cc503c1bSJiri Kosina 		mm->cached_hole_size = 0;
171cc503c1bSJiri Kosina 		mm->free_area_cache = mm->mmap_base;
172cc503c1bSJiri Kosina 	}
173cc503c1bSJiri Kosina 
174cc503c1bSJiri Kosina 	/* either no address requested or can't fit in requested address hole */
175cc503c1bSJiri Kosina 	addr = mm->free_area_cache;
176cc503c1bSJiri Kosina 
177cc503c1bSJiri Kosina 	/* make sure it can fit in the remaining address space */
178cc503c1bSJiri Kosina 	if (addr > len) {
179cc503c1bSJiri Kosina 		vma = find_vma(mm, addr-len);
		/* candidate range is [addr-len, addr); it fits if it ends at or before the vma */
180cc503c1bSJiri Kosina 		if (!vma || addr <= vma->vm_start)
181cc503c1bSJiri Kosina 			/* remember the address as a hint for next time */
182e9c8abb6SGustavo F. Padovan 			return mm->free_area_cache = addr-len;
183cc503c1bSJiri Kosina 	}
184cc503c1bSJiri Kosina 
185cc503c1bSJiri Kosina 	if (mm->mmap_base < len)
186cc503c1bSJiri Kosina 		goto bottomup;
187cc503c1bSJiri Kosina 
188cc503c1bSJiri Kosina 	addr = mm->mmap_base-len;
189cc503c1bSJiri Kosina 
190cc503c1bSJiri Kosina 	do {
191cc503c1bSJiri Kosina 		/*
192cc503c1bSJiri Kosina 		 * Lookup failure means no vma is above this address,
193cc503c1bSJiri Kosina 		 * else if new region fits below vma->vm_start,
194cc503c1bSJiri Kosina 		 * return with success:
195cc503c1bSJiri Kosina 		 */
196cc503c1bSJiri Kosina 		vma = find_vma(mm, addr);
197cc503c1bSJiri Kosina 		if (!vma || addr+len <= vma->vm_start)
198cc503c1bSJiri Kosina 			/* remember the address as a hint for next time */
199e9c8abb6SGustavo F. Padovan 			return mm->free_area_cache = addr;
200cc503c1bSJiri Kosina 
201cc503c1bSJiri Kosina 		/* remember the largest hole we saw so far */
202cc503c1bSJiri Kosina 		if (addr + mm->cached_hole_size < vma->vm_start)
203cc503c1bSJiri Kosina 			mm->cached_hole_size = vma->vm_start - addr;
204cc503c1bSJiri Kosina 
205cc503c1bSJiri Kosina 		/* try just below the current vma->vm_start */
206cc503c1bSJiri Kosina 		addr = vma->vm_start-len;
	/* keep walking down while there is still room below this vma for len bytes */
207cc503c1bSJiri Kosina 	} while (len < vma->vm_start);
208cc503c1bSJiri Kosina 
209cc503c1bSJiri Kosina bottomup:
210cc503c1bSJiri Kosina 	/*
211cc503c1bSJiri Kosina 	 * A failed mmap() very likely causes application failure,
212cc503c1bSJiri Kosina 	 * so fall back to the bottom-up function here. This scenario
213cc503c1bSJiri Kosina 	 * can happen with large stack limits and large mmap()
214cc503c1bSJiri Kosina 	 * allocations.
215cc503c1bSJiri Kosina 	 */
216cc503c1bSJiri Kosina 	mm->cached_hole_size = ~0UL;
217cc503c1bSJiri Kosina 	mm->free_area_cache = TASK_UNMAPPED_BASE;
218cc503c1bSJiri Kosina 	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
219cc503c1bSJiri Kosina 	/*
220cc503c1bSJiri Kosina 	 * Restore the topdown base:
221cc503c1bSJiri Kosina 	 */
222cc503c1bSJiri Kosina 	mm->free_area_cache = mm->mmap_base;
223cc503c1bSJiri Kosina 	mm->cached_hole_size = ~0UL;
224cc503c1bSJiri Kosina 
225cc503c1bSJiri Kosina 	return addr;
226cc503c1bSJiri Kosina }
227cc503c1bSJiri Kosina 
228cc503c1bSJiri Kosina 
/*
 * sys_uname: copy the current utsname to userspace under uts_sem.
 * PER_LINUX32 personalities get "i686" reported as the machine name.
 * Any copy_to_user() failure (err accumulates nonzero) yields -EFAULT.
 */
2290ac676fbSJason Baron SYSCALL_DEFINE1(uname, struct new_utsname __user *, name)
230250c2277SThomas Gleixner {
231250c2277SThomas Gleixner 	int err;
232250c2277SThomas Gleixner 	down_read(&uts_sem);
233250c2277SThomas Gleixner 	err = copy_to_user(name, utsname(), sizeof(*name));
234250c2277SThomas Gleixner 	up_read(&uts_sem);
	/* 5 bytes: "i686" plus its terminating NUL. */
235250c2277SThomas Gleixner 	if (personality(current->personality) == PER_LINUX32)
236250c2277SThomas Gleixner 		err |= copy_to_user(&name->machine, "i686", 5);
237250c2277SThomas Gleixner 	return err ? -EFAULT : 0;
238250c2277SThomas Gleixner }
239