xref: /openbmc/linux/arch/x86/kernel/sys_x86_64.c (revision b627b4ed)
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/uaccess.h>

#include <asm/ia32.h>
#include <asm/syscalls.h>

asmlinkage long sys_mmap(unsigned long addr, unsigned long len,
		unsigned long prot, unsigned long flags,
		unsigned long fd, unsigned long off)
{
	long error;
	struct file *file;

	error = -EINVAL;
	if (off & ~PAGE_MASK)
		goto out;

	error = -EBADF;
	file = NULL;
	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
	if (!(flags & MAP_ANONYMOUS)) {
		file = fget(fd);
		if (!file)
			goto out;
	}
	down_write(&current->mm->mmap_sem);
	error = do_mmap_pgoff(file, addr, len, prot, flags, off >> PAGE_SHIFT);
	up_write(&current->mm->mmap_sem);

	if (file)
		fput(file);
out:
	return error;
}
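/*
 * Illustrative only, not part of the original file: a minimal sketch of a
 * userspace request that ends up in this entry point, assuming the standard
 * mmap() prototype from <sys/mman.h>.
 *
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 * With MAP_ANONYMOUS the fd is ignored, so the function above proceeds with
 * file == NULL; a non-page-aligned offset would have returned -EINVAL before
 * do_mmap_pgoff() was ever called.
 */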

static void find_start_end(unsigned long flags, unsigned long *begin,
			   unsigned long *end)
{
	if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
		unsigned long new_begin;
		/* This is usually used to map code in the small code
		   model, so it needs to be in the first 31 bits. Limit
		   it to that.  This means we need to move the
		   unmapped base down for this case. This can give
		   conflicts with the heap, but we assume that glibc
		   malloc knows how to fall back to mmap. Give it 1GB
		   of playground for now. -AK */
		*begin = 0x40000000;
		*end = 0x80000000;
		if (current->flags & PF_RANDOMIZE) {
			new_begin = randomize_range(*begin, *begin + 0x02000000, 0);
			if (new_begin)
				*begin = new_begin;
		}
	} else {
		*begin = TASK_UNMAPPED_BASE;
		*end = TASK_SIZE;
	}
}
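/*
 * Illustrative only, not part of the original file: for a 64-bit task that
 * passes MAP_32BIT, the search window becomes [0x40000000, 0x80000000), with
 * the start possibly shifted up by as much as 32MB when PF_RANDOMIZE is set;
 * every other request searches [TASK_UNMAPPED_BASE, TASK_SIZE).
 */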

unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;
	unsigned long begin, end;

	if (flags & MAP_FIXED)
		return addr;

	find_start_end(flags, &begin, &end);

	if (len > end)
		return -ENOMEM;

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (end - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
	if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
	    && len <= mm->cached_hole_size) {
		mm->cached_hole_size = 0;
		mm->free_area_cache = begin;
	}
	addr = mm->free_area_cache;
	if (addr < begin)
		addr = begin;
	start_addr = addr;

full_search:
	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point:  (!vma || addr < vma->vm_end). */
		if (end - len < addr) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != begin) {
				start_addr = addr = begin;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (!vma || addr + len <= vma->vm_start) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		addr = vma->vm_end;
	}
}
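/*
 * Illustrative only, not part of the original file: a worked example of the
 * bottom-up first-fit loop above. With begin = 0x40000000, an existing
 * mapping at [0x40000000, 0x40100000) and len = 0x2000, the loop skips to
 * addr = 0x40100000 and returns it provided the next vma starts at or above
 * 0x40102000; free_area_cache is then left at 0x40102000, so the next search
 * can resume just past the new mapping.
 */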


unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;

	/* requested length too big for entire address space */
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		return addr;
	/* for MAP_32BIT mappings we force the legacy mmap base */
	if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
		goto bottomup;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
				(!vma || addr + len <= vma->vm_start))
			return addr;
	}

	/* check if free_area_cache is useful for us */
	if (len <= mm->cached_hole_size) {
		mm->cached_hole_size = 0;
		mm->free_area_cache = mm->mmap_base;
	}

	/* either no address requested or can't fit in requested address hole */
	addr = mm->free_area_cache;

	/* make sure it can fit in the remaining address space */
	if (addr > len) {
		vma = find_vma(mm, addr-len);
		if (!vma || addr <= vma->vm_start)
			/* remember the address as a hint for next time */
			return mm->free_area_cache = addr-len;
	}

	if (mm->mmap_base < len)
		goto bottomup;

	addr = mm->mmap_base-len;

	do {
		/*
		 * Lookup failure means no vma is above this address,
		 * else if new region fits below vma->vm_start,
		 * return with success:
		 */
		vma = find_vma(mm, addr);
		if (!vma || addr+len <= vma->vm_start)
			/* remember the address as a hint for next time */
			return mm->free_area_cache = addr;

		/* remember the largest hole we saw so far */
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		/* try just below the current vma->vm_start */
		addr = vma->vm_start-len;
	} while (len < vma->vm_start);

bottomup:
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	mm->cached_hole_size = ~0UL;
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
	/*
	 * Restore the topdown base:
	 */
	mm->free_area_cache = mm->mmap_base;
	mm->cached_hole_size = ~0UL;

	return addr;
}
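/*
 * Illustrative only, not part of the original file: a worked example of the
 * top-down walk above, with hypothetical addresses. With mmap_base =
 * 0x7ffff7a00000 and len = 0x4000, the first candidate is 0x7ffff79fc000; if
 * find_vma() reports nothing at or above that address, it is returned and
 * cached in free_area_cache, otherwise the search steps down to just below
 * each vma->vm_start until a hole fits or the bottom-up fallback takes over.
 */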


asmlinkage long sys_uname(struct new_utsname __user *name)
{
	int err;
	down_read(&uts_sem);
	err = copy_to_user(name, utsname(), sizeof(*name));
	up_read(&uts_sem);
	if (personality(current->personality) == PER_LINUX32)
		err |= copy_to_user(&name->machine, "i686", 5);
	return err ? -EFAULT : 0;
}
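/*
 * Illustrative only, not part of the original file: when a task runs with
 * the PER_LINUX32 personality (for instance under a linux32-style wrapper),
 * the copy above is followed by overwriting the machine field, so a
 * userspace caller along the lines of
 *
 *	struct utsname u;
 *	uname(&u);	// u.machine now reads "i686" rather than "x86_64"
 *
 * sees an i686 machine string even though the kernel itself is 64-bit.
 */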
239