xref: /openbmc/linux/arch/x86/kernel/sys_x86_64.c (revision 545e4006)
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>
#include <linux/personality.h>
#include <linux/random.h>

#include <asm/uaccess.h>
#include <asm/ia32.h>

asmlinkage long sys_mmap(unsigned long addr, unsigned long len,
		unsigned long prot, unsigned long flags,
		unsigned long fd, unsigned long off)
{
	long error;
	struct file *file;

	error = -EINVAL;
	if (off & ~PAGE_MASK)
		goto out;

	error = -EBADF;
	file = NULL;
	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
	if (!(flags & MAP_ANONYMOUS)) {
		file = fget(fd);
		if (!file)
			goto out;
	}
	down_write(&current->mm->mmap_sem);
	error = do_mmap_pgoff(file, addr, len, prot, flags, off >> PAGE_SHIFT);
	up_write(&current->mm->mmap_sem);

	if (file)
		fput(file);
out:
	return error;
}
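
/*
 * Illustration (not part of the original file): the off & ~PAGE_MASK
 * check above rejects file offsets that are not page aligned. A minimal
 * userspace sketch, assuming a 4 KiB page size and an already-open
 * file descriptor fd:
 *
 *	#include <sys/mman.h>
 *
 *	void *ok  = mmap(NULL, 4096, PROT_READ, MAP_PRIVATE, fd, 4096);
 *	void *bad = mmap(NULL, 4096, PROT_READ, MAP_PRIVATE, fd, 100);
 *	// ok is a usable mapping; bad is MAP_FAILED with errno == EINVAL.
 */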

static void find_start_end(unsigned long flags, unsigned long *begin,
			   unsigned long *end)
{
	if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
		unsigned long new_begin;
		/* This is usually used to map code in the small code
		   model, so it needs to be in the first 31 bits. Limit
		   it to that.  This means we need to move the
		   unmapped base down for this case. This can give
		   conflicts with the heap, but we assume that glibc
		   malloc knows how to fall back to mmap. Give it 1GB
		   of playground for now. -AK */
		*begin = 0x40000000;
		*end = 0x80000000;
		if (current->flags & PF_RANDOMIZE) {
			new_begin = randomize_range(*begin, *begin + 0x02000000, 0);
			if (new_begin)
				*begin = new_begin;
		}
	} else {
		*begin = TASK_UNMAPPED_BASE;
		*end = TASK_SIZE;
	}
}
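
/*
 * Illustration (not part of the original file): a 64-bit process can
 * request an address inside the window chosen above by passing
 * MAP_32BIT. A hedged userspace sketch (on glibc, MAP_32BIT requires
 * _GNU_SOURCE):
 *
 *	#define _GNU_SOURCE
 *	#include <sys/mman.h>
 *
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_32BIT, -1, 0);
 *	// On success, (unsigned long)p < 0x80000000: the mapping lands
 *	// in the first 31 bits of the address space.
 */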

unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;
	unsigned long begin, end;

	if (flags & MAP_FIXED)
		return addr;

	find_start_end(flags, &begin, &end);

	if (len > end)
		return -ENOMEM;

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (end - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
	/* If the cached largest hole is too small for this request,
	   restart the search from the bottom of the window. */
	if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
	    && len <= mm->cached_hole_size) {
		mm->cached_hole_size = 0;
		mm->free_area_cache = begin;
	}
	addr = mm->free_area_cache;
	if (addr < begin)
		addr = begin;
	start_addr = addr;

full_search:
	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point:  (!vma || addr < vma->vm_end). */
		if (end - len < addr) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != begin) {
				start_addr = addr = begin;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (!vma || addr + len <= vma->vm_start) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		/* Remember the largest hole we have seen so far. */
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		addr = vma->vm_end;
	}
}
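
/*
 * Illustration (not part of the original file): the full_search loop
 * above is a bottom-up first-fit scan over the VMA list. A simplified,
 * hypothetical first_fit() helper sketching the same idea over `n`
 * occupied ranges sorted by start address (callers guarantee
 * len <= end, as arch_get_unmapped_area does; failure is returned
 * kernel-style as -ENOMEM in an unsigned long):
 *
 *	struct range { unsigned long start, end; };
 *
 *	static unsigned long first_fit(const struct range *r, int n,
 *			unsigned long begin, unsigned long end,
 *			unsigned long len)
 *	{
 *		unsigned long addr = begin;
 *		int i;
 *
 *		for (i = 0; i < n; i++) {
 *			if (end - len < addr)
 *				return -ENOMEM;		// ran past the window
 *			if (addr + len <= r[i].start)
 *				return addr;		// hole before r[i] fits
 *			if (r[i].end > addr)
 *				addr = r[i].end;	// skip past r[i]
 *		}
 *		return (end - len >= addr) ? addr : -ENOMEM;
 *	}
 */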


unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;

	/* requested length too big for entire address space */
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		return addr;

	/* for MAP_32BIT mappings we force the legacy mmap base */
	if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
		goto bottomup;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
				(!vma || addr + len <= vma->vm_start))
			return addr;
	}

	/* check if free_area_cache is useful for us */
	if (len <= mm->cached_hole_size) {
		mm->cached_hole_size = 0;
		mm->free_area_cache = mm->mmap_base;
	}

	/* either no address requested or can't fit in requested address hole */
	addr = mm->free_area_cache;

	/* make sure it can fit in the remaining address space */
	if (addr > len) {
		vma = find_vma(mm, addr-len);
		if (!vma || addr <= vma->vm_start)
			/* remember the address as a hint for next time */
			return (mm->free_area_cache = addr-len);
	}

	if (mm->mmap_base < len)
		goto bottomup;

	addr = mm->mmap_base-len;

	do {
		/*
		 * Lookup failure means no vma is above this address,
		 * else if new region fits below vma->vm_start,
		 * return with success:
		 */
		vma = find_vma(mm, addr);
		if (!vma || addr+len <= vma->vm_start)
			/* remember the address as a hint for next time */
			return (mm->free_area_cache = addr);

		/* remember the largest hole we saw so far */
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		/* try just below the current vma->vm_start */
		addr = vma->vm_start-len;
	} while (len < vma->vm_start);	/* stop before vm_start - len underflows */

bottomup:
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	mm->cached_hole_size = ~0UL;
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
	/*
	 * Restore the topdown base:
	 */
	mm->free_area_cache = mm->mmap_base;
	mm->cached_hole_size = ~0UL;

	return addr;
}
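
/*
 * Illustration (not part of the original file): the do/while loop above
 * is the top-down mirror of that bottom-up scan. A simplified,
 * hypothetical top_down_fit() helper sketching the same idea, given `n`
 * occupied ranges sorted by start address and a search ceiling `base`:
 *
 *	struct range { unsigned long start, end; };
 *
 *	static unsigned long top_down_fit(const struct range *r, int n,
 *			unsigned long base, unsigned long len)
 *	{
 *		unsigned long ceiling = base;
 *		int i;
 *
 *		for (i = n - 1; i >= 0; i--) {
 *			if (ceiling >= len && r[i].end <= ceiling - len)
 *				return ceiling - len;	// hole above r[i] fits
 *			if (r[i].start < ceiling)
 *				ceiling = r[i].start;	// drop below r[i]
 *		}
 *		return (ceiling >= len) ? ceiling - len : -ENOMEM;
 *	}
 */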


asmlinkage long sys_uname(struct new_utsname __user *name)
{
	int err;
	down_read(&uts_sem);
	err = copy_to_user(name, utsname(), sizeof (*name));
	up_read(&uts_sem);
	if (personality(current->personality) == PER_LINUX32)
		err |= copy_to_user(&name->machine, "i686", 5);
	return err ? -EFAULT : 0;
}
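
/*
 * Illustration (not part of the original file): the PER_LINUX32 branch
 * above is why uname(2) reports an i686 machine when a process runs
 * under the 32-bit personality, e.g. via the linux32 wrapper:
 *
 *	$ uname -m
 *	x86_64
 *	$ linux32 uname -m
 *	i686
 */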