/*
 *  memory management system call shims and definitions
 *
 *  Copyright (c) 2013-15 Stacey D. Son
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

/*
 * Copyright (c) 1982, 1986, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef BSD_USER_BSD_MEM_H
#define BSD_USER_BSD_MEM_H

#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/mman.h>
#include <sys/shm.h>
#include <fcntl.h>

#include "qemu-bsd.h"
#include "exec/page-protection.h"

extern struct bsd_shm_regions bsd_shm_regions[];
extern abi_ulong target_brk;
extern abi_ulong initial_target_brk;

/* mmap(2) */
static inline abi_long do_bsd_mmap(void *cpu_env, abi_long arg1, abi_long arg2,
    abi_long arg3, abi_long arg4, abi_long arg5, abi_long arg6, abi_long arg7,
    abi_long arg8)
{
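    /*
     * On targets that pass 64-bit values in aligned register pairs, the
     * 64-bit file offset arrives in arg7/arg8 rather than arg6/arg7, so
     * shift the arguments down before combining the offset halves below.
     */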
    if (regpairs_aligned(cpu_env) != 0) {
        arg6 = arg7;
        arg7 = arg8;
    }
    return get_errno(target_mmap(arg1, arg2, arg3,
                                 target_to_host_bitmask(arg4, mmap_flags_tbl),
                                 arg5, target_arg64(arg6, arg7)));
}

/* munmap(2) */
static inline abi_long do_bsd_munmap(abi_long arg1, abi_long arg2)
{
    return get_errno(target_munmap(arg1, arg2));
}

/* mprotect(2) */
static inline abi_long do_bsd_mprotect(abi_long arg1, abi_long arg2,
        abi_long arg3)
{
    return get_errno(target_mprotect(arg1, arg2, arg3));
}

/* msync(2) */
static inline abi_long do_bsd_msync(abi_long addr, abi_long len, abi_long flags)
{
    if (!guest_range_valid_untagged(addr, len)) {
        /* It seems odd, but POSIX wants this to be ENOMEM */
        return -TARGET_ENOMEM;
    }

    return get_errno(msync(g2h_untagged(addr), len, flags));
}

/* mlock(2) */
static inline abi_long do_bsd_mlock(abi_long arg1, abi_long arg2)
{
    if (!guest_range_valid_untagged(arg1, arg2)) {
        return -TARGET_EINVAL;
    }
    return get_errno(mlock(g2h_untagged(arg1), arg2));
}

/* munlock(2) */
static inline abi_long do_bsd_munlock(abi_long arg1, abi_long arg2)
{
    if (!guest_range_valid_untagged(arg1, arg2)) {
        return -TARGET_EINVAL;
    }
    return get_errno(munlock(g2h_untagged(arg1), arg2));
}

/* mlockall(2) */
static inline abi_long do_bsd_mlockall(abi_long arg1)
{
    return get_errno(mlockall(arg1));
}

/* munlockall(2) */
static inline abi_long do_bsd_munlockall(void)
{
    return get_errno(munlockall());
}

/* madvise(2) */
static inline abi_long do_bsd_madvise(abi_long arg1, abi_long arg2,
        abi_long arg3)
{
    abi_ulong len;
    int ret = 0;
    abi_long start = arg1;
    abi_long len_in = arg2;
    abi_long advice = arg3;

    if (start & ~TARGET_PAGE_MASK) {
        return -TARGET_EINVAL;
    }
    if (len_in == 0) {
        return 0;
    }
    len = TARGET_PAGE_ALIGN(len_in);
    if (len == 0 || !guest_range_valid_untagged(start, len)) {
        return -TARGET_EINVAL;
    }

    /*
     * Most advice values are hints, so ignoring them and returning
     * success is OK.
     *
     * However, some advice values, such as MADV_DONTNEED, are not hints
     * and need to be emulated.
     *
     * A straight passthrough for those may not be safe because qemu
     * sometimes turns private file-backed mappings into anonymous
     * mappings. If all guest pages have PAGE_PASSTHROUGH set, mappings
     * have the same semantics for the host as for the guest.
     *
     * MADV_DONTNEED is passed through if possible. If passthrough isn't
     * possible, we nevertheless (wrongly!) return success, which is
     * broken, but some userspace programs fail to work otherwise.
     * Completely implementing such emulation is quite complicated,
     * though.
     */
    mmap_lock();
    switch (advice) {
    case MADV_DONTNEED:
        if (page_check_range(start, len, PAGE_PASSTHROUGH)) {
            ret = get_errno(madvise(g2h_untagged(start), len, advice));
            if (ret == 0) {
                page_reset_target_data(start, start + len - 1);
            }
        }
        break;
    }
    mmap_unlock();

    return ret;
}

/* minherit(2) */
static inline abi_long do_bsd_minherit(abi_long addr, abi_long len,
        abi_long inherit)
{
    return get_errno(minherit(g2h_untagged(addr), len, inherit));
}

/* mincore(2) */
static inline abi_long do_bsd_mincore(abi_ulong target_addr, abi_ulong len,
        abi_ulong target_vec)
{
    abi_long ret;
    void *p;
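    /* mincore() fills in one status byte per page of the range. */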
    abi_ulong vec_len = DIV_ROUND_UP(len, TARGET_PAGE_SIZE);

    if (!guest_range_valid_untagged(target_addr, len)
        || !page_check_range(target_addr, len, PAGE_VALID)) {
        return -TARGET_EFAULT;
    }

    p = lock_user(VERIFY_WRITE, target_vec, vec_len, 0);
    if (p == NULL) {
        return -TARGET_EFAULT;
    }
    ret = get_errno(mincore(g2h_untagged(target_addr), len, p));
    unlock_user(p, target_vec, vec_len);

    return ret;
}

/* do_obreak() must return target values and target errnos. */
static inline abi_long do_obreak(abi_ulong brk_val)
{
    abi_long mapped_addr;
    abi_ulong new_brk;
    abi_ulong old_brk;

    /* brk pointers are always untagged */

    /* Do not allow the break to shrink below the initial brk value. */
    if (brk_val < initial_target_brk) {
        return target_brk;
    }

    new_brk = TARGET_PAGE_ALIGN(brk_val);
    old_brk = TARGET_PAGE_ALIGN(target_brk);

    /* The new and old break might be on the same page. */
    if (new_brk == old_brk) {
        target_brk = brk_val;
        return target_brk;
    }

    /* Release heap if necessary. */
    if (new_brk < old_brk) {
        target_munmap(new_brk, old_brk - new_brk);

        target_brk = brk_val;
        return target_brk;
    }

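    /*
     * Try to grow the heap in place. MAP_EXCL makes the mapping fail
     * rather than clobber anything already mapped in the new range.
     */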
    mapped_addr = target_mmap(old_brk, new_brk - old_brk,
                              PROT_READ | PROT_WRITE,
                              MAP_FIXED | MAP_EXCL | MAP_ANON | MAP_PRIVATE,
                              -1, 0);

    if (mapped_addr == old_brk) {
        target_brk = brk_val;
        return target_brk;
    }

    /* For everything else, return the previous break. */
    return target_brk;
}

/* shm_open(2) */
static inline abi_long do_bsd_shm_open(abi_ulong arg1, abi_long arg2,
        abi_long arg3)
{
    int ret;
    void *p;

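    /*
     * SHM_ANON is a magic pointer constant, not a real path, so it must
     * be passed through as-is rather than copied in as a guest string.
     */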
    if (arg1 == (uintptr_t)SHM_ANON) {
        p = SHM_ANON;
    } else {
        p = lock_user_string(arg1);
        if (p == NULL) {
            return -TARGET_EFAULT;
        }
    }
    ret = get_errno(shm_open(p, target_to_host_bitmask(arg2, fcntl_flags_tbl),
                             arg3));

    if (p != SHM_ANON) {
        unlock_user(p, arg1, 0);
    }

    return ret;
}

/* shm_unlink(2) */
static inline abi_long do_bsd_shm_unlink(abi_ulong arg1)
{
    int ret;
    void *p;

    p = lock_user_string(arg1);
    if (p == NULL) {
        return -TARGET_EFAULT;
    }
    ret = get_errno(shm_unlink(p)); /* XXX path(p)? */
    unlock_user(p, arg1, 0);

    return ret;
}

/* shmget(2) */
static inline abi_long do_bsd_shmget(abi_long arg1, abi_ulong arg2,
        abi_long arg3)
{
    return get_errno(shmget(arg1, arg2, arg3));
}

/* shmctl(2) */
static inline abi_long do_bsd_shmctl(abi_long shmid, abi_long cmd,
        abi_ulong buff)
{
    struct shmid_ds dsarg;
    abi_long ret = -TARGET_EINVAL;

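    /* Only the low byte of cmd selects the IPC operation. */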
    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
        if (target_to_host_shmid_ds(&dsarg, buff)) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buff, &dsarg)) {
            return -TARGET_EFAULT;
        }
        break;

    case IPC_SET:
        if (target_to_host_shmid_ds(&dsarg, buff)) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        break;

    case IPC_RMID:
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;

    default:
        ret = -TARGET_EINVAL;
        break;
    }

    return ret;
}

/* shmat(2) */
static inline abi_long do_bsd_shmat(int shmid, abi_ulong shmaddr, int shmflg)
{
    abi_ulong raddr;
    abi_long ret;
    struct shmid_ds shm_info;

    /* Find out the length of the shared memory segment. */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* Can't get the length */
        return ret;
    }

    if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
        return -TARGET_EINVAL;
    }

    WITH_MMAP_LOCK_GUARD() {
        void *host_raddr;

        if (shmaddr) {
            host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
        } else {
            abi_ulong mmap_start;

            mmap_start = mmap_find_vma(0, shm_info.shm_segsz);

            if (mmap_start == -1) {
                return -TARGET_ENOMEM;
            }
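            /*
             * mmap_find_vma() may hand back an address the host already
             * has a placeholder mapping at, so use SHM_REMAP to let
             * shmat() replace it.
             */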
            host_raddr = shmat(shmid, g2h_untagged(mmap_start),
                               shmflg | SHM_REMAP);
        }

        if (host_raddr == (void *)-1) {
            return get_errno(-1);
        }
        raddr = h2g(host_raddr);

        page_set_flags(raddr, raddr + shm_info.shm_segsz - 1,
                       PAGE_VALID | PAGE_RESET | PAGE_READ |
                       (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));

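        /* Remember the region so do_bsd_shmdt() can find its size later. */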
        for (int i = 0; i < N_BSD_SHM_REGIONS; i++) {
            if (bsd_shm_regions[i].start == 0) {
                bsd_shm_regions[i].start = raddr;
                bsd_shm_regions[i].size = shm_info.shm_segsz;
                break;
            }
        }
    }

    return raddr;
}

/* shmdt(2) */
static inline abi_long do_bsd_shmdt(abi_ulong shmaddr)
{
    abi_long ret;

    WITH_MMAP_LOCK_GUARD() {
        int i;

        for (i = 0; i < N_BSD_SHM_REGIONS; ++i) {
            if (bsd_shm_regions[i].start == shmaddr) {
                break;
            }
        }

        if (i == N_BSD_SHM_REGIONS) {
            return -TARGET_EINVAL;
        }

        ret = get_errno(shmdt(g2h_untagged(shmaddr)));
        if (ret == 0) {
            abi_ulong size = bsd_shm_regions[i].size;

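            /*
             * Free the bookkeeping slot, drop the guest page flags, and
             * keep the address range reserved so it is not reused.
             */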
            bsd_shm_regions[i].start = 0;
            page_set_flags(shmaddr, shmaddr + size - 1, 0);
            mmap_reserve(shmaddr, size);
        }
    }

    return ret;
}

static inline abi_long do_bsd_vadvise(void)
{
    /* See sys_ovadvise() in vm_unix.c */
    return -TARGET_EINVAL;
}

static inline abi_long do_bsd_sbrk(void)
{
    /* See sys_sbrk() in vm_mmap.c */
    return -TARGET_EOPNOTSUPP;
}

static inline abi_long do_bsd_sstk(void)
{
    /* See sys_sstk() in vm_mmap.c */
    return -TARGET_EOPNOTSUPP;
}

#endif /* BSD_USER_BSD_MEM_H */