xref: /openbmc/qemu/bsd-user/mmap.c (revision da34e65c)
/*
 *  mmap support for qemu
 *
 *  Copyright (c) 2003 - 2008 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include <sys/mman.h>

#include "qemu.h"
#include "qemu-common.h"
#include "bsd-mman.h"

//#define DEBUG_MMAP

#if defined(CONFIG_USE_NPTL)
pthread_mutex_t mmap_mutex;
static int __thread mmap_lock_count;

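/*
 * The mmap lock may nest within a thread: only the outermost mmap_lock()
 * takes the global mutex and only the matching outermost mmap_unlock()
 * releases it, tracked by the per-thread recursion counter above.
 */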
void mmap_lock(void)
{
    if (mmap_lock_count++ == 0) {
        pthread_mutex_lock(&mmap_mutex);
    }
}

void mmap_unlock(void)
{
    if (--mmap_lock_count == 0) {
        pthread_mutex_unlock(&mmap_mutex);
    }
}

/* Grab lock to make sure things are in a consistent state after fork().  */
void mmap_fork_start(void)
{
    if (mmap_lock_count)
        abort();
    pthread_mutex_lock(&mmap_mutex);
}

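/* The mutex is held across fork(): the child's copy is therefore in a
   locked state owned by a thread it does not have, so it must be
   reinitialised rather than unlocked. */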
void mmap_fork_end(int child)
{
    if (child)
        pthread_mutex_init(&mmap_mutex, NULL);
    else
        pthread_mutex_unlock(&mmap_mutex);
}
#else
/* We aren't thread-safe to start with, so no need to worry about locking.  */
void mmap_lock(void)
{
}

void mmap_unlock(void)
{
}
#endif

/* NOTE: all the constants are the HOST ones, but addresses are target. */
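/*
 * When qemu_host_page_size > TARGET_PAGE_SIZE, a target range may cover
 * the host pages at either end only partially.  Those boundary host
 * pages are mprotect()ed with the union of the requested protection and
 * the flags already recorded for their other target pages; only the
 * fully covered middle gets 'prot' directly.  Illustrative figures only:
 * with 4K target pages on a 16K host page, protecting [0x7000, 0xc000)
 * touches host pages [0x4000, 0x8000) and [0x8000, 0xc000); the first
 * keeps the existing flags of [0x4000, 0x7000) OR'ed in, the second is
 * covered entirely and gets 'prot' as requested.
 */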
int target_mprotect(abi_ulong start, abi_ulong len, int prot)
{
    abi_ulong end, host_start, host_end, addr;
    int prot1, ret;

#ifdef DEBUG_MMAP
    printf("mprotect: start=0x" TARGET_FMT_lx
           " len=0x" TARGET_FMT_lx " prot=%c%c%c\n", start, len,
           prot & PROT_READ ? 'r' : '-',
           prot & PROT_WRITE ? 'w' : '-',
           prot & PROT_EXEC ? 'x' : '-');
#endif

    if ((start & ~TARGET_PAGE_MASK) != 0)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (end < start)
        return -EINVAL;
    prot &= PROT_READ | PROT_WRITE | PROT_EXEC;
    if (len == 0)
        return 0;

    mmap_lock();
    host_start = start & qemu_host_page_mask;
    host_end = HOST_PAGE_ALIGN(end);
    if (start > host_start) {
        /* handle host page containing start */
        prot1 = prot;
        for(addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        if (host_end == host_start + qemu_host_page_size) {
            for(addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(addr);
            }
            end = host_end;
        }
        ret = mprotect(g2h(host_start), qemu_host_page_size, prot1 & PAGE_BITS);
        if (ret != 0)
            goto error;
        host_start += qemu_host_page_size;
    }
    if (end < host_end) {
        prot1 = prot;
        for(addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        ret = mprotect(g2h(host_end - qemu_host_page_size), qemu_host_page_size,
                       prot1 & PAGE_BITS);
        if (ret != 0)
            goto error;
        host_end -= qemu_host_page_size;
    }

    /* handle the pages in the middle */
    if (host_start < host_end) {
        ret = mprotect(g2h(host_start), host_end - host_start, prot);
        if (ret != 0)
            goto error;
    }
    page_set_flags(start, start + len, prot | PAGE_VALID);
    mmap_unlock();
    return 0;
error:
    mmap_unlock();
    return ret;
}

/* map an incomplete host page */
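/*
 * mmap_frag() is used by target_mmap() for a host page that is only
 * partially covered by the requested target range [start, end).  The
 * target pages of that host page lying outside the range must keep
 * their existing protection, so the host page ends up with the union
 * of the old and new protections, and file-backed contents are
 * pread() into place rather than mmap()ed over the whole page.
 */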
static int mmap_frag(abi_ulong real_start,
                     abi_ulong start, abi_ulong end,
                     int prot, int flags, int fd, abi_ulong offset)
{
    abi_ulong real_end, addr;
    void *host_start;
    int prot1, prot_new;

    real_end = real_start + qemu_host_page_size;
    host_start = g2h(real_start);

    /* get the protection of the target pages outside the mapping */
    prot1 = 0;
    for(addr = real_start; addr < real_end; addr++) {
        if (addr < start || addr >= end)
            prot1 |= page_get_flags(addr);
    }

    if (prot1 == 0) {
        /* no page was there, so we allocate one */
        void *p = mmap(host_start, qemu_host_page_size, prot,
                       flags | MAP_ANON, -1, 0);
        if (p == MAP_FAILED)
            return -1;
        prot1 = prot;
    }
    prot1 &= PAGE_BITS;

    prot_new = prot | prot1;
    if (!(flags & MAP_ANON)) {
        /* msync() won't work here, so we return an error if write is
           possible while it is a shared mapping */
        if ((flags & TARGET_BSD_MAP_FLAGMASK) == MAP_SHARED &&
            (prot & PROT_WRITE))
            return -1;

        /* adjust protection so the file data can be read into the page */
        if (!(prot1 & PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE);

        /* read the corresponding file data */
        pread(fd, g2h(start), end - start, offset);

        /* put final protection */
        if (prot_new != (prot1 | PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot_new);
    } else {
        /* just update the protection */
        if (prot_new != prot1) {
            mprotect(host_start, qemu_host_page_size, prot_new);
        }
    }
    return 0;
}

#if defined(__CYGWIN__)
/* Cygwin doesn't have a whole lot of address space.  */
static abi_ulong mmap_next_start = 0x18000000;
#else
static abi_ulong mmap_next_start = 0x40000000;
#endif

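/* Host brk() value observed on the previous mmap_find_vma() call; used
   to mark memory the host has since allocated with brk() as reserved. */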
unsigned long last_brk;

/* find a free memory area of size 'size'. The search starts at
   'start'. If 'start' == 0, then a default start address is used.
   Return -1 on error.
*/
/* page_init() marks pages used by the host as reserved to be sure not
   to use them. */
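/*
 * The search itself is a simple linear scan: the candidate address
 * advances in host-page-size steps and is accepted once none of its
 * target pages has any flags set.  If the scan wraps around and comes
 * back to its starting point, the address space is treated as full and
 * -1 is returned.
 */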
static abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size)
{
    abi_ulong addr, addr1, addr_start;
    int prot;
    unsigned long new_brk;

    new_brk = (unsigned long)sbrk(0);
    if (last_brk && last_brk < new_brk && last_brk == (target_ulong)last_brk) {
        /* This is a hack to catch the host allocating memory with brk().
           If it uses mmap then we lose.
           FIXME: We really want to avoid the host allocating memory in
           the first place, and maybe leave some slack to avoid switching
           to mmap.  */
        page_set_flags(last_brk & TARGET_PAGE_MASK,
                       TARGET_PAGE_ALIGN(new_brk),
                       PAGE_RESERVED);
    }
    last_brk = new_brk;

    size = HOST_PAGE_ALIGN(size);
    start = start & qemu_host_page_mask;
    addr = start;
    if (addr == 0)
        addr = mmap_next_start;
    addr_start = addr;
    for(;;) {
        prot = 0;
        for(addr1 = addr; addr1 < (addr + size); addr1 += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr1);
        }
        if (prot == 0)
            break;
        addr += qemu_host_page_size;
        /* we found nothing */
        if (addr == addr_start)
            return (abi_ulong)-1;
    }
    if (start == 0)
        mmap_next_start = addr + size;
    return addr;
}

/* NOTE: all the constants are the HOST ones */
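/*
 * Two cases are handled below.  Without MAP_FIXED, a suitable hole is
 * found with mmap_find_vma() and the mapping is then placed there with
 * MAP_FIXED so the host cannot choose a different address.  With
 * MAP_FIXED, the range is split the same way as in target_mprotect():
 * partially covered host pages at either end go through mmap_frag(),
 * the fully covered middle is mmap()ed directly, and a file mapping
 * whose offset is not congruent with 'start' modulo the host page size
 * falls back to an anonymous mapping filled with pread().
 */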
abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
                     int flags, int fd, abi_ulong offset)
{
    abi_ulong ret, end, real_start, real_end, retaddr, host_offset, host_len;
    unsigned long host_start;

    mmap_lock();
#ifdef DEBUG_MMAP
    {
        printf("mmap: start=0x" TARGET_FMT_lx
               " len=0x" TARGET_FMT_lx " prot=%c%c%c flags=",
               start, len,
               prot & PROT_READ ? 'r' : '-',
               prot & PROT_WRITE ? 'w' : '-',
               prot & PROT_EXEC ? 'x' : '-');
        if (flags & MAP_FIXED)
            printf("MAP_FIXED ");
        if (flags & MAP_ANON)
            printf("MAP_ANON ");
        switch(flags & TARGET_BSD_MAP_FLAGMASK) {
        case MAP_PRIVATE:
            printf("MAP_PRIVATE ");
            break;
        case MAP_SHARED:
            printf("MAP_SHARED ");
            break;
        default:
            printf("[MAP_FLAGMASK=0x%x] ", flags & TARGET_BSD_MAP_FLAGMASK);
            break;
        }
        printf("fd=%d offset=" TARGET_FMT_lx "\n", fd, offset);
    }
#endif

    if (offset & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
        goto fail;
    }

    len = TARGET_PAGE_ALIGN(len);
    if (len == 0)
        goto the_end;
    real_start = start & qemu_host_page_mask;

    if (!(flags & MAP_FIXED)) {
        abi_ulong mmap_start;
        void *p;
        host_offset = offset & qemu_host_page_mask;
        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);
        mmap_start = mmap_find_vma(real_start, host_len);
        if (mmap_start == (abi_ulong)-1) {
            errno = ENOMEM;
            goto fail;
        }
        /* Note: we prefer to control the mapping address. It is
           especially important if qemu_host_page_size >
           qemu_real_host_page_size */
        p = mmap(g2h(mmap_start),
                 host_len, prot, flags | MAP_FIXED, fd, host_offset);
        if (p == MAP_FAILED)
            goto fail;
        /* update start so that it points to the file position at 'offset' */
        host_start = (unsigned long)p;
        if (!(flags & MAP_ANON))
            host_start += offset - host_offset;
        start = h2g(host_start);
    } else {
        int flg;
        target_ulong addr;

        if (start & ~TARGET_PAGE_MASK) {
            errno = EINVAL;
            goto fail;
        }
        end = start + len;
        real_end = HOST_PAGE_ALIGN(end);

        for(addr = real_start; addr < real_end; addr += TARGET_PAGE_SIZE) {
            flg = page_get_flags(addr);
            if (flg & PAGE_RESERVED) {
                errno = ENXIO;
                goto fail;
            }
        }

        /* worst case: we cannot map the file because the offset is not
           aligned, so we read it */
        if (!(flags & MAP_ANON) &&
            (offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
            /* msync() won't work here, so we return an error if write is
               possible while it is a shared mapping */
            if ((flags & TARGET_BSD_MAP_FLAGMASK) == MAP_SHARED &&
                (prot & PROT_WRITE)) {
                errno = EINVAL;
                goto fail;
            }
            retaddr = target_mmap(start, len, prot | PROT_WRITE,
                                  MAP_FIXED | MAP_PRIVATE | MAP_ANON,
                                  -1, 0);
            if (retaddr == -1)
                goto fail;
            pread(fd, g2h(start), len, offset);
            if (!(prot & PROT_WRITE)) {
                ret = target_mprotect(start, len, prot);
                if (ret != 0) {
                    start = ret;
                    goto the_end;
                }
            }
            goto the_end;
        }

        /* handle the start of the mapping */
        if (start > real_start) {
            if (real_end == real_start + qemu_host_page_size) {
                /* one single host page */
                ret = mmap_frag(real_start, start, end,
                                prot, flags, fd, offset);
                if (ret == -1)
                    goto fail;
                goto the_end1;
            }
            ret = mmap_frag(real_start, start, real_start + qemu_host_page_size,
                            prot, flags, fd, offset);
            if (ret == -1)
                goto fail;
            real_start += qemu_host_page_size;
        }
        /* handle the end of the mapping */
        if (end < real_end) {
            ret = mmap_frag(real_end - qemu_host_page_size,
                            real_end - qemu_host_page_size, real_end,
                            prot, flags, fd,
                            offset + real_end - qemu_host_page_size - start);
            if (ret == -1)
                goto fail;
            real_end -= qemu_host_page_size;
        }

        /* map the middle (easier) */
        if (real_start < real_end) {
            void *p;
            unsigned long offset1;
            if (flags & MAP_ANON)
                offset1 = 0;
            else
                offset1 = offset + real_start - start;
            p = mmap(g2h(real_start), real_end - real_start,
                     prot, flags, fd, offset1);
            if (p == MAP_FAILED)
                goto fail;
        }
    }
 the_end1:
    page_set_flags(start, start + len, prot | PAGE_VALID);
 the_end:
#ifdef DEBUG_MMAP
    printf("ret=0x" TARGET_FMT_lx "\n", start);
    page_dump(stdout);
    printf("\n");
#endif
    mmap_unlock();
    return start;
fail:
    mmap_unlock();
    return -1;
}

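/*
 * Unmapping mirrors the mapping case: a boundary host page is only
 * returned to the host if none of its other target pages is still in
 * use; otherwise it stays mapped and only the page flags for the
 * unmapped target range are cleared.
 */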
int target_munmap(abi_ulong start, abi_ulong len)
{
    abi_ulong end, real_start, real_end, addr;
    int prot, ret;

#ifdef DEBUG_MMAP
    printf("munmap: start=0x%lx len=0x%lx\n", start, len);
#endif
    if (start & ~TARGET_PAGE_MASK)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0)
        return -EINVAL;
    mmap_lock();
    end = start + len;
    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(end);

    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for(addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for(addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0)
            real_start += qemu_host_page_size;
    }
    if (end < real_end) {
        prot = 0;
        for(addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0)
            real_end -= qemu_host_page_size;
    }

    ret = 0;
    /* unmap what we can */
    if (real_start < real_end) {
        ret = munmap(g2h(real_start), real_end - real_start);
    }

    if (ret == 0)
        page_set_flags(start, start + len, 0);
    mmap_unlock();
    return ret;
}

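/* msync() operates on host pages, so round the start address down to a
   host page boundary; the flags are passed through unchanged. */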
int target_msync(abi_ulong start, abi_ulong len, int flags)
{
    abi_ulong end;

    if (start & ~TARGET_PAGE_MASK)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (end < start)
        return -EINVAL;
    if (end == start)
        return 0;

    start &= qemu_host_page_mask;
    return msync(g2h(start), end - start, flags);
}