xref: /openbmc/qemu/bsd-user/mmap.c (revision 6a3b9bfde0aff84d1bf2901c89a0d7485e1229d0)
/*
 *  mmap support for qemu
 *
 *  Copyright (c) 2003 - 2008 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "qemu.h"
#include "qemu-common.h"

//#define DEBUG_MMAP

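/*
 * All page-table updates in this file are serialised by a single process-wide
 * mutex.  The lock is recursive per thread: mmap_lock_count is thread-local,
 * and only the outermost mmap_lock()/mmap_unlock() pair touches the mutex.
 */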
static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
static __thread int mmap_lock_count;

void mmap_lock(void)
{
    if (mmap_lock_count++ == 0) {
        pthread_mutex_lock(&mmap_mutex);
    }
}

void mmap_unlock(void)
{
    if (--mmap_lock_count == 0) {
        pthread_mutex_unlock(&mmap_mutex);
    }
}

bool have_mmap_lock(void)
{
    return mmap_lock_count > 0;
}

/* Grab lock to make sure things are in a consistent state after fork().  */
void mmap_fork_start(void)
{
    if (mmap_lock_count)
        abort();
    pthread_mutex_lock(&mmap_mutex);
}

void mmap_fork_end(int child)
{
    if (child)
        pthread_mutex_init(&mmap_mutex, NULL);
    else
        pthread_mutex_unlock(&mmap_mutex);
}

/* NOTE: all the constants are the HOST ones, but addresses are target. */
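/*
 * The guest's TARGET_PAGE_SIZE may be smaller than the host's page size, so a
 * single host page can back several target pages with different protections.
 * The partial host pages at either end of the range therefore get the union
 * of the requested protection and the flags of the target pages they already
 * contain; only fully covered host pages receive 'prot' directly.
 */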
int target_mprotect(abi_ulong start, abi_ulong len, int prot)
{
    abi_ulong end, host_start, host_end, addr;
    int prot1, ret;

#ifdef DEBUG_MMAP
    printf("mprotect: start=0x" TARGET_ABI_FMT_lx
           " len=0x" TARGET_ABI_FMT_lx " prot=%c%c%c\n", start, len,
           prot & PROT_READ ? 'r' : '-',
           prot & PROT_WRITE ? 'w' : '-',
           prot & PROT_EXEC ? 'x' : '-');
#endif

    if ((start & ~TARGET_PAGE_MASK) != 0)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (end < start)
        return -EINVAL;
    prot &= PROT_READ | PROT_WRITE | PROT_EXEC;
    if (len == 0)
        return 0;

    mmap_lock();
    host_start = start & qemu_host_page_mask;
    host_end = HOST_PAGE_ALIGN(end);
    if (start > host_start) {
        /* handle host page containing start */
        prot1 = prot;
        for (addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        if (host_end == host_start + qemu_host_page_size) {
            for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(addr);
            }
            end = host_end;
        }
        ret = mprotect(g2h_untagged(host_start),
                       qemu_host_page_size, prot1 & PAGE_BITS);
        if (ret != 0)
            goto error;
        host_start += qemu_host_page_size;
    }
    if (end < host_end) {
        prot1 = prot;
        for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        ret = mprotect(g2h_untagged(host_end - qemu_host_page_size),
                       qemu_host_page_size, prot1 & PAGE_BITS);
        if (ret != 0)
            goto error;
        host_end -= qemu_host_page_size;
    }

    /* handle the pages in the middle */
    if (host_start < host_end) {
        ret = mprotect(g2h_untagged(host_start), host_end - host_start, prot);
        if (ret != 0)
            goto error;
    }
    page_set_flags(start, start + len, prot | PAGE_VALID);
    mmap_unlock();
    return 0;
error:
    mmap_unlock();
    return ret;
}

/* map an incomplete host page */
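/*
 * Called when a target mapping covers only part of the host page starting at
 * real_start.  The host page is mapped (or re-protected) so that the target
 * pages outside [start, end) keep their existing flags, and for file-backed
 * mappings the requested file data is read into place with pread() instead of
 * being mapped directly.
 */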
static int mmap_frag(abi_ulong real_start,
                     abi_ulong start, abi_ulong end,
                     int prot, int flags, int fd, abi_ulong offset)
{
    abi_ulong real_end, addr;
    void *host_start;
    int prot1, prot_new;

    real_end = real_start + qemu_host_page_size;
    host_start = g2h_untagged(real_start);

    /* get the protection of the target pages outside the mapping */
    prot1 = 0;
    for (addr = real_start; addr < real_end; addr += TARGET_PAGE_SIZE) {
        if (addr < start || addr >= end)
            prot1 |= page_get_flags(addr);
    }

    if (prot1 == 0) {
        /* no page was there, so we allocate one */
        void *p = mmap(host_start, qemu_host_page_size, prot,
                       flags | MAP_ANON, -1, 0);
        if (p == MAP_FAILED)
            return -1;
        prot1 = prot;
    }
    prot1 &= PAGE_BITS;

    prot_new = prot | prot1;
    if (!(flags & MAP_ANON)) {
        /* msync() won't work here, so return an error if writes would be
           possible on a shared mapping */
        if ((flags & TARGET_BSD_MAP_FLAGMASK) == MAP_SHARED &&
            (prot & PROT_WRITE))
            return -1;

        /* adjust protection to be able to read */
        if (!(prot1 & PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE);

        /* read the corresponding file data */
        pread(fd, g2h_untagged(start), end - start, offset);

        /* put final protection */
        if (prot_new != (prot1 | PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot_new);
    } else {
        /* just update the protection */
        if (prot_new != prot1) {
            mprotect(host_start, qemu_host_page_size, prot_new);
        }
    }
    return 0;
}

static abi_ulong mmap_next_start = 0x40000000;

unsigned long last_brk;

/* find a free memory area of size 'size'. The search starts at
   'start'. If 'start' == 0, then a default start address is used.
   Returns -1 on error.
*/
/* page_init() marks pages used by the host as reserved to be sure not
   to use them. */
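/*
 * The allocator below is a simple linear scan: starting from 'start' (or from
 * mmap_next_start when no hint is given) it advances one host page at a time
 * until it finds a range of 'size' bytes whose target pages all have no flags
 * set, and it fails once the scan wraps back around to its starting point.
 */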
static abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size)
{
    abi_ulong addr, addr1, addr_start;
    int prot;
    unsigned long new_brk;

    new_brk = (unsigned long)sbrk(0);
    if (last_brk && last_brk < new_brk && last_brk == (target_ulong)last_brk) {
        /* This is a hack to catch the host allocating memory with brk().
           If it uses mmap then we lose.
           FIXME: We really want to avoid the host allocating memory in
           the first place, and maybe leave some slack to avoid switching
           to mmap.  */
        page_set_flags(last_brk & TARGET_PAGE_MASK,
                       TARGET_PAGE_ALIGN(new_brk),
                       PAGE_RESERVED);
    }
    last_brk = new_brk;

    size = HOST_PAGE_ALIGN(size);
    start = start & qemu_host_page_mask;
    addr = start;
    if (addr == 0)
        addr = mmap_next_start;
    addr_start = addr;
    for (;;) {
        prot = 0;
        for (addr1 = addr; addr1 < (addr + size); addr1 += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr1);
        }
        if (prot == 0)
            break;
        addr += qemu_host_page_size;
        /* we found nothing */
        if (addr == addr_start)
            return (abi_ulong)-1;
    }
    if (start == 0)
        mmap_next_start = addr + size;
    return addr;
}

/* NOTE: all the constants are the HOST ones */
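/*
 * Without MAP_FIXED, a suitable hole is found with mmap_find_vma() and the
 * whole request is mapped there in one host mmap() call.  With MAP_FIXED, the
 * partial host pages at the start and end of the range go through mmap_frag(),
 * unaligned file offsets fall back to an anonymous mapping filled by pread(),
 * and only the fully covered host pages in the middle are mapped directly.
 */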
abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
                     int flags, int fd, abi_ulong offset)
{
    abi_ulong ret, end, real_start, real_end, retaddr, host_offset, host_len;
    unsigned long host_start;

    mmap_lock();
#ifdef DEBUG_MMAP
    {
        printf("mmap: start=0x" TARGET_ABI_FMT_lx
               " len=0x" TARGET_ABI_FMT_lx " prot=%c%c%c flags=",
               start, len,
               prot & PROT_READ ? 'r' : '-',
               prot & PROT_WRITE ? 'w' : '-',
               prot & PROT_EXEC ? 'x' : '-');
        if (flags & MAP_ALIGNMENT_MASK) {
            printf("MAP_ALIGNED(%u) ", (flags & MAP_ALIGNMENT_MASK)
                    >> MAP_ALIGNMENT_SHIFT);
        }
#ifdef MAP_GUARD
        if (flags & MAP_GUARD) {
            printf("MAP_GUARD ");
        }
#endif
        if (flags & MAP_FIXED) {
            printf("MAP_FIXED ");
        }
        if (flags & MAP_ANONYMOUS) {
            printf("MAP_ANON ");
        }
#ifdef MAP_EXCL
        if (flags & MAP_EXCL) {
            printf("MAP_EXCL ");
        }
#endif
        if (flags & MAP_PRIVATE) {
            printf("MAP_PRIVATE ");
        }
        if (flags & MAP_SHARED) {
            printf("MAP_SHARED ");
        }
        if (flags & MAP_NOCORE) {
            printf("MAP_NOCORE ");
        }
#ifdef MAP_STACK
        if (flags & MAP_STACK) {
            printf("MAP_STACK ");
        }
#endif
        printf("fd=%d offset=0x%llx\n", fd, (unsigned long long)offset);
    }
#endif

    if (offset & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
        goto fail;
    }

    len = TARGET_PAGE_ALIGN(len);
    if (len == 0)
        goto the_end;
    real_start = start & qemu_host_page_mask;

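    /*
     * No fixed address requested: reserve a host-page-aligned window large
     * enough for the whole request (including the sub-page part of 'offset')
     * and let the host map it there with MAP_FIXED.
     */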
    if (!(flags & MAP_FIXED)) {
        abi_ulong mmap_start;
        void *p;
        host_offset = offset & qemu_host_page_mask;
        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);
        mmap_start = mmap_find_vma(real_start, host_len);
        if (mmap_start == (abi_ulong)-1) {
            errno = ENOMEM;
            goto fail;
        }
        /* Note: we prefer to control the mapping address. It is
           especially important if qemu_host_page_size >
           qemu_real_host_page_size */
        p = mmap(g2h_untagged(mmap_start),
                 host_len, prot, flags | MAP_FIXED, fd, host_offset);
        if (p == MAP_FAILED)
            goto fail;
        /* update start so that it points to the file position at 'offset' */
        host_start = (unsigned long)p;
        if (!(flags & MAP_ANON))
            host_start += offset - host_offset;
        start = h2g(host_start);
    } else {
        int flg;
        target_ulong addr;

        if (start & ~TARGET_PAGE_MASK) {
            errno = EINVAL;
            goto fail;
        }
        end = start + len;
        real_end = HOST_PAGE_ALIGN(end);

        for (addr = real_start; addr < real_end; addr += TARGET_PAGE_SIZE) {
            flg = page_get_flags(addr);
            if (flg & PAGE_RESERVED) {
                errno = ENXIO;
                goto fail;
            }
        }

        /* worst case: we cannot map the file because the offset is not
           aligned, so we read it */
        if (!(flags & MAP_ANON) &&
            (offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
            /* msync() won't work here, so return an error if writes would be
               possible on a shared mapping */
            if ((flags & TARGET_BSD_MAP_FLAGMASK) == MAP_SHARED &&
                (prot & PROT_WRITE)) {
                errno = EINVAL;
                goto fail;
            }
            retaddr = target_mmap(start, len, prot | PROT_WRITE,
                                  MAP_FIXED | MAP_PRIVATE | MAP_ANON,
                                  -1, 0);
            if (retaddr == -1)
                goto fail;
            pread(fd, g2h_untagged(start), len, offset);
            if (!(prot & PROT_WRITE)) {
                ret = target_mprotect(start, len, prot);
                if (ret != 0) {
                    start = ret;
                    goto the_end;
                }
            }
            goto the_end;
        }

        /* handle the start of the mapping */
        if (start > real_start) {
            if (real_end == real_start + qemu_host_page_size) {
                /* one single host page */
                ret = mmap_frag(real_start, start, end,
                                prot, flags, fd, offset);
                if (ret == -1)
                    goto fail;
                goto the_end1;
            }
            ret = mmap_frag(real_start, start, real_start + qemu_host_page_size,
                            prot, flags, fd, offset);
            if (ret == -1)
                goto fail;
            real_start += qemu_host_page_size;
        }
        /* handle the end of the mapping */
        if (end < real_end) {
            ret = mmap_frag(real_end - qemu_host_page_size,
                            real_end - qemu_host_page_size, real_end,
                            prot, flags, fd,
                            offset + real_end - qemu_host_page_size - start);
            if (ret == -1)
                goto fail;
            real_end -= qemu_host_page_size;
        }

        /* map the middle (easier) */
        if (real_start < real_end) {
            void *p;
            unsigned long offset1;
            if (flags & MAP_ANON)
                offset1 = 0;
            else
                offset1 = offset + real_start - start;
            p = mmap(g2h_untagged(real_start), real_end - real_start,
                     prot, flags, fd, offset1);
            if (p == MAP_FAILED)
                goto fail;
        }
    }
 the_end1:
    page_set_flags(start, start + len, prot | PAGE_VALID);
 the_end:
#ifdef DEBUG_MMAP
    printf("ret=0x" TARGET_ABI_FMT_lx "\n", start);
    page_dump(stdout);
    printf("\n");
#endif
    mmap_unlock();
    return start;
fail:
    mmap_unlock();
    return -1;
}

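/*
 * Unmapping mirrors target_mmap(): only host pages whose target pages are all
 * being released are really munmap()ed; a host page that still contains live
 * target pages at the edge of the range is left mapped and only loses the
 * flags of the released target pages.
 */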
int target_munmap(abi_ulong start, abi_ulong len)
{
    abi_ulong end, real_start, real_end, addr;
    int prot, ret;

#ifdef DEBUG_MMAP
    printf("munmap: start=0x" TARGET_ABI_FMT_lx " len=0x"
           TARGET_ABI_FMT_lx "\n",
           start, len);
#endif
    if (start & ~TARGET_PAGE_MASK)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0)
        return -EINVAL;
    mmap_lock();
    end = start + len;
    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(end);

    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0)
            real_start += qemu_host_page_size;
    }
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0)
            real_end -= qemu_host_page_size;
    }

    ret = 0;
    /* unmap what we can */
    if (real_start < real_end) {
        ret = munmap(g2h_untagged(real_start), real_end - real_start);
    }

    if (ret == 0)
        page_set_flags(start, start + len, 0);
    mmap_unlock();
    return ret;
}

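/*
 * msync() needs host-page-aligned addresses, so the start of the range is
 * rounded down to a host page boundary before the host call.
 */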
int target_msync(abi_ulong start, abi_ulong len, int flags)
{
    abi_ulong end;

    if (start & ~TARGET_PAGE_MASK)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (end < start)
        return -EINVAL;
    if (end == start)
        return 0;

    start &= qemu_host_page_mask;
    return msync(g2h_untagged(start), end - start, flags);
}