xref: /openbmc/qemu/linux-user/mmap.c (revision 6373fc03)
1 /*
2  *  mmap support for qemu
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #include "qemu/osdep.h"
20 #include <sys/shm.h>
21 #include "trace.h"
22 #include "exec/log.h"
23 #include "exec/page-protection.h"
24 #include "qemu.h"
25 #include "user-internals.h"
26 #include "user-mmap.h"
27 #include "target_mman.h"
28 #include "qemu/interval-tree.h"
29 
30 #ifdef TARGET_ARM
31 #include "target/arm/cpu-features.h"
32 #endif
33 
34 static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
35 static __thread int mmap_lock_count;
36 
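/*
 * The mmap lock nests within a thread: only the outermost mmap_lock()
 * call takes the mutex and only the matching outermost mmap_unlock()
 * releases it, as tracked by the per-thread mmap_lock_count.
 */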
37 void mmap_lock(void)
38 {
39     if (mmap_lock_count++ == 0) {
40         pthread_mutex_lock(&mmap_mutex);
41     }
42 }
43 
44 void mmap_unlock(void)
45 {
46     assert(mmap_lock_count > 0);
47     if (--mmap_lock_count == 0) {
48         pthread_mutex_unlock(&mmap_mutex);
49     }
50 }
51 
52 bool have_mmap_lock(void)
53 {
54     return mmap_lock_count > 0;
55 }
56 
57 /* Grab lock to make sure things are in a consistent state after fork().  */
58 void mmap_fork_start(void)
59 {
60     if (mmap_lock_count)
61         abort();
62     pthread_mutex_lock(&mmap_mutex);
63 }
64 
65 void mmap_fork_end(int child)
66 {
67     if (child) {
68         pthread_mutex_init(&mmap_mutex, NULL);
69     } else {
70         pthread_mutex_unlock(&mmap_mutex);
71     }
72 }
73 
74 /* Protected by mmap_lock. */
75 static IntervalTreeRoot shm_regions;
76 
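/*
 * Track the SysV shared memory segments attached by target_shmat(), so
 * that target_shmdt() can recover the extent of a segment from its
 * starting address alone.
 */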
77 static void shm_region_add(abi_ptr start, abi_ptr last)
78 {
79     IntervalTreeNode *i = g_new0(IntervalTreeNode, 1);
80 
81     i->start = start;
82     i->last = last;
83     interval_tree_insert(i, &shm_regions);
84 }
85 
86 static abi_ptr shm_region_find(abi_ptr start)
87 {
88     IntervalTreeNode *i;
89 
90     for (i = interval_tree_iter_first(&shm_regions, start, start); i;
91          i = interval_tree_iter_next(i, start, start)) {
92         if (i->start == start) {
93             return i->last;
94         }
95     }
96     return 0;
97 }
98 
99 static void shm_region_rm_complete(abi_ptr start, abi_ptr last)
100 {
101     IntervalTreeNode *i, *n;
102 
103     for (i = interval_tree_iter_first(&shm_regions, start, last); i; i = n) {
104         n = interval_tree_iter_next(i, start, last);
105         if (i->start >= start && i->last <= last) {
106             interval_tree_remove(i, &shm_regions);
107             g_free(i);
108         }
109     }
110 }
111 
112 /*
113  * Validate target prot bitmask.
114  * The host prot bitmask is computed separately by target_to_host_prot().
115  * Return 0 if the target prot bitmask is invalid, otherwise
116  * the internal qemu page_flags (which will include PAGE_VALID).
117  */
118 static int validate_prot_to_pageflags(int prot)
119 {
120     int valid = PROT_READ | PROT_WRITE | PROT_EXEC | TARGET_PROT_SEM;
121     int page_flags = (prot & PAGE_RWX) | PAGE_VALID;
122 
123 #ifdef TARGET_AARCH64
124     {
125         ARMCPU *cpu = ARM_CPU(thread_cpu);
126 
127         /*
128          * The PROT_BTI bit is only accepted if the cpu supports the feature.
129          * Since this is the unusual case, don't bother checking unless
130          * the bit has been requested.  If set and valid, record the bit
131          * within QEMU's page_flags.
132          */
133         if ((prot & TARGET_PROT_BTI) && cpu_isar_feature(aa64_bti, cpu)) {
134             valid |= TARGET_PROT_BTI;
135             page_flags |= PAGE_BTI;
136         }
137         /* Similarly for the PROT_MTE bit. */
138         if ((prot & TARGET_PROT_MTE) && cpu_isar_feature(aa64_mte, cpu)) {
139             valid |= TARGET_PROT_MTE;
140             page_flags |= PAGE_MTE;
141         }
142     }
143 #elif defined(TARGET_HPPA)
144     valid |= PROT_GROWSDOWN | PROT_GROWSUP;
145 #endif
146 
147     return prot & ~valid ? 0 : page_flags;
148 }
149 
150 /*
151  * For the host, we need not pass anything except read/write/exec.
152  * While PROT_SEM is allowed by all hosts, it is also ignored, so
153  * don't bother transforming the guest bit into a host bit.  Any other
154  * target-specific prot bits will not be understood by the host
155  * and will need to be encoded into page_flags for qemu emulation.
156  *
157  * Pages that are executable by the guest will never be executed
158  * by the host, but the host will need to be able to read them.
159  */
160 static int target_to_host_prot(int prot)
161 {
162     return (prot & (PROT_READ | PROT_WRITE)) |
163            (prot & PROT_EXEC ? PROT_READ : 0);
164 }
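/*
 * For example, a guest mapping with only PROT_EXEC is given PROT_READ
 * on the host, so that the translator can read the guest code.
 */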
165 
166 /* NOTE: all the constants are the HOST ones, but addresses are target. */
167 int target_mprotect(abi_ulong start, abi_ulong len, int target_prot)
168 {
169     int host_page_size = qemu_real_host_page_size();
170     abi_ulong starts[3];
171     abi_ulong lens[3];
172     int prots[3];
173     abi_ulong host_start, host_last, last;
174     int prot1, ret, page_flags, nranges;
175 
176     trace_target_mprotect(start, len, target_prot);
177 
178     if ((start & ~TARGET_PAGE_MASK) != 0) {
179         return -TARGET_EINVAL;
180     }
181     page_flags = validate_prot_to_pageflags(target_prot);
182     if (!page_flags) {
183         return -TARGET_EINVAL;
184     }
185     if (len == 0) {
186         return 0;
187     }
188     len = TARGET_PAGE_ALIGN(len);
189     if (!guest_range_valid_untagged(start, len)) {
190         return -TARGET_ENOMEM;
191     }
192 
193     last = start + len - 1;
194     host_start = start & -host_page_size;
195     host_last = ROUND_UP(last, host_page_size) - 1;
196     nranges = 0;
197 
198     mmap_lock();
199 
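    /*
     * Because the host page size may exceed the target page size, the
     * first and last host pages may contain guest pages outside the
     * requested range; preserve their protections by building up to
     * three separate host mprotect ranges.
     */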
200     if (host_last - host_start < host_page_size) {
201         /* Single host page contains all guest pages: sum the prot. */
202         prot1 = target_prot;
203         for (abi_ulong a = host_start; a < start; a += TARGET_PAGE_SIZE) {
204             prot1 |= page_get_flags(a);
205         }
206         for (abi_ulong a = last; a < host_last; a += TARGET_PAGE_SIZE) {
207             prot1 |= page_get_flags(a + 1);
208         }
209         starts[nranges] = host_start;
210         lens[nranges] = host_page_size;
211         prots[nranges] = prot1;
212         nranges++;
213     } else {
214         if (host_start < start) {
215             /* Host page contains more than one guest page: sum the prot. */
216             prot1 = target_prot;
217             for (abi_ulong a = host_start; a < start; a += TARGET_PAGE_SIZE) {
218                 prot1 |= page_get_flags(a);
219             }
220             /* If the resulting sum differs, create a new range. */
221             if (prot1 != target_prot) {
222                 starts[nranges] = host_start;
223                 lens[nranges] = host_page_size;
224                 prots[nranges] = prot1;
225                 nranges++;
226                 host_start += host_page_size;
227             }
228         }
229 
230         if (last < host_last) {
231             /* Host page contains more than one guest page: sum the prot. */
232             prot1 = target_prot;
233             for (abi_ulong a = last; a < host_last; a += TARGET_PAGE_SIZE) {
234                 prot1 |= page_get_flags(a + 1);
235             }
236             /* If the resulting sum differs, create a new range. */
237             if (prot1 != target_prot) {
238                 host_last -= host_page_size;
239                 starts[nranges] = host_last + 1;
240                 lens[nranges] = host_page_size;
241                 prots[nranges] = prot1;
242                 nranges++;
243             }
244         }
245 
246         /* Create a range for the middle, if any remains. */
247         if (host_start < host_last) {
248             starts[nranges] = host_start;
249             lens[nranges] = host_last - host_start + 1;
250             prots[nranges] = target_prot;
251             nranges++;
252         }
253     }
254 
255     for (int i = 0; i < nranges; ++i) {
256         ret = mprotect(g2h_untagged(starts[i]), lens[i],
257                        target_to_host_prot(prots[i]));
258         if (ret != 0) {
259             goto error;
260         }
261     }
262 
263     page_set_flags(start, last, page_flags);
264     ret = 0;
265 
266  error:
267     mmap_unlock();
268     return ret;
269 }
270 
271 /*
272  * Perform munmap on behalf of the target, with host parameters.
273  * If reserved_va, we must replace the memory reservation.
274  */
275 static int do_munmap(void *addr, size_t len)
276 {
277     if (reserved_va) {
278         void *ptr = mmap(addr, len, PROT_NONE,
279                          MAP_FIXED | MAP_ANONYMOUS
280                          | MAP_PRIVATE | MAP_NORESERVE, -1, 0);
281         return ptr == addr ? 0 : -1;
282     }
283     return munmap(addr, len);
284 }
285 
286 /*
287  * Map an incomplete host page.
288  *
289  * Here be dragons.  This case will not work if there is an existing
290  * overlapping host page, which is file mapped, and for which the mapping
291  * is beyond the end of the file.  In that case, we will see SIGBUS when
292  * trying to write a portion of this page.
293  *
294  * FIXME: Work around this with a temporary signal handler and longjmp.
295  */
296 static bool mmap_frag(abi_ulong real_start, abi_ulong start, abi_ulong last,
297                       int prot, int flags, int fd, off_t offset)
298 {
299     int host_page_size = qemu_real_host_page_size();
300     abi_ulong real_last;
301     void *host_start;
302     int prot_old, prot_new;
303     int host_prot_old, host_prot_new;
304 
305     if (!(flags & MAP_ANONYMOUS)
306         && (flags & MAP_TYPE) == MAP_SHARED
307         && (prot & PROT_WRITE)) {
308         /*
309          * msync() won't work on a partial host page, so return an
310          * error if the mapping is both shared and writable.
311          */
312         errno = EINVAL;
313         return false;
314     }
315 
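    /*
     * Overall approach: make the existing host page writable if needed,
     * populate the new guest pages with memset() or pread(), then apply
     * the union of the old and new protections.
     */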
316     real_last = real_start + host_page_size - 1;
317     host_start = g2h_untagged(real_start);
318 
319     /* Get the protection of the target pages outside the mapping. */
320     prot_old = 0;
321     for (abi_ulong a = real_start; a < start; a += TARGET_PAGE_SIZE) {
322         prot_old |= page_get_flags(a);
323     }
324     for (abi_ulong a = real_last; a > last; a -= TARGET_PAGE_SIZE) {
325         prot_old |= page_get_flags(a);
326     }
327 
328     if (prot_old == 0) {
329         /*
330          * Since !(prot_old & PAGE_VALID), there were no guest pages
331          * outside of the fragment we need to map.  Allocate a new host
332          * page to cover, discarding whatever else may have been present.
333          */
334         void *p = mmap(host_start, host_page_size,
335                        target_to_host_prot(prot),
336                        flags | MAP_ANONYMOUS, -1, 0);
337         if (p != host_start) {
338             if (p != MAP_FAILED) {
339                 do_munmap(p, host_page_size);
340                 errno = EEXIST;
341             }
342             return false;
343         }
344         prot_old = prot;
345     }
346     prot_new = prot | prot_old;
347 
348     host_prot_old = target_to_host_prot(prot_old);
349     host_prot_new = target_to_host_prot(prot_new);
350 
351     /* Adjust protection to be able to write. */
352     if (!(host_prot_old & PROT_WRITE)) {
353         host_prot_old |= PROT_WRITE;
354         mprotect(host_start, host_page_size, host_prot_old);
355     }
356 
357     /* Read or zero the new guest pages. */
358     if (flags & MAP_ANONYMOUS) {
359         memset(g2h_untagged(start), 0, last - start + 1);
360     } else {
361         if (pread(fd, g2h_untagged(start), last - start + 1, offset) == -1) {
362             return false;
363         }
364     }
365 
366     /* Put final protection */
367     if (host_prot_new != host_prot_old) {
368         mprotect(host_start, host_page_size, host_prot_new);
369     }
370     return true;
371 }
372 
373 abi_ulong task_unmapped_base;
374 abi_ulong elf_et_dyn_base;
375 abi_ulong mmap_next_start;
376 
377 /*
378  * Subroutine of mmap_find_vma, used when we have pre-allocated
379  * a chunk of guest address space.
380  */
381 static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
382                                         abi_ulong align)
383 {
384     target_ulong ret;
385 
386     ret = page_find_range_empty(start, reserved_va, size, align);
387     if (ret == -1 && start > mmap_min_addr) {
388         /* Restart at the beginning of the address space. */
389         ret = page_find_range_empty(mmap_min_addr, start - 1, size, align);
390     }
391 
392     return ret;
393 }
394 
395 /*
396  * Find and reserve a free memory area of size 'size'. The search
397  * starts at 'start'.
398  * It must be called with mmap_lock() held.
399  * Return -1 if error.
400  */
401 abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size, abi_ulong align)
402 {
403     int host_page_size = qemu_real_host_page_size();
404     void *ptr, *prev;
405     abi_ulong addr;
406     int wrapped, repeat;
407 
408     align = MAX(align, host_page_size);
409 
410     /* If 'start' == 0, then a default start address is used. */
411     if (start == 0) {
412         start = mmap_next_start;
413     } else {
414         start &= -host_page_size;
415     }
416     start = ROUND_UP(start, align);
417     size = ROUND_UP(size, host_page_size);
418 
419     if (reserved_va) {
420         return mmap_find_vma_reserved(start, size, align);
421     }
422 
423     addr = start;
424     wrapped = repeat = 0;
425     prev = 0;
426 
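    /*
     * Probe with PROT_NONE reservations until the host returns a block
     * that is representable in the guest address space and satisfies
     * the requested alignment.
     */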
427     for (;; prev = ptr) {
428         /*
429          * Reserve needed memory area to avoid a race.
430          * It should be discarded using:
431          *  - mmap() with MAP_FIXED flag
432          *  - mremap() with MREMAP_FIXED flag
433          *  - shmat() with SHM_REMAP flag
434          */
435         ptr = mmap(g2h_untagged(addr), size, PROT_NONE,
436                    MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0);
437 
438         /* ENOMEM if the host address space is exhausted. */
439         if (ptr == MAP_FAILED) {
440             return (abi_ulong)-1;
441         }
442 
443         /*
444          * Count the number of sequential returns of the same address.
445          * This is used to modify the search algorithm below.
446          */
447         repeat = (ptr == prev ? repeat + 1 : 0);
448 
449         if (h2g_valid(ptr + size - 1)) {
450             addr = h2g(ptr);
451 
452             if ((addr & (align - 1)) == 0) {
453                 /* Success.  */
454                 if (start == mmap_next_start && addr >= task_unmapped_base) {
455                     mmap_next_start = addr + size;
456                 }
457                 return addr;
458             }
459 
460             /* The address is not properly aligned for the target.  */
461             switch (repeat) {
462             case 0:
463                 /*
464                  * Assume the result that the kernel gave us is the
465                  * first with enough free space, so start again at the
466                  * next higher target page.
467                  */
468                 addr = ROUND_UP(addr, align);
469                 break;
470             case 1:
471                 /*
472                  * Sometimes the kernel decides to perform the allocation
473                  * at the top end of memory instead.
474                  */
475                 addr &= -align;
476                 break;
477             case 2:
478                 /* Start over at low memory.  */
479                 addr = 0;
480                 break;
481             default:
482                 /* Fail.  This unaligned block must be the last.  */
483                 addr = -1;
484                 break;
485             }
486         } else {
487             /*
488              * Since the result the kernel gave didn't fit, start
489              * again at low memory.  If any repetition, fail.
490              */
491             addr = (repeat ? -1 : 0);
492         }
493 
494         /* Unmap and try again.  */
495         munmap(ptr, size);
496 
497         /* ENOMEM if we checked the whole of the target address space.  */
498         if (addr == (abi_ulong)-1) {
499             return (abi_ulong)-1;
500         } else if (addr == 0) {
501             if (wrapped) {
502                 return (abi_ulong)-1;
503             }
504             wrapped = 1;
505             /*
506              * Don't actually use 0 when wrapping, instead indicate
507              * that we'd truly like an allocation in low memory.
508              */
509             addr = (mmap_min_addr > TARGET_PAGE_SIZE
510                      ? TARGET_PAGE_ALIGN(mmap_min_addr)
511                      : TARGET_PAGE_SIZE);
512         } else if (wrapped && addr >= start) {
513             return (abi_ulong)-1;
514         }
515     }
516 }
517 
518 /*
519  * Record a successful mmap within the user-exec interval tree.
520  */
521 static abi_long mmap_end(abi_ulong start, abi_ulong last,
522                          abi_ulong passthrough_start,
523                          abi_ulong passthrough_last,
524                          int flags, int page_flags)
525 {
526     if (flags & MAP_ANONYMOUS) {
527         page_flags |= PAGE_ANON;
528     }
529     page_flags |= PAGE_RESET;
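    /*
     * Callers pass passthrough_start > passthrough_last (e.g. -1, 0) to
     * indicate that no part of the mapping corresponds directly to a
     * host mapping, so no page receives PAGE_PASSTHROUGH.
     */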
530     if (passthrough_start > passthrough_last) {
531         page_set_flags(start, last, page_flags);
532     } else {
533         if (start < passthrough_start) {
534             page_set_flags(start, passthrough_start - 1, page_flags);
535         }
536         page_set_flags(passthrough_start, passthrough_last,
537                        page_flags | PAGE_PASSTHROUGH);
538         if (passthrough_last < last) {
539             page_set_flags(passthrough_last + 1, last, page_flags);
540         }
541     }
542     shm_region_rm_complete(start, last);
543     trace_target_mmap_complete(start);
544     if (qemu_loglevel_mask(CPU_LOG_PAGE)) {
545         FILE *f = qemu_log_trylock();
546         if (f) {
547             fprintf(f, "page layout changed following mmap\n");
548             page_dump(f);
549             qemu_log_unlock(f);
550         }
551     }
552     return start;
553 }
554 
555 /*
556  * Special case host page size == target page size,
557  * where there are no edge conditions.
558  */
559 static abi_long mmap_h_eq_g(abi_ulong start, abi_ulong len,
560                             int host_prot, int flags, int page_flags,
561                             int fd, off_t offset)
562 {
563     void *p, *want_p = NULL;
564     abi_ulong last;
565 
566     if (start || (flags & (MAP_FIXED | MAP_FIXED_NOREPLACE))) {
567         want_p = g2h_untagged(start);
568     }
569 
570     p = mmap(want_p, len, host_prot, flags, fd, offset);
571     if (p == MAP_FAILED) {
572         return -1;
573     }
574     /* If the host kernel does not support MAP_FIXED_NOREPLACE, emulate. */
575     if ((flags & MAP_FIXED_NOREPLACE) && p != want_p) {
576         do_munmap(p, len);
577         errno = EEXIST;
578         return -1;
579     }
580 
581     start = h2g(p);
582     last = start + len - 1;
583     return mmap_end(start, last, start, last, flags, page_flags);
584 }
585 
586 /*
587  * Special case host page size < target page size.
588  *
589  * The two special cases are increased guest alignment, and mapping
590  * past the end of a file.
591  *
592  * When mapping files into a memory area larger than the file,
593  * accesses to pages beyond the file size will cause a SIGBUS.
594  *
595  * For example, if mmapping a file of 100 bytes on a host with 4K
596  * pages emulating a target with 8K pages, the target expects to
597  * be able to access the first 8K. But the host will trap us on
598  * any access beyond 4K.
599  *
600  * When emulating a target with a larger page size than the host's,
601  * we may need to truncate file maps at EOF and add extra anonymous
602  * pages up to the target's page boundary.
603  *
604  * This workaround only works for files that do not change.
605  * If the file is later extended (e.g. ftruncate), the SIGBUS
606  * vanishes and the proper behaviour is that changes within the
607  * anon page should be reflected in the file.
608  *
609  * However, this case is rather common with executable images,
610  * so the workaround is important for even trivial tests, whereas
611  * the mmap of a file being extended is less common.
612  */
613 static abi_long mmap_h_lt_g(abi_ulong start, abi_ulong len, int host_prot,
614                             int mmap_flags, int page_flags, int fd,
615                             off_t offset, int host_page_size)
616 {
617     void *p, *want_p = NULL;
618     off_t fileend_adj = 0;
619     int flags = mmap_flags;
620     abi_ulong last, pass_last;
621 
622     if (start || (flags & (MAP_FIXED | MAP_FIXED_NOREPLACE))) {
623         want_p = g2h_untagged(start);
624     }
625 
626     if (!(flags & MAP_ANONYMOUS)) {
627         struct stat sb;
628 
629         if (fstat(fd, &sb) == -1) {
630             return -1;
631         }
632         if (offset >= sb.st_size) {
633             /*
634              * The entire map is beyond the end of the file.
635              * Transform it to an anonymous mapping.
636              */
637             flags |= MAP_ANONYMOUS;
638             fd = -1;
639             offset = 0;
640         } else if (offset + len > sb.st_size) {
641             /*
642              * A portion of the map is beyond the end of the file.
643              * Truncate the file portion of the allocation.
644              */
645             fileend_adj = offset + len - sb.st_size;
646         }
647     }
648 
649     if (flags & (MAP_FIXED | MAP_FIXED_NOREPLACE)) {
650         if (fileend_adj) {
651             p = mmap(want_p, len, host_prot, flags | MAP_ANONYMOUS, -1, 0);
652         } else {
653             p = mmap(want_p, len, host_prot, flags, fd, offset);
654         }
655         if (p != want_p) {
656             if (p != MAP_FAILED) {
657                 /* Host does not support MAP_FIXED_NOREPLACE: emulate. */
658                 do_munmap(p, len);
659                 errno = EEXIST;
660             }
661             return -1;
662         }
663 
664         if (fileend_adj) {
665             void *t = mmap(p, len - fileend_adj, host_prot,
666                            (flags & ~MAP_FIXED_NOREPLACE) | MAP_FIXED,
667                            fd, offset);
668 
669             if (t == MAP_FAILED) {
670                 int save_errno = errno;
671 
672                 /*
673                  * We failed a map over the top of the successful anonymous
674                  * mapping above. The only failure mode is running out of VMAs,
675                  * and there's nothing that we can do to detect that earlier.
676                  * If we have replaced an existing mapping with MAP_FIXED,
677                  * then we cannot properly recover.  It's a coin toss whether
678                  * it would be better to exit or continue here.
679                  */
680                 if (!(flags & MAP_FIXED_NOREPLACE) &&
681                     !page_check_range_empty(start, start + len - 1)) {
682                     qemu_log("QEMU target_mmap late failure: %s",
683                              strerror(save_errno));
684                 }
685 
686                 do_munmap(want_p, len);
687                 errno = save_errno;
688                 return -1;
689             }
690         }
691     } else {
692         size_t host_len, part_len;
693 
694         /*
695          * Take care to align the host memory.  Perform a larger anonymous
696          * allocation and extract the aligned portion.  Remap the file on
697          * top of that.
698          */
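        /*
         * For example, with 4k host pages and 8k target pages, mapping
         * len + 4k anonymously guarantees that an 8k-aligned sub-range
         * of length len exists within the allocation.
         */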
699         host_len = len + TARGET_PAGE_SIZE - host_page_size;
700         p = mmap(want_p, host_len, host_prot, flags | MAP_ANONYMOUS, -1, 0);
701         if (p == MAP_FAILED) {
702             return -1;
703         }
704 
705         part_len = (uintptr_t)p & (TARGET_PAGE_SIZE - 1);
706         if (part_len) {
707             part_len = TARGET_PAGE_SIZE - part_len;
708             do_munmap(p, part_len);
709             p += part_len;
710             host_len -= part_len;
711         }
712         if (len < host_len) {
713             do_munmap(p + len, host_len - len);
714         }
715 
716         if (!(flags & MAP_ANONYMOUS)) {
717             void *t = mmap(p, len - fileend_adj, host_prot,
718                            flags | MAP_FIXED, fd, offset);
719 
720             if (t == MAP_FAILED) {
721                 int save_errno = errno;
722                 do_munmap(p, len);
723                 errno = save_errno;
724                 return -1;
725             }
726         }
727 
728         start = h2g(p);
729     }
730 
731     last = start + len - 1;
732     if (fileend_adj) {
733         pass_last = ROUND_UP(last - fileend_adj, host_page_size) - 1;
734     } else {
735         pass_last = last;
736     }
737     return mmap_end(start, last, start, pass_last, mmap_flags, page_flags);
738 }
739 
740 /*
741  * Special case host page size > target page size.
742  *
743  * The two special cases are address and file offsets that are valid
744  * for the guest but cannot be directly represented by the host.
745  */
746 static abi_long mmap_h_gt_g(abi_ulong start, abi_ulong len,
747                             int target_prot, int host_prot,
748                             int flags, int page_flags, int fd,
749                             off_t offset, int host_page_size)
750 {
751     void *p, *want_p = NULL;
752     off_t host_offset = offset & -host_page_size;
753     abi_ulong last, real_start, real_last;
754     bool misaligned_offset = false;
755     size_t host_len;
756 
757     if (start || (flags & (MAP_FIXED | MAP_FIXED_NOREPLACE))) {
758         want_p = g2h_untagged(start);
759     }
760 
761     if (!(flags & (MAP_FIXED | MAP_FIXED_NOREPLACE))) {
762         /*
763          * Adjust the offset to something representable on the host.
764          */
765         host_len = len + offset - host_offset;
766         p = mmap(want_p, host_len, host_prot, flags, fd, host_offset);
767         if (p == MAP_FAILED) {
768             return -1;
769         }
770 
771         /* Update start to the file position at offset. */
772         p += offset - host_offset;
773 
774         start = h2g(p);
775         last = start + len - 1;
776         return mmap_end(start, last, start, last, flags, page_flags);
777     }
778 
779     if (!(flags & MAP_ANONYMOUS)) {
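        /*
         * The host file mapping can be used directly only when start and
         * offset are congruent modulo the host page size; otherwise fall
         * back to a private anonymous mapping filled by pread() below.
         */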
780         misaligned_offset = (start ^ offset) & (host_page_size - 1);
781 
782         /*
783          * The fallback for misalignment is a private mapping + read.
784          * This carries none of the semantics required of MAP_SHARED.
785          */
786         if (misaligned_offset && (flags & MAP_TYPE) != MAP_PRIVATE) {
787             errno = EINVAL;
788             return -1;
789         }
790     }
791 
792     last = start + len - 1;
793     real_start = start & -host_page_size;
794     real_last = ROUND_UP(last, host_page_size) - 1;
795 
796     /*
797      * Handle the start and end of the mapping.
798      */
799     if (real_start < start) {
800         abi_ulong real_page_last = real_start + host_page_size - 1;
801         if (last <= real_page_last) {
802             /* The entire allocation is a subset of one host page. */
803             if (!mmap_frag(real_start, start, last, target_prot,
804                            flags, fd, offset)) {
805                 return -1;
806             }
807             return mmap_end(start, last, -1, 0, flags, page_flags);
808         }
809 
810         if (!mmap_frag(real_start, start, real_page_last, target_prot,
811                        flags, fd, offset)) {
812             return -1;
813         }
814         real_start = real_page_last + 1;
815     }
816 
817     if (last < real_last) {
818         abi_ulong real_page_start = real_last - host_page_size + 1;
819         if (!mmap_frag(real_page_start, real_page_start, last,
820                        target_prot, flags, fd,
821                        offset + real_page_start - start)) {
822             return -1;
823         }
824         real_last = real_page_start - 1;
825     }
826 
827     if (real_start > real_last) {
828         return mmap_end(start, last, -1, 0, flags, page_flags);
829     }
830 
831     /*
832      * Handle the middle of the mapping.
833      */
834 
835     host_len = real_last - real_start + 1;
836     want_p += real_start - start;
837 
838     if (flags & MAP_ANONYMOUS) {
839         p = mmap(want_p, host_len, host_prot, flags, -1, 0);
840     } else if (!misaligned_offset) {
841         p = mmap(want_p, host_len, host_prot, flags, fd,
842                  offset + real_start - start);
843     } else {
844         p = mmap(want_p, host_len, host_prot | PROT_WRITE,
845                  flags | MAP_ANONYMOUS, -1, 0);
846     }
847     if (p != want_p) {
848         if (p != MAP_FAILED) {
849             do_munmap(p, host_len);
850             errno = EEXIST;
851         }
852         return -1;
853     }
854 
855     if (misaligned_offset) {
856         /* TODO: The read could be short. */
857         if (pread(fd, p, host_len, offset + real_start - start) != host_len) {
858             do_munmap(p, host_len);
859             return -1;
860         }
861         if (!(host_prot & PROT_WRITE)) {
862             mprotect(p, host_len, host_prot);
863         }
864     }
865 
866     return mmap_end(start, last, -1, 0, flags, page_flags);
867 }
868 
869 static abi_long target_mmap__locked(abi_ulong start, abi_ulong len,
870                                     int target_prot, int flags, int page_flags,
871                                     int fd, off_t offset)
872 {
873     int host_page_size = qemu_real_host_page_size();
874     int host_prot;
875 
876     /*
877      * For reserved_va, we are in full control of the allocation.
878      * Find a suitable hole and convert to MAP_FIXED.
879      */
880     if (reserved_va) {
881         if (flags & MAP_FIXED_NOREPLACE) {
882             /* Validate that the chosen range is empty. */
883             if (!page_check_range_empty(start, start + len - 1)) {
884                 errno = EEXIST;
885                 return -1;
886             }
887             flags = (flags & ~MAP_FIXED_NOREPLACE) | MAP_FIXED;
888         } else if (!(flags & MAP_FIXED)) {
889             abi_ulong real_start = start & -host_page_size;
890             off_t host_offset = offset & -host_page_size;
891             size_t real_len = len + offset - host_offset;
892             abi_ulong align = MAX(host_page_size, TARGET_PAGE_SIZE);
893 
894             start = mmap_find_vma(real_start, real_len, align);
895             if (start == (abi_ulong)-1) {
896                 errno = ENOMEM;
897                 return -1;
898             }
899             start += offset - host_offset;
900             flags |= MAP_FIXED;
901         }
902     }
903 
904     host_prot = target_to_host_prot(target_prot);
905 
906     if (host_page_size == TARGET_PAGE_SIZE) {
907         return mmap_h_eq_g(start, len, host_prot, flags,
908                            page_flags, fd, offset);
909     } else if (host_page_size < TARGET_PAGE_SIZE) {
910         return mmap_h_lt_g(start, len, host_prot, flags,
911                            page_flags, fd, offset, host_page_size);
912     } else {
913         return mmap_h_gt_g(start, len, target_prot, host_prot, flags,
914                            page_flags, fd, offset, host_page_size);
915     }
916 }
917 
918 /* NOTE: all the constants are the HOST ones */
919 abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
920                      int flags, int fd, off_t offset)
921 {
922     abi_long ret;
923     int page_flags;
924 
925     trace_target_mmap(start, len, target_prot, flags, fd, offset);
926 
927     if (!len) {
928         errno = EINVAL;
929         return -1;
930     }
931 
932     page_flags = validate_prot_to_pageflags(target_prot);
933     if (!page_flags) {
934         errno = EINVAL;
935         return -1;
936     }
937 
938     /* Also check for overflows... */
939     len = TARGET_PAGE_ALIGN(len);
940     if (!len || len != (size_t)len) {
941         errno = ENOMEM;
942         return -1;
943     }
944 
945     if (offset & ~TARGET_PAGE_MASK) {
946         errno = EINVAL;
947         return -1;
948     }
949     if (flags & (MAP_FIXED | MAP_FIXED_NOREPLACE)) {
950         if (start & ~TARGET_PAGE_MASK) {
951             errno = EINVAL;
952             return -1;
953         }
954         if (!guest_range_valid_untagged(start, len)) {
955             errno = ENOMEM;
956             return -1;
957         }
958     }
959 
960     mmap_lock();
961 
962     ret = target_mmap__locked(start, len, target_prot, flags,
963                               page_flags, fd, offset);
964 
965     mmap_unlock();
966 
967     /*
968      * If we're mapping shared memory, ensure we generate code for parallel
969      * execution and flush old translations.  This will work up to the level
970      * supported by the host -- anything that requires EXCP_ATOMIC will not
971      * be atomic with respect to an external process.
972      */
973     if (ret != -1 && (flags & MAP_TYPE) != MAP_PRIVATE) {
974         CPUState *cpu = thread_cpu;
975         if (!tcg_cflags_has(cpu, CF_PARALLEL)) {
976             tcg_cflags_set(cpu, CF_PARALLEL);
977             tb_flush(cpu);
978         }
979     }
980 
981     return ret;
982 }
983 
984 static int mmap_reserve_or_unmap(abi_ulong start, abi_ulong len)
985 {
986     int host_page_size = qemu_real_host_page_size();
987     abi_ulong real_start;
988     abi_ulong real_last;
989     abi_ulong real_len;
990     abi_ulong last;
991     abi_ulong a;
992     void *host_start;
993     int prot;
994 
995     last = start + len - 1;
996     real_start = start & -host_page_size;
997     real_last = ROUND_UP(last, host_page_size) - 1;
998 
999     /*
1000      * If guest pages remain on the first or last host pages,
1001      * adjust the deallocation to retain those guest pages.
1002      * The single page special case is required for the last page,
1003      * lest real_start overflow to zero.
1004      */
1005     if (real_last - real_start < host_page_size) {
1006         prot = 0;
1007         for (a = real_start; a < start; a += TARGET_PAGE_SIZE) {
1008             prot |= page_get_flags(a);
1009         }
1010         for (a = last; a < real_last; a += TARGET_PAGE_SIZE) {
1011             prot |= page_get_flags(a + 1);
1012         }
1013         if (prot != 0) {
1014             return 0;
1015         }
1016     } else {
1017         for (prot = 0, a = real_start; a < start; a += TARGET_PAGE_SIZE) {
1018             prot |= page_get_flags(a);
1019         }
1020         if (prot != 0) {
1021             real_start += host_page_size;
1022         }
1023 
1024         for (prot = 0, a = last; a < real_last; a += TARGET_PAGE_SIZE) {
1025             prot |= page_get_flags(a + 1);
1026         }
1027         if (prot != 0) {
1028             real_last -= host_page_size;
1029         }
1030 
1031         if (real_last < real_start) {
1032             return 0;
1033         }
1034     }
1035 
1036     real_len = real_last - real_start + 1;
1037     host_start = g2h_untagged(real_start);
1038 
1039     return do_munmap(host_start, real_len);
1040 }
1041 
1042 int target_munmap(abi_ulong start, abi_ulong len)
1043 {
1044     int ret;
1045 
1046     trace_target_munmap(start, len);
1047 
1048     if (start & ~TARGET_PAGE_MASK) {
1049         errno = EINVAL;
1050         return -1;
1051     }
1052     len = TARGET_PAGE_ALIGN(len);
1053     if (len == 0 || !guest_range_valid_untagged(start, len)) {
1054         errno = EINVAL;
1055         return -1;
1056     }
1057 
1058     mmap_lock();
1059     ret = mmap_reserve_or_unmap(start, len);
1060     if (likely(ret == 0)) {
1061         page_set_flags(start, start + len - 1, 0);
1062         shm_region_rm_complete(start, start + len - 1);
1063     }
1064     mmap_unlock();
1065 
1066     return ret;
1067 }
1068 
1069 abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
1070                        abi_ulong new_size, unsigned long flags,
1071                        abi_ulong new_addr)
1072 {
1073     int prot;
1074     void *host_addr;
1075 
1076     if (!guest_range_valid_untagged(old_addr, old_size) ||
1077         ((flags & MREMAP_FIXED) &&
1078          !guest_range_valid_untagged(new_addr, new_size)) ||
1079         ((flags & MREMAP_MAYMOVE) == 0 &&
1080          !guest_range_valid_untagged(old_addr, new_size))) {
1081         errno = ENOMEM;
1082         return -1;
1083     }
1084 
1085     mmap_lock();
1086 
1087     if (flags & MREMAP_FIXED) {
1088         host_addr = mremap(g2h_untagged(old_addr), old_size, new_size,
1089                            flags, g2h_untagged(new_addr));
1090 
1091         if (reserved_va && host_addr != MAP_FAILED) {
1092             /*
1093              * If new and old addresses overlap then the above mremap will
1094              * already have failed with EINVAL.
1095              */
1096             mmap_reserve_or_unmap(old_addr, old_size);
1097         }
1098     } else if (flags & MREMAP_MAYMOVE) {
1099         abi_ulong mmap_start;
1100 
1101         mmap_start = mmap_find_vma(0, new_size, TARGET_PAGE_SIZE);
1102 
1103         if (mmap_start == -1) {
1104             errno = ENOMEM;
1105             host_addr = MAP_FAILED;
1106         } else {
1107             host_addr = mremap(g2h_untagged(old_addr), old_size, new_size,
1108                                flags | MREMAP_FIXED,
1109                                g2h_untagged(mmap_start));
1110             if (reserved_va) {
1111                 mmap_reserve_or_unmap(old_addr, old_size);
1112             }
1113         }
1114     } else {
1115         int page_flags = 0;
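        /*
         * With reserved_va, only grow in place if the guest pages beyond
         * the old mapping are unused; otherwise fail with ENOMEM below.
         */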
1116         if (reserved_va && old_size < new_size) {
1117             abi_ulong addr;
1118             for (addr = old_addr + old_size;
1119                  addr < old_addr + new_size;
1120                  addr++) {
1121                 page_flags |= page_get_flags(addr);
1122             }
1123         }
1124         if (page_flags == 0) {
1125             host_addr = mremap(g2h_untagged(old_addr),
1126                                old_size, new_size, flags);
1127 
1128             if (host_addr != MAP_FAILED) {
1129                 /* Check if address fits target address space */
1130                 if (!guest_range_valid_untagged(h2g(host_addr), new_size)) {
1131                     /* Revert mremap() changes */
1132                     host_addr = mremap(g2h_untagged(old_addr),
1133                                        new_size, old_size, flags);
1134                     errno = ENOMEM;
1135                     host_addr = MAP_FAILED;
1136                 } else if (reserved_va && old_size > new_size) {
1137                     mmap_reserve_or_unmap(old_addr + old_size,
1138                                           old_size - new_size);
1139                 }
1140             }
1141         } else {
1142             errno = ENOMEM;
1143             host_addr = MAP_FAILED;
1144         }
1145     }
1146 
1147     if (host_addr == MAP_FAILED) {
1148         new_addr = -1;
1149     } else {
1150         new_addr = h2g(host_addr);
1151         prot = page_get_flags(old_addr);
1152         page_set_flags(old_addr, old_addr + old_size - 1, 0);
1153         shm_region_rm_complete(old_addr, old_addr + old_size - 1);
1154         page_set_flags(new_addr, new_addr + new_size - 1,
1155                        prot | PAGE_VALID | PAGE_RESET);
1156         shm_region_rm_complete(new_addr, new_addr + new_size - 1);
1157     }
1158     mmap_unlock();
1159     return new_addr;
1160 }
1161 
1162 abi_long target_madvise(abi_ulong start, abi_ulong len_in, int advice)
1163 {
1164     abi_ulong len;
1165     int ret = 0;
1166 
1167     if (start & ~TARGET_PAGE_MASK) {
1168         return -TARGET_EINVAL;
1169     }
1170     if (len_in == 0) {
1171         return 0;
1172     }
1173     len = TARGET_PAGE_ALIGN(len_in);
1174     if (len == 0 || !guest_range_valid_untagged(start, len)) {
1175         return -TARGET_EINVAL;
1176     }
1177 
1178     /* Translate for some architectures which have different MADV_xxx values */
1179     switch (advice) {
1180     case TARGET_MADV_DONTNEED:      /* alpha */
1181         advice = MADV_DONTNEED;
1182         break;
1183     case TARGET_MADV_WIPEONFORK:    /* parisc */
1184         advice = MADV_WIPEONFORK;
1185         break;
1186     case TARGET_MADV_KEEPONFORK:    /* parisc */
1187         advice = MADV_KEEPONFORK;
1188         break;
1189     /* we do not care about the other MADV_xxx values yet */
1190     }
1191 
1192     /*
1193      * Most advice values are hints, so ignoring and returning success is ok.
1194      *
1195      * However, some advice values such as MADV_DONTNEED, MADV_WIPEONFORK and
1196      * MADV_KEEPONFORK are not hints and need to be emulated.
1197      *
1198      * A straight passthrough for those may not be safe because qemu sometimes
1199      * turns private file-backed mappings into anonymous mappings.
1200      * If all guest pages have PAGE_PASSTHROUGH set, mappings have the
1201      * same semantics for the host as for the guest.
1202      *
1203      * We pass through MADV_WIPEONFORK and MADV_KEEPONFORK if possible and
1204      * return failure if not.
1205      *
1206      * MADV_DONTNEED is passed through as well, if possible.
1207      * If passthrough isn't possible, we nevertheless (wrongly!) return
1208      * success, which is broken but some userspace programs fail to work
1209      * otherwise. Completely implementing such emulation is quite complicated
1210      * though.
1211      */
1212     mmap_lock();
1213     switch (advice) {
1214     case MADV_WIPEONFORK:
1215     case MADV_KEEPONFORK:
1216         ret = -EINVAL;
1217         /* fall through */
1218     case MADV_DONTNEED:
1219         if (page_check_range(start, len, PAGE_PASSTHROUGH)) {
1220             ret = get_errno(madvise(g2h_untagged(start), len, advice));
1221             if ((advice == MADV_DONTNEED) && (ret == 0)) {
1222                 page_reset_target_data(start, start + len - 1);
1223             }
1224         }
1225     }
1226     mmap_unlock();
1227 
1228     return ret;
1229 }
1230 
1231 #ifndef TARGET_FORCE_SHMLBA
1232 /*
1233  * For most architectures, SHMLBA is the same as the page size;
1234  * some architectures have larger values, in which case they should
1235  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
1236  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
1237  * and defining its own value for SHMLBA.
1238  *
1239  * The kernel also permits SHMLBA to be set by the architecture to a
1240  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
1241  * this means that addresses are rounded to the large size if
1242  * SHM_RND is set but addresses not aligned to that size are not rejected
1243  * as long as they are at least page-aligned. Since the only architecture
1244  * which uses this is ia64 this code doesn't provide for that oddity.
1245  */
1246 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
1247 {
1248     return TARGET_PAGE_SIZE;
1249 }
1250 #endif
1251 
1252 #if defined(__arm__) || defined(__mips__) || defined(__sparc__)
1253 #define HOST_FORCE_SHMLBA 1
1254 #else
1255 #define HOST_FORCE_SHMLBA 0
1256 #endif
1257 
1258 abi_ulong target_shmat(CPUArchState *cpu_env, int shmid,
1259                        abi_ulong shmaddr, int shmflg)
1260 {
1261     CPUState *cpu = env_cpu(cpu_env);
1262     struct shmid_ds shm_info;
1263     int ret;
1264     int h_pagesize;
1265     int t_shmlba, h_shmlba, m_shmlba;
1266     size_t t_len, h_len, m_len;
1267 
1268     /* shmat pointers are always untagged */
1269 
1270     /*
1271      * Because we can't use host shmat() unless the address is sufficiently
1272      * aligned for the host, we'll need to check both.
1273      * TODO: Could be fixed with softmmu.
1274      */
1275     t_shmlba = target_shmlba(cpu_env);
1276     h_pagesize = qemu_real_host_page_size();
1277     h_shmlba = (HOST_FORCE_SHMLBA ? SHMLBA : h_pagesize);
1278     m_shmlba = MAX(t_shmlba, h_shmlba);
1279 
1280     if (shmaddr) {
1281         if (shmaddr & (m_shmlba - 1)) {
1282             if (shmflg & SHM_RND) {
1283                 /*
1284                  * The guest is allowing the kernel to round the address.
1285                  * Assume that the guest is ok with us rounding to the
1286                  * host required alignment too.  Anyway if we don't, we'll
1287                  * get an error from the kernel.
1288                  */
1289                 shmaddr &= ~(m_shmlba - 1);
1290                 if (shmaddr == 0 && (shmflg & SHM_REMAP)) {
1291                     return -TARGET_EINVAL;
1292                 }
1293             } else {
1294                 int require = TARGET_PAGE_SIZE;
1295 #ifdef TARGET_FORCE_SHMLBA
1296                 require = t_shmlba;
1297 #endif
1298                 /*
1299                  * Include host required alignment, as otherwise we cannot
1300                  * use host shmat at all.
1301                  */
1302                 require = MAX(require, h_shmlba);
1303                 if (shmaddr & (require - 1)) {
1304                     return -TARGET_EINVAL;
1305                 }
1306             }
1307         }
1308     } else {
1309         if (shmflg & SHM_REMAP) {
1310             return -TARGET_EINVAL;
1311         }
1312     }
1313     /* All rounding now manually concluded. */
1314     shmflg &= ~SHM_RND;
1315 
1316     /* Find out the length of the shared memory segment. */
1317     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
1318     if (is_error(ret)) {
1319         /* can't get length, bail out */
1320         return ret;
1321     }
1322     t_len = TARGET_PAGE_ALIGN(shm_info.shm_segsz);
1323     h_len = ROUND_UP(shm_info.shm_segsz, h_pagesize);
1324     m_len = MAX(t_len, h_len);
1325 
1326     if (!guest_range_valid_untagged(shmaddr, m_len)) {
1327         return -TARGET_EINVAL;
1328     }
1329 
1330     WITH_MMAP_LOCK_GUARD() {
1331         bool mapped = false;
1332         void *want, *test;
1333         abi_ulong last;
1334 
1335         if (!shmaddr) {
1336             shmaddr = mmap_find_vma(0, m_len, m_shmlba);
1337             if (shmaddr == -1) {
1338                 return -TARGET_ENOMEM;
1339             }
1340             mapped = !reserved_va;
1341         } else if (shmflg & SHM_REMAP) {
1342             /*
1343              * If host page size > target page size, the host shmat may map
1344              * more memory than the guest expects.  Reject a mapping that
1345              * would replace memory in the unexpected gap.
1346              * TODO: Could be fixed with softmmu.
1347              */
1348             if (t_len < h_len &&
1349                 !page_check_range_empty(shmaddr + t_len,
1350                                         shmaddr + h_len - 1)) {
1351                 return -TARGET_EINVAL;
1352             }
1353         } else {
1354             if (!page_check_range_empty(shmaddr, shmaddr + m_len - 1)) {
1355                 return -TARGET_EINVAL;
1356             }
1357         }
1358 
1359         /* All placement is now complete. */
1360         want = (void *)g2h_untagged(shmaddr);
1361 
1362         /*
1363          * Map anonymous pages across the entire range, then remap with
1364          * the shared memory.  This is required for a number of corner
1365          * cases for which host and guest page sizes differ.
1366          */
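        /*
         * Pre-mapping m_len anonymous pages ensures the entire rounded
         * range is defined before shmat() replaces the first h_len bytes
         * with the segment itself.
         */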
1367         if (h_len != t_len) {
1368             int mmap_p = PROT_READ | (shmflg & SHM_RDONLY ? 0 : PROT_WRITE);
1369             int mmap_f = MAP_PRIVATE | MAP_ANONYMOUS
1370                        | (reserved_va || mapped || (shmflg & SHM_REMAP)
1371                           ? MAP_FIXED : MAP_FIXED_NOREPLACE);
1372 
1373             test = mmap(want, m_len, mmap_p, mmap_f, -1, 0);
1374             if (unlikely(test != want)) {
1375                 /* shmat reports EINVAL where mmap would report EEXIST. */
1376                 ret = (test == MAP_FAILED && errno != EEXIST
1377                        ? get_errno(-1) : -TARGET_EINVAL);
1378                 if (mapped) {
1379                     do_munmap(want, m_len);
1380                 }
1381                 return ret;
1382             }
1383             mapped = true;
1384         }
1385 
1386         if (reserved_va || mapped) {
1387             shmflg |= SHM_REMAP;
1388         }
1389         test = shmat(shmid, want, shmflg);
1390         if (test == MAP_FAILED) {
1391             ret = get_errno(-1);
1392             if (mapped) {
1393                 do_munmap(want, m_len);
1394             }
1395             return ret;
1396         }
1397         assert(test == want);
1398 
1399         last = shmaddr + m_len - 1;
1400         page_set_flags(shmaddr, last,
1401                        PAGE_VALID | PAGE_RESET | PAGE_READ |
1402                        (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE) |
1403                        (shmflg & SHM_EXEC ? PAGE_EXEC : 0));
1404 
1405         shm_region_rm_complete(shmaddr, last);
1406         shm_region_add(shmaddr, last);
1407     }
1408 
1409     /*
1410      * We're mapping shared memory, so ensure we generate code for parallel
1411      * execution and flush old translations.  This will work up to the level
1412      * supported by the host -- anything that requires EXCP_ATOMIC will not
1413      * be atomic with respect to an external process.
1414      */
1415     if (!tcg_cflags_has(cpu, CF_PARALLEL)) {
1416         tcg_cflags_set(cpu, CF_PARALLEL);
1417         tb_flush(cpu);
1418     }
1419 
1420     if (qemu_loglevel_mask(CPU_LOG_PAGE)) {
1421         FILE *f = qemu_log_trylock();
1422         if (f) {
1423             fprintf(f, "page layout changed following shmat\n");
1424             page_dump(f);
1425             qemu_log_unlock(f);
1426         }
1427     }
1428     return shmaddr;
1429 }
1430 
1431 abi_long target_shmdt(abi_ulong shmaddr)
1432 {
1433     abi_long rv;
1434 
1435     /* shmdt pointers are always untagged */
1436 
1437     WITH_MMAP_LOCK_GUARD() {
1438         abi_ulong last = shm_region_find(shmaddr);
1439         if (last == 0) {
1440             return -TARGET_EINVAL;
1441         }
1442 
1443         rv = get_errno(shmdt(g2h_untagged(shmaddr)));
1444         if (rv == 0) {
1445             abi_ulong size = last - shmaddr + 1;
1446 
1447             page_set_flags(shmaddr, last, 0);
1448             shm_region_rm_complete(shmaddr, last);
1449             mmap_reserve_or_unmap(shmaddr, size);
1450         }
1451     }
1452     return rv;
1453 }
1454