/*
 * mmap support for qemu
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include <sys/shm.h>
#include "trace.h"
#include "exec/log.h"
#include "exec/page-protection.h"
#include "exec/tb-flush.h"
#include "exec/translation-block.h"
#include "qemu.h"
#include "user/page-protection.h"
#include "user-internals.h"
#include "user-mmap.h"
#include "target_mman.h"
#include "qemu/interval-tree.h"

#ifdef TARGET_ARM
#include "target/arm/cpu-features.h"
#endif

static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
static __thread int mmap_lock_count;

void mmap_lock(void)
{
    if (mmap_lock_count++ == 0) {
        pthread_mutex_lock(&mmap_mutex);
    }
}

void mmap_unlock(void)
{
    assert(mmap_lock_count > 0);
    if (--mmap_lock_count == 0) {
        pthread_mutex_unlock(&mmap_mutex);
    }
}

bool have_mmap_lock(void)
{
    return mmap_lock_count > 0 ? true : false;
}

/* Grab lock to make sure things are in a consistent state after fork(). */
void mmap_fork_start(void)
{
    if (mmap_lock_count)
        abort();
    pthread_mutex_lock(&mmap_mutex);
}

void mmap_fork_end(int child)
{
    if (child) {
        pthread_mutex_init(&mmap_mutex, NULL);
    } else {
        pthread_mutex_unlock(&mmap_mutex);
    }
}
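
/*
 * Note: mmap_fork_start() and mmap_fork_end() are expected to bracket the
 * actual fork() so that no thread holds mmap_mutex across it and the child
 * restarts with a freshly initialised mutex.  A rough sketch of the call
 * sequence (the real call sites live in the generic fork handling, not here):
 *
 *     mmap_fork_start();
 *     pid = fork();
 *     mmap_fork_end(pid == 0);   // child reinitialises, parent unlocks
 */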

/* Protected by mmap_lock. */
static IntervalTreeRoot shm_regions;

static void shm_region_add(abi_ptr start, abi_ptr last)
{
    IntervalTreeNode *i = g_new0(IntervalTreeNode, 1);

    i->start = start;
    i->last = last;
    interval_tree_insert(i, &shm_regions);
}

static abi_ptr shm_region_find(abi_ptr start)
{
    IntervalTreeNode *i;

    for (i = interval_tree_iter_first(&shm_regions, start, start); i;
         i = interval_tree_iter_next(i, start, start)) {
        if (i->start == start) {
            return i->last;
        }
    }
    return 0;
}

static void shm_region_rm_complete(abi_ptr start, abi_ptr last)
{
    IntervalTreeNode *i, *n;

    for (i = interval_tree_iter_first(&shm_regions, start, last); i; i = n) {
        n = interval_tree_iter_next(i, start, last);
        if (i->start >= start && i->last <= last) {
            interval_tree_remove(i, &shm_regions);
            g_free(i);
        }
    }
}
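
/*
 * Together these helpers track the guest ranges attached with shmat():
 * target_shmat() records every attach via shm_region_add(), target_shmdt()
 * recovers the segment length via shm_region_find() (shmdt() itself only
 * takes an address), and the mmap/munmap/mremap paths call
 * shm_region_rm_complete() to drop any region they replace or tear down.
 */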

/*
 * Validate target prot bitmask.
 * Return 0 if the target prot bitmask is invalid, otherwise
 * the internal qemu page_flags (which will include PAGE_VALID).
 */
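/*
 * Note that PROT_READ/PROT_WRITE/PROT_EXEC are reused directly as
 * PAGE_READ/PAGE_WRITE/PAGE_EXEC (the flag values match by design), so
 * e.g. PROT_READ | PROT_WRITE yields PAGE_READ | PAGE_WRITE | PAGE_VALID,
 * while any bit outside the accepted set makes the function return 0 and
 * the callers report EINVAL.
 */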
static int validate_prot_to_pageflags(int prot)
{
    int valid = PROT_READ | PROT_WRITE | PROT_EXEC | TARGET_PROT_SEM;
    int page_flags = (prot & PAGE_RWX) | PAGE_VALID;

#ifdef TARGET_AARCH64
    {
        ARMCPU *cpu = ARM_CPU(thread_cpu);

        /*
         * The PROT_BTI bit is only accepted if the cpu supports the feature.
         * Since this is the unusual case, don't bother checking unless
         * the bit has been requested.  If set and valid, record the bit
         * within QEMU's page_flags.
         */
        if ((prot & TARGET_PROT_BTI) && cpu_isar_feature(aa64_bti, cpu)) {
            valid |= TARGET_PROT_BTI;
            page_flags |= PAGE_BTI;
        }
        /* Similarly for the PROT_MTE bit. */
        if ((prot & TARGET_PROT_MTE) && cpu_isar_feature(aa64_mte, cpu)) {
            valid |= TARGET_PROT_MTE;
            page_flags |= PAGE_MTE;
        }
    }
#elif defined(TARGET_HPPA)
    valid |= PROT_GROWSDOWN | PROT_GROWSUP;
#endif

    return prot & ~valid ? 0 : page_flags;
}

/*
 * For the host, we need not pass anything except read/write/exec.
 * While PROT_SEM is allowed by all hosts, it is also ignored, so
 * don't bother transforming guest bit to host bit.  Any other
 * target-specific prot bits will not be understood by the host
 * and will need to be encoded into page_flags for qemu emulation.
 *
 * Pages that are executable by the guest will never be executed
 * by the host, but the host will need to be able to read them.
 */
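/*
 * For example, a guest mapping created with just PROT_EXEC is mapped on the
 * host with PROT_READ only: QEMU's translator must be able to read the guest
 * code, but the host never executes it directly.
 */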
static int target_to_host_prot(int prot)
{
    return (prot & (PROT_READ | PROT_WRITE)) |
           (prot & PROT_EXEC ? PROT_READ : 0);
}

/* NOTE: all the constants are the HOST ones, but addresses are target. */
int target_mprotect(abi_ulong start, abi_ulong len, int target_prot)
{
    int host_page_size = qemu_real_host_page_size();
    abi_ulong starts[3];
    abi_ulong lens[3];
    int prots[3];
    abi_ulong host_start, host_last, last;
    int prot1, ret, page_flags, nranges;

    trace_target_mprotect(start, len, target_prot);

    if ((start & ~TARGET_PAGE_MASK) != 0) {
        return -TARGET_EINVAL;
    }
    page_flags = validate_prot_to_pageflags(target_prot);
    if (!page_flags) {
        return -TARGET_EINVAL;
    }
    if (len == 0) {
        return 0;
    }
    len = TARGET_PAGE_ALIGN(len);
    if (!guest_range_valid_untagged(start, len)) {
        return -TARGET_ENOMEM;
    }

    last = start + len - 1;
    host_start = start & -host_page_size;
    host_last = ROUND_UP(last, host_page_size) - 1;
    nranges = 0;

    mmap_lock();

    if (host_last - host_start < host_page_size) {
        /* Single host page contains all guest pages: sum the prot. */
        prot1 = target_prot;
        for (abi_ulong a = host_start; a < start; a += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(a);
        }
        for (abi_ulong a = last; a < host_last; a += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(a + 1);
        }
        starts[nranges] = host_start;
        lens[nranges] = host_page_size;
        prots[nranges] = prot1;
        nranges++;
    } else {
        if (host_start < start) {
            /* Host page contains more than one guest page: sum the prot. */
            prot1 = target_prot;
            for (abi_ulong a = host_start; a < start; a += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(a);
            }
            /* If the resulting sum differs, create a new range. */
            if (prot1 != target_prot) {
                starts[nranges] = host_start;
                lens[nranges] = host_page_size;
                prots[nranges] = prot1;
                nranges++;
                host_start += host_page_size;
            }
        }

        if (last < host_last) {
            /* Host page contains more than one guest page: sum the prot. */
            prot1 = target_prot;
            for (abi_ulong a = last; a < host_last; a += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(a + 1);
            }
            /* If the resulting sum differs, create a new range. */
            if (prot1 != target_prot) {
                host_last -= host_page_size;
                starts[nranges] = host_last + 1;
                lens[nranges] = host_page_size;
                prots[nranges] = prot1;
                nranges++;
            }
        }

        /* Create a range for the middle, if any remains. */
        if (host_start < host_last) {
            starts[nranges] = host_start;
            lens[nranges] = host_last - host_start + 1;
            prots[nranges] = target_prot;
            nranges++;
        }
    }

    for (int i = 0; i < nranges; ++i) {
        ret = mprotect(g2h_untagged(starts[i]), lens[i],
                       target_to_host_prot(prots[i]));
        if (ret != 0) {
            goto error;
        }
    }

    page_set_flags(start, last, page_flags);
    ret = 0;

 error:
    mmap_unlock();
    return ret;
}

/*
 * Perform munmap on behalf of the target, with host parameters.
 * If reserved_va, we must replace the memory reservation.
 */
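/*
 * With reserved_va, the whole guest address space was reserved at startup
 * with a PROT_NONE mapping; re-instating that reservation here (rather than
 * calling munmap()) keeps the hole owned by QEMU, so the host kernel cannot
 * hand the range to an unrelated host allocation later.
 */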
static int do_munmap(void *addr, size_t len)
{
    if (reserved_va) {
        void *ptr = mmap(addr, len, PROT_NONE,
                         MAP_FIXED | MAP_ANONYMOUS
                         | MAP_PRIVATE | MAP_NORESERVE, -1, 0);
        return ptr == addr ? 0 : -1;
    }
    return munmap(addr, len);
}

/*
 * Perform a pread on behalf of target_mmap.  We can reach EOF, we can be
 * interrupted by signals, and in general there's no good error return path.
 * If @zero, zero the rest of the block at EOF.
 * Return true on success.
 */
static bool mmap_pread(int fd, void *p, size_t len, off_t offset, bool zero)
{
    while (1) {
        ssize_t r = pread(fd, p, len, offset);

        if (likely(r == len)) {
            /* Complete */
            return true;
        }
        if (r == 0) {
            /* EOF */
            if (zero) {
                memset(p, 0, len);
            }
            return true;
        }
        if (r > 0) {
            /* Short read */
            p += r;
            len -= r;
            offset += r;
        } else if (errno != EINTR) {
            /* Error */
            return false;
        }
    }
}

/*
 * Map an incomplete host page.
 *
 * Here be dragons.  This case will not work if there is an existing
 * overlapping host page, which is file mapped, and for which the mapping
 * is beyond the end of the file.  In that case, we will see SIGBUS when
 * trying to write a portion of this page.
 *
 * FIXME: Work around this with a temporary signal handler and longjmp.
 */
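/*
 * Illustrative scenario (assuming a 64K-page host running a 4K-page guest):
 * a guest mmap covering only [0x11000, 0x12fff] touches just part of the
 * host page [0x10000, 0x1ffff].  We cannot hand that range to the host
 * mmap(), so instead we make sure a host page exists (reusing it if other
 * guest pages already live there, or allocating an anonymous one), then
 * read or zero the requested guest pages into it by hand.
 */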
static bool mmap_frag(abi_ulong real_start, abi_ulong start, abi_ulong last,
                      int prot, int flags, int fd, off_t offset)
{
    int host_page_size = qemu_real_host_page_size();
    abi_ulong real_last;
    void *host_start;
    int prot_old, prot_new;
    int host_prot_old, host_prot_new;

    if (!(flags & MAP_ANONYMOUS)
        && (flags & MAP_TYPE) == MAP_SHARED
        && (prot & PROT_WRITE)) {
        /*
         * msync() won't work with the partial page, so we return an
         * error if write is possible while it is a shared mapping.
         */
        errno = EINVAL;
        return false;
    }

    real_last = real_start + host_page_size - 1;
    host_start = g2h_untagged(real_start);

    /* Get the protection of the target pages outside the mapping. */
    prot_old = 0;
    for (abi_ulong a = real_start; a < start; a += TARGET_PAGE_SIZE) {
        prot_old |= page_get_flags(a);
    }
    for (abi_ulong a = real_last; a > last; a -= TARGET_PAGE_SIZE) {
        prot_old |= page_get_flags(a);
    }

    if (prot_old == 0) {
        /*
         * Since !(prot_old & PAGE_VALID), there were no guest pages
         * outside of the fragment we need to map.  Allocate a new host
         * page to cover, discarding whatever else may have been present.
         */
        void *p = mmap(host_start, host_page_size,
                       target_to_host_prot(prot),
                       flags | MAP_ANONYMOUS, -1, 0);
        if (p != host_start) {
            if (p != MAP_FAILED) {
                do_munmap(p, host_page_size);
                errno = EEXIST;
            }
            return false;
        }
        prot_old = prot;
    }
    prot_new = prot | prot_old;

    host_prot_old = target_to_host_prot(prot_old);
    host_prot_new = target_to_host_prot(prot_new);

    /* Adjust protection to be able to write. */
    if (!(host_prot_old & PROT_WRITE)) {
        host_prot_old |= PROT_WRITE;
        mprotect(host_start, host_page_size, host_prot_old);
    }

    /* Read or zero the new guest pages. */
    if (flags & MAP_ANONYMOUS) {
        memset(g2h_untagged(start), 0, last - start + 1);
    } else if (!mmap_pread(fd, g2h_untagged(start), last - start + 1,
                           offset, true)) {
        return false;
    }

    /* Put final protection */
    if (host_prot_new != host_prot_old) {
        mprotect(host_start, host_page_size, host_prot_new);
    }
    return true;
}

abi_ulong task_unmapped_base;
abi_ulong elf_et_dyn_base;
abi_ulong mmap_next_start;

/*
 * Subroutine of mmap_find_vma, used when we have pre-allocated
 * a chunk of guest address space.
 */
static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
                                        abi_ulong align)
{
    target_ulong ret;

    ret = page_find_range_empty(start, reserved_va, size, align);
    if (ret == -1 && start > mmap_min_addr) {
        /* Restart at the beginning of the address space. */
        ret = page_find_range_empty(mmap_min_addr, start - 1, size, align);
    }

    return ret;
}

/*
 * Find and reserve a free memory area of size 'size'.  The search
 * starts at 'start'.
 * It must be called with mmap_lock() held.
 * Return -1 if error.
 */
abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size, abi_ulong align)
{
    int host_page_size = qemu_real_host_page_size();
    void *ptr, *prev;
    abi_ulong addr;
    int wrapped, repeat;

    align = MAX(align, host_page_size);

    /* If 'start' == 0, then a default start address is used. */
    if (start == 0) {
        start = mmap_next_start;
    } else {
        start &= -host_page_size;
    }
    start = ROUND_UP(start, align);
    size = ROUND_UP(size, host_page_size);

    if (reserved_va) {
        return mmap_find_vma_reserved(start, size, align);
    }

    addr = start;
    wrapped = repeat = 0;
    prev = 0;

    for (;; prev = ptr) {
        /*
         * Reserve needed memory area to avoid a race.
         * It should be discarded using:
         *  - mmap() with MAP_FIXED flag
         *  - mremap() with MREMAP_FIXED flag
         *  - shmat() with SHM_REMAP flag
         */
        ptr = mmap(g2h_untagged(addr), size, PROT_NONE,
                   MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0);

        /* ENOMEM, if host address space has no memory */
        if (ptr == MAP_FAILED) {
            return (abi_ulong)-1;
        }

        /*
         * Count the number of sequential returns of the same address.
         * This is used to modify the search algorithm below.
         */
        repeat = (ptr == prev ? repeat + 1 : 0);

        if (h2g_valid(ptr + size - 1)) {
            addr = h2g(ptr);

            if ((addr & (align - 1)) == 0) {
                /* Success. */
                if (start == mmap_next_start && addr >= task_unmapped_base) {
                    mmap_next_start = addr + size;
                }
                return addr;
            }

            /* The address is not properly aligned for the target. */
            switch (repeat) {
            case 0:
                /*
                 * Assume the result that the kernel gave us is the
                 * first with enough free space, so start again at the
                 * next higher target page.
                 */
                addr = ROUND_UP(addr, align);
                break;
            case 1:
                /*
                 * Sometimes the kernel decides to perform the allocation
                 * at the top end of memory instead.
                 */
                addr &= -align;
                break;
            case 2:
                /* Start over at low memory. */
                addr = 0;
                break;
            default:
                /* Fail.  This unaligned block must be the last. */
                addr = -1;
                break;
            }
        } else {
            /*
             * Since the result the kernel gave didn't fit, start
             * again at low memory.  If any repetition, fail.
             */
            addr = (repeat ? -1 : 0);
        }

        /* Unmap and try again. */
        munmap(ptr, size);

        /* ENOMEM if we checked the whole of the target address space. */
        if (addr == (abi_ulong)-1) {
            return (abi_ulong)-1;
        } else if (addr == 0) {
            if (wrapped) {
                return (abi_ulong)-1;
            }
            wrapped = 1;
            /*
             * Don't actually use 0 when wrapping, instead indicate
             * that we'd truly like an allocation in low memory.
             */
            addr = (mmap_min_addr > TARGET_PAGE_SIZE
                    ? TARGET_PAGE_ALIGN(mmap_min_addr)
                    : TARGET_PAGE_SIZE);
        } else if (wrapped && addr >= start) {
            return (abi_ulong)-1;
        }
    }
}

/*
 * Record a successful mmap within the user-exec interval tree.
 */
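/*
 * The [@passthrough_start, @passthrough_last] subrange marks guest pages
 * that are backed one-to-one by the host mapping and therefore get
 * PAGE_PASSTHROUGH; callers that have no such pages pass an empty range
 * (start > last, e.g. -1 and 0).
 */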
static abi_long mmap_end(abi_ulong start, abi_ulong last,
                         abi_ulong passthrough_start,
                         abi_ulong passthrough_last,
                         int flags, int page_flags)
{
    if (flags & MAP_ANONYMOUS) {
        page_flags |= PAGE_ANON;
    }
    page_flags |= PAGE_RESET;
    if (passthrough_start > passthrough_last) {
        page_set_flags(start, last, page_flags);
    } else {
        if (start < passthrough_start) {
            page_set_flags(start, passthrough_start - 1, page_flags);
        }
        page_set_flags(passthrough_start, passthrough_last,
                       page_flags | PAGE_PASSTHROUGH);
        if (passthrough_last < last) {
            page_set_flags(passthrough_last + 1, last, page_flags);
        }
    }
    shm_region_rm_complete(start, last);
    trace_target_mmap_complete(start);
    if (qemu_loglevel_mask(CPU_LOG_PAGE)) {
        FILE *f = qemu_log_trylock();
        if (f) {
            fprintf(f, "page layout changed following mmap\n");
            page_dump(f);
            qemu_log_unlock(f);
        }
    }
    return start;
}

/*
 * Special case host page size == target page size,
 * where there are no edge conditions.
 */
static abi_long mmap_h_eq_g(abi_ulong start, abi_ulong len,
                            int host_prot, int flags, int page_flags,
                            int fd, off_t offset)
{
    void *p, *want_p = NULL;
    abi_ulong last;

    if (start || (flags & (MAP_FIXED | MAP_FIXED_NOREPLACE))) {
        want_p = g2h_untagged(start);
    }

    p = mmap(want_p, len, host_prot, flags, fd, offset);
    if (p == MAP_FAILED) {
        return -1;
    }
    /* If the host kernel does not support MAP_FIXED_NOREPLACE, emulate. */
    if ((flags & MAP_FIXED_NOREPLACE) && p != want_p) {
        do_munmap(p, len);
        errno = EEXIST;
        return -1;
    }

    start = h2g(p);
    last = start + len - 1;
    return mmap_end(start, last, start, last, flags, page_flags);
}

/*
 * Special case host page size < target page size.
 *
 * The two special cases are increased guest alignment, and mapping
 * past the end of a file.
 *
 * When mapping files into a memory area larger than the file,
 * accesses to pages beyond the file size will cause a SIGBUS.
 *
 * For example, if mmapping a file of 100 bytes on a host with 4K
 * pages emulating a target with 8K pages, the target expects to
 * be able to access the first 8K.  But the host will trap us on
 * any access beyond 4K.
 *
 * When emulating a target with a larger page size than the host's,
 * we may need to truncate file maps at EOF and add extra anonymous
 * pages up to the target's page boundary.
 *
 * This workaround only works for files that do not change.
 * If the file is later extended (e.g. ftruncate), the SIGBUS
 * vanishes and the proper behaviour is that changes within the
 * anon page should be reflected in the file.
 *
 * However, this case is rather common with executable images,
 * so the workaround is important for even trivial tests, whereas
 * the mmap of a file being extended is less common.
 */
static abi_long mmap_h_lt_g(abi_ulong start, abi_ulong len, int host_prot,
                            int mmap_flags, int page_flags, int fd,
                            off_t offset, int host_page_size)
{
    void *p, *want_p = NULL;
    off_t fileend_adj = 0;
    int flags = mmap_flags;
    abi_ulong last, pass_last;

    if (start || (flags & (MAP_FIXED | MAP_FIXED_NOREPLACE))) {
        want_p = g2h_untagged(start);
    }

    if (!(flags & MAP_ANONYMOUS)) {
        struct stat sb;

        if (fstat(fd, &sb) == -1) {
            return -1;
        }
        if (offset >= sb.st_size) {
            /*
             * The entire map is beyond the end of the file.
             * Transform it to an anonymous mapping.
             */
            flags |= MAP_ANONYMOUS;
            fd = -1;
            offset = 0;
        } else if (offset + len > sb.st_size) {
            /*
             * A portion of the map is beyond the end of the file.
             * Truncate the file portion of the allocation.
             */
            fileend_adj = offset + len - sb.st_size;
        }
    }

    if (flags & (MAP_FIXED | MAP_FIXED_NOREPLACE)) {
        if (fileend_adj) {
            p = mmap(want_p, len, host_prot, flags | MAP_ANONYMOUS, -1, 0);
        } else {
            p = mmap(want_p, len, host_prot, flags, fd, offset);
        }
        if (p != want_p) {
            if (p != MAP_FAILED) {
                /* Host does not support MAP_FIXED_NOREPLACE: emulate. */
                do_munmap(p, len);
                errno = EEXIST;
            }
            return -1;
        }

        if (fileend_adj) {
            void *t = mmap(p, len - fileend_adj, host_prot,
                           (flags & ~MAP_FIXED_NOREPLACE) | MAP_FIXED,
                           fd, offset);

            if (t == MAP_FAILED) {
                int save_errno = errno;

                /*
                 * We failed a map over the top of the successful anonymous
                 * mapping above. The only failure mode is running out of VMAs,
                 * and there's nothing that we can do to detect that earlier.
                 * If we have replaced an existing mapping with MAP_FIXED,
                 * then we cannot properly recover.  It's a coin toss whether
                 * it would be better to exit or continue here.
                 */
                if (!(flags & MAP_FIXED_NOREPLACE) &&
                    !page_check_range_empty(start, start + len - 1)) {
                    qemu_log("QEMU target_mmap late failure: %s",
                             strerror(save_errno));
                }

                do_munmap(want_p, len);
                errno = save_errno;
                return -1;
            }
        }
    } else {
        size_t host_len, part_len;

        /*
         * Take care to align the host memory.  Perform a larger anonymous
         * allocation and extract the aligned portion.  Remap the file on
         * top of that.
         */
        host_len = len + TARGET_PAGE_SIZE - host_page_size;
        p = mmap(want_p, host_len, host_prot, flags | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
            return -1;
        }

        part_len = (uintptr_t)p & (TARGET_PAGE_SIZE - 1);
        if (part_len) {
            part_len = TARGET_PAGE_SIZE - part_len;
            do_munmap(p, part_len);
            p += part_len;
            host_len -= part_len;
        }
        if (len < host_len) {
            do_munmap(p + len, host_len - len);
        }

        if (!(flags & MAP_ANONYMOUS)) {
            void *t = mmap(p, len - fileend_adj, host_prot,
                           flags | MAP_FIXED, fd, offset);

            if (t == MAP_FAILED) {
                int save_errno = errno;
                do_munmap(p, len);
                errno = save_errno;
                return -1;
            }
        }

        start = h2g(p);
    }

    last = start + len - 1;
    if (fileend_adj) {
        pass_last = ROUND_UP(last - fileend_adj, host_page_size) - 1;
    } else {
        pass_last = last;
    }
    return mmap_end(start, last, start, pass_last, mmap_flags, page_flags);
}

/*
 * Special case host page size > target page size.
 *
 * The two special cases are addresses and file offsets that are valid
 * for the guest but cannot be directly represented by the host.
 */
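/*
 * Roughly: any guest pages that only partially cover a host page at the
 * start or end of the mapping are handled by mmap_frag(), the host-page
 * aligned middle is mapped directly, and a file offset whose alignment
 * does not match the guest address (modulo the host page size) falls back
 * to an anonymous private mapping filled in with mmap_pread() -- which is
 * why such an offset is rejected below unless the mapping is MAP_PRIVATE.
 */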
static abi_long mmap_h_gt_g(abi_ulong start, abi_ulong len,
                            int target_prot, int host_prot,
                            int flags, int page_flags, int fd,
                            off_t offset, int host_page_size)
{
    void *p, *want_p = NULL;
    off_t host_offset = offset & -host_page_size;
    abi_ulong last, real_start, real_last;
    bool misaligned_offset = false;
    size_t host_len;

    if (start || (flags & (MAP_FIXED | MAP_FIXED_NOREPLACE))) {
        want_p = g2h_untagged(start);
    }

    if (!(flags & (MAP_FIXED | MAP_FIXED_NOREPLACE))) {
        /*
         * Adjust the offset to something representable on the host.
         */
        host_len = len + offset - host_offset;
        p = mmap(want_p, host_len, host_prot, flags, fd, host_offset);
        if (p == MAP_FAILED) {
            return -1;
        }

        /* Update start to the file position at offset. */
        p += offset - host_offset;

        start = h2g(p);
        last = start + len - 1;
        return mmap_end(start, last, start, last, flags, page_flags);
    }

    if (!(flags & MAP_ANONYMOUS)) {
        misaligned_offset = (start ^ offset) & (host_page_size - 1);

        /*
         * The fallback for misalignment is a private mapping + read.
         * This carries none of the semantics required of MAP_SHARED.
         */
        if (misaligned_offset && (flags & MAP_TYPE) != MAP_PRIVATE) {
            errno = EINVAL;
            return -1;
        }
    }

    last = start + len - 1;
    real_start = start & -host_page_size;
    real_last = ROUND_UP(last, host_page_size) - 1;

    /*
     * Handle the start and end of the mapping.
     */
    if (real_start < start) {
        abi_ulong real_page_last = real_start + host_page_size - 1;
        if (last <= real_page_last) {
            /* Entire allocation a subset of one host page. */
            if (!mmap_frag(real_start, start, last, target_prot,
                           flags, fd, offset)) {
                return -1;
            }
            return mmap_end(start, last, -1, 0, flags, page_flags);
        }

        if (!mmap_frag(real_start, start, real_page_last, target_prot,
                       flags, fd, offset)) {
            return -1;
        }
        real_start = real_page_last + 1;
    }

    if (last < real_last) {
        abi_ulong real_page_start = real_last - host_page_size + 1;
        if (!mmap_frag(real_page_start, real_page_start, last,
                       target_prot, flags, fd,
                       offset + real_page_start - start)) {
            return -1;
        }
        real_last = real_page_start - 1;
    }

    if (real_start > real_last) {
        return mmap_end(start, last, -1, 0, flags, page_flags);
    }

    /*
     * Handle the middle of the mapping.
     */

    host_len = real_last - real_start + 1;
    want_p += real_start - start;

    if (flags & MAP_ANONYMOUS) {
        p = mmap(want_p, host_len, host_prot, flags, -1, 0);
    } else if (!misaligned_offset) {
        p = mmap(want_p, host_len, host_prot, flags, fd,
                 offset + real_start - start);
    } else {
        p = mmap(want_p, host_len, host_prot | PROT_WRITE,
                 flags | MAP_ANONYMOUS, -1, 0);
    }
    if (p != want_p) {
        if (p != MAP_FAILED) {
            do_munmap(p, host_len);
            errno = EEXIST;
        }
        return -1;
    }

    if (misaligned_offset) {
        if (!mmap_pread(fd, p, host_len, offset + real_start - start, false)) {
            do_munmap(p, host_len);
            return -1;
        }
        if (!(host_prot & PROT_WRITE)) {
            mprotect(p, host_len, host_prot);
        }
    }

    return mmap_end(start, last, -1, 0, flags, page_flags);
}

static abi_long target_mmap__locked(abi_ulong start, abi_ulong len,
                                    int target_prot, int flags, int page_flags,
                                    int fd, off_t offset)
{
    int host_page_size = qemu_real_host_page_size();
    int host_prot;

    /*
     * For reserved_va, we are in full control of the allocation.
     * Find a suitable hole and convert to MAP_FIXED.
     */
    if (reserved_va) {
        if (flags & MAP_FIXED_NOREPLACE) {
            /* Validate that the chosen range is empty. */
            if (!page_check_range_empty(start, start + len - 1)) {
                errno = EEXIST;
                return -1;
            }
            flags = (flags & ~MAP_FIXED_NOREPLACE) | MAP_FIXED;
        } else if (!(flags & MAP_FIXED)) {
            abi_ulong real_start = start & -host_page_size;
            off_t host_offset = offset & -host_page_size;
            size_t real_len = len + offset - host_offset;
            abi_ulong align = MAX(host_page_size, TARGET_PAGE_SIZE);

            start = mmap_find_vma(real_start, real_len, align);
            if (start == (abi_ulong)-1) {
                errno = ENOMEM;
                return -1;
            }
            start += offset - host_offset;
            flags |= MAP_FIXED;
        }
    }

    host_prot = target_to_host_prot(target_prot);

    if (host_page_size == TARGET_PAGE_SIZE) {
        return mmap_h_eq_g(start, len, host_prot, flags,
                           page_flags, fd, offset);
    } else if (host_page_size < TARGET_PAGE_SIZE) {
        return mmap_h_lt_g(start, len, host_prot, flags,
                           page_flags, fd, offset, host_page_size);
    } else {
        return mmap_h_gt_g(start, len, target_prot, host_prot, flags,
                           page_flags, fd, offset, host_page_size);
    }
}

/* NOTE: all the constants are the HOST ones */
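/*
 * That is, the MAP_* bits in @flags are already host values (converted by
 * the syscall layer before calling), while @target_prot still carries the
 * guest PROT_* bits and is translated here.  On failure the function
 * returns -1 with errno set, which the callers convert to a target errno.
 */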
abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
                     int flags, int fd, off_t offset)
{
    abi_long ret;
    int page_flags;

    trace_target_mmap(start, len, target_prot, flags, fd, offset);

    if (!len) {
        errno = EINVAL;
        return -1;
    }

    page_flags = validate_prot_to_pageflags(target_prot);
    if (!page_flags) {
        errno = EINVAL;
        return -1;
    }

    /* Also check for overflows... */
    len = TARGET_PAGE_ALIGN(len);
    if (!len || len != (size_t)len) {
        errno = ENOMEM;
        return -1;
    }

    if (offset & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
        return -1;
    }
    if (flags & (MAP_FIXED | MAP_FIXED_NOREPLACE)) {
        if (start & ~TARGET_PAGE_MASK) {
            errno = EINVAL;
            return -1;
        }
        if (!guest_range_valid_untagged(start, len)) {
            errno = ENOMEM;
            return -1;
        }
    }

    mmap_lock();

    ret = target_mmap__locked(start, len, target_prot, flags,
                              page_flags, fd, offset);

    mmap_unlock();

    /*
     * If we're mapping shared memory, ensure we generate code for parallel
     * execution and flush old translations.  This will work up to the level
     * supported by the host -- anything that requires EXCP_ATOMIC will not
     * be atomic with respect to an external process.
     */
    if (ret != -1 && (flags & MAP_TYPE) != MAP_PRIVATE) {
        CPUState *cpu = thread_cpu;
        if (!tcg_cflags_has(cpu, CF_PARALLEL)) {
            tcg_cflags_set(cpu, CF_PARALLEL);
            tb_flush(cpu);
        }
    }

    return ret;
}

static int mmap_reserve_or_unmap(abi_ulong start, abi_ulong len)
{
    int host_page_size = qemu_real_host_page_size();
    abi_ulong real_start;
    abi_ulong real_last;
    abi_ulong real_len;
    abi_ulong last;
    abi_ulong a;
    void *host_start;
    int prot;

    last = start + len - 1;
    real_start = start & -host_page_size;
    real_last = ROUND_UP(last, host_page_size) - 1;

    /*
     * If guest pages remain on the first or last host pages,
     * adjust the deallocation to retain those guest pages.
     * The single page special case is required for the last page,
     * lest real_start overflow to zero.
     */
    if (real_last - real_start < host_page_size) {
        prot = 0;
        for (a = real_start; a < start; a += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(a);
        }
        for (a = last; a < real_last; a += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(a + 1);
        }
        if (prot != 0) {
            return 0;
        }
    } else {
        for (prot = 0, a = real_start; a < start; a += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(a);
        }
        if (prot != 0) {
            real_start += host_page_size;
        }

        for (prot = 0, a = last; a < real_last; a += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(a + 1);
        }
        if (prot != 0) {
            real_last -= host_page_size;
        }

        if (real_last < real_start) {
            return 0;
        }
    }

    real_len = real_last - real_start + 1;
    host_start = g2h_untagged(real_start);

    return do_munmap(host_start, real_len);
}

int target_munmap(abi_ulong start, abi_ulong len)
{
    int ret;

    trace_target_munmap(start, len);

    if (start & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
        return -1;
    }
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0 || !guest_range_valid_untagged(start, len)) {
        errno = EINVAL;
        return -1;
    }

    mmap_lock();
    ret = mmap_reserve_or_unmap(start, len);
    if (likely(ret == 0)) {
        page_set_flags(start, start + len - 1, 0);
        shm_region_rm_complete(start, start + len - 1);
    }
    mmap_unlock();

    return ret;
}

abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
                       abi_ulong new_size, unsigned long flags,
                       abi_ulong new_addr)
{
    int prot;
    void *host_addr;

    if (!guest_range_valid_untagged(old_addr, old_size) ||
        ((flags & MREMAP_FIXED) &&
         !guest_range_valid_untagged(new_addr, new_size)) ||
        ((flags & MREMAP_MAYMOVE) == 0 &&
         !guest_range_valid_untagged(old_addr, new_size))) {
        errno = ENOMEM;
        return -1;
    }

    mmap_lock();

    if (flags & MREMAP_FIXED) {
        host_addr = mremap(g2h_untagged(old_addr), old_size, new_size,
                           flags, g2h_untagged(new_addr));

        if (reserved_va && host_addr != MAP_FAILED) {
            /*
             * If new and old addresses overlap then the above mremap will
             * already have failed with EINVAL.
             */
            mmap_reserve_or_unmap(old_addr, old_size);
        }
    } else if (flags & MREMAP_MAYMOVE) {
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, new_size, TARGET_PAGE_SIZE);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        } else {
            host_addr = mremap(g2h_untagged(old_addr), old_size, new_size,
                               flags | MREMAP_FIXED,
                               g2h_untagged(mmap_start));
            if (reserved_va) {
                mmap_reserve_or_unmap(old_addr, old_size);
            }
        }
    } else {
        int page_flags = 0;
        if (reserved_va && old_size < new_size) {
            abi_ulong addr;
            for (addr = old_addr + old_size;
                 addr < old_addr + new_size;
                 addr++) {
                page_flags |= page_get_flags(addr);
            }
        }
        if (page_flags == 0) {
            host_addr = mremap(g2h_untagged(old_addr),
                               old_size, new_size, flags);

            if (host_addr != MAP_FAILED) {
                /* Check if address fits target address space */
                if (!guest_range_valid_untagged(h2g(host_addr), new_size)) {
                    /* Revert mremap() changes */
                    host_addr = mremap(g2h_untagged(old_addr),
                                       new_size, old_size, flags);
                    errno = ENOMEM;
                    host_addr = MAP_FAILED;
                } else if (reserved_va && old_size > new_size) {
                    mmap_reserve_or_unmap(old_addr + old_size,
                                          old_size - new_size);
                }
            }
        } else {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        }
    }

    if (host_addr == MAP_FAILED) {
        new_addr = -1;
    } else {
        new_addr = h2g(host_addr);
        prot = page_get_flags(old_addr);
        page_set_flags(old_addr, old_addr + old_size - 1, 0);
        shm_region_rm_complete(old_addr, old_addr + old_size - 1);
        page_set_flags(new_addr, new_addr + new_size - 1,
                       prot | PAGE_VALID | PAGE_RESET);
        shm_region_rm_complete(new_addr, new_addr + new_size - 1);
    }
    mmap_unlock();
    return new_addr;
}

abi_long target_madvise(abi_ulong start, abi_ulong len_in, int advice)
{
    abi_ulong len;
    int ret = 0;

    if (start & ~TARGET_PAGE_MASK) {
        return -TARGET_EINVAL;
    }
    if (len_in == 0) {
        return 0;
    }
    len = TARGET_PAGE_ALIGN(len_in);
    if (len == 0 || !guest_range_valid_untagged(start, len)) {
        return -TARGET_EINVAL;
    }

    /* Translate for some architectures which have different MADV_xxx values */
    switch (advice) {
    case TARGET_MADV_DONTNEED:      /* alpha */
        advice = MADV_DONTNEED;
        break;
    case TARGET_MADV_WIPEONFORK:    /* parisc */
        advice = MADV_WIPEONFORK;
        break;
    case TARGET_MADV_KEEPONFORK:    /* parisc */
        advice = MADV_KEEPONFORK;
        break;
    /* we do not care about the other MADV_xxx values yet */
    }

    /*
     * Most advice values are hints, so ignoring and returning success is ok.
     *
     * However, some advice values such as MADV_DONTNEED, MADV_WIPEONFORK and
     * MADV_KEEPONFORK are not hints and need to be emulated.
     *
     * A straight passthrough for those may not be safe because qemu sometimes
     * turns private file-backed mappings into anonymous mappings.
     * If all guest pages have PAGE_PASSTHROUGH set, mappings have the
     * same semantics for the host as for the guest.
     *
     * We pass through MADV_WIPEONFORK and MADV_KEEPONFORK if possible and
     * return failure if not.
     *
     * MADV_DONTNEED is passed through as well, if possible.
     * If passthrough isn't possible, we nevertheless (wrongly!) return
     * success, which is broken but some userspace programs fail to work
     * otherwise.  Completely implementing such emulation is quite complicated
     * though.
     */
    mmap_lock();
    switch (advice) {
    case MADV_WIPEONFORK:
    case MADV_KEEPONFORK:
        ret = -EINVAL;
        /* fall through */
    case MADV_DONTNEED:
        if (page_check_range(start, len, PAGE_PASSTHROUGH)) {
            ret = get_errno(madvise(g2h_untagged(start), len, advice));
            if ((advice == MADV_DONTNEED) && (ret == 0)) {
                page_reset_target_data(start, start + len - 1);
            }
        }
    }
    mmap_unlock();

    return ret;
}

#ifndef TARGET_FORCE_SHMLBA
/*
 * For most architectures, SHMLBA is the same as the page size;
 * some architectures have larger values, in which case they should
 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
 * and defining its own value for SHMLBA.
 *
 * The kernel also permits SHMLBA to be set by the architecture to a
 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
 * this means that addresses are rounded to the large size if
 * SHM_RND is set but addresses not aligned to that size are not rejected
 * as long as they are at least page-aligned.  Since the only architecture
 * which uses this is ia64 this code doesn't provide for that oddity.
 */
static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
{
    return TARGET_PAGE_SIZE;
}
#endif

#if defined(__arm__) || defined(__mips__) || defined(__sparc__)
#define HOST_FORCE_SHMLBA 1
#else
#define HOST_FORCE_SHMLBA 0
#endif
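
/*
 * On these hosts the kernel's SHMLBA is larger than the page size
 * (typically to avoid virtual cache aliasing), so host shmat() will only
 * accept addresses aligned to that larger value; HOST_FORCE_SHMLBA makes
 * target_shmat() honour the host constraint as well.
 */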

abi_ulong target_shmat(CPUArchState *cpu_env, int shmid,
                       abi_ulong shmaddr, int shmflg)
{
    CPUState *cpu = env_cpu(cpu_env);
    struct shmid_ds shm_info;
    int ret;
    int h_pagesize;
    int t_shmlba, h_shmlba, m_shmlba;
    size_t t_len, h_len, m_len;

    /* shmat pointers are always untagged */

    /*
     * Because we can't use host shmat() unless the address is sufficiently
     * aligned for the host, we'll need to check both.
     * TODO: Could be fixed with softmmu.
     */
    t_shmlba = target_shmlba(cpu_env);
    h_pagesize = qemu_real_host_page_size();
    h_shmlba = (HOST_FORCE_SHMLBA ? SHMLBA : h_pagesize);
    m_shmlba = MAX(t_shmlba, h_shmlba);

    if (shmaddr) {
        if (shmaddr & (m_shmlba - 1)) {
            if (shmflg & SHM_RND) {
                /*
                 * The guest is allowing the kernel to round the address.
                 * Assume that the guest is ok with us rounding to the
                 * host required alignment too.  Anyway if we don't, we'll
                 * get an error from the kernel.
                 */
                shmaddr &= ~(m_shmlba - 1);
                if (shmaddr == 0 && (shmflg & SHM_REMAP)) {
                    return -TARGET_EINVAL;
                }
            } else {
                int require = TARGET_PAGE_SIZE;
#ifdef TARGET_FORCE_SHMLBA
                require = t_shmlba;
#endif
                /*
                 * Include host required alignment, as otherwise we cannot
                 * use host shmat at all.
                 */
                require = MAX(require, h_shmlba);
                if (shmaddr & (require - 1)) {
                    return -TARGET_EINVAL;
                }
            }
        }
    } else {
        if (shmflg & SHM_REMAP) {
            return -TARGET_EINVAL;
        }
    }
    /* All rounding now manually concluded. */
    shmflg &= ~SHM_RND;

    /* Find out the length of the shared memory segment. */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }
    t_len = TARGET_PAGE_ALIGN(shm_info.shm_segsz);
    h_len = ROUND_UP(shm_info.shm_segsz, h_pagesize);
    m_len = MAX(t_len, h_len);

    if (!guest_range_valid_untagged(shmaddr, m_len)) {
        return -TARGET_EINVAL;
    }

    WITH_MMAP_LOCK_GUARD() {
        bool mapped = false;
        void *want, *test;
        abi_ulong last;

        if (!shmaddr) {
            shmaddr = mmap_find_vma(0, m_len, m_shmlba);
            if (shmaddr == -1) {
                return -TARGET_ENOMEM;
            }
            mapped = !reserved_va;
        } else if (shmflg & SHM_REMAP) {
            /*
             * If host page size > target page size, the host shmat may map
             * more memory than the guest expects.  Reject a mapping that
             * would replace memory in the unexpected gap.
             * TODO: Could be fixed with softmmu.
             */
            if (t_len < h_len &&
                !page_check_range_empty(shmaddr + t_len,
                                        shmaddr + h_len - 1)) {
                return -TARGET_EINVAL;
            }
        } else {
            if (!page_check_range_empty(shmaddr, shmaddr + m_len - 1)) {
                return -TARGET_EINVAL;
            }
        }

        /* All placement is now complete. */
        want = (void *)g2h_untagged(shmaddr);

        /*
         * Map anonymous pages across the entire range, then remap with
         * the shared memory.  This is required for a number of corner
         * cases for which host and guest page sizes differ.
         */
        if (h_len != t_len) {
            int mmap_p = PROT_READ | (shmflg & SHM_RDONLY ? 0 : PROT_WRITE);
            int mmap_f = MAP_PRIVATE | MAP_ANONYMOUS
                       | (reserved_va || mapped || (shmflg & SHM_REMAP)
                          ? MAP_FIXED : MAP_FIXED_NOREPLACE);

            test = mmap(want, m_len, mmap_p, mmap_f, -1, 0);
            if (unlikely(test != want)) {
                /* shmat returns EINVAL not EEXIST like mmap. */
                ret = (test == MAP_FAILED && errno != EEXIST
                       ? get_errno(-1) : -TARGET_EINVAL);
                if (mapped) {
                    do_munmap(want, m_len);
                }
                return ret;
            }
            mapped = true;
        }

        if (reserved_va || mapped) {
            shmflg |= SHM_REMAP;
        }
        test = shmat(shmid, want, shmflg);
        if (test == MAP_FAILED) {
            ret = get_errno(-1);
            if (mapped) {
                do_munmap(want, m_len);
            }
            return ret;
        }
        assert(test == want);

        last = shmaddr + m_len - 1;
        page_set_flags(shmaddr, last,
                       PAGE_VALID | PAGE_RESET | PAGE_READ |
                       (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE) |
                       (shmflg & SHM_EXEC ? PAGE_EXEC : 0));

        shm_region_rm_complete(shmaddr, last);
        shm_region_add(shmaddr, last);
    }

    /*
     * We're mapping shared memory, so ensure we generate code for parallel
     * execution and flush old translations.  This will work up to the level
     * supported by the host -- anything that requires EXCP_ATOMIC will not
     * be atomic with respect to an external process.
     */
    if (!tcg_cflags_has(cpu, CF_PARALLEL)) {
        tcg_cflags_set(cpu, CF_PARALLEL);
        tb_flush(cpu);
    }

    if (qemu_loglevel_mask(CPU_LOG_PAGE)) {
        FILE *f = qemu_log_trylock();
        if (f) {
            fprintf(f, "page layout changed following shmat\n");
            page_dump(f);
            qemu_log_unlock(f);
        }
    }
    return shmaddr;
}

abi_long target_shmdt(abi_ulong shmaddr)
{
    abi_long rv;

    /* shmdt pointers are always untagged */

    WITH_MMAP_LOCK_GUARD() {
        abi_ulong last = shm_region_find(shmaddr);
        if (last == 0) {
            return -TARGET_EINVAL;
        }

        rv = get_errno(shmdt(g2h_untagged(shmaddr)));
        if (rv == 0) {
            abi_ulong size = last - shmaddr + 1;

            page_set_flags(shmaddr, last, 0);
            shm_region_rm_complete(shmaddr, last);
            mmap_reserve_or_unmap(shmaddr, size);
        }
    }
    return rv;
}