/*
 *  User emulator execution
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "hw/core/tcg-cpu-ops.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#include "qemu/bitops.h"
#include "qemu/rcu.h"
#include "exec/cpu_ldst.h"
#include "qemu/main-loop.h"
#include "exec/translate-all.h"
#include "exec/page-protection.h"
#include "exec/helper-proto.h"
#include "qemu/atomic128.h"
#include "trace/trace-root.h"
#include "tcg/tcg-ldst.h"
#include "internal-common.h"
#include "internal-target.h"

__thread uintptr_t helper_retaddr;

//#define DEBUG_SIGNAL

void cpu_interrupt(CPUState *cpu, int mask)
{
    g_assert(bql_locked());
    cpu->interrupt_request |= mask;
    qatomic_set(&cpu->neg.icount_decr.u16.high, -1);
}

/*
 * Adjust the pc to pass to cpu_restore_state; return the memop type.
 */
MMUAccessType adjust_signal_pc(uintptr_t *pc, bool is_write)
{
    switch (helper_retaddr) {
    default:
        /*
         * Fault during host memory operation within a helper function.
         * The helper's host return address, saved here, gives us a
         * pointer into the generated code that will unwind to the
         * correct guest pc.
         */
        *pc = helper_retaddr;
        break;

    case 0:
        /*
         * Fault during host memory operation within generated code.
         * (Or, an unrelated bug within qemu, but we can't tell from here).
         *
         * We take the host pc from the signal frame. However, we cannot
         * use that value directly. Within cpu_restore_state_from_tb, we
         * assume PC comes from GETPC(), as used by the helper functions,
         * so we adjust the address by -GETPC_ADJ to form an address that
         * is within the call insn, so that the address does not accidentally
         * match the beginning of the next guest insn. However, when the
         * pc comes from the signal frame it points to the actual faulting
         * host memory insn and not the return from a call insn.
         *
         * Therefore, adjust to compensate for what will be done later
         * by cpu_restore_state_from_tb.
         */
        *pc += GETPC_ADJ;
        break;

    case 1:
        /*
         * Fault during host read for translation, or loosely, "execution".
         *
         * The guest pc is already pointing to the start of the TB for which
         * code is being generated. If the guest translator manages the
         * page crossings correctly, this is exactly the correct address
         * (and if the translator doesn't handle page boundaries correctly
         * there's little we can do about that here). Therefore, do not
         * trigger the unwinder.
         */
        *pc = 0;
        return MMU_INST_FETCH;
    }

    return is_write ? MMU_DATA_STORE : MMU_DATA_LOAD;
}

/**
 * handle_sigsegv_accerr_write:
 * @cpu: the cpu context
 * @old_set: the sigset_t from the signal ucontext_t
 * @host_pc: the host pc, adjusted for the signal
 * @guest_addr: the guest address of the fault
 *
 * Return true if the write fault has been handled, and should be re-tried.
 *
 * Note that it is important that we don't call page_unprotect() unless
 * this is really a "write to nonwritable page" fault, because
 * page_unprotect() assumes that if it is called for an access to
 * a page that's writable this means we had two threads racing and
 * another thread got there first and already made the page writable;
 * so we will retry the access. If we were to call page_unprotect()
 * for some other kind of fault that should really be passed to the
 * guest, we'd end up in an infinite loop of retrying the faulting access.
 */
bool handle_sigsegv_accerr_write(CPUState *cpu, sigset_t *old_set,
                                 uintptr_t host_pc, abi_ptr guest_addr)
{
    switch (page_unprotect(guest_addr, host_pc)) {
    case 0:
        /*
         * Fault not caused by a page marked unwritable to protect
         * cached translations, must be the guest binary's problem.
         */
        return false;
    case 1:
        /*
         * Fault caused by protection of cached translation; TBs
         * invalidated, so resume execution.
         */
        return true;
    case 2:
        /*
         * Fault caused by protection of cached translation, and the
         * currently executing TB was modified and must be exited immediately.
         */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit_noexc(cpu);
        /* NORETURN */
    default:
        g_assert_not_reached();
    }
}

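/*
 * Guest page flags are tracked per contiguous range in an interval tree,
 * keyed by guest addresses [itree.start, itree.last].  Updates require the
 * mmap lock; readers may search the tree locklessly, with stale nodes
 * reclaimed via RCU (hence the rcu_head member).
 */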
typedef struct PageFlagsNode {
    struct rcu_head rcu;
    IntervalTreeNode itree;
    int flags;
} PageFlagsNode;

static IntervalTreeRoot pageflags_root;

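/* Return the flags node covering any part of [start, last], or NULL. */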
static PageFlagsNode *pageflags_find(target_ulong start, target_ulong last)
{
    IntervalTreeNode *n;

    n = interval_tree_iter_first(&pageflags_root, start, last);
    return n ? container_of(n, PageFlagsNode, itree) : NULL;
}

static PageFlagsNode *pageflags_next(PageFlagsNode *p, target_ulong start,
                                     target_ulong last)
{
    IntervalTreeNode *n;

    n = interval_tree_iter_next(&p->itree, start, last);
    return n ? container_of(n, PageFlagsNode, itree) : NULL;
}

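/*
 * Invoke @fn for every tracked region, passing the start address, the
 * (exclusive) end address and the flags.  Iteration stops early if @fn
 * returns non-zero, and that value is returned.
 */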
int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    IntervalTreeNode *n;
    int rc = 0;

    mmap_lock();
    for (n = interval_tree_iter_first(&pageflags_root, 0, -1);
         n != NULL;
         n = interval_tree_iter_next(n, 0, -1)) {
        PageFlagsNode *p = container_of(n, PageFlagsNode, itree);

        rc = fn(priv, n->start, n->last + 1, p->flags);
        if (rc != 0) {
            break;
        }
    }
    mmap_unlock();

    return rc;
}

static int dump_region(void *priv, target_ulong start,
                       target_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx" "TARGET_FMT_lx" %c%c%c\n",
            start, end, end - start,
            ((prot & PAGE_READ) ? 'r' : '-'),
            ((prot & PAGE_WRITE) ? 'w' : '-'),
            ((prot & PAGE_EXEC) ? 'x' : '-'));
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    const int length = sizeof(target_ulong) * 2;

    fprintf(f, "%-*s %-*s %-*s %s\n",
            length, "start", length, "end", length, "size", "prot");
    walk_memory_regions(f, dump_region);
}

int page_get_flags(target_ulong address)
{
    PageFlagsNode *p = pageflags_find(address, address);

    /*
     * See util/interval-tree.c re lockless lookups: no false positives but
     * there are false negatives. If we find nothing, retry with the mmap
     * lock acquired.
     */
    if (p) {
        return p->flags;
    }
    if (have_mmap_lock()) {
        return 0;
    }

    mmap_lock();
    p = pageflags_find(address, address);
    mmap_unlock();
    return p ? p->flags : 0;
}

/* A subroutine of page_set_flags: insert a new node for [start,last]. */
static void pageflags_create(target_ulong start, target_ulong last, int flags)
{
    PageFlagsNode *p = g_new(PageFlagsNode, 1);

    p->itree.start = start;
    p->itree.last = last;
    p->flags = flags;
    interval_tree_insert(&p->itree, &pageflags_root);
}

/* A subroutine of page_set_flags: remove everything in [start,last]. */
static bool pageflags_unset(target_ulong start, target_ulong last)
{
    bool inval_tb = false;

    while (true) {
        PageFlagsNode *p = pageflags_find(start, last);
        target_ulong p_last;

        if (!p) {
            break;
        }

        if (p->flags & PAGE_EXEC) {
            inval_tb = true;
        }

        interval_tree_remove(&p->itree, &pageflags_root);
        p_last = p->itree.last;

        if (p->itree.start < start) {
            /* Truncate the node from the end, or split out the middle. */
            p->itree.last = start - 1;
            interval_tree_insert(&p->itree, &pageflags_root);
            if (last < p_last) {
                pageflags_create(last + 1, p_last, p->flags);
                break;
            }
        } else if (p_last <= last) {
            /* Range completely covers node -- remove it. */
            g_free_rcu(p, rcu);
        } else {
            /* Truncate the node from the start. */
            p->itree.start = last + 1;
            interval_tree_insert(&p->itree, &pageflags_root);
            break;
        }
    }

    return inval_tb;
}

/*
 * A subroutine of page_set_flags: nothing overlaps [start,last],
 * but check adjacent mappings and maybe merge into a single range.
 */
static void pageflags_create_merge(target_ulong start, target_ulong last,
                                   int flags)
{
    PageFlagsNode *next = NULL, *prev = NULL;

    if (start > 0) {
        prev = pageflags_find(start - 1, start - 1);
        if (prev) {
            if (prev->flags == flags) {
                interval_tree_remove(&prev->itree, &pageflags_root);
            } else {
                prev = NULL;
            }
        }
    }
    if (last + 1 != 0) {
        next = pageflags_find(last + 1, last + 1);
        if (next) {
            if (next->flags == flags) {
                interval_tree_remove(&next->itree, &pageflags_root);
            } else {
                next = NULL;
            }
        }
    }

    if (prev) {
        if (next) {
            prev->itree.last = next->itree.last;
            g_free_rcu(next, rcu);
        } else {
            prev->itree.last = last;
        }
        interval_tree_insert(&prev->itree, &pageflags_root);
    } else if (next) {
        next->itree.start = start;
        interval_tree_insert(&next->itree, &pageflags_root);
    } else {
        pageflags_create(start, last, flags);
    }
}

/*
 * Allow the target to decide if PAGE_TARGET_[12] may be reset.
 * By default, they are not kept.
 */
#ifndef PAGE_TARGET_STICKY
#define PAGE_TARGET_STICKY 0
#endif
#define PAGE_STICKY (PAGE_ANON | PAGE_PASSTHROUGH | PAGE_TARGET_STICKY)

/*
 * A subroutine of page_set_flags: set @set_flags and clear @clear_flags
 * in [start,last].  Returns true if an overlapping executable region
 * loses PAGE_EXEC or gains PAGE_WRITE, in which case the caller must
 * invalidate the affected translations.
 */
static bool pageflags_set_clear(target_ulong start, target_ulong last,
                                int set_flags, int clear_flags)
{
    PageFlagsNode *p;
    target_ulong p_start, p_last;
    int p_flags, merge_flags;
    bool inval_tb = false;

 restart:
    p = pageflags_find(start, last);
    if (!p) {
        if (set_flags) {
            pageflags_create_merge(start, last, set_flags);
        }
        goto done;
    }

    p_start = p->itree.start;
    p_last = p->itree.last;
    p_flags = p->flags;
    /* Using mprotect on a page does not change sticky bits. */
    merge_flags = (p_flags & ~clear_flags) | set_flags;

    /*
     * Need to flush if an overlapping executable region
     * removes exec, or adds write.
     */
    if ((p_flags & PAGE_EXEC)
        && (!(merge_flags & PAGE_EXEC)
            || (merge_flags & ~p_flags & PAGE_WRITE))) {
        inval_tb = true;
    }

    /*
     * If there is an exact range match, update and return without
     * attempting to merge with adjacent regions.
     */
    if (start == p_start && last == p_last) {
        if (merge_flags) {
            p->flags = merge_flags;
        } else {
            interval_tree_remove(&p->itree, &pageflags_root);
            g_free_rcu(p, rcu);
        }
        goto done;
    }

    /*
     * If sticky bits affect the original mapping, then we must be more
     * careful about the existing intervals and the separate flags.
     */
    if (set_flags != merge_flags) {
        if (p_start < start) {
            interval_tree_remove(&p->itree, &pageflags_root);
            p->itree.last = start - 1;
            interval_tree_insert(&p->itree, &pageflags_root);

            if (last < p_last) {
                if (merge_flags) {
                    pageflags_create(start, last, merge_flags);
                }
                pageflags_create(last + 1, p_last, p_flags);
            } else {
                if (merge_flags) {
                    pageflags_create(start, p_last, merge_flags);
                }
                if (p_last < last) {
                    start = p_last + 1;
                    goto restart;
                }
            }
        } else {
            if (start < p_start && set_flags) {
                pageflags_create(start, p_start - 1, set_flags);
            }
            if (last < p_last) {
                interval_tree_remove(&p->itree, &pageflags_root);
                p->itree.start = last + 1;
                interval_tree_insert(&p->itree, &pageflags_root);
                if (merge_flags) {
                    pageflags_create(start, last, merge_flags);
                }
            } else {
                if (merge_flags) {
                    p->flags = merge_flags;
                } else {
                    interval_tree_remove(&p->itree, &pageflags_root);
                    g_free_rcu(p, rcu);
                }
                if (p_last < last) {
                    start = p_last + 1;
                    goto restart;
                }
            }
        }
        goto done;
    }

    /* If flags are not changing for this range, incorporate it. */
    if (set_flags == p_flags) {
        if (start < p_start) {
            interval_tree_remove(&p->itree, &pageflags_root);
            p->itree.start = start;
            interval_tree_insert(&p->itree, &pageflags_root);
        }
        if (p_last < last) {
            start = p_last + 1;
            goto restart;
        }
        goto done;
    }

    /* Maybe split out head and/or tail ranges with the original flags. */
    interval_tree_remove(&p->itree, &pageflags_root);
    if (p_start < start) {
        p->itree.last = start - 1;
        interval_tree_insert(&p->itree, &pageflags_root);

        if (p_last < last) {
            goto restart;
        }
        if (last < p_last) {
            pageflags_create(last + 1, p_last, p_flags);
        }
    } else if (last < p_last) {
        p->itree.start = last + 1;
        interval_tree_insert(&p->itree, &pageflags_root);
    } else {
        g_free_rcu(p, rcu);
        goto restart;
    }
    if (set_flags) {
        pageflags_create(start, last, set_flags);
    }

 done:
    return inval_tb;
}

/*
 * Modify the flags of a page and invalidate the code if necessary.
 * The flag PAGE_WRITE_ORG is set automatically based on PAGE_WRITE.
 * The mmap_lock should already be held.
 */
void page_set_flags(target_ulong start, target_ulong last, int flags)
{
    bool reset = false;
    bool inval_tb = false;

    /* This function should never be called with addresses outside the
       guest address space. If this assert fires, it probably indicates
       a missing call to h2g_valid. */
    assert(start <= last);
    assert(last <= GUEST_ADDR_MAX);
    /* Only set PAGE_ANON with new mappings. */
    assert(!(flags & PAGE_ANON) || (flags & PAGE_RESET));
    assert_memory_lock();

    start &= TARGET_PAGE_MASK;
    last |= ~TARGET_PAGE_MASK;

    if (!(flags & PAGE_VALID)) {
        flags = 0;
    } else {
        reset = flags & PAGE_RESET;
        flags &= ~PAGE_RESET;
        if (flags & PAGE_WRITE) {
            flags |= PAGE_WRITE_ORG;
        }
    }

    if (!flags || reset) {
        page_reset_target_data(start, last);
        inval_tb |= pageflags_unset(start, last);
    }
    if (flags) {
        inval_tb |= pageflags_set_clear(start, last, flags,
                                        ~(reset ? 0 : PAGE_STICKY));
    }
    if (inval_tb) {
        tb_invalidate_phys_range(start, last);
    }
}

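/*
 * Return true if every page in [start, start + len) has all of the flags
 * in @flags.  For PAGE_WRITE, a page that was write-protected only to
 * guard cached translations still counts as writable and is unprotected
 * on demand via page_unprotect().
 */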
bool page_check_range(target_ulong start, target_ulong len, int flags)
{
    target_ulong last;
    int locked;  /* tri-state: =0: unlocked, +1: global, -1: local */
    bool ret;

    if (len == 0) {
        return true;  /* trivial length */
    }

    last = start + len - 1;
    if (last < start) {
        return false; /* wrap around */
    }

    locked = have_mmap_lock();
    while (true) {
        PageFlagsNode *p = pageflags_find(start, last);
        int missing;

        if (!p) {
            if (!locked) {
                /*
                 * Lockless lookups have false negatives.
                 * Retry with the lock held.
                 */
                mmap_lock();
                locked = -1;
                p = pageflags_find(start, last);
            }
            if (!p) {
                ret = false; /* entire region invalid */
                break;
            }
        }
        if (start < p->itree.start) {
            ret = false; /* initial bytes invalid */
            break;
        }

        missing = flags & ~p->flags;
        if (missing & ~PAGE_WRITE) {
            ret = false; /* page doesn't match */
            break;
        }
        if (missing & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                ret = false; /* page not writable */
                break;
            }
            /* Asking about writable, but has been protected: undo. */
            if (!page_unprotect(start, 0)) {
                ret = false;
                break;
            }
            /* TODO: page_unprotect should take a range, not a single page. */
            if (last - start < TARGET_PAGE_SIZE) {
                ret = true; /* ok */
                break;
            }
            start += TARGET_PAGE_SIZE;
            continue;
        }

        if (last <= p->itree.last) {
            ret = true; /* ok */
            break;
        }
        start = p->itree.last + 1;
    }

    /* Release the lock if acquired locally. */
    if (locked < 0) {
        mmap_unlock();
    }
    return ret;
}

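/* Return true if [start, last] contains no tracked mappings at all. */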
bool page_check_range_empty(target_ulong start, target_ulong last)
{
    assert(last >= start);
    assert_memory_lock();
    return pageflags_find(start, last) == NULL;
}

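/*
 * Find a contiguous range of @len bytes within [min, max] that does not
 * overlap any existing mapping, with the start aligned to @align (a power
 * of two).  Returns the start address, or -1 if no such range exists.
 */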
target_ulong page_find_range_empty(target_ulong min, target_ulong max,
                                   target_ulong len, target_ulong align)
{
    target_ulong len_m1, align_m1;

    assert(min <= max);
    assert(max <= GUEST_ADDR_MAX);
    assert(len != 0);
    assert(is_power_of_2(align));
    assert_memory_lock();

    len_m1 = len - 1;
    align_m1 = align - 1;

    /* Iteratively narrow the search region. */
    while (1) {
        PageFlagsNode *p;

        /* Align min and double-check there's enough space remaining. */
        min = (min + align_m1) & ~align_m1;
        if (min > max) {
            return -1;
        }
        if (len_m1 > max - min) {
            return -1;
        }

        p = pageflags_find(min, min + len_m1);
        if (p == NULL) {
            /* Found! */
            return min;
        }
        if (max <= p->itree.last) {
            /* Existing allocation fills the remainder of the search region. */
            return -1;
        }
        /* Skip across existing allocation. */
        min = p->itree.last + 1;
    }
}

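/*
 * Write-protect the host page(s) backing @address so that a guest write
 * to translated code is caught as SIGSEGV and handled by page_unprotect().
 * PAGE_WRITE is cleared from the tracked flags, while PAGE_WRITE_ORG
 * remembers that the page was originally writable.
 */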
void page_protect(tb_page_addr_t address)
{
    PageFlagsNode *p;
    target_ulong start, last;
    int host_page_size = qemu_real_host_page_size();
    int prot;

    assert_memory_lock();

    if (host_page_size <= TARGET_PAGE_SIZE) {
        start = address & TARGET_PAGE_MASK;
        last = start + TARGET_PAGE_SIZE - 1;
    } else {
        start = address & -host_page_size;
        last = start + host_page_size - 1;
    }

    p = pageflags_find(start, last);
    if (!p) {
        return;
    }
    prot = p->flags;

    if (unlikely(p->itree.last < last)) {
        /* More than one protection region covers the one host page. */
        assert(TARGET_PAGE_SIZE < host_page_size);
        while ((p = pageflags_next(p, start, last)) != NULL) {
            prot |= p->flags;
        }
    }

    if (prot & PAGE_WRITE) {
        pageflags_set_clear(start, last, 0, PAGE_WRITE);
        mprotect(g2h_untagged(start), last - start + 1,
                 prot & (PAGE_READ | PAGE_EXEC) ? PROT_READ : PROT_NONE);
    }
}

/*
 * Called from signal handler: invalidate the code and unprotect the
 * page. Return 0 if the fault was not handled, 1 if it was handled,
 * and 2 if it was handled but the caller must cause the TB to be
 * immediately exited. (We can only return 2 if the 'pc' argument is
 * non-zero.)
 */
int page_unprotect(target_ulong address, uintptr_t pc)
{
    PageFlagsNode *p;
    bool current_tb_invalidated;

    /*
     * Technically this isn't safe inside a signal handler. However we
     * know this only ever happens in a synchronous SEGV handler, so in
     * practice it seems to be ok.
     */
    mmap_lock();

    p = pageflags_find(address, address);

    /* If this address was not really writable, nothing to do. */
    if (!p || !(p->flags & PAGE_WRITE_ORG)) {
        mmap_unlock();
        return 0;
    }

    current_tb_invalidated = false;
    if (p->flags & PAGE_WRITE) {
        /*
         * If the page is actually marked WRITE then assume this is because
         * this thread raced with another one which got here first and
         * set the page to PAGE_WRITE and did the TB invalidate for us.
         */
#ifdef TARGET_HAS_PRECISE_SMC
        TranslationBlock *current_tb = tcg_tb_lookup(pc);
        if (current_tb) {
            current_tb_invalidated = tb_cflags(current_tb) & CF_INVALID;
        }
#endif
    } else {
        int host_page_size = qemu_real_host_page_size();
        target_ulong start, len, i;
        int prot;

        if (host_page_size <= TARGET_PAGE_SIZE) {
            start = address & TARGET_PAGE_MASK;
            len = TARGET_PAGE_SIZE;
            prot = p->flags | PAGE_WRITE;
            pageflags_set_clear(start, start + len - 1, PAGE_WRITE, 0);
            current_tb_invalidated = tb_invalidate_phys_page_unwind(start, pc);
        } else {
            start = address & -host_page_size;
            len = host_page_size;
            prot = 0;

            for (i = 0; i < len; i += TARGET_PAGE_SIZE) {
                target_ulong addr = start + i;

                p = pageflags_find(addr, addr);
                if (p) {
                    prot |= p->flags;
                    if (p->flags & PAGE_WRITE_ORG) {
                        prot |= PAGE_WRITE;
                        pageflags_set_clear(addr, addr + TARGET_PAGE_SIZE - 1,
                                            PAGE_WRITE, 0);
                    }
                }
                /*
                 * Since the content will be modified, we must invalidate
                 * the corresponding translated code.
                 */
                current_tb_invalidated |=
                    tb_invalidate_phys_page_unwind(addr, pc);
            }
        }
        if (prot & PAGE_EXEC) {
            prot = (prot & ~PAGE_EXEC) | PAGE_READ;
        }
        mprotect((void *)g2h_untagged(start), len, prot & PAGE_RWX);
    }
    mmap_unlock();

    /* If current TB was invalidated return to main loop */
    return current_tb_invalidated ? 2 : 1;
}

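/*
 * Check whether an access at @addr is permitted for @access_type.
 * Returns 0 on success; TLB_MMIO if the access must be observed by plugin
 * memory callbacks; TLB_INVALID_MASK if @nonfault is set and the access is
 * not permitted.  Otherwise raises a guest SIGSEGV and does not return.
 */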
static int probe_access_internal(CPUArchState *env, vaddr addr,
                                 int fault_size, MMUAccessType access_type,
                                 bool nonfault, uintptr_t ra)
{
    int acc_flag;
    bool maperr;

    switch (access_type) {
    case MMU_DATA_STORE:
        acc_flag = PAGE_WRITE_ORG;
        break;
    case MMU_DATA_LOAD:
        acc_flag = PAGE_READ;
        break;
    case MMU_INST_FETCH:
        acc_flag = PAGE_EXEC;
        break;
    default:
        g_assert_not_reached();
    }

    if (guest_addr_valid_untagged(addr)) {
        int page_flags = page_get_flags(addr);
        if (page_flags & acc_flag) {
            if (access_type != MMU_INST_FETCH
                && cpu_plugin_mem_cbs_enabled(env_cpu(env))) {
                return TLB_MMIO;
            }
            return 0; /* success */
        }
        maperr = !(page_flags & PAGE_VALID);
    } else {
        maperr = true;
    }

    if (nonfault) {
        return TLB_INVALID_MASK;
    }

    cpu_loop_exit_sigsegv(env_cpu(env), addr, access_type, maperr, ra);
}

int probe_access_flags(CPUArchState *env, vaddr addr, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool nonfault, void **phost, uintptr_t ra)
{
    int flags;

    g_assert(-(addr | TARGET_PAGE_MASK) >= size);
    flags = probe_access_internal(env, addr, size, access_type, nonfault, ra);
    *phost = (flags & TLB_INVALID_MASK) ? NULL : g2h(env_cpu(env), addr);
    return flags;
}

void *probe_access(CPUArchState *env, vaddr addr, int size,
                   MMUAccessType access_type, int mmu_idx, uintptr_t ra)
{
    int flags;

    g_assert(-(addr | TARGET_PAGE_MASK) >= size);
    flags = probe_access_internal(env, addr, size, access_type, false, ra);
    g_assert((flags & ~TLB_MMIO) == 0);

    return size ? g2h(env_cpu(env), addr) : NULL;
}

tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
                                        void **hostp)
{
    int flags;

    flags = probe_access_internal(env, addr, 1, MMU_INST_FETCH, false, 0);
    g_assert(flags == 0);

    if (hostp) {
        *hostp = g2h_untagged(addr);
    }
    return addr;
}

#ifdef TARGET_PAGE_DATA_SIZE
/*
 * Allocate chunks of target data together. For the only current user,
 * if we allocate one hunk per page, we have overhead of 40/128, i.e.
 * roughly 30%. Therefore, allocate memory for 64 pages at a time for
 * overhead < 1%.
 */
#define TPD_PAGES 64
#define TBD_MASK (TARGET_PAGE_MASK * TPD_PAGES)

typedef struct TargetPageDataNode {
    struct rcu_head rcu;
    IntervalTreeNode itree;
    char data[] __attribute__((aligned));
} TargetPageDataNode;

static IntervalTreeRoot targetdata_root;

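/*
 * Zero the per-page target data for every page in [start, last].  Nodes
 * entirely contained in the range are freed; partially covered nodes have
 * only the overlapping pages cleared.
 */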
void page_reset_target_data(target_ulong start, target_ulong last)
{
    IntervalTreeNode *n, *next;

    assert_memory_lock();

    start &= TARGET_PAGE_MASK;
    last |= ~TARGET_PAGE_MASK;

    for (n = interval_tree_iter_first(&targetdata_root, start, last),
         next = n ? interval_tree_iter_next(n, start, last) : NULL;
         n != NULL;
         n = next,
         next = next ? interval_tree_iter_next(n, start, last) : NULL) {
        target_ulong n_start, n_last, p_ofs, p_len;
        TargetPageDataNode *t = container_of(n, TargetPageDataNode, itree);

        if (n->start >= start && n->last <= last) {
            interval_tree_remove(n, &targetdata_root);
            g_free_rcu(t, rcu);
            continue;
        }

        if (n->start < start) {
            n_start = start;
            p_ofs = (start - n->start) >> TARGET_PAGE_BITS;
        } else {
            n_start = n->start;
            p_ofs = 0;
        }
        n_last = MIN(last, n->last);
        p_len = (n_last + 1 - n_start) >> TARGET_PAGE_BITS;

        memset(t->data + p_ofs * TARGET_PAGE_DATA_SIZE, 0,
               p_len * TARGET_PAGE_DATA_SIZE);
    }
}

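/*
 * Return a pointer to the TARGET_PAGE_DATA_SIZE bytes associated with the
 * page containing @address, allocating the backing node (which covers
 * TPD_PAGES pages) on first use.
 */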
void *page_get_target_data(target_ulong address)
{
    IntervalTreeNode *n;
    TargetPageDataNode *t;
    target_ulong page, region, p_ofs;

    page = address & TARGET_PAGE_MASK;
    region = address & TBD_MASK;

    n = interval_tree_iter_first(&targetdata_root, page, page);
    if (!n) {
        /*
         * See util/interval-tree.c re lockless lookups: no false positives
         * but there are false negatives. If we find nothing, retry with
         * the mmap lock acquired. We also need the lock for the
         * allocation + insert.
         */
        mmap_lock();
        n = interval_tree_iter_first(&targetdata_root, page, page);
        if (!n) {
            t = g_malloc0(sizeof(TargetPageDataNode)
                          + TPD_PAGES * TARGET_PAGE_DATA_SIZE);
            n = &t->itree;
            n->start = region;
            n->last = region | ~TBD_MASK;
            interval_tree_insert(n, &targetdata_root);
        }
        mmap_unlock();
    }

    t = container_of(n, TargetPageDataNode, itree);
    p_ofs = (page - region) >> TARGET_PAGE_BITS;
    return t->data + p_ofs * TARGET_PAGE_DATA_SIZE;
}
#else
void page_reset_target_data(target_ulong start, target_ulong last) { }
#endif /* TARGET_PAGE_DATA_SIZE */

/* The system-mode versions of these helpers are in cputlb.c. */

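/*
 * Validate guest alignment for @mop and translate @addr to a host pointer.
 * The helper return address is recorded so that a fault taken during the
 * host access unwinds to the correct guest state; the caller must pair
 * this with clear_helper_retaddr() once the access is complete.
 */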
static void *cpu_mmu_lookup(CPUState *cpu, vaddr addr,
                            MemOp mop, uintptr_t ra, MMUAccessType type)
{
    int a_bits = get_alignment_bits(mop);
    void *ret;

    /* Enforce guest required alignment. */
    if (unlikely(addr & ((1 << a_bits) - 1))) {
        cpu_loop_exit_sigbus(cpu, addr, type, ra);
    }

    ret = g2h(cpu, addr);
    set_helper_retaddr(ra);
    return ret;
}

#include "ldst_atomicity.c.inc"

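/*
 * The do_ld*_mmu/do_st*_mmu functions below implement the user-mode slow
 * path for guest memory accesses: enforce ordering via cpu_req_mo(),
 * translate the address with cpu_mmu_lookup(), perform the access with
 * the required atomicity, and byte-swap if the MemOp requests it.
 */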
static uint8_t do_ld1_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
                          uintptr_t ra, MMUAccessType access_type)
{
    void *haddr;
    uint8_t ret;

    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
    haddr = cpu_mmu_lookup(cpu, addr, get_memop(oi), ra, access_type);
    ret = ldub_p(haddr);
    clear_helper_retaddr();
    return ret;
}

static uint16_t do_ld2_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
                           uintptr_t ra, MMUAccessType access_type)
{
    void *haddr;
    uint16_t ret;
    MemOp mop = get_memop(oi);

    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
    haddr = cpu_mmu_lookup(cpu, addr, mop, ra, access_type);
    ret = load_atom_2(cpu, ra, haddr, mop);
    clear_helper_retaddr();

    if (mop & MO_BSWAP) {
        ret = bswap16(ret);
    }
    return ret;
}

static uint32_t do_ld4_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
                           uintptr_t ra, MMUAccessType access_type)
{
    void *haddr;
    uint32_t ret;
    MemOp mop = get_memop(oi);

    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
    haddr = cpu_mmu_lookup(cpu, addr, mop, ra, access_type);
    ret = load_atom_4(cpu, ra, haddr, mop);
    clear_helper_retaddr();

    if (mop & MO_BSWAP) {
        ret = bswap32(ret);
    }
    return ret;
}

static uint64_t do_ld8_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
                           uintptr_t ra, MMUAccessType access_type)
{
    void *haddr;
    uint64_t ret;
    MemOp mop = get_memop(oi);

    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
    haddr = cpu_mmu_lookup(cpu, addr, mop, ra, access_type);
    ret = load_atom_8(cpu, ra, haddr, mop);
    clear_helper_retaddr();

    if (mop & MO_BSWAP) {
        ret = bswap64(ret);
    }
    return ret;
}

static Int128 do_ld16_mmu(CPUState *cpu, abi_ptr addr,
                          MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    Int128 ret;
    MemOp mop = get_memop(oi);

    tcg_debug_assert((mop & MO_SIZE) == MO_128);
    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
    haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_LOAD);
    ret = load_atom_16(cpu, ra, haddr, mop);
    clear_helper_retaddr();

    if (mop & MO_BSWAP) {
        ret = bswap128(ret);
    }
    return ret;
}

static void do_st1_mmu(CPUState *cpu, vaddr addr, uint8_t val,
                       MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
    haddr = cpu_mmu_lookup(cpu, addr, get_memop(oi), ra, MMU_DATA_STORE);
    stb_p(haddr, val);
    clear_helper_retaddr();
}

static void do_st2_mmu(CPUState *cpu, vaddr addr, uint16_t val,
                       MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    MemOp mop = get_memop(oi);

    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
    haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_STORE);

    if (mop & MO_BSWAP) {
        val = bswap16(val);
    }
    store_atom_2(cpu, ra, haddr, mop, val);
    clear_helper_retaddr();
}

static void do_st4_mmu(CPUState *cpu, vaddr addr, uint32_t val,
                       MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    MemOp mop = get_memop(oi);

    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
    haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_STORE);

    if (mop & MO_BSWAP) {
        val = bswap32(val);
    }
    store_atom_4(cpu, ra, haddr, mop, val);
    clear_helper_retaddr();
}

static void do_st8_mmu(CPUState *cpu, vaddr addr, uint64_t val,
                       MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    MemOp mop = get_memop(oi);

    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
    haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_STORE);

    if (mop & MO_BSWAP) {
        val = bswap64(val);
    }
    store_atom_8(cpu, ra, haddr, mop, val);
    clear_helper_retaddr();
}

static void do_st16_mmu(CPUState *cpu, vaddr addr, Int128 val,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    MemOp mop = get_memop(oi);

    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
    haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_STORE);

    if (mop & MO_BSWAP) {
        val = bswap128(val);
    }
    store_atom_16(cpu, ra, haddr, mop, val);
    clear_helper_retaddr();
}

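/*
 * Code loads for the translator.  Setting helper_retaddr to 1 marks any
 * fault taken here as a fetch during translation (see case 1 in
 * adjust_signal_pc) rather than a data access within generated code.
 */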
uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = ldub_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = lduw_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = ldl_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr ptr)
{
    uint64_t ret;

    set_helper_retaddr(1);
    ret = ldq_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

uint8_t cpu_ldb_code_mmu(CPUArchState *env, abi_ptr addr,
                         MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint8_t ret;

    haddr = cpu_mmu_lookup(env_cpu(env), addr, oi, ra, MMU_INST_FETCH);
    ret = ldub_p(haddr);
    clear_helper_retaddr();
    return ret;
}

uint16_t cpu_ldw_code_mmu(CPUArchState *env, abi_ptr addr,
                          MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint16_t ret;

    haddr = cpu_mmu_lookup(env_cpu(env), addr, oi, ra, MMU_INST_FETCH);
    ret = lduw_p(haddr);
    clear_helper_retaddr();
    if (get_memop(oi) & MO_BSWAP) {
        ret = bswap16(ret);
    }
    return ret;
}

uint32_t cpu_ldl_code_mmu(CPUArchState *env, abi_ptr addr,
                          MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint32_t ret;

    haddr = cpu_mmu_lookup(env_cpu(env), addr, oi, ra, MMU_INST_FETCH);
    ret = ldl_p(haddr);
    clear_helper_retaddr();
    if (get_memop(oi) & MO_BSWAP) {
        ret = bswap32(ret);
    }
    return ret;
}

uint64_t cpu_ldq_code_mmu(CPUArchState *env, abi_ptr addr,
                          MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint64_t ret;

    haddr = cpu_mmu_lookup(env_cpu(env), addr, oi, ra, MMU_DATA_LOAD);
    ret = ldq_p(haddr);
    clear_helper_retaddr();
    if (get_memop(oi) & MO_BSWAP) {
        ret = bswap64(ret);
    }
    return ret;
}

#include "ldst_common.c.inc"

/*
 * Do not allow unaligned operations to proceed. Return the host address.
 */
static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
                               int size, uintptr_t retaddr)
{
    MemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    void *ret;

    /* Enforce guest required alignment. */
    if (unlikely(addr & ((1 << a_bits) - 1))) {
        cpu_loop_exit_sigbus(cpu, addr, MMU_DATA_STORE, retaddr);
    }

    /* Enforce qemu required alignment. */
    if (unlikely(addr & (size - 1))) {
        cpu_loop_exit_atomic(cpu, retaddr);
    }

    ret = g2h(cpu, addr);
    set_helper_retaddr(retaddr);
    return ret;
}

#include "atomic_common.c.inc"

/*
 * First set of functions passes in OI and RETADDR.
 * This makes them callable from other helpers.
 */

#define ATOMIC_NAME(X) \
    glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)
#define ATOMIC_MMU_CLEANUP do { clear_helper_retaddr(); } while (0)

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#if defined(CONFIG_ATOMIC128) || HAVE_CMPXCHG128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif