xref: /openbmc/qemu/accel/tcg/user-exec.c (revision 21063bce)
/*
 *  User emulator execution
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "hw/core/tcg-cpu-ops.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#include "qemu/bitops.h"
#include "qemu/rcu.h"
#include "exec/cpu_ldst.h"
#include "exec/translate-all.h"
#include "exec/helper-proto.h"
#include "qemu/atomic128.h"
#include "trace/trace-root.h"
#include "tcg/tcg-ldst.h"
#include "internal.h"

__thread uintptr_t helper_retaddr;

//#define DEBUG_SIGNAL

/*
 * Adjust the pc to pass to cpu_restore_state; return the MMU access type.
 */
MMUAccessType adjust_signal_pc(uintptr_t *pc, bool is_write)
{
    switch (helper_retaddr) {
    default:
        /*
         * Fault during host memory operation within a helper function.
         * The helper's host return address, saved here, gives us a
         * pointer into the generated code that will unwind to the
         * correct guest pc.
         */
        *pc = helper_retaddr;
        break;

    case 0:
        /*
         * Fault during host memory operation within generated code.
         * (Or, an unrelated bug within qemu, but we can't tell from here).
         *
         * We take the host pc from the signal frame.  However, we cannot
         * use that value directly.  Within cpu_restore_state_from_tb, we
         * assume PC comes from GETPC(), as used by the helper functions,
         * so we adjust the address by -GETPC_ADJ to form an address that
         * is within the call insn, so that the address does not accidentally
         * match the beginning of the next guest insn.  However, when the
         * pc comes from the signal frame it points to the actual faulting
         * host memory insn and not the return from a call insn.
         *
         * Therefore, adjust to compensate for what will be done later
         * by cpu_restore_state_from_tb.
         */
        *pc += GETPC_ADJ;
        break;

    case 1:
        /*
         * Fault during host read for translation, or loosely, "execution".
         *
         * The guest pc is already pointing to the start of the TB for which
         * code is being generated.  If the guest translator manages the
         * page crossings correctly, this is exactly the correct address
         * (and if the translator doesn't handle page boundaries correctly
         * there's little we can do about that here).  Therefore, do not
         * trigger the unwinder.
         */
        *pc = 0;
        return MMU_INST_FETCH;
    }

    return is_write ? MMU_DATA_STORE : MMU_DATA_LOAD;
}

/**
 * handle_sigsegv_accerr_write:
 * @cpu: the cpu context
 * @old_set: the sigset_t from the signal ucontext_t
 * @host_pc: the host pc, adjusted for the signal
 * @guest_addr: the guest address of the fault
 *
 * Return true if the write fault has been handled, and should be re-tried.
 *
 * Note that it is important that we don't call page_unprotect() unless
 * this is really a "write to nonwritable page" fault, because
 * page_unprotect() assumes that if it is called for an access to
 * a page that's writable this means we had two threads racing and
 * another thread got there first and already made the page writable;
 * so we will retry the access. If we were to call page_unprotect()
 * for some other kind of fault that should really be passed to the
 * guest, we'd end up in an infinite loop of retrying the faulting access.
 */
bool handle_sigsegv_accerr_write(CPUState *cpu, sigset_t *old_set,
                                 uintptr_t host_pc, abi_ptr guest_addr)
{
    switch (page_unprotect(guest_addr, host_pc)) {
    case 0:
        /*
         * Fault not caused by a page marked unwritable to protect
         * cached translations, must be the guest binary's problem.
         */
        return false;
    case 1:
        /*
         * Fault caused by protection of cached translation; TBs
         * invalidated, so resume execution.
         */
        return true;
    case 2:
        /*
         * Fault caused by protection of cached translation, and the
         * currently executing TB was modified and must be exited immediately.
         */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit_noexc(cpu);
        /* NORETURN */
    default:
        g_assert_not_reached();
    }
}
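
/*
 * Illustrative sketch (not part of this file): the per-host SIGSEGV
 * handler typically combines the two functions above roughly as below;
 * the local names here are hypothetical.
 *
 *     MMUAccessType access_type = adjust_signal_pc(&host_pc, is_write);
 *     if (is_write && !maperr
 *         && handle_sigsegv_accerr_write(cpu, old_set, host_pc, guest_addr)) {
 *         return;   /- page unprotected; retry the faulting access -/
 *     }
 *     cpu_loop_exit_sigsegv(cpu, guest_addr, access_type, maperr, host_pc);
 */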

typedef struct PageFlagsNode {
    struct rcu_head rcu;
    IntervalTreeNode itree;
    int flags;
} PageFlagsNode;

static IntervalTreeRoot pageflags_root;

static PageFlagsNode *pageflags_find(target_ulong start, target_long last)
{
    IntervalTreeNode *n;

    n = interval_tree_iter_first(&pageflags_root, start, last);
    return n ? container_of(n, PageFlagsNode, itree) : NULL;
}

static PageFlagsNode *pageflags_next(PageFlagsNode *p, target_ulong start,
                                     target_long last)
{
    IntervalTreeNode *n;

    n = interval_tree_iter_next(&p->itree, start, last);
    return n ? container_of(n, PageFlagsNode, itree) : NULL;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    IntervalTreeNode *n;
    int rc = 0;

    mmap_lock();
    for (n = interval_tree_iter_first(&pageflags_root, 0, -1);
         n != NULL;
         n = interval_tree_iter_next(n, 0, -1)) {
        PageFlagsNode *p = container_of(n, PageFlagsNode, itree);

        rc = fn(priv, n->start, n->last + 1, p->flags);
        if (rc != 0) {
            break;
        }
    }
    mmap_unlock();

    return rc;
}

static int dump_region(void *priv, target_ulong start,
                       target_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx" "TARGET_FMT_lx" %c%c%c\n",
            start, end, end - start,
            ((prot & PAGE_READ) ? 'r' : '-'),
            ((prot & PAGE_WRITE) ? 'w' : '-'),
            ((prot & PAGE_EXEC) ? 'x' : '-'));
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    const int length = sizeof(target_ulong) * 2;

    fprintf(f, "%-*s %-*s %-*s %s\n",
            length, "start", length, "end", length, "size", "prot");
    walk_memory_regions(f, dump_region);
}

int page_get_flags(target_ulong address)
{
    PageFlagsNode *p = pageflags_find(address, address);

    /*
     * See util/interval-tree.c re lockless lookups: no false positives but
     * there are false negatives.  If we find nothing, retry with the mmap
     * lock acquired.
     */
    if (p) {
        return p->flags;
    }
    if (have_mmap_lock()) {
        return 0;
    }

    mmap_lock();
    p = pageflags_find(address, address);
    mmap_unlock();
    return p ? p->flags : 0;
}

/* A subroutine of page_set_flags: insert a new node for [start,last]. */
static void pageflags_create(target_ulong start, target_ulong last, int flags)
{
    PageFlagsNode *p = g_new(PageFlagsNode, 1);

    p->itree.start = start;
    p->itree.last = last;
    p->flags = flags;
    interval_tree_insert(&p->itree, &pageflags_root);
}

/* A subroutine of page_set_flags: remove everything in [start,last]. */
static bool pageflags_unset(target_ulong start, target_ulong last)
{
    bool inval_tb = false;

    while (true) {
        PageFlagsNode *p = pageflags_find(start, last);
        target_ulong p_last;

        if (!p) {
            break;
        }

        if (p->flags & PAGE_EXEC) {
            inval_tb = true;
        }

        interval_tree_remove(&p->itree, &pageflags_root);
        p_last = p->itree.last;

        if (p->itree.start < start) {
            /* Truncate the node from the end, or split out the middle. */
            p->itree.last = start - 1;
            interval_tree_insert(&p->itree, &pageflags_root);
            if (last < p_last) {
                pageflags_create(last + 1, p_last, p->flags);
                break;
            }
        } else if (p_last <= last) {
            /* Range completely covers node -- remove it. */
            g_free_rcu(p, rcu);
        } else {
            /* Truncate the node from the start. */
            p->itree.start = last + 1;
            interval_tree_insert(&p->itree, &pageflags_root);
            break;
        }
    }

    return inval_tb;
}
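
/*
 * Illustration of the cases handled by the loop above, for an existing
 * node N overlapping the unset range R = [start, last]:
 *
 *   N: |==========|    R:    |--|      -> N is split; head and tail kept
 *   N: |==========|    R:       |----  -> N's tail truncated; loop for more
 *   N:    |====|       R: ----------   -> N removed entirely; loop for more
 *   N: |==========|    R: ----|        -> N's head truncated; done
 */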

/*
 * A subroutine of page_set_flags: nothing overlaps [start,last],
 * but check adjacent mappings and maybe merge into a single range.
 */
static void pageflags_create_merge(target_ulong start, target_ulong last,
                                   int flags)
{
    PageFlagsNode *next = NULL, *prev = NULL;

    if (start > 0) {
        prev = pageflags_find(start - 1, start - 1);
        if (prev) {
            if (prev->flags == flags) {
                interval_tree_remove(&prev->itree, &pageflags_root);
            } else {
                prev = NULL;
            }
        }
    }
    if (last + 1 != 0) {
        next = pageflags_find(last + 1, last + 1);
        if (next) {
            if (next->flags == flags) {
                interval_tree_remove(&next->itree, &pageflags_root);
            } else {
                next = NULL;
            }
        }
    }

    if (prev) {
        if (next) {
            prev->itree.last = next->itree.last;
            g_free_rcu(next, rcu);
        } else {
            prev->itree.last = last;
        }
        interval_tree_insert(&prev->itree, &pageflags_root);
    } else if (next) {
        next->itree.start = start;
        interval_tree_insert(&next->itree, &pageflags_root);
    } else {
        pageflags_create(start, last, flags);
    }
}
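
/*
 * For example (illustrative addresses): if [0x10000, 0x1ffff] already
 * exists with flags F and we create [0x20000, 0x2ffff] with the same F,
 * the result is a single node [0x10000, 0x2ffff] rather than two
 * adjacent nodes.
 */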

/*
 * Allow the target to decide if PAGE_TARGET_[12] may be reset.
 * By default, they are not kept.
 */
#ifndef PAGE_TARGET_STICKY
#define PAGE_TARGET_STICKY  0
#endif
#define PAGE_STICKY  (PAGE_ANON | PAGE_PASSTHROUGH | PAGE_TARGET_STICKY)

/* A subroutine of page_set_flags: set and/or clear flags in [start,last]. */
static bool pageflags_set_clear(target_ulong start, target_ulong last,
                                int set_flags, int clear_flags)
{
    PageFlagsNode *p;
    target_ulong p_start, p_last;
    int p_flags, merge_flags;
    bool inval_tb = false;

 restart:
    p = pageflags_find(start, last);
    if (!p) {
        if (set_flags) {
            pageflags_create_merge(start, last, set_flags);
        }
        goto done;
    }

    p_start = p->itree.start;
    p_last = p->itree.last;
    p_flags = p->flags;
    /* Using mprotect on a page does not change sticky bits. */
    merge_flags = (p_flags & ~clear_flags) | set_flags;

    /*
     * Need to flush if an overlapping executable region
     * removes exec, or adds write.
     */
    if ((p_flags & PAGE_EXEC)
        && (!(merge_flags & PAGE_EXEC)
            || (merge_flags & ~p_flags & PAGE_WRITE))) {
        inval_tb = true;
    }

    /*
     * If there is an exact range match, update and return without
     * attempting to merge with adjacent regions.
     */
    if (start == p_start && last == p_last) {
        if (merge_flags) {
            p->flags = merge_flags;
        } else {
            interval_tree_remove(&p->itree, &pageflags_root);
            g_free_rcu(p, rcu);
        }
        goto done;
    }

    /*
     * If sticky bits affect the original mapping, then we must be more
     * careful about the existing intervals and the separate flags.
     */
    if (set_flags != merge_flags) {
        if (p_start < start) {
            interval_tree_remove(&p->itree, &pageflags_root);
            p->itree.last = start - 1;
            interval_tree_insert(&p->itree, &pageflags_root);

            if (last < p_last) {
                if (merge_flags) {
                    pageflags_create(start, last, merge_flags);
                }
                pageflags_create(last + 1, p_last, p_flags);
            } else {
                if (merge_flags) {
                    pageflags_create(start, p_last, merge_flags);
                }
                if (p_last < last) {
                    start = p_last + 1;
                    goto restart;
                }
            }
        } else {
            if (start < p_start && set_flags) {
                pageflags_create(start, p_start - 1, set_flags);
            }
            if (last < p_last) {
                interval_tree_remove(&p->itree, &pageflags_root);
                p->itree.start = last + 1;
                interval_tree_insert(&p->itree, &pageflags_root);
                if (merge_flags) {
                    pageflags_create(start, last, merge_flags);
                }
            } else {
                if (merge_flags) {
                    p->flags = merge_flags;
                } else {
                    interval_tree_remove(&p->itree, &pageflags_root);
                    g_free_rcu(p, rcu);
                }
                if (p_last < last) {
                    start = p_last + 1;
                    goto restart;
                }
            }
        }
        goto done;
    }

    /* If flags are not changing for this range, incorporate it. */
    if (set_flags == p_flags) {
        if (start < p_start) {
            interval_tree_remove(&p->itree, &pageflags_root);
            p->itree.start = start;
            interval_tree_insert(&p->itree, &pageflags_root);
        }
        if (p_last < last) {
            start = p_last + 1;
            goto restart;
        }
        goto done;
    }

    /* Maybe split out head and/or tail ranges with the original flags. */
    interval_tree_remove(&p->itree, &pageflags_root);
    if (p_start < start) {
        p->itree.last = start - 1;
        interval_tree_insert(&p->itree, &pageflags_root);

        if (p_last < last) {
            goto restart;
        }
        if (last < p_last) {
            pageflags_create(last + 1, p_last, p_flags);
        }
    } else if (last < p_last) {
        p->itree.start = last + 1;
        interval_tree_insert(&p->itree, &pageflags_root);
    } else {
        g_free_rcu(p, rcu);
        goto restart;
    }
    if (set_flags) {
        pageflags_create(start, last, set_flags);
    }

 done:
    return inval_tb;
}
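
/*
 * Example of the sticky-bit interaction (illustrative): an anonymous
 * page carries PAGE_ANON in addition to its protection bits.  For a
 * plain mprotect() (no PAGE_RESET), page_set_flags() below passes a
 * clear mask that excludes PAGE_STICKY, so merge_flags keeps PAGE_ANON
 * while set_flags does not; set_flags != merge_flags then routes such
 * nodes through the careful path above, preserving PAGE_ANON while
 * replacing only the protection bits.
 */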

/*
 * Modify the flags of a page and invalidate the code if necessary.
 * The flag PAGE_WRITE_ORG is set automatically based on PAGE_WRITE.
 * The mmap_lock should already be held.
 */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong last;
    bool reset = false;
    bool inval_tb = false;

    /*
     * This function should never be called with addresses outside the
     * guest address space.  If this assert fires, it probably indicates
     * a missing call to h2g_valid.
     */
    assert(start < end);
    assert(end - 1 <= GUEST_ADDR_MAX);
    /* Only set PAGE_ANON with new mappings. */
    assert(!(flags & PAGE_ANON) || (flags & PAGE_RESET));
    assert_memory_lock();

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    last = end - 1;

    if (!(flags & PAGE_VALID)) {
        flags = 0;
    } else {
        reset = flags & PAGE_RESET;
        flags &= ~PAGE_RESET;
        if (flags & PAGE_WRITE) {
            flags |= PAGE_WRITE_ORG;
        }
    }

    if (!flags || reset) {
        page_reset_target_data(start, end);
        inval_tb |= pageflags_unset(start, last);
    }
    if (flags) {
        inval_tb |= pageflags_set_clear(start, last, flags,
                                        ~(reset ? 0 : PAGE_STICKY));
    }
    if (inval_tb) {
        tb_invalidate_phys_range(start, end);
    }
}
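
/*
 * Typical use (illustrative sketch): after a successful guest mmap(),
 * the target mmap code records the new mapping with something like
 *
 *     page_set_flags(start, start + len,
 *                    prot | PAGE_VALID | PAGE_RESET | PAGE_ANON);
 *
 * while munmap() clears it with page_set_flags(start, start + len, 0).
 */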

int page_check_range(target_ulong start, target_ulong len, int flags)
{
    target_ulong last;
    int locked;  /* tri-state: =0: unlocked, +1: global, -1: local */
    int ret;

    if (len == 0) {
        return 0;  /* trivial length */
    }

    last = start + len - 1;
    if (last < start) {
        return -1; /* wrap around */
    }

    locked = have_mmap_lock();
    while (true) {
        PageFlagsNode *p = pageflags_find(start, last);
        int missing;

        if (!p) {
            if (!locked) {
                /*
                 * Lockless lookups have false negatives.
                 * Retry with the lock held.
                 */
                mmap_lock();
                locked = -1;
                p = pageflags_find(start, last);
            }
            if (!p) {
                ret = -1; /* entire region invalid */
                break;
            }
        }
        if (start < p->itree.start) {
            ret = -1; /* initial bytes invalid */
            break;
        }

        missing = flags & ~p->flags;
        if (missing & PAGE_READ) {
            ret = -1; /* page not readable */
            break;
        }
        if (missing & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                ret = -1; /* page not writable */
                break;
            }
            /* Asking about writable, but has been protected: undo. */
            if (!page_unprotect(start, 0)) {
                ret = -1;
                break;
            }
            /* TODO: page_unprotect should take a range, not a single page. */
            if (last - start < TARGET_PAGE_SIZE) {
                ret = 0; /* ok */
                break;
            }
            start += TARGET_PAGE_SIZE;
            continue;
        }

        if (last <= p->itree.last) {
            ret = 0; /* ok */
            break;
        }
        start = p->itree.last + 1;
    }

    /* Release the lock if acquired locally. */
    if (locked < 0) {
        mmap_unlock();
    }
    return ret;
}
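
/*
 * Typical use (illustrative): linux-user syscall emulation validates a
 * guest buffer before touching it, e.g.
 *
 *     if (page_check_range(addr, len, PAGE_READ) < 0) {
 *         return -TARGET_EFAULT;
 *     }
 */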

void page_protect(tb_page_addr_t address)
{
    PageFlagsNode *p;
    target_ulong start, last;
    int prot;

    assert_memory_lock();

    if (qemu_host_page_size <= TARGET_PAGE_SIZE) {
        start = address & TARGET_PAGE_MASK;
        last = start + TARGET_PAGE_SIZE - 1;
    } else {
        start = address & qemu_host_page_mask;
        last = start + qemu_host_page_size - 1;
    }

    p = pageflags_find(start, last);
    if (!p) {
        return;
    }
    prot = p->flags;

    if (unlikely(p->itree.last < last)) {
        /* More than one protection region covers the one host page. */
        assert(TARGET_PAGE_SIZE < qemu_host_page_size);
        while ((p = pageflags_next(p, start, last)) != NULL) {
            prot |= p->flags;
        }
    }

    if (prot & PAGE_WRITE) {
        pageflags_set_clear(start, last, 0, PAGE_WRITE);
        mprotect(g2h_untagged(start), qemu_host_page_size,
                 prot & (PAGE_READ | PAGE_EXEC) ? PROT_READ : PROT_NONE);
    }
}

/*
 * Called from signal handler: invalidate the code and unprotect the
 * page. Return 0 if the fault was not handled, 1 if it was handled,
 * and 2 if it was handled but the caller must cause the TB to be
 * immediately exited. (We can only return 2 if the 'pc' argument is
 * non-zero.)
 */
int page_unprotect(target_ulong address, uintptr_t pc)
{
    PageFlagsNode *p;
    bool current_tb_invalidated;

    /*
     * Technically this isn't safe inside a signal handler.  However we
     * know this only ever happens in a synchronous SEGV handler, so in
     * practice it seems to be ok.
     */
    mmap_lock();

    p = pageflags_find(address, address);

    /* If this address was not really writable, nothing to do. */
    if (!p || !(p->flags & PAGE_WRITE_ORG)) {
        mmap_unlock();
        return 0;
    }

    current_tb_invalidated = false;
    if (p->flags & PAGE_WRITE) {
        /*
         * If the page is actually marked WRITE then assume this is because
         * this thread raced with another one which got here first and
         * set the page to PAGE_WRITE and did the TB invalidate for us.
         */
#ifdef TARGET_HAS_PRECISE_SMC
        TranslationBlock *current_tb = tcg_tb_lookup(pc);
        if (current_tb) {
            current_tb_invalidated = tb_cflags(current_tb) & CF_INVALID;
        }
#endif
    } else {
        target_ulong start, len, i;
        int prot;

        if (qemu_host_page_size <= TARGET_PAGE_SIZE) {
            start = address & TARGET_PAGE_MASK;
            len = TARGET_PAGE_SIZE;
            prot = p->flags | PAGE_WRITE;
            pageflags_set_clear(start, start + len - 1, PAGE_WRITE, 0);
            current_tb_invalidated = tb_invalidate_phys_page_unwind(start, pc);
        } else {
            start = address & qemu_host_page_mask;
            len = qemu_host_page_size;
            prot = 0;

            for (i = 0; i < len; i += TARGET_PAGE_SIZE) {
                target_ulong addr = start + i;

                p = pageflags_find(addr, addr);
                if (p) {
                    prot |= p->flags;
                    if (p->flags & PAGE_WRITE_ORG) {
                        prot |= PAGE_WRITE;
                        pageflags_set_clear(addr, addr + TARGET_PAGE_SIZE - 1,
                                            PAGE_WRITE, 0);
                    }
                }
                /*
                 * Since the content will be modified, we must invalidate
                 * the corresponding translated code.
                 */
                current_tb_invalidated |=
                    tb_invalidate_phys_page_unwind(addr, pc);
            }
        }
        if (prot & PAGE_EXEC) {
            prot = (prot & ~PAGE_EXEC) | PAGE_READ;
        }
        mprotect((void *)g2h_untagged(start), len, prot & PAGE_BITS);
    }
    mmap_unlock();

    /* If current TB was invalidated return to main loop */
    return current_tb_invalidated ? 2 : 1;
}

static int probe_access_internal(CPUArchState *env, target_ulong addr,
                                 int fault_size, MMUAccessType access_type,
                                 bool nonfault, uintptr_t ra)
{
    int acc_flag;
    bool maperr;

    switch (access_type) {
    case MMU_DATA_STORE:
        acc_flag = PAGE_WRITE_ORG;
        break;
    case MMU_DATA_LOAD:
        acc_flag = PAGE_READ;
        break;
    case MMU_INST_FETCH:
        acc_flag = PAGE_EXEC;
        break;
    default:
        g_assert_not_reached();
    }

    if (guest_addr_valid_untagged(addr)) {
        int page_flags = page_get_flags(addr);
        if (page_flags & acc_flag) {
            return 0; /* success */
        }
        maperr = !(page_flags & PAGE_VALID);
    } else {
        maperr = true;
    }

    if (nonfault) {
        return TLB_INVALID_MASK;
    }

    cpu_loop_exit_sigsegv(env_cpu(env), addr, access_type, maperr, ra);
}

int probe_access_flags(CPUArchState *env, target_ulong addr,
                       MMUAccessType access_type, int mmu_idx,
                       bool nonfault, void **phost, uintptr_t ra)
{
    int flags;

    flags = probe_access_internal(env, addr, 0, access_type, nonfault, ra);
    *phost = flags ? NULL : g2h(env_cpu(env), addr);
    return flags;
}

void *probe_access(CPUArchState *env, target_ulong addr, int size,
                   MMUAccessType access_type, int mmu_idx, uintptr_t ra)
{
    int flags;

    g_assert(-(addr | TARGET_PAGE_MASK) >= size);
    flags = probe_access_internal(env, addr, size, access_type, false, ra);
    g_assert(flags == 0);

    return size ? g2h(env_cpu(env), addr) : NULL;
}

tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
                                        void **hostp)
{
    int flags;

    flags = probe_access_internal(env, addr, 1, MMU_INST_FETCH, false, 0);
    g_assert(flags == 0);

    if (hostp) {
        *hostp = g2h_untagged(addr);
    }
    return addr;
}
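
/*
 * Usage sketch (illustrative): a target helper that wants host access
 * to a guest page without risking a longjmp can probe non-faulting
 * first, then access the host pointer only on success:
 *
 *     void *host;
 *     int fl = probe_access_flags(env, addr, MMU_DATA_LOAD, mmu_idx,
 *                                 true, &host, GETPC());
 *     if (fl == 0) {
 *         val = ldl_le_p(host);     /- page present; direct host load -/
 *     } else {
 *         /- page unmapped or unreadable; take the no-fault path -/
 *     }
 */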

#ifdef TARGET_PAGE_DATA_SIZE
/*
 * Allocate chunks of target data together.  For the only current user,
 * if we allocate one chunk per page, we have overhead of 40/128, or
 * about 31%.  Therefore, allocate memory for 64 pages at a time for
 * overhead of less than 1%.
 */
#define TPD_PAGES  64
#define TBD_MASK   (TARGET_PAGE_MASK * TPD_PAGES)
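
/*
 * E.g. with 4KiB target pages, TARGET_PAGE_MASK * 64 == ~0x3ffff, so one
 * TargetPageDataNode covers a naturally aligned 256KiB region.
 */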

typedef struct TargetPageDataNode {
    struct rcu_head rcu;
    IntervalTreeNode itree;
    char data[TPD_PAGES][TARGET_PAGE_DATA_SIZE] __attribute__((aligned));
} TargetPageDataNode;

static IntervalTreeRoot targetdata_root;

void page_reset_target_data(target_ulong start, target_ulong end)
{
    IntervalTreeNode *n, *next;
    target_ulong last;

    assert_memory_lock();

    start = start & TARGET_PAGE_MASK;
    last = TARGET_PAGE_ALIGN(end) - 1;

    for (n = interval_tree_iter_first(&targetdata_root, start, last),
         next = n ? interval_tree_iter_next(n, start, last) : NULL;
         n != NULL;
         n = next,
         next = next ? interval_tree_iter_next(n, start, last) : NULL) {
        target_ulong n_start, n_last, p_ofs, p_len;
        TargetPageDataNode *t = container_of(n, TargetPageDataNode, itree);

        if (n->start >= start && n->last <= last) {
            interval_tree_remove(n, &targetdata_root);
            g_free_rcu(t, rcu);
            continue;
        }

        if (n->start < start) {
            n_start = start;
            p_ofs = (start - n->start) >> TARGET_PAGE_BITS;
        } else {
            n_start = n->start;
            p_ofs = 0;
        }
        n_last = MIN(last, n->last);
        p_len = (n_last + 1 - n_start) >> TARGET_PAGE_BITS;

        memset(t->data[p_ofs], 0, p_len * TARGET_PAGE_DATA_SIZE);
    }
}

void *page_get_target_data(target_ulong address)
{
    IntervalTreeNode *n;
    TargetPageDataNode *t;
    target_ulong page, region;

    page = address & TARGET_PAGE_MASK;
    region = address & TBD_MASK;

    n = interval_tree_iter_first(&targetdata_root, page, page);
    if (!n) {
        /*
         * See util/interval-tree.c re lockless lookups: no false positives
         * but there are false negatives.  If we find nothing, retry with
         * the mmap lock acquired.  We also need the lock for the
         * allocation + insert.
         */
        mmap_lock();
        n = interval_tree_iter_first(&targetdata_root, page, page);
        if (!n) {
            t = g_new0(TargetPageDataNode, 1);
            n = &t->itree;
            n->start = region;
            n->last = region | ~TBD_MASK;
            interval_tree_insert(n, &targetdata_root);
        }
        mmap_unlock();
    }

    t = container_of(n, TargetPageDataNode, itree);
    return t->data[(page - region) >> TARGET_PAGE_BITS];
}
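
/*
 * Worked example of the indexing above (illustrative, 4KiB pages):
 * address 0x12345678 -> page 0x12345000, region 0x12340000, so the
 * returned slot is t->data[(0x12345000 - 0x12340000) >> 12], i.e.
 * t->data[5].
 */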
#else
void page_reset_target_data(target_ulong start, target_ulong end) { }
#endif /* TARGET_PAGE_DATA_SIZE */

/* The softmmu versions of these helpers are in cputlb.c.  */

/*
 * Verify that we have passed the correct MemOp to the correct function.
 *
 * We could present one function to target code, and dispatch based on
 * the MemOp, but so far we have worked hard to avoid an indirect function
 * call along the memory path.
 */
static void validate_memop(MemOpIdx oi, MemOp expected)
{
#ifdef CONFIG_DEBUG_TCG
    MemOp have = get_memop(oi) & (MO_SIZE | MO_BSWAP);
    assert(have == expected);
#endif
}

void helper_unaligned_ld(CPUArchState *env, target_ulong addr)
{
    cpu_loop_exit_sigbus(env_cpu(env), addr, MMU_DATA_LOAD, GETPC());
}

void helper_unaligned_st(CPUArchState *env, target_ulong addr)
{
    cpu_loop_exit_sigbus(env_cpu(env), addr, MMU_DATA_STORE, GETPC());
}

static void *cpu_mmu_lookup(CPUArchState *env, target_ulong addr,
                            MemOpIdx oi, uintptr_t ra, MMUAccessType type)
{
    MemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    void *ret;

    /* Enforce guest required alignment.  */
    if (unlikely(addr & ((1 << a_bits) - 1))) {
        cpu_loop_exit_sigbus(env_cpu(env), addr, type, ra);
    }

    ret = g2h(env_cpu(env), addr);
    set_helper_retaddr(ra);
    return ret;
}
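
/*
 * Note on the pattern shared by all of the loads and stores below:
 * cpu_mmu_lookup() publishes the caller's return address via
 * set_helper_retaddr() before the host access, so that a SIGSEGV taken
 * during the access unwinds to the correct guest pc (see
 * adjust_signal_pc() above); each access is then followed by
 * clear_helper_retaddr() before the plugin callback runs.
 */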

uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint8_t ret;

    validate_memop(oi, MO_UB);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = ldub_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint16_t ret;

    validate_memop(oi, MO_BEUW);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = lduw_be_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint32_t ret;

    validate_memop(oi, MO_BEUL);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = ldl_be_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint64_t ret;

    validate_memop(oi, MO_BEUQ);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = ldq_be_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint16_t ret;

    validate_memop(oi, MO_LEUW);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = lduw_le_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint32_t ret;

    validate_memop(oi, MO_LEUL);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = ldl_le_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint64_t ret;

    validate_memop(oi, MO_LEUQ);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = ldq_le_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

Int128 cpu_ld16_be_mmu(CPUArchState *env, abi_ptr addr,
                       MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    Int128 ret;

    validate_memop(oi, MO_128 | MO_BE);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    memcpy(&ret, haddr, 16);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);

    if (!HOST_BIG_ENDIAN) {
        ret = bswap128(ret);
    }
    return ret;
}

Int128 cpu_ld16_le_mmu(CPUArchState *env, abi_ptr addr,
                       MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    Int128 ret;

    validate_memop(oi, MO_128 | MO_LE);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    memcpy(&ret, haddr, 16);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);

    if (HOST_BIG_ENDIAN) {
        ret = bswap128(ret);
    }
    return ret;
}

void cpu_stb_mmu(CPUArchState *env, abi_ptr addr, uint8_t val,
                 MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_UB);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stb_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stw_be_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_BEUW);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stw_be_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stl_be_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_BEUL);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stl_be_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stq_be_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_BEUQ);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stq_be_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stw_le_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_LEUW);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stw_le_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stl_le_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_LEUL);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stl_le_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stq_le_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_LEUQ);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stq_le_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_st16_be_mmu(CPUArchState *env, abi_ptr addr,
                     Int128 val, MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_128 | MO_BE);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    if (!HOST_BIG_ENDIAN) {
        val = bswap128(val);
    }
    memcpy(haddr, &val, 16);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_st16_le_mmu(CPUArchState *env, abi_ptr addr,
                     Int128 val, MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_128 | MO_LE);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    if (HOST_BIG_ENDIAN) {
        val = bswap128(val);
    }
    memcpy(haddr, &val, 16);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

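/*
 * For the code loaders below, set_helper_retaddr(1) marks the access as
 * a fetch for translation, so that a fault is reported as MMU_INST_FETCH
 * rather than a data access; see case 1 of adjust_signal_pc() above.
 */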
uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = ldub_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = lduw_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = ldl_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr ptr)
{
    uint64_t ret;

    set_helper_retaddr(1);
    ret = ldq_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

#include "ldst_common.c.inc"

/*
 * Do not allow unaligned operations to proceed.  Return the host address.
 *
 * @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE.
 */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
                               MemOpIdx oi, int size, int prot,
                               uintptr_t retaddr)
{
    MemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    void *ret;

    /* Enforce guest required alignment.  */
    if (unlikely(addr & ((1 << a_bits) - 1))) {
        MMUAccessType t = prot == PAGE_READ ? MMU_DATA_LOAD : MMU_DATA_STORE;
        cpu_loop_exit_sigbus(env_cpu(env), addr, t, retaddr);
    }

    /* Enforce qemu required alignment.  */
    if (unlikely(addr & (size - 1))) {
        cpu_loop_exit_atomic(env_cpu(env), retaddr);
    }

    ret = g2h(env_cpu(env), addr);
    set_helper_retaddr(retaddr);
    return ret;
}
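
/*
 * For example (illustrative): an 8-byte guest atomic whose MemOp encodes
 * only 4-byte alignment passes the guest check above but can still fail
 * the qemu check (addr & 7); cpu_loop_exit_atomic() then falls back to
 * executing the operation serially under exclusive (stop-the-world)
 * semantics.
 */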

#include "atomic_common.c.inc"

/*
 * First set of functions passes in OI and RETADDR.
 * This makes them callable from other helpers.
 */

#define ATOMIC_NAME(X) \
    glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)
#define ATOMIC_MMU_CLEANUP do { clear_helper_retaddr(); } while (0)

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#if HAVE_ATOMIC128 || HAVE_CMPXCHG128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif