xref: /openbmc/qemu/accel/tcg/user-exec.c (revision b3eb5b86)
1 /*
2  *  User emulator execution
3  *
4  *  Copyright (c) 2003-2005 Fabrice Bellard
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #include "qemu/osdep.h"
20 #include "hw/core/tcg-cpu-ops.h"
21 #include "disas/disas.h"
22 #include "exec/exec-all.h"
23 #include "tcg/tcg.h"
24 #include "qemu/bitops.h"
25 #include "exec/cpu_ldst.h"
26 #include "exec/translate-all.h"
27 #include "exec/helper-proto.h"
28 #include "qemu/atomic128.h"
29 #include "trace/trace-root.h"
30 #include "tcg/tcg-ldst.h"
31 #include "internal.h"
32 
33 __thread uintptr_t helper_retaddr;
34 
35 //#define DEBUG_SIGNAL
36 
37 /*
38  * Adjust the pc to pass to cpu_restore_state; return the memop type.
39  */
40 MMUAccessType adjust_signal_pc(uintptr_t *pc, bool is_write)
41 {
42     switch (helper_retaddr) {
43     default:
44         /*
45          * Fault during host memory operation within a helper function.
46          * The helper's host return address, saved here, gives us a
47          * pointer into the generated code that will unwind to the
48          * correct guest pc.
49          */
50         *pc = helper_retaddr;
51         break;
52 
53     case 0:
54         /*
55          * Fault during host memory operation within generated code.
56  * (Or, an unrelated bug within qemu, but we can't tell from here).
57          *
58          * We take the host pc from the signal frame.  However, we cannot
59          * use that value directly.  Within cpu_restore_state_from_tb, we
60          * assume PC comes from GETPC(), as used by the helper functions,
61          * so we adjust the address by -GETPC_ADJ to form an address that
62          * is within the call insn, so that the address does not accidentally
63          * match the beginning of the next guest insn.  However, when the
64          * pc comes from the signal frame it points to the actual faulting
65          * host memory insn and not the return from a call insn.
66          *
67          * Therefore, adjust to compensate for what will be done later
68          * by cpu_restore_state_from_tb.
69          */
70         *pc += GETPC_ADJ;
71         break;
72 
73     case 1:
74         /*
75          * Fault during host read for translation, or loosely, "execution".
76          *
77          * The guest pc is already pointing to the start of the TB for which
78          * code is being generated.  If the guest translator manages the
79          * page crossings correctly, this is exactly the correct address
80          * (and if the translator doesn't handle page boundaries correctly
81          * there's little we can do about that here).  Therefore, do not
82          * trigger the unwinder.
83          */
84         *pc = 0;
85         return MMU_INST_FETCH;
86     }
87 
88     return is_write ? MMU_DATA_STORE : MMU_DATA_LOAD;
89 }
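/*
 * For orientation, a host SIGSEGV handler is expected to feed the pc from
 * the signal frame through adjust_signal_pc() before raising the guest
 * fault.  A hedged sketch (the real dispatch lives in the per-host signal
 * code; "frame_pc", "guest_addr" and "maperr" are hypothetical locals):
 *
 *     uintptr_t pc = frame_pc;
 *     MMUAccessType type = adjust_signal_pc(&pc, is_write);
 *     ...
 *     cpu_loop_exit_sigsegv(cpu, guest_addr, type, maperr, pc);
 */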
90 
91 /**
92  * handle_sigsegv_accerr_write:
93  * @cpu: the cpu context
94  * @old_set: the sigset_t from the signal ucontext_t
95  * @host_pc: the host pc, adjusted for the signal
96  * @guest_addr: the guest address of the fault
97  *
98  * Return true if the write fault has been handled and the access should be retried.
99  *
100  * Note that it is important that we don't call page_unprotect() unless
101  * this is really a "write to nonwritable page" fault, because
102  * page_unprotect() assumes that if it is called for an access to
103  * a page that's writable, this means we had two threads racing and
104  * another thread got there first and already made the page writable;
105  * so we will retry the access. If we were to call page_unprotect()
106  * for some other kind of fault that should really be passed to the
107  * guest, we'd end up in an infinite loop of retrying the faulting access.
108  */
109 bool handle_sigsegv_accerr_write(CPUState *cpu, sigset_t *old_set,
110                                  uintptr_t host_pc, abi_ptr guest_addr)
111 {
112     switch (page_unprotect(guest_addr, host_pc)) {
113     case 0:
114         /*
115          * Fault not caused by a page marked unwritable to protect
116          * cached translations; this must be the guest binary's problem.
117          */
118         return false;
119     case 1:
120         /*
121          * Fault caused by protection of cached translation; TBs
122          * invalidated, so resume execution.
123          */
124         return true;
125     case 2:
126         /*
127          * Fault caused by protection of cached translation, and the
128          * currently executing TB was modified and must be exited immediately.
129          */
130         sigprocmask(SIG_SETMASK, old_set, NULL);
131         cpu_loop_exit_noexc(cpu);
132         /* NORETURN */
133     default:
134         g_assert_not_reached();
135     }
136 }
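/*
 * A hedged sketch of how the helper above slots into that SEGV path
 * (hypothetical locals; the actual wiring is in the common signal code):
 *
 *     if (is_write && info->si_code == SEGV_ACCERR &&
 *         handle_sigsegv_accerr_write(cpu, &uc->uc_sigmask, pc, guest_addr)) {
 *         return;  // page unprotected, TBs invalidated: retry the store
 *     }
 *     cpu_loop_exit_sigsegv(cpu, guest_addr, MMU_DATA_STORE, maperr, pc);
 */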
137 
138 typedef struct PageFlagsNode {
139     IntervalTreeNode itree;
140     int flags;
141 } PageFlagsNode;
142 
143 static IntervalTreeRoot pageflags_root;
144 
145 static PageFlagsNode *pageflags_find(target_ulong start, target_long last)
146 {
147     IntervalTreeNode *n;
148 
149     n = interval_tree_iter_first(&pageflags_root, start, last);
150     return n ? container_of(n, PageFlagsNode, itree) : NULL;
151 }
152 
153 static PageFlagsNode *pageflags_next(PageFlagsNode *p, target_ulong start,
154                                      target_long last)
155 {
156     IntervalTreeNode *n;
157 
158     n = interval_tree_iter_next(&p->itree, start, last);
159     return n ? container_of(n, PageFlagsNode, itree) : NULL;
160 }
161 
162 int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
163 {
164     IntervalTreeNode *n;
165     int rc = 0;
166 
167     mmap_lock();
168     for (n = interval_tree_iter_first(&pageflags_root, 0, -1);
169          n != NULL;
170          n = interval_tree_iter_next(n, 0, -1)) {
171         PageFlagsNode *p = container_of(n, PageFlagsNode, itree);
172 
173         rc = fn(priv, n->start, n->last + 1, p->flags);
174         if (rc != 0) {
175             break;
176         }
177     }
178     mmap_unlock();
179 
180     return rc;
181 }
182 
183 static int dump_region(void *priv, target_ulong start,
184                        target_ulong end, unsigned long prot)
185 {
186     FILE *f = (FILE *)priv;
187 
188     fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx" "TARGET_FMT_lx" %c%c%c\n",
189             start, end, end - start,
190             ((prot & PAGE_READ) ? 'r' : '-'),
191             ((prot & PAGE_WRITE) ? 'w' : '-'),
192             ((prot & PAGE_EXEC) ? 'x' : '-'));
193     return 0;
194 }
195 
196 /* dump memory mappings */
197 void page_dump(FILE *f)
198 {
199     const int length = sizeof(target_ulong) * 2;
200 
201     fprintf(f, "%-*s %-*s %-*s %s\n",
202             length, "start", length, "end", length, "size", "prot");
203     walk_memory_regions(f, dump_region);
204 }
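/*
 * walk_memory_regions() is reusable with other callbacks.  A minimal
 * sketch (hypothetical helper, not part of QEMU) that totals the bytes
 * currently mapped executable:
 *
 *     static int count_exec(void *priv, target_ulong start,
 *                           target_ulong end, unsigned long prot)
 *     {
 *         if (prot & PAGE_EXEC) {
 *             *(target_ulong *)priv += end - start;
 *         }
 *         return 0;  // non-zero would stop the walk early
 *     }
 *
 *     target_ulong total = 0;
 *     walk_memory_regions(&total, count_exec);
 */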
205 
206 int page_get_flags(target_ulong address)
207 {
208     PageFlagsNode *p = pageflags_find(address, address);
209 
210     /*
211      * See util/interval-tree.c re lockless lookups: no false positives but
212      * there are false negatives.  If we find nothing, retry with the mmap
213      * lock acquired.
214      */
215     if (p) {
216         return p->flags;
217     }
218     if (have_mmap_lock()) {
219         return 0;
220     }
221 
222     mmap_lock();
223     p = pageflags_find(address, address);
224     mmap_unlock();
225     return p ? p->flags : 0;
226 }
227 
228 /* A subroutine of page_set_flags: insert a new node for [start,last]. */
229 static void pageflags_create(target_ulong start, target_ulong last, int flags)
230 {
231     PageFlagsNode *p = g_new(PageFlagsNode, 1);
232 
233     p->itree.start = start;
234     p->itree.last = last;
235     p->flags = flags;
236     interval_tree_insert(&p->itree, &pageflags_root);
237 }
238 
239 /* A subroutine of page_set_flags: remove everything in [start,last]. */
240 static bool pageflags_unset(target_ulong start, target_ulong last)
241 {
242     bool inval_tb = false;
243 
244     while (true) {
245         PageFlagsNode *p = pageflags_find(start, last);
246         target_ulong p_last;
247 
248         if (!p) {
249             break;
250         }
251 
252         if (p->flags & PAGE_EXEC) {
253             inval_tb = true;
254         }
255 
256         interval_tree_remove(&p->itree, &pageflags_root);
257         p_last = p->itree.last;
258 
259         if (p->itree.start < start) {
260             /* Truncate the node from the end, or split out the middle. */
261             p->itree.last = start - 1;
262             interval_tree_insert(&p->itree, &pageflags_root);
263             if (last < p_last) {
264                 pageflags_create(last + 1, p_last, p->flags);
265                 break;
266             }
267         } else if (p_last <= last) {
268             /* Range completely covers node -- remove it. */
269             g_free(p);
270         } else {
271             /* Truncate the node from the start. */
272             p->itree.start = last + 1;
273             interval_tree_insert(&p->itree, &pageflags_root);
274             break;
275         }
276     }
277 
278     return inval_tb;
279 }
280 
281 /*
282  * A subroutine of page_set_flags: nothing overlaps [start,last],
283  * but check adjacent mappings and maybe merge into a single range.
284  */
285 static void pageflags_create_merge(target_ulong start, target_ulong last,
286                                    int flags)
287 {
288     PageFlagsNode *next = NULL, *prev = NULL;
289 
290     if (start > 0) {
291         prev = pageflags_find(start - 1, start - 1);
292         if (prev) {
293             if (prev->flags == flags) {
294                 interval_tree_remove(&prev->itree, &pageflags_root);
295             } else {
296                 prev = NULL;
297             }
298         }
299     }
300     if (last + 1 != 0) {
301         next = pageflags_find(last + 1, last + 1);
302         if (next) {
303             if (next->flags == flags) {
304                 interval_tree_remove(&next->itree, &pageflags_root);
305             } else {
306                 next = NULL;
307             }
308         }
309     }
310 
311     if (prev) {
312         if (next) {
313             prev->itree.last = next->itree.last;
314             g_free(next);
315         } else {
316             prev->itree.last = last;
317         }
318         interval_tree_insert(&prev->itree, &pageflags_root);
319     } else if (next) {
320         next->itree.start = start;
321         interval_tree_insert(&next->itree, &pageflags_root);
322     } else {
323         pageflags_create(start, last, flags);
324     }
325 }
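/*
 * A worked example of the merge above (4k pages, hypothetical addresses):
 * if the tree holds [0x1000,0x1fff] and [0x3000,0x3fff], both with
 * PAGE_READ, then pageflags_create_merge(0x2000, 0x2fff, PAGE_READ)
 * removes both neighbours and reinserts a single node [0x1000,0x3fff].
 * A neighbour whose flags differ is left in place, and only the new range
 * (possibly merged with the matching side) is inserted.
 */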
326 
327 /*
328  * Allow the target to decide if PAGE_TARGET_[12] may be reset.
329  * By default, they are not kept.
330  */
331 #ifndef PAGE_TARGET_STICKY
332 #define PAGE_TARGET_STICKY  0
333 #endif
334 #define PAGE_STICKY  (PAGE_ANON | PAGE_PASSTHROUGH | PAGE_TARGET_STICKY)
335 
336 /* A subroutine of page_set_flags: set and clear flags in [start,last]. */
337 static bool pageflags_set_clear(target_ulong start, target_ulong last,
338                                 int set_flags, int clear_flags)
339 {
340     PageFlagsNode *p;
341     target_ulong p_start, p_last;
342     int p_flags, merge_flags;
343     bool inval_tb = false;
344 
345  restart:
346     p = pageflags_find(start, last);
347     if (!p) {
348         if (set_flags) {
349             pageflags_create_merge(start, last, set_flags);
350         }
351         goto done;
352     }
353 
354     p_start = p->itree.start;
355     p_last = p->itree.last;
356     p_flags = p->flags;
357     /* Using mprotect on a page does not change sticky bits. */
358     merge_flags = (p_flags & ~clear_flags) | set_flags;
359 
360     /*
361      * Need to flush if an overlapping executable region
362      * removes exec, or adds write.
363      */
364     if ((p_flags & PAGE_EXEC)
365         && (!(merge_flags & PAGE_EXEC)
366             || (merge_flags & ~p_flags & PAGE_WRITE))) {
367         inval_tb = true;
368     }
369 
370     /*
371      * If there is an exact range match, update and return without
372      * attempting to merge with adjacent regions.
373      */
374     if (start == p_start && last == p_last) {
375         if (merge_flags) {
376             p->flags = merge_flags;
377         } else {
378             interval_tree_remove(&p->itree, &pageflags_root);
379             g_free(p);
380         }
381         goto done;
382     }
383 
384     /*
385      * If sticky bits affect the original mapping, then we must be more
386      * careful about the existing intervals and the separate flags.
387      */
388     if (set_flags != merge_flags) {
389         if (p_start < start) {
390             interval_tree_remove(&p->itree, &pageflags_root);
391             p->itree.last = start - 1;
392             interval_tree_insert(&p->itree, &pageflags_root);
393 
394             if (last < p_last) {
395                 if (merge_flags) {
396                     pageflags_create(start, last, merge_flags);
397                 }
398                 pageflags_create(last + 1, p_last, p_flags);
399             } else {
400                 if (merge_flags) {
401                     pageflags_create(start, p_last, merge_flags);
402                 }
403                 if (p_last < last) {
404                     start = p_last + 1;
405                     goto restart;
406                 }
407             }
408         } else {
409             if (start < p_start && set_flags) {
410                 pageflags_create(start, p_start - 1, set_flags);
411             }
412             if (last < p_last) {
413                 interval_tree_remove(&p->itree, &pageflags_root);
414                 p->itree.start = last + 1;
415                 interval_tree_insert(&p->itree, &pageflags_root);
416                 if (merge_flags) {
417                     pageflags_create(start, last, merge_flags);
418                 }
419             } else {
420                 if (merge_flags) {
421                     p->flags = merge_flags;
422                 } else {
423                     interval_tree_remove(&p->itree, &pageflags_root);
424                     g_free(p);
425                 }
426                 if (p_last < last) {
427                     start = p_last + 1;
428                     goto restart;
429                 }
430             }
431         }
432         goto done;
433     }
434 
435     /* If flags are not changing for this range, incorporate it. */
436     if (set_flags == p_flags) {
437         if (start < p_start) {
438             interval_tree_remove(&p->itree, &pageflags_root);
439             p->itree.start = start;
440             interval_tree_insert(&p->itree, &pageflags_root);
441         }
442         if (p_last < last) {
443             start = p_last + 1;
444             goto restart;
445         }
446         goto done;
447     }
448 
449     /* Maybe split out head and/or tail ranges with the original flags. */
450     interval_tree_remove(&p->itree, &pageflags_root);
451     if (p_start < start) {
452         p->itree.last = start - 1;
453         interval_tree_insert(&p->itree, &pageflags_root);
454 
455         if (p_last < last) {
456             goto restart;
457         }
458         if (last < p_last) {
459             pageflags_create(last + 1, p_last, p_flags);
460         }
461     } else if (last < p_last) {
462         p->itree.start = last + 1;
463         interval_tree_insert(&p->itree, &pageflags_root);
464     } else {
465         g_free(p);
466         goto restart;
467     }
468     if (set_flags) {
469         pageflags_create(start, last, set_flags);
470     }
471 
472  done:
473     return inval_tb;
474 }
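/*
 * A worked example of the splitting above (4k pages, hypothetical
 * addresses): starting from one node [0x1000,0x4fff] with
 * PAGE_READ|PAGE_ANON, a call
 *
 *     pageflags_set_clear(0x2000, 0x2fff,
 *                         PAGE_READ | PAGE_WRITE, ~PAGE_STICKY);
 *
 * computes merge_flags = PAGE_READ|PAGE_WRITE|PAGE_ANON (PAGE_ANON is
 * sticky, so it survives the clear), truncates the node to
 * [0x1000,0x1fff], and creates [0x2000,0x2fff] with merge_flags plus
 * [0x3000,0x4fff] with the original flags.
 */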
475 
476 /*
477  * Modify the flags of a page and invalidate the code if necessary.
478  * The flag PAGE_WRITE_ORG is positioned automatically depending
479  * on PAGE_WRITE.  The mmap_lock should already be held.
480  */
481 void page_set_flags(target_ulong start, target_ulong end, int flags)
482 {
483     target_ulong last;
484     bool reset = false;
485     bool inval_tb = false;
486 
487     /* This function should never be called with addresses outside the
488        guest address space.  If this assert fires, it probably indicates
489        a missing call to h2g_valid.  */
490     assert(start < end);
491     assert(end - 1 <= GUEST_ADDR_MAX);
492     /* Only set PAGE_ANON with new mappings. */
493     assert(!(flags & PAGE_ANON) || (flags & PAGE_RESET));
494     assert_memory_lock();
495 
496     start = start & TARGET_PAGE_MASK;
497     end = TARGET_PAGE_ALIGN(end);
498     last = end - 1;
499 
500     if (!(flags & PAGE_VALID)) {
501         flags = 0;
502     } else {
503         reset = flags & PAGE_RESET;
504         flags &= ~PAGE_RESET;
505         if (flags & PAGE_WRITE) {
506             flags |= PAGE_WRITE_ORG;
507         }
508     }
509 
510     if (!flags || reset) {
511         page_reset_target_data(start, end);
512         inval_tb |= pageflags_unset(start, last);
513     }
514     if (flags) {
515         inval_tb |= pageflags_set_clear(start, last, flags,
516                                         ~(reset ? 0 : PAGE_STICKY));
517     }
518     if (inval_tb) {
519         tb_invalidate_phys_range(start, end);
520     }
521 }
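/*
 * A minimal usage sketch, roughly as the target mmap/mprotect emulation
 * issues it (hypothetical values; callers hold the mmap lock and pass an
 * exclusive end address):
 *
 *     mmap_lock();
 *     page_set_flags(start, start + len,
 *                    PAGE_VALID | PAGE_READ | PAGE_WRITE | PAGE_RESET);
 *     mmap_unlock();
 */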
522 
523 int page_check_range(target_ulong start, target_ulong len, int flags)
524 {
525     target_ulong last;
526 
527     if (len == 0) {
528         return 0;  /* trivial length */
529     }
530 
531     last = start + len - 1;
532     if (last < start) {
533         return -1; /* wrap around */
534     }
535 
536     while (true) {
537         PageFlagsNode *p = pageflags_find(start, last);
538         int missing;
539 
540         if (!p) {
541             return -1; /* entire region invalid */
542         }
543         if (start < p->itree.start) {
544             return -1; /* initial bytes invalid */
545         }
546 
547         missing = flags & ~p->flags;
548         if (missing & PAGE_READ) {
549             return -1; /* page not readable */
550         }
551         if (missing & PAGE_WRITE) {
552             if (!(p->flags & PAGE_WRITE_ORG)) {
553                 return -1; /* page not writable */
554             }
555             /* Asking about writable, but has been protected: undo. */
556             if (!page_unprotect(start, 0)) {
557                 return -1;
558             }
559             /* TODO: page_unprotect should take a range, not a single page. */
560             if (last - start < TARGET_PAGE_SIZE) {
561                 return 0; /* ok */
562             }
563             start += TARGET_PAGE_SIZE;
564             continue;
565         }
566 
567         if (last <= p->itree.last) {
568             return 0; /* ok */
569         }
570         start = p->itree.last + 1;
571     }
572 }
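/*
 * A minimal usage sketch: validate a guest buffer before touching it via
 * g2h() (hypothetical locals; 0 means every page in the range has the
 * requested permissions):
 *
 *     if (page_check_range(guest_buf, size, PAGE_READ) != 0) {
 *         return -TARGET_EFAULT;
 *     }
 *     memcpy(host_buf, g2h(env_cpu(env), guest_buf), size);
 */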
573 
574 void page_protect(tb_page_addr_t address)
575 {
576     PageFlagsNode *p;
577     target_ulong start, last;
578     int prot;
579 
580     assert_memory_lock();
581 
582     if (qemu_host_page_size <= TARGET_PAGE_SIZE) {
583         start = address & TARGET_PAGE_MASK;
584         last = start + TARGET_PAGE_SIZE - 1;
585     } else {
586         start = address & qemu_host_page_mask;
587         last = start + qemu_host_page_size - 1;
588     }
589 
590     p = pageflags_find(start, last);
591     if (!p) {
592         return;
593     }
594     prot = p->flags;
595 
596     if (unlikely(p->itree.last < last)) {
597         /* More than one protection region covers the one host page. */
598         assert(TARGET_PAGE_SIZE < qemu_host_page_size);
599         while ((p = pageflags_next(p, start, last)) != NULL) {
600             prot |= p->flags;
601         }
602     }
603 
604     if (prot & PAGE_WRITE) {
605         pageflags_set_clear(start, last, 0, PAGE_WRITE);
606         mprotect(g2h_untagged(start), qemu_host_page_size,
607                  prot & (PAGE_READ | PAGE_EXEC) ? PROT_READ : PROT_NONE);
608     }
609 }
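/*
 * Roughly speaking, page_protect() is invoked when a translation block is
 * registered on a writable page (see tb-maint.c), so that a later guest
 * store to that page faults and is funnelled into page_unprotect() below,
 * which invalidates the affected TBs before restoring write access.
 */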
610 
611 /*
612  * Called from signal handler: invalidate the code and unprotect the
613  * page. Return 0 if the fault was not handled, 1 if it was handled,
614  * and 2 if it was handled but the caller must cause the TB to be
615  * immediately exited. (We can only return 2 if the 'pc' argument is
616  * non-zero.)
617  */
618 int page_unprotect(target_ulong address, uintptr_t pc)
619 {
620     PageFlagsNode *p;
621     bool current_tb_invalidated;
622 
623     /*
624      * Technically this isn't safe inside a signal handler.  However, we
625      * know this only ever happens in a synchronous SEGV handler, so in
626      * practice it seems to be ok.
627      */
628     mmap_lock();
629 
630     p = pageflags_find(address, address);
631 
632     /* If this address was not really writable, nothing to do. */
633     if (!p || !(p->flags & PAGE_WRITE_ORG)) {
634         mmap_unlock();
635         return 0;
636     }
637 
638     current_tb_invalidated = false;
639     if (p->flags & PAGE_WRITE) {
640         /*
641          * If the page is actually marked WRITE then assume this is because
642          * this thread raced with another one which got here first and
643          * set the page to PAGE_WRITE and did the TB invalidate for us.
644          */
645 #ifdef TARGET_HAS_PRECISE_SMC
646         TranslationBlock *current_tb = tcg_tb_lookup(pc);
647         if (current_tb) {
648             current_tb_invalidated = tb_cflags(current_tb) & CF_INVALID;
649         }
650 #endif
651     } else {
652         target_ulong start, len, i;
653         int prot;
654 
655         if (qemu_host_page_size <= TARGET_PAGE_SIZE) {
656             start = address & TARGET_PAGE_MASK;
657             len = TARGET_PAGE_SIZE;
658             prot = p->flags | PAGE_WRITE;
659             pageflags_set_clear(start, start + len - 1, PAGE_WRITE, 0);
660             current_tb_invalidated = tb_invalidate_phys_page_unwind(start, pc);
661         } else {
662             start = address & qemu_host_page_mask;
663             len = qemu_host_page_size;
664             prot = 0;
665 
666             for (i = 0; i < len; i += TARGET_PAGE_SIZE) {
667                 target_ulong addr = start + i;
668 
669                 p = pageflags_find(addr, addr);
670                 if (p) {
671                     prot |= p->flags;
672                     if (p->flags & PAGE_WRITE_ORG) {
673                         prot |= PAGE_WRITE;
674                         pageflags_set_clear(addr, addr + TARGET_PAGE_SIZE - 1,
675                                             PAGE_WRITE, 0);
676                     }
677                 }
678                 /*
679                  * Since the content will be modified, we must invalidate
680                  * the corresponding translated code.
681                  */
682                 current_tb_invalidated |=
683                     tb_invalidate_phys_page_unwind(addr, pc);
684             }
685         }
686         if (prot & PAGE_EXEC) {
687             prot = (prot & ~PAGE_EXEC) | PAGE_READ;
688         }
689         mprotect((void *)g2h_untagged(start), len, prot & PAGE_BITS);
690     }
691     mmap_unlock();
692 
693     /* If the current TB was invalidated, tell the caller to return to the main loop. */
694     return current_tb_invalidated ? 2 : 1;
695 }
696 
697 static int probe_access_internal(CPUArchState *env, target_ulong addr,
698                                  int fault_size, MMUAccessType access_type,
699                                  bool nonfault, uintptr_t ra)
700 {
701     int acc_flag;
702     bool maperr;
703 
704     switch (access_type) {
705     case MMU_DATA_STORE:
706         acc_flag = PAGE_WRITE_ORG;
707         break;
708     case MMU_DATA_LOAD:
709         acc_flag = PAGE_READ;
710         break;
711     case MMU_INST_FETCH:
712         acc_flag = PAGE_EXEC;
713         break;
714     default:
715         g_assert_not_reached();
716     }
717 
718     if (guest_addr_valid_untagged(addr)) {
719         int page_flags = page_get_flags(addr);
720         if (page_flags & acc_flag) {
721             return 0; /* success */
722         }
723         maperr = !(page_flags & PAGE_VALID);
724     } else {
725         maperr = true;
726     }
727 
728     if (nonfault) {
729         return TLB_INVALID_MASK;
730     }
731 
732     cpu_loop_exit_sigsegv(env_cpu(env), addr, access_type, maperr, ra);
733 }
734 
735 int probe_access_flags(CPUArchState *env, target_ulong addr,
736                        MMUAccessType access_type, int mmu_idx,
737                        bool nonfault, void **phost, uintptr_t ra)
738 {
739     int flags;
740 
741     flags = probe_access_internal(env, addr, 0, access_type, nonfault, ra);
742     *phost = flags ? NULL : g2h(env_cpu(env), addr);
743     return flags;
744 }
745 
746 void *probe_access(CPUArchState *env, target_ulong addr, int size,
747                    MMUAccessType access_type, int mmu_idx, uintptr_t ra)
748 {
749     int flags;
750 
751     g_assert(-(addr | TARGET_PAGE_MASK) >= size);
752     flags = probe_access_internal(env, addr, size, access_type, false, ra);
753     g_assert(flags == 0);
754 
755     return size ? g2h(env_cpu(env), addr) : NULL;
756 }
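/*
 * A minimal sketch of the non-faulting probe, as a target helper might
 * use it (hypothetical variables):
 *
 *     void *host;
 *     int flags = probe_access_flags(env, addr, MMU_DATA_LOAD,
 *                                    mmu_idx, true, &host, GETPC());
 *     if (flags & TLB_INVALID_MASK) {
 *         // not accessible: take a slow path without raising SIGSEGV
 *     } else {
 *         // host points at the guest data; direct access is safe
 *     }
 */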
757 
758 tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
759                                         void **hostp)
760 {
761     int flags;
762 
763     flags = probe_access_internal(env, addr, 1, MMU_INST_FETCH, false, 0);
764     g_assert(flags == 0);
765 
766     if (hostp) {
767         *hostp = g2h_untagged(addr);
768     }
769     return addr;
770 }
771 
772 #ifdef TARGET_PAGE_DATA_SIZE
773 /*
774  * Allocate chunks of target data together.  For the only current user,
775  * if we allocate one chunk per page, we have overhead of 40/128, or roughly 31%.
776  * Therefore, allocate memory for 64 pages at a time for overhead < 1%.
777  */
778 #define TPD_PAGES  64
779 #define TBD_MASK   (TARGET_PAGE_MASK * TPD_PAGES)
780 
781 typedef struct TargetPageDataNode {
782     IntervalTreeNode itree;
783     char data[TPD_PAGES][TARGET_PAGE_DATA_SIZE] __attribute__((aligned));
784 } TargetPageDataNode;
785 
786 static IntervalTreeRoot targetdata_root;
787 
788 void page_reset_target_data(target_ulong start, target_ulong end)
789 {
790     IntervalTreeNode *n, *next;
791     target_ulong last;
792 
793     assert_memory_lock();
794 
795     start = start & TARGET_PAGE_MASK;
796     last = TARGET_PAGE_ALIGN(end) - 1;
797 
798     for (n = interval_tree_iter_first(&targetdata_root, start, last),
799          next = n ? interval_tree_iter_next(n, start, last) : NULL;
800          n != NULL;
801          n = next,
802          next = next ? interval_tree_iter_next(n, start, last) : NULL) {
803         target_ulong n_start, n_last, p_ofs, p_len;
804         TargetPageDataNode *t;
805 
806         if (n->start >= start && n->last <= last) {
807             interval_tree_remove(n, &targetdata_root);
808             g_free(n);
809             continue;
810         }
811 
812         if (n->start < start) {
813             n_start = start;
814             p_ofs = (start - n->start) >> TARGET_PAGE_BITS;
815         } else {
816             n_start = n->start;
817             p_ofs = 0;
818         }
819         n_last = MIN(last, n->last);
820         p_len = (n_last + 1 - n_start) >> TARGET_PAGE_BITS;
821 
822         t = container_of(n, TargetPageDataNode, itree);
823         memset(t->data[p_ofs], 0, p_len * TARGET_PAGE_DATA_SIZE);
824     }
825 }
826 
827 void *page_get_target_data(target_ulong address)
828 {
829     IntervalTreeNode *n;
830     TargetPageDataNode *t;
831     target_ulong page, region;
832 
833     page = address & TARGET_PAGE_MASK;
834     region = address & TBD_MASK;
835 
836     n = interval_tree_iter_first(&targetdata_root, page, page);
837     if (!n) {
838         /*
839          * See util/interval-tree.c re lockless lookups: no false positives
840          * but there are false negatives.  If we find nothing, retry with
841          * the mmap lock acquired.  We also need the lock for the
842          * allocation + insert.
843          */
844         mmap_lock();
845         n = interval_tree_iter_first(&targetdata_root, page, page);
846         if (!n) {
847             t = g_new0(TargetPageDataNode, 1);
848             n = &t->itree;
849             n->start = region;
850             n->last = region | ~TBD_MASK;
851             interval_tree_insert(n, &targetdata_root);
852         }
853         mmap_unlock();
854     }
855 
856     t = container_of(n, TargetPageDataNode, itree);
857     return t->data[(page - region) >> TARGET_PAGE_BITS];
858 }
859 #else
860 void page_reset_target_data(target_ulong start, target_ulong end) { }
861 #endif /* TARGET_PAGE_DATA_SIZE */
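/*
 * A hedged usage sketch: a target that defines TARGET_PAGE_DATA_SIZE
 * (e.g. aarch64, which stores MTE allocation tags this way) can attach
 * per-page metadata:
 *
 *     void *tags = page_get_target_data(guest_addr);
 *     // TARGET_PAGE_DATA_SIZE bytes, zeroed on first use and cleared
 *     // again by page_reset_target_data() when the page is remapped.
 */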
862 
863 /* The softmmu versions of these helpers are in cputlb.c.  */
864 
865 /*
866  * Verify that we have passed the correct MemOp to the correct function.
867  *
868  * We could present one function to target code, and dispatch based on
869  * the MemOp, but so far we have worked hard to avoid an indirect function
870  * call along the memory path.
871  */
872 static void validate_memop(MemOpIdx oi, MemOp expected)
873 {
874 #ifdef CONFIG_DEBUG_TCG
875     MemOp have = get_memop(oi) & (MO_SIZE | MO_BSWAP);
876     assert(have == expected);
877 #endif
878 }
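/*
 * A minimal sketch of how callers construct the MemOpIdx these routines
 * take (hypothetical helper context):
 *
 *     MemOpIdx oi = make_memop_idx(MO_BEUL, cpu_mmu_index(env, false));
 *     uint32_t val = cpu_ldl_be_mmu(env, addr, oi, GETPC());
 */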
879 
880 void helper_unaligned_ld(CPUArchState *env, target_ulong addr)
881 {
882     cpu_loop_exit_sigbus(env_cpu(env), addr, MMU_DATA_LOAD, GETPC());
883 }
884 
885 void helper_unaligned_st(CPUArchState *env, target_ulong addr)
886 {
887     cpu_loop_exit_sigbus(env_cpu(env), addr, MMU_DATA_STORE, GETPC());
888 }
889 
890 static void *cpu_mmu_lookup(CPUArchState *env, target_ulong addr,
891                             MemOpIdx oi, uintptr_t ra, MMUAccessType type)
892 {
893     MemOp mop = get_memop(oi);
894     int a_bits = get_alignment_bits(mop);
895     void *ret;
896 
897     /* Enforce guest required alignment.  */
898     if (unlikely(addr & ((1 << a_bits) - 1))) {
899         cpu_loop_exit_sigbus(env_cpu(env), addr, type, ra);
900     }
901 
902     ret = g2h(env_cpu(env), addr);
903     set_helper_retaddr(ra);
904     return ret;
905 }
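/*
 * Each accessor below follows the same bracket: cpu_mmu_lookup() checks
 * guest alignment, converts the guest address with g2h(), and publishes
 * the caller's return address via set_helper_retaddr() so that a host
 * fault during the access unwinds correctly; clear_helper_retaddr() runs
 * as soon as the host access is done.
 */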
906 
907 uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr,
908                     MemOpIdx oi, uintptr_t ra)
909 {
910     void *haddr;
911     uint8_t ret;
912 
913     validate_memop(oi, MO_UB);
914     haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
915     ret = ldub_p(haddr);
916     clear_helper_retaddr();
917     qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
918     return ret;
919 }
920 
921 uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr addr,
922                         MemOpIdx oi, uintptr_t ra)
923 {
924     void *haddr;
925     uint16_t ret;
926 
927     validate_memop(oi, MO_BEUW);
928     haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
929     ret = lduw_be_p(haddr);
930     clear_helper_retaddr();
931     qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
932     return ret;
933 }
934 
935 uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr addr,
936                         MemOpIdx oi, uintptr_t ra)
937 {
938     void *haddr;
939     uint32_t ret;
940 
941     validate_memop(oi, MO_BEUL);
942     haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
943     ret = ldl_be_p(haddr);
944     clear_helper_retaddr();
945     qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
946     return ret;
947 }
948 
949 uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr,
950                         MemOpIdx oi, uintptr_t ra)
951 {
952     void *haddr;
953     uint64_t ret;
954 
955     validate_memop(oi, MO_BEUQ);
956     haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
957     ret = ldq_be_p(haddr);
958     clear_helper_retaddr();
959     qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
960     return ret;
961 }
962 
963 uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr addr,
964                         MemOpIdx oi, uintptr_t ra)
965 {
966     void *haddr;
967     uint16_t ret;
968 
969     validate_memop(oi, MO_LEUW);
970     haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
971     ret = lduw_le_p(haddr);
972     clear_helper_retaddr();
973     qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
974     return ret;
975 }
976 
977 uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr addr,
978                         MemOpIdx oi, uintptr_t ra)
979 {
980     void *haddr;
981     uint32_t ret;
982 
983     validate_memop(oi, MO_LEUL);
984     haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
985     ret = ldl_le_p(haddr);
986     clear_helper_retaddr();
987     qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
988     return ret;
989 }
990 
991 uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr,
992                         MemOpIdx oi, uintptr_t ra)
993 {
994     void *haddr;
995     uint64_t ret;
996 
997     validate_memop(oi, MO_LEUQ);
998     haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
999     ret = ldq_le_p(haddr);
1000     clear_helper_retaddr();
1001     qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
1002     return ret;
1003 }
1004 
1005 void cpu_stb_mmu(CPUArchState *env, abi_ptr addr, uint8_t val,
1006                  MemOpIdx oi, uintptr_t ra)
1007 {
1008     void *haddr;
1009 
1010     validate_memop(oi, MO_UB);
1011     haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
1012     stb_p(haddr, val);
1013     clear_helper_retaddr();
1014     qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
1015 }
1016 
1017 void cpu_stw_be_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
1018                     MemOpIdx oi, uintptr_t ra)
1019 {
1020     void *haddr;
1021 
1022     validate_memop(oi, MO_BEUW);
1023     haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
1024     stw_be_p(haddr, val);
1025     clear_helper_retaddr();
1026     qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
1027 }
1028 
1029 void cpu_stl_be_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
1030                     MemOpIdx oi, uintptr_t ra)
1031 {
1032     void *haddr;
1033 
1034     validate_memop(oi, MO_BEUL);
1035     haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
1036     stl_be_p(haddr, val);
1037     clear_helper_retaddr();
1038     qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
1039 }
1040 
1041 void cpu_stq_be_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
1042                     MemOpIdx oi, uintptr_t ra)
1043 {
1044     void *haddr;
1045 
1046     validate_memop(oi, MO_BEUQ);
1047     haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
1048     stq_be_p(haddr, val);
1049     clear_helper_retaddr();
1050     qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
1051 }
1052 
1053 void cpu_stw_le_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
1054                     MemOpIdx oi, uintptr_t ra)
1055 {
1056     void *haddr;
1057 
1058     validate_memop(oi, MO_LEUW);
1059     haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
1060     stw_le_p(haddr, val);
1061     clear_helper_retaddr();
1062     qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
1063 }
1064 
1065 void cpu_stl_le_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
1066                     MemOpIdx oi, uintptr_t ra)
1067 {
1068     void *haddr;
1069 
1070     validate_memop(oi, MO_LEUL);
1071     haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
1072     stl_le_p(haddr, val);
1073     clear_helper_retaddr();
1074     qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
1075 }
1076 
1077 void cpu_stq_le_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
1078                     MemOpIdx oi, uintptr_t ra)
1079 {
1080     void *haddr;
1081 
1082     validate_memop(oi, MO_LEUQ);
1083     haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
1084     stq_le_p(haddr, val);
1085     clear_helper_retaddr();
1086     qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
1087 }
1088 
1089 uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr ptr)
1090 {
1091     uint32_t ret;
1092 
1093     set_helper_retaddr(1);
1094     ret = ldub_p(g2h_untagged(ptr));
1095     clear_helper_retaddr();
1096     return ret;
1097 }
1098 
1099 uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr ptr)
1100 {
1101     uint32_t ret;
1102 
1103     set_helper_retaddr(1);
1104     ret = lduw_p(g2h_untagged(ptr));
1105     clear_helper_retaddr();
1106     return ret;
1107 }
1108 
1109 uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr ptr)
1110 {
1111     uint32_t ret;
1112 
1113     set_helper_retaddr(1);
1114     ret = ldl_p(g2h_untagged(ptr));
1115     clear_helper_retaddr();
1116     return ret;
1117 }
1118 
1119 uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr ptr)
1120 {
1121     uint64_t ret;
1122 
1123     set_helper_retaddr(1);
1124     ret = ldq_p(g2h_untagged(ptr));
1125     clear_helper_retaddr();
1126     return ret;
1127 }
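/*
 * Note the set_helper_retaddr(1) pattern in the *_code loads above: the
 * value 1 is the marker adjust_signal_pc() recognises as "fault during a
 * translation-time fetch", reported as MMU_INST_FETCH without unwinding.
 */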
1128 
1129 #include "ldst_common.c.inc"
1130 
1131 /*
1132  * Do not allow unaligned operations to proceed.  Return the host address.
1133  *
1134  * @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE.
1135  */
1136 static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
1137                                MemOpIdx oi, int size, int prot,
1138                                uintptr_t retaddr)
1139 {
1140     MemOp mop = get_memop(oi);
1141     int a_bits = get_alignment_bits(mop);
1142     void *ret;
1143 
1144     /* Enforce guest required alignment.  */
1145     if (unlikely(addr & ((1 << a_bits) - 1))) {
1146         MMUAccessType t = prot == PAGE_READ ? MMU_DATA_LOAD : MMU_DATA_STORE;
1147         cpu_loop_exit_sigbus(env_cpu(env), addr, t, retaddr);
1148     }
1149 
1150     /* Enforce qemu required alignment.  */
1151     if (unlikely(addr & (size - 1))) {
1152         cpu_loop_exit_atomic(env_cpu(env), retaddr);
1153     }
1154 
1155     ret = g2h(env_cpu(env), addr);
1156     set_helper_retaddr(retaddr);
1157     return ret;
1158 }
1159 
1160 #include "atomic_common.c.inc"
1161 
1162 /*
1163  * First set of functions passes in OI and RETADDR.
1164  * This makes them callable from other helpers.
1165  */
1166 
1167 #define ATOMIC_NAME(X) \
1168     glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)
1169 #define ATOMIC_MMU_CLEANUP do { clear_helper_retaddr(); } while (0)
1170 
1171 #define DATA_SIZE 1
1172 #include "atomic_template.h"
1173 
1174 #define DATA_SIZE 2
1175 #include "atomic_template.h"
1176 
1177 #define DATA_SIZE 4
1178 #include "atomic_template.h"
1179 
1180 #ifdef CONFIG_ATOMIC64
1181 #define DATA_SIZE 8
1182 #include "atomic_template.h"
1183 #endif
1184 
1185 #if HAVE_ATOMIC128 || HAVE_CMPXCHG128
1186 #define DATA_SIZE 16
1187 #include "atomic_template.h"
1188 #endif
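/*
 * A hedged usage sketch of the helpers instantiated above (hypothetical
 * values; the cpu_atomic_*_mmu names are produced by ATOMIC_NAME):
 *
 *     MemOpIdx oi = make_memop_idx(MO_LEUL, cpu_mmu_index(env, false));
 *     uint32_t old = cpu_atomic_cmpxchgl_le_mmu(env, addr, cmpv, newv,
 *                                               oi, GETPC());
 */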
1189