xref: /openbmc/qemu/accel/tcg/translate-all.c (revision 19f4ed36)
1 /*
2  *  Host code generation
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include "qemu/osdep.h"
21 #include "qemu/units.h"
22 #include "qemu-common.h"
23 
24 #define NO_CPU_IO_DEFS
25 #include "cpu.h"
26 #include "trace.h"
27 #include "disas/disas.h"
28 #include "exec/exec-all.h"
29 #include "tcg/tcg.h"
30 #if defined(CONFIG_USER_ONLY)
31 #include "qemu.h"
32 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
33 #include <sys/param.h>
34 #if __FreeBSD_version >= 700104
35 #define HAVE_KINFO_GETVMMAP
36 #define sigqueue sigqueue_freebsd  /* avoid redefinition */
37 #include <sys/proc.h>
38 #include <machine/profile.h>
39 #define _KERNEL
40 #include <sys/user.h>
41 #undef _KERNEL
42 #undef sigqueue
43 #include <libutil.h>
44 #endif
45 #endif
46 #else
47 #include "exec/ram_addr.h"
48 #endif
49 
50 #include "exec/cputlb.h"
51 #include "exec/tb-hash.h"
52 #include "exec/translate-all.h"
53 #include "qemu/bitmap.h"
54 #include "qemu/error-report.h"
55 #include "qemu/qemu-print.h"
56 #include "qemu/timer.h"
57 #include "qemu/main-loop.h"
58 #include "exec/log.h"
59 #include "sysemu/cpus.h"
60 #include "sysemu/cpu-timers.h"
61 #include "sysemu/tcg.h"
62 #include "qapi/error.h"
63 #include "hw/core/tcg-cpu-ops.h"
64 #include "internal.h"
65 
66 /* #define DEBUG_TB_INVALIDATE */
67 /* #define DEBUG_TB_FLUSH */
68 /* make various TB consistency checks */
69 /* #define DEBUG_TB_CHECK */
70 
71 #ifdef DEBUG_TB_INVALIDATE
72 #define DEBUG_TB_INVALIDATE_GATE 1
73 #else
74 #define DEBUG_TB_INVALIDATE_GATE 0
75 #endif
76 
77 #ifdef DEBUG_TB_FLUSH
78 #define DEBUG_TB_FLUSH_GATE 1
79 #else
80 #define DEBUG_TB_FLUSH_GATE 0
81 #endif
82 
83 #if !defined(CONFIG_USER_ONLY)
84 /* TB consistency checks only implemented for usermode emulation.  */
85 #undef DEBUG_TB_CHECK
86 #endif
87 
88 #ifdef DEBUG_TB_CHECK
89 #define DEBUG_TB_CHECK_GATE 1
90 #else
91 #define DEBUG_TB_CHECK_GATE 0
92 #endif
93 
94 /* Access to the various translation structures needs to be serialised
95  * via locks for consistency.
96  * In user-mode emulation, access to the memory-related structures is
97  * protected by mmap_lock.
98  * In !user-mode we use per-page locks.
99  */
100 #ifdef CONFIG_SOFTMMU
101 #define assert_memory_lock()
102 #else
103 #define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
104 #endif
105 
106 #define SMC_BITMAP_USE_THRESHOLD 10
107 
108 typedef struct PageDesc {
109     /* list of TBs intersecting this ram page */
110     uintptr_t first_tb;
111 #ifdef CONFIG_SOFTMMU
112     /* in order to optimize self-modifying code, we count the writes to a
113        given page and switch to using a bitmap past a threshold */
114     unsigned long *code_bitmap;
115     unsigned int code_write_count;
116 #else
117     unsigned long flags;
118     void *target_data;
119 #endif
120 #ifndef CONFIG_USER_ONLY
121     QemuSpin lock;
122 #endif
123 } PageDesc;
124 
125 /**
126  * struct page_entry - page descriptor entry
127  * @pd:     pointer to the &struct PageDesc of the page this entry represents
128  * @index:  page index of the page
129  * @locked: whether the page is locked
130  *
131  * This struct helps us keep track of the locked state of a page, without
132  * bloating &struct PageDesc.
133  *
134  * A page lock protects accesses to all fields of &struct PageDesc.
135  *
136  * See also: &struct page_collection.
137  */
138 struct page_entry {
139     PageDesc *pd;
140     tb_page_addr_t index;
141     bool locked;
142 };
143 
144 /**
145  * struct page_collection - tracks a set of pages (i.e. &struct page_entry's)
146  * @tree:   Binary search tree (BST) of the pages, with key == page index
147  * @max:    Pointer to the page in @tree with the highest page index
148  *
149  * To avoid deadlock we lock pages in ascending order of page index.
150  * When operating on a set of pages, we need to keep track of them so that
151  * we can lock them in order and also unlock them later. For this we collect
152  * pages (i.e. &struct page_entry's) in a binary search @tree. Given that the
153  * @tree implementation we use does not provide an O(1) operation to obtain the
154  * highest-ranked element, we use @max to keep track of the inserted page
155  * with the highest index. This is valuable because if a page is not in
156  * the tree and its index is higher than @max's, then we can lock it
157  * without breaking the locking order rule.
158  *
159  * Note on naming: 'struct page_set' would be shorter, but we already have a few
160  * page_set_*() helpers, so page_collection is used instead to avoid confusion.
161  *
162  * See also: page_collection_lock().
163  */
164 struct page_collection {
165     GTree *tree;
166     struct page_entry *max;
167 };
168 
169 /* list iterators for lists of tagged pointers in TranslationBlock */
170 #define TB_FOR_EACH_TAGGED(head, tb, n, field)                          \
171     for (n = (head) & 1, tb = (TranslationBlock *)((head) & ~1);        \
172          tb; tb = (TranslationBlock *)tb->field[n], n = (uintptr_t)tb & 1, \
173              tb = (TranslationBlock *)((uintptr_t)tb & ~1))
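/*
 * Bit 0 of the head word and of each tb->field[n] link stores which of the
 * TB's two slots (n == 0 or 1) the link belongs to, so a single list can
 * thread a TB through either of its page_next[] or jmp_list_next[] entries.
 * The iterator strips the tag from each pointer and carries n along.
 */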
174 
175 #define PAGE_FOR_EACH_TB(pagedesc, tb, n)                       \
176     TB_FOR_EACH_TAGGED((pagedesc)->first_tb, tb, n, page_next)
177 
178 #define TB_FOR_EACH_JMP(head_tb, tb, n)                                 \
179     TB_FOR_EACH_TAGGED((head_tb)->jmp_list_head, tb, n, jmp_list_next)
180 
181 /*
182  * In system mode we want L1_MAP to be based on ram offsets,
183  * while in user mode we want it to be based on virtual addresses.
184  *
185  * TODO: For user mode, see the caveat re host vs guest virtual
186  * address spaces near GUEST_ADDR_MAX.
187  */
188 #if !defined(CONFIG_USER_ONLY)
189 #if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
190 # define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
191 #else
192 # define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
193 #endif
194 #else
195 # define L1_MAP_ADDR_SPACE_BITS  MIN(HOST_LONG_BITS, TARGET_ABI_BITS)
196 #endif
197 
198 /* Size of the L2 (and L3, etc) page tables.  */
199 #define V_L2_BITS 10
200 #define V_L2_SIZE (1 << V_L2_BITS)
201 
202 /* Make sure all possible CPU event bits fit in tb->trace_vcpu_dstate */
203 QEMU_BUILD_BUG_ON(CPU_TRACE_DSTATE_MAX_EVENTS >
204                   sizeof_field(TranslationBlock, trace_vcpu_dstate)
205                   * BITS_PER_BYTE);
206 
207 /*
208  * L1 Mapping properties
209  */
210 static int v_l1_size;
211 static int v_l1_shift;
212 static int v_l2_levels;
213 
214 /* The bottom level has pointers to PageDesc, and is indexed by
215  * anything from 4 to (V_L2_BITS + 3) bits, depending on target page size.
216  */
217 #define V_L1_MIN_BITS 4
218 #define V_L1_MAX_BITS (V_L2_BITS + 3)
219 #define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS)
220 
221 static void *l1_map[V_L1_MAX_SIZE];
222 
223 /* code generation context */
224 TCGContext tcg_init_ctx;
225 __thread TCGContext *tcg_ctx;
226 TBContext tb_ctx;
227 
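/*
 * Illustrative sizing example (assumed numbers, not a fixed configuration):
 * with a 64-bit host, a 36-bit physical address space and 12-bit pages,
 * 36 - 12 = 24 bits of page index remain; 24 % 10 gives v_l1_bits = 4
 * (already >= V_L1_MIN_BITS), so the L1 table has 16 entries,
 * v_l1_shift = 20 and v_l2_levels = 1, i.e. one intermediate 1024-entry
 * level sits between L1 and the bottom level of PageDesc arrays.
 */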
228 static void page_table_config_init(void)
229 {
230     uint32_t v_l1_bits;
231 
232     assert(TARGET_PAGE_BITS);
233     /* The bits remaining after N lower levels of page tables.  */
234     v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS;
235     if (v_l1_bits < V_L1_MIN_BITS) {
236         v_l1_bits += V_L2_BITS;
237     }
238 
239     v_l1_size = 1 << v_l1_bits;
240     v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits;
241     v_l2_levels = v_l1_shift / V_L2_BITS - 1;
242 
243     assert(v_l1_bits <= V_L1_MAX_BITS);
244     assert(v_l1_shift % V_L2_BITS == 0);
245     assert(v_l2_levels >= 0);
246 }
247 
248 static void cpu_gen_init(void)
249 {
250     tcg_context_init(&tcg_init_ctx);
251 }
252 
253 /* Encode VAL as a signed leb128 sequence at P.
254    Return P incremented past the encoded value.  */
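/* Illustrative encodings: 300 becomes the two bytes 0xac 0x02
   (0x2c | 0x80, then 0x02), while -2 fits in the single byte 0x7e;
   decode_sleb128() below reverses both.  */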
255 static uint8_t *encode_sleb128(uint8_t *p, target_long val)
256 {
257     int more, byte;
258 
259     do {
260         byte = val & 0x7f;
261         val >>= 7;
262         more = !((val == 0 && (byte & 0x40) == 0)
263                  || (val == -1 && (byte & 0x40) != 0));
264         if (more) {
265             byte |= 0x80;
266         }
267         *p++ = byte;
268     } while (more);
269 
270     return p;
271 }
272 
273 /* Decode a signed leb128 sequence at *PP; increment *PP past the
274    decoded value.  Return the decoded value.  */
275 static target_long decode_sleb128(const uint8_t **pp)
276 {
277     const uint8_t *p = *pp;
278     target_long val = 0;
279     int byte, shift = 0;
280 
281     do {
282         byte = *p++;
283         val |= (target_ulong)(byte & 0x7f) << shift;
284         shift += 7;
285     } while (byte & 0x80);
286     if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
287         val |= -(target_ulong)1 << shift;
288     }
289 
290     *pp = p;
291     return val;
292 }
293 
294 /* Encode the data collected about the instructions while compiling TB.
295    Place the data at BLOCK, and return the number of bytes consumed.
296 
297    The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
298    which come from the target's insn_start data, followed by a uintptr_t
299    which comes from the host pc of the end of the code implementing the insn.
300 
301    Each line of the table is encoded as sleb128 deltas from the previous
302    line.  The seed for the first line is { tb->pc, 0..., tb->tc.ptr }.
303    That is, the first column is seeded with the guest pc, the last column
304    with the host pc, and the middle columns with zeros.  */
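/* Invented illustration, assuming TARGET_INSN_START_WORDS == 1: a two-insn
   TB at guest pc 0x1000, with insns starting at 0x1000 and 0x1004 and ending
   at host code offsets 0x30 and 0x58 from tb->tc.ptr, is encoded as the two
   sleb128 rows {0, 0x30} and {4, 0x28}.  */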
305 
306 static int encode_search(TranslationBlock *tb, uint8_t *block)
307 {
308     uint8_t *highwater = tcg_ctx->code_gen_highwater;
309     uint8_t *p = block;
310     int i, j, n;
311 
312     for (i = 0, n = tb->icount; i < n; ++i) {
313         target_ulong prev;
314 
315         for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
316             if (i == 0) {
317                 prev = (j == 0 ? tb->pc : 0);
318             } else {
319                 prev = tcg_ctx->gen_insn_data[i - 1][j];
320             }
321             p = encode_sleb128(p, tcg_ctx->gen_insn_data[i][j] - prev);
322         }
323         prev = (i == 0 ? 0 : tcg_ctx->gen_insn_end_off[i - 1]);
324         p = encode_sleb128(p, tcg_ctx->gen_insn_end_off[i] - prev);
325 
326         /* Test for (pending) buffer overflow.  The assumption is that any
327            one row beginning below the high water mark cannot overrun
328            the buffer completely.  Thus we can test for overflow after
329            encoding a row without having to check during encoding.  */
330         if (unlikely(p > highwater)) {
331             return -1;
332         }
333     }
334 
335     return p - block;
336 }
337 
338 /* The cpu state corresponding to 'searched_pc' is restored.
339  * When reset_icount is true, the current TB will be interrupted and
340  * icount should be recalculated.
341  */
342 static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
343                                      uintptr_t searched_pc, bool reset_icount)
344 {
345     target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc };
346     uintptr_t host_pc = (uintptr_t)tb->tc.ptr;
347     CPUArchState *env = cpu->env_ptr;
348     const uint8_t *p = tb->tc.ptr + tb->tc.size;
349     int i, j, num_insns = tb->icount;
350 #ifdef CONFIG_PROFILER
351     TCGProfile *prof = &tcg_ctx->prof;
352     int64_t ti = profile_getclock();
353 #endif
354 
355     searched_pc -= GETPC_ADJ;
356 
357     if (searched_pc < host_pc) {
358         return -1;
359     }
360 
361     /* Reconstruct the stored insn data while looking for the point at
362        which the end of the insn exceeds the searched_pc.  */
363     for (i = 0; i < num_insns; ++i) {
364         for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
365             data[j] += decode_sleb128(&p);
366         }
367         host_pc += decode_sleb128(&p);
368         if (host_pc > searched_pc) {
369             goto found;
370         }
371     }
372     return -1;
373 
374  found:
375     if (reset_icount && (tb_cflags(tb) & CF_USE_ICOUNT)) {
376         assert(icount_enabled());
377         /* Reset the cycle counter to the start of the block
378            and shift it by the number of actually executed instructions */
379         cpu_neg(cpu)->icount_decr.u16.low += num_insns - i;
380     }
381     restore_state_to_opc(env, tb, data);
382 
383 #ifdef CONFIG_PROFILER
384     qatomic_set(&prof->restore_time,
385                 prof->restore_time + profile_getclock() - ti);
386     qatomic_set(&prof->restore_count, prof->restore_count + 1);
387 #endif
388     return 0;
389 }
390 
391 void tb_destroy(TranslationBlock *tb)
392 {
393     qemu_spin_destroy(&tb->jmp_lock);
394 }
395 
396 bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc, bool will_exit)
397 {
398     /*
399      * The host_pc has to be in the rx region of the code buffer.
400      * If it is not, we will not be able to resolve it here.
401      * The two cases where host_pc will not be correct are:
402      *
403      *  - fault during translation (instruction fetch)
404      *  - fault from helper (not using GETPC() macro)
405      *
406      * Either way we need to return early as we can't resolve it here.
407      */
408     if (in_code_gen_buffer((const void *)(host_pc - tcg_splitwx_diff))) {
409         TranslationBlock *tb = tcg_tb_lookup(host_pc);
410         if (tb) {
411             cpu_restore_state_from_tb(cpu, tb, host_pc, will_exit);
412             return true;
413         }
414     }
415     return false;
416 }
417 
418 static void page_init(void)
419 {
420     page_size_init();
421     page_table_config_init();
422 
423 #if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
424     {
425 #ifdef HAVE_KINFO_GETVMMAP
426         struct kinfo_vmentry *freep;
427         int i, cnt;
428 
429         freep = kinfo_getvmmap(getpid(), &cnt);
430         if (freep) {
431             mmap_lock();
432             for (i = 0; i < cnt; i++) {
433                 unsigned long startaddr, endaddr;
434 
435                 startaddr = freep[i].kve_start;
436                 endaddr = freep[i].kve_end;
437                 if (h2g_valid(startaddr)) {
438                     startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
439 
440                     if (h2g_valid(endaddr)) {
441                         endaddr = h2g(endaddr);
442                         page_set_flags(startaddr, endaddr, PAGE_RESERVED);
443                     } else {
444 #if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
445                         endaddr = ~0ul;
446                         page_set_flags(startaddr, endaddr, PAGE_RESERVED);
447 #endif
448                     }
449                 }
450             }
451             free(freep);
452             mmap_unlock();
453         }
454 #else
455         FILE *f;
456 
457         last_brk = (unsigned long)sbrk(0);
458 
459         f = fopen("/compat/linux/proc/self/maps", "r");
460         if (f) {
461             mmap_lock();
462 
463             do {
464                 unsigned long startaddr, endaddr;
465                 int n;
466 
467                 n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
468 
469                 if (n == 2 && h2g_valid(startaddr)) {
470                     startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
471 
472                     if (h2g_valid(endaddr)) {
473                         endaddr = h2g(endaddr);
474                     } else {
475                         endaddr = ~0ul;
476                     }
477                     page_set_flags(startaddr, endaddr, PAGE_RESERVED);
478                 }
479             } while (!feof(f));
480 
481             fclose(f);
482             mmap_unlock();
483         }
484 #endif
485     }
486 #endif
487 }
488 
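/*
 * Look up the PageDesc for page @index (i.e. the address shifted right by
 * TARGET_PAGE_BITS) by walking the l1_map radix tree.  If @alloc is set,
 * missing intermediate levels and the final PageDesc block are allocated on
 * the way down; racing allocators are resolved with cmpxchg so that exactly
 * one allocation is kept.  Returns NULL if the descriptor does not exist
 * and @alloc is 0.
 */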
489 static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
490 {
491     PageDesc *pd;
492     void **lp;
493     int i;
494 
495     /* Level 1.  Always allocated.  */
496     lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));
497 
498     /* Level 2..N-1.  */
499     for (i = v_l2_levels; i > 0; i--) {
500         void **p = qatomic_rcu_read(lp);
501 
502         if (p == NULL) {
503             void *existing;
504 
505             if (!alloc) {
506                 return NULL;
507             }
508             p = g_new0(void *, V_L2_SIZE);
509             existing = qatomic_cmpxchg(lp, NULL, p);
510             if (unlikely(existing)) {
511                 g_free(p);
512                 p = existing;
513             }
514         }
515 
516         lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
517     }
518 
519     pd = qatomic_rcu_read(lp);
520     if (pd == NULL) {
521         void *existing;
522 
523         if (!alloc) {
524             return NULL;
525         }
526         pd = g_new0(PageDesc, V_L2_SIZE);
527 #ifndef CONFIG_USER_ONLY
528         {
529             int i;
530 
531             for (i = 0; i < V_L2_SIZE; i++) {
532                 qemu_spin_init(&pd[i].lock);
533             }
534         }
535 #endif
536         existing = qatomic_cmpxchg(lp, NULL, pd);
537         if (unlikely(existing)) {
538 #ifndef CONFIG_USER_ONLY
539             {
540                 int i;
541 
542                 for (i = 0; i < V_L2_SIZE; i++) {
543                     qemu_spin_destroy(&pd[i].lock);
544                 }
545             }
546 #endif
547             g_free(pd);
548             pd = existing;
549         }
550     }
551 
552     return pd + (index & (V_L2_SIZE - 1));
553 }
554 
555 static inline PageDesc *page_find(tb_page_addr_t index)
556 {
557     return page_find_alloc(index, 0);
558 }
559 
560 static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
561                            PageDesc **ret_p2, tb_page_addr_t phys2, int alloc);
562 
563 /* In user-mode page locks aren't used; mmap_lock is enough */
564 #ifdef CONFIG_USER_ONLY
565 
566 #define assert_page_locked(pd) tcg_debug_assert(have_mmap_lock())
567 
568 static inline void page_lock(PageDesc *pd)
569 { }
570 
571 static inline void page_unlock(PageDesc *pd)
572 { }
573 
574 static inline void page_lock_tb(const TranslationBlock *tb)
575 { }
576 
577 static inline void page_unlock_tb(const TranslationBlock *tb)
578 { }
579 
580 struct page_collection *
581 page_collection_lock(tb_page_addr_t start, tb_page_addr_t end)
582 {
583     return NULL;
584 }
585 
586 void page_collection_unlock(struct page_collection *set)
587 { }
588 #else /* !CONFIG_USER_ONLY */
589 
590 #ifdef CONFIG_DEBUG_TCG
591 
592 static __thread GHashTable *ht_pages_locked_debug;
593 
594 static void ht_pages_locked_debug_init(void)
595 {
596     if (ht_pages_locked_debug) {
597         return;
598     }
599     ht_pages_locked_debug = g_hash_table_new(NULL, NULL);
600 }
601 
602 static bool page_is_locked(const PageDesc *pd)
603 {
604     PageDesc *found;
605 
606     ht_pages_locked_debug_init();
607     found = g_hash_table_lookup(ht_pages_locked_debug, pd);
608     return !!found;
609 }
610 
611 static void page_lock__debug(PageDesc *pd)
612 {
613     ht_pages_locked_debug_init();
614     g_assert(!page_is_locked(pd));
615     g_hash_table_insert(ht_pages_locked_debug, pd, pd);
616 }
617 
618 static void page_unlock__debug(const PageDesc *pd)
619 {
620     bool removed;
621 
622     ht_pages_locked_debug_init();
623     g_assert(page_is_locked(pd));
624     removed = g_hash_table_remove(ht_pages_locked_debug, pd);
625     g_assert(removed);
626 }
627 
628 static void
629 do_assert_page_locked(const PageDesc *pd, const char *file, int line)
630 {
631     if (unlikely(!page_is_locked(pd))) {
632         error_report("assert_page_lock: PageDesc %p not locked @ %s:%d",
633                      pd, file, line);
634         abort();
635     }
636 }
637 
638 #define assert_page_locked(pd) do_assert_page_locked(pd, __FILE__, __LINE__)
639 
640 void assert_no_pages_locked(void)
641 {
642     ht_pages_locked_debug_init();
643     g_assert(g_hash_table_size(ht_pages_locked_debug) == 0);
644 }
645 
646 #else /* !CONFIG_DEBUG_TCG */
647 
648 #define assert_page_locked(pd)
649 
650 static inline void page_lock__debug(const PageDesc *pd)
651 {
652 }
653 
654 static inline void page_unlock__debug(const PageDesc *pd)
655 {
656 }
657 
658 #endif /* CONFIG_DEBUG_TCG */
659 
660 static inline void page_lock(PageDesc *pd)
661 {
662     page_lock__debug(pd);
663     qemu_spin_lock(&pd->lock);
664 }
665 
666 static inline void page_unlock(PageDesc *pd)
667 {
668     qemu_spin_unlock(&pd->lock);
669     page_unlock__debug(pd);
670 }
671 
672 /* lock the page(s) of a TB in the correct acquisition order */
673 static inline void page_lock_tb(const TranslationBlock *tb)
674 {
675     page_lock_pair(NULL, tb->page_addr[0], NULL, tb->page_addr[1], 0);
676 }
677 
678 static inline void page_unlock_tb(const TranslationBlock *tb)
679 {
680     PageDesc *p1 = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
681 
682     page_unlock(p1);
683     if (unlikely(tb->page_addr[1] != -1)) {
684         PageDesc *p2 = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
685 
686         if (p2 != p1) {
687             page_unlock(p2);
688         }
689     }
690 }
691 
692 static inline struct page_entry *
693 page_entry_new(PageDesc *pd, tb_page_addr_t index)
694 {
695     struct page_entry *pe = g_malloc(sizeof(*pe));
696 
697     pe->index = index;
698     pe->pd = pd;
699     pe->locked = false;
700     return pe;
701 }
702 
703 static void page_entry_destroy(gpointer p)
704 {
705     struct page_entry *pe = p;
706 
707     g_assert(pe->locked);
708     page_unlock(pe->pd);
709     g_free(pe);
710 }
711 
712 /* Returns false on success (lock acquired), true if the lock was busy. */
713 static bool page_entry_trylock(struct page_entry *pe)
714 {
715     bool busy;
716 
717     busy = qemu_spin_trylock(&pe->pd->lock);
718     if (!busy) {
719         g_assert(!pe->locked);
720         pe->locked = true;
721         page_lock__debug(pe->pd);
722     }
723     return busy;
724 }
725 
726 static void do_page_entry_lock(struct page_entry *pe)
727 {
728     page_lock(pe->pd);
729     g_assert(!pe->locked);
730     pe->locked = true;
731 }
732 
733 static gboolean page_entry_lock(gpointer key, gpointer value, gpointer data)
734 {
735     struct page_entry *pe = value;
736 
737     do_page_entry_lock(pe);
738     return FALSE;
739 }
740 
741 static gboolean page_entry_unlock(gpointer key, gpointer value, gpointer data)
742 {
743     struct page_entry *pe = value;
744 
745     if (pe->locked) {
746         pe->locked = false;
747         page_unlock(pe->pd);
748     }
749     return FALSE;
750 }
751 
752 /*
753  * Trylock a page, and if successful, add the page to a collection.
754  * Returns true ("busy") if the page could not be locked; false otherwise.
755  */
756 static bool page_trylock_add(struct page_collection *set, tb_page_addr_t addr)
757 {
758     tb_page_addr_t index = addr >> TARGET_PAGE_BITS;
759     struct page_entry *pe;
760     PageDesc *pd;
761 
762     pe = g_tree_lookup(set->tree, &index);
763     if (pe) {
764         return false;
765     }
766 
767     pd = page_find(index);
768     if (pd == NULL) {
769         return false;
770     }
771 
772     pe = page_entry_new(pd, index);
773     g_tree_insert(set->tree, &pe->index, pe);
774 
775     /*
776      * If this is either (1) the first insertion or (2) a page whose index
777      * is higher than any other so far, just lock the page and move on.
778      */
779     if (set->max == NULL || pe->index > set->max->index) {
780         set->max = pe;
781         do_page_entry_lock(pe);
782         return false;
783     }
784     /*
785      * Try to acquire out-of-order lock; if busy, return busy so that we acquire
786      * locks in order.
787      */
788     return page_entry_trylock(pe);
789 }
790 
791 static gint tb_page_addr_cmp(gconstpointer ap, gconstpointer bp, gpointer udata)
792 {
793     tb_page_addr_t a = *(const tb_page_addr_t *)ap;
794     tb_page_addr_t b = *(const tb_page_addr_t *)bp;
795 
796     if (a == b) {
797         return 0;
798     } else if (a < b) {
799         return -1;
800     }
801     return 1;
802 }
803 
804 /*
805  * Lock a range of pages ([@start,@end[) as well as the pages of all
806  * intersecting TBs.
807  * Locking order: acquire locks in ascending order of page index.
808  */
809 struct page_collection *
810 page_collection_lock(tb_page_addr_t start, tb_page_addr_t end)
811 {
812     struct page_collection *set = g_malloc(sizeof(*set));
813     tb_page_addr_t index;
814     PageDesc *pd;
815 
816     start >>= TARGET_PAGE_BITS;
817     end   >>= TARGET_PAGE_BITS;
818     g_assert(start <= end);
819 
820     set->tree = g_tree_new_full(tb_page_addr_cmp, NULL, NULL,
821                                 page_entry_destroy);
822     set->max = NULL;
823     assert_no_pages_locked();
824 
825  retry:
826     g_tree_foreach(set->tree, page_entry_lock, NULL);
827 
828     for (index = start; index <= end; index++) {
829         TranslationBlock *tb;
830         int n;
831 
832         pd = page_find(index);
833         if (pd == NULL) {
834             continue;
835         }
836         if (page_trylock_add(set, index << TARGET_PAGE_BITS)) {
837             g_tree_foreach(set->tree, page_entry_unlock, NULL);
838             goto retry;
839         }
840         assert_page_locked(pd);
841         PAGE_FOR_EACH_TB(pd, tb, n) {
842             if (page_trylock_add(set, tb->page_addr[0]) ||
843                 (tb->page_addr[1] != -1 &&
844                  page_trylock_add(set, tb->page_addr[1]))) {
845                 /* drop all locks, and reacquire in order */
846                 g_tree_foreach(set->tree, page_entry_unlock, NULL);
847                 goto retry;
848             }
849         }
850     }
851     return set;
852 }
853 
854 void page_collection_unlock(struct page_collection *set)
855 {
856     /* entries are unlocked and freed via page_entry_destroy */
857     g_tree_destroy(set->tree);
858     g_free(set);
859 }
860 
861 #endif /* !CONFIG_USER_ONLY */
862 
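/*
 * Find (allocating them if @alloc is set) and lock the PageDesc of @phys1
 * and, when it is not -1, of @phys2, acquiring the two locks in ascending
 * order of page index so that the lock-ordering rule is respected.
 */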
863 static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
864                            PageDesc **ret_p2, tb_page_addr_t phys2, int alloc)
865 {
866     PageDesc *p1, *p2;
867     tb_page_addr_t page1;
868     tb_page_addr_t page2;
869 
870     assert_memory_lock();
871     g_assert(phys1 != -1);
872 
873     page1 = phys1 >> TARGET_PAGE_BITS;
874     page2 = phys2 >> TARGET_PAGE_BITS;
875 
876     p1 = page_find_alloc(page1, alloc);
877     if (ret_p1) {
878         *ret_p1 = p1;
879     }
880     if (likely(phys2 == -1)) {
881         page_lock(p1);
882         return;
883     } else if (page1 == page2) {
884         page_lock(p1);
885         if (ret_p2) {
886             *ret_p2 = p1;
887         }
888         return;
889     }
890     p2 = page_find_alloc(page2, alloc);
891     if (ret_p2) {
892         *ret_p2 = p2;
893     }
894     if (page1 < page2) {
895         page_lock(p1);
896         page_lock(p2);
897     } else {
898         page_lock(p2);
899         page_lock(p1);
900     }
901 }
902 
903 /* Minimum size of the code gen buffer.  This number is arbitrarily chosen,
904    but not so small that we can't have a fair number of TBs live.  */
905 #define MIN_CODE_GEN_BUFFER_SIZE     (1 * MiB)
906 
907 /* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
908    indicated, this is constrained by the range of direct branches on the
909    host cpu, as used by the TCG implementation of goto_tb.  */
910 #if defined(__x86_64__)
911 # define MAX_CODE_GEN_BUFFER_SIZE  (2 * GiB)
912 #elif defined(__sparc__)
913 # define MAX_CODE_GEN_BUFFER_SIZE  (2 * GiB)
914 #elif defined(__powerpc64__)
915 # define MAX_CODE_GEN_BUFFER_SIZE  (2 * GiB)
916 #elif defined(__powerpc__)
917 # define MAX_CODE_GEN_BUFFER_SIZE  (32 * MiB)
918 #elif defined(__aarch64__)
919 # define MAX_CODE_GEN_BUFFER_SIZE  (2 * GiB)
920 #elif defined(__s390x__)
921   /* We have a +- 4GB range on the branches; leave some slop.  */
922 # define MAX_CODE_GEN_BUFFER_SIZE  (3 * GiB)
923 #elif defined(__mips__)
924   /* We have a 256MB branch region, but leave room to make sure the
925      main executable is also within that region.  */
926 # define MAX_CODE_GEN_BUFFER_SIZE  (128 * MiB)
927 #else
928 # define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
929 #endif
930 
931 #if TCG_TARGET_REG_BITS == 32
932 #define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32 * MiB)
933 #ifdef CONFIG_USER_ONLY
934 /*
935  * For user mode on smaller 32-bit systems we may run into trouble
936  * allocating big chunks of data in the right place. On these systems
937  * we utilise a static code generation buffer directly in the binary.
938  */
939 #define USE_STATIC_CODE_GEN_BUFFER
940 #endif
941 #else /* TCG_TARGET_REG_BITS == 64 */
942 #ifdef CONFIG_USER_ONLY
943 /*
944  * As user-mode emulation typically means running multiple instances
945  * of the translator, don't go too nuts with our default code gen
946  * buffer lest we make things too hard for the OS.
947  */
948 #define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (128 * MiB)
949 #else
950 /*
951  * We expect most system emulation to run one or two guests per host.
952  * Users running large-scale system emulation may want to tweak their
953  * runtime setup via the tb-size control on the command line.
954  */
955 #define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (1 * GiB)
956 #endif
957 #endif
958 
959 #define DEFAULT_CODE_GEN_BUFFER_SIZE \
960   (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
961    ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)
962 
963 static size_t size_code_gen_buffer(size_t tb_size)
964 {
965     /* Size the buffer.  */
966     if (tb_size == 0) {
967         size_t phys_mem = qemu_get_host_physmem();
968         if (phys_mem == 0) {
969             tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
970         } else {
971             tb_size = MIN(DEFAULT_CODE_GEN_BUFFER_SIZE, phys_mem / 8);
972         }
973     }
974     if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
975         tb_size = MIN_CODE_GEN_BUFFER_SIZE;
976     }
977     if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
978         tb_size = MAX_CODE_GEN_BUFFER_SIZE;
979     }
980     return tb_size;
981 }
982 
983 #ifdef __mips__
984 /* In order to use J and JAL within the code_gen_buffer, we require
985    that the buffer not cross a 256MB boundary.  */
986 static inline bool cross_256mb(void *addr, size_t size)
987 {
988     return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful;
989 }
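/* Illustrative values: a 32 MiB buffer starting at 0x0fe00000 crosses (start
   and end differ above bit 27), while the same buffer at 0x10000000 does
   not.  */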
990 
991 /* We weren't able to allocate a buffer without crossing that boundary,
992    so make do with the larger portion of the buffer that doesn't cross.
993    Returns the new base of the buffer, and adjusts code_gen_buffer_size.  */
994 static inline void *split_cross_256mb(void *buf1, size_t size1)
995 {
996     void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful);
997     size_t size2 = buf1 + size1 - buf2;
998 
999     size1 = buf2 - buf1;
1000     if (size1 < size2) {
1001         size1 = size2;
1002         buf1 = buf2;
1003     }
1004 
1005     tcg_ctx->code_gen_buffer_size = size1;
1006     return buf1;
1007 }
1008 #endif
1009 
1010 #ifdef USE_STATIC_CODE_GEN_BUFFER
1011 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
1012     __attribute__((aligned(CODE_GEN_ALIGN)));
1013 
1014 static bool alloc_code_gen_buffer(size_t tb_size, int splitwx, Error **errp)
1015 {
1016     void *buf, *end;
1017     size_t size;
1018 
1019     if (splitwx > 0) {
1020         error_setg(errp, "jit split-wx not supported");
1021         return false;
1022     }
1023 
1024     /* page-align the beginning and end of the buffer */
1025     buf = static_code_gen_buffer;
1026     end = static_code_gen_buffer + sizeof(static_code_gen_buffer);
1027     buf = QEMU_ALIGN_PTR_UP(buf, qemu_real_host_page_size);
1028     end = QEMU_ALIGN_PTR_DOWN(end, qemu_real_host_page_size);
1029 
1030     size = end - buf;
1031 
1032     /* Honor a command-line option limiting the size of the buffer.  */
1033     if (size > tb_size) {
1034         size = QEMU_ALIGN_DOWN(tb_size, qemu_real_host_page_size);
1035     }
1036     tcg_ctx->code_gen_buffer_size = size;
1037 
1038 #ifdef __mips__
1039     if (cross_256mb(buf, size)) {
1040         buf = split_cross_256mb(buf, size);
1041         size = tcg_ctx->code_gen_buffer_size;
1042     }
1043 #endif
1044 
1045     if (qemu_mprotect_rwx(buf, size)) {
1046         error_setg_errno(errp, errno, "mprotect of jit buffer");
1047         return false;
1048     }
1049     qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
1050 
1051     tcg_ctx->code_gen_buffer = buf;
1052     return true;
1053 }
1054 #elif defined(_WIN32)
1055 static bool alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
1056 {
1057     void *buf;
1058 
1059     if (splitwx > 0) {
1060         error_setg(errp, "jit split-wx not supported");
1061         return false;
1062     }
1063 
1064     buf = VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT,
1065                              PAGE_EXECUTE_READWRITE);
1066     if (buf == NULL) {
1067         error_setg_win32(errp, GetLastError(),
1068                          "allocate %zu bytes for jit buffer", size);
1069         return false;
1070     }
1071 
1072     tcg_ctx->code_gen_buffer = buf;
1073     tcg_ctx->code_gen_buffer_size = size;
1074     return true;
1075 }
1076 #else
1077 static bool alloc_code_gen_buffer_anon(size_t size, int prot,
1078                                        int flags, Error **errp)
1079 {
1080     void *buf;
1081 
1082     buf = mmap(NULL, size, prot, flags, -1, 0);
1083     if (buf == MAP_FAILED) {
1084         error_setg_errno(errp, errno,
1085                          "allocate %zu bytes for jit buffer", size);
1086         return false;
1087     }
1088     tcg_ctx->code_gen_buffer_size = size;
1089 
1090 #ifdef __mips__
1091     if (cross_256mb(buf, size)) {
1092         /*
1093          * Try again, with the original still mapped, to avoid re-acquiring
1094          * the same 256mb crossing.
1095          */
1096         size_t size2;
1097         void *buf2 = mmap(NULL, size, prot, flags, -1, 0);
1098         switch ((int)(buf2 != MAP_FAILED)) {
1099         case 1:
1100             if (!cross_256mb(buf2, size)) {
1101                 /* Success!  Use the new buffer.  */
1102                 munmap(buf, size);
1103                 break;
1104             }
1105             /* Failure.  Work with what we had.  */
1106             munmap(buf2, size);
1107             /* fallthru */
1108         default:
1109             /* Split the original buffer.  Free the smaller half.  */
1110             buf2 = split_cross_256mb(buf, size);
1111             size2 = tcg_ctx->code_gen_buffer_size;
1112             if (buf == buf2) {
1113                 munmap(buf + size2, size - size2);
1114             } else {
1115                 munmap(buf, size - size2);
1116             }
1117             size = size2;
1118             break;
1119         }
1120         buf = buf2;
1121     }
1122 #endif
1123 
1124     /* Request large pages for the buffer.  */
1125     qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
1126 
1127     tcg_ctx->code_gen_buffer = buf;
1128     return true;
1129 }
1130 
1131 #ifndef CONFIG_TCG_INTERPRETER
1132 #ifdef CONFIG_POSIX
1133 #include "qemu/memfd.h"
1134 
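/*
 * Split-wx via memfd: the same memfd-backed pages are mapped twice, once
 * read-write for the translator (code_gen_buffer) and once read-execute for
 * running the generated code; tcg_splitwx_diff records the constant offset
 * from the RW mapping to the RX one.
 */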
1135 static bool alloc_code_gen_buffer_splitwx_memfd(size_t size, Error **errp)
1136 {
1137     void *buf_rw = NULL, *buf_rx = MAP_FAILED;
1138     int fd = -1;
1139 
1140 #ifdef __mips__
1141     /* Find space for the RX mapping, vs the 256MiB regions. */
1142     if (!alloc_code_gen_buffer_anon(size, PROT_NONE,
1143                                     MAP_PRIVATE | MAP_ANONYMOUS |
1144                                     MAP_NORESERVE, errp)) {
1145         return false;
1146     }
1147     /* The size of the mapping may have been adjusted. */
1148     size = tcg_ctx->code_gen_buffer_size;
1149     buf_rx = tcg_ctx->code_gen_buffer;
1150 #endif
1151 
1152     buf_rw = qemu_memfd_alloc("tcg-jit", size, 0, &fd, errp);
1153     if (buf_rw == NULL) {
1154         goto fail;
1155     }
1156 
1157 #ifdef __mips__
1158     void *tmp = mmap(buf_rx, size, PROT_READ | PROT_EXEC,
1159                      MAP_SHARED | MAP_FIXED, fd, 0);
1160     if (tmp != buf_rx) {
1161         goto fail_rx;
1162     }
1163 #else
1164     buf_rx = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_SHARED, fd, 0);
1165     if (buf_rx == MAP_FAILED) {
1166         goto fail_rx;
1167     }
1168 #endif
1169 
1170     close(fd);
1171     tcg_ctx->code_gen_buffer = buf_rw;
1172     tcg_ctx->code_gen_buffer_size = size;
1173     tcg_splitwx_diff = buf_rx - buf_rw;
1174 
1175     /* Request large pages for the buffer and the splitwx.  */
1176     qemu_madvise(buf_rw, size, QEMU_MADV_HUGEPAGE);
1177     qemu_madvise(buf_rx, size, QEMU_MADV_HUGEPAGE);
1178     return true;
1179 
1180  fail_rx:
1181     error_setg_errno(errp, errno, "failed to map shared memory for execute");
1182  fail:
1183     if (buf_rx != MAP_FAILED) {
1184         munmap(buf_rx, size);
1185     }
1186     if (buf_rw) {
1187         munmap(buf_rw, size);
1188     }
1189     if (fd >= 0) {
1190         close(fd);
1191     }
1192     return false;
1193 }
1194 #endif /* CONFIG_POSIX */
1195 
1196 #ifdef CONFIG_DARWIN
1197 #include <mach/mach.h>
1198 
1199 extern kern_return_t mach_vm_remap(vm_map_t target_task,
1200                                    mach_vm_address_t *target_address,
1201                                    mach_vm_size_t size,
1202                                    mach_vm_offset_t mask,
1203                                    int flags,
1204                                    vm_map_t src_task,
1205                                    mach_vm_address_t src_address,
1206                                    boolean_t copy,
1207                                    vm_prot_t *cur_protection,
1208                                    vm_prot_t *max_protection,
1209                                    vm_inherit_t inheritance);
1210 
1211 static bool alloc_code_gen_buffer_splitwx_vmremap(size_t size, Error **errp)
1212 {
1213     kern_return_t ret;
1214     mach_vm_address_t buf_rw, buf_rx;
1215     vm_prot_t cur_prot, max_prot;
1216 
1217     /* Map the read-write portion via normal anon memory. */
1218     if (!alloc_code_gen_buffer_anon(size, PROT_READ | PROT_WRITE,
1219                                     MAP_PRIVATE | MAP_ANONYMOUS, errp)) {
1220         return false;
1221     }
1222 
1223     buf_rw = (mach_vm_address_t)tcg_ctx->code_gen_buffer;
1224     buf_rx = 0;
1225     ret = mach_vm_remap(mach_task_self(),
1226                         &buf_rx,
1227                         size,
1228                         0,
1229                         VM_FLAGS_ANYWHERE,
1230                         mach_task_self(),
1231                         buf_rw,
1232                         false,
1233                         &cur_prot,
1234                         &max_prot,
1235                         VM_INHERIT_NONE);
1236     if (ret != KERN_SUCCESS) {
1237         /* TODO: Convert "ret" to a human readable error message. */
1238         error_setg(errp, "vm_remap for jit splitwx failed");
1239         munmap((void *)buf_rw, size);
1240         return false;
1241     }
1242 
1243     if (mprotect((void *)buf_rx, size, PROT_READ | PROT_EXEC) != 0) {
1244         error_setg_errno(errp, errno, "mprotect for jit splitwx");
1245         munmap((void *)buf_rx, size);
1246         munmap((void *)buf_rw, size);
1247         return false;
1248     }
1249 
1250     tcg_splitwx_diff = buf_rx - buf_rw;
1251     return true;
1252 }
1253 #endif /* CONFIG_DARWIN */
1254 #endif /* CONFIG_TCG_INTERPRETER */
1255 
1256 static bool alloc_code_gen_buffer_splitwx(size_t size, Error **errp)
1257 {
1258 #ifndef CONFIG_TCG_INTERPRETER
1259 # ifdef CONFIG_DARWIN
1260     return alloc_code_gen_buffer_splitwx_vmremap(size, errp);
1261 # endif
1262 # ifdef CONFIG_POSIX
1263     return alloc_code_gen_buffer_splitwx_memfd(size, errp);
1264 # endif
1265 #endif
1266     error_setg(errp, "jit split-wx not supported");
1267     return false;
1268 }
1269 
1270 static bool alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
1271 {
1272     ERRP_GUARD();
1273     int prot, flags;
1274 
1275     if (splitwx) {
1276         if (alloc_code_gen_buffer_splitwx(size, errp)) {
1277             return true;
1278         }
1279         /*
1280          * If splitwx force-on (1), fail;
1281          * if splitwx default-on (-1), fall through to splitwx off.
1282          */
1283         if (splitwx > 0) {
1284             return false;
1285         }
1286         error_free_or_abort(errp);
1287     }
1288 
1289     prot = PROT_READ | PROT_WRITE | PROT_EXEC;
1290     flags = MAP_PRIVATE | MAP_ANONYMOUS;
1291 #ifdef CONFIG_TCG_INTERPRETER
1292     /* The tcg interpreter does not need execute permission. */
1293     prot = PROT_READ | PROT_WRITE;
1294 #elif defined(CONFIG_DARWIN)
1295     /* Applicable to both iOS and macOS (Apple Silicon). */
1296     if (!splitwx) {
1297         flags |= MAP_JIT;
1298     }
1299 #endif
1300 
1301     return alloc_code_gen_buffer_anon(size, prot, flags, errp);
1302 }
1303 #endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */
1304 
1305 static bool tb_cmp(const void *ap, const void *bp)
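/*
 * qht comparison callback for tb_ctx.htable: two TranslationBlocks describe
 * the same translation iff their guest pc, cs_base, flags, cflags (ignoring
 * CF_INVALID), trace state and physical page addresses all match.
 */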
1306 {
1307     const TranslationBlock *a = ap;
1308     const TranslationBlock *b = bp;
1309 
1310     return a->pc == b->pc &&
1311         a->cs_base == b->cs_base &&
1312         a->flags == b->flags &&
1313         (tb_cflags(a) & ~CF_INVALID) == (tb_cflags(b) & ~CF_INVALID) &&
1314         a->trace_vcpu_dstate == b->trace_vcpu_dstate &&
1315         a->page_addr[0] == b->page_addr[0] &&
1316         a->page_addr[1] == b->page_addr[1];
1317 }
1318 
1319 static void tb_htable_init(void)
1320 {
1321     unsigned int mode = QHT_MODE_AUTO_RESIZE;
1322 
1323     qht_init(&tb_ctx.htable, tb_cmp, CODE_GEN_HTABLE_SIZE, mode);
1324 }
1325 
1326 /* Must be called before using the QEMU cpus. 'tb_size' is the size
1327    (in bytes) allocated to the translation buffer. Zero means default
1328    size. */
1329 void tcg_exec_init(unsigned long tb_size, int splitwx)
1330 {
1331     bool ok;
1332 
1333     tcg_allowed = true;
1334     cpu_gen_init();
1335     page_init();
1336     tb_htable_init();
1337 
1338     ok = alloc_code_gen_buffer(size_code_gen_buffer(tb_size),
1339                                splitwx, &error_fatal);
1340     assert(ok);
1341 
1342 #if defined(CONFIG_SOFTMMU)
1343     /* There's no guest base to take into account, so go ahead and
1344        initialize the prologue now.  */
1345     tcg_prologue_init(tcg_ctx);
1346 #endif
1347 }
1348 
1349 /* call with @p->lock held */
1350 static inline void invalidate_page_bitmap(PageDesc *p)
1351 {
1352     assert_page_locked(p);
1353 #ifdef CONFIG_SOFTMMU
1354     g_free(p->code_bitmap);
1355     p->code_bitmap = NULL;
1356     p->code_write_count = 0;
1357 #endif
1358 }
1359 
1360 /* Set the 'first_tb' field of every PageDesc to NULL. */
1361 static void page_flush_tb_1(int level, void **lp)
1362 {
1363     int i;
1364 
1365     if (*lp == NULL) {
1366         return;
1367     }
1368     if (level == 0) {
1369         PageDesc *pd = *lp;
1370 
1371         for (i = 0; i < V_L2_SIZE; ++i) {
1372             page_lock(&pd[i]);
1373             pd[i].first_tb = (uintptr_t)NULL;
1374             invalidate_page_bitmap(pd + i);
1375             page_unlock(&pd[i]);
1376         }
1377     } else {
1378         void **pp = *lp;
1379 
1380         for (i = 0; i < V_L2_SIZE; ++i) {
1381             page_flush_tb_1(level - 1, pp + i);
1382         }
1383     }
1384 }
1385 
1386 static void page_flush_tb(void)
1387 {
1388     int i, l1_sz = v_l1_size;
1389 
1390     for (i = 0; i < l1_sz; i++) {
1391         page_flush_tb_1(v_l2_levels, l1_map + i);
1392     }
1393 }
1394 
1395 static gboolean tb_host_size_iter(gpointer key, gpointer value, gpointer data)
1396 {
1397     const TranslationBlock *tb = value;
1398     size_t *size = data;
1399 
1400     *size += tb->tc.size;
1401     return false;
1402 }
1403 
1404 /* flush all the translation blocks */
1405 static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
1406 {
1407     bool did_flush = false;
1408 
1409     mmap_lock();
1410     /* If it has already been done on request of another CPU,
1411      * just retry.
1412      */
1413     if (tb_ctx.tb_flush_count != tb_flush_count.host_int) {
1414         goto done;
1415     }
1416     did_flush = true;
1417 
1418     if (DEBUG_TB_FLUSH_GATE) {
1419         size_t nb_tbs = tcg_nb_tbs();
1420         size_t host_size = 0;
1421 
1422         tcg_tb_foreach(tb_host_size_iter, &host_size);
1423         printf("qemu: flush code_size=%zu nb_tbs=%zu avg_tb_size=%zu\n",
1424                tcg_code_size(), nb_tbs, nb_tbs > 0 ? host_size / nb_tbs : 0);
1425     }
1426 
1427     CPU_FOREACH(cpu) {
1428         cpu_tb_jmp_cache_clear(cpu);
1429     }
1430 
1431     qht_reset_size(&tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
1432     page_flush_tb();
1433 
1434     tcg_region_reset_all();
1435     /* XXX: flush processor icache at this point if cache flush is
1436        expensive */
1437     qatomic_mb_set(&tb_ctx.tb_flush_count, tb_ctx.tb_flush_count + 1);
1438 
1439 done:
1440     mmap_unlock();
1441     if (did_flush) {
1442         qemu_plugin_flush_cb();
1443     }
1444 }
1445 
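/*
 * Flush all TBs.  Outside of an exclusive context the flush is deferred via
 * async_safe_run_on_cpu(); passing the current tb_flush_count along lets
 * do_tb_flush() detect that another request already performed the flush and
 * turn this one into a no-op.
 */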
1446 void tb_flush(CPUState *cpu)
1447 {
1448     if (tcg_enabled()) {
1449         unsigned tb_flush_count = qatomic_mb_read(&tb_ctx.tb_flush_count);
1450 
1451         if (cpu_in_exclusive_context(cpu)) {
1452             do_tb_flush(cpu, RUN_ON_CPU_HOST_INT(tb_flush_count));
1453         } else {
1454             async_safe_run_on_cpu(cpu, do_tb_flush,
1455                                   RUN_ON_CPU_HOST_INT(tb_flush_count));
1456         }
1457     }
1458 }
1459 
1460 /*
1461  * Formerly ifdef DEBUG_TB_CHECK. These debug functions are user-mode-only,
1462  * so in order to prevent bit rot we compile them unconditionally in user-mode,
1463  * and let the optimizer get rid of them by wrapping their user-only callers
1464  * with if (DEBUG_TB_CHECK_GATE).
1465  */
1466 #ifdef CONFIG_USER_ONLY
1467 
1468 static void do_tb_invalidate_check(void *p, uint32_t hash, void *userp)
1469 {
1470     TranslationBlock *tb = p;
1471     target_ulong addr = *(target_ulong *)userp;
1472 
1473     if (!(addr + TARGET_PAGE_SIZE <= tb->pc || addr >= tb->pc + tb->size)) {
1474         printf("ERROR invalidate: address=" TARGET_FMT_lx
1475                " PC=%08lx size=%04x\n", addr, (long)tb->pc, tb->size);
1476     }
1477 }
1478 
1479 /* verify that all the pages have correct rights for code
1480  *
1481  * Called with mmap_lock held.
1482  */
1483 static void tb_invalidate_check(target_ulong address)
1484 {
1485     address &= TARGET_PAGE_MASK;
1486     qht_iter(&tb_ctx.htable, do_tb_invalidate_check, &address);
1487 }
1488 
1489 static void do_tb_page_check(void *p, uint32_t hash, void *userp)
1490 {
1491     TranslationBlock *tb = p;
1492     int flags1, flags2;
1493 
1494     flags1 = page_get_flags(tb->pc);
1495     flags2 = page_get_flags(tb->pc + tb->size - 1);
1496     if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
1497         printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
1498                (long)tb->pc, tb->size, flags1, flags2);
1499     }
1500 }
1501 
1502 /* verify that all the pages have correct rights for code */
1503 static void tb_page_check(void)
1504 {
1505     qht_iter(&tb_ctx.htable, do_tb_page_check, NULL);
1506 }
1507 
1508 #endif /* CONFIG_USER_ONLY */
1509 
1510 /*
1511  * user-mode: call with mmap_lock held
1512  * !user-mode: call with @pd->lock held
1513  */
1514 static inline void tb_page_remove(PageDesc *pd, TranslationBlock *tb)
1515 {
1516     TranslationBlock *tb1;
1517     uintptr_t *pprev;
1518     unsigned int n1;
1519 
1520     assert_page_locked(pd);
1521     pprev = &pd->first_tb;
1522     PAGE_FOR_EACH_TB(pd, tb1, n1) {
1523         if (tb1 == tb) {
1524             *pprev = tb1->page_next[n1];
1525             return;
1526         }
1527         pprev = &tb1->page_next[n1];
1528     }
1529     g_assert_not_reached();
1530 }
1531 
1532 /* remove @orig from the jump list of its @n_orig-th destination */
1533 static inline void tb_remove_from_jmp_list(TranslationBlock *orig, int n_orig)
1534 {
1535     uintptr_t ptr, ptr_locked;
1536     TranslationBlock *dest;
1537     TranslationBlock *tb;
1538     uintptr_t *pprev;
1539     int n;
1540 
1541     /* mark the LSB of jmp_dest[] so that no further jumps can be inserted */
1542     ptr = qatomic_or_fetch(&orig->jmp_dest[n_orig], 1);
1543     dest = (TranslationBlock *)(ptr & ~1);
1544     if (dest == NULL) {
1545         return;
1546     }
1547 
1548     qemu_spin_lock(&dest->jmp_lock);
1549     /*
1550      * While acquiring the lock, the jump might have been removed if the
1551      * destination TB was invalidated; check again.
1552      */
1553     ptr_locked = qatomic_read(&orig->jmp_dest[n_orig]);
1554     if (ptr_locked != ptr) {
1555         qemu_spin_unlock(&dest->jmp_lock);
1556         /*
1557          * The only possibility is that the jump was unlinked via
1558          * tb_jmp_unlink(dest). Seeing another destination here would be a bug,
1559          * because we set the LSB above.
1560          */
1561         g_assert(ptr_locked == 1 && dest->cflags & CF_INVALID);
1562         return;
1563     }
1564     /*
1565      * We first acquired the lock, and since the destination pointer matches,
1566      * we know for sure that @orig is in the jmp list.
1567      */
1568     pprev = &dest->jmp_list_head;
1569     TB_FOR_EACH_JMP(dest, tb, n) {
1570         if (tb == orig && n == n_orig) {
1571             *pprev = tb->jmp_list_next[n];
1572             /* no need to set orig->jmp_dest[n]; setting the LSB was enough */
1573             qemu_spin_unlock(&dest->jmp_lock);
1574             return;
1575         }
1576         pprev = &tb->jmp_list_next[n];
1577     }
1578     g_assert_not_reached();
1579 }
1580 
1581 /* reset the jump entry 'n' of a TB so that it is not chained to
1582    another TB */
1583 static inline void tb_reset_jump(TranslationBlock *tb, int n)
1584 {
1585     uintptr_t addr = (uintptr_t)(tb->tc.ptr + tb->jmp_reset_offset[n]);
1586     tb_set_jmp_target(tb, n, addr);
1587 }
1588 
1589 /* remove any jumps to the TB */
1590 static inline void tb_jmp_unlink(TranslationBlock *dest)
1591 {
1592     TranslationBlock *tb;
1593     int n;
1594 
1595     qemu_spin_lock(&dest->jmp_lock);
1596 
1597     TB_FOR_EACH_JMP(dest, tb, n) {
1598         tb_reset_jump(tb, n);
1599         qatomic_and(&tb->jmp_dest[n], (uintptr_t)NULL | 1);
1600         /* No need to clear the list entry; setting the dest ptr is enough */
1601     }
1602     dest->jmp_list_head = (uintptr_t)NULL;
1603 
1604     qemu_spin_unlock(&dest->jmp_lock);
1605 }
1606 
1607 /*
1608  * In user-mode, call with mmap_lock held.
1609  * In !user-mode, if @rm_from_page_list is set, call with the TB's pages'
1610  * locks held.
1611  */
1612 static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
1613 {
1614     CPUState *cpu;
1615     PageDesc *p;
1616     uint32_t h;
1617     tb_page_addr_t phys_pc;
1618     uint32_t orig_cflags = tb_cflags(tb);
1619 
1620     assert_memory_lock();
1621 
1622     /* make sure no further incoming jumps will be chained to this TB */
1623     qemu_spin_lock(&tb->jmp_lock);
1624     qatomic_set(&tb->cflags, tb->cflags | CF_INVALID);
1625     qemu_spin_unlock(&tb->jmp_lock);
1626 
1627     /* remove the TB from the hash list */
1628     phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1629     h = tb_hash_func(phys_pc, tb->pc, tb->flags, orig_cflags,
1630                      tb->trace_vcpu_dstate);
1631     if (!qht_remove(&tb_ctx.htable, tb, h)) {
1632         return;
1633     }
1634 
1635     /* remove the TB from the page list */
1636     if (rm_from_page_list) {
1637         p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
1638         tb_page_remove(p, tb);
1639         invalidate_page_bitmap(p);
1640         if (tb->page_addr[1] != -1) {
1641             p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
1642             tb_page_remove(p, tb);
1643             invalidate_page_bitmap(p);
1644         }
1645     }
1646 
1647     /* remove the TB from each CPU's tb_jmp_cache */
1648     h = tb_jmp_cache_hash_func(tb->pc);
1649     CPU_FOREACH(cpu) {
1650         if (qatomic_read(&cpu->tb_jmp_cache[h]) == tb) {
1651             qatomic_set(&cpu->tb_jmp_cache[h], NULL);
1652         }
1653     }
1654 
1655     /* suppress this TB from the two jump lists */
1656     /* remove this TB from the jump lists of its two destinations */
1657     tb_remove_from_jmp_list(tb, 1);
1658 
1659     /* suppress any remaining jumps to this TB */
1660     tb_jmp_unlink(tb);
1661 
1662     qatomic_set(&tcg_ctx->tb_phys_invalidate_count,
1663                tcg_ctx->tb_phys_invalidate_count + 1);
1664 }
1665 
1666 static void tb_phys_invalidate__locked(TranslationBlock *tb)
1667 {
1668     qemu_thread_jit_write();
1669     do_tb_phys_invalidate(tb, true);
1670     qemu_thread_jit_execute();
1671 }
1672 
1673 /* invalidate one TB
1674  *
1675  * Called with mmap_lock held in user-mode.
1676  */
1677 void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
1678 {
1679     if (page_addr == -1 && tb->page_addr[0] != -1) {
1680         page_lock_tb(tb);
1681         do_tb_phys_invalidate(tb, true);
1682         page_unlock_tb(tb);
1683     } else {
1684         do_tb_phys_invalidate(tb, false);
1685     }
1686 }
1687 
1688 #ifdef CONFIG_SOFTMMU
1689 /* call with @p->lock held */
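/* Build a bitmap, one bit per byte, of the parts of this page that are
   covered by translated code, so that writes touching none of those bytes
   can be recognised without invalidating anything. */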
1690 static void build_page_bitmap(PageDesc *p)
1691 {
1692     int n, tb_start, tb_end;
1693     TranslationBlock *tb;
1694 
1695     assert_page_locked(p);
1696     p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE);
1697 
1698     PAGE_FOR_EACH_TB(p, tb, n) {
1699         /* NOTE: this is subtle as a TB may span two physical pages */
1700         if (n == 0) {
1701             /* NOTE: tb_end may be after the end of the page, but
1702                it is not a problem */
1703             tb_start = tb->pc & ~TARGET_PAGE_MASK;
1704             tb_end = tb_start + tb->size;
1705             if (tb_end > TARGET_PAGE_SIZE) {
1706                 tb_end = TARGET_PAGE_SIZE;
1707             }
1708         } else {
1709             tb_start = 0;
1710             tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1711         }
1712         bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start);
1713     }
1714 }
1715 #endif
1716 
1717 /* add the tb to the target page and protect it if necessary
1718  *
1719  * Called with mmap_lock held for user-mode emulation.
1720  * Called with @p->lock held in !user-mode.
1721  */
1722 static inline void tb_page_add(PageDesc *p, TranslationBlock *tb,
1723                                unsigned int n, tb_page_addr_t page_addr)
1724 {
1725 #ifndef CONFIG_USER_ONLY
1726     bool page_already_protected;
1727 #endif
1728 
1729     assert_page_locked(p);
1730 
1731     tb->page_addr[n] = page_addr;
1732     tb->page_next[n] = p->first_tb;
1733 #ifndef CONFIG_USER_ONLY
1734     page_already_protected = p->first_tb != (uintptr_t)NULL;
1735 #endif
1736     p->first_tb = (uintptr_t)tb | n;
1737     invalidate_page_bitmap(p);
1738 
1739 #if defined(CONFIG_USER_ONLY)
1740     if (p->flags & PAGE_WRITE) {
1741         target_ulong addr;
1742         PageDesc *p2;
1743         int prot;
1744 
1745         /* force the host page to be non-writable (writes will incur a
1746            page fault + mprotect overhead) */
1747         page_addr &= qemu_host_page_mask;
1748         prot = 0;
1749         for (addr = page_addr; addr < page_addr + qemu_host_page_size;
1750             addr += TARGET_PAGE_SIZE) {
1751 
1752             p2 = page_find(addr >> TARGET_PAGE_BITS);
1753             if (!p2) {
1754                 continue;
1755             }
1756             prot |= p2->flags;
1757             p2->flags &= ~PAGE_WRITE;
1758         }
1759         mprotect(g2h_untagged(page_addr), qemu_host_page_size,
1760                  (prot & PAGE_BITS) & ~PAGE_WRITE);
1761         if (DEBUG_TB_INVALIDATE_GATE) {
1762             printf("protecting code page: 0x" TB_PAGE_ADDR_FMT "\n", page_addr);
1763         }
1764     }
1765 #else
1766     /* if some code is already present then the page is already
1767        protected, so we only need to protect it when the first TB
1768        is added to the physical page */
1769     if (!page_already_protected) {
1770         tlb_protect_code(page_addr);
1771     }
1772 #endif
1773 }
1774 
1775 /*
1776  * Add a new TB and link it to the physical page tables. phys_page2 is
1777  * (-1) to indicate that only one page contains the TB.
1778  *
1779  * Called with mmap_lock held for user-mode emulation.
1780  *
1781  * Returns a pointer to @tb, or a pointer to an existing TB that matches @tb.
1782  * Note that in !user-mode, another thread might have already added a TB
1783  * for the same block of guest code that @tb corresponds to. In that case,
1784  * the caller should discard the original @tb, and use instead the returned TB.
1785  */
1786 static TranslationBlock *
1787 tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
1788              tb_page_addr_t phys_page2)
1789 {
1790     PageDesc *p;
1791     PageDesc *p2 = NULL;
1792     void *existing_tb = NULL;
1793     uint32_t h;
1794 
1795     assert_memory_lock();
1796     tcg_debug_assert(!(tb->cflags & CF_INVALID));
1797 
1798     /*
1799      * Add the TB to the page list, first acquiring the pages' locks.
1800      * We keep the locks held until after inserting the TB in the hash table,
1801      * so that if the insertion fails we know for sure that the TBs are still
1802      * in the page descriptors.
1803      * Note that inserting into the hash table first isn't an option, since
1804      * we can only insert TBs that are fully initialized.
1805      */
1806     page_lock_pair(&p, phys_pc, &p2, phys_page2, 1);
1807     tb_page_add(p, tb, 0, phys_pc & TARGET_PAGE_MASK);
1808     if (p2) {
1809         tb_page_add(p2, tb, 1, phys_page2);
1810     } else {
1811         tb->page_addr[1] = -1;
1812     }
1813 
1814     /* add in the hash table */
1815     h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->cflags,
1816                      tb->trace_vcpu_dstate);
1817     qht_insert(&tb_ctx.htable, tb, h, &existing_tb);
1818 
1819     /* remove TB from the page(s) if we couldn't insert it */
1820     if (unlikely(existing_tb)) {
1821         tb_page_remove(p, tb);
1822         invalidate_page_bitmap(p);
1823         if (p2) {
1824             tb_page_remove(p2, tb);
1825             invalidate_page_bitmap(p2);
1826         }
1827         tb = existing_tb;
1828     }
1829 
1830     if (p2 && p2 != p) {
1831         page_unlock(p2);
1832     }
1833     page_unlock(p);
1834 
1835 #ifdef CONFIG_USER_ONLY
1836     if (DEBUG_TB_CHECK_GATE) {
1837         tb_page_check();
1838     }
1839 #endif
1840     return tb;
1841 }
1842 
1843 /* Called with mmap_lock held for user mode emulation.  */
1844 TranslationBlock *tb_gen_code(CPUState *cpu,
1845                               target_ulong pc, target_ulong cs_base,
1846                               uint32_t flags, int cflags)
1847 {
1848     CPUArchState *env = cpu->env_ptr;
1849     TranslationBlock *tb, *existing_tb;
1850     tb_page_addr_t phys_pc, phys_page2;
1851     target_ulong virt_page2;
1852     tcg_insn_unit *gen_code_buf;
1853     int gen_code_size, search_size, max_insns;
1854 #ifdef CONFIG_PROFILER
1855     TCGProfile *prof = &tcg_ctx->prof;
1856     int64_t ti;
1857 #endif
1858 
1859     assert_memory_lock();
1860     qemu_thread_jit_write();
1861 
1862     phys_pc = get_page_addr_code(env, pc);
1863 
1864     if (phys_pc == -1) {
1865         /* Generate a one-shot TB with 1 insn in it */
1866         cflags = (cflags & ~CF_COUNT_MASK) | CF_LAST_IO | 1;
1867     }
1868 
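    /*
     * A zero CF_COUNT_MASK value means no instruction limit was requested;
     * clamp to the TCG maximum, and force single-instruction TBs when
     * single-stepping.
     */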
1869     max_insns = cflags & CF_COUNT_MASK;
1870     if (max_insns == 0) {
1871         max_insns = CF_COUNT_MASK;
1872     }
1873     if (max_insns > TCG_MAX_INSNS) {
1874         max_insns = TCG_MAX_INSNS;
1875     }
1876     if (cpu->singlestep_enabled || singlestep) {
1877         max_insns = 1;
1878     }
1879 
1880  buffer_overflow:
1881     tb = tcg_tb_alloc(tcg_ctx);
1882     if (unlikely(!tb)) {
1883         /* flush must be done */
1884         tb_flush(cpu);
1885         mmap_unlock();
1886         /* Make the execution loop process the flush as soon as possible.  */
1887         cpu->exception_index = EXCP_INTERRUPT;
1888         cpu_loop_exit(cpu);
1889     }
1890 
1891     gen_code_buf = tcg_ctx->code_gen_ptr;
1892     tb->tc.ptr = tcg_splitwx_to_rx(gen_code_buf);
1893     tb->pc = pc;
1894     tb->cs_base = cs_base;
1895     tb->flags = flags;
1896     tb->cflags = cflags;
1897     tb->trace_vcpu_dstate = *cpu->trace_dstate;
1898     tcg_ctx->tb_cflags = cflags;
1899  tb_overflow:
1900 
1901 #ifdef CONFIG_PROFILER
1902     /* includes aborted translations because of exceptions */
1903     qatomic_set(&prof->tb_count1, prof->tb_count1 + 1);
1904     ti = profile_getclock();
1905 #endif
1906 
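    /*
     * sigsetjmp() returns 0 here on the initial call; if code generation
     * bails out via siglongjmp(), it returns the negative error code
     * instead, which is handled by the error_return path below.
     */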
1907     gen_code_size = sigsetjmp(tcg_ctx->jmp_trans, 0);
1908     if (unlikely(gen_code_size != 0)) {
1909         goto error_return;
1910     }
1911 
1912     tcg_func_start(tcg_ctx);
1913 
1914     tcg_ctx->cpu = env_cpu(env);
1915     gen_intermediate_code(cpu, tb, max_insns);
1916     tcg_ctx->cpu = NULL;
1917     max_insns = tb->icount;
1918 
1919     trace_translate_block(tb, tb->pc, tb->tc.ptr);
1920 
1921     /* generate machine code */
1922     tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
1923     tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID;
1924     tcg_ctx->tb_jmp_reset_offset = tb->jmp_reset_offset;
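    /*
     * With direct jumps the backend records the offsets of the jump insns
     * in jmp_target_arg so they can later be patched when TBs are chained;
     * otherwise jmp_target_arg holds the jump target addresses, which the
     * generated code loads indirectly.
     */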
1925     if (TCG_TARGET_HAS_direct_jump) {
1926         tcg_ctx->tb_jmp_insn_offset = tb->jmp_target_arg;
1927         tcg_ctx->tb_jmp_target_addr = NULL;
1928     } else {
1929         tcg_ctx->tb_jmp_insn_offset = NULL;
1930         tcg_ctx->tb_jmp_target_addr = tb->jmp_target_arg;
1931     }
1932 
1933 #ifdef CONFIG_PROFILER
1934     qatomic_set(&prof->tb_count, prof->tb_count + 1);
1935     qatomic_set(&prof->interm_time,
1936                 prof->interm_time + profile_getclock() - ti);
1937     ti = profile_getclock();
1938 #endif
1939 
1940     gen_code_size = tcg_gen_code(tcg_ctx, tb);
1941     if (unlikely(gen_code_size < 0)) {
1942  error_return:
1943         switch (gen_code_size) {
1944         case -1:
1945             /*
1946              * Overflow of code_gen_buffer, or the current slice of it.
1947              *
1948              * TODO: We don't need to re-do gen_intermediate_code, nor
1949              * should we re-do the tcg optimization currently hidden
1950              * inside tcg_gen_code.  All that should be required is to
1951              * flush the TBs, allocate a new TB, re-initialize it per
1952              * above, and re-do the actual code generation.
1953              */
1954             qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT,
1955                           "Restarting code generation for "
1956                           "code_gen_buffer overflow\n");
1957             goto buffer_overflow;
1958 
1959         case -2:
1960             /*
1961              * The code generated for the TranslationBlock is too large.
1962              * The maximum size allowed by the unwind info is 64k.
1963              * There may be stricter constraints from relocations
1964              * in the tcg backend.
1965              *
1966              * Try again with half as many insns as we attempted this time.
1967              * If a single insn overflows, there's a bug somewhere...
1968              */
1969             assert(max_insns > 1);
1970             max_insns /= 2;
1971             qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT,
1972                           "Restarting code generation with "
1973                           "smaller translation block (max %d insns)\n",
1974                           max_insns);
1975             goto tb_overflow;
1976 
1977         default:
1978             g_assert_not_reached();
1979         }
1980     }
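    /*
     * The search data used to map host PCs back to guest state is stored
     * right after the generated code; a negative size means that it, too,
     * overflowed the current code_gen_buffer slice.
     */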
1981     search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
1982     if (unlikely(search_size < 0)) {
1983         goto buffer_overflow;
1984     }
1985     tb->tc.size = gen_code_size;
1986 
1987 #ifdef CONFIG_PROFILER
1988     qatomic_set(&prof->code_time, prof->code_time + profile_getclock() - ti);
1989     qatomic_set(&prof->code_in_len, prof->code_in_len + tb->size);
1990     qatomic_set(&prof->code_out_len, prof->code_out_len + gen_code_size);
1991     qatomic_set(&prof->search_out_len, prof->search_out_len + search_size);
1992 #endif
1993 
1994 #ifdef DEBUG_DISAS
1995     if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
1996         qemu_log_in_addr_range(tb->pc)) {
1997         FILE *logfile = qemu_log_lock();
1998         int code_size, data_size;
1999         const tcg_target_ulong *rx_data_gen_ptr;
2000         size_t chunk_start;
2001         int insn = 0;
2002 
2003         if (tcg_ctx->data_gen_ptr) {
2004             rx_data_gen_ptr = tcg_splitwx_to_rx(tcg_ctx->data_gen_ptr);
2005             code_size = (const void *)rx_data_gen_ptr - tb->tc.ptr;
2006             data_size = gen_code_size - code_size;
2007         } else {
2008             rx_data_gen_ptr = 0;
2009             code_size = gen_code_size;
2010             data_size = 0;
2011         }
2012 
2013         /* Dump header and the first instruction */
2014         qemu_log("OUT: [size=%d]\n", gen_code_size);
2015         qemu_log("  -- guest addr 0x" TARGET_FMT_lx " + tb prologue\n",
2016                  tcg_ctx->gen_insn_data[insn][0]);
2017         chunk_start = tcg_ctx->gen_insn_end_off[insn];
2018         log_disas(tb->tc.ptr, chunk_start);
2019 
2020         /*
2021          * Dump each instruction chunk, wrapping up empty chunks into
2022          * the next instruction. The whole array is offset so the
2023          * first entry is the beginning of the 2nd instruction.
2024          */
2025         while (insn < tb->icount) {
2026             size_t chunk_end = tcg_ctx->gen_insn_end_off[insn];
2027             if (chunk_end > chunk_start) {
2028                 qemu_log("  -- guest addr 0x" TARGET_FMT_lx "\n",
2029                          tcg_ctx->gen_insn_data[insn][0]);
2030                 log_disas(tb->tc.ptr + chunk_start, chunk_end - chunk_start);
2031                 chunk_start = chunk_end;
2032             }
2033             insn++;
2034         }
2035 
2036         if (chunk_start < code_size) {
2037             qemu_log("  -- tb slow paths + alignment\n");
2038             log_disas(tb->tc.ptr + chunk_start, code_size - chunk_start);
2039         }
2040 
2041         /* Finally dump any data we may have after the block */
2042         if (data_size) {
2043             int i;
2044             qemu_log("  data: [size=%d]\n", data_size);
2045             for (i = 0; i < data_size / sizeof(tcg_target_ulong); i++) {
2046                 qemu_log("0x%08" PRIxPTR ":  .quad  0x%" TCG_PRIlx "\n",
2047                          (uintptr_t)&rx_data_gen_ptr[i], rx_data_gen_ptr[i]);
2048             }
2049         }
2050         qemu_log("\n");
2051         qemu_log_flush();
2052         qemu_log_unlock(logfile);
2053     }
2054 #endif
2055 
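    /*
     * Publish the space consumed by this TB: advance the shared
     * code_gen_ptr past the generated code and the search data, keeping it
     * aligned to CODE_GEN_ALIGN.
     */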
2056     qatomic_set(&tcg_ctx->code_gen_ptr, (void *)
2057         ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
2058                  CODE_GEN_ALIGN));
2059 
2060     /* init jump list */
2061     qemu_spin_init(&tb->jmp_lock);
2062     tb->jmp_list_head = (uintptr_t)NULL;
2063     tb->jmp_list_next[0] = (uintptr_t)NULL;
2064     tb->jmp_list_next[1] = (uintptr_t)NULL;
2065     tb->jmp_dest[0] = (uintptr_t)NULL;
2066     tb->jmp_dest[1] = (uintptr_t)NULL;
2067 
2068     /* init original jump addresses which have been set during tcg_gen_code() */
2069     if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
2070         tb_reset_jump(tb, 0);
2071     }
2072     if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
2073         tb_reset_jump(tb, 1);
2074     }
2075 
2076     /*
2077      * If the TB is not associated with a physical RAM page then
2078      * it must be a temporary one-insn TB, and we have nothing to do
2079      * except fill in the page_addr[] fields. Return early before
2080      * attempting to link to other TBs or add to the lookup table.
2081      */
2082     if (phys_pc == -1) {
2083         tb->page_addr[0] = tb->page_addr[1] = -1;
2084         return tb;
2085     }
2086 
2087     /* check next page if needed */
2088     virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
2089     phys_page2 = -1;
2090     if ((pc & TARGET_PAGE_MASK) != virt_page2) {
2091         phys_page2 = get_page_addr_code(env, virt_page2);
2092     }
2093     /*
2094      * No explicit memory barrier is required -- tb_link_page() makes the
2095      * TB visible in a consistent state.
2096      */
2097     existing_tb = tb_link_page(tb, phys_pc, phys_page2);
2098     /* if the TB already exists, discard what we just translated */
2099     if (unlikely(existing_tb != tb)) {
2100         uintptr_t orig_aligned = (uintptr_t)gen_code_buf;
2101 
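        /*
         * tcg_tb_alloc() places the TB struct just before the code buffer,
         * rounded up to the icache line size; rewinding code_gen_ptr by
         * that amount releases both the struct and the code we generated.
         */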
2102         orig_aligned -= ROUND_UP(sizeof(*tb), qemu_icache_linesize);
2103         qatomic_set(&tcg_ctx->code_gen_ptr, (void *)orig_aligned);
2104         tb_destroy(tb);
2105         return existing_tb;
2106     }
2107     tcg_tb_insert(tb);
2108     return tb;
2109 }
2110 
2111 /*
2112  * @p must be non-NULL.
2113  * user-mode: call with mmap_lock held.
2114  * !user-mode: call with all @pages locked.
2115  */
2116 static void
2117 tb_invalidate_phys_page_range__locked(struct page_collection *pages,
2118                                       PageDesc *p, tb_page_addr_t start,
2119                                       tb_page_addr_t end,
2120                                       uintptr_t retaddr)
2121 {
2122     TranslationBlock *tb;
2123     tb_page_addr_t tb_start, tb_end;
2124     int n;
2125 #ifdef TARGET_HAS_PRECISE_SMC
2126     CPUState *cpu = current_cpu;
2127     CPUArchState *env = NULL;
2128     bool current_tb_not_found = retaddr != 0;
2129     bool current_tb_modified = false;
2130     TranslationBlock *current_tb = NULL;
2131     target_ulong current_pc = 0;
2132     target_ulong current_cs_base = 0;
2133     uint32_t current_flags = 0;
2134 #endif /* TARGET_HAS_PRECISE_SMC */
2135 
2136     assert_page_locked(p);
2137 
2138 #if defined(TARGET_HAS_PRECISE_SMC)
2139     if (cpu != NULL) {
2140         env = cpu->env_ptr;
2141     }
2142 #endif
2143 
2144     /* we remove all the TBs in the range [start, end[ */
2145     /* XXX: see if in some cases it could be faster to invalidate all
2146        the code */
2147     PAGE_FOR_EACH_TB(p, tb, n) {
2148         assert_page_locked(p);
2149         /* NOTE: this is subtle as a TB may span two physical pages */
2150         if (n == 0) {
2151             /* NOTE: tb_end may be after the end of the page, but
2152                it is not a problem */
2153             tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
2154             tb_end = tb_start + tb->size;
2155         } else {
2156             tb_start = tb->page_addr[1];
2157             tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
2158         }
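        /* invalidate the TB if [tb_start, tb_end[ overlaps [start, end[ */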
2159         if (!(tb_end <= start || tb_start >= end)) {
2160 #ifdef TARGET_HAS_PRECISE_SMC
2161             if (current_tb_not_found) {
2162                 current_tb_not_found = false;
2163                 /* now we have a real cpu fault */
2164                 current_tb = tcg_tb_lookup(retaddr);
2165             }
2166             if (current_tb == tb &&
2167                 (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
2168                 /*
2169                  * If we are modifying the current TB, we must stop
2170                  * its execution. We could be more precise by checking
2171                  * that the modification is after the current PC, but it
2172                  * would require a specialized function to partially
2173                  * restore the CPU state.
2174                  */
2175                 current_tb_modified = true;
2176                 cpu_restore_state_from_tb(cpu, current_tb, retaddr, true);
2177                 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
2178                                      &current_flags);
2179             }
2180 #endif /* TARGET_HAS_PRECISE_SMC */
2181             tb_phys_invalidate__locked(tb);
2182         }
2183     }
2184 #if !defined(CONFIG_USER_ONLY)
2185     /* if no code remains, there is no need to keep using slow writes */
2186     if (!p->first_tb) {
2187         invalidate_page_bitmap(p);
2188         tlb_unprotect_code(start);
2189     }
2190 #endif
2191 #ifdef TARGET_HAS_PRECISE_SMC
2192     if (current_tb_modified) {
2193         page_collection_unlock(pages);
2194         /* Force execution of one insn next time.  */
2195         cpu->cflags_next_tb = 1 | curr_cflags(cpu);
2196         mmap_unlock();
2197         cpu_loop_exit_noexc(cpu);
2198     }
2199 #endif
2200 }
2201 
2202 /*
2203  * Invalidate all TBs which intersect with the target physical address range
2204  * [start;end[. NOTE: start and end must refer to the *same* physical page.
2208  *
2209  * Called with mmap_lock held for user-mode emulation
2210  */
2211 void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end)
2212 {
2213     struct page_collection *pages;
2214     PageDesc *p;
2215 
2216     assert_memory_lock();
2217 
2218     p = page_find(start >> TARGET_PAGE_BITS);
2219     if (p == NULL) {
2220         return;
2221     }
2222     pages = page_collection_lock(start, end);
2223     tb_invalidate_phys_page_range__locked(pages, p, start, end, 0);
2224     page_collection_unlock(pages);
2225 }
2226 
2227 /*
2228  * Invalidate all TBs which intersect with the target physical address range
2229  * [start;end[. NOTE: start and end may refer to *different* physical pages.
2233  *
2234  * Called with mmap_lock held for user-mode emulation.
2235  */
2236 #ifdef CONFIG_SOFTMMU
2237 void tb_invalidate_phys_range(ram_addr_t start, ram_addr_t end)
2238 #else
2239 void tb_invalidate_phys_range(target_ulong start, target_ulong end)
2240 #endif
2241 {
2242     struct page_collection *pages;
2243     tb_page_addr_t next;
2244 
2245     assert_memory_lock();
2246 
2247     pages = page_collection_lock(start, end);
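    /*
     * Walk the range one target page at a time:
     * tb_invalidate_phys_page_range__locked() requires its start and end
     * to lie within the same physical page.
     */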
2248     for (next = (start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2249          start < end;
2250          start = next, next += TARGET_PAGE_SIZE) {
2251         PageDesc *pd = page_find(start >> TARGET_PAGE_BITS);
2252         tb_page_addr_t bound = MIN(next, end);
2253 
2254         if (pd == NULL) {
2255             continue;
2256         }
2257         tb_invalidate_phys_page_range__locked(pages, pd, start, bound, 0);
2258     }
2259     page_collection_unlock(pages);
2260 }
2261 
2262 #ifdef CONFIG_SOFTMMU
2263 /* len must be <= 8 and start must be a multiple of len.
2264  * Called via softmmu_template.h when code areas are written to with
2265  * iothread mutex not held.
2266  *
2267  * Call with all @pages in the range [@start, @start + len[ locked.
2268  */
2269 void tb_invalidate_phys_page_fast(struct page_collection *pages,
2270                                   tb_page_addr_t start, int len,
2271                                   uintptr_t retaddr)
2272 {
2273     PageDesc *p;
2274 
2275     assert_memory_lock();
2276 
2277     p = page_find(start >> TARGET_PAGE_BITS);
2278     if (!p) {
2279         return;
2280     }
2281 
2282     assert_page_locked(p);
2283     if (!p->code_bitmap &&
2284         ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
2285         build_page_bitmap(p);
2286     }
2287     if (p->code_bitmap) {
2288         unsigned int nr;
2289         unsigned long b;
2290 
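        /*
         * nr is the byte offset of the write within the page; shift the
         * bitmap word so that bit 0 corresponds to nr and test whether any
         * of the @len bytes being written overlap translated code.
         */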
2291         nr = start & ~TARGET_PAGE_MASK;
2292         b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
2293         if (b & ((1 << len) - 1)) {
2294             goto do_invalidate;
2295         }
2296     } else {
2297     do_invalidate:
2298         tb_invalidate_phys_page_range__locked(pages, p, start, start + len,
2299                                               retaddr);
2300     }
2301 }
2302 #else
2303 /* Called with mmap_lock held. If pc is not 0 then it indicates the
2304  * host PC of the faulting store instruction that caused this invalidate.
2305  * Returns true if the caller needs to abort execution of the current
2306  * TB (because it was modified by this store and the guest CPU has
2307  * precise-SMC semantics).
2308  */
2309 static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
2310 {
2311     TranslationBlock *tb;
2312     PageDesc *p;
2313     int n;
2314 #ifdef TARGET_HAS_PRECISE_SMC
2315     TranslationBlock *current_tb = NULL;
2316     CPUState *cpu = current_cpu;
2317     CPUArchState *env = NULL;
2318     int current_tb_modified = 0;
2319     target_ulong current_pc = 0;
2320     target_ulong current_cs_base = 0;
2321     uint32_t current_flags = 0;
2322 #endif
2323 
2324     assert_memory_lock();
2325 
2326     addr &= TARGET_PAGE_MASK;
2327     p = page_find(addr >> TARGET_PAGE_BITS);
2328     if (!p) {
2329         return false;
2330     }
2331 
2332 #ifdef TARGET_HAS_PRECISE_SMC
2333     if (p->first_tb && pc != 0) {
2334         current_tb = tcg_tb_lookup(pc);
2335     }
2336     if (cpu != NULL) {
2337         env = cpu->env_ptr;
2338     }
2339 #endif
2340     assert_page_locked(p);
2341     PAGE_FOR_EACH_TB(p, tb, n) {
2342 #ifdef TARGET_HAS_PRECISE_SMC
2343         if (current_tb == tb &&
2344             (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
2345             /* If we are modifying the current TB, we must stop
2346                its execution. We could be more precise by checking
2347                that the modification is after the current PC, but it
2348                would require a specialized function to partially
2349                restore the CPU state */
2351             current_tb_modified = 1;
2352             cpu_restore_state_from_tb(cpu, current_tb, pc, true);
2353             cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
2354                                  &current_flags);
2355         }
2356 #endif /* TARGET_HAS_PRECISE_SMC */
2357         tb_phys_invalidate(tb, addr);
2358     }
2359     p->first_tb = (uintptr_t)NULL;
2360 #ifdef TARGET_HAS_PRECISE_SMC
2361     if (current_tb_modified) {
2362         /* Force execution of one insn next time.  */
2363         cpu->cflags_next_tb = 1 | curr_cflags(cpu);
2364         return true;
2365     }
2366 #endif
2367 
2368     return false;
2369 }
2370 #endif
2371 
2372 /* user-mode: call with mmap_lock held */
2373 void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr)
2374 {
2375     TranslationBlock *tb;
2376 
2377     assert_memory_lock();
2378 
2379     tb = tcg_tb_lookup(retaddr);
2380     if (tb) {
2381         /* We can use retranslation to find the PC.  */
2382         cpu_restore_state_from_tb(cpu, tb, retaddr, true);
2383         tb_phys_invalidate(tb, -1);
2384     } else {
2385         /* The exception probably happened in a helper.  The CPU state should
2386            have been saved before calling it. Fetch the PC from there.  */
2387         CPUArchState *env = cpu->env_ptr;
2388         target_ulong pc, cs_base;
2389         tb_page_addr_t addr;
2390         uint32_t flags;
2391 
2392         cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
2393         addr = get_page_addr_code(env, pc);
2394         if (addr != -1) {
2395             tb_invalidate_phys_range(addr, addr + 1);
2396         }
2397     }
2398 }
2399 
2400 #ifndef CONFIG_USER_ONLY
2401 /*
2402  * In deterministic execution mode, instructions doing device I/Os
2403  * must be at the end of the TB.
2404  *
2405  * Called by softmmu_template.h, with iothread mutex not held.
2406  */
2407 void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
2408 {
2409     TranslationBlock *tb;
2410     CPUClass *cc;
2411     uint32_t n;
2412 
2413     tb = tcg_tb_lookup(retaddr);
2414     if (!tb) {
2415         cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
2416                   (void *)retaddr);
2417     }
2418     cpu_restore_state_from_tb(cpu, tb, retaddr, true);
2419 
2420     /*
2421      * Some guests must re-execute the branch when re-executing a delay
2422      * slot instruction.  When this is the case, adjust icount and N
2423      * to account for the re-execution of the branch.
2424      */
2425     n = 1;
2426     cc = CPU_GET_CLASS(cpu);
2427     if (cc->tcg_ops->io_recompile_replay_branch &&
2428         cc->tcg_ops->io_recompile_replay_branch(cpu, tb)) {
2429         cpu_neg(cpu)->icount_decr.u16.low++;
2430         n = 2;
2431     }
2432 
2433     /*
2434      * Exit the loop and potentially generate a new TB executing just
2435      * the I/O insns. We also limit instrumentation to memory
2436      * operations only (which execute after completion) so we don't
2437      * double instrument the instruction.
2438      */
2439     cpu->cflags_next_tb = curr_cflags(cpu) | CF_MEMI_ONLY | CF_LAST_IO | n;
2440 
2441     qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc,
2442                            "cpu_io_recompile: rewound execution of TB to "
2443                            TARGET_FMT_lx "\n", tb->pc);
2444 
2445     cpu_loop_exit_noexc(cpu);
2446 }
2447 
2448 static void print_qht_statistics(struct qht_stats hst)
2449 {
2450     uint32_t hgram_opts;
2451     size_t hgram_bins;
2452     char *hgram;
2453 
2454     if (!hst.head_buckets) {
2455         return;
2456     }
2457     qemu_printf("TB hash buckets     %zu/%zu (%0.2f%% head buckets used)\n",
2458                 hst.used_head_buckets, hst.head_buckets,
2459                 (double)hst.used_head_buckets / hst.head_buckets * 100);
2460 
2461     hgram_opts =  QDIST_PR_BORDER | QDIST_PR_LABELS;
2462     hgram_opts |= QDIST_PR_100X   | QDIST_PR_PERCENT;
2463     if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) {
2464         hgram_opts |= QDIST_PR_NODECIMAL;
2465     }
2466     hgram = qdist_pr(&hst.occupancy, 10, hgram_opts);
2467     qemu_printf("TB hash occupancy   %0.2f%% avg chain occ. Histogram: %s\n",
2468                 qdist_avg(&hst.occupancy) * 100, hgram);
2469     g_free(hgram);
2470 
2471     hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
2472     hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain);
2473     if (hgram_bins > 10) {
2474         hgram_bins = 10;
2475     } else {
2476         hgram_bins = 0;
2477         hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE;
2478     }
2479     hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts);
2480     qemu_printf("TB hash avg chain   %0.3f buckets. Histogram: %s\n",
2481                 qdist_avg(&hst.chain), hgram);
2482     g_free(hgram);
2483 }
2484 
2485 struct tb_tree_stats {
2486     size_t nb_tbs;
2487     size_t host_size;
2488     size_t target_size;
2489     size_t max_target_size;
2490     size_t direct_jmp_count;
2491     size_t direct_jmp2_count;
2492     size_t cross_page;
2493 };
2494 
2495 static gboolean tb_tree_stats_iter(gpointer key, gpointer value, gpointer data)
2496 {
2497     const TranslationBlock *tb = value;
2498     struct tb_tree_stats *tst = data;
2499 
2500     tst->nb_tbs++;
2501     tst->host_size += tb->tc.size;
2502     tst->target_size += tb->size;
2503     if (tb->size > tst->max_target_size) {
2504         tst->max_target_size = tb->size;
2505     }
2506     if (tb->page_addr[1] != -1) {
2507         tst->cross_page++;
2508     }
2509     if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
2510         tst->direct_jmp_count++;
2511         if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
2512             tst->direct_jmp2_count++;
2513         }
2514     }
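    /* return false so the traversal continues and every TB is counted */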
2515     return false;
2516 }
2517 
2518 void dump_exec_info(void)
2519 {
2520     struct tb_tree_stats tst = {};
2521     struct qht_stats hst;
2522     size_t nb_tbs, flush_full, flush_part, flush_elide;
2523 
2524     tcg_tb_foreach(tb_tree_stats_iter, &tst);
2525     nb_tbs = tst.nb_tbs;
2526     /* XXX: avoid using doubles ? */
2527     qemu_printf("Translation buffer state:\n");
2528     /*
2529      * Report total code size including the padding and TB structs;
2530      * otherwise users might think "-accel tcg,tb-size" is not honoured.
2531      * For avg host size we use the precise numbers from tb_tree_stats though.
2532      */
2533     qemu_printf("gen code size       %zu/%zu\n",
2534                 tcg_code_size(), tcg_code_capacity());
2535     qemu_printf("TB count            %zu\n", nb_tbs);
2536     qemu_printf("TB avg target size  %zu max=%zu bytes\n",
2537                 nb_tbs ? tst.target_size / nb_tbs : 0,
2538                 tst.max_target_size);
2539     qemu_printf("TB avg host size    %zu bytes (expansion ratio: %0.1f)\n",
2540                 nb_tbs ? tst.host_size / nb_tbs : 0,
2541                 tst.target_size ? (double)tst.host_size / tst.target_size : 0);
2542     qemu_printf("cross page TB count %zu (%zu%%)\n", tst.cross_page,
2543                 nb_tbs ? (tst.cross_page * 100) / nb_tbs : 0);
2544     qemu_printf("direct jump count   %zu (%zu%%) (2 jumps=%zu %zu%%)\n",
2545                 tst.direct_jmp_count,
2546                 nb_tbs ? (tst.direct_jmp_count * 100) / nb_tbs : 0,
2547                 tst.direct_jmp2_count,
2548                 nb_tbs ? (tst.direct_jmp2_count * 100) / nb_tbs : 0);
2549 
2550     qht_statistics_init(&tb_ctx.htable, &hst);
2551     print_qht_statistics(hst);
2552     qht_statistics_destroy(&hst);
2553 
2554     qemu_printf("\nStatistics:\n");
2555     qemu_printf("TB flush count      %u\n",
2556                 qatomic_read(&tb_ctx.tb_flush_count));
2557     qemu_printf("TB invalidate count %zu\n",
2558                 tcg_tb_phys_invalidate_count());
2559 
2560     tlb_flush_counts(&flush_full, &flush_part, &flush_elide);
2561     qemu_printf("TLB full flushes    %zu\n", flush_full);
2562     qemu_printf("TLB partial flushes %zu\n", flush_part);
2563     qemu_printf("TLB elided flushes  %zu\n", flush_elide);
2564     tcg_dump_info();
2565 }
2566 
2567 void dump_opcount_info(void)
2568 {
2569     tcg_dump_op_count();
2570 }
2571 
2572 #else /* CONFIG_USER_ONLY */
2573 
2574 void cpu_interrupt(CPUState *cpu, int mask)
2575 {
2576     g_assert(qemu_mutex_iothread_locked());
2577     cpu->interrupt_request |= mask;
2578     qatomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1);
2579 }
2580 
2581 /*
2582  * Walks guest process memory "regions" one by one
2583  * and calls callback function 'fn' for each region.
2584  */
2585 struct walk_memory_regions_data {
2586     walk_memory_regions_fn fn;
2587     void *priv;
2588     target_ulong start;
2589     int prot;
2590 };
2591 
2592 static int walk_memory_regions_end(struct walk_memory_regions_data *data,
2593                                    target_ulong end, int new_prot)
2594 {
2595     if (data->start != -1u) {
2596         int rc = data->fn(data->priv, data->start, end, data->prot);
2597         if (rc != 0) {
2598             return rc;
2599         }
2600     }
2601 
2602     data->start = (new_prot ? end : -1u);
2603     data->prot = new_prot;
2604 
2605     return 0;
2606 }
2607 
2608 static int walk_memory_regions_1(struct walk_memory_regions_data *data,
2609                                  target_ulong base, int level, void **lp)
2610 {
2611     target_ulong pa;
2612     int i, rc;
2613 
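    /*
     * Recursively walk one level of the l1_map radix tree, merging runs of
     * pages with identical protection into single regions via
     * walk_memory_regions_end().
     */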
2614     if (*lp == NULL) {
2615         return walk_memory_regions_end(data, base, 0);
2616     }
2617 
2618     if (level == 0) {
2619         PageDesc *pd = *lp;
2620 
2621         for (i = 0; i < V_L2_SIZE; ++i) {
2622             int prot = pd[i].flags;
2623 
2624             pa = base | (i << TARGET_PAGE_BITS);
2625             if (prot != data->prot) {
2626                 rc = walk_memory_regions_end(data, pa, prot);
2627                 if (rc != 0) {
2628                     return rc;
2629                 }
2630             }
2631         }
2632     } else {
2633         void **pp = *lp;
2634 
2635         for (i = 0; i < V_L2_SIZE; ++i) {
2636             pa = base | ((target_ulong)i <<
2637                 (TARGET_PAGE_BITS + V_L2_BITS * level));
2638             rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2639             if (rc != 0) {
2640                 return rc;
2641             }
2642         }
2643     }
2644 
2645     return 0;
2646 }
2647 
2648 int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2649 {
2650     struct walk_memory_regions_data data;
2651     uintptr_t i, l1_sz = v_l1_size;
2652 
2653     data.fn = fn;
2654     data.priv = priv;
2655     data.start = -1u;
2656     data.prot = 0;
2657 
2658     for (i = 0; i < l1_sz; i++) {
2659         target_ulong base = i << (v_l1_shift + TARGET_PAGE_BITS);
2660         int rc = walk_memory_regions_1(&data, base, v_l2_levels, l1_map + i);
2661         if (rc != 0) {
2662             return rc;
2663         }
2664     }
2665 
2666     return walk_memory_regions_end(&data, 0, 0);
2667 }
2668 
2669 static int dump_region(void *priv, target_ulong start,
2670     target_ulong end, unsigned long prot)
2671 {
2672     FILE *f = (FILE *)priv;
2673 
2674     (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx
2675         " "TARGET_FMT_lx" %c%c%c\n",
2676         start, end, end - start,
2677         ((prot & PAGE_READ) ? 'r' : '-'),
2678         ((prot & PAGE_WRITE) ? 'w' : '-'),
2679         ((prot & PAGE_EXEC) ? 'x' : '-'));
2680 
2681     return 0;
2682 }
2683 
2684 /* dump memory mappings */
2685 void page_dump(FILE *f)
2686 {
2687     const int length = sizeof(target_ulong) * 2;
2688     (void) fprintf(f, "%-*s %-*s %-*s %s\n",
2689             length, "start", length, "end", length, "size", "prot");
2690     walk_memory_regions(f, dump_region);
2691 }
2692 
2693 int page_get_flags(target_ulong address)
2694 {
2695     PageDesc *p;
2696 
2697     p = page_find(address >> TARGET_PAGE_BITS);
2698     if (!p) {
2699         return 0;
2700     }
2701     return p->flags;
2702 }
2703 
2704 /* Modify the flags of a page and invalidate the code if necessary.
2705    The flag PAGE_WRITE_ORG is set automatically depending
2706    on PAGE_WRITE.  The mmap_lock should already be held.  */
2707 void page_set_flags(target_ulong start, target_ulong end, int flags)
2708 {
2709     target_ulong addr, len;
2710     bool reset_target_data;
2711 
2712     /* This function should never be called with addresses outside the
2713        guest address space.  If this assert fires, it probably indicates
2714        a missing call to h2g_valid.  */
2715     assert(end - 1 <= GUEST_ADDR_MAX);
2716     assert(start < end);
2717     /* Only set PAGE_ANON with new mappings. */
2718     assert(!(flags & PAGE_ANON) || (flags & PAGE_RESET));
2719     assert_memory_lock();
2720 
2721     start = start & TARGET_PAGE_MASK;
2722     end = TARGET_PAGE_ALIGN(end);
2723 
2724     if (flags & PAGE_WRITE) {
2725         flags |= PAGE_WRITE_ORG;
2726     }
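    /*
     * Per-page target_data (see page_alloc_target_data()) is discarded
     * whenever the page is freshly (re)mapped, i.e. when it was not
     * previously valid or when PAGE_RESET was requested.
     */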
2727     reset_target_data = !(flags & PAGE_VALID) || (flags & PAGE_RESET);
2728     flags &= ~PAGE_RESET;
2729 
2730     for (addr = start, len = end - start;
2731          len != 0;
2732          len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2733         PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2734 
2735         /* If a previously non-writable page is being made writable
2736            and contains translated code, invalidate that code.  */
2737         if (!(p->flags & PAGE_WRITE) &&
2738             (flags & PAGE_WRITE) &&
2739             p->first_tb) {
2740             tb_invalidate_phys_page(addr, 0);
2741         }
2742         if (reset_target_data) {
2743             g_free(p->target_data);
2744             p->target_data = NULL;
2745             p->flags = flags;
2746         } else {
2747             /* Using mprotect on a page does not change MAP_ANON. */
2748             p->flags = (p->flags & PAGE_ANON) | flags;
2749         }
2750     }
2751 }
2752 
2753 void *page_get_target_data(target_ulong address)
2754 {
2755     PageDesc *p = page_find(address >> TARGET_PAGE_BITS);
2756     return p ? p->target_data : NULL;
2757 }
2758 
2759 void *page_alloc_target_data(target_ulong address, size_t size)
2760 {
2761     PageDesc *p = page_find(address >> TARGET_PAGE_BITS);
2762     void *ret = NULL;
2763 
2764     if (p->flags & PAGE_VALID) {
2765         ret = p->target_data;
2766         if (!ret) {
2767             p->target_data = ret = g_malloc0(size);
2768         }
2769     }
2770     return ret;
2771 }
2772 
2773 int page_check_range(target_ulong start, target_ulong len, int flags)
2774 {
2775     PageDesc *p;
2776     target_ulong end;
2777     target_ulong addr;
2778 
2779     /* This function should never be called with addresses outside the
2780        guest address space.  If this assert fires, it probably indicates
2781        a missing call to h2g_valid.  */
2782     if (TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS) {
2783         assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2784     }
2785 
2786     if (len == 0) {
2787         return 0;
2788     }
2789     if (start + len - 1 < start) {
2790         /* We've wrapped around.  */
2791         return -1;
2792     }
2793 
2794     /* must do this before we lose bits in the next step */
2795     end = TARGET_PAGE_ALIGN(start + len);
2796     start = start & TARGET_PAGE_MASK;
2797 
2798     for (addr = start, len = end - start;
2799          len != 0;
2800          len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2801         p = page_find(addr >> TARGET_PAGE_BITS);
2802         if (!p) {
2803             return -1;
2804         }
2805         if (!(p->flags & PAGE_VALID)) {
2806             return -1;
2807         }
2808 
2809         if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
2810             return -1;
2811         }
2812         if (flags & PAGE_WRITE) {
2813             if (!(p->flags & PAGE_WRITE_ORG)) {
2814                 return -1;
2815             }
2816             /* unprotect the page if it was made read-only because it
2817                contains translated code */
2818             if (!(p->flags & PAGE_WRITE)) {
2819                 if (!page_unprotect(addr, 0)) {
2820                     return -1;
2821                 }
2822             }
2823         }
2824     }
2825     return 0;
2826 }
2827 
2828 /* called from signal handler: invalidate the code and unprotect the
2829  * page. Return 0 if the fault was not handled, 1 if it was handled,
2830  * and 2 if it was handled but the caller must cause the TB to be
2831  * immediately exited. (We can only return 2 if the 'pc' argument is
2832  * non-zero.)
2833  */
2834 int page_unprotect(target_ulong address, uintptr_t pc)
2835 {
2836     unsigned int prot;
2837     bool current_tb_invalidated;
2838     PageDesc *p;
2839     target_ulong host_start, host_end, addr;
2840 
2841     /* Technically this isn't safe inside a signal handler.  However we
2842        know this only ever happens in a synchronous SEGV handler, so in
2843        practice it seems to be ok.  */
2844     mmap_lock();
2845 
2846     p = page_find(address >> TARGET_PAGE_BITS);
2847     if (!p) {
2848         mmap_unlock();
2849         return 0;
2850     }
2851 
2852     /* if the page was really writable, then we change its
2853        protection back to writable */
2854     if (p->flags & PAGE_WRITE_ORG) {
2855         current_tb_invalidated = false;
2856         if (p->flags & PAGE_WRITE) {
2857             /* If the page is actually marked WRITE then assume this is because
2858              * this thread raced with another one which got here first and
2859              * set the page to PAGE_WRITE and did the TB invalidate for us.
2860              */
2861 #ifdef TARGET_HAS_PRECISE_SMC
2862             TranslationBlock *current_tb = tcg_tb_lookup(pc);
2863             if (current_tb) {
2864                 current_tb_invalidated = tb_cflags(current_tb) & CF_INVALID;
2865             }
2866 #endif
2867         } else {
2868             host_start = address & qemu_host_page_mask;
2869             host_end = host_start + qemu_host_page_size;
2870 
2871             prot = 0;
2872             for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
2873                 p = page_find(addr >> TARGET_PAGE_BITS);
2874                 p->flags |= PAGE_WRITE;
2875                 prot |= p->flags;
2876 
2877                 /* and since the content will be modified, we must invalidate
2878                    the corresponding translated code. */
2879                 current_tb_invalidated |= tb_invalidate_phys_page(addr, pc);
2880 #ifdef CONFIG_USER_ONLY
2881                 if (DEBUG_TB_CHECK_GATE) {
2882                     tb_invalidate_check(addr);
2883                 }
2884 #endif
2885             }
2886             mprotect((void *)g2h_untagged(host_start), qemu_host_page_size,
2887                      prot & PAGE_BITS);
2888         }
2889         mmap_unlock();
2890         /* If current TB was invalidated return to main loop */
2891         return current_tb_invalidated ? 2 : 1;
2892     }
2893     mmap_unlock();
2894     return 0;
2895 }
2896 #endif /* CONFIG_USER_ONLY */
2897 
2898 /* This is a wrapper for common code that cannot use CONFIG_SOFTMMU */
2899 void tcg_flush_softmmu_tlb(CPUState *cs)
2900 {
2901 #ifdef CONFIG_SOFTMMU
2902     tlb_flush(cs);
2903 #endif
2904 }
2905