xref: /openbmc/qemu/accel/tcg/translate-all.c (revision d9c58585)
1 /*
2  *  Host code generation
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include "qemu/osdep.h"
21 #include "qemu/units.h"
22 #include "qemu-common.h"
23 
24 #define NO_CPU_IO_DEFS
25 #include "cpu.h"
26 #include "trace.h"
27 #include "disas/disas.h"
28 #include "exec/exec-all.h"
29 #include "tcg/tcg.h"
30 #if defined(CONFIG_USER_ONLY)
31 #include "qemu.h"
32 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
33 #include <sys/param.h>
34 #if __FreeBSD_version >= 700104
35 #define HAVE_KINFO_GETVMMAP
36 #define sigqueue sigqueue_freebsd  /* avoid redefinition */
37 #include <sys/proc.h>
38 #include <machine/profile.h>
39 #define _KERNEL
40 #include <sys/user.h>
41 #undef _KERNEL
42 #undef sigqueue
43 #include <libutil.h>
44 #endif
45 #endif
46 #else
47 #include "exec/ram_addr.h"
48 #endif
49 
50 #include "exec/cputlb.h"
51 #include "exec/tb-hash.h"
52 #include "exec/translate-all.h"
53 #include "qemu/bitmap.h"
54 #include "qemu/error-report.h"
55 #include "qemu/qemu-print.h"
56 #include "qemu/timer.h"
57 #include "qemu/main-loop.h"
58 #include "exec/log.h"
59 #include "sysemu/cpus.h"
60 #include "sysemu/cpu-timers.h"
61 #include "sysemu/tcg.h"
62 #include "qapi/error.h"
63 #include "internal.h"
64 
65 /* #define DEBUG_TB_INVALIDATE */
66 /* #define DEBUG_TB_FLUSH */
67 /* make various TB consistency checks */
68 /* #define DEBUG_TB_CHECK */
69 
70 #ifdef DEBUG_TB_INVALIDATE
71 #define DEBUG_TB_INVALIDATE_GATE 1
72 #else
73 #define DEBUG_TB_INVALIDATE_GATE 0
74 #endif
75 
76 #ifdef DEBUG_TB_FLUSH
77 #define DEBUG_TB_FLUSH_GATE 1
78 #else
79 #define DEBUG_TB_FLUSH_GATE 0
80 #endif
81 
82 #if !defined(CONFIG_USER_ONLY)
83 /* TB consistency checks are only implemented for user-mode emulation.  */
84 #undef DEBUG_TB_CHECK
85 #endif
86 
87 #ifdef DEBUG_TB_CHECK
88 #define DEBUG_TB_CHECK_GATE 1
89 #else
90 #define DEBUG_TB_CHECK_GATE 0
91 #endif
92 
93 /* Access to the various translation structures needs to be serialised via locks
94  * for consistency.
95  * In user-mode emulation, access to the memory-related structures is protected
96  * by mmap_lock.
97  * In !user-mode we use per-page locks.
98  */
99 #ifdef CONFIG_SOFTMMU
100 #define assert_memory_lock()
101 #else
102 #define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
103 #endif
104 
105 #define SMC_BITMAP_USE_THRESHOLD 10
106 
107 typedef struct PageDesc {
108     /* list of TBs intersecting this ram page */
109     uintptr_t first_tb;
110 #ifdef CONFIG_SOFTMMU
111     /* in order to optimize self-modifying code, we count the number
112        of writes to a given page and switch to a bitmap past a threshold */
113     unsigned long *code_bitmap;
114     unsigned int code_write_count;
115 #else
116     unsigned long flags;
117     void *target_data;
118 #endif
119 #ifndef CONFIG_USER_ONLY
120     QemuSpin lock;
121 #endif
122 } PageDesc;
123 
124 /**
125  * struct page_entry - page descriptor entry
126  * @pd:     pointer to the &struct PageDesc of the page this entry represents
127  * @index:  page index of the page
128  * @locked: whether the page is locked
129  *
130  * This struct helps us keep track of the locked state of a page, without
131  * bloating &struct PageDesc.
132  *
133  * A page lock protects accesses to all fields of &struct PageDesc.
134  *
135  * See also: &struct page_collection.
136  */
137 struct page_entry {
138     PageDesc *pd;
139     tb_page_addr_t index;
140     bool locked;
141 };
142 
143 /**
144  * struct page_collection - tracks a set of pages (i.e. &struct page_entry's)
145  * @tree:   Binary search tree (BST) of the pages, with key == page index
146  * @max:    Pointer to the page in @tree with the highest page index
147  *
148  * To avoid deadlock we lock pages in ascending order of page index.
149  * When operating on a set of pages, we need to keep track of them so that
150  * we can lock them in order and also unlock them later. For this we collect
151  * pages (i.e. &struct page_entry's) in a binary search @tree. Given that the
152  * @tree implementation we use does not provide an O(1) operation to obtain the
153  * highest-ranked element, we use @max to keep track of the inserted page
154  * with the highest index. This is valuable because if a page is not in
155  * the tree and its index is higher than @max's, then we can lock it
156  * without breaking the locking order rule.
157  *
158  * Note on naming: 'struct page_set' would be shorter, but we already have a few
159  * page_set_*() helpers, so page_collection is used instead to avoid confusion.
160  *
161  * See also: page_collection_lock().
162  */
163 struct page_collection {
164     GTree *tree;
165     struct page_entry *max;
166 };
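/*
 * Worked example (hypothetical page indexes, for illustration only):
 * suppose the collection already holds locks for pages {5, 9}, so
 * @max->index == 9.  Adding page 12 can lock outright, since 9 < 12
 * preserves the ascending locking order.  Adding page 7 must trylock
 * instead; if that fails, every lock is dropped and then reacquired in
 * ascending order (see page_trylock_add() and page_collection_lock()).
 */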
167 
168 /* list iterators for lists of tagged pointers in TranslationBlock */
169 #define TB_FOR_EACH_TAGGED(head, tb, n, field)                          \
170     for (n = (head) & 1, tb = (TranslationBlock *)((head) & ~1);        \
171          tb; tb = (TranslationBlock *)tb->field[n], n = (uintptr_t)tb & 1, \
172              tb = (TranslationBlock *)((uintptr_t)tb & ~1))
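/*
 * Illustration of the tagged-pointer walk (example values only): the low
 * bit of each link is the tag 'n', naming which of the TB's two slots the
 * current list occupies.  Given
 *     pd->first_tb      == (uintptr_t)tb1 | 0
 *     tb1->page_next[0] == (uintptr_t)tb2 | 1
 * the iterator yields (tb1, n = 0) and then (tb2, n = 1), continuing via
 * tb2->page_next[1].
 */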
173 
174 #define PAGE_FOR_EACH_TB(pagedesc, tb, n)                       \
175     TB_FOR_EACH_TAGGED((pagedesc)->first_tb, tb, n, page_next)
176 
177 #define TB_FOR_EACH_JMP(head_tb, tb, n)                                 \
178     TB_FOR_EACH_TAGGED((head_tb)->jmp_list_head, tb, n, jmp_list_next)
179 
180 /*
181  * In system mode we want L1_MAP to be based on ram offsets,
182  * while in user mode we want it to be based on virtual addresses.
183  *
184  * TODO: For user mode, see the caveat re host vs guest virtual
185  * address spaces near GUEST_ADDR_MAX.
186  */
187 #if !defined(CONFIG_USER_ONLY)
188 #if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
189 # define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
190 #else
191 # define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
192 #endif
193 #else
194 # define L1_MAP_ADDR_SPACE_BITS  MIN(HOST_LONG_BITS, TARGET_ABI_BITS)
195 #endif
196 
197 /* Size of the L2 (and L3, etc) page tables.  */
198 #define V_L2_BITS 10
199 #define V_L2_SIZE (1 << V_L2_BITS)
200 
201 /* Make sure all possible CPU event bits fit in tb->trace_vcpu_dstate */
202 QEMU_BUILD_BUG_ON(CPU_TRACE_DSTATE_MAX_EVENTS >
203                   sizeof_field(TranslationBlock, trace_vcpu_dstate)
204                   * BITS_PER_BYTE);
205 
206 /*
207  * L1 Mapping properties
208  */
209 static int v_l1_size;
210 static int v_l1_shift;
211 static int v_l2_levels;
212 
213 /* The bottom level has pointers to PageDesc, and is indexed by
214  * anything from 4 to (V_L2_BITS + 3) bits, depending on target page size.
215  */
216 #define V_L1_MIN_BITS 4
217 #define V_L1_MAX_BITS (V_L2_BITS + 3)
218 #define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS)
219 
220 static void *l1_map[V_L1_MAX_SIZE];
221 
222 /* code generation context */
223 TCGContext tcg_init_ctx;
224 __thread TCGContext *tcg_ctx;
225 TBContext tb_ctx;
226 bool parallel_cpus;
227 
228 static void page_table_config_init(void)
229 {
230     uint32_t v_l1_bits;
231 
232     assert(TARGET_PAGE_BITS);
233     /* The bits remaining after N lower levels of page tables.  */
234     v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS;
235     if (v_l1_bits < V_L1_MIN_BITS) {
236         v_l1_bits += V_L2_BITS;
237     }
238 
239     v_l1_size = 1 << v_l1_bits;
240     v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits;
241     v_l2_levels = v_l1_shift / V_L2_BITS - 1;
242 
243     assert(v_l1_bits <= V_L1_MAX_BITS);
244     assert(v_l1_shift % V_L2_BITS == 0);
245     assert(v_l2_levels >= 0);
246 }
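/*
 * Worked example with illustrative numbers: for L1_MAP_ADDR_SPACE_BITS == 48
 * and TARGET_PAGE_BITS == 12, 36 bits of page index remain.  36 % 10 == 6,
 * which is >= V_L1_MIN_BITS, so:
 *     v_l1_size   = 1 << 6 == 64 entries
 *     v_l1_shift  = 48 - 12 - 6 == 30
 *     v_l2_levels = 30 / 10 - 1 == 2 intermediate levels,
 * i.e. a 64-entry L1 table, two 1024-entry intermediate levels, and a
 * 1024-entry bottom level of PageDesc.
 */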
247 
248 static void cpu_gen_init(void)
249 {
250     tcg_context_init(&tcg_init_ctx);
251 }
252 
253 /* Encode VAL as a signed leb128 sequence at P.
254    Return P incremented past the encoded value.  */
255 static uint8_t *encode_sleb128(uint8_t *p, target_long val)
256 {
257     int more, byte;
258 
259     do {
260         byte = val & 0x7f;
261         val >>= 7;
262         more = !((val == 0 && (byte & 0x40) == 0)
263                  || (val == -1 && (byte & 0x40) != 0));
264         if (more) {
265             byte |= 0x80;
266         }
267         *p++ = byte;
268     } while (more);
269 
270     return p;
271 }
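/* Reference encodings produced by the loop above (standard signed LEB128):
       2   -> 0x02            -2   -> 0x7e
       127 -> 0xff 0x00       -128 -> 0x80 0x7f
   Each byte carries 7 payload bits; bit 7 flags a continuation byte and
   bit 6 of the final byte supplies the sign.  */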
272 
273 /* Decode a signed leb128 sequence at *PP; increment *PP past the
274    decoded value.  Return the decoded value.  */
275 static target_long decode_sleb128(const uint8_t **pp)
276 {
277     const uint8_t *p = *pp;
278     target_long val = 0;
279     int byte, shift = 0;
280 
281     do {
282         byte = *p++;
283         val |= (target_ulong)(byte & 0x7f) << shift;
284         shift += 7;
285     } while (byte & 0x80);
286     if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
287         val |= -(target_ulong)1 << shift;
288     }
289 
290     *pp = p;
291     return val;
292 }
293 
294 /* Encode the data collected about the instructions while compiling TB.
295    Place the data at BLOCK, and return the number of bytes consumed.
296 
297    The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
298    which come from the target's insn_start data, followed by a uintptr_t
299    which comes from the host pc of the end of the code implementing the insn.
300 
301    Each line of the table is encoded as sleb128 deltas from the previous
302    line.  The seed for the first line is { tb->pc, 0..., tb->tc.ptr }.
303    That is, the first column is seeded with the guest pc, the last column
304    with the host pc, and the middle columns with zeros.  */
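/* Worked example (numbers invented for illustration): for a TB at guest
   pc 0x1000 whose first insn spans 4 guest bytes and whose host code ends
   24 bytes into the buffer, row 0 encodes sleb128(0x1000 - 0x1000) = 0x00
   in the pc column and sleb128(24 - 0) = 0x18 in the host column; row 1
   then starts with sleb128(4) in the pc column, and so on.  */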
305 
306 static int encode_search(TranslationBlock *tb, uint8_t *block)
307 {
308     uint8_t *highwater = tcg_ctx->code_gen_highwater;
309     uint8_t *p = block;
310     int i, j, n;
311 
312     for (i = 0, n = tb->icount; i < n; ++i) {
313         target_ulong prev;
314 
315         for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
316             if (i == 0) {
317                 prev = (j == 0 ? tb->pc : 0);
318             } else {
319                 prev = tcg_ctx->gen_insn_data[i - 1][j];
320             }
321             p = encode_sleb128(p, tcg_ctx->gen_insn_data[i][j] - prev);
322         }
323         prev = (i == 0 ? 0 : tcg_ctx->gen_insn_end_off[i - 1]);
324         p = encode_sleb128(p, tcg_ctx->gen_insn_end_off[i] - prev);
325 
326         /* Test for (pending) buffer overflow.  The assumption is that any
327            one row beginning below the high water mark cannot overrun
328            the buffer completely.  Thus we can test for overflow after
329            encoding a row without having to check during encoding.  */
330         if (unlikely(p > highwater)) {
331             return -1;
332         }
333     }
334 
335     return p - block;
336 }
337 
338 /* The cpu state corresponding to 'searched_pc' is restored.
339  * When reset_icount is true, the current TB will be interrupted and
340  * icount should be recalculated.
341  */
342 static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
343                                      uintptr_t searched_pc, bool reset_icount)
344 {
345     target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc };
346     uintptr_t host_pc = (uintptr_t)tb->tc.ptr;
347     CPUArchState *env = cpu->env_ptr;
348     const uint8_t *p = tb->tc.ptr + tb->tc.size;
349     int i, j, num_insns = tb->icount;
350 #ifdef CONFIG_PROFILER
351     TCGProfile *prof = &tcg_ctx->prof;
352     int64_t ti = profile_getclock();
353 #endif
354 
355     searched_pc -= GETPC_ADJ;
356 
357     if (searched_pc < host_pc) {
358         return -1;
359     }
360 
361     /* Reconstruct the stored insn data while looking for the point at
362        which the end of the insn exceeds the searched_pc.  */
363     for (i = 0; i < num_insns; ++i) {
364         for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
365             data[j] += decode_sleb128(&p);
366         }
367         host_pc += decode_sleb128(&p);
368         if (host_pc > searched_pc) {
369             goto found;
370         }
371     }
372     return -1;
373 
374  found:
375     if (reset_icount && (tb_cflags(tb) & CF_USE_ICOUNT)) {
376         assert(icount_enabled());
377         /* Reset the cycle counter to the start of the block
378            and shift it to the number of actually executed instructions */
379         cpu_neg(cpu)->icount_decr.u16.low += num_insns - i;
380     }
381     restore_state_to_opc(env, tb, data);
382 
383 #ifdef CONFIG_PROFILER
384     qatomic_set(&prof->restore_time,
385                 prof->restore_time + profile_getclock() - ti);
386     qatomic_set(&prof->restore_count, prof->restore_count + 1);
387 #endif
388     return 0;
389 }
390 
391 void tb_destroy(TranslationBlock *tb)
392 {
393     qemu_spin_destroy(&tb->jmp_lock);
394 }
395 
396 bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc, bool will_exit)
397 {
398     /*
399      * The host_pc has to be in the rx region of the code buffer.
400      * If it is not, we will not be able to resolve it here.
401      * The two cases where host_pc will not be correct are:
402      *
403      *  - fault during translation (instruction fetch)
404      *  - fault from helper (not using GETPC() macro)
405      *
406      * Either way we need to return early as we can't resolve it here.
407      */
408     if (in_code_gen_buffer((const void *)(host_pc - tcg_splitwx_diff))) {
409         TranslationBlock *tb = tcg_tb_lookup(host_pc);
410         if (tb) {
411             cpu_restore_state_from_tb(cpu, tb, host_pc, will_exit);
412             if (tb_cflags(tb) & CF_NOCACHE) {
413                 /* one-shot translation, invalidate it immediately */
414                 tb_phys_invalidate(tb, -1);
415                 tcg_tb_remove(tb);
416                 tb_destroy(tb);
417             }
418             return true;
419         }
420     }
421     return false;
422 }
423 
424 static void page_init(void)
425 {
426     page_size_init();
427     page_table_config_init();
428 
429 #if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
430     {
431 #ifdef HAVE_KINFO_GETVMMAP
432         struct kinfo_vmentry *freep;
433         int i, cnt;
434 
435         freep = kinfo_getvmmap(getpid(), &cnt);
436         if (freep) {
437             mmap_lock();
438             for (i = 0; i < cnt; i++) {
439                 unsigned long startaddr, endaddr;
440 
441                 startaddr = freep[i].kve_start;
442                 endaddr = freep[i].kve_end;
443                 if (h2g_valid(startaddr)) {
444                     startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
445 
446                     if (h2g_valid(endaddr)) {
447                         endaddr = h2g(endaddr);
448                         page_set_flags(startaddr, endaddr, PAGE_RESERVED);
449                     } else {
450 #if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
451                         endaddr = ~0ul;
452                         page_set_flags(startaddr, endaddr, PAGE_RESERVED);
453 #endif
454                     }
455                 }
456             }
457             free(freep);
458             mmap_unlock();
459         }
460 #else
461         FILE *f;
462 
463         last_brk = (unsigned long)sbrk(0);
464 
465         f = fopen("/compat/linux/proc/self/maps", "r");
466         if (f) {
467             mmap_lock();
468 
469             do {
470                 unsigned long startaddr, endaddr;
471                 int n;
472 
473                 n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
474 
475                 if (n == 2 && h2g_valid(startaddr)) {
476                     startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
477 
478                     if (h2g_valid(endaddr)) {
479                         endaddr = h2g(endaddr);
480                     } else {
481                         endaddr = ~0ul;
482                     }
483                     page_set_flags(startaddr, endaddr, PAGE_RESERVED);
484                 }
485             } while (!feof(f));
486 
487             fclose(f);
488             mmap_unlock();
489         }
490 #endif
491     }
492 #endif
493 }
494 
495 static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
496 {
497     PageDesc *pd;
498     void **lp;
499     int i;
500 
501     /* Level 1.  Always allocated.  */
502     lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));
503 
504     /* Level 2..N-1.  */
505     for (i = v_l2_levels; i > 0; i--) {
506         void **p = qatomic_rcu_read(lp);
507 
508         if (p == NULL) {
509             void *existing;
510 
511             if (!alloc) {
512                 return NULL;
513             }
514             p = g_new0(void *, V_L2_SIZE);
515             existing = qatomic_cmpxchg(lp, NULL, p);
516             if (unlikely(existing)) {
517                 g_free(p);
518                 p = existing;
519             }
520         }
521 
522         lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
523     }
524 
525     pd = qatomic_rcu_read(lp);
526     if (pd == NULL) {
527         void *existing;
528 
529         if (!alloc) {
530             return NULL;
531         }
532         pd = g_new0(PageDesc, V_L2_SIZE);
533 #ifndef CONFIG_USER_ONLY
534         {
535             int i;
536 
537             for (i = 0; i < V_L2_SIZE; i++) {
538                 qemu_spin_init(&pd[i].lock);
539             }
540         }
541 #endif
542         existing = qatomic_cmpxchg(lp, NULL, pd);
543         if (unlikely(existing)) {
544 #ifndef CONFIG_USER_ONLY
545             {
546                 int i;
547 
548                 for (i = 0; i < V_L2_SIZE; i++) {
549                     qemu_spin_destroy(&pd[i].lock);
550                 }
551             }
552 #endif
553             g_free(pd);
554             pd = existing;
555         }
556     }
557 
558     return pd + (index & (V_L2_SIZE - 1));
559 }
560 
561 static inline PageDesc *page_find(tb_page_addr_t index)
562 {
563     return page_find_alloc(index, 0);
564 }
565 
566 static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
567                            PageDesc **ret_p2, tb_page_addr_t phys2, int alloc);
568 
569 /* In user-mode page locks aren't used; mmap_lock is enough */
570 #ifdef CONFIG_USER_ONLY
571 
572 #define assert_page_locked(pd) tcg_debug_assert(have_mmap_lock())
573 
574 static inline void page_lock(PageDesc *pd)
575 { }
576 
577 static inline void page_unlock(PageDesc *pd)
578 { }
579 
580 static inline void page_lock_tb(const TranslationBlock *tb)
581 { }
582 
583 static inline void page_unlock_tb(const TranslationBlock *tb)
584 { }
585 
586 struct page_collection *
587 page_collection_lock(tb_page_addr_t start, tb_page_addr_t end)
588 {
589     return NULL;
590 }
591 
592 void page_collection_unlock(struct page_collection *set)
593 { }
594 #else /* !CONFIG_USER_ONLY */
595 
596 #ifdef CONFIG_DEBUG_TCG
597 
598 static __thread GHashTable *ht_pages_locked_debug;
599 
600 static void ht_pages_locked_debug_init(void)
601 {
602     if (ht_pages_locked_debug) {
603         return;
604     }
605     ht_pages_locked_debug = g_hash_table_new(NULL, NULL);
606 }
607 
608 static bool page_is_locked(const PageDesc *pd)
609 {
610     PageDesc *found;
611 
612     ht_pages_locked_debug_init();
613     found = g_hash_table_lookup(ht_pages_locked_debug, pd);
614     return !!found;
615 }
616 
617 static void page_lock__debug(PageDesc *pd)
618 {
619     ht_pages_locked_debug_init();
620     g_assert(!page_is_locked(pd));
621     g_hash_table_insert(ht_pages_locked_debug, pd, pd);
622 }
623 
624 static void page_unlock__debug(const PageDesc *pd)
625 {
626     bool removed;
627 
628     ht_pages_locked_debug_init();
629     g_assert(page_is_locked(pd));
630     removed = g_hash_table_remove(ht_pages_locked_debug, pd);
631     g_assert(removed);
632 }
633 
634 static void
635 do_assert_page_locked(const PageDesc *pd, const char *file, int line)
636 {
637     if (unlikely(!page_is_locked(pd))) {
638         error_report("assert_page_lock: PageDesc %p not locked @ %s:%d",
639                      pd, file, line);
640         abort();
641     }
642 }
643 
644 #define assert_page_locked(pd) do_assert_page_locked(pd, __FILE__, __LINE__)
645 
646 void assert_no_pages_locked(void)
647 {
648     ht_pages_locked_debug_init();
649     g_assert(g_hash_table_size(ht_pages_locked_debug) == 0);
650 }
651 
652 #else /* !CONFIG_DEBUG_TCG */
653 
654 #define assert_page_locked(pd)
655 
656 static inline void page_lock__debug(const PageDesc *pd)
657 {
658 }
659 
660 static inline void page_unlock__debug(const PageDesc *pd)
661 {
662 }
663 
664 #endif /* CONFIG_DEBUG_TCG */
665 
666 static inline void page_lock(PageDesc *pd)
667 {
668     page_lock__debug(pd);
669     qemu_spin_lock(&pd->lock);
670 }
671 
672 static inline void page_unlock(PageDesc *pd)
673 {
674     qemu_spin_unlock(&pd->lock);
675     page_unlock__debug(pd);
676 }
677 
678 /* lock the page(s) of a TB in the correct acquisition order */
679 static inline void page_lock_tb(const TranslationBlock *tb)
680 {
681     page_lock_pair(NULL, tb->page_addr[0], NULL, tb->page_addr[1], 0);
682 }
683 
684 static inline void page_unlock_tb(const TranslationBlock *tb)
685 {
686     PageDesc *p1 = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
687 
688     page_unlock(p1);
689     if (unlikely(tb->page_addr[1] != -1)) {
690         PageDesc *p2 = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
691 
692         if (p2 != p1) {
693             page_unlock(p2);
694         }
695     }
696 }
697 
698 static inline struct page_entry *
699 page_entry_new(PageDesc *pd, tb_page_addr_t index)
700 {
701     struct page_entry *pe = g_malloc(sizeof(*pe));
702 
703     pe->index = index;
704     pe->pd = pd;
705     pe->locked = false;
706     return pe;
707 }
708 
709 static void page_entry_destroy(gpointer p)
710 {
711     struct page_entry *pe = p;
712 
713     g_assert(pe->locked);
714     page_unlock(pe->pd);
715     g_free(pe);
716 }
717 
718 /* returns false on success */
719 static bool page_entry_trylock(struct page_entry *pe)
720 {
721     bool busy;
722 
723     busy = qemu_spin_trylock(&pe->pd->lock);
724     if (!busy) {
725         g_assert(!pe->locked);
726         pe->locked = true;
727         page_lock__debug(pe->pd);
728     }
729     return busy;
730 }
731 
732 static void do_page_entry_lock(struct page_entry *pe)
733 {
734     page_lock(pe->pd);
735     g_assert(!pe->locked);
736     pe->locked = true;
737 }
738 
739 static gboolean page_entry_lock(gpointer key, gpointer value, gpointer data)
740 {
741     struct page_entry *pe = value;
742 
743     do_page_entry_lock(pe);
744     return FALSE;
745 }
746 
747 static gboolean page_entry_unlock(gpointer key, gpointer value, gpointer data)
748 {
749     struct page_entry *pe = value;
750 
751     if (pe->locked) {
752         pe->locked = false;
753         page_unlock(pe->pd);
754     }
755     return FALSE;
756 }
757 
758 /*
759  * Trylock a page, and if successful, add the page to a collection.
760  * Returns true ("busy") if the page could not be locked; false otherwise.
761  */
762 static bool page_trylock_add(struct page_collection *set, tb_page_addr_t addr)
763 {
764     tb_page_addr_t index = addr >> TARGET_PAGE_BITS;
765     struct page_entry *pe;
766     PageDesc *pd;
767 
768     pe = g_tree_lookup(set->tree, &index);
769     if (pe) {
770         return false;
771     }
772 
773     pd = page_find(index);
774     if (pd == NULL) {
775         return false;
776     }
777 
778     pe = page_entry_new(pd, index);
779     g_tree_insert(set->tree, &pe->index, pe);
780 
781     /*
782      * If this is either (1) the first insertion or (2) a page whose index
783      * is higher than any other so far, just lock the page and move on.
784      */
785     if (set->max == NULL || pe->index > set->max->index) {
786         set->max = pe;
787         do_page_entry_lock(pe);
788         return false;
789     }
790     /*
791      * Try to acquire the lock out of order; if busy, return busy so that the
792      * caller drops all locks and reacquires them in ascending order.
793      */
794     return page_entry_trylock(pe);
795 }
796 
797 static gint tb_page_addr_cmp(gconstpointer ap, gconstpointer bp, gpointer udata)
798 {
799     tb_page_addr_t a = *(const tb_page_addr_t *)ap;
800     tb_page_addr_t b = *(const tb_page_addr_t *)bp;
801 
802     if (a == b) {
803         return 0;
804     } else if (a < b) {
805         return -1;
806     }
807     return 1;
808 }
809 
810 /*
811  * Lock a range of pages ([@start,@end[) as well as the pages of all
812  * intersecting TBs.
813  * Locking order: acquire locks in ascending order of page index.
814  */
815 struct page_collection *
816 page_collection_lock(tb_page_addr_t start, tb_page_addr_t end)
817 {
818     struct page_collection *set = g_malloc(sizeof(*set));
819     tb_page_addr_t index;
820     PageDesc *pd;
821 
822     start >>= TARGET_PAGE_BITS;
823     end   >>= TARGET_PAGE_BITS;
824     g_assert(start <= end);
825 
826     set->tree = g_tree_new_full(tb_page_addr_cmp, NULL, NULL,
827                                 page_entry_destroy);
828     set->max = NULL;
829     assert_no_pages_locked();
830 
831  retry:
832     g_tree_foreach(set->tree, page_entry_lock, NULL);
833 
834     for (index = start; index <= end; index++) {
835         TranslationBlock *tb;
836         int n;
837 
838         pd = page_find(index);
839         if (pd == NULL) {
840             continue;
841         }
842         if (page_trylock_add(set, index << TARGET_PAGE_BITS)) {
843             g_tree_foreach(set->tree, page_entry_unlock, NULL);
844             goto retry;
845         }
846         assert_page_locked(pd);
847         PAGE_FOR_EACH_TB(pd, tb, n) {
848             if (page_trylock_add(set, tb->page_addr[0]) ||
849                 (tb->page_addr[1] != -1 &&
850                  page_trylock_add(set, tb->page_addr[1]))) {
851                 /* drop all locks, and reacquire in order */
852                 g_tree_foreach(set->tree, page_entry_unlock, NULL);
853                 goto retry;
854             }
855         }
856     }
857     return set;
858 }
859 
860 void page_collection_unlock(struct page_collection *set)
861 {
862     /* entries are unlocked and freed via page_entry_destroy */
863     g_tree_destroy(set->tree);
864     g_free(set);
865 }
866 
867 #endif /* !CONFIG_USER_ONLY */
868 
869 static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
870                            PageDesc **ret_p2, tb_page_addr_t phys2, int alloc)
871 {
872     PageDesc *p1, *p2;
873     tb_page_addr_t page1;
874     tb_page_addr_t page2;
875 
876     assert_memory_lock();
877     g_assert(phys1 != -1);
878 
879     page1 = phys1 >> TARGET_PAGE_BITS;
880     page2 = phys2 >> TARGET_PAGE_BITS;
881 
882     p1 = page_find_alloc(page1, alloc);
883     if (ret_p1) {
884         *ret_p1 = p1;
885     }
886     if (likely(phys2 == -1)) {
887         page_lock(p1);
888         return;
889     } else if (page1 == page2) {
890         page_lock(p1);
891         if (ret_p2) {
892             *ret_p2 = p1;
893         }
894         return;
895     }
896     p2 = page_find_alloc(page2, alloc);
897     if (ret_p2) {
898         *ret_p2 = p2;
899     }
900     if (page1 < page2) {
901         page_lock(p1);
902         page_lock(p2);
903     } else {
904         page_lock(p2);
905         page_lock(p1);
906     }
907 }
908 
909 /* Minimum size of the code gen buffer.  This number is arbitrarily chosen,
910    but not so small that we can't have a fair number of TBs live.  */
911 #define MIN_CODE_GEN_BUFFER_SIZE     (1 * MiB)
912 
913 /* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
914    indicated, this is constrained by the range of direct branches on the
915    host cpu, as used by the TCG implementation of goto_tb.  */
916 #if defined(__x86_64__)
917 # define MAX_CODE_GEN_BUFFER_SIZE  (2 * GiB)
918 #elif defined(__sparc__)
919 # define MAX_CODE_GEN_BUFFER_SIZE  (2 * GiB)
920 #elif defined(__powerpc64__)
921 # define MAX_CODE_GEN_BUFFER_SIZE  (2 * GiB)
922 #elif defined(__powerpc__)
923 # define MAX_CODE_GEN_BUFFER_SIZE  (32 * MiB)
924 #elif defined(__aarch64__)
925 # define MAX_CODE_GEN_BUFFER_SIZE  (2 * GiB)
926 #elif defined(__s390x__)
927   /* We have a +- 4GB range on the branches; leave some slop.  */
928 # define MAX_CODE_GEN_BUFFER_SIZE  (3 * GiB)
929 #elif defined(__mips__)
930   /* We have a 256MB branch region, but leave room to make sure the
931      main executable is also within that region.  */
932 # define MAX_CODE_GEN_BUFFER_SIZE  (128 * MiB)
933 #else
934 # define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
935 #endif
936 
937 #if TCG_TARGET_REG_BITS == 32
938 #define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32 * MiB)
939 #ifdef CONFIG_USER_ONLY
940 /*
941  * For user mode on smaller 32-bit systems we may run into trouble
942  * allocating big chunks of data in the right place. On these systems
943  * we utilise a static code generation buffer directly in the binary.
944  */
945 #define USE_STATIC_CODE_GEN_BUFFER
946 #endif
947 #else /* TCG_TARGET_REG_BITS == 64 */
948 #ifdef CONFIG_USER_ONLY
949 /*
950  * As user-mode emulation typically means running multiple instances
951  * of the translator, don't go too nuts with our default code gen
952  * buffer lest we make things too hard for the OS.
953  */
954 #define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (128 * MiB)
955 #else
956 /*
957  * We expect most system emulation to run one or two guests per host.
958  * Users running large-scale system emulation may want to tweak their
959  * runtime setup via the tb-size control on the command line.
960  */
961 #define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (1 * GiB)
962 #endif
963 #endif
964 
965 #define DEFAULT_CODE_GEN_BUFFER_SIZE \
966   (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
967    ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)
968 
969 static size_t size_code_gen_buffer(size_t tb_size)
970 {
971     /* Size the buffer.  */
972     if (tb_size == 0) {
973         size_t phys_mem = qemu_get_host_physmem();
974         if (phys_mem == 0) {
975             tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
976         } else {
977             tb_size = MIN(DEFAULT_CODE_GEN_BUFFER_SIZE, phys_mem / 8);
978         }
979     }
980     if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
981         tb_size = MIN_CODE_GEN_BUFFER_SIZE;
982     }
983     if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
984         tb_size = MAX_CODE_GEN_BUFFER_SIZE;
985     }
986     return tb_size;
987 }
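/* Example (assuming the 64-bit softmmu defaults above): on a host with
   16 GiB of RAM and no explicit tb-size, phys_mem / 8 == 2 GiB, so
   tb_size = MIN(1 GiB, 2 GiB) == 1 GiB, which already satisfies both
   the MIN and MAX clamps.  */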
988 
989 #ifdef __mips__
990 /* In order to use J and JAL within the code_gen_buffer, we require
991    that the buffer not cross a 256MB boundary.  */
992 static inline bool cross_256mb(void *addr, size_t size)
993 {
994     return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful;
995 }
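/* Example with illustrative addresses: addr == 0x0ff00000 with size == 2 MiB
   ends at 0x10100000; (0x0ff00000 ^ 0x10100000) & ~0x0ffffffful ==
   0x10000000, which is nonzero, so this buffer would cross a boundary.  */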
996 
997 /* We weren't able to allocate a buffer without crossing that boundary,
998    so make do with the larger portion of the buffer that doesn't cross.
999    Returns the new base of the buffer, and adjusts code_gen_buffer_size.  */
1000 static inline void *split_cross_256mb(void *buf1, size_t size1)
1001 {
1002     void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful);
1003     size_t size2 = buf1 + size1 - buf2;
1004 
1005     size1 = buf2 - buf1;
1006     if (size1 < size2) {
1007         size1 = size2;
1008         buf1 = buf2;
1009     }
1010 
1011     tcg_ctx->code_gen_buffer_size = size1;
1012     return buf1;
1013 }
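/* Continuing the example above: buf1 == 0x0ff00000 with size1 == 3 MiB
   gives buf2 == 0x10000000, size2 == 2 MiB and a trimmed size1 == 1 MiB;
   since size1 < size2, the function returns buf2 with
   code_gen_buffer_size == 2 MiB.  */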
1014 #endif
1015 
1016 #ifdef USE_STATIC_CODE_GEN_BUFFER
1017 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
1018     __attribute__((aligned(CODE_GEN_ALIGN)));
1019 
1020 static bool alloc_code_gen_buffer(size_t tb_size, int splitwx, Error **errp)
1021 {
1022     void *buf, *end;
1023     size_t size;
1024 
1025     if (splitwx > 0) {
1026         error_setg(errp, "jit split-wx not supported");
1027         return false;
1028     }
1029 
1030     /* page-align the beginning and end of the buffer */
1031     buf = static_code_gen_buffer;
1032     end = static_code_gen_buffer + sizeof(static_code_gen_buffer);
1033     buf = QEMU_ALIGN_PTR_UP(buf, qemu_real_host_page_size);
1034     end = QEMU_ALIGN_PTR_DOWN(end, qemu_real_host_page_size);
1035 
1036     size = end - buf;
1037 
1038     /* Honor a command-line option limiting the size of the buffer.  */
1039     if (size > tb_size) {
1040         size = QEMU_ALIGN_DOWN(tb_size, qemu_real_host_page_size);
1041     }
1042     tcg_ctx->code_gen_buffer_size = size;
1043 
1044 #ifdef __mips__
1045     if (cross_256mb(buf, size)) {
1046         buf = split_cross_256mb(buf, size);
1047         size = tcg_ctx->code_gen_buffer_size;
1048     }
1049 #endif
1050 
1051     if (qemu_mprotect_rwx(buf, size)) {
1052         error_setg_errno(errp, errno, "mprotect of jit buffer");
1053         return false;
1054     }
1055     qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
1056 
1057     tcg_ctx->code_gen_buffer = buf;
1058     return true;
1059 }
1060 #elif defined(_WIN32)
1061 static bool alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
1062 {
1063     void *buf;
1064 
1065     if (splitwx > 0) {
1066         error_setg(errp, "jit split-wx not supported");
1067         return false;
1068     }
1069 
1070     buf = VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT,
1071                              PAGE_EXECUTE_READWRITE);
1072     if (buf == NULL) {
1073         error_setg_win32(errp, GetLastError(),
1074                          "allocate %zu bytes for jit buffer", size);
1075         return false;
1076     }
1077 
1078     tcg_ctx->code_gen_buffer = buf;
1079     tcg_ctx->code_gen_buffer_size = size;
1080     return true;
1081 }
1082 #else
1083 static bool alloc_code_gen_buffer_anon(size_t size, int prot,
1084                                        int flags, Error **errp)
1085 {
1086     void *buf;
1087 
1088     buf = mmap(NULL, size, prot, flags, -1, 0);
1089     if (buf == MAP_FAILED) {
1090         error_setg_errno(errp, errno,
1091                          "allocate %zu bytes for jit buffer", size);
1092         return false;
1093     }
1094     tcg_ctx->code_gen_buffer_size = size;
1095 
1096 #ifdef __mips__
1097     if (cross_256mb(buf, size)) {
1098         /*
1099          * Try again, with the original still mapped, to avoid re-acquiring
1100          * the same 256MB crossing.
1101          */
1102         size_t size2;
1103         void *buf2 = mmap(NULL, size, prot, flags, -1, 0);
1104         switch ((int)(buf2 != MAP_FAILED)) {
1105         case 1:
1106             if (!cross_256mb(buf2, size)) {
1107                 /* Success!  Use the new buffer.  */
1108                 munmap(buf, size);
1109                 break;
1110             }
1111             /* Failure.  Work with what we had.  */
1112             munmap(buf2, size);
1113             /* fallthru */
1114         default:
1115             /* Split the original buffer.  Free the smaller half.  */
1116             buf2 = split_cross_256mb(buf, size);
1117             size2 = tcg_ctx->code_gen_buffer_size;
1118             if (buf == buf2) {
1119                 munmap(buf + size2, size - size2);
1120             } else {
1121                 munmap(buf, size - size2);
1122             }
1123             size = size2;
1124             break;
1125         }
1126         buf = buf2;
1127     }
1128 #endif
1129 
1130     /* Request large pages for the buffer.  */
1131     qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
1132 
1133     tcg_ctx->code_gen_buffer = buf;
1134     return true;
1135 }
1136 
1137 #ifndef CONFIG_TCG_INTERPRETER
1138 #ifdef CONFIG_POSIX
1139 #include "qemu/memfd.h"
1140 
1141 static bool alloc_code_gen_buffer_splitwx_memfd(size_t size, Error **errp)
1142 {
1143     void *buf_rw = NULL, *buf_rx = MAP_FAILED;
1144     int fd = -1;
1145 
1146 #ifdef __mips__
1147     /* Find space for the RX mapping, vs the 256MiB regions. */
1148     if (!alloc_code_gen_buffer_anon(size, PROT_NONE,
1149                                     MAP_PRIVATE | MAP_ANONYMOUS |
1150                                     MAP_NORESERVE, errp)) {
1151         return false;
1152     }
1153     /* The size of the mapping may have been adjusted. */
1154     size = tcg_ctx->code_gen_buffer_size;
1155     buf_rx = tcg_ctx->code_gen_buffer;
1156 #endif
1157 
1158     buf_rw = qemu_memfd_alloc("tcg-jit", size, 0, &fd, errp);
1159     if (buf_rw == NULL) {
1160         goto fail;
1161     }
1162 
1163 #ifdef __mips__
1164     void *tmp = mmap(buf_rx, size, PROT_READ | PROT_EXEC,
1165                      MAP_SHARED | MAP_FIXED, fd, 0);
1166     if (tmp != buf_rx) {
1167         goto fail_rx;
1168     }
1169 #else
1170     buf_rx = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_SHARED, fd, 0);
1171     if (buf_rx == MAP_FAILED) {
1172         goto fail_rx;
1173     }
1174 #endif
1175 
1176     close(fd);
1177     tcg_ctx->code_gen_buffer = buf_rw;
1178     tcg_ctx->code_gen_buffer_size = size;
1179     tcg_splitwx_diff = buf_rx - buf_rw;
1180 
1181     /* Request large pages for the buffer and the splitwx.  */
1182     qemu_madvise(buf_rw, size, QEMU_MADV_HUGEPAGE);
1183     qemu_madvise(buf_rx, size, QEMU_MADV_HUGEPAGE);
1184     return true;
1185 
1186  fail_rx:
1187     error_setg_errno(errp, errno, "failed to map shared memory for execute");
1188  fail:
1189     if (buf_rx != MAP_FAILED) {
1190         munmap(buf_rx, size);
1191     }
1192     if (buf_rw) {
1193         munmap(buf_rw, size);
1194     }
1195     if (fd >= 0) {
1196         close(fd);
1197     }
1198     return false;
1199 }
1200 #endif /* CONFIG_POSIX */
1201 
1202 #ifdef CONFIG_DARWIN
1203 #include <mach/mach.h>
1204 
1205 extern kern_return_t mach_vm_remap(vm_map_t target_task,
1206                                    mach_vm_address_t *target_address,
1207                                    mach_vm_size_t size,
1208                                    mach_vm_offset_t mask,
1209                                    int flags,
1210                                    vm_map_t src_task,
1211                                    mach_vm_address_t src_address,
1212                                    boolean_t copy,
1213                                    vm_prot_t *cur_protection,
1214                                    vm_prot_t *max_protection,
1215                                    vm_inherit_t inheritance);
1216 
1217 static bool alloc_code_gen_buffer_splitwx_vmremap(size_t size, Error **errp)
1218 {
1219     kern_return_t ret;
1220     mach_vm_address_t buf_rw, buf_rx;
1221     vm_prot_t cur_prot, max_prot;
1222 
1223     /* Map the read-write portion via normal anon memory. */
1224     if (!alloc_code_gen_buffer_anon(size, PROT_READ | PROT_WRITE,
1225                                     MAP_PRIVATE | MAP_ANONYMOUS, errp)) {
1226         return false;
1227     }
1228 
1229     buf_rw = (mach_vm_address_t)tcg_ctx->code_gen_buffer;
1230     buf_rx = 0;
1231     ret = mach_vm_remap(mach_task_self(),
1232                         &buf_rx,
1233                         size,
1234                         0,
1235                         VM_FLAGS_ANYWHERE,
1236                         mach_task_self(),
1237                         buf_rw,
1238                         false,
1239                         &cur_prot,
1240                         &max_prot,
1241                         VM_INHERIT_NONE);
1242     if (ret != KERN_SUCCESS) {
1243         /* TODO: Convert "ret" to a human-readable error message. */
1244         error_setg(errp, "vm_remap for jit splitwx failed");
1245         munmap((void *)buf_rw, size);
1246         return false;
1247     }
1248 
1249     if (mprotect((void *)buf_rx, size, PROT_READ | PROT_EXEC) != 0) {
1250         error_setg_errno(errp, errno, "mprotect for jit splitwx");
1251         munmap((void *)buf_rx, size);
1252         munmap((void *)buf_rw, size);
1253         return false;
1254     }
1255 
1256     tcg_splitwx_diff = buf_rx - buf_rw;
1257     return true;
1258 }
1259 #endif /* CONFIG_DARWIN */
1260 #endif /* CONFIG_TCG_INTERPRETER */
1261 
1262 static bool alloc_code_gen_buffer_splitwx(size_t size, Error **errp)
1263 {
1264 #ifndef CONFIG_TCG_INTERPRETER
1265 # ifdef CONFIG_DARWIN
1266     return alloc_code_gen_buffer_splitwx_vmremap(size, errp);
1267 # endif
1268 # ifdef CONFIG_POSIX
1269     return alloc_code_gen_buffer_splitwx_memfd(size, errp);
1270 # endif
1271 #endif
1272     error_setg(errp, "jit split-wx not supported");
1273     return false;
1274 }
1275 
1276 static bool alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
1277 {
1278     ERRP_GUARD();
1279     int prot, flags;
1280 
1281     if (splitwx) {
1282         if (alloc_code_gen_buffer_splitwx(size, errp)) {
1283             return true;
1284         }
1285         /*
1286          * If splitwx force-on (1), fail;
1287          * if splitwx default-on (-1), fall through to splitwx off.
1288          */
1289         if (splitwx > 0) {
1290             return false;
1291         }
1292         error_free_or_abort(errp);
1293     }
1294 
1295     prot = PROT_READ | PROT_WRITE | PROT_EXEC;
1296     flags = MAP_PRIVATE | MAP_ANONYMOUS;
1297 #ifdef CONFIG_TCG_INTERPRETER
1298     /* The tcg interpreter does not need execute permission. */
1299     prot = PROT_READ | PROT_WRITE;
1300 #elif defined(CONFIG_DARWIN)
1301     /* Applicable to both iOS and macOS (Apple Silicon). */
1302     if (!splitwx) {
1303         flags |= MAP_JIT;
1304     }
1305 #endif
1306 
1307     return alloc_code_gen_buffer_anon(size, prot, flags, errp);
1308 }
1309 #endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */
1310 
1311 static bool tb_cmp(const void *ap, const void *bp)
1312 {
1313     const TranslationBlock *a = ap;
1314     const TranslationBlock *b = bp;
1315 
1316     return a->pc == b->pc &&
1317         a->cs_base == b->cs_base &&
1318         a->flags == b->flags &&
1319         (tb_cflags(a) & CF_HASH_MASK) == (tb_cflags(b) & CF_HASH_MASK) &&
1320         a->trace_vcpu_dstate == b->trace_vcpu_dstate &&
1321         a->page_addr[0] == b->page_addr[0] &&
1322         a->page_addr[1] == b->page_addr[1];
1323 }
1324 
1325 static void tb_htable_init(void)
1326 {
1327     unsigned int mode = QHT_MODE_AUTO_RESIZE;
1328 
1329     qht_init(&tb_ctx.htable, tb_cmp, CODE_GEN_HTABLE_SIZE, mode);
1330 }
1331 
1332 /* Must be called before using the QEMU cpus. 'tb_size' is the size
1333    (in bytes) allocated to the translation buffer. Zero means default
1334    size. */
1335 void tcg_exec_init(unsigned long tb_size, int splitwx)
1336 {
1337     bool ok;
1338 
1339     tcg_allowed = true;
1340     cpu_gen_init();
1341     page_init();
1342     tb_htable_init();
1343 
1344     ok = alloc_code_gen_buffer(size_code_gen_buffer(tb_size),
1345                                splitwx, &error_fatal);
1346     assert(ok);
1347 
1348 #if defined(CONFIG_SOFTMMU)
1349     /* There's no guest base to take into account, so go ahead and
1350        initialize the prologue now.  */
1351     tcg_prologue_init(tcg_ctx);
1352 #endif
1353 }
1354 
1355 /* call with @p->lock held */
1356 static inline void invalidate_page_bitmap(PageDesc *p)
1357 {
1358     assert_page_locked(p);
1359 #ifdef CONFIG_SOFTMMU
1360     g_free(p->code_bitmap);
1361     p->code_bitmap = NULL;
1362     p->code_write_count = 0;
1363 #endif
1364 }
1365 
1366 /* Set to NULL all the 'first_tb' fields in all PageDescs. */
1367 static void page_flush_tb_1(int level, void **lp)
1368 {
1369     int i;
1370 
1371     if (*lp == NULL) {
1372         return;
1373     }
1374     if (level == 0) {
1375         PageDesc *pd = *lp;
1376 
1377         for (i = 0; i < V_L2_SIZE; ++i) {
1378             page_lock(&pd[i]);
1379             pd[i].first_tb = (uintptr_t)NULL;
1380             invalidate_page_bitmap(pd + i);
1381             page_unlock(&pd[i]);
1382         }
1383     } else {
1384         void **pp = *lp;
1385 
1386         for (i = 0; i < V_L2_SIZE; ++i) {
1387             page_flush_tb_1(level - 1, pp + i);
1388         }
1389     }
1390 }
1391 
1392 static void page_flush_tb(void)
1393 {
1394     int i, l1_sz = v_l1_size;
1395 
1396     for (i = 0; i < l1_sz; i++) {
1397         page_flush_tb_1(v_l2_levels, l1_map + i);
1398     }
1399 }
1400 
1401 static gboolean tb_host_size_iter(gpointer key, gpointer value, gpointer data)
1402 {
1403     const TranslationBlock *tb = value;
1404     size_t *size = data;
1405 
1406     *size += tb->tc.size;
1407     return false;
1408 }
1409 
1410 /* flush all the translation blocks */
1411 static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
1412 {
1413     bool did_flush = false;
1414 
1415     mmap_lock();
1416     /* If it has already been done on request of another CPU,
1417      * just retry.
1418      */
1419     if (tb_ctx.tb_flush_count != tb_flush_count.host_int) {
1420         goto done;
1421     }
1422     did_flush = true;
1423 
1424     if (DEBUG_TB_FLUSH_GATE) {
1425         size_t nb_tbs = tcg_nb_tbs();
1426         size_t host_size = 0;
1427 
1428         tcg_tb_foreach(tb_host_size_iter, &host_size);
1429         printf("qemu: flush code_size=%zu nb_tbs=%zu avg_tb_size=%zu\n",
1430                tcg_code_size(), nb_tbs, nb_tbs > 0 ? host_size / nb_tbs : 0);
1431     }
1432 
1433     CPU_FOREACH(cpu) {
1434         cpu_tb_jmp_cache_clear(cpu);
1435     }
1436 
1437     qht_reset_size(&tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
1438     page_flush_tb();
1439 
1440     tcg_region_reset_all();
1441     /* XXX: flush processor icache at this point if cache flush is
1442        expensive */
1443     qatomic_mb_set(&tb_ctx.tb_flush_count, tb_ctx.tb_flush_count + 1);
1444 
1445 done:
1446     mmap_unlock();
1447     if (did_flush) {
1448         qemu_plugin_flush_cb();
1449     }
1450 }
1451 
1452 void tb_flush(CPUState *cpu)
1453 {
1454     if (tcg_enabled()) {
1455         unsigned tb_flush_count = qatomic_mb_read(&tb_ctx.tb_flush_count);
1456 
1457         if (cpu_in_exclusive_context(cpu)) {
1458             do_tb_flush(cpu, RUN_ON_CPU_HOST_INT(tb_flush_count));
1459         } else {
1460             async_safe_run_on_cpu(cpu, do_tb_flush,
1461                                   RUN_ON_CPU_HOST_INT(tb_flush_count));
1462         }
1463     }
1464 }
1465 
1466 /*
1467  * Formerly ifdef DEBUG_TB_CHECK. These debug functions are user-mode-only,
1468  * so in order to prevent bit rot we compile them unconditionally in user-mode,
1469  * and let the optimizer get rid of them by wrapping their user-only callers
1470  * with if (DEBUG_TB_CHECK_GATE).
1471  */
1472 #ifdef CONFIG_USER_ONLY
1473 
1474 static void do_tb_invalidate_check(void *p, uint32_t hash, void *userp)
1475 {
1476     TranslationBlock *tb = p;
1477     target_ulong addr = *(target_ulong *)userp;
1478 
1479     if (!(addr + TARGET_PAGE_SIZE <= tb->pc || addr >= tb->pc + tb->size)) {
1480         printf("ERROR invalidate: address=" TARGET_FMT_lx
1481                " PC=%08lx size=%04x\n", addr, (long)tb->pc, tb->size);
1482     }
1483 }
1484 
1485 /* verify that all the pages have correct rights for code
1486  *
1487  * Called with mmap_lock held.
1488  */
1489 static void tb_invalidate_check(target_ulong address)
1490 {
1491     address &= TARGET_PAGE_MASK;
1492     qht_iter(&tb_ctx.htable, do_tb_invalidate_check, &address);
1493 }
1494 
1495 static void do_tb_page_check(void *p, uint32_t hash, void *userp)
1496 {
1497     TranslationBlock *tb = p;
1498     int flags1, flags2;
1499 
1500     flags1 = page_get_flags(tb->pc);
1501     flags2 = page_get_flags(tb->pc + tb->size - 1);
1502     if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
1503         printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
1504                (long)tb->pc, tb->size, flags1, flags2);
1505     }
1506 }
1507 
1508 /* verify that all the pages have correct rights for code */
1509 static void tb_page_check(void)
1510 {
1511     qht_iter(&tb_ctx.htable, do_tb_page_check, NULL);
1512 }
1513 
1514 #endif /* CONFIG_USER_ONLY */
1515 
1516 /*
1517  * user-mode: call with mmap_lock held
1518  * !user-mode: call with @pd->lock held
1519  */
1520 static inline void tb_page_remove(PageDesc *pd, TranslationBlock *tb)
1521 {
1522     TranslationBlock *tb1;
1523     uintptr_t *pprev;
1524     unsigned int n1;
1525 
1526     assert_page_locked(pd);
1527     pprev = &pd->first_tb;
1528     PAGE_FOR_EACH_TB(pd, tb1, n1) {
1529         if (tb1 == tb) {
1530             *pprev = tb1->page_next[n1];
1531             return;
1532         }
1533         pprev = &tb1->page_next[n1];
1534     }
1535     g_assert_not_reached();
1536 }
1537 
1538 /* remove @orig from its @n_orig-th jump list */
1539 static inline void tb_remove_from_jmp_list(TranslationBlock *orig, int n_orig)
1540 {
1541     uintptr_t ptr, ptr_locked;
1542     TranslationBlock *dest;
1543     TranslationBlock *tb;
1544     uintptr_t *pprev;
1545     int n;
1546 
1547     /* mark the LSB of jmp_dest[] so that no further jumps can be inserted */
1548     ptr = qatomic_or_fetch(&orig->jmp_dest[n_orig], 1);
1549     dest = (TranslationBlock *)(ptr & ~1);
1550     if (dest == NULL) {
1551         return;
1552     }
1553 
1554     qemu_spin_lock(&dest->jmp_lock);
1555     /*
1556      * While acquiring the lock, the jump might have been removed if the
1557      * destination TB was invalidated; check again.
1558      */
1559     ptr_locked = qatomic_read(&orig->jmp_dest[n_orig]);
1560     if (ptr_locked != ptr) {
1561         qemu_spin_unlock(&dest->jmp_lock);
1562         /*
1563          * The only possibility is that the jump was unlinked via
1564      * tb_jmp_unlink(dest). Seeing another destination here would be a bug,
1565          * because we set the LSB above.
1566          */
1567         g_assert(ptr_locked == 1 && dest->cflags & CF_INVALID);
1568         return;
1569     }
1570     /*
1571      * We first acquired the lock, and since the destination pointer matches,
1572      * we know for sure that @orig is in the jmp list.
1573      */
1574     pprev = &dest->jmp_list_head;
1575     TB_FOR_EACH_JMP(dest, tb, n) {
1576         if (tb == orig && n == n_orig) {
1577             *pprev = tb->jmp_list_next[n];
1578             /* no need to set orig->jmp_dest[n]; setting the LSB was enough */
1579             qemu_spin_unlock(&dest->jmp_lock);
1580             return;
1581         }
1582         pprev = &tb->jmp_list_next[n];
1583     }
1584     g_assert_not_reached();
1585 }
1586 
1587 /* reset the jump entry 'n' of a TB so that it is not chained to
1588    another TB */
1589 static inline void tb_reset_jump(TranslationBlock *tb, int n)
1590 {
1591     uintptr_t addr = (uintptr_t)(tb->tc.ptr + tb->jmp_reset_offset[n]);
1592     tb_set_jmp_target(tb, n, addr);
1593 }
1594 
1595 /* remove any jumps to the TB */
1596 static inline void tb_jmp_unlink(TranslationBlock *dest)
1597 {
1598     TranslationBlock *tb;
1599     int n;
1600 
1601     qemu_spin_lock(&dest->jmp_lock);
1602 
1603     TB_FOR_EACH_JMP(dest, tb, n) {
1604         tb_reset_jump(tb, n);
1605         qatomic_and(&tb->jmp_dest[n], (uintptr_t)NULL | 1);
1606         /* No need to clear the list entry; setting the dest ptr is enough */
1607     }
1608     dest->jmp_list_head = (uintptr_t)NULL;
1609 
1610     qemu_spin_unlock(&dest->jmp_lock);
1611 }
1612 
1613 /*
1614  * In user-mode, call with mmap_lock held.
1615  * In !user-mode, if @rm_from_page_list is set, call with the TB's pages'
1616  * locks held.
1617  */
1618 static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
1619 {
1620     CPUState *cpu;
1621     PageDesc *p;
1622     uint32_t h;
1623     tb_page_addr_t phys_pc;
1624 
1625     assert_memory_lock();
1626 
1627     /* make sure no further incoming jumps will be chained to this TB */
1628     qemu_spin_lock(&tb->jmp_lock);
1629     qatomic_set(&tb->cflags, tb->cflags | CF_INVALID);
1630     qemu_spin_unlock(&tb->jmp_lock);
1631 
1632     /* remove the TB from the hash list */
1633     phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1634     h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb_cflags(tb) & CF_HASH_MASK,
1635                      tb->trace_vcpu_dstate);
1636     if (!(tb->cflags & CF_NOCACHE) &&
1637         !qht_remove(&tb_ctx.htable, tb, h)) {
1638         return;
1639     }
1640 
1641     /* remove the TB from the page list */
1642     if (rm_from_page_list) {
1643         p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
1644         tb_page_remove(p, tb);
1645         invalidate_page_bitmap(p);
1646         if (tb->page_addr[1] != -1) {
1647             p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
1648             tb_page_remove(p, tb);
1649             invalidate_page_bitmap(p);
1650         }
1651     }
1652 
1653     /* remove the TB from the per-CPU jump caches */
1654     h = tb_jmp_cache_hash_func(tb->pc);
1655     CPU_FOREACH(cpu) {
1656         if (qatomic_read(&cpu->tb_jmp_cache[h]) == tb) {
1657             qatomic_set(&cpu->tb_jmp_cache[h], NULL);
1658         }
1659     }
1660 
1661     /* remove this TB from the two jump lists */
1662     tb_remove_from_jmp_list(tb, 0);
1663     tb_remove_from_jmp_list(tb, 1);
1664 
1665     /* remove any remaining jumps to this TB */
1666     tb_jmp_unlink(tb);
1667 
1668     qatomic_set(&tcg_ctx->tb_phys_invalidate_count,
1669                tcg_ctx->tb_phys_invalidate_count + 1);
1670 }
1671 
1672 static void tb_phys_invalidate__locked(TranslationBlock *tb)
1673 {
1674     qemu_thread_jit_write();
1675     do_tb_phys_invalidate(tb, true);
1676     qemu_thread_jit_execute();
1677 }
1678 
1679 /* invalidate one TB
1680  *
1681  * Called with mmap_lock held in user-mode.
1682  */
1683 void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
1684 {
1685     if (page_addr == -1 && tb->page_addr[0] != -1) {
1686         page_lock_tb(tb);
1687         do_tb_phys_invalidate(tb, true);
1688         page_unlock_tb(tb);
1689     } else {
1690         do_tb_phys_invalidate(tb, false);
1691     }
1692 }
1693 
1694 #ifdef CONFIG_SOFTMMU
1695 /* call with @p->lock held */
1696 static void build_page_bitmap(PageDesc *p)
1697 {
1698     int n, tb_start, tb_end;
1699     TranslationBlock *tb;
1700 
1701     assert_page_locked(p);
1702     p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE);
1703 
1704     PAGE_FOR_EACH_TB(p, tb, n) {
1705         /* NOTE: this is subtle as a TB may span two physical pages */
1706         if (n == 0) {
1707             /* NOTE: tb_end may be after the end of the page, but
1708                it is not a problem */
1709             tb_start = tb->pc & ~TARGET_PAGE_MASK;
1710             tb_end = tb_start + tb->size;
1711             if (tb_end > TARGET_PAGE_SIZE) {
1712                 tb_end = TARGET_PAGE_SIZE;
1713             }
1714         } else {
1715             tb_start = 0;
1716             tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1717         }
1718         bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start);
1719     }
1720 }
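/* Worked example (hypothetical TB, assuming 4k target pages): a TB at
   page offset 0xff0 with size 0x20 spans two pages; the n == 0 pass
   marks [0xff0, 0x1000) in the first page's bitmap and the n == 1 pass
   marks [0, 0x10) in the second page's.  */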
1721 #endif
1722 
1723 /* add the tb to the target page and protect it if necessary
1724  *
1725  * Called with mmap_lock held for user-mode emulation.
1726  * Called with @p->lock held in !user-mode.
1727  */
1728 static inline void tb_page_add(PageDesc *p, TranslationBlock *tb,
1729                                unsigned int n, tb_page_addr_t page_addr)
1730 {
1731 #ifndef CONFIG_USER_ONLY
1732     bool page_already_protected;
1733 #endif
1734 
1735     assert_page_locked(p);
1736 
1737     tb->page_addr[n] = page_addr;
1738     tb->page_next[n] = p->first_tb;
1739 #ifndef CONFIG_USER_ONLY
1740     page_already_protected = p->first_tb != (uintptr_t)NULL;
1741 #endif
1742     p->first_tb = (uintptr_t)tb | n;
1743     invalidate_page_bitmap(p);
1744 
1745 #if defined(CONFIG_USER_ONLY)
1746     if (p->flags & PAGE_WRITE) {
1747         target_ulong addr;
1748         PageDesc *p2;
1749         int prot;
1750 
1751         /* force the host page as non-writable (writes will have a
1752            page fault + mprotect overhead) */
1753         page_addr &= qemu_host_page_mask;
1754         prot = 0;
1755         for (addr = page_addr; addr < page_addr + qemu_host_page_size;
1756              addr += TARGET_PAGE_SIZE) {
1758             p2 = page_find(addr >> TARGET_PAGE_BITS);
1759             if (!p2) {
1760                 continue;
1761             }
1762             prot |= p2->flags;
1763             p2->flags &= ~PAGE_WRITE;
1764         }
1765         mprotect(g2h(page_addr), qemu_host_page_size,
1766                  (prot & PAGE_BITS) & ~PAGE_WRITE);
1767         if (DEBUG_TB_INVALIDATE_GATE) {
1768             printf("protecting code page: 0x" TB_PAGE_ADDR_FMT "\n", page_addr);
1769         }
1770     }
1771 #else
1772     /* if some code is already present, then the pages are already
1773        protected. So we handle the case where only the first TB is
1774        allocated in a physical page */
1775     if (!page_already_protected) {
1776         tlb_protect_code(page_addr);
1777     }
1778 #endif
1779 }
1780 
1781 /* add a new TB and link it to the physical page tables. phys_page2 is
1782  * (-1) to indicate that only one page contains the TB.
1783  *
1784  * Called with mmap_lock held for user-mode emulation.
1785  *
1786  * Returns a pointer to @tb, or a pointer to an existing TB that matches @tb.
1787  * Note that in !user-mode, another thread might have already added a TB
1788  * for the same block of guest code that @tb corresponds to. In that case,
1789  * the caller should discard the original @tb, and use instead the returned TB.
1790  */
1791 static TranslationBlock *
1792 tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
1793              tb_page_addr_t phys_page2)
1794 {
1795     PageDesc *p;
1796     PageDesc *p2 = NULL;
1797 
1798     assert_memory_lock();
1799 
1800     if (phys_pc == -1) {
1801         /*
1802          * If the TB is not associated with a physical RAM page then
1803          * it must be a temporary one-insn TB, and we have nothing to do
1804          * except fill in the page_addr[] fields.
1805          */
1806         assert(tb->cflags & CF_NOCACHE);
1807         tb->page_addr[0] = tb->page_addr[1] = -1;
1808         return tb;
1809     }
1810 
1811     /*
1812      * Add the TB to the page list, first acquiring the pages' locks.
1813      * We keep the locks held until after inserting the TB in the hash table,
1814      * so that if the insertion fails we know for sure that the TBs are still
1815      * in the page descriptors.
1816      * Note that inserting into the hash table first isn't an option, since
1817      * we can only insert TBs that are fully initialized.
1818      */
1819     page_lock_pair(&p, phys_pc, &p2, phys_page2, 1);
1820     tb_page_add(p, tb, 0, phys_pc & TARGET_PAGE_MASK);
1821     if (p2) {
1822         tb_page_add(p2, tb, 1, phys_page2);
1823     } else {
1824         tb->page_addr[1] = -1;
1825     }
1826 
1827     if (!(tb->cflags & CF_NOCACHE)) {
1828         void *existing_tb = NULL;
1829         uint32_t h;
1830 
1831         /* add in the hash table */
1832         h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->cflags & CF_HASH_MASK,
1833                          tb->trace_vcpu_dstate);
1834         qht_insert(&tb_ctx.htable, tb, h, &existing_tb);
1835 
1836         /* remove TB from the page(s) if we couldn't insert it */
1837         if (unlikely(existing_tb)) {
1838             tb_page_remove(p, tb);
1839             invalidate_page_bitmap(p);
1840             if (p2) {
1841                 tb_page_remove(p2, tb);
1842                 invalidate_page_bitmap(p2);
1843             }
1844             tb = existing_tb;
1845         }
1846     }
1847 
1848     if (p2 && p2 != p) {
1849         page_unlock(p2);
1850     }
1851     page_unlock(p);
1852 
1853 #ifdef CONFIG_USER_ONLY
1854     if (DEBUG_TB_CHECK_GATE) {
1855         tb_page_check();
1856     }
1857 #endif
1858     return tb;
1859 }
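
/*
 * Hedged caller sketch for the race described above (illustrative only;
 * tb_gen_code() below is the in-tree caller, which additionally rewinds its
 * code buffer before discarding the losing TB):
 *
 *     TranslationBlock *existing = tb_link_page(tb, phys_pc, phys_page2);
 *     if (existing != tb) {
 *         tb_destroy(tb);      // lost the insertion race
 *         tb = existing;       // use the winner's translation
 *     }
 */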
1860 
1861 /* Called with mmap_lock held for user mode emulation.  */
1862 TranslationBlock *tb_gen_code(CPUState *cpu,
1863                               target_ulong pc, target_ulong cs_base,
1864                               uint32_t flags, int cflags)
1865 {
1866     CPUArchState *env = cpu->env_ptr;
1867     TranslationBlock *tb, *existing_tb;
1868     tb_page_addr_t phys_pc, phys_page2;
1869     target_ulong virt_page2;
1870     tcg_insn_unit *gen_code_buf;
1871     int gen_code_size, search_size, max_insns;
1872 #ifdef CONFIG_PROFILER
1873     TCGProfile *prof = &tcg_ctx->prof;
1874     int64_t ti;
1875 #endif
1876 
1877     assert_memory_lock();
1878     qemu_thread_jit_write();
1879 
1880     phys_pc = get_page_addr_code(env, pc);
1881 
1882     if (phys_pc == -1) {
1883         /* Generate a temporary TB with 1 insn in it */
1884         cflags &= ~CF_COUNT_MASK;
1885         cflags |= CF_NOCACHE | 1;
1886     }
1887 
1888     cflags &= ~CF_CLUSTER_MASK;
1889     cflags |= cpu->cluster_index << CF_CLUSTER_SHIFT;
1890 
1891     max_insns = cflags & CF_COUNT_MASK;
1892     if (max_insns == 0) {
1893         max_insns = CF_COUNT_MASK;
1894     }
1895     if (max_insns > TCG_MAX_INSNS) {
1896         max_insns = TCG_MAX_INSNS;
1897     }
1898     if (cpu->singlestep_enabled || singlestep) {
1899         max_insns = 1;
1900     }
1901 
1902  buffer_overflow:
1903     tb = tcg_tb_alloc(tcg_ctx);
1904     if (unlikely(!tb)) {
1905         /* flush must be done */
1906         tb_flush(cpu);
1907         mmap_unlock();
1908         /* Make the execution loop process the flush as soon as possible.  */
1909         cpu->exception_index = EXCP_INTERRUPT;
1910         cpu_loop_exit(cpu);
1911     }
1912 
1913     gen_code_buf = tcg_ctx->code_gen_ptr;
1914     tb->tc.ptr = tcg_splitwx_to_rx(gen_code_buf);
1915     tb->pc = pc;
1916     tb->cs_base = cs_base;
1917     tb->flags = flags;
1918     tb->cflags = cflags;
1919     tb->orig_tb = NULL;
1920     tb->trace_vcpu_dstate = *cpu->trace_dstate;
1921     tcg_ctx->tb_cflags = cflags;
1922  tb_overflow:
1923 
1924 #ifdef CONFIG_PROFILER
1925     /* includes aborted translations because of exceptions */
1926     qatomic_set(&prof->tb_count1, prof->tb_count1 + 1);
1927     ti = profile_getclock();
1928 #endif
1929 
1930     gen_code_size = sigsetjmp(tcg_ctx->jmp_trans, 0);
1931     if (unlikely(gen_code_size != 0)) {
1932         goto error_return;
1933     }
1934 
1935     tcg_func_start(tcg_ctx);
1936 
1937     tcg_ctx->cpu = env_cpu(env);
1938     gen_intermediate_code(cpu, tb, max_insns);
1939     tcg_ctx->cpu = NULL;
1940     max_insns = tb->icount;
1941 
1942     trace_translate_block(tb, tb->pc, tb->tc.ptr);
1943 
1944     /* generate machine code */
1945     tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
1946     tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID;
1947     tcg_ctx->tb_jmp_reset_offset = tb->jmp_reset_offset;
1948     if (TCG_TARGET_HAS_direct_jump) {
1949         tcg_ctx->tb_jmp_insn_offset = tb->jmp_target_arg;
1950         tcg_ctx->tb_jmp_target_addr = NULL;
1951     } else {
1952         tcg_ctx->tb_jmp_insn_offset = NULL;
1953         tcg_ctx->tb_jmp_target_addr = tb->jmp_target_arg;
1954     }
1955 
1956 #ifdef CONFIG_PROFILER
1957     qatomic_set(&prof->tb_count, prof->tb_count + 1);
1958     qatomic_set(&prof->interm_time,
1959                 prof->interm_time + profile_getclock() - ti);
1960     ti = profile_getclock();
1961 #endif
1962 
1963     gen_code_size = tcg_gen_code(tcg_ctx, tb);
1964     if (unlikely(gen_code_size < 0)) {
1965  error_return:
1966         switch (gen_code_size) {
1967         case -1:
1968             /*
1969              * Overflow of code_gen_buffer, or the current slice of it.
1970              *
1971              * TODO: We don't need to re-do gen_intermediate_code, nor
1972              * should we re-do the tcg optimization currently hidden
1973              * inside tcg_gen_code.  All that should be required is to
1974              * flush the TBs, allocate a new TB, re-initialize it per
1975              * above, and re-do the actual code generation.
1976              */
1977             qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT,
1978                           "Restarting code generation for "
1979                           "code_gen_buffer overflow\n");
1980             goto buffer_overflow;
1981 
1982         case -2:
1983             /*
1984              * The code generated for the TranslationBlock is too large.
1985              * The maximum size allowed by the unwind info is 64k.
1986              * There may be stricter constraints from relocations
1987              * in the tcg backend.
1988              *
1989              * Try again with half as many insns as we attempted this time.
1990              * If a single insn overflows, there's a bug somewhere...
1991              */
1992             assert(max_insns > 1);
1993             max_insns /= 2;
1994             qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT,
1995                           "Restarting code generation with "
1996                           "smaller translation block (max %d insns)\n",
1997                           max_insns);
1998             goto tb_overflow;
1999 
2000         default:
2001             g_assert_not_reached();
2002         }
2003     }
2004     search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
2005     if (unlikely(search_size < 0)) {
2006         goto buffer_overflow;
2007     }
2008     tb->tc.size = gen_code_size;
2009 
2010 #ifdef CONFIG_PROFILER
2011     qatomic_set(&prof->code_time, prof->code_time + profile_getclock() - ti);
2012     qatomic_set(&prof->code_in_len, prof->code_in_len + tb->size);
2013     qatomic_set(&prof->code_out_len, prof->code_out_len + gen_code_size);
2014     qatomic_set(&prof->search_out_len, prof->search_out_len + search_size);
2015 #endif
2016 
2017 #ifdef DEBUG_DISAS
2018     if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
2019         qemu_log_in_addr_range(tb->pc)) {
2020         FILE *logfile = qemu_log_lock();
2021         int code_size, data_size;
2022         const tcg_target_ulong *rx_data_gen_ptr;
2023         size_t chunk_start;
2024         int insn = 0;
2025 
2026         if (tcg_ctx->data_gen_ptr) {
2027             rx_data_gen_ptr = tcg_splitwx_to_rx(tcg_ctx->data_gen_ptr);
2028             code_size = (const void *)rx_data_gen_ptr - tb->tc.ptr;
2029             data_size = gen_code_size - code_size;
2030         } else {
2031             rx_data_gen_ptr = 0;
2032             code_size = gen_code_size;
2033             data_size = 0;
2034         }
2035 
2036         /* Dump header and the first instruction */
2037         qemu_log("OUT: [size=%d]\n", gen_code_size);
2038         qemu_log("  -- guest addr 0x" TARGET_FMT_lx " + tb prologue\n",
2039                  tcg_ctx->gen_insn_data[insn][0]);
2040         chunk_start = tcg_ctx->gen_insn_end_off[insn];
2041         log_disas(tb->tc.ptr, chunk_start);
2042 
2043         /*
2044          * Dump each instruction chunk, wrapping up empty chunks into
2045          * the next instruction. The whole array is offset so the
2046          * first entry is the beginning of the 2nd instruction.
2047          */
2048         while (insn < tb->icount) {
2049             size_t chunk_end = tcg_ctx->gen_insn_end_off[insn];
2050             if (chunk_end > chunk_start) {
2051                 qemu_log("  -- guest addr 0x" TARGET_FMT_lx "\n",
2052                          tcg_ctx->gen_insn_data[insn][0]);
2053                 log_disas(tb->tc.ptr + chunk_start, chunk_end - chunk_start);
2054                 chunk_start = chunk_end;
2055             }
2056             insn++;
2057         }
2058 
2059         if (chunk_start < code_size) {
2060             qemu_log("  -- tb slow paths + alignment\n");
2061             log_disas(tb->tc.ptr + chunk_start, code_size - chunk_start);
2062         }
2063 
2064         /* Finally dump any data we may have after the block */
2065         if (data_size) {
2066             int i;
2067             qemu_log("  data: [size=%d]\n", data_size);
2068             for (i = 0; i < data_size / sizeof(tcg_target_ulong); i++) {
2069                 qemu_log("0x%08" PRIxPTR ":  .quad  0x%" TCG_PRIlx "\n",
2070                          (uintptr_t)&rx_data_gen_ptr[i], rx_data_gen_ptr[i]);
2071             }
2072         }
2073         qemu_log("\n");
2074         qemu_log_flush();
2075         qemu_log_unlock(logfile);
2076     }
2077 #endif
2078 
2079     qatomic_set(&tcg_ctx->code_gen_ptr, (void *)
2080         ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
2081                  CODE_GEN_ALIGN));
2082 
2083     /* init jump list */
2084     qemu_spin_init(&tb->jmp_lock);
2085     tb->jmp_list_head = (uintptr_t)NULL;
2086     tb->jmp_list_next[0] = (uintptr_t)NULL;
2087     tb->jmp_list_next[1] = (uintptr_t)NULL;
2088     tb->jmp_dest[0] = (uintptr_t)NULL;
2089     tb->jmp_dest[1] = (uintptr_t)NULL;
2090 
2091     /* init original jump addresses which have been set during tcg_gen_code() */
2092     if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
2093         tb_reset_jump(tb, 0);
2094     }
2095     if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
2096         tb_reset_jump(tb, 1);
2097     }
2098 
2099     /* check next page if needed */
2100     virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
2101     phys_page2 = -1;
2102     if ((pc & TARGET_PAGE_MASK) != virt_page2) {
2103         phys_page2 = get_page_addr_code(env, virt_page2);
2104     }
2105     /*
2106      * No explicit memory barrier is required -- tb_link_page() makes the
2107      * TB visible in a consistent state.
2108      */
2109     existing_tb = tb_link_page(tb, phys_pc, phys_page2);
2110     /* if the TB already exists, discard what we just translated */
2111     if (unlikely(existing_tb != tb)) {
2112         uintptr_t orig_aligned = (uintptr_t)gen_code_buf;
2113 
2114         orig_aligned -= ROUND_UP(sizeof(*tb), qemu_icache_linesize);
2115         qatomic_set(&tcg_ctx->code_gen_ptr, (void *)orig_aligned);
2116         tb_destroy(tb);
2117         return existing_tb;
2118     }
2119     tcg_tb_insert(tb);
2120     return tb;
2121 }
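
/*
 * Hedged sketch of how the execution loop reaches this function on a
 * TB-cache miss (simplified; not the literal cpu-exec code):
 *
 *     mmap_lock();
 *     tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
 *     mmap_unlock();
 *
 * The returned TB may be a pre-existing equivalent translation rather than
 * a freshly generated one, courtesy of tb_link_page() above.
 */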
2122 
2123 /*
2124  * @p must be non-NULL.
2125  * user-mode: call with mmap_lock held.
2126  * !user-mode: call with all @pages locked.
2127  */
2128 static void
2129 tb_invalidate_phys_page_range__locked(struct page_collection *pages,
2130                                       PageDesc *p, tb_page_addr_t start,
2131                                       tb_page_addr_t end,
2132                                       uintptr_t retaddr)
2133 {
2134     TranslationBlock *tb;
2135     tb_page_addr_t tb_start, tb_end;
2136     int n;
2137 #ifdef TARGET_HAS_PRECISE_SMC
2138     CPUState *cpu = current_cpu;
2139     CPUArchState *env = NULL;
2140     bool current_tb_not_found = retaddr != 0;
2141     bool current_tb_modified = false;
2142     TranslationBlock *current_tb = NULL;
2143     target_ulong current_pc = 0;
2144     target_ulong current_cs_base = 0;
2145     uint32_t current_flags = 0;
2146 #endif /* TARGET_HAS_PRECISE_SMC */
2147 
2148     assert_page_locked(p);
2149 
2150 #if defined(TARGET_HAS_PRECISE_SMC)
2151     if (cpu != NULL) {
2152         env = cpu->env_ptr;
2153     }
2154 #endif
2155 
2156     /* we remove all the TBs in the range [start, end) */
2157     /* XXX: see if in some cases it could be faster to invalidate all
2158        the code */
2159     PAGE_FOR_EACH_TB(p, tb, n) {
2160         assert_page_locked(p);
2161         /* NOTE: this is subtle as a TB may span two physical pages */
2162         if (n == 0) {
2163             /* NOTE: tb_end may be after the end of the page, but
2164                it is not a problem */
2165             tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
2166             tb_end = tb_start + tb->size;
2167         } else {
2168             tb_start = tb->page_addr[1];
2169             tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
2170         }
2171         if (!(tb_end <= start || tb_start >= end)) {
2172 #ifdef TARGET_HAS_PRECISE_SMC
2173             if (current_tb_not_found) {
2174                 current_tb_not_found = false;
2175                 /* now we have a real cpu fault */
2176                 current_tb = tcg_tb_lookup(retaddr);
2177             }
2178             if (current_tb == tb &&
2179                 (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
2180                 /*
2181                  * If we are modifying the current TB, we must stop
2182                  * its execution. We could be more precise by checking
2183                  * that the modification is after the current PC, but it
2184                  * would require a specialized function to partially
2185                  * restore the CPU state.
2186                  */
2187                 current_tb_modified = true;
2188                 cpu_restore_state_from_tb(cpu, current_tb, retaddr, true);
2189                 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
2190                                      &current_flags);
2191             }
2192 #endif /* TARGET_HAS_PRECISE_SMC */
2193             tb_phys_invalidate__locked(tb);
2194         }
2195     }
2196 #if !defined(CONFIG_USER_ONLY)
2197     /* if no code remains, there is no need to keep using slow writes */
2198     if (!p->first_tb) {
2199         invalidate_page_bitmap(p);
2200         tlb_unprotect_code(start);
2201     }
2202 #endif
2203 #ifdef TARGET_HAS_PRECISE_SMC
2204     if (current_tb_modified) {
2205         page_collection_unlock(pages);
2206         /* Force execution of one insn next time.  */
2207         cpu->cflags_next_tb = 1 | curr_cflags();
2208         mmap_unlock();
2209         cpu_loop_exit_noexc(cpu);
2210     }
2211 #endif
2212 }
2213 
2214 /*
2215  * Invalidate all TBs which intersect with the target physical address range
2216  * [start, end). NOTE: start and end must refer to the *same* physical page.
2220  *
2221  * Called with mmap_lock held for user-mode emulation.
2222  */
2223 void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end)
2224 {
2225     struct page_collection *pages;
2226     PageDesc *p;
2227 
2228     assert_memory_lock();
2229 
2230     p = page_find(start >> TARGET_PAGE_BITS);
2231     if (p == NULL) {
2232         return;
2233     }
2234     pages = page_collection_lock(start, end);
2235     tb_invalidate_phys_page_range__locked(pages, p, start, end, 0);
2236     page_collection_unlock(pages);
2237 }
2238 
2239 /*
2240  * Invalidate all TBs which intersect with the target physical address range
2241  * [start, end). NOTE: start and end may refer to *different* physical pages.
2245  *
2246  * Called with mmap_lock held for user-mode emulation.
2247  */
2248 #ifdef CONFIG_SOFTMMU
2249 void tb_invalidate_phys_range(ram_addr_t start, ram_addr_t end)
2250 #else
2251 void tb_invalidate_phys_range(target_ulong start, target_ulong end)
2252 #endif
2253 {
2254     struct page_collection *pages;
2255     tb_page_addr_t next;
2256 
2257     assert_memory_lock();
2258 
2259     pages = page_collection_lock(start, end);
2260     for (next = (start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2261          start < end;
2262          start = next, next += TARGET_PAGE_SIZE) {
2263         PageDesc *pd = page_find(start >> TARGET_PAGE_BITS);
2264         tb_page_addr_t bound = MIN(next, end);
2265 
2266         if (pd == NULL) {
2267             continue;
2268         }
2269         tb_invalidate_phys_page_range__locked(pages, pd, start, bound, 0);
2270     }
2271     page_collection_unlock(pages);
2272 }
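
/*
 * Worked example for the loop above, assuming TARGET_PAGE_SIZE == 0x1000:
 * invalidating [0x1ff8, 0x3004) is split into one locked call per page,
 *
 *     [0x1ff8, 0x2000), [0x2000, 0x3000), [0x3000, 0x3004)
 *
 * with bound = MIN(next, end) clipping the final chunk.
 */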
2273 
2274 #ifdef CONFIG_SOFTMMU
2275 /* len must be <= 8 and start must be a multiple of len.
2276  * Called via softmmu_template.h when code areas are written to, with
2277  * the iothread mutex not held.
2278  *
2279  * Call with all @pages in the range [@start, @start + len[ locked.
2280  */
2281 void tb_invalidate_phys_page_fast(struct page_collection *pages,
2282                                   tb_page_addr_t start, int len,
2283                                   uintptr_t retaddr)
2284 {
2285     PageDesc *p;
2286 
2287     assert_memory_lock();
2288 
2289     p = page_find(start >> TARGET_PAGE_BITS);
2290     if (!p) {
2291         return;
2292     }
2293 
2294     assert_page_locked(p);
2295     if (!p->code_bitmap &&
2296         ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
2297         build_page_bitmap(p);
2298     }
2299     if (p->code_bitmap) {
2300         unsigned int nr;
2301         unsigned long b;
2302 
2303         nr = start & ~TARGET_PAGE_MASK;
2304         b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
2305         if (b & ((1 << len) - 1)) {
2306             goto do_invalidate;
2307         }
2308     } else {
2309     do_invalidate:
2310         tb_invalidate_phys_page_range__locked(pages, p, start, start + len,
2311                                               retaddr);
2312     }
2313 }
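
/*
 * Illustrative trace of the bitmap test above (example values, assuming
 * BITS_PER_LONG == 64).  For a 4-byte write at page offset 0x104:
 *
 *     nr = 0x104
 *     b  = code_bitmap[BIT_WORD(0x104)] >> (0x104 & 63)
 *        = code_bitmap[4] >> 4, so bits 0..3 of b cover offsets 0x104..0x107
 *     b & ((1 << 4) - 1)  is nonzero iff translated code overlaps the
 *                         written bytes, in which case we must invalidate
 */
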
2314 #else
2315 /* Called with mmap_lock held. If pc is not 0 then it indicates the
2316  * host PC of the faulting store instruction that caused this invalidate.
2317  * Returns true if the caller needs to abort execution of the current
2318  * TB (because it was modified by this store and the guest CPU has
2319  * precise-SMC semantics).
2320  */
2321 static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
2322 {
2323     TranslationBlock *tb;
2324     PageDesc *p;
2325     int n;
2326 #ifdef TARGET_HAS_PRECISE_SMC
2327     TranslationBlock *current_tb = NULL;
2328     CPUState *cpu = current_cpu;
2329     CPUArchState *env = NULL;
2330     int current_tb_modified = 0;
2331     target_ulong current_pc = 0;
2332     target_ulong current_cs_base = 0;
2333     uint32_t current_flags = 0;
2334 #endif
2335 
2336     assert_memory_lock();
2337 
2338     addr &= TARGET_PAGE_MASK;
2339     p = page_find(addr >> TARGET_PAGE_BITS);
2340     if (!p) {
2341         return false;
2342     }
2343 
2344 #ifdef TARGET_HAS_PRECISE_SMC
2345     if (p->first_tb && pc != 0) {
2346         current_tb = tcg_tb_lookup(pc);
2347     }
2348     if (cpu != NULL) {
2349         env = cpu->env_ptr;
2350     }
2351 #endif
2352     assert_page_locked(p);
2353     PAGE_FOR_EACH_TB(p, tb, n) {
2354 #ifdef TARGET_HAS_PRECISE_SMC
2355         if (current_tb == tb &&
2356             (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
2357             /*
2358              * If we are modifying the current TB, we must stop its
2359              * execution. We could be more precise by checking that the
2360              * modification is after the current PC, but it would require
2361              * a specialized function to partially restore the CPU state.
2362              */
2363             current_tb_modified = 1;
2364             cpu_restore_state_from_tb(cpu, current_tb, pc, true);
2365             cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
2366                                  &current_flags);
2367         }
2368 #endif /* TARGET_HAS_PRECISE_SMC */
2369         tb_phys_invalidate(tb, addr);
2370     }
2371     p->first_tb = (uintptr_t)NULL;
2372 #ifdef TARGET_HAS_PRECISE_SMC
2373     if (current_tb_modified) {
2374         /* Force execution of one insn next time.  */
2375         cpu->cflags_next_tb = 1 | curr_cflags();
2376         return true;
2377     }
2378 #endif
2379 
2380     return false;
2381 }
2382 #endif
2383 
2384 /* user-mode: call with mmap_lock held */
2385 void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr)
2386 {
2387     TranslationBlock *tb;
2388 
2389     assert_memory_lock();
2390 
2391     tb = tcg_tb_lookup(retaddr);
2392     if (tb) {
2393         /* We can use retranslation to find the PC.  */
2394         cpu_restore_state_from_tb(cpu, tb, retaddr, true);
2395         tb_phys_invalidate(tb, -1);
2396     } else {
2397         /* The exception probably happened in a helper.  The CPU state should
2398            have been saved before calling it. Fetch the PC from there.  */
2399         CPUArchState *env = cpu->env_ptr;
2400         target_ulong pc, cs_base;
2401         tb_page_addr_t addr;
2402         uint32_t flags;
2403 
2404         cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
2405         addr = get_page_addr_code(env, pc);
2406         if (addr != -1) {
2407             tb_invalidate_phys_range(addr, addr + 1);
2408         }
2409     }
2410 }
2411 
2412 #ifndef CONFIG_USER_ONLY
2413 /* In deterministic execution mode, instructions performing device I/O
2414  * must be at the end of the TB.
2415  *
2416  * Called by softmmu_template.h, with iothread mutex not held.
2417  */
2418 void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
2419 {
2420 #if defined(TARGET_MIPS) || defined(TARGET_SH4)
2421     CPUArchState *env = cpu->env_ptr;
2422 #endif
2423     TranslationBlock *tb;
2424     uint32_t n;
2425 
2426     tb = tcg_tb_lookup(retaddr);
2427     if (!tb) {
2428         cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
2429                   (void *)retaddr);
2430     }
2431     cpu_restore_state_from_tb(cpu, tb, retaddr, true);
2432 
2433     /* On MIPS and SH, delay slot instructions can only be restarted if
2434        they were already the first instruction in the TB.  If this is not
2435        the first instruction in a TB then re-execute the preceding
2436        branch.  */
2437     n = 1;
2438 #if defined(TARGET_MIPS)
2439     if ((env->hflags & MIPS_HFLAG_BMASK) != 0
2440         && env->active_tc.PC != tb->pc) {
2441         env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
2442         cpu_neg(cpu)->icount_decr.u16.low++;
2443         env->hflags &= ~MIPS_HFLAG_BMASK;
2444         n = 2;
2445     }
2446 #elif defined(TARGET_SH4)
2447     if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
2448         && env->pc != tb->pc) {
2449         env->pc -= 2;
2450         cpu_neg(cpu)->icount_decr.u16.low++;
2451         env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
2452         n = 2;
2453     }
2454 #endif
2455 
2456     /* Generate a new TB executing the I/O insn.  */
2457     cpu->cflags_next_tb = curr_cflags() | CF_LAST_IO | n;
2458 
2459     if (tb_cflags(tb) & CF_NOCACHE) {
2460         if (tb->orig_tb) {
2461             /* Invalidate original TB if this TB was generated in
2462              * cpu_exec_nocache() */
2463             tb_phys_invalidate(tb->orig_tb, -1);
2464         }
2465         tcg_tb_remove(tb);
2466         tb_destroy(tb);
2467     }
2468 
2469     qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc,
2470                            "cpu_io_recompile: rewound execution of TB to "
2471                            TARGET_FMT_lx "\n", tb->pc);
2472 
2473     /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
2474      * the first in the TB) then we end up generating a whole new TB and
2475      * repeating the fault, which is horribly inefficient.
2476      * Better would be to execute just this insn uncached, or generate a
2477      * second new TB.
2478      */
2479     cpu_loop_exit_noexc(cpu);
2480 }
2481 
2482 static void print_qht_statistics(struct qht_stats hst)
2483 {
2484     uint32_t hgram_opts;
2485     size_t hgram_bins;
2486     char *hgram;
2487 
2488     if (!hst.head_buckets) {
2489         return;
2490     }
2491     qemu_printf("TB hash buckets     %zu/%zu (%0.2f%% head buckets used)\n",
2492                 hst.used_head_buckets, hst.head_buckets,
2493                 (double)hst.used_head_buckets / hst.head_buckets * 100);
2494 
2495     hgram_opts =  QDIST_PR_BORDER | QDIST_PR_LABELS;
2496     hgram_opts |= QDIST_PR_100X   | QDIST_PR_PERCENT;
2497     if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) {
2498         hgram_opts |= QDIST_PR_NODECIMAL;
2499     }
2500     hgram = qdist_pr(&hst.occupancy, 10, hgram_opts);
2501     qemu_printf("TB hash occupancy   %0.2f%% avg chain occ. Histogram: %s\n",
2502                 qdist_avg(&hst.occupancy) * 100, hgram);
2503     g_free(hgram);
2504 
2505     hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
2506     hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain);
2507     if (hgram_bins > 10) {
2508         hgram_bins = 10;
2509     } else {
2510         hgram_bins = 0;
2511         hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE;
2512     }
2513     hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts);
2514     qemu_printf("TB hash avg chain   %0.3f buckets. Histogram: %s\n",
2515                 qdist_avg(&hst.chain), hgram);
2516     g_free(hgram);
2517 }
2518 
2519 struct tb_tree_stats {
2520     size_t nb_tbs;
2521     size_t host_size;
2522     size_t target_size;
2523     size_t max_target_size;
2524     size_t direct_jmp_count;
2525     size_t direct_jmp2_count;
2526     size_t cross_page;
2527 };
2528 
2529 static gboolean tb_tree_stats_iter(gpointer key, gpointer value, gpointer data)
2530 {
2531     const TranslationBlock *tb = value;
2532     struct tb_tree_stats *tst = data;
2533 
2534     tst->nb_tbs++;
2535     tst->host_size += tb->tc.size;
2536     tst->target_size += tb->size;
2537     if (tb->size > tst->max_target_size) {
2538         tst->max_target_size = tb->size;
2539     }
2540     if (tb->page_addr[1] != -1) {
2541         tst->cross_page++;
2542     }
2543     if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
2544         tst->direct_jmp_count++;
2545         if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
2546             tst->direct_jmp2_count++;
2547         }
2548     }
2549     return false;
2550 }
2551 
2552 void dump_exec_info(void)
2553 {
2554     struct tb_tree_stats tst = {};
2555     struct qht_stats hst;
2556     size_t nb_tbs, flush_full, flush_part, flush_elide;
2557 
2558     tcg_tb_foreach(tb_tree_stats_iter, &tst);
2559     nb_tbs = tst.nb_tbs;
2560     /* XXX: avoid using doubles ? */
2561     qemu_printf("Translation buffer state:\n");
2562     /*
2563      * Report total code size including the padding and TB structs;
2564      * otherwise users might think "-accel tcg,tb-size" is not honoured.
2565      * For avg host size we use the precise numbers from tb_tree_stats though.
2566      */
2567     qemu_printf("gen code size       %zu/%zu\n",
2568                 tcg_code_size(), tcg_code_capacity());
2569     qemu_printf("TB count            %zu\n", nb_tbs);
2570     qemu_printf("TB avg target size  %zu max=%zu bytes\n",
2571                 nb_tbs ? tst.target_size / nb_tbs : 0,
2572                 tst.max_target_size);
2573     qemu_printf("TB avg host size    %zu bytes (expansion ratio: %0.1f)\n",
2574                 nb_tbs ? tst.host_size / nb_tbs : 0,
2575                 tst.target_size ? (double)tst.host_size / tst.target_size : 0);
2576     qemu_printf("cross page TB count %zu (%zu%%)\n", tst.cross_page,
2577                 nb_tbs ? (tst.cross_page * 100) / nb_tbs : 0);
2578     qemu_printf("direct jump count   %zu (%zu%%) (2 jumps=%zu %zu%%)\n",
2579                 tst.direct_jmp_count,
2580                 nb_tbs ? (tst.direct_jmp_count * 100) / nb_tbs : 0,
2581                 tst.direct_jmp2_count,
2582                 nb_tbs ? (tst.direct_jmp2_count * 100) / nb_tbs : 0);
2583 
2584     qht_statistics_init(&tb_ctx.htable, &hst);
2585     print_qht_statistics(hst);
2586     qht_statistics_destroy(&hst);
2587 
2588     qemu_printf("\nStatistics:\n");
2589     qemu_printf("TB flush count      %u\n",
2590                 qatomic_read(&tb_ctx.tb_flush_count));
2591     qemu_printf("TB invalidate count %zu\n",
2592                 tcg_tb_phys_invalidate_count());
2593 
2594     tlb_flush_counts(&flush_full, &flush_part, &flush_elide);
2595     qemu_printf("TLB full flushes    %zu\n", flush_full);
2596     qemu_printf("TLB partial flushes %zu\n", flush_part);
2597     qemu_printf("TLB elided flushes  %zu\n", flush_elide);
2598     tcg_dump_info();
2599 }
2600 
2601 void dump_opcount_info(void)
2602 {
2603     tcg_dump_op_count();
2604 }
2605 
2606 #else /* CONFIG_USER_ONLY */
2607 
2608 void cpu_interrupt(CPUState *cpu, int mask)
2609 {
2610     g_assert(qemu_mutex_iothread_locked());
2611     cpu->interrupt_request |= mask;
2612     qatomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1);
2613 }
2614 
2615 /*
2616  * Walks guest process memory "regions" one by one
2617  * and calls callback function 'fn' for each region.
2618  */
2619 struct walk_memory_regions_data {
2620     walk_memory_regions_fn fn;
2621     void *priv;
2622     target_ulong start;
2623     int prot;
2624 };
2625 
2626 static int walk_memory_regions_end(struct walk_memory_regions_data *data,
2627                                    target_ulong end, int new_prot)
2628 {
2629     if (data->start != -1u) {
2630         int rc = data->fn(data->priv, data->start, end, data->prot);
2631         if (rc != 0) {
2632             return rc;
2633         }
2634     }
2635 
2636     data->start = (new_prot ? end : -1u);
2637     data->prot = new_prot;
2638 
2639     return 0;
2640 }
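
/*
 * Illustrative example (addresses made up): walk_memory_regions_1() below
 * invokes this helper whenever the protection changes, so three consecutive
 * pages with flags rw-, rw-, r-- are coalesced into two callbacks:
 *
 *     fn(priv, 0x10000, 0x12000, PAGE_READ | PAGE_WRITE)
 *     fn(priv, 0x12000, 0x13000, PAGE_READ)
 *
 * data->start == -1u means that no region is currently open.
 */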
2641 
2642 static int walk_memory_regions_1(struct walk_memory_regions_data *data,
2643                                  target_ulong base, int level, void **lp)
2644 {
2645     target_ulong pa;
2646     int i, rc;
2647 
2648     if (*lp == NULL) {
2649         return walk_memory_regions_end(data, base, 0);
2650     }
2651 
2652     if (level == 0) {
2653         PageDesc *pd = *lp;
2654 
2655         for (i = 0; i < V_L2_SIZE; ++i) {
2656             int prot = pd[i].flags;
2657 
2658             pa = base | (i << TARGET_PAGE_BITS);
2659             if (prot != data->prot) {
2660                 rc = walk_memory_regions_end(data, pa, prot);
2661                 if (rc != 0) {
2662                     return rc;
2663                 }
2664             }
2665         }
2666     } else {
2667         void **pp = *lp;
2668 
2669         for (i = 0; i < V_L2_SIZE; ++i) {
2670             pa = base | ((target_ulong)i <<
2671                 (TARGET_PAGE_BITS + V_L2_BITS * level));
2672             rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2673             if (rc != 0) {
2674                 return rc;
2675             }
2676         }
2677     }
2678 
2679     return 0;
2680 }
2681 
2682 int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2683 {
2684     struct walk_memory_regions_data data;
2685     uintptr_t i, l1_sz = v_l1_size;
2686 
2687     data.fn = fn;
2688     data.priv = priv;
2689     data.start = -1u;
2690     data.prot = 0;
2691 
2692     for (i = 0; i < l1_sz; i++) {
2693         target_ulong base = i << (v_l1_shift + TARGET_PAGE_BITS);
2694         int rc = walk_memory_regions_1(&data, base, v_l2_levels, l1_map + i);
2695         if (rc != 0) {
2696             return rc;
2697         }
2698     }
2699 
2700     return walk_memory_regions_end(&data, 0, 0);
2701 }
2702 
2703 static int dump_region(void *priv, target_ulong start,
2704                        target_ulong end, unsigned long prot)
2705 {
2706     FILE *f = (FILE *)priv;
2707 
2708     (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx
2709         " "TARGET_FMT_lx" %c%c%c\n",
2710         start, end, end - start,
2711         ((prot & PAGE_READ) ? 'r' : '-'),
2712         ((prot & PAGE_WRITE) ? 'w' : '-'),
2713         ((prot & PAGE_EXEC) ? 'x' : '-'));
2714 
2715     return 0;
2716 }
2717 
2718 /* dump memory mappings */
2719 void page_dump(FILE *f)
2720 {
2721     const int length = sizeof(target_ulong) * 2;
2722     (void) fprintf(f, "%-*s %-*s %-*s %s\n",
2723             length, "start", length, "end", length, "size", "prot");
2724     walk_memory_regions(f, dump_region);
2725 }
2726 
2727 int page_get_flags(target_ulong address)
2728 {
2729     PageDesc *p;
2730 
2731     p = page_find(address >> TARGET_PAGE_BITS);
2732     if (!p) {
2733         return 0;
2734     }
2735     return p->flags;
2736 }
2737 
2738 /* Modify the flags of a page and invalidate the code if necessary.
2739    The flag PAGE_WRITE_ORG is set automatically depending
2740    on PAGE_WRITE.  The mmap_lock should already be held.  */
2741 void page_set_flags(target_ulong start, target_ulong end, int flags)
2742 {
2743     target_ulong addr, len;
2744     bool reset_target_data;
2745 
2746     /* This function should never be called with addresses outside the
2747        guest address space.  If this assert fires, it probably indicates
2748        a missing call to h2g_valid.  */
2749     assert(end - 1 <= GUEST_ADDR_MAX);
2750     assert(start < end);
2751     assert_memory_lock();
2752 
2753     start = start & TARGET_PAGE_MASK;
2754     end = TARGET_PAGE_ALIGN(end);
2755 
2756     if (flags & PAGE_WRITE) {
2757         flags |= PAGE_WRITE_ORG;
2758     }
2759     reset_target_data = !(flags & PAGE_VALID) || (flags & PAGE_RESET);
2760     flags &= ~PAGE_RESET;
2761 
2762     for (addr = start, len = end - start;
2763          len != 0;
2764          len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2765         PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2766 
2767         /* If the write protection bit is set, then we invalidate
2768            the code inside.  */
2769         if (!(p->flags & PAGE_WRITE) &&
2770             (flags & PAGE_WRITE) &&
2771             p->first_tb) {
2772             tb_invalidate_phys_page(addr, 0);
2773         }
2774         if (reset_target_data && p->target_data) {
2775             g_free(p->target_data);
2776             p->target_data = NULL;
2777         }
2778         p->flags = flags;
2779     }
2780 }
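
/*
 * Hedged usage sketch: an mmap-style syscall emulation would typically
 * publish a fresh mapping along these lines (illustrative, not the literal
 * linux-user code; prot_flags stands for the PAGE_* bits of the mapping):
 *
 *     page_set_flags(start, start + len,
 *                    prot_flags | PAGE_VALID | PAGE_RESET);
 *
 * PAGE_RESET requests that stale per-page target_data be dropped; as the
 * code above shows, the same happens implicitly when the pages were not
 * PAGE_VALID before.
 */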
2781 
2782 void *page_get_target_data(target_ulong address)
2783 {
2784     PageDesc *p = page_find(address >> TARGET_PAGE_BITS);
2785     return p ? p->target_data : NULL;
2786 }
2787 
2788 void *page_alloc_target_data(target_ulong address, size_t size)
2789 {
2790     PageDesc *p = page_find(address >> TARGET_PAGE_BITS);
2791     void *ret = NULL;
2792 
2793     if (p && (p->flags & PAGE_VALID)) {
2794         ret = p->target_data;
2795         if (!ret) {
2796             p->target_data = ret = g_malloc0(size);
2797         }
2798     }
2799     return ret;
2800 }
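
/*
 * Hedged usage sketch: a target that keeps per-page metadata (memory tags,
 * for instance) can attach it lazily on first access.  TAG_BYTES_PER_PAGE
 * is a hypothetical constant:
 *
 *     uint8_t *tags = page_alloc_target_data(addr, TAG_BYTES_PER_PAGE);
 *     if (tags == NULL) {
 *         ... the page is not PAGE_VALID: treat as an access fault ...
 *     }
 *
 * The data is zero-initialized (g_malloc0) and freed again by
 * page_set_flags() when the page is reset or invalidated.
 */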
2801 
2802 int page_check_range(target_ulong start, target_ulong len, int flags)
2803 {
2804     PageDesc *p;
2805     target_ulong end;
2806     target_ulong addr;
2807 
2808     /* This function should never be called with addresses outside the
2809        guest address space.  If this assert fires, it probably indicates
2810        a missing call to h2g_valid.  */
2811     if (TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS) {
2812         assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2813     }
2814 
2815     if (len == 0) {
2816         return 0;
2817     }
2818     if (start + len - 1 < start) {
2819         /* We've wrapped around.  */
2820         return -1;
2821     }
2822 
2823     /* must do this before we lose bits in the next step */
2824     end = TARGET_PAGE_ALIGN(start + len);
2825     start = start & TARGET_PAGE_MASK;
2826 
2827     for (addr = start, len = end - start;
2828          len != 0;
2829          len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2830         p = page_find(addr >> TARGET_PAGE_BITS);
2831         if (!p) {
2832             return -1;
2833         }
2834         if (!(p->flags & PAGE_VALID)) {
2835             return -1;
2836         }
2837 
2838         if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
2839             return -1;
2840         }
2841         if (flags & PAGE_WRITE) {
2842             if (!(p->flags & PAGE_WRITE_ORG)) {
2843                 return -1;
2844             }
2845             /* unprotect the page if it was made read-only because it
2846                contains translated code */
2847             if (!(p->flags & PAGE_WRITE)) {
2848                 if (!page_unprotect(addr, 0)) {
2849                     return -1;
2850                 }
2851             }
2852         }
2853     }
2854     return 0;
2855 }
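
/*
 * Hedged usage sketch: a syscall emulation layer would validate a guest
 * buffer with this function before touching it (illustrative; the errno
 * constant is assumed):
 *
 *     if (page_check_range(guest_addr, len, PAGE_READ | PAGE_WRITE) < 0) {
 *         return -TARGET_EFAULT;
 *     }
 *
 * As a side effect, checking PAGE_WRITE unprotects pages that were made
 * read-only only to guard translated code.
 */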
2856 
2857 /* called from signal handler: invalidate the code and unprotect the
2858  * page. Return 0 if the fault was not handled, 1 if it was handled,
2859  * and 2 if it was handled but the caller must cause the TB to be
2860  * immediately exited. (We can only return 2 if the 'pc' argument is
2861  * non-zero.)
2862  */
2863 int page_unprotect(target_ulong address, uintptr_t pc)
2864 {
2865     unsigned int prot;
2866     bool current_tb_invalidated;
2867     PageDesc *p;
2868     target_ulong host_start, host_end, addr;
2869 
2870     /* Technically this isn't safe inside a signal handler.  However we
2871        know this only ever happens in a synchronous SEGV handler, so in
2872        practice it seems to be ok.  */
2873     mmap_lock();
2874 
2875     p = page_find(address >> TARGET_PAGE_BITS);
2876     if (!p) {
2877         mmap_unlock();
2878         return 0;
2879     }
2880 
2881     /* if the page was really writable, then we change its
2882        protection back to writable */
2883     if (p->flags & PAGE_WRITE_ORG) {
2884         current_tb_invalidated = false;
2885         if (p->flags & PAGE_WRITE) {
2886             /* If the page is actually marked WRITE then assume this is because
2887              * this thread raced with another one which got here first and
2888              * set the page to PAGE_WRITE and did the TB invalidate for us.
2889              */
2890 #ifdef TARGET_HAS_PRECISE_SMC
2891             TranslationBlock *current_tb = tcg_tb_lookup(pc);
2892             if (current_tb) {
2893                 current_tb_invalidated = tb_cflags(current_tb) & CF_INVALID;
2894             }
2895 #endif
2896         } else {
2897             host_start = address & qemu_host_page_mask;
2898             host_end = host_start + qemu_host_page_size;
2899 
2900             prot = 0;
2901             for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
2902                 p = page_find(addr >> TARGET_PAGE_BITS);
2903                 p->flags |= PAGE_WRITE;
2904                 prot |= p->flags;
2905 
2906                 /* and since the content will be modified, we must invalidate
2907                    the corresponding translated code. */
2908                 current_tb_invalidated |= tb_invalidate_phys_page(addr, pc);
2909 #ifdef CONFIG_USER_ONLY
2910                 if (DEBUG_TB_CHECK_GATE) {
2911                     tb_invalidate_check(addr);
2912                 }
2913 #endif
2914             }
2915             mprotect((void *)g2h(host_start), qemu_host_page_size,
2916                      prot & PAGE_BITS);
2917         }
2918         mmap_unlock();
2919         /* If current TB was invalidated return to main loop */
2920         return current_tb_invalidated ? 2 : 1;
2921     }
2922     mmap_unlock();
2923     return 0;
2924 }
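
/*
 * Hedged sketch of the expected caller, a synchronous SEGV handler
 * (simplified; the real dispatch lives in the per-host signal handling
 * code):
 *
 *     switch (page_unprotect(guest_addr, host_pc)) {
 *     case 1:
 *         return;                        (fixed up: retry the faulting store)
 *     case 2:
 *         cpu_loop_exit_noexc(cpu);      (current TB was invalidated)
 *     default:
 *         break;                         (genuine fault: deliver the signal)
 *     }
 */
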
2925 #endif /* CONFIG_USER_ONLY */
2926 
2927 /* This is a wrapper for common code that cannot use CONFIG_SOFTMMU */
2928 void tcg_flush_softmmu_tlb(CPUState *cs)
2929 {
2930 #ifdef CONFIG_SOFTMMU
2931     tlb_flush(cs);
2932 #endif
2933 }
2934