xref: /openbmc/qemu/accel/tcg/translate-all.c (revision 89854803)
1 /*
2  *  Host code generation
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #ifdef _WIN32
20 #include <windows.h>
21 #endif
22 #include "qemu/osdep.h"
23 
24 
25 #include "qemu-common.h"
26 #define NO_CPU_IO_DEFS
27 #include "cpu.h"
28 #include "trace.h"
29 #include "disas/disas.h"
30 #include "exec/exec-all.h"
31 #include "tcg.h"
32 #if defined(CONFIG_USER_ONLY)
33 #include "qemu.h"
34 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
35 #include <sys/param.h>
36 #if __FreeBSD_version >= 700104
37 #define HAVE_KINFO_GETVMMAP
38 #define sigqueue sigqueue_freebsd  /* avoid redefinition */
39 #include <sys/proc.h>
40 #include <machine/profile.h>
41 #define _KERNEL
42 #include <sys/user.h>
43 #undef _KERNEL
44 #undef sigqueue
45 #include <libutil.h>
46 #endif
47 #endif
48 #else
49 #include "exec/address-spaces.h"
50 #endif
51 
52 #include "exec/cputlb.h"
53 #include "exec/tb-hash.h"
54 #include "translate-all.h"
55 #include "qemu/bitmap.h"
56 #include "qemu/error-report.h"
57 #include "qemu/timer.h"
58 #include "qemu/main-loop.h"
59 #include "exec/log.h"
60 #include "sysemu/cpus.h"
61 
62 /* #define DEBUG_TB_INVALIDATE */
63 /* #define DEBUG_TB_FLUSH */
64 /* make various TB consistency checks */
65 /* #define DEBUG_TB_CHECK */
66 
67 #ifdef DEBUG_TB_INVALIDATE
68 #define DEBUG_TB_INVALIDATE_GATE 1
69 #else
70 #define DEBUG_TB_INVALIDATE_GATE 0
71 #endif
72 
73 #ifdef DEBUG_TB_FLUSH
74 #define DEBUG_TB_FLUSH_GATE 1
75 #else
76 #define DEBUG_TB_FLUSH_GATE 0
77 #endif
78 
79 #if !defined(CONFIG_USER_ONLY)
80 /* TB consistency checks only implemented for usermode emulation.  */
81 #undef DEBUG_TB_CHECK
82 #endif
83 
84 #ifdef DEBUG_TB_CHECK
85 #define DEBUG_TB_CHECK_GATE 1
86 #else
87 #define DEBUG_TB_CHECK_GATE 0
88 #endif
89 
90 /* Access to the various translation structures needs to be serialised
91  * via locks for consistency. This is automatic for SoftMMU-based system
92  * emulation due to its single-threaded nature. In user-mode emulation,
93  * access to the memory-related structures is protected by the
94  * mmap_lock.
95  */
96 #ifdef CONFIG_SOFTMMU
97 #define assert_memory_lock() tcg_debug_assert(have_tb_lock)
98 #else
99 #define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
100 #endif
101 
102 #define SMC_BITMAP_USE_THRESHOLD 10
103 
104 typedef struct PageDesc {
105     /* list of TBs intersecting this ram page */
106     TranslationBlock *first_tb;
107 #ifdef CONFIG_SOFTMMU
108     /* to optimize handling of self-modifying code, we count the writes
109        to a given page; past a threshold we build a bitmap of its code */
110     unsigned int code_write_count;
111     unsigned long *code_bitmap;
112 #else
113     unsigned long flags;
114 #endif
115 } PageDesc;
116 
117 /* In system mode we want L1_MAP to be based on ram offsets,
118    while in user mode we want it to be based on virtual addresses.  */
119 #if !defined(CONFIG_USER_ONLY)
120 #if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
121 # define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
122 #else
123 # define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
124 #endif
125 #else
126 # define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
127 #endif
128 
129 /* Size of the L2 (and L3, etc) page tables.  */
130 #define V_L2_BITS 10
131 #define V_L2_SIZE (1 << V_L2_BITS)
132 
133 /* Make sure all possible CPU event bits fit in tb->trace_vcpu_dstate */
134 QEMU_BUILD_BUG_ON(CPU_TRACE_DSTATE_MAX_EVENTS >
135                   sizeof(((TranslationBlock *)0)->trace_vcpu_dstate)
136                   * BITS_PER_BYTE);
137 
138 /*
139  * L1 Mapping properties
140  */
141 static int v_l1_size;
142 static int v_l1_shift;
143 static int v_l2_levels;
144 
145 /* The first (L1) table is indexed by anything from 4 to (V_L2_BITS + 3)
146  * bits, depending on target page size; the bottom level holds PageDescs.
147  */
148 #define V_L1_MIN_BITS 4
149 #define V_L1_MAX_BITS (V_L2_BITS + 3)
150 #define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS)
151 
152 static void *l1_map[V_L1_MAX_SIZE];
153 
154 /* code generation context */
155 TCGContext tcg_init_ctx;
156 __thread TCGContext *tcg_ctx;
157 TBContext tb_ctx;
158 bool parallel_cpus;
159 
160 /* translation block context */
161 static __thread int have_tb_lock;
162 
163 static void page_table_config_init(void)
164 {
165     uint32_t v_l1_bits;
166 
167     assert(TARGET_PAGE_BITS);
168     /* The bits remaining after N lower levels of page tables.  */
169     v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS;
170     if (v_l1_bits < V_L1_MIN_BITS) {
171         v_l1_bits += V_L2_BITS;
172     }
173 
174     v_l1_size = 1 << v_l1_bits;
175     v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits;
176     v_l2_levels = v_l1_shift / V_L2_BITS - 1;
177 
178     assert(v_l1_bits <= V_L1_MAX_BITS);
179     assert(v_l1_shift % V_L2_BITS == 0);
180     assert(v_l2_levels >= 0);
181 }
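
/*
 * Worked example (illustrative, assuming V_L2_BITS == 10): with
 * L1_MAP_ADDR_SPACE_BITS == 48 and TARGET_PAGE_BITS == 12 we get
 * (48 - 12) % 10 == 6 >= V_L1_MIN_BITS, so v_l1_bits == 6,
 * v_l1_size == 64, v_l1_shift == 30 and v_l2_levels == 2; the tree
 * then resolves 6 + 2 * 10 + 10 == 36 bits, i.e. everything above the
 * 12-bit page offset.  For a 32-bit guest, (32 - 12) % 10 == 0 < 4,
 * so v_l1_bits grows to 10 and v_l2_levels drops to 0: l1_map entries
 * then point directly at PageDesc arrays.
 */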
182 
183 #define assert_tb_locked() tcg_debug_assert(have_tb_lock)
184 #define assert_tb_unlocked() tcg_debug_assert(!have_tb_lock)
185 
186 void tb_lock(void)
187 {
188     assert_tb_unlocked();
189     qemu_mutex_lock(&tb_ctx.tb_lock);
190     have_tb_lock++;
191 }
192 
193 void tb_unlock(void)
194 {
195     assert_tb_locked();
196     have_tb_lock--;
197     qemu_mutex_unlock(&tb_ctx.tb_lock);
198 }
199 
200 void tb_lock_reset(void)
201 {
202     if (have_tb_lock) {
203         qemu_mutex_unlock(&tb_ctx.tb_lock);
204         have_tb_lock = 0;
205     }
206 }
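
/*
 * Illustrative sketch of the locking discipline (hypothetical caller,
 * not part of this file): writers of the translation structures
 * bracket their work with tb_lock()/tb_unlock(), while error paths
 * that longjmp back into the exec loop rely on tb_lock_reset().
 */
#if 0
static void example_invalidate_one(TranslationBlock *tb)
{
    tb_lock();
    tb_phys_invalidate(tb, -1);
    tb_unlock();
}
#endif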
207 
208 static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);
209 
210 void cpu_gen_init(void)
211 {
212     tcg_context_init(&tcg_init_ctx);
213 }
214 
215 /* Encode VAL as a signed leb128 sequence at P.
216    Return P incremented past the encoded value.  */
217 static uint8_t *encode_sleb128(uint8_t *p, target_long val)
218 {
219     int more, byte;
220 
221     do {
222         byte = val & 0x7f;
223         val >>= 7;
224         more = !((val == 0 && (byte & 0x40) == 0)
225                  || (val == -1 && (byte & 0x40) != 0));
226         if (more) {
227             byte |= 0x80;
228         }
229         *p++ = byte;
230     } while (more);
231 
232     return p;
233 }
234 
235 /* Decode a signed leb128 sequence at *PP; increment *PP past the
236    decoded value.  Return the decoded value.  */
237 static target_long decode_sleb128(uint8_t **pp)
238 {
239     uint8_t *p = *pp;
240     target_long val = 0;
241     int byte, shift = 0;
242 
243     do {
244         byte = *p++;
245         val |= (target_ulong)(byte & 0x7f) << shift;
246         shift += 7;
247     } while (byte & 0x80);
248     if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
249         val |= -(target_ulong)1 << shift;
250     }
251 
252     *pp = p;
253     return val;
254 }
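
/* Worked example: encode_sleb128(p, 300) emits the bytes 0xac 0x02
   (300 == 0x12c: low seven bits 0x2c plus the continuation bit, then
   0x02), while encode_sleb128(p, -5) emits the single byte 0x7b, whose
   sign bit (0x40) makes decode_sleb128 sign-extend it back to -5.  */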
255 
256 /* Encode the data collected about the instructions while compiling TB.
257    Place the data at BLOCK, and return the number of bytes consumed.
258 
259    The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
260    which come from the target's insn_start data, followed by a uintptr_t
261    which comes from the host pc of the end of the code implementing the insn.
262 
263    Each line of the table is encoded as sleb128 deltas from the previous
264    line.  The seed for the first line is { tb->pc, 0..., tb->tc.ptr }.
265    That is, the first column is seeded with the guest pc, the last column
266    with the host pc, and the middle columns with zeros.  */
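
/* For instance (assuming TARGET_INSN_START_WORDS == 1): a TB at guest pc
   0x1000 with insns starting at 0x1000 and 0x1004, whose host code ends
   at offsets 0x20 and 0x38 from tb->tc.ptr, has the logical rows
   { 0x1000, tc.ptr + 0x20 } and { 0x1004, tc.ptr + 0x38 }; against the
   seed { 0x1000, tc.ptr } these are encoded as the sleb128 delta rows
   { 0, 0x20 } and { 4, 0x18 }.  */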
267 
268 static int encode_search(TranslationBlock *tb, uint8_t *block)
269 {
270     uint8_t *highwater = tcg_ctx->code_gen_highwater;
271     uint8_t *p = block;
272     int i, j, n;
273 
274     for (i = 0, n = tb->icount; i < n; ++i) {
275         target_ulong prev;
276 
277         for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
278             if (i == 0) {
279                 prev = (j == 0 ? tb->pc : 0);
280             } else {
281                 prev = tcg_ctx->gen_insn_data[i - 1][j];
282             }
283             p = encode_sleb128(p, tcg_ctx->gen_insn_data[i][j] - prev);
284         }
285         prev = (i == 0 ? 0 : tcg_ctx->gen_insn_end_off[i - 1]);
286         p = encode_sleb128(p, tcg_ctx->gen_insn_end_off[i] - prev);
287 
288         /* Test for (pending) buffer overflow.  The assumption is that any
289            one row beginning below the high water mark cannot overrun
290            the buffer completely.  Thus we can test for overflow after
291            encoding a row without having to check during encoding.  */
292         if (unlikely(p > highwater)) {
293             return -1;
294         }
295     }
296 
297     return p - block;
298 }
299 
300 /* The cpu state corresponding to 'searched_pc' is restored.
301  * Called with tb_lock held.
302  * When reset_icount is true, the current TB will be interrupted and
303  * icount should be recalculated.
304  */
305 static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
306                                      uintptr_t searched_pc, bool reset_icount)
307 {
308     target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc };
309     uintptr_t host_pc = (uintptr_t)tb->tc.ptr;
310     CPUArchState *env = cpu->env_ptr;
311     uint8_t *p = tb->tc.ptr + tb->tc.size;
312     int i, j, num_insns = tb->icount;
313 #ifdef CONFIG_PROFILER
314     TCGProfile *prof = &tcg_ctx->prof;
315     int64_t ti = profile_getclock();
316 #endif
317 
318     searched_pc -= GETPC_ADJ;
319 
320     if (searched_pc < host_pc) {
321         return -1;
322     }
323 
324     /* Reconstruct the stored insn data while looking for the point at
325        which the end of the insn exceeds the searched_pc.  */
326     for (i = 0; i < num_insns; ++i) {
327         for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
328             data[j] += decode_sleb128(&p);
329         }
330         host_pc += decode_sleb128(&p);
331         if (host_pc > searched_pc) {
332             goto found;
333         }
334     }
335     return -1;
336 
337  found:
338     if (reset_icount && (tb->cflags & CF_USE_ICOUNT)) {
339         assert(use_icount);
340         /* Reset the cycle counter to the start of the block
341            and shift it by the number of actually executed instructions */
342         cpu->icount_decr.u16.low += num_insns - i;
343     }
344     restore_state_to_opc(env, tb, data);
345 
346 #ifdef CONFIG_PROFILER
347     atomic_set(&prof->restore_time,
348                 prof->restore_time + profile_getclock() - ti);
349     atomic_set(&prof->restore_count, prof->restore_count + 1);
350 #endif
351     return 0;
352 }
353 
354 bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc, bool will_exit)
355 {
356     TranslationBlock *tb;
357     bool r = false;
358     uintptr_t check_offset;
359 
360     /* The host_pc has to be in the region of the current code buffer.
361      * If it is not, we will not be able to resolve it here. The two
362      * cases where host_pc will not be correct are:
363      *
364      *  - fault during translation (instruction fetch)
365      *  - fault from helper (not using GETPC() macro)
366      *
367      * Either way we need to return early to avoid blowing up on a
368      * recursive tb_lock() as we can't resolve it here.
369      *
370      * We are using unsigned arithmetic so if host_pc <
371      * tcg_init_ctx.code_gen_buffer, check_offset will wrap to way
372      * above code_gen_buffer_size.
373      */
374     check_offset = host_pc - (uintptr_t) tcg_init_ctx.code_gen_buffer;
375 
376     if (check_offset < tcg_init_ctx.code_gen_buffer_size) {
377         tb_lock();
378         tb = tb_find_pc(host_pc);
379         if (tb) {
380             cpu_restore_state_from_tb(cpu, tb, host_pc, will_exit);
381             if (tb->cflags & CF_NOCACHE) {
382                 /* one-shot translation, invalidate it immediately */
383                 tb_phys_invalidate(tb, -1);
384                 tb_remove(tb);
385             }
386             r = true;
387         }
388         tb_unlock();
389     }
390 
391     return r;
392 }
393 
394 static void page_init(void)
395 {
396     page_size_init();
397     page_table_config_init();
398 
399 #if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
400     {
401 #ifdef HAVE_KINFO_GETVMMAP
402         struct kinfo_vmentry *freep;
403         int i, cnt;
404 
405         freep = kinfo_getvmmap(getpid(), &cnt);
406         if (freep) {
407             mmap_lock();
408             for (i = 0; i < cnt; i++) {
409                 unsigned long startaddr, endaddr;
410 
411                 startaddr = freep[i].kve_start;
412                 endaddr = freep[i].kve_end;
413                 if (h2g_valid(startaddr)) {
414                     startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
415 
416                     if (h2g_valid(endaddr)) {
417                         endaddr = h2g(endaddr);
418                         page_set_flags(startaddr, endaddr, PAGE_RESERVED);
419                     } else {
420 #if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
421                         endaddr = ~0ul;
422                         page_set_flags(startaddr, endaddr, PAGE_RESERVED);
423 #endif
424                     }
425                 }
426             }
427             free(freep);
428             mmap_unlock();
429         }
430 #else
431         FILE *f;
432 
433         last_brk = (unsigned long)sbrk(0);
434 
435         f = fopen("/compat/linux/proc/self/maps", "r");
436         if (f) {
437             mmap_lock();
438 
439             do {
440                 unsigned long startaddr, endaddr;
441                 int n;
442 
443                 n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
444 
445                 if (n == 2 && h2g_valid(startaddr)) {
446                     startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
447 
448                     if (h2g_valid(endaddr)) {
449                         endaddr = h2g(endaddr);
450                     } else {
451                         endaddr = ~0ul;
452                     }
453                     page_set_flags(startaddr, endaddr, PAGE_RESERVED);
454                 }
455             } while (!feof(f));
456 
457             fclose(f);
458             mmap_unlock();
459         }
460 #endif
461     }
462 #endif
463 }
464 
465 /* If alloc=1:
466  * Called with tb_lock held for system emulation.
467  * Called with mmap_lock held for user-mode emulation.
468  */
469 static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
470 {
471     PageDesc *pd;
472     void **lp;
473     int i;
474 
475     if (alloc) {
476         assert_memory_lock();
477     }
478 
479     /* Level 1.  Always allocated.  */
480     lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));
481 
482     /* Level 2..N-1.  */
483     for (i = v_l2_levels; i > 0; i--) {
484         void **p = atomic_rcu_read(lp);
485 
486         if (p == NULL) {
487             if (!alloc) {
488                 return NULL;
489             }
490             p = g_new0(void *, V_L2_SIZE);
491             atomic_rcu_set(lp, p);
492         }
493 
494         lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
495     }
496 
497     pd = atomic_rcu_read(lp);
498     if (pd == NULL) {
499         if (!alloc) {
500             return NULL;
501         }
502         pd = g_new0(PageDesc, V_L2_SIZE);
503         atomic_rcu_set(lp, pd);
504     }
505 
506     return pd + (index & (V_L2_SIZE - 1));
507 }
508 
509 static inline PageDesc *page_find(tb_page_addr_t index)
510 {
511     return page_find_alloc(index, 0);
512 }
513 
514 #if defined(CONFIG_USER_ONLY)
515 /* Currently it is not recommended to allocate big chunks of data in
516    user mode. This will change when a dedicated libc is used.  */
517 /* ??? 64-bit hosts ought to have no problem mmaping data outside the
518    region in which the guest needs to run.  Revisit this.  */
519 #define USE_STATIC_CODE_GEN_BUFFER
520 #endif
521 
522 /* Minimum size of the code gen buffer.  This number is arbitrarily
523    chosen, but not so small that we can't have a fair number of TBs live.  */
524 #define MIN_CODE_GEN_BUFFER_SIZE     (1024u * 1024)
525 
526 /* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
527    indicated, this is constrained by the range of direct branches on the
528    host cpu, as used by the TCG implementation of goto_tb.  */
529 #if defined(__x86_64__)
530 # define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
531 #elif defined(__sparc__)
532 # define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
533 #elif defined(__powerpc64__)
534 # define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
535 #elif defined(__powerpc__)
536 # define MAX_CODE_GEN_BUFFER_SIZE  (32u * 1024 * 1024)
537 #elif defined(__aarch64__)
538 # define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
539 #elif defined(__s390x__)
540   /* We have a +- 4GB range on the branches; leave some slop.  */
541 # define MAX_CODE_GEN_BUFFER_SIZE  (3ul * 1024 * 1024 * 1024)
542 #elif defined(__mips__)
543   /* We have a 256MB branch region, but leave room to make sure the
544      main executable is also within that region.  */
545 # define MAX_CODE_GEN_BUFFER_SIZE  (128ul * 1024 * 1024)
546 #else
547 # define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
548 #endif
549 
550 #define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)
551 
552 #define DEFAULT_CODE_GEN_BUFFER_SIZE \
553   (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
554    ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)
555 
556 static inline size_t size_code_gen_buffer(size_t tb_size)
557 {
558     /* Size the buffer.  */
559     if (tb_size == 0) {
560 #ifdef USE_STATIC_CODE_GEN_BUFFER
561         tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
562 #else
563         /* ??? Needs adjustments.  */
564         /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
565            static buffer, we could size this on RESERVED_VA, on the text
566            segment size of the executable, or continue to use the default.  */
567         tb_size = (unsigned long)(ram_size / 4);
568 #endif
569     }
570     if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
571         tb_size = MIN_CODE_GEN_BUFFER_SIZE;
572     }
573     if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
574         tb_size = MAX_CODE_GEN_BUFFER_SIZE;
575     }
576     return tb_size;
577 }
578 
579 #ifdef __mips__
580 /* In order to use J and JAL within the code_gen_buffer, we require
581    that the buffer not cross a 256MB boundary.  */
582 static inline bool cross_256mb(void *addr, size_t size)
583 {
584     return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful;
585 }
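
/* e.g. cross_256mb((void *)0x0ff00000, 0x200000): 0x0ff00000 ^ 0x10100000
   == 0x1fe00000, which masked with ~0x0ffffffful leaves 0x10000000, i.e.
   the range crosses the 256MB boundary at 0x10000000.  */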
586 
587 /* We weren't able to allocate a buffer without crossing that boundary,
588    so make do with the larger portion of the buffer that doesn't cross.
589    Returns the new base of the buffer, and adjusts code_gen_buffer_size.  */
590 static inline void *split_cross_256mb(void *buf1, size_t size1)
591 {
592     void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful);
593     size_t size2 = buf1 + size1 - buf2;
594 
595     size1 = buf2 - buf1;
596     if (size1 < size2) {
597         size1 = size2;
598         buf1 = buf2;
599     }
600 
601     tcg_ctx->code_gen_buffer_size = size1;
602     return buf1;
603 }
604 #endif
605 
606 #ifdef USE_STATIC_CODE_GEN_BUFFER
607 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
608     __attribute__((aligned(CODE_GEN_ALIGN)));
609 
610 static inline void *alloc_code_gen_buffer(void)
611 {
612     void *buf = static_code_gen_buffer;
613     void *end = static_code_gen_buffer + sizeof(static_code_gen_buffer);
614     size_t size;
615 
616     /* page-align the beginning and end of the buffer */
617     buf = QEMU_ALIGN_PTR_UP(buf, qemu_real_host_page_size);
618     end = QEMU_ALIGN_PTR_DOWN(end, qemu_real_host_page_size);
619 
620     size = end - buf;
621 
622     /* Honor a command-line option limiting the size of the buffer.  */
623     if (size > tcg_ctx->code_gen_buffer_size) {
624         size = QEMU_ALIGN_DOWN(tcg_ctx->code_gen_buffer_size,
625                                qemu_real_host_page_size);
626     }
627     tcg_ctx->code_gen_buffer_size = size;
628 
629 #ifdef __mips__
630     if (cross_256mb(buf, size)) {
631         buf = split_cross_256mb(buf, size);
632         size = tcg_ctx->code_gen_buffer_size;
633     }
634 #endif
635 
636     if (qemu_mprotect_rwx(buf, size)) {
637         abort();
638     }
639     qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
640 
641     return buf;
642 }
643 #elif defined(_WIN32)
644 static inline void *alloc_code_gen_buffer(void)
645 {
646     size_t size = tcg_ctx->code_gen_buffer_size;
647     void *buf;
648 
649     buf = VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT,
650                         PAGE_EXECUTE_READWRITE);
651     return buf;
652 }
653 #else
654 static inline void *alloc_code_gen_buffer(void)
655 {
656     int prot = PROT_WRITE | PROT_READ | PROT_EXEC;
657     int flags = MAP_PRIVATE | MAP_ANONYMOUS;
658     uintptr_t start = 0;
659     size_t size = tcg_ctx->code_gen_buffer_size;
660     void *buf;
661 
662     /* Constrain the position of the buffer based on the host cpu.
663        Note that these addresses are chosen in concert with the
664        addresses assigned in the relevant linker script file.  */
665 # if defined(__PIE__) || defined(__PIC__)
666     /* Don't bother setting a preferred location if we're building
667        a position-independent executable.  We're more likely to get
668        an address near the main executable if we let the kernel
669        choose the address.  */
670 # elif defined(__x86_64__) && defined(MAP_32BIT)
671     /* Force the memory down into low memory with the executable.
672        Leave the choice of exact location with the kernel.  */
673     flags |= MAP_32BIT;
674     /* Cannot expect to map more than 800MB in low memory.  */
675     if (size > 800u * 1024 * 1024) {
676         tcg_ctx->code_gen_buffer_size = size = 800u * 1024 * 1024;
677     }
678 # elif defined(__sparc__)
679     start = 0x40000000ul;
680 # elif defined(__s390x__)
681     start = 0x90000000ul;
682 # elif defined(__mips__)
683 #  if _MIPS_SIM == _ABI64
684     start = 0x128000000ul;
685 #  else
686     start = 0x08000000ul;
687 #  endif
688 # endif
689 
690     buf = mmap((void *)start, size, prot, flags, -1, 0);
691     if (buf == MAP_FAILED) {
692         return NULL;
693     }
694 
695 #ifdef __mips__
696     if (cross_256mb(buf, size)) {
697         /* Try again, with the original still mapped, to avoid re-acquiring
698            that 256mb crossing.  This time don't specify an address.  */
699         size_t size2;
700         void *buf2 = mmap(NULL, size, prot, flags, -1, 0);
701         switch ((int)(buf2 != MAP_FAILED)) {
702         case 1:
703             if (!cross_256mb(buf2, size)) {
704                 /* Success!  Use the new buffer.  */
705                 munmap(buf, size);
706                 break;
707             }
708             /* Failure.  Work with what we had.  */
709             munmap(buf2, size);
710             /* fallthru */
711         default:
712             /* Split the original buffer.  Free the smaller half.  */
713             buf2 = split_cross_256mb(buf, size);
714             size2 = tcg_ctx->code_gen_buffer_size;
715             if (buf == buf2) {
716                 munmap(buf + size2, size - size2);
717             } else {
718                 munmap(buf, size - size2);
719             }
720             size = size2;
721             break;
722         }
723         buf = buf2;
724     }
725 #endif
726 
727     /* Request large pages for the buffer.  */
728     qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
729 
730     return buf;
731 }
732 #endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */
733 
734 /* compare a pointer @ptr and a tb_tc @s */
735 static int ptr_cmp_tb_tc(const void *ptr, const struct tb_tc *s)
736 {
737     if (ptr >= s->ptr + s->size) {
738         return 1;
739     } else if (ptr < s->ptr) {
740         return -1;
741     }
742     return 0;
743 }
744 
745 static gint tb_tc_cmp(gconstpointer ap, gconstpointer bp)
746 {
747     const struct tb_tc *a = ap;
748     const struct tb_tc *b = bp;
749 
750     /*
751      * When both sizes are set, we know this isn't a lookup.
752      * This is the most likely case: every TB must be inserted; lookups
753      * are a lot less frequent.
754      */
755     if (likely(a->size && b->size)) {
756         if (a->ptr > b->ptr) {
757             return 1;
758         } else if (a->ptr < b->ptr) {
759             return -1;
760         }
761         /* a->ptr == b->ptr should happen only on deletions */
762         g_assert(a->size == b->size);
763         return 0;
764     }
765     /*
766      * All lookups have one of the two .size fields set to 0.
767      * From the glib sources we see that @ap is always the lookup key. However
768      * the docs provide no guarantee, so we just mark this case as likely.
769      */
770     if (likely(a->size == 0)) {
771         return ptr_cmp_tb_tc(a->ptr, b);
772     }
773     return ptr_cmp_tb_tc(b->ptr, a);
774 }
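
/*
 * A ptr-only key (size == 0) therefore compares equal to any tb_tc whose
 * [ptr, ptr + size) range contains it; this is how tb_find_pc() (defined
 * later in this file) resolves a host PC inside generated code:
 *
 *     struct tb_tc s = { .ptr = (void *)tc_ptr };
 *     return g_tree_lookup(tb_ctx.tb_tree, &s);
 */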
775 
776 static inline void code_gen_alloc(size_t tb_size)
777 {
778     tcg_ctx->code_gen_buffer_size = size_code_gen_buffer(tb_size);
779     tcg_ctx->code_gen_buffer = alloc_code_gen_buffer();
780     if (tcg_ctx->code_gen_buffer == NULL) {
781         fprintf(stderr, "Could not allocate dynamic translator buffer\n");
782         exit(1);
783     }
784     tb_ctx.tb_tree = g_tree_new(tb_tc_cmp);
785     qemu_mutex_init(&tb_ctx.tb_lock);
786 }
787 
788 static void tb_htable_init(void)
789 {
790     unsigned int mode = QHT_MODE_AUTO_RESIZE;
791 
792     qht_init(&tb_ctx.htable, CODE_GEN_HTABLE_SIZE, mode);
793 }
794 
795 /* Must be called before using the QEMU cpus. 'tb_size' is the size
796    (in bytes) allocated to the translation buffer. Zero means default
797    size. */
798 void tcg_exec_init(unsigned long tb_size)
799 {
800     tcg_allowed = true;
801     cpu_gen_init();
802     page_init();
803     tb_htable_init();
804     code_gen_alloc(tb_size);
805 #if defined(CONFIG_SOFTMMU)
806     /* There's no guest base to take into account, so go ahead and
807        initialize the prologue now.  */
808     tcg_prologue_init(tcg_ctx);
809 #endif
810 }
811 
812 /*
813  * Allocate a new translation block. Flush the translation buffer if
814  * too many translation blocks or too much generated code.
815  *
816  * Called with tb_lock held.
817  */
818 static TranslationBlock *tb_alloc(target_ulong pc)
819 {
820     TranslationBlock *tb;
821 
822     assert_tb_locked();
823 
824     tb = tcg_tb_alloc(tcg_ctx);
825     if (unlikely(tb == NULL)) {
826         return NULL;
827     }
828     return tb;
829 }
830 
831 /* Called with tb_lock held.  */
832 void tb_remove(TranslationBlock *tb)
833 {
834     assert_tb_locked();
835 
836     g_tree_remove(tb_ctx.tb_tree, &tb->tc);
837 }
838 
839 static inline void invalidate_page_bitmap(PageDesc *p)
840 {
841 #ifdef CONFIG_SOFTMMU
842     g_free(p->code_bitmap);
843     p->code_bitmap = NULL;
844     p->code_write_count = 0;
845 #endif
846 }
847 
848 /* Set to NULL all the 'first_tb' fields in all PageDescs. */
849 static void page_flush_tb_1(int level, void **lp)
850 {
851     int i;
852 
853     if (*lp == NULL) {
854         return;
855     }
856     if (level == 0) {
857         PageDesc *pd = *lp;
858 
859         for (i = 0; i < V_L2_SIZE; ++i) {
860             pd[i].first_tb = NULL;
861             invalidate_page_bitmap(pd + i);
862         }
863     } else {
864         void **pp = *lp;
865 
866         for (i = 0; i < V_L2_SIZE; ++i) {
867             page_flush_tb_1(level - 1, pp + i);
868         }
869     }
870 }
871 
872 static void page_flush_tb(void)
873 {
874     int i, l1_sz = v_l1_size;
875 
876     for (i = 0; i < l1_sz; i++) {
877         page_flush_tb_1(v_l2_levels, l1_map + i);
878     }
879 }
880 
881 static gboolean tb_host_size_iter(gpointer key, gpointer value, gpointer data)
882 {
883     const TranslationBlock *tb = value;
884     size_t *size = data;
885 
886     *size += tb->tc.size;
887     return false;
888 }
889 
890 /* flush all the translation blocks */
891 static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
892 {
893     tb_lock();
894 
895     /* If it has already been done on request of another CPU,
896      * just retry.
897      */
898     if (tb_ctx.tb_flush_count != tb_flush_count.host_int) {
899         goto done;
900     }
901 
902     if (DEBUG_TB_FLUSH_GATE) {
903         size_t nb_tbs = g_tree_nnodes(tb_ctx.tb_tree);
904         size_t host_size = 0;
905 
906         g_tree_foreach(tb_ctx.tb_tree, tb_host_size_iter, &host_size);
907         printf("qemu: flush code_size=%zu nb_tbs=%zu avg_tb_size=%zu\n",
908                tcg_code_size(), nb_tbs, nb_tbs > 0 ? host_size / nb_tbs : 0);
909     }
910 
911     CPU_FOREACH(cpu) {
912         cpu_tb_jmp_cache_clear(cpu);
913     }
914 
915     /* Increment the refcount first so that destroy acts as a reset */
916     g_tree_ref(tb_ctx.tb_tree);
917     g_tree_destroy(tb_ctx.tb_tree);
918 
919     qht_reset_size(&tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
920     page_flush_tb();
921 
922     tcg_region_reset_all();
923     /* XXX: flush processor icache at this point if cache flush is
924        expensive */
925     atomic_mb_set(&tb_ctx.tb_flush_count, tb_ctx.tb_flush_count + 1);
926 
927 done:
928     tb_unlock();
929 }
930 
931 void tb_flush(CPUState *cpu)
932 {
933     if (tcg_enabled()) {
934         unsigned tb_flush_count = atomic_mb_read(&tb_ctx.tb_flush_count);
935         async_safe_run_on_cpu(cpu, do_tb_flush,
936                               RUN_ON_CPU_HOST_INT(tb_flush_count));
937     }
938 }
939 
940 /*
941  * Formerly ifdef DEBUG_TB_CHECK. These debug functions are user-mode-only,
942  * so in order to prevent bit rot we compile them unconditionally in user-mode,
943  * and let the optimizer get rid of them by wrapping their user-only callers
944  * with if (DEBUG_TB_CHECK_GATE).
945  */
946 #ifdef CONFIG_USER_ONLY
947 
948 static void
949 do_tb_invalidate_check(struct qht *ht, void *p, uint32_t hash, void *userp)
950 {
951     TranslationBlock *tb = p;
952     target_ulong addr = *(target_ulong *)userp;
953 
954     if (!(addr + TARGET_PAGE_SIZE <= tb->pc || addr >= tb->pc + tb->size)) {
955         printf("ERROR invalidate: address=" TARGET_FMT_lx
956                " PC=%08lx size=%04x\n", addr, (long)tb->pc, tb->size);
957     }
958 }
959 
960 /* verify that all the pages have correct rights for code
961  *
962  * Called with tb_lock held.
963  */
964 static void tb_invalidate_check(target_ulong address)
965 {
966     address &= TARGET_PAGE_MASK;
967     qht_iter(&tb_ctx.htable, do_tb_invalidate_check, &address);
968 }
969 
970 static void
971 do_tb_page_check(struct qht *ht, void *p, uint32_t hash, void *userp)
972 {
973     TranslationBlock *tb = p;
974     int flags1, flags2;
975 
976     flags1 = page_get_flags(tb->pc);
977     flags2 = page_get_flags(tb->pc + tb->size - 1);
978     if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
979         printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
980                (long)tb->pc, tb->size, flags1, flags2);
981     }
982 }
983 
984 /* verify that all the pages have correct rights for code */
985 static void tb_page_check(void)
986 {
987     qht_iter(&tb_ctx.htable, do_tb_page_check, NULL);
988 }
989 
990 #endif /* CONFIG_USER_ONLY */
991 
992 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
993 {
994     TranslationBlock *tb1;
995     unsigned int n1;
996 
997     for (;;) {
998         tb1 = *ptb;
999         n1 = (uintptr_t)tb1 & 3;
1000         tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
1001         if (tb1 == tb) {
1002             *ptb = tb1->page_next[n1];
1003             break;
1004         }
1005         ptb = &tb1->page_next[n1];
1006     }
1007 }
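
/*
 * The page lists above (and the jump lists below) store tagged pointers:
 * TranslationBlock pointers are at least 4-byte aligned, so the low two
 * bits carry the list index.  A minimal sketch of the convention
 * (hypothetical helpers, not part of this file):
 */
#if 0
static inline uintptr_t example_tag(TranslationBlock *tb, unsigned n)
{
    return (uintptr_t)tb | n;   /* n: 0/1 = page slot, 2 = end of jmp list */
}

static inline TranslationBlock *example_untag(uintptr_t ntb, unsigned *n)
{
    *n = ntb & 3;
    return (TranslationBlock *)(ntb & ~(uintptr_t)3);
}
#endif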
1008 
1009 /* remove the TB from a list of TBs jumping to the n-th jump target of the TB */
1010 static inline void tb_remove_from_jmp_list(TranslationBlock *tb, int n)
1011 {
1012     TranslationBlock *tb1;
1013     uintptr_t *ptb, ntb;
1014     unsigned int n1;
1015 
1016     ptb = &tb->jmp_list_next[n];
1017     if (*ptb) {
1018         /* find tb(n) in circular list */
1019         for (;;) {
1020             ntb = *ptb;
1021             n1 = ntb & 3;
1022             tb1 = (TranslationBlock *)(ntb & ~3);
1023             if (n1 == n && tb1 == tb) {
1024                 break;
1025             }
1026             if (n1 == 2) {
1027                 ptb = &tb1->jmp_list_first;
1028             } else {
1029                 ptb = &tb1->jmp_list_next[n1];
1030             }
1031         }
1032         /* now we can remove tb(n) from the list */
1033         *ptb = tb->jmp_list_next[n];
1034 
1035         tb->jmp_list_next[n] = (uintptr_t)NULL;
1036     }
1037 }
1038 
1039 /* reset the jump entry 'n' of a TB so that it is not chained to
1040    another TB */
1041 static inline void tb_reset_jump(TranslationBlock *tb, int n)
1042 {
1043     uintptr_t addr = (uintptr_t)(tb->tc.ptr + tb->jmp_reset_offset[n]);
1044     tb_set_jmp_target(tb, n, addr);
1045 }
1046 
1047 /* remove any jumps to the TB */
1048 static inline void tb_jmp_unlink(TranslationBlock *tb)
1049 {
1050     TranslationBlock *tb1;
1051     uintptr_t *ptb, ntb;
1052     unsigned int n1;
1053 
1054     ptb = &tb->jmp_list_first;
1055     for (;;) {
1056         ntb = *ptb;
1057         n1 = ntb & 3;
1058         tb1 = (TranslationBlock *)(ntb & ~3);
1059         if (n1 == 2) {
1060             break;
1061         }
1062         tb_reset_jump(tb1, n1);
1063         *ptb = tb1->jmp_list_next[n1];
1064         tb1->jmp_list_next[n1] = (uintptr_t)NULL;
1065     }
1066 }
1067 
1068 /* invalidate one TB
1069  *
1070  * Called with tb_lock held.
1071  */
1072 void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
1073 {
1074     CPUState *cpu;
1075     PageDesc *p;
1076     uint32_t h;
1077     tb_page_addr_t phys_pc;
1078 
1079     assert_tb_locked();
1080 
1081     atomic_set(&tb->cflags, tb->cflags | CF_INVALID);
1082 
1083     /* remove the TB from the hash list */
1084     phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1085     h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->cflags & CF_HASH_MASK,
1086                      tb->trace_vcpu_dstate);
1087     if (!qht_remove(&tb_ctx.htable, tb, h)) {
1088         return;
1089     }
1090 
1091     /* remove the TB from the page list */
1092     if (tb->page_addr[0] != page_addr) {
1093         p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
1094         tb_page_remove(&p->first_tb, tb);
1095         invalidate_page_bitmap(p);
1096     }
1097     if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
1098         p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
1099         tb_page_remove(&p->first_tb, tb);
1100         invalidate_page_bitmap(p);
1101     }
1102 
1103     /* remove the TB from each CPU's tb_jmp_cache */
1104     h = tb_jmp_cache_hash_func(tb->pc);
1105     CPU_FOREACH(cpu) {
1106         if (atomic_read(&cpu->tb_jmp_cache[h]) == tb) {
1107             atomic_set(&cpu->tb_jmp_cache[h], NULL);
1108         }
1109     }
1110 
1111     /* remove this TB from the two jump lists */
1112     tb_remove_from_jmp_list(tb, 0);
1113     tb_remove_from_jmp_list(tb, 1);
1114 
1115     /* suppress any remaining jumps to this TB */
1116     tb_jmp_unlink(tb);
1117 
1118     tb_ctx.tb_phys_invalidate_count++;
1119 }
1120 
1121 #ifdef CONFIG_SOFTMMU
1122 static void build_page_bitmap(PageDesc *p)
1123 {
1124     int n, tb_start, tb_end;
1125     TranslationBlock *tb;
1126 
1127     p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE);
1128 
1129     tb = p->first_tb;
1130     while (tb != NULL) {
1131         n = (uintptr_t)tb & 3;
1132         tb = (TranslationBlock *)((uintptr_t)tb & ~3);
1133         /* NOTE: this is subtle as a TB may span two physical pages */
1134         if (n == 0) {
1135             /* NOTE: tb_end may be after the end of the page, but
1136                it is not a problem */
1137             tb_start = tb->pc & ~TARGET_PAGE_MASK;
1138             tb_end = tb_start + tb->size;
1139             if (tb_end > TARGET_PAGE_SIZE) {
1140                 tb_end = TARGET_PAGE_SIZE;
1141             }
1142         } else {
1143             tb_start = 0;
1144             tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1145         }
1146         bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start);
1147         tb = tb->page_next[n];
1148     }
1149 }
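
/* For example, a TB whose code starts at page offset 0xffc and is 0x10
 * bytes long sets bits [0xffc, 0x1000) in its first page's bitmap and,
 * via the n == 1 case above, bits [0, 0xc) in its second page's bitmap.
 */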
1150 #endif
1151 
1152 /* add the tb in the target page and protect it if necessary
1153  *
1154  * Called with mmap_lock held for user-mode emulation.
1155  */
1156 static inline void tb_alloc_page(TranslationBlock *tb,
1157                                  unsigned int n, tb_page_addr_t page_addr)
1158 {
1159     PageDesc *p;
1160 #ifndef CONFIG_USER_ONLY
1161     bool page_already_protected;
1162 #endif
1163 
1164     assert_memory_lock();
1165 
1166     tb->page_addr[n] = page_addr;
1167     p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
1168     tb->page_next[n] = p->first_tb;
1169 #ifndef CONFIG_USER_ONLY
1170     page_already_protected = p->first_tb != NULL;
1171 #endif
1172     p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
1173     invalidate_page_bitmap(p);
1174 
1175 #if defined(CONFIG_USER_ONLY)
1176     if (p->flags & PAGE_WRITE) {
1177         target_ulong addr;
1178         PageDesc *p2;
1179         int prot;
1180 
1181         /* force the host page to be non-writable (writes will take a
1182            page fault + mprotect overhead) */
1183         page_addr &= qemu_host_page_mask;
1184         prot = 0;
1185         for (addr = page_addr; addr < page_addr + qemu_host_page_size;
1186             addr += TARGET_PAGE_SIZE) {
1187 
1188             p2 = page_find(addr >> TARGET_PAGE_BITS);
1189             if (!p2) {
1190                 continue;
1191             }
1192             prot |= p2->flags;
1193             p2->flags &= ~PAGE_WRITE;
1194         }
1195         mprotect(g2h(page_addr), qemu_host_page_size,
1196                  (prot & PAGE_BITS) & ~PAGE_WRITE);
1197         if (DEBUG_TB_INVALIDATE_GATE) {
1198             printf("protecting code page: 0x" TB_PAGE_ADDR_FMT "\n", page_addr);
1199         }
1200     }
1201 #else
1202     /* if some code is already present, then the pages are already
1203        protected. So we handle the case where only the first TB is
1204        allocated in a physical page */
1205     if (!page_already_protected) {
1206         tlb_protect_code(page_addr);
1207     }
1208 #endif
1209 }
1210 
1211 /* add a new TB and link it to the physical page tables. phys_page2 is
1212  * (-1) to indicate that only one page contains the TB.
1213  *
1214  * Called with mmap_lock held for user-mode emulation.
1215  */
1216 static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
1217                          tb_page_addr_t phys_page2)
1218 {
1219     uint32_t h;
1220 
1221     assert_memory_lock();
1222 
1223     /* add in the page list */
1224     tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1225     if (phys_page2 != -1) {
1226         tb_alloc_page(tb, 1, phys_page2);
1227     } else {
1228         tb->page_addr[1] = -1;
1229     }
1230 
1231     /* add in the hash table */
1232     h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->cflags & CF_HASH_MASK,
1233                      tb->trace_vcpu_dstate);
1234     qht_insert(&tb_ctx.htable, tb, h);
1235 
1236 #ifdef CONFIG_USER_ONLY
1237     if (DEBUG_TB_CHECK_GATE) {
1238         tb_page_check();
1239     }
1240 #endif
1241 }
1242 
1243 /* Called with mmap_lock held for user mode emulation.  */
1244 TranslationBlock *tb_gen_code(CPUState *cpu,
1245                               target_ulong pc, target_ulong cs_base,
1246                               uint32_t flags, int cflags)
1247 {
1248     CPUArchState *env = cpu->env_ptr;
1249     TranslationBlock *tb;
1250     tb_page_addr_t phys_pc, phys_page2;
1251     target_ulong virt_page2;
1252     tcg_insn_unit *gen_code_buf;
1253     int gen_code_size, search_size;
1254 #ifdef CONFIG_PROFILER
1255     TCGProfile *prof = &tcg_ctx->prof;
1256     int64_t ti;
1257 #endif
1258     assert_memory_lock();
1259 
1260     phys_pc = get_page_addr_code(env, pc);
1261 
1262  buffer_overflow:
1263     tb = tb_alloc(pc);
1264     if (unlikely(!tb)) {
1265         /* flush must be done */
1266         tb_flush(cpu);
1267         mmap_unlock();
1268         /* Make the execution loop process the flush as soon as possible.  */
1269         cpu->exception_index = EXCP_INTERRUPT;
1270         cpu_loop_exit(cpu);
1271     }
1272 
1273     gen_code_buf = tcg_ctx->code_gen_ptr;
1274     tb->tc.ptr = gen_code_buf;
1275     tb->pc = pc;
1276     tb->cs_base = cs_base;
1277     tb->flags = flags;
1278     tb->cflags = cflags;
1279     tb->trace_vcpu_dstate = *cpu->trace_dstate;
1280     tcg_ctx->tb_cflags = cflags;
1281 
1282 #ifdef CONFIG_PROFILER
1283     /* includes aborted translations because of exceptions */
1284     atomic_set(&prof->tb_count1, prof->tb_count1 + 1);
1285     ti = profile_getclock();
1286 #endif
1287 
1288     tcg_func_start(tcg_ctx);
1289 
1290     tcg_ctx->cpu = ENV_GET_CPU(env);
1291     gen_intermediate_code(cpu, tb);
1292     tcg_ctx->cpu = NULL;
1293 
1294     trace_translate_block(tb, tb->pc, tb->tc.ptr);
1295 
1296     /* generate machine code */
1297     tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
1298     tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID;
1299     tcg_ctx->tb_jmp_reset_offset = tb->jmp_reset_offset;
1300     if (TCG_TARGET_HAS_direct_jump) {
1301         tcg_ctx->tb_jmp_insn_offset = tb->jmp_target_arg;
1302         tcg_ctx->tb_jmp_target_addr = NULL;
1303     } else {
1304         tcg_ctx->tb_jmp_insn_offset = NULL;
1305         tcg_ctx->tb_jmp_target_addr = tb->jmp_target_arg;
1306     }
1307 
1308 #ifdef CONFIG_PROFILER
1309     atomic_set(&prof->tb_count, prof->tb_count + 1);
1310     atomic_set(&prof->interm_time, prof->interm_time + profile_getclock() - ti);
1311     ti = profile_getclock();
1312 #endif
1313 
1314     /* ??? Overflow could be handled better here.  In particular, we
1315        don't need to re-do gen_intermediate_code, nor should we re-do
1316        the tcg optimization currently hidden inside tcg_gen_code.  All
1317        that should be required is to flush the TBs, allocate a new TB,
1318        re-initialize it per above, and re-do the actual code generation.  */
1319     gen_code_size = tcg_gen_code(tcg_ctx, tb);
1320     if (unlikely(gen_code_size < 0)) {
1321         goto buffer_overflow;
1322     }
1323     search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
1324     if (unlikely(search_size < 0)) {
1325         goto buffer_overflow;
1326     }
1327     tb->tc.size = gen_code_size;
1328 
1329 #ifdef CONFIG_PROFILER
1330     atomic_set(&prof->code_time, prof->code_time + profile_getclock() - ti);
1331     atomic_set(&prof->code_in_len, prof->code_in_len + tb->size);
1332     atomic_set(&prof->code_out_len, prof->code_out_len + gen_code_size);
1333     atomic_set(&prof->search_out_len, prof->search_out_len + search_size);
1334 #endif
1335 
1336 #ifdef DEBUG_DISAS
1337     if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
1338         qemu_log_in_addr_range(tb->pc)) {
1339         qemu_log_lock();
1340         qemu_log("OUT: [size=%d]\n", gen_code_size);
1341         if (tcg_ctx->data_gen_ptr) {
1342             size_t code_size = tcg_ctx->data_gen_ptr - tb->tc.ptr;
1343             size_t data_size = gen_code_size - code_size;
1344             size_t i;
1345 
1346             log_disas(tb->tc.ptr, code_size);
1347 
1348             for (i = 0; i < data_size; i += sizeof(tcg_target_ulong)) {
1349                 if (sizeof(tcg_target_ulong) == 8) {
1350                     qemu_log("0x%08" PRIxPTR ":  .quad  0x%016" PRIx64 "\n",
1351                              (uintptr_t)tcg_ctx->data_gen_ptr + i,
1352                              *(uint64_t *)(tcg_ctx->data_gen_ptr + i));
1353                 } else {
1354                     qemu_log("0x%08" PRIxPTR ":  .long  0x%08x\n",
1355                              (uintptr_t)tcg_ctx->data_gen_ptr + i,
1356                              *(uint32_t *)(tcg_ctx->data_gen_ptr + i));
1357                 }
1358             }
1359         } else {
1360             log_disas(tb->tc.ptr, gen_code_size);
1361         }
1362         qemu_log("\n");
1363         qemu_log_flush();
1364         qemu_log_unlock();
1365     }
1366 #endif
1367 
1368     atomic_set(&tcg_ctx->code_gen_ptr, (void *)
1369         ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
1370                  CODE_GEN_ALIGN));
1371 
1372     /* init jump list */
1373     assert(((uintptr_t)tb & 3) == 0);
1374     tb->jmp_list_first = (uintptr_t)tb | 2;
1375     tb->jmp_list_next[0] = (uintptr_t)NULL;
1376     tb->jmp_list_next[1] = (uintptr_t)NULL;
1377 
1378     /* init original jump addresses which have been set during tcg_gen_code() */
1379     if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
1380         tb_reset_jump(tb, 0);
1381     }
1382     if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
1383         tb_reset_jump(tb, 1);
1384     }
1385 
1386     /* check next page if needed */
1387     virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
1388     phys_page2 = -1;
1389     if ((pc & TARGET_PAGE_MASK) != virt_page2) {
1390         phys_page2 = get_page_addr_code(env, virt_page2);
1391     }
1392     /* As long as consistency of the TB structures is provided by tb_lock in user
1393      * mode and is implicit in single-threaded softmmu emulation, no explicit
1394      * memory barrier is required before tb_link_page() makes the TB visible
1395      * through the physical hash table and physical page list.
1396      */
1397     tb_link_page(tb, phys_pc, phys_page2);
1398     g_tree_insert(tb_ctx.tb_tree, &tb->tc, tb);
1399     return tb;
1400 }
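
/*
 * Illustrative caller sketch (simplified and hedged; the real caller
 * lives in the cpu_exec path): on a translation-cache miss the
 * execution loop does roughly the following.
 */
#if 0
static TranslationBlock *example_translate(CPUState *cpu, target_ulong pc,
                                           target_ulong cs_base,
                                           uint32_t flags)
{
    TranslationBlock *tb;

    mmap_lock();
    tb_lock();
    tb = tb_gen_code(cpu, pc, cs_base, flags, curr_cflags());
    tb_unlock();
    mmap_unlock();
    return tb;
}
#endif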
1401 
1402 /*
1403  * Invalidate all TBs which intersect with the target physical address range
1404  * [start;end[. NOTE: start and end may refer to *different* physical pages.
1405  * 'is_cpu_write_access' should be true if called from a real cpu write
1406  * access: the virtual CPU will exit the current TB if code is modified inside
1407  * this TB.
1408  *
1409  * Called with mmap_lock held for user-mode emulation, grabs tb_lock
1410  * Called with tb_lock held for system-mode emulation
1411  */
1412 static void tb_invalidate_phys_range_1(tb_page_addr_t start, tb_page_addr_t end)
1413 {
1414     while (start < end) {
1415         tb_invalidate_phys_page_range(start, end, 0);
1416         start &= TARGET_PAGE_MASK;
1417         start += TARGET_PAGE_SIZE;
1418     }
1419 }
1420 
1421 #ifdef CONFIG_SOFTMMU
1422 void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
1423 {
1424     assert_tb_locked();
1425     tb_invalidate_phys_range_1(start, end);
1426 }
1427 #else
1428 void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
1429 {
1430     assert_memory_lock();
1431     tb_lock();
1432     tb_invalidate_phys_range_1(start, end);
1433     tb_unlock();
1434 }
1435 #endif
1436 /*
1437  * Invalidate all TBs which intersect with the target physical address range
1438  * [start;end[. NOTE: start and end must refer to the *same* physical page.
1439  * 'is_cpu_write_access' should be true if called from a real cpu write
1440  * access: the virtual CPU will exit the current TB if code is modified inside
1441  * this TB.
1442  *
1443  * Called with tb_lock/mmap_lock held for user-mode emulation
1444  * Called with tb_lock held for system-mode emulation
1445  */
1446 void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
1447                                    int is_cpu_write_access)
1448 {
1449     TranslationBlock *tb, *tb_next;
1450     tb_page_addr_t tb_start, tb_end;
1451     PageDesc *p;
1452     int n;
1453 #ifdef TARGET_HAS_PRECISE_SMC
1454     CPUState *cpu = current_cpu;
1455     CPUArchState *env = NULL;
1456     int current_tb_not_found = is_cpu_write_access;
1457     TranslationBlock *current_tb = NULL;
1458     int current_tb_modified = 0;
1459     target_ulong current_pc = 0;
1460     target_ulong current_cs_base = 0;
1461     uint32_t current_flags = 0;
1462 #endif /* TARGET_HAS_PRECISE_SMC */
1463 
1464     assert_memory_lock();
1465     assert_tb_locked();
1466 
1467     p = page_find(start >> TARGET_PAGE_BITS);
1468     if (!p) {
1469         return;
1470     }
1471 #if defined(TARGET_HAS_PRECISE_SMC)
1472     if (cpu != NULL) {
1473         env = cpu->env_ptr;
1474     }
1475 #endif
1476 
1477     /* we remove all the TBs in the range [start, end[ */
1478     /* XXX: see if in some cases it could be faster to invalidate all
1479        the code */
1480     tb = p->first_tb;
1481     while (tb != NULL) {
1482         n = (uintptr_t)tb & 3;
1483         tb = (TranslationBlock *)((uintptr_t)tb & ~3);
1484         tb_next = tb->page_next[n];
1485         /* NOTE: this is subtle as a TB may span two physical pages */
1486         if (n == 0) {
1487             /* NOTE: tb_end may be after the end of the page, but
1488                it is not a problem */
1489             tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1490             tb_end = tb_start + tb->size;
1491         } else {
1492             tb_start = tb->page_addr[1];
1493             tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1494         }
1495         if (!(tb_end <= start || tb_start >= end)) {
1496 #ifdef TARGET_HAS_PRECISE_SMC
1497             if (current_tb_not_found) {
1498                 current_tb_not_found = 0;
1499                 current_tb = NULL;
1500                 if (cpu->mem_io_pc) {
1501                     /* now we have a real cpu fault */
1502                     current_tb = tb_find_pc(cpu->mem_io_pc);
1503                 }
1504             }
1505             if (current_tb == tb &&
1506                 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1507                 /* If we are modifying the current TB, we must stop
1508                 its execution. We could be more precise by checking
1509                 that the modification is after the current PC, but it
1510                 would require a specialized function to partially
1511                 restore the CPU state */
1512 
1513                 current_tb_modified = 1;
1514                 cpu_restore_state_from_tb(cpu, current_tb,
1515                                           cpu->mem_io_pc, true);
1516                 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1517                                      &current_flags);
1518             }
1519 #endif /* TARGET_HAS_PRECISE_SMC */
1520             tb_phys_invalidate(tb, -1);
1521         }
1522         tb = tb_next;
1523     }
1524 #if !defined(CONFIG_USER_ONLY)
1525     /* if no code remaining, no need to continue to use slow writes */
1526     if (!p->first_tb) {
1527         invalidate_page_bitmap(p);
1528         tlb_unprotect_code(start);
1529     }
1530 #endif
1531 #ifdef TARGET_HAS_PRECISE_SMC
1532     if (current_tb_modified) {
1533         /* Force execution of one insn next time.  */
1534         cpu->cflags_next_tb = 1 | curr_cflags();
1535         cpu_loop_exit_noexc(cpu);
1536     }
1537 #endif
1538 }
1539 
1540 #ifdef CONFIG_SOFTMMU
1541 /* len must be <= 8 and start must be a multiple of len.
1542  * Called via softmmu_template.h when code areas are written to with
1543  * iothread mutex not held.
1544  * the iothread mutex not held.
1545 void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
1546 {
1547     PageDesc *p;
1548 
1549 #if 0
1550     if (1) {
1551         qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1552                   cpu_single_env->mem_io_vaddr, len,
1553                   cpu_single_env->eip,
1554                   cpu_single_env->eip +
1555                   (intptr_t)cpu_single_env->segs[R_CS].base);
1556     }
1557 #endif
1558     assert_memory_lock();
1559 
1560     p = page_find(start >> TARGET_PAGE_BITS);
1561     if (!p) {
1562         return;
1563     }
1564     if (!p->code_bitmap &&
1565         ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
1566         /* build code bitmap.  FIXME: writes should be protected by
1567          * tb_lock, reads by tb_lock or RCU.
1568          */
1569         build_page_bitmap(p);
1570     }
1571     if (p->code_bitmap) {
1572         unsigned int nr;
1573         unsigned long b;
1574 
1575         nr = start & ~TARGET_PAGE_MASK;
1576         b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
1577         if (b & ((1 << len) - 1)) {
1578             goto do_invalidate;
1579         }
1580     } else {
1581     do_invalidate:
1582         tb_invalidate_phys_page_range(start, start + len, 1);
1583     }
1584 }
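
/* Worked example of the bitmap test above (assuming BITS_PER_LONG == 64):
 * an 8-byte write at page offset 0x104 gives nr == 0x104, so b is word
 * BIT_WORD(0x104) == 4 shifted right by 4; if build_page_bitmap() marked
 * any of bits 0x104..0x10b as code, b & 0xff is non-zero and we take the
 * do_invalidate path.
 */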
1585 #else
1586 /* Called with mmap_lock held. If pc is not 0 then it indicates the
1587  * host PC of the faulting store instruction that caused this invalidate.
1588  * Returns true if the caller needs to abort execution of the current
1589  * TB (because it was modified by this store and the guest CPU has
1590  * precise-SMC semantics).
1591  */
1592 static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
1593 {
1594     TranslationBlock *tb;
1595     PageDesc *p;
1596     int n;
1597 #ifdef TARGET_HAS_PRECISE_SMC
1598     TranslationBlock *current_tb = NULL;
1599     CPUState *cpu = current_cpu;
1600     CPUArchState *env = NULL;
1601     int current_tb_modified = 0;
1602     target_ulong current_pc = 0;
1603     target_ulong current_cs_base = 0;
1604     uint32_t current_flags = 0;
1605 #endif
1606 
1607     assert_memory_lock();
1608 
1609     addr &= TARGET_PAGE_MASK;
1610     p = page_find(addr >> TARGET_PAGE_BITS);
1611     if (!p) {
1612         return false;
1613     }
1614 
1615     tb_lock();
1616     tb = p->first_tb;
1617 #ifdef TARGET_HAS_PRECISE_SMC
1618     if (tb && pc != 0) {
1619         current_tb = tb_find_pc(pc);
1620     }
1621     if (cpu != NULL) {
1622         env = cpu->env_ptr;
1623     }
1624 #endif
1625     while (tb != NULL) {
1626         n = (uintptr_t)tb & 3;
1627         tb = (TranslationBlock *)((uintptr_t)tb & ~3);
1628 #ifdef TARGET_HAS_PRECISE_SMC
1629         if (current_tb == tb &&
1630             (current_tb->cflags & CF_COUNT_MASK) != 1) {
1631                 /* If we are modifying the current TB, we must stop
1632                    its execution. We could be more precise by checking
1633                    that the modification is after the current PC, but it
1634                    would require a specialized function to partially
1635                    restore the CPU state */
1636 
1637             current_tb_modified = 1;
1638             cpu_restore_state_from_tb(cpu, current_tb, pc, true);
1639             cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1640                                  &current_flags);
1641         }
1642 #endif /* TARGET_HAS_PRECISE_SMC */
1643         tb_phys_invalidate(tb, addr);
1644         tb = tb->page_next[n];
1645     }
1646     p->first_tb = NULL;
1647 #ifdef TARGET_HAS_PRECISE_SMC
1648     if (current_tb_modified) {
1649         /* Force execution of one insn next time.  */
1650         cpu->cflags_next_tb = 1 | curr_cflags();
1651         /* tb_lock will be reset after cpu_loop_exit_noexc longjmps
1652          * back into the cpu_exec loop. */
1653         return true;
1654     }
1655 #endif
1656     tb_unlock();
1657 
1658     return false;
1659 }
1660 #endif
1661 
1662 /*
1663  * Find the TB 'tb' such that
1664  * tb->tc.ptr <= tc_ptr < tb->tc.ptr + tb->tc.size
1665  * Return NULL if not found.
1666  */
1667 static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
1668 {
1669     struct tb_tc s = { .ptr = (void *)tc_ptr };
1670 
1671     return g_tree_lookup(tb_ctx.tb_tree, &s);
1672 }
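
/*
 * Illustrative sketch, not part of the original file: tb_find_pc()
 * can look up a host PC in a GTree of blocks because the tree's
 * comparator treats any address inside [ptr, ptr + size) as equal to
 * the block owning that range, and the search key is a zero-size
 * range.  A comparator in that style over a hypothetical ExRange:
 */
#include <glib.h>

typedef struct ExRange {
    const char *ptr;  /* start of a block's generated code */
    size_t size;      /* bytes of generated code; 0 for lookup keys */
} ExRange;

static gint ex_range_cmp(gconstpointer ap, gconstpointer bp)
{
    const ExRange *a = ap;
    const ExRange *b = bp;

    if (a->ptr == b->ptr) {
        return 0;
    }
    if (a->ptr < b->ptr) {
        /* Equal if a's range reaches into b; otherwise a sorts first. */
        return a->ptr + a->size <= b->ptr ? -1 : 0;
    }
    return b->ptr + b->size <= a->ptr ? 1 : 0;
}
/* A tree built with g_tree_new(ex_range_cmp) then finds the enclosing
 * block via g_tree_lookup() with a key of { .ptr = pc, .size = 0 }. */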
1673 
1674 #if !defined(CONFIG_USER_ONLY)
1675 void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
1676 {
1677     ram_addr_t ram_addr;
1678     MemoryRegion *mr;
1679     hwaddr l = 1;
1680 
1681     rcu_read_lock();
1682     mr = address_space_translate(as, addr, &addr, &l, false);
1683     if (!(memory_region_is_ram(mr)
1684           || memory_region_is_romd(mr))) {
1685         rcu_read_unlock();
1686         return;
1687     }
1688     ram_addr = memory_region_get_ram_addr(mr) + addr;
1689     tb_lock();
1690     tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1691     tb_unlock();
1692     rcu_read_unlock();
1693 }
1694 #endif /* !defined(CONFIG_USER_ONLY) */
1695 
1696 /* Called with tb_lock held.  */
1697 void tb_check_watchpoint(CPUState *cpu)
1698 {
1699     TranslationBlock *tb;
1700 
1701     tb = tb_find_pc(cpu->mem_io_pc);
1702     if (tb) {
1703         /* We can use retranslation to find the PC.  */
1704         cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc, true);
1705         tb_phys_invalidate(tb, -1);
1706     } else {
1707         /* The exception probably happened in a helper.  The CPU state should
1708            have been saved before calling it. Fetch the PC from there.  */
1709         CPUArchState *env = cpu->env_ptr;
1710         target_ulong pc, cs_base;
1711         tb_page_addr_t addr;
1712         uint32_t flags;
1713 
1714         cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
1715         addr = get_page_addr_code(env, pc);
1716         tb_invalidate_phys_range(addr, addr + 1);
1717     }
1718 }
1719 
1720 #ifndef CONFIG_USER_ONLY
1721 /* In deterministic execution mode, instructions performing device I/O
1722  * must be at the end of the TB.
1723  *
1724  * Called by softmmu_template.h, with iothread mutex not held.
1725  */
1726 void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
1727 {
1728 #if defined(TARGET_MIPS) || defined(TARGET_SH4)
1729     CPUArchState *env = cpu->env_ptr;
1730 #endif
1731     TranslationBlock *tb;
1732     uint32_t n;
1733 
1734     tb_lock();
1735     tb = tb_find_pc(retaddr);
1736     if (!tb) {
1737         cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
1738                   (void *)retaddr);
1739     }
1740     cpu_restore_state_from_tb(cpu, tb, retaddr, true);
1741 
1742     /* On MIPS and SH, delay slot instructions can only be restarted if
1743        they were already the first instruction in the TB.  If this is not
1744        the first instruction in a TB then re-execute the preceding
1745        branch.  */
1746     n = 1;
1747 #if defined(TARGET_MIPS)
1748     if ((env->hflags & MIPS_HFLAG_BMASK) != 0
1749         && env->active_tc.PC != tb->pc) {
1750         env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
1751         cpu->icount_decr.u16.low++;
1752         env->hflags &= ~MIPS_HFLAG_BMASK;
1753         n = 2;
1754     }
1755 #elif defined(TARGET_SH4)
1756     if ((env->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) != 0
1757         && env->pc != tb->pc) {
1758         env->pc -= 2;
1759         cpu->icount_decr.u16.low++;
1760         env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
1761         n = 2;
1762     }
1763 #endif
1764 
1765     /* Generate a new TB executing the I/O insn.  */
1766     cpu->cflags_next_tb = curr_cflags() | CF_LAST_IO | n;
1767 
1768     if (tb->cflags & CF_NOCACHE) {
1769         if (tb->orig_tb) {
1770             /* Invalidate original TB if this TB was generated in
1771              * cpu_exec_nocache() */
1772             tb_phys_invalidate(tb->orig_tb, -1);
1773         }
1774         tb_remove(tb);
1775     }
1776 
1777     /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
1778      * the first in the TB) then we end up generating a whole new TB and
1779      * repeating the fault, which is horribly inefficient.
1780      * Better would be to execute just this insn uncached, or generate a
1781      * second new TB.
1782      *
1783      * cpu_loop_exit_noexc will longjmp back to cpu_exec where the
1784      * tb_lock gets reset.
1785      */
1786     cpu_loop_exit_noexc(cpu);
1787 }
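
/*
 * Illustrative sketch, not part of the original file: the cflags value
 * built above packs the requested instruction count for the next TB
 * into the low bits alongside flag bits such as CF_LAST_IO.  The EX_*
 * values are hypothetical stand-ins for CF_COUNT_MASK and CF_LAST_IO.
 */
#include <stdint.h>

#define EX_CF_COUNT_MASK 0x7fffu
#define EX_CF_LAST_IO    0x8000u

static uint32_t ex_encode_cflags(uint32_t base_flags, uint32_t insn_count)
{
    /* Request at most insn_count insns, with the I/O insn last. */
    return (base_flags & ~EX_CF_COUNT_MASK)
           | EX_CF_LAST_IO
           | (insn_count & EX_CF_COUNT_MASK);
}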
1788 
1789 static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
1790 {
1791     unsigned int i, i0 = tb_jmp_cache_hash_page(page_addr);
1792 
1793     for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
1794         atomic_set(&cpu->tb_jmp_cache[i0 + i], NULL);
1795     }
1796 }
1797 
1798 void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
1799 {
1800     /* Discard jump cache entries for any tb which might potentially
1801     /* Discard jump cache entries for any tb which might
1802        overlap the flushed page.  */
1803     tb_jmp_cache_clear_page(cpu, addr);
1804 }
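
/*
 * Illustrative sketch, not part of the original file: the jump cache
 * is a direct-mapped array in which every guest page hashes to one
 * contiguous group of slots, so flushing a page is a short loop over
 * that group; two groups are cleared above because a TB may start on
 * the preceding page and spill into the flushed one.  Miniature model
 * with hypothetical sizing and a hypothetical hash:
 */
#include <stddef.h>

#define EX_PAGE_BITS      12
#define EX_JMP_PAGE_SIZE  64   /* slots per page group */
#define EX_JMP_CACHE_SIZE 4096 /* total slots; multiple of group size */

static void *ex_jmp_cache[EX_JMP_CACHE_SIZE];

static void ex_jmp_cache_clear_page(unsigned long page_addr)
{
    /* Map the page number to the first slot of its group. */
    size_t i0 = ((page_addr >> EX_PAGE_BITS) * EX_JMP_PAGE_SIZE)
                % EX_JMP_CACHE_SIZE;
    size_t i;

    for (i = 0; i < EX_JMP_PAGE_SIZE; i++) {
        ex_jmp_cache[i0 + i] = NULL;
    }
}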
1805 
1806 static void print_qht_statistics(FILE *f, fprintf_function cpu_fprintf,
1807                                  struct qht_stats hst)
1808 {
1809     uint32_t hgram_opts;
1810     size_t hgram_bins;
1811     char *hgram;
1812 
1813     if (!hst.head_buckets) {
1814         return;
1815     }
1816     cpu_fprintf(f, "TB hash buckets     %zu/%zu (%0.2f%% head buckets used)\n",
1817                 hst.used_head_buckets, hst.head_buckets,
1818                 (double)hst.used_head_buckets / hst.head_buckets * 100);
1819 
1820     hgram_opts =  QDIST_PR_BORDER | QDIST_PR_LABELS;
1821     hgram_opts |= QDIST_PR_100X   | QDIST_PR_PERCENT;
1822     if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) {
1823         hgram_opts |= QDIST_PR_NODECIMAL;
1824     }
1825     hgram = qdist_pr(&hst.occupancy, 10, hgram_opts);
1826     cpu_fprintf(f, "TB hash occupancy   %0.2f%% avg chain occ. Histogram: %s\n",
1827                 qdist_avg(&hst.occupancy) * 100, hgram);
1828     g_free(hgram);
1829 
1830     hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
1831     hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain);
1832     if (hgram_bins > 10) {
1833         hgram_bins = 10;
1834     } else {
1835         hgram_bins = 0;
1836         hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE;
1837     }
1838     hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts);
1839     cpu_fprintf(f, "TB hash avg chain   %0.3f buckets. Histogram: %s\n",
1840                 qdist_avg(&hst.chain), hgram);
1841     g_free(hgram);
1842 }
1843 
1844 struct tb_tree_stats {
1845     size_t host_size;
1846     size_t target_size;
1847     size_t max_target_size;
1848     size_t direct_jmp_count;
1849     size_t direct_jmp2_count;
1850     size_t cross_page;
1851 };
1852 
1853 static gboolean tb_tree_stats_iter(gpointer key, gpointer value, gpointer data)
1854 {
1855     const TranslationBlock *tb = value;
1856     struct tb_tree_stats *tst = data;
1857 
1858     tst->host_size += tb->tc.size;
1859     tst->target_size += tb->size;
1860     if (tb->size > tst->max_target_size) {
1861         tst->max_target_size = tb->size;
1862     }
1863     if (tb->page_addr[1] != -1) {
1864         tst->cross_page++;
1865     }
1866     if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
1867         tst->direct_jmp_count++;
1868         if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
1869             tst->direct_jmp2_count++;
1870         }
1871     }
1872     return false;
1873 }
1874 
1875 void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
1876 {
1877     struct tb_tree_stats tst = {};
1878     struct qht_stats hst;
1879     size_t nb_tbs;
1880 
1881     tb_lock();
1882 
1883     nb_tbs = g_tree_nnodes(tb_ctx.tb_tree);
1884     g_tree_foreach(tb_ctx.tb_tree, tb_tree_stats_iter, &tst);
1885     /* XXX: avoid using doubles? */
1886     cpu_fprintf(f, "Translation buffer state:\n");
1887     /*
1888      * Report total code size including the padding and TB structs;
1889      * otherwise users might think "-tb-size" is not honoured.
1890      * For avg host size we use the precise numbers from tb_tree_stats though.
1891      */
1892     cpu_fprintf(f, "gen code size       %zu/%zu\n",
1893                 tcg_code_size(), tcg_code_capacity());
1894     cpu_fprintf(f, "TB count            %zu\n", nb_tbs);
1895     cpu_fprintf(f, "TB avg target size  %zu max=%zu bytes\n",
1896                 nb_tbs ? tst.target_size / nb_tbs : 0,
1897                 tst.max_target_size);
1898     cpu_fprintf(f, "TB avg host size    %zu bytes (expansion ratio: %0.1f)\n",
1899                 nb_tbs ? tst.host_size / nb_tbs : 0,
1900                 tst.target_size ? (double)tst.host_size / tst.target_size : 0);
1901     cpu_fprintf(f, "cross page TB count %zu (%zu%%)\n", tst.cross_page,
1902             nb_tbs ? (tst.cross_page * 100) / nb_tbs : 0);
1903     cpu_fprintf(f, "direct jump count   %zu (%zu%%) (2 jumps=%zu %zu%%)\n",
1904                 tst.direct_jmp_count,
1905                 nb_tbs ? (tst.direct_jmp_count * 100) / nb_tbs : 0,
1906                 tst.direct_jmp2_count,
1907                 nb_tbs ? (tst.direct_jmp2_count * 100) / nb_tbs : 0);
1908 
1909     qht_statistics_init(&tb_ctx.htable, &hst);
1910     print_qht_statistics(f, cpu_fprintf, hst);
1911     qht_statistics_destroy(&hst);
1912 
1913     cpu_fprintf(f, "\nStatistics:\n");
1914     cpu_fprintf(f, "TB flush count      %u\n",
1915                 atomic_read(&tb_ctx.tb_flush_count));
1916     cpu_fprintf(f, "TB invalidate count %d\n", tb_ctx.tb_phys_invalidate_count);
1917     cpu_fprintf(f, "TLB flush count     %zu\n", tlb_flush_count());
1918     tcg_dump_info(f, cpu_fprintf);
1919 
1920     tb_unlock();
1921 }
1922 
1923 void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf)
1924 {
1925     tcg_dump_op_count(f, cpu_fprintf);
1926 }
1927 
1928 #else /* CONFIG_USER_ONLY */
1929 
1930 void cpu_interrupt(CPUState *cpu, int mask)
1931 {
1932     g_assert(qemu_mutex_iothread_locked());
1933     cpu->interrupt_request |= mask;
1934     cpu->icount_decr.u16.high = -1;
1935 }
1936 
1937 /*
1938  * Walks guest process memory "regions" one by one
1939  * and calls callback function 'fn' for each region.
1940  */
1941 struct walk_memory_regions_data {
1942     walk_memory_regions_fn fn;
1943     void *priv;
1944     target_ulong start;
1945     int prot;
1946 };
1947 
1948 static int walk_memory_regions_end(struct walk_memory_regions_data *data,
1949                                    target_ulong end, int new_prot)
1950 {
1951     if (data->start != -1u) {
1952         int rc = data->fn(data->priv, data->start, end, data->prot);
1953         if (rc != 0) {
1954             return rc;
1955         }
1956     }
1957 
1958     data->start = (new_prot ? end : -1u);
1959     data->prot = new_prot;
1960 
1961     return 0;
1962 }
1963 
1964 static int walk_memory_regions_1(struct walk_memory_regions_data *data,
1965                                  target_ulong base, int level, void **lp)
1966 {
1967     target_ulong pa;
1968     int i, rc;
1969 
1970     if (*lp == NULL) {
1971         return walk_memory_regions_end(data, base, 0);
1972     }
1973 
1974     if (level == 0) {
1975         PageDesc *pd = *lp;
1976 
1977         for (i = 0; i < V_L2_SIZE; ++i) {
1978             int prot = pd[i].flags;
1979 
1980             pa = base | (i << TARGET_PAGE_BITS);
1981             if (prot != data->prot) {
1982                 rc = walk_memory_regions_end(data, pa, prot);
1983                 if (rc != 0) {
1984                     return rc;
1985                 }
1986             }
1987         }
1988     } else {
1989         void **pp = *lp;
1990 
1991         for (i = 0; i < V_L2_SIZE; ++i) {
1992             pa = base | ((target_ulong)i <<
1993                 (TARGET_PAGE_BITS + V_L2_BITS * level));
1994             rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
1995             if (rc != 0) {
1996                 return rc;
1997             }
1998         }
1999     }
2000 
2001     return 0;
2002 }
2003 
2004 int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2005 {
2006     struct walk_memory_regions_data data;
2007     uintptr_t i, l1_sz = v_l1_size;
2008 
2009     data.fn = fn;
2010     data.priv = priv;
2011     data.start = -1u;
2012     data.prot = 0;
2013 
2014     for (i = 0; i < l1_sz; i++) {
2015         target_ulong base = i << (v_l1_shift + TARGET_PAGE_BITS);
2016         int rc = walk_memory_regions_1(&data, base, v_l2_levels, l1_map + i);
2017         if (rc != 0) {
2018             return rc;
2019         }
2020     }
2021 
2022     return walk_memory_regions_end(&data, 0, 0);
2023 }
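
/*
 * Illustrative sketch, not part of the original file: the walker above
 * is a run-length coalescer over per-page protection bits; it opens a
 * region when the protection changes to a non-zero value and closes it
 * when the protection changes again.  Same idea over a flat,
 * hypothetical array of per-page flags:
 */
#include <stddef.h>

static void ex_walk_regions(const int *prot, size_t npages,
                            void (*fn)(size_t start, size_t end, int prot))
{
    size_t start = (size_t)-1;  /* no region currently open */
    int cur = 0;
    size_t i;

    for (i = 0; i <= npages; i++) {
        int p = i < npages ? prot[i] : 0;  /* force a final close */

        if (p != cur) {
            if (start != (size_t)-1) {
                fn(start, i, cur);          /* emit the finished region */
            }
            start = p ? i : (size_t)-1;     /* maybe open a new one */
            cur = p;
        }
    }
}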
2024 
2025 static int dump_region(void *priv, target_ulong start,
2026     target_ulong end, unsigned long prot)
2027 {
2028     FILE *f = (FILE *)priv;
2029 
2030     (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx
2031         " "TARGET_FMT_lx" %c%c%c\n",
2032         start, end, end - start,
2033         ((prot & PAGE_READ) ? 'r' : '-'),
2034         ((prot & PAGE_WRITE) ? 'w' : '-'),
2035         ((prot & PAGE_EXEC) ? 'x' : '-'));
2036 
2037     return 0;
2038 }
2039 
2040 /* dump memory mappings */
2041 void page_dump(FILE *f)
2042 {
2043     const int length = sizeof(target_ulong) * 2;
2044     (void) fprintf(f, "%-*s %-*s %-*s %s\n",
2045             length, "start", length, "end", length, "size", "prot");
2046     walk_memory_regions(f, dump_region);
2047 }
2048 
2049 int page_get_flags(target_ulong address)
2050 {
2051     PageDesc *p;
2052 
2053     p = page_find(address >> TARGET_PAGE_BITS);
2054     if (!p) {
2055         return 0;
2056     }
2057     return p->flags;
2058 }
2059 
2060 /* Modify the flags of a page and invalidate the code if necessary.
2061    The flag PAGE_WRITE_ORG is positioned automatically depending
2062    on PAGE_WRITE.  The mmap_lock should already be held.  */
2063 void page_set_flags(target_ulong start, target_ulong end, int flags)
2064 {
2065     target_ulong addr, len;
2066 
2067     /* This function should never be called with addresses outside the
2068        guest address space.  If this assert fires, it probably indicates
2069        a missing call to h2g_valid.  */
2070 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2071     assert(end <= ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2072 #endif
2073     assert(start < end);
2074     assert_memory_lock();
2075 
2076     start = start & TARGET_PAGE_MASK;
2077     end = TARGET_PAGE_ALIGN(end);
2078 
2079     if (flags & PAGE_WRITE) {
2080         flags |= PAGE_WRITE_ORG;
2081     }
2082 
2083     for (addr = start, len = end - start;
2084          len != 0;
2085          len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2086         PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2087 
2088         /* If the page was write-protected but is now being made
2089            writable, invalidate any translated code it contains.  */
2090         if (!(p->flags & PAGE_WRITE) &&
2091             (flags & PAGE_WRITE) &&
2092             p->first_tb) {
2093             tb_invalidate_phys_page(addr, 0);
2094         }
2095         p->flags = flags;
2096     }
2097 }
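
/*
 * Illustrative sketch, not part of the original file: PAGE_WRITE_ORG
 * records a mapping's real writability, so a page can temporarily lose
 * PAGE_WRITE while it contains translated code and regain it later in
 * page_unprotect().  The EX_* values are hypothetical stand-ins for
 * the real flag bits.
 */
#define EX_PAGE_WRITE     0x0002
#define EX_PAGE_WRITE_ORG 0x0010

/* Write-protect a page that now holds translated code. */
static int ex_write_protect_for_code(int flags)
{
    return flags & ~EX_PAGE_WRITE;
}

/* Restore write access, but only if the mapping was ever writable. */
static int ex_unprotect(int flags)
{
    return flags & EX_PAGE_WRITE_ORG ? flags | EX_PAGE_WRITE : flags;
}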
2098 
2099 int page_check_range(target_ulong start, target_ulong len, int flags)
2100 {
2101     PageDesc *p;
2102     target_ulong end;
2103     target_ulong addr;
2104 
2105     /* This function should never be called with addresses outside the
2106        guest address space.  If this assert fires, it probably indicates
2107        a missing call to h2g_valid.  */
2108 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2109     assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2110 #endif
2111 
2112     if (len == 0) {
2113         return 0;
2114     }
2115     if (start + len - 1 < start) {
2116         /* We've wrapped around.  */
2117         return -1;
2118     }
2119 
2120     /* must do this before we lose bits in the next step */
2121     end = TARGET_PAGE_ALIGN(start + len);
2122     start = start & TARGET_PAGE_MASK;
2123 
2124     for (addr = start, len = end - start;
2125          len != 0;
2126          len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2127         p = page_find(addr >> TARGET_PAGE_BITS);
2128         if (!p) {
2129             return -1;
2130         }
2131         if (!(p->flags & PAGE_VALID)) {
2132             return -1;
2133         }
2134 
2135         if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
2136             return -1;
2137         }
2138         if (flags & PAGE_WRITE) {
2139             if (!(p->flags & PAGE_WRITE_ORG)) {
2140                 return -1;
2141             }
2142             /* unprotect the page if it was put read-only because it
2143                contains translated code */
2144             if (!(p->flags & PAGE_WRITE)) {
2145                 if (!page_unprotect(addr, 0)) {
2146                     return -1;
2147                 }
2148             }
2149         }
2150     }
2151     return 0;
2152 }
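
/*
 * Illustrative usage sketch, not part of the original file: user-mode
 * syscall emulation typically validates a guest buffer with
 * page_check_range() before copying from or to it; ex_guest_buffer_ok
 * is a hypothetical wrapper around the function above.
 */
static int ex_guest_buffer_ok(target_ulong guest_addr, target_ulong size,
                              int required_flags /* e.g. PAGE_READ */)
{
    /* page_check_range() returns 0 on success and -1 on any failure. */
    return page_check_range(guest_addr, size, required_flags) == 0;
}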
2153 
2154 /* called from signal handler: invalidate the code and unprotect the
2155  * page. Return 0 if the fault was not handled, 1 if it was handled,
2156  * and 2 if it was handled but the caller must cause the TB to be
2157  * immediately exited. (We can only return 2 if the 'pc' argument is
2158  * non-zero.)
2159  */
2160 int page_unprotect(target_ulong address, uintptr_t pc)
2161 {
2162     unsigned int prot;
2163     bool current_tb_invalidated;
2164     PageDesc *p;
2165     target_ulong host_start, host_end, addr;
2166 
2167     /* Technically this isn't safe inside a signal handler.  However we
2168        know this only ever happens in a synchronous SEGV handler, so in
2169        practice it seems to be ok.  */
2170     mmap_lock();
2171 
2172     p = page_find(address >> TARGET_PAGE_BITS);
2173     if (!p) {
2174         mmap_unlock();
2175         return 0;
2176     }
2177 
2178     /* if the page was really writable, then we change its
2179        protection back to writable */
2180     if (p->flags & PAGE_WRITE_ORG) {
2181         current_tb_invalidated = false;
2182         if (p->flags & PAGE_WRITE) {
2183             /* If the page is actually marked WRITE then assume this is because
2184              * this thread raced with another one which got here first and
2185              * set the page to PAGE_WRITE and did the TB invalidate for us.
2186              */
2187 #ifdef TARGET_HAS_PRECISE_SMC
2188             TranslationBlock *current_tb = tb_find_pc(pc);
2189             if (current_tb) {
2190                 current_tb_invalidated = tb_cflags(current_tb) & CF_INVALID;
2191             }
2192 #endif
2193         } else {
2194             host_start = address & qemu_host_page_mask;
2195             host_end = host_start + qemu_host_page_size;
2196 
2197             prot = 0;
2198             for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
2199                 p = page_find(addr >> TARGET_PAGE_BITS);
2200                 p->flags |= PAGE_WRITE;
2201                 prot |= p->flags;
2202 
2203                 /* and since the content will be modified, we must invalidate
2204                    the corresponding translated code. */
2205                 current_tb_invalidated |= tb_invalidate_phys_page(addr, pc);
2206 #ifdef CONFIG_USER_ONLY
2207                 if (DEBUG_TB_CHECK_GATE) {
2208                     tb_invalidate_check(addr);
2209                 }
2210 #endif
2211             }
2212             mprotect((void *)g2h(host_start), qemu_host_page_size,
2213                      prot & PAGE_BITS);
2214         }
2215         mmap_unlock();
2216         /* If the current TB was invalidated, return to the main loop. */
2217         return current_tb_invalidated ? 2 : 1;
2218     }
2219     mmap_unlock();
2220     return 0;
2221 }
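
/*
 * Illustrative sketch, not part of the original file: host pages may be
 * larger than target pages, which is why page_unprotect() above rounds
 * the faulting address down to a host page boundary and rebuilds the
 * mprotect() bits as the union of every covered target page's flags.
 * The EX_* sizes are hypothetical.
 */
#define EX_HOST_PAGE_SIZE   16384ul  /* e.g. 16K host pages */
#define EX_TARGET_PAGE_SIZE 4096ul   /* e.g. 4K target pages */

static int ex_host_page_prot(unsigned long address,
                             int (*flags_of)(unsigned long addr))
{
    unsigned long host_start = address & ~(EX_HOST_PAGE_SIZE - 1);
    unsigned long host_end = host_start + EX_HOST_PAGE_SIZE;
    unsigned long a;
    int prot = 0;

    for (a = host_start; a < host_end; a += EX_TARGET_PAGE_SIZE) {
        prot |= flags_of(a);  /* union of all covered target pages */
    }
    return prot;
}
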
2222 #endif /* CONFIG_USER_ONLY */
2223 
2224 /* This is a wrapper for common code that cannot use CONFIG_SOFTMMU */
2225 void tcg_flush_softmmu_tlb(CPUState *cs)
2226 {
2227 #ifdef CONFIG_SOFTMMU
2228     tlb_flush(cs);
2229 #endif
2230 }
2231