xref: /openbmc/qemu/accel/tcg/translate-all.c (revision fe9b676f)
/*
 *  Host code generation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"

#define NO_CPU_IO_DEFS
#include "cpu.h"
#include "trace.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else
#include "exec/ram_addr.h"
#endif

#include "exec/cputlb.h"
#include "exec/tb-hash.h"
#include "translate-all.h"
#include "qemu/bitmap.h"
#include "qemu/error-report.h"
#include "qemu/qemu-print.h"
#include "qemu/timer.h"
#include "qemu/main-loop.h"
#include "exec/log.h"
#include "sysemu/cpus.h"
#include "sysemu/tcg.h"

/* #define DEBUG_TB_INVALIDATE */
/* #define DEBUG_TB_FLUSH */
/* make various TB consistency checks */
/* #define DEBUG_TB_CHECK */

#ifdef DEBUG_TB_INVALIDATE
#define DEBUG_TB_INVALIDATE_GATE 1
#else
#define DEBUG_TB_INVALIDATE_GATE 0
#endif

#ifdef DEBUG_TB_FLUSH
#define DEBUG_TB_FLUSH_GATE 1
#else
#define DEBUG_TB_FLUSH_GATE 0
#endif

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#ifdef DEBUG_TB_CHECK
#define DEBUG_TB_CHECK_GATE 1
#else
#define DEBUG_TB_CHECK_GATE 0
#endif

/* Accesses to the various translation structures need to be serialised
 * via locks for consistency.
 * In user-mode emulation, accesses to the memory-related structures are
 * protected by mmap_lock.
 * In !user-mode we use per-page locks.
 */
#ifdef CONFIG_SOFTMMU
#define assert_memory_lock()
#else
#define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    uintptr_t first_tb;
#ifdef CONFIG_SOFTMMU
    /* in order to optimize self-modifying code handling, we count the
       number of lookups we do on a given page before switching to a bitmap */
    unsigned long *code_bitmap;
    unsigned int code_write_count;
#else
    unsigned long flags;
#endif
#ifndef CONFIG_USER_ONLY
    QemuSpin lock;
#endif
} PageDesc;

/**
 * struct page_entry - page descriptor entry
 * @pd:     pointer to the &struct PageDesc of the page this entry represents
 * @index:  page index of the page
 * @locked: whether the page is locked
 *
 * This struct helps us keep track of the locked state of a page, without
 * bloating &struct PageDesc.
 *
 * A page lock protects accesses to all fields of &struct PageDesc.
 *
 * See also: &struct page_collection.
 */
struct page_entry {
    PageDesc *pd;
    tb_page_addr_t index;
    bool locked;
};

/**
 * struct page_collection - tracks a set of pages (i.e. &struct page_entry's)
 * @tree:   Binary search tree (BST) of the pages, with key == page index
 * @max:    Pointer to the page in @tree with the highest page index
 *
 * To avoid deadlock we lock pages in ascending order of page index.
 * When operating on a set of pages, we need to keep track of them so that
 * we can lock them in order and also unlock them later. For this we collect
 * pages (i.e. &struct page_entry's) in a binary search @tree. Given that the
 * @tree implementation we use does not provide an O(1) operation to obtain the
 * highest-ranked element, we use @max to keep track of the inserted page
 * with the highest index. This is valuable because if a page is not in
 * the tree and its index is higher than @max's, then we can lock it
 * without breaking the locking order rule.
 *
 * Note on naming: 'struct page_set' would be shorter, but we already have a few
 * page_set_*() helpers, so page_collection is used instead to avoid confusion.
 *
 * See also: page_collection_lock().
 */
struct page_collection {
    GTree *tree;
    struct page_entry *max;
};

/* list iterators for lists of tagged pointers in TranslationBlock */
#define TB_FOR_EACH_TAGGED(head, tb, n, field)                          \
    for (n = (head) & 1, tb = (TranslationBlock *)((head) & ~1);        \
         tb; tb = (TranslationBlock *)tb->field[n], n = (uintptr_t)tb & 1, \
             tb = (TranslationBlock *)((uintptr_t)tb & ~1))

#define PAGE_FOR_EACH_TB(pagedesc, tb, n)                       \
    TB_FOR_EACH_TAGGED((pagedesc)->first_tb, tb, n, page_next)

#define TB_FOR_EACH_JMP(head_tb, tb, n)                                 \
    TB_FOR_EACH_TAGGED((head_tb)->jmp_list_head, tb, n, jmp_list_next)
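
/*
 * Illustration (not part of the original source): these are intrusive
 * singly-linked lists of "tagged" pointers.  Bit 0 of each link records
 * which of the pointed-to TB's two slots (field[0] or field[1])
 * continues the chain, since a TB that spans two pages sits on two page
 * lists at once.  E.g. a head value of ((uintptr_t)tb | 1) means "tb is
 * next, and the list continues at tb->field[1]".  The iterators above
 * strip the tag into @n before dereferencing the pointer.
 */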

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define V_L2_BITS 10
#define V_L2_SIZE (1 << V_L2_BITS)

/* Make sure all possible CPU event bits fit in tb->trace_vcpu_dstate */
QEMU_BUILD_BUG_ON(CPU_TRACE_DSTATE_MAX_EVENTS >
                  sizeof_field(TranslationBlock, trace_vcpu_dstate)
                  * BITS_PER_BYTE);

/*
 * L1 Mapping properties
 */
static int v_l1_size;
static int v_l1_shift;
static int v_l2_levels;

/* The bottom level has pointers to PageDesc, and is indexed by
 * anything from 4 to (V_L2_BITS + 3) bits, depending on target page size.
 */
#define V_L1_MIN_BITS 4
#define V_L1_MAX_BITS (V_L2_BITS + 3)
#define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS)

static void *l1_map[V_L1_MAX_SIZE];

/* code generation context */
TCGContext tcg_init_ctx;
__thread TCGContext *tcg_ctx;
TBContext tb_ctx;
bool parallel_cpus;

static void page_table_config_init(void)
{
    uint32_t v_l1_bits;

    assert(TARGET_PAGE_BITS);
    /* The bits remaining after N lower levels of page tables.  */
    v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS;
    if (v_l1_bits < V_L1_MIN_BITS) {
        v_l1_bits += V_L2_BITS;
    }

    v_l1_size = 1 << v_l1_bits;
    v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits;
    v_l2_levels = v_l1_shift / V_L2_BITS - 1;

    assert(v_l1_bits <= V_L1_MAX_BITS);
    assert(v_l1_shift % V_L2_BITS == 0);
    assert(v_l2_levels >= 0);
}
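
/*
 * Worked example (illustrative numbers): with L1_MAP_ADDR_SPACE_BITS ==
 * 47 and TARGET_PAGE_BITS == 12 there are 35 index bits to split.
 * 35 % 10 == 5, which is >= V_L1_MIN_BITS, so v_l1_bits = 5
 * (v_l1_size = 32), v_l1_shift = 30 and v_l2_levels = 2: a 32-entry L1
 * table, two 1024-entry intermediate levels, and a final 1024-entry
 * level of PageDescs, i.e. 5 + 10 + 10 + 10 = 35 bits in total.
 */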

void cpu_gen_init(void)
{
    tcg_context_init(&tcg_init_ctx);
}

/* Encode VAL as a signed leb128 sequence at P.
   Return P incremented past the encoded value.  */
static uint8_t *encode_sleb128(uint8_t *p, target_long val)
{
    int more, byte;

    do {
        byte = val & 0x7f;
        val >>= 7;
        more = !((val == 0 && (byte & 0x40) == 0)
                 || (val == -1 && (byte & 0x40) != 0));
        if (more) {
            byte |= 0x80;
        }
        *p++ = byte;
    } while (more);

    return p;
}

/* Decode a signed leb128 sequence at *PP; increment *PP past the
   decoded value.  Return the decoded value.  */
static target_long decode_sleb128(uint8_t **pp)
{
    uint8_t *p = *pp;
    target_long val = 0;
    int byte, shift = 0;

    do {
        byte = *p++;
        val |= (target_ulong)(byte & 0x7f) << shift;
        shift += 7;
    } while (byte & 0x80);
    if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
        val |= -(target_ulong)1 << shift;
    }

    *pp = p;
    return val;
}
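
/*
 * Worked example (illustrative, not from the original source): sleb128
 * stores 7 payload bits per byte, LSB first; bit 7 marks continuation
 * and bit 6 of the final byte is the sign.  Encoding 200 (0b11001000)
 * yields 0xc8 0x01 (0x48 | 0x80, then 0x01); encoding -2 yields the
 * single byte 0x7e, since after one step val == -1 and bit 6 is already
 * set.  decode_sleb128() reverses this, sign-extending from bit 6 of
 * the last byte.
 */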

/* Encode the data collected about the instructions while compiling TB.
   Place the data at BLOCK, and return the number of bytes consumed.

   The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
   which come from the target's insn_start data, followed by a uintptr_t
   which comes from the host pc of the end of the code implementing the insn.

   Each line of the table is encoded as sleb128 deltas from the previous
   line.  The seed for the first line is { tb->pc, 0..., tb->tc.ptr }.
   That is, the first column is seeded with the guest pc, the last column
   with the host pc, and the middle columns with zeros.  */

static int encode_search(TranslationBlock *tb, uint8_t *block)
{
    uint8_t *highwater = tcg_ctx->code_gen_highwater;
    uint8_t *p = block;
    int i, j, n;

    for (i = 0, n = tb->icount; i < n; ++i) {
        target_ulong prev;

        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            if (i == 0) {
                prev = (j == 0 ? tb->pc : 0);
            } else {
                prev = tcg_ctx->gen_insn_data[i - 1][j];
            }
            p = encode_sleb128(p, tcg_ctx->gen_insn_data[i][j] - prev);
        }
        prev = (i == 0 ? 0 : tcg_ctx->gen_insn_end_off[i - 1]);
        p = encode_sleb128(p, tcg_ctx->gen_insn_end_off[i] - prev);

        /* Test for (pending) buffer overflow.  The assumption is that any
           one row beginning below the high water mark cannot overrun
           the buffer completely.  Thus we can test for overflow after
           encoding a row without having to check during encoding.  */
        if (unlikely(p > highwater)) {
            return -1;
        }
    }

    return p - block;
}
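
/*
 * Illustrative example (made-up numbers): with TARGET_INSN_START_WORDS
 * == 1, a TB at guest pc 0x1000 whose two insns start at 0x1000 and
 * 0x1004, with host end offsets 0x20 and 0x38, encodes as the delta
 * rows {0x1000 - 0x1000, 0x20 - 0} and {0x1004 - 0x1000, 0x38 - 0x20},
 * i.e. the sleb128 stream 0x00, 0x20, 0x04, 0x18.
 */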

/* Restore the CPU state corresponding to 'searched_pc'.
 * When reset_icount is true, the current TB will be interrupted and
 * icount should be recalculated.
 */
static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                                     uintptr_t searched_pc, bool reset_icount)
{
    target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc };
    uintptr_t host_pc = (uintptr_t)tb->tc.ptr;
    CPUArchState *env = cpu->env_ptr;
    uint8_t *p = tb->tc.ptr + tb->tc.size;
    int i, j, num_insns = tb->icount;
#ifdef CONFIG_PROFILER
    TCGProfile *prof = &tcg_ctx->prof;
    int64_t ti = profile_getclock();
#endif

    searched_pc -= GETPC_ADJ;

    if (searched_pc < host_pc) {
        return -1;
    }

    /* Reconstruct the stored insn data while looking for the point at
       which the end of the insn exceeds the searched_pc.  */
    for (i = 0; i < num_insns; ++i) {
        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            data[j] += decode_sleb128(&p);
        }
        host_pc += decode_sleb128(&p);
        if (host_pc > searched_pc) {
            goto found;
        }
    }
    return -1;

 found:
    if (reset_icount && (tb_cflags(tb) & CF_USE_ICOUNT)) {
        assert(use_icount);
        /* Reset the cycle counter to the start of the block
           and shift it by the number of actually executed instructions */
        cpu_neg(cpu)->icount_decr.u16.low += num_insns - i;
    }
    restore_state_to_opc(env, tb, data);

#ifdef CONFIG_PROFILER
    atomic_set(&prof->restore_time,
                prof->restore_time + profile_getclock() - ti);
    atomic_set(&prof->restore_count, prof->restore_count + 1);
#endif
    return 0;
}

bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc, bool will_exit)
{
    TranslationBlock *tb;
    bool r = false;
    uintptr_t check_offset;

    /* The host_pc has to be in the region of the current code buffer.  If
     * it is not, we will not be able to resolve it here.  The two cases
     * where host_pc will not be correct are:
     *
     *  - fault during translation (instruction fetch)
     *  - fault from helper (not using GETPC() macro)
     *
     * Either way we need to return early, as we can't resolve it here.
     *
     * We are using unsigned arithmetic, so if host_pc <
     * tcg_init_ctx.code_gen_buffer, check_offset will wrap to well
     * above code_gen_buffer_size.
     */
    check_offset = host_pc - (uintptr_t) tcg_init_ctx.code_gen_buffer;

    if (check_offset < tcg_init_ctx.code_gen_buffer_size) {
        tb = tcg_tb_lookup(host_pc);
        if (tb) {
            cpu_restore_state_from_tb(cpu, tb, host_pc, will_exit);
            if (tb_cflags(tb) & CF_NOCACHE) {
                /* one-shot translation, invalidate it immediately */
                tb_phys_invalidate(tb, -1);
                tcg_tb_remove(tb);
            }
            r = true;
        }
    }

    return r;
}

static void page_init(void)
{
    page_size_init();
    page_table_config_init();

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));

    /* Level 2..N-1.  */
    for (i = v_l2_levels; i > 0; i--) {
        void **p = atomic_rcu_read(lp);

        if (p == NULL) {
            void *existing;

            if (!alloc) {
                return NULL;
            }
            p = g_new0(void *, V_L2_SIZE);
            existing = atomic_cmpxchg(lp, NULL, p);
            if (unlikely(existing)) {
                g_free(p);
                p = existing;
            }
        }

        lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
    }

    pd = atomic_rcu_read(lp);
    if (pd == NULL) {
        void *existing;

        if (!alloc) {
            return NULL;
        }
        pd = g_new0(PageDesc, V_L2_SIZE);
#ifndef CONFIG_USER_ONLY
        {
            int i;

            for (i = 0; i < V_L2_SIZE; i++) {
                qemu_spin_init(&pd[i].lock);
            }
        }
#endif
        existing = atomic_cmpxchg(lp, NULL, pd);
        if (unlikely(existing)) {
            g_free(pd);
            pd = existing;
        }
    }

    return pd + (index & (V_L2_SIZE - 1));
}
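
/*
 * Note that the table walk above is lock-free: new levels are published
 * with atomic_cmpxchg(), so a thread that loses the race frees its own
 * allocation and adopts the winner's, while readers use atomic_rcu_read()
 * and take no lock at all.
 */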

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
                           PageDesc **ret_p2, tb_page_addr_t phys2, int alloc);

/* In user-mode page locks aren't used; mmap_lock is enough */
#ifdef CONFIG_USER_ONLY

#define assert_page_locked(pd) tcg_debug_assert(have_mmap_lock())

static inline void page_lock(PageDesc *pd)
{ }

static inline void page_unlock(PageDesc *pd)
{ }

static inline void page_lock_tb(const TranslationBlock *tb)
{ }

static inline void page_unlock_tb(const TranslationBlock *tb)
{ }

struct page_collection *
page_collection_lock(tb_page_addr_t start, tb_page_addr_t end)
{
    return NULL;
}

void page_collection_unlock(struct page_collection *set)
{ }
#else /* !CONFIG_USER_ONLY */

#ifdef CONFIG_DEBUG_TCG

static __thread GHashTable *ht_pages_locked_debug;

static void ht_pages_locked_debug_init(void)
{
    if (ht_pages_locked_debug) {
        return;
    }
    ht_pages_locked_debug = g_hash_table_new(NULL, NULL);
}

static bool page_is_locked(const PageDesc *pd)
{
    PageDesc *found;

    ht_pages_locked_debug_init();
    found = g_hash_table_lookup(ht_pages_locked_debug, pd);
    return !!found;
}

static void page_lock__debug(PageDesc *pd)
{
    ht_pages_locked_debug_init();
    g_assert(!page_is_locked(pd));
    g_hash_table_insert(ht_pages_locked_debug, pd, pd);
}

static void page_unlock__debug(const PageDesc *pd)
{
    bool removed;

    ht_pages_locked_debug_init();
    g_assert(page_is_locked(pd));
    removed = g_hash_table_remove(ht_pages_locked_debug, pd);
    g_assert(removed);
}

static void
do_assert_page_locked(const PageDesc *pd, const char *file, int line)
{
    if (unlikely(!page_is_locked(pd))) {
        error_report("assert_page_lock: PageDesc %p not locked @ %s:%d",
                     pd, file, line);
        abort();
    }
}

#define assert_page_locked(pd) do_assert_page_locked(pd, __FILE__, __LINE__)

void assert_no_pages_locked(void)
{
    ht_pages_locked_debug_init();
    g_assert(g_hash_table_size(ht_pages_locked_debug) == 0);
}

#else /* !CONFIG_DEBUG_TCG */

#define assert_page_locked(pd)

static inline void page_lock__debug(const PageDesc *pd)
{
}

static inline void page_unlock__debug(const PageDesc *pd)
{
}

#endif /* CONFIG_DEBUG_TCG */

static inline void page_lock(PageDesc *pd)
{
    page_lock__debug(pd);
    qemu_spin_lock(&pd->lock);
}

static inline void page_unlock(PageDesc *pd)
{
    qemu_spin_unlock(&pd->lock);
    page_unlock__debug(pd);
}

/* lock the page(s) of a TB in the correct acquisition order */
static inline void page_lock_tb(const TranslationBlock *tb)
{
    page_lock_pair(NULL, tb->page_addr[0], NULL, tb->page_addr[1], 0);
}

static inline void page_unlock_tb(const TranslationBlock *tb)
{
    PageDesc *p1 = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);

    page_unlock(p1);
    if (unlikely(tb->page_addr[1] != -1)) {
        PageDesc *p2 = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);

        if (p2 != p1) {
            page_unlock(p2);
        }
    }
}

static inline struct page_entry *
page_entry_new(PageDesc *pd, tb_page_addr_t index)
{
    struct page_entry *pe = g_malloc(sizeof(*pe));

    pe->index = index;
    pe->pd = pd;
    pe->locked = false;
    return pe;
}

static void page_entry_destroy(gpointer p)
{
    struct page_entry *pe = p;

    g_assert(pe->locked);
    page_unlock(pe->pd);
    g_free(pe);
}

/* returns false on success */
static bool page_entry_trylock(struct page_entry *pe)
{
    bool busy;

    busy = qemu_spin_trylock(&pe->pd->lock);
    if (!busy) {
        g_assert(!pe->locked);
        pe->locked = true;
        page_lock__debug(pe->pd);
    }
    return busy;
}

static void do_page_entry_lock(struct page_entry *pe)
{
    page_lock(pe->pd);
    g_assert(!pe->locked);
    pe->locked = true;
}

static gboolean page_entry_lock(gpointer key, gpointer value, gpointer data)
{
    struct page_entry *pe = value;

    do_page_entry_lock(pe);
    return FALSE;
}

static gboolean page_entry_unlock(gpointer key, gpointer value, gpointer data)
{
    struct page_entry *pe = value;

    if (pe->locked) {
        pe->locked = false;
        page_unlock(pe->pd);
    }
    return FALSE;
}

/*
 * Trylock a page, and if successful, add the page to a collection.
 * Returns true ("busy") if the page could not be locked; false otherwise.
 */
static bool page_trylock_add(struct page_collection *set, tb_page_addr_t addr)
{
    tb_page_addr_t index = addr >> TARGET_PAGE_BITS;
    struct page_entry *pe;
    PageDesc *pd;

    pe = g_tree_lookup(set->tree, &index);
    if (pe) {
        return false;
    }

    pd = page_find(index);
    if (pd == NULL) {
        return false;
    }

    pe = page_entry_new(pd, index);
    g_tree_insert(set->tree, &pe->index, pe);

    /*
     * If this is either (1) the first insertion or (2) a page whose index
     * is higher than any other so far, just lock the page and move on.
     */
    if (set->max == NULL || pe->index > set->max->index) {
        set->max = pe;
        do_page_entry_lock(pe);
        return false;
    }
    /*
     * Try to acquire out-of-order lock; if busy, return busy so that we acquire
     * locks in order.
     */
    return page_entry_trylock(pe);
}

static gint tb_page_addr_cmp(gconstpointer ap, gconstpointer bp, gpointer udata)
{
    tb_page_addr_t a = *(const tb_page_addr_t *)ap;
    tb_page_addr_t b = *(const tb_page_addr_t *)bp;

    if (a == b) {
        return 0;
    } else if (a < b) {
        return -1;
    }
    return 1;
}

/*
 * Lock a range of pages ([@start,@end[) as well as the pages of all
 * intersecting TBs.
 * Locking order: acquire locks in ascending order of page index.
 */
struct page_collection *
page_collection_lock(tb_page_addr_t start, tb_page_addr_t end)
{
    struct page_collection *set = g_malloc(sizeof(*set));
    tb_page_addr_t index;
    PageDesc *pd;

    start >>= TARGET_PAGE_BITS;
    end   >>= TARGET_PAGE_BITS;
    g_assert(start <= end);

    set->tree = g_tree_new_full(tb_page_addr_cmp, NULL, NULL,
                                page_entry_destroy);
    set->max = NULL;
    assert_no_pages_locked();

 retry:
    g_tree_foreach(set->tree, page_entry_lock, NULL);

    for (index = start; index <= end; index++) {
        TranslationBlock *tb;
        int n;

        pd = page_find(index);
        if (pd == NULL) {
            continue;
        }
        if (page_trylock_add(set, index << TARGET_PAGE_BITS)) {
            g_tree_foreach(set->tree, page_entry_unlock, NULL);
            goto retry;
        }
        assert_page_locked(pd);
        PAGE_FOR_EACH_TB(pd, tb, n) {
            if (page_trylock_add(set, tb->page_addr[0]) ||
                (tb->page_addr[1] != -1 &&
                 page_trylock_add(set, tb->page_addr[1]))) {
                /* drop all locks, and reacquire in order */
                g_tree_foreach(set->tree, page_entry_unlock, NULL);
                goto retry;
            }
        }
    }
    return set;
}

void page_collection_unlock(struct page_collection *set)
{
    /* entries are unlocked and freed via page_entry_destroy */
    g_tree_destroy(set->tree);
    g_free(set);
}
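
/*
 * Typical usage (a sketch; the real callers are the invalidation paths
 * further down this file):
 *
 *     struct page_collection *pages = page_collection_lock(start, end);
 *     ... invalidate TBs intersecting [start, end) ...
 *     page_collection_unlock(pages);
 */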

#endif /* !CONFIG_USER_ONLY */

static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
                           PageDesc **ret_p2, tb_page_addr_t phys2, int alloc)
{
    PageDesc *p1, *p2;
    tb_page_addr_t page1;
    tb_page_addr_t page2;

    assert_memory_lock();
    g_assert(phys1 != -1);

    page1 = phys1 >> TARGET_PAGE_BITS;
    page2 = phys2 >> TARGET_PAGE_BITS;

    p1 = page_find_alloc(page1, alloc);
    if (ret_p1) {
        *ret_p1 = p1;
    }
    if (likely(phys2 == -1)) {
        page_lock(p1);
        return;
    } else if (page1 == page2) {
        page_lock(p1);
        if (ret_p2) {
            *ret_p2 = p1;
        }
        return;
    }
    p2 = page_find_alloc(page2, alloc);
    if (ret_p2) {
        *ret_p2 = p2;
    }
    if (page1 < page2) {
        page_lock(p1);
        page_lock(p2);
    } else {
        page_lock(p2);
        page_lock(p1);
    }
}

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode.  This will change once a dedicated libc is used.  */
/* ??? 64-bit hosts ought to have no problem mmaping data outside the
   region in which the guest needs to run.  Revisit this.  */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

/* Minimum size of the code gen buffer.  This number is arbitrarily chosen,
   but not so small that we can't have a fair number of TBs live.  */
#define MIN_CODE_GEN_BUFFER_SIZE     (1024u * 1024)

/* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
   indicated, this is constrained by the range of direct branches on the
   host cpu, as used by the TCG implementation of goto_tb.  */
#if defined(__x86_64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__sparc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__powerpc64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__powerpc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (32u * 1024 * 1024)
#elif defined(__aarch64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__s390x__)
  /* We have a +- 4GB range on the branches; leave some slop.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (3ul * 1024 * 1024 * 1024)
#elif defined(__mips__)
  /* We have a 256MB branch region, but leave room to make sure the
     main executable is also within that region.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (128ul * 1024 * 1024)
#else
# define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)

#define DEFAULT_CODE_GEN_BUFFER_SIZE \
  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)

static inline size_t size_code_gen_buffer(size_t tb_size)
{
    /* Size the buffer.  */
    if (tb_size == 0) {
#ifdef USE_STATIC_CODE_GEN_BUFFER
        tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* ??? Needs adjustments.  */
        /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
           static buffer, we could size this on RESERVED_VA, on the text
           segment size of the executable, or continue to use the default.  */
        tb_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
    }
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
    }
    return tb_size;
}
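
/*
 * Example (illustrative numbers): on an x86_64 user-mode build with the
 * static buffer, size_code_gen_buffer(0) returns the 32MB default; a
 * request of 4GB would be clamped to the 2GB MAX_CODE_GEN_BUFFER_SIZE,
 * and a request of 512KB would be raised to the 1MB minimum.
 */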

#ifdef __mips__
/* In order to use J and JAL within the code_gen_buffer, we require
   that the buffer not cross a 256MB boundary.  */
static inline bool cross_256mb(void *addr, size_t size)
{
    return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful;
}

/* We weren't able to allocate a buffer without crossing that boundary,
   so make do with the larger portion of the buffer that doesn't cross.
   Returns the new base of the buffer, and adjusts code_gen_buffer_size.  */
static inline void *split_cross_256mb(void *buf1, size_t size1)
{
    void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful);
    size_t size2 = buf1 + size1 - buf2;

    size1 = buf2 - buf1;
    if (size1 < size2) {
        size1 = size2;
        buf1 = buf2;
    }

    tcg_ctx->code_gen_buffer_size = size1;
    return buf1;
}
#endif
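
/*
 * Worked example (made-up addresses): a 32MB buffer at 0x0ff00000 ends
 * at 0x11f00000 and crosses 0x10000000.  split_cross_256mb() computes
 * buf2 = 0x10000000, leaving a 1MB slice below the boundary and a 31MB
 * slice above it; the larger slice wins, so the function returns
 * 0x10000000 and shrinks code_gen_buffer_size to 31MB.
 */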

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));

static inline void *alloc_code_gen_buffer(void)
{
    void *buf = static_code_gen_buffer;
    void *end = static_code_gen_buffer + sizeof(static_code_gen_buffer);
    size_t size;

    /* page-align the beginning and end of the buffer */
    buf = QEMU_ALIGN_PTR_UP(buf, qemu_real_host_page_size);
    end = QEMU_ALIGN_PTR_DOWN(end, qemu_real_host_page_size);

    size = end - buf;

    /* Honor a command-line option limiting the size of the buffer.  */
    if (size > tcg_ctx->code_gen_buffer_size) {
        size = QEMU_ALIGN_DOWN(tcg_ctx->code_gen_buffer_size,
                               qemu_real_host_page_size);
    }
    tcg_ctx->code_gen_buffer_size = size;

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        buf = split_cross_256mb(buf, size);
        size = tcg_ctx->code_gen_buffer_size;
    }
#endif

    if (qemu_mprotect_rwx(buf, size)) {
        abort();
    }
    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);

    return buf;
}
#elif defined(_WIN32)
static inline void *alloc_code_gen_buffer(void)
{
    size_t size = tcg_ctx->code_gen_buffer_size;
    return VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT,
                        PAGE_EXECUTE_READWRITE);
}
#else
static inline void *alloc_code_gen_buffer(void)
{
    int prot = PROT_WRITE | PROT_READ | PROT_EXEC;
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    uintptr_t start = 0;
    size_t size = tcg_ctx->code_gen_buffer_size;
    void *buf;

    /* Constrain the position of the buffer based on the host cpu.
       Note that these addresses are chosen in concert with the
       addresses assigned in the relevant linker script file.  */
# if defined(__PIE__) || defined(__PIC__)
    /* Don't bother setting a preferred location if we're building
       a position-independent executable.  We're more likely to get
       an address near the main executable if we let the kernel
       choose the address.  */
# elif defined(__x86_64__) && defined(MAP_32BIT)
    /* Force the memory down into low memory with the executable.
       Leave the choice of exact location with the kernel.  */
    flags |= MAP_32BIT;
    /* Cannot expect to map more than 800MB in low memory.  */
    if (size > 800u * 1024 * 1024) {
        tcg_ctx->code_gen_buffer_size = size = 800u * 1024 * 1024;
    }
# elif defined(__sparc__)
    start = 0x40000000ul;
# elif defined(__s390x__)
    start = 0x90000000ul;
# elif defined(__mips__)
#  if _MIPS_SIM == _ABI64
    start = 0x128000000ul;
#  else
    start = 0x08000000ul;
#  endif
# endif

    buf = mmap((void *)start, size, prot, flags, -1, 0);
    if (buf == MAP_FAILED) {
        return NULL;
    }

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        /* Try again, with the original still mapped, to avoid re-acquiring
           that 256mb crossing.  This time don't specify an address.  */
        size_t size2;
        void *buf2 = mmap(NULL, size, prot, flags, -1, 0);
        switch ((int)(buf2 != MAP_FAILED)) {
        case 1:
            if (!cross_256mb(buf2, size)) {
                /* Success!  Use the new buffer.  */
                munmap(buf, size);
                break;
            }
            /* Failure.  Work with what we had.  */
            munmap(buf2, size);
            /* fallthru */
        default:
            /* Split the original buffer.  Free the smaller half.  */
            buf2 = split_cross_256mb(buf, size);
            size2 = tcg_ctx->code_gen_buffer_size;
            if (buf == buf2) {
                munmap(buf + size2, size - size2);
            } else {
                munmap(buf, size - size2);
            }
            size = size2;
            break;
        }
        buf = buf2;
    }
#endif

    /* Request large pages for the buffer.  */
    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);

    return buf;
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */

static inline void code_gen_alloc(size_t tb_size)
{
    tcg_ctx->code_gen_buffer_size = size_code_gen_buffer(tb_size);
    tcg_ctx->code_gen_buffer = alloc_code_gen_buffer();
    if (tcg_ctx->code_gen_buffer == NULL) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }
}

static bool tb_cmp(const void *ap, const void *bp)
{
    const TranslationBlock *a = ap;
    const TranslationBlock *b = bp;

    return a->pc == b->pc &&
        a->cs_base == b->cs_base &&
        a->flags == b->flags &&
        (tb_cflags(a) & CF_HASH_MASK) == (tb_cflags(b) & CF_HASH_MASK) &&
        a->trace_vcpu_dstate == b->trace_vcpu_dstate &&
        a->page_addr[0] == b->page_addr[0] &&
        a->page_addr[1] == b->page_addr[1];
}

static void tb_htable_init(void)
{
    unsigned int mode = QHT_MODE_AUTO_RESIZE;

    qht_init(&tb_ctx.htable, tb_cmp, CODE_GEN_HTABLE_SIZE, mode);
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    tcg_allowed = true;
    cpu_gen_init();
    page_init();
    tb_htable_init();
    code_gen_alloc(tb_size);
#if defined(CONFIG_SOFTMMU)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(tcg_ctx);
#endif
}

/* call with @p->lock held */
static inline void invalidate_page_bitmap(PageDesc *p)
{
    assert_page_locked(p);
#ifdef CONFIG_SOFTMMU
    g_free(p->code_bitmap);
    p->code_bitmap = NULL;
    p->code_write_count = 0;
#endif
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */
static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            page_lock(&pd[i]);
            pd[i].first_tb = (uintptr_t)NULL;
            invalidate_page_bitmap(pd + i);
            page_unlock(&pd[i]);
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i, l1_sz = v_l1_size;

    for (i = 0; i < l1_sz; i++) {
        page_flush_tb_1(v_l2_levels, l1_map + i);
    }
}

static gboolean tb_host_size_iter(gpointer key, gpointer value, gpointer data)
{
    const TranslationBlock *tb = value;
    size_t *size = data;

    *size += tb->tc.size;
    return false;
}

/* flush all the translation blocks */
static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
{
    mmap_lock();
    /* If the flush has already been done at the request of another
     * CPU, there is nothing left to do here.
     */
    if (tb_ctx.tb_flush_count != tb_flush_count.host_int) {
        goto done;
    }

    if (DEBUG_TB_FLUSH_GATE) {
        size_t nb_tbs = tcg_nb_tbs();
        size_t host_size = 0;

        tcg_tb_foreach(tb_host_size_iter, &host_size);
        printf("qemu: flush code_size=%zu nb_tbs=%zu avg_tb_size=%zu\n",
               tcg_code_size(), nb_tbs, nb_tbs > 0 ? host_size / nb_tbs : 0);
    }

    CPU_FOREACH(cpu) {
        cpu_tb_jmp_cache_clear(cpu);
    }

    qht_reset_size(&tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
    page_flush_tb();

    tcg_region_reset_all();
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    atomic_mb_set(&tb_ctx.tb_flush_count, tb_ctx.tb_flush_count + 1);

done:
    mmap_unlock();
}

void tb_flush(CPUState *cpu)
{
    if (tcg_enabled()) {
        unsigned tb_flush_count = atomic_mb_read(&tb_ctx.tb_flush_count);
        async_safe_run_on_cpu(cpu, do_tb_flush,
                              RUN_ON_CPU_HOST_INT(tb_flush_count));
    }
}
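
/*
 * Note on the flush handshake above: tb_flush() snapshots tb_flush_count
 * and passes it to do_tb_flush(), which runs with all vCPUs quiescent via
 * async_safe_run_on_cpu().  If several vCPUs request a flush concurrently,
 * the first request to run bumps the count, and the queued duplicates see
 * a stale snapshot and return without flushing again.
 */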

/*
 * Formerly ifdef DEBUG_TB_CHECK. These debug functions are user-mode-only,
 * so in order to prevent bit rot we compile them unconditionally in user-mode,
 * and let the optimizer get rid of them by wrapping their user-only callers
 * with if (DEBUG_TB_CHECK_GATE).
 */
#ifdef CONFIG_USER_ONLY

static void do_tb_invalidate_check(void *p, uint32_t hash, void *userp)
{
    TranslationBlock *tb = p;
    target_ulong addr = *(target_ulong *)userp;

    if (!(addr + TARGET_PAGE_SIZE <= tb->pc || addr >= tb->pc + tb->size)) {
        printf("ERROR invalidate: address=" TARGET_FMT_lx
               " PC=%08lx size=%04x\n", addr, (long)tb->pc, tb->size);
    }
}

/* verify that all the pages have correct rights for code
 *
 * Called with mmap_lock held.
 */
static void tb_invalidate_check(target_ulong address)
{
    address &= TARGET_PAGE_MASK;
    qht_iter(&tb_ctx.htable, do_tb_invalidate_check, &address);
}

static void do_tb_page_check(void *p, uint32_t hash, void *userp)
{
    TranslationBlock *tb = p;
    int flags1, flags2;

    flags1 = page_get_flags(tb->pc);
    flags2 = page_get_flags(tb->pc + tb->size - 1);
    if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
        printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
               (long)tb->pc, tb->size, flags1, flags2);
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    qht_iter(&tb_ctx.htable, do_tb_page_check, NULL);
}

#endif /* CONFIG_USER_ONLY */

/*
 * user-mode: call with mmap_lock held
 * !user-mode: call with @pd->lock held
 */
static inline void tb_page_remove(PageDesc *pd, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    uintptr_t *pprev;
    unsigned int n1;

    assert_page_locked(pd);
    pprev = &pd->first_tb;
    PAGE_FOR_EACH_TB(pd, tb1, n1) {
        if (tb1 == tb) {
            *pprev = tb1->page_next[n1];
            return;
        }
        pprev = &tb1->page_next[n1];
    }
    g_assert_not_reached();
}

/* remove @orig from its @n_orig-th jump list */
static inline void tb_remove_from_jmp_list(TranslationBlock *orig, int n_orig)
{
    uintptr_t ptr, ptr_locked;
    TranslationBlock *dest;
    TranslationBlock *tb;
    uintptr_t *pprev;
    int n;

    /* mark the LSB of jmp_dest[] so that no further jumps can be inserted */
    ptr = atomic_or_fetch(&orig->jmp_dest[n_orig], 1);
    dest = (TranslationBlock *)(ptr & ~1);
    if (dest == NULL) {
        return;
    }

    qemu_spin_lock(&dest->jmp_lock);
    /*
     * While acquiring the lock, the jump might have been removed if the
     * destination TB was invalidated; check again.
     */
    ptr_locked = atomic_read(&orig->jmp_dest[n_orig]);
    if (ptr_locked != ptr) {
        qemu_spin_unlock(&dest->jmp_lock);
        /*
         * The only possibility is that the jump was unlinked via
         * tb_jump_unlink(dest). Seeing here another destination would be a bug,
         * because we set the LSB above.
         */
        g_assert(ptr_locked == 1 && dest->cflags & CF_INVALID);
        return;
    }
    /*
     * We first acquired the lock, and since the destination pointer matches,
     * we know for sure that @orig is in the jmp list.
     */
    pprev = &dest->jmp_list_head;
    TB_FOR_EACH_JMP(dest, tb, n) {
        if (tb == orig && n == n_orig) {
            *pprev = tb->jmp_list_next[n];
            /* no need to set orig->jmp_dest[n]; setting the LSB was enough */
            qemu_spin_unlock(&dest->jmp_lock);
            return;
        }
        pprev = &tb->jmp_list_next[n];
    }
    g_assert_not_reached();
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    uintptr_t addr = (uintptr_t)(tb->tc.ptr + tb->jmp_reset_offset[n]);
    tb_set_jmp_target(tb, n, addr);
}

/* remove any jumps to the TB */
static inline void tb_jmp_unlink(TranslationBlock *dest)
{
    TranslationBlock *tb;
    int n;

    qemu_spin_lock(&dest->jmp_lock);

    TB_FOR_EACH_JMP(dest, tb, n) {
        tb_reset_jump(tb, n);
        atomic_and(&tb->jmp_dest[n], (uintptr_t)NULL | 1);
        /* No need to clear the list entry; setting the dest ptr is enough */
    }
    dest->jmp_list_head = (uintptr_t)NULL;

    qemu_spin_unlock(&dest->jmp_lock);
}

/*
 * In user-mode, call with mmap_lock held.
 * In !user-mode, if @rm_from_page_list is set, call with the TB's pages'
 * locks held.
 */
static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
{
    CPUState *cpu;
    PageDesc *p;
    uint32_t h;
    tb_page_addr_t phys_pc;

    assert_memory_lock();

    /* make sure no further incoming jumps will be chained to this TB */
    qemu_spin_lock(&tb->jmp_lock);
    atomic_set(&tb->cflags, tb->cflags | CF_INVALID);
    qemu_spin_unlock(&tb->jmp_lock);

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb_cflags(tb) & CF_HASH_MASK,
                     tb->trace_vcpu_dstate);
    if (!(tb->cflags & CF_NOCACHE) &&
        !qht_remove(&tb_ctx.htable, tb, h)) {
        return;
    }

    /* remove the TB from the page list */
    if (rm_from_page_list) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(p, tb);
        invalidate_page_bitmap(p);
        if (tb->page_addr[1] != -1) {
            p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
            tb_page_remove(p, tb);
            invalidate_page_bitmap(p);
        }
    }

    /* remove the TB from each CPU's tb_jmp_cache */
    h = tb_jmp_cache_hash_func(tb->pc);
    CPU_FOREACH(cpu) {
        if (atomic_read(&cpu->tb_jmp_cache[h]) == tb) {
            atomic_set(&cpu->tb_jmp_cache[h], NULL);
        }
    }

    /* suppress this TB from the two jump lists */
    tb_remove_from_jmp_list(tb, 0);
    tb_remove_from_jmp_list(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb_jmp_unlink(tb);

    atomic_set(&tcg_ctx->tb_phys_invalidate_count,
               tcg_ctx->tb_phys_invalidate_count + 1);
}

static void tb_phys_invalidate__locked(TranslationBlock *tb)
{
    do_tb_phys_invalidate(tb, true);
}

/* invalidate one TB
 *
 * Called with mmap_lock held in user-mode.
 */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    if (page_addr == -1 && tb->page_addr[0] != -1) {
        page_lock_tb(tb);
        do_tb_phys_invalidate(tb, true);
        page_unlock_tb(tb);
    } else {
        do_tb_phys_invalidate(tb, false);
    }
}

#ifdef CONFIG_SOFTMMU
/* call with @p->lock held */
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    assert_page_locked(p);
    p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE);

    PAGE_FOR_EACH_TB(p, tb, n) {
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE) {
                tb_end = TARGET_PAGE_SIZE;
            }
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start);
    }
}
#endif
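
/*
 * The bitmap built above has one bit per byte of the guest page; a set
 * bit means the byte is covered by at least one TB.  Write paths can
 * then test just the affected offsets instead of walking every TB on
 * the page to decide whether a store hit translated code.
 */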

/* add the tb in the target page and protect it if necessary
 *
 * Called with mmap_lock held for user-mode emulation.
 * Called with @p->lock held in !user-mode.
 */
static inline void tb_page_add(PageDesc *p, TranslationBlock *tb,
                               unsigned int n, tb_page_addr_t page_addr)
{
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    assert_page_locked(p);

    tb->page_addr[n] = page_addr;
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != (uintptr_t)NULL;
#endif
    p->first_tb = (uintptr_t)tb | n;
    invalidate_page_bitmap(p);

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page to be non-writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2) {
                continue;
            }
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
        if (DEBUG_TB_INVALIDATE_GATE) {
            printf("protecting code page: 0x" TB_PAGE_ADDR_FMT "\n", page_addr);
        }
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif
}

/* add a new TB and link it to the physical page tables. phys_page2 is
 * (-1) to indicate that only one page contains the TB.
 *
 * Called with mmap_lock held for user-mode emulation.
 *
 * Returns @tb, or a pointer to an existing TB that matches @tb.
 * Note that in !user-mode, another thread might have already added a TB
 * for the same block of guest code that @tb corresponds to. In that case,
 * the caller should discard the original @tb, and use instead the returned TB.
 */
static TranslationBlock *
tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
             tb_page_addr_t phys_page2)
{
    PageDesc *p;
    PageDesc *p2 = NULL;

    assert_memory_lock();

    if (phys_pc == -1) {
        /*
         * If the TB is not associated with a physical RAM page then
         * it must be a temporary one-insn TB, and we have nothing to do
         * except fill in the page_addr[] fields.
         */
        assert(tb->cflags & CF_NOCACHE);
        tb->page_addr[0] = tb->page_addr[1] = -1;
        return tb;
    }

    /*
     * Add the TB to the page list, first acquiring the pages' locks.
     * We keep the locks held until after inserting the TB in the hash table,
     * so that if the insertion fails we know for sure that the TBs are still
     * in the page descriptors.
     * Note that inserting into the hash table first isn't an option, since
     * we can only insert TBs that are fully initialized.
     */
1610     page_lock_pair(&p, phys_pc, &p2, phys_page2, 1);
1611     tb_page_add(p, tb, 0, phys_pc & TARGET_PAGE_MASK);
1612     if (p2) {
1613         tb_page_add(p2, tb, 1, phys_page2);
1614     } else {
1615         tb->page_addr[1] = -1;
1616     }
1617 
1618     if (!(tb->cflags & CF_NOCACHE)) {
1619         void *existing_tb = NULL;
1620         uint32_t h;
1621 
1622         /* add in the hash table */
1623         h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->cflags & CF_HASH_MASK,
1624                          tb->trace_vcpu_dstate);
1625         qht_insert(&tb_ctx.htable, tb, h, &existing_tb);
1626 
1627         /* remove TB from the page(s) if we couldn't insert it */
1628         if (unlikely(existing_tb)) {
1629             tb_page_remove(p, tb);
1630             invalidate_page_bitmap(p);
1631             if (p2) {
1632                 tb_page_remove(p2, tb);
1633                 invalidate_page_bitmap(p2);
1634             }
1635             tb = existing_tb;
1636         }
1637     }
1638 
1639     if (p2 && p2 != p) {
1640         page_unlock(p2);
1641     }
1642     page_unlock(p);
1643 
1644 #ifdef CONFIG_USER_ONLY
1645     if (DEBUG_TB_CHECK_GATE) {
1646         tb_page_check();
1647     }
1648 #endif
1649     return tb;
1650 }
1651 
1652 /* Called with mmap_lock held for user mode emulation.  */
1653 TranslationBlock *tb_gen_code(CPUState *cpu,
1654                               target_ulong pc, target_ulong cs_base,
1655                               uint32_t flags, int cflags)
1656 {
1657     CPUArchState *env = cpu->env_ptr;
1658     TranslationBlock *tb, *existing_tb;
1659     tb_page_addr_t phys_pc, phys_page2;
1660     target_ulong virt_page2;
1661     tcg_insn_unit *gen_code_buf;
1662     int gen_code_size, search_size, max_insns;
1663 #ifdef CONFIG_PROFILER
1664     TCGProfile *prof = &tcg_ctx->prof;
1665     int64_t ti;
1666 #endif
1667 
1668     assert_memory_lock();
1669 
1670     phys_pc = get_page_addr_code(env, pc);
1671 
1672     if (phys_pc == -1) {
1673         /* Generate a temporary TB with 1 insn in it */
1674         cflags &= ~CF_COUNT_MASK;
1675         cflags |= CF_NOCACHE | 1;
1676     }
1677 
1678     cflags &= ~CF_CLUSTER_MASK;
1679     cflags |= cpu->cluster_index << CF_CLUSTER_SHIFT;
1680 
1681     max_insns = cflags & CF_COUNT_MASK;
1682     if (max_insns == 0) {
1683         max_insns = CF_COUNT_MASK;
1684     }
1685     if (max_insns > TCG_MAX_INSNS) {
1686         max_insns = TCG_MAX_INSNS;
1687     }
1688     if (cpu->singlestep_enabled || singlestep) {
1689         max_insns = 1;
1690     }
1691 
1692  buffer_overflow:
1693     tb = tcg_tb_alloc(tcg_ctx);
1694     if (unlikely(!tb)) {
1695         /* flush must be done */
1696         tb_flush(cpu);
1697         mmap_unlock();
1698         /* Make the execution loop process the flush as soon as possible.  */
1699         cpu->exception_index = EXCP_INTERRUPT;
1700         cpu_loop_exit(cpu);
1701     }
1702 
1703     gen_code_buf = tcg_ctx->code_gen_ptr;
1704     tb->tc.ptr = gen_code_buf;
1705     tb->pc = pc;
1706     tb->cs_base = cs_base;
1707     tb->flags = flags;
1708     tb->cflags = cflags;
1709     tb->orig_tb = NULL;
1710     tb->trace_vcpu_dstate = *cpu->trace_dstate;
1711     tcg_ctx->tb_cflags = cflags;
1712  tb_overflow:
1713 
1714 #ifdef CONFIG_PROFILER
1715     /* includes aborted translations because of exceptions */
1716     atomic_set(&prof->tb_count1, prof->tb_count1 + 1);
1717     ti = profile_getclock();
1718 #endif
1719 
1720     tcg_func_start(tcg_ctx);
1721 
1722     tcg_ctx->cpu = env_cpu(env);
1723     gen_intermediate_code(cpu, tb, max_insns);
1724     tcg_ctx->cpu = NULL;
1725 
1726     trace_translate_block(tb, tb->pc, tb->tc.ptr);
1727 
1728     /* generate machine code */
1729     tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
1730     tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID;
1731     tcg_ctx->tb_jmp_reset_offset = tb->jmp_reset_offset;
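         /*
          * With direct jumps the backend records in jmp_target_arg the
          * offsets of the jump insns so they can later be patched in
          * place; otherwise jmp_target_arg holds per-TB address words
          * from which the generated code loads its jump targets.
          */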
1732     if (TCG_TARGET_HAS_direct_jump) {
1733         tcg_ctx->tb_jmp_insn_offset = tb->jmp_target_arg;
1734         tcg_ctx->tb_jmp_target_addr = NULL;
1735     } else {
1736         tcg_ctx->tb_jmp_insn_offset = NULL;
1737         tcg_ctx->tb_jmp_target_addr = tb->jmp_target_arg;
1738     }
1739 
1740 #ifdef CONFIG_PROFILER
1741     atomic_set(&prof->tb_count, prof->tb_count + 1);
1742     atomic_set(&prof->interm_time, prof->interm_time + profile_getclock() - ti);
1743     ti = profile_getclock();
1744 #endif
1745 
1746     gen_code_size = tcg_gen_code(tcg_ctx, tb);
1747     if (unlikely(gen_code_size < 0)) {
1748         switch (gen_code_size) {
1749         case -1:
1750             /*
1751              * Overflow of code_gen_buffer, or the current slice of it.
1752              *
1753              * TODO: We don't need to re-do gen_intermediate_code, nor
1754              * should we re-do the tcg optimization currently hidden
1755              * inside tcg_gen_code.  All that should be required is to
1756              * flush the TBs, allocate a new TB, re-initialize it per
1757              * above, and re-do the actual code generation.
1758              */
1759             goto buffer_overflow;
1760 
1761         case -2:
1762             /*
1763              * The code generated for the TranslationBlock is too large.
1764              * The maximum size allowed by the unwind info is 64k.
1765              * There may be stricter constraints from relocations
1766              * in the tcg backend.
1767              *
1768              * Try again with half as many insns as we attempted this time.
1769              * If a single insn overflows, there's a bug somewhere...
1770              */
1771             max_insns = tb->icount;
1772             assert(max_insns > 1);
1773             max_insns /= 2;
1774             goto tb_overflow;
1775 
1776         default:
1777             g_assert_not_reached();
1778         }
1779     }
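         /*
          * encode_search() appends a compressed map from host-code offsets
          * back to guest state right after the generated code; this is
          * what cpu_restore_state_from_tb() walks when an exception needs
          * precise guest state.
          */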
1780     search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
1781     if (unlikely(search_size < 0)) {
1782         goto buffer_overflow;
1783     }
1784     tb->tc.size = gen_code_size;
1785 
1786 #ifdef CONFIG_PROFILER
1787     atomic_set(&prof->code_time, prof->code_time + profile_getclock() - ti);
1788     atomic_set(&prof->code_in_len, prof->code_in_len + tb->size);
1789     atomic_set(&prof->code_out_len, prof->code_out_len + gen_code_size);
1790     atomic_set(&prof->search_out_len, prof->search_out_len + search_size);
1791 #endif
1792 
1793 #ifdef DEBUG_DISAS
1794     if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
1795         qemu_log_in_addr_range(tb->pc)) {
1796         qemu_log_lock();
1797         qemu_log("OUT: [size=%d]\n", gen_code_size);
1798         if (tcg_ctx->data_gen_ptr) {
1799             size_t code_size = tcg_ctx->data_gen_ptr - tb->tc.ptr;
1800             size_t data_size = gen_code_size - code_size;
1801             size_t i;
1802 
1803             log_disas(tb->tc.ptr, code_size);
1804 
1805             for (i = 0; i < data_size; i += sizeof(tcg_target_ulong)) {
1806                 if (sizeof(tcg_target_ulong) == 8) {
1807                     qemu_log("0x%08" PRIxPTR ":  .quad  0x%016" PRIx64 "\n",
1808                              (uintptr_t)tcg_ctx->data_gen_ptr + i,
1809                              *(uint64_t *)(tcg_ctx->data_gen_ptr + i));
1810                 } else {
1811                     qemu_log("0x%08" PRIxPTR ":  .long  0x%08x\n",
1812                              (uintptr_t)tcg_ctx->data_gen_ptr + i,
1813                              *(uint32_t *)(tcg_ctx->data_gen_ptr + i));
1814                 }
1815             }
1816         } else {
1817             log_disas(tb->tc.ptr, gen_code_size);
1818         }
1819         qemu_log("\n");
1820         qemu_log_flush();
1821         qemu_log_unlock();
1822     }
1823 #endif
1824 
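         /*
          * e.g. if gen_code_buf is 0x1000, gen_code_size 69 and
          * search_size 10, the pointer advances past 0x104f to 0x1050
          * (illustrative values, assuming CODE_GEN_ALIGN is 16).
          */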
1825     atomic_set(&tcg_ctx->code_gen_ptr, (void *)
1826         ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
1827                  CODE_GEN_ALIGN));
1828 
1829     /* init jump list */
1830     qemu_spin_init(&tb->jmp_lock);
1831     tb->jmp_list_head = (uintptr_t)NULL;
1832     tb->jmp_list_next[0] = (uintptr_t)NULL;
1833     tb->jmp_list_next[1] = (uintptr_t)NULL;
1834     tb->jmp_dest[0] = (uintptr_t)NULL;
1835     tb->jmp_dest[1] = (uintptr_t)NULL;
1836 
1837     /* init original jump addresses which have been set during tcg_gen_code() */
1838     if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
1839         tb_reset_jump(tb, 0);
1840     }
1841     if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
1842         tb_reset_jump(tb, 1);
1843     }
1844 
1845     /* check next page if needed */
1846     virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
1847     phys_page2 = -1;
1848     if ((pc & TARGET_PAGE_MASK) != virt_page2) {
1849         phys_page2 = get_page_addr_code(env, virt_page2);
1850     }
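         /*
          * A TB whose pc started, say, 8 bytes before a page boundary
          * with a 16-byte size straddles two pages; phys_page2 then
          * records the second page so invalidation can find the TB
          * (illustrative values).
          */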
1851     /*
1852      * No explicit memory barrier is required -- tb_link_page() makes the
1853      * TB visible in a consistent state.
1854      */
1855     existing_tb = tb_link_page(tb, phys_pc, phys_page2);
1856     /* if the TB already exists, discard what we just translated */
1857     if (unlikely(existing_tb != tb)) {
1858         uintptr_t orig_aligned = (uintptr_t)gen_code_buf;
1859 
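             /*
              * tcg_tb_alloc() placed the TB struct itself at an
              * icache-line aligned offset just before the code buffer, so
              * rewind past both to reclaim the space for the next
              * translation.
              */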
1860         orig_aligned -= ROUND_UP(sizeof(*tb), qemu_icache_linesize);
1861         atomic_set(&tcg_ctx->code_gen_ptr, (void *)orig_aligned);
1862         return existing_tb;
1863     }
1864     tcg_tb_insert(tb);
1865     return tb;
1866 }
1867 
1868 /*
1869  * @p must be non-NULL.
1870  * user-mode: call with mmap_lock held.
1871  * !user-mode: call with all @pages locked.
1872  */
1873 static void
1874 tb_invalidate_phys_page_range__locked(struct page_collection *pages,
1875                                       PageDesc *p, tb_page_addr_t start,
1876                                       tb_page_addr_t end,
1877                                       uintptr_t retaddr)
1878 {
1879     TranslationBlock *tb;
1880     tb_page_addr_t tb_start, tb_end;
1881     int n;
1882 #ifdef TARGET_HAS_PRECISE_SMC
1883     CPUState *cpu = current_cpu;
1884     CPUArchState *env = NULL;
1885     bool current_tb_not_found = retaddr != 0;
1886     bool current_tb_modified = false;
1887     TranslationBlock *current_tb = NULL;
1888     target_ulong current_pc = 0;
1889     target_ulong current_cs_base = 0;
1890     uint32_t current_flags = 0;
1891 #endif /* TARGET_HAS_PRECISE_SMC */
1892 
1893     assert_page_locked(p);
1894 
1895 #if defined(TARGET_HAS_PRECISE_SMC)
1896     if (cpu != NULL) {
1897         env = cpu->env_ptr;
1898     }
1899 #endif
1900 
1901     /* we remove all the TBs in the range [start, end[ */
1902     /* XXX: see if in some cases it could be faster to invalidate all
1903        the code */
1904     PAGE_FOR_EACH_TB(p, tb, n) {
1905         assert_page_locked(p);
1906         /* NOTE: this is subtle as a TB may span two physical pages */
1907         if (n == 0) {
1908             /* NOTE: tb_end may be after the end of the page, but
1909                it is not a problem */
1910             tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1911             tb_end = tb_start + tb->size;
1912         } else {
1913             tb_start = tb->page_addr[1];
1914             tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1915         }
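             /*
              * Worked example with 4K pages (illustrative): for a 12-byte
              * TB whose pc sits 8 bytes before a page boundary, n == 0
              * gives tb_start = page0 + 0xff8 and tb_end = page0 + 0x1004
              * (past the page end, per the NOTE above), while n == 1
              * gives tb_start = page1 and tb_end = page1 + 0x4.
              */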
1916         if (!(tb_end <= start || tb_start >= end)) {
1917 #ifdef TARGET_HAS_PRECISE_SMC
1918             if (current_tb_not_found) {
1919                 current_tb_not_found = false;
1920                 /* now we have a real cpu fault */
1921                 current_tb = tcg_tb_lookup(retaddr);
1922             }
1923             if (current_tb == tb &&
1924                 (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
1925                 /*
1926                  * If we are modifying the current TB, we must stop
1927                  * its execution. We could be more precise by checking
1928                  * that the modification is after the current PC, but it
1929                  * would require a specialized function to partially
1930                  * restore the CPU state.
1931                  */
1932                 current_tb_modified = true;
1933                 cpu_restore_state_from_tb(cpu, current_tb, retaddr, true);
1934                 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1935                                      &current_flags);
1936             }
1937 #endif /* TARGET_HAS_PRECISE_SMC */
1938             tb_phys_invalidate__locked(tb);
1939         }
1940     }
1941 #if !defined(CONFIG_USER_ONLY)
1942     /* if no code remaining, no need to continue to use slow writes */
1943     /* if no code remains on this page, stop using slow writes for it */
1944         invalidate_page_bitmap(p);
1945         tlb_unprotect_code(start);
1946     }
1947 #endif
1948 #ifdef TARGET_HAS_PRECISE_SMC
1949     if (current_tb_modified) {
1950         page_collection_unlock(pages);
1951         /* Force execution of one insn next time.  */
1952         cpu->cflags_next_tb = 1 | curr_cflags();
1953         mmap_unlock();
1954         cpu_loop_exit_noexc(cpu);
1955     }
1956 #endif
1957 }
1958 
1959 /*
1960  * Invalidate all TBs which intersect with the target physical address range
1961  * [start;end[. NOTE: start and end must refer to the *same* physical page.
1962  * 'is_cpu_write_access' should be true if called from a real cpu write
1963  * The virtual CPU will exit the current TB if code is modified inside
1964  * this TB.
1966  * Called with mmap_lock held for user-mode emulation
1967  */
1968 void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end)
1969 {
1970     struct page_collection *pages;
1971     PageDesc *p;
1972 
1973     assert_memory_lock();
1974 
1975     p = page_find(start >> TARGET_PAGE_BITS);
1976     if (p == NULL) {
1977         return;
1978     }
1979     pages = page_collection_lock(start, end);
1980     tb_invalidate_phys_page_range__locked(pages, p, start, end, 0);
1981     page_collection_unlock(pages);
1982 }
1983 
1984 /*
1985  * Invalidate all TBs which intersect with the target physical address range
1986  * [start;end[. NOTE: start and end may refer to *different* physical pages.
1987  * The virtual CPU will exit the current TB if code is modified inside
1988  * this TB.
1990  *
1991  * Called with mmap_lock held for user-mode emulation.
1992  */
1993 #ifdef CONFIG_SOFTMMU
1994 void tb_invalidate_phys_range(ram_addr_t start, ram_addr_t end)
1995 #else
1996 void tb_invalidate_phys_range(target_ulong start, target_ulong end)
1997 #endif
1998 {
1999     struct page_collection *pages;
2000     tb_page_addr_t next;
2001 
2002     assert_memory_lock();
2003 
2004     pages = page_collection_lock(start, end);
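         /*
          * The range may span several pages; walk it page by page,
          * clamping each sub-range to its page boundary, since the
          * __locked helper operates on a single PageDesc at a time.
          */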
2005     for (next = (start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2006          start < end;
2007          start = next, next += TARGET_PAGE_SIZE) {
2008         PageDesc *pd = page_find(start >> TARGET_PAGE_BITS);
2009         tb_page_addr_t bound = MIN(next, end);
2010 
2011         if (pd == NULL) {
2012             continue;
2013         }
2014         tb_invalidate_phys_page_range__locked(pages, pd, start, bound, 0);
2015     }
2016     page_collection_unlock(pages);
2017 }
2018 
2019 #ifdef CONFIG_SOFTMMU
2020 /* len must be <= 8 and start must be a multiple of len.
2021  * Called via softmmu_template.h when code areas are written to with
2022  * iothread mutex not held.
2023  *
2024  * Call with all @pages in the range [@start, @start + len[ locked.
2025  */
2026 void tb_invalidate_phys_page_fast(struct page_collection *pages,
2027                                   tb_page_addr_t start, int len,
2028                                   uintptr_t retaddr)
2029 {
2030     PageDesc *p;
2031 
2032     assert_memory_lock();
2033 
2034     p = page_find(start >> TARGET_PAGE_BITS);
2035     if (!p) {
2036         return;
2037     }
2038 
2039     assert_page_locked(p);
2040     if (!p->code_bitmap &&
2041         ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
2042         build_page_bitmap(p);
2043     }
2044     if (p->code_bitmap) {
2045         unsigned int nr;
2046         unsigned long b;
2047 
2048         nr = start & ~TARGET_PAGE_MASK;
2049         b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
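             /*
              * b now holds the bitmap word shifted so that bit 0
              * corresponds to the byte at 'start'; e.g. len == 4 tests
              * mask 0xf, i.e. whether any of the four written bytes
              * overlaps translated code.
              */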
2050         if (b & ((1 << len) - 1)) {
2051             goto do_invalidate;
2052         }
2053     } else {
2054     do_invalidate:
2055         tb_invalidate_phys_page_range__locked(pages, p, start, start + len,
2056                                               retaddr);
2057     }
2058 }
2059 #else
2060 /* Called with mmap_lock held. If pc is not 0 then it indicates the
2061  * host PC of the faulting store instruction that caused this invalidate.
2062  * Returns true if the caller needs to abort execution of the current
2063  * TB (because it was modified by this store and the guest CPU has
2064  * precise-SMC semantics).
2065  */
2066 static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
2067 {
2068     TranslationBlock *tb;
2069     PageDesc *p;
2070     int n;
2071 #ifdef TARGET_HAS_PRECISE_SMC
2072     TranslationBlock *current_tb = NULL;
2073     CPUState *cpu = current_cpu;
2074     CPUArchState *env = NULL;
2075     int current_tb_modified = 0;
2076     target_ulong current_pc = 0;
2077     target_ulong current_cs_base = 0;
2078     uint32_t current_flags = 0;
2079 #endif
2080 
2081     assert_memory_lock();
2082 
2083     addr &= TARGET_PAGE_MASK;
2084     p = page_find(addr >> TARGET_PAGE_BITS);
2085     if (!p) {
2086         return false;
2087     }
2088 
2089 #ifdef TARGET_HAS_PRECISE_SMC
2090     if (p->first_tb && pc != 0) {
2091         current_tb = tcg_tb_lookup(pc);
2092     }
2093     if (cpu != NULL) {
2094         env = cpu->env_ptr;
2095     }
2096 #endif
2097     assert_page_locked(p);
2098     PAGE_FOR_EACH_TB(p, tb, n) {
2099 #ifdef TARGET_HAS_PRECISE_SMC
2100         if (current_tb == tb &&
2101             (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
2102             /* If we are modifying the current TB, we must stop
2103              * its execution. We could be more precise by checking
2104              * that the modification is after the current PC, but it
2105              * would require a specialized function to partially
2106              * restore the CPU state.
2107              */
2108             current_tb_modified = 1;
2109             cpu_restore_state_from_tb(cpu, current_tb, pc, true);
2110             cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
2111                                  &current_flags);
2112         }
2113 #endif /* TARGET_HAS_PRECISE_SMC */
2114         tb_phys_invalidate(tb, addr);
2115     }
2116     p->first_tb = (uintptr_t)NULL;
2117 #ifdef TARGET_HAS_PRECISE_SMC
2118     if (current_tb_modified) {
2119         /* Force execution of one insn next time.  */
2120         cpu->cflags_next_tb = 1 | curr_cflags();
2121         return true;
2122     }
2123 #endif
2124 
2125     return false;
2126 }
2127 #endif
2128 
2129 /* user-mode: call with mmap_lock held */
2130 void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr)
2131 {
2132     TranslationBlock *tb;
2133 
2134     assert_memory_lock();
2135 
2136     tb = tcg_tb_lookup(retaddr);
2137     if (tb) {
2138         /* We can use retranslation to find the PC.  */
2139         cpu_restore_state_from_tb(cpu, tb, retaddr, true);
2140         tb_phys_invalidate(tb, -1);
2141     } else {
2142         /* The exception probably happened in a helper.  The CPU state should
2143            have been saved before calling it. Fetch the PC from there.  */
2144         CPUArchState *env = cpu->env_ptr;
2145         target_ulong pc, cs_base;
2146         tb_page_addr_t addr;
2147         uint32_t flags;
2148 
2149         cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
2150         addr = get_page_addr_code(env, pc);
2151         if (addr != -1) {
2152             tb_invalidate_phys_range(addr, addr + 1);
2153         }
2154     }
2155 }
2156 
2157 #ifndef CONFIG_USER_ONLY
2158 /* in deterministic execution mode, instructions doing device I/Os
2159  * must be at the end of the TB.
2160  *
2161  * Called by softmmu_template.h, with iothread mutex not held.
2162  */
2163 void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
2164 {
2165 #if defined(TARGET_MIPS) || defined(TARGET_SH4)
2166     CPUArchState *env = cpu->env_ptr;
2167 #endif
2168     TranslationBlock *tb;
2169     uint32_t n;
2170 
2171     tb = tcg_tb_lookup(retaddr);
2172     if (!tb) {
2173         cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
2174                   (void *)retaddr);
2175     }
2176     cpu_restore_state_from_tb(cpu, tb, retaddr, true);
2177 
2178     /* On MIPS and SH, delay slot instructions can only be restarted if
2179        they were already the first instruction in the TB.  If this is not
2180        the first instruction in a TB then re-execute the preceding
2181        branch.  */
2182     n = 1;
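         /* n counts the guest insns to re-execute: the I/O insn itself,
            plus the preceding branch when we back out of a delay slot. */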
2183 #if defined(TARGET_MIPS)
2184     if ((env->hflags & MIPS_HFLAG_BMASK) != 0
2185         && env->active_tc.PC != tb->pc) {
2186         env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
2187         cpu_neg(cpu)->icount_decr.u16.low++;
2188         env->hflags &= ~MIPS_HFLAG_BMASK;
2189         n = 2;
2190     }
2191 #elif defined(TARGET_SH4)
2192     if ((env->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) != 0
2193         && env->pc != tb->pc) {
2194         env->pc -= 2;
2195         cpu_neg(cpu)->icount_decr.u16.low++;
2196         env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
2197         n = 2;
2198     }
2199 #endif
2200 
2201     /* Generate a new TB executing the I/O insn.  */
2202     cpu->cflags_next_tb = curr_cflags() | CF_LAST_IO | n;
2203 
2204     if (tb_cflags(tb) & CF_NOCACHE) {
2205         if (tb->orig_tb) {
2206             /* Invalidate original TB if this TB was generated in
2207              * cpu_exec_nocache() */
2208             tb_phys_invalidate(tb->orig_tb, -1);
2209         }
2210         tcg_tb_remove(tb);
2211     }
2212 
2213     /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
2214      * the first in the TB) then we end up generating a whole new TB and
2215      * repeating the fault, which is horribly inefficient.
2216      * Better would be to execute just this insn uncached, or generate a
2217      * second new TB.
2218      */
2219     cpu_loop_exit_noexc(cpu);
2220 }
2221 
2222 static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
2223 {
2224     unsigned int i, i0 = tb_jmp_cache_hash_page(page_addr);
2225 
2226     for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
2227         atomic_set(&cpu->tb_jmp_cache[i0 + i], NULL);
2228     }
2229 }
2230 
2231 void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
2232 {
2233     /* Discard jump cache entries for any tb which might overlap the
2234        flushed page (a TB may span two pages, hence the two calls).  */
2235     tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
2236     tb_jmp_cache_clear_page(cpu, addr);
2237 }
2238 
2239 static void print_qht_statistics(struct qht_stats hst)
2240 {
2241     uint32_t hgram_opts;
2242     size_t hgram_bins;
2243     char *hgram;
2244 
2245     if (!hst.head_buckets) {
2246         return;
2247     }
2248     qemu_printf("TB hash buckets     %zu/%zu (%0.2f%% head buckets used)\n",
2249                 hst.used_head_buckets, hst.head_buckets,
2250                 (double)hst.used_head_buckets / hst.head_buckets * 100);
2251 
2252     hgram_opts =  QDIST_PR_BORDER | QDIST_PR_LABELS;
2253     hgram_opts |= QDIST_PR_100X   | QDIST_PR_PERCENT;
2254     if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) {
2255         hgram_opts |= QDIST_PR_NODECIMAL;
2256     }
2257     hgram = qdist_pr(&hst.occupancy, 10, hgram_opts);
2258     qemu_printf("TB hash occupancy   %0.2f%% avg chain occ. Histogram: %s\n",
2259                 qdist_avg(&hst.occupancy) * 100, hgram);
2260     g_free(hgram);
2261 
2262     hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
2263     hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain);
2264     if (hgram_bins > 10) {
2265         hgram_bins = 10;
2266     } else {
2267         hgram_bins = 0;
2268         hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE;
2269     }
2270     hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts);
2271     qemu_printf("TB hash avg chain   %0.3f buckets. Histogram: %s\n",
2272                 qdist_avg(&hst.chain), hgram);
2273     g_free(hgram);
2274 }
2275 
2276 struct tb_tree_stats {
2277     size_t nb_tbs;
2278     size_t host_size;
2279     size_t target_size;
2280     size_t max_target_size;
2281     size_t direct_jmp_count;
2282     size_t direct_jmp2_count;
2283     size_t cross_page;
2284 };
2285 
2286 static gboolean tb_tree_stats_iter(gpointer key, gpointer value, gpointer data)
2287 {
2288     const TranslationBlock *tb = value;
2289     struct tb_tree_stats *tst = data;
2290 
2291     tst->nb_tbs++;
2292     tst->host_size += tb->tc.size;
2293     tst->target_size += tb->size;
2294     if (tb->size > tst->max_target_size) {
2295         tst->max_target_size = tb->size;
2296     }
2297     if (tb->page_addr[1] != -1) {
2298         tst->cross_page++;
2299     }
2300     if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
2301         tst->direct_jmp_count++;
2302         if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
2303             tst->direct_jmp2_count++;
2304         }
2305     }
2306     return false;
2307 }
2308 
2309 void dump_exec_info(void)
2310 {
2311     struct tb_tree_stats tst = {};
2312     struct qht_stats hst;
2313     size_t nb_tbs, flush_full, flush_part, flush_elide;
2314 
2315     tcg_tb_foreach(tb_tree_stats_iter, &tst);
2316     nb_tbs = tst.nb_tbs;
2317     /* XXX: avoid using doubles ? */
2318     qemu_printf("Translation buffer state:\n");
2319     /*
2320      * Report total code size including the padding and TB structs;
2321      * otherwise users might think "-tb-size" is not honoured.
2322      * For avg host size we use the precise numbers from tb_tree_stats though.
2323      */
2324     qemu_printf("gen code size       %zu/%zu\n",
2325                 tcg_code_size(), tcg_code_capacity());
2326     qemu_printf("TB count            %zu\n", nb_tbs);
2327     qemu_printf("TB avg target size  %zu max=%zu bytes\n",
2328                 nb_tbs ? tst.target_size / nb_tbs : 0,
2329                 tst.max_target_size);
2330     qemu_printf("TB avg host size    %zu bytes (expansion ratio: %0.1f)\n",
2331                 nb_tbs ? tst.host_size / nb_tbs : 0,
2332                 tst.target_size ? (double)tst.host_size / tst.target_size : 0);
2333     qemu_printf("cross page TB count %zu (%zu%%)\n", tst.cross_page,
2334                 nb_tbs ? (tst.cross_page * 100) / nb_tbs : 0);
2335     qemu_printf("direct jump count   %zu (%zu%%) (2 jumps=%zu %zu%%)\n",
2336                 tst.direct_jmp_count,
2337                 nb_tbs ? (tst.direct_jmp_count * 100) / nb_tbs : 0,
2338                 tst.direct_jmp2_count,
2339                 nb_tbs ? (tst.direct_jmp2_count * 100) / nb_tbs : 0);
2340 
2341     qht_statistics_init(&tb_ctx.htable, &hst);
2342     print_qht_statistics(hst);
2343     qht_statistics_destroy(&hst);
2344 
2345     qemu_printf("\nStatistics:\n");
2346     qemu_printf("TB flush count      %u\n",
2347                 atomic_read(&tb_ctx.tb_flush_count));
2348     qemu_printf("TB invalidate count %zu\n",
2349                 tcg_tb_phys_invalidate_count());
2350 
2351     tlb_flush_counts(&flush_full, &flush_part, &flush_elide);
2352     qemu_printf("TLB full flushes    %zu\n", flush_full);
2353     qemu_printf("TLB partial flushes %zu\n", flush_part);
2354     qemu_printf("TLB elided flushes  %zu\n", flush_elide);
2355     tcg_dump_info();
2356 }
2357 
2358 void dump_opcount_info(void)
2359 {
2360     tcg_dump_op_count();
2361 }
2362 
2363 #else /* CONFIG_USER_ONLY */
2364 
2365 void cpu_interrupt(CPUState *cpu, int mask)
2366 {
2367     g_assert(qemu_mutex_iothread_locked());
2368     cpu->interrupt_request |= mask;
2369     atomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1);
2370 }
2371 
2372 /*
2373  * Walks guest process memory "regions" one by one
2374  * and calls callback function 'fn' for each region.
2375  */
2376 struct walk_memory_regions_data {
2377     walk_memory_regions_fn fn;
2378     void *priv;
2379     target_ulong start;
2380     int prot;
2381 };
2382 
2383 static int walk_memory_regions_end(struct walk_memory_regions_data *data,
2384                                    target_ulong end, int new_prot)
2385 {
2386     if (data->start != -1u) {
2387         int rc = data->fn(data->priv, data->start, end, data->prot);
2388         if (rc != 0) {
2389             return rc;
2390         }
2391     }
2392 
2393     data->start = (new_prot ? end : -1u);
2394     data->prot = new_prot;
2395 
2396     return 0;
2397 }
2398 
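     /*
      * Recursively walk one node of the l1/l2 radix tree: a NULL slot
      * means the whole subtree is unmapped (prot 0); at level 0 the node
      * is an array of PageDesc whose flags are compared page by page,
      * while higher levels recurse with each slot covering V_L2_BITS more
      * address bits. Any change of prot closes the currently open region
      * via walk_memory_regions_end().
      */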
2399 static int walk_memory_regions_1(struct walk_memory_regions_data *data,
2400                                  target_ulong base, int level, void **lp)
2401 {
2402     target_ulong pa;
2403     int i, rc;
2404 
2405     if (*lp == NULL) {
2406         return walk_memory_regions_end(data, base, 0);
2407     }
2408 
2409     if (level == 0) {
2410         PageDesc *pd = *lp;
2411 
2412         for (i = 0; i < V_L2_SIZE; ++i) {
2413             int prot = pd[i].flags;
2414 
2415             pa = base | (i << TARGET_PAGE_BITS);
2416             if (prot != data->prot) {
2417                 rc = walk_memory_regions_end(data, pa, prot);
2418                 if (rc != 0) {
2419                     return rc;
2420                 }
2421             }
2422         }
2423     } else {
2424         void **pp = *lp;
2425 
2426         for (i = 0; i < V_L2_SIZE; ++i) {
2427             pa = base | ((target_ulong)i <<
2428                 (TARGET_PAGE_BITS + V_L2_BITS * level));
2429             rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2430             if (rc != 0) {
2431                 return rc;
2432             }
2433         }
2434     }
2435 
2436     return 0;
2437 }
2438 
2439 int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2440 {
2441     struct walk_memory_regions_data data;
2442     uintptr_t i, l1_sz = v_l1_size;
2443 
2444     data.fn = fn;
2445     data.priv = priv;
2446     data.start = -1u;
2447     data.prot = 0;
2448 
2449     for (i = 0; i < l1_sz; i++) {
2450         target_ulong base = i << (v_l1_shift + TARGET_PAGE_BITS);
2451         int rc = walk_memory_regions_1(&data, base, v_l2_levels, l1_map + i);
2452         if (rc != 0) {
2453             return rc;
2454         }
2455     }
2456 
2457     return walk_memory_regions_end(&data, 0, 0);
2458 }
2459 
2460 static int dump_region(void *priv, target_ulong start,
2461     target_ulong end, unsigned long prot)
2462 {
2463     FILE *f = (FILE *)priv;
2464 
2465     (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx
2466         " "TARGET_FMT_lx" %c%c%c\n",
2467         start, end, end - start,
2468         ((prot & PAGE_READ) ? 'r' : '-'),
2469         ((prot & PAGE_WRITE) ? 'w' : '-'),
2470         ((prot & PAGE_EXEC) ? 'x' : '-'));
2471 
2472     return 0;
2473 }
2474 
2475 /* dump memory mappings */
2476 void page_dump(FILE *f)
2477 {
2478     const int length = sizeof(target_ulong) * 2;
2479     (void) fprintf(f, "%-*s %-*s %-*s %s\n",
2480             length, "start", length, "end", length, "size", "prot");
2481     walk_memory_regions(f, dump_region);
2482 }
2483 
2484 int page_get_flags(target_ulong address)
2485 {
2486     PageDesc *p;
2487 
2488     p = page_find(address >> TARGET_PAGE_BITS);
2489     if (!p) {
2490         return 0;
2491     }
2492     return p->flags;
2493 }
2494 
2495 /* Modify the flags of a page and invalidate the code if necessary.
2496    The flag PAGE_WRITE_ORG is positioned automatically depending
2497    on PAGE_WRITE.  The mmap_lock should already be held.  */
2498 void page_set_flags(target_ulong start, target_ulong end, int flags)
2499 {
2500     target_ulong addr, len;
2501 
2502     /* This function should never be called with addresses outside the
2503        guest address space.  If this assert fires, it probably indicates
2504        a missing call to h2g_valid.  */
2505 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2506     assert(end <= ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2507 #endif
2508     assert(start < end);
2509     assert_memory_lock();
2510 
2511     start = start & TARGET_PAGE_MASK;
2512     end = TARGET_PAGE_ALIGN(end);
2513 
2514     if (flags & PAGE_WRITE) {
2515         flags |= PAGE_WRITE_ORG;
2516     }
2517 
2518     for (addr = start, len = end - start;
2519          len != 0;
2520          len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2521         PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2522 
2523         /* If a non-writable page that contains code becomes writable,
2524            invalidate the translated code inside.  */
2525         if (!(p->flags & PAGE_WRITE) &&
2526             (flags & PAGE_WRITE) &&
2527             p->first_tb) {
2528             tb_invalidate_phys_page(addr, 0);
2529         }
2530         p->flags = flags;
2531     }
2532 }
2533 
2534 int page_check_range(target_ulong start, target_ulong len, int flags)
2535 {
2536     PageDesc *p;
2537     target_ulong end;
2538     target_ulong addr;
2539 
2540     /* This function should never be called with addresses outside the
2541        guest address space.  If this assert fires, it probably indicates
2542        a missing call to h2g_valid.  */
2543 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2544     assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2545 #endif
2546 
2547     if (len == 0) {
2548         return 0;
2549     }
2550     if (start + len - 1 < start) {
2551         /* We've wrapped around.  */
2552         return -1;
2553     }
2554 
2555     /* must do this before we lose bits in the next step */
2556     end = TARGET_PAGE_ALIGN(start + len);
2557     start = start & TARGET_PAGE_MASK;
2558 
2559     for (addr = start, len = end - start;
2560          len != 0;
2561          len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2562         p = page_find(addr >> TARGET_PAGE_BITS);
2563         if (!p) {
2564             return -1;
2565         }
2566         if (!(p->flags & PAGE_VALID)) {
2567             return -1;
2568         }
2569 
2570         if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
2571             return -1;
2572         }
2573         if (flags & PAGE_WRITE) {
2574             if (!(p->flags & PAGE_WRITE_ORG)) {
2575                 return -1;
2576             }
2577             /* unprotect the page if it was put read-only because it
2578                contains translated code */
2579             if (!(p->flags & PAGE_WRITE)) {
2580                 if (!page_unprotect(addr, 0)) {
2581                     return -1;
2582                 }
2583             }
2584         }
2585     }
2586     return 0;
2587 }
2588 
2589 /* called from signal handler: invalidate the code and unprotect the
2590  * page. Return 0 if the fault was not handled, 1 if it was handled,
2591  * and 2 if it was handled but the caller must cause the TB to be
2592  * immediately exited. (We can only return 2 if the 'pc' argument is
2593  * non-zero.)
2594  */
2595 int page_unprotect(target_ulong address, uintptr_t pc)
2596 {
2597     unsigned int prot;
2598     bool current_tb_invalidated;
2599     PageDesc *p;
2600     target_ulong host_start, host_end, addr;
2601 
2602     /* Technically this isn't safe inside a signal handler.  However we
2603        know this only ever happens in a synchronous SEGV handler, so in
2604        practice it seems to be ok.  */
2605     mmap_lock();
2606 
2607     p = page_find(address >> TARGET_PAGE_BITS);
2608     if (!p) {
2609         mmap_unlock();
2610         return 0;
2611     }
2612 
2613     /* if the page was really writable, then we change its
2614        protection back to writable */
2615     if (p->flags & PAGE_WRITE_ORG) {
2616         current_tb_invalidated = false;
2617         if (p->flags & PAGE_WRITE) {
2618             /* If the page is actually marked WRITE then assume this is because
2619              * this thread raced with another one which got here first and
2620              * set the page to PAGE_WRITE and did the TB invalidate for us.
2621              */
2622 #ifdef TARGET_HAS_PRECISE_SMC
2623             TranslationBlock *current_tb = tcg_tb_lookup(pc);
2624             if (current_tb) {
2625                 current_tb_invalidated = tb_cflags(current_tb) & CF_INVALID;
2626             }
2627 #endif
2628         } else {
2629             host_start = address & qemu_host_page_mask;
2630             host_end = host_start + qemu_host_page_size;
2631 
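                 /*
                  * When the host page size exceeds the target's (e.g. 64K
                  * host pages vs 4K target pages), every target page
                  * within the host page is made writable together, since
                  * mprotect() below operates at host-page granularity.
                  */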
2632             prot = 0;
2633             for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
2634                 p = page_find(addr >> TARGET_PAGE_BITS);
2635                 p->flags |= PAGE_WRITE;
2636                 prot |= p->flags;
2637 
2638                 /* and since the content will be modified, we must invalidate
2639                    the corresponding translated code. */
2640                 current_tb_invalidated |= tb_invalidate_phys_page(addr, pc);
2641 #ifdef CONFIG_USER_ONLY
2642                 if (DEBUG_TB_CHECK_GATE) {
2643                     tb_invalidate_check(addr);
2644                 }
2645 #endif
2646             }
2647             mprotect((void *)g2h(host_start), qemu_host_page_size,
2648                      prot & PAGE_BITS);
2649         }
2650         mmap_unlock();
2651         /* If current TB was invalidated return to main loop */
2652         return current_tb_invalidated ? 2 : 1;
2653     }
2654     mmap_unlock();
2655     return 0;
2656 }
2657 #endif /* CONFIG_USER_ONLY */
2658 
2659 /* This is a wrapper for common code that cannot use CONFIG_SOFTMMU */
2660 void tcg_flush_softmmu_tlb(CPUState *cs)
2661 {
2662 #ifdef CONFIG_SOFTMMU
2663     tlb_flush(cs);
2664 #endif
2665 }
2666