/* xref: /openbmc/qemu/accel/tcg/translate-all.c (revision a2fa63a8) */
/*
 *  Host code generation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu-common.h"

#define NO_CPU_IO_DEFS
#include "cpu.h"
#include "trace.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else
#include "exec/ram_addr.h"
#endif

#include "exec/cputlb.h"
#include "exec/tb-hash.h"
#include "translate-all.h"
#include "qemu/bitmap.h"
#include "qemu/error-report.h"
#include "qemu/qemu-print.h"
#include "qemu/timer.h"
#include "qemu/main-loop.h"
#include "exec/log.h"
#include "sysemu/cpus.h"
#include "sysemu/tcg.h"

/* #define DEBUG_TB_INVALIDATE */
/* #define DEBUG_TB_FLUSH */
/* make various TB consistency checks */
/* #define DEBUG_TB_CHECK */

#ifdef DEBUG_TB_INVALIDATE
#define DEBUG_TB_INVALIDATE_GATE 1
#else
#define DEBUG_TB_INVALIDATE_GATE 0
#endif

#ifdef DEBUG_TB_FLUSH
#define DEBUG_TB_FLUSH_GATE 1
#else
#define DEBUG_TB_FLUSH_GATE 0
#endif

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#ifdef DEBUG_TB_CHECK
#define DEBUG_TB_CHECK_GATE 1
#else
#define DEBUG_TB_CHECK_GATE 0
#endif

/* Access to the various translation structures needs to be serialised
 * via locks for consistency.
 * In user-mode emulation, access to the memory-related structures is
 * protected with mmap_lock.
 * In !user-mode we use per-page locks.
 */
#ifdef CONFIG_SOFTMMU
#define assert_memory_lock()
#else
#define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    uintptr_t first_tb;
#ifdef CONFIG_SOFTMMU
    /* In order to optimize self-modifying code, we count the number
       of code writes to a given page; past a threshold we build and
       use a bitmap of which parts of the page contain code. */
    unsigned long *code_bitmap;
    unsigned int code_write_count;
#else
    unsigned long flags;
#endif
#ifndef CONFIG_USER_ONLY
    QemuSpin lock;
#endif
} PageDesc;

/**
 * struct page_entry - page descriptor entry
 * @pd:     pointer to the &struct PageDesc of the page this entry represents
 * @index:  page index of the page
 * @locked: whether the page is locked
 *
 * This struct helps us keep track of the locked state of a page, without
 * bloating &struct PageDesc.
 *
 * A page lock protects accesses to all fields of &struct PageDesc.
 *
 * See also: &struct page_collection.
 */
struct page_entry {
    PageDesc *pd;
    tb_page_addr_t index;
    bool locked;
};

/**
 * struct page_collection - tracks a set of pages (i.e. &struct page_entry's)
 * @tree:   Binary search tree (BST) of the pages, with key == page index
 * @max:    Pointer to the page in @tree with the highest page index
 *
 * To avoid deadlock we lock pages in ascending order of page index.
 * When operating on a set of pages, we need to keep track of them so that
 * we can lock them in order and also unlock them later. For this we collect
 * pages (i.e. &struct page_entry's) in a binary search @tree. Given that the
 * @tree implementation we use does not provide an O(1) operation to obtain the
 * highest-ranked element, we use @max to keep track of the inserted page
 * with the highest index. This is valuable because if a page is not in
 * the tree and its index is higher than @max's, then we can lock it
 * without breaking the locking order rule.
 *
 * Note on naming: 'struct page_set' would be shorter, but we already have a few
 * page_set_*() helpers, so page_collection is used instead to avoid confusion.
 *
 * See also: page_collection_lock().
 */
struct page_collection {
    GTree *tree;
    struct page_entry *max;
};

/* list iterators for lists of tagged pointers in TranslationBlock */
#define TB_FOR_EACH_TAGGED(head, tb, n, field)                          \
    for (n = (head) & 1, tb = (TranslationBlock *)((head) & ~1);        \
         tb; tb = (TranslationBlock *)tb->field[n], n = (uintptr_t)tb & 1, \
             tb = (TranslationBlock *)((uintptr_t)tb & ~1))

#define PAGE_FOR_EACH_TB(pagedesc, tb, n)                       \
    TB_FOR_EACH_TAGGED((pagedesc)->first_tb, tb, n, page_next)

#define TB_FOR_EACH_JMP(head_tb, tb, n)                                 \
    TB_FOR_EACH_TAGGED((head_tb)->jmp_list_head, tb, n, jmp_list_next)
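
/*
 * Illustration of the tagged-pointer encoding (a sketch, not part of the
 * upstream comments): each list link stores a TranslationBlock pointer in
 * the upper bits and, in bit 0, the slot index "n" through which the list
 * continues.  For example, if a TB covers a page via its second page slot,
 * the page's list head would be
 *
 *     pd->first_tb == (uintptr_t)tb | 1;
 *
 * and the iterator above would then follow tb->page_next[1] (itself a
 * tagged pointer) to reach the next TB on that page.
 */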

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define V_L2_BITS 10
#define V_L2_SIZE (1 << V_L2_BITS)

/* Make sure all possible CPU event bits fit in tb->trace_vcpu_dstate */
QEMU_BUILD_BUG_ON(CPU_TRACE_DSTATE_MAX_EVENTS >
                  sizeof_field(TranslationBlock, trace_vcpu_dstate)
                  * BITS_PER_BYTE);

/*
 * L1 Mapping properties
 */
static int v_l1_size;
static int v_l1_shift;
static int v_l2_levels;

/* The bottom level has pointers to PageDesc, and is indexed by
 * anything from 4 to (V_L2_BITS + 3) bits, depending on target page size.
 */
#define V_L1_MIN_BITS 4
#define V_L1_MAX_BITS (V_L2_BITS + 3)
#define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS)

static void *l1_map[V_L1_MAX_SIZE];

/* code generation context */
TCGContext tcg_init_ctx;
__thread TCGContext *tcg_ctx;
TBContext tb_ctx;
bool parallel_cpus;

static void page_table_config_init(void)
{
    uint32_t v_l1_bits;

    assert(TARGET_PAGE_BITS);
    /* The bits remaining after N lower levels of page tables.  */
    v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS;
    if (v_l1_bits < V_L1_MIN_BITS) {
        v_l1_bits += V_L2_BITS;
    }

    v_l1_size = 1 << v_l1_bits;
    v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits;
    v_l2_levels = v_l1_shift / V_L2_BITS - 1;

    assert(v_l1_bits <= V_L1_MAX_BITS);
    assert(v_l1_shift % V_L2_BITS == 0);
    assert(v_l2_levels >= 0);
}
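
/*
 * Worked example (an illustration, using hypothetical configuration values):
 * with L1_MAP_ADDR_SPACE_BITS == 48 and TARGET_PAGE_BITS == 12 there are
 * 36 page-index bits.  36 % 10 == 6, which is >= V_L1_MIN_BITS, so:
 *
 *     v_l1_bits   = 6   -> v_l1_size = 64 entries in l1_map
 *     v_l1_shift  = 30  -> L1 is indexed by bits [35:30] of the page index
 *     v_l2_levels = 2   -> two intermediate levels of 1024 pointers each,
 *                          then a bottom-level array of 1024 PageDescs
 *
 * With a 32-bit address space instead, 20 % 10 == 0 < V_L1_MIN_BITS, so
 * v_l1_bits becomes 10 and v_l2_levels == 0: l1_map then points directly
 * at bottom-level PageDesc arrays.
 */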

void cpu_gen_init(void)
{
    tcg_context_init(&tcg_init_ctx);
}

/* Encode VAL as a signed leb128 sequence at P.
   Return P incremented past the encoded value.  */
static uint8_t *encode_sleb128(uint8_t *p, target_long val)
{
    int more, byte;

    do {
        byte = val & 0x7f;
        val >>= 7;
        more = !((val == 0 && (byte & 0x40) == 0)
                 || (val == -1 && (byte & 0x40) != 0));
        if (more) {
            byte |= 0x80;
        }
        *p++ = byte;
    } while (more);

    return p;
}

/* Decode a signed leb128 sequence at *PP; increment *PP past the
   decoded value.  Return the decoded value.  */
static target_long decode_sleb128(uint8_t **pp)
{
    uint8_t *p = *pp;
    target_long val = 0;
    int byte, shift = 0;

    do {
        byte = *p++;
        val |= (target_ulong)(byte & 0x7f) << shift;
        shift += 7;
    } while (byte & 0x80);
    if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
        val |= -(target_ulong)1 << shift;
    }

    *pp = p;
    return val;
}
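
/*
 * Byte-level example of the sleb128 encoding above (illustration only):
 *
 *     encode_sleb128(p, 300)  stores { 0xac, 0x02 }
 *         300 = 0b1_0010_1100: the low 7 bits (0x2c) with the continuation
 *         bit (0x80) set, then the remaining value 0x02 with bit 6 clear.
 *
 *     encode_sleb128(p, -1)   stores { 0x7f }
 *         a single byte whose sign bit (0x40) is set, so decode_sleb128()
 *         sign-extends it back to -1.
 */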

/* Encode the data collected about the instructions while compiling TB.
   Place the data at BLOCK, and return the number of bytes consumed.

   The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
   which come from the target's insn_start data, followed by a uintptr_t
   which comes from the host pc of the end of the code implementing the insn.

   Each line of the table is encoded as sleb128 deltas from the previous
   line.  The seed for the first line is { tb->pc, 0..., tb->tc.ptr }.
   That is, the first column is seeded with the guest pc, the last column
   with the host pc, and the middle columns with zeros.  */

static int encode_search(TranslationBlock *tb, uint8_t *block)
{
    uint8_t *highwater = tcg_ctx->code_gen_highwater;
    uint8_t *p = block;
    int i, j, n;

    for (i = 0, n = tb->icount; i < n; ++i) {
        target_ulong prev;

        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            if (i == 0) {
                prev = (j == 0 ? tb->pc : 0);
            } else {
                prev = tcg_ctx->gen_insn_data[i - 1][j];
            }
            p = encode_sleb128(p, tcg_ctx->gen_insn_data[i][j] - prev);
        }
        prev = (i == 0 ? 0 : tcg_ctx->gen_insn_end_off[i - 1]);
        p = encode_sleb128(p, tcg_ctx->gen_insn_end_off[i] - prev);

        /* Test for (pending) buffer overflow.  The assumption is that any
           one row beginning below the high water mark cannot overrun
           the buffer completely.  Thus we can test for overflow after
           encoding a row without having to check during encoding.  */
        if (unlikely(p > highwater)) {
            return -1;
        }
    }

    return p - block;
}
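
/*
 * Sketch of the resulting table for a two-insn TB, assuming (for brevity
 * only) TARGET_INSN_START_WORDS == 1:
 *
 *     row 0: sleb128(insn0_pc - tb->pc),   sleb128(end_off0 - 0)
 *     row 1: sleb128(insn1_pc - insn0_pc), sleb128(end_off1 - end_off0)
 *
 * Since consecutive instructions are usually adjacent, the deltas are
 * small and most of them encode into a single byte.
 */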

/* The cpu state corresponding to 'searched_pc' is restored.
 * When reset_icount is true, the current TB will be interrupted and
 * icount should be recalculated.
 */
static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                                     uintptr_t searched_pc, bool reset_icount)
{
    target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc };
    uintptr_t host_pc = (uintptr_t)tb->tc.ptr;
    CPUArchState *env = cpu->env_ptr;
    uint8_t *p = tb->tc.ptr + tb->tc.size;
    int i, j, num_insns = tb->icount;
#ifdef CONFIG_PROFILER
    TCGProfile *prof = &tcg_ctx->prof;
    int64_t ti = profile_getclock();
#endif

    searched_pc -= GETPC_ADJ;

    if (searched_pc < host_pc) {
        return -1;
    }

    /* Reconstruct the stored insn data while looking for the point at
       which the end of the insn exceeds the searched_pc.  */
    for (i = 0; i < num_insns; ++i) {
        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            data[j] += decode_sleb128(&p);
        }
        host_pc += decode_sleb128(&p);
        if (host_pc > searched_pc) {
            goto found;
        }
    }
    return -1;

 found:
    if (reset_icount && (tb_cflags(tb) & CF_USE_ICOUNT)) {
        assert(use_icount);
        /* Reset the cycle counter to the start of the block
           and shift it to the number of actually executed instructions */
        cpu_neg(cpu)->icount_decr.u16.low += num_insns - i;
    }
    restore_state_to_opc(env, tb, data);

#ifdef CONFIG_PROFILER
    atomic_set(&prof->restore_time,
                prof->restore_time + profile_getclock() - ti);
    atomic_set(&prof->restore_count, prof->restore_count + 1);
#endif
    return 0;
}

bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc, bool will_exit)
{
    TranslationBlock *tb;
    bool r = false;
    uintptr_t check_offset;

    /* The host_pc has to be in the region of the current code buffer. If
     * it is not we will not be able to resolve it here. The two cases
     * where host_pc will not be correct are:
     *
     *  - fault during translation (instruction fetch)
     *  - fault from helper (not using GETPC() macro)
     *
     * Either way we need to return early as we can't resolve it here.
     *
     * We are using unsigned arithmetic, so if host_pc <
     * tcg_init_ctx.code_gen_buffer, check_offset will wrap to a value
     * way above code_gen_buffer_size.
     */
    check_offset = host_pc - (uintptr_t) tcg_init_ctx.code_gen_buffer;

    if (check_offset < tcg_init_ctx.code_gen_buffer_size) {
        tb = tcg_tb_lookup(host_pc);
        if (tb) {
            cpu_restore_state_from_tb(cpu, tb, host_pc, will_exit);
            if (tb_cflags(tb) & CF_NOCACHE) {
                /* one-shot translation, invalidate it immediately */
                tb_phys_invalidate(tb, -1);
                tcg_tb_remove(tb);
            }
            r = true;
        }
    }

    return r;
}

static void page_init(void)
{
    page_size_init();
    page_table_config_init();

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));

    /* Level 2..N-1.  */
    for (i = v_l2_levels; i > 0; i--) {
        void **p = atomic_rcu_read(lp);

        if (p == NULL) {
            void *existing;

            if (!alloc) {
                return NULL;
            }
            p = g_new0(void *, V_L2_SIZE);
            existing = atomic_cmpxchg(lp, NULL, p);
            if (unlikely(existing)) {
                g_free(p);
                p = existing;
            }
        }

        lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
    }

    pd = atomic_rcu_read(lp);
    if (pd == NULL) {
        void *existing;

        if (!alloc) {
            return NULL;
        }
        pd = g_new0(PageDesc, V_L2_SIZE);
#ifndef CONFIG_USER_ONLY
        {
            int i;

            for (i = 0; i < V_L2_SIZE; i++) {
                qemu_spin_init(&pd[i].lock);
            }
        }
#endif
        existing = atomic_cmpxchg(lp, NULL, pd);
        if (unlikely(existing)) {
            g_free(pd);
            pd = existing;
        }
    }

    return pd + (index & (V_L2_SIZE - 1));
}
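
/*
 * Example walk (illustrative, using the hypothetical 48-bit configuration
 * from the page_table_config_init() example: v_l1_shift == 30 and
 * v_l2_levels == 2).  For a page index idx, the lookup visits:
 *
 *     l1_map[(idx >> 30) & (v_l1_size - 1)]          L1 entry
 *       -> level[(idx >> 20) & (V_L2_SIZE - 1)]      intermediate level 2
 *       -> level[(idx >> 10) & (V_L2_SIZE - 1)]      intermediate level 1
 *       -> pd[idx & (V_L2_SIZE - 1)]                 the PageDesc itself
 *
 * Missing interior nodes are allocated lazily and published with a
 * compare-and-swap, so concurrent readers under RCU never see a
 * half-initialized level.
 */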

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
                           PageDesc **ret_p2, tb_page_addr_t phys2, int alloc);

/* In user-mode page locks aren't used; mmap_lock is enough */
#ifdef CONFIG_USER_ONLY

#define assert_page_locked(pd) tcg_debug_assert(have_mmap_lock())

static inline void page_lock(PageDesc *pd)
{ }

static inline void page_unlock(PageDesc *pd)
{ }

static inline void page_lock_tb(const TranslationBlock *tb)
{ }

static inline void page_unlock_tb(const TranslationBlock *tb)
{ }

struct page_collection *
page_collection_lock(tb_page_addr_t start, tb_page_addr_t end)
{
    return NULL;
}

void page_collection_unlock(struct page_collection *set)
{ }
#else /* !CONFIG_USER_ONLY */

#ifdef CONFIG_DEBUG_TCG

static __thread GHashTable *ht_pages_locked_debug;

static void ht_pages_locked_debug_init(void)
{
    if (ht_pages_locked_debug) {
        return;
    }
    ht_pages_locked_debug = g_hash_table_new(NULL, NULL);
}

static bool page_is_locked(const PageDesc *pd)
{
    PageDesc *found;

    ht_pages_locked_debug_init();
    found = g_hash_table_lookup(ht_pages_locked_debug, pd);
    return !!found;
}

static void page_lock__debug(PageDesc *pd)
{
    ht_pages_locked_debug_init();
    g_assert(!page_is_locked(pd));
    g_hash_table_insert(ht_pages_locked_debug, pd, pd);
}

static void page_unlock__debug(const PageDesc *pd)
{
    bool removed;

    ht_pages_locked_debug_init();
    g_assert(page_is_locked(pd));
    removed = g_hash_table_remove(ht_pages_locked_debug, pd);
    g_assert(removed);
}

static void
do_assert_page_locked(const PageDesc *pd, const char *file, int line)
{
    if (unlikely(!page_is_locked(pd))) {
        error_report("assert_page_lock: PageDesc %p not locked @ %s:%d",
                     pd, file, line);
        abort();
    }
}

#define assert_page_locked(pd) do_assert_page_locked(pd, __FILE__, __LINE__)

void assert_no_pages_locked(void)
{
    ht_pages_locked_debug_init();
    g_assert(g_hash_table_size(ht_pages_locked_debug) == 0);
}

#else /* !CONFIG_DEBUG_TCG */

#define assert_page_locked(pd)

static inline void page_lock__debug(const PageDesc *pd)
{
}

static inline void page_unlock__debug(const PageDesc *pd)
{
}

#endif /* CONFIG_DEBUG_TCG */

static inline void page_lock(PageDesc *pd)
{
    page_lock__debug(pd);
    qemu_spin_lock(&pd->lock);
}

static inline void page_unlock(PageDesc *pd)
{
    qemu_spin_unlock(&pd->lock);
    page_unlock__debug(pd);
}

/* lock the page(s) of a TB in the correct acquisition order */
static inline void page_lock_tb(const TranslationBlock *tb)
{
    page_lock_pair(NULL, tb->page_addr[0], NULL, tb->page_addr[1], 0);
}

static inline void page_unlock_tb(const TranslationBlock *tb)
{
    PageDesc *p1 = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);

    page_unlock(p1);
    if (unlikely(tb->page_addr[1] != -1)) {
        PageDesc *p2 = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);

        if (p2 != p1) {
            page_unlock(p2);
        }
    }
}

static inline struct page_entry *
page_entry_new(PageDesc *pd, tb_page_addr_t index)
{
    struct page_entry *pe = g_malloc(sizeof(*pe));

    pe->index = index;
    pe->pd = pd;
    pe->locked = false;
    return pe;
}

static void page_entry_destroy(gpointer p)
{
    struct page_entry *pe = p;

    g_assert(pe->locked);
    page_unlock(pe->pd);
    g_free(pe);
}

/* returns false on success */
static bool page_entry_trylock(struct page_entry *pe)
{
    bool busy;

    busy = qemu_spin_trylock(&pe->pd->lock);
    if (!busy) {
        g_assert(!pe->locked);
        pe->locked = true;
        page_lock__debug(pe->pd);
    }
    return busy;
}

static void do_page_entry_lock(struct page_entry *pe)
{
    page_lock(pe->pd);
    g_assert(!pe->locked);
    pe->locked = true;
}

static gboolean page_entry_lock(gpointer key, gpointer value, gpointer data)
{
    struct page_entry *pe = value;

    do_page_entry_lock(pe);
    return FALSE;
}

static gboolean page_entry_unlock(gpointer key, gpointer value, gpointer data)
{
    struct page_entry *pe = value;

    if (pe->locked) {
        pe->locked = false;
        page_unlock(pe->pd);
    }
    return FALSE;
}

/*
 * Trylock a page, and if successful, add the page to a collection.
 * Returns true ("busy") if the page could not be locked; false otherwise.
 */
static bool page_trylock_add(struct page_collection *set, tb_page_addr_t addr)
{
    tb_page_addr_t index = addr >> TARGET_PAGE_BITS;
    struct page_entry *pe;
    PageDesc *pd;

    pe = g_tree_lookup(set->tree, &index);
    if (pe) {
        return false;
    }

    pd = page_find(index);
    if (pd == NULL) {
        return false;
    }

    pe = page_entry_new(pd, index);
    g_tree_insert(set->tree, &pe->index, pe);

    /*
     * If this is either (1) the first insertion or (2) a page whose index
     * is higher than any other so far, just lock the page and move on.
     */
    if (set->max == NULL || pe->index > set->max->index) {
        set->max = pe;
        do_page_entry_lock(pe);
        return false;
    }
    /*
     * Try to acquire out-of-order lock; if busy, return busy so that we acquire
     * locks in order.
     */
    return page_entry_trylock(pe);
}

static gint tb_page_addr_cmp(gconstpointer ap, gconstpointer bp, gpointer udata)
{
    tb_page_addr_t a = *(const tb_page_addr_t *)ap;
    tb_page_addr_t b = *(const tb_page_addr_t *)bp;

    if (a == b) {
        return 0;
    } else if (a < b) {
        return -1;
    }
    return 1;
}

/*
 * Lock a range of pages ([@start, @end)) as well as the pages of all
 * intersecting TBs.
 * Locking order: acquire locks in ascending order of page index.
 */
struct page_collection *
page_collection_lock(tb_page_addr_t start, tb_page_addr_t end)
{
    struct page_collection *set = g_malloc(sizeof(*set));
    tb_page_addr_t index;
    PageDesc *pd;

    start >>= TARGET_PAGE_BITS;
    end   >>= TARGET_PAGE_BITS;
    g_assert(start <= end);

    set->tree = g_tree_new_full(tb_page_addr_cmp, NULL, NULL,
                                page_entry_destroy);
    set->max = NULL;
    assert_no_pages_locked();

 retry:
    g_tree_foreach(set->tree, page_entry_lock, NULL);

    for (index = start; index <= end; index++) {
        TranslationBlock *tb;
        int n;

        pd = page_find(index);
        if (pd == NULL) {
            continue;
        }
        if (page_trylock_add(set, index << TARGET_PAGE_BITS)) {
            g_tree_foreach(set->tree, page_entry_unlock, NULL);
            goto retry;
        }
        assert_page_locked(pd);
        PAGE_FOR_EACH_TB(pd, tb, n) {
            if (page_trylock_add(set, tb->page_addr[0]) ||
                (tb->page_addr[1] != -1 &&
                 page_trylock_add(set, tb->page_addr[1]))) {
                /* drop all locks, and reacquire in order */
                g_tree_foreach(set->tree, page_entry_unlock, NULL);
                goto retry;
            }
        }
    }
    return set;
}
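
/*
 * Typical usage of the collection (a sketch; see the callers of
 * page_collection_lock() for the real thing):
 *
 *     struct page_collection *pages = page_collection_lock(start, end);
 *     ...invalidate or otherwise modify the TBs in [start, end)...
 *     page_collection_unlock(pages);
 *
 * Unlocking destroys the tree; page_entry_destroy() releases each page
 * lock as the corresponding entry is freed.
 */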

void page_collection_unlock(struct page_collection *set)
{
    /* entries are unlocked and freed via page_entry_destroy */
    g_tree_destroy(set->tree);
    g_free(set);
}

#endif /* !CONFIG_USER_ONLY */

static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
                           PageDesc **ret_p2, tb_page_addr_t phys2, int alloc)
{
    PageDesc *p1, *p2;
    tb_page_addr_t page1;
    tb_page_addr_t page2;

    assert_memory_lock();
    g_assert(phys1 != -1);

    page1 = phys1 >> TARGET_PAGE_BITS;
    page2 = phys2 >> TARGET_PAGE_BITS;

    p1 = page_find_alloc(page1, alloc);
    if (ret_p1) {
        *ret_p1 = p1;
    }
    if (likely(phys2 == -1)) {
        page_lock(p1);
        return;
    } else if (page1 == page2) {
        page_lock(p1);
        if (ret_p2) {
            *ret_p2 = p1;
        }
        return;
    }
    p2 = page_find_alloc(page2, alloc);
    if (ret_p2) {
        *ret_p2 = p2;
    }
    if (page1 < page2) {
        page_lock(p1);
        page_lock(p2);
    } else {
        page_lock(p2);
        page_lock(p1);
    }
}

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. This will change when a dedicated libc is used.  */
/* ??? 64-bit hosts ought to have no problem mmaping data outside the
   region in which the guest needs to run.  Revisit this.  */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

/* Minimum size of the code gen buffer.  This number is arbitrarily chosen,
   but not so small that we can't have a fair number of TBs live.  */
#define MIN_CODE_GEN_BUFFER_SIZE     (1 * MiB)

/* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
   indicated, this is constrained by the range of direct branches on the
   host cpu, as used by the TCG implementation of goto_tb.  */
#if defined(__x86_64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2 * GiB)
#elif defined(__sparc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2 * GiB)
#elif defined(__powerpc64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2 * GiB)
#elif defined(__powerpc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (32 * MiB)
#elif defined(__aarch64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2 * GiB)
#elif defined(__s390x__)
  /* We have a +/- 4GB range on the branches; leave some slop.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (3 * GiB)
#elif defined(__mips__)
  /* We have a 256MB branch region, but leave room to make sure the
     main executable is also within that region.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (128 * MiB)
#else
# define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32 * MiB)

#define DEFAULT_CODE_GEN_BUFFER_SIZE \
  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)

static inline size_t size_code_gen_buffer(size_t tb_size)
{
    /* Size the buffer.  */
    if (tb_size == 0) {
#ifdef USE_STATIC_CODE_GEN_BUFFER
        tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* ??? Needs adjustments.  */
        /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
           static buffer, we could size this on RESERVED_VA, on the text
           segment size of the executable, or continue to use the default.  */
        tb_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
    }
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
    }
    return tb_size;
}

#ifdef __mips__
/* In order to use J and JAL within the code_gen_buffer, we require
   that the buffer not cross a 256MB boundary.  */
static inline bool cross_256mb(void *addr, size_t size)
{
    return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful;
}

/* We weren't able to allocate a buffer without crossing that boundary,
   so make do with the larger portion of the buffer that doesn't cross.
   Returns the new base of the buffer, and adjusts code_gen_buffer_size.  */
static inline void *split_cross_256mb(void *buf1, size_t size1)
{
    void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful);
    size_t size2 = buf1 + size1 - buf2;

    size1 = buf2 - buf1;
    if (size1 < size2) {
        size1 = size2;
        buf1 = buf2;
    }

    tcg_ctx->code_gen_buffer_size = size1;
    return buf1;
}
#endif
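
/*
 * Worked example for the 256MB logic above (hypothetical addresses):
 * a buffer at 0x0ff00000 of size 0x00200000 ends at 0x10100000, so
 * cross_256mb() computes 0x0ff00000 ^ 0x10100000 == 0x1fe00000, which
 * has bits set above the low 28 -> the buffer crosses a boundary.
 * split_cross_256mb() then finds buf2 = 0x10000000, splitting the buffer
 * into 0x00100000 bytes below the boundary and 0x00100000 bytes above,
 * and keeps the larger half (here they tie, so the original base wins).
 */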

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));

static inline void *alloc_code_gen_buffer(void)
{
    void *buf = static_code_gen_buffer;
    void *end = static_code_gen_buffer + sizeof(static_code_gen_buffer);
    size_t size;

    /* page-align the beginning and end of the buffer */
    buf = QEMU_ALIGN_PTR_UP(buf, qemu_real_host_page_size);
    end = QEMU_ALIGN_PTR_DOWN(end, qemu_real_host_page_size);

    size = end - buf;

    /* Honor a command-line option limiting the size of the buffer.  */
    if (size > tcg_ctx->code_gen_buffer_size) {
        size = QEMU_ALIGN_DOWN(tcg_ctx->code_gen_buffer_size,
                               qemu_real_host_page_size);
    }
    tcg_ctx->code_gen_buffer_size = size;

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        buf = split_cross_256mb(buf, size);
        size = tcg_ctx->code_gen_buffer_size;
    }
#endif

    if (qemu_mprotect_rwx(buf, size)) {
        abort();
    }
    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);

    return buf;
}
#elif defined(_WIN32)
static inline void *alloc_code_gen_buffer(void)
{
    size_t size = tcg_ctx->code_gen_buffer_size;
    return VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT,
                        PAGE_EXECUTE_READWRITE);
}
#else
static inline void *alloc_code_gen_buffer(void)
{
    int prot = PROT_WRITE | PROT_READ | PROT_EXEC;
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    uintptr_t start = 0;
    size_t size = tcg_ctx->code_gen_buffer_size;
    void *buf;

    /* Constrain the position of the buffer based on the host cpu.
       Note that these addresses are chosen in concert with the
       addresses assigned in the relevant linker script file.  */
# if defined(__PIE__) || defined(__PIC__)
    /* Don't bother setting a preferred location if we're building
       a position-independent executable.  We're more likely to get
       an address near the main executable if we let the kernel
       choose the address.  */
# elif defined(__x86_64__) && defined(MAP_32BIT)
    /* Force the memory down into low memory with the executable.
       Leave the choice of exact location with the kernel.  */
    flags |= MAP_32BIT;
    /* Cannot expect to map more than 800MB in low memory.  */
    if (size > 800u * 1024 * 1024) {
        tcg_ctx->code_gen_buffer_size = size = 800u * 1024 * 1024;
    }
# elif defined(__sparc__)
    start = 0x40000000ul;
# elif defined(__s390x__)
    start = 0x90000000ul;
# elif defined(__mips__)
#  if _MIPS_SIM == _ABI64
    start = 0x128000000ul;
#  else
    start = 0x08000000ul;
#  endif
# endif

    buf = mmap((void *)start, size, prot, flags, -1, 0);
    if (buf == MAP_FAILED) {
        return NULL;
    }

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        /* Try again, with the original still mapped, to avoid re-acquiring
           that 256mb crossing.  This time don't specify an address.  */
        size_t size2;
        void *buf2 = mmap(NULL, size, prot, flags, -1, 0);
        switch ((int)(buf2 != MAP_FAILED)) {
        case 1:
            if (!cross_256mb(buf2, size)) {
                /* Success!  Use the new buffer.  */
                munmap(buf, size);
                break;
            }
            /* Failure.  Work with what we had.  */
            munmap(buf2, size);
            /* fallthru */
        default:
            /* Split the original buffer.  Free the smaller half.  */
            buf2 = split_cross_256mb(buf, size);
            size2 = tcg_ctx->code_gen_buffer_size;
            if (buf == buf2) {
                munmap(buf + size2, size - size2);
            } else {
                munmap(buf, size - size2);
            }
            size = size2;
            break;
        }
        buf = buf2;
    }
#endif

    /* Request large pages for the buffer.  */
    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);

    return buf;
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */

static inline void code_gen_alloc(size_t tb_size)
{
    tcg_ctx->code_gen_buffer_size = size_code_gen_buffer(tb_size);
    tcg_ctx->code_gen_buffer = alloc_code_gen_buffer();
    if (tcg_ctx->code_gen_buffer == NULL) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }
}

static bool tb_cmp(const void *ap, const void *bp)
{
    const TranslationBlock *a = ap;
    const TranslationBlock *b = bp;

    return a->pc == b->pc &&
        a->cs_base == b->cs_base &&
        a->flags == b->flags &&
        (tb_cflags(a) & CF_HASH_MASK) == (tb_cflags(b) & CF_HASH_MASK) &&
        a->trace_vcpu_dstate == b->trace_vcpu_dstate &&
        a->page_addr[0] == b->page_addr[0] &&
        a->page_addr[1] == b->page_addr[1];
}

static void tb_htable_init(void)
{
    unsigned int mode = QHT_MODE_AUTO_RESIZE;

    qht_init(&tb_ctx.htable, tb_cmp, CODE_GEN_HTABLE_SIZE, mode);
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    tcg_allowed = true;
    cpu_gen_init();
    page_init();
    tb_htable_init();
    code_gen_alloc(tb_size);
#if defined(CONFIG_SOFTMMU)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(tcg_ctx);
#endif
}

/* call with @p->lock held */
static inline void invalidate_page_bitmap(PageDesc *p)
{
    assert_page_locked(p);
#ifdef CONFIG_SOFTMMU
    g_free(p->code_bitmap);
    p->code_bitmap = NULL;
    p->code_write_count = 0;
#endif
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */
static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            page_lock(&pd[i]);
            pd[i].first_tb = (uintptr_t)NULL;
            invalidate_page_bitmap(pd + i);
            page_unlock(&pd[i]);
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i, l1_sz = v_l1_size;

    for (i = 0; i < l1_sz; i++) {
        page_flush_tb_1(v_l2_levels, l1_map + i);
    }
}

static gboolean tb_host_size_iter(gpointer key, gpointer value, gpointer data)
{
    const TranslationBlock *tb = value;
    size_t *size = data;

    *size += tb->tc.size;
    return false;
}

/* flush all the translation blocks */
static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
{
    bool did_flush = false;

    mmap_lock();
    /* If it has already been done on request of another CPU,
     * just retry.
     */
    if (tb_ctx.tb_flush_count != tb_flush_count.host_int) {
        goto done;
    }
    did_flush = true;

    if (DEBUG_TB_FLUSH_GATE) {
        size_t nb_tbs = tcg_nb_tbs();
        size_t host_size = 0;

        tcg_tb_foreach(tb_host_size_iter, &host_size);
        printf("qemu: flush code_size=%zu nb_tbs=%zu avg_tb_size=%zu\n",
               tcg_code_size(), nb_tbs, nb_tbs > 0 ? host_size / nb_tbs : 0);
    }

    CPU_FOREACH(cpu) {
        cpu_tb_jmp_cache_clear(cpu);
    }

    qht_reset_size(&tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
    page_flush_tb();

    tcg_region_reset_all();
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    atomic_mb_set(&tb_ctx.tb_flush_count, tb_ctx.tb_flush_count + 1);

done:
    mmap_unlock();
    if (did_flush) {
        qemu_plugin_flush_cb();
    }
}

void tb_flush(CPUState *cpu)
{
    if (tcg_enabled()) {
        unsigned tb_flush_count = atomic_mb_read(&tb_ctx.tb_flush_count);

        if (cpu_in_exclusive_context(cpu)) {
            do_tb_flush(cpu, RUN_ON_CPU_HOST_INT(tb_flush_count));
        } else {
            async_safe_run_on_cpu(cpu, do_tb_flush,
                                  RUN_ON_CPU_HOST_INT(tb_flush_count));
        }
    }
}

/*
 * Formerly ifdef DEBUG_TB_CHECK. These debug functions are user-mode-only,
 * so in order to prevent bit rot we compile them unconditionally in user-mode,
 * and let the optimizer get rid of them by wrapping their user-only callers
 * with if (DEBUG_TB_CHECK_GATE).
 */
#ifdef CONFIG_USER_ONLY

static void do_tb_invalidate_check(void *p, uint32_t hash, void *userp)
{
    TranslationBlock *tb = p;
    target_ulong addr = *(target_ulong *)userp;

    if (!(addr + TARGET_PAGE_SIZE <= tb->pc || addr >= tb->pc + tb->size)) {
        printf("ERROR invalidate: address=" TARGET_FMT_lx
               " PC=%08lx size=%04x\n", addr, (long)tb->pc, tb->size);
    }
}

/* verify that all the pages have correct rights for code
 *
 * Called with mmap_lock held.
 */
static void tb_invalidate_check(target_ulong address)
{
    address &= TARGET_PAGE_MASK;
    qht_iter(&tb_ctx.htable, do_tb_invalidate_check, &address);
}

static void do_tb_page_check(void *p, uint32_t hash, void *userp)
{
    TranslationBlock *tb = p;
    int flags1, flags2;

    flags1 = page_get_flags(tb->pc);
    flags2 = page_get_flags(tb->pc + tb->size - 1);
    if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
        printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
               (long)tb->pc, tb->size, flags1, flags2);
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    qht_iter(&tb_ctx.htable, do_tb_page_check, NULL);
}

#endif /* CONFIG_USER_ONLY */

/*
 * user-mode: call with mmap_lock held
 * !user-mode: call with @pd->lock held
 */
static inline void tb_page_remove(PageDesc *pd, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    uintptr_t *pprev;
    unsigned int n1;

    assert_page_locked(pd);
    pprev = &pd->first_tb;
    PAGE_FOR_EACH_TB(pd, tb1, n1) {
        if (tb1 == tb) {
            *pprev = tb1->page_next[n1];
            return;
        }
        pprev = &tb1->page_next[n1];
    }
    g_assert_not_reached();
}

/* remove @orig from its @n_orig-th jump list */
static inline void tb_remove_from_jmp_list(TranslationBlock *orig, int n_orig)
{
    uintptr_t ptr, ptr_locked;
    TranslationBlock *dest;
    TranslationBlock *tb;
    uintptr_t *pprev;
    int n;

    /* mark the LSB of jmp_dest[] so that no further jumps can be inserted */
    ptr = atomic_or_fetch(&orig->jmp_dest[n_orig], 1);
    dest = (TranslationBlock *)(ptr & ~1);
    if (dest == NULL) {
        return;
    }

    qemu_spin_lock(&dest->jmp_lock);
    /*
     * While acquiring the lock, the jump might have been removed if the
     * destination TB was invalidated; check again.
     */
    ptr_locked = atomic_read(&orig->jmp_dest[n_orig]);
    if (ptr_locked != ptr) {
        qemu_spin_unlock(&dest->jmp_lock);
        /*
         * The only possibility is that the jump was unlinked via
         * tb_jump_unlink(dest). Seeing here another destination would be a bug,
         * because we set the LSB above.
         */
        g_assert(ptr_locked == 1 && dest->cflags & CF_INVALID);
        return;
    }
    /*
     * We first acquired the lock, and since the destination pointer matches,
     * we know for sure that @orig is in the jmp list.
     */
    pprev = &dest->jmp_list_head;
    TB_FOR_EACH_JMP(dest, tb, n) {
        if (tb == orig && n == n_orig) {
            *pprev = tb->jmp_list_next[n];
            /* no need to set orig->jmp_dest[n]; setting the LSB was enough */
            qemu_spin_unlock(&dest->jmp_lock);
            return;
        }
        pprev = &tb->jmp_list_next[n];
    }
    g_assert_not_reached();
}
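
/*
 * Note on the LSB trick above (explanatory, mirroring the asserts in the
 * code): jmp_dest[n] doubles as a pointer and a "no more chaining" flag.
 * A value of ((uintptr_t)dest | 1) means "was chained to dest, now being
 * torn down", and a bare value of 1 means "fully unlinked and closed".
 * The chaining code only installs a destination into a slot that is still
 * NULL, so once the LSB is set the slot is permanently closed.
 */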

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    uintptr_t addr = (uintptr_t)(tb->tc.ptr + tb->jmp_reset_offset[n]);
    tb_set_jmp_target(tb, n, addr);
}

/* remove any jumps to the TB */
static inline void tb_jmp_unlink(TranslationBlock *dest)
{
    TranslationBlock *tb;
    int n;

    qemu_spin_lock(&dest->jmp_lock);

    TB_FOR_EACH_JMP(dest, tb, n) {
        tb_reset_jump(tb, n);
        atomic_and(&tb->jmp_dest[n], (uintptr_t)NULL | 1);
        /* No need to clear the list entry; setting the dest ptr is enough */
    }
    dest->jmp_list_head = (uintptr_t)NULL;

    qemu_spin_unlock(&dest->jmp_lock);
}

/*
 * In user-mode, call with mmap_lock held.
 * In !user-mode, if @rm_from_page_list is set, call with the TB's pages'
 * locks held.
 */
static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
{
    CPUState *cpu;
    PageDesc *p;
    uint32_t h;
    tb_page_addr_t phys_pc;

    assert_memory_lock();

    /* make sure no further incoming jumps will be chained to this TB */
    qemu_spin_lock(&tb->jmp_lock);
    atomic_set(&tb->cflags, tb->cflags | CF_INVALID);
    qemu_spin_unlock(&tb->jmp_lock);

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb_cflags(tb) & CF_HASH_MASK,
                     tb->trace_vcpu_dstate);
    if (!(tb->cflags & CF_NOCACHE) &&
        !qht_remove(&tb_ctx.htable, tb, h)) {
        return;
    }

    /* remove the TB from the page list */
    if (rm_from_page_list) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(p, tb);
        invalidate_page_bitmap(p);
        if (tb->page_addr[1] != -1) {
            p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
            tb_page_remove(p, tb);
            invalidate_page_bitmap(p);
        }
    }

    /* remove the TB from the per-CPU jump caches */
    h = tb_jmp_cache_hash_func(tb->pc);
    CPU_FOREACH(cpu) {
        if (atomic_read(&cpu->tb_jmp_cache[h]) == tb) {
            atomic_set(&cpu->tb_jmp_cache[h], NULL);
        }
    }

    /* suppress this TB from the two jump lists */
    tb_remove_from_jmp_list(tb, 0);
    tb_remove_from_jmp_list(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb_jmp_unlink(tb);

    atomic_set(&tcg_ctx->tb_phys_invalidate_count,
               tcg_ctx->tb_phys_invalidate_count + 1);
}

static void tb_phys_invalidate__locked(TranslationBlock *tb)
{
    do_tb_phys_invalidate(tb, true);
}

/* invalidate one TB
 *
 * Called with mmap_lock held in user-mode.
 */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    if (page_addr == -1 && tb->page_addr[0] != -1) {
        page_lock_tb(tb);
        do_tb_phys_invalidate(tb, true);
        page_unlock_tb(tb);
    } else {
        do_tb_phys_invalidate(tb, false);
    }
}

#ifdef CONFIG_SOFTMMU
/* call with @p->lock held */
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    assert_page_locked(p);
    p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE);

    PAGE_FOR_EACH_TB(p, tb, n) {
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE) {
                tb_end = TARGET_PAGE_SIZE;
            }
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start);
    }
}
#endif
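
/*
 * Example of the spanning case (illustrative, assuming 4 KiB target
 * pages): a TB whose pc sits at offset 0xff0 in page A with size 0x20
 * contributes bits [0xff0, 0x1000) to page A's bitmap (n == 0, tb_end
 * clamped to TARGET_PAGE_SIZE) and bits [0, 0x10) to page B's bitmap
 * (n == 1).
 */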

/* add the tb in the target page and protect it if necessary
 *
 * Called with mmap_lock held for user-mode emulation.
 * Called with @p->lock held in !user-mode.
 */
static inline void tb_page_add(PageDesc *p, TranslationBlock *tb,
                               unsigned int n, tb_page_addr_t page_addr)
{
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    assert_page_locked(p);

    tb->page_addr[n] = page_addr;
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != (uintptr_t)NULL;
#endif
    p->first_tb = (uintptr_t)tb | n;
    invalidate_page_bitmap(p);

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non-writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2) {
                continue;
            }
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
        if (DEBUG_TB_INVALIDATE_GATE) {
            printf("protecting code page: 0x" TB_PAGE_ADDR_FMT "\n", page_addr);
        }
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif
}

/* add a new TB and link it to the physical page tables. phys_page2 is
 * (-1) to indicate that only one page contains the TB.
 *
 * Called with mmap_lock held for user-mode emulation.
 *
 * Returns @tb, or a pointer to an existing TB that matches @tb.
 * Note that in !user-mode, another thread might have already added a TB
 * for the same block of guest code that @tb corresponds to. In that case,
 * the caller should discard the original @tb, and use instead the returned TB.
 */
static TranslationBlock *
tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
             tb_page_addr_t phys_page2)
{
    PageDesc *p;
    PageDesc *p2 = NULL;

    assert_memory_lock();

    if (phys_pc == -1) {
        /*
         * If the TB is not associated with a physical RAM page then
         * it must be a temporary one-insn TB, and we have nothing to do
         * except fill in the page_addr[] fields.
         */
        assert(tb->cflags & CF_NOCACHE);
        tb->page_addr[0] = tb->page_addr[1] = -1;
        return tb;
    }

    /*
     * Add the TB to the page list, acquiring first the pages' locks.
1616      * We keep the locks held until after inserting the TB in the hash table,
1617      * so that if the insertion fails we know for sure that the TBs are still
1618      * in the page descriptors.
1619      * Note that inserting into the hash table first isn't an option, since
1620      * we can only insert TBs that are fully initialized.
1621      */
1622     page_lock_pair(&p, phys_pc, &p2, phys_page2, 1);
1623     tb_page_add(p, tb, 0, phys_pc & TARGET_PAGE_MASK);
1624     if (p2) {
1625         tb_page_add(p2, tb, 1, phys_page2);
1626     } else {
1627         tb->page_addr[1] = -1;
1628     }
1629 
1630     if (!(tb->cflags & CF_NOCACHE)) {
1631         void *existing_tb = NULL;
1632         uint32_t h;
1633 
1634         /* add in the hash table */
1635         h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->cflags & CF_HASH_MASK,
1636                          tb->trace_vcpu_dstate);
1637         qht_insert(&tb_ctx.htable, tb, h, &existing_tb);
1638 
1639         /* remove TB from the page(s) if we couldn't insert it */
1640         if (unlikely(existing_tb)) {
1641             tb_page_remove(p, tb);
1642             invalidate_page_bitmap(p);
1643             if (p2) {
1644                 tb_page_remove(p2, tb);
1645                 invalidate_page_bitmap(p2);
1646             }
1647             tb = existing_tb;
1648         }
1649     }
1650 
1651     if (p2 && p2 != p) {
1652         page_unlock(p2);
1653     }
1654     page_unlock(p);
1655 
1656 #ifdef CONFIG_USER_ONLY
1657     if (DEBUG_TB_CHECK_GATE) {
1658         tb_page_check();
1659     }
1660 #endif
1661     return tb;
1662 }
1663 
1664 /* Called with mmap_lock held for user mode emulation.  */
1665 TranslationBlock *tb_gen_code(CPUState *cpu,
1666                               target_ulong pc, target_ulong cs_base,
1667                               uint32_t flags, int cflags)
1668 {
1669     CPUArchState *env = cpu->env_ptr;
1670     TranslationBlock *tb, *existing_tb;
1671     tb_page_addr_t phys_pc, phys_page2;
1672     target_ulong virt_page2;
1673     tcg_insn_unit *gen_code_buf;
1674     int gen_code_size, search_size, max_insns;
1675 #ifdef CONFIG_PROFILER
1676     TCGProfile *prof = &tcg_ctx->prof;
1677     int64_t ti;
1678 #endif
1679 
1680     assert_memory_lock();
1681 
1682     phys_pc = get_page_addr_code(env, pc);
1683 
1684     if (phys_pc == -1) {
1685         /* Generate a temporary TB with 1 insn in it */
1686         cflags &= ~CF_COUNT_MASK;
1687         cflags |= CF_NOCACHE | 1;
1688     }
1689 
1690     cflags &= ~CF_CLUSTER_MASK;
1691     cflags |= cpu->cluster_index << CF_CLUSTER_SHIFT;
1692 
1693     max_insns = cflags & CF_COUNT_MASK;
1694     if (max_insns == 0) {
1695         max_insns = CF_COUNT_MASK;
1696     }
1697     if (max_insns > TCG_MAX_INSNS) {
1698         max_insns = TCG_MAX_INSNS;
1699     }
1700     if (cpu->singlestep_enabled || singlestep) {
1701         max_insns = 1;
1702     }
1703 
1704  buffer_overflow:
1705     tb = tcg_tb_alloc(tcg_ctx);
1706     if (unlikely(!tb)) {
1707         /* flush must be done */
1708         tb_flush(cpu);
1709         mmap_unlock();
1710         /* Make the execution loop process the flush as soon as possible.  */
1711         cpu->exception_index = EXCP_INTERRUPT;
1712         cpu_loop_exit(cpu);
1713     }
1714 
1715     gen_code_buf = tcg_ctx->code_gen_ptr;
1716     tb->tc.ptr = gen_code_buf;
1717     tb->pc = pc;
1718     tb->cs_base = cs_base;
1719     tb->flags = flags;
1720     tb->cflags = cflags;
1721     tb->orig_tb = NULL;
1722     tb->trace_vcpu_dstate = *cpu->trace_dstate;
1723     tcg_ctx->tb_cflags = cflags;
1724  tb_overflow:
1725 
1726 #ifdef CONFIG_PROFILER
1727     /* includes aborted translations because of exceptions */
1728     atomic_set(&prof->tb_count1, prof->tb_count1 + 1);
1729     ti = profile_getclock();
1730 #endif
1731 
1732     tcg_func_start(tcg_ctx);
1733 
1734     tcg_ctx->cpu = env_cpu(env);
1735     gen_intermediate_code(cpu, tb, max_insns);
1736     tcg_ctx->cpu = NULL;
1737 
1738     trace_translate_block(tb, tb->pc, tb->tc.ptr);
1739 
1740     /* generate machine code */
1741     tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
1742     tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID;
1743     tcg_ctx->tb_jmp_reset_offset = tb->jmp_reset_offset;
1744     if (TCG_TARGET_HAS_direct_jump) {
1745         tcg_ctx->tb_jmp_insn_offset = tb->jmp_target_arg;
1746         tcg_ctx->tb_jmp_target_addr = NULL;
1747     } else {
1748         tcg_ctx->tb_jmp_insn_offset = NULL;
1749         tcg_ctx->tb_jmp_target_addr = tb->jmp_target_arg;
1750     }
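
    /*
     * With TCG_TARGET_HAS_direct_jump the backend patches the jump
     * instruction itself, so only the insn offsets are recorded
     * (tb_jmp_insn_offset); otherwise the jump loads its target from
     * memory and the address slots are recorded instead
     * (tb_jmp_target_addr).
     */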
1751 
1752 #ifdef CONFIG_PROFILER
1753     atomic_set(&prof->tb_count, prof->tb_count + 1);
1754     atomic_set(&prof->interm_time, prof->interm_time + profile_getclock() - ti);
1755     ti = profile_getclock();
1756 #endif
1757 
1758     gen_code_size = tcg_gen_code(tcg_ctx, tb);
1759     if (unlikely(gen_code_size < 0)) {
1760         switch (gen_code_size) {
1761         case -1:
1762             /*
1763              * Overflow of code_gen_buffer, or the current slice of it.
1764              *
1765              * TODO: We don't need to re-do gen_intermediate_code, nor
1766              * should we re-do the tcg optimization currently hidden
1767              * inside tcg_gen_code.  All that should be required is to
1768              * flush the TBs, allocate a new TB, re-initialize it per
1769              * above, and re-do the actual code generation.
1770              */
1771             goto buffer_overflow;
1772 
1773         case -2:
1774             /*
1775              * The code generated for the TranslationBlock is too large.
1776              * The maximum size allowed by the unwind info is 64k.
1777              * There may be stricter constraints from relocations
1778              * in the tcg backend.
1779              *
1780              * Try again with half as many insns as we attempted this time.
1781              * If a single insn overflows, there's a bug somewhere...
1782              */
1783             max_insns = tb->icount;
1784             assert(max_insns > 1);
1785             max_insns /= 2;
1786             goto tb_overflow;
1787 
1788         default:
1789             g_assert_not_reached();
1790         }
1791     }
1792     search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
1793     if (unlikely(search_size < 0)) {
1794         goto buffer_overflow;
1795     }
1796     tb->tc.size = gen_code_size;
1797 
1798 #ifdef CONFIG_PROFILER
1799     atomic_set(&prof->code_time, prof->code_time + profile_getclock() - ti);
1800     atomic_set(&prof->code_in_len, prof->code_in_len + tb->size);
1801     atomic_set(&prof->code_out_len, prof->code_out_len + gen_code_size);
1802     atomic_set(&prof->search_out_len, prof->search_out_len + search_size);
1803 #endif
1804 
1805 #ifdef DEBUG_DISAS
1806     if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
1807         qemu_log_in_addr_range(tb->pc)) {
1808         FILE *logfile = qemu_log_lock();
1809         qemu_log("OUT: [size=%d]\n", gen_code_size);
1810         if (tcg_ctx->data_gen_ptr) {
1811             size_t code_size = tcg_ctx->data_gen_ptr - tb->tc.ptr;
1812             size_t data_size = gen_code_size - code_size;
1813             size_t i;
1814 
1815             log_disas(tb->tc.ptr, code_size);
1816 
1817             for (i = 0; i < data_size; i += sizeof(tcg_target_ulong)) {
1818                 if (sizeof(tcg_target_ulong) == 8) {
1819                     qemu_log("0x%08" PRIxPTR ":  .quad  0x%016" PRIx64 "\n",
1820                              (uintptr_t)tcg_ctx->data_gen_ptr + i,
1821                              *(uint64_t *)(tcg_ctx->data_gen_ptr + i));
1822                 } else {
1823                     qemu_log("0x%08" PRIxPTR ":  .long  0x%08x\n",
1824                              (uintptr_t)tcg_ctx->data_gen_ptr + i,
1825                              *(uint32_t *)(tcg_ctx->data_gen_ptr + i));
1826                 }
1827             }
1828         } else {
1829             log_disas(tb->tc.ptr, gen_code_size);
1830         }
1831         qemu_log("\n");
1832         qemu_log_flush();
1833         qemu_log_unlock(logfile);
1834     }
1835 #endif
1836 
1837     atomic_set(&tcg_ctx->code_gen_ptr, (void *)
1838         ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
1839                  CODE_GEN_ALIGN));
1840 
1841     /* init jump list */
1842     qemu_spin_init(&tb->jmp_lock);
1843     tb->jmp_list_head = (uintptr_t)NULL;
1844     tb->jmp_list_next[0] = (uintptr_t)NULL;
1845     tb->jmp_list_next[1] = (uintptr_t)NULL;
1846     tb->jmp_dest[0] = (uintptr_t)NULL;
1847     tb->jmp_dest[1] = (uintptr_t)NULL;
1848 
1849     /* reset the direct jumps at the offsets recorded during tcg_gen_code() */
1850     if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
1851         tb_reset_jump(tb, 0);
1852     }
1853     if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
1854         tb_reset_jump(tb, 1);
1855     }
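
    /*
     * After the resets above both direct jumps fall through to the
     * epilogue; they are patched to point at their destination TBs only
     * when each exit is first taken (see tb_add_jump()).
     */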
1856 
1857     /* check next page if needed */
1858     virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
1859     phys_page2 = -1;
1860     if ((pc & TARGET_PAGE_MASK) != virt_page2) {
1861         phys_page2 = get_page_addr_code(env, virt_page2);
1862     }
1863     /*
1864      * No explicit memory barrier is required -- tb_link_page() makes the
1865      * TB visible in a consistent state.
1866      */
1867     existing_tb = tb_link_page(tb, phys_pc, phys_page2);
1868     /* if the TB already exists, discard what we just translated */
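    /*
     * Discarding is cheap because nothing has been published yet: we rewind
     * code_gen_ptr past both the generated code and the TranslationBlock
     * itself, which tcg_tb_alloc() carved out of the same buffer rounded up
     * to an icache line (hence the ROUND_UP below).
     */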
1869     if (unlikely(existing_tb != tb)) {
1870         uintptr_t orig_aligned = (uintptr_t)gen_code_buf;
1871 
1872         orig_aligned -= ROUND_UP(sizeof(*tb), qemu_icache_linesize);
1873         atomic_set(&tcg_ctx->code_gen_ptr, (void *)orig_aligned);
1874         return existing_tb;
1875     }
1876     tcg_tb_insert(tb);
1877     return tb;
1878 }
1879 
1880 /*
1881  * @p must be non-NULL.
1882  * user-mode: call with mmap_lock held.
1883  * !user-mode: call with all @pages locked.
1884  */
1885 static void
1886 tb_invalidate_phys_page_range__locked(struct page_collection *pages,
1887                                       PageDesc *p, tb_page_addr_t start,
1888                                       tb_page_addr_t end,
1889                                       uintptr_t retaddr)
1890 {
1891     TranslationBlock *tb;
1892     tb_page_addr_t tb_start, tb_end;
1893     int n;
1894 #ifdef TARGET_HAS_PRECISE_SMC
1895     CPUState *cpu = current_cpu;
1896     CPUArchState *env = NULL;
1897     bool current_tb_not_found = retaddr != 0;
1898     bool current_tb_modified = false;
1899     TranslationBlock *current_tb = NULL;
1900     target_ulong current_pc = 0;
1901     target_ulong current_cs_base = 0;
1902     uint32_t current_flags = 0;
1903 #endif /* TARGET_HAS_PRECISE_SMC */
1904 
1905     assert_page_locked(p);
1906 
1907 #if defined(TARGET_HAS_PRECISE_SMC)
1908     if (cpu != NULL) {
1909         env = cpu->env_ptr;
1910     }
1911 #endif
1912 
1913     /* we remove all the TBs in the range [start, end[ */
1914     /* XXX: see if in some cases it could be faster to invalidate all
1915        the code */
1916     PAGE_FOR_EACH_TB(p, tb, n) {
1917         assert_page_locked(p);
1918         /* NOTE: this is subtle as a TB may span two physical pages */
1919         if (n == 0) {
1920             /* NOTE: tb_end may be after the end of the page, but
1921                it is not a problem */
1922             tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1923             tb_end = tb_start + tb->size;
1924         } else {
1925             tb_start = tb->page_addr[1];
1926             tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1927         }
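        /*
         * Half-open interval overlap test: the TB occupies
         * [tb_start, tb_end[ and the write occupies [start, end[; they
         * intersect unless one lies entirely before the other.
         */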
1928         if (!(tb_end <= start || tb_start >= end)) {
1929 #ifdef TARGET_HAS_PRECISE_SMC
1930             if (current_tb_not_found) {
1931                 current_tb_not_found = false;
1932                 /* now we have a real cpu fault */
1933                 current_tb = tcg_tb_lookup(retaddr);
1934             }
1935             if (current_tb == tb &&
1936                 (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
1937                 /*
1938                  * If we are modifying the current TB, we must stop
1939                  * its execution. We could be more precise by checking
1940                  * that the modification is after the current PC, but it
1941                  * would require a specialized function to partially
1942                  * restore the CPU state.
1943                  */
1944                 current_tb_modified = true;
1945                 cpu_restore_state_from_tb(cpu, current_tb, retaddr, true);
1946                 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1947                                      &current_flags);
1948             }
1949 #endif /* TARGET_HAS_PRECISE_SMC */
1950             tb_phys_invalidate__locked(tb);
1951         }
1952     }
1953 #if !defined(CONFIG_USER_ONLY)
1954     /* if no translated code remains on this page, stop using slow writes */
1955     if (!p->first_tb) {
1956         invalidate_page_bitmap(p);
1957         tlb_unprotect_code(start);
1958     }
1959 #endif
1960 #ifdef TARGET_HAS_PRECISE_SMC
1961     if (current_tb_modified) {
1962         page_collection_unlock(pages);
1963         /* Force execution of one insn next time.  */
1964         cpu->cflags_next_tb = 1 | curr_cflags();
1965         mmap_unlock();
1966         cpu_loop_exit_noexc(cpu);
1967     }
1968 #endif
1969 }
1970 
1971 /*
1972  * Invalidate all TBs which intersect with the target physical address range
1973  * [start;end[. NOTE: start and end must refer to the *same* physical page.
1974  * This variant passes a zero retaddr to the
1975  * tb_invalidate_phys_page_range__locked() helper, so it never forces the
1976  * current TB to exit.
1977  *
1978  * Called with mmap_lock held for user-mode emulation
1979  */
1980 void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end)
1981 {
1982     struct page_collection *pages;
1983     PageDesc *p;
1984 
1985     assert_memory_lock();
1986 
1987     p = page_find(start >> TARGET_PAGE_BITS);
1988     if (p == NULL) {
1989         return;
1990     }
1991     pages = page_collection_lock(start, end);
1992     tb_invalidate_phys_page_range__locked(pages, p, start, end, 0);
1993     page_collection_unlock(pages);
1994 }
1995 
1996 /*
1997  * Invalidate all TBs which intersect with the target physical address range
1998  * [start;end[. NOTE: start and end may refer to *different* physical pages.
1999  * This variant passes a zero retaddr to the
2000  * tb_invalidate_phys_page_range__locked() helper, so it never forces the
2001  * current TB to exit.
2002  *
2003  * Called with mmap_lock held for user-mode emulation.
2004  */
2005 #ifdef CONFIG_SOFTMMU
2006 void tb_invalidate_phys_range(ram_addr_t start, ram_addr_t end)
2007 #else
2008 void tb_invalidate_phys_range(target_ulong start, target_ulong end)
2009 #endif
2010 {
2011     struct page_collection *pages;
2012     tb_page_addr_t next;
2013 
2014     assert_memory_lock();
2015 
2016     pages = page_collection_lock(start, end);
2017     for (next = (start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2018          start < end;
2019          start = next, next += TARGET_PAGE_SIZE) {
2020         PageDesc *pd = page_find(start >> TARGET_PAGE_BITS);
2021         tb_page_addr_t bound = MIN(next, end);
2022 
2023         if (pd == NULL) {
2024             continue;
2025         }
2026         tb_invalidate_phys_page_range__locked(pages, pd, start, bound, 0);
2027     }
2028     page_collection_unlock(pages);
2029 }
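
/*
 * For example (illustrative), a write of 6 bytes that straddles a page
 * boundary is handled above as two __locked calls: [start, next[ on the
 * first page and [next, end[ on the second, since TB lists are per page.
 */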
2030 
2031 #ifdef CONFIG_SOFTMMU
2032 /* len must be <= 8 and start must be a multiple of len.
2033  * Called via softmmu_template.h when code areas are written to, with the
2034  * iothread mutex not held.
2035  *
2036  * Call with all @pages in the range [@start, @start + len[ locked.
2037  */
2038 void tb_invalidate_phys_page_fast(struct page_collection *pages,
2039                                   tb_page_addr_t start, int len,
2040                                   uintptr_t retaddr)
2041 {
2042     PageDesc *p;
2043 
2044     assert_memory_lock();
2045 
2046     p = page_find(start >> TARGET_PAGE_BITS);
2047     if (!p) {
2048         return;
2049     }
2050 
2051     assert_page_locked(p);
2052     if (!p->code_bitmap &&
2053         ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
2054         build_page_bitmap(p);
2055     }
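
    /*
     * Each bit of code_bitmap marks one byte of this page that is covered
     * by at least one TB; a write touching only unmarked bytes cannot
     * invalidate anything and falls through without a range scan.
     */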
2056     if (p->code_bitmap) {
2057         unsigned int nr;
2058         unsigned long b;
2059 
2060         nr = start & ~TARGET_PAGE_MASK;
2061         b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
2062         if (b & ((1 << len) - 1)) {
2063             goto do_invalidate;
2064         }
2065     } else {
2066     do_invalidate:
2067         tb_invalidate_phys_page_range__locked(pages, p, start, start + len,
2068                                               retaddr);
2069     }
2070 }
2071 #else
2072 /* Called with mmap_lock held. If pc is not 0 then it indicates the
2073  * host PC of the faulting store instruction that caused this invalidate.
2074  * Returns true if the caller needs to abort execution of the current
2075  * TB (because it was modified by this store and the guest CPU has
2076  * precise-SMC semantics).
2077  */
2078 static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
2079 {
2080     TranslationBlock *tb;
2081     PageDesc *p;
2082     int n;
2083 #ifdef TARGET_HAS_PRECISE_SMC
2084     TranslationBlock *current_tb = NULL;
2085     CPUState *cpu = current_cpu;
2086     CPUArchState *env = NULL;
2087     int current_tb_modified = 0;
2088     target_ulong current_pc = 0;
2089     target_ulong current_cs_base = 0;
2090     uint32_t current_flags = 0;
2091 #endif
2092 
2093     assert_memory_lock();
2094 
2095     addr &= TARGET_PAGE_MASK;
2096     p = page_find(addr >> TARGET_PAGE_BITS);
2097     if (!p) {
2098         return false;
2099     }
2100 
2101 #ifdef TARGET_HAS_PRECISE_SMC
2102     if (p->first_tb && pc != 0) {
2103         current_tb = tcg_tb_lookup(pc);
2104     }
2105     if (cpu != NULL) {
2106         env = cpu->env_ptr;
2107     }
2108 #endif
2109     assert_page_locked(p);
2110     PAGE_FOR_EACH_TB(p, tb, n) {
2111 #ifdef TARGET_HAS_PRECISE_SMC
2112         if (current_tb == tb &&
2113             (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
2114             /*
2115              * If we are modifying the current TB, we must stop its
2116              * execution. We could be more precise by checking that the
2117              * modification is after the current PC, but it would require
2118              * a specialized function to partially restore the CPU state.
2119              */
2120             current_tb_modified = 1;
2121             cpu_restore_state_from_tb(cpu, current_tb, pc, true);
2122             cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
2123                                  &current_flags);
2124         }
2125 #endif /* TARGET_HAS_PRECISE_SMC */
2126         tb_phys_invalidate(tb, addr);
2127     }
2128     p->first_tb = (uintptr_t)NULL;
2129 #ifdef TARGET_HAS_PRECISE_SMC
2130     if (current_tb_modified) {
2131         /* Force execution of one insn next time.  */
2132         cpu->cflags_next_tb = 1 | curr_cflags();
2133         return true;
2134     }
2135 #endif
2136 
2137     return false;
2138 }
2139 #endif
2140 
2141 /* user-mode: call with mmap_lock held */
2142 void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr)
2143 {
2144     TranslationBlock *tb;
2145 
2146     assert_memory_lock();
2147 
2148     tb = tcg_tb_lookup(retaddr);
2149     if (tb) {
2150         /* We can use retranslation to find the PC.  */
2151         cpu_restore_state_from_tb(cpu, tb, retaddr, true);
2152         tb_phys_invalidate(tb, -1);
2153     } else {
2154         /* The exception probably happened in a helper.  The CPU state should
2155            have been saved before calling it. Fetch the PC from there.  */
2156         CPUArchState *env = cpu->env_ptr;
2157         target_ulong pc, cs_base;
2158         tb_page_addr_t addr;
2159         uint32_t flags;
2160 
2161         cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
2162         addr = get_page_addr_code(env, pc);
2163         if (addr != -1) {
2164             tb_invalidate_phys_range(addr, addr + 1);
2165         }
2166     }
2167 }
2168 
2169 #ifndef CONFIG_USER_ONLY
2170 /* In deterministic execution mode, an instruction that performs device I/O
2171  * must be the last one in its TB.
2172  *
2173  * Called by softmmu_template.h, with iothread mutex not held.
2174  */
2175 void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
2176 {
2177 #if defined(TARGET_MIPS) || defined(TARGET_SH4)
2178     CPUArchState *env = cpu->env_ptr;
2179 #endif
2180     TranslationBlock *tb;
2181     uint32_t n;
2182 
2183     tb = tcg_tb_lookup(retaddr);
2184     if (!tb) {
2185         cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
2186                   (void *)retaddr);
2187     }
2188     cpu_restore_state_from_tb(cpu, tb, retaddr, true);
2189 
2190     /* On MIPS and SH, delay slot instructions can only be restarted if
2191        they were already the first instruction in the TB.  If this is not
2192        the first instruction in a TB then re-execute the preceding
2193        branch.  */
2194     n = 1;
2195 #if defined(TARGET_MIPS)
2196     if ((env->hflags & MIPS_HFLAG_BMASK) != 0
2197         && env->active_tc.PC != tb->pc) {
2198         env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
2199         cpu_neg(cpu)->icount_decr.u16.low++;
2200         env->hflags &= ~MIPS_HFLAG_BMASK;
2201         n = 2;
2202     }
2203 #elif defined(TARGET_SH4)
2204     if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
2205         && env->pc != tb->pc) {
2206         env->pc -= 2;
2207         cpu_neg(cpu)->icount_decr.u16.low++;
2208         env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
2209         n = 2;
2210     }
2211 #endif
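
    /*
     * Rewinding the PC to the branch and incrementing icount_decr.u16.low
     * hands back the icount credit for one insn, so the retranslated TB
     * (branch plus delay slot, n == 2) replays both instructions.
     */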
2212 
2213     /* Generate a new TB executing the I/O insn.  */
2214     cpu->cflags_next_tb = curr_cflags() | CF_LAST_IO | n;
2215 
2216     if (tb_cflags(tb) & CF_NOCACHE) {
2217         if (tb->orig_tb) {
2218             /* Invalidate original TB if this TB was generated in
2219              * cpu_exec_nocache() */
2220             tb_phys_invalidate(tb->orig_tb, -1);
2221         }
2222         tcg_tb_remove(tb);
2223     }
2224 
2225     /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
2226      * the first in the TB) then we end up generating a whole new TB and
2227      * repeating the fault, which is horribly inefficient.
2228      * Better would be to execute just this insn uncached, or generate a
2229      * second new TB.
2230      */
2231     cpu_loop_exit_noexc(cpu);
2232 }
2233 
2234 static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
2235 {
2236     unsigned int i, i0 = tb_jmp_cache_hash_page(page_addr);
2237 
2238     for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
2239         atomic_set(&cpu->tb_jmp_cache[i0 + i], NULL);
2240     }
2241 }
2242 
2243 void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
2244 {
2245     /* Discard jump cache entries for any TB which might overlap the flushed
2246        page; a TB can span two pages, so clear the preceding page too.  */
2247     tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
2248     tb_jmp_cache_clear_page(cpu, addr);
2249 }
2250 
2251 static void print_qht_statistics(struct qht_stats hst)
2252 {
2253     uint32_t hgram_opts;
2254     size_t hgram_bins;
2255     char *hgram;
2256 
2257     if (!hst.head_buckets) {
2258         return;
2259     }
2260     qemu_printf("TB hash buckets     %zu/%zu (%0.2f%% head buckets used)\n",
2261                 hst.used_head_buckets, hst.head_buckets,
2262                 (double)hst.used_head_buckets / hst.head_buckets * 100);
2263 
2264     hgram_opts =  QDIST_PR_BORDER | QDIST_PR_LABELS;
2265     hgram_opts |= QDIST_PR_100X   | QDIST_PR_PERCENT;
2266     if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) {
2267         hgram_opts |= QDIST_PR_NODECIMAL;
2268     }
2269     hgram = qdist_pr(&hst.occupancy, 10, hgram_opts);
2270     qemu_printf("TB hash occupancy   %0.2f%% avg chain occ. Histogram: %s\n",
2271                 qdist_avg(&hst.occupancy) * 100, hgram);
2272     g_free(hgram);
2273 
2274     hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
2275     hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain);
2276     if (hgram_bins > 10) {
2277         hgram_bins = 10;
2278     } else {
2279         hgram_bins = 0;
2280         hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE;
2281     }
2282     hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts);
2283     qemu_printf("TB hash avg chain   %0.3f buckets. Histogram: %s\n",
2284                 qdist_avg(&hst.chain), hgram);
2285     g_free(hgram);
2286 }
2287 
2288 struct tb_tree_stats {
2289     size_t nb_tbs;
2290     size_t host_size;
2291     size_t target_size;
2292     size_t max_target_size;
2293     size_t direct_jmp_count;
2294     size_t direct_jmp2_count;
2295     size_t cross_page;
2296 };
2297 
2298 static gboolean tb_tree_stats_iter(gpointer key, gpointer value, gpointer data)
2299 {
2300     const TranslationBlock *tb = value;
2301     struct tb_tree_stats *tst = data;
2302 
2303     tst->nb_tbs++;
2304     tst->host_size += tb->tc.size;
2305     tst->target_size += tb->size;
2306     if (tb->size > tst->max_target_size) {
2307         tst->max_target_size = tb->size;
2308     }
2309     if (tb->page_addr[1] != -1) {
2310         tst->cross_page++;
2311     }
2312     if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
2313         tst->direct_jmp_count++;
2314         if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
2315             tst->direct_jmp2_count++;
2316         }
2317     }
2318     return false;
2319 }
2320 
2321 void dump_exec_info(void)
2322 {
2323     struct tb_tree_stats tst = {};
2324     struct qht_stats hst;
2325     size_t nb_tbs, flush_full, flush_part, flush_elide;
2326 
2327     tcg_tb_foreach(tb_tree_stats_iter, &tst);
2328     nb_tbs = tst.nb_tbs;
2329     /* XXX: avoid using doubles ? */
2330     qemu_printf("Translation buffer state:\n");
2331     /*
2332      * Report total code size including the padding and TB structs;
2333      * otherwise users might think "-tb-size" is not honoured.
2334      * For avg host size we use the precise numbers from tb_tree_stats though.
2335      */
2336     qemu_printf("gen code size       %zu/%zu\n",
2337                 tcg_code_size(), tcg_code_capacity());
2338     qemu_printf("TB count            %zu\n", nb_tbs);
2339     qemu_printf("TB avg target size  %zu max=%zu bytes\n",
2340                 nb_tbs ? tst.target_size / nb_tbs : 0,
2341                 tst.max_target_size);
2342     qemu_printf("TB avg host size    %zu bytes (expansion ratio: %0.1f)\n",
2343                 nb_tbs ? tst.host_size / nb_tbs : 0,
2344                 tst.target_size ? (double)tst.host_size / tst.target_size : 0);
2345     qemu_printf("cross page TB count %zu (%zu%%)\n", tst.cross_page,
2346                 nb_tbs ? (tst.cross_page * 100) / nb_tbs : 0);
2347     qemu_printf("direct jump count   %zu (%zu%%) (2 jumps=%zu %zu%%)\n",
2348                 tst.direct_jmp_count,
2349                 nb_tbs ? (tst.direct_jmp_count * 100) / nb_tbs : 0,
2350                 tst.direct_jmp2_count,
2351                 nb_tbs ? (tst.direct_jmp2_count * 100) / nb_tbs : 0);
2352 
2353     qht_statistics_init(&tb_ctx.htable, &hst);
2354     print_qht_statistics(hst);
2355     qht_statistics_destroy(&hst);
2356 
2357     qemu_printf("\nStatistics:\n");
2358     qemu_printf("TB flush count      %u\n",
2359                 atomic_read(&tb_ctx.tb_flush_count));
2360     qemu_printf("TB invalidate count %zu\n",
2361                 tcg_tb_phys_invalidate_count());
2362 
2363     tlb_flush_counts(&flush_full, &flush_part, &flush_elide);
2364     qemu_printf("TLB full flushes    %zu\n", flush_full);
2365     qemu_printf("TLB partial flushes %zu\n", flush_part);
2366     qemu_printf("TLB elided flushes  %zu\n", flush_elide);
2367     tcg_dump_info();
2368 }
2369 
2370 void dump_opcount_info(void)
2371 {
2372     tcg_dump_op_count();
2373 }
2374 
2375 #else /* CONFIG_USER_ONLY */
2376 
2377 void cpu_interrupt(CPUState *cpu, int mask)
2378 {
2379     g_assert(qemu_mutex_iothread_locked());
2380     cpu->interrupt_request |= mask;
2381     atomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1);
2382 }
2383 
2384 /*
2385  * Walks guest process memory "regions" one by one
2386  * and calls callback function 'fn' for each region.
2387  */
2388 struct walk_memory_regions_data {
2389     walk_memory_regions_fn fn;
2390     void *priv;
2391     target_ulong start;
2392     int prot;
2393 };
2394 
2395 static int walk_memory_regions_end(struct walk_memory_regions_data *data,
2396                                    target_ulong end, int new_prot)
2397 {
2398     if (data->start != -1u) {
2399         int rc = data->fn(data->priv, data->start, end, data->prot);
2400         if (rc != 0) {
2401             return rc;
2402         }
2403     }
2404 
2405     data->start = (new_prot ? end : -1u);
2406     data->prot = new_prot;
2407 
2408     return 0;
2409 }
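
/*
 * The walk coalesces runs of pages with identical protection into single
 * regions: data->start/data->prot track the run in progress, and the
 * callback fires only when the protection changes (new_prot == 0 marks a
 * hole, which ends the run without starting a new one).
 */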
2410 
2411 static int walk_memory_regions_1(struct walk_memory_regions_data *data,
2412                                  target_ulong base, int level, void **lp)
2413 {
2414     target_ulong pa;
2415     int i, rc;
2416 
2417     if (*lp == NULL) {
2418         return walk_memory_regions_end(data, base, 0);
2419     }
2420 
2421     if (level == 0) {
2422         PageDesc *pd = *lp;
2423 
2424         for (i = 0; i < V_L2_SIZE; ++i) {
2425             int prot = pd[i].flags;
2426 
2427             pa = base | (i << TARGET_PAGE_BITS);
2428             if (prot != data->prot) {
2429                 rc = walk_memory_regions_end(data, pa, prot);
2430                 if (rc != 0) {
2431                     return rc;
2432                 }
2433             }
2434         }
2435     } else {
2436         void **pp = *lp;
2437 
2438         for (i = 0; i < V_L2_SIZE; ++i) {
2439             pa = base | ((target_ulong)i <<
2440                 (TARGET_PAGE_BITS + V_L2_BITS * level));
2441             rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2442             if (rc != 0) {
2443                 return rc;
2444             }
2445         }
2446     }
2447 
2448     return 0;
2449 }
2450 
2451 int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2452 {
2453     struct walk_memory_regions_data data;
2454     uintptr_t i, l1_sz = v_l1_size;
2455 
2456     data.fn = fn;
2457     data.priv = priv;
2458     data.start = -1u;
2459     data.prot = 0;
2460 
2461     for (i = 0; i < l1_sz; i++) {
2462         target_ulong base = i << (v_l1_shift + TARGET_PAGE_BITS);
2463         int rc = walk_memory_regions_1(&data, base, v_l2_levels, l1_map + i);
2464         if (rc != 0) {
2465             return rc;
2466         }
2467     }
2468 
2469     return walk_memory_regions_end(&data, 0, 0);
2470 }
2471 
2472 static int dump_region(void *priv, target_ulong start,
2473     target_ulong end, unsigned long prot)
2474 {
2475     FILE *f = (FILE *)priv;
2476 
2477     (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx
2478         " "TARGET_FMT_lx" %c%c%c\n",
2479         start, end, end - start,
2480         ((prot & PAGE_READ) ? 'r' : '-'),
2481         ((prot & PAGE_WRITE) ? 'w' : '-'),
2482         ((prot & PAGE_EXEC) ? 'x' : '-'));
2483 
2484     return 0;
2485 }
2486 
2487 /* dump memory mappings */
2488 void page_dump(FILE *f)
2489 {
2490     const int length = sizeof(target_ulong) * 2;
2491     (void) fprintf(f, "%-*s %-*s %-*s %s\n",
2492             length, "start", length, "end", length, "size", "prot");
2493     walk_memory_regions(f, dump_region);
2494 }
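
/*
 * Sample page_dump() output (values are illustrative only):
 *
 *     start            end              size             prot
 *     0000000000400000-0000000000452000 0000000000052000 r-x
 */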
2495 
2496 int page_get_flags(target_ulong address)
2497 {
2498     PageDesc *p;
2499 
2500     p = page_find(address >> TARGET_PAGE_BITS);
2501     if (!p) {
2502         return 0;
2503     }
2504     return p->flags;
2505 }
2506 
2507 /* Modify the flags of a page and invalidate the code if necessary.
2508    The flag PAGE_WRITE_ORG is positioned automatically depending
2509    on PAGE_WRITE.  The mmap_lock should already be held.  */
2510 void page_set_flags(target_ulong start, target_ulong end, int flags)
2511 {
2512     target_ulong addr, len;
2513 
2514     /* This function should never be called with addresses outside the
2515        guest address space.  If this assert fires, it probably indicates
2516        a missing call to h2g_valid.  */
2517 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2518     assert(end <= ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2519 #endif
2520     assert(start < end);
2521     assert_memory_lock();
2522 
2523     start = start & TARGET_PAGE_MASK;
2524     end = TARGET_PAGE_ALIGN(end);
2525 
2526     if (flags & PAGE_WRITE) {
2527         flags |= PAGE_WRITE_ORG;
2528     }
2529 
2530     for (addr = start, len = end - start;
2531          len != 0;
2532          len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2533         PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2534 
2535         /* If the page becomes writable and contains translated code,
2536            we invalidate that code.  */
2537         if (!(p->flags & PAGE_WRITE) &&
2538             (flags & PAGE_WRITE) &&
2539             p->first_tb) {
2540             tb_invalidate_phys_page(addr, 0);
2541         }
2542         p->flags = flags;
2543     }
2544 }
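
/*
 * Illustrative use, e.g. from the target mmap/mprotect emulation (the
 * actual callers live outside this file):
 *
 *     page_set_flags(start, start + len,
 *                    PAGE_VALID | PAGE_READ | PAGE_WRITE);
 *
 * Granting PAGE_WRITE to a page holding TBs triggers the invalidation
 * above.
 */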
2545 
2546 int page_check_range(target_ulong start, target_ulong len, int flags)
2547 {
2548     PageDesc *p;
2549     target_ulong end;
2550     target_ulong addr;
2551 
2552     /* This function should never be called with addresses outside the
2553        guest address space.  If this assert fires, it probably indicates
2554        a missing call to h2g_valid.  */
2555 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2556     assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2557 #endif
2558 
2559     if (len == 0) {
2560         return 0;
2561     }
2562     if (start + len - 1 < start) {
2563         /* We've wrapped around.  */
2564         return -1;
2565     }
2566 
2567     /* must do before we lose bits in the next step */
2568     end = TARGET_PAGE_ALIGN(start + len);
2569     start = start & TARGET_PAGE_MASK;
2570 
2571     for (addr = start, len = end - start;
2572          len != 0;
2573          len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2574         p = page_find(addr >> TARGET_PAGE_BITS);
2575         if (!p) {
2576             return -1;
2577         }
2578         if (!(p->flags & PAGE_VALID)) {
2579             return -1;
2580         }
2581 
2582         if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
2583             return -1;
2584         }
2585         if (flags & PAGE_WRITE) {
2586             if (!(p->flags & PAGE_WRITE_ORG)) {
2587                 return -1;
2588             }
2589             /* unprotect the page if it was put read-only because it
2590                contains translated code */
2591             if (!(p->flags & PAGE_WRITE)) {
2592                 if (!page_unprotect(addr, 0)) {
2593                     return -1;
2594                 }
2595             }
2596         }
2597     }
2598     return 0;
2599 }
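
/*
 * Illustrative use: syscall emulation can validate a guest buffer up front
 * (hypothetical caller):
 *
 *     if (page_check_range(guest_addr, len, PAGE_WRITE) < 0) {
 *         return -TARGET_EFAULT;
 *     }
 */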
2600 
2601 /* called from signal handler: invalidate the code and unprotect the
2602  * page. Return 0 if the fault was not handled, 1 if it was handled,
2603  * and 2 if it was handled but the caller must cause the TB to be
2604  * immediately exited. (We can only return 2 if the 'pc' argument is
2605  * non-zero.)
2606  */
2607 int page_unprotect(target_ulong address, uintptr_t pc)
2608 {
2609     unsigned int prot;
2610     bool current_tb_invalidated;
2611     PageDesc *p;
2612     target_ulong host_start, host_end, addr;
2613 
2614     /* Technically this isn't safe inside a signal handler.  However we
2615        know this only ever happens in a synchronous SEGV handler, so in
2616        practice it seems to be ok.  */
2617     mmap_lock();
2618 
2619     p = page_find(address >> TARGET_PAGE_BITS);
2620     if (!p) {
2621         mmap_unlock();
2622         return 0;
2623     }
2624 
2625     /* if the page was originally writable (PAGE_WRITE_ORG), then we
2626        change its protection back to writable */
2627     if (p->flags & PAGE_WRITE_ORG) {
2628         current_tb_invalidated = false;
2629         if (p->flags & PAGE_WRITE) {
2630             /* If the page is actually marked WRITE then assume this is because
2631              * this thread raced with another one which got here first and
2632              * set the page to PAGE_WRITE and did the TB invalidate for us.
2633              */
2634 #ifdef TARGET_HAS_PRECISE_SMC
2635             TranslationBlock *current_tb = tcg_tb_lookup(pc);
2636             if (current_tb) {
2637                 current_tb_invalidated = tb_cflags(current_tb) & CF_INVALID;
2638             }
2639 #endif
2640         } else {
2641             host_start = address & qemu_host_page_mask;
2642             host_end = host_start + qemu_host_page_size;
2643 
2644             prot = 0;
2645             for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
2646                 p = page_find(addr >> TARGET_PAGE_BITS);
2647                 p->flags |= PAGE_WRITE;
2648                 prot |= p->flags;
2649 
2650                 /* and since the content will be modified, we must invalidate
2651                    the corresponding translated code. */
2652                 current_tb_invalidated |= tb_invalidate_phys_page(addr, pc);
2653 #ifdef CONFIG_USER_ONLY
2654                 if (DEBUG_TB_CHECK_GATE) {
2655                     tb_invalidate_check(addr);
2656                 }
2657 #endif
2658             }
2659             mprotect((void *)g2h(host_start), qemu_host_page_size,
2660                      prot & PAGE_BITS);
2661         }
2662         mmap_unlock();
2663         /* If current TB was invalidated return to main loop */
2664         return current_tb_invalidated ? 2 : 1;
2665     }
2666     mmap_unlock();
2667     return 0;
2668 }
2669 #endif /* CONFIG_USER_ONLY */
2670 
2671 /* This is a wrapper for common code that cannot use CONFIG_SOFTMMU */
2672 void tcg_flush_softmmu_tlb(CPUState *cs)
2673 {
2674 #ifdef CONFIG_SOFTMMU
2675     tlb_flush(cs);
2676 #endif
2677 }
2678