xref: /openbmc/qemu/accel/tcg/translate-all.c (revision 4d9c7c84)
1 /*
2  *  Host code generation
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include "qemu/osdep.h"
21 #include "qemu/units.h"
22 #include "qemu-common.h"
23 
24 #define NO_CPU_IO_DEFS
25 #include "cpu.h"
26 #include "trace.h"
27 #include "disas/disas.h"
28 #include "exec/exec-all.h"
29 #include "tcg/tcg.h"
30 #if defined(CONFIG_USER_ONLY)
31 #include "qemu.h"
32 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
33 #include <sys/param.h>
34 #if __FreeBSD_version >= 700104
35 #define HAVE_KINFO_GETVMMAP
36 #define sigqueue sigqueue_freebsd  /* avoid redefinition */
37 #include <sys/proc.h>
38 #include <machine/profile.h>
39 #define _KERNEL
40 #include <sys/user.h>
41 #undef _KERNEL
42 #undef sigqueue
43 #include <libutil.h>
44 #endif
45 #endif
46 #else
47 #include "exec/ram_addr.h"
48 #endif
49 
50 #include "exec/cputlb.h"
51 #include "exec/tb-hash.h"
52 #include "translate-all.h"
53 #include "qemu/bitmap.h"
54 #include "qemu/error-report.h"
55 #include "qemu/qemu-print.h"
56 #include "qemu/timer.h"
57 #include "qemu/main-loop.h"
58 #include "exec/log.h"
59 #include "sysemu/cpus.h"
60 #include "sysemu/tcg.h"
61 
62 /* #define DEBUG_TB_INVALIDATE */
63 /* #define DEBUG_TB_FLUSH */
64 /* make various TB consistency checks */
65 /* #define DEBUG_TB_CHECK */
66 
67 #ifdef DEBUG_TB_INVALIDATE
68 #define DEBUG_TB_INVALIDATE_GATE 1
69 #else
70 #define DEBUG_TB_INVALIDATE_GATE 0
71 #endif
72 
73 #ifdef DEBUG_TB_FLUSH
74 #define DEBUG_TB_FLUSH_GATE 1
75 #else
76 #define DEBUG_TB_FLUSH_GATE 0
77 #endif
78 
79 #if !defined(CONFIG_USER_ONLY)
80 /* TB consistency checks only implemented for usermode emulation.  */
81 #undef DEBUG_TB_CHECK
82 #endif
83 
84 #ifdef DEBUG_TB_CHECK
85 #define DEBUG_TB_CHECK_GATE 1
86 #else
87 #define DEBUG_TB_CHECK_GATE 0
88 #endif
89 
90 /* Access to the various translation structures needs to be serialised
91  * via locks for consistency.
92  * In user-mode emulation, access to the memory-related structures is
93  * protected by mmap_lock.
94  * In !user-mode we use per-page locks.
95  */
96 #ifdef CONFIG_SOFTMMU
97 #define assert_memory_lock()
98 #else
99 #define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
100 #endif
101 
102 #define SMC_BITMAP_USE_THRESHOLD 10
103 
104 typedef struct PageDesc {
105     /* list of TBs intersecting this ram page */
106     uintptr_t first_tb;
107 #ifdef CONFIG_SOFTMMU
108     /* to optimize handling of self-modifying code, we count the writes
109        to a given page; past a threshold we build a code bitmap for it */
110     unsigned long *code_bitmap;
111     unsigned int code_write_count;
112 #else
113     unsigned long flags;
114 #endif
115 #ifndef CONFIG_USER_ONLY
116     QemuSpin lock;
117 #endif
118 } PageDesc;
119 
120 /**
121  * struct page_entry - page descriptor entry
122  * @pd:     pointer to the &struct PageDesc of the page this entry represents
123  * @index:  page index of the page
124  * @locked: whether the page is locked
125  *
126  * This struct helps us keep track of the locked state of a page, without
127  * bloating &struct PageDesc.
128  *
129  * A page lock protects accesses to all fields of &struct PageDesc.
130  *
131  * See also: &struct page_collection.
132  */
133 struct page_entry {
134     PageDesc *pd;
135     tb_page_addr_t index;
136     bool locked;
137 };
138 
139 /**
140  * struct page_collection - tracks a set of pages (i.e. &struct page_entry's)
141  * @tree:   Binary search tree (BST) of the pages, with key == page index
142  * @max:    Pointer to the page in @tree with the highest page index
143  *
144  * To avoid deadlock we lock pages in ascending order of page index.
145  * When operating on a set of pages, we need to keep track of them so that
146  * we can lock them in order and also unlock them later. For this we collect
147  * pages (i.e. &struct page_entry's) in a binary search @tree. Given that the
148  * @tree implementation we use does not provide an O(1) operation to obtain the
149  * highest-ranked element, we use @max to keep track of the inserted page
150  * with the highest index. This is valuable because if a page is not in
151  * the tree and its index is higher than @max's, then we can lock it
152  * without breaking the locking order rule.
153  *
154  * Note on naming: 'struct page_set' would be shorter, but we already have a few
155  * page_set_*() helpers, so page_collection is used instead to avoid confusion.
156  *
157  * See also: page_collection_lock().
158  */
159 struct page_collection {
160     GTree *tree;
161     struct page_entry *max;
162 };
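
/*
 * Illustrative sketch (not part of the build; the function name is
 * hypothetical): typical use of a page collection when operating on
 * translated code in a physical range, via page_collection_lock() and
 * page_collection_unlock() defined below.
 */
#if 0
static void example_operate_on_range(tb_page_addr_t start, tb_page_addr_t end)
{
    struct page_collection *pages = page_collection_lock(start, end);

    /* ... modify or invalidate the TBs intersecting [start, end) ... */

    page_collection_unlock(pages);
}
#endif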
163 
164 /* list iterators for lists of tagged pointers in TranslationBlock */
165 #define TB_FOR_EACH_TAGGED(head, tb, n, field)                          \
166     for (n = (head) & 1, tb = (TranslationBlock *)((head) & ~1);        \
167          tb; tb = (TranslationBlock *)tb->field[n], n = (uintptr_t)tb & 1, \
168              tb = (TranslationBlock *)((uintptr_t)tb & ~1))
169 
170 #define PAGE_FOR_EACH_TB(pagedesc, tb, n)                       \
171     TB_FOR_EACH_TAGGED((pagedesc)->first_tb, tb, n, page_next)
172 
173 #define TB_FOR_EACH_JMP(head_tb, tb, n)                                 \
174     TB_FOR_EACH_TAGGED((head_tb)->jmp_list_head, tb, n, jmp_list_next)
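
/*
 * Illustrative sketch (not part of the build; the helper name is
 * hypothetical): these lists pack a TranslationBlock pointer and a 1-bit
 * tag into a single uintptr_t, relying on TranslationBlock being aligned
 * to at least 2 bytes.  The tag records which of the TB's (up to) two
 * pages, or which jump slot, the link belongs to.
 */
#if 0
static void example_tagged_pointer(TranslationBlock *tb)
{
    uintptr_t tagged = (uintptr_t)tb | 1;                    /* tag with n = 1 */
    unsigned int n = tagged & 1;                             /* recover the tag */
    TranslationBlock *p = (TranslationBlock *)(tagged & ~1); /* recover the TB */

    g_assert(n == 1 && p == tb);
}
#endif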
175 
176 /*
177  * In system mode we want L1_MAP to be based on ram offsets,
178  * while in user mode we want it to be based on virtual addresses.
179  *
180  * TODO: For user mode, see the caveat re host vs guest virtual
181  * address spaces near GUEST_ADDR_MAX.
182  */
183 #if !defined(CONFIG_USER_ONLY)
184 #if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
185 # define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
186 #else
187 # define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
188 #endif
189 #else
190 # define L1_MAP_ADDR_SPACE_BITS  MIN(HOST_LONG_BITS, TARGET_ABI_BITS)
191 #endif
192 
193 /* Size of the L2 (and L3, etc) page tables.  */
194 #define V_L2_BITS 10
195 #define V_L2_SIZE (1 << V_L2_BITS)
196 
197 /* Make sure all possible CPU event bits fit in tb->trace_vcpu_dstate */
198 QEMU_BUILD_BUG_ON(CPU_TRACE_DSTATE_MAX_EVENTS >
199                   sizeof_field(TranslationBlock, trace_vcpu_dstate)
200                   * BITS_PER_BYTE);
201 
202 /*
203  * L1 Mapping properties
204  */
205 static int v_l1_size;
206 static int v_l1_shift;
207 static int v_l2_levels;
208 
209 /* The bottom level has pointers to PageDesc, and is indexed by
210  * anything from 4 to (V_L2_BITS + 3) bits, depending on target page size.
211  */
212 #define V_L1_MIN_BITS 4
213 #define V_L1_MAX_BITS (V_L2_BITS + 3)
214 #define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS)
215 
216 static void *l1_map[V_L1_MAX_SIZE];
217 
218 /* code generation context */
219 TCGContext tcg_init_ctx;
220 __thread TCGContext *tcg_ctx;
221 TBContext tb_ctx;
222 bool parallel_cpus;
223 
224 static void page_table_config_init(void)
225 {
226     uint32_t v_l1_bits;
227 
228     assert(TARGET_PAGE_BITS);
229     /* The bits remaining after N lower levels of page tables.  */
230     v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS;
231     if (v_l1_bits < V_L1_MIN_BITS) {
232         v_l1_bits += V_L2_BITS;
233     }
234 
235     v_l1_size = 1 << v_l1_bits;
236     v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits;
237     v_l2_levels = v_l1_shift / V_L2_BITS - 1;
238 
239     assert(v_l1_bits <= V_L1_MAX_BITS);
240     assert(v_l1_shift % V_L2_BITS == 0);
241     assert(v_l2_levels >= 0);
242 }
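
/*
 * Worked example (assuming a 64-bit host with TARGET_PAGE_BITS = 12 and
 * L1_MAP_ADDR_SPACE_BITS = 64): (64 - 12) % 10 = 2, which is below
 * V_L1_MIN_BITS, so v_l1_bits = 2 + 10 = 12.  That yields
 * v_l1_size = 4096, v_l1_shift = 64 - 12 - 12 = 40 and
 * v_l2_levels = 40 / 10 - 1 = 3: a 12-bit L1 table, three 10-bit pointer
 * levels, and a final 10-bit PageDesc leaf level, covering all 52 bits
 * of the page index.
 */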
243 
244 void cpu_gen_init(void)
245 {
246     tcg_context_init(&tcg_init_ctx);
247 }
248 
249 /* Encode VAL as a signed leb128 sequence at P.
250    Return P incremented past the encoded value.  */
251 static uint8_t *encode_sleb128(uint8_t *p, target_long val)
252 {
253     int more, byte;
254 
255     do {
256         byte = val & 0x7f;
257         val >>= 7;
258         more = !((val == 0 && (byte & 0x40) == 0)
259                  || (val == -1 && (byte & 0x40) != 0));
260         if (more) {
261             byte |= 0x80;
262         }
263         *p++ = byte;
264     } while (more);
265 
266     return p;
267 }
268 
269 /* Decode a signed leb128 sequence at *PP; increment *PP past the
270    decoded value.  Return the decoded value.  */
271 static target_long decode_sleb128(uint8_t **pp)
272 {
273     uint8_t *p = *pp;
274     target_long val = 0;
275     int byte, shift = 0;
276 
277     do {
278         byte = *p++;
279         val |= (target_ulong)(byte & 0x7f) << shift;
280         shift += 7;
281     } while (byte & 0x80);
282     if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
283         val |= -(target_ulong)1 << shift;
284     }
285 
286     *pp = p;
287     return val;
288 }
289 
290 /* Encode the data collected about the instructions while compiling TB.
291    Place the data at BLOCK, and return the number of bytes consumed.
292 
293    The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
294    which come from the target's insn_start data, followed by a uintptr_t
295    which comes from the host pc of the end of the code implementing the insn.
296 
297    Each line of the table is encoded as sleb128 deltas from the previous
298    line.  The seed for the first line is { tb->pc, 0..., tb->tc.ptr }.
299    That is, the first column is seeded with the guest pc, the last column
300    with the host pc, and the middle columns with zeros.  */
301 
302 static int encode_search(TranslationBlock *tb, uint8_t *block)
303 {
304     uint8_t *highwater = tcg_ctx->code_gen_highwater;
305     uint8_t *p = block;
306     int i, j, n;
307 
308     for (i = 0, n = tb->icount; i < n; ++i) {
309         target_ulong prev;
310 
311         for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
312             if (i == 0) {
313                 prev = (j == 0 ? tb->pc : 0);
314             } else {
315                 prev = tcg_ctx->gen_insn_data[i - 1][j];
316             }
317             p = encode_sleb128(p, tcg_ctx->gen_insn_data[i][j] - prev);
318         }
319         prev = (i == 0 ? 0 : tcg_ctx->gen_insn_end_off[i - 1]);
320         p = encode_sleb128(p, tcg_ctx->gen_insn_end_off[i] - prev);
321 
322         /* Test for (pending) buffer overflow.  The assumption is that any
323            one row beginning below the high water mark cannot overrun
324            the buffer completely.  Thus we can test for overflow after
325            encoding a row without having to check during encoding.  */
326         if (unlikely(p > highwater)) {
327             return -1;
328         }
329     }
330 
331     return p - block;
332 }
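
/*
 * Worked example (assuming TARGET_INSN_START_WORDS == 1, i.e. only the
 * guest pc is recorded per insn): for a TB at pc 0x1000 holding two
 * 4-byte insns whose generated host code ends at offsets 0x30 and 0x50,
 * the logical rows are { 0x1000, 0x30 } and { 0x1004, 0x50 }, and the
 * deltas actually emitted are sleb128(0), sleb128(0x30), sleb128(4),
 * sleb128(0x20).
 */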
333 
334 /* The CPU state corresponding to 'searched_pc' is restored.
335  * When reset_icount is true, the current TB will be interrupted and
336  * icount should be recalculated.
337  */
338 static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
339                                      uintptr_t searched_pc, bool reset_icount)
340 {
341     target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc };
342     uintptr_t host_pc = (uintptr_t)tb->tc.ptr;
343     CPUArchState *env = cpu->env_ptr;
344     uint8_t *p = tb->tc.ptr + tb->tc.size;
345     int i, j, num_insns = tb->icount;
346 #ifdef CONFIG_PROFILER
347     TCGProfile *prof = &tcg_ctx->prof;
348     int64_t ti = profile_getclock();
349 #endif
350 
351     searched_pc -= GETPC_ADJ;
352 
353     if (searched_pc < host_pc) {
354         return -1;
355     }
356 
357     /* Reconstruct the stored insn data while looking for the point at
358        which the end of the insn exceeds the searched_pc.  */
359     for (i = 0; i < num_insns; ++i) {
360         for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
361             data[j] += decode_sleb128(&p);
362         }
363         host_pc += decode_sleb128(&p);
364         if (host_pc > searched_pc) {
365             goto found;
366         }
367     }
368     return -1;
369 
370  found:
371     if (reset_icount && (tb_cflags(tb) & CF_USE_ICOUNT)) {
372         assert(use_icount);
373         /* Reset the cycle counter to the start of the block
374            and shift it to the number of actually executed instructions */
375         cpu_neg(cpu)->icount_decr.u16.low += num_insns - i;
376     }
377     restore_state_to_opc(env, tb, data);
378 
379 #ifdef CONFIG_PROFILER
380     qatomic_set(&prof->restore_time,
381                 prof->restore_time + profile_getclock() - ti);
382     qatomic_set(&prof->restore_count, prof->restore_count + 1);
383 #endif
384     return 0;
385 }
386 
387 void tb_destroy(TranslationBlock *tb)
388 {
389     qemu_spin_destroy(&tb->jmp_lock);
390 }
391 
392 bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc, bool will_exit)
393 {
394     TranslationBlock *tb;
395     bool r = false;
396     uintptr_t check_offset;
397 
398     /* The host_pc has to be in the region of the current code buffer.
399      * If it is not, we will not be able to resolve it here. The two
400      * cases where host_pc will not be correct are:
401      *
402      *  - fault during translation (instruction fetch)
403      *  - fault from helper (not using GETPC() macro)
404      *
405      * Either way we need to return early, as we can't resolve it here.
406      *
407      * We are using unsigned arithmetic, so if host_pc <
408      * tcg_init_ctx.code_gen_buffer, check_offset will wrap to way
409      * above code_gen_buffer_size.
410      */
411     check_offset = host_pc - (uintptr_t) tcg_init_ctx.code_gen_buffer;
412 
413     if (check_offset < tcg_init_ctx.code_gen_buffer_size) {
414         tb = tcg_tb_lookup(host_pc);
415         if (tb) {
416             cpu_restore_state_from_tb(cpu, tb, host_pc, will_exit);
417             if (tb_cflags(tb) & CF_NOCACHE) {
418                 /* one-shot translation, invalidate it immediately */
419                 tb_phys_invalidate(tb, -1);
420                 tcg_tb_remove(tb);
421                 tb_destroy(tb);
422             }
423             r = true;
424         }
425     }
426 
427     return r;
428 }
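
/*
 * Illustrative sketch (not part of the build; the helper is
 * hypothetical): a target helper that is about to raise an exception
 * would pass GETPC() here so that the guest CPU state is rolled back to
 * the faulting insn, assuming the helper was called directly from
 * generated code so that GETPC() yields a host pc inside the code buffer.
 */
#if 0
static void example_helper_fault(CPUArchState *env)
{
    cpu_restore_state(env_cpu(env), GETPC(), true);
    /* ... raise the guest exception ... */
}
#endif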
429 
430 static void page_init(void)
431 {
432     page_size_init();
433     page_table_config_init();
434 
435 #if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
436     {
437 #ifdef HAVE_KINFO_GETVMMAP
438         struct kinfo_vmentry *freep;
439         int i, cnt;
440 
441         freep = kinfo_getvmmap(getpid(), &cnt);
442         if (freep) {
443             mmap_lock();
444             for (i = 0; i < cnt; i++) {
445                 unsigned long startaddr, endaddr;
446 
447                 startaddr = freep[i].kve_start;
448                 endaddr = freep[i].kve_end;
449                 if (h2g_valid(startaddr)) {
450                     startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
451 
452                     if (h2g_valid(endaddr)) {
453                         endaddr = h2g(endaddr);
454                         page_set_flags(startaddr, endaddr, PAGE_RESERVED);
455                     } else {
456 #if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
457                         endaddr = ~0ul;
458                         page_set_flags(startaddr, endaddr, PAGE_RESERVED);
459 #endif
460                     }
461                 }
462             }
463             free(freep);
464             mmap_unlock();
465         }
466 #else
467         FILE *f;
468 
469         last_brk = (unsigned long)sbrk(0);
470 
471         f = fopen("/compat/linux/proc/self/maps", "r");
472         if (f) {
473             mmap_lock();
474 
475             do {
476                 unsigned long startaddr, endaddr;
477                 int n;
478 
479                 n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
480 
481                 if (n == 2 && h2g_valid(startaddr)) {
482                     startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
483 
484                     if (h2g_valid(endaddr)) {
485                         endaddr = h2g(endaddr);
486                     } else {
487                         endaddr = ~0ul;
488                     }
489                     page_set_flags(startaddr, endaddr, PAGE_RESERVED);
490                 }
491             } while (!feof(f));
492 
493             fclose(f);
494             mmap_unlock();
495         }
496 #endif
497     }
498 #endif
499 }
500 
501 static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
502 {
503     PageDesc *pd;
504     void **lp;
505     int i;
506 
507     /* Level 1.  Always allocated.  */
508     lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));
509 
510     /* Level 2..N-1.  */
511     for (i = v_l2_levels; i > 0; i--) {
512         void **p = qatomic_rcu_read(lp);
513 
514         if (p == NULL) {
515             void *existing;
516 
517             if (!alloc) {
518                 return NULL;
519             }
520             p = g_new0(void *, V_L2_SIZE);
521             existing = qatomic_cmpxchg(lp, NULL, p);
522             if (unlikely(existing)) {
523                 g_free(p);
524                 p = existing;
525             }
526         }
527 
528         lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
529     }
530 
531     pd = qatomic_rcu_read(lp);
532     if (pd == NULL) {
533         void *existing;
534 
535         if (!alloc) {
536             return NULL;
537         }
538         pd = g_new0(PageDesc, V_L2_SIZE);
539 #ifndef CONFIG_USER_ONLY
540         {
541             int i;
542 
543             for (i = 0; i < V_L2_SIZE; i++) {
544                 qemu_spin_init(&pd[i].lock);
545             }
546         }
547 #endif
548         existing = qatomic_cmpxchg(lp, NULL, pd);
549         if (unlikely(existing)) {
550 #ifndef CONFIG_USER_ONLY
551             {
552                 int i;
553 
554                 for (i = 0; i < V_L2_SIZE; i++) {
555                     qemu_spin_destroy(&pd[i].lock);
556                 }
557             }
558 #endif
559             g_free(pd);
560             pd = existing;
561         }
562     }
563 
564     return pd + (index & (V_L2_SIZE - 1));
565 }
566 
567 static inline PageDesc *page_find(tb_page_addr_t index)
568 {
569     return page_find_alloc(index, 0);
570 }
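
/*
 * Worked example (using the geometry from the worked example after
 * page_table_config_init(): v_l1_shift = 40, v_l2_levels = 3): a page
 * index is resolved using bits [51:40] into l1_map, then bits [39:30],
 * [29:20] and [19:10] through the three intermediate levels, and
 * finally bits [9:0] select a PageDesc within the leaf array.
 */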
571 
572 static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
573                            PageDesc **ret_p2, tb_page_addr_t phys2, int alloc);
574 
575 /* In user-mode page locks aren't used; mmap_lock is enough */
576 #ifdef CONFIG_USER_ONLY
577 
578 #define assert_page_locked(pd) tcg_debug_assert(have_mmap_lock())
579 
580 static inline void page_lock(PageDesc *pd)
581 { }
582 
583 static inline void page_unlock(PageDesc *pd)
584 { }
585 
586 static inline void page_lock_tb(const TranslationBlock *tb)
587 { }
588 
589 static inline void page_unlock_tb(const TranslationBlock *tb)
590 { }
591 
592 struct page_collection *
593 page_collection_lock(tb_page_addr_t start, tb_page_addr_t end)
594 {
595     return NULL;
596 }
597 
598 void page_collection_unlock(struct page_collection *set)
599 { }
600 #else /* !CONFIG_USER_ONLY */
601 
602 #ifdef CONFIG_DEBUG_TCG
603 
604 static __thread GHashTable *ht_pages_locked_debug;
605 
606 static void ht_pages_locked_debug_init(void)
607 {
608     if (ht_pages_locked_debug) {
609         return;
610     }
611     ht_pages_locked_debug = g_hash_table_new(NULL, NULL);
612 }
613 
614 static bool page_is_locked(const PageDesc *pd)
615 {
616     PageDesc *found;
617 
618     ht_pages_locked_debug_init();
619     found = g_hash_table_lookup(ht_pages_locked_debug, pd);
620     return !!found;
621 }
622 
623 static void page_lock__debug(PageDesc *pd)
624 {
625     ht_pages_locked_debug_init();
626     g_assert(!page_is_locked(pd));
627     g_hash_table_insert(ht_pages_locked_debug, pd, pd);
628 }
629 
630 static void page_unlock__debug(const PageDesc *pd)
631 {
632     bool removed;
633 
634     ht_pages_locked_debug_init();
635     g_assert(page_is_locked(pd));
636     removed = g_hash_table_remove(ht_pages_locked_debug, pd);
637     g_assert(removed);
638 }
639 
640 static void
641 do_assert_page_locked(const PageDesc *pd, const char *file, int line)
642 {
643     if (unlikely(!page_is_locked(pd))) {
644         error_report("assert_page_lock: PageDesc %p not locked @ %s:%d",
645                      pd, file, line);
646         abort();
647     }
648 }
649 
650 #define assert_page_locked(pd) do_assert_page_locked(pd, __FILE__, __LINE__)
651 
652 void assert_no_pages_locked(void)
653 {
654     ht_pages_locked_debug_init();
655     g_assert(g_hash_table_size(ht_pages_locked_debug) == 0);
656 }
657 
658 #else /* !CONFIG_DEBUG_TCG */
659 
660 #define assert_page_locked(pd)
661 
662 static inline void page_lock__debug(const PageDesc *pd)
663 {
664 }
665 
666 static inline void page_unlock__debug(const PageDesc *pd)
667 {
668 }
669 
670 #endif /* CONFIG_DEBUG_TCG */
671 
672 static inline void page_lock(PageDesc *pd)
673 {
674     page_lock__debug(pd);
675     qemu_spin_lock(&pd->lock);
676 }
677 
678 static inline void page_unlock(PageDesc *pd)
679 {
680     qemu_spin_unlock(&pd->lock);
681     page_unlock__debug(pd);
682 }
683 
684 /* lock the page(s) of a TB in the correct acquisition order */
685 static inline void page_lock_tb(const TranslationBlock *tb)
686 {
687     page_lock_pair(NULL, tb->page_addr[0], NULL, tb->page_addr[1], 0);
688 }
689 
690 static inline void page_unlock_tb(const TranslationBlock *tb)
691 {
692     PageDesc *p1 = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
693 
694     page_unlock(p1);
695     if (unlikely(tb->page_addr[1] != -1)) {
696         PageDesc *p2 = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
697 
698         if (p2 != p1) {
699             page_unlock(p2);
700         }
701     }
702 }
703 
704 static inline struct page_entry *
705 page_entry_new(PageDesc *pd, tb_page_addr_t index)
706 {
707     struct page_entry *pe = g_malloc(sizeof(*pe));
708 
709     pe->index = index;
710     pe->pd = pd;
711     pe->locked = false;
712     return pe;
713 }
714 
715 static void page_entry_destroy(gpointer p)
716 {
717     struct page_entry *pe = p;
718 
719     g_assert(pe->locked);
720     page_unlock(pe->pd);
721     g_free(pe);
722 }
723 
724 /* returns false on success */
725 static bool page_entry_trylock(struct page_entry *pe)
726 {
727     bool busy;
728 
729     busy = qemu_spin_trylock(&pe->pd->lock);
730     if (!busy) {
731         g_assert(!pe->locked);
732         pe->locked = true;
733         page_lock__debug(pe->pd);
734     }
735     return busy;
736 }
737 
738 static void do_page_entry_lock(struct page_entry *pe)
739 {
740     page_lock(pe->pd);
741     g_assert(!pe->locked);
742     pe->locked = true;
743 }
744 
745 static gboolean page_entry_lock(gpointer key, gpointer value, gpointer data)
746 {
747     struct page_entry *pe = value;
748 
749     do_page_entry_lock(pe);
750     return FALSE;
751 }
752 
753 static gboolean page_entry_unlock(gpointer key, gpointer value, gpointer data)
754 {
755     struct page_entry *pe = value;
756 
757     if (pe->locked) {
758         pe->locked = false;
759         page_unlock(pe->pd);
760     }
761     return FALSE;
762 }
763 
764 /*
765  * Trylock a page, and if successful, add the page to a collection.
766  * Returns true ("busy") if the page could not be locked; false otherwise.
767  */
768 static bool page_trylock_add(struct page_collection *set, tb_page_addr_t addr)
769 {
770     tb_page_addr_t index = addr >> TARGET_PAGE_BITS;
771     struct page_entry *pe;
772     PageDesc *pd;
773 
774     pe = g_tree_lookup(set->tree, &index);
775     if (pe) {
776         return false;
777     }
778 
779     pd = page_find(index);
780     if (pd == NULL) {
781         return false;
782     }
783 
784     pe = page_entry_new(pd, index);
785     g_tree_insert(set->tree, &pe->index, pe);
786 
787     /*
788      * If this is either (1) the first insertion or (2) a page whose index
789      * is higher than any other so far, just lock the page and move on.
790      */
791     if (set->max == NULL || pe->index > set->max->index) {
792         set->max = pe;
793         do_page_entry_lock(pe);
794         return false;
795     }
796     /*
797      * Try to acquire out-of-order lock; if busy, return busy so that we acquire
798      * locks in order.
799      */
800     return page_entry_trylock(pe);
801 }
802 
803 static gint tb_page_addr_cmp(gconstpointer ap, gconstpointer bp, gpointer udata)
804 {
805     tb_page_addr_t a = *(const tb_page_addr_t *)ap;
806     tb_page_addr_t b = *(const tb_page_addr_t *)bp;
807 
808     if (a == b) {
809         return 0;
810     } else if (a < b) {
811         return -1;
812     }
813     return 1;
814 }
815 
816 /*
817  * Lock a range of pages ([@start, @end)) as well as the pages of all
818  * intersecting TBs.
819  * Locking order: acquire locks in ascending order of page index.
820  */
821 struct page_collection *
822 page_collection_lock(tb_page_addr_t start, tb_page_addr_t end)
823 {
824     struct page_collection *set = g_malloc(sizeof(*set));
825     tb_page_addr_t index;
826     PageDesc *pd;
827 
828     start >>= TARGET_PAGE_BITS;
829     end   >>= TARGET_PAGE_BITS;
830     g_assert(start <= end);
831 
832     set->tree = g_tree_new_full(tb_page_addr_cmp, NULL, NULL,
833                                 page_entry_destroy);
834     set->max = NULL;
835     assert_no_pages_locked();
836 
837  retry:
838     g_tree_foreach(set->tree, page_entry_lock, NULL);
839 
840     for (index = start; index <= end; index++) {
841         TranslationBlock *tb;
842         int n;
843 
844         pd = page_find(index);
845         if (pd == NULL) {
846             continue;
847         }
848         if (page_trylock_add(set, index << TARGET_PAGE_BITS)) {
849             g_tree_foreach(set->tree, page_entry_unlock, NULL);
850             goto retry;
851         }
852         assert_page_locked(pd);
853         PAGE_FOR_EACH_TB(pd, tb, n) {
854             if (page_trylock_add(set, tb->page_addr[0]) ||
855                 (tb->page_addr[1] != -1 &&
856                  page_trylock_add(set, tb->page_addr[1]))) {
857                 /* drop all locks, and reacquire in order */
858                 g_tree_foreach(set->tree, page_entry_unlock, NULL);
859                 goto retry;
860             }
861         }
862     }
863     return set;
864 }
865 
866 void page_collection_unlock(struct page_collection *set)
867 {
868     /* entries are unlocked and freed via page_entry_destroy */
869     g_tree_destroy(set->tree);
870     g_free(set);
871 }
872 
873 #endif /* !CONFIG_USER_ONLY */
874 
875 static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
876                            PageDesc **ret_p2, tb_page_addr_t phys2, int alloc)
877 {
878     PageDesc *p1, *p2;
879     tb_page_addr_t page1;
880     tb_page_addr_t page2;
881 
882     assert_memory_lock();
883     g_assert(phys1 != -1);
884 
885     page1 = phys1 >> TARGET_PAGE_BITS;
886     page2 = phys2 >> TARGET_PAGE_BITS;
887 
888     p1 = page_find_alloc(page1, alloc);
889     if (ret_p1) {
890         *ret_p1 = p1;
891     }
892     if (likely(phys2 == -1)) {
893         page_lock(p1);
894         return;
895     } else if (page1 == page2) {
896         page_lock(p1);
897         if (ret_p2) {
898             *ret_p2 = p1;
899         }
900         return;
901     }
902     p2 = page_find_alloc(page2, alloc);
903     if (ret_p2) {
904         *ret_p2 = p2;
905     }
906     if (page1 < page2) {
907         page_lock(p1);
908         page_lock(p2);
909     } else {
910         page_lock(p2);
911         page_lock(p1);
912     }
913 }
914 
915 /* Minimum size of the code gen buffer.  This number is arbitrarily chosen,
916    but not so small that we can't have a fair number of TBs live.  */
917 #define MIN_CODE_GEN_BUFFER_SIZE     (1 * MiB)
918 
919 /* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
920    indicated, this is constrained by the range of direct branches on the
921    host cpu, as used by the TCG implementation of goto_tb.  */
922 #if defined(__x86_64__)
923 # define MAX_CODE_GEN_BUFFER_SIZE  (2 * GiB)
924 #elif defined(__sparc__)
925 # define MAX_CODE_GEN_BUFFER_SIZE  (2 * GiB)
926 #elif defined(__powerpc64__)
927 # define MAX_CODE_GEN_BUFFER_SIZE  (2 * GiB)
928 #elif defined(__powerpc__)
929 # define MAX_CODE_GEN_BUFFER_SIZE  (32 * MiB)
930 #elif defined(__aarch64__)
931 # define MAX_CODE_GEN_BUFFER_SIZE  (2 * GiB)
932 #elif defined(__s390x__)
933   /* We have a +- 4GB range on the branches; leave some slop.  */
934 # define MAX_CODE_GEN_BUFFER_SIZE  (3 * GiB)
935 #elif defined(__mips__)
936   /* We have a 256MB branch region, but leave room to make sure the
937      main executable is also within that region.  */
938 # define MAX_CODE_GEN_BUFFER_SIZE  (128 * MiB)
939 #else
940 # define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
941 #endif
942 
943 #if TCG_TARGET_REG_BITS == 32
944 #define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32 * MiB)
945 #ifdef CONFIG_USER_ONLY
946 /*
947  * For user mode on smaller 32-bit systems we may run into trouble
948  * allocating big chunks of data in the right place. On these systems
949  * we utilise a static code generation buffer directly in the binary.
950  */
951 #define USE_STATIC_CODE_GEN_BUFFER
952 #endif
953 #else /* TCG_TARGET_REG_BITS == 64 */
954 #ifdef CONFIG_USER_ONLY
955 /*
956  * As user-mode emulation typically means running multiple instances
957  * of the translator, don't go overboard with the default code gen
958  * buffer, lest we make things too hard for the OS.
959  */
960 #define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (128 * MiB)
961 #else
962 /*
963  * We expect most system emulation to run one or two guests per host.
964  * Users running large scale system emulation may want to tweak their
965  * runtime setup via the tb-size control on the command line.
966  */
967 #define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (1 * GiB)
968 #endif
969 #endif
970 
971 #define DEFAULT_CODE_GEN_BUFFER_SIZE \
972   (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
973    ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)
974 
975 static inline size_t size_code_gen_buffer(size_t tb_size)
976 {
977     /* Size the buffer.  */
978     if (tb_size == 0) {
979         size_t phys_mem = qemu_get_host_physmem();
980         if (phys_mem == 0) {
981             tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
982         } else {
983             tb_size = MIN(DEFAULT_CODE_GEN_BUFFER_SIZE, phys_mem / 8);
984         }
985     }
986     if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
987         tb_size = MIN_CODE_GEN_BUFFER_SIZE;
988     }
989     if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
990         tb_size = MAX_CODE_GEN_BUFFER_SIZE;
991     }
992     return tb_size;
993 }
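
/*
 * Worked example (assuming the 64-bit system-mode defaults, i.e. a
 * 1 GiB DEFAULT_CODE_GEN_BUFFER_SIZE and a 2 GiB maximum): with
 * tb_size == 0 on a host with 64 GiB of RAM, phys_mem / 8 is 8 GiB, so
 * the 1 GiB default wins; on a host with 2 GiB of RAM the buffer is
 * instead capped at 256 MiB.  Both results already lie within the
 * MIN/MAX bounds, so they are returned unchanged.
 */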
994 
995 #ifdef __mips__
996 /* In order to use J and JAL within the code_gen_buffer, we require
997    that the buffer not cross a 256MB boundary.  */
998 static inline bool cross_256mb(void *addr, size_t size)
999 {
1000     return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful;
1001 }
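
/*
 * Worked example: for addr = 0x2ff00000 and size = 0x200000 the buffer
 * ends at 0x30100000; addr ^ (addr + size) = 0x1fe00000, which has bits
 * set above the low 28, so the buffer crosses out of the 256MB region
 * starting at 0x20000000 and cross_256mb() returns non-zero.
 */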
1002 
1003 /* We weren't able to allocate a buffer without crossing that boundary,
1004    so make do with the larger portion of the buffer that doesn't cross.
1005    Returns the new base of the buffer, and adjusts code_gen_buffer_size.  */
1006 static inline void *split_cross_256mb(void *buf1, size_t size1)
1007 {
1008     void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful);
1009     size_t size2 = buf1 + size1 - buf2;
1010 
1011     size1 = buf2 - buf1;
1012     if (size1 < size2) {
1013         size1 = size2;
1014         buf1 = buf2;
1015     }
1016 
1017     tcg_ctx->code_gen_buffer_size = size1;
1018     return buf1;
1019 }
1020 #endif
1021 
1022 #ifdef USE_STATIC_CODE_GEN_BUFFER
1023 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
1024     __attribute__((aligned(CODE_GEN_ALIGN)));
1025 
1026 static inline void *alloc_code_gen_buffer(void)
1027 {
1028     void *buf = static_code_gen_buffer;
1029     void *end = static_code_gen_buffer + sizeof(static_code_gen_buffer);
1030     size_t size;
1031 
1032     /* page-align the beginning and end of the buffer */
1033     buf = QEMU_ALIGN_PTR_UP(buf, qemu_real_host_page_size);
1034     end = QEMU_ALIGN_PTR_DOWN(end, qemu_real_host_page_size);
1035 
1036     size = end - buf;
1037 
1038     /* Honor a command-line option limiting the size of the buffer.  */
1039     if (size > tcg_ctx->code_gen_buffer_size) {
1040         size = QEMU_ALIGN_DOWN(tcg_ctx->code_gen_buffer_size,
1041                                qemu_real_host_page_size);
1042     }
1043     tcg_ctx->code_gen_buffer_size = size;
1044 
1045 #ifdef __mips__
1046     if (cross_256mb(buf, size)) {
1047         buf = split_cross_256mb(buf, size);
1048         size = tcg_ctx->code_gen_buffer_size;
1049     }
1050 #endif
1051 
1052     if (qemu_mprotect_rwx(buf, size)) {
1053         abort();
1054     }
1055     qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
1056 
1057     return buf;
1058 }
1059 #elif defined(_WIN32)
1060 static inline void *alloc_code_gen_buffer(void)
1061 {
1062     size_t size = tcg_ctx->code_gen_buffer_size;
1063     return VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT,
1064                         PAGE_EXECUTE_READWRITE);
1065 }
1066 #else
1067 static inline void *alloc_code_gen_buffer(void)
1068 {
1069     int prot = PROT_WRITE | PROT_READ | PROT_EXEC;
1070     int flags = MAP_PRIVATE | MAP_ANONYMOUS;
1071     size_t size = tcg_ctx->code_gen_buffer_size;
1072     void *buf;
1073 
1074     buf = mmap(NULL, size, prot, flags, -1, 0);
1075     if (buf == MAP_FAILED) {
1076         return NULL;
1077     }
1078 
1079 #ifdef __mips__
1080     if (cross_256mb(buf, size)) {
1081         /*
1082          * Try again, with the original still mapped, to avoid re-acquiring
1083          * the same 256mb crossing.
1084          * the same 256MB crossing.
1085         size_t size2;
1086         void *buf2 = mmap(NULL, size, prot, flags, -1, 0);
1087         switch ((int)(buf2 != MAP_FAILED)) {
1088         case 1:
1089             if (!cross_256mb(buf2, size)) {
1090                 /* Success!  Use the new buffer.  */
1091                 munmap(buf, size);
1092                 break;
1093             }
1094             /* Failure.  Work with what we had.  */
1095             munmap(buf2, size);
1096             /* fallthru */
1097         default:
1098             /* Split the original buffer.  Free the smaller half.  */
1099             buf2 = split_cross_256mb(buf, size);
1100             size2 = tcg_ctx->code_gen_buffer_size;
1101             if (buf == buf2) {
1102                 munmap(buf + size2, size - size2);
1103             } else {
1104                 munmap(buf, size - size2);
1105             }
1106             size = size2;
1107             break;
1108         }
1109         buf = buf2;
1110     }
1111 #endif
1112 
1113     /* Request large pages for the buffer.  */
1114     qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
1115 
1116     return buf;
1117 }
1118 #endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */
1119 
1120 static inline void code_gen_alloc(size_t tb_size)
1121 {
1122     tcg_ctx->code_gen_buffer_size = size_code_gen_buffer(tb_size);
1123     tcg_ctx->code_gen_buffer = alloc_code_gen_buffer();
1124     if (tcg_ctx->code_gen_buffer == NULL) {
1125         fprintf(stderr, "Could not allocate dynamic translator buffer\n");
1126         exit(1);
1127     }
1128 }
1129 
1130 static bool tb_cmp(const void *ap, const void *bp)
1131 {
1132     const TranslationBlock *a = ap;
1133     const TranslationBlock *b = bp;
1134 
1135     return a->pc == b->pc &&
1136         a->cs_base == b->cs_base &&
1137         a->flags == b->flags &&
1138         (tb_cflags(a) & CF_HASH_MASK) == (tb_cflags(b) & CF_HASH_MASK) &&
1139         a->trace_vcpu_dstate == b->trace_vcpu_dstate &&
1140         a->page_addr[0] == b->page_addr[0] &&
1141         a->page_addr[1] == b->page_addr[1];
1142 }
1143 
1144 static void tb_htable_init(void)
1145 {
1146     unsigned int mode = QHT_MODE_AUTO_RESIZE;
1147 
1148     qht_init(&tb_ctx.htable, tb_cmp, CODE_GEN_HTABLE_SIZE, mode);
1149 }
1150 
1151 /* Must be called before using the QEMU cpus. 'tb_size' is the size
1152    (in bytes) allocated to the translation buffer. Zero means default
1153    size. */
1154 void tcg_exec_init(unsigned long tb_size)
1155 {
1156     tcg_allowed = true;
1157     cpu_gen_init();
1158     page_init();
1159     tb_htable_init();
1160     code_gen_alloc(tb_size);
1161 #if defined(CONFIG_SOFTMMU)
1162     /* There's no guest base to take into account, so go ahead and
1163        initialize the prologue now.  */
1164     tcg_prologue_init(tcg_ctx);
1165 #endif
1166 }
1167 
1168 /* call with @p->lock held */
1169 static inline void invalidate_page_bitmap(PageDesc *p)
1170 {
1171     assert_page_locked(p);
1172 #ifdef CONFIG_SOFTMMU
1173     g_free(p->code_bitmap);
1174     p->code_bitmap = NULL;
1175     p->code_write_count = 0;
1176 #endif
1177 }
1178 
1179 /* Set the 'first_tb' field of every PageDesc to NULL. */
1180 static void page_flush_tb_1(int level, void **lp)
1181 {
1182     int i;
1183 
1184     if (*lp == NULL) {
1185         return;
1186     }
1187     if (level == 0) {
1188         PageDesc *pd = *lp;
1189 
1190         for (i = 0; i < V_L2_SIZE; ++i) {
1191             page_lock(&pd[i]);
1192             pd[i].first_tb = (uintptr_t)NULL;
1193             invalidate_page_bitmap(pd + i);
1194             page_unlock(&pd[i]);
1195         }
1196     } else {
1197         void **pp = *lp;
1198 
1199         for (i = 0; i < V_L2_SIZE; ++i) {
1200             page_flush_tb_1(level - 1, pp + i);
1201         }
1202     }
1203 }
1204 
1205 static void page_flush_tb(void)
1206 {
1207     int i, l1_sz = v_l1_size;
1208 
1209     for (i = 0; i < l1_sz; i++) {
1210         page_flush_tb_1(v_l2_levels, l1_map + i);
1211     }
1212 }
1213 
1214 static gboolean tb_host_size_iter(gpointer key, gpointer value, gpointer data)
1215 {
1216     const TranslationBlock *tb = value;
1217     size_t *size = data;
1218 
1219     *size += tb->tc.size;
1220     return false;
1221 }
1222 
1223 /* flush all the translation blocks */
1224 static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
1225 {
1226     bool did_flush = false;
1227 
1228     mmap_lock();
1229     /* If it has already been done on request of another CPU,
1230      * just retry.
1231      */
1232     if (tb_ctx.tb_flush_count != tb_flush_count.host_int) {
1233         goto done;
1234     }
1235     did_flush = true;
1236 
1237     if (DEBUG_TB_FLUSH_GATE) {
1238         size_t nb_tbs = tcg_nb_tbs();
1239         size_t host_size = 0;
1240 
1241         tcg_tb_foreach(tb_host_size_iter, &host_size);
1242         printf("qemu: flush code_size=%zu nb_tbs=%zu avg_tb_size=%zu\n",
1243                tcg_code_size(), nb_tbs, nb_tbs > 0 ? host_size / nb_tbs : 0);
1244     }
1245 
1246     CPU_FOREACH(cpu) {
1247         cpu_tb_jmp_cache_clear(cpu);
1248     }
1249 
1250     qht_reset_size(&tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
1251     page_flush_tb();
1252 
1253     tcg_region_reset_all();
1254     /* XXX: flush processor icache at this point if cache flush is
1255        expensive */
1256     qatomic_mb_set(&tb_ctx.tb_flush_count, tb_ctx.tb_flush_count + 1);
1257 
1258 done:
1259     mmap_unlock();
1260     if (did_flush) {
1261         qemu_plugin_flush_cb();
1262     }
1263 }
1264 
1265 void tb_flush(CPUState *cpu)
1266 {
1267     if (tcg_enabled()) {
1268         unsigned tb_flush_count = qatomic_mb_read(&tb_ctx.tb_flush_count);
1269 
1270         if (cpu_in_exclusive_context(cpu)) {
1271             do_tb_flush(cpu, RUN_ON_CPU_HOST_INT(tb_flush_count));
1272         } else {
1273             async_safe_run_on_cpu(cpu, do_tb_flush,
1274                                   RUN_ON_CPU_HOST_INT(tb_flush_count));
1275         }
1276     }
1277 }
1278 
1279 /*
1280  * Formerly ifdef DEBUG_TB_CHECK. These debug functions are user-mode-only,
1281  * so in order to prevent bit rot we compile them unconditionally in user-mode,
1282  * and let the optimizer get rid of them by wrapping their user-only callers
1283  * with if (DEBUG_TB_CHECK_GATE).
1284  */
1285 #ifdef CONFIG_USER_ONLY
1286 
1287 static void do_tb_invalidate_check(void *p, uint32_t hash, void *userp)
1288 {
1289     TranslationBlock *tb = p;
1290     target_ulong addr = *(target_ulong *)userp;
1291 
1292     if (!(addr + TARGET_PAGE_SIZE <= tb->pc || addr >= tb->pc + tb->size)) {
1293         printf("ERROR invalidate: address=" TARGET_FMT_lx
1294                " PC=%08lx size=%04x\n", addr, (long)tb->pc, tb->size);
1295     }
1296 }
1297 
1298 /* verify that all the pages have correct rights for code
1299  *
1300  * Called with mmap_lock held.
1301  */
1302 static void tb_invalidate_check(target_ulong address)
1303 {
1304     address &= TARGET_PAGE_MASK;
1305     qht_iter(&tb_ctx.htable, do_tb_invalidate_check, &address);
1306 }
1307 
1308 static void do_tb_page_check(void *p, uint32_t hash, void *userp)
1309 {
1310     TranslationBlock *tb = p;
1311     int flags1, flags2;
1312 
1313     flags1 = page_get_flags(tb->pc);
1314     flags2 = page_get_flags(tb->pc + tb->size - 1);
1315     if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
1316         printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
1317                (long)tb->pc, tb->size, flags1, flags2);
1318     }
1319 }
1320 
1321 /* verify that all the pages have correct rights for code */
1322 static void tb_page_check(void)
1323 {
1324     qht_iter(&tb_ctx.htable, do_tb_page_check, NULL);
1325 }
1326 
1327 #endif /* CONFIG_USER_ONLY */
1328 
1329 /*
1330  * user-mode: call with mmap_lock held
1331  * !user-mode: call with @pd->lock held
1332  */
1333 static inline void tb_page_remove(PageDesc *pd, TranslationBlock *tb)
1334 {
1335     TranslationBlock *tb1;
1336     uintptr_t *pprev;
1337     unsigned int n1;
1338 
1339     assert_page_locked(pd);
1340     pprev = &pd->first_tb;
1341     PAGE_FOR_EACH_TB(pd, tb1, n1) {
1342         if (tb1 == tb) {
1343             *pprev = tb1->page_next[n1];
1344             return;
1345         }
1346         pprev = &tb1->page_next[n1];
1347     }
1348     g_assert_not_reached();
1349 }
1350 
1351 /* remove @orig from its @n_orig-th jump list */
1352 static inline void tb_remove_from_jmp_list(TranslationBlock *orig, int n_orig)
1353 {
1354     uintptr_t ptr, ptr_locked;
1355     TranslationBlock *dest;
1356     TranslationBlock *tb;
1357     uintptr_t *pprev;
1358     int n;
1359 
1360     /* mark the LSB of jmp_dest[] so that no further jumps can be inserted */
1361     ptr = qatomic_or_fetch(&orig->jmp_dest[n_orig], 1);
1362     dest = (TranslationBlock *)(ptr & ~1);
1363     if (dest == NULL) {
1364         return;
1365     }
1366 
1367     qemu_spin_lock(&dest->jmp_lock);
1368     /*
1369      * While acquiring the lock, the jump might have been removed if the
1370      * destination TB was invalidated; check again.
1371      */
1372     ptr_locked = qatomic_read(&orig->jmp_dest[n_orig]);
1373     if (ptr_locked != ptr) {
1374         qemu_spin_unlock(&dest->jmp_lock);
1375         /*
1376          * The only possibility is that the jump was unlinked via
1377          * tb_jump_unlink(dest). Seeing another destination here would be a bug,
1378          * because we set the LSB above.
1379          */
1380         g_assert(ptr_locked == 1 && dest->cflags & CF_INVALID);
1381         return;
1382     }
1383     /*
1384      * We first acquired the lock, and since the destination pointer matches,
1385      * we know for sure that @orig is in the jmp list.
1386      */
1387     pprev = &dest->jmp_list_head;
1388     TB_FOR_EACH_JMP(dest, tb, n) {
1389         if (tb == orig && n == n_orig) {
1390             *pprev = tb->jmp_list_next[n];
1391             /* no need to set orig->jmp_dest[n]; setting the LSB was enough */
1392             qemu_spin_unlock(&dest->jmp_lock);
1393             return;
1394         }
1395         pprev = &tb->jmp_list_next[n];
1396     }
1397     g_assert_not_reached();
1398 }
1399 
1400 /* reset the jump entry 'n' of a TB so that it is not chained to
1401    another TB */
1402 static inline void tb_reset_jump(TranslationBlock *tb, int n)
1403 {
1404     uintptr_t addr = (uintptr_t)(tb->tc.ptr + tb->jmp_reset_offset[n]);
1405     tb_set_jmp_target(tb, n, addr);
1406 }
1407 
1408 /* remove any jumps to the TB */
1409 static inline void tb_jmp_unlink(TranslationBlock *dest)
1410 {
1411     TranslationBlock *tb;
1412     int n;
1413 
1414     qemu_spin_lock(&dest->jmp_lock);
1415 
1416     TB_FOR_EACH_JMP(dest, tb, n) {
1417         tb_reset_jump(tb, n);
1418         qatomic_and(&tb->jmp_dest[n], (uintptr_t)NULL | 1);
1419         /* No need to clear the list entry; setting the dest ptr is enough */
1420     }
1421     dest->jmp_list_head = (uintptr_t)NULL;
1422 
1423     qemu_spin_unlock(&dest->jmp_lock);
1424 }
1425 
1426 /*
1427  * In user-mode, call with mmap_lock held.
1428  * In !user-mode, if @rm_from_page_list is set, call with the TB's pages'
1429  * locks held.
1430  */
1431 static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
1432 {
1433     CPUState *cpu;
1434     PageDesc *p;
1435     uint32_t h;
1436     tb_page_addr_t phys_pc;
1437 
1438     assert_memory_lock();
1439 
1440     /* make sure no further incoming jumps will be chained to this TB */
1441     qemu_spin_lock(&tb->jmp_lock);
1442     qatomic_set(&tb->cflags, tb->cflags | CF_INVALID);
1443     qemu_spin_unlock(&tb->jmp_lock);
1444 
1445     /* remove the TB from the hash list */
1446     phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1447     h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb_cflags(tb) & CF_HASH_MASK,
1448                      tb->trace_vcpu_dstate);
1449     if (!(tb->cflags & CF_NOCACHE) &&
1450         !qht_remove(&tb_ctx.htable, tb, h)) {
1451         return;
1452     }
1453 
1454     /* remove the TB from the page list */
1455     if (rm_from_page_list) {
1456         p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
1457         tb_page_remove(p, tb);
1458         invalidate_page_bitmap(p);
1459         if (tb->page_addr[1] != -1) {
1460             p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
1461             tb_page_remove(p, tb);
1462             invalidate_page_bitmap(p);
1463         }
1464     }
1465 
1466     /* remove the TB from each CPU's tb_jmp_cache */
1467     h = tb_jmp_cache_hash_func(tb->pc);
1468     CPU_FOREACH(cpu) {
1469         if (qatomic_read(&cpu->tb_jmp_cache[h]) == tb) {
1470             qatomic_set(&cpu->tb_jmp_cache[h], NULL);
1471         }
1472     }
1473 
1474     /* suppress this TB from the two jump lists */
1475     tb_remove_from_jmp_list(tb, 0);
1476     tb_remove_from_jmp_list(tb, 1);
1477 
1478     /* suppress any remaining jumps to this TB */
1479     tb_jmp_unlink(tb);
1480 
1481     qatomic_set(&tcg_ctx->tb_phys_invalidate_count,
1482                tcg_ctx->tb_phys_invalidate_count + 1);
1483 }
1484 
1485 static void tb_phys_invalidate__locked(TranslationBlock *tb)
1486 {
1487     do_tb_phys_invalidate(tb, true);
1488 }
1489 
1490 /* invalidate one TB
1491  *
1492  * Called with mmap_lock held in user-mode.
1493  */
1494 void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
1495 {
1496     if (page_addr == -1 && tb->page_addr[0] != -1) {
1497         page_lock_tb(tb);
1498         do_tb_phys_invalidate(tb, true);
1499         page_unlock_tb(tb);
1500     } else {
1501         do_tb_phys_invalidate(tb, false);
1502     }
1503 }
1504 
1505 #ifdef CONFIG_SOFTMMU
1506 /* call with @p->lock held */
1507 static void build_page_bitmap(PageDesc *p)
1508 {
1509     int n, tb_start, tb_end;
1510     TranslationBlock *tb;
1511 
1512     assert_page_locked(p);
1513     p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE);
1514 
1515     PAGE_FOR_EACH_TB(p, tb, n) {
1516         /* NOTE: this is subtle as a TB may span two physical pages */
1517         if (n == 0) {
1518             /* NOTE: tb_end may be after the end of the page, but
1519                it is not a problem */
1520             tb_start = tb->pc & ~TARGET_PAGE_MASK;
1521             tb_end = tb_start + tb->size;
1522             if (tb_end > TARGET_PAGE_SIZE) {
1523                 tb_end = TARGET_PAGE_SIZE;
1524             }
1525         } else {
1526             tb_start = 0;
1527             tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1528         }
1529         bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start);
1530     }
1531 }
1532 #endif
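
/*
 * Worked example (assuming TARGET_PAGE_SIZE == 0x1000): a TB at pc 0xff0
 * with size 0x20 spans two pages.  On its first page (n == 0),
 * tb_start = 0xff0 and tb_end is clamped to 0x1000; on its second page
 * (n == 1), tb_start = 0 and tb_end = 0x10.  The bitmap therefore marks
 * exactly the bytes of each page occupied by translated code.
 */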
1533 
1534 /* add the TB to the target page and protect it if necessary
1535  *
1536  * Called with mmap_lock held for user-mode emulation.
1537  * Called with @p->lock held in !user-mode.
1538  */
1539 static inline void tb_page_add(PageDesc *p, TranslationBlock *tb,
1540                                unsigned int n, tb_page_addr_t page_addr)
1541 {
1542 #ifndef CONFIG_USER_ONLY
1543     bool page_already_protected;
1544 #endif
1545 
1546     assert_page_locked(p);
1547 
1548     tb->page_addr[n] = page_addr;
1549     tb->page_next[n] = p->first_tb;
1550 #ifndef CONFIG_USER_ONLY
1551     page_already_protected = p->first_tb != (uintptr_t)NULL;
1552 #endif
1553     p->first_tb = (uintptr_t)tb | n;
1554     invalidate_page_bitmap(p);
1555 
1556 #if defined(CONFIG_USER_ONLY)
1557     if (p->flags & PAGE_WRITE) {
1558         target_ulong addr;
1559         PageDesc *p2;
1560         int prot;
1561 
1562         /* force the host page to be non-writable (writes will have a
1563            page fault + mprotect overhead) */
1564         page_addr &= qemu_host_page_mask;
1565         prot = 0;
1566         for (addr = page_addr; addr < page_addr + qemu_host_page_size;
1567             addr += TARGET_PAGE_SIZE) {
1568 
1569             p2 = page_find(addr >> TARGET_PAGE_BITS);
1570             if (!p2) {
1571                 continue;
1572             }
1573             prot |= p2->flags;
1574             p2->flags &= ~PAGE_WRITE;
1575         }
1576         mprotect(g2h(page_addr), qemu_host_page_size,
1577                  (prot & PAGE_BITS) & ~PAGE_WRITE);
1578         if (DEBUG_TB_INVALIDATE_GATE) {
1579             printf("protecting code page: 0x" TB_PAGE_ADDR_FMT "\n", page_addr);
1580         }
1581     }
1582 #else
1583     /* if some code is already present, then the pages are already
1584        protected. So we handle the case where only the first TB is
1585        allocated in a physical page */
1586     if (!page_already_protected) {
1587         tlb_protect_code(page_addr);
1588     }
1589 #endif
1590 }
1591 
1592 /* add a new TB and link it to the physical page tables. phys_page2 is
1593  * (-1) to indicate that only one page contains the TB.
1594  *
1595  * Called with mmap_lock held for user-mode emulation.
1596  *
1597  * Returns a pointer @tb, or a pointer to an existing TB that matches @tb.
1598  * Note that in !user-mode, another thread might have already added a TB
1599  * for the same block of guest code that @tb corresponds to. In that case,
1600  * the caller should discard the original @tb, and use instead the returned TB.
1601  */
1602 static TranslationBlock *
1603 tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
1604              tb_page_addr_t phys_page2)
1605 {
1606     PageDesc *p;
1607     PageDesc *p2 = NULL;
1608 
1609     assert_memory_lock();
1610 
1611     if (phys_pc == -1) {
1612         /*
1613          * If the TB is not associated with a physical RAM page then
1614          * it must be a temporary one-insn TB, and we have nothing to do
1615          * except fill in the page_addr[] fields.
1616          */
1617         assert(tb->cflags & CF_NOCACHE);
1618         tb->page_addr[0] = tb->page_addr[1] = -1;
1619         return tb;
1620     }
1621 
1622     /*
1623      * Add the TB to the page list, first acquiring the pages' locks.
1624      * We keep the locks held until after inserting the TB in the hash table,
1625      * so that if the insertion fails we know for sure that the TBs are still
1626      * in the page descriptors.
1627      * Note that inserting into the hash table first isn't an option, since
1628      * we can only insert TBs that are fully initialized.
1629      */
1630     page_lock_pair(&p, phys_pc, &p2, phys_page2, 1);
1631     tb_page_add(p, tb, 0, phys_pc & TARGET_PAGE_MASK);
1632     if (p2) {
1633         tb_page_add(p2, tb, 1, phys_page2);
1634     } else {
1635         tb->page_addr[1] = -1;
1636     }
1637 
1638     if (!(tb->cflags & CF_NOCACHE)) {
1639         void *existing_tb = NULL;
1640         uint32_t h;
1641 
1642         /* add in the hash table */
1643         h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->cflags & CF_HASH_MASK,
1644                          tb->trace_vcpu_dstate);
1645         qht_insert(&tb_ctx.htable, tb, h, &existing_tb);
1646 
1647         /* remove TB from the page(s) if we couldn't insert it */
1648         if (unlikely(existing_tb)) {
1649             tb_page_remove(p, tb);
1650             invalidate_page_bitmap(p);
1651             if (p2) {
1652                 tb_page_remove(p2, tb);
1653                 invalidate_page_bitmap(p2);
1654             }
1655             tb = existing_tb;
1656         }
1657     }
1658 
1659     if (p2 && p2 != p) {
1660         page_unlock(p2);
1661     }
1662     page_unlock(p);
1663 
1664 #ifdef CONFIG_USER_ONLY
1665     if (DEBUG_TB_CHECK_GATE) {
1666         tb_page_check();
1667     }
1668 #endif
1669     return tb;
1670 }
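
/*
 * Illustrative sketch (not part of the build, and simplified: real
 * callers also roll back code_gen_ptr): the returned pointer must be
 * adopted, since in !user-mode another thread may have raced us and
 * inserted an equivalent TB first.
 */
#if 0
    existing = tb_link_page(tb, phys_pc, phys_page2);
    if (existing != tb) {
        tb_destroy(tb);    /* discard our copy ... */
        tb = existing;     /* ... and use the TB that won the race */
    }
#endif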
1671 
1672 /* Called with mmap_lock held for user mode emulation.  */
1673 TranslationBlock *tb_gen_code(CPUState *cpu,
1674                               target_ulong pc, target_ulong cs_base,
1675                               uint32_t flags, int cflags)
1676 {
1677     CPUArchState *env = cpu->env_ptr;
1678     TranslationBlock *tb, *existing_tb;
1679     tb_page_addr_t phys_pc, phys_page2;
1680     target_ulong virt_page2;
1681     tcg_insn_unit *gen_code_buf;
1682     int gen_code_size, search_size, max_insns;
1683 #ifdef CONFIG_PROFILER
1684     TCGProfile *prof = &tcg_ctx->prof;
1685     int64_t ti;
1686 #endif
1687 
1688     assert_memory_lock();
1689 
1690     phys_pc = get_page_addr_code(env, pc);
1691 
1692     if (phys_pc == -1) {
1693         /* Generate a temporary TB with 1 insn in it */
1694         cflags &= ~CF_COUNT_MASK;
1695         cflags |= CF_NOCACHE | 1;
1696     }
1697 
1698     cflags &= ~CF_CLUSTER_MASK;
1699     cflags |= cpu->cluster_index << CF_CLUSTER_SHIFT;
1700 
1701     max_insns = cflags & CF_COUNT_MASK;
1702     if (max_insns == 0) {
1703         max_insns = CF_COUNT_MASK;
1704     }
1705     if (max_insns > TCG_MAX_INSNS) {
1706         max_insns = TCG_MAX_INSNS;
1707     }
1708     if (cpu->singlestep_enabled || singlestep) {
1709         max_insns = 1;
1710     }
1711 
1712  buffer_overflow:
1713     tb = tcg_tb_alloc(tcg_ctx);
1714     if (unlikely(!tb)) {
1715         /* flush must be done */
1716         tb_flush(cpu);
1717         mmap_unlock();
1718         /* Make the execution loop process the flush as soon as possible.  */
1719         cpu->exception_index = EXCP_INTERRUPT;
1720         cpu_loop_exit(cpu);
1721     }
1722 
1723     gen_code_buf = tcg_ctx->code_gen_ptr;
1724     tb->tc.ptr = gen_code_buf;
1725     tb->pc = pc;
1726     tb->cs_base = cs_base;
1727     tb->flags = flags;
1728     tb->cflags = cflags;
1729     tb->orig_tb = NULL;
1730     tb->trace_vcpu_dstate = *cpu->trace_dstate;
1731     tcg_ctx->tb_cflags = cflags;
1732  tb_overflow:
1733 
1734 #ifdef CONFIG_PROFILER
1735     /* includes aborted translations because of exceptions */
1736     qatomic_set(&prof->tb_count1, prof->tb_count1 + 1);
1737     ti = profile_getclock();
1738 #endif
1739 
1740     tcg_func_start(tcg_ctx);
1741 
1742     tcg_ctx->cpu = env_cpu(env);
1743     gen_intermediate_code(cpu, tb, max_insns);
1744     tcg_ctx->cpu = NULL;
1745 
1746     trace_translate_block(tb, tb->pc, tb->tc.ptr);
1747 
1748     /* generate machine code */
1749     tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
1750     tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID;
1751     tcg_ctx->tb_jmp_reset_offset = tb->jmp_reset_offset;
1752     if (TCG_TARGET_HAS_direct_jump) {
1753         tcg_ctx->tb_jmp_insn_offset = tb->jmp_target_arg;
1754         tcg_ctx->tb_jmp_target_addr = NULL;
1755     } else {
1756         tcg_ctx->tb_jmp_insn_offset = NULL;
1757         tcg_ctx->tb_jmp_target_addr = tb->jmp_target_arg;
1758     }
1759 
1760 #ifdef CONFIG_PROFILER
1761     qatomic_set(&prof->tb_count, prof->tb_count + 1);
1762     qatomic_set(&prof->interm_time,
1763                 prof->interm_time + profile_getclock() - ti);
1764     ti = profile_getclock();
1765 #endif
1766 
1767     gen_code_size = tcg_gen_code(tcg_ctx, tb);
1768     if (unlikely(gen_code_size < 0)) {
1769         switch (gen_code_size) {
1770         case -1:
1771             /*
1772              * Overflow of code_gen_buffer, or the current slice of it.
1773              *
1774              * TODO: We don't need to re-do gen_intermediate_code, nor
1775              * should we re-do the tcg optimization currently hidden
1776              * inside tcg_gen_code.  All that should be required is to
1777              * flush the TBs, allocate a new TB, re-initialize it per
1778              * above, and re-do the actual code generation.
1779              */
1780             goto buffer_overflow;
1781 
1782         case -2:
1783             /*
1784              * The code generated for the TranslationBlock is too large.
1785              * The maximum size allowed by the unwind info is 64k.
1786              * There may be stricter constraints from relocations
1787              * in the tcg backend.
1788              *
1789              * Try again with half as many insns as we attempted this time.
1790              * If a single insn overflows, there's a bug somewhere...
1791              */
1792             max_insns = tb->icount;
1793             assert(max_insns > 1);
1794             max_insns /= 2;
1795             goto tb_overflow;
1796 
1797         default:
1798             g_assert_not_reached();
1799         }
1800     }
1801     search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
1802     if (unlikely(search_size < 0)) {
1803         goto buffer_overflow;
1804     }
1805     tb->tc.size = gen_code_size;
1806 
1807 #ifdef CONFIG_PROFILER
1808     qatomic_set(&prof->code_time, prof->code_time + profile_getclock() - ti);
1809     qatomic_set(&prof->code_in_len, prof->code_in_len + tb->size);
1810     qatomic_set(&prof->code_out_len, prof->code_out_len + gen_code_size);
1811     qatomic_set(&prof->search_out_len, prof->search_out_len + search_size);
1812 #endif
1813 
1814 #ifdef DEBUG_DISAS
1815     if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
1816         qemu_log_in_addr_range(tb->pc)) {
1817         FILE *logfile = qemu_log_lock();
1818         int code_size, data_size = 0;
1819         size_t chunk_start;
1820         int insn = 0;
1821 
1822         if (tcg_ctx->data_gen_ptr) {
1823             code_size = tcg_ctx->data_gen_ptr - tb->tc.ptr;
1824             data_size = gen_code_size - code_size;
1825         } else {
1826             code_size = gen_code_size;
1827         }
1828 
1829         /* Dump header and the first instruction */
1830         qemu_log("OUT: [size=%d]\n", gen_code_size);
1831         qemu_log("  -- guest addr 0x" TARGET_FMT_lx " + tb prologue\n",
1832                  tcg_ctx->gen_insn_data[insn][0]);
1833         chunk_start = tcg_ctx->gen_insn_end_off[insn];
1834         log_disas(tb->tc.ptr, chunk_start);
1835 
1836         /*
1837          * Dump each instruction chunk, wrapping up empty chunks into
1838          * the next instruction. The whole array is offset so the
1839          * first entry is the beginning of the 2nd instruction.
1840          */
1841         while (insn < tb->icount) {
1842             size_t chunk_end = tcg_ctx->gen_insn_end_off[insn];
1843             if (chunk_end > chunk_start) {
1844                 qemu_log("  -- guest addr 0x" TARGET_FMT_lx "\n",
1845                          tcg_ctx->gen_insn_data[insn][0]);
1846                 log_disas(tb->tc.ptr + chunk_start, chunk_end - chunk_start);
1847                 chunk_start = chunk_end;
1848             }
1849             insn++;
1850         }
1851 
1852         if (chunk_start < code_size) {
1853             qemu_log("  -- tb slow paths + alignment\n");
1854             log_disas(tb->tc.ptr + chunk_start, code_size - chunk_start);
1855         }
1856 
1857         /* Finally dump any data we may have after the block */
1858         if (data_size) {
1859             int i;
1860             qemu_log("  data: [size=%d]\n", data_size);
1861             for (i = 0; i < data_size; i += sizeof(tcg_target_ulong)) {
1862                 if (sizeof(tcg_target_ulong) == 8) {
1863                     qemu_log("0x%08" PRIxPTR ":  .quad  0x%016" PRIx64 "\n",
1864                              (uintptr_t)tcg_ctx->data_gen_ptr + i,
1865                              *(uint64_t *)(tcg_ctx->data_gen_ptr + i));
1866                 } else {
1867                     qemu_log("0x%08" PRIxPTR ":  .long  0x%08x\n",
1868                              (uintptr_t)tcg_ctx->data_gen_ptr + i,
1869                              *(uint32_t *)(tcg_ctx->data_gen_ptr + i));
1870                 }
1871             }
1872         }
1873         qemu_log("\n");
1874         qemu_log_flush();
1875         qemu_log_unlock(logfile);
1876     }
1877 #endif
1878 
1879     qatomic_set(&tcg_ctx->code_gen_ptr, (void *)
1880         ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
1881                  CODE_GEN_ALIGN));
1882 
1883     /* init jump list */
1884     qemu_spin_init(&tb->jmp_lock);
1885     tb->jmp_list_head = (uintptr_t)NULL;
1886     tb->jmp_list_next[0] = (uintptr_t)NULL;
1887     tb->jmp_list_next[1] = (uintptr_t)NULL;
1888     tb->jmp_dest[0] = (uintptr_t)NULL;
1889     tb->jmp_dest[1] = (uintptr_t)NULL;
1890 
1891     /* init original jump addresses which have been set during tcg_gen_code() */
1892     if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
1893         tb_reset_jump(tb, 0);
1894     }
1895     if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
1896         tb_reset_jump(tb, 1);
1897     }
1898 
1899     /* check next page if needed */
1900     virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
1901     phys_page2 = -1;
1902     if ((pc & TARGET_PAGE_MASK) != virt_page2) {
1903         phys_page2 = get_page_addr_code(env, virt_page2);
1904     }
1905     /*
1906      * No explicit memory barrier is required -- tb_link_page() makes the
1907      * TB visible in a consistent state.
1908      */
1909     existing_tb = tb_link_page(tb, phys_pc, phys_page2);
1910     /* if the TB already exists, discard what we just translated */
1911     if (unlikely(existing_tb != tb)) {
1912         uintptr_t orig_aligned = (uintptr_t)gen_code_buf;
1913 
1914         orig_aligned -= ROUND_UP(sizeof(*tb), qemu_icache_linesize);
1915         qatomic_set(&tcg_ctx->code_gen_ptr, (void *)orig_aligned);
1916         tb_destroy(tb);
1917         return existing_tb;
1918     }
1919     tcg_tb_insert(tb);
1920     return tb;
1921 }
1922 
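/*
 * Typical call site (a sketch of the pattern in accel/tcg/cpu-exec.c,
 * not a verbatim quote):
 *
 *     mmap_lock();
 *     tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
 *     mmap_unlock();
 *
 * Taking mmap_lock satisfies assert_memory_lock() in user-mode builds;
 * in softmmu builds both the lock and the assertion are no-ops.
 */
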
1923 /*
1924  * @p must be non-NULL.
1925  * user-mode: call with mmap_lock held.
1926  * !user-mode: call with all @pages locked.
1927  */
1928 static void
1929 tb_invalidate_phys_page_range__locked(struct page_collection *pages,
1930                                       PageDesc *p, tb_page_addr_t start,
1931                                       tb_page_addr_t end,
1932                                       uintptr_t retaddr)
1933 {
1934     TranslationBlock *tb;
1935     tb_page_addr_t tb_start, tb_end;
1936     int n;
1937 #ifdef TARGET_HAS_PRECISE_SMC
1938     CPUState *cpu = current_cpu;
1939     CPUArchState *env = NULL;
1940     bool current_tb_not_found = retaddr != 0;
1941     bool current_tb_modified = false;
1942     TranslationBlock *current_tb = NULL;
1943     target_ulong current_pc = 0;
1944     target_ulong current_cs_base = 0;
1945     uint32_t current_flags = 0;
1946 #endif /* TARGET_HAS_PRECISE_SMC */
1947 
1948     assert_page_locked(p);
1949 
1950 #if defined(TARGET_HAS_PRECISE_SMC)
1951     if (cpu != NULL) {
1952         env = cpu->env_ptr;
1953     }
1954 #endif
1955 
1956     /* we remove all the TBs in the range [start, end[ */
1957     /* XXX: see if in some cases it could be faster to invalidate all
1958        the code */
1959     PAGE_FOR_EACH_TB(p, tb, n) {
1960         assert_page_locked(p);
1961         /* NOTE: this is subtle as a TB may span two physical pages */
1962         if (n == 0) {
1963             /* NOTE: tb_end may be after the end of the page, but
1964                it is not a problem */
1965             tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1966             tb_end = tb_start + tb->size;
1967         } else {
1968             tb_start = tb->page_addr[1];
1969             tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1970         }
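        /*
         * Worked example (illustrative): with 4 KiB pages, a TB whose pc
         * is at page offset 0xffc with size 8 spans [0xffc, 0x1004).
         * For n == 0, tb_start = page_addr[0] + 0xffc and tb_end runs one
         * past the page boundary (harmless, per the NOTE above); for
         * n == 1, tb_start = page_addr[1] and tb_end = page_addr[1] + 4,
         * since (pc + size) & ~TARGET_PAGE_MASK == 4.
         */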
1971         if (!(tb_end <= start || tb_start >= end)) {
1972 #ifdef TARGET_HAS_PRECISE_SMC
1973             if (current_tb_not_found) {
1974                 current_tb_not_found = false;
1975                 /* now we have a real cpu fault */
1976                 current_tb = tcg_tb_lookup(retaddr);
1977             }
1978             if (current_tb == tb &&
1979                 (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
1980                 /*
1981                  * If we are modifying the current TB, we must stop
1982                  * its execution. We could be more precise by checking
1983                  * that the modification is after the current PC, but it
1984                  * would require a specialized function to partially
1985                  * restore the CPU state.
1986                  */
1987                 current_tb_modified = true;
1988                 cpu_restore_state_from_tb(cpu, current_tb, retaddr, true);
1989                 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1990                                      &current_flags);
1991             }
1992 #endif /* TARGET_HAS_PRECISE_SMC */
1993             tb_phys_invalidate__locked(tb);
1994         }
1995     }
1996 #if !defined(CONFIG_USER_ONLY)
1997     /* if no code remaining, no need to continue to use slow writes */
1998     if (!p->first_tb) {
1999         invalidate_page_bitmap(p);
2000         tlb_unprotect_code(start);
2001     }
2002 #endif
2003 #ifdef TARGET_HAS_PRECISE_SMC
2004     if (current_tb_modified) {
2005         page_collection_unlock(pages);
2006         /* Force execution of one insn next time.  */
2007         cpu->cflags_next_tb = 1 | curr_cflags();
2008         mmap_unlock();
2009         cpu_loop_exit_noexc(cpu);
2010     }
2011 #endif
2012 }
2013 
2014 /*
2015  * Invalidate all TBs which intersect with the target physical address range
2016  * [start;end[. NOTE: start and end must refer to the *same* physical page.
2017  * If the write that triggers the invalidation comes from the CPU that
2018  * is currently executing, the precise-SMC handling makes the virtual
2019  * CPU exit the current TB.
2020  *
2021  * Called with mmap_lock held for user-mode emulation
2022  */
2023 void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end)
2024 {
2025     struct page_collection *pages;
2026     PageDesc *p;
2027 
2028     assert_memory_lock();
2029 
2030     p = page_find(start >> TARGET_PAGE_BITS);
2031     if (p == NULL) {
2032         return;
2033     }
2034     pages = page_collection_lock(start, end);
2035     tb_invalidate_phys_page_range__locked(pages, p, start, end, 0);
2036     page_collection_unlock(pages);
2037 }
2038 
2039 /*
2040  * Invalidate all TBs which intersect with the target physical address range
2041  * [start;end[. NOTE: start and end may refer to *different* physical pages.
2042  * If the write that triggers the invalidation comes from the CPU that
2043  * is currently executing, the precise-SMC handling makes the virtual
2044  * CPU exit the current TB.
2045  *
2046  * Called with mmap_lock held for user-mode emulation.
2047  */
2048 #ifdef CONFIG_SOFTMMU
2049 void tb_invalidate_phys_range(ram_addr_t start, ram_addr_t end)
2050 #else
2051 void tb_invalidate_phys_range(target_ulong start, target_ulong end)
2052 #endif
2053 {
2054     struct page_collection *pages;
2055     tb_page_addr_t next;
2056 
2057     assert_memory_lock();
2058 
2059     pages = page_collection_lock(start, end);
2060     for (next = (start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2061          start < end;
2062          start = next, next += TARGET_PAGE_SIZE) {
2063         PageDesc *pd = page_find(start >> TARGET_PAGE_BITS);
2064         tb_page_addr_t bound = MIN(next, end);
2065 
2066         if (pd == NULL) {
2067             continue;
2068         }
2069         tb_invalidate_phys_page_range__locked(pages, pd, start, bound, 0);
2070     }
2071     page_collection_unlock(pages);
2072 }
2073 
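/*
 * Worked example for the loop above: with 4 KiB pages, invalidating
 * [0x1ff0, 0x2010) makes two __locked calls, one for [0x1ff0, 0x2000)
 * and one for [0x2000, 0x2010), each clamped by bound = MIN(next, end).
 */
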
2074 #ifdef CONFIG_SOFTMMU
2075 /* len must be <= 8 and start must be a multiple of len.
2076  * Called via softmmu_template.h when code areas are written to with
2077  * iothread mutex not held.
2078  *
2079  * Call with all @pages in the range [@start, @start + len[ locked.
2080  */
2081 void tb_invalidate_phys_page_fast(struct page_collection *pages,
2082                                   tb_page_addr_t start, int len,
2083                                   uintptr_t retaddr)
2084 {
2085     PageDesc *p;
2086 
2087     assert_memory_lock();
2088 
2089     p = page_find(start >> TARGET_PAGE_BITS);
2090     if (!p) {
2091         return;
2092     }
2093 
2094     assert_page_locked(p);
2095     if (!p->code_bitmap &&
2096         ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
2097         build_page_bitmap(p);
2098     }
2099     if (p->code_bitmap) {
2100         unsigned int nr;
2101         unsigned long b;
2102 
2103         nr = start & ~TARGET_PAGE_MASK;
2104         b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
2105         if (b & ((1 << len) - 1)) {
2106             goto do_invalidate;
2107         }
2108     } else {
2109     do_invalidate:
2110         tb_invalidate_phys_page_range__locked(pages, p, start, start + len,
2111                                               retaddr);
2112     }
2113 }
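
/*
 * Bitmap example (illustrative): code_bitmap holds one bit per byte of
 * the page.  For an 8-byte write at page offset 0x40, nr == 0x40, the
 * word p->code_bitmap[BIT_WORD(0x40)] is shifted right by
 * 0x40 % BITS_PER_LONG, and the low 8 bits are tested; the invalidate
 * path is taken only if one of those bytes holds translated code.
 */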
2114 #else
2115 /* Called with mmap_lock held. If pc is not 0 then it indicates the
2116  * host PC of the faulting store instruction that caused this invalidate.
2117  * Returns true if the caller needs to abort execution of the current
2118  * TB (because it was modified by this store and the guest CPU has
2119  * precise-SMC semantics).
2120  */
2121 static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
2122 {
2123     TranslationBlock *tb;
2124     PageDesc *p;
2125     int n;
2126 #ifdef TARGET_HAS_PRECISE_SMC
2127     TranslationBlock *current_tb = NULL;
2128     CPUState *cpu = current_cpu;
2129     CPUArchState *env = NULL;
2130     int current_tb_modified = 0;
2131     target_ulong current_pc = 0;
2132     target_ulong current_cs_base = 0;
2133     uint32_t current_flags = 0;
2134 #endif
2135 
2136     assert_memory_lock();
2137 
2138     addr &= TARGET_PAGE_MASK;
2139     p = page_find(addr >> TARGET_PAGE_BITS);
2140     if (!p) {
2141         return false;
2142     }
2143 
2144 #ifdef TARGET_HAS_PRECISE_SMC
2145     if (p->first_tb && pc != 0) {
2146         current_tb = tcg_tb_lookup(pc);
2147     }
2148     if (cpu != NULL) {
2149         env = cpu->env_ptr;
2150     }
2151 #endif
2152     assert_page_locked(p);
2153     PAGE_FOR_EACH_TB(p, tb, n) {
2154 #ifdef TARGET_HAS_PRECISE_SMC
2155         if (current_tb == tb &&
2156             (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
2157             /*
2158              * If we are modifying the current TB, we must stop its
2159              * execution. We could be more precise by checking that the
2160              * modification is after the current PC, but it would require
2161              * a specialized function to partially restore the CPU state.
2162              */
2163             current_tb_modified = 1;
2164             cpu_restore_state_from_tb(cpu, current_tb, pc, true);
2165             cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
2166                                  &current_flags);
2167         }
2168 #endif /* TARGET_HAS_PRECISE_SMC */
2169         tb_phys_invalidate(tb, addr);
2170     }
2171     p->first_tb = (uintptr_t)NULL;
2172 #ifdef TARGET_HAS_PRECISE_SMC
2173     if (current_tb_modified) {
2174         /* Force execution of one insn next time.  */
2175         cpu->cflags_next_tb = 1 | curr_cflags();
2176         return true;
2177     }
2178 #endif
2179 
2180     return false;
2181 }
2182 #endif
2183 
2184 /* user-mode: call with mmap_lock held */
2185 void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr)
2186 {
2187     TranslationBlock *tb;
2188 
2189     assert_memory_lock();
2190 
2191     tb = tcg_tb_lookup(retaddr);
2192     if (tb) {
2193         /* We can use retranslation to find the PC.  */
2194         cpu_restore_state_from_tb(cpu, tb, retaddr, true);
2195         tb_phys_invalidate(tb, -1);
2196     } else {
2197         /* The exception probably happened in a helper.  The CPU state should
2198            have been saved before calling it. Fetch the PC from there.  */
2199         CPUArchState *env = cpu->env_ptr;
2200         target_ulong pc, cs_base;
2201         tb_page_addr_t addr;
2202         uint32_t flags;
2203 
2204         cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
2205         addr = get_page_addr_code(env, pc);
2206         if (addr != -1) {
2207             tb_invalidate_phys_range(addr, addr + 1);
2208         }
2209     }
2210 }
2211 
2212 #ifndef CONFIG_USER_ONLY
2213 /* In deterministic (icount) execution mode, instructions that perform
2214  * device I/O must be the last instruction in their TB.
2215  *
2216  * Called by softmmu_template.h, with iothread mutex not held.
2217  */
2218 void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
2219 {
2220 #if defined(TARGET_MIPS) || defined(TARGET_SH4)
2221     CPUArchState *env = cpu->env_ptr;
2222 #endif
2223     TranslationBlock *tb;
2224     uint32_t n;
2225 
2226     tb = tcg_tb_lookup(retaddr);
2227     if (!tb) {
2228         cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
2229                   (void *)retaddr);
2230     }
2231     cpu_restore_state_from_tb(cpu, tb, retaddr, true);
2232 
2233     /* On MIPS and SH, delay slot instructions can only be restarted if
2234        they were already the first instruction in the TB.  If this is not
2235        the first instruction in a TB then re-execute the preceding
2236        branch.  */
2237     n = 1;
2238 #if defined(TARGET_MIPS)
2239     if ((env->hflags & MIPS_HFLAG_BMASK) != 0
2240         && env->active_tc.PC != tb->pc) {
2241         env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
2242         cpu_neg(cpu)->icount_decr.u16.low++;
2243         env->hflags &= ~MIPS_HFLAG_BMASK;
2244         n = 2;
2245     }
2246 #elif defined(TARGET_SH4)
2247     if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
2248         && env->pc != tb->pc) {
2249         env->pc -= 2;
2250         cpu_neg(cpu)->icount_decr.u16.low++;
2251         env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
2252         n = 2;
2253     }
2254 #endif
2255 
2256     /* Generate a new TB executing the I/O insn.  */
2257     cpu->cflags_next_tb = curr_cflags() | CF_LAST_IO | n;
2258 
2259     if (tb_cflags(tb) & CF_NOCACHE) {
2260         if (tb->orig_tb) {
2261             /* Invalidate original TB if this TB was generated in
2262              * cpu_exec_nocache() */
2263             tb_phys_invalidate(tb->orig_tb, -1);
2264         }
2265         tcg_tb_remove(tb);
2266         tb_destroy(tb);
2267     }
2268 
2269     /*
2270      * TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
2271      * the first in the TB) then we end up generating a whole new TB and
2272      * repeating the fault, which is horribly inefficient.  Better would
2273      * be to execute just this insn uncached, or generate a second new TB.
2274      */
2275     cpu_loop_exit_noexc(cpu);
2276 }
2277 
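/*
 * Note on the cflags computed above: CF_LAST_IO tells the translator the
 * final instruction may perform I/O, while n (1 or 2) in the low
 * CF_COUNT_MASK bits bounds the new TB so that it contains only the I/O
 * insn, preceded on MIPS/SH4 by the re-executed branch when needed.
 */
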
2278 static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
2279 {
2280     unsigned int i, i0 = tb_jmp_cache_hash_page(page_addr);
2281 
2282     for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
2283         qatomic_set(&cpu->tb_jmp_cache[i0 + i], NULL);
2284     }
2285 }
2286 
2287 void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
2288 {
2289     /* Discard jump cache entries for any tb which might overlap the
2290        flushed page, including TBs that start on the preceding page.  */
2291     tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
2292     tb_jmp_cache_clear_page(cpu, addr);
2293 }
2294 
2295 static void print_qht_statistics(struct qht_stats hst)
2296 {
2297     uint32_t hgram_opts;
2298     size_t hgram_bins;
2299     char *hgram;
2300 
2301     if (!hst.head_buckets) {
2302         return;
2303     }
2304     qemu_printf("TB hash buckets     %zu/%zu (%0.2f%% head buckets used)\n",
2305                 hst.used_head_buckets, hst.head_buckets,
2306                 (double)hst.used_head_buckets / hst.head_buckets * 100);
2307 
2308     hgram_opts =  QDIST_PR_BORDER | QDIST_PR_LABELS;
2309     hgram_opts |= QDIST_PR_100X   | QDIST_PR_PERCENT;
2310     if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) {
2311         hgram_opts |= QDIST_PR_NODECIMAL;
2312     }
2313     hgram = qdist_pr(&hst.occupancy, 10, hgram_opts);
2314     qemu_printf("TB hash occupancy   %0.2f%% avg chain occ. Histogram: %s\n",
2315                 qdist_avg(&hst.occupancy) * 100, hgram);
2316     g_free(hgram);
2317 
2318     hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
2319     hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain);
2320     if (hgram_bins > 10) {
2321         hgram_bins = 10;
2322     } else {
2323         hgram_bins = 0;
2324         hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE;
2325     }
2326     hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts);
2327     qemu_printf("TB hash avg chain   %0.3f buckets. Histogram: %s\n",
2328                 qdist_avg(&hst.chain), hgram);
2329     g_free(hgram);
2330 }
2331 
2332 struct tb_tree_stats {
2333     size_t nb_tbs;
2334     size_t host_size;
2335     size_t target_size;
2336     size_t max_target_size;
2337     size_t direct_jmp_count;
2338     size_t direct_jmp2_count;
2339     size_t cross_page;
2340 };
2341 
2342 static gboolean tb_tree_stats_iter(gpointer key, gpointer value, gpointer data)
2343 {
2344     const TranslationBlock *tb = value;
2345     struct tb_tree_stats *tst = data;
2346 
2347     tst->nb_tbs++;
2348     tst->host_size += tb->tc.size;
2349     tst->target_size += tb->size;
2350     if (tb->size > tst->max_target_size) {
2351         tst->max_target_size = tb->size;
2352     }
2353     if (tb->page_addr[1] != -1) {
2354         tst->cross_page++;
2355     }
2356     if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
2357         tst->direct_jmp_count++;
2358         if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
2359             tst->direct_jmp2_count++;
2360         }
2361     }
2362     return false;
2363 }
2364 
2365 void dump_exec_info(void)
2366 {
2367     struct tb_tree_stats tst = {};
2368     struct qht_stats hst;
2369     size_t nb_tbs, flush_full, flush_part, flush_elide;
2370 
2371     tcg_tb_foreach(tb_tree_stats_iter, &tst);
2372     nb_tbs = tst.nb_tbs;
2373     /* XXX: avoid using doubles ? */
2374     qemu_printf("Translation buffer state:\n");
2375     /*
2376      * Report total code size including the padding and TB structs;
2377      * otherwise users might think "-tb-size" is not honoured.
2378      * For avg host size we use the precise numbers from tb_tree_stats though.
2379      */
2380     qemu_printf("gen code size       %zu/%zu\n",
2381                 tcg_code_size(), tcg_code_capacity());
2382     qemu_printf("TB count            %zu\n", nb_tbs);
2383     qemu_printf("TB avg target size  %zu max=%zu bytes\n",
2384                 nb_tbs ? tst.target_size / nb_tbs : 0,
2385                 tst.max_target_size);
2386     qemu_printf("TB avg host size    %zu bytes (expansion ratio: %0.1f)\n",
2387                 nb_tbs ? tst.host_size / nb_tbs : 0,
2388                 tst.target_size ? (double)tst.host_size / tst.target_size : 0);
2389     qemu_printf("cross page TB count %zu (%zu%%)\n", tst.cross_page,
2390                 nb_tbs ? (tst.cross_page * 100) / nb_tbs : 0);
2391     qemu_printf("direct jump count   %zu (%zu%%) (2 jumps=%zu %zu%%)\n",
2392                 tst.direct_jmp_count,
2393                 nb_tbs ? (tst.direct_jmp_count * 100) / nb_tbs : 0,
2394                 tst.direct_jmp2_count,
2395                 nb_tbs ? (tst.direct_jmp2_count * 100) / nb_tbs : 0);
2396 
2397     qht_statistics_init(&tb_ctx.htable, &hst);
2398     print_qht_statistics(hst);
2399     qht_statistics_destroy(&hst);
2400 
2401     qemu_printf("\nStatistics:\n");
2402     qemu_printf("TB flush count      %u\n",
2403                 qatomic_read(&tb_ctx.tb_flush_count));
2404     qemu_printf("TB invalidate count %zu\n",
2405                 tcg_tb_phys_invalidate_count());
2406 
2407     tlb_flush_counts(&flush_full, &flush_part, &flush_elide);
2408     qemu_printf("TLB full flushes    %zu\n", flush_full);
2409     qemu_printf("TLB partial flushes %zu\n", flush_part);
2410     qemu_printf("TLB elided flushes  %zu\n", flush_elide);
2411     tcg_dump_info();
2412 }
2413 
2414 void dump_opcount_info(void)
2415 {
2416     tcg_dump_op_count();
2417 }
2418 
2419 #else /* CONFIG_USER_ONLY */
2420 
2421 void cpu_interrupt(CPUState *cpu, int mask)
2422 {
2423     g_assert(qemu_mutex_iothread_locked());
2424     cpu->interrupt_request |= mask;
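    /*
     * Writing -1 to the high half makes the combined 32-bit icount_decr
     * value negative, so the decrement-and-test emitted at the head of
     * each TB takes the exit path and the interrupt is noticed promptly.
     */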
2425     qatomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1);
2426 }
2427 
2428 /*
2429  * Walks guest process memory "regions" one by one
2430  * and calls callback function 'fn' for each region.
2431  */
2432 struct walk_memory_regions_data {
2433     walk_memory_regions_fn fn;
2434     void *priv;
2435     target_ulong start;
2436     int prot;
2437 };
2438 
2439 static int walk_memory_regions_end(struct walk_memory_regions_data *data,
2440                                    target_ulong end, int new_prot)
2441 {
2442     if (data->start != -1u) {
2443         int rc = data->fn(data->priv, data->start, end, data->prot);
2444         if (rc != 0) {
2445             return rc;
2446         }
2447     }
2448 
2449     data->start = (new_prot ? end : -1u);
2450     data->prot = new_prot;
2451 
2452     return 0;
2453 }
2454 
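/*
 * Worked example: three consecutive mapped pages with protections r-x,
 * r-x, rw- produce two callbacks: fn(priv, p0, p2, r-x) where the
 * protection changes, and fn(priv, p2, p3, rw-) once the unmapped page
 * after the run is reached.
 */
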
2455 static int walk_memory_regions_1(struct walk_memory_regions_data *data,
2456                                  target_ulong base, int level, void **lp)
2457 {
2458     target_ulong pa;
2459     int i, rc;
2460 
2461     if (*lp == NULL) {
2462         return walk_memory_regions_end(data, base, 0);
2463     }
2464 
2465     if (level == 0) {
2466         PageDesc *pd = *lp;
2467 
2468         for (i = 0; i < V_L2_SIZE; ++i) {
2469             int prot = pd[i].flags;
2470 
2471             pa = base | (i << TARGET_PAGE_BITS);
2472             if (prot != data->prot) {
2473                 rc = walk_memory_regions_end(data, pa, prot);
2474                 if (rc != 0) {
2475                     return rc;
2476                 }
2477             }
2478         }
2479     } else {
2480         void **pp = *lp;
2481 
2482         for (i = 0; i < V_L2_SIZE; ++i) {
2483             pa = base | ((target_ulong)i <<
2484                 (TARGET_PAGE_BITS + V_L2_BITS * level));
2485             rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2486             if (rc != 0) {
2487                 return rc;
2488             }
2489         }
2490     }
2491 
2492     return 0;
2493 }
2494 
2495 int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2496 {
2497     struct walk_memory_regions_data data;
2498     uintptr_t i, l1_sz = v_l1_size;
2499 
2500     data.fn = fn;
2501     data.priv = priv;
2502     data.start = -1u;
2503     data.prot = 0;
2504 
2505     for (i = 0; i < l1_sz; i++) {
2506         target_ulong base = i << (v_l1_shift + TARGET_PAGE_BITS);
2507         int rc = walk_memory_regions_1(&data, base, v_l2_levels, l1_map + i);
2508         if (rc != 0) {
2509             return rc;
2510         }
2511     }
2512 
2513     return walk_memory_regions_end(&data, 0, 0);
2514 }
2515 
2516 static int dump_region(void *priv, target_ulong start,
2517     target_ulong end, unsigned long prot)
2518 {
2519     FILE *f = (FILE *)priv;
2520 
2521     (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx
2522         " "TARGET_FMT_lx" %c%c%c\n",
2523         start, end, end - start,
2524         ((prot & PAGE_READ) ? 'r' : '-'),
2525         ((prot & PAGE_WRITE) ? 'w' : '-'),
2526         ((prot & PAGE_EXEC) ? 'x' : '-'));
2527 
2528     return 0;
2529 }
2530 
2531 /* dump memory mappings */
2532 void page_dump(FILE *f)
2533 {
2534     const int length = sizeof(target_ulong) * 2;
2535     (void) fprintf(f, "%-*s %-*s %-*s %s\n",
2536             length, "start", length, "end", length, "size", "prot");
2537     walk_memory_regions(f, dump_region);
2538 }
2539 
2540 int page_get_flags(target_ulong address)
2541 {
2542     PageDesc *p;
2543 
2544     p = page_find(address >> TARGET_PAGE_BITS);
2545     if (!p) {
2546         return 0;
2547     }
2548     return p->flags;
2549 }
2550 
2551 /* Modify the flags of a page and invalidate the code if necessary.
2552    The flag PAGE_WRITE_ORG is set automatically whenever PAGE_WRITE
2553    is set.  The mmap_lock should already be held.  */
2554 void page_set_flags(target_ulong start, target_ulong end, int flags)
2555 {
2556     target_ulong addr, len;
2557 
2558     /* This function should never be called with addresses outside the
2559        guest address space.  If this assert fires, it probably indicates
2560        a missing call to h2g_valid.  */
2561     assert(end - 1 <= GUEST_ADDR_MAX);
2562     assert(start < end);
2563     assert_memory_lock();
2564 
2565     start = start & TARGET_PAGE_MASK;
2566     end = TARGET_PAGE_ALIGN(end);
2567 
2568     if (flags & PAGE_WRITE) {
2569         flags |= PAGE_WRITE_ORG;
2570     }
2571 
2572     for (addr = start, len = end - start;
2573          len != 0;
2574          len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2575         PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2576 
2577         /* If the page is being made writable and was previously
2578            write-protected for translated code, invalidate that code.  */
2579         if (!(p->flags & PAGE_WRITE) &&
2580             (flags & PAGE_WRITE) &&
2581             p->first_tb) {
2582             tb_invalidate_phys_page(addr, 0);
2583         }
2584         p->flags = flags;
2585     }
2586 }
2587 
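/*
 * Illustrative call (hypothetical values): an mmap emulation layer would
 * mark a freshly mapped guest range along the lines of
 *
 *     page_set_flags(start, start + len, prot | PAGE_VALID);
 *
 * where prot carries the PAGE_READ/PAGE_WRITE/PAGE_EXEC bits.
 */
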
2588 int page_check_range(target_ulong start, target_ulong len, int flags)
2589 {
2590     PageDesc *p;
2591     target_ulong end;
2592     target_ulong addr;
2593 
2594     /* This function should never be called with addresses outside the
2595        guest address space.  If this assert fires, it probably indicates
2596        a missing call to h2g_valid.  */
2597     if (TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS) {
2598         assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2599     }
2600 
2601     if (len == 0) {
2602         return 0;
2603     }
2604     if (start + len - 1 < start) {
2605         /* We've wrapped around.  */
2606         return -1;
2607     }
2608 
2609     /* must do this before we lose bits in the masking step below */
2610     end = TARGET_PAGE_ALIGN(start + len);
2611     start = start & TARGET_PAGE_MASK;
2612 
2613     for (addr = start, len = end - start;
2614          len != 0;
2615          len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2616         p = page_find(addr >> TARGET_PAGE_BITS);
2617         if (!p) {
2618             return -1;
2619         }
2620         if (!(p->flags & PAGE_VALID)) {
2621             return -1;
2622         }
2623 
2624         if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
2625             return -1;
2626         }
2627         if (flags & PAGE_WRITE) {
2628             if (!(p->flags & PAGE_WRITE_ORG)) {
2629                 return -1;
2630             }
2631             /* unprotect the page if it was put read-only because it
2632                contains translated code */
2633             if (!(p->flags & PAGE_WRITE)) {
2634                 if (!page_unprotect(addr, 0)) {
2635                     return -1;
2636                 }
2637             }
2638         }
2639     }
2640     return 0;
2641 }
2642 
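/*
 * Illustrative use (hypothetical caller): validate a guest buffer before
 * reading from it:
 *
 *     if (page_check_range(guest_addr, size, PAGE_READ) < 0) {
 *         return -TARGET_EFAULT;
 *     }
 */
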
2643 /* called from signal handler: invalidate the code and unprotect the
2644  * page. Return 0 if the fault was not handled, 1 if it was handled,
2645  * and 2 if it was handled but the caller must cause the TB to be
2646  * immediately exited. (We can only return 2 if the 'pc' argument is
2647  * non-zero.)
2648  */
2649 int page_unprotect(target_ulong address, uintptr_t pc)
2650 {
2651     unsigned int prot;
2652     bool current_tb_invalidated;
2653     PageDesc *p;
2654     target_ulong host_start, host_end, addr;
2655 
2656     /* Technically this isn't safe inside a signal handler.  However we
2657        know this only ever happens in a synchronous SEGV handler, so in
2658        practice it seems to be ok.  */
2659     mmap_lock();
2660 
2661     p = page_find(address >> TARGET_PAGE_BITS);
2662     if (!p) {
2663         mmap_unlock();
2664         return 0;
2665     }
2666 
2667     /* If the page was originally writable (PAGE_WRITE_ORG), then we
2668        can restore its write permission. */
2669     if (p->flags & PAGE_WRITE_ORG) {
2670         current_tb_invalidated = false;
2671         if (p->flags & PAGE_WRITE) {
2672             /* If the page is actually marked WRITE then assume this is because
2673              * this thread raced with another one which got here first and
2674              * set the page to PAGE_WRITE and did the TB invalidate for us.
2675              */
2676 #ifdef TARGET_HAS_PRECISE_SMC
2677             TranslationBlock *current_tb = tcg_tb_lookup(pc);
2678             if (current_tb) {
2679                 current_tb_invalidated = tb_cflags(current_tb) & CF_INVALID;
2680             }
2681 #endif
2682         } else {
2683             host_start = address & qemu_host_page_mask;
2684             host_end = host_start + qemu_host_page_size;
2685 
2686             prot = 0;
2687             for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
2688                 p = page_find(addr >> TARGET_PAGE_BITS);
2689                 p->flags |= PAGE_WRITE;
2690                 prot |= p->flags;
2691 
2692                 /* and since the content will be modified, we must invalidate
2693                    the corresponding translated code. */
2694                 current_tb_invalidated |= tb_invalidate_phys_page(addr, pc);
2695 #ifdef CONFIG_USER_ONLY
2696                 if (DEBUG_TB_CHECK_GATE) {
2697                     tb_invalidate_check(addr);
2698                 }
2699 #endif
2700             }
2701             mprotect((void *)g2h(host_start), qemu_host_page_size,
2702                      prot & PAGE_BITS);
2703         }
2704         mmap_unlock();
2705         /* If current TB was invalidated return to main loop */
2706         return current_tb_invalidated ? 2 : 1;
2707     }
2708     mmap_unlock();
2709     return 0;
2710 }
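
/*
 * Sketch of the caller's side of the contract above (roughly what the
 * user-mode SEGV handler does; not a verbatim quote):
 *
 *     switch (page_unprotect(h2g(address), pc)) {
 *     case 0:  // not handled: deliver the signal to the guest
 *     case 1:  // handled: return and retry the faulting access
 *     case 2:  // handled, current TB invalidated: exit to the main loop
 *     }
 */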
2711 #endif /* CONFIG_USER_ONLY */
2712 
2713 /* This is a wrapper for common code that cannot use CONFIG_SOFTMMU */
2714 void tcg_flush_softmmu_tlb(CPUState *cs)
2715 {
2716 #ifdef CONFIG_SOFTMMU
2717     tlb_flush(cs);
2718 #endif
2719 }
2720