xref: /openbmc/qemu/accel/tcg/translate-all.c (revision faa9372c)
1 /*
2  *  Host code generation
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #ifdef _WIN32
20 #include <windows.h>
21 #endif
22 #include "qemu/osdep.h"
23 
24 
25 #include "qemu-common.h"
26 #define NO_CPU_IO_DEFS
27 #include "cpu.h"
28 #include "trace.h"
29 #include "disas/disas.h"
30 #include "exec/exec-all.h"
31 #include "tcg.h"
32 #if defined(CONFIG_USER_ONLY)
33 #include "qemu.h"
34 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
35 #include <sys/param.h>
36 #if __FreeBSD_version >= 700104
37 #define HAVE_KINFO_GETVMMAP
38 #define sigqueue sigqueue_freebsd  /* avoid redefinition */
39 #include <sys/proc.h>
40 #include <machine/profile.h>
41 #define _KERNEL
42 #include <sys/user.h>
43 #undef _KERNEL
44 #undef sigqueue
45 #include <libutil.h>
46 #endif
47 #endif
48 #else
49 #include "exec/address-spaces.h"
50 #endif
51 
52 #include "exec/cputlb.h"
53 #include "exec/tb-hash.h"
54 #include "translate-all.h"
55 #include "qemu/bitmap.h"
56 #include "qemu/error-report.h"
57 #include "qemu/timer.h"
58 #include "qemu/main-loop.h"
59 #include "exec/log.h"
60 #include "sysemu/cpus.h"
61 
62 /* #define DEBUG_TB_INVALIDATE */
63 /* #define DEBUG_TB_FLUSH */
64 /* perform various TB consistency checks */
65 /* #define DEBUG_TB_CHECK */
66 
67 #ifdef DEBUG_TB_INVALIDATE
68 #define DEBUG_TB_INVALIDATE_GATE 1
69 #else
70 #define DEBUG_TB_INVALIDATE_GATE 0
71 #endif
72 
73 #ifdef DEBUG_TB_FLUSH
74 #define DEBUG_TB_FLUSH_GATE 1
75 #else
76 #define DEBUG_TB_FLUSH_GATE 0
77 #endif
78 
79 #if !defined(CONFIG_USER_ONLY)
80 /* TB consistency checks only implemented for usermode emulation.  */
81 #undef DEBUG_TB_CHECK
82 #endif
83 
84 #ifdef DEBUG_TB_CHECK
85 #define DEBUG_TB_CHECK_GATE 1
86 #else
87 #define DEBUG_TB_CHECK_GATE 0
88 #endif
89 
90 /* Access to the various translation structures needs to be serialised
91  * via locks for consistency. This is automatic for SoftMMU-based system
92  * emulation due to its single-threaded nature. In user-mode emulation,
93  * access to the memory-related structures is protected by the
94  * mmap_lock.
95  */
96 #ifdef CONFIG_SOFTMMU
97 #define assert_memory_lock() tcg_debug_assert(have_tb_lock)
98 #else
99 #define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
100 #endif
101 
102 #define SMC_BITMAP_USE_THRESHOLD 10
103 
104 typedef struct PageDesc {
105     /* list of TBs intersecting this ram page */
106     uintptr_t first_tb;
107 #ifdef CONFIG_SOFTMMU
108     /* in order to optimize self-modifying code, we count the number
109        of writes to a given page so that we can switch to using a bitmap */
110     unsigned long *code_bitmap;
111     unsigned int code_write_count;
112 #else
113     unsigned long flags;
114 #endif
115 #ifndef CONFIG_USER_ONLY
116     QemuSpin lock;
117 #endif
118 } PageDesc;
119 
120 /**
121  * struct page_entry - page descriptor entry
122  * @pd:     pointer to the &struct PageDesc of the page this entry represents
123  * @index:  page index of the page
124  * @locked: whether the page is locked
125  *
126  * This struct helps us keep track of the locked state of a page, without
127  * bloating &struct PageDesc.
128  *
129  * A page lock protects accesses to all fields of &struct PageDesc.
130  *
131  * See also: &struct page_collection.
132  */
133 struct page_entry {
134     PageDesc *pd;
135     tb_page_addr_t index;
136     bool locked;
137 };
138 
139 /**
140  * struct page_collection - tracks a set of pages (i.e. &struct page_entry's)
141  * @tree:   Binary search tree (BST) of the pages, with key == page index
142  * @max:    Pointer to the page in @tree with the highest page index
143  *
144  * To avoid deadlock we lock pages in ascending order of page index.
145  * When operating on a set of pages, we need to keep track of them so that
146  * we can lock them in order and also unlock them later. For this we collect
147  * pages (i.e. &struct page_entry's) in a binary search @tree. Given that the
148  * @tree implementation we use does not provide an O(1) operation to obtain the
149  * highest-ranked element, we use @max to keep track of the inserted page
150  * with the highest index. This is valuable because if a page is not in
151  * the tree and its index is higher than @max's, then we can lock it
152  * without breaking the locking order rule.
153  *
154  * Note on naming: 'struct page_set' would be shorter, but we already have a few
155  * page_set_*() helpers, so page_collection is used instead to avoid confusion.
156  *
157  * See also: page_collection_lock().
158  */
159 struct page_collection {
160     GTree *tree;
161     struct page_entry *max;
162 };
163 
164 /* list iterators for lists of tagged pointers in TranslationBlock */
165 #define TB_FOR_EACH_TAGGED(head, tb, n, field)                          \
166     for (n = (head) & 1, tb = (TranslationBlock *)((head) & ~1);        \
167          tb; tb = (TranslationBlock *)tb->field[n], n = (uintptr_t)tb & 1, \
168              tb = (TranslationBlock *)((uintptr_t)tb & ~1))
169 
170 #define PAGE_FOR_EACH_TB(pagedesc, tb, n)                       \
171     TB_FOR_EACH_TAGGED((pagedesc)->first_tb, tb, n, page_next)
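/*
 * Illustrative note (not part of the original file): the low bit of each
 * tagged pointer records which of the TB's two page slots the link belongs
 * to.  For example, if a PageDesc's first_tb is ((uintptr_t)tb | 1), the
 * iteration above starts at tb with n == 1, follows tb->page_next[1], and
 * stops when the tag-stripped pointer becomes NULL.
 */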
172 
173 /* In system mode we want L1_MAP to be based on ram offsets,
174    while in user mode we want it to be based on virtual addresses.  */
175 #if !defined(CONFIG_USER_ONLY)
176 #if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
177 # define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
178 #else
179 # define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
180 #endif
181 #else
182 # define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
183 #endif
184 
185 /* Size of the L2 (and L3, etc) page tables.  */
186 #define V_L2_BITS 10
187 #define V_L2_SIZE (1 << V_L2_BITS)
188 
189 /* Make sure all possible CPU event bits fit in tb->trace_vcpu_dstate */
190 QEMU_BUILD_BUG_ON(CPU_TRACE_DSTATE_MAX_EVENTS >
191                   sizeof(((TranslationBlock *)0)->trace_vcpu_dstate)
192                   * BITS_PER_BYTE);
193 
194 /*
195  * L1 Mapping properties
196  */
197 static int v_l1_size;
198 static int v_l1_shift;
199 static int v_l2_levels;
200 
201 /* The bottom level has pointers to PageDesc, and is indexed by
202  * anything from 4 to (V_L2_BITS + 3) bits, depending on target page size.
203  */
204 #define V_L1_MIN_BITS 4
205 #define V_L1_MAX_BITS (V_L2_BITS + 3)
206 #define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS)
207 
208 static void *l1_map[V_L1_MAX_SIZE];
209 
210 /* code generation context */
211 TCGContext tcg_init_ctx;
212 __thread TCGContext *tcg_ctx;
213 TBContext tb_ctx;
214 bool parallel_cpus;
215 
216 /* translation block context */
217 static __thread int have_tb_lock;
218 
219 static void page_table_config_init(void)
220 {
221     uint32_t v_l1_bits;
222 
223     assert(TARGET_PAGE_BITS);
224     /* The bits remaining after N lower levels of page tables.  */
225     v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS;
226     if (v_l1_bits < V_L1_MIN_BITS) {
227         v_l1_bits += V_L2_BITS;
228     }
229 
230     v_l1_size = 1 << v_l1_bits;
231     v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits;
232     v_l2_levels = v_l1_shift / V_L2_BITS - 1;
233 
234     assert(v_l1_bits <= V_L1_MAX_BITS);
235     assert(v_l1_shift % V_L2_BITS == 0);
236     assert(v_l2_levels >= 0);
237 }
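/*
 * Worked example (illustrative, assuming L1_MAP_ADDR_SPACE_BITS == 64,
 * TARGET_PAGE_BITS == 12 and V_L2_BITS == 10): the remainder
 * (64 - 12) % 10 == 2 is below V_L1_MIN_BITS, so v_l1_bits becomes 12,
 * giving v_l1_size == 4096 entries, v_l1_shift == 64 - 12 - 12 == 40 and
 * v_l2_levels == 40 / 10 - 1 == 3 intermediate levels below the L1 table.
 */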
238 
239 #define assert_tb_locked() tcg_debug_assert(have_tb_lock)
240 #define assert_tb_unlocked() tcg_debug_assert(!have_tb_lock)
241 
242 void tb_lock(void)
243 {
244     assert_tb_unlocked();
245     qemu_mutex_lock(&tb_ctx.tb_lock);
246     have_tb_lock++;
247 }
248 
249 void tb_unlock(void)
250 {
251     assert_tb_locked();
252     have_tb_lock--;
253     qemu_mutex_unlock(&tb_ctx.tb_lock);
254 }
255 
256 void tb_lock_reset(void)
257 {
258     if (have_tb_lock) {
259         qemu_mutex_unlock(&tb_ctx.tb_lock);
260         have_tb_lock = 0;
261     }
262 }
263 
264 void cpu_gen_init(void)
265 {
266     tcg_context_init(&tcg_init_ctx);
267 }
268 
269 /* Encode VAL as a signed leb128 sequence at P.
270    Return P incremented past the encoded value.  */
271 static uint8_t *encode_sleb128(uint8_t *p, target_long val)
272 {
273     int more, byte;
274 
275     do {
276         byte = val & 0x7f;
277         val >>= 7;
278         more = !((val == 0 && (byte & 0x40) == 0)
279                  || (val == -1 && (byte & 0x40) != 0));
280         if (more) {
281             byte |= 0x80;
282         }
283         *p++ = byte;
284     } while (more);
285 
286     return p;
287 }
288 
289 /* Decode a signed leb128 sequence at *PP; increment *PP past the
290    decoded value.  Return the decoded value.  */
291 static target_long decode_sleb128(uint8_t **pp)
292 {
293     uint8_t *p = *pp;
294     target_long val = 0;
295     int byte, shift = 0;
296 
297     do {
298         byte = *p++;
299         val |= (target_ulong)(byte & 0x7f) << shift;
300         shift += 7;
301     } while (byte & 0x80);
302     if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
303         val |= -(target_ulong)1 << shift;
304     }
305 
306     *pp = p;
307     return val;
308 }
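/*
 * Sketch of a round-trip check for the sleb128 helpers above (illustrative
 * only, not part of the original file; the function name is hypothetical).
 * For instance, -5 encodes as the single byte 0x7b and 300 encodes as the
 * two bytes 0xac 0x02.
 */
#if 0
static void sleb128_example(void)
{
    uint8_t buf[16];
    uint8_t *end = encode_sleb128(buf, -5);   /* writes the single byte 0x7b */
    uint8_t *p = buf;

    assert(end - buf == 1);
    assert(decode_sleb128(&p) == -5);         /* sign-extends from bit 6 */
    assert(p == end);
}
#endif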
309 
310 /* Encode the data collected about the instructions while compiling TB.
311    Place the data at BLOCK, and return the number of bytes consumed.
312 
313    The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
314    which come from the target's insn_start data, followed by a uintptr_t
315    which comes from the host pc of the end of the code implementing the insn.
316 
317    Each line of the table is encoded as sleb128 deltas from the previous
318    line.  The seed for the first line is { tb->pc, 0..., tb->tc.ptr }.
319    That is, the first column is seeded with the guest pc, the last column
320    with the host pc, and the middle columns with zeros.  */
321 
322 static int encode_search(TranslationBlock *tb, uint8_t *block)
323 {
324     uint8_t *highwater = tcg_ctx->code_gen_highwater;
325     uint8_t *p = block;
326     int i, j, n;
327 
328     for (i = 0, n = tb->icount; i < n; ++i) {
329         target_ulong prev;
330 
331         for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
332             if (i == 0) {
333                 prev = (j == 0 ? tb->pc : 0);
334             } else {
335                 prev = tcg_ctx->gen_insn_data[i - 1][j];
336             }
337             p = encode_sleb128(p, tcg_ctx->gen_insn_data[i][j] - prev);
338         }
339         prev = (i == 0 ? 0 : tcg_ctx->gen_insn_end_off[i - 1]);
340         p = encode_sleb128(p, tcg_ctx->gen_insn_end_off[i] - prev);
341 
342         /* Test for (pending) buffer overflow.  The assumption is that any
343            one row beginning below the high water mark cannot overrun
344            the buffer completely.  Thus we can test for overflow after
345            encoding a row without having to check during encoding.  */
346         if (unlikely(p > highwater)) {
347             return -1;
348         }
349     }
350 
351     return p - block;
352 }
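/*
 * Worked example with hypothetical numbers (assuming TARGET_INSN_START_WORDS
 * == 1): for a two-insn TB whose guest pcs are 0x1000 and 0x1004 and whose
 * host code for those insns ends at offsets 0x20 and 0x38 from tc.ptr, the
 * logical rows are {0x1000, 0x20} and {0x1004, 0x38}.  Against the seed
 * {tb->pc, 0} they are stored as the sleb128 deltas {0, +0x20} followed by
 * {+4, +0x18}.
 */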
353 
354 /* The cpu state corresponding to 'searched_pc' is restored.
355  * Called with tb_lock held.
356  * When reset_icount is true, the current TB will be interrupted and
357  * icount should be recalculated.
358  */
359 static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
360                                      uintptr_t searched_pc, bool reset_icount)
361 {
362     target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc };
363     uintptr_t host_pc = (uintptr_t)tb->tc.ptr;
364     CPUArchState *env = cpu->env_ptr;
365     uint8_t *p = tb->tc.ptr + tb->tc.size;
366     int i, j, num_insns = tb->icount;
367 #ifdef CONFIG_PROFILER
368     TCGProfile *prof = &tcg_ctx->prof;
369     int64_t ti = profile_getclock();
370 #endif
371 
372     searched_pc -= GETPC_ADJ;
373 
374     if (searched_pc < host_pc) {
375         return -1;
376     }
377 
378     /* Reconstruct the stored insn data while looking for the point at
379        which the end of the insn exceeds the searched_pc.  */
380     for (i = 0; i < num_insns; ++i) {
381         for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
382             data[j] += decode_sleb128(&p);
383         }
384         host_pc += decode_sleb128(&p);
385         if (host_pc > searched_pc) {
386             goto found;
387         }
388     }
389     return -1;
390 
391  found:
392     if (reset_icount && (tb->cflags & CF_USE_ICOUNT)) {
393         assert(use_icount);
394         /* Reset the cycle counter to the start of the block
395            and shift it by the number of actually executed instructions */
396         cpu->icount_decr.u16.low += num_insns - i;
397     }
398     restore_state_to_opc(env, tb, data);
399 
400 #ifdef CONFIG_PROFILER
401     atomic_set(&prof->restore_time,
402                 prof->restore_time + profile_getclock() - ti);
403     atomic_set(&prof->restore_count, prof->restore_count + 1);
404 #endif
405     return 0;
406 }
407 
408 bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc, bool will_exit)
409 {
410     TranslationBlock *tb;
411     bool r = false;
412     uintptr_t check_offset;
413 
414     /* The host_pc has to be in the region of the current code buffer.
415      * If it is not, we will not be able to resolve it here. The two
416      * cases where host_pc will not be correct are:
417      *
418      *  - fault during translation (instruction fetch)
419      *  - fault from helper (not using GETPC() macro)
420      *
421      * Either way we need to return early to avoid blowing up on a
422      * recursive tb_lock() as we can't resolve it here.
423      *
424      * We are using unsigned arithmetic, so if host_pc <
425      * tcg_init_ctx.code_gen_buffer, check_offset will wrap to way
426      * above code_gen_buffer_size.
427      */
428     check_offset = host_pc - (uintptr_t) tcg_init_ctx.code_gen_buffer;
429 
430     if (check_offset < tcg_init_ctx.code_gen_buffer_size) {
431         tb_lock();
432         tb = tcg_tb_lookup(host_pc);
433         if (tb) {
434             cpu_restore_state_from_tb(cpu, tb, host_pc, will_exit);
435             if (tb->cflags & CF_NOCACHE) {
436                 /* one-shot translation, invalidate it immediately */
437                 tb_phys_invalidate(tb, -1);
438                 tcg_tb_remove(tb);
439             }
440             r = true;
441         }
442         tb_unlock();
443     }
444 
445     return r;
446 }
447 
448 static void page_init(void)
449 {
450     page_size_init();
451     page_table_config_init();
452 
453 #if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
454     {
455 #ifdef HAVE_KINFO_GETVMMAP
456         struct kinfo_vmentry *freep;
457         int i, cnt;
458 
459         freep = kinfo_getvmmap(getpid(), &cnt);
460         if (freep) {
461             mmap_lock();
462             for (i = 0; i < cnt; i++) {
463                 unsigned long startaddr, endaddr;
464 
465                 startaddr = freep[i].kve_start;
466                 endaddr = freep[i].kve_end;
467                 if (h2g_valid(startaddr)) {
468                     startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
469 
470                     if (h2g_valid(endaddr)) {
471                         endaddr = h2g(endaddr);
472                         page_set_flags(startaddr, endaddr, PAGE_RESERVED);
473                     } else {
474 #if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
475                         endaddr = ~0ul;
476                         page_set_flags(startaddr, endaddr, PAGE_RESERVED);
477 #endif
478                     }
479                 }
480             }
481             free(freep);
482             mmap_unlock();
483         }
484 #else
485         FILE *f;
486 
487         last_brk = (unsigned long)sbrk(0);
488 
489         f = fopen("/compat/linux/proc/self/maps", "r");
490         if (f) {
491             mmap_lock();
492 
493             do {
494                 unsigned long startaddr, endaddr;
495                 int n;
496 
497                 n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
498 
499                 if (n == 2 && h2g_valid(startaddr)) {
500                     startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
501 
502                     if (h2g_valid(endaddr)) {
503                         endaddr = h2g(endaddr);
504                     } else {
505                         endaddr = ~0ul;
506                     }
507                     page_set_flags(startaddr, endaddr, PAGE_RESERVED);
508                 }
509             } while (!feof(f));
510 
511             fclose(f);
512             mmap_unlock();
513         }
514 #endif
515     }
516 #endif
517 }
518 
519 static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
520 {
521     PageDesc *pd;
522     void **lp;
523     int i;
524 
525     /* Level 1.  Always allocated.  */
526     lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));
527 
528     /* Level 2..N-1.  */
529     for (i = v_l2_levels; i > 0; i--) {
530         void **p = atomic_rcu_read(lp);
531 
532         if (p == NULL) {
533             void *existing;
534 
535             if (!alloc) {
536                 return NULL;
537             }
538             p = g_new0(void *, V_L2_SIZE);
539             existing = atomic_cmpxchg(lp, NULL, p);
540             if (unlikely(existing)) {
541                 g_free(p);
542                 p = existing;
543             }
544         }
545 
546         lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
547     }
548 
549     pd = atomic_rcu_read(lp);
550     if (pd == NULL) {
551         void *existing;
552 
553         if (!alloc) {
554             return NULL;
555         }
556         pd = g_new0(PageDesc, V_L2_SIZE);
557 #ifndef CONFIG_USER_ONLY
558         {
559             int i;
560 
561             for (i = 0; i < V_L2_SIZE; i++) {
562                 qemu_spin_init(&pd[i].lock);
563             }
564         }
565 #endif
566         existing = atomic_cmpxchg(lp, NULL, pd);
567         if (unlikely(existing)) {
568             g_free(pd);
569             pd = existing;
570         }
571     }
572 
573     return pd + (index & (V_L2_SIZE - 1));
574 }
575 
576 static inline PageDesc *page_find(tb_page_addr_t index)
577 {
578     return page_find_alloc(index, 0);
579 }
580 
581 static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
582                            PageDesc **ret_p2, tb_page_addr_t phys2, int alloc);
583 
584 /* In user-mode page locks aren't used; mmap_lock is enough */
585 #ifdef CONFIG_USER_ONLY
586 
587 #define assert_page_locked(pd) tcg_debug_assert(have_mmap_lock())
588 
589 static inline void page_lock(PageDesc *pd)
590 { }
591 
592 static inline void page_unlock(PageDesc *pd)
593 { }
594 
595 static inline void page_lock_tb(const TranslationBlock *tb)
596 { }
597 
598 static inline void page_unlock_tb(const TranslationBlock *tb)
599 { }
600 
601 struct page_collection *
602 page_collection_lock(tb_page_addr_t start, tb_page_addr_t end)
603 {
604     return NULL;
605 }
606 
607 void page_collection_unlock(struct page_collection *set)
608 { }
609 #else /* !CONFIG_USER_ONLY */
610 
611 #ifdef CONFIG_DEBUG_TCG
612 
613 static __thread GHashTable *ht_pages_locked_debug;
614 
615 static void ht_pages_locked_debug_init(void)
616 {
617     if (ht_pages_locked_debug) {
618         return;
619     }
620     ht_pages_locked_debug = g_hash_table_new(NULL, NULL);
621 }
622 
623 static bool page_is_locked(const PageDesc *pd)
624 {
625     PageDesc *found;
626 
627     ht_pages_locked_debug_init();
628     found = g_hash_table_lookup(ht_pages_locked_debug, pd);
629     return !!found;
630 }
631 
632 static void page_lock__debug(PageDesc *pd)
633 {
634     ht_pages_locked_debug_init();
635     g_assert(!page_is_locked(pd));
636     g_hash_table_insert(ht_pages_locked_debug, pd, pd);
637 }
638 
639 static void page_unlock__debug(const PageDesc *pd)
640 {
641     bool removed;
642 
643     ht_pages_locked_debug_init();
644     g_assert(page_is_locked(pd));
645     removed = g_hash_table_remove(ht_pages_locked_debug, pd);
646     g_assert(removed);
647 }
648 
649 static void
650 do_assert_page_locked(const PageDesc *pd, const char *file, int line)
651 {
652     if (unlikely(!page_is_locked(pd))) {
653         error_report("assert_page_lock: PageDesc %p not locked @ %s:%d",
654                      pd, file, line);
655         abort();
656     }
657 }
658 
659 #define assert_page_locked(pd) do_assert_page_locked(pd, __FILE__, __LINE__)
660 
661 void assert_no_pages_locked(void)
662 {
663     ht_pages_locked_debug_init();
664     g_assert(g_hash_table_size(ht_pages_locked_debug) == 0);
665 }
666 
667 #else /* !CONFIG_DEBUG_TCG */
668 
669 #define assert_page_locked(pd)
670 
671 static inline void page_lock__debug(const PageDesc *pd)
672 {
673 }
674 
675 static inline void page_unlock__debug(const PageDesc *pd)
676 {
677 }
678 
679 #endif /* CONFIG_DEBUG_TCG */
680 
681 static inline void page_lock(PageDesc *pd)
682 {
683     page_lock__debug(pd);
684     qemu_spin_lock(&pd->lock);
685 }
686 
687 static inline void page_unlock(PageDesc *pd)
688 {
689     qemu_spin_unlock(&pd->lock);
690     page_unlock__debug(pd);
691 }
692 
693 /* lock the page(s) of a TB in the correct acquisition order */
694 static inline void page_lock_tb(const TranslationBlock *tb)
695 {
696     page_lock_pair(NULL, tb->page_addr[0], NULL, tb->page_addr[1], 0);
697 }
698 
699 static inline void page_unlock_tb(const TranslationBlock *tb)
700 {
701     page_unlock(page_find(tb->page_addr[0] >> TARGET_PAGE_BITS));
702     if (unlikely(tb->page_addr[1] != -1)) {
703         page_unlock(page_find(tb->page_addr[1] >> TARGET_PAGE_BITS));
704     }
705 }
706 
707 static inline struct page_entry *
708 page_entry_new(PageDesc *pd, tb_page_addr_t index)
709 {
710     struct page_entry *pe = g_malloc(sizeof(*pe));
711 
712     pe->index = index;
713     pe->pd = pd;
714     pe->locked = false;
715     return pe;
716 }
717 
718 static void page_entry_destroy(gpointer p)
719 {
720     struct page_entry *pe = p;
721 
722     g_assert(pe->locked);
723     page_unlock(pe->pd);
724     g_free(pe);
725 }
726 
727 /* returns false on success */
728 static bool page_entry_trylock(struct page_entry *pe)
729 {
730     bool busy;
731 
732     busy = qemu_spin_trylock(&pe->pd->lock);
733     if (!busy) {
734         g_assert(!pe->locked);
735         pe->locked = true;
736         page_lock__debug(pe->pd);
737     }
738     return busy;
739 }
740 
741 static void do_page_entry_lock(struct page_entry *pe)
742 {
743     page_lock(pe->pd);
744     g_assert(!pe->locked);
745     pe->locked = true;
746 }
747 
748 static gboolean page_entry_lock(gpointer key, gpointer value, gpointer data)
749 {
750     struct page_entry *pe = value;
751 
752     do_page_entry_lock(pe);
753     return FALSE;
754 }
755 
756 static gboolean page_entry_unlock(gpointer key, gpointer value, gpointer data)
757 {
758     struct page_entry *pe = value;
759 
760     if (pe->locked) {
761         pe->locked = false;
762         page_unlock(pe->pd);
763     }
764     return FALSE;
765 }
766 
767 /*
768  * Trylock a page, and if successful, add the page to a collection.
769  * Returns true ("busy") if the page could not be locked; false otherwise.
770  */
771 static bool page_trylock_add(struct page_collection *set, tb_page_addr_t addr)
772 {
773     tb_page_addr_t index = addr >> TARGET_PAGE_BITS;
774     struct page_entry *pe;
775     PageDesc *pd;
776 
777     pe = g_tree_lookup(set->tree, &index);
778     if (pe) {
779         return false;
780     }
781 
782     pd = page_find(index);
783     if (pd == NULL) {
784         return false;
785     }
786 
787     pe = page_entry_new(pd, index);
788     g_tree_insert(set->tree, &pe->index, pe);
789 
790     /*
791      * If this is either (1) the first insertion or (2) a page whose index
792      * is higher than any other so far, just lock the page and move on.
793      */
794     if (set->max == NULL || pe->index > set->max->index) {
795         set->max = pe;
796         do_page_entry_lock(pe);
797         return false;
798     }
799     /*
800      * Try to acquire out-of-order lock; if busy, return busy so that we acquire
801      * locks in order.
802      */
803     return page_entry_trylock(pe);
804 }
805 
806 static gint tb_page_addr_cmp(gconstpointer ap, gconstpointer bp, gpointer udata)
807 {
808     tb_page_addr_t a = *(const tb_page_addr_t *)ap;
809     tb_page_addr_t b = *(const tb_page_addr_t *)bp;
810 
811     if (a == b) {
812         return 0;
813     } else if (a < b) {
814         return -1;
815     }
816     return 1;
817 }
818 
819 /*
820  * Lock a range of pages ([@start,@end[) as well as the pages of all
821  * intersecting TBs.
822  * Locking order: acquire locks in ascending order of page index.
823  */
824 struct page_collection *
825 page_collection_lock(tb_page_addr_t start, tb_page_addr_t end)
826 {
827     struct page_collection *set = g_malloc(sizeof(*set));
828     tb_page_addr_t index;
829     PageDesc *pd;
830 
831     start >>= TARGET_PAGE_BITS;
832     end   >>= TARGET_PAGE_BITS;
833     g_assert(start <= end);
834 
835     set->tree = g_tree_new_full(tb_page_addr_cmp, NULL, NULL,
836                                 page_entry_destroy);
837     set->max = NULL;
838     assert_no_pages_locked();
839 
840  retry:
841     g_tree_foreach(set->tree, page_entry_lock, NULL);
842 
843     for (index = start; index <= end; index++) {
844         TranslationBlock *tb;
845         int n;
846 
847         pd = page_find(index);
848         if (pd == NULL) {
849             continue;
850         }
851         if (page_trylock_add(set, index << TARGET_PAGE_BITS)) {
852             g_tree_foreach(set->tree, page_entry_unlock, NULL);
853             goto retry;
854         }
855         assert_page_locked(pd);
856         PAGE_FOR_EACH_TB(pd, tb, n) {
857             if (page_trylock_add(set, tb->page_addr[0]) ||
858                 (tb->page_addr[1] != -1 &&
859                  page_trylock_add(set, tb->page_addr[1]))) {
860                 /* drop all locks, and reacquire in order */
861                 g_tree_foreach(set->tree, page_entry_unlock, NULL);
862                 goto retry;
863             }
864         }
865     }
866     return set;
867 }
868 
869 void page_collection_unlock(struct page_collection *set)
870 {
871     /* entries are unlocked and freed via page_entry_destroy */
872     g_tree_destroy(set->tree);
873     g_free(set);
874 }
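/*
 * Typical caller pattern (a minimal sketch, not part of the original file;
 * the function name is hypothetical): take the collection for a physical
 * range, operate on the TBs of the covered pages while all of their locks
 * are held, then release everything in one go.
 */
#if 0
static void page_collection_usage_example(tb_page_addr_t start,
                                          tb_page_addr_t end)
{
    struct page_collection *pages = page_collection_lock(start, end);

    /* ... operate on TBs intersecting [start, end) with all pages locked ... */

    page_collection_unlock(pages);
}
#endif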
875 
876 #endif /* !CONFIG_USER_ONLY */
877 
878 static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
879                            PageDesc **ret_p2, tb_page_addr_t phys2, int alloc)
880 {
881     PageDesc *p1, *p2;
882 
883     assert_memory_lock();
884     g_assert(phys1 != -1 && phys1 != phys2);
885     p1 = page_find_alloc(phys1 >> TARGET_PAGE_BITS, alloc);
886     if (ret_p1) {
887         *ret_p1 = p1;
888     }
889     if (likely(phys2 == -1)) {
890         page_lock(p1);
891         return;
892     }
893     p2 = page_find_alloc(phys2 >> TARGET_PAGE_BITS, alloc);
894     if (ret_p2) {
895         *ret_p2 = p2;
896     }
897     if (phys1 < phys2) {
898         page_lock(p1);
899         page_lock(p2);
900     } else {
901         page_lock(p2);
902         page_lock(p1);
903     }
904 }
905 
906 #if defined(CONFIG_USER_ONLY)
907 /* Currently it is not recommended to allocate big chunks of data in
908    user mode. This will change when a dedicated libc is used.  */
909 /* ??? 64-bit hosts ought to have no problem mmaping data outside the
910    region in which the guest needs to run.  Revisit this.  */
911 #define USE_STATIC_CODE_GEN_BUFFER
912 #endif
913 
914 /* Minimum size of the code gen buffer.  This number is arbitrarily chosen,
915    but not so small that we can't have a fair number of TBs live.  */
916 #define MIN_CODE_GEN_BUFFER_SIZE     (1024u * 1024)
917 
918 /* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
919    indicated, this is constrained by the range of direct branches on the
920    host cpu, as used by the TCG implementation of goto_tb.  */
921 #if defined(__x86_64__)
922 # define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
923 #elif defined(__sparc__)
924 # define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
925 #elif defined(__powerpc64__)
926 # define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
927 #elif defined(__powerpc__)
928 # define MAX_CODE_GEN_BUFFER_SIZE  (32u * 1024 * 1024)
929 #elif defined(__aarch64__)
930 # define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
931 #elif defined(__s390x__)
932   /* We have a +- 4GB range on the branches; leave some slop.  */
933 # define MAX_CODE_GEN_BUFFER_SIZE  (3ul * 1024 * 1024 * 1024)
934 #elif defined(__mips__)
935   /* We have a 256MB branch region, but leave room to make sure the
936      main executable is also within that region.  */
937 # define MAX_CODE_GEN_BUFFER_SIZE  (128ul * 1024 * 1024)
938 #else
939 # define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
940 #endif
941 
942 #define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)
943 
944 #define DEFAULT_CODE_GEN_BUFFER_SIZE \
945   (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
946    ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)
947 
948 static inline size_t size_code_gen_buffer(size_t tb_size)
949 {
950     /* Size the buffer.  */
951     if (tb_size == 0) {
952 #ifdef USE_STATIC_CODE_GEN_BUFFER
953         tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
954 #else
955         /* ??? Needs adjustments.  */
956         /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
957            static buffer, we could size this on RESERVED_VA, on the text
958            segment size of the executable, or continue to use the default.  */
959         tb_size = (unsigned long)(ram_size / 4);
960 #endif
961     }
962     if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
963         tb_size = MIN_CODE_GEN_BUFFER_SIZE;
964     }
965     if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
966         tb_size = MAX_CODE_GEN_BUFFER_SIZE;
967     }
968     return tb_size;
969 }
970 
971 #ifdef __mips__
972 /* In order to use J and JAL within the code_gen_buffer, we require
973    that the buffer not cross a 256MB boundary.  */
974 static inline bool cross_256mb(void *addr, size_t size)
975 {
976     return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful;
977 }
978 
979 /* We weren't able to allocate a buffer without crossing that boundary,
980    so make do with the larger portion of the buffer that doesn't cross.
981    Returns the new base of the buffer, and adjusts code_gen_buffer_size.  */
982 static inline void *split_cross_256mb(void *buf1, size_t size1)
983 {
984     void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful);
985     size_t size2 = buf1 + size1 - buf2;
986 
987     size1 = buf2 - buf1;
988     if (size1 < size2) {
989         size1 = size2;
990         buf1 = buf2;
991     }
992 
993     tcg_ctx->code_gen_buffer_size = size1;
994     return buf1;
995 }
996 #endif
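/*
 * Worked example with hypothetical addresses: a buffer at 0x0fe00000 of
 * size 0x00300000 ends at 0x10100000, so the XOR of the two ends has bits
 * above 0x0fffffff set and cross_256mb() reports a crossing.  Splitting at
 * the 0x10000000 boundary yields a 2MB low part and a 1MB high part;
 * split_cross_256mb() keeps the larger (low) part and shrinks
 * code_gen_buffer_size to 0x00200000.
 */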
997 
998 #ifdef USE_STATIC_CODE_GEN_BUFFER
999 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
1000     __attribute__((aligned(CODE_GEN_ALIGN)));
1001 
1002 static inline void *alloc_code_gen_buffer(void)
1003 {
1004     void *buf = static_code_gen_buffer;
1005     void *end = static_code_gen_buffer + sizeof(static_code_gen_buffer);
1006     size_t size;
1007 
1008     /* page-align the beginning and end of the buffer */
1009     buf = QEMU_ALIGN_PTR_UP(buf, qemu_real_host_page_size);
1010     end = QEMU_ALIGN_PTR_DOWN(end, qemu_real_host_page_size);
1011 
1012     size = end - buf;
1013 
1014     /* Honor a command-line option limiting the size of the buffer.  */
1015     if (size > tcg_ctx->code_gen_buffer_size) {
1016         size = QEMU_ALIGN_DOWN(tcg_ctx->code_gen_buffer_size,
1017                                qemu_real_host_page_size);
1018     }
1019     tcg_ctx->code_gen_buffer_size = size;
1020 
1021 #ifdef __mips__
1022     if (cross_256mb(buf, size)) {
1023         buf = split_cross_256mb(buf, size);
1024         size = tcg_ctx->code_gen_buffer_size;
1025     }
1026 #endif
1027 
1028     if (qemu_mprotect_rwx(buf, size)) {
1029         abort();
1030     }
1031     qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
1032 
1033     return buf;
1034 }
1035 #elif defined(_WIN32)
1036 static inline void *alloc_code_gen_buffer(void)
1037 {
1038     size_t size = tcg_ctx->code_gen_buffer_size;
1039     return VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT,
1040                         PAGE_EXECUTE_READWRITE);
1041 }
1042 #else
1043 static inline void *alloc_code_gen_buffer(void)
1044 {
1045     int prot = PROT_WRITE | PROT_READ | PROT_EXEC;
1046     int flags = MAP_PRIVATE | MAP_ANONYMOUS;
1047     uintptr_t start = 0;
1048     size_t size = tcg_ctx->code_gen_buffer_size;
1049     void *buf;
1050 
1051     /* Constrain the position of the buffer based on the host cpu.
1052        Note that these addresses are chosen in concert with the
1053        addresses assigned in the relevant linker script file.  */
1054 # if defined(__PIE__) || defined(__PIC__)
1055     /* Don't bother setting a preferred location if we're building
1056        a position-independent executable.  We're more likely to get
1057        an address near the main executable if we let the kernel
1058        choose the address.  */
1059 # elif defined(__x86_64__) && defined(MAP_32BIT)
1060     /* Force the memory down into low memory with the executable.
1061        Leave the choice of exact location with the kernel.  */
1062     flags |= MAP_32BIT;
1063     /* Cannot expect to map more than 800MB in low memory.  */
1064     if (size > 800u * 1024 * 1024) {
1065         tcg_ctx->code_gen_buffer_size = size = 800u * 1024 * 1024;
1066     }
1067 # elif defined(__sparc__)
1068     start = 0x40000000ul;
1069 # elif defined(__s390x__)
1070     start = 0x90000000ul;
1071 # elif defined(__mips__)
1072 #  if _MIPS_SIM == _ABI64
1073     start = 0x128000000ul;
1074 #  else
1075     start = 0x08000000ul;
1076 #  endif
1077 # endif
1078 
1079     buf = mmap((void *)start, size, prot, flags, -1, 0);
1080     if (buf == MAP_FAILED) {
1081         return NULL;
1082     }
1083 
1084 #ifdef __mips__
1085     if (cross_256mb(buf, size)) {
1086         /* Try again, with the original still mapped, to avoid re-acquiring
1087            that 256mb crossing.  This time don't specify an address.  */
1088         size_t size2;
1089         void *buf2 = mmap(NULL, size, prot, flags, -1, 0);
1090         switch ((int)(buf2 != MAP_FAILED)) {
1091         case 1:
1092             if (!cross_256mb(buf2, size)) {
1093                 /* Success!  Use the new buffer.  */
1094                 munmap(buf, size);
1095                 break;
1096             }
1097             /* Failure.  Work with what we had.  */
1098             munmap(buf2, size);
1099             /* fallthru */
1100         default:
1101             /* Split the original buffer.  Free the smaller half.  */
1102             buf2 = split_cross_256mb(buf, size);
1103             size2 = tcg_ctx->code_gen_buffer_size;
1104             if (buf == buf2) {
1105                 munmap(buf + size2, size - size2);
1106             } else {
1107                 munmap(buf, size - size2);
1108             }
1109             size = size2;
1110             break;
1111         }
1112         buf = buf2;
1113     }
1114 #endif
1115 
1116     /* Request large pages for the buffer.  */
1117     qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
1118 
1119     return buf;
1120 }
1121 #endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */
1122 
1123 static inline void code_gen_alloc(size_t tb_size)
1124 {
1125     tcg_ctx->code_gen_buffer_size = size_code_gen_buffer(tb_size);
1126     tcg_ctx->code_gen_buffer = alloc_code_gen_buffer();
1127     if (tcg_ctx->code_gen_buffer == NULL) {
1128         fprintf(stderr, "Could not allocate dynamic translator buffer\n");
1129         exit(1);
1130     }
1131     qemu_mutex_init(&tb_ctx.tb_lock);
1132 }
1133 
1134 static bool tb_cmp(const void *ap, const void *bp)
1135 {
1136     const TranslationBlock *a = ap;
1137     const TranslationBlock *b = bp;
1138 
1139     return a->pc == b->pc &&
1140         a->cs_base == b->cs_base &&
1141         a->flags == b->flags &&
1142         (tb_cflags(a) & CF_HASH_MASK) == (tb_cflags(b) & CF_HASH_MASK) &&
1143         a->trace_vcpu_dstate == b->trace_vcpu_dstate &&
1144         a->page_addr[0] == b->page_addr[0] &&
1145         a->page_addr[1] == b->page_addr[1];
1146 }
1147 
1148 static void tb_htable_init(void)
1149 {
1150     unsigned int mode = QHT_MODE_AUTO_RESIZE;
1151 
1152     qht_init(&tb_ctx.htable, tb_cmp, CODE_GEN_HTABLE_SIZE, mode);
1153 }
1154 
1155 /* Must be called before using the QEMU cpus. 'tb_size' is the size
1156    (in bytes) allocated to the translation buffer. Zero means default
1157    size. */
1158 void tcg_exec_init(unsigned long tb_size)
1159 {
1160     tcg_allowed = true;
1161     cpu_gen_init();
1162     page_init();
1163     tb_htable_init();
1164     code_gen_alloc(tb_size);
1165 #if defined(CONFIG_SOFTMMU)
1166     /* There's no guest base to take into account, so go ahead and
1167        initialize the prologue now.  */
1168     tcg_prologue_init(tcg_ctx);
1169 #endif
1170 }
1171 
1172 /*
1173  * Allocate a new translation block. Flush the translation buffer if
1174  * too many translation blocks or too much generated code.
1175  *
1176  * Called with tb_lock held.
1177  */
1178 static TranslationBlock *tb_alloc(target_ulong pc)
1179 {
1180     TranslationBlock *tb;
1181 
1182     assert_tb_locked();
1183 
1184     tb = tcg_tb_alloc(tcg_ctx);
1185     if (unlikely(tb == NULL)) {
1186         return NULL;
1187     }
1188     return tb;
1189 }
1190 
1191 /* call with @p->lock held */
1192 static inline void invalidate_page_bitmap(PageDesc *p)
1193 {
1194     assert_page_locked(p);
1195 #ifdef CONFIG_SOFTMMU
1196     g_free(p->code_bitmap);
1197     p->code_bitmap = NULL;
1198     p->code_write_count = 0;
1199 #endif
1200 }
1201 
1202 /* Set to NULL all the 'first_tb' fields in all PageDescs. */
1203 static void page_flush_tb_1(int level, void **lp)
1204 {
1205     int i;
1206 
1207     if (*lp == NULL) {
1208         return;
1209     }
1210     if (level == 0) {
1211         PageDesc *pd = *lp;
1212 
1213         for (i = 0; i < V_L2_SIZE; ++i) {
1214             page_lock(&pd[i]);
1215             pd[i].first_tb = (uintptr_t)NULL;
1216             invalidate_page_bitmap(pd + i);
1217             page_unlock(&pd[i]);
1218         }
1219     } else {
1220         void **pp = *lp;
1221 
1222         for (i = 0; i < V_L2_SIZE; ++i) {
1223             page_flush_tb_1(level - 1, pp + i);
1224         }
1225     }
1226 }
1227 
1228 static void page_flush_tb(void)
1229 {
1230     int i, l1_sz = v_l1_size;
1231 
1232     for (i = 0; i < l1_sz; i++) {
1233         page_flush_tb_1(v_l2_levels, l1_map + i);
1234     }
1235 }
1236 
1237 static gboolean tb_host_size_iter(gpointer key, gpointer value, gpointer data)
1238 {
1239     const TranslationBlock *tb = value;
1240     size_t *size = data;
1241 
1242     *size += tb->tc.size;
1243     return false;
1244 }
1245 
1246 /* flush all the translation blocks */
1247 static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
1248 {
1249     tb_lock();
1250 
1251     /* If it has already been done on request of another CPU,
1252      * just retry.
1253      */
1254     if (tb_ctx.tb_flush_count != tb_flush_count.host_int) {
1255         goto done;
1256     }
1257 
1258     if (DEBUG_TB_FLUSH_GATE) {
1259         size_t nb_tbs = tcg_nb_tbs();
1260         size_t host_size = 0;
1261 
1262         tcg_tb_foreach(tb_host_size_iter, &host_size);
1263         printf("qemu: flush code_size=%zu nb_tbs=%zu avg_tb_size=%zu\n",
1264                tcg_code_size(), nb_tbs, nb_tbs > 0 ? host_size / nb_tbs : 0);
1265     }
1266 
1267     CPU_FOREACH(cpu) {
1268         cpu_tb_jmp_cache_clear(cpu);
1269     }
1270 
1271     qht_reset_size(&tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
1272     page_flush_tb();
1273 
1274     tcg_region_reset_all();
1275     /* XXX: flush processor icache at this point if cache flush is
1276        expensive */
1277     atomic_mb_set(&tb_ctx.tb_flush_count, tb_ctx.tb_flush_count + 1);
1278 
1279 done:
1280     tb_unlock();
1281 }
1282 
1283 void tb_flush(CPUState *cpu)
1284 {
1285     if (tcg_enabled()) {
1286         unsigned tb_flush_count = atomic_mb_read(&tb_ctx.tb_flush_count);
1287         async_safe_run_on_cpu(cpu, do_tb_flush,
1288                               RUN_ON_CPU_HOST_INT(tb_flush_count));
1289     }
1290 }
1291 
1292 /*
1293  * Formerly ifdef DEBUG_TB_CHECK. These debug functions are user-mode-only,
1294  * so in order to prevent bit rot we compile them unconditionally in user-mode,
1295  * and let the optimizer get rid of them by wrapping their user-only callers
1296  * with if (DEBUG_TB_CHECK_GATE).
1297  */
1298 #ifdef CONFIG_USER_ONLY
1299 
1300 static void
1301 do_tb_invalidate_check(struct qht *ht, void *p, uint32_t hash, void *userp)
1302 {
1303     TranslationBlock *tb = p;
1304     target_ulong addr = *(target_ulong *)userp;
1305 
1306     if (!(addr + TARGET_PAGE_SIZE <= tb->pc || addr >= tb->pc + tb->size)) {
1307         printf("ERROR invalidate: address=" TARGET_FMT_lx
1308                " PC=%08lx size=%04x\n", addr, (long)tb->pc, tb->size);
1309     }
1310 }
1311 
1312 /* verify that all the pages have correct rights for code
1313  *
1314  * Called with tb_lock held.
1315  */
1316 static void tb_invalidate_check(target_ulong address)
1317 {
1318     address &= TARGET_PAGE_MASK;
1319     qht_iter(&tb_ctx.htable, do_tb_invalidate_check, &address);
1320 }
1321 
1322 static void
1323 do_tb_page_check(struct qht *ht, void *p, uint32_t hash, void *userp)
1324 {
1325     TranslationBlock *tb = p;
1326     int flags1, flags2;
1327 
1328     flags1 = page_get_flags(tb->pc);
1329     flags2 = page_get_flags(tb->pc + tb->size - 1);
1330     if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
1331         printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
1332                (long)tb->pc, tb->size, flags1, flags2);
1333     }
1334 }
1335 
1336 /* verify that all the pages have correct rights for code */
1337 static void tb_page_check(void)
1338 {
1339     qht_iter(&tb_ctx.htable, do_tb_page_check, NULL);
1340 }
1341 
1342 #endif /* CONFIG_USER_ONLY */
1343 
1344 /* call with @pd->lock held */
1345 static inline void tb_page_remove(PageDesc *pd, TranslationBlock *tb)
1346 {
1347     TranslationBlock *tb1;
1348     uintptr_t *pprev;
1349     unsigned int n1;
1350 
1351     assert_page_locked(pd);
1352     pprev = &pd->first_tb;
1353     PAGE_FOR_EACH_TB(pd, tb1, n1) {
1354         if (tb1 == tb) {
1355             *pprev = tb1->page_next[n1];
1356             return;
1357         }
1358         pprev = &tb1->page_next[n1];
1359     }
1360     g_assert_not_reached();
1361 }
1362 
1363 /* remove the TB from a list of TBs jumping to the n-th jump target of the TB */
1364 static inline void tb_remove_from_jmp_list(TranslationBlock *tb, int n)
1365 {
1366     TranslationBlock *tb1;
1367     uintptr_t *ptb, ntb;
1368     unsigned int n1;
1369 
1370     ptb = &tb->jmp_list_next[n];
1371     if (*ptb) {
1372         /* find tb(n) in circular list */
1373         for (;;) {
1374             ntb = *ptb;
1375             n1 = ntb & 3;
1376             tb1 = (TranslationBlock *)(ntb & ~3);
1377             if (n1 == n && tb1 == tb) {
1378                 break;
1379             }
1380             if (n1 == 2) {
1381                 ptb = &tb1->jmp_list_first;
1382             } else {
1383                 ptb = &tb1->jmp_list_next[n1];
1384             }
1385         }
1386         /* now we can remove tb(n) from the list */
1387         *ptb = tb->jmp_list_next[n];
1388 
1389         tb->jmp_list_next[n] = (uintptr_t)NULL;
1390     }
1391 }
1392 
1393 /* reset the jump entry 'n' of a TB so that it is not chained to
1394    another TB */
1395 static inline void tb_reset_jump(TranslationBlock *tb, int n)
1396 {
1397     uintptr_t addr = (uintptr_t)(tb->tc.ptr + tb->jmp_reset_offset[n]);
1398     tb_set_jmp_target(tb, n, addr);
1399 }
1400 
1401 /* remove any jumps to the TB */
1402 static inline void tb_jmp_unlink(TranslationBlock *tb)
1403 {
1404     TranslationBlock *tb1;
1405     uintptr_t *ptb, ntb;
1406     unsigned int n1;
1407 
1408     ptb = &tb->jmp_list_first;
1409     for (;;) {
1410         ntb = *ptb;
1411         n1 = ntb & 3;
1412         tb1 = (TranslationBlock *)(ntb & ~3);
1413         if (n1 == 2) {
1414             break;
1415         }
1416         tb_reset_jump(tb1, n1);
1417         *ptb = tb1->jmp_list_next[n1];
1418         tb1->jmp_list_next[n1] = (uintptr_t)NULL;
1419     }
1420 }
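/*
 * Illustrative note (not part of the original file): the low two bits of
 * the jmp_list pointers are a tag.  Values 0 and 1 select which outgoing
 * jump slot of the pointed-to TB continues the list, while the value 2
 * marks the list head, i.e. the owning TB itself; this is why the loops
 * above stop (or switch to jmp_list_first) when n1 == 2.
 */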
1421 
1422 /* If @rm_from_page_list is set, call with the TB's pages' locks held */
1423 static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
1424 {
1425     CPUState *cpu;
1426     PageDesc *p;
1427     uint32_t h;
1428     tb_page_addr_t phys_pc;
1429 
1430     assert_tb_locked();
1431 
1432     atomic_set(&tb->cflags, tb->cflags | CF_INVALID);
1433 
1434     /* remove the TB from the hash list */
1435     phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1436     h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->cflags & CF_HASH_MASK,
1437                      tb->trace_vcpu_dstate);
1438     if (!qht_remove(&tb_ctx.htable, tb, h)) {
1439         return;
1440     }
1441 
1442     /* remove the TB from the page list */
1443     if (rm_from_page_list) {
1444         p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
1445         tb_page_remove(p, tb);
1446         invalidate_page_bitmap(p);
1447         if (tb->page_addr[1] != -1) {
1448             p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
1449             tb_page_remove(p, tb);
1450             invalidate_page_bitmap(p);
1451         }
1452     }
1453 
1454     /* remove the TB from the per-CPU tb_jmp_cache */
1455     h = tb_jmp_cache_hash_func(tb->pc);
1456     CPU_FOREACH(cpu) {
1457         if (atomic_read(&cpu->tb_jmp_cache[h]) == tb) {
1458             atomic_set(&cpu->tb_jmp_cache[h], NULL);
1459         }
1460     }
1461 
1462     /* remove this TB from the two jump lists */
1463     tb_remove_from_jmp_list(tb, 0);
1464     tb_remove_from_jmp_list(tb, 1);
1465 
1466     /* remove any remaining jumps to this TB */
1467     tb_jmp_unlink(tb);
1468 
1469     atomic_set(&tcg_ctx->tb_phys_invalidate_count,
1470                tcg_ctx->tb_phys_invalidate_count + 1);
1471 }
1472 
1473 static void tb_phys_invalidate__locked(TranslationBlock *tb)
1474 {
1475     do_tb_phys_invalidate(tb, true);
1476 }
1477 
1478 /* invalidate one TB
1479  *
1480  * Called with tb_lock held.
1481  */
1482 void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
1483 {
1484     if (page_addr == -1) {
1485         page_lock_tb(tb);
1486         do_tb_phys_invalidate(tb, true);
1487         page_unlock_tb(tb);
1488     } else {
1489         do_tb_phys_invalidate(tb, false);
1490     }
1491 }
1492 
1493 #ifdef CONFIG_SOFTMMU
1494 /* call with @p->lock held */
1495 static void build_page_bitmap(PageDesc *p)
1496 {
1497     int n, tb_start, tb_end;
1498     TranslationBlock *tb;
1499 
1500     assert_page_locked(p);
1501     p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE);
1502 
1503     PAGE_FOR_EACH_TB(p, tb, n) {
1504         /* NOTE: this is subtle as a TB may span two physical pages */
1505         if (n == 0) {
1506             /* NOTE: tb_end may be after the end of the page, but
1507                it is not a problem */
1508             tb_start = tb->pc & ~TARGET_PAGE_MASK;
1509             tb_end = tb_start + tb->size;
1510             if (tb_end > TARGET_PAGE_SIZE) {
1511                 tb_end = TARGET_PAGE_SIZE;
1512             }
1513         } else {
1514             tb_start = 0;
1515             tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1516         }
1517         bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start);
1518     }
1519 }
1520 #endif
1521 
1522 /* add the tb to the target page and protect it if necessary
1523  *
1524  * Called with mmap_lock held for user-mode emulation.
1525  * Called with @p->lock held.
1526  */
1527 static inline void tb_page_add(PageDesc *p, TranslationBlock *tb,
1528                                unsigned int n, tb_page_addr_t page_addr)
1529 {
1530 #ifndef CONFIG_USER_ONLY
1531     bool page_already_protected;
1532 #endif
1533 
1534     assert_page_locked(p);
1535 
1536     tb->page_addr[n] = page_addr;
1537     tb->page_next[n] = p->first_tb;
1538 #ifndef CONFIG_USER_ONLY
1539     page_already_protected = p->first_tb != (uintptr_t)NULL;
1540 #endif
1541     p->first_tb = (uintptr_t)tb | n;
1542     invalidate_page_bitmap(p);
1543 
1544 #if defined(CONFIG_USER_ONLY)
1545     if (p->flags & PAGE_WRITE) {
1546         target_ulong addr;
1547         PageDesc *p2;
1548         int prot;
1549 
1550         /* force the host page to be non-writable (writes will have a
1551            page fault + mprotect overhead) */
1552         page_addr &= qemu_host_page_mask;
1553         prot = 0;
1554         for (addr = page_addr; addr < page_addr + qemu_host_page_size;
1555             addr += TARGET_PAGE_SIZE) {
1556 
1557             p2 = page_find(addr >> TARGET_PAGE_BITS);
1558             if (!p2) {
1559                 continue;
1560             }
1561             prot |= p2->flags;
1562             p2->flags &= ~PAGE_WRITE;
1563         }
1564         mprotect(g2h(page_addr), qemu_host_page_size,
1565                  (prot & PAGE_BITS) & ~PAGE_WRITE);
1566         if (DEBUG_TB_INVALIDATE_GATE) {
1567             printf("protecting code page: 0x" TB_PAGE_ADDR_FMT "\n", page_addr);
1568         }
1569     }
1570 #else
1571     /* if some code is already present, then the pages are already
1572        protected. So we handle the case where only the first TB is
1573        allocated in a physical page */
1574     if (!page_already_protected) {
1575         tlb_protect_code(page_addr);
1576     }
1577 #endif
1578 }
1579 
1580 /* add a new TB and link it to the physical page tables. phys_page2 is
1581  * (-1) to indicate that only one page contains the TB.
1582  *
1583  * Called with mmap_lock held for user-mode emulation.
1584  */
1585 static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
1586                          tb_page_addr_t phys_page2)
1587 {
1588     PageDesc *p;
1589     PageDesc *p2 = NULL;
1590     uint32_t h;
1591 
1592     assert_memory_lock();
1593 
1594     /*
1595      * Add the TB to the page list, first acquiring the pages' locks.
1596      */
1597     page_lock_pair(&p, phys_pc, &p2, phys_page2, 1);
1598     tb_page_add(p, tb, 0, phys_pc & TARGET_PAGE_MASK);
1599     if (p2) {
1600         tb_page_add(p2, tb, 1, phys_page2);
1601     } else {
1602         tb->page_addr[1] = -1;
1603     }
1604 
1605     if (p2) {
1606         page_unlock(p2);
1607     }
1608     page_unlock(p);
1609 
1610     /* add in the hash table */
1611     h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->cflags & CF_HASH_MASK,
1612                      tb->trace_vcpu_dstate);
1613     qht_insert(&tb_ctx.htable, tb, h, NULL);
1614 
1615 #ifdef CONFIG_USER_ONLY
1616     if (DEBUG_TB_CHECK_GATE) {
1617         tb_page_check();
1618     }
1619 #endif
1620 }
1621 
1622 /* Called with mmap_lock held for user mode emulation.  */
1623 TranslationBlock *tb_gen_code(CPUState *cpu,
1624                               target_ulong pc, target_ulong cs_base,
1625                               uint32_t flags, int cflags)
1626 {
1627     CPUArchState *env = cpu->env_ptr;
1628     TranslationBlock *tb;
1629     tb_page_addr_t phys_pc, phys_page2;
1630     target_ulong virt_page2;
1631     tcg_insn_unit *gen_code_buf;
1632     int gen_code_size, search_size;
1633 #ifdef CONFIG_PROFILER
1634     TCGProfile *prof = &tcg_ctx->prof;
1635     int64_t ti;
1636 #endif
1637     assert_memory_lock();
1638 
1639     phys_pc = get_page_addr_code(env, pc);
1640 
1641  buffer_overflow:
1642     tb = tb_alloc(pc);
1643     if (unlikely(!tb)) {
1644         /* flush must be done */
1645         tb_flush(cpu);
1646         mmap_unlock();
1647         /* Make the execution loop process the flush as soon as possible.  */
1648         cpu->exception_index = EXCP_INTERRUPT;
1649         cpu_loop_exit(cpu);
1650     }
1651 
1652     gen_code_buf = tcg_ctx->code_gen_ptr;
1653     tb->tc.ptr = gen_code_buf;
1654     tb->pc = pc;
1655     tb->cs_base = cs_base;
1656     tb->flags = flags;
1657     tb->cflags = cflags;
1658     tb->trace_vcpu_dstate = *cpu->trace_dstate;
1659     tcg_ctx->tb_cflags = cflags;
1660 
1661 #ifdef CONFIG_PROFILER
1662     /* includes aborted translations because of exceptions */
1663     atomic_set(&prof->tb_count1, prof->tb_count1 + 1);
1664     ti = profile_getclock();
1665 #endif
1666 
1667     tcg_func_start(tcg_ctx);
1668 
1669     tcg_ctx->cpu = ENV_GET_CPU(env);
1670     gen_intermediate_code(cpu, tb);
1671     tcg_ctx->cpu = NULL;
1672 
1673     trace_translate_block(tb, tb->pc, tb->tc.ptr);
1674 
1675     /* generate machine code */
1676     tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
1677     tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID;
1678     tcg_ctx->tb_jmp_reset_offset = tb->jmp_reset_offset;
1679     if (TCG_TARGET_HAS_direct_jump) {
1680         tcg_ctx->tb_jmp_insn_offset = tb->jmp_target_arg;
1681         tcg_ctx->tb_jmp_target_addr = NULL;
1682     } else {
1683         tcg_ctx->tb_jmp_insn_offset = NULL;
1684         tcg_ctx->tb_jmp_target_addr = tb->jmp_target_arg;
1685     }
1686 
1687 #ifdef CONFIG_PROFILER
1688     atomic_set(&prof->tb_count, prof->tb_count + 1);
1689     atomic_set(&prof->interm_time, prof->interm_time + profile_getclock() - ti);
1690     ti = profile_getclock();
1691 #endif
1692 
1693     /* ??? Overflow could be handled better here.  In particular, we
1694        don't need to re-do gen_intermediate_code, nor should we re-do
1695        the tcg optimization currently hidden inside tcg_gen_code.  All
1696        that should be required is to flush the TBs, allocate a new TB,
1697        re-initialize it per above, and re-do the actual code generation.  */
1698     gen_code_size = tcg_gen_code(tcg_ctx, tb);
1699     if (unlikely(gen_code_size < 0)) {
1700         goto buffer_overflow;
1701     }
1702     search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
1703     if (unlikely(search_size < 0)) {
1704         goto buffer_overflow;
1705     }
1706     tb->tc.size = gen_code_size;
1707 
1708 #ifdef CONFIG_PROFILER
1709     atomic_set(&prof->code_time, prof->code_time + profile_getclock() - ti);
1710     atomic_set(&prof->code_in_len, prof->code_in_len + tb->size);
1711     atomic_set(&prof->code_out_len, prof->code_out_len + gen_code_size);
1712     atomic_set(&prof->search_out_len, prof->search_out_len + search_size);
1713 #endif
1714 
1715 #ifdef DEBUG_DISAS
1716     if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
1717         qemu_log_in_addr_range(tb->pc)) {
1718         qemu_log_lock();
1719         qemu_log("OUT: [size=%d]\n", gen_code_size);
1720         if (tcg_ctx->data_gen_ptr) {
1721             size_t code_size = tcg_ctx->data_gen_ptr - tb->tc.ptr;
1722             size_t data_size = gen_code_size - code_size;
1723             size_t i;
1724 
1725             log_disas(tb->tc.ptr, code_size);
1726 
1727             for (i = 0; i < data_size; i += sizeof(tcg_target_ulong)) {
1728                 if (sizeof(tcg_target_ulong) == 8) {
1729                     qemu_log("0x%08" PRIxPTR ":  .quad  0x%016" PRIx64 "\n",
1730                              (uintptr_t)tcg_ctx->data_gen_ptr + i,
1731                              *(uint64_t *)(tcg_ctx->data_gen_ptr + i));
1732                 } else {
1733                     qemu_log("0x%08" PRIxPTR ":  .long  0x%08x\n",
1734                              (uintptr_t)tcg_ctx->data_gen_ptr + i,
1735                              *(uint32_t *)(tcg_ctx->data_gen_ptr + i));
1736                 }
1737             }
1738         } else {
1739             log_disas(tb->tc.ptr, gen_code_size);
1740         }
1741         qemu_log("\n");
1742         qemu_log_flush();
1743         qemu_log_unlock();
1744     }
1745 #endif
1746 
1747     atomic_set(&tcg_ctx->code_gen_ptr, (void *)
1748         ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
1749                  CODE_GEN_ALIGN));
1750 
1751     /* init jump list */
1752     assert(((uintptr_t)tb & 3) == 0);
1753     tb->jmp_list_first = (uintptr_t)tb | 2;
1754     tb->jmp_list_next[0] = (uintptr_t)NULL;
1755     tb->jmp_list_next[1] = (uintptr_t)NULL;
1756 
1757     /* init original jump addresses which have been set during tcg_gen_code() */
1758     if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
1759         tb_reset_jump(tb, 0);
1760     }
1761     if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
1762         tb_reset_jump(tb, 1);
1763     }
1764 
1765     /* check next page if needed */
1766     virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
1767     phys_page2 = -1;
1768     if ((pc & TARGET_PAGE_MASK) != virt_page2) {
1769         phys_page2 = get_page_addr_code(env, virt_page2);
1770     }
1771     /* As long as consistency of the TB data structures is provided by
1772      * tb_lock in user mode and is implicit in single-threaded softmmu
1773      * emulation, no explicit memory barrier is required before
1774      * tb_link_page() makes the TB visible through the physical hash
1775      * table and physical page list. */
1776     tb_link_page(tb, phys_pc, phys_page2);
1777     tcg_tb_insert(tb);
1778     return tb;
1779 }
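/*
 * Editor's sketch (not part of the original file): tb_gen_code() above
 * decides whether the translated block spans a second guest page by
 * masking the address of the TB's last byte.  A minimal stand-alone
 * illustration of that test; the function name is hypothetical.
 */
#if 0
static bool tb_spans_two_pages_example(target_ulong pc, size_t tb_size)
{
    /* address of the last guest byte covered by the TB */
    target_ulong last = pc + tb_size - 1;

    /* the TB crosses a page boundary iff the first and last bytes
       fall on different TARGET_PAGE_MASK-aligned pages */
    return (pc & TARGET_PAGE_MASK) != (last & TARGET_PAGE_MASK);
}
#endif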
1780 
1781 /*
1782  * Call with all @pages locked.
1783  * @p must be non-NULL.
1784  */
1785 static void
1786 tb_invalidate_phys_page_range__locked(struct page_collection *pages,
1787                                       PageDesc *p, tb_page_addr_t start,
1788                                       tb_page_addr_t end,
1789                                       int is_cpu_write_access)
1790 {
1791     TranslationBlock *tb;
1792     tb_page_addr_t tb_start, tb_end;
1793     int n;
1794 #ifdef TARGET_HAS_PRECISE_SMC
1795     CPUState *cpu = current_cpu;
1796     CPUArchState *env = NULL;
1797     int current_tb_not_found = is_cpu_write_access;
1798     TranslationBlock *current_tb = NULL;
1799     int current_tb_modified = 0;
1800     target_ulong current_pc = 0;
1801     target_ulong current_cs_base = 0;
1802     uint32_t current_flags = 0;
1803 #endif /* TARGET_HAS_PRECISE_SMC */
1804 
1805     assert_page_locked(p);
1806 
1807 #if defined(TARGET_HAS_PRECISE_SMC)
1808     if (cpu != NULL) {
1809         env = cpu->env_ptr;
1810     }
1811 #endif
1812 
1813     /* we remove all the TBs in the range [start, end[ */
1814     /* XXX: see if in some cases it could be faster to invalidate all
1815        the code */
1816     PAGE_FOR_EACH_TB(p, tb, n) {
1817         assert_page_locked(p);
1818         /* NOTE: this is subtle as a TB may span two physical pages */
1819         if (n == 0) {
1820             /* NOTE: tb_end may be after the end of the page, but
1821                it is not a problem */
1822             tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1823             tb_end = tb_start + tb->size;
1824         } else {
1825             tb_start = tb->page_addr[1];
1826             tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1827         }
1828         if (!(tb_end <= start || tb_start >= end)) {
1829 #ifdef TARGET_HAS_PRECISE_SMC
1830             if (current_tb_not_found) {
1831                 current_tb_not_found = 0;
1832                 current_tb = NULL;
1833                 if (cpu->mem_io_pc) {
1834                     /* now we have a real cpu fault */
1835                     current_tb = tcg_tb_lookup(cpu->mem_io_pc);
1836                 }
1837             }
1838             if (current_tb == tb &&
1839                 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1840                 /* If we are modifying the current TB, we must stop
1841                    its execution. We could be more precise by checking
1842                    that the modification is after the current PC, but it
1843                    would require a specialized function to partially
1844                    restore the CPU state */
1845 
1846                 current_tb_modified = 1;
1847                 cpu_restore_state_from_tb(cpu, current_tb,
1848                                           cpu->mem_io_pc, true);
1849                 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1850                                      &current_flags);
1851             }
1852 #endif /* TARGET_HAS_PRECISE_SMC */
1853             tb_phys_invalidate__locked(tb);
1854         }
1855     }
1856 #if !defined(CONFIG_USER_ONLY)
1857     /* if no code remains, there is no need to keep using slow writes */
1858     if (!p->first_tb) {
1859         invalidate_page_bitmap(p);
1860         tlb_unprotect_code(start);
1861     }
1862 #endif
1863 #ifdef TARGET_HAS_PRECISE_SMC
1864     if (current_tb_modified) {
1865         page_collection_unlock(pages);
1866         /* Force execution of one insn next time.  */
1867         cpu->cflags_next_tb = 1 | curr_cflags();
1868         cpu_loop_exit_noexc(cpu);
1869     }
1870 #endif
1871 }
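/*
 * Editor's sketch (not part of the original file): the loop above keeps a
 * TB only when its [tb_start, tb_end[ range lies entirely before or
 * entirely after the written [start, end[ range.  The same half-open
 * overlap test written out on its own; the function name is hypothetical.
 */
#if 0
static bool tb_range_overlaps_example(tb_page_addr_t tb_start, tb_page_addr_t tb_end,
                                      tb_page_addr_t start, tb_page_addr_t end)
{
    /* no overlap when one half-open range ends before the other begins */
    return !(tb_end <= start || tb_start >= end);
}
#endif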
1872 
1873 /*
1874  * Invalidate all TBs which intersect with the target physical address range
1875  * [start;end[. NOTE: start and end must refer to the *same* physical page.
1876  * 'is_cpu_write_access' should be true if called from a real cpu write
1877  * access: the virtual CPU will exit the current TB if code is modified inside
1878  * this TB.
1879  *
1880  * Called with tb_lock/mmap_lock held for user-mode emulation
1881  * Called with tb_lock held for system-mode emulation
1882  */
1883 void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
1884                                    int is_cpu_write_access)
1885 {
1886     struct page_collection *pages;
1887     PageDesc *p;
1888 
1889     assert_memory_lock();
1890     assert_tb_locked();
1891 
1892     p = page_find(start >> TARGET_PAGE_BITS);
1893     if (p == NULL) {
1894         return;
1895     }
1896     pages = page_collection_lock(start, end);
1897     tb_invalidate_phys_page_range__locked(pages, p, start, end,
1898                                           is_cpu_write_access);
1899     page_collection_unlock(pages);
1900 }
1901 
1902 /*
1903  * Invalidate all TBs which intersect with the target physical address range
1904  * [start;end[. NOTE: start and end may refer to *different* physical pages.
1905  * 'is_cpu_write_access' should be true if called from a real cpu write
1906  * access: the virtual CPU will exit the current TB if code is modified inside
1907  * this TB.
1908  *
1909  * Called with mmap_lock held for user-mode emulation, grabs tb_lock
1910  * Called with tb_lock held for system-mode emulation
1911  */
1912 static void tb_invalidate_phys_range_1(tb_page_addr_t start, tb_page_addr_t end)
1913 {
1914     struct page_collection *pages;
1915     tb_page_addr_t next;
1916 
1917     pages = page_collection_lock(start, end);
1918     for (next = (start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
1919          start < end;
1920          start = next, next += TARGET_PAGE_SIZE) {
1921         PageDesc *pd = page_find(start >> TARGET_PAGE_BITS);
1922         tb_page_addr_t bound = MIN(next, end);
1923 
1924         if (pd == NULL) {
1925             continue;
1926         }
1927         tb_invalidate_phys_page_range__locked(pages, pd, start, bound, 0);
1928     }
1929     page_collection_unlock(pages);
1930 }
1931 
1932 #ifdef CONFIG_SOFTMMU
1933 void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
1934 {
1935     assert_tb_locked();
1936     tb_invalidate_phys_range_1(start, end);
1937 }
1938 #else
1939 void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
1940 {
1941     assert_memory_lock();
1942     tb_lock();
1943     tb_invalidate_phys_range_1(start, end);
1944     tb_unlock();
1945 }
1946 #endif
1947 
1948 #ifdef CONFIG_SOFTMMU
1949 /* len must be <= 8 and start must be a multiple of len.
1950  * Called via softmmu_template.h when code areas are written to with
1951  * iothread mutex not held.
1952  */
1953 void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
1954 {
1955     struct page_collection *pages;
1956     PageDesc *p;
1957 
1958 #if 0
1959     if (1) {
1960         qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1961                   cpu_single_env->mem_io_vaddr, len,
1962                   cpu_single_env->eip,
1963                   cpu_single_env->eip +
1964                   (intptr_t)cpu_single_env->segs[R_CS].base);
1965     }
1966 #endif
1967     assert_memory_lock();
1968 
1969     p = page_find(start >> TARGET_PAGE_BITS);
1970     if (!p) {
1971         return;
1972     }
1973 
1974     pages = page_collection_lock(start, start + len);
1975     assert_page_locked(p);
1976     if (!p->code_bitmap &&
1977         ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
1978         build_page_bitmap(p);
1979     }
1980     if (p->code_bitmap) {
1981         unsigned int nr;
1982         unsigned long b;
1983 
1984         nr = start & ~TARGET_PAGE_MASK;
1985         b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
1986         if (b & ((1 << len) - 1)) {
1987             goto do_invalidate;
1988         }
1989     } else {
1990     do_invalidate:
1991         tb_invalidate_phys_page_range__locked(pages, p, start, start + len, 1);
1992     }
1993     page_collection_unlock(pages);
1994 }
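/*
 * Editor's sketch (not part of the original file): the fast path above
 * shifts the page's code_bitmap so that bit 0 corresponds to the first
 * written byte, then tests the low 'len' bits.  Shown in isolation,
 * assuming 0 < len <= 8 and start aligned to len as the comment requires;
 * the function name is hypothetical.
 */
#if 0
static bool write_hits_translated_code_example(const unsigned long *code_bitmap,
                                               tb_page_addr_t start, int len)
{
    unsigned int nr = start & ~TARGET_PAGE_MASK;    /* offset within the page */
    unsigned long b = code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));

    /* any set bit in the written range means translated code lives there */
    return (b & ((1 << len) - 1)) != 0;
}
#endif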
1995 #else
1996 /* Called with mmap_lock held. If pc is not 0 then it indicates the
1997  * host PC of the faulting store instruction that caused this invalidate.
1998  * Returns true if the caller needs to abort execution of the current
1999  * TB (because it was modified by this store and the guest CPU has
2000  * precise-SMC semantics).
2001  */
2002 static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
2003 {
2004     TranslationBlock *tb;
2005     PageDesc *p;
2006     int n;
2007 #ifdef TARGET_HAS_PRECISE_SMC
2008     TranslationBlock *current_tb = NULL;
2009     CPUState *cpu = current_cpu;
2010     CPUArchState *env = NULL;
2011     int current_tb_modified = 0;
2012     target_ulong current_pc = 0;
2013     target_ulong current_cs_base = 0;
2014     uint32_t current_flags = 0;
2015 #endif
2016 
2017     assert_memory_lock();
2018 
2019     addr &= TARGET_PAGE_MASK;
2020     p = page_find(addr >> TARGET_PAGE_BITS);
2021     if (!p) {
2022         return false;
2023     }
2024 
2025     tb_lock();
2026 #ifdef TARGET_HAS_PRECISE_SMC
2027     if (p->first_tb && pc != 0) {
2028         current_tb = tcg_tb_lookup(pc);
2029     }
2030     if (cpu != NULL) {
2031         env = cpu->env_ptr;
2032     }
2033 #endif
2034     assert_page_locked(p);
2035     PAGE_FOR_EACH_TB(p, tb, n) {
2036 #ifdef TARGET_HAS_PRECISE_SMC
2037         if (current_tb == tb &&
2038             (current_tb->cflags & CF_COUNT_MASK) != 1) {
2039             /* If we are modifying the current TB, we must stop
2040                its execution. We could be more precise by checking
2041                that the modification is after the current PC, but it
2042                would require a specialized function to partially
2043                restore the CPU state */
2044 
2045             current_tb_modified = 1;
2046             cpu_restore_state_from_tb(cpu, current_tb, pc, true);
2047             cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
2048                                  &current_flags);
2049         }
2050 #endif /* TARGET_HAS_PRECISE_SMC */
2051         tb_phys_invalidate(tb, addr);
2052     }
2053     p->first_tb = (uintptr_t)NULL;
2054 #ifdef TARGET_HAS_PRECISE_SMC
2055     if (current_tb_modified) {
2056         /* Force execution of one insn next time.  */
2057         cpu->cflags_next_tb = 1 | curr_cflags();
2058         /* tb_lock will be reset after cpu_loop_exit_noexc longjmps
2059          * back into the cpu_exec loop. */
2060         return true;
2061     }
2062 #endif
2063     tb_unlock();
2064 
2065     return false;
2066 }
2067 #endif
2068 
2069 #if !defined(CONFIG_USER_ONLY)
2070 void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs)
2071 {
2072     ram_addr_t ram_addr;
2073     MemoryRegion *mr;
2074     hwaddr l = 1;
2075 
2076     rcu_read_lock();
2077     mr = address_space_translate(as, addr, &addr, &l, false, attrs);
2078     if (!(memory_region_is_ram(mr)
2079           || memory_region_is_romd(mr))) {
2080         rcu_read_unlock();
2081         return;
2082     }
2083     ram_addr = memory_region_get_ram_addr(mr) + addr;
2084     tb_lock();
2085     tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
2086     tb_unlock();
2087     rcu_read_unlock();
2088 }
2089 #endif /* !defined(CONFIG_USER_ONLY) */
2090 
2091 /* Called with tb_lock held.  */
2092 void tb_check_watchpoint(CPUState *cpu)
2093 {
2094     TranslationBlock *tb;
2095 
2096     tb = tcg_tb_lookup(cpu->mem_io_pc);
2097     if (tb) {
2098         /* We can use retranslation to find the PC.  */
2099         cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc, true);
2100         tb_phys_invalidate(tb, -1);
2101     } else {
2102         /* The exception probably happened in a helper.  The CPU state should
2103            have been saved before calling it. Fetch the PC from there.  */
2104         CPUArchState *env = cpu->env_ptr;
2105         target_ulong pc, cs_base;
2106         tb_page_addr_t addr;
2107         uint32_t flags;
2108 
2109         cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
2110         addr = get_page_addr_code(env, pc);
2111         tb_invalidate_phys_range(addr, addr + 1);
2112     }
2113 }
2114 
2115 #ifndef CONFIG_USER_ONLY
2116 /* in deterministic execution mode, instructions doing device I/O
2117  * must be at the end of the TB.
2118  *
2119  * Called by softmmu_template.h, with iothread mutex not held.
2120  */
2121 void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
2122 {
2123 #if defined(TARGET_MIPS) || defined(TARGET_SH4)
2124     CPUArchState *env = cpu->env_ptr;
2125 #endif
2126     TranslationBlock *tb;
2127     uint32_t n;
2128 
2129     tb_lock();
2130     tb = tcg_tb_lookup(retaddr);
2131     if (!tb) {
2132         cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
2133                   (void *)retaddr);
2134     }
2135     cpu_restore_state_from_tb(cpu, tb, retaddr, true);
2136 
2137     /* On MIPS and SH, delay slot instructions can only be restarted if
2138        they were already the first instruction in the TB.  If this is not
2139        the first instruction in a TB then re-execute the preceding
2140        branch.  */
2141     n = 1;
2142 #if defined(TARGET_MIPS)
2143     if ((env->hflags & MIPS_HFLAG_BMASK) != 0
2144         && env->active_tc.PC != tb->pc) {
2145         env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
2146         cpu->icount_decr.u16.low++;
2147         env->hflags &= ~MIPS_HFLAG_BMASK;
2148         n = 2;
2149     }
2150 #elif defined(TARGET_SH4)
2151     if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
2152         && env->pc != tb->pc) {
2153         env->pc -= 2;
2154         cpu->icount_decr.u16.low++;
2155         env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
2156         n = 2;
2157     }
2158 #endif
2159 
2160     /* Generate a new TB executing the I/O insn.  */
2161     cpu->cflags_next_tb = curr_cflags() | CF_LAST_IO | n;
2162 
2163     if (tb->cflags & CF_NOCACHE) {
2164         if (tb->orig_tb) {
2165             /* Invalidate original TB if this TB was generated in
2166              * cpu_exec_nocache() */
2167             tb_phys_invalidate(tb->orig_tb, -1);
2168         }
2169         tcg_tb_remove(tb);
2170     }
2171 
2172     /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
2173      * the first in the TB) then we end up generating a whole new TB and
2174      * repeating the fault, which is horribly inefficient.
2175      * Better would be to execute just this insn uncached, or generate a
2176      * second new TB.
2177      *
2178      * cpu_loop_exit_noexc will longjmp back to cpu_exec where the
2179      * tb_lock gets reset.
2180      */
2181     cpu_loop_exit_noexc(cpu);
2182 }
2183 
2184 static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
2185 {
2186     unsigned int i, i0 = tb_jmp_cache_hash_page(page_addr);
2187 
2188     for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
2189         atomic_set(&cpu->tb_jmp_cache[i0 + i], NULL);
2190     }
2191 }
2192 
2193 void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
2194 {
2195     /* Discard jump cache entries for any tb which might overlap the
2196        flushed page.  */
2197     tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
2198     tb_jmp_cache_clear_page(cpu, addr);
2199 }
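/*
 * Editor's sketch (not part of the original file): a TB can start on the
 * page preceding the flushed one and run into it, which is why the
 * function above clears the jump-cache sets for both addr and
 * addr - TARGET_PAGE_SIZE.  A hypothetical helper that flushes a whole
 * guest range page by page, built only on tb_flush_jmp_cache():
 */
#if 0
static void tb_flush_jmp_cache_range_example(CPUState *cpu,
                                             target_ulong start, target_ulong end)
{
    target_ulong addr;

    for (addr = start & TARGET_PAGE_MASK; addr < end; addr += TARGET_PAGE_SIZE) {
        tb_flush_jmp_cache(cpu, addr);
    }
}
#endif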
2200 
2201 static void print_qht_statistics(FILE *f, fprintf_function cpu_fprintf,
2202                                  struct qht_stats hst)
2203 {
2204     uint32_t hgram_opts;
2205     size_t hgram_bins;
2206     char *hgram;
2207 
2208     if (!hst.head_buckets) {
2209         return;
2210     }
2211     cpu_fprintf(f, "TB hash buckets     %zu/%zu (%0.2f%% head buckets used)\n",
2212                 hst.used_head_buckets, hst.head_buckets,
2213                 (double)hst.used_head_buckets / hst.head_buckets * 100);
2214 
2215     hgram_opts =  QDIST_PR_BORDER | QDIST_PR_LABELS;
2216     hgram_opts |= QDIST_PR_100X   | QDIST_PR_PERCENT;
2217     if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) {
2218         hgram_opts |= QDIST_PR_NODECIMAL;
2219     }
2220     hgram = qdist_pr(&hst.occupancy, 10, hgram_opts);
2221     cpu_fprintf(f, "TB hash occupancy   %0.2f%% avg chain occ. Histogram: %s\n",
2222                 qdist_avg(&hst.occupancy) * 100, hgram);
2223     g_free(hgram);
2224 
2225     hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
2226     hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain);
2227     if (hgram_bins > 10) {
2228         hgram_bins = 10;
2229     } else {
2230         hgram_bins = 0;
2231         hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE;
2232     }
2233     hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts);
2234     cpu_fprintf(f, "TB hash avg chain   %0.3f buckets. Histogram: %s\n",
2235                 qdist_avg(&hst.chain), hgram);
2236     g_free(hgram);
2237 }
2238 
2239 struct tb_tree_stats {
2240     size_t nb_tbs;
2241     size_t host_size;
2242     size_t target_size;
2243     size_t max_target_size;
2244     size_t direct_jmp_count;
2245     size_t direct_jmp2_count;
2246     size_t cross_page;
2247 };
2248 
2249 static gboolean tb_tree_stats_iter(gpointer key, gpointer value, gpointer data)
2250 {
2251     const TranslationBlock *tb = value;
2252     struct tb_tree_stats *tst = data;
2253 
2254     tst->nb_tbs++;
2255     tst->host_size += tb->tc.size;
2256     tst->target_size += tb->size;
2257     if (tb->size > tst->max_target_size) {
2258         tst->max_target_size = tb->size;
2259     }
2260     if (tb->page_addr[1] != -1) {
2261         tst->cross_page++;
2262     }
2263     if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
2264         tst->direct_jmp_count++;
2265         if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
2266             tst->direct_jmp2_count++;
2267         }
2268     }
2269     return false;
2270 }
2271 
2272 void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
2273 {
2274     struct tb_tree_stats tst = {};
2275     struct qht_stats hst;
2276     size_t nb_tbs;
2277 
2278     tcg_tb_foreach(tb_tree_stats_iter, &tst);
2279     nb_tbs = tst.nb_tbs;
2280     /* XXX: avoid using doubles ? */
2281     cpu_fprintf(f, "Translation buffer state:\n");
2282     /*
2283      * Report total code size including the padding and TB structs;
2284      * otherwise users might think "-tb-size" is not honoured.
2285      * For avg host size we use the precise numbers from tb_tree_stats though.
2286      */
2287     cpu_fprintf(f, "gen code size       %zu/%zu\n",
2288                 tcg_code_size(), tcg_code_capacity());
2289     cpu_fprintf(f, "TB count            %zu\n", nb_tbs);
2290     cpu_fprintf(f, "TB avg target size  %zu max=%zu bytes\n",
2291                 nb_tbs ? tst.target_size / nb_tbs : 0,
2292                 tst.max_target_size);
2293     cpu_fprintf(f, "TB avg host size    %zu bytes (expansion ratio: %0.1f)\n",
2294                 nb_tbs ? tst.host_size / nb_tbs : 0,
2295                 tst.target_size ? (double)tst.host_size / tst.target_size : 0);
2296     cpu_fprintf(f, "cross page TB count %zu (%zu%%)\n", tst.cross_page,
2297                 nb_tbs ? (tst.cross_page * 100) / nb_tbs : 0);
2298     cpu_fprintf(f, "direct jump count   %zu (%zu%%) (2 jumps=%zu %zu%%)\n",
2299                 tst.direct_jmp_count,
2300                 nb_tbs ? (tst.direct_jmp_count * 100) / nb_tbs : 0,
2301                 tst.direct_jmp2_count,
2302                 nb_tbs ? (tst.direct_jmp2_count * 100) / nb_tbs : 0);
2303 
2304     qht_statistics_init(&tb_ctx.htable, &hst);
2305     print_qht_statistics(f, cpu_fprintf, hst);
2306     qht_statistics_destroy(&hst);
2307 
2308     cpu_fprintf(f, "\nStatistics:\n");
2309     cpu_fprintf(f, "TB flush count      %u\n",
2310                 atomic_read(&tb_ctx.tb_flush_count));
2311     cpu_fprintf(f, "TB invalidate count %zu\n", tcg_tb_phys_invalidate_count());
2312     cpu_fprintf(f, "TLB flush count     %zu\n", tlb_flush_count());
2313     tcg_dump_info(f, cpu_fprintf);
2314 }
2315 
2316 void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf)
2317 {
2318     tcg_dump_op_count(f, cpu_fprintf);
2319 }
2320 
2321 #else /* CONFIG_USER_ONLY */
2322 
2323 void cpu_interrupt(CPUState *cpu, int mask)
2324 {
2325     g_assert(qemu_mutex_iothread_locked());
2326     cpu->interrupt_request |= mask;
2327     cpu->icount_decr.u16.high = -1;
2328 }
2329 
2330 /*
2331  * Walks guest process memory "regions" one by one
2332  * and calls callback function 'fn' for each region.
2333  */
2334 struct walk_memory_regions_data {
2335     walk_memory_regions_fn fn;
2336     void *priv;
2337     target_ulong start;
2338     int prot;
2339 };
2340 
2341 static int walk_memory_regions_end(struct walk_memory_regions_data *data,
2342                                    target_ulong end, int new_prot)
2343 {
2344     if (data->start != -1u) {
2345         int rc = data->fn(data->priv, data->start, end, data->prot);
2346         if (rc != 0) {
2347             return rc;
2348         }
2349     }
2350 
2351     data->start = (new_prot ? end : -1u);
2352     data->prot = new_prot;
2353 
2354     return 0;
2355 }
2356 
2357 static int walk_memory_regions_1(struct walk_memory_regions_data *data,
2358                                  target_ulong base, int level, void **lp)
2359 {
2360     target_ulong pa;
2361     int i, rc;
2362 
2363     if (*lp == NULL) {
2364         return walk_memory_regions_end(data, base, 0);
2365     }
2366 
2367     if (level == 0) {
2368         PageDesc *pd = *lp;
2369 
2370         for (i = 0; i < V_L2_SIZE; ++i) {
2371             int prot = pd[i].flags;
2372 
2373             pa = base | (i << TARGET_PAGE_BITS);
2374             if (prot != data->prot) {
2375                 rc = walk_memory_regions_end(data, pa, prot);
2376                 if (rc != 0) {
2377                     return rc;
2378                 }
2379             }
2380         }
2381     } else {
2382         void **pp = *lp;
2383 
2384         for (i = 0; i < V_L2_SIZE; ++i) {
2385             pa = base | ((target_ulong)i <<
2386                 (TARGET_PAGE_BITS + V_L2_BITS * level));
2387             rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2388             if (rc != 0) {
2389                 return rc;
2390             }
2391         }
2392     }
2393 
2394     return 0;
2395 }
2396 
2397 int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2398 {
2399     struct walk_memory_regions_data data;
2400     uintptr_t i, l1_sz = v_l1_size;
2401 
2402     data.fn = fn;
2403     data.priv = priv;
2404     data.start = -1u;
2405     data.prot = 0;
2406 
2407     for (i = 0; i < l1_sz; i++) {
2408         target_ulong base = i << (v_l1_shift + TARGET_PAGE_BITS);
2409         int rc = walk_memory_regions_1(&data, base, v_l2_levels, l1_map + i);
2410         if (rc != 0) {
2411             return rc;
2412         }
2413     }
2414 
2415     return walk_memory_regions_end(&data, 0, 0);
2416 }
2417 
2418 static int dump_region(void *priv, target_ulong start,
2419     target_ulong end, unsigned long prot)
2420 {
2421     FILE *f = (FILE *)priv;
2422 
2423     (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx
2424         " "TARGET_FMT_lx" %c%c%c\n",
2425         start, end, end - start,
2426         ((prot & PAGE_READ) ? 'r' : '-'),
2427         ((prot & PAGE_WRITE) ? 'w' : '-'),
2428         ((prot & PAGE_EXEC) ? 'x' : '-'));
2429 
2430     return 0;
2431 }
2432 
2433 /* dump memory mappings */
2434 void page_dump(FILE *f)
2435 {
2436     const int length = sizeof(target_ulong) * 2;
2437     (void) fprintf(f, "%-*s %-*s %-*s %s\n",
2438             length, "start", length, "end", length, "size", "prot");
2439     walk_memory_regions(f, dump_region);
2440 }
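/*
 * Editor's sketch (not part of the original file): page_dump() prints one
 * "start end size prot" row per region known to the page descriptor tree.
 * A hypothetical debugging helper that sends the map to stderr:
 */
#if 0
static void dump_guest_mappings_example(void)
{
    page_dump(stderr);
}
#endif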
2441 
2442 int page_get_flags(target_ulong address)
2443 {
2444     PageDesc *p;
2445 
2446     p = page_find(address >> TARGET_PAGE_BITS);
2447     if (!p) {
2448         return 0;
2449     }
2450     return p->flags;
2451 }
2452 
2453 /* Modify the flags of a page and invalidate the code if necessary.
2454    The flag PAGE_WRITE_ORG is set automatically depending
2455    on PAGE_WRITE.  The mmap_lock should already be held.  */
2456 void page_set_flags(target_ulong start, target_ulong end, int flags)
2457 {
2458     target_ulong addr, len;
2459 
2460     /* This function should never be called with addresses outside the
2461        guest address space.  If this assert fires, it probably indicates
2462        a missing call to h2g_valid.  */
2463 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2464     assert(end <= ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2465 #endif
2466     assert(start < end);
2467     assert_memory_lock();
2468 
2469     start = start & TARGET_PAGE_MASK;
2470     end = TARGET_PAGE_ALIGN(end);
2471 
2472     if (flags & PAGE_WRITE) {
2473         flags |= PAGE_WRITE_ORG;
2474     }
2475 
2476     for (addr = start, len = end - start;
2477          len != 0;
2478          len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2479         PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2480 
2481         /* If the write protection bit is set, then we invalidate
2482            the code inside.  */
2483         if (!(p->flags & PAGE_WRITE) &&
2484             (flags & PAGE_WRITE) &&
2485             p->first_tb) {
2486             tb_invalidate_phys_page(addr, 0);
2487         }
2488         p->flags = flags;
2489     }
2490 }
2491 
2492 int page_check_range(target_ulong start, target_ulong len, int flags)
2493 {
2494     PageDesc *p;
2495     target_ulong end;
2496     target_ulong addr;
2497 
2498     /* This function should never be called with addresses outside the
2499        guest address space.  If this assert fires, it probably indicates
2500        a missing call to h2g_valid.  */
2501 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2502     assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2503 #endif
2504 
2505     if (len == 0) {
2506         return 0;
2507     }
2508     if (start + len - 1 < start) {
2509         /* We've wrapped around.  */
2510         return -1;
2511     }
2512 
2513     /* must be done before we lose bits in the next step */
2514     end = TARGET_PAGE_ALIGN(start + len);
2515     start = start & TARGET_PAGE_MASK;
2516 
2517     for (addr = start, len = end - start;
2518          len != 0;
2519          len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2520         p = page_find(addr >> TARGET_PAGE_BITS);
2521         if (!p) {
2522             return -1;
2523         }
2524         if (!(p->flags & PAGE_VALID)) {
2525             return -1;
2526         }
2527 
2528         if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
2529             return -1;
2530         }
2531         if (flags & PAGE_WRITE) {
2532             if (!(p->flags & PAGE_WRITE_ORG)) {
2533                 return -1;
2534             }
2535             /* unprotect the page if it was put read-only because it
2536                contains translated code */
2537             if (!(p->flags & PAGE_WRITE)) {
2538                 if (!page_unprotect(addr, 0)) {
2539                     return -1;
2540                 }
2541             }
2542         }
2543     }
2544     return 0;
2545 }
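/*
 * Editor's sketch (not part of the original file): a typical caller of
 * page_check_range() validates a guest buffer before touching it and
 * treats a negative return as a fault.  The helper below is hypothetical.
 */
#if 0
static bool guest_range_writable_example(target_ulong guest_addr, target_ulong size)
{
    /* page_check_range() returns 0 on success and -1 on any failing page;
       PAGE_WRITE also unprotects pages that were made read-only because
       they contain translated code */
    return page_check_range(guest_addr, size, PAGE_WRITE) == 0;
}
#endif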
2546 
2547 /* called from signal handler: invalidate the code and unprotect the
2548  * page. Return 0 if the fault was not handled, 1 if it was handled,
2549  * and 2 if it was handled but the caller must cause the TB to be
2550  * immediately exited. (We can only return 2 if the 'pc' argument is
2551  * non-zero.)
2552  */
2553 int page_unprotect(target_ulong address, uintptr_t pc)
2554 {
2555     unsigned int prot;
2556     bool current_tb_invalidated;
2557     PageDesc *p;
2558     target_ulong host_start, host_end, addr;
2559 
2560     /* Technically this isn't safe inside a signal handler.  However we
2561        know this only ever happens in a synchronous SEGV handler, so in
2562        practice it seems to be ok.  */
2563     mmap_lock();
2564 
2565     p = page_find(address >> TARGET_PAGE_BITS);
2566     if (!p) {
2567         mmap_unlock();
2568         return 0;
2569     }
2570 
2571     /* if the page was really writable, then we change its
2572        protection back to writable */
2573     if (p->flags & PAGE_WRITE_ORG) {
2574         current_tb_invalidated = false;
2575         if (p->flags & PAGE_WRITE) {
2576             /* If the page is actually marked WRITE then assume this is because
2577              * this thread raced with another one which got here first and
2578              * set the page to PAGE_WRITE and did the TB invalidate for us.
2579              */
2580 #ifdef TARGET_HAS_PRECISE_SMC
2581             TranslationBlock *current_tb = tcg_tb_lookup(pc);
2582             if (current_tb) {
2583                 current_tb_invalidated = tb_cflags(current_tb) & CF_INVALID;
2584             }
2585 #endif
2586         } else {
2587             host_start = address & qemu_host_page_mask;
2588             host_end = host_start + qemu_host_page_size;
2589 
2590             prot = 0;
2591             for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
2592                 p = page_find(addr >> TARGET_PAGE_BITS);
2593                 p->flags |= PAGE_WRITE;
2594                 prot |= p->flags;
2595 
2596                 /* and since the content will be modified, we must invalidate
2597                    the corresponding translated code. */
2598                 current_tb_invalidated |= tb_invalidate_phys_page(addr, pc);
2599 #ifdef CONFIG_USER_ONLY
2600                 if (DEBUG_TB_CHECK_GATE) {
2601                     tb_invalidate_check(addr);
2602                 }
2603 #endif
2604             }
2605             mprotect((void *)g2h(host_start), qemu_host_page_size,
2606                      prot & PAGE_BITS);
2607         }
2608         mmap_unlock();
2609         /* If current TB was invalidated return to main loop */
2610         return current_tb_invalidated ? 2 : 1;
2611     }
2612     mmap_unlock();
2613     return 0;
2614 }
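/*
 * Editor's sketch (not part of the original file): how a caller on the
 * SEGV path might map the three return values of page_unprotect()
 * documented above.  The function name is hypothetical and a real caller
 * would exit the TB via cpu_loop_exit_noexc() in the '2' case.
 */
#if 0
static void handle_write_fault_example(target_ulong guest_addr, uintptr_t host_pc)
{
    switch (page_unprotect(guest_addr, host_pc)) {
    case 0:
        /* not a write-protected code page: deliver the fault to the guest */
        break;
    case 1:
        /* handled: the faulting write can simply be restarted */
        break;
    case 2:
        /* handled, but the TB currently executing was invalidated:
           return to the main loop before restarting the write */
        break;
    }
}
#endif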
2615 #endif /* CONFIG_USER_ONLY */
2616 
2617 /* This is a wrapper for common code that cannot use CONFIG_SOFTMMU */
2618 void tcg_flush_softmmu_tlb(CPUState *cs)
2619 {
2620 #ifdef CONFIG_SOFTMMU
2621     tlb_flush(cs);
2622 #endif
2623 }
2624