1 /*
2  *  Host code generation
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include "qemu/osdep.h"
21 #include "qemu-common.h"
22 
23 #define NO_CPU_IO_DEFS
24 #include "cpu.h"
25 #include "trace.h"
26 #include "disas/disas.h"
27 #include "exec/exec-all.h"
28 #include "tcg.h"
29 #if defined(CONFIG_USER_ONLY)
30 #include "qemu.h"
31 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
32 #include <sys/param.h>
33 #if __FreeBSD_version >= 700104
34 #define HAVE_KINFO_GETVMMAP
35 #define sigqueue sigqueue_freebsd  /* avoid redefinition */
36 #include <sys/proc.h>
37 #include <machine/profile.h>
38 #define _KERNEL
39 #include <sys/user.h>
40 #undef _KERNEL
41 #undef sigqueue
42 #include <libutil.h>
43 #endif
44 #endif
45 #else
46 #include "exec/ram_addr.h"
47 #endif
48 
49 #include "exec/cputlb.h"
50 #include "exec/tb-hash.h"
51 #include "translate-all.h"
52 #include "qemu/bitmap.h"
53 #include "qemu/error-report.h"
54 #include "qemu/qemu-print.h"
55 #include "qemu/timer.h"
56 #include "qemu/main-loop.h"
57 #include "exec/log.h"
58 #include "sysemu/cpus.h"
59 #include "sysemu/tcg.h"
60 
61 /* #define DEBUG_TB_INVALIDATE */
62 /* #define DEBUG_TB_FLUSH */
63 /* make various TB consistency checks */
64 /* #define DEBUG_TB_CHECK */
65 
66 #ifdef DEBUG_TB_INVALIDATE
67 #define DEBUG_TB_INVALIDATE_GATE 1
68 #else
69 #define DEBUG_TB_INVALIDATE_GATE 0
70 #endif
71 
72 #ifdef DEBUG_TB_FLUSH
73 #define DEBUG_TB_FLUSH_GATE 1
74 #else
75 #define DEBUG_TB_FLUSH_GATE 0
76 #endif
77 
78 #if !defined(CONFIG_USER_ONLY)
79 /* TB consistency checks only implemented for usermode emulation.  */
80 #undef DEBUG_TB_CHECK
81 #endif
82 
83 #ifdef DEBUG_TB_CHECK
84 #define DEBUG_TB_CHECK_GATE 1
85 #else
86 #define DEBUG_TB_CHECK_GATE 0
87 #endif
88 
89 /* Access to the various translation structures needs to be serialised via locks
90  * for consistency.
91  * In user-mode emulation, access to the memory-related structures is protected
92  * by mmap_lock.
93  * In !user-mode we use per-page locks.
94  */
95 #ifdef CONFIG_SOFTMMU
96 #define assert_memory_lock()
97 #else
98 #define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
99 #endif
100 
101 #define SMC_BITMAP_USE_THRESHOLD 10
102 
103 typedef struct PageDesc {
104     /* list of TBs intersecting this ram page */
105     uintptr_t first_tb;
106 #ifdef CONFIG_SOFTMMU
107     /* in order to optimize self-modifying code, we count the number
108        of writes to a given page and use a bitmap once a threshold is reached */
109     unsigned long *code_bitmap;
110     unsigned int code_write_count;
111 #else
112     unsigned long flags;
113 #endif
114 #ifndef CONFIG_USER_ONLY
115     QemuSpin lock;
116 #endif
117 } PageDesc;
118 
119 /**
120  * struct page_entry - page descriptor entry
121  * @pd:     pointer to the &struct PageDesc of the page this entry represents
122  * @index:  page index of the page
123  * @locked: whether the page is locked
124  *
125  * This struct helps us keep track of the locked state of a page, without
126  * bloating &struct PageDesc.
127  *
128  * A page lock protects accesses to all fields of &struct PageDesc.
129  *
130  * See also: &struct page_collection.
131  */
132 struct page_entry {
133     PageDesc *pd;
134     tb_page_addr_t index;
135     bool locked;
136 };
137 
138 /**
139  * struct page_collection - tracks a set of pages (i.e. &struct page_entry's)
140  * @tree:   Binary search tree (BST) of the pages, with key == page index
141  * @max:    Pointer to the page in @tree with the highest page index
142  *
143  * To avoid deadlock we lock pages in ascending order of page index.
144  * When operating on a set of pages, we need to keep track of them so that
145  * we can lock them in order and also unlock them later. For this we collect
146  * pages (i.e. &struct page_entry's) in a binary search @tree. Given that the
147  * @tree implementation we use does not provide an O(1) operation to obtain the
148  * highest-ranked element, we use @max to keep track of the inserted page
149  * with the highest index. This is valuable because if a page is not in
150  * the tree and its index is higher than @max's, then we can lock it
151  * without breaking the locking order rule.
152  *
153  * Note on naming: 'struct page_set' would be shorter, but we already have a few
154  * page_set_*() helpers, so page_collection is used instead to avoid confusion.
155  *
156  * See also: page_collection_lock().
157  */
158 struct page_collection {
159     GTree *tree;
160     struct page_entry *max;
161 };
162 
163 /* list iterators for lists of tagged pointers in TranslationBlock */
164 #define TB_FOR_EACH_TAGGED(head, tb, n, field)                          \
165     for (n = (head) & 1, tb = (TranslationBlock *)((head) & ~1);        \
166          tb; tb = (TranslationBlock *)tb->field[n], n = (uintptr_t)tb & 1, \
167              tb = (TranslationBlock *)((uintptr_t)tb & ~1))
168 
169 #define PAGE_FOR_EACH_TB(pagedesc, tb, n)                       \
170     TB_FOR_EACH_TAGGED((pagedesc)->first_tb, tb, n, page_next)
171 
172 #define TB_FOR_EACH_JMP(head_tb, tb, n)                                 \
173     TB_FOR_EACH_TAGGED((head_tb)->jmp_list_head, tb, n, jmp_list_next)
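/*
 * Sketch of the tagged-pointer encoding assumed by the iterators above:
 * each list entry is a TranslationBlock pointer whose low bit stores 'n',
 * i.e. which of the TB's (at most two) pages or jump slots the list is
 * threaded through, so the matching next pointer is tb->page_next[n]
 * (or tb->jmp_list_next[n]).  A head value of ((uintptr_t)tb | 1) thus
 * means "tb, linked via index 1"; the iterator recovers the pointer with
 * '& ~1' and the index with '& 1', relying on TBs being at least
 * 2-byte aligned.
 */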
174 
175 /* In system mode we want L1_MAP to be based on ram offsets,
176    while in user mode we want it to be based on virtual addresses.  */
177 #if !defined(CONFIG_USER_ONLY)
178 #if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
179 # define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
180 #else
181 # define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
182 #endif
183 #else
184 # define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
185 #endif
186 
187 /* Size of the L2 (and L3, etc) page tables.  */
188 #define V_L2_BITS 10
189 #define V_L2_SIZE (1 << V_L2_BITS)
190 
191 /* Make sure all possible CPU event bits fit in tb->trace_vcpu_dstate */
192 QEMU_BUILD_BUG_ON(CPU_TRACE_DSTATE_MAX_EVENTS >
193                   sizeof_field(TranslationBlock, trace_vcpu_dstate)
194                   * BITS_PER_BYTE);
195 
196 /*
197  * L1 Mapping properties
198  */
199 static int v_l1_size;
200 static int v_l1_shift;
201 static int v_l2_levels;
202 
203 /* The bottom level has pointers to PageDesc, and is indexed by
204  * anything from 4 to (V_L2_BITS + 3) bits, depending on target page size.
205  */
206 #define V_L1_MIN_BITS 4
207 #define V_L1_MAX_BITS (V_L2_BITS + 3)
208 #define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS)
209 
210 static void *l1_map[V_L1_MAX_SIZE];
211 
212 /* code generation context */
213 TCGContext tcg_init_ctx;
214 __thread TCGContext *tcg_ctx;
215 TBContext tb_ctx;
216 bool parallel_cpus;
217 
218 static void page_table_config_init(void)
219 {
220     uint32_t v_l1_bits;
221 
222     assert(TARGET_PAGE_BITS);
223     /* The bits remaining after N lower levels of page tables.  */
224     v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS;
225     if (v_l1_bits < V_L1_MIN_BITS) {
226         v_l1_bits += V_L2_BITS;
227     }
228 
229     v_l1_size = 1 << v_l1_bits;
230     v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits;
231     v_l2_levels = v_l1_shift / V_L2_BITS - 1;
232 
233     assert(v_l1_bits <= V_L1_MAX_BITS);
234     assert(v_l1_shift % V_L2_BITS == 0);
235     assert(v_l2_levels >= 0);
236 }
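/*
 * Worked example (a sketch; the exact numbers depend on the target):
 * with L1_MAP_ADDR_SPACE_BITS = 48, TARGET_PAGE_BITS = 12 and
 * V_L2_BITS = 10 there are 36 bits of page index to cover, giving
 *   v_l1_bits   = (48 - 12) % 10 = 6   (>= V_L1_MIN_BITS, so kept)
 *   v_l1_size   = 1 << 6 = 64 entries in l1_map
 *   v_l1_shift  = 48 - 12 - 6 = 30
 *   v_l2_levels = 30 / 10 - 1 = 2
 * i.e. two intermediate levels of V_L2_SIZE entries plus the bottom
 * level of V_L2_SIZE PageDescs: 6 + 2 * 10 + 10 = 36 bits.
 */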
237 
238 void cpu_gen_init(void)
239 {
240     tcg_context_init(&tcg_init_ctx);
241 }
242 
243 /* Encode VAL as a signed leb128 sequence at P.
244    Return P incremented past the encoded value.  */
245 static uint8_t *encode_sleb128(uint8_t *p, target_long val)
246 {
247     int more, byte;
248 
249     do {
250         byte = val & 0x7f;
251         val >>= 7;
252         more = !((val == 0 && (byte & 0x40) == 0)
253                  || (val == -1 && (byte & 0x40) != 0));
254         if (more) {
255             byte |= 0x80;
256         }
257         *p++ = byte;
258     } while (more);
259 
260     return p;
261 }
262 
263 /* Decode a signed leb128 sequence at *PP; increment *PP past the
264    decoded value.  Return the decoded value.  */
265 static target_long decode_sleb128(uint8_t **pp)
266 {
267     uint8_t *p = *pp;
268     target_long val = 0;
269     int byte, shift = 0;
270 
271     do {
272         byte = *p++;
273         val |= (target_ulong)(byte & 0x7f) << shift;
274         shift += 7;
275     } while (byte & 0x80);
276     if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
277         val |= -(target_ulong)1 << shift;
278     }
279 
280     *pp = p;
281     return val;
282 }
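/*
 * Worked example of the encoding above (a sketch): 300 (0x12c) encodes
 * as two bytes, 0xac 0x02 -- the low 7 bits with the continuation bit
 * 0x80 set, then the remaining bits.  A negative value such as -64
 * encodes as the single byte 0x40: decoding stops there, and since
 * bit 6 is set the "val |= -(target_ulong)1 << shift" step sign-extends
 * the result back to -64.
 */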
283 
284 /* Encode the data collected about the instructions while compiling TB.
285    Place the data at BLOCK, and return the number of bytes consumed.
286 
287    The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
288    which come from the target's insn_start data, followed by a uintptr_t
289    which comes from the host pc of the end of the code implementing the insn.
290 
291    Each line of the table is encoded as sleb128 deltas from the previous
292    line.  The seed for the first line is { tb->pc, 0..., tb->tc.ptr }.
293    That is, the first column is seeded with the guest pc, the last column
294    with the host pc, and the middle columns with zeros.  */
295 
296 static int encode_search(TranslationBlock *tb, uint8_t *block)
297 {
298     uint8_t *highwater = tcg_ctx->code_gen_highwater;
299     uint8_t *p = block;
300     int i, j, n;
301 
302     for (i = 0, n = tb->icount; i < n; ++i) {
303         target_ulong prev;
304 
305         for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
306             if (i == 0) {
307                 prev = (j == 0 ? tb->pc : 0);
308             } else {
309                 prev = tcg_ctx->gen_insn_data[i - 1][j];
310             }
311             p = encode_sleb128(p, tcg_ctx->gen_insn_data[i][j] - prev);
312         }
313         prev = (i == 0 ? 0 : tcg_ctx->gen_insn_end_off[i - 1]);
314         p = encode_sleb128(p, tcg_ctx->gen_insn_end_off[i] - prev);
315 
316         /* Test for (pending) buffer overflow.  The assumption is that any
317            one row beginning below the high water mark cannot overrun
318            the buffer completely.  Thus we can test for overflow after
319            encoding a row without having to check during encoding.  */
320         if (unlikely(p > highwater)) {
321             return -1;
322         }
323     }
324 
325     return p - block;
326 }
327 
328 /* The cpu state corresponding to 'searched_pc' is restored.
329  * When reset_icount is true, the current TB will be interrupted and
330  * icount should be recalculated.
331  */
332 static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
333                                      uintptr_t searched_pc, bool reset_icount)
334 {
335     target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc };
336     uintptr_t host_pc = (uintptr_t)tb->tc.ptr;
337     CPUArchState *env = cpu->env_ptr;
338     uint8_t *p = tb->tc.ptr + tb->tc.size;
339     int i, j, num_insns = tb->icount;
340 #ifdef CONFIG_PROFILER
341     TCGProfile *prof = &tcg_ctx->prof;
342     int64_t ti = profile_getclock();
343 #endif
344 
345     searched_pc -= GETPC_ADJ;
346 
347     if (searched_pc < host_pc) {
348         return -1;
349     }
350 
351     /* Reconstruct the stored insn data while looking for the point at
352        which the end of the insn exceeds the searched_pc.  */
353     for (i = 0; i < num_insns; ++i) {
354         for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
355             data[j] += decode_sleb128(&p);
356         }
357         host_pc += decode_sleb128(&p);
358         if (host_pc > searched_pc) {
359             goto found;
360         }
361     }
362     return -1;
363 
364  found:
365     if (reset_icount && (tb_cflags(tb) & CF_USE_ICOUNT)) {
366         assert(use_icount);
367         /* Reset the cycle counter to the start of the block
368            and shift it to the number of actually executed instructions */
369         cpu_neg(cpu)->icount_decr.u16.low += num_insns - i;
370     }
371     restore_state_to_opc(env, tb, data);
372 
373 #ifdef CONFIG_PROFILER
374     atomic_set(&prof->restore_time,
375                 prof->restore_time + profile_getclock() - ti);
376     atomic_set(&prof->restore_count, prof->restore_count + 1);
377 #endif
378     return 0;
379 }
380 
381 bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc, bool will_exit)
382 {
383     TranslationBlock *tb;
384     bool r = false;
385     uintptr_t check_offset;
386 
387     /* The host_pc has to be in the region of the current code buffer. If
388      * it is not, we will not be able to resolve it here. The two cases
389      * where host_pc will not be correct are:
390      *
391      *  - fault during translation (instruction fetch)
392      *  - fault from helper (not using GETPC() macro)
393      *
394      * Either way we need to return early as we can't resolve it here.
395      *
396      * We are using unsigned arithmetic so if host_pc <
397      * tcg_init_ctx.code_gen_buffer, check_offset will wrap to a value
398      * well above code_gen_buffer_size.
399      */
400     check_offset = host_pc - (uintptr_t) tcg_init_ctx.code_gen_buffer;
401 
402     if (check_offset < tcg_init_ctx.code_gen_buffer_size) {
403         tb = tcg_tb_lookup(host_pc);
404         if (tb) {
405             cpu_restore_state_from_tb(cpu, tb, host_pc, will_exit);
406             if (tb_cflags(tb) & CF_NOCACHE) {
407                 /* one-shot translation, invalidate it immediately */
408                 tb_phys_invalidate(tb, -1);
409                 tcg_tb_remove(tb);
410             }
411             r = true;
412         }
413     }
414 
415     return r;
416 }
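/*
 * Usage sketch (an illustration, not an exhaustive list of callers):
 * a helper that faults mid-TB typically passes the host return address
 * obtained via GETPC(), e.g.
 *
 *     cpu_restore_state(cs, GETPC(), true);
 *
 * which looks up the TB containing that host pc and rolls the guest
 * state (pc, icount, ...) back to the offending instruction.
 */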
417 
418 static void page_init(void)
419 {
420     page_size_init();
421     page_table_config_init();
422 
423 #if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
424     {
425 #ifdef HAVE_KINFO_GETVMMAP
426         struct kinfo_vmentry *freep;
427         int i, cnt;
428 
429         freep = kinfo_getvmmap(getpid(), &cnt);
430         if (freep) {
431             mmap_lock();
432             for (i = 0; i < cnt; i++) {
433                 unsigned long startaddr, endaddr;
434 
435                 startaddr = freep[i].kve_start;
436                 endaddr = freep[i].kve_end;
437                 if (h2g_valid(startaddr)) {
438                     startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
439 
440                     if (h2g_valid(endaddr)) {
441                         endaddr = h2g(endaddr);
442                         page_set_flags(startaddr, endaddr, PAGE_RESERVED);
443                     } else {
444 #if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
445                         endaddr = ~0ul;
446                         page_set_flags(startaddr, endaddr, PAGE_RESERVED);
447 #endif
448                     }
449                 }
450             }
451             free(freep);
452             mmap_unlock();
453         }
454 #else
455         FILE *f;
456 
457         last_brk = (unsigned long)sbrk(0);
458 
459         f = fopen("/compat/linux/proc/self/maps", "r");
460         if (f) {
461             mmap_lock();
462 
463             do {
464                 unsigned long startaddr, endaddr;
465                 int n;
466 
467                 n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
468 
469                 if (n == 2 && h2g_valid(startaddr)) {
470                     startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
471 
472                     if (h2g_valid(endaddr)) {
473                         endaddr = h2g(endaddr);
474                     } else {
475                         endaddr = ~0ul;
476                     }
477                     page_set_flags(startaddr, endaddr, PAGE_RESERVED);
478                 }
479             } while (!feof(f));
480 
481             fclose(f);
482             mmap_unlock();
483         }
484 #endif
485     }
486 #endif
487 }
488 
489 static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
490 {
491     PageDesc *pd;
492     void **lp;
493     int i;
494 
495     /* Level 1.  Always allocated.  */
496     lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));
497 
498     /* Level 2..N-1.  */
499     for (i = v_l2_levels; i > 0; i--) {
500         void **p = atomic_rcu_read(lp);
501 
502         if (p == NULL) {
503             void *existing;
504 
505             if (!alloc) {
506                 return NULL;
507             }
508             p = g_new0(void *, V_L2_SIZE);
509             existing = atomic_cmpxchg(lp, NULL, p);
510             if (unlikely(existing)) {
511                 g_free(p);
512                 p = existing;
513             }
514         }
515 
516         lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
517     }
518 
519     pd = atomic_rcu_read(lp);
520     if (pd == NULL) {
521         void *existing;
522 
523         if (!alloc) {
524             return NULL;
525         }
526         pd = g_new0(PageDesc, V_L2_SIZE);
527 #ifndef CONFIG_USER_ONLY
528         {
529             int i;
530 
531             for (i = 0; i < V_L2_SIZE; i++) {
532                 qemu_spin_init(&pd[i].lock);
533             }
534         }
535 #endif
536         existing = atomic_cmpxchg(lp, NULL, pd);
537         if (unlikely(existing)) {
538             g_free(pd);
539             pd = existing;
540         }
541     }
542 
543     return pd + (index & (V_L2_SIZE - 1));
544 }
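/*
 * Sketch of the walk above, continuing the 48-bit example given at
 * page_table_config_init(): for a given page index, bits [35:30] select
 * the l1_map slot, bits [29:20] and [19:10] index the two intermediate
 * levels, and bits [9:0] select the PageDesc within the bottom-level
 * array.
 */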
545 
546 static inline PageDesc *page_find(tb_page_addr_t index)
547 {
548     return page_find_alloc(index, 0);
549 }
550 
551 static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
552                            PageDesc **ret_p2, tb_page_addr_t phys2, int alloc);
553 
554 /* In user-mode page locks aren't used; mmap_lock is enough */
555 #ifdef CONFIG_USER_ONLY
556 
557 #define assert_page_locked(pd) tcg_debug_assert(have_mmap_lock())
558 
559 static inline void page_lock(PageDesc *pd)
560 { }
561 
562 static inline void page_unlock(PageDesc *pd)
563 { }
564 
565 static inline void page_lock_tb(const TranslationBlock *tb)
566 { }
567 
568 static inline void page_unlock_tb(const TranslationBlock *tb)
569 { }
570 
571 struct page_collection *
572 page_collection_lock(tb_page_addr_t start, tb_page_addr_t end)
573 {
574     return NULL;
575 }
576 
577 void page_collection_unlock(struct page_collection *set)
578 { }
579 #else /* !CONFIG_USER_ONLY */
580 
581 #ifdef CONFIG_DEBUG_TCG
582 
583 static __thread GHashTable *ht_pages_locked_debug;
584 
585 static void ht_pages_locked_debug_init(void)
586 {
587     if (ht_pages_locked_debug) {
588         return;
589     }
590     ht_pages_locked_debug = g_hash_table_new(NULL, NULL);
591 }
592 
593 static bool page_is_locked(const PageDesc *pd)
594 {
595     PageDesc *found;
596 
597     ht_pages_locked_debug_init();
598     found = g_hash_table_lookup(ht_pages_locked_debug, pd);
599     return !!found;
600 }
601 
602 static void page_lock__debug(PageDesc *pd)
603 {
604     ht_pages_locked_debug_init();
605     g_assert(!page_is_locked(pd));
606     g_hash_table_insert(ht_pages_locked_debug, pd, pd);
607 }
608 
609 static void page_unlock__debug(const PageDesc *pd)
610 {
611     bool removed;
612 
613     ht_pages_locked_debug_init();
614     g_assert(page_is_locked(pd));
615     removed = g_hash_table_remove(ht_pages_locked_debug, pd);
616     g_assert(removed);
617 }
618 
619 static void
620 do_assert_page_locked(const PageDesc *pd, const char *file, int line)
621 {
622     if (unlikely(!page_is_locked(pd))) {
623         error_report("assert_page_lock: PageDesc %p not locked @ %s:%d",
624                      pd, file, line);
625         abort();
626     }
627 }
628 
629 #define assert_page_locked(pd) do_assert_page_locked(pd, __FILE__, __LINE__)
630 
631 void assert_no_pages_locked(void)
632 {
633     ht_pages_locked_debug_init();
634     g_assert(g_hash_table_size(ht_pages_locked_debug) == 0);
635 }
636 
637 #else /* !CONFIG_DEBUG_TCG */
638 
639 #define assert_page_locked(pd)
640 
641 static inline void page_lock__debug(const PageDesc *pd)
642 {
643 }
644 
645 static inline void page_unlock__debug(const PageDesc *pd)
646 {
647 }
648 
649 #endif /* CONFIG_DEBUG_TCG */
650 
651 static inline void page_lock(PageDesc *pd)
652 {
653     page_lock__debug(pd);
654     qemu_spin_lock(&pd->lock);
655 }
656 
657 static inline void page_unlock(PageDesc *pd)
658 {
659     qemu_spin_unlock(&pd->lock);
660     page_unlock__debug(pd);
661 }
662 
663 /* lock the page(s) of a TB in the correct acquisition order */
664 static inline void page_lock_tb(const TranslationBlock *tb)
665 {
666     page_lock_pair(NULL, tb->page_addr[0], NULL, tb->page_addr[1], 0);
667 }
668 
669 static inline void page_unlock_tb(const TranslationBlock *tb)
670 {
671     PageDesc *p1 = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
672 
673     page_unlock(p1);
674     if (unlikely(tb->page_addr[1] != -1)) {
675         PageDesc *p2 = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
676 
677         if (p2 != p1) {
678             page_unlock(p2);
679         }
680     }
681 }
682 
683 static inline struct page_entry *
684 page_entry_new(PageDesc *pd, tb_page_addr_t index)
685 {
686     struct page_entry *pe = g_malloc(sizeof(*pe));
687 
688     pe->index = index;
689     pe->pd = pd;
690     pe->locked = false;
691     return pe;
692 }
693 
694 static void page_entry_destroy(gpointer p)
695 {
696     struct page_entry *pe = p;
697 
698     g_assert(pe->locked);
699     page_unlock(pe->pd);
700     g_free(pe);
701 }
702 
703 /* returns false on success */
704 static bool page_entry_trylock(struct page_entry *pe)
705 {
706     bool busy;
707 
708     busy = qemu_spin_trylock(&pe->pd->lock);
709     if (!busy) {
710         g_assert(!pe->locked);
711         pe->locked = true;
712         page_lock__debug(pe->pd);
713     }
714     return busy;
715 }
716 
717 static void do_page_entry_lock(struct page_entry *pe)
718 {
719     page_lock(pe->pd);
720     g_assert(!pe->locked);
721     pe->locked = true;
722 }
723 
724 static gboolean page_entry_lock(gpointer key, gpointer value, gpointer data)
725 {
726     struct page_entry *pe = value;
727 
728     do_page_entry_lock(pe);
729     return FALSE;
730 }
731 
732 static gboolean page_entry_unlock(gpointer key, gpointer value, gpointer data)
733 {
734     struct page_entry *pe = value;
735 
736     if (pe->locked) {
737         pe->locked = false;
738         page_unlock(pe->pd);
739     }
740     return FALSE;
741 }
742 
743 /*
744  * Trylock a page, and if successful, add the page to a collection.
745  * Returns true ("busy") if the page could not be locked; false otherwise.
746  */
747 static bool page_trylock_add(struct page_collection *set, tb_page_addr_t addr)
748 {
749     tb_page_addr_t index = addr >> TARGET_PAGE_BITS;
750     struct page_entry *pe;
751     PageDesc *pd;
752 
753     pe = g_tree_lookup(set->tree, &index);
754     if (pe) {
755         return false;
756     }
757 
758     pd = page_find(index);
759     if (pd == NULL) {
760         return false;
761     }
762 
763     pe = page_entry_new(pd, index);
764     g_tree_insert(set->tree, &pe->index, pe);
765 
766     /*
767      * If this is either (1) the first insertion or (2) a page whose index
768      * is higher than any other so far, just lock the page and move on.
769      */
770     if (set->max == NULL || pe->index > set->max->index) {
771         set->max = pe;
772         do_page_entry_lock(pe);
773         return false;
774     }
775     /*
776      * Try to acquire out-of-order lock; if busy, return busy so that we acquire
777      * locks in order.
778      */
779     return page_entry_trylock(pe);
780 }
781 
782 static gint tb_page_addr_cmp(gconstpointer ap, gconstpointer bp, gpointer udata)
783 {
784     tb_page_addr_t a = *(const tb_page_addr_t *)ap;
785     tb_page_addr_t b = *(const tb_page_addr_t *)bp;
786 
787     if (a == b) {
788         return 0;
789     } else if (a < b) {
790         return -1;
791     }
792     return 1;
793 }
794 
795 /*
796  * Lock a range of pages ([@start,@end[) as well as the pages of all
797  * intersecting TBs.
798  * Locking order: acquire locks in ascending order of page index.
799  */
800 struct page_collection *
801 page_collection_lock(tb_page_addr_t start, tb_page_addr_t end)
802 {
803     struct page_collection *set = g_malloc(sizeof(*set));
804     tb_page_addr_t index;
805     PageDesc *pd;
806 
807     start >>= TARGET_PAGE_BITS;
808     end   >>= TARGET_PAGE_BITS;
809     g_assert(start <= end);
810 
811     set->tree = g_tree_new_full(tb_page_addr_cmp, NULL, NULL,
812                                 page_entry_destroy);
813     set->max = NULL;
814     assert_no_pages_locked();
815 
816  retry:
817     g_tree_foreach(set->tree, page_entry_lock, NULL);
818 
819     for (index = start; index <= end; index++) {
820         TranslationBlock *tb;
821         int n;
822 
823         pd = page_find(index);
824         if (pd == NULL) {
825             continue;
826         }
827         if (page_trylock_add(set, index << TARGET_PAGE_BITS)) {
828             g_tree_foreach(set->tree, page_entry_unlock, NULL);
829             goto retry;
830         }
831         assert_page_locked(pd);
832         PAGE_FOR_EACH_TB(pd, tb, n) {
833             if (page_trylock_add(set, tb->page_addr[0]) ||
834                 (tb->page_addr[1] != -1 &&
835                  page_trylock_add(set, tb->page_addr[1]))) {
836                 /* drop all locks, and reacquire in order */
837                 g_tree_foreach(set->tree, page_entry_unlock, NULL);
838                 goto retry;
839             }
840         }
841     }
842     return set;
843 }
844 
845 void page_collection_unlock(struct page_collection *set)
846 {
847     /* entries are unlocked and freed via page_entry_destroy */
848     g_tree_destroy(set->tree);
849     g_free(set);
850 }
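/*
 * Typical usage sketch (assuming the caller holds no page locks):
 *
 *     struct page_collection *pages = page_collection_lock(start, end);
 *     ... invalidate or otherwise modify TBs in the locked range ...
 *     page_collection_unlock(pages);
 *
 * page_collection_lock() may drop and re-take locks internally, but on
 * return the pages of the range and of all intersecting TBs are held,
 * having been acquired in ascending page-index order.
 */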
851 
852 #endif /* !CONFIG_USER_ONLY */
853 
854 static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
855                            PageDesc **ret_p2, tb_page_addr_t phys2, int alloc)
856 {
857     PageDesc *p1, *p2;
858     tb_page_addr_t page1;
859     tb_page_addr_t page2;
860 
861     assert_memory_lock();
862     g_assert(phys1 != -1);
863 
864     page1 = phys1 >> TARGET_PAGE_BITS;
865     page2 = phys2 >> TARGET_PAGE_BITS;
866 
867     p1 = page_find_alloc(page1, alloc);
868     if (ret_p1) {
869         *ret_p1 = p1;
870     }
871     if (likely(phys2 == -1)) {
872         page_lock(p1);
873         return;
874     } else if (page1 == page2) {
875         page_lock(p1);
876         if (ret_p2) {
877             *ret_p2 = p1;
878         }
879         return;
880     }
881     p2 = page_find_alloc(page2, alloc);
882     if (ret_p2) {
883         *ret_p2 = p2;
884     }
885     if (page1 < page2) {
886         page_lock(p1);
887         page_lock(p2);
888     } else {
889         page_lock(p2);
890         page_lock(p1);
891     }
892 }
893 
894 #if defined(CONFIG_USER_ONLY)
895 /* Currently it is not recommended to allocate big chunks of data in
896    user mode. This will change when a dedicated libc is used.  */
897 /* ??? 64-bit hosts ought to have no problem mmapping data outside the
898    region in which the guest needs to run.  Revisit this.  */
899 #define USE_STATIC_CODE_GEN_BUFFER
900 #endif
901 
902 /* Minimum size of the code gen buffer.  This number is arbitrarily chosen,
903    but not so small that we can't have a fair number of TBs live.  */
904 #define MIN_CODE_GEN_BUFFER_SIZE     (1024u * 1024)
905 
906 /* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
907    indicated, this is constrained by the range of direct branches on the
908    host cpu, as used by the TCG implementation of goto_tb.  */
909 #if defined(__x86_64__)
910 # define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
911 #elif defined(__sparc__)
912 # define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
913 #elif defined(__powerpc64__)
914 # define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
915 #elif defined(__powerpc__)
916 # define MAX_CODE_GEN_BUFFER_SIZE  (32u * 1024 * 1024)
917 #elif defined(__aarch64__)
918 # define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
919 #elif defined(__s390x__)
920   /* We have a +- 4GB range on the branches; leave some slop.  */
921 # define MAX_CODE_GEN_BUFFER_SIZE  (3ul * 1024 * 1024 * 1024)
922 #elif defined(__mips__)
923   /* We have a 256MB branch region, but leave room to make sure the
924      main executable is also within that region.  */
925 # define MAX_CODE_GEN_BUFFER_SIZE  (128ul * 1024 * 1024)
926 #else
927 # define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
928 #endif
929 
930 #define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)
931 
932 #define DEFAULT_CODE_GEN_BUFFER_SIZE \
933   (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
934    ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)
935 
936 static inline size_t size_code_gen_buffer(size_t tb_size)
937 {
938     /* Size the buffer.  */
939     if (tb_size == 0) {
940 #ifdef USE_STATIC_CODE_GEN_BUFFER
941         tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
942 #else
943         /* ??? Needs adjustments.  */
944         /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
945            static buffer, we could size this on RESERVED_VA, on the text
946            segment size of the executable, or continue to use the default.  */
947         tb_size = (unsigned long)(ram_size / 4);
948 #endif
949     }
950     if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
951         tb_size = MIN_CODE_GEN_BUFFER_SIZE;
952     }
953     if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
954         tb_size = MAX_CODE_GEN_BUFFER_SIZE;
955     }
956     return tb_size;
957 }
958 
959 #ifdef __mips__
960 /* In order to use J and JAL within the code_gen_buffer, we require
961    that the buffer not cross a 256MB boundary.  */
962 static inline bool cross_256mb(void *addr, size_t size)
963 {
964     return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful;
965 }
966 
967 /* We weren't able to allocate a buffer without crossing that boundary,
968    so make do with the larger portion of the buffer that doesn't cross.
969    Returns the new base of the buffer, and adjusts code_gen_buffer_size.  */
970 static inline void *split_cross_256mb(void *buf1, size_t size1)
971 {
972     void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful);
973     size_t size2 = buf1 + size1 - buf2;
974 
975     size1 = buf2 - buf1;
976     if (size1 < size2) {
977         size1 = size2;
978         buf1 = buf2;
979     }
980 
981     tcg_ctx->code_gen_buffer_size = size1;
982     return buf1;
983 }
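/*
 * Worked example (a sketch): for buf1 = 0x0ff00000 and size1 = 32MB,
 * buf1 + size1 = 0x11f00000, so cross_256mb() sees bits above bit 27 in
 * the XOR and reports a crossing of the 256MB boundary at 0x10000000.
 * split_cross_256mb() then chooses between [0x0ff00000, 0x10000000)
 * (1MB) and [0x10000000, 0x11f00000) (31MB) and keeps the larger,
 * here the latter.
 */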
984 #endif
985 
986 #ifdef USE_STATIC_CODE_GEN_BUFFER
987 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
988     __attribute__((aligned(CODE_GEN_ALIGN)));
989 
990 static inline void *alloc_code_gen_buffer(void)
991 {
992     void *buf = static_code_gen_buffer;
993     void *end = static_code_gen_buffer + sizeof(static_code_gen_buffer);
994     size_t size;
995 
996     /* page-align the beginning and end of the buffer */
997     buf = QEMU_ALIGN_PTR_UP(buf, qemu_real_host_page_size);
998     end = QEMU_ALIGN_PTR_DOWN(end, qemu_real_host_page_size);
999 
1000     size = end - buf;
1001 
1002     /* Honor a command-line option limiting the size of the buffer.  */
1003     if (size > tcg_ctx->code_gen_buffer_size) {
1004         size = QEMU_ALIGN_DOWN(tcg_ctx->code_gen_buffer_size,
1005                                qemu_real_host_page_size);
1006     }
1007     tcg_ctx->code_gen_buffer_size = size;
1008 
1009 #ifdef __mips__
1010     if (cross_256mb(buf, size)) {
1011         buf = split_cross_256mb(buf, size);
1012         size = tcg_ctx->code_gen_buffer_size;
1013     }
1014 #endif
1015 
1016     if (qemu_mprotect_rwx(buf, size)) {
1017         abort();
1018     }
1019     qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
1020 
1021     return buf;
1022 }
1023 #elif defined(_WIN32)
1024 static inline void *alloc_code_gen_buffer(void)
1025 {
1026     size_t size = tcg_ctx->code_gen_buffer_size;
1027     return VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT,
1028                         PAGE_EXECUTE_READWRITE);
1029 }
1030 #else
1031 static inline void *alloc_code_gen_buffer(void)
1032 {
1033     int prot = PROT_WRITE | PROT_READ | PROT_EXEC;
1034     int flags = MAP_PRIVATE | MAP_ANONYMOUS;
1035     uintptr_t start = 0;
1036     size_t size = tcg_ctx->code_gen_buffer_size;
1037     void *buf;
1038 
1039     /* Constrain the position of the buffer based on the host cpu.
1040        Note that these addresses are chosen in concert with the
1041        addresses assigned in the relevant linker script file.  */
1042 # if defined(__PIE__) || defined(__PIC__)
1043     /* Don't bother setting a preferred location if we're building
1044        a position-independent executable.  We're more likely to get
1045        an address near the main executable if we let the kernel
1046        choose the address.  */
1047 # elif defined(__x86_64__) && defined(MAP_32BIT)
1048     /* Force the memory down into low memory with the executable.
1049        Leave the choice of exact location with the kernel.  */
1050     flags |= MAP_32BIT;
1051     /* Cannot expect to map more than 800MB in low memory.  */
1052     if (size > 800u * 1024 * 1024) {
1053         tcg_ctx->code_gen_buffer_size = size = 800u * 1024 * 1024;
1054     }
1055 # elif defined(__sparc__)
1056     start = 0x40000000ul;
1057 # elif defined(__s390x__)
1058     start = 0x90000000ul;
1059 # elif defined(__mips__)
1060 #  if _MIPS_SIM == _ABI64
1061     start = 0x128000000ul;
1062 #  else
1063     start = 0x08000000ul;
1064 #  endif
1065 # endif
1066 
1067     buf = mmap((void *)start, size, prot, flags, -1, 0);
1068     if (buf == MAP_FAILED) {
1069         return NULL;
1070     }
1071 
1072 #ifdef __mips__
1073     if (cross_256mb(buf, size)) {
1074         /* Try again, with the original still mapped, to avoid landing on
1075            the same 256MB crossing.  This time don't specify an address.  */
1076         size_t size2;
1077         void *buf2 = mmap(NULL, size, prot, flags, -1, 0);
1078         switch ((int)(buf2 != MAP_FAILED)) {
1079         case 1:
1080             if (!cross_256mb(buf2, size)) {
1081                 /* Success!  Use the new buffer.  */
1082                 munmap(buf, size);
1083                 break;
1084             }
1085             /* Failure.  Work with what we had.  */
1086             munmap(buf2, size);
1087             /* fallthru */
1088         default:
1089             /* Split the original buffer.  Free the smaller half.  */
1090             buf2 = split_cross_256mb(buf, size);
1091             size2 = tcg_ctx->code_gen_buffer_size;
1092             if (buf == buf2) {
1093                 munmap(buf + size2, size - size2);
1094             } else {
1095                 munmap(buf, size - size2);
1096             }
1097             size = size2;
1098             break;
1099         }
1100         buf = buf2;
1101     }
1102 #endif
1103 
1104     /* Request large pages for the buffer.  */
1105     qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
1106 
1107     return buf;
1108 }
1109 #endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */
1110 
1111 static inline void code_gen_alloc(size_t tb_size)
1112 {
1113     tcg_ctx->code_gen_buffer_size = size_code_gen_buffer(tb_size);
1114     tcg_ctx->code_gen_buffer = alloc_code_gen_buffer();
1115     if (tcg_ctx->code_gen_buffer == NULL) {
1116         fprintf(stderr, "Could not allocate dynamic translator buffer\n");
1117         exit(1);
1118     }
1119 }
1120 
1121 static bool tb_cmp(const void *ap, const void *bp)
1122 {
1123     const TranslationBlock *a = ap;
1124     const TranslationBlock *b = bp;
1125 
1126     return a->pc == b->pc &&
1127         a->cs_base == b->cs_base &&
1128         a->flags == b->flags &&
1129         (tb_cflags(a) & CF_HASH_MASK) == (tb_cflags(b) & CF_HASH_MASK) &&
1130         a->trace_vcpu_dstate == b->trace_vcpu_dstate &&
1131         a->page_addr[0] == b->page_addr[0] &&
1132         a->page_addr[1] == b->page_addr[1];
1133 }
1134 
1135 static void tb_htable_init(void)
1136 {
1137     unsigned int mode = QHT_MODE_AUTO_RESIZE;
1138 
1139     qht_init(&tb_ctx.htable, tb_cmp, CODE_GEN_HTABLE_SIZE, mode);
1140 }
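/*
 * Note on how the table is keyed (a sketch): insertions and removals in
 * this file hash the physical pc, guest pc, flags, cflags & CF_HASH_MASK
 * and trace_vcpu_dstate via tb_hash_func(), i.e. the same fields that
 * tb_cmp() above compares; page_addr[] is checked by tb_cmp() but does
 * not contribute to the hash.
 */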
1141 
1142 /* Must be called before using the QEMU cpus. 'tb_size' is the size
1143    (in bytes) allocated to the translation buffer. Zero means default
1144    size. */
1145 void tcg_exec_init(unsigned long tb_size)
1146 {
1147     tcg_allowed = true;
1148     cpu_gen_init();
1149     page_init();
1150     tb_htable_init();
1151     code_gen_alloc(tb_size);
1152 #if defined(CONFIG_SOFTMMU)
1153     /* There's no guest base to take into account, so go ahead and
1154        initialize the prologue now.  */
1155     tcg_prologue_init(tcg_ctx);
1156 #endif
1157 }
1158 
1159 /*
1160  * Allocate a new translation block.  Returns NULL when there are too many
1161  * translation blocks or too much generated code; the caller must then flush.
1162  */
1163 static TranslationBlock *tb_alloc(target_ulong pc)
1164 {
1165     TranslationBlock *tb;
1166 
1167     assert_memory_lock();
1168 
1169     tb = tcg_tb_alloc(tcg_ctx);
1170     if (unlikely(tb == NULL)) {
1171         return NULL;
1172     }
1173     return tb;
1174 }
1175 
1176 /* call with @p->lock held */
1177 static inline void invalidate_page_bitmap(PageDesc *p)
1178 {
1179     assert_page_locked(p);
1180 #ifdef CONFIG_SOFTMMU
1181     g_free(p->code_bitmap);
1182     p->code_bitmap = NULL;
1183     p->code_write_count = 0;
1184 #endif
1185 }
1186 
1187 /* Set to NULL all the 'first_tb' fields in all PageDescs. */
1188 static void page_flush_tb_1(int level, void **lp)
1189 {
1190     int i;
1191 
1192     if (*lp == NULL) {
1193         return;
1194     }
1195     if (level == 0) {
1196         PageDesc *pd = *lp;
1197 
1198         for (i = 0; i < V_L2_SIZE; ++i) {
1199             page_lock(&pd[i]);
1200             pd[i].first_tb = (uintptr_t)NULL;
1201             invalidate_page_bitmap(pd + i);
1202             page_unlock(&pd[i]);
1203         }
1204     } else {
1205         void **pp = *lp;
1206 
1207         for (i = 0; i < V_L2_SIZE; ++i) {
1208             page_flush_tb_1(level - 1, pp + i);
1209         }
1210     }
1211 }
1212 
1213 static void page_flush_tb(void)
1214 {
1215     int i, l1_sz = v_l1_size;
1216 
1217     for (i = 0; i < l1_sz; i++) {
1218         page_flush_tb_1(v_l2_levels, l1_map + i);
1219     }
1220 }
1221 
1222 static gboolean tb_host_size_iter(gpointer key, gpointer value, gpointer data)
1223 {
1224     const TranslationBlock *tb = value;
1225     size_t *size = data;
1226 
1227     *size += tb->tc.size;
1228     return false;
1229 }
1230 
1231 /* flush all the translation blocks */
1232 static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
1233 {
1234     bool did_flush = false;
1235 
1236     mmap_lock();
1237     /* If the flush has already been done at the request of another CPU,
1238      * there is nothing left to do.
1239      */
1240     if (tb_ctx.tb_flush_count != tb_flush_count.host_int) {
1241         goto done;
1242     }
1243     did_flush = true;
1244 
1245     if (DEBUG_TB_FLUSH_GATE) {
1246         size_t nb_tbs = tcg_nb_tbs();
1247         size_t host_size = 0;
1248 
1249         tcg_tb_foreach(tb_host_size_iter, &host_size);
1250         printf("qemu: flush code_size=%zu nb_tbs=%zu avg_tb_size=%zu\n",
1251                tcg_code_size(), nb_tbs, nb_tbs > 0 ? host_size / nb_tbs : 0);
1252     }
1253 
1254     CPU_FOREACH(cpu) {
1255         cpu_tb_jmp_cache_clear(cpu);
1256     }
1257 
1258     qht_reset_size(&tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
1259     page_flush_tb();
1260 
1261     tcg_region_reset_all();
1262     /* XXX: flush processor icache at this point if cache flush is
1263        expensive */
1264     atomic_mb_set(&tb_ctx.tb_flush_count, tb_ctx.tb_flush_count + 1);
1265 
1266 done:
1267     mmap_unlock();
1268     if (did_flush) {
1269         qemu_plugin_flush_cb();
1270     }
1271 }
1272 
1273 void tb_flush(CPUState *cpu)
1274 {
1275     if (tcg_enabled()) {
1276         unsigned tb_flush_count = atomic_mb_read(&tb_ctx.tb_flush_count);
1277 
1278         if (cpu_in_exclusive_context(cpu)) {
1279             do_tb_flush(cpu, RUN_ON_CPU_HOST_INT(tb_flush_count));
1280         } else {
1281             async_safe_run_on_cpu(cpu, do_tb_flush,
1282                                   RUN_ON_CPU_HOST_INT(tb_flush_count));
1283         }
1284     }
1285 }
1286 
1287 /*
1288  * Formerly ifdef DEBUG_TB_CHECK. These debug functions are user-mode-only,
1289  * so in order to prevent bit rot we compile them unconditionally in user-mode,
1290  * and let the optimizer get rid of them by wrapping their user-only callers
1291  * with if (DEBUG_TB_CHECK_GATE).
1292  */
1293 #ifdef CONFIG_USER_ONLY
1294 
1295 static void do_tb_invalidate_check(void *p, uint32_t hash, void *userp)
1296 {
1297     TranslationBlock *tb = p;
1298     target_ulong addr = *(target_ulong *)userp;
1299 
1300     if (!(addr + TARGET_PAGE_SIZE <= tb->pc || addr >= tb->pc + tb->size)) {
1301         printf("ERROR invalidate: address=" TARGET_FMT_lx
1302                " PC=%08lx size=%04x\n", addr, (long)tb->pc, tb->size);
1303     }
1304 }
1305 
1306 /* verify that all the pages have correct rights for code
1307  *
1308  * Called with mmap_lock held.
1309  */
1310 static void tb_invalidate_check(target_ulong address)
1311 {
1312     address &= TARGET_PAGE_MASK;
1313     qht_iter(&tb_ctx.htable, do_tb_invalidate_check, &address);
1314 }
1315 
1316 static void do_tb_page_check(void *p, uint32_t hash, void *userp)
1317 {
1318     TranslationBlock *tb = p;
1319     int flags1, flags2;
1320 
1321     flags1 = page_get_flags(tb->pc);
1322     flags2 = page_get_flags(tb->pc + tb->size - 1);
1323     if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
1324         printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
1325                (long)tb->pc, tb->size, flags1, flags2);
1326     }
1327 }
1328 
1329 /* verify that all the pages have correct rights for code */
1330 static void tb_page_check(void)
1331 {
1332     qht_iter(&tb_ctx.htable, do_tb_page_check, NULL);
1333 }
1334 
1335 #endif /* CONFIG_USER_ONLY */
1336 
1337 /*
1338  * user-mode: call with mmap_lock held
1339  * !user-mode: call with @pd->lock held
1340  */
1341 static inline void tb_page_remove(PageDesc *pd, TranslationBlock *tb)
1342 {
1343     TranslationBlock *tb1;
1344     uintptr_t *pprev;
1345     unsigned int n1;
1346 
1347     assert_page_locked(pd);
1348     pprev = &pd->first_tb;
1349     PAGE_FOR_EACH_TB(pd, tb1, n1) {
1350         if (tb1 == tb) {
1351             *pprev = tb1->page_next[n1];
1352             return;
1353         }
1354         pprev = &tb1->page_next[n1];
1355     }
1356     g_assert_not_reached();
1357 }
1358 
1359 /* remove @orig from its @n_orig-th jump list */
1360 static inline void tb_remove_from_jmp_list(TranslationBlock *orig, int n_orig)
1361 {
1362     uintptr_t ptr, ptr_locked;
1363     TranslationBlock *dest;
1364     TranslationBlock *tb;
1365     uintptr_t *pprev;
1366     int n;
1367 
1368     /* mark the LSB of jmp_dest[] so that no further jumps can be inserted */
1369     ptr = atomic_or_fetch(&orig->jmp_dest[n_orig], 1);
1370     dest = (TranslationBlock *)(ptr & ~1);
1371     if (dest == NULL) {
1372         return;
1373     }
1374 
1375     qemu_spin_lock(&dest->jmp_lock);
1376     /*
1377      * While acquiring the lock, the jump might have been removed if the
1378      * destination TB was invalidated; check again.
1379      */
1380     ptr_locked = atomic_read(&orig->jmp_dest[n_orig]);
1381     if (ptr_locked != ptr) {
1382         qemu_spin_unlock(&dest->jmp_lock);
1383         /*
1384          * The only possibility is that the jump was unlinked via
1385          * tb_jump_unlink(dest). Seeing another destination here would be a bug,
1386          * because we set the LSB above.
1387          */
1388         g_assert(ptr_locked == 1 && dest->cflags & CF_INVALID);
1389         return;
1390     }
1391     /*
1392      * We first acquired the lock, and since the destination pointer matches,
1393      * we know for sure that @orig is in the jmp list.
1394      */
1395     pprev = &dest->jmp_list_head;
1396     TB_FOR_EACH_JMP(dest, tb, n) {
1397         if (tb == orig && n == n_orig) {
1398             *pprev = tb->jmp_list_next[n];
1399             /* no need to set orig->jmp_dest[n]; setting the LSB was enough */
1400             qemu_spin_unlock(&dest->jmp_lock);
1401             return;
1402         }
1403         pprev = &tb->jmp_list_next[n];
1404     }
1405     g_assert_not_reached();
1406 }
1407 
1408 /* reset the jump entry 'n' of a TB so that it is not chained to
1409    another TB */
1410 static inline void tb_reset_jump(TranslationBlock *tb, int n)
1411 {
1412     uintptr_t addr = (uintptr_t)(tb->tc.ptr + tb->jmp_reset_offset[n]);
1413     tb_set_jmp_target(tb, n, addr);
1414 }
1415 
1416 /* remove any jumps to the TB */
1417 static inline void tb_jmp_unlink(TranslationBlock *dest)
1418 {
1419     TranslationBlock *tb;
1420     int n;
1421 
1422     qemu_spin_lock(&dest->jmp_lock);
1423 
1424     TB_FOR_EACH_JMP(dest, tb, n) {
1425         tb_reset_jump(tb, n);
1426         atomic_and(&tb->jmp_dest[n], (uintptr_t)NULL | 1);
1427         /* No need to clear the list entry; setting the dest ptr is enough */
1428     }
1429     dest->jmp_list_head = (uintptr_t)NULL;
1430 
1431     qemu_spin_unlock(&dest->jmp_lock);
1432 }
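/*
 * Note on the jump-list protocol (a sketch): a TB's jmp_lock protects its
 * jmp_list_head, i.e. the list of TBs that jump into it.  Invalidating a
 * TB therefore works in both directions: tb_remove_from_jmp_list() takes
 * it off the lists of its (at most two) destinations, while tb_jmp_unlink()
 * resets every TB still jumping to it; marking the LSB of jmp_dest[]
 * prevents new outgoing links from being re-inserted concurrently.
 * do_tb_phys_invalidate() below performs both steps.
 */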
1433 
1434 /*
1435  * In user-mode, call with mmap_lock held.
1436  * In !user-mode, if @rm_from_page_list is set, call with the TB's pages'
1437  * locks held.
1438  */
1439 static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
1440 {
1441     CPUState *cpu;
1442     PageDesc *p;
1443     uint32_t h;
1444     tb_page_addr_t phys_pc;
1445 
1446     assert_memory_lock();
1447 
1448     /* make sure no further incoming jumps will be chained to this TB */
1449     qemu_spin_lock(&tb->jmp_lock);
1450     atomic_set(&tb->cflags, tb->cflags | CF_INVALID);
1451     qemu_spin_unlock(&tb->jmp_lock);
1452 
1453     /* remove the TB from the hash list */
1454     phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1455     h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb_cflags(tb) & CF_HASH_MASK,
1456                      tb->trace_vcpu_dstate);
1457     if (!(tb->cflags & CF_NOCACHE) &&
1458         !qht_remove(&tb_ctx.htable, tb, h)) {
1459         return;
1460     }
1461 
1462     /* remove the TB from the page list */
1463     if (rm_from_page_list) {
1464         p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
1465         tb_page_remove(p, tb);
1466         invalidate_page_bitmap(p);
1467         if (tb->page_addr[1] != -1) {
1468             p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
1469             tb_page_remove(p, tb);
1470             invalidate_page_bitmap(p);
1471         }
1472     }
1473 
1474     /* remove the TB from the per-CPU tb_jmp_cache */
1475     h = tb_jmp_cache_hash_func(tb->pc);
1476     CPU_FOREACH(cpu) {
1477         if (atomic_read(&cpu->tb_jmp_cache[h]) == tb) {
1478             atomic_set(&cpu->tb_jmp_cache[h], NULL);
1479         }
1480     }
1481 
1482     /* suppress this TB from the two jump lists */
1483     tb_remove_from_jmp_list(tb, 0);
1484     tb_remove_from_jmp_list(tb, 1);
1485 
1486     /* suppress any remaining jumps to this TB */
1487     tb_jmp_unlink(tb);
1488 
1489     atomic_set(&tcg_ctx->tb_phys_invalidate_count,
1490                tcg_ctx->tb_phys_invalidate_count + 1);
1491 }
1492 
1493 static void tb_phys_invalidate__locked(TranslationBlock *tb)
1494 {
1495     do_tb_phys_invalidate(tb, true);
1496 }
1497 
1498 /* invalidate one TB
1499  *
1500  * Called with mmap_lock held in user-mode.
1501  */
1502 void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
1503 {
1504     if (page_addr == -1 && tb->page_addr[0] != -1) {
1505         page_lock_tb(tb);
1506         do_tb_phys_invalidate(tb, true);
1507         page_unlock_tb(tb);
1508     } else {
1509         do_tb_phys_invalidate(tb, false);
1510     }
1511 }
1512 
1513 #ifdef CONFIG_SOFTMMU
1514 /* call with @p->lock held */
1515 static void build_page_bitmap(PageDesc *p)
1516 {
1517     int n, tb_start, tb_end;
1518     TranslationBlock *tb;
1519 
1520     assert_page_locked(p);
1521     p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE);
1522 
1523     PAGE_FOR_EACH_TB(p, tb, n) {
1524         /* NOTE: this is subtle as a TB may span two physical pages */
1525         if (n == 0) {
1526             /* NOTE: tb_end may be after the end of the page, but
1527                it is not a problem */
1528             tb_start = tb->pc & ~TARGET_PAGE_MASK;
1529             tb_end = tb_start + tb->size;
1530             if (tb_end > TARGET_PAGE_SIZE) {
1531                 tb_end = TARGET_PAGE_SIZE;
1532             }
1533         } else {
1534             tb_start = 0;
1535             tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1536         }
1537         bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start);
1538     }
1539 }
1540 #endif
1541 
1542 /* add the TB to the target page and protect it if necessary
1543  *
1544  * Called with mmap_lock held for user-mode emulation.
1545  * Called with @p->lock held in !user-mode.
1546  */
1547 static inline void tb_page_add(PageDesc *p, TranslationBlock *tb,
1548                                unsigned int n, tb_page_addr_t page_addr)
1549 {
1550 #ifndef CONFIG_USER_ONLY
1551     bool page_already_protected;
1552 #endif
1553 
1554     assert_page_locked(p);
1555 
1556     tb->page_addr[n] = page_addr;
1557     tb->page_next[n] = p->first_tb;
1558 #ifndef CONFIG_USER_ONLY
1559     page_already_protected = p->first_tb != (uintptr_t)NULL;
1560 #endif
1561     p->first_tb = (uintptr_t)tb | n;
1562     invalidate_page_bitmap(p);
1563 
1564 #if defined(CONFIG_USER_ONLY)
1565     if (p->flags & PAGE_WRITE) {
1566         target_ulong addr;
1567         PageDesc *p2;
1568         int prot;
1569 
1570         /* force the host page to be non-writable (writes will have a
1571            page fault + mprotect overhead) */
1572         page_addr &= qemu_host_page_mask;
1573         prot = 0;
1574         for (addr = page_addr; addr < page_addr + qemu_host_page_size;
1575             addr += TARGET_PAGE_SIZE) {
1576 
1577             p2 = page_find(addr >> TARGET_PAGE_BITS);
1578             if (!p2) {
1579                 continue;
1580             }
1581             prot |= p2->flags;
1582             p2->flags &= ~PAGE_WRITE;
1583         }
1584         mprotect(g2h(page_addr), qemu_host_page_size,
1585                  (prot & PAGE_BITS) & ~PAGE_WRITE);
1586         if (DEBUG_TB_INVALIDATE_GATE) {
1587             printf("protecting code page: 0x" TB_PAGE_ADDR_FMT "\n", page_addr);
1588         }
1589     }
1590 #else
1591     /* if some code is already present, then the pages are already
1592        protected. So we only need to protect the page when the first TB
1593        is allocated in it */
1594     if (!page_already_protected) {
1595         tlb_protect_code(page_addr);
1596     }
1597 #endif
1598 }
1599 
1600 /* add a new TB and link it to the physical page tables. phys_page2 is
1601  * (-1) to indicate that only one page contains the TB.
1602  *
1603  * Called with mmap_lock held for user-mode emulation.
1604  *
1605  * Returns @tb, or a pointer to an existing TB that matches @tb.
1606  * Note that in !user-mode, another thread might have already added a TB
1607  * for the same block of guest code that @tb corresponds to. In that case,
1608  * the caller should discard the original @tb, and use instead the returned TB.
1609  */
1610 static TranslationBlock *
1611 tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
1612              tb_page_addr_t phys_page2)
1613 {
1614     PageDesc *p;
1615     PageDesc *p2 = NULL;
1616 
1617     assert_memory_lock();
1618 
1619     if (phys_pc == -1) {
1620         /*
1621          * If the TB is not associated with a physical RAM page then
1622          * it must be a temporary one-insn TB, and we have nothing to do
1623          * except fill in the page_addr[] fields.
1624          */
1625         assert(tb->cflags & CF_NOCACHE);
1626         tb->page_addr[0] = tb->page_addr[1] = -1;
1627         return tb;
1628     }
1629 
1630     /*
1631      * Add the TB to the page list, acquiring the pages' locks first.
1632      * We keep the locks held until after inserting the TB in the hash table,
1633      * so that if the insertion fails we know for sure that the TBs are still
1634      * in the page descriptors.
1635      * Note that inserting into the hash table first isn't an option, since
1636      * we can only insert TBs that are fully initialized.
1637      */
1638     page_lock_pair(&p, phys_pc, &p2, phys_page2, 1);
1639     tb_page_add(p, tb, 0, phys_pc & TARGET_PAGE_MASK);
1640     if (p2) {
1641         tb_page_add(p2, tb, 1, phys_page2);
1642     } else {
1643         tb->page_addr[1] = -1;
1644     }
1645 
1646     if (!(tb->cflags & CF_NOCACHE)) {
1647         void *existing_tb = NULL;
1648         uint32_t h;
1649 
1650         /* add in the hash table */
1651         h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->cflags & CF_HASH_MASK,
1652                          tb->trace_vcpu_dstate);
1653         qht_insert(&tb_ctx.htable, tb, h, &existing_tb);
1654 
1655         /* remove TB from the page(s) if we couldn't insert it */
1656         if (unlikely(existing_tb)) {
1657             tb_page_remove(p, tb);
1658             invalidate_page_bitmap(p);
1659             if (p2) {
1660                 tb_page_remove(p2, tb);
1661                 invalidate_page_bitmap(p2);
1662             }
1663             tb = existing_tb;
1664         }
1665     }
1666 
1667     if (p2 && p2 != p) {
1668         page_unlock(p2);
1669     }
1670     page_unlock(p);
1671 
1672 #ifdef CONFIG_USER_ONLY
1673     if (DEBUG_TB_CHECK_GATE) {
1674         tb_page_check();
1675     }
1676 #endif
1677     return tb;
1678 }
1679 
1680 /* Called with mmap_lock held for user mode emulation.  */
1681 TranslationBlock *tb_gen_code(CPUState *cpu,
1682                               target_ulong pc, target_ulong cs_base,
1683                               uint32_t flags, int cflags)
1684 {
1685     CPUArchState *env = cpu->env_ptr;
1686     TranslationBlock *tb, *existing_tb;
1687     tb_page_addr_t phys_pc, phys_page2;
1688     target_ulong virt_page2;
1689     tcg_insn_unit *gen_code_buf;
1690     int gen_code_size, search_size, max_insns;
1691 #ifdef CONFIG_PROFILER
1692     TCGProfile *prof = &tcg_ctx->prof;
1693     int64_t ti;
1694 #endif
1695     assert_memory_lock();
1696 
1697     phys_pc = get_page_addr_code(env, pc);
1698 
1699     if (phys_pc == -1) {
1700         /* Generate a temporary TB with 1 insn in it */
1701         cflags &= ~CF_COUNT_MASK;
1702         cflags |= CF_NOCACHE | 1;
1703     }
1704 
1705     cflags &= ~CF_CLUSTER_MASK;
1706     cflags |= cpu->cluster_index << CF_CLUSTER_SHIFT;
1707 
1708     max_insns = cflags & CF_COUNT_MASK;
1709     if (max_insns == 0) {
1710         max_insns = CF_COUNT_MASK;
1711     }
1712     if (max_insns > TCG_MAX_INSNS) {
1713         max_insns = TCG_MAX_INSNS;
1714     }
1715     if (cpu->singlestep_enabled || singlestep) {
1716         max_insns = 1;
1717     }
1718 
1719  buffer_overflow:
1720     tb = tb_alloc(pc);
1721     if (unlikely(!tb)) {
1722         /* flush must be done */
1723         tb_flush(cpu);
1724         mmap_unlock();
1725         /* Make the execution loop process the flush as soon as possible.  */
1726         cpu->exception_index = EXCP_INTERRUPT;
1727         cpu_loop_exit(cpu);
1728     }
1729 
1730     gen_code_buf = tcg_ctx->code_gen_ptr;
1731     tb->tc.ptr = gen_code_buf;
1732     tb->pc = pc;
1733     tb->cs_base = cs_base;
1734     tb->flags = flags;
1735     tb->cflags = cflags;
1736     tb->trace_vcpu_dstate = *cpu->trace_dstate;
1737     tcg_ctx->tb_cflags = cflags;
1738  tb_overflow:
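     /*
      * Re-entered via "goto tb_overflow" with a smaller max_insns when the
      * code generated for this TB exceeded the per-TB size limit.
      */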
1739 
1740 #ifdef CONFIG_PROFILER
1741     /* includes aborted translations because of exceptions */
1742     atomic_set(&prof->tb_count1, prof->tb_count1 + 1);
1743     ti = profile_getclock();
1744 #endif
1745 
1746     tcg_func_start(tcg_ctx);
1747 
1748     tcg_ctx->cpu = env_cpu(env);
1749     gen_intermediate_code(cpu, tb, max_insns);
1750     tcg_ctx->cpu = NULL;
1751 
1752     trace_translate_block(tb, tb->pc, tb->tc.ptr);
1753 
1754     /* generate machine code */
1755     tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
1756     tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID;
1757     tcg_ctx->tb_jmp_reset_offset = tb->jmp_reset_offset;
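         /*
          * Backends that support direct jumps patch the jump instruction in
          * place, so record each jump insn's offset; otherwise record where
          * the indirect jump target address is stored.
          */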
1758     if (TCG_TARGET_HAS_direct_jump) {
1759         tcg_ctx->tb_jmp_insn_offset = tb->jmp_target_arg;
1760         tcg_ctx->tb_jmp_target_addr = NULL;
1761     } else {
1762         tcg_ctx->tb_jmp_insn_offset = NULL;
1763         tcg_ctx->tb_jmp_target_addr = tb->jmp_target_arg;
1764     }
1765 
1766 #ifdef CONFIG_PROFILER
1767     atomic_set(&prof->tb_count, prof->tb_count + 1);
1768     atomic_set(&prof->interm_time, prof->interm_time + profile_getclock() - ti);
1769     ti = profile_getclock();
1770 #endif
1771 
1772     gen_code_size = tcg_gen_code(tcg_ctx, tb);
1773     if (unlikely(gen_code_size < 0)) {
1774         switch (gen_code_size) {
1775         case -1:
1776             /*
1777              * Overflow of code_gen_buffer, or the current slice of it.
1778              *
1779              * TODO: We don't need to re-do gen_intermediate_code, nor
1780              * should we re-do the tcg optimization currently hidden
1781              * inside tcg_gen_code.  All that should be required is to
1782              * flush the TBs, allocate a new TB, re-initialize it per
1783              * above, and re-do the actual code generation.
1784              */
1785             goto buffer_overflow;
1786 
1787         case -2:
1788             /*
1789              * The code generated for the TranslationBlock is too large.
1790              * The maximum size allowed by the unwind info is 64k.
1791              * There may be stricter constraints from relocations
1792              * in the tcg backend.
1793              *
1794              * Try again with half as many insns as we attempted this time.
1795              * If a single insn overflows, there's a bug somewhere...
1796              */
1797             max_insns = tb->icount;
1798             assert(max_insns > 1);
1799             max_insns /= 2;
1800             goto tb_overflow;
1801 
1802         default:
1803             g_assert_not_reached();
1804         }
1805     }
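     /*
      * The search data (guest insn <-> host code cross references used when
      * restoring CPU state) is appended right after the generated code and
      * can likewise overflow the remaining buffer space.
      */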
1806     search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
1807     if (unlikely(search_size < 0)) {
1808         goto buffer_overflow;
1809     }
1810     tb->tc.size = gen_code_size;
1811 
1812 #ifdef CONFIG_PROFILER
1813     atomic_set(&prof->code_time, prof->code_time + profile_getclock() - ti);
1814     atomic_set(&prof->code_in_len, prof->code_in_len + tb->size);
1815     atomic_set(&prof->code_out_len, prof->code_out_len + gen_code_size);
1816     atomic_set(&prof->search_out_len, prof->search_out_len + search_size);
1817 #endif
1818 
1819 #ifdef DEBUG_DISAS
1820     if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
1821         qemu_log_in_addr_range(tb->pc)) {
1822         qemu_log_lock();
1823         qemu_log("OUT: [size=%d]\n", gen_code_size);
1824         if (tcg_ctx->data_gen_ptr) {
1825             size_t code_size = tcg_ctx->data_gen_ptr - tb->tc.ptr;
1826             size_t data_size = gen_code_size - code_size;
1827             size_t i;
1828 
1829             log_disas(tb->tc.ptr, code_size);
1830 
1831             for (i = 0; i < data_size; i += sizeof(tcg_target_ulong)) {
1832                 if (sizeof(tcg_target_ulong) == 8) {
1833                     qemu_log("0x%08" PRIxPTR ":  .quad  0x%016" PRIx64 "\n",
1834                              (uintptr_t)tcg_ctx->data_gen_ptr + i,
1835                              *(uint64_t *)(tcg_ctx->data_gen_ptr + i));
1836                 } else {
1837                     qemu_log("0x%08" PRIxPTR ":  .long  0x%08x\n",
1838                              (uintptr_t)tcg_ctx->data_gen_ptr + i,
1839                              *(uint32_t *)(tcg_ctx->data_gen_ptr + i));
1840                 }
1841             }
1842         } else {
1843             log_disas(tb->tc.ptr, gen_code_size);
1844         }
1845         qemu_log("\n");
1846         qemu_log_flush();
1847         qemu_log_unlock();
1848     }
1849 #endif
1850 
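     /*
      * Publish the new top of the code buffer: skip past both the generated
      * code and the search data, keeping the next TB aligned to
      * CODE_GEN_ALIGN.
      */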
1851     atomic_set(&tcg_ctx->code_gen_ptr, (void *)
1852         ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
1853                  CODE_GEN_ALIGN));
1854 
1855     /* init jump list */
1856     qemu_spin_init(&tb->jmp_lock);
1857     tb->jmp_list_head = (uintptr_t)NULL;
1858     tb->jmp_list_next[0] = (uintptr_t)NULL;
1859     tb->jmp_list_next[1] = (uintptr_t)NULL;
1860     tb->jmp_dest[0] = (uintptr_t)NULL;
1861     tb->jmp_dest[1] = (uintptr_t)NULL;
1862 
1863     /* init original jump addresses which have been set during tcg_gen_code() */
1864     if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
1865         tb_reset_jump(tb, 0);
1866     }
1867     if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
1868         tb_reset_jump(tb, 1);
1869     }
1870 
1871     /* check next page if needed */
1872     virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
1873     phys_page2 = -1;
1874     if ((pc & TARGET_PAGE_MASK) != virt_page2) {
1875         phys_page2 = get_page_addr_code(env, virt_page2);
1876     }
1877     /*
1878      * No explicit memory barrier is required -- tb_link_page() makes the
1879      * TB visible in a consistent state.
1880      */
1881     existing_tb = tb_link_page(tb, phys_pc, phys_page2);
1882     /* if the TB already exists, discard what we just translated */
1883     if (unlikely(existing_tb != tb)) {
1884         uintptr_t orig_aligned = (uintptr_t)gen_code_buf;
1885 
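             /*
              * Roll code_gen_ptr back past the generated code and the
              * TranslationBlock struct itself, which tb_alloc() carved out
              * of the same buffer just before gen_code_buf.
              */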
1886         orig_aligned -= ROUND_UP(sizeof(*tb), qemu_icache_linesize);
1887         atomic_set(&tcg_ctx->code_gen_ptr, (void *)orig_aligned);
1888         return existing_tb;
1889     }
1890     tcg_tb_insert(tb);
1891     return tb;
1892 }
1893 
1894 /*
1895  * @p must be non-NULL.
1896  * user-mode: call with mmap_lock held.
1897  * !user-mode: call with all @pages locked.
1898  */
1899 static void
1900 tb_invalidate_phys_page_range__locked(struct page_collection *pages,
1901                                       PageDesc *p, tb_page_addr_t start,
1902                                       tb_page_addr_t end,
1903                                       uintptr_t retaddr)
1904 {
1905     TranslationBlock *tb;
1906     tb_page_addr_t tb_start, tb_end;
1907     int n;
1908 #ifdef TARGET_HAS_PRECISE_SMC
1909     CPUState *cpu = current_cpu;
1910     CPUArchState *env = NULL;
1911     bool current_tb_not_found = retaddr != 0;
1912     bool current_tb_modified = false;
1913     TranslationBlock *current_tb = NULL;
1914     target_ulong current_pc = 0;
1915     target_ulong current_cs_base = 0;
1916     uint32_t current_flags = 0;
1917 #endif /* TARGET_HAS_PRECISE_SMC */
1918 
1919     assert_page_locked(p);
1920 
1921 #if defined(TARGET_HAS_PRECISE_SMC)
1922     if (cpu != NULL) {
1923         env = cpu->env_ptr;
1924     }
1925 #endif
1926 
1927     /* we remove all the TBs in the range [start, end[ */
1928     /* XXX: see if in some cases it could be faster to invalidate all
1929        the code */
1930     PAGE_FOR_EACH_TB(p, tb, n) {
1931         assert_page_locked(p);
1932         /* NOTE: this is subtle as a TB may span two physical pages */
1933         if (n == 0) {
1934             /* NOTE: tb_end may be after the end of the page, but
1935                it is not a problem */
1936             tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1937             tb_end = tb_start + tb->size;
1938         } else {
1939             tb_start = tb->page_addr[1];
1940             tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1941         }
1942         if (!(tb_end <= start || tb_start >= end)) {
1943 #ifdef TARGET_HAS_PRECISE_SMC
1944             if (current_tb_not_found) {
1945                 current_tb_not_found = false;
1946                 /* now we have a real cpu fault */
1947                 current_tb = tcg_tb_lookup(retaddr);
1948             }
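                 /*
                  * Skip TBs with an insn count of 1: there the faulting store
                  * is the TB's only guest insn, so nothing after the current
                  * PC would need to be re-translated.
                  */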
1949             if (current_tb == tb &&
1950                 (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
1951                 /*
1952                  * If we are modifying the current TB, we must stop
1953                  * its execution. We could be more precise by checking
1954                  * that the modification is after the current PC, but it
1955                  * would require a specialized function to partially
1956                  * restore the CPU state.
1957                  */
1958                 current_tb_modified = true;
1959                 cpu_restore_state_from_tb(cpu, current_tb, retaddr, true);
1960                 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1961                                      &current_flags);
1962             }
1963 #endif /* TARGET_HAS_PRECISE_SMC */
1964             tb_phys_invalidate__locked(tb);
1965         }
1966     }
1967 #if !defined(CONFIG_USER_ONLY)
1968     /* if no code remaining, no need to continue to use slow writes */
1969     if (!p->first_tb) {
1970         invalidate_page_bitmap(p);
1971         tlb_unprotect_code(start);
1972     }
1973 #endif
1974 #ifdef TARGET_HAS_PRECISE_SMC
1975     if (current_tb_modified) {
1976         page_collection_unlock(pages);
1977         /* Force execution of one insn next time.  */
1978         cpu->cflags_next_tb = 1 | curr_cflags();
1979         mmap_unlock();
1980         cpu_loop_exit_noexc(cpu);
1981     }
1982 #endif
1983 }
1984 
1985 /*
1986  * Invalidate all TBs which intersect with the target physical address range
1987  * [start;end[. NOTE: start and end must refer to the *same* physical page.
1988  * When triggered by a real cpu write access, the virtual CPU will exit
1989  * the current TB if code is modified inside this TB (see the retaddr
1990  * argument of tb_invalidate_phys_page_range__locked()).
1991  *
1992  * Called with mmap_lock held for user-mode emulation
1993  */
1994 void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end)
1995 {
1996     struct page_collection *pages;
1997     PageDesc *p;
1998 
1999     assert_memory_lock();
2000 
2001     p = page_find(start >> TARGET_PAGE_BITS);
2002     if (p == NULL) {
2003         return;
2004     }
2005     pages = page_collection_lock(start, end);
2006     tb_invalidate_phys_page_range__locked(pages, p, start, end, 0);
2007     page_collection_unlock(pages);
2008 }
2009 
2010 /*
2011  * Invalidate all TBs which intersect with the target physical address range
2012  * [start;end[. NOTE: start and end may refer to *different* physical pages.
2013  * When triggered by a real cpu write access, the virtual CPU will exit
2014  * the current TB if code is modified inside this TB (see the retaddr
2015  * argument of tb_invalidate_phys_page_range__locked()).
2016  *
2017  * Called with mmap_lock held for user-mode emulation.
2018  */
2019 #ifdef CONFIG_SOFTMMU
2020 void tb_invalidate_phys_range(ram_addr_t start, ram_addr_t end)
2021 #else
2022 void tb_invalidate_phys_range(target_ulong start, target_ulong end)
2023 #endif
2024 {
2025     struct page_collection *pages;
2026     tb_page_addr_t next;
2027 
2028     assert_memory_lock();
2029 
2030     pages = page_collection_lock(start, end);
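     /* Split the range page by page, clamping the last chunk to @end.  */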
2031     for (next = (start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2032          start < end;
2033          start = next, next += TARGET_PAGE_SIZE) {
2034         PageDesc *pd = page_find(start >> TARGET_PAGE_BITS);
2035         tb_page_addr_t bound = MIN(next, end);
2036 
2037         if (pd == NULL) {
2038             continue;
2039         }
2040         tb_invalidate_phys_page_range__locked(pages, pd, start, bound, 0);
2041     }
2042     page_collection_unlock(pages);
2043 }
2044 
2045 #ifdef CONFIG_SOFTMMU
2046 /* len must be <= 8 and start must be a multiple of len.
2047  * Called via softmmu_template.h when code areas are written to with
2048  * iothread mutex not held.
2049  *
2050  * Call with all @pages in the range [@start, @start + len[ locked.
2051  */
2052 void tb_invalidate_phys_page_fast(struct page_collection *pages,
2053                                   tb_page_addr_t start, int len,
2054                                   uintptr_t retaddr)
2055 {
2056     PageDesc *p;
2057 
2058     assert_memory_lock();
2059 
2060     p = page_find(start >> TARGET_PAGE_BITS);
2061     if (!p) {
2062         return;
2063     }
2064 
2065     assert_page_locked(p);
2066     if (!p->code_bitmap &&
2067         ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
2068         build_page_bitmap(p);
2069     }
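     /*
      * The code bitmap has one bit per byte of the page covered by translated
      * code; if none of the written bytes intersect it, the expensive
      * invalidate can be skipped.
      */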
2070     if (p->code_bitmap) {
2071         unsigned int nr;
2072         unsigned long b;
2073 
2074         nr = start & ~TARGET_PAGE_MASK;
2075         b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
2076         if (b & ((1 << len) - 1)) {
2077             goto do_invalidate;
2078         }
2079     } else {
2080     do_invalidate:
2081         tb_invalidate_phys_page_range__locked(pages, p, start, start + len,
2082                                               retaddr);
2083     }
2084 }
2085 #else
2086 /* Called with mmap_lock held. If pc is not 0 then it indicates the
2087  * host PC of the faulting store instruction that caused this invalidate.
2088  * Returns true if the caller needs to abort execution of the current
2089  * TB (because it was modified by this store and the guest CPU has
2090  * precise-SMC semantics).
2091  */
2092 static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
2093 {
2094     TranslationBlock *tb;
2095     PageDesc *p;
2096     int n;
2097 #ifdef TARGET_HAS_PRECISE_SMC
2098     TranslationBlock *current_tb = NULL;
2099     CPUState *cpu = current_cpu;
2100     CPUArchState *env = NULL;
2101     int current_tb_modified = 0;
2102     target_ulong current_pc = 0;
2103     target_ulong current_cs_base = 0;
2104     uint32_t current_flags = 0;
2105 #endif
2106 
2107     assert_memory_lock();
2108 
2109     addr &= TARGET_PAGE_MASK;
2110     p = page_find(addr >> TARGET_PAGE_BITS);
2111     if (!p) {
2112         return false;
2113     }
2114 
2115 #ifdef TARGET_HAS_PRECISE_SMC
2116     if (p->first_tb && pc != 0) {
2117         current_tb = tcg_tb_lookup(pc);
2118     }
2119     if (cpu != NULL) {
2120         env = cpu->env_ptr;
2121     }
2122 #endif
2123     assert_page_locked(p);
2124     PAGE_FOR_EACH_TB(p, tb, n) {
2125 #ifdef TARGET_HAS_PRECISE_SMC
2126         if (current_tb == tb &&
2127             (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
2128             /* If we are modifying the current TB, we must stop
2129                its execution. We could be more precise by checking
2130                that the modification is after the current PC, but it
2131                would require a specialized function to partially
2132                restore the CPU state */
2133 
2134             current_tb_modified = 1;
2135             cpu_restore_state_from_tb(cpu, current_tb, pc, true);
2136             cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
2137                                  &current_flags);
2138         }
2139 #endif /* TARGET_HAS_PRECISE_SMC */
2140         tb_phys_invalidate(tb, addr);
2141     }
2142     p->first_tb = (uintptr_t)NULL;
2143 #ifdef TARGET_HAS_PRECISE_SMC
2144     if (current_tb_modified) {
2145         /* Force execution of one insn next time.  */
2146         cpu->cflags_next_tb = 1 | curr_cflags();
2147         return true;
2148     }
2149 #endif
2150 
2151     return false;
2152 }
2153 #endif
2154 
2155 /* user-mode: call with mmap_lock held */
2156 void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr)
2157 {
2158     TranslationBlock *tb;
2159 
2160     assert_memory_lock();
2161 
2162     tb = tcg_tb_lookup(retaddr);
2163     if (tb) {
2164         /* We can use retranslation to find the PC.  */
2165         cpu_restore_state_from_tb(cpu, tb, retaddr, true);
2166         tb_phys_invalidate(tb, -1);
2167     } else {
2168         /* The exception probably happened in a helper.  The CPU state should
2169            have been saved before calling it. Fetch the PC from there.  */
2170         CPUArchState *env = cpu->env_ptr;
2171         target_ulong pc, cs_base;
2172         tb_page_addr_t addr;
2173         uint32_t flags;
2174 
2175         cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
2176         addr = get_page_addr_code(env, pc);
2177         if (addr != -1) {
2178             tb_invalidate_phys_range(addr, addr + 1);
2179         }
2180     }
2181 }
2182 
2183 #ifndef CONFIG_USER_ONLY
2184 /* In deterministic execution (icount) mode, instructions performing device
2185  * I/O must be at the end of the TB.
2186  *
2187  * Called by softmmu_template.h, with iothread mutex not held.
2188  */
2189 void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
2190 {
2191 #if defined(TARGET_MIPS) || defined(TARGET_SH4)
2192     CPUArchState *env = cpu->env_ptr;
2193 #endif
2194     TranslationBlock *tb;
2195     uint32_t n;
2196 
2197     tb = tcg_tb_lookup(retaddr);
2198     if (!tb) {
2199         cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
2200                   (void *)retaddr);
2201     }
2202     cpu_restore_state_from_tb(cpu, tb, retaddr, true);
2203 
2204     /* On MIPS and SH, delay slot instructions can only be restarted if
2205        they were already the first instruction in the TB.  If this is not
2206        the first instruction in a TB then re-execute the preceding
2207        branch.  */
2208     n = 1;
2209 #if defined(TARGET_MIPS)
2210     if ((env->hflags & MIPS_HFLAG_BMASK) != 0
2211         && env->active_tc.PC != tb->pc) {
2212         env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
2213         cpu_neg(cpu)->icount_decr.u16.low++;
2214         env->hflags &= ~MIPS_HFLAG_BMASK;
2215         n = 2;
2216     }
2217 #elif defined(TARGET_SH4)
2218     if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
2219         && env->pc != tb->pc) {
2220         env->pc -= 2;
2221         cpu_neg(cpu)->icount_decr.u16.low++;
2222         env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
2223         n = 2;
2224     }
2225 #endif
2226 
2227     /* Generate a new TB executing the I/O insn.  */
2228     cpu->cflags_next_tb = curr_cflags() | CF_LAST_IO | n;
2229 
2230     if (tb_cflags(tb) & CF_NOCACHE) {
2231         if (tb->orig_tb) {
2232             /* Invalidate original TB if this TB was generated in
2233              * cpu_exec_nocache() */
2234             tb_phys_invalidate(tb->orig_tb, -1);
2235         }
2236         tcg_tb_remove(tb);
2237     }
2238 
2239     /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
2240      * the first in the TB) then we end up generating a whole new TB and
2241      * repeating the fault, which is horribly inefficient.
2242      * Better would be to execute just this insn uncached, or generate a
2243      * second new TB.
2244      */
2245     cpu_loop_exit_noexc(cpu);
2246 }
2247 
2248 static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
2249 {
2250     unsigned int i, i0 = tb_jmp_cache_hash_page(page_addr);
2251 
2252     for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
2253         atomic_set(&cpu->tb_jmp_cache[i0 + i], NULL);
2254     }
2255 }
2256 
2257 void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
2258 {
2259     /* Discard jump cache entries for any tb which might potentially
2260        overlap the flushed page.  */
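     /* A TB spans at most two pages, so a TB starting on the previous page
        may extend into the one being flushed.  */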
2261     tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
2262     tb_jmp_cache_clear_page(cpu, addr);
2263 }
2264 
2265 static void print_qht_statistics(struct qht_stats hst)
2266 {
2267     uint32_t hgram_opts;
2268     size_t hgram_bins;
2269     char *hgram;
2270 
2271     if (!hst.head_buckets) {
2272         return;
2273     }
2274     qemu_printf("TB hash buckets     %zu/%zu (%0.2f%% head buckets used)\n",
2275                 hst.used_head_buckets, hst.head_buckets,
2276                 (double)hst.used_head_buckets / hst.head_buckets * 100);
2277 
2278     hgram_opts =  QDIST_PR_BORDER | QDIST_PR_LABELS;
2279     hgram_opts |= QDIST_PR_100X   | QDIST_PR_PERCENT;
2280     if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) {
2281         hgram_opts |= QDIST_PR_NODECIMAL;
2282     }
2283     hgram = qdist_pr(&hst.occupancy, 10, hgram_opts);
2284     qemu_printf("TB hash occupancy   %0.2f%% avg chain occ. Histogram: %s\n",
2285                 qdist_avg(&hst.occupancy) * 100, hgram);
2286     g_free(hgram);
2287 
2288     hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
2289     hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain);
2290     if (hgram_bins > 10) {
2291         hgram_bins = 10;
2292     } else {
2293         hgram_bins = 0;
2294         hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE;
2295     }
2296     hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts);
2297     qemu_printf("TB hash avg chain   %0.3f buckets. Histogram: %s\n",
2298                 qdist_avg(&hst.chain), hgram);
2299     g_free(hgram);
2300 }
2301 
2302 struct tb_tree_stats {
2303     size_t nb_tbs;
2304     size_t host_size;
2305     size_t target_size;
2306     size_t max_target_size;
2307     size_t direct_jmp_count;
2308     size_t direct_jmp2_count;
2309     size_t cross_page;
2310 };
2311 
2312 static gboolean tb_tree_stats_iter(gpointer key, gpointer value, gpointer data)
2313 {
2314     const TranslationBlock *tb = value;
2315     struct tb_tree_stats *tst = data;
2316 
2317     tst->nb_tbs++;
2318     tst->host_size += tb->tc.size;
2319     tst->target_size += tb->size;
2320     if (tb->size > tst->max_target_size) {
2321         tst->max_target_size = tb->size;
2322     }
2323     if (tb->page_addr[1] != -1) {
2324         tst->cross_page++;
2325     }
2326     if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
2327         tst->direct_jmp_count++;
2328         if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
2329             tst->direct_jmp2_count++;
2330         }
2331     }
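     /* Returning false keeps the g_tree traversal going (GTraverseFunc).  */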
2332     return false;
2333 }
2334 
2335 void dump_exec_info(void)
2336 {
2337     struct tb_tree_stats tst = {};
2338     struct qht_stats hst;
2339     size_t nb_tbs, flush_full, flush_part, flush_elide;
2340 
2341     tcg_tb_foreach(tb_tree_stats_iter, &tst);
2342     nb_tbs = tst.nb_tbs;
2343     /* XXX: avoid using doubles ? */
2344     qemu_printf("Translation buffer state:\n");
2345     /*
2346      * Report total code size including the padding and TB structs;
2347      * otherwise users might think "-tb-size" is not honoured.
2348      * For avg host size we use the precise numbers from tb_tree_stats though.
2349      */
2350     qemu_printf("gen code size       %zu/%zu\n",
2351                 tcg_code_size(), tcg_code_capacity());
2352     qemu_printf("TB count            %zu\n", nb_tbs);
2353     qemu_printf("TB avg target size  %zu max=%zu bytes\n",
2354                 nb_tbs ? tst.target_size / nb_tbs : 0,
2355                 tst.max_target_size);
2356     qemu_printf("TB avg host size    %zu bytes (expansion ratio: %0.1f)\n",
2357                 nb_tbs ? tst.host_size / nb_tbs : 0,
2358                 tst.target_size ? (double)tst.host_size / tst.target_size : 0);
2359     qemu_printf("cross page TB count %zu (%zu%%)\n", tst.cross_page,
2360                 nb_tbs ? (tst.cross_page * 100) / nb_tbs : 0);
2361     qemu_printf("direct jump count   %zu (%zu%%) (2 jumps=%zu %zu%%)\n",
2362                 tst.direct_jmp_count,
2363                 nb_tbs ? (tst.direct_jmp_count * 100) / nb_tbs : 0,
2364                 tst.direct_jmp2_count,
2365                 nb_tbs ? (tst.direct_jmp2_count * 100) / nb_tbs : 0);
2366 
2367     qht_statistics_init(&tb_ctx.htable, &hst);
2368     print_qht_statistics(hst);
2369     qht_statistics_destroy(&hst);
2370 
2371     qemu_printf("\nStatistics:\n");
2372     qemu_printf("TB flush count      %u\n",
2373                 atomic_read(&tb_ctx.tb_flush_count));
2374     qemu_printf("TB invalidate count %zu\n",
2375                 tcg_tb_phys_invalidate_count());
2376 
2377     tlb_flush_counts(&flush_full, &flush_part, &flush_elide);
2378     qemu_printf("TLB full flushes    %zu\n", flush_full);
2379     qemu_printf("TLB partial flushes %zu\n", flush_part);
2380     qemu_printf("TLB elided flushes  %zu\n", flush_elide);
2381     tcg_dump_info();
2382 }
2383 
2384 void dump_opcount_info(void)
2385 {
2386     tcg_dump_op_count();
2387 }
2388 
2389 #else /* CONFIG_USER_ONLY */
2390 
2391 void cpu_interrupt(CPUState *cpu, int mask)
2392 {
2393     g_assert(qemu_mutex_iothread_locked());
2394     cpu->interrupt_request |= mask;
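     /*
      * Setting the high half of icount_decr makes the execution loop notice
      * the pending interrupt and exit the current TB.
      */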
2395     atomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1);
2396 }
2397 
2398 /*
2399  * Walks guest process memory "regions" one by one
2400  * and calls callback function 'fn' for each region.
2401  */
2402 struct walk_memory_regions_data {
2403     walk_memory_regions_fn fn;
2404     void *priv;
2405     target_ulong start;
2406     int prot;
2407 };
2408 
2409 static int walk_memory_regions_end(struct walk_memory_regions_data *data,
2410                                    target_ulong end, int new_prot)
2411 {
2412     if (data->start != -1u) {
2413         int rc = data->fn(data->priv, data->start, end, data->prot);
2414         if (rc != 0) {
2415             return rc;
2416         }
2417     }
2418 
2419     data->start = (new_prot ? end : -1u);
2420     data->prot = new_prot;
2421 
2422     return 0;
2423 }
2424 
2425 static int walk_memory_regions_1(struct walk_memory_regions_data *data,
2426                                  target_ulong base, int level, void **lp)
2427 {
2428     target_ulong pa;
2429     int i, rc;
2430 
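     /* Level 0 entries are PageDesc leaves; higher levels are tables of
        pointers to the next level down.  */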
2431     if (*lp == NULL) {
2432         return walk_memory_regions_end(data, base, 0);
2433     }
2434 
2435     if (level == 0) {
2436         PageDesc *pd = *lp;
2437 
2438         for (i = 0; i < V_L2_SIZE; ++i) {
2439             int prot = pd[i].flags;
2440 
2441             pa = base | (i << TARGET_PAGE_BITS);
2442             if (prot != data->prot) {
2443                 rc = walk_memory_regions_end(data, pa, prot);
2444                 if (rc != 0) {
2445                     return rc;
2446                 }
2447             }
2448         }
2449     } else {
2450         void **pp = *lp;
2451 
2452         for (i = 0; i < V_L2_SIZE; ++i) {
2453             pa = base | ((target_ulong)i <<
2454                 (TARGET_PAGE_BITS + V_L2_BITS * level));
2455             rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2456             if (rc != 0) {
2457                 return rc;
2458             }
2459         }
2460     }
2461 
2462     return 0;
2463 }
2464 
2465 int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2466 {
2467     struct walk_memory_regions_data data;
2468     uintptr_t i, l1_sz = v_l1_size;
2469 
2470     data.fn = fn;
2471     data.priv = priv;
2472     data.start = -1u;
2473     data.prot = 0;
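     /* data.start == -1u means no memory region is currently open.  */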
2474 
2475     for (i = 0; i < l1_sz; i++) {
2476         target_ulong base = i << (v_l1_shift + TARGET_PAGE_BITS);
2477         int rc = walk_memory_regions_1(&data, base, v_l2_levels, l1_map + i);
2478         if (rc != 0) {
2479             return rc;
2480         }
2481     }
2482 
2483     return walk_memory_regions_end(&data, 0, 0);
2484 }
2485 
2486 static int dump_region(void *priv, target_ulong start,
2487     target_ulong end, unsigned long prot)
2488 {
2489     FILE *f = (FILE *)priv;
2490 
2491     (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx
2492         " "TARGET_FMT_lx" %c%c%c\n",
2493         start, end, end - start,
2494         ((prot & PAGE_READ) ? 'r' : '-'),
2495         ((prot & PAGE_WRITE) ? 'w' : '-'),
2496         ((prot & PAGE_EXEC) ? 'x' : '-'));
2497 
2498     return 0;
2499 }
2500 
2501 /* dump memory mappings */
2502 void page_dump(FILE *f)
2503 {
2504     const int length = sizeof(target_ulong) * 2;
2505     (void) fprintf(f, "%-*s %-*s %-*s %s\n",
2506             length, "start", length, "end", length, "size", "prot");
2507     walk_memory_regions(f, dump_region);
2508 }
2509 
2510 int page_get_flags(target_ulong address)
2511 {
2512     PageDesc *p;
2513 
2514     p = page_find(address >> TARGET_PAGE_BITS);
2515     if (!p) {
2516         return 0;
2517     }
2518     return p->flags;
2519 }
2520 
2521 /* Modify the flags of a page and invalidate the code if necessary.
2522    The flag PAGE_WRITE_ORG is positioned automatically depending
2523    on PAGE_WRITE.  The mmap_lock should already be held.  */
2524 void page_set_flags(target_ulong start, target_ulong end, int flags)
2525 {
2526     target_ulong addr, len;
2527 
2528     /* This function should never be called with addresses outside the
2529        guest address space.  If this assert fires, it probably indicates
2530        a missing call to h2g_valid.  */
2531 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2532     assert(end <= ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2533 #endif
2534     assert(start < end);
2535     assert_memory_lock();
2536 
2537     start = start & TARGET_PAGE_MASK;
2538     end = TARGET_PAGE_ALIGN(end);
2539 
2540     if (flags & PAGE_WRITE) {
2541         flags |= PAGE_WRITE_ORG;
2542     }
2543 
2544     for (addr = start, len = end - start;
2545          len != 0;
2546          len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2547         PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2548 
2549         /* If the page is becoming writable and was not writable before,
2550            invalidate any translated code it contains.  */
2551         if (!(p->flags & PAGE_WRITE) &&
2552             (flags & PAGE_WRITE) &&
2553             p->first_tb) {
2554             tb_invalidate_phys_page(addr, 0);
2555         }
2556         p->flags = flags;
2557     }
2558 }
2559 
2560 int page_check_range(target_ulong start, target_ulong len, int flags)
2561 {
2562     PageDesc *p;
2563     target_ulong end;
2564     target_ulong addr;
2565 
2566     /* This function should never be called with addresses outside the
2567        guest address space.  If this assert fires, it probably indicates
2568        a missing call to h2g_valid.  */
2569 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2570     assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2571 #endif
2572 
2573     if (len == 0) {
2574         return 0;
2575     }
2576     if (start + len - 1 < start) {
2577         /* We've wrapped around.  */
2578         return -1;
2579     }
2580 
2581     /* must do this before we lose bits in the next step */
2582     end = TARGET_PAGE_ALIGN(start + len);
2583     start = start & TARGET_PAGE_MASK;
2584 
2585     for (addr = start, len = end - start;
2586          len != 0;
2587          len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2588         p = page_find(addr >> TARGET_PAGE_BITS);
2589         if (!p) {
2590             return -1;
2591         }
2592         if (!(p->flags & PAGE_VALID)) {
2593             return -1;
2594         }
2595 
2596         if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
2597             return -1;
2598         }
2599         if (flags & PAGE_WRITE) {
2600             if (!(p->flags & PAGE_WRITE_ORG)) {
2601                 return -1;
2602             }
2603             /* unprotect the page if it was put read-only because it
2604                contains translated code */
2605             if (!(p->flags & PAGE_WRITE)) {
2606                 if (!page_unprotect(addr, 0)) {
2607                     return -1;
2608                 }
2609             }
2610         }
2611     }
2612     return 0;
2613 }
2614 
2615 /* called from signal handler: invalidate the code and unprotect the
2616  * page. Return 0 if the fault was not handled, 1 if it was handled,
2617  * and 2 if it was handled but the caller must cause the TB to be
2618  * immediately exited. (We can only return 2 if the 'pc' argument is
2619  * non-zero.)
2620  */
2621 int page_unprotect(target_ulong address, uintptr_t pc)
2622 {
2623     unsigned int prot;
2624     bool current_tb_invalidated;
2625     PageDesc *p;
2626     target_ulong host_start, host_end, addr;
2627 
2628     /* Technically this isn't safe inside a signal handler.  However we
2629        know this only ever happens in a synchronous SEGV handler, so in
2630        practice it seems to be ok.  */
2631     mmap_lock();
2632 
2633     p = page_find(address >> TARGET_PAGE_BITS);
2634     if (!p) {
2635         mmap_unlock();
2636         return 0;
2637     }
2638 
2639     /* if the page was really writable, then we change its
2640        protection back to writable */
2641     if (p->flags & PAGE_WRITE_ORG) {
2642         current_tb_invalidated = false;
2643         if (p->flags & PAGE_WRITE) {
2644             /* If the page is actually marked WRITE then assume this is because
2645              * this thread raced with another one which got here first and
2646              * set the page to PAGE_WRITE and did the TB invalidate for us.
2647              */
2648 #ifdef TARGET_HAS_PRECISE_SMC
2649             TranslationBlock *current_tb = tcg_tb_lookup(pc);
2650             if (current_tb) {
2651                 current_tb_invalidated = tb_cflags(current_tb) & CF_INVALID;
2652             }
2653 #endif
2654         } else {
2655             host_start = address & qemu_host_page_mask;
2656             host_end = host_start + qemu_host_page_size;
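                 /*
                  * A single host page may hold several target pages; since
                  * mprotect() works at host page granularity, restore
                  * PAGE_WRITE on each target page within it.
                  */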
2657 
2658             prot = 0;
2659             for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
2660                 p = page_find(addr >> TARGET_PAGE_BITS);
2661                 p->flags |= PAGE_WRITE;
2662                 prot |= p->flags;
2663 
2664                 /* and since the content will be modified, we must invalidate
2665                    the corresponding translated code. */
2666                 current_tb_invalidated |= tb_invalidate_phys_page(addr, pc);
2667 #ifdef CONFIG_USER_ONLY
2668                 if (DEBUG_TB_CHECK_GATE) {
2669                     tb_invalidate_check(addr);
2670                 }
2671 #endif
2672             }
2673             mprotect((void *)g2h(host_start), qemu_host_page_size,
2674                      prot & PAGE_BITS);
2675         }
2676         mmap_unlock();
2677         /* If current TB was invalidated return to main loop */
2678         return current_tb_invalidated ? 2 : 1;
2679     }
2680     mmap_unlock();
2681     return 0;
2682 }
2683 #endif /* CONFIG_USER_ONLY */
2684 
2685 /* This is a wrapper for common code that cannot use CONFIG_SOFTMMU */
2686 void tcg_flush_softmmu_tlb(CPUState *cs)
2687 {
2688 #ifdef CONFIG_SOFTMMU
2689     tlb_flush(cs);
2690 #endif
2691 }
2692