xref: /openbmc/qemu/accel/tcg/cputlb.c (revision af803a4fcb1c707a6a885b5736335baf794f7676)
/*
 *  Common CPU TLB handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "hw/core/tcg-cpu-ops.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
#include "qemu/error-report.h"
#include "exec/log.h"
#include "exec/helper-proto.h"
#include "qemu/atomic.h"
#include "qemu/atomic128.h"
#include "exec/translate-all.h"
#include "trace/trace-root.h"
#include "tb-hash.h"
#include "internal.h"
#ifdef CONFIG_PLUGIN
#include "qemu/plugin-memory.h"
#endif
#include "tcg/tcg-ldst.h"

/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif

#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)

#define assert_cpu_is_self(cpu) do {                              \
        if (DEBUG_TLB_GATE) {                                     \
            g_assert(!(cpu)->created || qemu_cpu_is_self(cpu));   \
        }                                                         \
    } while (0)

/* run_on_cpu_data.target_ptr should always be big enough for a
 * target_ulong even on 32 bit builds */
QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));

/* We currently can't handle more than 16 bits in the MMUIDX bitmask.
 */
QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
#define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)

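/* Return the number of entries in the fast TLB; fast->mask encodes
   (n_entries - 1) << CPU_TLB_ENTRY_BITS. */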
static inline size_t tlb_n_entries(CPUTLBDescFast *fast)
{
    return (fast->mask >> CPU_TLB_ENTRY_BITS) + 1;
}

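/* Return the size in bytes of the fast TLB's entry array. */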
static inline size_t sizeof_tlb(CPUTLBDescFast *fast)
{
    return fast->mask + (1 << CPU_TLB_ENTRY_BITS);
}

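/* Begin a new use-rate observation window at time @ns, with @max_entries
   as the initial high-water mark. */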
static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
                             size_t max_entries)
{
    desc->window_begin_ns = ns;
    desc->window_max_entries = max_entries;
}

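/* Clear the block of tb_jmp_cache entries that @page_addr hashes to. */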
static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
{
    unsigned int i, i0 = tb_jmp_cache_hash_page(page_addr);

    for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
        qatomic_set(&cpu->tb_jmp_cache[i0 + i], NULL);
    }
}

static void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
{
    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
    tb_jmp_cache_clear_page(cpu, addr);
}

/**
 * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
 * @desc: The CPUTLBDesc portion of the TLB
 * @fast: The CPUTLBDescFast portion of the same TLB
 *
 * Called with tlb_lock_held.
 *
 * We have two main constraints when resizing a TLB: (1) we only resize it
 * on a TLB flush (otherwise we'd have to take a perf hit by either rehashing
 * the array or unnecessarily flushing it), which means we do not control how
 * frequently the resizing can occur; (2) we don't have access to the guest's
 * future scheduling decisions, and therefore have to decide the magnitude of
 * the resize based on past observations.
 *
 * In general, a memory-hungry process can benefit greatly from an appropriately
 * sized TLB, since a guest TLB miss is very expensive. This doesn't mean that
 * we just have to make the TLB as large as possible; while an oversized TLB
 * results in minimal TLB miss rates, it also takes longer to be flushed
 * (flushes can be _very_ frequent), and the reduced locality can also hurt
 * performance.
 *
 * To achieve near-optimal performance for all kinds of workloads, we:
 *
 * 1. Aggressively increase the size of the TLB when the use rate of the
 * TLB being flushed is high, since it is likely that in the near future this
 * memory-hungry process will execute again, and its memory hungriness will
 * probably be similar.
 *
 * 2. Slowly reduce the size of the TLB as the use rate declines over a
 * reasonably large time window. The rationale is that if in such a time window
 * we have not observed a high TLB use rate, it is likely that we won't observe
 * it in the near future. In that case, once a time window expires we downsize
 * the TLB to match the maximum use rate observed in the window.
 *
 * 3. Try to keep the maximum use rate in a time window in the 30-70% range,
 * since in that range performance is likely near-optimal. Recall that the TLB
 * is direct mapped, so we want the use rate to be low (or at least not too
 * high), since otherwise we are likely to have a significant amount of
 * conflict misses.
 */
static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast,
                                  int64_t now)
{
    size_t old_size = tlb_n_entries(fast);
    size_t rate;
    size_t new_size = old_size;
    int64_t window_len_ms = 100;
    int64_t window_len_ns = window_len_ms * 1000 * 1000;
    bool window_expired = now > desc->window_begin_ns + window_len_ns;

    if (desc->n_used_entries > desc->window_max_entries) {
        desc->window_max_entries = desc->n_used_entries;
    }
    rate = desc->window_max_entries * 100 / old_size;

    if (rate > 70) {
        new_size = MIN(old_size << 1, 1 << CPU_TLB_DYN_MAX_BITS);
    } else if (rate < 30 && window_expired) {
        size_t ceil = pow2ceil(desc->window_max_entries);
        size_t expected_rate = desc->window_max_entries * 100 / ceil;

        /*
         * Avoid undersizing when the max number of entries seen is just below
         * a pow2. For instance, if max_entries == 1025, the expected use rate
         * would be 1025/2048==50%. However, if max_entries == 1023, we'd get
         * 1023/1024==99.9% use rate, so we'd likely end up doubling the size
         * later. Thus, make sure that the expected use rate remains below 70%.
         * (and since we double the size, that means the lowest rate we'd
         * expect to get is 35%, which is still in the 30-70% range where
         * we consider that the size is appropriate.)
         */
        if (expected_rate > 70) {
            ceil *= 2;
        }
        new_size = MAX(ceil, 1 << CPU_TLB_DYN_MIN_BITS);
    }

    if (new_size == old_size) {
        if (window_expired) {
            tlb_window_reset(desc, now, desc->n_used_entries);
        }
        return;
    }

    g_free(fast->table);
    g_free(desc->fulltlb);

    tlb_window_reset(desc, now, 0);
    /* desc->n_used_entries is cleared by the caller */
    fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
    fast->table = g_try_new(CPUTLBEntry, new_size);
    desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size);

    /*
     * If the allocations fail, try smaller sizes. We just freed some
     * memory, so going back to half of new_size has a good chance of working.
     * Increased memory pressure elsewhere in the system might cause the
     * allocations to fail though, so we progressively reduce the allocation
     * size, aborting if we cannot even allocate the smallest TLB we support.
     */
    while (fast->table == NULL || desc->fulltlb == NULL) {
        if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
            error_report("%s: %s", __func__, strerror(errno));
            abort();
        }
        new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS);
        fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;

        g_free(fast->table);
        g_free(desc->fulltlb);
        fast->table = g_try_new(CPUTLBEntry, new_size);
        desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size);
    }
}

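/*
 * Illustrative walk-through of the sizing heuristic above, with assumed
 * numbers: if old_size == 1024 and window_max_entries == 768, then
 * rate == 75 (> 70), so the table doubles to 2048 entries.  If instead
 * window_max_entries == 200 and the window has expired, rate == 19 (< 30);
 * pow2ceil(200) == 256 gives expected_rate == 78 (> 70), so ceil doubles
 * to 512 and the table shrinks to 512 entries.
 */

/*
 * Reset a TLB's bookkeeping and invalidate every entry: both the fast
 * table and the victim cache are filled with -1 (empty) entries.
 */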
static void tlb_mmu_flush_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast)
{
    desc->n_used_entries = 0;
    desc->large_page_addr = -1;
    desc->large_page_mask = -1;
    desc->vindex = 0;
    memset(fast->table, -1, sizeof_tlb(fast));
    memset(desc->vtable, -1, sizeof(desc->vtable));
}

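/* Resize (if warranted) and then flush one mmu_idx's TLB.
   Called with tlb_c.lock held. */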
static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx,
                                        int64_t now)
{
    CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
    CPUTLBDescFast *fast = &env_tlb(env)->f[mmu_idx];

    tlb_mmu_resize_locked(desc, fast, now);
    tlb_mmu_flush_locked(desc, fast);
}

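/* Allocate one TLB at the default dynamic size and initialize it flushed. */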
static void tlb_mmu_init(CPUTLBDesc *desc, CPUTLBDescFast *fast, int64_t now)
{
    size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;

    tlb_window_reset(desc, now, 0);
    desc->n_used_entries = 0;
    fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
    fast->table = g_new(CPUTLBEntry, n_entries);
    desc->fulltlb = g_new(CPUTLBEntryFull, n_entries);
    tlb_mmu_flush_locked(desc, fast);
}

static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
{
    env_tlb(env)->d[mmu_idx].n_used_entries++;
}

static inline void tlb_n_used_entries_dec(CPUArchState *env, uintptr_t mmu_idx)
{
    env_tlb(env)->d[mmu_idx].n_used_entries--;
}

void tlb_init(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;
    int64_t now = get_clock_realtime();
    int i;

    qemu_spin_init(&env_tlb(env)->c.lock);

    /* All tlbs are initialized flushed. */
    env_tlb(env)->c.dirty = 0;

    for (i = 0; i < NB_MMU_MODES; i++) {
        tlb_mmu_init(&env_tlb(env)->d[i], &env_tlb(env)->f[i], now);
    }
}

void tlb_destroy(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;
    int i;

    qemu_spin_destroy(&env_tlb(env)->c.lock);
    for (i = 0; i < NB_MMU_MODES; i++) {
        CPUTLBDesc *desc = &env_tlb(env)->d[i];
        CPUTLBDescFast *fast = &env_tlb(env)->f[i];

        g_free(fast->table);
        g_free(desc->fulltlb);
    }
}

/* flush_all_helper: run fn across all cpus
 *
 * The helper is queued as async work on every cpu other than @src;
 * the caller is responsible for running (or queuing) fn on @src itself,
 * e.g. via async_safe_run_on_cpu to create a synchronisation point
 * where all queued work is finished before execution starts again.
 */
static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
                             run_on_cpu_data d)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, fn, d);
        }
    }
}

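/* Sum the full, partial, and elided flush counters across all vCPUs. */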
void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
{
    CPUState *cpu;
    size_t full = 0, part = 0, elide = 0;

    CPU_FOREACH(cpu) {
        CPUArchState *env = cpu->env_ptr;

        full += qatomic_read(&env_tlb(env)->c.full_flush_count);
        part += qatomic_read(&env_tlb(env)->c.part_flush_count);
        elide += qatomic_read(&env_tlb(env)->c.elide_flush_count);
    }
    *pfull = full;
    *ppart = part;
    *pelide = elide;
}

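/*
 * Worker for tlb_flush_by_mmuidx and friends; runs on the target vCPU.
 * data.host_int is the bitmask of mmu_idx to flush; only TLBs that are
 * still dirty are actually flushed, and the statistics counters are
 * updated accordingly.
 */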
static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    uint16_t asked = data.host_int;
    uint16_t all_dirty, work, to_clean;
    int64_t now = get_clock_realtime();

    assert_cpu_is_self(cpu);

    tlb_debug("mmu_idx:0x%04" PRIx16 "\n", asked);

    qemu_spin_lock(&env_tlb(env)->c.lock);

    all_dirty = env_tlb(env)->c.dirty;
    to_clean = asked & all_dirty;
    all_dirty &= ~to_clean;
    env_tlb(env)->c.dirty = all_dirty;

    for (work = to_clean; work != 0; work &= work - 1) {
        int mmu_idx = ctz32(work);
        tlb_flush_one_mmuidx_locked(env, mmu_idx, now);
    }

    qemu_spin_unlock(&env_tlb(env)->c.lock);

    cpu_tb_jmp_cache_clear(cpu);

    if (to_clean == ALL_MMUIDX_BITS) {
        qatomic_set(&env_tlb(env)->c.full_flush_count,
                   env_tlb(env)->c.full_flush_count + 1);
    } else {
        qatomic_set(&env_tlb(env)->c.part_flush_count,
                   env_tlb(env)->c.part_flush_count + ctpop16(to_clean));
        if (to_clean != asked) {
            qatomic_set(&env_tlb(env)->c.elide_flush_count,
                       env_tlb(env)->c.elide_flush_count +
                       ctpop16(asked & ~to_clean));
        }
    }
}

void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
    tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);

    if (cpu->created && !qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
                         RUN_ON_CPU_HOST_INT(idxmap));
    } else {
        tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap));
    }
}

void tlb_flush(CPUState *cpu)
{
    tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS);
}

void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus(src_cpu, ALL_MMUIDX_BITS);
}

void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS);
}

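/* Return true if @page matches the entry's read, write, or code address,
   comparing only the bits selected by @mask. */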
static bool tlb_hit_page_mask_anyprot(CPUTLBEntry *tlb_entry,
                                      target_ulong page, target_ulong mask)
{
    page &= mask;
    mask &= TARGET_PAGE_MASK | TLB_INVALID_MASK;

    return (page == (tlb_entry->addr_read & mask) ||
            page == (tlb_addr_write(tlb_entry) & mask) ||
            page == (tlb_entry->addr_code & mask));
}

static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry,
                                        target_ulong page)
{
    return tlb_hit_page_mask_anyprot(tlb_entry, page, -1);
}

/**
 * tlb_entry_is_empty - return true if the entry is not in use
 * @te: pointer to CPUTLBEntry
 */
static inline bool tlb_entry_is_empty(const CPUTLBEntry *te)
{
    return te->addr_read == -1 && te->addr_write == -1 && te->addr_code == -1;
}

/* Called with tlb_c.lock held */
static bool tlb_flush_entry_mask_locked(CPUTLBEntry *tlb_entry,
                                        target_ulong page,
                                        target_ulong mask)
{
    if (tlb_hit_page_mask_anyprot(tlb_entry, page, mask)) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
        return true;
    }
    return false;
}

static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry,
                                          target_ulong page)
{
    return tlb_flush_entry_mask_locked(tlb_entry, page, -1);
}

/* Called with tlb_c.lock held */
static void tlb_flush_vtlb_page_mask_locked(CPUArchState *env, int mmu_idx,
                                            target_ulong page,
                                            target_ulong mask)
{
    CPUTLBDesc *d = &env_tlb(env)->d[mmu_idx];
    int k;

    assert_cpu_is_self(env_cpu(env));
    for (k = 0; k < CPU_VTLB_SIZE; k++) {
        if (tlb_flush_entry_mask_locked(&d->vtable[k], page, mask)) {
            tlb_n_used_entries_dec(env, mmu_idx);
        }
    }
}

static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx,
                                              target_ulong page)
{
    tlb_flush_vtlb_page_mask_locked(env, mmu_idx, page, -1);
}

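/* Flush one page from one mmu_idx; if the page lies within a tracked
   large page, force a full flush of that mmu_idx instead.
   Called with tlb_c.lock held. */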
static void tlb_flush_page_locked(CPUArchState *env, int midx,
                                  target_ulong page)
{
    target_ulong lp_addr = env_tlb(env)->d[midx].large_page_addr;
    target_ulong lp_mask = env_tlb(env)->d[midx].large_page_mask;

    /* Check if we need to flush due to large pages.  */
    if ((page & lp_mask) == lp_addr) {
        tlb_debug("forcing full flush midx %d ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  midx, lp_addr, lp_mask);
        tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
    } else {
        if (tlb_flush_entry_locked(tlb_entry(env, midx, page), page)) {
            tlb_n_used_entries_dec(env, midx);
        }
        tlb_flush_vtlb_page_locked(env, midx, page);
    }
}

/**
 * tlb_flush_page_by_mmuidx_async_0:
 * @cpu: cpu on which to flush
 * @addr: page of virtual address to flush
 * @idxmap: set of mmu_idx to flush
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, flush one page
 * at @addr from the tlbs indicated by @idxmap from @cpu.
 */
static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu,
                                             target_ulong addr,
                                             uint16_t idxmap)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("page addr:" TARGET_FMT_lx " mmu_map:0x%x\n", addr, idxmap);

    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if ((idxmap >> mmu_idx) & 1) {
            tlb_flush_page_locked(env, mmu_idx, addr);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);

    tb_flush_jmp_cache(cpu, addr);
}

/**
 * tlb_flush_page_by_mmuidx_async_1:
 * @cpu: cpu on which to flush
 * @data: encoded addr + idxmap
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, called through
 * async_run_on_cpu.  The idxmap parameter is encoded in the page
 * offset of the target_ptr field.  This limits the set of mmu_idx
 * that can be passed via this method.
 */
static void tlb_flush_page_by_mmuidx_async_1(CPUState *cpu,
                                             run_on_cpu_data data)
{
    target_ulong addr_and_idxmap = (target_ulong) data.target_ptr;
    target_ulong addr = addr_and_idxmap & TARGET_PAGE_MASK;
    uint16_t idxmap = addr_and_idxmap & ~TARGET_PAGE_MASK;

    tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
}

typedef struct {
    target_ulong addr;
    uint16_t idxmap;
} TLBFlushPageByMMUIdxData;

/**
 * tlb_flush_page_by_mmuidx_async_2:
 * @cpu: cpu on which to flush
 * @data: allocated addr + idxmap
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, called through
 * async_run_on_cpu.  The addr+idxmap parameters are stored in a
 * TLBFlushPageByMMUIdxData structure that has been allocated
 * specifically for this helper.  Free the structure when done.
 */
static void tlb_flush_page_by_mmuidx_async_2(CPUState *cpu,
                                             run_on_cpu_data data)
{
    TLBFlushPageByMMUIdxData *d = data.host_ptr;

    tlb_flush_page_by_mmuidx_async_0(cpu, d->addr, d->idxmap);
    g_free(d);
}

void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
{
    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    if (qemu_cpu_is_self(cpu)) {
        tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
    } else if (idxmap < TARGET_PAGE_SIZE) {
        /*
         * Most targets have only a few mmu_idx.  In the case where
         * we can stuff idxmap into the low TARGET_PAGE_BITS, avoid
         * allocating memory for this operation.
         */
        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        TLBFlushPageByMMUIdxData *d = g_new(TLBFlushPageByMMUIdxData, 1);

        /* Otherwise allocate a structure, freed by the worker.  */
        d->addr = addr;
        d->idxmap = idxmap;
        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_2,
                         RUN_ON_CPU_HOST_PTR(d));
    }
}

void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
    tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS);
}

void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr,
                                       uint16_t idxmap)
{
    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    /*
     * Allocate memory to hold addr+idxmap only when needed.
     * See tlb_flush_page_by_mmuidx for details.
     */
    if (idxmap < TARGET_PAGE_SIZE) {
        flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        CPUState *dst_cpu;

        /* Allocate a separate data block for each destination cpu.  */
        CPU_FOREACH(dst_cpu) {
            if (dst_cpu != src_cpu) {
                TLBFlushPageByMMUIdxData *d
                    = g_new(TLBFlushPageByMMUIdxData, 1);

                d->addr = addr;
                d->idxmap = idxmap;
                async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
                                 RUN_ON_CPU_HOST_PTR(d));
            }
        }
    }

    tlb_flush_page_by_mmuidx_async_0(src_cpu, addr, idxmap);
}

void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
{
    tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS);
}

void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                              target_ulong addr,
                                              uint16_t idxmap)
{
    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    /*
     * Allocate memory to hold addr+idxmap only when needed.
     * See tlb_flush_page_by_mmuidx for details.
     */
    if (idxmap < TARGET_PAGE_SIZE) {
        flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
        async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                              RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        CPUState *dst_cpu;
        TLBFlushPageByMMUIdxData *d;

        /* Allocate a separate data block for each destination cpu.  */
        CPU_FOREACH(dst_cpu) {
            if (dst_cpu != src_cpu) {
                d = g_new(TLBFlushPageByMMUIdxData, 1);
                d->addr = addr;
                d->idxmap = idxmap;
                async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
                                 RUN_ON_CPU_HOST_PTR(d));
            }
        }

        d = g_new(TLBFlushPageByMMUIdxData, 1);
        d->addr = addr;
        d->idxmap = idxmap;
        async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_2,
                              RUN_ON_CPU_HOST_PTR(d));
    }
}

void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr)
{
    tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
}

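/*
 * Flush all pages in [addr, addr + len) from one mmu_idx, matching
 * addresses under @bits significant bits.  Falls back to a full flush
 * when @bits covers fewer bits than the TLB index, when the range spans
 * more pages than the TLB holds, or when it overlaps a tracked large page.
 * Called with tlb_c.lock held.
 */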
7103c4ddec1SRichard Henderson static void tlb_flush_range_locked(CPUArchState *env, int midx,
7113c4ddec1SRichard Henderson                                    target_ulong addr, target_ulong len,
7123c4ddec1SRichard Henderson                                    unsigned bits)
7133ab6e68cSRichard Henderson {
7143ab6e68cSRichard Henderson     CPUTLBDesc *d = &env_tlb(env)->d[midx];
7153ab6e68cSRichard Henderson     CPUTLBDescFast *f = &env_tlb(env)->f[midx];
7163ab6e68cSRichard Henderson     target_ulong mask = MAKE_64BIT_MASK(0, bits);
7173ab6e68cSRichard Henderson 
7183ab6e68cSRichard Henderson     /*
7193ab6e68cSRichard Henderson      * If @bits is smaller than the tlb size, there may be multiple entries
7203ab6e68cSRichard Henderson      * within the TLB; otherwise all addresses that match under @mask hit
7213ab6e68cSRichard Henderson      * the same TLB entry.
7223ab6e68cSRichard Henderson      * TODO: Perhaps allow bits to be a few bits less than the size.
7233ab6e68cSRichard Henderson      * For now, just flush the entire TLB.
7243c4ddec1SRichard Henderson      *
7253c4ddec1SRichard Henderson      * If @len is larger than the tlb size, then it will take longer to
7263c4ddec1SRichard Henderson      * test all of the entries in the TLB than it will to flush it all.
7273ab6e68cSRichard Henderson      */
7283c4ddec1SRichard Henderson     if (mask < f->mask || len > f->mask) {
7293ab6e68cSRichard Henderson         tlb_debug("forcing full flush midx %d ("
7303c4ddec1SRichard Henderson                   TARGET_FMT_lx "/" TARGET_FMT_lx "+" TARGET_FMT_lx ")\n",
7313c4ddec1SRichard Henderson                   midx, addr, mask, len);
7323ab6e68cSRichard Henderson         tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
7333ab6e68cSRichard Henderson         return;
7343ab6e68cSRichard Henderson     }
7353ab6e68cSRichard Henderson 
7363c4ddec1SRichard Henderson     /*
7373c4ddec1SRichard Henderson      * Check if we need to flush due to large pages.
7383c4ddec1SRichard Henderson      * Because large_page_mask contains all 1's from the msb,
7393c4ddec1SRichard Henderson      * we only need to test the end of the range.
7403c4ddec1SRichard Henderson      */
7413c4ddec1SRichard Henderson     if (((addr + len - 1) & d->large_page_mask) == d->large_page_addr) {
7423ab6e68cSRichard Henderson         tlb_debug("forcing full flush midx %d ("
7433ab6e68cSRichard Henderson                   TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
7443ab6e68cSRichard Henderson                   midx, d->large_page_addr, d->large_page_mask);
7453ab6e68cSRichard Henderson         tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
7463ab6e68cSRichard Henderson         return;
7473ab6e68cSRichard Henderson     }
7483ab6e68cSRichard Henderson 
7493c4ddec1SRichard Henderson     for (target_ulong i = 0; i < len; i += TARGET_PAGE_SIZE) {
7503c4ddec1SRichard Henderson         target_ulong page = addr + i;
7513c4ddec1SRichard Henderson         CPUTLBEntry *entry = tlb_entry(env, midx, page);
7523c4ddec1SRichard Henderson 
7533c4ddec1SRichard Henderson         if (tlb_flush_entry_mask_locked(entry, page, mask)) {
7543ab6e68cSRichard Henderson             tlb_n_used_entries_dec(env, midx);
7553ab6e68cSRichard Henderson         }
7563ab6e68cSRichard Henderson         tlb_flush_vtlb_page_mask_locked(env, midx, page, mask);
7573ab6e68cSRichard Henderson     }
7583c4ddec1SRichard Henderson }
7593ab6e68cSRichard Henderson 
7603ab6e68cSRichard Henderson typedef struct {
7613ab6e68cSRichard Henderson     target_ulong addr;
7623c4ddec1SRichard Henderson     target_ulong len;
7633ab6e68cSRichard Henderson     uint16_t idxmap;
7643ab6e68cSRichard Henderson     uint16_t bits;
7653960a59fSRichard Henderson } TLBFlushRangeData;
7663ab6e68cSRichard Henderson 
7676be48e45SRichard Henderson static void tlb_flush_range_by_mmuidx_async_0(CPUState *cpu,
7683960a59fSRichard Henderson                                               TLBFlushRangeData d)
7693ab6e68cSRichard Henderson {
7703ab6e68cSRichard Henderson     CPUArchState *env = cpu->env_ptr;
7713ab6e68cSRichard Henderson     int mmu_idx;
7723ab6e68cSRichard Henderson 
7733ab6e68cSRichard Henderson     assert_cpu_is_self(cpu);
7743ab6e68cSRichard Henderson 
7753c4ddec1SRichard Henderson     tlb_debug("range:" TARGET_FMT_lx "/%u+" TARGET_FMT_lx " mmu_map:0x%x\n",
7763c4ddec1SRichard Henderson               d.addr, d.bits, d.len, d.idxmap);
7773ab6e68cSRichard Henderson 
7783ab6e68cSRichard Henderson     qemu_spin_lock(&env_tlb(env)->c.lock);
7793ab6e68cSRichard Henderson     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
7803ab6e68cSRichard Henderson         if ((d.idxmap >> mmu_idx) & 1) {
7813c4ddec1SRichard Henderson             tlb_flush_range_locked(env, mmu_idx, d.addr, d.len, d.bits);
7823ab6e68cSRichard Henderson         }
7833ab6e68cSRichard Henderson     }
7843ab6e68cSRichard Henderson     qemu_spin_unlock(&env_tlb(env)->c.lock);
7853ab6e68cSRichard Henderson 
786cfc2a2d6SIdan Horowitz     /*
787cfc2a2d6SIdan Horowitz      * If the length is larger than the jump cache size, then it will take
788cfc2a2d6SIdan Horowitz      * longer to clear each entry individually than it will to clear it all.
789cfc2a2d6SIdan Horowitz      */
790cfc2a2d6SIdan Horowitz     if (d.len >= (TARGET_PAGE_SIZE * TB_JMP_CACHE_SIZE)) {
791cfc2a2d6SIdan Horowitz         cpu_tb_jmp_cache_clear(cpu);
792cfc2a2d6SIdan Horowitz         return;
793cfc2a2d6SIdan Horowitz     }
794cfc2a2d6SIdan Horowitz 
7953c4ddec1SRichard Henderson     for (target_ulong i = 0; i < d.len; i += TARGET_PAGE_SIZE) {
7963c4ddec1SRichard Henderson         tb_flush_jmp_cache(cpu, d.addr + i);
7973c4ddec1SRichard Henderson     }
7983ab6e68cSRichard Henderson }
7993ab6e68cSRichard Henderson 
800206a583dSRichard Henderson static void tlb_flush_range_by_mmuidx_async_1(CPUState *cpu,
8013ab6e68cSRichard Henderson                                               run_on_cpu_data data)
8023ab6e68cSRichard Henderson {
8033960a59fSRichard Henderson     TLBFlushRangeData *d = data.host_ptr;
8046be48e45SRichard Henderson     tlb_flush_range_by_mmuidx_async_0(cpu, *d);
8053ab6e68cSRichard Henderson     g_free(d);
8063ab6e68cSRichard Henderson }
8073ab6e68cSRichard Henderson 
808e5b1921bSRichard Henderson void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr,
809e5b1921bSRichard Henderson                                target_ulong len, uint16_t idxmap,
810e5b1921bSRichard Henderson                                unsigned bits)
8113ab6e68cSRichard Henderson {
8123960a59fSRichard Henderson     TLBFlushRangeData d;
8133ab6e68cSRichard Henderson 
814e5b1921bSRichard Henderson     /*
815e5b1921bSRichard Henderson      * If all bits are significant, and len is small,
816e5b1921bSRichard Henderson      * this devolves to tlb_flush_page.
817e5b1921bSRichard Henderson      */
818e5b1921bSRichard Henderson     if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
8193ab6e68cSRichard Henderson         tlb_flush_page_by_mmuidx(cpu, addr, idxmap);
8203ab6e68cSRichard Henderson         return;
8213ab6e68cSRichard Henderson     }
8223ab6e68cSRichard Henderson     /* If no page bits are significant, this devolves to tlb_flush. */
8233ab6e68cSRichard Henderson     if (bits < TARGET_PAGE_BITS) {
8243ab6e68cSRichard Henderson         tlb_flush_by_mmuidx(cpu, idxmap);
8253ab6e68cSRichard Henderson         return;
8263ab6e68cSRichard Henderson     }
8273ab6e68cSRichard Henderson 
8283ab6e68cSRichard Henderson     /* This should already be page aligned */
8293ab6e68cSRichard Henderson     d.addr = addr & TARGET_PAGE_MASK;
830e5b1921bSRichard Henderson     d.len = len;
8313ab6e68cSRichard Henderson     d.idxmap = idxmap;
8323ab6e68cSRichard Henderson     d.bits = bits;
8333ab6e68cSRichard Henderson 
8343ab6e68cSRichard Henderson     if (qemu_cpu_is_self(cpu)) {
8356be48e45SRichard Henderson         tlb_flush_range_by_mmuidx_async_0(cpu, d);
8363ab6e68cSRichard Henderson     } else {
8373ab6e68cSRichard Henderson         /* Otherwise allocate a structure, freed by the worker.  */
8383960a59fSRichard Henderson         TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
839206a583dSRichard Henderson         async_run_on_cpu(cpu, tlb_flush_range_by_mmuidx_async_1,
8403ab6e68cSRichard Henderson                          RUN_ON_CPU_HOST_PTR(p));
8413ab6e68cSRichard Henderson     }
8423ab6e68cSRichard Henderson }
8433ab6e68cSRichard Henderson 
844e5b1921bSRichard Henderson void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr,
845e5b1921bSRichard Henderson                                    uint16_t idxmap, unsigned bits)
846e5b1921bSRichard Henderson {
847e5b1921bSRichard Henderson     tlb_flush_range_by_mmuidx(cpu, addr, TARGET_PAGE_SIZE, idxmap, bits);
848e5b1921bSRichard Henderson }
849e5b1921bSRichard Henderson 
850600b819fSRichard Henderson void tlb_flush_range_by_mmuidx_all_cpus(CPUState *src_cpu,
851600b819fSRichard Henderson                                         target_ulong addr, target_ulong len,
852600b819fSRichard Henderson                                         uint16_t idxmap, unsigned bits)
8533ab6e68cSRichard Henderson {
8543960a59fSRichard Henderson     TLBFlushRangeData d;
855d34e4d1aSRichard Henderson     CPUState *dst_cpu;
8563ab6e68cSRichard Henderson 
857600b819fSRichard Henderson     /*
858600b819fSRichard Henderson      * If all bits are significant, and len is small,
859600b819fSRichard Henderson      * this devolves to tlb_flush_page.
860600b819fSRichard Henderson      */
861600b819fSRichard Henderson     if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
8623ab6e68cSRichard Henderson         tlb_flush_page_by_mmuidx_all_cpus(src_cpu, addr, idxmap);
8633ab6e68cSRichard Henderson         return;
8643ab6e68cSRichard Henderson     }
8653ab6e68cSRichard Henderson     /* If no page bits are significant, this devolves to tlb_flush. */
8663ab6e68cSRichard Henderson     if (bits < TARGET_PAGE_BITS) {
8673ab6e68cSRichard Henderson         tlb_flush_by_mmuidx_all_cpus(src_cpu, idxmap);
8683ab6e68cSRichard Henderson         return;
8693ab6e68cSRichard Henderson     }
8703ab6e68cSRichard Henderson 
8713ab6e68cSRichard Henderson     /* This should already be page aligned */
8723ab6e68cSRichard Henderson     d.addr = addr & TARGET_PAGE_MASK;
873600b819fSRichard Henderson     d.len = len;
8743ab6e68cSRichard Henderson     d.idxmap = idxmap;
8753ab6e68cSRichard Henderson     d.bits = bits;
8763ab6e68cSRichard Henderson 
8773ab6e68cSRichard Henderson     /* Allocate a separate data block for each destination cpu.  */
8783ab6e68cSRichard Henderson     CPU_FOREACH(dst_cpu) {
8793ab6e68cSRichard Henderson         if (dst_cpu != src_cpu) {
8803960a59fSRichard Henderson             TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
8813ab6e68cSRichard Henderson             async_run_on_cpu(dst_cpu,
882206a583dSRichard Henderson                              tlb_flush_range_by_mmuidx_async_1,
8833ab6e68cSRichard Henderson                              RUN_ON_CPU_HOST_PTR(p));
8843ab6e68cSRichard Henderson         }
8853ab6e68cSRichard Henderson     }
8863ab6e68cSRichard Henderson 
8876be48e45SRichard Henderson     tlb_flush_range_by_mmuidx_async_0(src_cpu, d);
8883ab6e68cSRichard Henderson }
8893ab6e68cSRichard Henderson 
890600b819fSRichard Henderson void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu,
891600b819fSRichard Henderson                                             target_ulong addr,
892600b819fSRichard Henderson                                             uint16_t idxmap, unsigned bits)
893600b819fSRichard Henderson {
894600b819fSRichard Henderson     tlb_flush_range_by_mmuidx_all_cpus(src_cpu, addr, TARGET_PAGE_SIZE,
895600b819fSRichard Henderson                                        idxmap, bits);
896600b819fSRichard Henderson }
897600b819fSRichard Henderson 
898c13b27d8SRichard Henderson void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
8993ab6e68cSRichard Henderson                                                target_ulong addr,
900c13b27d8SRichard Henderson                                                target_ulong len,
9013ab6e68cSRichard Henderson                                                uint16_t idxmap,
9023ab6e68cSRichard Henderson                                                unsigned bits)
9033ab6e68cSRichard Henderson {
904d34e4d1aSRichard Henderson     TLBFlushRangeData d, *p;
905d34e4d1aSRichard Henderson     CPUState *dst_cpu;
9063ab6e68cSRichard Henderson 
907c13b27d8SRichard Henderson     /*
908c13b27d8SRichard Henderson      * If all bits are significant, and len is small,
909c13b27d8SRichard Henderson      * this devolves to tlb_flush_page.
910c13b27d8SRichard Henderson      */
911c13b27d8SRichard Henderson     if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
9123ab6e68cSRichard Henderson         tlb_flush_page_by_mmuidx_all_cpus_synced(src_cpu, addr, idxmap);
9133ab6e68cSRichard Henderson         return;
9143ab6e68cSRichard Henderson     }
9153ab6e68cSRichard Henderson     /* If no page bits are significant, this devolves to tlb_flush. */
9163ab6e68cSRichard Henderson     if (bits < TARGET_PAGE_BITS) {
9173ab6e68cSRichard Henderson         tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, idxmap);
9183ab6e68cSRichard Henderson         return;
9193ab6e68cSRichard Henderson     }
9203ab6e68cSRichard Henderson 
9213ab6e68cSRichard Henderson     /* This should already be page aligned */
9223ab6e68cSRichard Henderson     d.addr = addr & TARGET_PAGE_MASK;
923c13b27d8SRichard Henderson     d.len = len;
9243ab6e68cSRichard Henderson     d.idxmap = idxmap;
9253ab6e68cSRichard Henderson     d.bits = bits;
9263ab6e68cSRichard Henderson 
9273ab6e68cSRichard Henderson     /* Allocate a separate data block for each destination cpu.  */
9283ab6e68cSRichard Henderson     CPU_FOREACH(dst_cpu) {
9293ab6e68cSRichard Henderson         if (dst_cpu != src_cpu) {
9306d244788SRichard Henderson             p = g_memdup(&d, sizeof(d));
931206a583dSRichard Henderson             async_run_on_cpu(dst_cpu, tlb_flush_range_by_mmuidx_async_1,
9323ab6e68cSRichard Henderson                              RUN_ON_CPU_HOST_PTR(p));
9333ab6e68cSRichard Henderson         }
9343ab6e68cSRichard Henderson     }
9353ab6e68cSRichard Henderson 
9366d244788SRichard Henderson     p = g_memdup(&d, sizeof(d));
937206a583dSRichard Henderson     async_safe_run_on_cpu(src_cpu, tlb_flush_range_by_mmuidx_async_1,
9383ab6e68cSRichard Henderson                           RUN_ON_CPU_HOST_PTR(p));
9393ab6e68cSRichard Henderson }
9403ab6e68cSRichard Henderson 
941c13b27d8SRichard Henderson void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
942c13b27d8SRichard Henderson                                                    target_ulong addr,
943c13b27d8SRichard Henderson                                                    uint16_t idxmap,
944c13b27d8SRichard Henderson                                                    unsigned bits)
945c13b27d8SRichard Henderson {
946c13b27d8SRichard Henderson     tlb_flush_range_by_mmuidx_all_cpus_synced(src_cpu, addr, TARGET_PAGE_SIZE,
947c13b27d8SRichard Henderson                                               idxmap, bits);
948c13b27d8SRichard Henderson }
949c13b27d8SRichard Henderson 
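/*
 * Usage sketch (hypothetical target code): a target such as Arm with
 * top-byte-ignore might flush one page everywhere while declaring that
 * only the low 56 bits of the address are significant:
 *
 *     tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, addr,
 *                                                   1u << mmu_idx, 56);
 */
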
950d9bb58e5SYang Zhong /* Update the TLBs so that writes to code in the RAM page 'ram_addr'
951d9bb58e5SYang Zhong    can be detected */
952d9bb58e5SYang Zhong void tlb_protect_code(ram_addr_t ram_addr)
953d9bb58e5SYang Zhong {
954d9bb58e5SYang Zhong     cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
955d9bb58e5SYang Zhong                                              DIRTY_MEMORY_CODE);
956d9bb58e5SYang Zhong }
957d9bb58e5SYang Zhong 
958d9bb58e5SYang Zhong /* Update the TLB so that writes in the physical page 'ram_addr' are no longer
959d9bb58e5SYang Zhong    tested for self-modifying code */
960d9bb58e5SYang Zhong void tlb_unprotect_code(ram_addr_t ram_addr)
961d9bb58e5SYang Zhong {
962d9bb58e5SYang Zhong     cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
963d9bb58e5SYang Zhong }
964d9bb58e5SYang Zhong 
965d9bb58e5SYang Zhong 
966d9bb58e5SYang Zhong /*
967d9bb58e5SYang Zhong  * Dirty write flag handling
968d9bb58e5SYang Zhong  *
969d9bb58e5SYang Zhong  * When the TCG code writes to a location it looks up the address in
970d9bb58e5SYang Zhong  * the TLB and uses that data to compute the final address. If any of
971d9bb58e5SYang Zhong  * the lower bits of the address are set then the slow path is forced.
972d9bb58e5SYang Zhong  * There are a number of reasons to do this but for normal RAM the
973d9bb58e5SYang Zhong  * most usual is detecting writes to code regions which may invalidate
974d9bb58e5SYang Zhong  * generated code.
975d9bb58e5SYang Zhong  *
97671aec354SEmilio G. Cota  * Other vCPUs might be reading their TLBs during guest execution, so we update
977d73415a3SStefan Hajnoczi  * te->addr_write with qatomic_set. We don't need to worry about this for
97871aec354SEmilio G. Cota  * oversized guests as MTTCG is disabled for them.
979d9bb58e5SYang Zhong  *
98053d28455SRichard Henderson  * Called with tlb_c.lock held.
981d9bb58e5SYang Zhong  */
98271aec354SEmilio G. Cota static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
98371aec354SEmilio G. Cota                                          uintptr_t start, uintptr_t length)
984d9bb58e5SYang Zhong {
985d9bb58e5SYang Zhong     uintptr_t addr = tlb_entry->addr_write;
986d9bb58e5SYang Zhong 
9877b0d792cSRichard Henderson     if ((addr & (TLB_INVALID_MASK | TLB_MMIO |
9887b0d792cSRichard Henderson                  TLB_DISCARD_WRITE | TLB_NOTDIRTY)) == 0) {
989d9bb58e5SYang Zhong         addr &= TARGET_PAGE_MASK;
990d9bb58e5SYang Zhong         addr += tlb_entry->addend;
991d9bb58e5SYang Zhong         if ((addr - start) < length) {
992d9bb58e5SYang Zhong #if TCG_OVERSIZED_GUEST
99371aec354SEmilio G. Cota             tlb_entry->addr_write |= TLB_NOTDIRTY;
994d9bb58e5SYang Zhong #else
995d73415a3SStefan Hajnoczi             qatomic_set(&tlb_entry->addr_write,
99671aec354SEmilio G. Cota                        tlb_entry->addr_write | TLB_NOTDIRTY);
997d9bb58e5SYang Zhong #endif
998d9bb58e5SYang Zhong         }
99971aec354SEmilio G. Cota     }
100071aec354SEmilio G. Cota }
100171aec354SEmilio G. Cota 
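/*
 * A minimal sketch of why a set flag bit forces the slow path, assuming
 * a fast-path comparison of the shape emitted by the TCG backends
 * (names simplified):
 *
 *     target_ulong cmp = addr & (TARGET_PAGE_MASK | a_mask);
 *     if (cmp != entry->addr_write) {
 *         // TLB_NOTDIRTY in addr_write guarantees a mismatch here,
 *         // routing the store through the notdirty_write() slow path.
 *     }
 */
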
100271aec354SEmilio G. Cota /*
100353d28455SRichard Henderson  * Called with tlb_c.lock held.
100471aec354SEmilio G. Cota  * Called only from the vCPU context, i.e. the TLB's owner thread.
100571aec354SEmilio G. Cota  */
100671aec354SEmilio G. Cota static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s)
100771aec354SEmilio G. Cota {
100871aec354SEmilio G. Cota     *d = *s;
100971aec354SEmilio G. Cota }
1010d9bb58e5SYang Zhong 
1011d9bb58e5SYang Zhong /* This is a cross-vCPU call (i.e. another vCPU resetting the flags of
101271aec354SEmilio G. Cota  * the target vCPU).
101353d28455SRichard Henderson  * We must take tlb_c.lock to avoid racing with another vCPU update. The only
101471aec354SEmilio G. Cota  * thing actually updated is the target TLB entry ->addr_write flags.
1015d9bb58e5SYang Zhong  */
1016d9bb58e5SYang Zhong void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
1017d9bb58e5SYang Zhong {
1018d9bb58e5SYang Zhong     CPUArchState *env;
1019d9bb58e5SYang Zhong 
1020d9bb58e5SYang Zhong     int mmu_idx;
1021d9bb58e5SYang Zhong 
1022d9bb58e5SYang Zhong     env = cpu->env_ptr;
1023a40ec84eSRichard Henderson     qemu_spin_lock(&env_tlb(env)->c.lock);
1024d9bb58e5SYang Zhong     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1025d9bb58e5SYang Zhong         unsigned int i;
1026722a1c1eSRichard Henderson         unsigned int n = tlb_n_entries(&env_tlb(env)->f[mmu_idx]);
1027d9bb58e5SYang Zhong 
102886e1eff8SEmilio G. Cota         for (i = 0; i < n; i++) {
1029a40ec84eSRichard Henderson             tlb_reset_dirty_range_locked(&env_tlb(env)->f[mmu_idx].table[i],
1030a40ec84eSRichard Henderson                                          start1, length);
1031d9bb58e5SYang Zhong         }
1032d9bb58e5SYang Zhong 
1033d9bb58e5SYang Zhong         for (i = 0; i < CPU_VTLB_SIZE; i++) {
1034a40ec84eSRichard Henderson             tlb_reset_dirty_range_locked(&env_tlb(env)->d[mmu_idx].vtable[i],
1035a40ec84eSRichard Henderson                                          start1, length);
1036d9bb58e5SYang Zhong         }
1037d9bb58e5SYang Zhong     }
1038a40ec84eSRichard Henderson     qemu_spin_unlock(&env_tlb(env)->c.lock);
1039d9bb58e5SYang Zhong }
1040d9bb58e5SYang Zhong 
104153d28455SRichard Henderson /* Called with tlb_c.lock held */
104271aec354SEmilio G. Cota static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry,
104371aec354SEmilio G. Cota                                          target_ulong vaddr)
1044d9bb58e5SYang Zhong {
1045d9bb58e5SYang Zhong     if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
1046d9bb58e5SYang Zhong         tlb_entry->addr_write = vaddr;
1047d9bb58e5SYang Zhong     }
1048d9bb58e5SYang Zhong }
1049d9bb58e5SYang Zhong 
1050d9bb58e5SYang Zhong /* update the TLB corresponding to virtual page vaddr
1051d9bb58e5SYang Zhong    so that it is no longer dirty */
1052d9bb58e5SYang Zhong void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
1053d9bb58e5SYang Zhong {
1054d9bb58e5SYang Zhong     CPUArchState *env = cpu->env_ptr;
1055d9bb58e5SYang Zhong     int mmu_idx;
1056d9bb58e5SYang Zhong 
1057d9bb58e5SYang Zhong     assert_cpu_is_self(cpu);
1058d9bb58e5SYang Zhong 
1059d9bb58e5SYang Zhong     vaddr &= TARGET_PAGE_MASK;
1060a40ec84eSRichard Henderson     qemu_spin_lock(&env_tlb(env)->c.lock);
1061d9bb58e5SYang Zhong     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1062383beda9SRichard Henderson         tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, vaddr), vaddr);
1063d9bb58e5SYang Zhong     }
1064d9bb58e5SYang Zhong 
1065d9bb58e5SYang Zhong     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1066d9bb58e5SYang Zhong         int k;
1067d9bb58e5SYang Zhong         for (k = 0; k < CPU_VTLB_SIZE; k++) {
1068a40ec84eSRichard Henderson             tlb_set_dirty1_locked(&env_tlb(env)->d[mmu_idx].vtable[k], vaddr);
1069d9bb58e5SYang Zhong         }
1070d9bb58e5SYang Zhong     }
1071a40ec84eSRichard Henderson     qemu_spin_unlock(&env_tlb(env)->c.lock);
1072d9bb58e5SYang Zhong }
1073d9bb58e5SYang Zhong 
1074d9bb58e5SYang Zhong /* Our TLB does not support large pages, so remember the area covered by
1075d9bb58e5SYang Zhong    large pages and trigger a full TLB flush if these are invalidated.  */
10761308e026SRichard Henderson static void tlb_add_large_page(CPUArchState *env, int mmu_idx,
10771308e026SRichard Henderson                                target_ulong vaddr, target_ulong size)
1078d9bb58e5SYang Zhong {
1079a40ec84eSRichard Henderson     target_ulong lp_addr = env_tlb(env)->d[mmu_idx].large_page_addr;
10801308e026SRichard Henderson     target_ulong lp_mask = ~(size - 1);
1081d9bb58e5SYang Zhong 
10821308e026SRichard Henderson     if (lp_addr == (target_ulong)-1) {
10831308e026SRichard Henderson         /* No previous large page.  */
10841308e026SRichard Henderson         lp_addr = vaddr;
10851308e026SRichard Henderson     } else {
1086d9bb58e5SYang Zhong         /* Extend the existing region to include the new page.
10871308e026SRichard Henderson            This is a compromise between unnecessary flushes and
10881308e026SRichard Henderson            the cost of maintaining a full variable-size TLB.  */
1089a40ec84eSRichard Henderson         lp_mask &= env_tlb(env)->d[mmu_idx].large_page_mask;
10901308e026SRichard Henderson         while (((lp_addr ^ vaddr) & lp_mask) != 0) {
10911308e026SRichard Henderson             lp_mask <<= 1;
1092d9bb58e5SYang Zhong         }
10931308e026SRichard Henderson     }
1094a40ec84eSRichard Henderson     env_tlb(env)->d[mmu_idx].large_page_addr = lp_addr & lp_mask;
1095a40ec84eSRichard Henderson     env_tlb(env)->d[mmu_idx].large_page_mask = lp_mask;
1096d9bb58e5SYang Zhong }
1097d9bb58e5SYang Zhong 
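/*
 * Worked example with made-up numbers: with an existing region of
 * lp_addr = 0x40200000, lp_mask = ~0x1fffff (one 2MiB block), adding a
 * 2MiB page at vaddr = 0x40600000 widens lp_mask to ~0x7fffff, so the
 * recorded region becomes 0x40000000-0x407fffff; invalidating anything
 * inside it triggers a full flush.
 */
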
1098d9bb58e5SYang Zhong /* Add a new TLB entry. At most one entry for a given virtual address
1099d9bb58e5SYang Zhong  * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
1100d9bb58e5SYang Zhong  * supplied size is only used by tlb_flush_page.
1101d9bb58e5SYang Zhong  *
1102d9bb58e5SYang Zhong  * Called from TCG-generated code, which is under an RCU read-side
1103d9bb58e5SYang Zhong  * critical section.
1104d9bb58e5SYang Zhong  */
1105d9bb58e5SYang Zhong void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
1106d9bb58e5SYang Zhong                              hwaddr paddr, MemTxAttrs attrs, int prot,
1107d9bb58e5SYang Zhong                              int mmu_idx, target_ulong size)
1108d9bb58e5SYang Zhong {
1109d9bb58e5SYang Zhong     CPUArchState *env = cpu->env_ptr;
1110a40ec84eSRichard Henderson     CPUTLB *tlb = env_tlb(env);
1111a40ec84eSRichard Henderson     CPUTLBDesc *desc = &tlb->d[mmu_idx];
1112d9bb58e5SYang Zhong     MemoryRegionSection *section;
1113d9bb58e5SYang Zhong     unsigned int index;
1114d9bb58e5SYang Zhong     target_ulong address;
11158f5db641SRichard Henderson     target_ulong write_address;
1116d9bb58e5SYang Zhong     uintptr_t addend;
111768fea038SRichard Henderson     CPUTLBEntry *te, tn;
111855df6fcfSPeter Maydell     hwaddr iotlb, xlat, sz, paddr_page;
111955df6fcfSPeter Maydell     target_ulong vaddr_page;
1120d9bb58e5SYang Zhong     int asidx = cpu_asidx_from_attrs(cpu, attrs);
112150b107c5SRichard Henderson     int wp_flags;
11228f5db641SRichard Henderson     bool is_ram, is_romd;
1123d9bb58e5SYang Zhong 
1124d9bb58e5SYang Zhong     assert_cpu_is_self(cpu);
112555df6fcfSPeter Maydell 
11261308e026SRichard Henderson     if (size <= TARGET_PAGE_SIZE) {
112755df6fcfSPeter Maydell         sz = TARGET_PAGE_SIZE;
112855df6fcfSPeter Maydell     } else {
11291308e026SRichard Henderson         tlb_add_large_page(env, mmu_idx, vaddr, size);
1130d9bb58e5SYang Zhong         sz = size;
113155df6fcfSPeter Maydell     }
113255df6fcfSPeter Maydell     vaddr_page = vaddr & TARGET_PAGE_MASK;
113355df6fcfSPeter Maydell     paddr_page = paddr & TARGET_PAGE_MASK;
113455df6fcfSPeter Maydell 
113555df6fcfSPeter Maydell     section = address_space_translate_for_iotlb(cpu, asidx, paddr_page,
113655df6fcfSPeter Maydell                                                 &xlat, &sz, attrs, &prot);
1137d9bb58e5SYang Zhong     assert(sz >= TARGET_PAGE_SIZE);
1138d9bb58e5SYang Zhong 
1139d9bb58e5SYang Zhong     tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
1140d9bb58e5SYang Zhong               " prot=%x idx=%d\n",
1141d9bb58e5SYang Zhong               vaddr, paddr, prot, mmu_idx);
1142d9bb58e5SYang Zhong 
114355df6fcfSPeter Maydell     address = vaddr_page;
114455df6fcfSPeter Maydell     if (size < TARGET_PAGE_SIZE) {
114530d7e098SRichard Henderson         /* Repeat the MMU check and TLB fill on every access.  */
114630d7e098SRichard Henderson         address |= TLB_INVALID_MASK;
114755df6fcfSPeter Maydell     }
1148a26fc6f5STony Nguyen     if (attrs.byte_swap) {
11495b87b3e6SRichard Henderson         address |= TLB_BSWAP;
1150a26fc6f5STony Nguyen     }
11518f5db641SRichard Henderson 
11528f5db641SRichard Henderson     is_ram = memory_region_is_ram(section->mr);
11538f5db641SRichard Henderson     is_romd = memory_region_is_romd(section->mr);
11548f5db641SRichard Henderson 
11558f5db641SRichard Henderson     if (is_ram || is_romd) {
11568f5db641SRichard Henderson         /* RAM and ROMD both have associated host memory. */
1157d9bb58e5SYang Zhong         addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
11588f5db641SRichard Henderson     } else {
11598f5db641SRichard Henderson         /* I/O does not; force the host address to NULL. */
11608f5db641SRichard Henderson         addend = 0;
1161d9bb58e5SYang Zhong     }
1162d9bb58e5SYang Zhong 
11638f5db641SRichard Henderson     write_address = address;
11648f5db641SRichard Henderson     if (is_ram) {
11658f5db641SRichard Henderson         iotlb = memory_region_get_ram_addr(section->mr) + xlat;
11668f5db641SRichard Henderson         /*
11678f5db641SRichard Henderson          * Computing is_clean is expensive; avoid all that unless
11688f5db641SRichard Henderson          * the page is actually writable.
11698f5db641SRichard Henderson          */
11708f5db641SRichard Henderson         if (prot & PAGE_WRITE) {
11718f5db641SRichard Henderson             if (section->readonly) {
11728f5db641SRichard Henderson                 write_address |= TLB_DISCARD_WRITE;
11738f5db641SRichard Henderson             } else if (cpu_physical_memory_is_clean(iotlb)) {
11748f5db641SRichard Henderson                 write_address |= TLB_NOTDIRTY;
11758f5db641SRichard Henderson             }
11768f5db641SRichard Henderson         }
11778f5db641SRichard Henderson     } else {
11788f5db641SRichard Henderson         /* I/O or ROMD */
11798f5db641SRichard Henderson         iotlb = memory_region_section_get_iotlb(cpu, section) + xlat;
11808f5db641SRichard Henderson         /*
11818f5db641SRichard Henderson          * Writes to romd devices must go through MMIO to enable write.
11828f5db641SRichard Henderson          * Reads to romd devices go through the ram_ptr found above,
11838f5db641SRichard Henderson          * but of course reads to I/O must go through MMIO.
11848f5db641SRichard Henderson          */
11858f5db641SRichard Henderson         write_address |= TLB_MMIO;
11868f5db641SRichard Henderson         if (!is_romd) {
11878f5db641SRichard Henderson             address = write_address;
11888f5db641SRichard Henderson         }
11898f5db641SRichard Henderson     }
11908f5db641SRichard Henderson 
119150b107c5SRichard Henderson     wp_flags = cpu_watchpoint_address_matches(cpu, vaddr_page,
119250b107c5SRichard Henderson                                               TARGET_PAGE_SIZE);
1193d9bb58e5SYang Zhong 
1194383beda9SRichard Henderson     index = tlb_index(env, mmu_idx, vaddr_page);
1195383beda9SRichard Henderson     te = tlb_entry(env, mmu_idx, vaddr_page);
1196d9bb58e5SYang Zhong 
119768fea038SRichard Henderson     /*
119871aec354SEmilio G. Cota      * Hold the TLB lock for the rest of the function. We could acquire/release
119971aec354SEmilio G. Cota      * the lock several times in the function, but it is faster to amortize the
120071aec354SEmilio G. Cota      * acquisition cost by acquiring it just once. Note that this leads to
120171aec354SEmilio G. Cota      * a longer critical section, but this is not a concern since the TLB lock
120271aec354SEmilio G. Cota      * is unlikely to be contended.
120371aec354SEmilio G. Cota      */
1204a40ec84eSRichard Henderson     qemu_spin_lock(&tlb->c.lock);
120571aec354SEmilio G. Cota 
12063d1523ceSRichard Henderson     /* Note that the tlb is no longer clean.  */
1207a40ec84eSRichard Henderson     tlb->c.dirty |= 1 << mmu_idx;
12083d1523ceSRichard Henderson 
120971aec354SEmilio G. Cota     /* Make sure there's no cached translation for the new page.  */
121071aec354SEmilio G. Cota     tlb_flush_vtlb_page_locked(env, mmu_idx, vaddr_page);
121171aec354SEmilio G. Cota 
121271aec354SEmilio G. Cota     /*
121368fea038SRichard Henderson      * Only evict the old entry to the victim tlb if it's for a
121468fea038SRichard Henderson      * different page; otherwise just overwrite the stale data.
121568fea038SRichard Henderson      */
12163cea94bbSEmilio G. Cota     if (!tlb_hit_page_anyprot(te, vaddr_page) && !tlb_entry_is_empty(te)) {
1217a40ec84eSRichard Henderson         unsigned vidx = desc->vindex++ % CPU_VTLB_SIZE;
1218a40ec84eSRichard Henderson         CPUTLBEntry *tv = &desc->vtable[vidx];
121968fea038SRichard Henderson 
122068fea038SRichard Henderson         /* Evict the old entry into the victim tlb.  */
122171aec354SEmilio G. Cota         copy_tlb_helper_locked(tv, te);
122225d3ec58SRichard Henderson         desc->vfulltlb[vidx] = desc->fulltlb[index];
122386e1eff8SEmilio G. Cota         tlb_n_used_entries_dec(env, mmu_idx);
122468fea038SRichard Henderson     }
1225d9bb58e5SYang Zhong 
1226d9bb58e5SYang Zhong     /* refill the tlb */
1227ace41090SPeter Maydell     /*
1228ace41090SPeter Maydell      * At this point iotlb contains a physical section number in the lower
1229ace41090SPeter Maydell      * TARGET_PAGE_BITS, and either
12308f5db641SRichard Henderson      *  + the ram_addr_t of the page base of the target RAM (RAM)
12318f5db641SRichard Henderson      *  + the offset within section->mr of the page base (I/O, ROMD)
123255df6fcfSPeter Maydell      * We subtract the vaddr_page (which is page aligned and thus won't
1233ace41090SPeter Maydell      * disturb the low bits) to give an offset which can be added to the
1234ace41090SPeter Maydell      * (non-page-aligned) vaddr of the eventual memory access to get
1235ace41090SPeter Maydell      * the MemoryRegion offset for the access. Note that the vaddr we
1236ace41090SPeter Maydell      * subtract here is that of the page base, and not the same as the
1237ace41090SPeter Maydell      * vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
1238ace41090SPeter Maydell      */
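    /*
     * Worked example with made-up numbers: if vaddr_page 0x00120000
     * maps to iotlb 0x00340000, we store 0x00340000 - 0x00120000 =
     * 0x00220000, and an access at vaddr 0x00120123 later recovers
     * 0x00340123 by simple addition.
     */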
123925d3ec58SRichard Henderson     desc->fulltlb[index].xlat_section = iotlb - vaddr_page;
124025d3ec58SRichard Henderson     desc->fulltlb[index].attrs = attrs;
1241d9bb58e5SYang Zhong 
1242d9bb58e5SYang Zhong     /* Now calculate the new entry */
124355df6fcfSPeter Maydell     tn.addend = addend - vaddr_page;
1244d9bb58e5SYang Zhong     if (prot & PAGE_READ) {
1245d9bb58e5SYang Zhong         tn.addr_read = address;
124650b107c5SRichard Henderson         if (wp_flags & BP_MEM_READ) {
124750b107c5SRichard Henderson             tn.addr_read |= TLB_WATCHPOINT;
124850b107c5SRichard Henderson         }
1249d9bb58e5SYang Zhong     } else {
1250d9bb58e5SYang Zhong         tn.addr_read = -1;
1251d9bb58e5SYang Zhong     }
1252d9bb58e5SYang Zhong 
1253d9bb58e5SYang Zhong     if (prot & PAGE_EXEC) {
12548f5db641SRichard Henderson         tn.addr_code = address;
1255d9bb58e5SYang Zhong     } else {
1256d9bb58e5SYang Zhong         tn.addr_code = -1;
1257d9bb58e5SYang Zhong     }
1258d9bb58e5SYang Zhong 
1259d9bb58e5SYang Zhong     tn.addr_write = -1;
1260d9bb58e5SYang Zhong     if (prot & PAGE_WRITE) {
12618f5db641SRichard Henderson         tn.addr_write = write_address;
1262f52bfb12SDavid Hildenbrand         if (prot & PAGE_WRITE_INV) {
1263f52bfb12SDavid Hildenbrand             tn.addr_write |= TLB_INVALID_MASK;
1264f52bfb12SDavid Hildenbrand         }
126550b107c5SRichard Henderson         if (wp_flags & BP_MEM_WRITE) {
126650b107c5SRichard Henderson             tn.addr_write |= TLB_WATCHPOINT;
126750b107c5SRichard Henderson         }
1268d9bb58e5SYang Zhong     }
1269d9bb58e5SYang Zhong 
127071aec354SEmilio G. Cota     copy_tlb_helper_locked(te, &tn);
127186e1eff8SEmilio G. Cota     tlb_n_used_entries_inc(env, mmu_idx);
1272a40ec84eSRichard Henderson     qemu_spin_unlock(&tlb->c.lock);
1273d9bb58e5SYang Zhong }
1274d9bb58e5SYang Zhong 
1275d9bb58e5SYang Zhong /* Add a new TLB entry, but without specifying the memory
1276d9bb58e5SYang Zhong  * transaction attributes to be used.
1277d9bb58e5SYang Zhong  */
1278d9bb58e5SYang Zhong void tlb_set_page(CPUState *cpu, target_ulong vaddr,
1279d9bb58e5SYang Zhong                   hwaddr paddr, int prot,
1280d9bb58e5SYang Zhong                   int mmu_idx, target_ulong size)
1281d9bb58e5SYang Zhong {
1282d9bb58e5SYang Zhong     tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
1283d9bb58e5SYang Zhong                             prot, mmu_idx, size);
1284d9bb58e5SYang Zhong }
1285d9bb58e5SYang Zhong 
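/*
 * Example call (hypothetical target code): a tlb_fill hook mapping the
 * faulting page 1:1, read/write, with no special memory attributes:
 *
 *     tlb_set_page(cs, addr & TARGET_PAGE_MASK, addr & TARGET_PAGE_MASK,
 *                  PAGE_READ | PAGE_WRITE, mmu_idx, TARGET_PAGE_SIZE);
 */
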
1286c319dc13SRichard Henderson /*
1287c319dc13SRichard Henderson  * Note: tlb_fill() can trigger a resize of the TLB. This means that all of the
1288c319dc13SRichard Henderson  * caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must
1289c319dc13SRichard Henderson  * be discarded and looked up again (e.g. via tlb_entry()).
1290c319dc13SRichard Henderson  */
1291c319dc13SRichard Henderson static void tlb_fill(CPUState *cpu, target_ulong addr, int size,
1292c319dc13SRichard Henderson                      MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
1293c319dc13SRichard Henderson {
1294c319dc13SRichard Henderson     bool ok;
1295c319dc13SRichard Henderson 
1296c319dc13SRichard Henderson     /*
1297c319dc13SRichard Henderson      * This is not a probe, so the only valid return is success; failure
1298c319dc13SRichard Henderson      * should result in an exception + longjmp to the cpu loop.
1299c319dc13SRichard Henderson      */
13008810ee2aSAlex Bennée     ok = cpu->cc->tcg_ops->tlb_fill(cpu, addr, size,
1301e124536fSEduardo Habkost                                     access_type, mmu_idx, false, retaddr);
1302c319dc13SRichard Henderson     assert(ok);
1303c319dc13SRichard Henderson }
1304c319dc13SRichard Henderson 
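/*
 * Sketch of the caller pattern implied by the comment above: any cached
 * index or entry pointer must be refetched after tlb_fill, e.g.
 *
 *     tlb_fill(env_cpu(env), addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
 *     index = tlb_index(env, mmu_idx, addr);
 *     entry = tlb_entry(env, mmu_idx, addr);
 */
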
130578271684SClaudio Fontana static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
130678271684SClaudio Fontana                                         MMUAccessType access_type,
130778271684SClaudio Fontana                                         int mmu_idx, uintptr_t retaddr)
130878271684SClaudio Fontana {
13098810ee2aSAlex Bennée     cpu->cc->tcg_ops->do_unaligned_access(cpu, addr, access_type,
13108810ee2aSAlex Bennée                                           mmu_idx, retaddr);
131178271684SClaudio Fontana }
131278271684SClaudio Fontana 
131378271684SClaudio Fontana static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr,
131478271684SClaudio Fontana                                           vaddr addr, unsigned size,
131578271684SClaudio Fontana                                           MMUAccessType access_type,
131678271684SClaudio Fontana                                           int mmu_idx, MemTxAttrs attrs,
131778271684SClaudio Fontana                                           MemTxResult response,
131878271684SClaudio Fontana                                           uintptr_t retaddr)
131978271684SClaudio Fontana {
132078271684SClaudio Fontana     CPUClass *cc = CPU_GET_CLASS(cpu);
132178271684SClaudio Fontana 
132278271684SClaudio Fontana     if (!cpu->ignore_memory_transaction_failures &&
132378271684SClaudio Fontana         cc->tcg_ops->do_transaction_failed) {
132478271684SClaudio Fontana         cc->tcg_ops->do_transaction_failed(cpu, physaddr, addr, size,
132578271684SClaudio Fontana                                            access_type, mmu_idx, attrs,
132678271684SClaudio Fontana                                            response, retaddr);
132778271684SClaudio Fontana     }
132878271684SClaudio Fontana }
132978271684SClaudio Fontana 
133025d3ec58SRichard Henderson static uint64_t io_readx(CPUArchState *env, CPUTLBEntryFull *full,
1331f1be3696SRichard Henderson                          int mmu_idx, target_ulong addr, uintptr_t retaddr,
1332be5c4787STony Nguyen                          MMUAccessType access_type, MemOp op)
1333d9bb58e5SYang Zhong {
133429a0af61SRichard Henderson     CPUState *cpu = env_cpu(env);
13352d54f194SPeter Maydell     hwaddr mr_offset;
13362d54f194SPeter Maydell     MemoryRegionSection *section;
13372d54f194SPeter Maydell     MemoryRegion *mr;
1338d9bb58e5SYang Zhong     uint64_t val;
1339d9bb58e5SYang Zhong     bool locked = false;
134004e3aabdSPeter Maydell     MemTxResult r;
1341d9bb58e5SYang Zhong 
134225d3ec58SRichard Henderson     section = iotlb_to_section(cpu, full->xlat_section, full->attrs);
13432d54f194SPeter Maydell     mr = section->mr;
134425d3ec58SRichard Henderson     mr_offset = (full->xlat_section & TARGET_PAGE_MASK) + addr;
1345d9bb58e5SYang Zhong     cpu->mem_io_pc = retaddr;
134608565552SRichard Henderson     if (!cpu->can_do_io) {
1347d9bb58e5SYang Zhong         cpu_io_recompile(cpu, retaddr);
1348d9bb58e5SYang Zhong     }
1349d9bb58e5SYang Zhong 
135041744954SPhilippe Mathieu-Daudé     if (!qemu_mutex_iothread_locked()) {
1351d9bb58e5SYang Zhong         qemu_mutex_lock_iothread();
1352d9bb58e5SYang Zhong         locked = true;
1353d9bb58e5SYang Zhong     }
135425d3ec58SRichard Henderson     r = memory_region_dispatch_read(mr, mr_offset, &val, op, full->attrs);
135504e3aabdSPeter Maydell     if (r != MEMTX_OK) {
13562d54f194SPeter Maydell         hwaddr physaddr = mr_offset +
13572d54f194SPeter Maydell             section->offset_within_address_space -
13582d54f194SPeter Maydell             section->offset_within_region;
13592d54f194SPeter Maydell 
1360be5c4787STony Nguyen         cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), access_type,
136125d3ec58SRichard Henderson                                mmu_idx, full->attrs, r, retaddr);
136204e3aabdSPeter Maydell     }
1363d9bb58e5SYang Zhong     if (locked) {
1364d9bb58e5SYang Zhong         qemu_mutex_unlock_iothread();
1365d9bb58e5SYang Zhong     }
1366d9bb58e5SYang Zhong 
1367d9bb58e5SYang Zhong     return val;
1368d9bb58e5SYang Zhong }
1369d9bb58e5SYang Zhong 
13702f3a57eeSAlex Bennée /*
137125d3ec58SRichard Henderson  * Save a potentially trashed CPUTLBEntryFull for later lookup by a plugin.
137225d3ec58SRichard Henderson  * This is read by tlb_plugin_lookup if the fulltlb entry doesn't match,
1373570ef309SAlex Bennée  * because io_writex has the side effect of changing the memory layout.
13742f3a57eeSAlex Bennée  */
137537523ff7SRichard Henderson static void save_iotlb_data(CPUState *cs, MemoryRegionSection *section,
137637523ff7SRichard Henderson                             hwaddr mr_offset)
13772f3a57eeSAlex Bennée {
13782f3a57eeSAlex Bennée #ifdef CONFIG_PLUGIN
13792f3a57eeSAlex Bennée     SavedIOTLB *saved = &cs->saved_iotlb;
13802f3a57eeSAlex Bennée     saved->section = section;
13812f3a57eeSAlex Bennée     saved->mr_offset = mr_offset;
13822f3a57eeSAlex Bennée #endif
13832f3a57eeSAlex Bennée }
13842f3a57eeSAlex Bennée 
138525d3ec58SRichard Henderson static void io_writex(CPUArchState *env, CPUTLBEntryFull *full,
1386f1be3696SRichard Henderson                       int mmu_idx, uint64_t val, target_ulong addr,
1387be5c4787STony Nguyen                       uintptr_t retaddr, MemOp op)
1388d9bb58e5SYang Zhong {
138929a0af61SRichard Henderson     CPUState *cpu = env_cpu(env);
13902d54f194SPeter Maydell     hwaddr mr_offset;
13912d54f194SPeter Maydell     MemoryRegionSection *section;
13922d54f194SPeter Maydell     MemoryRegion *mr;
1393d9bb58e5SYang Zhong     bool locked = false;
139404e3aabdSPeter Maydell     MemTxResult r;
1395d9bb58e5SYang Zhong 
139625d3ec58SRichard Henderson     section = iotlb_to_section(cpu, full->xlat_section, full->attrs);
13972d54f194SPeter Maydell     mr = section->mr;
139825d3ec58SRichard Henderson     mr_offset = (full->xlat_section & TARGET_PAGE_MASK) + addr;
139908565552SRichard Henderson     if (!cpu->can_do_io) {
1400d9bb58e5SYang Zhong         cpu_io_recompile(cpu, retaddr);
1401d9bb58e5SYang Zhong     }
1402d9bb58e5SYang Zhong     cpu->mem_io_pc = retaddr;
1403d9bb58e5SYang Zhong 
14042f3a57eeSAlex Bennée     /*
14052f3a57eeSAlex Bennée      * The memory_region_dispatch_write() below may trigger a flush/resize,
14062f3a57eeSAlex Bennée      * so for plugins we save the iotlb data just in case.
14072f3a57eeSAlex Bennée      */
140837523ff7SRichard Henderson     save_iotlb_data(cpu, section, mr_offset);
14092f3a57eeSAlex Bennée 
141041744954SPhilippe Mathieu-Daudé     if (!qemu_mutex_iothread_locked()) {
1411d9bb58e5SYang Zhong         qemu_mutex_lock_iothread();
1412d9bb58e5SYang Zhong         locked = true;
1413d9bb58e5SYang Zhong     }
141425d3ec58SRichard Henderson     r = memory_region_dispatch_write(mr, mr_offset, val, op, full->attrs);
141504e3aabdSPeter Maydell     if (r != MEMTX_OK) {
14162d54f194SPeter Maydell         hwaddr physaddr = mr_offset +
14172d54f194SPeter Maydell             section->offset_within_address_space -
14182d54f194SPeter Maydell             section->offset_within_region;
14192d54f194SPeter Maydell 
1420be5c4787STony Nguyen         cpu_transaction_failed(cpu, physaddr, addr, memop_size(op),
142125d3ec58SRichard Henderson                                MMU_DATA_STORE, mmu_idx, full->attrs, r,
1422be5c4787STony Nguyen                                retaddr);
142304e3aabdSPeter Maydell     }
1424d9bb58e5SYang Zhong     if (locked) {
1425d9bb58e5SYang Zhong         qemu_mutex_unlock_iothread();
1426d9bb58e5SYang Zhong     }
1427d9bb58e5SYang Zhong }
1428d9bb58e5SYang Zhong 
14294811e909SRichard Henderson static inline target_ulong tlb_read_ofs(CPUTLBEntry *entry, size_t ofs)
14304811e909SRichard Henderson {
14314811e909SRichard Henderson #if TCG_OVERSIZED_GUEST
14324811e909SRichard Henderson     return *(target_ulong *)((uintptr_t)entry + ofs);
14334811e909SRichard Henderson #else
1434d73415a3SStefan Hajnoczi     /* ofs might correspond to .addr_write, so use qatomic_read */
1435d73415a3SStefan Hajnoczi     return qatomic_read((target_ulong *)((uintptr_t)entry + ofs));
14364811e909SRichard Henderson #endif
14374811e909SRichard Henderson }
14384811e909SRichard Henderson 
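/*
 * Usage sketch: callers pass the byte offset of the comparator they
 * want to read, e.g.
 *
 *     tlb_addr = tlb_read_ofs(entry, offsetof(CPUTLBEntry, addr_read));
 */
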
1439d9bb58e5SYang Zhong /* Return true if ADDR is present in the victim tlb, and has been copied
1440d9bb58e5SYang Zhong    back to the main tlb.  */
1441d9bb58e5SYang Zhong static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
1442d9bb58e5SYang Zhong                            size_t elt_ofs, target_ulong page)
1443d9bb58e5SYang Zhong {
1444d9bb58e5SYang Zhong     size_t vidx;
144571aec354SEmilio G. Cota 
144629a0af61SRichard Henderson     assert_cpu_is_self(env_cpu(env));
1447d9bb58e5SYang Zhong     for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
1448a40ec84eSRichard Henderson         CPUTLBEntry *vtlb = &env_tlb(env)->d[mmu_idx].vtable[vidx];
1449a40ec84eSRichard Henderson         target_ulong cmp;
1450a40ec84eSRichard Henderson 
1451d73415a3SStefan Hajnoczi         /* elt_ofs might correspond to .addr_write, so use qatomic_read */
1452a40ec84eSRichard Henderson #if TCG_OVERSIZED_GUEST
1453a40ec84eSRichard Henderson         cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);
1454a40ec84eSRichard Henderson #else
1455d73415a3SStefan Hajnoczi         cmp = qatomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs));
1456a40ec84eSRichard Henderson #endif
1457d9bb58e5SYang Zhong 
1458d9bb58e5SYang Zhong         if (cmp == page) {
1459d9bb58e5SYang Zhong             /* Found entry in victim tlb, swap tlb and iotlb.  */
1460a40ec84eSRichard Henderson             CPUTLBEntry tmptlb, *tlb = &env_tlb(env)->f[mmu_idx].table[index];
1461d9bb58e5SYang Zhong 
1462a40ec84eSRichard Henderson             qemu_spin_lock(&env_tlb(env)->c.lock);
146371aec354SEmilio G. Cota             copy_tlb_helper_locked(&tmptlb, tlb);
146471aec354SEmilio G. Cota             copy_tlb_helper_locked(tlb, vtlb);
146571aec354SEmilio G. Cota             copy_tlb_helper_locked(vtlb, &tmptlb);
1466a40ec84eSRichard Henderson             qemu_spin_unlock(&env_tlb(env)->c.lock);
1467d9bb58e5SYang Zhong 
146825d3ec58SRichard Henderson             CPUTLBEntryFull *f1 = &env_tlb(env)->d[mmu_idx].fulltlb[index];
146925d3ec58SRichard Henderson             CPUTLBEntryFull *f2 = &env_tlb(env)->d[mmu_idx].vfulltlb[vidx];
147025d3ec58SRichard Henderson             CPUTLBEntryFull tmpf;
147125d3ec58SRichard Henderson             tmpf = *f1; *f1 = *f2; *f2 = tmpf;
1472d9bb58e5SYang Zhong             return true;
1473d9bb58e5SYang Zhong         }
1474d9bb58e5SYang Zhong     }
1475d9bb58e5SYang Zhong     return false;
1476d9bb58e5SYang Zhong }
1477d9bb58e5SYang Zhong 
1478d9bb58e5SYang Zhong /* Macro to call the above, with local variables from the use context.  */
1479d9bb58e5SYang Zhong #define VICTIM_TLB_HIT(TY, ADDR) \
1480d9bb58e5SYang Zhong   victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
1481d9bb58e5SYang Zhong                  (ADDR) & TARGET_PAGE_MASK)
1482d9bb58e5SYang Zhong 
1483707526adSRichard Henderson static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
148425d3ec58SRichard Henderson                            CPUTLBEntryFull *full, uintptr_t retaddr)
1485707526adSRichard Henderson {
148625d3ec58SRichard Henderson     ram_addr_t ram_addr = mem_vaddr + full->xlat_section;
1487707526adSRichard Henderson 
1488707526adSRichard Henderson     trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size);
1489707526adSRichard Henderson 
1490707526adSRichard Henderson     if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
1491707526adSRichard Henderson         struct page_collection *pages
1492707526adSRichard Henderson             = page_collection_lock(ram_addr, ram_addr + size);
14935a7c27bbSRichard Henderson         tb_invalidate_phys_page_fast(pages, ram_addr, size, retaddr);
1494707526adSRichard Henderson         page_collection_unlock(pages);
1495707526adSRichard Henderson     }
1496707526adSRichard Henderson 
1497707526adSRichard Henderson     /*
1498707526adSRichard Henderson      * Set both VGA and migration bits for simplicity and to remove
1499707526adSRichard Henderson      * the notdirty callback faster.
1500707526adSRichard Henderson      */
1501707526adSRichard Henderson     cpu_physical_memory_set_dirty_range(ram_addr, size, DIRTY_CLIENTS_NOCODE);
1502707526adSRichard Henderson 
1503707526adSRichard Henderson     /* We remove the notdirty callback only if the code has been flushed. */
1504707526adSRichard Henderson     if (!cpu_physical_memory_is_clean(ram_addr)) {
1505707526adSRichard Henderson         trace_memory_notdirty_set_dirty(mem_vaddr);
1506707526adSRichard Henderson         tlb_set_dirty(cpu, mem_vaddr);
1507707526adSRichard Henderson     }
1508707526adSRichard Henderson }
1509707526adSRichard Henderson 
1510069cfe77SRichard Henderson static int probe_access_internal(CPUArchState *env, target_ulong addr,
1511069cfe77SRichard Henderson                                  int fault_size, MMUAccessType access_type,
1512069cfe77SRichard Henderson                                  int mmu_idx, bool nonfault,
1513*af803a4fSRichard Henderson                                  void **phost, CPUTLBEntryFull **pfull,
1514*af803a4fSRichard Henderson                                  uintptr_t retaddr)
1515d9bb58e5SYang Zhong {
1516383beda9SRichard Henderson     uintptr_t index = tlb_index(env, mmu_idx, addr);
1517383beda9SRichard Henderson     CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
1518069cfe77SRichard Henderson     target_ulong tlb_addr, page_addr;
1519c25c283dSDavid Hildenbrand     size_t elt_ofs;
1520069cfe77SRichard Henderson     int flags;
1521ca86cf32SDavid Hildenbrand 
1522c25c283dSDavid Hildenbrand     switch (access_type) {
1523c25c283dSDavid Hildenbrand     case MMU_DATA_LOAD:
1524c25c283dSDavid Hildenbrand         elt_ofs = offsetof(CPUTLBEntry, addr_read);
1525c25c283dSDavid Hildenbrand         break;
1526c25c283dSDavid Hildenbrand     case MMU_DATA_STORE:
1527c25c283dSDavid Hildenbrand         elt_ofs = offsetof(CPUTLBEntry, addr_write);
1528c25c283dSDavid Hildenbrand         break;
1529c25c283dSDavid Hildenbrand     case MMU_INST_FETCH:
1530c25c283dSDavid Hildenbrand         elt_ofs = offsetof(CPUTLBEntry, addr_code);
1531c25c283dSDavid Hildenbrand         break;
1532c25c283dSDavid Hildenbrand     default:
1533c25c283dSDavid Hildenbrand         g_assert_not_reached();
1534c25c283dSDavid Hildenbrand     }
1535c25c283dSDavid Hildenbrand     tlb_addr = tlb_read_ofs(entry, elt_ofs);
1536c25c283dSDavid Hildenbrand 
1537c3c8bf57SRichard Henderson     flags = TLB_FLAGS_MASK;
1538069cfe77SRichard Henderson     page_addr = addr & TARGET_PAGE_MASK;
1539069cfe77SRichard Henderson     if (!tlb_hit_page(tlb_addr, page_addr)) {
1540069cfe77SRichard Henderson         if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, page_addr)) {
1541069cfe77SRichard Henderson             CPUState *cs = env_cpu(env);
1542069cfe77SRichard Henderson 
15438810ee2aSAlex Bennée             if (!cs->cc->tcg_ops->tlb_fill(cs, addr, fault_size, access_type,
1544069cfe77SRichard Henderson                                            mmu_idx, nonfault, retaddr)) {
1545069cfe77SRichard Henderson                 /* Non-faulting page table read failed.  */
1546069cfe77SRichard Henderson                 *phost = NULL;
1547*af803a4fSRichard Henderson                 *pfull = NULL;
1548069cfe77SRichard Henderson                 return TLB_INVALID_MASK;
1549069cfe77SRichard Henderson             }
1550069cfe77SRichard Henderson 
155103a98189SDavid Hildenbrand             /* TLB resize via tlb_fill may have moved the entry.  */
1552*af803a4fSRichard Henderson             index = tlb_index(env, mmu_idx, addr);
155303a98189SDavid Hildenbrand             entry = tlb_entry(env, mmu_idx, addr);
1554c3c8bf57SRichard Henderson 
1555c3c8bf57SRichard Henderson             /*
1556c3c8bf57SRichard Henderson              * With PAGE_WRITE_INV, we set TLB_INVALID_MASK immediately,
1557c3c8bf57SRichard Henderson              * to force the next access through tlb_fill.  We've just
1558c3c8bf57SRichard Henderson              * called tlb_fill, so we know that this entry *is* valid.
1559c3c8bf57SRichard Henderson              */
1560c3c8bf57SRichard Henderson             flags &= ~TLB_INVALID_MASK;
1561d9bb58e5SYang Zhong         }
1562c25c283dSDavid Hildenbrand         tlb_addr = tlb_read_ofs(entry, elt_ofs);
156303a98189SDavid Hildenbrand     }
1564c3c8bf57SRichard Henderson     flags &= tlb_addr;
156503a98189SDavid Hildenbrand 
1566*af803a4fSRichard Henderson     *pfull = &env_tlb(env)->d[mmu_idx].fulltlb[index];
1567*af803a4fSRichard Henderson 
1568069cfe77SRichard Henderson     /* Fold all "mmio-like" bits into TLB_MMIO.  This is not RAM.  */
1569069cfe77SRichard Henderson     if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY))) {
1570069cfe77SRichard Henderson         *phost = NULL;
1571069cfe77SRichard Henderson         return TLB_MMIO;
1572fef39ccdSDavid Hildenbrand     }
1573fef39ccdSDavid Hildenbrand 
1574069cfe77SRichard Henderson     /* Everything else is RAM. */
1575069cfe77SRichard Henderson     *phost = (void *)((uintptr_t)addr + entry->addend);
1576069cfe77SRichard Henderson     return flags;
1577069cfe77SRichard Henderson }
1578069cfe77SRichard Henderson 
1579*af803a4fSRichard Henderson int probe_access_full(CPUArchState *env, target_ulong addr,
1580069cfe77SRichard Henderson                       MMUAccessType access_type, int mmu_idx,
1581*af803a4fSRichard Henderson                       bool nonfault, void **phost, CPUTLBEntryFull **pfull,
1582*af803a4fSRichard Henderson                       uintptr_t retaddr)
1583069cfe77SRichard Henderson {
1584*af803a4fSRichard Henderson     int flags = probe_access_internal(env, addr, 0, access_type, mmu_idx,
1585*af803a4fSRichard Henderson                                       nonfault, phost, pfull, retaddr);
1586069cfe77SRichard Henderson 
1587069cfe77SRichard Henderson     /* Handle clean RAM pages.  */
1588069cfe77SRichard Henderson     if (unlikely(flags & TLB_NOTDIRTY)) {
1589*af803a4fSRichard Henderson         notdirty_write(env_cpu(env), addr, 1, *pfull, retaddr);
1590069cfe77SRichard Henderson         flags &= ~TLB_NOTDIRTY;
1591069cfe77SRichard Henderson     }
1592069cfe77SRichard Henderson 
1593069cfe77SRichard Henderson     return flags;
1594069cfe77SRichard Henderson }
1595069cfe77SRichard Henderson 
1596*af803a4fSRichard Henderson int probe_access_flags(CPUArchState *env, target_ulong addr,
1597*af803a4fSRichard Henderson                        MMUAccessType access_type, int mmu_idx,
1598*af803a4fSRichard Henderson                        bool nonfault, void **phost, uintptr_t retaddr)
1599*af803a4fSRichard Henderson {
1600*af803a4fSRichard Henderson     CPUTLBEntryFull *full;
1601*af803a4fSRichard Henderson 
1602*af803a4fSRichard Henderson     return probe_access_full(env, addr, access_type, mmu_idx,
1603*af803a4fSRichard Henderson                              nonfault, phost, &full, retaddr);
1604*af803a4fSRichard Henderson }
1605*af803a4fSRichard Henderson 
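/*
 * Example (hypothetical target code): a non-faulting probe before a
 * store, as used when a page table walk must not raise an exception:
 *
 *     void *host;
 *     int flags = probe_access_flags(env, addr, MMU_DATA_STORE, mmu_idx,
 *                                    true, &host, retaddr);
 *     if (flags & TLB_INVALID_MASK) {
 *         // the fill failed without faulting; host is NULL
 *     }
 */
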
1606069cfe77SRichard Henderson void *probe_access(CPUArchState *env, target_ulong addr, int size,
1607069cfe77SRichard Henderson                    MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
1608069cfe77SRichard Henderson {
1609*af803a4fSRichard Henderson     CPUTLBEntryFull *full;
1610069cfe77SRichard Henderson     void *host;
1611069cfe77SRichard Henderson     int flags;
1612069cfe77SRichard Henderson 
1613069cfe77SRichard Henderson     g_assert(-(addr | TARGET_PAGE_MASK) >= size);
1614069cfe77SRichard Henderson 
1615069cfe77SRichard Henderson     flags = probe_access_internal(env, addr, size, access_type, mmu_idx,
1616*af803a4fSRichard Henderson                                   false, &host, &full, retaddr);
1617069cfe77SRichard Henderson 
1618069cfe77SRichard Henderson     /* Per the interface, size == 0 merely faults the access. */
1619069cfe77SRichard Henderson     if (size == 0) {
162073bc0bd4SRichard Henderson         return NULL;
162173bc0bd4SRichard Henderson     }
162273bc0bd4SRichard Henderson 
1623069cfe77SRichard Henderson     if (unlikely(flags & (TLB_NOTDIRTY | TLB_WATCHPOINT))) {
162403a98189SDavid Hildenbrand         /* Handle watchpoints.  */
1625069cfe77SRichard Henderson         if (flags & TLB_WATCHPOINT) {
1626069cfe77SRichard Henderson             int wp_access = (access_type == MMU_DATA_STORE
1627069cfe77SRichard Henderson                              ? BP_MEM_WRITE : BP_MEM_READ);
162803a98189SDavid Hildenbrand             cpu_check_watchpoint(env_cpu(env), addr, size,
162925d3ec58SRichard Henderson                                  full->attrs, wp_access, retaddr);
1630d9bb58e5SYang Zhong         }
1631fef39ccdSDavid Hildenbrand 
163273bc0bd4SRichard Henderson         /* Handle clean RAM pages.  */
1633069cfe77SRichard Henderson         if (flags & TLB_NOTDIRTY) {
163425d3ec58SRichard Henderson             notdirty_write(env_cpu(env), addr, 1, full, retaddr);
163573bc0bd4SRichard Henderson         }
1636fef39ccdSDavid Hildenbrand     }
1637fef39ccdSDavid Hildenbrand 
1638069cfe77SRichard Henderson     return host;
1639d9bb58e5SYang Zhong }
1640d9bb58e5SYang Zhong 
16414811e909SRichard Henderson void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
16424811e909SRichard Henderson                         MMUAccessType access_type, int mmu_idx)
16434811e909SRichard Henderson {
1644*af803a4fSRichard Henderson     CPUTLBEntryFull *full;
1645069cfe77SRichard Henderson     void *host;
1646069cfe77SRichard Henderson     int flags;
16474811e909SRichard Henderson 
1648069cfe77SRichard Henderson     flags = probe_access_internal(env, addr, 0, access_type,
1649*af803a4fSRichard Henderson                                   mmu_idx, true, &host, &full, 0);
1650069cfe77SRichard Henderson 
1651069cfe77SRichard Henderson     /* No combination of flags is expected by the caller. */
1652069cfe77SRichard Henderson     return flags ? NULL : host;
16534811e909SRichard Henderson }
16544811e909SRichard Henderson 
16557e0d9973SRichard Henderson /*
16567e0d9973SRichard Henderson  * Return a ram_addr_t for the virtual address for execution.
16577e0d9973SRichard Henderson  *
16587e0d9973SRichard Henderson  * Return -1 if we can't translate and execute from an entire page
16597e0d9973SRichard Henderson  * of RAM.  This will force us to execute by loading and translating
16607e0d9973SRichard Henderson  * one insn at a time, without caching.
16617e0d9973SRichard Henderson  *
16627e0d9973SRichard Henderson  * NOTE: This function will trigger an exception if the page is
16637e0d9973SRichard Henderson  * not executable.
16647e0d9973SRichard Henderson  */
16657e0d9973SRichard Henderson tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
16667e0d9973SRichard Henderson                                         void **hostp)
16677e0d9973SRichard Henderson {
1668*af803a4fSRichard Henderson     CPUTLBEntryFull *full;
16697e0d9973SRichard Henderson     void *p;
16707e0d9973SRichard Henderson 
16717e0d9973SRichard Henderson     (void)probe_access_internal(env, addr, 1, MMU_INST_FETCH,
1672*af803a4fSRichard Henderson                                 cpu_mmu_index(env, true), false, &p, &full, 0);
16737e0d9973SRichard Henderson     if (p == NULL) {
16747e0d9973SRichard Henderson         return -1;
16757e0d9973SRichard Henderson     }
16767e0d9973SRichard Henderson     if (hostp) {
16777e0d9973SRichard Henderson         *hostp = p;
16787e0d9973SRichard Henderson     }
16797e0d9973SRichard Henderson     return qemu_ram_addr_from_host_nofail(p);
16807e0d9973SRichard Henderson }
16817e0d9973SRichard Henderson 
1682235537faSAlex Bennée #ifdef CONFIG_PLUGIN
1683235537faSAlex Bennée /*
1684235537faSAlex Bennée  * Perform a TLB lookup and populate the qemu_plugin_hwaddr structure.
1685235537faSAlex Bennée  * This should be a hot path, as we will have just looked this address up
1686235537faSAlex Bennée  * in the softmmu lookup code (or helper). We don't handle re-fills or
1687235537faSAlex Bennée  * checking the victim table. This is purely informational.
1688235537faSAlex Bennée  *
16892f3a57eeSAlex Bennée  * This almost never fails, as the memory access being instrumented
16902f3a57eeSAlex Bennée  * should have just filled the TLB. The one corner case is io_writex,
16912f3a57eeSAlex Bennée  * which can cause TLB flushes and potential resizing of the TLBs,
1692570ef309SAlex Bennée  * losing the information we need. In those cases we need to recover
169325d3ec58SRichard Henderson  * data from a copy of the CPUTLBEntryFull. As long as this always occurs
1694570ef309SAlex Bennée  * from the same thread (which a mem callback will be), this is safe.
1695235537faSAlex Bennée  */
1696235537faSAlex Bennée 
1697235537faSAlex Bennée bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr, int mmu_idx,
1698235537faSAlex Bennée                        bool is_store, struct qemu_plugin_hwaddr *data)
1699235537faSAlex Bennée {
1700235537faSAlex Bennée     CPUArchState *env = cpu->env_ptr;
1701235537faSAlex Bennée     CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
1702235537faSAlex Bennée     uintptr_t index = tlb_index(env, mmu_idx, addr);
1703235537faSAlex Bennée     target_ulong tlb_addr = is_store ? tlb_addr_write(tlbe) : tlbe->addr_read;
1704235537faSAlex Bennée 
1705235537faSAlex Bennée     if (likely(tlb_hit(tlb_addr, addr))) {
1706235537faSAlex Bennée         /* We must have an iotlb entry for MMIO */
1707235537faSAlex Bennée         if (tlb_addr & TLB_MMIO) {
170825d3ec58SRichard Henderson             CPUTLBEntryFull *full;
170925d3ec58SRichard Henderson             full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
1710235537faSAlex Bennée             data->is_io = true;
171125d3ec58SRichard Henderson             data->v.io.section =
171225d3ec58SRichard Henderson                 iotlb_to_section(cpu, full->xlat_section, full->attrs);
171325d3ec58SRichard Henderson             data->v.io.offset = (full->xlat_section & TARGET_PAGE_MASK) + addr;
1714235537faSAlex Bennée         } else {
1715235537faSAlex Bennée             data->is_io = false;
17162d932039SAlex Bennée             data->v.ram.hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
1717235537faSAlex Bennée         }
1718235537faSAlex Bennée         return true;
17192f3a57eeSAlex Bennée     } else {
17202f3a57eeSAlex Bennée         SavedIOTLB *saved = &cpu->saved_iotlb;
17212f3a57eeSAlex Bennée         data->is_io = true;
17222f3a57eeSAlex Bennée         data->v.io.section = saved->section;
17232f3a57eeSAlex Bennée         data->v.io.offset = saved->mr_offset;
17242f3a57eeSAlex Bennée         return true;
1725235537faSAlex Bennée     }
1726235537faSAlex Bennée }
1727235537faSAlex Bennée 
1728235537faSAlex Bennée #endif
1729235537faSAlex Bennée 
173008dff435SRichard Henderson /*
173108dff435SRichard Henderson  * Probe for an atomic operation.  Do not allow unaligned or I/O
173208dff435SRichard Henderson  * operations to proceed.  Return the host address.
173308dff435SRichard Henderson  *
173408dff435SRichard Henderson  * @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE.
173508dff435SRichard Henderson  */
1736d9bb58e5SYang Zhong static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
17379002ffcbSRichard Henderson                                MemOpIdx oi, int size, int prot,
173808dff435SRichard Henderson                                uintptr_t retaddr)
1739d9bb58e5SYang Zhong {
1740b826044fSRichard Henderson     uintptr_t mmu_idx = get_mmuidx(oi);
174114776ab5STony Nguyen     MemOp mop = get_memop(oi);
1742d9bb58e5SYang Zhong     int a_bits = get_alignment_bits(mop);
174308dff435SRichard Henderson     uintptr_t index;
174408dff435SRichard Henderson     CPUTLBEntry *tlbe;
174508dff435SRichard Henderson     target_ulong tlb_addr;
174634d49937SPeter Maydell     void *hostaddr;
1747d9bb58e5SYang Zhong 
1748b826044fSRichard Henderson     tcg_debug_assert(mmu_idx < NB_MMU_MODES);
1749b826044fSRichard Henderson 
1750d9bb58e5SYang Zhong     /* Adjust the given return address.  */
1751d9bb58e5SYang Zhong     retaddr -= GETPC_ADJ;
1752d9bb58e5SYang Zhong 
1753d9bb58e5SYang Zhong     /* Enforce guest required alignment.  */
1754d9bb58e5SYang Zhong     if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
1755d9bb58e5SYang Zhong         /* ??? Maybe indicate atomic op to cpu_unaligned_access */
175629a0af61SRichard Henderson         cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
1757d9bb58e5SYang Zhong                              mmu_idx, retaddr);
1758d9bb58e5SYang Zhong     }
1759d9bb58e5SYang Zhong 
1760d9bb58e5SYang Zhong     /* Enforce qemu required alignment.  */
176108dff435SRichard Henderson     if (unlikely(addr & (size - 1))) {
1762d9bb58e5SYang Zhong         /* We get here if guest alignment was not requested,
1763d9bb58e5SYang Zhong            or was not enforced by cpu_unaligned_access above.
1764d9bb58e5SYang Zhong            We might widen the access and emulate, but for now
1765d9bb58e5SYang Zhong            mark an exception and exit the cpu loop.  */
1766d9bb58e5SYang Zhong         goto stop_the_world;
1767d9bb58e5SYang Zhong     }
1768d9bb58e5SYang Zhong 
176908dff435SRichard Henderson     index = tlb_index(env, mmu_idx, addr);
177008dff435SRichard Henderson     tlbe = tlb_entry(env, mmu_idx, addr);
177108dff435SRichard Henderson 
1772d9bb58e5SYang Zhong     /* Check TLB entry and enforce page permissions.  */
177308dff435SRichard Henderson     if (prot & PAGE_WRITE) {
177408dff435SRichard Henderson         tlb_addr = tlb_addr_write(tlbe);
1775334692bcSPeter Maydell         if (!tlb_hit(tlb_addr, addr)) {
1776d9bb58e5SYang Zhong             if (!VICTIM_TLB_HIT(addr_write, addr)) {
177708dff435SRichard Henderson                 tlb_fill(env_cpu(env), addr, size,
177808dff435SRichard Henderson                          MMU_DATA_STORE, mmu_idx, retaddr);
17796d967cb8SEmilio G. Cota                 index = tlb_index(env, mmu_idx, addr);
17806d967cb8SEmilio G. Cota                 tlbe = tlb_entry(env, mmu_idx, addr);
1781d9bb58e5SYang Zhong             }
1782403f290cSEmilio G. Cota             tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
1783d9bb58e5SYang Zhong         }
1784d9bb58e5SYang Zhong 
178508dff435SRichard Henderson         /* Let the guest notice RMW on a write-only page.  */
178608dff435SRichard Henderson         if ((prot & PAGE_READ) &&
178708dff435SRichard Henderson             unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) {
178808dff435SRichard Henderson             tlb_fill(env_cpu(env), addr, size,
178908dff435SRichard Henderson                      MMU_DATA_LOAD, mmu_idx, retaddr);
179008dff435SRichard Henderson             /*
179108dff435SRichard Henderson              * Since we don't support reads and writes to different addresses,
179208dff435SRichard Henderson              * and we do have the proper page loaded for write, this shouldn't
179308dff435SRichard Henderson              * ever return.  But just in case, handle via stop-the-world.
179408dff435SRichard Henderson              */
179508dff435SRichard Henderson             goto stop_the_world;
179608dff435SRichard Henderson         }
179708dff435SRichard Henderson     } else /* if (prot & PAGE_READ) */ {
179808dff435SRichard Henderson         tlb_addr = tlbe->addr_read;
179908dff435SRichard Henderson         if (!tlb_hit(tlb_addr, addr)) {
180008dff435SRichard Henderson             if (!VICTIM_TLB_HIT(addr_read, addr)) {
180108dff435SRichard Henderson                 tlb_fill(env_cpu(env), addr, size,
180208dff435SRichard Henderson                          MMU_DATA_LOAD, mmu_idx, retaddr);
180308dff435SRichard Henderson                 index = tlb_index(env, mmu_idx, addr);
180408dff435SRichard Henderson                 tlbe = tlb_entry(env, mmu_idx, addr);
180508dff435SRichard Henderson             }
180608dff435SRichard Henderson             tlb_addr = tlbe->addr_read & ~TLB_INVALID_MASK;
180708dff435SRichard Henderson         }
180808dff435SRichard Henderson     }
180908dff435SRichard Henderson 
181055df6fcfSPeter Maydell     /* Notice an IO access or a needs-MMU-lookup access */
181130d7e098SRichard Henderson     if (unlikely(tlb_addr & TLB_MMIO)) {
1812d9bb58e5SYang Zhong         /* There's really nothing that can be done to
1813d9bb58e5SYang Zhong            support this apart from stop-the-world.  */
1814d9bb58e5SYang Zhong         goto stop_the_world;
1815d9bb58e5SYang Zhong     }
1816d9bb58e5SYang Zhong 
181734d49937SPeter Maydell     hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
181834d49937SPeter Maydell 
181934d49937SPeter Maydell     if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
182008dff435SRichard Henderson         notdirty_write(env_cpu(env), addr, size,
182125d3ec58SRichard Henderson                        &env_tlb(env)->d[mmu_idx].fulltlb[index], retaddr);
182234d49937SPeter Maydell     }
182334d49937SPeter Maydell 
182434d49937SPeter Maydell     return hostaddr;
1825d9bb58e5SYang Zhong 
1826d9bb58e5SYang Zhong  stop_the_world:
182729a0af61SRichard Henderson     cpu_loop_exit_atomic(env_cpu(env), retaddr);
1828d9bb58e5SYang Zhong }
1829d9bb58e5SYang Zhong 
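/*
 * Sketch of how the atomic helpers (instantiated via atomic_template.h)
 * obtain a host address for a read-modify-write operation:
 *
 *     void *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
 *                                     PAGE_READ | PAGE_WRITE, retaddr);
 */
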
1830eed56642SAlex Bennée /*
1831f83bcecbSRichard Henderson  * Verify that we have passed the correct MemOp to the correct function.
1832f83bcecbSRichard Henderson  *
1833f83bcecbSRichard Henderson  * In the case of the helper_*_mmu functions, we will have done this by
1834f83bcecbSRichard Henderson  * using the MemOp to look up the helper during code generation.
1835f83bcecbSRichard Henderson  *
1836f83bcecbSRichard Henderson  * In the case of the cpu_*_mmu functions, this is up to the caller.
1837f83bcecbSRichard Henderson  * We could present one function to target code, and dispatch based on
1838f83bcecbSRichard Henderson  * the MemOp, but so far we have worked hard to avoid an indirect function
1839f83bcecbSRichard Henderson  * call along the memory path.
1840f83bcecbSRichard Henderson  */
1841f83bcecbSRichard Henderson static void validate_memop(MemOpIdx oi, MemOp expected)
1842f83bcecbSRichard Henderson {
1843f83bcecbSRichard Henderson #ifdef CONFIG_DEBUG_TCG
1844f83bcecbSRichard Henderson     MemOp have = get_memop(oi) & (MO_SIZE | MO_BSWAP);
1845f83bcecbSRichard Henderson     assert(have == expected);
1846f83bcecbSRichard Henderson #endif
1847f83bcecbSRichard Henderson }
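
/*
 * For reference, a MemOpIdx packs a MemOp together with an mmu_idx, and
 * the check above merely unpacks one half of it.  A sketch of the round
 * trip through the real accessors (fragment; assumes an mmu_idx in scope):
 */
#if 0
    MemOpIdx oi = make_memop_idx(MO_LEUL, mmu_idx);
    assert((get_memop(oi) & (MO_SIZE | MO_BSWAP)) == MO_LEUL);
    assert(get_mmuidx(oi) == mmu_idx);
#endif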
1848f83bcecbSRichard Henderson 
1849f83bcecbSRichard Henderson /*
1850eed56642SAlex Bennée  * Load Helpers
1851eed56642SAlex Bennée  *
1852eed56642SAlex Bennée  * We support two different access types. SOFTMMU_CODE_ACCESS is
1853eed56642SAlex Bennée  * specifically for reading instructions from system memory. It is
1854eed56642SAlex Bennée  * called by the translation loop and in some helpers where the code
1855eed56642SAlex Bennée  * is disassembled. It shouldn't be called directly by guest code.
1856eed56642SAlex Bennée  */
1857d9bb58e5SYang Zhong 
18582dd92606SRichard Henderson typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr,
18599002ffcbSRichard Henderson                                 MemOpIdx oi, uintptr_t retaddr);
18602dd92606SRichard Henderson 
1861c6b716cdSRichard Henderson static inline uint64_t QEMU_ALWAYS_INLINE
186280d9d1c6SRichard Henderson load_memop(const void *haddr, MemOp op)
186380d9d1c6SRichard Henderson {
186480d9d1c6SRichard Henderson     switch (op) {
186580d9d1c6SRichard Henderson     case MO_UB:
186680d9d1c6SRichard Henderson         return ldub_p(haddr);
186780d9d1c6SRichard Henderson     case MO_BEUW:
186880d9d1c6SRichard Henderson         return lduw_be_p(haddr);
186980d9d1c6SRichard Henderson     case MO_LEUW:
187080d9d1c6SRichard Henderson         return lduw_le_p(haddr);
187180d9d1c6SRichard Henderson     case MO_BEUL:
187280d9d1c6SRichard Henderson         return (uint32_t)ldl_be_p(haddr);
187380d9d1c6SRichard Henderson     case MO_LEUL:
187480d9d1c6SRichard Henderson         return (uint32_t)ldl_le_p(haddr);
1875fc313c64SFrédéric Pétrot     case MO_BEUQ:
187680d9d1c6SRichard Henderson         return ldq_be_p(haddr);
1877fc313c64SFrédéric Pétrot     case MO_LEUQ:
187880d9d1c6SRichard Henderson         return ldq_le_p(haddr);
187980d9d1c6SRichard Henderson     default:
188080d9d1c6SRichard Henderson         qemu_build_not_reached();
188180d9d1c6SRichard Henderson     }
188280d9d1c6SRichard Henderson }
188380d9d1c6SRichard Henderson 
188480d9d1c6SRichard Henderson static inline uint64_t QEMU_ALWAYS_INLINE
18859002ffcbSRichard Henderson load_helper(CPUArchState *env, target_ulong addr, MemOpIdx oi,
1886be5c4787STony Nguyen             uintptr_t retaddr, MemOp op, bool code_read,
18872dd92606SRichard Henderson             FullLoadHelper *full_load)
1888eed56642SAlex Bennée {
1889eed56642SAlex Bennée     const size_t tlb_off = code_read ?
1890eed56642SAlex Bennée         offsetof(CPUTLBEntry, addr_code) : offsetof(CPUTLBEntry, addr_read);
1891f1be3696SRichard Henderson     const MMUAccessType access_type =
1892f1be3696SRichard Henderson         code_read ? MMU_INST_FETCH : MMU_DATA_LOAD;
1893b826044fSRichard Henderson     const unsigned a_bits = get_alignment_bits(get_memop(oi));
1894b826044fSRichard Henderson     const size_t size = memop_size(op);
1895b826044fSRichard Henderson     uintptr_t mmu_idx = get_mmuidx(oi);
1896b826044fSRichard Henderson     uintptr_t index;
1897b826044fSRichard Henderson     CPUTLBEntry *entry;
1898b826044fSRichard Henderson     target_ulong tlb_addr;
1899eed56642SAlex Bennée     void *haddr;
1900eed56642SAlex Bennée     uint64_t res;
1901b826044fSRichard Henderson 
1902b826044fSRichard Henderson     tcg_debug_assert(mmu_idx < NB_MMU_MODES);
1903d9bb58e5SYang Zhong 
1904eed56642SAlex Bennée     /* Handle CPU specific unaligned behaviour */
1905eed56642SAlex Bennée     if (addr & ((1 << a_bits) - 1)) {
190629a0af61SRichard Henderson         cpu_unaligned_access(env_cpu(env), addr, access_type,
1907eed56642SAlex Bennée                              mmu_idx, retaddr);
1908eed56642SAlex Bennée     }
1909eed56642SAlex Bennée 
1910b826044fSRichard Henderson     index = tlb_index(env, mmu_idx, addr);
1911b826044fSRichard Henderson     entry = tlb_entry(env, mmu_idx, addr);
1912b826044fSRichard Henderson     tlb_addr = code_read ? entry->addr_code : entry->addr_read;
1913b826044fSRichard Henderson 
1914eed56642SAlex Bennée     /* If the TLB entry is for a different page, reload and try again.  */
1915eed56642SAlex Bennée     if (!tlb_hit(tlb_addr, addr)) {
1916eed56642SAlex Bennée         if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
1917eed56642SAlex Bennée                             addr & TARGET_PAGE_MASK)) {
191829a0af61SRichard Henderson             tlb_fill(env_cpu(env), addr, size,
1919f1be3696SRichard Henderson                      access_type, mmu_idx, retaddr);
1920eed56642SAlex Bennée             index = tlb_index(env, mmu_idx, addr);
1921eed56642SAlex Bennée             entry = tlb_entry(env, mmu_idx, addr);
1922eed56642SAlex Bennée         }
1923eed56642SAlex Bennée         tlb_addr = code_read ? entry->addr_code : entry->addr_read;
192430d7e098SRichard Henderson         tlb_addr &= ~TLB_INVALID_MASK;
1925eed56642SAlex Bennée     }
1926eed56642SAlex Bennée 
192750b107c5SRichard Henderson     /* Handle anything that isn't just a straight memory access.  */
1928eed56642SAlex Bennée     if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
192925d3ec58SRichard Henderson         CPUTLBEntryFull *full;
19305b87b3e6SRichard Henderson         bool need_swap;
193150b107c5SRichard Henderson 
193250b107c5SRichard Henderson         /* For anything that is unaligned, recurse through full_load.  */
1933eed56642SAlex Bennée         if ((addr & (size - 1)) != 0) {
1934eed56642SAlex Bennée             goto do_unaligned_access;
1935eed56642SAlex Bennée         }
193650b107c5SRichard Henderson 
193725d3ec58SRichard Henderson         full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
193850b107c5SRichard Henderson 
193950b107c5SRichard Henderson         /* Handle watchpoints.  */
194050b107c5SRichard Henderson         if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
194150b107c5SRichard Henderson             /* On watchpoint hit, this will longjmp out.  */
194250b107c5SRichard Henderson             cpu_check_watchpoint(env_cpu(env), addr, size,
194325d3ec58SRichard Henderson                                  full->attrs, BP_MEM_READ, retaddr);
19445b87b3e6SRichard Henderson         }
194550b107c5SRichard Henderson 
19465b87b3e6SRichard Henderson         need_swap = size > 1 && (tlb_addr & TLB_BSWAP);
194750b107c5SRichard Henderson 
194850b107c5SRichard Henderson         /* Handle I/O access.  */
19495b87b3e6SRichard Henderson         if (likely(tlb_addr & TLB_MMIO)) {
195025d3ec58SRichard Henderson             return io_readx(env, full, mmu_idx, addr, retaddr,
19515b87b3e6SRichard Henderson                             access_type, op ^ (need_swap * MO_BSWAP));
19525b87b3e6SRichard Henderson         }
19535b87b3e6SRichard Henderson 
19545b87b3e6SRichard Henderson         haddr = (void *)((uintptr_t)addr + entry->addend);
19555b87b3e6SRichard Henderson 
19565b87b3e6SRichard Henderson         /*
19575b87b3e6SRichard Henderson          * Keep these two load_memop calls separate to ensure that the compiler
19585b87b3e6SRichard Henderson          * is able to fold the entire function to a single instruction.
19595b87b3e6SRichard Henderson          * There is a build-time assert inside to remind you of this.  ;-)
19605b87b3e6SRichard Henderson          */
19615b87b3e6SRichard Henderson         if (unlikely(need_swap)) {
19625b87b3e6SRichard Henderson             return load_memop(haddr, op ^ MO_BSWAP);
19635b87b3e6SRichard Henderson         }
19645b87b3e6SRichard Henderson         return load_memop(haddr, op);
1965eed56642SAlex Bennée     }
1966eed56642SAlex Bennée 
1967eed56642SAlex Bennée     /* Handle slow unaligned access (it spans two pages or IO).  */
1968eed56642SAlex Bennée     if (size > 1
1969eed56642SAlex Bennée         && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
1970eed56642SAlex Bennée                     >= TARGET_PAGE_SIZE)) {
1971eed56642SAlex Bennée         target_ulong addr1, addr2;
19728c79b288SAlex Bennée         uint64_t r1, r2;
1973eed56642SAlex Bennée         unsigned shift;
1974eed56642SAlex Bennée     do_unaligned_access:
1975ab7a2009SAlex Bennée         addr1 = addr & ~((target_ulong)size - 1);
1976eed56642SAlex Bennée         addr2 = addr1 + size;
19772dd92606SRichard Henderson         r1 = full_load(env, addr1, oi, retaddr);
19782dd92606SRichard Henderson         r2 = full_load(env, addr2, oi, retaddr);
1979eed56642SAlex Bennée         shift = (addr & (size - 1)) * 8;
1980eed56642SAlex Bennée 
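        /*
         * Worked example (little endian): for a 4-byte load with
         * (addr & 3) == 2, shift is 16, so the result combines the
         * high two bytes of r1 with the low two bytes of r2:
         *     res = (r1 >> 16) | (r2 << 16), masked to 32 bits below.
         */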
1981be5c4787STony Nguyen         if (memop_big_endian(op)) {
1982eed56642SAlex Bennée             /* Big-endian combine.  */
1983eed56642SAlex Bennée             res = (r1 << shift) | (r2 >> ((size * 8) - shift));
1984eed56642SAlex Bennée         } else {
1985eed56642SAlex Bennée             /* Little-endian combine.  */
1986eed56642SAlex Bennée             res = (r1 >> shift) | (r2 << ((size * 8) - shift));
1987eed56642SAlex Bennée         }
1988eed56642SAlex Bennée         return res & MAKE_64BIT_MASK(0, size * 8);
1989eed56642SAlex Bennée     }
1990eed56642SAlex Bennée 
1991eed56642SAlex Bennée     haddr = (void *)((uintptr_t)addr + entry->addend);
199280d9d1c6SRichard Henderson     return load_memop(haddr, op);
1993eed56642SAlex Bennée }
1994eed56642SAlex Bennée 
1995eed56642SAlex Bennée /*
1996eed56642SAlex Bennée  * For the benefit of TCG generated code, we want to avoid the
1997eed56642SAlex Bennée  * complication of ABI-specific return type promotion and always
1998eed56642SAlex Bennée  * return a value extended to the register size of the host. This is
1999eed56642SAlex Bennée  * tcg_target_long, except in the case of a 32-bit host and 64-bit
2000eed56642SAlex Bennée  * data, in which case we always use uint64_t.
2001eed56642SAlex Bennée  *
2002eed56642SAlex Bennée  * We don't bother with this widened value for SOFTMMU_CODE_ACCESS.
2003eed56642SAlex Bennée  */
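
/*
 * E.g. on a 64-bit host, helper_ret_ldub_mmu() below returns the loaded
 * byte zero-extended to all 64 bits of a tcg_target_ulong, rather than
 * relying on the ABI's promotion rules for a uint8_t return.
 */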
2004eed56642SAlex Bennée 
20052dd92606SRichard Henderson static uint64_t full_ldub_mmu(CPUArchState *env, target_ulong addr,
20069002ffcbSRichard Henderson                               MemOpIdx oi, uintptr_t retaddr)
20072dd92606SRichard Henderson {
2008f83bcecbSRichard Henderson     validate_memop(oi, MO_UB);
2009be5c4787STony Nguyen     return load_helper(env, addr, oi, retaddr, MO_UB, false, full_ldub_mmu);
20102dd92606SRichard Henderson }
20112dd92606SRichard Henderson 
2012fc1bc777SRichard Henderson tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
20139002ffcbSRichard Henderson                                      MemOpIdx oi, uintptr_t retaddr)
2014eed56642SAlex Bennée {
20152dd92606SRichard Henderson     return full_ldub_mmu(env, addr, oi, retaddr);
20162dd92606SRichard Henderson }
20172dd92606SRichard Henderson 
20182dd92606SRichard Henderson static uint64_t full_le_lduw_mmu(CPUArchState *env, target_ulong addr,
20199002ffcbSRichard Henderson                                  MemOpIdx oi, uintptr_t retaddr)
20202dd92606SRichard Henderson {
2021f83bcecbSRichard Henderson     validate_memop(oi, MO_LEUW);
2022be5c4787STony Nguyen     return load_helper(env, addr, oi, retaddr, MO_LEUW, false,
20232dd92606SRichard Henderson                        full_le_lduw_mmu);
2024eed56642SAlex Bennée }
2025eed56642SAlex Bennée 
2026fc1bc777SRichard Henderson tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
20279002ffcbSRichard Henderson                                     MemOpIdx oi, uintptr_t retaddr)
2028eed56642SAlex Bennée {
20292dd92606SRichard Henderson     return full_le_lduw_mmu(env, addr, oi, retaddr);
20302dd92606SRichard Henderson }
20312dd92606SRichard Henderson 
20322dd92606SRichard Henderson static uint64_t full_be_lduw_mmu(CPUArchState *env, target_ulong addr,
20339002ffcbSRichard Henderson                                  MemOpIdx oi, uintptr_t retaddr)
20342dd92606SRichard Henderson {
2035f83bcecbSRichard Henderson     validate_memop(oi, MO_BEUW);
2036be5c4787STony Nguyen     return load_helper(env, addr, oi, retaddr, MO_BEUW, false,
20372dd92606SRichard Henderson                        full_be_lduw_mmu);
2038eed56642SAlex Bennée }
2039eed56642SAlex Bennée 
2040fc1bc777SRichard Henderson tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
20419002ffcbSRichard Henderson                                     MemOpIdx oi, uintptr_t retaddr)
2042eed56642SAlex Bennée {
20432dd92606SRichard Henderson     return full_be_lduw_mmu(env, addr, oi, retaddr);
20442dd92606SRichard Henderson }
20452dd92606SRichard Henderson 
20462dd92606SRichard Henderson static uint64_t full_le_ldul_mmu(CPUArchState *env, target_ulong addr,
20479002ffcbSRichard Henderson                                  MemOpIdx oi, uintptr_t retaddr)
20482dd92606SRichard Henderson {
2049f83bcecbSRichard Henderson     validate_memop(oi, MO_LEUL);
2050be5c4787STony Nguyen     return load_helper(env, addr, oi, retaddr, MO_LEUL, false,
20512dd92606SRichard Henderson                        full_le_ldul_mmu);
2052eed56642SAlex Bennée }
2053eed56642SAlex Bennée 
2054fc1bc777SRichard Henderson tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
20559002ffcbSRichard Henderson                                     MemOpIdx oi, uintptr_t retaddr)
2056eed56642SAlex Bennée {
20572dd92606SRichard Henderson     return full_le_ldul_mmu(env, addr, oi, retaddr);
20582dd92606SRichard Henderson }
20592dd92606SRichard Henderson 
20602dd92606SRichard Henderson static uint64_t full_be_ldul_mmu(CPUArchState *env, target_ulong addr,
20619002ffcbSRichard Henderson                                  MemOpIdx oi, uintptr_t retaddr)
20622dd92606SRichard Henderson {
2063f83bcecbSRichard Henderson     validate_memop(oi, MO_BEUL);
2064be5c4787STony Nguyen     return load_helper(env, addr, oi, retaddr, MO_BEUL, false,
20652dd92606SRichard Henderson                        full_be_ldul_mmu);
2066eed56642SAlex Bennée }
2067eed56642SAlex Bennée 
2068fc1bc777SRichard Henderson tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
20699002ffcbSRichard Henderson                                     MemOpIdx oi, uintptr_t retaddr)
2070eed56642SAlex Bennée {
20712dd92606SRichard Henderson     return full_be_ldul_mmu(env, addr, oi, retaddr);
2072eed56642SAlex Bennée }
2073eed56642SAlex Bennée 
2074fc1bc777SRichard Henderson uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
20759002ffcbSRichard Henderson                            MemOpIdx oi, uintptr_t retaddr)
2076eed56642SAlex Bennée {
2077fc313c64SFrédéric Pétrot     validate_memop(oi, MO_LEUQ);
2078fc313c64SFrédéric Pétrot     return load_helper(env, addr, oi, retaddr, MO_LEUQ, false,
20792dd92606SRichard Henderson                        helper_le_ldq_mmu);
2080eed56642SAlex Bennée }
2081eed56642SAlex Bennée 
2082fc1bc777SRichard Henderson uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
20839002ffcbSRichard Henderson                            MemOpIdx oi, uintptr_t retaddr)
2084eed56642SAlex Bennée {
2085fc313c64SFrédéric Pétrot     validate_memop(oi, MO_BEUQ);
2086fc313c64SFrédéric Pétrot     return load_helper(env, addr, oi, retaddr, MO_BEUQ, false,
20872dd92606SRichard Henderson                        helper_be_ldq_mmu);
2088eed56642SAlex Bennée }
2089eed56642SAlex Bennée 
2090eed56642SAlex Bennée /*
2091eed56642SAlex Bennée  * Provide signed versions of the load routines as well.  We can of course
2092eed56642SAlex Bennée  * avoid this for 64-bit data, or for 32-bit data on a 32-bit host.
2093eed56642SAlex Bennée  */
2094eed56642SAlex Bennée 
2095eed56642SAlex Bennée 
2096eed56642SAlex Bennée tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
20979002ffcbSRichard Henderson                                      MemOpIdx oi, uintptr_t retaddr)
2098eed56642SAlex Bennée {
2099eed56642SAlex Bennée     return (int8_t)helper_ret_ldub_mmu(env, addr, oi, retaddr);
2100eed56642SAlex Bennée }
2101eed56642SAlex Bennée 
2102eed56642SAlex Bennée tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
21039002ffcbSRichard Henderson                                     MemOpIdx oi, uintptr_t retaddr)
2104eed56642SAlex Bennée {
2105eed56642SAlex Bennée     return (int16_t)helper_le_lduw_mmu(env, addr, oi, retaddr);
2106eed56642SAlex Bennée }
2107eed56642SAlex Bennée 
2108eed56642SAlex Bennée tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
21099002ffcbSRichard Henderson                                     MemOpIdx oi, uintptr_t retaddr)
2110eed56642SAlex Bennée {
2111eed56642SAlex Bennée     return (int16_t)helper_be_lduw_mmu(env, addr, oi, retaddr);
2112eed56642SAlex Bennée }
2113eed56642SAlex Bennée 
2114eed56642SAlex Bennée tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
21159002ffcbSRichard Henderson                                     MemOpIdx oi, uintptr_t retaddr)
2116eed56642SAlex Bennée {
2117eed56642SAlex Bennée     return (int32_t)helper_le_ldul_mmu(env, addr, oi, retaddr);
2118eed56642SAlex Bennée }
2119eed56642SAlex Bennée 
2120eed56642SAlex Bennée tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
21219002ffcbSRichard Henderson                                     MemOpIdx oi, uintptr_t retaddr)
2122eed56642SAlex Bennée {
2123eed56642SAlex Bennée     return (int32_t)helper_be_ldul_mmu(env, addr, oi, retaddr);
2124eed56642SAlex Bennée }
2125eed56642SAlex Bennée 
2126eed56642SAlex Bennée /*
2127d03f1408SRichard Henderson  * Load helpers for cpu_ldst.h.
2128d03f1408SRichard Henderson  */
2129d03f1408SRichard Henderson 
2130d03f1408SRichard Henderson static inline uint64_t cpu_load_helper(CPUArchState *env, abi_ptr addr,
2131f83bcecbSRichard Henderson                                        MemOpIdx oi, uintptr_t retaddr,
2132f83bcecbSRichard Henderson                                        FullLoadHelper *full_load)
2133d03f1408SRichard Henderson {
2134d03f1408SRichard Henderson     uint64_t ret;
2135d03f1408SRichard Henderson 
2136d03f1408SRichard Henderson     ret = full_load(env, addr, oi, retaddr);
213737aff087SRichard Henderson     qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
2138d03f1408SRichard Henderson     return ret;
2139d03f1408SRichard Henderson }
2140d03f1408SRichard Henderson 
2141f83bcecbSRichard Henderson uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr, MemOpIdx oi, uintptr_t ra)
2142d03f1408SRichard Henderson {
2143f83bcecbSRichard Henderson     return cpu_load_helper(env, addr, oi, ra, full_ldub_mmu);
2144d03f1408SRichard Henderson }
2145d03f1408SRichard Henderson 
2146f83bcecbSRichard Henderson uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr addr,
2147f83bcecbSRichard Henderson                         MemOpIdx oi, uintptr_t ra)
2148d03f1408SRichard Henderson {
2149f83bcecbSRichard Henderson     return cpu_load_helper(env, addr, oi, ra, full_be_lduw_mmu);
2150d03f1408SRichard Henderson }
2151d03f1408SRichard Henderson 
2152f83bcecbSRichard Henderson uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr addr,
2153f83bcecbSRichard Henderson                         MemOpIdx oi, uintptr_t ra)
2154d03f1408SRichard Henderson {
2155f83bcecbSRichard Henderson     return cpu_load_helper(env, addr, oi, ra, full_be_ldul_mmu);
2156d03f1408SRichard Henderson }
2157d03f1408SRichard Henderson 
2158f83bcecbSRichard Henderson uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr,
2159f83bcecbSRichard Henderson                         MemOpIdx oi, uintptr_t ra)
2160d03f1408SRichard Henderson {
216146697cb9SRichard Henderson     return cpu_load_helper(env, addr, oi, ra, helper_be_ldq_mmu);
2162d03f1408SRichard Henderson }
2163d03f1408SRichard Henderson 
2164f83bcecbSRichard Henderson uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr addr,
2165f83bcecbSRichard Henderson                         MemOpIdx oi, uintptr_t ra)
2166d03f1408SRichard Henderson {
2167f83bcecbSRichard Henderson     return cpu_load_helper(env, addr, oi, ra, full_le_lduw_mmu);
2168d03f1408SRichard Henderson }
2169d03f1408SRichard Henderson 
2170f83bcecbSRichard Henderson uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr addr,
2171f83bcecbSRichard Henderson                         MemOpIdx oi, uintptr_t ra)
2172d03f1408SRichard Henderson {
2173f83bcecbSRichard Henderson     return cpu_load_helper(env, addr, oi, ra, full_le_ldul_mmu);
2174b9e60257SRichard Henderson }
2175b9e60257SRichard Henderson 
2176f83bcecbSRichard Henderson uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr,
2177f83bcecbSRichard Henderson                         MemOpIdx oi, uintptr_t ra)
2178b9e60257SRichard Henderson {
2179f83bcecbSRichard Henderson     return cpu_load_helper(env, addr, oi, ra, helper_le_ldq_mmu);
2180cfe04a4bSRichard Henderson }
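
/*
 * Usage sketch (hypothetical target helper, not part of this file):
 * callers build the MemOpIdx themselves and pass GETPC() so that a
 * TLB fill can unwind to the guest instruction.
 */
#if 0
uint32_t HELPER(my_load)(CPUArchState *env, target_ulong addr)
{
    MemOpIdx oi = make_memop_idx(MO_LEUL, cpu_mmu_index(env, false));
    return cpu_ldl_le_mmu(env, addr, oi, GETPC());
}
#endif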
2181cfe04a4bSRichard Henderson 
2182d03f1408SRichard Henderson /*
2183eed56642SAlex Bennée  * Store Helpers
2184eed56642SAlex Bennée  */
2185eed56642SAlex Bennée 
2186c6b716cdSRichard Henderson static inline void QEMU_ALWAYS_INLINE
218780d9d1c6SRichard Henderson store_memop(void *haddr, uint64_t val, MemOp op)
218880d9d1c6SRichard Henderson {
218980d9d1c6SRichard Henderson     switch (op) {
219080d9d1c6SRichard Henderson     case MO_UB:
219180d9d1c6SRichard Henderson         stb_p(haddr, val);
219280d9d1c6SRichard Henderson         break;
219380d9d1c6SRichard Henderson     case MO_BEUW:
219480d9d1c6SRichard Henderson         stw_be_p(haddr, val);
219580d9d1c6SRichard Henderson         break;
219680d9d1c6SRichard Henderson     case MO_LEUW:
219780d9d1c6SRichard Henderson         stw_le_p(haddr, val);
219880d9d1c6SRichard Henderson         break;
219980d9d1c6SRichard Henderson     case MO_BEUL:
220080d9d1c6SRichard Henderson         stl_be_p(haddr, val);
220180d9d1c6SRichard Henderson         break;
220280d9d1c6SRichard Henderson     case MO_LEUL:
220380d9d1c6SRichard Henderson         stl_le_p(haddr, val);
220480d9d1c6SRichard Henderson         break;
2205fc313c64SFrédéric Pétrot     case MO_BEUQ:
220680d9d1c6SRichard Henderson         stq_be_p(haddr, val);
220780d9d1c6SRichard Henderson         break;
2208fc313c64SFrédéric Pétrot     case MO_LEUQ:
220980d9d1c6SRichard Henderson         stq_le_p(haddr, val);
221080d9d1c6SRichard Henderson         break;
221180d9d1c6SRichard Henderson     default:
221280d9d1c6SRichard Henderson         qemu_build_not_reached();
221380d9d1c6SRichard Henderson     }
221480d9d1c6SRichard Henderson }
221580d9d1c6SRichard Henderson 
2216f83bcecbSRichard Henderson static void full_stb_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
2217f83bcecbSRichard Henderson                          MemOpIdx oi, uintptr_t retaddr);
2218f83bcecbSRichard Henderson 
22196b8b622eSRichard Henderson static void __attribute__((noinline))
22206b8b622eSRichard Henderson store_helper_unaligned(CPUArchState *env, target_ulong addr, uint64_t val,
22216b8b622eSRichard Henderson                        uintptr_t retaddr, size_t size, uintptr_t mmu_idx,
22226b8b622eSRichard Henderson                        bool big_endian)
22236b8b622eSRichard Henderson {
22246b8b622eSRichard Henderson     const size_t tlb_off = offsetof(CPUTLBEntry, addr_write);
22256b8b622eSRichard Henderson     uintptr_t index, index2;
22266b8b622eSRichard Henderson     CPUTLBEntry *entry, *entry2;
2227b0f650f0SIlya Leoshkevich     target_ulong page1, page2, tlb_addr, tlb_addr2;
22289002ffcbSRichard Henderson     MemOpIdx oi;
22296b8b622eSRichard Henderson     size_t size2;
22306b8b622eSRichard Henderson     int i;
22316b8b622eSRichard Henderson 
22326b8b622eSRichard Henderson     /*
22336b8b622eSRichard Henderson      * Ensure the second page is in the TLB.  Note that the first page
22346b8b622eSRichard Henderson      * is already guaranteed to be filled, and that the second page
2235b0f650f0SIlya Leoshkevich      * cannot evict the first.  An exception to this rule is PAGE_WRITE_INV
2236b0f650f0SIlya Leoshkevich      * handling: the first page could have evicted itself.
22376b8b622eSRichard Henderson      */
2238b0f650f0SIlya Leoshkevich     page1 = addr & TARGET_PAGE_MASK;
22396b8b622eSRichard Henderson     page2 = (addr + size) & TARGET_PAGE_MASK;
22406b8b622eSRichard Henderson     size2 = (addr + size) & ~TARGET_PAGE_MASK;
22416b8b622eSRichard Henderson     index2 = tlb_index(env, mmu_idx, page2);
22426b8b622eSRichard Henderson     entry2 = tlb_entry(env, mmu_idx, page2);
22436b8b622eSRichard Henderson 
22446b8b622eSRichard Henderson     tlb_addr2 = tlb_addr_write(entry2);
2245b0f650f0SIlya Leoshkevich     if (page1 != page2 && !tlb_hit_page(tlb_addr2, page2)) {
22466b8b622eSRichard Henderson         if (!victim_tlb_hit(env, mmu_idx, index2, tlb_off, page2)) {
22476b8b622eSRichard Henderson             tlb_fill(env_cpu(env), page2, size2, MMU_DATA_STORE,
22486b8b622eSRichard Henderson                      mmu_idx, retaddr);
22496b8b622eSRichard Henderson             index2 = tlb_index(env, mmu_idx, page2);
22506b8b622eSRichard Henderson             entry2 = tlb_entry(env, mmu_idx, page2);
22516b8b622eSRichard Henderson         }
22526b8b622eSRichard Henderson         tlb_addr2 = tlb_addr_write(entry2);
22536b8b622eSRichard Henderson     }
22546b8b622eSRichard Henderson 
22556b8b622eSRichard Henderson     index = tlb_index(env, mmu_idx, addr);
22566b8b622eSRichard Henderson     entry = tlb_entry(env, mmu_idx, addr);
22576b8b622eSRichard Henderson     tlb_addr = tlb_addr_write(entry);
22586b8b622eSRichard Henderson 
22596b8b622eSRichard Henderson     /*
22606b8b622eSRichard Henderson      * Handle watchpoints.  Since this may trap, all checks
22616b8b622eSRichard Henderson      * must happen before any store.
22626b8b622eSRichard Henderson      */
22636b8b622eSRichard Henderson     if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
22646b8b622eSRichard Henderson         cpu_check_watchpoint(env_cpu(env), addr, size - size2,
226525d3ec58SRichard Henderson                              env_tlb(env)->d[mmu_idx].fulltlb[index].attrs,
22666b8b622eSRichard Henderson                              BP_MEM_WRITE, retaddr);
22676b8b622eSRichard Henderson     }
22686b8b622eSRichard Henderson     if (unlikely(tlb_addr2 & TLB_WATCHPOINT)) {
22696b8b622eSRichard Henderson         cpu_check_watchpoint(env_cpu(env), page2, size2,
227025d3ec58SRichard Henderson                              env_tlb(env)->d[mmu_idx].fulltlb[index2].attrs,
22716b8b622eSRichard Henderson                              BP_MEM_WRITE, retaddr);
22726b8b622eSRichard Henderson     }
22736b8b622eSRichard Henderson 
22746b8b622eSRichard Henderson     /*
22756b8b622eSRichard Henderson      * XXX: not efficient, but simple.
22766b8b622eSRichard Henderson      * This loop must go in the forward direction to avoid issues
22776b8b622eSRichard Henderson      * with self-modifying code on 64-bit Windows.
22786b8b622eSRichard Henderson      */
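    /*
     * Worked example: storing 0x11223344 as four bytes big-endian emits
     * 0x11 0x22 0x33 0x44 in ascending address order (i == 0 extracts
     * val >> 24); little-endian emits 0x44 0x33 0x22 0x11.
     */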
22796b8b622eSRichard Henderson     oi = make_memop_idx(MO_UB, mmu_idx);
22806b8b622eSRichard Henderson     if (big_endian) {
22816b8b622eSRichard Henderson         for (i = 0; i < size; ++i) {
22826b8b622eSRichard Henderson             /* Big-endian extract.  */
22836b8b622eSRichard Henderson             uint8_t val8 = val >> (((size - 1) * 8) - (i * 8));
2284f83bcecbSRichard Henderson             full_stb_mmu(env, addr + i, val8, oi, retaddr);
22856b8b622eSRichard Henderson         }
22866b8b622eSRichard Henderson     } else {
22876b8b622eSRichard Henderson         for (i = 0; i < size; ++i) {
22886b8b622eSRichard Henderson             /* Little-endian extract.  */
22896b8b622eSRichard Henderson             uint8_t val8 = val >> (i * 8);
2290f83bcecbSRichard Henderson             full_stb_mmu(env, addr + i, val8, oi, retaddr);
22916b8b622eSRichard Henderson         }
22926b8b622eSRichard Henderson     }
22936b8b622eSRichard Henderson }
22946b8b622eSRichard Henderson 
229580d9d1c6SRichard Henderson static inline void QEMU_ALWAYS_INLINE
22964601f8d1SRichard Henderson store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
22979002ffcbSRichard Henderson              MemOpIdx oi, uintptr_t retaddr, MemOp op)
2298eed56642SAlex Bennée {
2299eed56642SAlex Bennée     const size_t tlb_off = offsetof(CPUTLBEntry, addr_write);
2300b826044fSRichard Henderson     const unsigned a_bits = get_alignment_bits(get_memop(oi));
2301b826044fSRichard Henderson     const size_t size = memop_size(op);
2302b826044fSRichard Henderson     uintptr_t mmu_idx = get_mmuidx(oi);
2303b826044fSRichard Henderson     uintptr_t index;
2304b826044fSRichard Henderson     CPUTLBEntry *entry;
2305b826044fSRichard Henderson     target_ulong tlb_addr;
2306eed56642SAlex Bennée     void *haddr;
2307b826044fSRichard Henderson 
2308b826044fSRichard Henderson     tcg_debug_assert(mmu_idx < NB_MMU_MODES);
2309eed56642SAlex Bennée 
2310eed56642SAlex Bennée     /* Handle CPU specific unaligned behaviour */
2311eed56642SAlex Bennée     if (addr & ((1 << a_bits) - 1)) {
231229a0af61SRichard Henderson         cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
2313eed56642SAlex Bennée                              mmu_idx, retaddr);
2314eed56642SAlex Bennée     }
2315eed56642SAlex Bennée 
2316b826044fSRichard Henderson     index = tlb_index(env, mmu_idx, addr);
2317b826044fSRichard Henderson     entry = tlb_entry(env, mmu_idx, addr);
2318b826044fSRichard Henderson     tlb_addr = tlb_addr_write(entry);
2319b826044fSRichard Henderson 
2320eed56642SAlex Bennée     /* If the TLB entry is for a different page, reload and try again.  */
2321eed56642SAlex Bennée     if (!tlb_hit(tlb_addr, addr)) {
2322eed56642SAlex Bennée         if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
2323eed56642SAlex Bennée             addr & TARGET_PAGE_MASK)) {
232429a0af61SRichard Henderson             tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE,
2325eed56642SAlex Bennée                      mmu_idx, retaddr);
2326eed56642SAlex Bennée             index = tlb_index(env, mmu_idx, addr);
2327eed56642SAlex Bennée             entry = tlb_entry(env, mmu_idx, addr);
2328eed56642SAlex Bennée         }
2329eed56642SAlex Bennée         tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK;
2330eed56642SAlex Bennée     }
2331eed56642SAlex Bennée 
233250b107c5SRichard Henderson     /* Handle anything that isn't just a straight memory access.  */
2333eed56642SAlex Bennée     if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
233425d3ec58SRichard Henderson         CPUTLBEntryFull *full;
23355b87b3e6SRichard Henderson         bool need_swap;
233650b107c5SRichard Henderson 
233750b107c5SRichard Henderson         /* For anything that is unaligned, recurse through byte stores.  */
2338eed56642SAlex Bennée         if ((addr & (size - 1)) != 0) {
2339eed56642SAlex Bennée             goto do_unaligned_access;
2340eed56642SAlex Bennée         }
234150b107c5SRichard Henderson 
234225d3ec58SRichard Henderson         full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
234350b107c5SRichard Henderson 
234450b107c5SRichard Henderson         /* Handle watchpoints.  */
234550b107c5SRichard Henderson         if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
234650b107c5SRichard Henderson             /* On watchpoint hit, this will longjmp out.  */
234750b107c5SRichard Henderson             cpu_check_watchpoint(env_cpu(env), addr, size,
234825d3ec58SRichard Henderson                                  full->attrs, BP_MEM_WRITE, retaddr);
23495b87b3e6SRichard Henderson         }
235050b107c5SRichard Henderson 
23515b87b3e6SRichard Henderson         need_swap = size > 1 && (tlb_addr & TLB_BSWAP);
235250b107c5SRichard Henderson 
235350b107c5SRichard Henderson         /* Handle I/O access.  */
235408565552SRichard Henderson         if (tlb_addr & TLB_MMIO) {
235525d3ec58SRichard Henderson             io_writex(env, full, mmu_idx, val, addr, retaddr,
23565b87b3e6SRichard Henderson                       op ^ (need_swap * MO_BSWAP));
23575b87b3e6SRichard Henderson             return;
23585b87b3e6SRichard Henderson         }
23595b87b3e6SRichard Henderson 
23607b0d792cSRichard Henderson         /* Ignore writes to ROM.  */
23617b0d792cSRichard Henderson         if (unlikely(tlb_addr & TLB_DISCARD_WRITE)) {
23627b0d792cSRichard Henderson             return;
23637b0d792cSRichard Henderson         }
23647b0d792cSRichard Henderson 
236508565552SRichard Henderson         /* Handle clean RAM pages.  */
236608565552SRichard Henderson         if (tlb_addr & TLB_NOTDIRTY) {
236725d3ec58SRichard Henderson             notdirty_write(env_cpu(env), addr, size, full, retaddr);
236808565552SRichard Henderson         }
236908565552SRichard Henderson 
2370707526adSRichard Henderson         haddr = (void *)((uintptr_t)addr + entry->addend);
237108565552SRichard Henderson 
23725b87b3e6SRichard Henderson         /*
23735b87b3e6SRichard Henderson          * Keep these two store_memop calls separate to ensure that the compiler
23745b87b3e6SRichard Henderson          * is able to fold the entire function to a single instruction.
23755b87b3e6SRichard Henderson          * There is a build-time assert inside to remind you of this.  ;-)
23765b87b3e6SRichard Henderson          */
23775b87b3e6SRichard Henderson         if (unlikely(need_swap)) {
23785b87b3e6SRichard Henderson             store_memop(haddr, val, op ^ MO_BSWAP);
23795b87b3e6SRichard Henderson         } else {
23805b87b3e6SRichard Henderson             store_memop(haddr, val, op);
23815b87b3e6SRichard Henderson         }
2382eed56642SAlex Bennée         return;
2383eed56642SAlex Bennée     }
2384eed56642SAlex Bennée 
2385eed56642SAlex Bennée     /* Handle slow unaligned access (it spans two pages or IO).  */
2386eed56642SAlex Bennée     if (size > 1
2387eed56642SAlex Bennée         && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
2388eed56642SAlex Bennée                      >= TARGET_PAGE_SIZE)) {
2389eed56642SAlex Bennée     do_unaligned_access:
23906b8b622eSRichard Henderson         store_helper_unaligned(env, addr, val, retaddr, size,
23916b8b622eSRichard Henderson                                mmu_idx, memop_big_endian(op));
2392eed56642SAlex Bennée         return;
2393eed56642SAlex Bennée     }
2394eed56642SAlex Bennée 
2395eed56642SAlex Bennée     haddr = (void *)((uintptr_t)addr + entry->addend);
239680d9d1c6SRichard Henderson     store_memop(haddr, val, op);
2397eed56642SAlex Bennée }
2398eed56642SAlex Bennée 
2399f83bcecbSRichard Henderson static void __attribute__((noinline))
2400f83bcecbSRichard Henderson full_stb_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
24019002ffcbSRichard Henderson              MemOpIdx oi, uintptr_t retaddr)
2402eed56642SAlex Bennée {
2403f83bcecbSRichard Henderson     validate_memop(oi, MO_UB);
2404be5c4787STony Nguyen     store_helper(env, addr, val, oi, retaddr, MO_UB);
2405eed56642SAlex Bennée }
2406eed56642SAlex Bennée 
2407f83bcecbSRichard Henderson void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
2408f83bcecbSRichard Henderson                         MemOpIdx oi, uintptr_t retaddr)
2409f83bcecbSRichard Henderson {
2410f83bcecbSRichard Henderson     full_stb_mmu(env, addr, val, oi, retaddr);
2411f83bcecbSRichard Henderson }
2412f83bcecbSRichard Henderson 
2413f83bcecbSRichard Henderson static void full_le_stw_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
2414f83bcecbSRichard Henderson                             MemOpIdx oi, uintptr_t retaddr)
2415f83bcecbSRichard Henderson {
2416f83bcecbSRichard Henderson     validate_memop(oi, MO_LEUW);
2417f83bcecbSRichard Henderson     store_helper(env, addr, val, oi, retaddr, MO_LEUW);
2418f83bcecbSRichard Henderson }
2419f83bcecbSRichard Henderson 
2420fc1bc777SRichard Henderson void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
24219002ffcbSRichard Henderson                        MemOpIdx oi, uintptr_t retaddr)
2422eed56642SAlex Bennée {
2423f83bcecbSRichard Henderson     full_le_stw_mmu(env, addr, val, oi, retaddr);
2424f83bcecbSRichard Henderson }
2425f83bcecbSRichard Henderson 
2426f83bcecbSRichard Henderson static void full_be_stw_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
2427f83bcecbSRichard Henderson                             MemOpIdx oi, uintptr_t retaddr)
2428f83bcecbSRichard Henderson {
2429f83bcecbSRichard Henderson     validate_memop(oi, MO_BEUW);
2430f83bcecbSRichard Henderson     store_helper(env, addr, val, oi, retaddr, MO_BEUW);
2431eed56642SAlex Bennée }
2432eed56642SAlex Bennée 
2433fc1bc777SRichard Henderson void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
24349002ffcbSRichard Henderson                        MemOpIdx oi, uintptr_t retaddr)
2435eed56642SAlex Bennée {
2436f83bcecbSRichard Henderson     full_be_stw_mmu(env, addr, val, oi, retaddr);
2437f83bcecbSRichard Henderson }
2438f83bcecbSRichard Henderson 
2439f83bcecbSRichard Henderson static void full_le_stl_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
2440f83bcecbSRichard Henderson                             MemOpIdx oi, uintptr_t retaddr)
2441f83bcecbSRichard Henderson {
2442f83bcecbSRichard Henderson     validate_memop(oi, MO_LEUL);
2443f83bcecbSRichard Henderson     store_helper(env, addr, val, oi, retaddr, MO_LEUL);
2444eed56642SAlex Bennée }
2445eed56642SAlex Bennée 
2446fc1bc777SRichard Henderson void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
24479002ffcbSRichard Henderson                        MemOpIdx oi, uintptr_t retaddr)
2448eed56642SAlex Bennée {
2449f83bcecbSRichard Henderson     full_le_stl_mmu(env, addr, val, oi, retaddr);
2450f83bcecbSRichard Henderson }
2451f83bcecbSRichard Henderson 
2452f83bcecbSRichard Henderson static void full_be_stl_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
2453f83bcecbSRichard Henderson                             MemOpIdx oi, uintptr_t retaddr)
2454f83bcecbSRichard Henderson {
2455f83bcecbSRichard Henderson     validate_memop(oi, MO_BEUL);
2456f83bcecbSRichard Henderson     store_helper(env, addr, val, oi, retaddr, MO_BEUL);
2457eed56642SAlex Bennée }
2458eed56642SAlex Bennée 
2459fc1bc777SRichard Henderson void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
24609002ffcbSRichard Henderson                        MemOpIdx oi, uintptr_t retaddr)
2461eed56642SAlex Bennée {
2462f83bcecbSRichard Henderson     full_be_stl_mmu(env, addr, val, oi, retaddr);
2463eed56642SAlex Bennée }
2464eed56642SAlex Bennée 
2465fc1bc777SRichard Henderson void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
24669002ffcbSRichard Henderson                        MemOpIdx oi, uintptr_t retaddr)
2467eed56642SAlex Bennée {
2468fc313c64SFrédéric Pétrot     validate_memop(oi, MO_LEUQ);
2469fc313c64SFrédéric Pétrot     store_helper(env, addr, val, oi, retaddr, MO_LEUQ);
2470eed56642SAlex Bennée }
2471eed56642SAlex Bennée 
2472fc1bc777SRichard Henderson void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
24739002ffcbSRichard Henderson                        MemOpIdx oi, uintptr_t retaddr)
2474eed56642SAlex Bennée {
2475fc313c64SFrédéric Pétrot     validate_memop(oi, MO_BEUQ);
2476fc313c64SFrédéric Pétrot     store_helper(env, addr, val, oi, retaddr, MO_BEUQ);
2477eed56642SAlex Bennée }
2478d9bb58e5SYang Zhong 
2479d03f1408SRichard Henderson /*
2480d03f1408SRichard Henderson  * Store Helpers for cpu_ldst.h
2481d03f1408SRichard Henderson  */
2482d03f1408SRichard Henderson 
2483f83bcecbSRichard Henderson typedef void FullStoreHelper(CPUArchState *env, target_ulong addr,
2484f83bcecbSRichard Henderson                              uint64_t val, MemOpIdx oi, uintptr_t retaddr);
2485f83bcecbSRichard Henderson 
2486f83bcecbSRichard Henderson static inline void cpu_store_helper(CPUArchState *env, target_ulong addr,
2487f83bcecbSRichard Henderson                                     uint64_t val, MemOpIdx oi, uintptr_t ra,
2488f83bcecbSRichard Henderson                                     FullStoreHelper *full_store)
2489d03f1408SRichard Henderson {
2490f83bcecbSRichard Henderson     full_store(env, addr, val, oi, ra);
249137aff087SRichard Henderson     qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
2492d03f1408SRichard Henderson }
2493d03f1408SRichard Henderson 
2494f83bcecbSRichard Henderson void cpu_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
2495f83bcecbSRichard Henderson                  MemOpIdx oi, uintptr_t retaddr)
2496d03f1408SRichard Henderson {
2497f83bcecbSRichard Henderson     cpu_store_helper(env, addr, val, oi, retaddr, full_stb_mmu);
2498d03f1408SRichard Henderson }
2499d03f1408SRichard Henderson 
2500f83bcecbSRichard Henderson void cpu_stw_be_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
2501f83bcecbSRichard Henderson                     MemOpIdx oi, uintptr_t retaddr)
2502d03f1408SRichard Henderson {
2503f83bcecbSRichard Henderson     cpu_store_helper(env, addr, val, oi, retaddr, full_be_stw_mmu);
2504d03f1408SRichard Henderson }
2505d03f1408SRichard Henderson 
2506f83bcecbSRichard Henderson void cpu_stl_be_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
2507f83bcecbSRichard Henderson                     MemOpIdx oi, uintptr_t retaddr)
2508d03f1408SRichard Henderson {
2509f83bcecbSRichard Henderson     cpu_store_helper(env, addr, val, oi, retaddr, full_be_stl_mmu);
2510d03f1408SRichard Henderson }
2511d03f1408SRichard Henderson 
2512f83bcecbSRichard Henderson void cpu_stq_be_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
2513f83bcecbSRichard Henderson                     MemOpIdx oi, uintptr_t retaddr)
2514d03f1408SRichard Henderson {
2515f83bcecbSRichard Henderson     cpu_store_helper(env, addr, val, oi, retaddr, helper_be_stq_mmu);
2516b9e60257SRichard Henderson }
2517b9e60257SRichard Henderson 
2518f83bcecbSRichard Henderson void cpu_stw_le_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
2519f83bcecbSRichard Henderson                     MemOpIdx oi, uintptr_t retaddr)
2520b9e60257SRichard Henderson {
2521f83bcecbSRichard Henderson     cpu_store_helper(env, addr, val, oi, retaddr, full_le_stw_mmu);
2522b9e60257SRichard Henderson }
2523b9e60257SRichard Henderson 
2524f83bcecbSRichard Henderson void cpu_stl_le_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
2525f83bcecbSRichard Henderson                     MemOpIdx oi, uintptr_t retaddr)
2526b9e60257SRichard Henderson {
2527f83bcecbSRichard Henderson     cpu_store_helper(env, addr, val, oi, retaddr, full_le_stl_mmu);
2528b9e60257SRichard Henderson }
2529b9e60257SRichard Henderson 
2530f83bcecbSRichard Henderson void cpu_stq_le_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
2531f83bcecbSRichard Henderson                     MemOpIdx oi, uintptr_t retaddr)
2532b9e60257SRichard Henderson {
2533f83bcecbSRichard Henderson     cpu_store_helper(env, addr, val, oi, retaddr, helper_le_stq_mmu);
2534d03f1408SRichard Henderson }
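
/*
 * Usage sketch (hypothetical, mirroring the load example above):
 */
#if 0
void HELPER(my_store)(CPUArchState *env, target_ulong addr, uint32_t val)
{
    MemOpIdx oi = make_memop_idx(MO_BEUL, cpu_mmu_index(env, false));
    cpu_stl_be_mmu(env, addr, val, oi, GETPC());
}
#endif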
2535d03f1408SRichard Henderson 
2536f83bcecbSRichard Henderson #include "ldst_common.c.inc"
2537cfe04a4bSRichard Henderson 
2538be9568b4SRichard Henderson /*
2539be9568b4SRichard Henderson  * The first set of functions passes in OI and RETADDR.
2540be9568b4SRichard Henderson  * This makes them callable from other helpers.
2541be9568b4SRichard Henderson  */
2542d9bb58e5SYang Zhong 
2543d9bb58e5SYang Zhong #define ATOMIC_NAME(X) \
2544be9568b4SRichard Henderson     glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)
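
/*
 * E.g. with DATA_SIZE 4, the little-endian helpers instantiated from
 * atomic_template.h below expand ATOMIC_NAME(cmpxchg) to
 * cpu_atomic_cmpxchgl_le_mmu.
 */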
2545a754f7f3SRichard Henderson 
2546707526adSRichard Henderson #define ATOMIC_MMU_CLEANUP
2547d9bb58e5SYang Zhong 
2548139c1837SPaolo Bonzini #include "atomic_common.c.inc"
2549d9bb58e5SYang Zhong 
2550d9bb58e5SYang Zhong #define DATA_SIZE 1
2551d9bb58e5SYang Zhong #include "atomic_template.h"
2552d9bb58e5SYang Zhong 
2553d9bb58e5SYang Zhong #define DATA_SIZE 2
2554d9bb58e5SYang Zhong #include "atomic_template.h"
2555d9bb58e5SYang Zhong 
2556d9bb58e5SYang Zhong #define DATA_SIZE 4
2557d9bb58e5SYang Zhong #include "atomic_template.h"
2558d9bb58e5SYang Zhong 
2559d9bb58e5SYang Zhong #ifdef CONFIG_ATOMIC64
2560d9bb58e5SYang Zhong #define DATA_SIZE 8
2561d9bb58e5SYang Zhong #include "atomic_template.h"
2562d9bb58e5SYang Zhong #endif
2563d9bb58e5SYang Zhong 
2564e6cd4bb5SRichard Henderson #if HAVE_CMPXCHG128 || HAVE_ATOMIC128
2565d9bb58e5SYang Zhong #define DATA_SIZE 16
2566d9bb58e5SYang Zhong #include "atomic_template.h"
2567d9bb58e5SYang Zhong #endif
2568d9bb58e5SYang Zhong 
2569d9bb58e5SYang Zhong /* Code access functions.  */
2570d9bb58e5SYang Zhong 
2571fc4120a3SRichard Henderson static uint64_t full_ldub_code(CPUArchState *env, target_ulong addr,
25729002ffcbSRichard Henderson                                MemOpIdx oi, uintptr_t retaddr)
25732dd92606SRichard Henderson {
2574fc4120a3SRichard Henderson     return load_helper(env, addr, oi, retaddr, MO_8, true, full_ldub_code);
25752dd92606SRichard Henderson }
25762dd92606SRichard Henderson 
2577fc4120a3SRichard Henderson uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr)
2578eed56642SAlex Bennée {
25799002ffcbSRichard Henderson     MemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(env, true));
2580fc4120a3SRichard Henderson     return full_ldub_code(env, addr, oi, 0);
25812dd92606SRichard Henderson }
25822dd92606SRichard Henderson 
2583fc4120a3SRichard Henderson static uint64_t full_lduw_code(CPUArchState *env, target_ulong addr,
25849002ffcbSRichard Henderson                                MemOpIdx oi, uintptr_t retaddr)
25854cef72d0SAlex Bennée {
2586fc4120a3SRichard Henderson     return load_helper(env, addr, oi, retaddr, MO_TEUW, true, full_lduw_code);
25874cef72d0SAlex Bennée }
25884cef72d0SAlex Bennée 
2589fc4120a3SRichard Henderson uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr)
25902dd92606SRichard Henderson {
25919002ffcbSRichard Henderson     MemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(env, true));
2592fc4120a3SRichard Henderson     return full_lduw_code(env, addr, oi, 0);
2593eed56642SAlex Bennée }
2594d9bb58e5SYang Zhong 
2595fc4120a3SRichard Henderson static uint64_t full_ldl_code(CPUArchState *env, target_ulong addr,
25969002ffcbSRichard Henderson                               MemOpIdx oi, uintptr_t retaddr)
2597eed56642SAlex Bennée {
2598fc4120a3SRichard Henderson     return load_helper(env, addr, oi, retaddr, MO_TEUL, true, full_ldl_code);
25992dd92606SRichard Henderson }
26002dd92606SRichard Henderson 
2601fc4120a3SRichard Henderson uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr)
26024cef72d0SAlex Bennée {
26039002ffcbSRichard Henderson     MemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, true));
2604fc4120a3SRichard Henderson     return full_ldl_code(env, addr, oi, 0);
26054cef72d0SAlex Bennée }
26064cef72d0SAlex Bennée 
2607fc4120a3SRichard Henderson static uint64_t full_ldq_code(CPUArchState *env, target_ulong addr,
26089002ffcbSRichard Henderson                               MemOpIdx oi, uintptr_t retaddr)
26092dd92606SRichard Henderson {
2610fc313c64SFrédéric Pétrot     return load_helper(env, addr, oi, retaddr, MO_TEUQ, true, full_ldq_code);
2611eed56642SAlex Bennée }
2612d9bb58e5SYang Zhong 
2613fc4120a3SRichard Henderson uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr)
2614eed56642SAlex Bennée {
2615fc313c64SFrédéric Pétrot     MemOpIdx oi = make_memop_idx(MO_TEUQ, cpu_mmu_index(env, true));
2616fc4120a3SRichard Henderson     return full_ldq_code(env, addr, oi, 0);
2617eed56642SAlex Bennée }
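
/*
 * Usage sketch: target translators fetch opcodes through these, e.g.
 * (hypothetical fragment, assuming a fixed 32-bit instruction word and
 * the usual DisasContextBase "pc_next" field):
 */
#if 0
    uint32_t insn = cpu_ldl_code(env, ctx->base.pc_next);
#endif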
2618