/*
 *  Common CPU TLB handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "hw/core/tcg-cpu-ops.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
#include "qemu/error-report.h"
#include "exec/log.h"
#include "exec/helper-proto.h"
#include "qemu/atomic.h"
#include "qemu/atomic128.h"
#include "exec/translate-all.h"
#include "trace/trace-root.h"
#include "tb-hash.h"
#include "internal.h"
#ifdef CONFIG_PLUGIN
#include "qemu/plugin-memory.h"
#endif
#include "tcg/tcg-ldst.h"

/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif

#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)

#define assert_cpu_is_self(cpu) do {                              \
        if (DEBUG_TLB_GATE) {                                     \
            g_assert(!(cpu)->created || qemu_cpu_is_self(cpu));   \
        }                                                         \
    } while (0)

/* run_on_cpu_data.target_ptr should always be big enough for a
 * target_ulong even on 32-bit builds. */
QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));

/* We currently can't handle more than 16 bits in the MMUIDX bitmask. */
QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
#define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)
static inline size_t tlb_n_entries(CPUTLBDescFast *fast)
{
    return (fast->mask >> CPU_TLB_ENTRY_BITS) + 1;
}

static inline size_t sizeof_tlb(CPUTLBDescFast *fast)
{
    return fast->mask + (1 << CPU_TLB_ENTRY_BITS);
}

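/*
 * A worked example of the encoding above (illustrative, assuming
 * CPU_TLB_ENTRY_BITS == 5, i.e. a 32-byte CPUTLBEntry): a 1024-entry
 * TLB is stored as fast->mask == 1023 << 5 == 0x7fe0, so tlb_n_entries()
 * recovers (0x7fe0 >> 5) + 1 == 1024 and sizeof_tlb() yields
 * 0x7fe0 + 32 == 32 KiB.  The mask is kept in this byte-offset form so
 * it can be applied directly to an address-derived byte offset into
 * fast->table.
 */
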
static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
                             size_t max_entries)
{
    desc->window_begin_ns = ns;
    desc->window_max_entries = max_entries;
}

static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
{
    unsigned int i, i0 = tb_jmp_cache_hash_page(page_addr);

    for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
        qatomic_set(&cpu->tb_jmp_cache[i0 + i], NULL);
    }
}

/**
 * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
 * @desc: The CPUTLBDesc portion of the TLB
 * @fast: The CPUTLBDescFast portion of the same TLB
 *
 * Called with tlb_c.lock held.
 *
 * We have two main constraints when resizing a TLB: (1) we only resize it
 * on a TLB flush (otherwise we'd have to take a perf hit by either rehashing
 * the array or unnecessarily flushing it), which means we do not control how
 * frequently the resizing can occur; (2) we don't have access to the guest's
 * future scheduling decisions, and therefore have to decide the magnitude of
 * the resize based on past observations.
 *
 * In general, a memory-hungry process can benefit greatly from an appropriately
 * sized TLB, since a guest TLB miss is very expensive. This doesn't mean that
 * we should just make the TLB as large as possible; while an oversized TLB
 * results in minimal TLB miss rates, it also takes longer to be flushed
 * (flushes can be _very_ frequent), and the reduced locality can also hurt
 * performance.
 *
 * To achieve near-optimal performance for all kinds of workloads, we:
 *
 * 1. Aggressively increase the size of the TLB when the use rate of the
 * TLB being flushed is high, since it is likely that in the near future this
 * memory-hungry process will execute again, and its memory hungriness will
 * probably be similar.
 *
 * 2. Slowly reduce the size of the TLB as the use rate declines over a
 * reasonably large time window. The rationale is that if in such a time window
 * we have not observed a high TLB use rate, it is likely that we won't observe
 * it in the near future. In that case, once a time window expires we downsize
 * the TLB to match the maximum use rate observed in the window.
 *
 * 3. Try to keep the maximum use rate in a time window in the 30-70% range,
 * since in that range performance is likely near-optimal. Recall that the TLB
 * is direct mapped, so we want the use rate to be low (or at least not too
 * high), since otherwise we are likely to have a significant amount of
 * conflict misses.
 */
static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast,
                                  int64_t now)
{
    size_t old_size = tlb_n_entries(fast);
    size_t rate;
    size_t new_size = old_size;
    int64_t window_len_ms = 100;
    int64_t window_len_ns = window_len_ms * 1000 * 1000;
    bool window_expired = now > desc->window_begin_ns + window_len_ns;

    if (desc->n_used_entries > desc->window_max_entries) {
        desc->window_max_entries = desc->n_used_entries;
    }
    rate = desc->window_max_entries * 100 / old_size;

    if (rate > 70) {
        new_size = MIN(old_size << 1, 1 << CPU_TLB_DYN_MAX_BITS);
    } else if (rate < 30 && window_expired) {
        size_t ceil = pow2ceil(desc->window_max_entries);
        size_t expected_rate = desc->window_max_entries * 100 / ceil;

        /*
         * Avoid undersizing when the max number of entries seen is just below
         * a pow2. For instance, if max_entries == 1025, the expected use rate
         * would be 1025/2048==50%. However, if max_entries == 1023, we'd get
         * 1023/1024==99.9% use rate, so we'd likely end up doubling the size
         * later. Thus, make sure that the expected use rate remains below 70%.
         * (And since we double the size, that means the lowest rate we'd
         * expect to get is 35%, which is still in the 30-70% range where
         * we consider that the size is appropriate.)
         */
        if (expected_rate > 70) {
            ceil *= 2;
        }
        new_size = MAX(ceil, 1 << CPU_TLB_DYN_MIN_BITS);
    }

    if (new_size == old_size) {
        if (window_expired) {
            tlb_window_reset(desc, now, desc->n_used_entries);
        }
        return;
    }

    g_free(fast->table);
    g_free(desc->fulltlb);

    tlb_window_reset(desc, now, 0);
    /* desc->n_used_entries is cleared by the caller */
    fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
    fast->table = g_try_new(CPUTLBEntry, new_size);
    desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size);

    /*
     * If the allocations fail, try smaller sizes. We just freed some
     * memory, so going back to half of new_size has a good chance of working.
     * Increased memory pressure elsewhere in the system might cause the
     * allocations to fail though, so we progressively reduce the allocation
     * size, aborting if we cannot even allocate the smallest TLB we support.
     */
    while (fast->table == NULL || desc->fulltlb == NULL) {
        if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
            error_report("%s: %s", __func__, strerror(errno));
            abort();
        }
        new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS);
        fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;

        g_free(fast->table);
        g_free(desc->fulltlb);
        fast->table = g_try_new(CPUTLBEntry, new_size);
        desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size);
    }
}

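/*
 * Worked example of the sizing policy (illustrative numbers, not taken
 * from any workload): with old_size == 1024 and window_max_entries ==
 * 801, rate == 78, so the TLB doubles to 2048 entries.  With
 * window_max_entries == 200 and an expired window, rate == 19;
 * pow2ceil(200) == 256 would give expected_rate == 78, which exceeds
 * 70, so ceil doubles to 512 before being clamped against
 * CPU_TLB_DYN_MIN_BITS.
 */
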
static void tlb_mmu_flush_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast)
{
    desc->n_used_entries = 0;
    desc->large_page_addr = -1;
    desc->large_page_mask = -1;
    desc->vindex = 0;
    memset(fast->table, -1, sizeof_tlb(fast));
    memset(desc->vtable, -1, sizeof(desc->vtable));
}

static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx,
                                        int64_t now)
{
    CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
    CPUTLBDescFast *fast = &env_tlb(env)->f[mmu_idx];

    tlb_mmu_resize_locked(desc, fast, now);
    tlb_mmu_flush_locked(desc, fast);
}

static void tlb_mmu_init(CPUTLBDesc *desc, CPUTLBDescFast *fast, int64_t now)
{
    size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;

    tlb_window_reset(desc, now, 0);
    desc->n_used_entries = 0;
    fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
    fast->table = g_new(CPUTLBEntry, n_entries);
    desc->fulltlb = g_new(CPUTLBEntryFull, n_entries);
    tlb_mmu_flush_locked(desc, fast);
}

static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
{
    env_tlb(env)->d[mmu_idx].n_used_entries++;
}

static inline void tlb_n_used_entries_dec(CPUArchState *env, uintptr_t mmu_idx)
{
    env_tlb(env)->d[mmu_idx].n_used_entries--;
}

void tlb_init(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;
    int64_t now = get_clock_realtime();
    int i;

    qemu_spin_init(&env_tlb(env)->c.lock);

    /* All TLBs are initialized in the flushed state. */
    env_tlb(env)->c.dirty = 0;

    for (i = 0; i < NB_MMU_MODES; i++) {
        tlb_mmu_init(&env_tlb(env)->d[i], &env_tlb(env)->f[i], now);
    }
}

void tlb_destroy(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;
    int i;

    qemu_spin_destroy(&env_tlb(env)->c.lock);
    for (i = 0; i < NB_MMU_MODES; i++) {
        CPUTLBDesc *desc = &env_tlb(env)->d[i];
        CPUTLBDescFast *fast = &env_tlb(env)->f[i];

        g_free(fast->table);
        g_free(desc->fulltlb);
    }
}

/* flush_all_helper: run fn across all cpus
 *
 * fn is queued as asynchronous work on every cpu other than src; the
 * caller is responsible for running it on src as well.  When the caller
 * instead queues it on src as "safe" work, that creates a
 * synchronisation point where all queued work will be finished before
 * execution starts again.
 */
static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
                             run_on_cpu_data d)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, fn, d);
        }
    }
}

void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
{
    CPUState *cpu;
    size_t full = 0, part = 0, elide = 0;

    CPU_FOREACH(cpu) {
        CPUArchState *env = cpu->env_ptr;

        full += qatomic_read(&env_tlb(env)->c.full_flush_count);
        part += qatomic_read(&env_tlb(env)->c.part_flush_count);
        elide += qatomic_read(&env_tlb(env)->c.elide_flush_count);
    }
    *pfull = full;
    *ppart = part;
    *pelide = elide;
}

static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    uint16_t asked = data.host_int;
    uint16_t all_dirty, work, to_clean;
    int64_t now = get_clock_realtime();

    assert_cpu_is_self(cpu);

    tlb_debug("mmu_idx:0x%04" PRIx16 "\n", asked);

    qemu_spin_lock(&env_tlb(env)->c.lock);

    all_dirty = env_tlb(env)->c.dirty;
    to_clean = asked & all_dirty;
    all_dirty &= ~to_clean;
    env_tlb(env)->c.dirty = all_dirty;

    for (work = to_clean; work != 0; work &= work - 1) {
        int mmu_idx = ctz32(work);
        tlb_flush_one_mmuidx_locked(env, mmu_idx, now);
    }

    qemu_spin_unlock(&env_tlb(env)->c.lock);

    cpu_tb_jmp_cache_clear(cpu);

    if (to_clean == ALL_MMUIDX_BITS) {
        qatomic_set(&env_tlb(env)->c.full_flush_count,
                    env_tlb(env)->c.full_flush_count + 1);
    } else {
        qatomic_set(&env_tlb(env)->c.part_flush_count,
                    env_tlb(env)->c.part_flush_count + ctpop16(to_clean));
        if (to_clean != asked) {
            qatomic_set(&env_tlb(env)->c.elide_flush_count,
                        env_tlb(env)->c.elide_flush_count +
                        ctpop16(asked & ~to_clean));
        }
    }
}

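/*
 * Note on the bitmask walk above (illustrative): "work &= work - 1"
 * clears the lowest set bit on each iteration, so with to_clean == 0x05
 * the loop visits ctz32(0x05) == 0, then ctz32(0x04) == 2, flushing
 * only mmu_idx 0 and 2; indexes that were asked for but not dirty are
 * elided and merely counted.
 */
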
void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
    tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);

    if (cpu->created && !qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
                         RUN_ON_CPU_HOST_INT(idxmap));
    } else {
        tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap));
    }
}

void tlb_flush(CPUState *cpu)
{
    tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS);
}

void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus(src_cpu, ALL_MMUIDX_BITS);
}

void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS);
}

static bool tlb_hit_page_mask_anyprot(CPUTLBEntry *tlb_entry,
                                      target_ulong page, target_ulong mask)
{
    page &= mask;
    mask &= TARGET_PAGE_MASK | TLB_INVALID_MASK;

    return (page == (tlb_entry->addr_read & mask) ||
            page == (tlb_addr_write(tlb_entry) & mask) ||
            page == (tlb_entry->addr_code & mask));
}

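/*
 * Sketch of the masked compare above, under assumed values
 * (bits == 32, TARGET_PAGE_BITS == 12): the caller passes
 * mask == MAKE_64BIT_MASK(0, 32) == 0xffffffff, which is then reduced
 * to the page-number bits plus TLB_INVALID_MASK.  An entry whose
 * addr_read is 0xffff888812345000 matches page 0x12345000 because the
 * insignificant top bits are masked away, while an invalidated entry
 * keeps TLB_INVALID_MASK set under the mask and so can never compare
 * equal to a (page-aligned, hence valid) page argument.
 */
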
static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry,
                                        target_ulong page)
{
    return tlb_hit_page_mask_anyprot(tlb_entry, page, -1);
}

/**
 * tlb_entry_is_empty - return true if the entry is not in use
 * @te: pointer to CPUTLBEntry
 */
static inline bool tlb_entry_is_empty(const CPUTLBEntry *te)
{
    return te->addr_read == -1 && te->addr_write == -1 && te->addr_code == -1;
}

/* Called with tlb_c.lock held */
static bool tlb_flush_entry_mask_locked(CPUTLBEntry *tlb_entry,
                                        target_ulong page,
                                        target_ulong mask)
{
    if (tlb_hit_page_mask_anyprot(tlb_entry, page, mask)) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
        return true;
    }
    return false;
}

static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry,
                                          target_ulong page)
{
    return tlb_flush_entry_mask_locked(tlb_entry, page, -1);
}

/* Called with tlb_c.lock held */
static void tlb_flush_vtlb_page_mask_locked(CPUArchState *env, int mmu_idx,
                                            target_ulong page,
                                            target_ulong mask)
{
    CPUTLBDesc *d = &env_tlb(env)->d[mmu_idx];
    int k;

    assert_cpu_is_self(env_cpu(env));
    for (k = 0; k < CPU_VTLB_SIZE; k++) {
        if (tlb_flush_entry_mask_locked(&d->vtable[k], page, mask)) {
            tlb_n_used_entries_dec(env, mmu_idx);
        }
    }
}

static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx,
                                              target_ulong page)
{
    tlb_flush_vtlb_page_mask_locked(env, mmu_idx, page, -1);
}

static void tlb_flush_page_locked(CPUArchState *env, int midx,
                                  target_ulong page)
{
    target_ulong lp_addr = env_tlb(env)->d[midx].large_page_addr;
    target_ulong lp_mask = env_tlb(env)->d[midx].large_page_mask;

    /* Check if we need to flush due to large pages.  */
    if ((page & lp_mask) == lp_addr) {
        tlb_debug("forcing full flush midx %d ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  midx, lp_addr, lp_mask);
        tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
    } else {
        if (tlb_flush_entry_locked(tlb_entry(env, midx, page), page)) {
            tlb_n_used_entries_dec(env, midx);
        }
        tlb_flush_vtlb_page_locked(env, midx, page);
    }
}

/**
 * tlb_flush_page_by_mmuidx_async_0:
 * @cpu: cpu on which to flush
 * @addr: page of virtual address to flush
 * @idxmap: set of mmu_idx to flush
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, flush one page
 * at @addr from the tlbs indicated by @idxmap from @cpu.
 */
static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu,
                                             target_ulong addr,
                                             uint16_t idxmap)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("page addr:" TARGET_FMT_lx " mmu_map:0x%x\n", addr, idxmap);

    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if ((idxmap >> mmu_idx) & 1) {
            tlb_flush_page_locked(env, mmu_idx, addr);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);

    /*
     * Discard jump cache entries for any tb which might potentially
     * overlap the flushed page, which includes the previous page.
     */
    tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
    tb_jmp_cache_clear_page(cpu, addr);
}

/**
 * tlb_flush_page_by_mmuidx_async_1:
 * @cpu: cpu on which to flush
 * @data: encoded addr + idxmap
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, called through
 * async_run_on_cpu.  The idxmap parameter is encoded in the page
 * offset of the target_ptr field.  This limits the set of mmu_idx
 * that can be passed via this method.
 */
static void tlb_flush_page_by_mmuidx_async_1(CPUState *cpu,
                                             run_on_cpu_data data)
{
    target_ulong addr_and_idxmap = (target_ulong) data.target_ptr;
    target_ulong addr = addr_and_idxmap & TARGET_PAGE_MASK;
    uint16_t idxmap = addr_and_idxmap & ~TARGET_PAGE_MASK;

    tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
}

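/*
 * Illustration of the encoding decoded above, assuming
 * TARGET_PAGE_BITS == 12: flushing page 0xffff8000 for idxmap 0x5 is
 * passed as the single value 0xffff8005, since a page-aligned address
 * has its low TARGET_PAGE_BITS free to carry the idxmap.  This only
 * works while idxmap < TARGET_PAGE_SIZE; larger maps take the
 * heap-allocated TLBFlushPageByMMUIdxData path below.
 */
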
typedef struct {
    target_ulong addr;
    uint16_t idxmap;
} TLBFlushPageByMMUIdxData;

/**
 * tlb_flush_page_by_mmuidx_async_2:
 * @cpu: cpu on which to flush
 * @data: allocated addr + idxmap
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, called through
 * async_run_on_cpu.  The addr+idxmap parameters are stored in a
 * TLBFlushPageByMMUIdxData structure that has been allocated
 * specifically for this helper.  Free the structure when done.
 */
static void tlb_flush_page_by_mmuidx_async_2(CPUState *cpu,
                                             run_on_cpu_data data)
{
    TLBFlushPageByMMUIdxData *d = data.host_ptr;

    tlb_flush_page_by_mmuidx_async_0(cpu, d->addr, d->idxmap);
    g_free(d);
}

void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
{
    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    if (qemu_cpu_is_self(cpu)) {
        tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
    } else if (idxmap < TARGET_PAGE_SIZE) {
        /*
         * Most targets have only a few mmu_idx.  In the case where
         * we can stuff idxmap into the low TARGET_PAGE_BITS, avoid
         * allocating memory for this operation.
         */
        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        TLBFlushPageByMMUIdxData *d = g_new(TLBFlushPageByMMUIdxData, 1);

        /* Otherwise allocate a structure, freed by the worker.  */
        d->addr = addr;
        d->idxmap = idxmap;
        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_2,
                         RUN_ON_CPU_HOST_PTR(d));
    }
}

void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
    tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS);
}

void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr,
                                       uint16_t idxmap)
{
    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    /*
     * Allocate memory to hold addr+idxmap only when needed.
     * See tlb_flush_page_by_mmuidx for details.
     */
    if (idxmap < TARGET_PAGE_SIZE) {
        flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        CPUState *dst_cpu;

        /* Allocate a separate data block for each destination cpu.  */
        CPU_FOREACH(dst_cpu) {
            if (dst_cpu != src_cpu) {
                TLBFlushPageByMMUIdxData *d
                    = g_new(TLBFlushPageByMMUIdxData, 1);

                d->addr = addr;
                d->idxmap = idxmap;
                async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
                                 RUN_ON_CPU_HOST_PTR(d));
            }
        }
    }

    tlb_flush_page_by_mmuidx_async_0(src_cpu, addr, idxmap);
}

void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
{
    tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS);
}

void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                              target_ulong addr,
                                              uint16_t idxmap)
{
    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    /*
     * Allocate memory to hold addr+idxmap only when needed.
     * See tlb_flush_page_by_mmuidx for details.
     */
    if (idxmap < TARGET_PAGE_SIZE) {
        flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
        async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                              RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        CPUState *dst_cpu;
        TLBFlushPageByMMUIdxData *d;

        /* Allocate a separate data block for each destination cpu.  */
        CPU_FOREACH(dst_cpu) {
            if (dst_cpu != src_cpu) {
                d = g_new(TLBFlushPageByMMUIdxData, 1);
                d->addr = addr;
                d->idxmap = idxmap;
                async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
                                 RUN_ON_CPU_HOST_PTR(d));
            }
        }

        d = g_new(TLBFlushPageByMMUIdxData, 1);
        d->addr = addr;
        d->idxmap = idxmap;
        async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_2,
                              RUN_ON_CPU_HOST_PTR(d));
    }
}

void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr)
{
    tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
}

static void tlb_flush_range_locked(CPUArchState *env, int midx,
                                   target_ulong addr, target_ulong len,
                                   unsigned bits)
{
    CPUTLBDesc *d = &env_tlb(env)->d[midx];
    CPUTLBDescFast *f = &env_tlb(env)->f[midx];
    target_ulong mask = MAKE_64BIT_MASK(0, bits);

    /*
     * If @bits is smaller than the tlb size, there may be multiple entries
     * within the TLB; otherwise all addresses that match under @mask hit
     * the same TLB entry.
     * TODO: Perhaps allow bits to be a few bits less than the size.
     * For now, just flush the entire TLB.
     *
     * If @len is larger than the tlb size, then it will take longer to
     * test all of the entries in the TLB than it will to flush it all.
     */
    if (mask < f->mask || len > f->mask) {
        tlb_debug("forcing full flush midx %d ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx "+" TARGET_FMT_lx ")\n",
                  midx, addr, mask, len);
        tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
        return;
    }

    /*
     * Check if we need to flush due to large pages.
     * Because large_page_mask contains all 1's from the msb,
     * we only need to test the end of the range.
     */
    if (((addr + len - 1) & d->large_page_mask) == d->large_page_addr) {
        tlb_debug("forcing full flush midx %d ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  midx, d->large_page_addr, d->large_page_mask);
        tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
        return;
    }

    for (target_ulong i = 0; i < len; i += TARGET_PAGE_SIZE) {
        target_ulong page = addr + i;
        CPUTLBEntry *entry = tlb_entry(env, midx, page);

        if (tlb_flush_entry_mask_locked(entry, page, mask)) {
            tlb_n_used_entries_dec(env, midx);
        }
        tlb_flush_vtlb_page_mask_locked(env, midx, page, mask);
    }
}

typedef struct {
    target_ulong addr;
    target_ulong len;
    uint16_t idxmap;
    uint16_t bits;
} TLBFlushRangeData;

static void tlb_flush_range_by_mmuidx_async_0(CPUState *cpu,
                                              TLBFlushRangeData d)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("range:" TARGET_FMT_lx "/%u+" TARGET_FMT_lx " mmu_map:0x%x\n",
              d.addr, d.bits, d.len, d.idxmap);

    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if ((d.idxmap >> mmu_idx) & 1) {
            tlb_flush_range_locked(env, mmu_idx, d.addr, d.len, d.bits);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);

    /*
     * If the length is larger than the jump cache size, then it will take
     * longer to clear each entry individually than it will to clear it all.
     */
    if (d.len >= (TARGET_PAGE_SIZE * TB_JMP_CACHE_SIZE)) {
        cpu_tb_jmp_cache_clear(cpu);
        return;
    }

    /*
     * Discard jump cache entries for any tb which might potentially
     * overlap the flushed pages, which includes the previous page.
     */
    d.addr -= TARGET_PAGE_SIZE;
    for (target_ulong i = 0, n = d.len / TARGET_PAGE_SIZE + 1; i < n; i++) {
        tb_jmp_cache_clear_page(cpu, d.addr);
        d.addr += TARGET_PAGE_SIZE;
    }
}

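/*
 * For example (illustrative): flushing a two-page range clears
 * n == 2 + 1 == 3 pages from the jump cache, including the page
 * immediately before the range, because a TB that begins on that
 * previous page may extend into the first flushed page.
 */
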
static void tlb_flush_range_by_mmuidx_async_1(CPUState *cpu,
                                              run_on_cpu_data data)
{
    TLBFlushRangeData *d = data.host_ptr;
    tlb_flush_range_by_mmuidx_async_0(cpu, *d);
    g_free(d);
}

void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr,
                               target_ulong len, uint16_t idxmap,
                               unsigned bits)
{
    TLBFlushRangeData d;

    /*
     * If all bits are significant, and len is small,
     * this devolves to tlb_flush_page.
     */
    if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
        tlb_flush_page_by_mmuidx(cpu, addr, idxmap);
        return;
    }
    /* If no page bits are significant, this devolves to tlb_flush. */
    if (bits < TARGET_PAGE_BITS) {
        tlb_flush_by_mmuidx(cpu, idxmap);
        return;
    }

    /* This should already be page aligned */
    d.addr = addr & TARGET_PAGE_MASK;
    d.len = len;
    d.idxmap = idxmap;
    d.bits = bits;

    if (qemu_cpu_is_self(cpu)) {
        tlb_flush_range_by_mmuidx_async_0(cpu, d);
    } else {
        /* Otherwise allocate a structure, freed by the worker.  */
        TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
        async_run_on_cpu(cpu, tlb_flush_range_by_mmuidx_async_1,
                         RUN_ON_CPU_HOST_PTR(p));
    }
}

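/*
 * Hypothetical usage sketch: a target implementing an invalidate-range
 * operation over a 64 KiB region, with all address bits significant,
 * might call
 *
 *     tlb_flush_range_by_mmuidx(cpu, addr, 0x10000,
 *                               ALL_MMUIDX_BITS, TARGET_LONG_BITS);
 *
 * which takes the range path above, whereas len <= TARGET_PAGE_SIZE
 * with bits >= TARGET_LONG_BITS devolves to tlb_flush_page_by_mmuidx.
 */
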
void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr,
                                   uint16_t idxmap, unsigned bits)
{
    tlb_flush_range_by_mmuidx(cpu, addr, TARGET_PAGE_SIZE, idxmap, bits);
}

void tlb_flush_range_by_mmuidx_all_cpus(CPUState *src_cpu,
                                        target_ulong addr, target_ulong len,
                                        uint16_t idxmap, unsigned bits)
{
    TLBFlushRangeData d;
    CPUState *dst_cpu;

    /*
     * If all bits are significant, and len is small,
     * this devolves to tlb_flush_page.
     */
    if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
        tlb_flush_page_by_mmuidx_all_cpus(src_cpu, addr, idxmap);
        return;
    }
    /* If no page bits are significant, this devolves to tlb_flush. */
    if (bits < TARGET_PAGE_BITS) {
        tlb_flush_by_mmuidx_all_cpus(src_cpu, idxmap);
        return;
    }

    /* This should already be page aligned */
    d.addr = addr & TARGET_PAGE_MASK;
    d.len = len;
    d.idxmap = idxmap;
    d.bits = bits;

    /* Allocate a separate data block for each destination cpu.  */
    CPU_FOREACH(dst_cpu) {
        if (dst_cpu != src_cpu) {
            TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
            async_run_on_cpu(dst_cpu,
                             tlb_flush_range_by_mmuidx_async_1,
                             RUN_ON_CPU_HOST_PTR(p));
        }
    }

    tlb_flush_range_by_mmuidx_async_0(src_cpu, d);
}

void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu,
                                            target_ulong addr,
                                            uint16_t idxmap, unsigned bits)
{
    tlb_flush_range_by_mmuidx_all_cpus(src_cpu, addr, TARGET_PAGE_SIZE,
                                       idxmap, bits);
}

void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                               target_ulong addr,
                                               target_ulong len,
                                               uint16_t idxmap,
                                               unsigned bits)
{
    TLBFlushRangeData d, *p;
    CPUState *dst_cpu;

    /*
     * If all bits are significant, and len is small,
     * this devolves to tlb_flush_page.
     */
    if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
        tlb_flush_page_by_mmuidx_all_cpus_synced(src_cpu, addr, idxmap);
        return;
    }
    /* If no page bits are significant, this devolves to tlb_flush. */
    if (bits < TARGET_PAGE_BITS) {
        tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, idxmap);
        return;
    }

    /* This should already be page aligned */
    d.addr = addr & TARGET_PAGE_MASK;
    d.len = len;
    d.idxmap = idxmap;
    d.bits = bits;

    /* Allocate a separate data block for each destination cpu.  */
    CPU_FOREACH(dst_cpu) {
        if (dst_cpu != src_cpu) {
            p = g_memdup(&d, sizeof(d));
            async_run_on_cpu(dst_cpu, tlb_flush_range_by_mmuidx_async_1,
                             RUN_ON_CPU_HOST_PTR(p));
        }
    }

    p = g_memdup(&d, sizeof(d));
    async_safe_run_on_cpu(src_cpu, tlb_flush_range_by_mmuidx_async_1,
                          RUN_ON_CPU_HOST_PTR(p));
}

void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                                   target_ulong addr,
                                                   uint16_t idxmap,
                                                   unsigned bits)
{
    tlb_flush_range_by_mmuidx_all_cpus_synced(src_cpu, addr, TARGET_PAGE_SIZE,
                                              idxmap, bits);
}

953d9bb58e5SYang Zhong /* Update the TLBs so that writes to code in the RAM page at 'ram_addr'
954d9bb58e5SYang Zhong    can be detected. */
955d9bb58e5SYang Zhong void tlb_protect_code(ram_addr_t ram_addr)
956d9bb58e5SYang Zhong {
95793b99616SRichard Henderson     cpu_physical_memory_test_and_clear_dirty(ram_addr & TARGET_PAGE_MASK,
95893b99616SRichard Henderson                                              TARGET_PAGE_SIZE,
959d9bb58e5SYang Zhong                                              DIRTY_MEMORY_CODE);
960d9bb58e5SYang Zhong }
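
/*
 * Clearing DIRTY_MEMORY_CODE above has two effects: existing TLB entries
 * covering the page are marked TLB_NOTDIRTY (via tlb_reset_dirty), and
 * later refills see the page as clean and install TLB_NOTDIRTY themselves.
 * Either way, subsequent writes are trapped into notdirty_write().
 */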
961d9bb58e5SYang Zhong 
962d9bb58e5SYang Zhong /* Update the TLB so that writes in the physical page at 'ram_addr' are no
963d9bb58e5SYang Zhong    longer tested for self-modifying code. */
964d9bb58e5SYang Zhong void tlb_unprotect_code(ram_addr_t ram_addr)
965d9bb58e5SYang Zhong {
966d9bb58e5SYang Zhong     cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
967d9bb58e5SYang Zhong }
968d9bb58e5SYang Zhong 
969d9bb58e5SYang Zhong 
970d9bb58e5SYang Zhong /*
971d9bb58e5SYang Zhong  * Dirty write flag handling
972d9bb58e5SYang Zhong  *
973d9bb58e5SYang Zhong  * When the TCG code writes to a location it looks up the address in
974d9bb58e5SYang Zhong  * the TLB and uses that data to compute the final address. If any of
975d9bb58e5SYang Zhong  * the lower bits of the address are set then the slow path is forced.
976d9bb58e5SYang Zhong  * There are a number of reasons to do this but for normal RAM the
977d9bb58e5SYang Zhong  * There are a number of reasons to do this, but for normal RAM the
978d9bb58e5SYang Zhong  * most common is detecting writes to code regions which may invalidate
979d9bb58e5SYang Zhong  *
98071aec354SEmilio G. Cota  * Other vCPUs might be reading their TLBs during guest execution, so we update
981d73415a3SStefan Hajnoczi  * te->addr_write with qatomic_set. We don't need to worry about this for
98271aec354SEmilio G. Cota  * oversized guests as MTTCG is disabled for them.
983d9bb58e5SYang Zhong  *
98453d28455SRichard Henderson  * Called with tlb_c.lock held.
985d9bb58e5SYang Zhong  */
98671aec354SEmilio G. Cota static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
98771aec354SEmilio G. Cota                                          uintptr_t start, uintptr_t length)
988d9bb58e5SYang Zhong {
989d9bb58e5SYang Zhong     uintptr_t addr = tlb_entry->addr_write;
990d9bb58e5SYang Zhong 
9917b0d792cSRichard Henderson     if ((addr & (TLB_INVALID_MASK | TLB_MMIO |
9927b0d792cSRichard Henderson                  TLB_DISCARD_WRITE | TLB_NOTDIRTY)) == 0) {
993d9bb58e5SYang Zhong         addr &= TARGET_PAGE_MASK;
994d9bb58e5SYang Zhong         addr += tlb_entry->addend;
995d9bb58e5SYang Zhong         if ((addr - start) < length) {
996d9bb58e5SYang Zhong #if TCG_OVERSIZED_GUEST
99771aec354SEmilio G. Cota             tlb_entry->addr_write |= TLB_NOTDIRTY;
998d9bb58e5SYang Zhong #else
999d73415a3SStefan Hajnoczi             qatomic_set(&tlb_entry->addr_write,
100071aec354SEmilio G. Cota                        tlb_entry->addr_write | TLB_NOTDIRTY);
1001d9bb58e5SYang Zhong #endif
1002d9bb58e5SYang Zhong         }
100371aec354SEmilio G. Cota     }
100471aec354SEmilio G. Cota }
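
/*
 * Example: once TLB_NOTDIRTY is set in addr_write, the page-aligned
 * address comparison in the TCG fast path no longer matches, so the
 * next store to the page is forced through the slow path, where
 * notdirty_write() can invalidate any TBs generated from it.
 */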
100571aec354SEmilio G. Cota 
100671aec354SEmilio G. Cota /*
100753d28455SRichard Henderson  * Called with tlb_c.lock held.
100871aec354SEmilio G. Cota  * Called only from the vCPU context, i.e. the TLB's owner thread.
100971aec354SEmilio G. Cota  */
101071aec354SEmilio G. Cota static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s)
101171aec354SEmilio G. Cota {
101271aec354SEmilio G. Cota     *d = *s;
101371aec354SEmilio G. Cota }
1014d9bb58e5SYang Zhong 
1015d9bb58e5SYang Zhong /* This is a cross-vCPU call (i.e. another vCPU resetting the flags of
101671aec354SEmilio G. Cota  * the target vCPU).
101753d28455SRichard Henderson  * We must take tlb_c.lock to avoid racing with another vCPU update. The only
101871aec354SEmilio G. Cota  * thing actually updated is the target TLB entry ->addr_write flags.
1019d9bb58e5SYang Zhong  */
1020d9bb58e5SYang Zhong void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
1021d9bb58e5SYang Zhong {
1022d9bb58e5SYang Zhong     CPUArchState *env;
1023d9bb58e5SYang Zhong 
1024d9bb58e5SYang Zhong     int mmu_idx;
1025d9bb58e5SYang Zhong 
1026d9bb58e5SYang Zhong     env = cpu->env_ptr;
1027a40ec84eSRichard Henderson     qemu_spin_lock(&env_tlb(env)->c.lock);
1028d9bb58e5SYang Zhong     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1029d9bb58e5SYang Zhong         unsigned int i;
1030722a1c1eSRichard Henderson         unsigned int n = tlb_n_entries(&env_tlb(env)->f[mmu_idx]);
1031d9bb58e5SYang Zhong 
103286e1eff8SEmilio G. Cota         for (i = 0; i < n; i++) {
1033a40ec84eSRichard Henderson             tlb_reset_dirty_range_locked(&env_tlb(env)->f[mmu_idx].table[i],
1034a40ec84eSRichard Henderson                                          start1, length);
1035d9bb58e5SYang Zhong         }
1036d9bb58e5SYang Zhong 
1037d9bb58e5SYang Zhong         for (i = 0; i < CPU_VTLB_SIZE; i++) {
1038a40ec84eSRichard Henderson             tlb_reset_dirty_range_locked(&env_tlb(env)->d[mmu_idx].vtable[i],
1039a40ec84eSRichard Henderson                                          start1, length);
1040d9bb58e5SYang Zhong         }
1041d9bb58e5SYang Zhong     }
1042a40ec84eSRichard Henderson     qemu_spin_unlock(&env_tlb(env)->c.lock);
1043d9bb58e5SYang Zhong }
1044d9bb58e5SYang Zhong 
104553d28455SRichard Henderson /* Called with tlb_c.lock held */
104671aec354SEmilio G. Cota static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry,
104771aec354SEmilio G. Cota                                          target_ulong vaddr)
1048d9bb58e5SYang Zhong {
1049d9bb58e5SYang Zhong     if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
1050d9bb58e5SYang Zhong         tlb_entry->addr_write = vaddr;
1051d9bb58e5SYang Zhong     }
1052d9bb58e5SYang Zhong }
1053d9bb58e5SYang Zhong 
1054d9bb58e5SYang Zhong /* Update the TLB entries for virtual page 'vaddr' so that writes are
1055d9bb58e5SYang Zhong    no longer diverted through the not-dirty slow path. */
1056d9bb58e5SYang Zhong void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
1057d9bb58e5SYang Zhong {
1058d9bb58e5SYang Zhong     CPUArchState *env = cpu->env_ptr;
1059d9bb58e5SYang Zhong     int mmu_idx;
1060d9bb58e5SYang Zhong 
1061d9bb58e5SYang Zhong     assert_cpu_is_self(cpu);
1062d9bb58e5SYang Zhong 
1063d9bb58e5SYang Zhong     vaddr &= TARGET_PAGE_MASK;
1064a40ec84eSRichard Henderson     qemu_spin_lock(&env_tlb(env)->c.lock);
1065d9bb58e5SYang Zhong     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1066383beda9SRichard Henderson         tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, vaddr), vaddr);
1067d9bb58e5SYang Zhong     }
1068d9bb58e5SYang Zhong 
1069d9bb58e5SYang Zhong     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1070d9bb58e5SYang Zhong         int k;
1071d9bb58e5SYang Zhong         for (k = 0; k < CPU_VTLB_SIZE; k++) {
1072a40ec84eSRichard Henderson             tlb_set_dirty1_locked(&env_tlb(env)->d[mmu_idx].vtable[k], vaddr);
1073d9bb58e5SYang Zhong         }
1074d9bb58e5SYang Zhong     }
1075a40ec84eSRichard Henderson     qemu_spin_unlock(&env_tlb(env)->c.lock);
1076d9bb58e5SYang Zhong }
1077d9bb58e5SYang Zhong 
1078d9bb58e5SYang Zhong /* Our TLB does not support large pages, so remember the area covered by
1079d9bb58e5SYang Zhong    large pages and trigger a full TLB flush if these are invalidated.  */
10801308e026SRichard Henderson static void tlb_add_large_page(CPUArchState *env, int mmu_idx,
10811308e026SRichard Henderson                                target_ulong vaddr, target_ulong size)
1082d9bb58e5SYang Zhong {
1083a40ec84eSRichard Henderson     target_ulong lp_addr = env_tlb(env)->d[mmu_idx].large_page_addr;
10841308e026SRichard Henderson     target_ulong lp_mask = ~(size - 1);
1085d9bb58e5SYang Zhong 
10861308e026SRichard Henderson     if (lp_addr == (target_ulong)-1) {
10871308e026SRichard Henderson         /* No previous large page.  */
10881308e026SRichard Henderson         lp_addr = vaddr;
10891308e026SRichard Henderson     } else {
1090d9bb58e5SYang Zhong         /* Extend the existing region to include the new page.
10911308e026SRichard Henderson            This is a compromise between unnecessary flushes and
10921308e026SRichard Henderson            the cost of maintaining a fully variable-size TLB.  */
1093a40ec84eSRichard Henderson         lp_mask &= env_tlb(env)->d[mmu_idx].large_page_mask;
10941308e026SRichard Henderson         while (((lp_addr ^ vaddr) & lp_mask) != 0) {
10951308e026SRichard Henderson             lp_mask <<= 1;
1096d9bb58e5SYang Zhong         }
10971308e026SRichard Henderson     }
1098a40ec84eSRichard Henderson     env_tlb(env)->d[mmu_idx].large_page_addr = lp_addr & lp_mask;
1099a40ec84eSRichard Henderson     env_tlb(env)->d[mmu_idx].large_page_mask = lp_mask;
1100d9bb58e5SYang Zhong }
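
/*
 * Worked example: an existing 2MB large page at 0x40000000 has
 * lp_mask 0xffe00000.  Adding another 2MB page at 0x40300000 gives
 * (lp_addr ^ vaddr) = 0x00300000, so lp_mask widens to 0xffc00000 and
 * the single tracked region 0x40000000-0x403fffff covers both pages.
 */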
1101d9bb58e5SYang Zhong 
110240473689SRichard Henderson /*
110340473689SRichard Henderson  * Add a new TLB entry. At most one entry for a given virtual address
1104d9bb58e5SYang Zhong  * is permitted. Only a single TARGET_PAGE_SIZE region is mapped; the
1105d9bb58e5SYang Zhong  * supplied size is only used by tlb_flush_page.
1106d9bb58e5SYang Zhong  *
1107d9bb58e5SYang Zhong  * Called from TCG-generated code, which is under an RCU read-side
1108d9bb58e5SYang Zhong  * critical section.
1109d9bb58e5SYang Zhong  */
111040473689SRichard Henderson void tlb_set_page_full(CPUState *cpu, int mmu_idx,
111140473689SRichard Henderson                        target_ulong vaddr, CPUTLBEntryFull *full)
1112d9bb58e5SYang Zhong {
1113d9bb58e5SYang Zhong     CPUArchState *env = cpu->env_ptr;
1114a40ec84eSRichard Henderson     CPUTLB *tlb = env_tlb(env);
1115a40ec84eSRichard Henderson     CPUTLBDesc *desc = &tlb->d[mmu_idx];
1116d9bb58e5SYang Zhong     MemoryRegionSection *section;
1117d9bb58e5SYang Zhong     unsigned int index;
1118d9bb58e5SYang Zhong     target_ulong address;
11198f5db641SRichard Henderson     target_ulong write_address;
1120d9bb58e5SYang Zhong     uintptr_t addend;
112168fea038SRichard Henderson     CPUTLBEntry *te, tn;
112255df6fcfSPeter Maydell     hwaddr iotlb, xlat, sz, paddr_page;
112355df6fcfSPeter Maydell     target_ulong vaddr_page;
112440473689SRichard Henderson     int asidx, wp_flags, prot;
11258f5db641SRichard Henderson     bool is_ram, is_romd;
1126d9bb58e5SYang Zhong 
1127d9bb58e5SYang Zhong     assert_cpu_is_self(cpu);
112855df6fcfSPeter Maydell 
112940473689SRichard Henderson     if (full->lg_page_size <= TARGET_PAGE_BITS) {
113055df6fcfSPeter Maydell         sz = TARGET_PAGE_SIZE;
113155df6fcfSPeter Maydell     } else {
113240473689SRichard Henderson         sz = (hwaddr)1 << full->lg_page_size;
113340473689SRichard Henderson         tlb_add_large_page(env, mmu_idx, vaddr, sz);
113455df6fcfSPeter Maydell     }
113555df6fcfSPeter Maydell     vaddr_page = vaddr & TARGET_PAGE_MASK;
113640473689SRichard Henderson     paddr_page = full->phys_addr & TARGET_PAGE_MASK;
113755df6fcfSPeter Maydell 
113840473689SRichard Henderson     prot = full->prot;
113940473689SRichard Henderson     asidx = cpu_asidx_from_attrs(cpu, full->attrs);
114055df6fcfSPeter Maydell     section = address_space_translate_for_iotlb(cpu, asidx, paddr_page,
114140473689SRichard Henderson                                                 &xlat, &sz, full->attrs, &prot);
1142d9bb58e5SYang Zhong     assert(sz >= TARGET_PAGE_SIZE);
1143d9bb58e5SYang Zhong 
1144d9bb58e5SYang Zhong     tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
1145d9bb58e5SYang Zhong               " prot=%x idx=%d\n",
114640473689SRichard Henderson               vaddr, full->phys_addr, prot, mmu_idx);
1147d9bb58e5SYang Zhong 
114855df6fcfSPeter Maydell     address = vaddr_page;
114940473689SRichard Henderson     if (full->lg_page_size < TARGET_PAGE_BITS) {
115030d7e098SRichard Henderson         /* Repeat the MMU check and TLB fill on every access.  */
115130d7e098SRichard Henderson         address |= TLB_INVALID_MASK;
115255df6fcfSPeter Maydell     }
115340473689SRichard Henderson     if (full->attrs.byte_swap) {
11545b87b3e6SRichard Henderson         address |= TLB_BSWAP;
1155a26fc6f5STony Nguyen     }
11568f5db641SRichard Henderson 
11578f5db641SRichard Henderson     is_ram = memory_region_is_ram(section->mr);
11588f5db641SRichard Henderson     is_romd = memory_region_is_romd(section->mr);
11598f5db641SRichard Henderson 
11608f5db641SRichard Henderson     if (is_ram || is_romd) {
11618f5db641SRichard Henderson         /* RAM and ROMD both have associated host memory. */
1162d9bb58e5SYang Zhong         addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
11638f5db641SRichard Henderson     } else {
11648f5db641SRichard Henderson         /* I/O does not; force the host address to NULL. */
11658f5db641SRichard Henderson         addend = 0;
1166d9bb58e5SYang Zhong     }
1167d9bb58e5SYang Zhong 
11688f5db641SRichard Henderson     write_address = address;
11698f5db641SRichard Henderson     if (is_ram) {
11708f5db641SRichard Henderson         iotlb = memory_region_get_ram_addr(section->mr) + xlat;
11718f5db641SRichard Henderson         /*
11728f5db641SRichard Henderson          * Computing is_clean is expensive; avoid all that unless
11738f5db641SRichard Henderson          * the page is actually writable.
11748f5db641SRichard Henderson          */
11758f5db641SRichard Henderson         if (prot & PAGE_WRITE) {
11768f5db641SRichard Henderson             if (section->readonly) {
11778f5db641SRichard Henderson                 write_address |= TLB_DISCARD_WRITE;
11788f5db641SRichard Henderson             } else if (cpu_physical_memory_is_clean(iotlb)) {
11798f5db641SRichard Henderson                 write_address |= TLB_NOTDIRTY;
11808f5db641SRichard Henderson             }
11818f5db641SRichard Henderson         }
11828f5db641SRichard Henderson     } else {
11838f5db641SRichard Henderson         /* I/O or ROMD */
11848f5db641SRichard Henderson         iotlb = memory_region_section_get_iotlb(cpu, section) + xlat;
11858f5db641SRichard Henderson         /*
11868f5db641SRichard Henderson          * Writes to romd devices must go through MMIO to enable write.
11878f5db641SRichard Henderson          * Reads to romd devices go through the ram_ptr found above,
11888f5db641SRichard Henderson          * but of course reads to I/O must go through MMIO.
11898f5db641SRichard Henderson          */
11908f5db641SRichard Henderson         write_address |= TLB_MMIO;
11918f5db641SRichard Henderson         if (!is_romd) {
11928f5db641SRichard Henderson             address = write_address;
11938f5db641SRichard Henderson         }
11948f5db641SRichard Henderson     }
11958f5db641SRichard Henderson 
119650b107c5SRichard Henderson     wp_flags = cpu_watchpoint_address_matches(cpu, vaddr_page,
119750b107c5SRichard Henderson                                               TARGET_PAGE_SIZE);
1198d9bb58e5SYang Zhong 
1199383beda9SRichard Henderson     index = tlb_index(env, mmu_idx, vaddr_page);
1200383beda9SRichard Henderson     te = tlb_entry(env, mmu_idx, vaddr_page);
1201d9bb58e5SYang Zhong 
120268fea038SRichard Henderson     /*
120371aec354SEmilio G. Cota      * Hold the TLB lock for the rest of the function. We could acquire/release
120471aec354SEmilio G. Cota      * the lock several times in the function, but it is faster to amortize the
120571aec354SEmilio G. Cota      * acquisition cost by acquiring it just once. Note that this leads to
120671aec354SEmilio G. Cota      * a longer critical section, but this is not a concern since the TLB lock
120771aec354SEmilio G. Cota      * is unlikely to be contended.
120871aec354SEmilio G. Cota      */
1209a40ec84eSRichard Henderson     qemu_spin_lock(&tlb->c.lock);
121071aec354SEmilio G. Cota 
12113d1523ceSRichard Henderson     /* Note that the tlb is no longer clean.  */
1212a40ec84eSRichard Henderson     tlb->c.dirty |= 1 << mmu_idx;
12133d1523ceSRichard Henderson 
121471aec354SEmilio G. Cota     /* Make sure there's no cached translation for the new page.  */
121571aec354SEmilio G. Cota     tlb_flush_vtlb_page_locked(env, mmu_idx, vaddr_page);
121671aec354SEmilio G. Cota 
121771aec354SEmilio G. Cota     /*
121868fea038SRichard Henderson      * Only evict the old entry to the victim tlb if it's for a
121968fea038SRichard Henderson      * different page; otherwise just overwrite the stale data.
122068fea038SRichard Henderson      */
12213cea94bbSEmilio G. Cota     if (!tlb_hit_page_anyprot(te, vaddr_page) && !tlb_entry_is_empty(te)) {
1222a40ec84eSRichard Henderson         unsigned vidx = desc->vindex++ % CPU_VTLB_SIZE;
1223a40ec84eSRichard Henderson         CPUTLBEntry *tv = &desc->vtable[vidx];
122468fea038SRichard Henderson 
122568fea038SRichard Henderson         /* Evict the old entry into the victim tlb.  */
122671aec354SEmilio G. Cota         copy_tlb_helper_locked(tv, te);
122725d3ec58SRichard Henderson         desc->vfulltlb[vidx] = desc->fulltlb[index];
122886e1eff8SEmilio G. Cota         tlb_n_used_entries_dec(env, mmu_idx);
122968fea038SRichard Henderson     }
1230d9bb58e5SYang Zhong 
1231d9bb58e5SYang Zhong     /* refill the tlb */
1232ace41090SPeter Maydell     /*
1233ace41090SPeter Maydell      * At this point iotlb contains a physical section number in the lower
1234ace41090SPeter Maydell      * TARGET_PAGE_BITS, and either
12358f5db641SRichard Henderson      *  + the ram_addr_t of the page base of the target RAM (RAM)
12368f5db641SRichard Henderson      *  + the offset within section->mr of the page base (I/O, ROMD)
123755df6fcfSPeter Maydell      * We subtract the vaddr_page (which is page aligned and thus won't
1238ace41090SPeter Maydell      * disturb the low bits) to give an offset which can be added to the
1239ace41090SPeter Maydell      * (non-page-aligned) vaddr of the eventual memory access to get
1240ace41090SPeter Maydell      * the MemoryRegion offset for the access. Note that the vaddr we
1241ace41090SPeter Maydell      * subtract here is that of the page base, and not the same as the
1242ace41090SPeter Maydell      * vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
1243ace41090SPeter Maydell      */
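    /*
     * Worked example: with TARGET_PAGE_BITS 12, a page base offset of
     * 0x30000, section number 5 and vaddr_page 0x7000 give
     * iotlb = 0x30005 and xlat_section = 0x30005 - 0x7000 = 0x29005.
     * An access at vaddr 0x7234 then recovers
     * (0x29005 & TARGET_PAGE_MASK) + 0x7234 = 0x29000 + 0x7234 = 0x30234,
     * i.e. the page base plus the 0x234 offset within the page.
     */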
124440473689SRichard Henderson     desc->fulltlb[index] = *full;
124525d3ec58SRichard Henderson     desc->fulltlb[index].xlat_section = iotlb - vaddr_page;
124640473689SRichard Henderson     desc->fulltlb[index].phys_addr = paddr_page;
124740473689SRichard Henderson     desc->fulltlb[index].prot = prot;
1248d9bb58e5SYang Zhong 
1249d9bb58e5SYang Zhong     /* Now calculate the new entry */
125055df6fcfSPeter Maydell     tn.addend = addend - vaddr_page;
1251d9bb58e5SYang Zhong     if (prot & PAGE_READ) {
1252d9bb58e5SYang Zhong         tn.addr_read = address;
125350b107c5SRichard Henderson         if (wp_flags & BP_MEM_READ) {
125450b107c5SRichard Henderson             tn.addr_read |= TLB_WATCHPOINT;
125550b107c5SRichard Henderson         }
1256d9bb58e5SYang Zhong     } else {
1257d9bb58e5SYang Zhong         tn.addr_read = -1;
1258d9bb58e5SYang Zhong     }
1259d9bb58e5SYang Zhong 
1260d9bb58e5SYang Zhong     if (prot & PAGE_EXEC) {
12618f5db641SRichard Henderson         tn.addr_code = address;
1262d9bb58e5SYang Zhong     } else {
1263d9bb58e5SYang Zhong         tn.addr_code = -1;
1264d9bb58e5SYang Zhong     }
1265d9bb58e5SYang Zhong 
1266d9bb58e5SYang Zhong     tn.addr_write = -1;
1267d9bb58e5SYang Zhong     if (prot & PAGE_WRITE) {
12688f5db641SRichard Henderson         tn.addr_write = write_address;
1269f52bfb12SDavid Hildenbrand         if (prot & PAGE_WRITE_INV) {
1270f52bfb12SDavid Hildenbrand             tn.addr_write |= TLB_INVALID_MASK;
1271f52bfb12SDavid Hildenbrand         }
127250b107c5SRichard Henderson         if (wp_flags & BP_MEM_WRITE) {
127350b107c5SRichard Henderson             tn.addr_write |= TLB_WATCHPOINT;
127450b107c5SRichard Henderson         }
1275d9bb58e5SYang Zhong     }
1276d9bb58e5SYang Zhong 
127771aec354SEmilio G. Cota     copy_tlb_helper_locked(te, &tn);
127886e1eff8SEmilio G. Cota     tlb_n_used_entries_inc(env, mmu_idx);
1279a40ec84eSRichard Henderson     qemu_spin_unlock(&tlb->c.lock);
1280d9bb58e5SYang Zhong }
1281d9bb58e5SYang Zhong 
128240473689SRichard Henderson void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
128340473689SRichard Henderson                              hwaddr paddr, MemTxAttrs attrs, int prot,
128440473689SRichard Henderson                              int mmu_idx, target_ulong size)
128540473689SRichard Henderson {
128640473689SRichard Henderson     CPUTLBEntryFull full = {
128740473689SRichard Henderson         .phys_addr = paddr,
128840473689SRichard Henderson         .attrs = attrs,
128940473689SRichard Henderson         .prot = prot,
129040473689SRichard Henderson         .lg_page_size = ctz64(size)
129140473689SRichard Henderson     };
129240473689SRichard Henderson 
129340473689SRichard Henderson     assert(is_power_of_2(size));
129440473689SRichard Henderson     tlb_set_page_full(cpu, mmu_idx, vaddr, &full);
129540473689SRichard Henderson }
129640473689SRichard Henderson 
1297d9bb58e5SYang Zhong void tlb_set_page(CPUState *cpu, target_ulong vaddr,
1298d9bb58e5SYang Zhong                   hwaddr paddr, int prot,
1299d9bb58e5SYang Zhong                   int mmu_idx, target_ulong size)
1300d9bb58e5SYang Zhong {
1301d9bb58e5SYang Zhong     tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
1302d9bb58e5SYang Zhong                             prot, mmu_idx, size);
1303d9bb58e5SYang Zhong }
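
/*
 * Usage sketch (hypothetical target code, not part of this file): a
 * target's tcg_ops->tlb_fill hook typically translates the faulting
 * address and installs the mapping with one of the functions above.
 * The mycpu_* names here are illustrative only:
 *
 *   static bool mycpu_tlb_fill(CPUState *cs, vaddr addr, int size,
 *                              MMUAccessType access_type, int mmu_idx,
 *                              bool probe, uintptr_t retaddr)
 *   {
 *       hwaddr paddr;
 *       int prot;
 *
 *       if (mycpu_translate(cs, addr, access_type, mmu_idx,
 *                           &paddr, &prot)) {
 *           tlb_set_page_with_attrs(cs, addr & TARGET_PAGE_MASK,
 *                                   paddr & TARGET_PAGE_MASK,
 *                                   MEMTXATTRS_UNSPECIFIED, prot,
 *                                   mmu_idx, TARGET_PAGE_SIZE);
 *           return true;
 *       }
 *       if (probe) {
 *           return false;
 *       }
 *       mycpu_raise_mmu_exception(cs, addr, access_type, retaddr);
 *   }
 */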
1304d9bb58e5SYang Zhong 
1305c319dc13SRichard Henderson /*
1306c319dc13SRichard Henderson  * Note: tlb_fill() can trigger a resize of the TLB. This means that all of the
1307c319dc13SRichard Henderson  * caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must
1308c319dc13SRichard Henderson  * be discarded and looked up again (e.g. via tlb_entry()).
1309c319dc13SRichard Henderson  */
1310c319dc13SRichard Henderson static void tlb_fill(CPUState *cpu, target_ulong addr, int size,
1311c319dc13SRichard Henderson                      MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
1312c319dc13SRichard Henderson {
1313c319dc13SRichard Henderson     bool ok;
1314c319dc13SRichard Henderson 
1315c319dc13SRichard Henderson     /*
1316c319dc13SRichard Henderson      * This is not a probe, so only valid return is success; failure
1317c319dc13SRichard Henderson      * should result in exception + longjmp to the cpu loop.
1318c319dc13SRichard Henderson      */
13198810ee2aSAlex Bennée     ok = cpu->cc->tcg_ops->tlb_fill(cpu, addr, size,
1320e124536fSEduardo Habkost                                     access_type, mmu_idx, false, retaddr);
1321c319dc13SRichard Henderson     assert(ok);
1322c319dc13SRichard Henderson }
1323c319dc13SRichard Henderson 
132478271684SClaudio Fontana static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
132578271684SClaudio Fontana                                         MMUAccessType access_type,
132678271684SClaudio Fontana                                         int mmu_idx, uintptr_t retaddr)
132778271684SClaudio Fontana {
13288810ee2aSAlex Bennée     cpu->cc->tcg_ops->do_unaligned_access(cpu, addr, access_type,
13298810ee2aSAlex Bennée                                           mmu_idx, retaddr);
133078271684SClaudio Fontana }
133178271684SClaudio Fontana 
133278271684SClaudio Fontana static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr,
133378271684SClaudio Fontana                                           vaddr addr, unsigned size,
133478271684SClaudio Fontana                                           MMUAccessType access_type,
133578271684SClaudio Fontana                                           int mmu_idx, MemTxAttrs attrs,
133678271684SClaudio Fontana                                           MemTxResult response,
133778271684SClaudio Fontana                                           uintptr_t retaddr)
133878271684SClaudio Fontana {
133978271684SClaudio Fontana     CPUClass *cc = CPU_GET_CLASS(cpu);
134078271684SClaudio Fontana 
134178271684SClaudio Fontana     if (!cpu->ignore_memory_transaction_failures &&
134278271684SClaudio Fontana         cc->tcg_ops->do_transaction_failed) {
134378271684SClaudio Fontana         cc->tcg_ops->do_transaction_failed(cpu, physaddr, addr, size,
134478271684SClaudio Fontana                                            access_type, mmu_idx, attrs,
134578271684SClaudio Fontana                                            response, retaddr);
134678271684SClaudio Fontana     }
134778271684SClaudio Fontana }
134878271684SClaudio Fontana 
134925d3ec58SRichard Henderson static uint64_t io_readx(CPUArchState *env, CPUTLBEntryFull *full,
1350f1be3696SRichard Henderson                          int mmu_idx, target_ulong addr, uintptr_t retaddr,
1351be5c4787STony Nguyen                          MMUAccessType access_type, MemOp op)
1352d9bb58e5SYang Zhong {
135329a0af61SRichard Henderson     CPUState *cpu = env_cpu(env);
13542d54f194SPeter Maydell     hwaddr mr_offset;
13552d54f194SPeter Maydell     MemoryRegionSection *section;
13562d54f194SPeter Maydell     MemoryRegion *mr;
1357d9bb58e5SYang Zhong     uint64_t val;
1358d9bb58e5SYang Zhong     bool locked = false;
135904e3aabdSPeter Maydell     MemTxResult r;
1360d9bb58e5SYang Zhong 
136125d3ec58SRichard Henderson     section = iotlb_to_section(cpu, full->xlat_section, full->attrs);
13622d54f194SPeter Maydell     mr = section->mr;
136325d3ec58SRichard Henderson     mr_offset = (full->xlat_section & TARGET_PAGE_MASK) + addr;
1364d9bb58e5SYang Zhong     cpu->mem_io_pc = retaddr;
136508565552SRichard Henderson     if (!cpu->can_do_io) {
1366d9bb58e5SYang Zhong         cpu_io_recompile(cpu, retaddr);
1367d9bb58e5SYang Zhong     }
1368d9bb58e5SYang Zhong 
136941744954SPhilippe Mathieu-Daudé     if (!qemu_mutex_iothread_locked()) {
1370d9bb58e5SYang Zhong         qemu_mutex_lock_iothread();
1371d9bb58e5SYang Zhong         locked = true;
1372d9bb58e5SYang Zhong     }
137325d3ec58SRichard Henderson     r = memory_region_dispatch_read(mr, mr_offset, &val, op, full->attrs);
137404e3aabdSPeter Maydell     if (r != MEMTX_OK) {
13752d54f194SPeter Maydell         hwaddr physaddr = mr_offset +
13762d54f194SPeter Maydell             section->offset_within_address_space -
13772d54f194SPeter Maydell             section->offset_within_region;
13782d54f194SPeter Maydell 
1379be5c4787STony Nguyen         cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), access_type,
138025d3ec58SRichard Henderson                                mmu_idx, full->attrs, r, retaddr);
138104e3aabdSPeter Maydell     }
1382d9bb58e5SYang Zhong     if (locked) {
1383d9bb58e5SYang Zhong         qemu_mutex_unlock_iothread();
1384d9bb58e5SYang Zhong     }
1385d9bb58e5SYang Zhong 
1386d9bb58e5SYang Zhong     return val;
1387d9bb58e5SYang Zhong }
1388d9bb58e5SYang Zhong 
13892f3a57eeSAlex Bennée /*
139025d3ec58SRichard Henderson  * Save a potentially trashed CPUTLBEntryFull for later lookup by plugin.
139125d3ec58SRichard Henderson  * This is read by tlb_plugin_lookup if the fulltlb entry doesn't match
1392570ef309SAlex Bennée  * because of the side effect of io_writex changing memory layout.
13932f3a57eeSAlex Bennée  */
139437523ff7SRichard Henderson static void save_iotlb_data(CPUState *cs, MemoryRegionSection *section,
139537523ff7SRichard Henderson                             hwaddr mr_offset)
13962f3a57eeSAlex Bennée {
13972f3a57eeSAlex Bennée #ifdef CONFIG_PLUGIN
13982f3a57eeSAlex Bennée     SavedIOTLB *saved = &cs->saved_iotlb;
13992f3a57eeSAlex Bennée     saved->section = section;
14002f3a57eeSAlex Bennée     saved->mr_offset = mr_offset;
14012f3a57eeSAlex Bennée #endif
14022f3a57eeSAlex Bennée }
14032f3a57eeSAlex Bennée 
140425d3ec58SRichard Henderson static void io_writex(CPUArchState *env, CPUTLBEntryFull *full,
1405f1be3696SRichard Henderson                       int mmu_idx, uint64_t val, target_ulong addr,
1406be5c4787STony Nguyen                       uintptr_t retaddr, MemOp op)
1407d9bb58e5SYang Zhong {
140829a0af61SRichard Henderson     CPUState *cpu = env_cpu(env);
14092d54f194SPeter Maydell     hwaddr mr_offset;
14102d54f194SPeter Maydell     MemoryRegionSection *section;
14112d54f194SPeter Maydell     MemoryRegion *mr;
1412d9bb58e5SYang Zhong     bool locked = false;
141304e3aabdSPeter Maydell     MemTxResult r;
1414d9bb58e5SYang Zhong 
141525d3ec58SRichard Henderson     section = iotlb_to_section(cpu, full->xlat_section, full->attrs);
14162d54f194SPeter Maydell     mr = section->mr;
141725d3ec58SRichard Henderson     mr_offset = (full->xlat_section & TARGET_PAGE_MASK) + addr;
141808565552SRichard Henderson     if (!cpu->can_do_io) {
1419d9bb58e5SYang Zhong         cpu_io_recompile(cpu, retaddr);
1420d9bb58e5SYang Zhong     }
1421d9bb58e5SYang Zhong     cpu->mem_io_pc = retaddr;
1422d9bb58e5SYang Zhong 
14232f3a57eeSAlex Bennée     /*
14242f3a57eeSAlex Bennée      * The memory_region_dispatch may trigger a flush/resize
14252f3a57eeSAlex Bennée      * so for plugins we save the iotlb_data just in case.
14262f3a57eeSAlex Bennée      */
142737523ff7SRichard Henderson     save_iotlb_data(cpu, section, mr_offset);
14282f3a57eeSAlex Bennée 
142941744954SPhilippe Mathieu-Daudé     if (!qemu_mutex_iothread_locked()) {
1430d9bb58e5SYang Zhong         qemu_mutex_lock_iothread();
1431d9bb58e5SYang Zhong         locked = true;
1432d9bb58e5SYang Zhong     }
143325d3ec58SRichard Henderson     r = memory_region_dispatch_write(mr, mr_offset, val, op, full->attrs);
143404e3aabdSPeter Maydell     if (r != MEMTX_OK) {
14352d54f194SPeter Maydell         hwaddr physaddr = mr_offset +
14362d54f194SPeter Maydell             section->offset_within_address_space -
14372d54f194SPeter Maydell             section->offset_within_region;
14382d54f194SPeter Maydell 
1439be5c4787STony Nguyen         cpu_transaction_failed(cpu, physaddr, addr, memop_size(op),
144025d3ec58SRichard Henderson                                MMU_DATA_STORE, mmu_idx, full->attrs, r,
1441be5c4787STony Nguyen                                retaddr);
144204e3aabdSPeter Maydell     }
1443d9bb58e5SYang Zhong     if (locked) {
1444d9bb58e5SYang Zhong         qemu_mutex_unlock_iothread();
1445d9bb58e5SYang Zhong     }
1446d9bb58e5SYang Zhong }
1447d9bb58e5SYang Zhong 
14484811e909SRichard Henderson static inline target_ulong tlb_read_ofs(CPUTLBEntry *entry, size_t ofs)
14494811e909SRichard Henderson {
14504811e909SRichard Henderson #if TCG_OVERSIZED_GUEST
14514811e909SRichard Henderson     return *(target_ulong *)((uintptr_t)entry + ofs);
14524811e909SRichard Henderson #else
1453d73415a3SStefan Hajnoczi     /* ofs might correspond to .addr_write, so use qatomic_read */
1454d73415a3SStefan Hajnoczi     return qatomic_read((target_ulong *)((uintptr_t)entry + ofs));
14554811e909SRichard Henderson #endif
14564811e909SRichard Henderson }
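
/*
 * E.g. tlb_read_ofs(entry, offsetof(CPUTLBEntry, addr_write)) is
 * equivalent to tlb_addr_write(entry).
 */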
14574811e909SRichard Henderson 
1458d9bb58e5SYang Zhong /* Return true if ADDR is present in the victim tlb, and has been copied
1459d9bb58e5SYang Zhong    back to the main tlb.  */
1460d9bb58e5SYang Zhong static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
1461d9bb58e5SYang Zhong                            size_t elt_ofs, target_ulong page)
1462d9bb58e5SYang Zhong {
1463d9bb58e5SYang Zhong     size_t vidx;
146471aec354SEmilio G. Cota 
146529a0af61SRichard Henderson     assert_cpu_is_self(env_cpu(env));
1466d9bb58e5SYang Zhong     for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
1467a40ec84eSRichard Henderson         CPUTLBEntry *vtlb = &env_tlb(env)->d[mmu_idx].vtable[vidx];
1468a40ec84eSRichard Henderson         target_ulong cmp;
1469a40ec84eSRichard Henderson 
1470d73415a3SStefan Hajnoczi         /* elt_ofs might correspond to .addr_write, so use qatomic_read */
1471a40ec84eSRichard Henderson #if TCG_OVERSIZED_GUEST
1472a40ec84eSRichard Henderson         cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);
1473a40ec84eSRichard Henderson #else
1474d73415a3SStefan Hajnoczi         cmp = qatomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs));
1475a40ec84eSRichard Henderson #endif
1476d9bb58e5SYang Zhong 
1477d9bb58e5SYang Zhong         if (cmp == page) {
1478d9bb58e5SYang Zhong             /* Found entry in victim tlb, swap tlb and iotlb.  */
1479a40ec84eSRichard Henderson             CPUTLBEntry tmptlb, *tlb = &env_tlb(env)->f[mmu_idx].table[index];
1480d9bb58e5SYang Zhong 
1481a40ec84eSRichard Henderson             qemu_spin_lock(&env_tlb(env)->c.lock);
148271aec354SEmilio G. Cota             copy_tlb_helper_locked(&tmptlb, tlb);
148371aec354SEmilio G. Cota             copy_tlb_helper_locked(tlb, vtlb);
148471aec354SEmilio G. Cota             copy_tlb_helper_locked(vtlb, &tmptlb);
1485a40ec84eSRichard Henderson             qemu_spin_unlock(&env_tlb(env)->c.lock);
1486d9bb58e5SYang Zhong 
148725d3ec58SRichard Henderson             CPUTLBEntryFull *f1 = &env_tlb(env)->d[mmu_idx].fulltlb[index];
148825d3ec58SRichard Henderson             CPUTLBEntryFull *f2 = &env_tlb(env)->d[mmu_idx].vfulltlb[vidx];
148925d3ec58SRichard Henderson             CPUTLBEntryFull tmpf;
149025d3ec58SRichard Henderson             tmpf = *f1; *f1 = *f2; *f2 = tmpf;
1491d9bb58e5SYang Zhong             return true;
1492d9bb58e5SYang Zhong         }
1493d9bb58e5SYang Zhong     }
1494d9bb58e5SYang Zhong     return false;
1495d9bb58e5SYang Zhong }
1496d9bb58e5SYang Zhong 
1497d9bb58e5SYang Zhong /* Macro to call the above, with local variables from the use context.  */
1498d9bb58e5SYang Zhong #define VICTIM_TLB_HIT(TY, ADDR) \
1499d9bb58e5SYang Zhong   victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
1500d9bb58e5SYang Zhong                  (ADDR) & TARGET_PAGE_MASK)
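
/*
 * E.g. VICTIM_TLB_HIT(addr_write, addr) expands to
 * victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, addr_write),
 *                (addr) & TARGET_PAGE_MASK)
 * using the env, mmu_idx and index variables in scope at the call site.
 */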
1501d9bb58e5SYang Zhong 
1502707526adSRichard Henderson static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
150325d3ec58SRichard Henderson                            CPUTLBEntryFull *full, uintptr_t retaddr)
1504707526adSRichard Henderson {
150525d3ec58SRichard Henderson     ram_addr_t ram_addr = mem_vaddr + full->xlat_section;
1506707526adSRichard Henderson 
1507707526adSRichard Henderson     trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size);
1508707526adSRichard Henderson 
1509707526adSRichard Henderson     if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
1510707526adSRichard Henderson         struct page_collection *pages
1511707526adSRichard Henderson             = page_collection_lock(ram_addr, ram_addr + size);
15125a7c27bbSRichard Henderson         tb_invalidate_phys_page_fast(pages, ram_addr, size, retaddr);
1513707526adSRichard Henderson         page_collection_unlock(pages);
1514707526adSRichard Henderson     }
1515707526adSRichard Henderson 
1516707526adSRichard Henderson     /*
1517707526adSRichard Henderson      * Set both VGA and migration bits for simplicity and to remove
1518707526adSRichard Henderson      * the notdirty callback faster.
1519707526adSRichard Henderson      */
1520707526adSRichard Henderson     cpu_physical_memory_set_dirty_range(ram_addr, size, DIRTY_CLIENTS_NOCODE);
1521707526adSRichard Henderson 
1522707526adSRichard Henderson     /* We remove the notdirty callback only if the code has been flushed. */
1523707526adSRichard Henderson     if (!cpu_physical_memory_is_clean(ram_addr)) {
1524707526adSRichard Henderson         trace_memory_notdirty_set_dirty(mem_vaddr);
1525707526adSRichard Henderson         tlb_set_dirty(cpu, mem_vaddr);
1526707526adSRichard Henderson     }
1527707526adSRichard Henderson }
1528707526adSRichard Henderson 
1529069cfe77SRichard Henderson static int probe_access_internal(CPUArchState *env, target_ulong addr,
1530069cfe77SRichard Henderson                                  int fault_size, MMUAccessType access_type,
1531069cfe77SRichard Henderson                                  int mmu_idx, bool nonfault,
1532af803a4fSRichard Henderson                                  void **phost, CPUTLBEntryFull **pfull,
1533af803a4fSRichard Henderson                                  uintptr_t retaddr)
1534d9bb58e5SYang Zhong {
1535383beda9SRichard Henderson     uintptr_t index = tlb_index(env, mmu_idx, addr);
1536383beda9SRichard Henderson     CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
1537069cfe77SRichard Henderson     target_ulong tlb_addr, page_addr;
1538c25c283dSDavid Hildenbrand     size_t elt_ofs;
1539069cfe77SRichard Henderson     int flags;
1540ca86cf32SDavid Hildenbrand 
1541c25c283dSDavid Hildenbrand     switch (access_type) {
1542c25c283dSDavid Hildenbrand     case MMU_DATA_LOAD:
1543c25c283dSDavid Hildenbrand         elt_ofs = offsetof(CPUTLBEntry, addr_read);
1544c25c283dSDavid Hildenbrand         break;
1545c25c283dSDavid Hildenbrand     case MMU_DATA_STORE:
1546c25c283dSDavid Hildenbrand         elt_ofs = offsetof(CPUTLBEntry, addr_write);
1547c25c283dSDavid Hildenbrand         break;
1548c25c283dSDavid Hildenbrand     case MMU_INST_FETCH:
1549c25c283dSDavid Hildenbrand         elt_ofs = offsetof(CPUTLBEntry, addr_code);
1550c25c283dSDavid Hildenbrand         break;
1551c25c283dSDavid Hildenbrand     default:
1552c25c283dSDavid Hildenbrand         g_assert_not_reached();
1553c25c283dSDavid Hildenbrand     }
1554c25c283dSDavid Hildenbrand     tlb_addr = tlb_read_ofs(entry, elt_ofs);
1555c25c283dSDavid Hildenbrand 
1556c3c8bf57SRichard Henderson     flags = TLB_FLAGS_MASK;
1557069cfe77SRichard Henderson     page_addr = addr & TARGET_PAGE_MASK;
1558069cfe77SRichard Henderson     if (!tlb_hit_page(tlb_addr, page_addr)) {
1559069cfe77SRichard Henderson         if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, page_addr)) {
1560069cfe77SRichard Henderson             CPUState *cs = env_cpu(env);
1561069cfe77SRichard Henderson 
15628810ee2aSAlex Bennée             if (!cs->cc->tcg_ops->tlb_fill(cs, addr, fault_size, access_type,
1563069cfe77SRichard Henderson                                            mmu_idx, nonfault, retaddr)) {
1564069cfe77SRichard Henderson                 /* Non-faulting page table read failed.  */
1565069cfe77SRichard Henderson                 *phost = NULL;
1566af803a4fSRichard Henderson                 *pfull = NULL;
1567069cfe77SRichard Henderson                 return TLB_INVALID_MASK;
1568069cfe77SRichard Henderson             }
1569069cfe77SRichard Henderson 
157003a98189SDavid Hildenbrand             /* TLB resize via tlb_fill may have moved the entry.  */
1571af803a4fSRichard Henderson             index = tlb_index(env, mmu_idx, addr);
157203a98189SDavid Hildenbrand             entry = tlb_entry(env, mmu_idx, addr);
1573c3c8bf57SRichard Henderson 
1574c3c8bf57SRichard Henderson             /*
1575c3c8bf57SRichard Henderson              * With PAGE_WRITE_INV, we set TLB_INVALID_MASK immediately,
1576c3c8bf57SRichard Henderson              * to force the next access through tlb_fill.  We've just
1577c3c8bf57SRichard Henderson              * called tlb_fill, so we know that this entry *is* valid.
1578c3c8bf57SRichard Henderson              */
1579c3c8bf57SRichard Henderson             flags &= ~TLB_INVALID_MASK;
1580d9bb58e5SYang Zhong         }
1581c25c283dSDavid Hildenbrand         tlb_addr = tlb_read_ofs(entry, elt_ofs);
158203a98189SDavid Hildenbrand     }
1583c3c8bf57SRichard Henderson     flags &= tlb_addr;
158403a98189SDavid Hildenbrand 
1585af803a4fSRichard Henderson     *pfull = &env_tlb(env)->d[mmu_idx].fulltlb[index];
1586af803a4fSRichard Henderson 
1587069cfe77SRichard Henderson     /* Fold all "mmio-like" bits into TLB_MMIO.  This is not RAM.  */
1588069cfe77SRichard Henderson     if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY))) {
1589069cfe77SRichard Henderson         *phost = NULL;
1590069cfe77SRichard Henderson         return TLB_MMIO;
1591fef39ccdSDavid Hildenbrand     }
1592fef39ccdSDavid Hildenbrand 
1593069cfe77SRichard Henderson     /* Everything else is RAM. */
1594069cfe77SRichard Henderson     *phost = (void *)((uintptr_t)addr + entry->addend);
1595069cfe77SRichard Henderson     return flags;
1596069cfe77SRichard Henderson }
1597069cfe77SRichard Henderson 
1598af803a4fSRichard Henderson int probe_access_full(CPUArchState *env, target_ulong addr,
1599069cfe77SRichard Henderson                       MMUAccessType access_type, int mmu_idx,
1600af803a4fSRichard Henderson                       bool nonfault, void **phost, CPUTLBEntryFull **pfull,
1601af803a4fSRichard Henderson                       uintptr_t retaddr)
1602069cfe77SRichard Henderson {
1603af803a4fSRichard Henderson     int flags = probe_access_internal(env, addr, 0, access_type, mmu_idx,
1604af803a4fSRichard Henderson                                       nonfault, phost, pfull, retaddr);
1605069cfe77SRichard Henderson 
1606069cfe77SRichard Henderson     /* Handle clean RAM pages.  */
1607069cfe77SRichard Henderson     if (unlikely(flags & TLB_NOTDIRTY)) {
1608af803a4fSRichard Henderson         notdirty_write(env_cpu(env), addr, 1, *pfull, retaddr);
1609069cfe77SRichard Henderson         flags &= ~TLB_NOTDIRTY;
1610069cfe77SRichard Henderson     }
1611069cfe77SRichard Henderson 
1612069cfe77SRichard Henderson     return flags;
1613069cfe77SRichard Henderson }
1614069cfe77SRichard Henderson 
1615af803a4fSRichard Henderson int probe_access_flags(CPUArchState *env, target_ulong addr,
1616af803a4fSRichard Henderson                        MMUAccessType access_type, int mmu_idx,
1617af803a4fSRichard Henderson                        bool nonfault, void **phost, uintptr_t retaddr)
1618af803a4fSRichard Henderson {
1619af803a4fSRichard Henderson     CPUTLBEntryFull *full;
1620af803a4fSRichard Henderson 
1621af803a4fSRichard Henderson     return probe_access_full(env, addr, access_type, mmu_idx,
1622af803a4fSRichard Henderson                              nonfault, phost, &full, retaddr);
1623af803a4fSRichard Henderson }
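
/*
 * Usage sketch (hypothetical caller): probe a page for writability
 * without raising a guest exception, and use the host pointer only
 * when no MMIO, watchpoint or dirty-tracking flags remain:
 *
 *   void *host;
 *   int flags = probe_access_flags(env, addr, MMU_DATA_STORE, mmu_idx,
 *                                  true, &host, retaddr);
 *   if (host != NULL && flags == 0) {
 *       ... direct host access is possible ...
 *   }
 */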
1624af803a4fSRichard Henderson 
1625069cfe77SRichard Henderson void *probe_access(CPUArchState *env, target_ulong addr, int size,
1626069cfe77SRichard Henderson                    MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
1627069cfe77SRichard Henderson {
1628af803a4fSRichard Henderson     CPUTLBEntryFull *full;
1629069cfe77SRichard Henderson     void *host;
1630069cfe77SRichard Henderson     int flags;
1631069cfe77SRichard Henderson 
1632069cfe77SRichard Henderson     g_assert(-(addr | TARGET_PAGE_MASK) >= size);
1633069cfe77SRichard Henderson 
1634069cfe77SRichard Henderson     flags = probe_access_internal(env, addr, size, access_type, mmu_idx,
1635af803a4fSRichard Henderson                                   false, &host, &full, retaddr);
1636069cfe77SRichard Henderson 
1637069cfe77SRichard Henderson     /* Per the interface, size == 0 merely faults the access. */
1638069cfe77SRichard Henderson     if (size == 0) {
163973bc0bd4SRichard Henderson         return NULL;
164073bc0bd4SRichard Henderson     }
164173bc0bd4SRichard Henderson 
1642069cfe77SRichard Henderson     if (unlikely(flags & (TLB_NOTDIRTY | TLB_WATCHPOINT))) {
164303a98189SDavid Hildenbrand         /* Handle watchpoints.  */
1644069cfe77SRichard Henderson         if (flags & TLB_WATCHPOINT) {
1645069cfe77SRichard Henderson             int wp_access = (access_type == MMU_DATA_STORE
1646069cfe77SRichard Henderson                              ? BP_MEM_WRITE : BP_MEM_READ);
164703a98189SDavid Hildenbrand             cpu_check_watchpoint(env_cpu(env), addr, size,
164825d3ec58SRichard Henderson                                  full->attrs, wp_access, retaddr);
1649d9bb58e5SYang Zhong         }
1650fef39ccdSDavid Hildenbrand 
165173bc0bd4SRichard Henderson         /* Handle clean RAM pages.  */
1652069cfe77SRichard Henderson         if (flags & TLB_NOTDIRTY) {
165325d3ec58SRichard Henderson             notdirty_write(env_cpu(env), addr, 1, full, retaddr);
165473bc0bd4SRichard Henderson         }
1655fef39ccdSDavid Hildenbrand     }
1656fef39ccdSDavid Hildenbrand 
1657069cfe77SRichard Henderson     return host;
1658d9bb58e5SYang Zhong }
1659d9bb58e5SYang Zhong 
16604811e909SRichard Henderson void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
16614811e909SRichard Henderson                         MMUAccessType access_type, int mmu_idx)
16624811e909SRichard Henderson {
1663af803a4fSRichard Henderson     CPUTLBEntryFull *full;
1664069cfe77SRichard Henderson     void *host;
1665069cfe77SRichard Henderson     int flags;
16664811e909SRichard Henderson 
1667069cfe77SRichard Henderson     flags = probe_access_internal(env, addr, 0, access_type,
1668af803a4fSRichard Henderson                                   mmu_idx, true, &host, &full, 0);
1669069cfe77SRichard Henderson 
1670069cfe77SRichard Henderson     /* No combination of flags is expected by the caller. */
1671069cfe77SRichard Henderson     return flags ? NULL : host;
16724811e909SRichard Henderson }
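
/*
 * Usage sketch (hypothetical caller): fast-path a guest-to-guest copy
 * only when both pages are plain RAM and already present in the TLB;
 * 'len' must not cross either page boundary:
 *
 *   void *src = tlb_vaddr_to_host(env, saddr, MMU_DATA_LOAD, mmu_idx);
 *   void *dst = tlb_vaddr_to_host(env, daddr, MMU_DATA_STORE, mmu_idx);
 *   if (src != NULL && dst != NULL) {
 *       memcpy(dst, src, len);
 *   }
 */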
16734811e909SRichard Henderson 
16747e0d9973SRichard Henderson /*
16757e0d9973SRichard Henderson  * Return a ram_addr_t for the virtual address for execution.
16767e0d9973SRichard Henderson  *
16777e0d9973SRichard Henderson  * Return -1 if we can't translate and execute from an entire page
16787e0d9973SRichard Henderson  * of RAM.  This will force us to execute by loading and translating
16797e0d9973SRichard Henderson  * one insn at a time, without caching.
16807e0d9973SRichard Henderson  *
16817e0d9973SRichard Henderson  * NOTE: This function will trigger an exception if the page is
16827e0d9973SRichard Henderson  * not executable.
16837e0d9973SRichard Henderson  */
16847e0d9973SRichard Henderson tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
16857e0d9973SRichard Henderson                                         void **hostp)
16867e0d9973SRichard Henderson {
1687af803a4fSRichard Henderson     CPUTLBEntryFull *full;
16887e0d9973SRichard Henderson     void *p;
16897e0d9973SRichard Henderson 
16907e0d9973SRichard Henderson     (void)probe_access_internal(env, addr, 1, MMU_INST_FETCH,
1691af803a4fSRichard Henderson                                 cpu_mmu_index(env, true), false, &p, &full, 0);
16927e0d9973SRichard Henderson     if (p == NULL) {
16937e0d9973SRichard Henderson         return -1;
16947e0d9973SRichard Henderson     }
16957e0d9973SRichard Henderson     if (hostp) {
16967e0d9973SRichard Henderson         *hostp = p;
16977e0d9973SRichard Henderson     }
16987e0d9973SRichard Henderson     return qemu_ram_addr_from_host_nofail(p);
16997e0d9973SRichard Henderson }
17007e0d9973SRichard Henderson 
1701235537faSAlex Bennée #ifdef CONFIG_PLUGIN
1702235537faSAlex Bennée /*
1703235537faSAlex Bennée  * Perform a TLB lookup and populate the qemu_plugin_hwaddr structure.
1704235537faSAlex Bennée  * This should be a hot path as we will have just looked this address up
1705235537faSAlex Bennée  * in the softmmu lookup code (or helper). We don't handle re-fills or
1706235537faSAlex Bennée  * checking the victim table. This is purely informational.
1707235537faSAlex Bennée  *
17082f3a57eeSAlex Bennée  * This almost never fails as the memory access being instrumented
17092f3a57eeSAlex Bennée  * should have just filled the TLB. The one corner case is io_writex
17102f3a57eeSAlex Bennée  * which can cause TLB flushes and potential resizing of the TLBs,
1711570ef309SAlex Bennée  * losing the information we need. In those cases we need to recover
171225d3ec58SRichard Henderson  * data from a copy of the CPUTLBEntryFull. As long as this always occurs
1713570ef309SAlex Bennée  * on the same thread (which is the case for a mem callback), this is safe.
1714235537faSAlex Bennée  */
1715235537faSAlex Bennée 
1716235537faSAlex Bennée bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr, int mmu_idx,
1717235537faSAlex Bennée                        bool is_store, struct qemu_plugin_hwaddr *data)
1718235537faSAlex Bennée {
1719235537faSAlex Bennée     CPUArchState *env = cpu->env_ptr;
1720235537faSAlex Bennée     CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
1721235537faSAlex Bennée     uintptr_t index = tlb_index(env, mmu_idx, addr);
1722235537faSAlex Bennée     target_ulong tlb_addr = is_store ? tlb_addr_write(tlbe) : tlbe->addr_read;
1723235537faSAlex Bennée 
1724235537faSAlex Bennée     if (likely(tlb_hit(tlb_addr, addr))) {
1725235537faSAlex Bennée         /* We must have an iotlb entry for MMIO */
1726235537faSAlex Bennée         if (tlb_addr & TLB_MMIO) {
172725d3ec58SRichard Henderson             CPUTLBEntryFull *full;
172825d3ec58SRichard Henderson             full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
1729235537faSAlex Bennée             data->is_io = true;
173025d3ec58SRichard Henderson             data->v.io.section =
173125d3ec58SRichard Henderson                 iotlb_to_section(cpu, full->xlat_section, full->attrs);
173225d3ec58SRichard Henderson             data->v.io.offset = (full->xlat_section & TARGET_PAGE_MASK) + addr;
1733235537faSAlex Bennée         } else {
1734235537faSAlex Bennée             data->is_io = false;
17352d932039SAlex Bennée             data->v.ram.hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
1736235537faSAlex Bennée         }
1737235537faSAlex Bennée         return true;
17382f3a57eeSAlex Bennée     } else {
17392f3a57eeSAlex Bennée         SavedIOTLB *saved = &cpu->saved_iotlb;
17402f3a57eeSAlex Bennée         data->is_io = true;
17412f3a57eeSAlex Bennée         data->v.io.section = saved->section;
17422f3a57eeSAlex Bennée         data->v.io.offset = saved->mr_offset;
17432f3a57eeSAlex Bennée         return true;
1744235537faSAlex Bennée     }
1745235537faSAlex Bennée }
1746235537faSAlex Bennée 
1747235537faSAlex Bennée #endif
1748235537faSAlex Bennée 
174908dff435SRichard Henderson /*
175008dff435SRichard Henderson  * Probe for an atomic operation.  Do not allow unaligned operations,
175108dff435SRichard Henderson  * or io operations to proceed.  Return the host address.
175208dff435SRichard Henderson  *
175308dff435SRichard Henderson  * @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE.
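 *
 * For example, the cmpxchg and read-modify-write helpers instantiated
 * from atomic_template.h look up the address with PAGE_READ | PAGE_WRITE,
 * while plain atomic loads and stores pass PAGE_READ or PAGE_WRITE alone.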
175408dff435SRichard Henderson  */
1755d9bb58e5SYang Zhong static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
17569002ffcbSRichard Henderson                                MemOpIdx oi, int size, int prot,
175708dff435SRichard Henderson                                uintptr_t retaddr)
1758d9bb58e5SYang Zhong {
1759b826044fSRichard Henderson     uintptr_t mmu_idx = get_mmuidx(oi);
176014776ab5STony Nguyen     MemOp mop = get_memop(oi);
1761d9bb58e5SYang Zhong     int a_bits = get_alignment_bits(mop);
176208dff435SRichard Henderson     uintptr_t index;
176308dff435SRichard Henderson     CPUTLBEntry *tlbe;
176408dff435SRichard Henderson     target_ulong tlb_addr;
176534d49937SPeter Maydell     void *hostaddr;
1766d9bb58e5SYang Zhong 
1767b826044fSRichard Henderson     tcg_debug_assert(mmu_idx < NB_MMU_MODES);
1768b826044fSRichard Henderson 
1769d9bb58e5SYang Zhong     /* Adjust the given return address.  */
1770d9bb58e5SYang Zhong     retaddr -= GETPC_ADJ;
1771d9bb58e5SYang Zhong 
1772d9bb58e5SYang Zhong     /* Enforce guest required alignment.  */
1773d9bb58e5SYang Zhong     if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
1774d9bb58e5SYang Zhong         /* ??? Maybe indicate atomic op to cpu_unaligned_access */
177529a0af61SRichard Henderson         cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
1776d9bb58e5SYang Zhong                              mmu_idx, retaddr);
1777d9bb58e5SYang Zhong     }
1778d9bb58e5SYang Zhong 
1779d9bb58e5SYang Zhong     /* Enforce qemu required alignment.  */
178008dff435SRichard Henderson     if (unlikely(addr & (size - 1))) {
1781d9bb58e5SYang Zhong         /* We get here if guest alignment was not requested,
1782d9bb58e5SYang Zhong            or was not enforced by cpu_unaligned_access above.
1783d9bb58e5SYang Zhong            We might widen the access and emulate, but for now
1784d9bb58e5SYang Zhong            mark an exception and exit the cpu loop.  */
1785d9bb58e5SYang Zhong         goto stop_the_world;
1786d9bb58e5SYang Zhong     }
1787d9bb58e5SYang Zhong 
178808dff435SRichard Henderson     index = tlb_index(env, mmu_idx, addr);
178908dff435SRichard Henderson     tlbe = tlb_entry(env, mmu_idx, addr);
179008dff435SRichard Henderson 
1791d9bb58e5SYang Zhong     /* Check TLB entry and enforce page permissions.  */
179208dff435SRichard Henderson     if (prot & PAGE_WRITE) {
179308dff435SRichard Henderson         tlb_addr = tlb_addr_write(tlbe);
1794334692bcSPeter Maydell         if (!tlb_hit(tlb_addr, addr)) {
1795d9bb58e5SYang Zhong             if (!VICTIM_TLB_HIT(addr_write, addr)) {
179608dff435SRichard Henderson                 tlb_fill(env_cpu(env), addr, size,
179708dff435SRichard Henderson                          MMU_DATA_STORE, mmu_idx, retaddr);
17986d967cb8SEmilio G. Cota                 index = tlb_index(env, mmu_idx, addr);
17996d967cb8SEmilio G. Cota                 tlbe = tlb_entry(env, mmu_idx, addr);
1800d9bb58e5SYang Zhong             }
1801403f290cSEmilio G. Cota             tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
1802d9bb58e5SYang Zhong         }
1803d9bb58e5SYang Zhong 
180408dff435SRichard Henderson         /* Let the guest notice RMW on a write-only page.  */
180508dff435SRichard Henderson         if ((prot & PAGE_READ) &&
180608dff435SRichard Henderson             unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) {
180708dff435SRichard Henderson             tlb_fill(env_cpu(env), addr, size,
180808dff435SRichard Henderson                      MMU_DATA_LOAD, mmu_idx, retaddr);
180908dff435SRichard Henderson             /*
181008dff435SRichard Henderson              * Since we don't support reads and writes to different addresses,
181108dff435SRichard Henderson              * and we do have the proper page loaded for write, this shouldn't
181208dff435SRichard Henderson              * ever return.  But just in case, handle via stop-the-world.
181308dff435SRichard Henderson              */
181408dff435SRichard Henderson             goto stop_the_world;
181508dff435SRichard Henderson         }
181608dff435SRichard Henderson     } else /* if (prot & PAGE_READ) */ {
181708dff435SRichard Henderson         tlb_addr = tlbe->addr_read;
181808dff435SRichard Henderson         if (!tlb_hit(tlb_addr, addr)) {
181908dff435SRichard Henderson             if (!VICTIM_TLB_HIT(addr_read, addr)) {
182008dff435SRichard Henderson                 tlb_fill(env_cpu(env), addr, size,
182108dff435SRichard Henderson                          MMU_DATA_LOAD, mmu_idx, retaddr);
182208dff435SRichard Henderson                 index = tlb_index(env, mmu_idx, addr);
182308dff435SRichard Henderson                 tlbe = tlb_entry(env, mmu_idx, addr);
182408dff435SRichard Henderson             }
182508dff435SRichard Henderson             tlb_addr = tlbe->addr_read & ~TLB_INVALID_MASK;
182608dff435SRichard Henderson         }
182708dff435SRichard Henderson     }
182808dff435SRichard Henderson 
182955df6fcfSPeter Maydell     /* Notice an I/O access or a needs-MMU-lookup access */
183030d7e098SRichard Henderson     if (unlikely(tlb_addr & TLB_MMIO)) {
1831d9bb58e5SYang Zhong         /* There's really nothing that can be done to
1832d9bb58e5SYang Zhong            support this apart from stop-the-world.  */
1833d9bb58e5SYang Zhong         goto stop_the_world;
1834d9bb58e5SYang Zhong     }
1835d9bb58e5SYang Zhong 
183634d49937SPeter Maydell     hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
183734d49937SPeter Maydell 
183834d49937SPeter Maydell     if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
183908dff435SRichard Henderson         notdirty_write(env_cpu(env), addr, size,
184025d3ec58SRichard Henderson                        &env_tlb(env)->d[mmu_idx].fulltlb[index], retaddr);
184134d49937SPeter Maydell     }
184234d49937SPeter Maydell 
184334d49937SPeter Maydell     return hostaddr;
1844d9bb58e5SYang Zhong 
1845d9bb58e5SYang Zhong  stop_the_world:
184629a0af61SRichard Henderson     cpu_loop_exit_atomic(env_cpu(env), retaddr);
1847d9bb58e5SYang Zhong }
1848d9bb58e5SYang Zhong 
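/*
 * Illustrative sketch (not part of the build): an atomic helper stamped
 * out from atomic_template.h consumes the host pointer returned above
 * roughly as follows, with "do_cmpxchg" standing in for the
 * size-specific qatomic primitive:
 *
 *     void *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
 *                                     PAGE_READ | PAGE_WRITE, retaddr);
 *     DATA_TYPE ret = do_cmpxchg(haddr, cmpv, newv);
 *     ATOMIC_MMU_CLEANUP;
 *
 * Every case that cannot be handled this way (MMIO, unaligned access,
 * RMW on a write-only page) has already left via cpu_loop_exit_atomic().
 */
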
1849eed56642SAlex Bennée /*
1850f83bcecbSRichard Henderson  * Verify that we have passed the correct MemOp to the correct function.
1851f83bcecbSRichard Henderson  *
1852f83bcecbSRichard Henderson  * In the case of the helper_*_mmu functions, we will have done this by
1853f83bcecbSRichard Henderson  * using the MemOp to look up the helper during code generation.
1854f83bcecbSRichard Henderson  *
1855f83bcecbSRichard Henderson  * In the case of the cpu_*_mmu functions, this is up to the caller.
1856f83bcecbSRichard Henderson  * We could present one function to target code, and dispatch based on
1857f83bcecbSRichard Henderson  * the MemOp, but so far we have worked hard to avoid an indirect function
1858f83bcecbSRichard Henderson  * call along the memory path.
1859f83bcecbSRichard Henderson  */
1860f83bcecbSRichard Henderson static void validate_memop(MemOpIdx oi, MemOp expected)
1861f83bcecbSRichard Henderson {
1862f83bcecbSRichard Henderson #ifdef CONFIG_DEBUG_TCG
1863f83bcecbSRichard Henderson     MemOp have = get_memop(oi) & (MO_SIZE | MO_BSWAP);
1864f83bcecbSRichard Henderson     assert(have == expected);
1865f83bcecbSRichard Henderson #endif
1866f83bcecbSRichard Henderson }
1867f83bcecbSRichard Henderson 
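/*
 * For reference, a MemOpIdx packs a MemOp together with an mmu index,
 * so a typical caller builds it as:
 *
 *     MemOpIdx oi = make_memop_idx(MO_LEUL, mmu_idx);
 *
 * validate_memop() then checks only the MO_SIZE and MO_BSWAP bits,
 * deliberately ignoring alignment and the mmu index.
 */
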
1868f83bcecbSRichard Henderson /*
1869eed56642SAlex Bennée  * Load Helpers
1870eed56642SAlex Bennée  *
1871eed56642SAlex Bennée  * We support two different access types. SOFTMMU_CODE_ACCESS is
1872eed56642SAlex Bennée  * specifically for reading instructions from system memory. It is
1873eed56642SAlex Bennée  * called by the translation loop and in some helpers where the code
1874eed56642SAlex Bennée  * is disassembled. It shouldn't be called directly by guest code.
1875eed56642SAlex Bennée  */
1876d9bb58e5SYang Zhong 
18772dd92606SRichard Henderson typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr,
18789002ffcbSRichard Henderson                                 MemOpIdx oi, uintptr_t retaddr);
18792dd92606SRichard Henderson 
1880c6b716cdSRichard Henderson static inline uint64_t QEMU_ALWAYS_INLINE
188180d9d1c6SRichard Henderson load_memop(const void *haddr, MemOp op)
188280d9d1c6SRichard Henderson {
188380d9d1c6SRichard Henderson     switch (op) {
188480d9d1c6SRichard Henderson     case MO_UB:
188580d9d1c6SRichard Henderson         return ldub_p(haddr);
188680d9d1c6SRichard Henderson     case MO_BEUW:
188780d9d1c6SRichard Henderson         return lduw_be_p(haddr);
188880d9d1c6SRichard Henderson     case MO_LEUW:
188980d9d1c6SRichard Henderson         return lduw_le_p(haddr);
189080d9d1c6SRichard Henderson     case MO_BEUL:
189180d9d1c6SRichard Henderson         return (uint32_t)ldl_be_p(haddr);
189280d9d1c6SRichard Henderson     case MO_LEUL:
189380d9d1c6SRichard Henderson         return (uint32_t)ldl_le_p(haddr);
1894fc313c64SFrédéric Pétrot     case MO_BEUQ:
189580d9d1c6SRichard Henderson         return ldq_be_p(haddr);
1896fc313c64SFrédéric Pétrot     case MO_LEUQ:
189780d9d1c6SRichard Henderson         return ldq_le_p(haddr);
189880d9d1c6SRichard Henderson     default:
189980d9d1c6SRichard Henderson         qemu_build_not_reached();
190080d9d1c6SRichard Henderson     }
190180d9d1c6SRichard Henderson }
190280d9d1c6SRichard Henderson 
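/*
 * Every caller passes a compile-time-constant op, so each load_memop()
 * call site folds to the single host load behind e.g. ldl_le_p(); the
 * qemu_build_not_reached() default arm turns any non-constant op
 * reaching the switch into a build failure.
 */
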
190380d9d1c6SRichard Henderson static inline uint64_t QEMU_ALWAYS_INLINE
19049002ffcbSRichard Henderson load_helper(CPUArchState *env, target_ulong addr, MemOpIdx oi,
1905be5c4787STony Nguyen             uintptr_t retaddr, MemOp op, bool code_read,
19062dd92606SRichard Henderson             FullLoadHelper *full_load)
1907eed56642SAlex Bennée {
1908eed56642SAlex Bennée     const size_t tlb_off = code_read ?
1909eed56642SAlex Bennée         offsetof(CPUTLBEntry, addr_code) : offsetof(CPUTLBEntry, addr_read);
1910f1be3696SRichard Henderson     const MMUAccessType access_type =
1911f1be3696SRichard Henderson         code_read ? MMU_INST_FETCH : MMU_DATA_LOAD;
1912b826044fSRichard Henderson     const unsigned a_bits = get_alignment_bits(get_memop(oi));
1913b826044fSRichard Henderson     const size_t size = memop_size(op);
1914b826044fSRichard Henderson     uintptr_t mmu_idx = get_mmuidx(oi);
1915b826044fSRichard Henderson     uintptr_t index;
1916b826044fSRichard Henderson     CPUTLBEntry *entry;
1917b826044fSRichard Henderson     target_ulong tlb_addr;
1918eed56642SAlex Bennée     void *haddr;
1919eed56642SAlex Bennée     uint64_t res;
1920b826044fSRichard Henderson 
1921b826044fSRichard Henderson     tcg_debug_assert(mmu_idx < NB_MMU_MODES);
1922d9bb58e5SYang Zhong 
1923eed56642SAlex Bennée     /* Handle CPU-specific unaligned behaviour.  */
1924eed56642SAlex Bennée     if (addr & ((1 << a_bits) - 1)) {
192529a0af61SRichard Henderson         cpu_unaligned_access(env_cpu(env), addr, access_type,
1926eed56642SAlex Bennée                              mmu_idx, retaddr);
1927eed56642SAlex Bennée     }
1928eed56642SAlex Bennée 
1929b826044fSRichard Henderson     index = tlb_index(env, mmu_idx, addr);
1930b826044fSRichard Henderson     entry = tlb_entry(env, mmu_idx, addr);
1931b826044fSRichard Henderson     tlb_addr = code_read ? entry->addr_code : entry->addr_read;
1932b826044fSRichard Henderson 
1933eed56642SAlex Bennée     /* If the TLB entry is for a different page, reload and try again.  */
1934eed56642SAlex Bennée     if (!tlb_hit(tlb_addr, addr)) {
1935eed56642SAlex Bennée         if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
1936eed56642SAlex Bennée                             addr & TARGET_PAGE_MASK)) {
193729a0af61SRichard Henderson             tlb_fill(env_cpu(env), addr, size,
1938f1be3696SRichard Henderson                      access_type, mmu_idx, retaddr);
1939eed56642SAlex Bennée             index = tlb_index(env, mmu_idx, addr);
1940eed56642SAlex Bennée             entry = tlb_entry(env, mmu_idx, addr);
1941eed56642SAlex Bennée         }
1942eed56642SAlex Bennée         tlb_addr = code_read ? entry->addr_code : entry->addr_read;
194330d7e098SRichard Henderson         tlb_addr &= ~TLB_INVALID_MASK;
1944eed56642SAlex Bennée     }
1945eed56642SAlex Bennée 
194650b107c5SRichard Henderson     /* Handle anything that isn't just a straight memory access.  */
1947eed56642SAlex Bennée     if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
194825d3ec58SRichard Henderson         CPUTLBEntryFull *full;
19495b87b3e6SRichard Henderson         bool need_swap;
195050b107c5SRichard Henderson 
195150b107c5SRichard Henderson         /* For anything that is unaligned, recurse through full_load.  */
1952eed56642SAlex Bennée         if ((addr & (size - 1)) != 0) {
1953eed56642SAlex Bennée             goto do_unaligned_access;
1954eed56642SAlex Bennée         }
195550b107c5SRichard Henderson 
195625d3ec58SRichard Henderson         full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
195750b107c5SRichard Henderson 
195850b107c5SRichard Henderson         /* Handle watchpoints.  */
195950b107c5SRichard Henderson         if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
196050b107c5SRichard Henderson             /* On watchpoint hit, this will longjmp out.  */
196150b107c5SRichard Henderson             cpu_check_watchpoint(env_cpu(env), addr, size,
196225d3ec58SRichard Henderson                                  full->attrs, BP_MEM_READ, retaddr);
19635b87b3e6SRichard Henderson         }
196450b107c5SRichard Henderson 
19655b87b3e6SRichard Henderson         need_swap = size > 1 && (tlb_addr & TLB_BSWAP);
196650b107c5SRichard Henderson 
196750b107c5SRichard Henderson         /* Handle I/O access.  */
19685b87b3e6SRichard Henderson         if (likely(tlb_addr & TLB_MMIO)) {
196925d3ec58SRichard Henderson             return io_readx(env, full, mmu_idx, addr, retaddr,
19705b87b3e6SRichard Henderson                             access_type, op ^ (need_swap * MO_BSWAP));
19715b87b3e6SRichard Henderson         }
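        /*
         * In the io_readx() call above, need_swap is 0 or 1, so
         * op ^ (need_swap * MO_BSWAP) flips the byte-swap bit exactly
         * when the page was registered with TLB_BSWAP.
         */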
19725b87b3e6SRichard Henderson 
19735b87b3e6SRichard Henderson         haddr = (void *)((uintptr_t)addr + entry->addend);
19745b87b3e6SRichard Henderson 
19755b87b3e6SRichard Henderson         /*
19765b87b3e6SRichard Henderson          * Keep these two load_memop separate to ensure that the compiler
19775b87b3e6SRichard Henderson          * is able to fold the entire function to a single instruction.
19785b87b3e6SRichard Henderson          * There is a build-time assert inside to remind you of this.  ;-)
19795b87b3e6SRichard Henderson          */
19805b87b3e6SRichard Henderson         if (unlikely(need_swap)) {
19815b87b3e6SRichard Henderson             return load_memop(haddr, op ^ MO_BSWAP);
19825b87b3e6SRichard Henderson         }
19835b87b3e6SRichard Henderson         return load_memop(haddr, op);
1984eed56642SAlex Bennée     }
1985eed56642SAlex Bennée 
1986eed56642SAlex Bennée     /* Handle slow unaligned access (it spans two pages or I/O).  */
1987eed56642SAlex Bennée     if (size > 1
1988eed56642SAlex Bennée         && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
1989eed56642SAlex Bennée                     >= TARGET_PAGE_SIZE)) {
1990eed56642SAlex Bennée         target_ulong addr1, addr2;
19918c79b288SAlex Bennée         uint64_t r1, r2;
1992eed56642SAlex Bennée         unsigned shift;
1993eed56642SAlex Bennée     do_unaligned_access:
1994ab7a2009SAlex Bennée         addr1 = addr & ~((target_ulong)size - 1);
1995eed56642SAlex Bennée         addr2 = addr1 + size;
19962dd92606SRichard Henderson         r1 = full_load(env, addr1, oi, retaddr);
19972dd92606SRichard Henderson         r2 = full_load(env, addr2, oi, retaddr);
1998eed56642SAlex Bennée         shift = (addr & (size - 1)) * 8;
1999eed56642SAlex Bennée 
2000be5c4787STony Nguyen         if (memop_big_endian(op)) {
2001eed56642SAlex Bennée             /* Big-endian combine.  */
2002eed56642SAlex Bennée             res = (r1 << shift) | (r2 >> ((size * 8) - shift));
2003eed56642SAlex Bennée         } else {
2004eed56642SAlex Bennée             /* Little-endian combine.  */
2005eed56642SAlex Bennée             res = (r1 >> shift) | (r2 << ((size * 8) - shift));
2006eed56642SAlex Bennée         }
2007eed56642SAlex Bennée         return res & MAKE_64BIT_MASK(0, size * 8);
2008eed56642SAlex Bennée     }
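    /*
     * Worked example of the combine above: a little-endian 4-byte load
     * with (addr & 3) == 3 gives shift == 24, so r1 contributes only
     * its top byte, (r1 >> 24), and r2 the remaining three, (r2 << 8);
     * MAKE_64BIT_MASK(0, 32) then drops any high-order residue.
     */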
2009eed56642SAlex Bennée 
2010eed56642SAlex Bennée     haddr = (void *)((uintptr_t)addr + entry->addend);
201180d9d1c6SRichard Henderson     return load_memop(haddr, op);
2012eed56642SAlex Bennée }
2013eed56642SAlex Bennée 
2014eed56642SAlex Bennée /*
2015eed56642SAlex Bennée  * For the benefit of TCG generated code, we want to avoid the
2016eed56642SAlex Bennée  * complication of ABI-specific return type promotion and always
2017eed56642SAlex Bennée  * return a value extended to the register size of the host. This is
2018eed56642SAlex Bennée  * tcg_target_long, except in the case of a 32-bit host and 64-bit
2019eed56642SAlex Bennée  * data, and for that we always have uint64_t.
2020eed56642SAlex Bennée  *
2021eed56642SAlex Bennée  * We don't bother with this widened value for SOFTMMU_CODE_ACCESS.
2022eed56642SAlex Bennée  */
2023eed56642SAlex Bennée 
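/*
 * Concretely: on a 64-bit host, helper_le_lduw_mmu() hands back the
 * 16-bit result already zero-extended in a 64-bit register, so the
 * generated code needs no extra extension op.
 */
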
20242dd92606SRichard Henderson static uint64_t full_ldub_mmu(CPUArchState *env, target_ulong addr,
20259002ffcbSRichard Henderson                               MemOpIdx oi, uintptr_t retaddr)
20262dd92606SRichard Henderson {
2027f83bcecbSRichard Henderson     validate_memop(oi, MO_UB);
2028be5c4787STony Nguyen     return load_helper(env, addr, oi, retaddr, MO_UB, false, full_ldub_mmu);
20292dd92606SRichard Henderson }
20302dd92606SRichard Henderson 
2031fc1bc777SRichard Henderson tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
20329002ffcbSRichard Henderson                                      MemOpIdx oi, uintptr_t retaddr)
2033eed56642SAlex Bennée {
20342dd92606SRichard Henderson     return full_ldub_mmu(env, addr, oi, retaddr);
20352dd92606SRichard Henderson }
20362dd92606SRichard Henderson 
20372dd92606SRichard Henderson static uint64_t full_le_lduw_mmu(CPUArchState *env, target_ulong addr,
20389002ffcbSRichard Henderson                                  MemOpIdx oi, uintptr_t retaddr)
20392dd92606SRichard Henderson {
2040f83bcecbSRichard Henderson     validate_memop(oi, MO_LEUW);
2041be5c4787STony Nguyen     return load_helper(env, addr, oi, retaddr, MO_LEUW, false,
20422dd92606SRichard Henderson                        full_le_lduw_mmu);
2043eed56642SAlex Bennée }
2044eed56642SAlex Bennée 
2045fc1bc777SRichard Henderson tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
20469002ffcbSRichard Henderson                                     MemOpIdx oi, uintptr_t retaddr)
2047eed56642SAlex Bennée {
20482dd92606SRichard Henderson     return full_le_lduw_mmu(env, addr, oi, retaddr);
20492dd92606SRichard Henderson }
20502dd92606SRichard Henderson 
20512dd92606SRichard Henderson static uint64_t full_be_lduw_mmu(CPUArchState *env, target_ulong addr,
20529002ffcbSRichard Henderson                                  MemOpIdx oi, uintptr_t retaddr)
20532dd92606SRichard Henderson {
2054f83bcecbSRichard Henderson     validate_memop(oi, MO_BEUW);
2055be5c4787STony Nguyen     return load_helper(env, addr, oi, retaddr, MO_BEUW, false,
20562dd92606SRichard Henderson                        full_be_lduw_mmu);
2057eed56642SAlex Bennée }
2058eed56642SAlex Bennée 
2059fc1bc777SRichard Henderson tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
20609002ffcbSRichard Henderson                                     MemOpIdx oi, uintptr_t retaddr)
2061eed56642SAlex Bennée {
20622dd92606SRichard Henderson     return full_be_lduw_mmu(env, addr, oi, retaddr);
20632dd92606SRichard Henderson }
20642dd92606SRichard Henderson 
20652dd92606SRichard Henderson static uint64_t full_le_ldul_mmu(CPUArchState *env, target_ulong addr,
20669002ffcbSRichard Henderson                                  MemOpIdx oi, uintptr_t retaddr)
20672dd92606SRichard Henderson {
2068f83bcecbSRichard Henderson     validate_memop(oi, MO_LEUL);
2069be5c4787STony Nguyen     return load_helper(env, addr, oi, retaddr, MO_LEUL, false,
20702dd92606SRichard Henderson                        full_le_ldul_mmu);
2071eed56642SAlex Bennée }
2072eed56642SAlex Bennée 
2073fc1bc777SRichard Henderson tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
20749002ffcbSRichard Henderson                                     MemOpIdx oi, uintptr_t retaddr)
2075eed56642SAlex Bennée {
20762dd92606SRichard Henderson     return full_le_ldul_mmu(env, addr, oi, retaddr);
20772dd92606SRichard Henderson }
20782dd92606SRichard Henderson 
20792dd92606SRichard Henderson static uint64_t full_be_ldul_mmu(CPUArchState *env, target_ulong addr,
20809002ffcbSRichard Henderson                                  MemOpIdx oi, uintptr_t retaddr)
20812dd92606SRichard Henderson {
2082f83bcecbSRichard Henderson     validate_memop(oi, MO_BEUL);
2083be5c4787STony Nguyen     return load_helper(env, addr, oi, retaddr, MO_BEUL, false,
20842dd92606SRichard Henderson                        full_be_ldul_mmu);
2085eed56642SAlex Bennée }
2086eed56642SAlex Bennée 
2087fc1bc777SRichard Henderson tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
20889002ffcbSRichard Henderson                                     MemOpIdx oi, uintptr_t retaddr)
2089eed56642SAlex Bennée {
20902dd92606SRichard Henderson     return full_be_ldul_mmu(env, addr, oi, retaddr);
2091eed56642SAlex Bennée }
2092eed56642SAlex Bennée 
2093fc1bc777SRichard Henderson uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
20949002ffcbSRichard Henderson                            MemOpIdx oi, uintptr_t retaddr)
2095eed56642SAlex Bennée {
2096fc313c64SFrédéric Pétrot     validate_memop(oi, MO_LEUQ);
2097fc313c64SFrédéric Pétrot     return load_helper(env, addr, oi, retaddr, MO_LEUQ, false,
20982dd92606SRichard Henderson                        helper_le_ldq_mmu);
2099eed56642SAlex Bennée }
2100eed56642SAlex Bennée 
2101fc1bc777SRichard Henderson uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
21029002ffcbSRichard Henderson                            MemOpIdx oi, uintptr_t retaddr)
2103eed56642SAlex Bennée {
2104fc313c64SFrédéric Pétrot     validate_memop(oi, MO_BEUQ);
2105fc313c64SFrédéric Pétrot     return load_helper(env, addr, oi, retaddr, MO_BEUQ, false,
21062dd92606SRichard Henderson                        helper_be_ldq_mmu);
2107eed56642SAlex Bennée }
2108eed56642SAlex Bennée 
2109eed56642SAlex Bennée /*
2110eed56642SAlex Bennée  * Provide signed versions of the load routines as well.  We can of course
2111eed56642SAlex Bennée  * avoid this for 64-bit data, or for 32-bit data on 32-bit host.
2112eed56642SAlex Bennée  */
2113eed56642SAlex Bennée 
2115eed56642SAlex Bennée tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
21169002ffcbSRichard Henderson                                      MemOpIdx oi, uintptr_t retaddr)
2117eed56642SAlex Bennée {
2118eed56642SAlex Bennée     return (int8_t)helper_ret_ldub_mmu(env, addr, oi, retaddr);
2119eed56642SAlex Bennée }
2120eed56642SAlex Bennée 
2121eed56642SAlex Bennée tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
21229002ffcbSRichard Henderson                                     MemOpIdx oi, uintptr_t retaddr)
2123eed56642SAlex Bennée {
2124eed56642SAlex Bennée     return (int16_t)helper_le_lduw_mmu(env, addr, oi, retaddr);
2125eed56642SAlex Bennée }
2126eed56642SAlex Bennée 
2127eed56642SAlex Bennée tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
21289002ffcbSRichard Henderson                                     MemOpIdx oi, uintptr_t retaddr)
2129eed56642SAlex Bennée {
2130eed56642SAlex Bennée     return (int16_t)helper_be_lduw_mmu(env, addr, oi, retaddr);
2131eed56642SAlex Bennée }
2132eed56642SAlex Bennée 
2133eed56642SAlex Bennée tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
21349002ffcbSRichard Henderson                                     MemOpIdx oi, uintptr_t retaddr)
2135eed56642SAlex Bennée {
2136eed56642SAlex Bennée     return (int32_t)helper_le_ldul_mmu(env, addr, oi, retaddr);
2137eed56642SAlex Bennée }
2138eed56642SAlex Bennée 
2139eed56642SAlex Bennée tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
21409002ffcbSRichard Henderson                                     MemOpIdx oi, uintptr_t retaddr)
2141eed56642SAlex Bennée {
2142eed56642SAlex Bennée     return (int32_t)helper_be_ldul_mmu(env, addr, oi, retaddr);
2143eed56642SAlex Bennée }
2144eed56642SAlex Bennée 
2145eed56642SAlex Bennée /*
2146d03f1408SRichard Henderson  * Load helpers for cpu_ldst.h.
2147d03f1408SRichard Henderson  */
2148d03f1408SRichard Henderson 
2149d03f1408SRichard Henderson static inline uint64_t cpu_load_helper(CPUArchState *env, abi_ptr addr,
2150f83bcecbSRichard Henderson                                        MemOpIdx oi, uintptr_t retaddr,
2151f83bcecbSRichard Henderson                                        FullLoadHelper *full_load)
2152d03f1408SRichard Henderson {
2153d03f1408SRichard Henderson     uint64_t ret;
2154d03f1408SRichard Henderson 
2155d03f1408SRichard Henderson     ret = full_load(env, addr, oi, retaddr);
215637aff087SRichard Henderson     qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
2157d03f1408SRichard Henderson     return ret;
2158d03f1408SRichard Henderson }
2159d03f1408SRichard Henderson 
2160f83bcecbSRichard Henderson uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr, MemOpIdx oi, uintptr_t ra)
2161d03f1408SRichard Henderson {
2162f83bcecbSRichard Henderson     return cpu_load_helper(env, addr, oi, ra, full_ldub_mmu);
2163d03f1408SRichard Henderson }
2164d03f1408SRichard Henderson 
2165f83bcecbSRichard Henderson uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr addr,
2166f83bcecbSRichard Henderson                         MemOpIdx oi, uintptr_t ra)
2167d03f1408SRichard Henderson {
2168f83bcecbSRichard Henderson     return cpu_load_helper(env, addr, oi, ra, full_be_lduw_mmu);
2169d03f1408SRichard Henderson }
2170d03f1408SRichard Henderson 
2171f83bcecbSRichard Henderson uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr addr,
2172f83bcecbSRichard Henderson                         MemOpIdx oi, uintptr_t ra)
2173d03f1408SRichard Henderson {
2174f83bcecbSRichard Henderson     return cpu_load_helper(env, addr, oi, ra, full_be_ldul_mmu);
2175d03f1408SRichard Henderson }
2176d03f1408SRichard Henderson 
2177f83bcecbSRichard Henderson uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr,
2178f83bcecbSRichard Henderson                         MemOpIdx oi, uintptr_t ra)
2179d03f1408SRichard Henderson {
218046697cb9SRichard Henderson     return cpu_load_helper(env, addr, oi, ra, helper_be_ldq_mmu);
2181d03f1408SRichard Henderson }
2182d03f1408SRichard Henderson 
2183f83bcecbSRichard Henderson uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr addr,
2184f83bcecbSRichard Henderson                         MemOpIdx oi, uintptr_t ra)
2185d03f1408SRichard Henderson {
2186f83bcecbSRichard Henderson     return cpu_load_helper(env, addr, oi, ra, full_le_lduw_mmu);
2187d03f1408SRichard Henderson }
2188d03f1408SRichard Henderson 
2189f83bcecbSRichard Henderson uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr addr,
2190f83bcecbSRichard Henderson                         MemOpIdx oi, uintptr_t ra)
2191d03f1408SRichard Henderson {
2192f83bcecbSRichard Henderson     return cpu_load_helper(env, addr, oi, ra, full_le_ldul_mmu);
2193b9e60257SRichard Henderson }
2194b9e60257SRichard Henderson 
2195f83bcecbSRichard Henderson uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr,
2196f83bcecbSRichard Henderson                         MemOpIdx oi, uintptr_t ra)
2197b9e60257SRichard Henderson {
2198f83bcecbSRichard Henderson     return cpu_load_helper(env, addr, oi, ra, helper_le_ldq_mmu);
2199cfe04a4bSRichard Henderson }
2200cfe04a4bSRichard Henderson 
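/*
 * Hypothetical use from a target helper, assuming the current data mmu
 * index and an alignment-checked 4-byte little-endian load:
 *
 *     MemOpIdx oi = make_memop_idx(MO_LEUL | MO_ALIGN,
 *                                  cpu_mmu_index(env, false));
 *     uint32_t val = cpu_ldl_le_mmu(env, addr, oi, GETPC());
 *
 * The plugin callback fires only after the access has completed.
 */
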
2201d03f1408SRichard Henderson /*
2202eed56642SAlex Bennée  * Store Helpers
2203eed56642SAlex Bennée  */
2204eed56642SAlex Bennée 
2205c6b716cdSRichard Henderson static inline void QEMU_ALWAYS_INLINE
220680d9d1c6SRichard Henderson store_memop(void *haddr, uint64_t val, MemOp op)
220780d9d1c6SRichard Henderson {
220880d9d1c6SRichard Henderson     switch (op) {
220980d9d1c6SRichard Henderson     case MO_UB:
221080d9d1c6SRichard Henderson         stb_p(haddr, val);
221180d9d1c6SRichard Henderson         break;
221280d9d1c6SRichard Henderson     case MO_BEUW:
221380d9d1c6SRichard Henderson         stw_be_p(haddr, val);
221480d9d1c6SRichard Henderson         break;
221580d9d1c6SRichard Henderson     case MO_LEUW:
221680d9d1c6SRichard Henderson         stw_le_p(haddr, val);
221780d9d1c6SRichard Henderson         break;
221880d9d1c6SRichard Henderson     case MO_BEUL:
221980d9d1c6SRichard Henderson         stl_be_p(haddr, val);
222080d9d1c6SRichard Henderson         break;
222180d9d1c6SRichard Henderson     case MO_LEUL:
222280d9d1c6SRichard Henderson         stl_le_p(haddr, val);
222380d9d1c6SRichard Henderson         break;
2224fc313c64SFrédéric Pétrot     case MO_BEUQ:
222580d9d1c6SRichard Henderson         stq_be_p(haddr, val);
222680d9d1c6SRichard Henderson         break;
2227fc313c64SFrédéric Pétrot     case MO_LEUQ:
222880d9d1c6SRichard Henderson         stq_le_p(haddr, val);
222980d9d1c6SRichard Henderson         break;
223080d9d1c6SRichard Henderson     default:
223180d9d1c6SRichard Henderson         qemu_build_not_reached();
223280d9d1c6SRichard Henderson     }
223380d9d1c6SRichard Henderson }
223480d9d1c6SRichard Henderson 
2235f83bcecbSRichard Henderson static void full_stb_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
2236f83bcecbSRichard Henderson                          MemOpIdx oi, uintptr_t retaddr);
2237f83bcecbSRichard Henderson 
22386b8b622eSRichard Henderson static void __attribute__((noinline))
22396b8b622eSRichard Henderson store_helper_unaligned(CPUArchState *env, target_ulong addr, uint64_t val,
22406b8b622eSRichard Henderson                        uintptr_t retaddr, size_t size, uintptr_t mmu_idx,
22416b8b622eSRichard Henderson                        bool big_endian)
22426b8b622eSRichard Henderson {
22436b8b622eSRichard Henderson     const size_t tlb_off = offsetof(CPUTLBEntry, addr_write);
22446b8b622eSRichard Henderson     uintptr_t index, index2;
22456b8b622eSRichard Henderson     CPUTLBEntry *entry, *entry2;
2246b0f650f0SIlya Leoshkevich     target_ulong page1, page2, tlb_addr, tlb_addr2;
22479002ffcbSRichard Henderson     MemOpIdx oi;
22486b8b622eSRichard Henderson     size_t size2;
22496b8b622eSRichard Henderson     int i;
22506b8b622eSRichard Henderson 
22516b8b622eSRichard Henderson     /*
22526b8b622eSRichard Henderson      * Ensure the second page is in the TLB.  Note that the first page
22536b8b622eSRichard Henderson      * is already guaranteed to be filled, and that the second page
2254b0f650f0SIlya Leoshkevich      * cannot evict the first.  An exception to this rule is PAGE_WRITE_INV
2255b0f650f0SIlya Leoshkevich      * handling: the first page could have evicted itself.
22566b8b622eSRichard Henderson      */
2257b0f650f0SIlya Leoshkevich     page1 = addr & TARGET_PAGE_MASK;
22586b8b622eSRichard Henderson     page2 = (addr + size) & TARGET_PAGE_MASK;
22596b8b622eSRichard Henderson     size2 = (addr + size) & ~TARGET_PAGE_MASK;
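    /*
     * Example: an 8-byte store at 0xfffc with 4KiB pages yields
     * page2 == 0x10000 and size2 == 4, i.e. four bytes land on page1
     * and four on page2.
     */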
22606b8b622eSRichard Henderson     index2 = tlb_index(env, mmu_idx, page2);
22616b8b622eSRichard Henderson     entry2 = tlb_entry(env, mmu_idx, page2);
22626b8b622eSRichard Henderson 
22636b8b622eSRichard Henderson     tlb_addr2 = tlb_addr_write(entry2);
2264b0f650f0SIlya Leoshkevich     if (page1 != page2 && !tlb_hit_page(tlb_addr2, page2)) {
22656b8b622eSRichard Henderson         if (!victim_tlb_hit(env, mmu_idx, index2, tlb_off, page2)) {
22666b8b622eSRichard Henderson             tlb_fill(env_cpu(env), page2, size2, MMU_DATA_STORE,
22676b8b622eSRichard Henderson                      mmu_idx, retaddr);
22686b8b622eSRichard Henderson             index2 = tlb_index(env, mmu_idx, page2);
22696b8b622eSRichard Henderson             entry2 = tlb_entry(env, mmu_idx, page2);
22706b8b622eSRichard Henderson         }
22716b8b622eSRichard Henderson         tlb_addr2 = tlb_addr_write(entry2);
22726b8b622eSRichard Henderson     }
22736b8b622eSRichard Henderson 
22746b8b622eSRichard Henderson     index = tlb_index(env, mmu_idx, addr);
22756b8b622eSRichard Henderson     entry = tlb_entry(env, mmu_idx, addr);
22766b8b622eSRichard Henderson     tlb_addr = tlb_addr_write(entry);
22776b8b622eSRichard Henderson 
22786b8b622eSRichard Henderson     /*
22796b8b622eSRichard Henderson      * Handle watchpoints.  Since this may trap, all checks
22806b8b622eSRichard Henderson      * must happen before any store.
22816b8b622eSRichard Henderson      */
22826b8b622eSRichard Henderson     if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
22836b8b622eSRichard Henderson         cpu_check_watchpoint(env_cpu(env), addr, size - size2,
228425d3ec58SRichard Henderson                              env_tlb(env)->d[mmu_idx].fulltlb[index].attrs,
22856b8b622eSRichard Henderson                              BP_MEM_WRITE, retaddr);
22866b8b622eSRichard Henderson     }
22876b8b622eSRichard Henderson     if (unlikely(tlb_addr2 & TLB_WATCHPOINT)) {
22886b8b622eSRichard Henderson         cpu_check_watchpoint(env_cpu(env), page2, size2,
228925d3ec58SRichard Henderson                              env_tlb(env)->d[mmu_idx].fulltlb[index2].attrs,
22906b8b622eSRichard Henderson                              BP_MEM_WRITE, retaddr);
22916b8b622eSRichard Henderson     }
22926b8b622eSRichard Henderson 
22936b8b622eSRichard Henderson     /*
22946b8b622eSRichard Henderson      * XXX: not efficient, but simple.
22956b8b622eSRichard Henderson      * This loop must go in the forward direction to avoid issues
22966b8b622eSRichard Henderson      * with self-modifying code in 64-bit Windows guests.
22976b8b622eSRichard Henderson      */
22986b8b622eSRichard Henderson     oi = make_memop_idx(MO_UB, mmu_idx);
22996b8b622eSRichard Henderson     if (big_endian) {
23006b8b622eSRichard Henderson         for (i = 0; i < size; ++i) {
23016b8b622eSRichard Henderson             /* Big-endian extract.  */
23026b8b622eSRichard Henderson             uint8_t val8 = val >> (((size - 1) * 8) - (i * 8));
2303f83bcecbSRichard Henderson             full_stb_mmu(env, addr + i, val8, oi, retaddr);
23046b8b622eSRichard Henderson         }
23056b8b622eSRichard Henderson     } else {
23066b8b622eSRichard Henderson         for (i = 0; i < size; ++i) {
23076b8b622eSRichard Henderson             /* Little-endian extract.  */
23086b8b622eSRichard Henderson             uint8_t val8 = val >> (i * 8);
2309f83bcecbSRichard Henderson             full_stb_mmu(env, addr + i, val8, oi, retaddr);
23106b8b622eSRichard Henderson         }
23116b8b622eSRichard Henderson     }
23126b8b622eSRichard Henderson }
23136b8b622eSRichard Henderson 
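/*
 * Sketch of the big-endian extract above for a 4-byte store of
 * 0x11223344: i == 0 stores (uint8_t)(val >> 24) == 0x11, i == 1
 * stores (uint8_t)(val >> 16) == 0x22, and so on, leaving guest
 * memory byte-identical to an aligned big-endian store.
 */
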
231480d9d1c6SRichard Henderson static inline void QEMU_ALWAYS_INLINE
23154601f8d1SRichard Henderson store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
23169002ffcbSRichard Henderson              MemOpIdx oi, uintptr_t retaddr, MemOp op)
2317eed56642SAlex Bennée {
2318eed56642SAlex Bennée     const size_t tlb_off = offsetof(CPUTLBEntry, addr_write);
2319b826044fSRichard Henderson     const unsigned a_bits = get_alignment_bits(get_memop(oi));
2320b826044fSRichard Henderson     const size_t size = memop_size(op);
2321b826044fSRichard Henderson     uintptr_t mmu_idx = get_mmuidx(oi);
2322b826044fSRichard Henderson     uintptr_t index;
2323b826044fSRichard Henderson     CPUTLBEntry *entry;
2324b826044fSRichard Henderson     target_ulong tlb_addr;
2325eed56642SAlex Bennée     void *haddr;
2326b826044fSRichard Henderson 
2327b826044fSRichard Henderson     tcg_debug_assert(mmu_idx < NB_MMU_MODES);
2328eed56642SAlex Bennée 
2329eed56642SAlex Bennée     /* Handle CPU-specific unaligned behaviour.  */
2330eed56642SAlex Bennée     if (addr & ((1 << a_bits) - 1)) {
233129a0af61SRichard Henderson         cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
2332eed56642SAlex Bennée                              mmu_idx, retaddr);
2333eed56642SAlex Bennée     }
2334eed56642SAlex Bennée 
2335b826044fSRichard Henderson     index = tlb_index(env, mmu_idx, addr);
2336b826044fSRichard Henderson     entry = tlb_entry(env, mmu_idx, addr);
2337b826044fSRichard Henderson     tlb_addr = tlb_addr_write(entry);
2338b826044fSRichard Henderson 
2339eed56642SAlex Bennée     /* If the TLB entry is for a different page, reload and try again.  */
2340eed56642SAlex Bennée     if (!tlb_hit(tlb_addr, addr)) {
2341eed56642SAlex Bennée         if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
2342eed56642SAlex Bennée             addr & TARGET_PAGE_MASK)) {
234329a0af61SRichard Henderson             tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE,
2344eed56642SAlex Bennée                      mmu_idx, retaddr);
2345eed56642SAlex Bennée             index = tlb_index(env, mmu_idx, addr);
2346eed56642SAlex Bennée             entry = tlb_entry(env, mmu_idx, addr);
2347eed56642SAlex Bennée         }
2348eed56642SAlex Bennée         tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK;
2349eed56642SAlex Bennée     }
2350eed56642SAlex Bennée 
235150b107c5SRichard Henderson     /* Handle anything that isn't just a straight memory access.  */
2352eed56642SAlex Bennée     if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
235325d3ec58SRichard Henderson         CPUTLBEntryFull *full;
23545b87b3e6SRichard Henderson         bool need_swap;
235550b107c5SRichard Henderson 
235650b107c5SRichard Henderson         /* For anything that is unaligned, recurse through byte stores.  */
2357eed56642SAlex Bennée         if ((addr & (size - 1)) != 0) {
2358eed56642SAlex Bennée             goto do_unaligned_access;
2359eed56642SAlex Bennée         }
236050b107c5SRichard Henderson 
236125d3ec58SRichard Henderson         full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
236250b107c5SRichard Henderson 
236350b107c5SRichard Henderson         /* Handle watchpoints.  */
236450b107c5SRichard Henderson         if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
236550b107c5SRichard Henderson             /* On watchpoint hit, this will longjmp out.  */
236650b107c5SRichard Henderson             cpu_check_watchpoint(env_cpu(env), addr, size,
236725d3ec58SRichard Henderson                                  full->attrs, BP_MEM_WRITE, retaddr);
23685b87b3e6SRichard Henderson         }
236950b107c5SRichard Henderson 
23705b87b3e6SRichard Henderson         need_swap = size > 1 && (tlb_addr & TLB_BSWAP);
237150b107c5SRichard Henderson 
237250b107c5SRichard Henderson         /* Handle I/O access.  */
237308565552SRichard Henderson         if (tlb_addr & TLB_MMIO) {
237425d3ec58SRichard Henderson             io_writex(env, full, mmu_idx, val, addr, retaddr,
23755b87b3e6SRichard Henderson                       op ^ (need_swap * MO_BSWAP));
23765b87b3e6SRichard Henderson             return;
23775b87b3e6SRichard Henderson         }
23785b87b3e6SRichard Henderson 
23797b0d792cSRichard Henderson         /* Ignore writes to ROM.  */
23807b0d792cSRichard Henderson         if (unlikely(tlb_addr & TLB_DISCARD_WRITE)) {
23817b0d792cSRichard Henderson             return;
23827b0d792cSRichard Henderson         }
23837b0d792cSRichard Henderson 
238408565552SRichard Henderson         /* Handle clean RAM pages.  */
238508565552SRichard Henderson         if (tlb_addr & TLB_NOTDIRTY) {
238625d3ec58SRichard Henderson             notdirty_write(env_cpu(env), addr, size, full, retaddr);
238708565552SRichard Henderson         }
238808565552SRichard Henderson 
2389707526adSRichard Henderson         haddr = (void *)((uintptr_t)addr + entry->addend);
239008565552SRichard Henderson 
23915b87b3e6SRichard Henderson         /*
23925b87b3e6SRichard Henderson          * Keep these two store_memop separate to ensure that the compiler
23935b87b3e6SRichard Henderson          * is able to fold the entire function to a single instruction.
23945b87b3e6SRichard Henderson          * There is a build-time assert inside to remind you of this.  ;-)
23955b87b3e6SRichard Henderson          */
23965b87b3e6SRichard Henderson         if (unlikely(need_swap)) {
23975b87b3e6SRichard Henderson             store_memop(haddr, val, op ^ MO_BSWAP);
23985b87b3e6SRichard Henderson         } else {
23995b87b3e6SRichard Henderson             store_memop(haddr, val, op);
24005b87b3e6SRichard Henderson         }
2401eed56642SAlex Bennée         return;
2402eed56642SAlex Bennée     }
2403eed56642SAlex Bennée 
2404eed56642SAlex Bennée     /* Handle slow unaligned access (it spans two pages or I/O).  */
2405eed56642SAlex Bennée     if (size > 1
2406eed56642SAlex Bennée         && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
2407eed56642SAlex Bennée                      >= TARGET_PAGE_SIZE)) {
2408eed56642SAlex Bennée     do_unaligned_access:
24096b8b622eSRichard Henderson         store_helper_unaligned(env, addr, val, retaddr, size,
24106b8b622eSRichard Henderson                                mmu_idx, memop_big_endian(op));
2411eed56642SAlex Bennée         return;
2412eed56642SAlex Bennée     }
2413eed56642SAlex Bennée 
2414eed56642SAlex Bennée     haddr = (void *)((uintptr_t)addr + entry->addend);
241580d9d1c6SRichard Henderson     store_memop(haddr, val, op);
2416eed56642SAlex Bennée }
2417eed56642SAlex Bennée 
2418f83bcecbSRichard Henderson static void __attribute__((noinline))
2419f83bcecbSRichard Henderson full_stb_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
24209002ffcbSRichard Henderson              MemOpIdx oi, uintptr_t retaddr)
2421eed56642SAlex Bennée {
2422f83bcecbSRichard Henderson     validate_memop(oi, MO_UB);
2423be5c4787STony Nguyen     store_helper(env, addr, val, oi, retaddr, MO_UB);
2424eed56642SAlex Bennée }
2425eed56642SAlex Bennée 
2426f83bcecbSRichard Henderson void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
2427f83bcecbSRichard Henderson                         MemOpIdx oi, uintptr_t retaddr)
2428f83bcecbSRichard Henderson {
2429f83bcecbSRichard Henderson     full_stb_mmu(env, addr, val, oi, retaddr);
2430f83bcecbSRichard Henderson }
2431f83bcecbSRichard Henderson 
2432f83bcecbSRichard Henderson static void full_le_stw_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
2433f83bcecbSRichard Henderson                             MemOpIdx oi, uintptr_t retaddr)
2434f83bcecbSRichard Henderson {
2435f83bcecbSRichard Henderson     validate_memop(oi, MO_LEUW);
2436f83bcecbSRichard Henderson     store_helper(env, addr, val, oi, retaddr, MO_LEUW);
2437f83bcecbSRichard Henderson }
2438f83bcecbSRichard Henderson 
2439fc1bc777SRichard Henderson void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
24409002ffcbSRichard Henderson                        MemOpIdx oi, uintptr_t retaddr)
2441eed56642SAlex Bennée {
2442f83bcecbSRichard Henderson     full_le_stw_mmu(env, addr, val, oi, retaddr);
2443f83bcecbSRichard Henderson }
2444f83bcecbSRichard Henderson 
2445f83bcecbSRichard Henderson static void full_be_stw_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
2446f83bcecbSRichard Henderson                             MemOpIdx oi, uintptr_t retaddr)
2447f83bcecbSRichard Henderson {
2448f83bcecbSRichard Henderson     validate_memop(oi, MO_BEUW);
2449f83bcecbSRichard Henderson     store_helper(env, addr, val, oi, retaddr, MO_BEUW);
2450eed56642SAlex Bennée }
2451eed56642SAlex Bennée 
2452fc1bc777SRichard Henderson void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
24539002ffcbSRichard Henderson                        MemOpIdx oi, uintptr_t retaddr)
2454eed56642SAlex Bennée {
2455f83bcecbSRichard Henderson     full_be_stw_mmu(env, addr, val, oi, retaddr);
2456f83bcecbSRichard Henderson }
2457f83bcecbSRichard Henderson 
2458f83bcecbSRichard Henderson static void full_le_stl_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
2459f83bcecbSRichard Henderson                             MemOpIdx oi, uintptr_t retaddr)
2460f83bcecbSRichard Henderson {
2461f83bcecbSRichard Henderson     validate_memop(oi, MO_LEUL);
2462f83bcecbSRichard Henderson     store_helper(env, addr, val, oi, retaddr, MO_LEUL);
2463eed56642SAlex Bennée }
2464eed56642SAlex Bennée 
2465fc1bc777SRichard Henderson void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
24669002ffcbSRichard Henderson                        MemOpIdx oi, uintptr_t retaddr)
2467eed56642SAlex Bennée {
2468f83bcecbSRichard Henderson     full_le_stl_mmu(env, addr, val, oi, retaddr);
2469f83bcecbSRichard Henderson }
2470f83bcecbSRichard Henderson 
2471f83bcecbSRichard Henderson static void full_be_stl_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
2472f83bcecbSRichard Henderson                             MemOpIdx oi, uintptr_t retaddr)
2473f83bcecbSRichard Henderson {
2474f83bcecbSRichard Henderson     validate_memop(oi, MO_BEUL);
2475f83bcecbSRichard Henderson     store_helper(env, addr, val, oi, retaddr, MO_BEUL);
2476eed56642SAlex Bennée }
2477eed56642SAlex Bennée 
2478fc1bc777SRichard Henderson void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
24799002ffcbSRichard Henderson                        MemOpIdx oi, uintptr_t retaddr)
2480eed56642SAlex Bennée {
2481f83bcecbSRichard Henderson     full_be_stl_mmu(env, addr, val, oi, retaddr);
2482eed56642SAlex Bennée }
2483eed56642SAlex Bennée 
2484fc1bc777SRichard Henderson void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
24859002ffcbSRichard Henderson                        MemOpIdx oi, uintptr_t retaddr)
2486eed56642SAlex Bennée {
2487fc313c64SFrédéric Pétrot     validate_memop(oi, MO_LEUQ);
2488fc313c64SFrédéric Pétrot     store_helper(env, addr, val, oi, retaddr, MO_LEUQ);
2489eed56642SAlex Bennée }
2490eed56642SAlex Bennée 
2491fc1bc777SRichard Henderson void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
24929002ffcbSRichard Henderson                        MemOpIdx oi, uintptr_t retaddr)
2493eed56642SAlex Bennée {
2494fc313c64SFrédéric Pétrot     validate_memop(oi, MO_BEUQ);
2495fc313c64SFrédéric Pétrot     store_helper(env, addr, val, oi, retaddr, MO_BEUQ);
2496eed56642SAlex Bennée }
2497d9bb58e5SYang Zhong 
2498d03f1408SRichard Henderson /*
2499d03f1408SRichard Henderson  * Store Helpers for cpu_ldst.h
2500d03f1408SRichard Henderson  */
2501d03f1408SRichard Henderson 
2502f83bcecbSRichard Henderson typedef void FullStoreHelper(CPUArchState *env, target_ulong addr,
2503f83bcecbSRichard Henderson                              uint64_t val, MemOpIdx oi, uintptr_t retaddr);
2504f83bcecbSRichard Henderson 
2505f83bcecbSRichard Henderson static inline void cpu_store_helper(CPUArchState *env, target_ulong addr,
2506f83bcecbSRichard Henderson                                     uint64_t val, MemOpIdx oi, uintptr_t ra,
2507f83bcecbSRichard Henderson                                     FullStoreHelper *full_store)
2508d03f1408SRichard Henderson {
2509f83bcecbSRichard Henderson     full_store(env, addr, val, oi, ra);
251037aff087SRichard Henderson     qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
2511d03f1408SRichard Henderson }
2512d03f1408SRichard Henderson 
2513f83bcecbSRichard Henderson void cpu_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
2514f83bcecbSRichard Henderson                  MemOpIdx oi, uintptr_t retaddr)
2515d03f1408SRichard Henderson {
2516f83bcecbSRichard Henderson     cpu_store_helper(env, addr, val, oi, retaddr, full_stb_mmu);
2517d03f1408SRichard Henderson }
2518d03f1408SRichard Henderson 
2519f83bcecbSRichard Henderson void cpu_stw_be_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
2520f83bcecbSRichard Henderson                     MemOpIdx oi, uintptr_t retaddr)
2521d03f1408SRichard Henderson {
2522f83bcecbSRichard Henderson     cpu_store_helper(env, addr, val, oi, retaddr, full_be_stw_mmu);
2523d03f1408SRichard Henderson }
2524d03f1408SRichard Henderson 
2525f83bcecbSRichard Henderson void cpu_stl_be_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
2526f83bcecbSRichard Henderson                     MemOpIdx oi, uintptr_t retaddr)
2527d03f1408SRichard Henderson {
2528f83bcecbSRichard Henderson     cpu_store_helper(env, addr, val, oi, retaddr, full_be_stl_mmu);
2529d03f1408SRichard Henderson }
2530d03f1408SRichard Henderson 
2531f83bcecbSRichard Henderson void cpu_stq_be_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
2532f83bcecbSRichard Henderson                     MemOpIdx oi, uintptr_t retaddr)
2533d03f1408SRichard Henderson {
2534f83bcecbSRichard Henderson     cpu_store_helper(env, addr, val, oi, retaddr, helper_be_stq_mmu);
2535b9e60257SRichard Henderson }
2536b9e60257SRichard Henderson 
2537f83bcecbSRichard Henderson void cpu_stw_le_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
2538f83bcecbSRichard Henderson                     MemOpIdx oi, uintptr_t retaddr)
2539b9e60257SRichard Henderson {
2540f83bcecbSRichard Henderson     cpu_store_helper(env, addr, val, oi, retaddr, full_le_stw_mmu);
2541b9e60257SRichard Henderson }
2542b9e60257SRichard Henderson 
2543f83bcecbSRichard Henderson void cpu_stl_le_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
2544f83bcecbSRichard Henderson                     MemOpIdx oi, uintptr_t retaddr)
2545b9e60257SRichard Henderson {
2546f83bcecbSRichard Henderson     cpu_store_helper(env, addr, val, oi, retaddr, full_le_stl_mmu);
2547b9e60257SRichard Henderson }
2548b9e60257SRichard Henderson 
2549f83bcecbSRichard Henderson void cpu_stq_le_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
2550f83bcecbSRichard Henderson                     MemOpIdx oi, uintptr_t retaddr)
2551b9e60257SRichard Henderson {
2552f83bcecbSRichard Henderson     cpu_store_helper(env, addr, val, oi, retaddr, helper_le_stq_mmu);
2553d03f1408SRichard Henderson }
2554d03f1408SRichard Henderson 
2555f83bcecbSRichard Henderson #include "ldst_common.c.inc"
2556cfe04a4bSRichard Henderson 
2557be9568b4SRichard Henderson /*
2558be9568b4SRichard Henderson  * First set of functions passes in OI and RETADDR.
2559be9568b4SRichard Henderson  * This makes them callable from other helpers.
2560be9568b4SRichard Henderson  */
2561d9bb58e5SYang Zhong 
2562d9bb58e5SYang Zhong #define ATOMIC_NAME(X) \
2563be9568b4SRichard Henderson     glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)
2564a754f7f3SRichard Henderson 
2565707526adSRichard Henderson #define ATOMIC_MMU_CLEANUP
2566d9bb58e5SYang Zhong 
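/*
 * For example, with X == cmpxchg, SUFFIX == l and END == _le (as set
 * up by atomic_template.h for DATA_SIZE 4), ATOMIC_NAME expands to
 * cpu_atomic_cmpxchgl_le_mmu.
 */
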
2567139c1837SPaolo Bonzini #include "atomic_common.c.inc"
2568d9bb58e5SYang Zhong 
2569d9bb58e5SYang Zhong #define DATA_SIZE 1
2570d9bb58e5SYang Zhong #include "atomic_template.h"
2571d9bb58e5SYang Zhong 
2572d9bb58e5SYang Zhong #define DATA_SIZE 2
2573d9bb58e5SYang Zhong #include "atomic_template.h"
2574d9bb58e5SYang Zhong 
2575d9bb58e5SYang Zhong #define DATA_SIZE 4
2576d9bb58e5SYang Zhong #include "atomic_template.h"
2577d9bb58e5SYang Zhong 
2578d9bb58e5SYang Zhong #ifdef CONFIG_ATOMIC64
2579d9bb58e5SYang Zhong #define DATA_SIZE 8
2580d9bb58e5SYang Zhong #include "atomic_template.h"
2581d9bb58e5SYang Zhong #endif
2582d9bb58e5SYang Zhong 
2583e6cd4bb5SRichard Henderson #if HAVE_CMPXCHG128 || HAVE_ATOMIC128
2584d9bb58e5SYang Zhong #define DATA_SIZE 16
2585d9bb58e5SYang Zhong #include "atomic_template.h"
2586d9bb58e5SYang Zhong #endif
2587d9bb58e5SYang Zhong 
2588d9bb58e5SYang Zhong /* Code access functions.  */
2589d9bb58e5SYang Zhong 
2590fc4120a3SRichard Henderson static uint64_t full_ldub_code(CPUArchState *env, target_ulong addr,
25919002ffcbSRichard Henderson                                MemOpIdx oi, uintptr_t retaddr)
25922dd92606SRichard Henderson {
2593fc4120a3SRichard Henderson     return load_helper(env, addr, oi, retaddr, MO_8, true, full_ldub_code);
25942dd92606SRichard Henderson }
25952dd92606SRichard Henderson 
2596fc4120a3SRichard Henderson uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr)
2597eed56642SAlex Bennée {
25989002ffcbSRichard Henderson     MemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(env, true));
2599fc4120a3SRichard Henderson     return full_ldub_code(env, addr, oi, 0);
26002dd92606SRichard Henderson }
26012dd92606SRichard Henderson 
2602fc4120a3SRichard Henderson static uint64_t full_lduw_code(CPUArchState *env, target_ulong addr,
26039002ffcbSRichard Henderson                                MemOpIdx oi, uintptr_t retaddr)
26044cef72d0SAlex Bennée {
2605fc4120a3SRichard Henderson     return load_helper(env, addr, oi, retaddr, MO_TEUW, true, full_lduw_code);
26064cef72d0SAlex Bennée }
26074cef72d0SAlex Bennée 
2608fc4120a3SRichard Henderson uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr)
26092dd92606SRichard Henderson {
26109002ffcbSRichard Henderson     MemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(env, true));
2611fc4120a3SRichard Henderson     return full_lduw_code(env, addr, oi, 0);
2612eed56642SAlex Bennée }
2613d9bb58e5SYang Zhong 
2614fc4120a3SRichard Henderson static uint64_t full_ldl_code(CPUArchState *env, target_ulong addr,
26159002ffcbSRichard Henderson                               MemOpIdx oi, uintptr_t retaddr)
2616eed56642SAlex Bennée {
2617fc4120a3SRichard Henderson     return load_helper(env, addr, oi, retaddr, MO_TEUL, true, full_ldl_code);
26182dd92606SRichard Henderson }
26192dd92606SRichard Henderson 
2620fc4120a3SRichard Henderson uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr)
26214cef72d0SAlex Bennée {
26229002ffcbSRichard Henderson     MemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, true));
2623fc4120a3SRichard Henderson     return full_ldl_code(env, addr, oi, 0);
26244cef72d0SAlex Bennée }
26254cef72d0SAlex Bennée 
2626fc4120a3SRichard Henderson static uint64_t full_ldq_code(CPUArchState *env, target_ulong addr,
26279002ffcbSRichard Henderson                               MemOpIdx oi, uintptr_t retaddr)
26282dd92606SRichard Henderson {
2629fc313c64SFrédéric Pétrot     return load_helper(env, addr, oi, retaddr, MO_TEUQ, true, full_ldq_code);
2630eed56642SAlex Bennée }
2631d9bb58e5SYang Zhong 
2632fc4120a3SRichard Henderson uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr)
2633eed56642SAlex Bennée {
2634fc313c64SFrédéric Pétrot     MemOpIdx oi = make_memop_idx(MO_TEUQ, cpu_mmu_index(env, true));
2635fc4120a3SRichard Henderson     return full_ldq_code(env, addr, oi, 0);
2636eed56642SAlex Bennée }
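
/*
 * Typical translator-side use, shown for illustration: an instruction
 * fetch such as
 *
 *     uint32_t insn = cpu_ldl_code(env, pc);
 *
 * funnels into full_ldl_code() with a zero retaddr, since at
 * translation time there is no host return address to unwind.
 */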
2637