/*
 *  Common CPU TLB handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "hw/core/tcg-cpu-ops.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
#include "qemu/error-report.h"
#include "exec/log.h"
#include "exec/helper-proto.h"
#include "qemu/atomic.h"
#include "qemu/atomic128.h"
#include "exec/translate-all.h"
#include "trace/trace-root.h"
#include "tb-hash.h"
#include "internal.h"
#ifdef CONFIG_PLUGIN
#include "qemu/plugin-memory.h"
#endif
#include "tcg/tcg-ldst.h"

/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif

#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)

#define assert_cpu_is_self(cpu) do {                              \
        if (DEBUG_TLB_GATE) {                                     \
            g_assert(!(cpu)->created || qemu_cpu_is_self(cpu));   \
        }                                                         \
    } while (0)

/* run_on_cpu_data.target_ptr should always be big enough for a
 * target_ulong even on 32 bit builds */
QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));

/* We currently can't handle more than 16 bits in the MMUIDX bitmask. */
QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
#define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)

static inline size_t tlb_n_entries(CPUTLBDescFast *fast)
{
    return (fast->mask >> CPU_TLB_ENTRY_BITS) + 1;
}

static inline size_t sizeof_tlb(CPUTLBDescFast *fast)
{
    return fast->mask + (1 << CPU_TLB_ENTRY_BITS);
}
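
/*
 * Worked example of the fast->mask encoding (values illustrative): with
 * 256 entries and CPU_TLB_ENTRY_BITS == 5 (32-byte entries), fast->mask
 * is (256 - 1) << 5 == 0x1fe0; tlb_n_entries() recovers
 * (0x1fe0 >> 5) + 1 == 256, and sizeof_tlb() is
 * 0x1fe0 + (1 << 5) == 0x2000 bytes, i.e. 256 * 32.
 */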

static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
                             size_t max_entries)
{
    desc->window_begin_ns = ns;
    desc->window_max_entries = max_entries;
}

static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
{
    int i, i0 = tb_jmp_cache_hash_page(page_addr);
    CPUJumpCache *jc = cpu->tb_jmp_cache;

    for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
        qatomic_set(&jc->array[i0 + i].tb, NULL);
    }
}

/**
 * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
 * @desc: The CPUTLBDesc portion of the TLB
 * @fast: The CPUTLBDescFast portion of the same TLB
 *
 * Called with tlb_c.lock held.
 *
 * We have two main constraints when resizing a TLB: (1) we only resize it
 * on a TLB flush (otherwise we'd have to take a perf hit by either rehashing
 * the array or unnecessarily flushing it), which means we do not control how
 * frequently the resizing can occur; (2) we don't have access to the guest's
 * future scheduling decisions, and therefore have to decide the magnitude of
 * the resize based on past observations.
 *
 * In general, a memory-hungry process can benefit greatly from an appropriately
 * sized TLB, since a guest TLB miss is very expensive. This doesn't mean that
 * we just have to make the TLB as large as possible; while an oversized TLB
 * results in minimal TLB miss rates, it also takes longer to be flushed
 * (flushes can be _very_ frequent), and the reduced locality can also hurt
 * performance.
 *
 * To achieve near-optimal performance for all kinds of workloads, we:
 *
 * 1. Aggressively increase the size of the TLB when the use rate of the
 * TLB being flushed is high, since it is likely that in the near future this
 * memory-hungry process will execute again, and its memory hungriness will
 * probably be similar.
 *
 * 2. Slowly reduce the size of the TLB as the use rate declines over a
 * reasonably large time window. The rationale is that if in such a time window
 * we have not observed a high TLB use rate, it is likely that we won't observe
 * it in the near future. In that case, once a time window expires we downsize
 * the TLB to match the maximum use rate observed in the window.
 *
 * 3. Try to keep the maximum use rate in a time window in the 30-70% range,
 * since in that range performance is likely near-optimal. Recall that the TLB
 * is direct mapped, so we want the use rate to be low (or at least not too
 * high), since otherwise we are likely to have a significant amount of
 * conflict misses.
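 *
 * A worked example of the policy below (numbers illustrative): with
 * old_size == 1024 and window_max_entries == 800, the use rate is
 * 800 * 100 / 1024 == 78%, so the TLB is doubled to 2048 entries.
 * Conversely, with window_max_entries == 200 in an expired window the
 * rate is 19%; pow2ceil(200) == 256 would give an expected rate of
 * 200 * 100 / 256 == 78%, so the target is doubled to 512 entries,
 * for an expected use rate of 39%.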
 */
static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast,
                                  int64_t now)
{
    size_t old_size = tlb_n_entries(fast);
    size_t rate;
    size_t new_size = old_size;
    int64_t window_len_ms = 100;
    int64_t window_len_ns = window_len_ms * 1000 * 1000;
    bool window_expired = now > desc->window_begin_ns + window_len_ns;

    if (desc->n_used_entries > desc->window_max_entries) {
        desc->window_max_entries = desc->n_used_entries;
    }
    rate = desc->window_max_entries * 100 / old_size;

    if (rate > 70) {
        new_size = MIN(old_size << 1, 1 << CPU_TLB_DYN_MAX_BITS);
    } else if (rate < 30 && window_expired) {
        size_t ceil = pow2ceil(desc->window_max_entries);
        size_t expected_rate = desc->window_max_entries * 100 / ceil;

        /*
         * Avoid undersizing when the max number of entries seen is just below
         * a pow2. For instance, if max_entries == 1025, the expected use rate
         * would be 1025/2048==50%. However, if max_entries == 1023, we'd get
         * 1023/1024==99.9% use rate, so we'd likely end up doubling the size
         * later. Thus, make sure that the expected use rate remains below 70%.
         * (and since we double the size, that means the lowest rate we'd
         * expect to get is 35%, which is still in the 30-70% range where
         * we consider that the size is appropriate.)
         */
        if (expected_rate > 70) {
            ceil *= 2;
        }
        new_size = MAX(ceil, 1 << CPU_TLB_DYN_MIN_BITS);
    }

    if (new_size == old_size) {
        if (window_expired) {
            tlb_window_reset(desc, now, desc->n_used_entries);
        }
        return;
    }

    g_free(fast->table);
    g_free(desc->fulltlb);

    tlb_window_reset(desc, now, 0);
    /* desc->n_used_entries is cleared by the caller */
    fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
    fast->table = g_try_new(CPUTLBEntry, new_size);
    desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size);

    /*
     * If the allocations fail, try smaller sizes. We just freed some
     * memory, so going back to half of new_size has a good chance of working.
     * Increased memory pressure elsewhere in the system might cause the
     * allocations to fail though, so we progressively reduce the allocation
     * size, aborting if we cannot even allocate the smallest TLB we support.
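     * For example (sizes illustrative): a failed 8192-entry allocation
     * retries at 4096, then 2048, and so on down to
     * 1 << CPU_TLB_DYN_MIN_BITS; only if that smallest size also fails
     * do we abort.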
     */
    while (fast->table == NULL || desc->fulltlb == NULL) {
        if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
            error_report("%s: %s", __func__, strerror(errno));
            abort();
        }
        new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS);
        fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;

        g_free(fast->table);
        g_free(desc->fulltlb);
        fast->table = g_try_new(CPUTLBEntry, new_size);
        desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size);
    }
}

static void tlb_mmu_flush_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast)
{
    desc->n_used_entries = 0;
    desc->large_page_addr = -1;
    desc->large_page_mask = -1;
    desc->vindex = 0;
    memset(fast->table, -1, sizeof_tlb(fast));
    memset(desc->vtable, -1, sizeof(desc->vtable));
}

static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx,
                                        int64_t now)
{
    CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
    CPUTLBDescFast *fast = &env_tlb(env)->f[mmu_idx];

    tlb_mmu_resize_locked(desc, fast, now);
    tlb_mmu_flush_locked(desc, fast);
}

static void tlb_mmu_init(CPUTLBDesc *desc, CPUTLBDescFast *fast, int64_t now)
{
    size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;

    tlb_window_reset(desc, now, 0);
    desc->n_used_entries = 0;
    fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
    fast->table = g_new(CPUTLBEntry, n_entries);
    desc->fulltlb = g_new(CPUTLBEntryFull, n_entries);
    tlb_mmu_flush_locked(desc, fast);
}

static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
{
    env_tlb(env)->d[mmu_idx].n_used_entries++;
}

static inline void tlb_n_used_entries_dec(CPUArchState *env, uintptr_t mmu_idx)
{
    env_tlb(env)->d[mmu_idx].n_used_entries--;
}

void tlb_init(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;
    int64_t now = get_clock_realtime();
    int i;

    qemu_spin_init(&env_tlb(env)->c.lock);

    /* All tlbs are initialized flushed. */
    env_tlb(env)->c.dirty = 0;

    for (i = 0; i < NB_MMU_MODES; i++) {
        tlb_mmu_init(&env_tlb(env)->d[i], &env_tlb(env)->f[i], now);
    }
}

void tlb_destroy(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;
    int i;

    qemu_spin_destroy(&env_tlb(env)->c.lock);
    for (i = 0; i < NB_MMU_MODES; i++) {
        CPUTLBDesc *desc = &env_tlb(env)->d[i];
        CPUTLBDescFast *fast = &env_tlb(env)->f[i];

        g_free(fast->table);
        g_free(desc->fulltlb);
    }
}

/* flush_all_helper: run fn across all cpus
 *
 * The helper is queued as async work on every cpu other than @src.
 * Callers that need a synchronisation point, where all queued work is
 * finished before execution starts again, additionally queue @fn on
 * @src itself as "safe" work; see the *_all_cpus_synced variants
 * below.
 */
static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
                             run_on_cpu_data d)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, fn, d);
        }
    }
}

void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
{
    CPUState *cpu;
    size_t full = 0, part = 0, elide = 0;

    CPU_FOREACH(cpu) {
        CPUArchState *env = cpu->env_ptr;

        full += qatomic_read(&env_tlb(env)->c.full_flush_count);
        part += qatomic_read(&env_tlb(env)->c.part_flush_count);
        elide += qatomic_read(&env_tlb(env)->c.elide_flush_count);
    }
    *pfull = full;
    *ppart = part;
    *pelide = elide;
}

static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    uint16_t asked = data.host_int;
    uint16_t all_dirty, work, to_clean;
    int64_t now = get_clock_realtime();

    assert_cpu_is_self(cpu);

    tlb_debug("mmu_idx:0x%04" PRIx16 "\n", asked);

    qemu_spin_lock(&env_tlb(env)->c.lock);

    all_dirty = env_tlb(env)->c.dirty;
    to_clean = asked & all_dirty;
    all_dirty &= ~to_clean;
    env_tlb(env)->c.dirty = all_dirty;

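    /*
     * Iterate over the set bits of to_clean: ctz32() yields the lowest
     * set index and "work &= work - 1" clears it, so e.g. to_clean ==
     * 0b0101 flushes mmu_idx 0 and then mmu_idx 2.
     */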
    for (work = to_clean; work != 0; work &= work - 1) {
        int mmu_idx = ctz32(work);
        tlb_flush_one_mmuidx_locked(env, mmu_idx, now);
    }

    qemu_spin_unlock(&env_tlb(env)->c.lock);

    tcg_flush_jmp_cache(cpu);

    if (to_clean == ALL_MMUIDX_BITS) {
        qatomic_set(&env_tlb(env)->c.full_flush_count,
                    env_tlb(env)->c.full_flush_count + 1);
    } else {
        qatomic_set(&env_tlb(env)->c.part_flush_count,
                    env_tlb(env)->c.part_flush_count + ctpop16(to_clean));
        if (to_clean != asked) {
            qatomic_set(&env_tlb(env)->c.elide_flush_count,
                        env_tlb(env)->c.elide_flush_count +
                        ctpop16(asked & ~to_clean));
        }
    }
}

void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
    tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);

    if (cpu->created && !qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
                         RUN_ON_CPU_HOST_INT(idxmap));
    } else {
        tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap));
    }
}

void tlb_flush(CPUState *cpu)
{
    tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS);
}

void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus(src_cpu, ALL_MMUIDX_BITS);
}

void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS);
}

static bool tlb_hit_page_mask_anyprot(CPUTLBEntry *tlb_entry,
                                      target_ulong page, target_ulong mask)
{
    page &= mask;
    mask &= TARGET_PAGE_MASK | TLB_INVALID_MASK;

    return (page == (tlb_entry->addr_read & mask) ||
            page == (tlb_addr_write(tlb_entry) & mask) ||
            page == (tlb_entry->addr_code & mask));
}
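
/*
 * Example (an illustrative assumption, not taken from this file): a
 * flush with 56 significant address bits, as with arm's top-byte-ignore,
 * passes mask == MAKE_64BIT_MASK(0, 56), so the top byte of @page and
 * of the entry's address is ignored by the comparison above, while the
 * TLB_INVALID_MASK bit retained in @mask keeps an invalidated entry
 * from ever matching a page-aligned @page.
 */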

static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry,
                                        target_ulong page)
{
    return tlb_hit_page_mask_anyprot(tlb_entry, page, -1);
}

/**
 * tlb_entry_is_empty - return true if the entry is not in use
 * @te: pointer to CPUTLBEntry
 */
static inline bool tlb_entry_is_empty(const CPUTLBEntry *te)
{
    return te->addr_read == -1 && te->addr_write == -1 && te->addr_code == -1;
}

/* Called with tlb_c.lock held */
static bool tlb_flush_entry_mask_locked(CPUTLBEntry *tlb_entry,
                                        target_ulong page,
                                        target_ulong mask)
{
    if (tlb_hit_page_mask_anyprot(tlb_entry, page, mask)) {
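        /*
         * Setting every byte to 0xff makes addr_read/addr_write/addr_code
         * all -1, i.e. the tlb_entry_is_empty() state.
         */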
        memset(tlb_entry, -1, sizeof(*tlb_entry));
        return true;
    }
    return false;
}

static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry,
                                          target_ulong page)
{
    return tlb_flush_entry_mask_locked(tlb_entry, page, -1);
}

/* Called with tlb_c.lock held */
static void tlb_flush_vtlb_page_mask_locked(CPUArchState *env, int mmu_idx,
                                            target_ulong page,
                                            target_ulong mask)
{
    CPUTLBDesc *d = &env_tlb(env)->d[mmu_idx];
    int k;

    assert_cpu_is_self(env_cpu(env));
    for (k = 0; k < CPU_VTLB_SIZE; k++) {
        if (tlb_flush_entry_mask_locked(&d->vtable[k], page, mask)) {
            tlb_n_used_entries_dec(env, mmu_idx);
        }
    }
}

static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx,
                                              target_ulong page)
{
    tlb_flush_vtlb_page_mask_locked(env, mmu_idx, page, -1);
}

static void tlb_flush_page_locked(CPUArchState *env, int midx,
                                  target_ulong page)
{
    target_ulong lp_addr = env_tlb(env)->d[midx].large_page_addr;
    target_ulong lp_mask = env_tlb(env)->d[midx].large_page_mask;

    /* Check if we need to flush due to large pages.  */
    if ((page & lp_mask) == lp_addr) {
        tlb_debug("forcing full flush midx %d ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  midx, lp_addr, lp_mask);
        tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
    } else {
        if (tlb_flush_entry_locked(tlb_entry(env, midx, page), page)) {
            tlb_n_used_entries_dec(env, midx);
        }
        tlb_flush_vtlb_page_locked(env, midx, page);
    }
}

/**
 * tlb_flush_page_by_mmuidx_async_0:
 * @cpu: cpu on which to flush
 * @addr: page of virtual address to flush
 * @idxmap: set of mmu_idx to flush
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, flush one page
 * at @addr from the tlbs indicated by @idxmap from @cpu.
 */
static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu,
                                             target_ulong addr,
                                             uint16_t idxmap)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("page addr:" TARGET_FMT_lx " mmu_map:0x%x\n", addr, idxmap);

    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if ((idxmap >> mmu_idx) & 1) {
            tlb_flush_page_locked(env, mmu_idx, addr);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);

    /*
     * Discard jump cache entries for any tb which might potentially
     * overlap the flushed page, which includes the previous.
     */
    tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
    tb_jmp_cache_clear_page(cpu, addr);
}

/**
 * tlb_flush_page_by_mmuidx_async_1:
 * @cpu: cpu on which to flush
 * @data: encoded addr + idxmap
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, called through
 * async_run_on_cpu.  The idxmap parameter is encoded in the page
 * offset of the target_ptr field.  This limits the set of mmu_idx
 * that can be passed via this method.
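 *
 * For example (illustrative, assuming TARGET_PAGE_BITS == 12): a
 * page-aligned addr 0x1234000 with idxmap 0x3 is encoded as
 * RUN_ON_CPU_TARGET_PTR(0x1234003); masking with TARGET_PAGE_MASK
 * and ~TARGET_PAGE_MASK below recovers the two halves.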
 */
static void tlb_flush_page_by_mmuidx_async_1(CPUState *cpu,
                                             run_on_cpu_data data)
{
    target_ulong addr_and_idxmap = (target_ulong) data.target_ptr;
    target_ulong addr = addr_and_idxmap & TARGET_PAGE_MASK;
    uint16_t idxmap = addr_and_idxmap & ~TARGET_PAGE_MASK;

    tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
}

typedef struct {
    target_ulong addr;
    uint16_t idxmap;
} TLBFlushPageByMMUIdxData;

/**
 * tlb_flush_page_by_mmuidx_async_2:
 * @cpu: cpu on which to flush
 * @data: allocated addr + idxmap
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, called through
 * async_run_on_cpu.  The addr+idxmap parameters are stored in a
 * TLBFlushPageByMMUIdxData structure that has been allocated
 * specifically for this helper.  Free the structure when done.
 */
static void tlb_flush_page_by_mmuidx_async_2(CPUState *cpu,
                                             run_on_cpu_data data)
{
    TLBFlushPageByMMUIdxData *d = data.host_ptr;

    tlb_flush_page_by_mmuidx_async_0(cpu, d->addr, d->idxmap);
    g_free(d);
}

void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
{
    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    if (qemu_cpu_is_self(cpu)) {
        tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
    } else if (idxmap < TARGET_PAGE_SIZE) {
        /*
         * Most targets have only a few mmu_idx.  In the case where
         * we can stuff idxmap into the low TARGET_PAGE_BITS, avoid
         * allocating memory for this operation.
         */
        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        TLBFlushPageByMMUIdxData *d = g_new(TLBFlushPageByMMUIdxData, 1);

        /* Otherwise allocate a structure, freed by the worker.  */
        d->addr = addr;
        d->idxmap = idxmap;
        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_2,
                         RUN_ON_CPU_HOST_PTR(d));
    }
}

void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
    tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS);
}

void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr,
                                       uint16_t idxmap)
{
    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    /*
     * Allocate memory to hold addr+idxmap only when needed.
     * See tlb_flush_page_by_mmuidx for details.
     */
    if (idxmap < TARGET_PAGE_SIZE) {
        flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        CPUState *dst_cpu;

        /* Allocate a separate data block for each destination cpu.  */
        CPU_FOREACH(dst_cpu) {
            if (dst_cpu != src_cpu) {
                TLBFlushPageByMMUIdxData *d
                    = g_new(TLBFlushPageByMMUIdxData, 1);

                d->addr = addr;
                d->idxmap = idxmap;
                async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
                                 RUN_ON_CPU_HOST_PTR(d));
            }
        }
    }

    tlb_flush_page_by_mmuidx_async_0(src_cpu, addr, idxmap);
}

void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
{
    tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS);
}

void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                              target_ulong addr,
                                              uint16_t idxmap)
{
    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    /*
     * Allocate memory to hold addr+idxmap only when needed.
     * See tlb_flush_page_by_mmuidx for details.
     */
    if (idxmap < TARGET_PAGE_SIZE) {
        flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
        async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                              RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        CPUState *dst_cpu;
        TLBFlushPageByMMUIdxData *d;

        /* Allocate a separate data block for each destination cpu.  */
        CPU_FOREACH(dst_cpu) {
            if (dst_cpu != src_cpu) {
                d = g_new(TLBFlushPageByMMUIdxData, 1);
                d->addr = addr;
                d->idxmap = idxmap;
                async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
                                 RUN_ON_CPU_HOST_PTR(d));
            }
        }

        d = g_new(TLBFlushPageByMMUIdxData, 1);
        d->addr = addr;
        d->idxmap = idxmap;
        async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_2,
                              RUN_ON_CPU_HOST_PTR(d));
    }
}

void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr)
{
    tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
}

static void tlb_flush_range_locked(CPUArchState *env, int midx,
                                   target_ulong addr, target_ulong len,
                                   unsigned bits)
{
    CPUTLBDesc *d = &env_tlb(env)->d[midx];
    CPUTLBDescFast *f = &env_tlb(env)->f[midx];
    target_ulong mask = MAKE_64BIT_MASK(0, bits);

    /*
     * If @bits is smaller than the tlb size, there may be multiple entries
     * within the TLB; otherwise all addresses that match under @mask hit
     * the same TLB entry.
     * TODO: Perhaps allow bits to be a few bits less than the size.
     * For now, just flush the entire TLB.
     *
     * If @len is larger than the tlb size, then it will take longer to
     * test all of the entries in the TLB than it will to flush it all.
     */
    if (mask < f->mask || len > f->mask) {
        tlb_debug("forcing full flush midx %d ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx "+" TARGET_FMT_lx ")\n",
                  midx, addr, mask, len);
        tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
        return;
    }

    /*
     * Check if we need to flush due to large pages.
     * Because large_page_mask contains all 1's from the msb,
     * we only need to test the end of the range.
     */
    if (((addr + len - 1) & d->large_page_mask) == d->large_page_addr) {
        tlb_debug("forcing full flush midx %d ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  midx, d->large_page_addr, d->large_page_mask);
        tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
        return;
    }

    for (target_ulong i = 0; i < len; i += TARGET_PAGE_SIZE) {
        target_ulong page = addr + i;
        CPUTLBEntry *entry = tlb_entry(env, midx, page);

        if (tlb_flush_entry_mask_locked(entry, page, mask)) {
            tlb_n_used_entries_dec(env, midx);
        }
        tlb_flush_vtlb_page_mask_locked(env, midx, page, mask);
    }
}

typedef struct {
    target_ulong addr;
    target_ulong len;
    uint16_t idxmap;
    uint16_t bits;
} TLBFlushRangeData;

static void tlb_flush_range_by_mmuidx_async_0(CPUState *cpu,
                                              TLBFlushRangeData d)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("range:" TARGET_FMT_lx "/%u+" TARGET_FMT_lx " mmu_map:0x%x\n",
              d.addr, d.bits, d.len, d.idxmap);

    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if ((d.idxmap >> mmu_idx) & 1) {
            tlb_flush_range_locked(env, mmu_idx, d.addr, d.len, d.bits);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);

    /*
     * If the length is larger than the jump cache size, then it will take
     * longer to clear each entry individually than it will to clear it all.
     */
    if (d.len >= (TARGET_PAGE_SIZE * TB_JMP_CACHE_SIZE)) {
        tcg_flush_jmp_cache(cpu);
        return;
    }

    /*
     * Discard jump cache entries for any tb which might potentially
     * overlap the flushed pages, which includes the previous.
     */
    d.addr -= TARGET_PAGE_SIZE;
    for (target_ulong i = 0, n = d.len / TARGET_PAGE_SIZE + 1; i < n; i++) {
        tb_jmp_cache_clear_page(cpu, d.addr);
        d.addr += TARGET_PAGE_SIZE;
    }
}

static void tlb_flush_range_by_mmuidx_async_1(CPUState *cpu,
                                              run_on_cpu_data data)
{
    TLBFlushRangeData *d = data.host_ptr;
    tlb_flush_range_by_mmuidx_async_0(cpu, *d);
    g_free(d);
}

void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr,
                               target_ulong len, uint16_t idxmap,
                               unsigned bits)
{
    TLBFlushRangeData d;

    /*
     * If all bits are significant, and len is small,
     * this devolves to tlb_flush_page.
     */
    if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
        tlb_flush_page_by_mmuidx(cpu, addr, idxmap);
        return;
    }
    /* If no page bits are significant, this devolves to tlb_flush. */
    if (bits < TARGET_PAGE_BITS) {
        tlb_flush_by_mmuidx(cpu, idxmap);
        return;
    }

    /* This should already be page aligned */
    d.addr = addr & TARGET_PAGE_MASK;
    d.len = len;
    d.idxmap = idxmap;
    d.bits = bits;

    if (qemu_cpu_is_self(cpu)) {
        tlb_flush_range_by_mmuidx_async_0(cpu, d);
    } else {
        /* Otherwise allocate a structure, freed by the worker.  */
        TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
        async_run_on_cpu(cpu, tlb_flush_range_by_mmuidx_async_1,
                         RUN_ON_CPU_HOST_PTR(p));
    }
}

void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr,
                                   uint16_t idxmap, unsigned bits)
{
    tlb_flush_range_by_mmuidx(cpu, addr, TARGET_PAGE_SIZE, idxmap, bits);
}

854600b819fSRichard Henderson void tlb_flush_range_by_mmuidx_all_cpus(CPUState *src_cpu,
855600b819fSRichard Henderson                                         target_ulong addr, target_ulong len,
856600b819fSRichard Henderson                                         uint16_t idxmap, unsigned bits)
8573ab6e68cSRichard Henderson {
8583960a59fSRichard Henderson     TLBFlushRangeData d;
859d34e4d1aSRichard Henderson     CPUState *dst_cpu;
8603ab6e68cSRichard Henderson 
861600b819fSRichard Henderson     /*
862600b819fSRichard Henderson      * If all bits are significant, and len is small,
863600b819fSRichard Henderson      * this devolves to tlb_flush_page.
864600b819fSRichard Henderson      */
865600b819fSRichard Henderson     if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
8663ab6e68cSRichard Henderson         tlb_flush_page_by_mmuidx_all_cpus(src_cpu, addr, idxmap);
8673ab6e68cSRichard Henderson         return;
8683ab6e68cSRichard Henderson     }
8693ab6e68cSRichard Henderson     /* If no page bits are significant, this devolves to tlb_flush. */
8703ab6e68cSRichard Henderson     if (bits < TARGET_PAGE_BITS) {
8713ab6e68cSRichard Henderson         tlb_flush_by_mmuidx_all_cpus(src_cpu, idxmap);
8723ab6e68cSRichard Henderson         return;
8733ab6e68cSRichard Henderson     }
8743ab6e68cSRichard Henderson 
8753ab6e68cSRichard Henderson     /* This should already be page aligned */
8763ab6e68cSRichard Henderson     d.addr = addr & TARGET_PAGE_MASK;
877600b819fSRichard Henderson     d.len = len;
8783ab6e68cSRichard Henderson     d.idxmap = idxmap;
8793ab6e68cSRichard Henderson     d.bits = bits;
8803ab6e68cSRichard Henderson 
8813ab6e68cSRichard Henderson     /* Allocate a separate data block for each destination cpu.  */
8823ab6e68cSRichard Henderson     CPU_FOREACH(dst_cpu) {
8833ab6e68cSRichard Henderson         if (dst_cpu != src_cpu) {
8843960a59fSRichard Henderson             TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
8853ab6e68cSRichard Henderson             async_run_on_cpu(dst_cpu,
886206a583dSRichard Henderson                              tlb_flush_range_by_mmuidx_async_1,
8873ab6e68cSRichard Henderson                              RUN_ON_CPU_HOST_PTR(p));
8883ab6e68cSRichard Henderson         }
8893ab6e68cSRichard Henderson     }
8903ab6e68cSRichard Henderson 
8916be48e45SRichard Henderson     tlb_flush_range_by_mmuidx_async_0(src_cpu, d);
8923ab6e68cSRichard Henderson }
8933ab6e68cSRichard Henderson 
894600b819fSRichard Henderson void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu,
895600b819fSRichard Henderson                                             target_ulong addr,
896600b819fSRichard Henderson                                             uint16_t idxmap, unsigned bits)
897600b819fSRichard Henderson {
898600b819fSRichard Henderson     tlb_flush_range_by_mmuidx_all_cpus(src_cpu, addr, TARGET_PAGE_SIZE,
899600b819fSRichard Henderson                                        idxmap, bits);
900600b819fSRichard Henderson }
901600b819fSRichard Henderson 
void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                               target_ulong addr,
                                               target_ulong len,
                                               uint16_t idxmap,
                                               unsigned bits)
{
    TLBFlushRangeData d, *p;
    CPUState *dst_cpu;

    /*
     * If all bits are significant, and len is small,
     * this devolves to tlb_flush_page.
     */
    if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
        tlb_flush_page_by_mmuidx_all_cpus_synced(src_cpu, addr, idxmap);
        return;
    }
    /* If no page bits are significant, this devolves to tlb_flush. */
    if (bits < TARGET_PAGE_BITS) {
        tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, idxmap);
        return;
    }

    /* This should already be page aligned */
    d.addr = addr & TARGET_PAGE_MASK;
    d.len = len;
    d.idxmap = idxmap;
    d.bits = bits;

    /* Allocate a separate data block for each destination cpu.  */
    CPU_FOREACH(dst_cpu) {
        if (dst_cpu != src_cpu) {
            p = g_memdup(&d, sizeof(d));
            async_run_on_cpu(dst_cpu, tlb_flush_range_by_mmuidx_async_1,
                             RUN_ON_CPU_HOST_PTR(p));
        }
    }

    p = g_memdup(&d, sizeof(d));
    async_safe_run_on_cpu(src_cpu, tlb_flush_range_by_mmuidx_async_1,
                          RUN_ON_CPU_HOST_PTR(p));
}

void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                                   target_ulong addr,
                                                   uint16_t idxmap,
                                                   unsigned bits)
{
    tlb_flush_range_by_mmuidx_all_cpus_synced(src_cpu, addr, TARGET_PAGE_SIZE,
                                              idxmap, bits);
}

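/*
 * Illustrative sketch (not part of the build): a target emulating a
 * range-based TLB invalidate instruction (e.g. Arm TLBI ...RANGE) might
 * call the range flush like this; the span, mmu index and bit count
 * below are exposition only.
 */
#if 0
    /* Flush 8 pages at 'addr' from MMU index 0 on all vCPUs, matching
       only the low 48 virtual address bits, and wait for completion. */
    tlb_flush_range_by_mmuidx_all_cpus_synced(src_cpu, addr,
                                              8 * TARGET_PAGE_SIZE,
                                              1 << 0, 48);
#endif
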
/*
 * Update the TLBs so that writes to code in the RAM page 'ram_addr'
 * can be detected.
 */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_test_and_clear_dirty(ram_addr & TARGET_PAGE_MASK,
                                             TARGET_PAGE_SIZE,
                                             DIRTY_MEMORY_CODE);
}

/*
 * Update the TLB so that writes in the RAM page 'ram_addr' are no longer
 * tested for self-modifying code.
 */
void tlb_unprotect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}

/*
 * Dirty write flag handling
 *
 * When the TCG code writes to a location it looks up the address in
 * the TLB and uses that data to compute the final address. If any of
 * the lower bits of the address are set then the slow path is forced.
 * There are a number of reasons to do this but for normal RAM the
 * most usual is detecting writes to code regions which may invalidate
 * generated code.
 *
 * Other vCPUs might be reading their TLBs during guest execution, so we update
 * te->addr_write with qatomic_set. We don't need to worry about this for
 * oversized guests as MTTCG is disabled for them.
 *
 * Called with tlb_c.lock held.
 */
static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
                                         uintptr_t start, uintptr_t length)
{
    uintptr_t addr = tlb_entry->addr_write;

    if ((addr & (TLB_INVALID_MASK | TLB_MMIO |
                 TLB_DISCARD_WRITE | TLB_NOTDIRTY)) == 0) {
        addr &= TARGET_PAGE_MASK;
        addr += tlb_entry->addend;
        if ((addr - start) < length) {
#if TCG_OVERSIZED_GUEST
            tlb_entry->addr_write |= TLB_NOTDIRTY;
#else
            qatomic_set(&tlb_entry->addr_write,
                        tlb_entry->addr_write | TLB_NOTDIRTY);
#endif
        }
    }
}

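/*
 * Illustrative sketch (not part of the build): the fast path compares the
 * guest address against the whole addr_write word, so any TLB_* flag left
 * in the low bits (such as the TLB_NOTDIRTY set above) breaks the equality
 * and diverts the store to the slow path.  The helper name is hypothetical.
 */
#if 0
static inline bool example_store_takes_fast_path(const CPUTLBEntry *te,
                                                 target_ulong addr)
{
    /* Equal only if the page matches and no flag bits are set. */
    return te->addr_write == (addr & TARGET_PAGE_MASK);
}
#endif
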
/*
 * Called with tlb_c.lock held.
 * Called only from the vCPU context, i.e. the TLB's owner thread.
 */
static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s)
{
    *d = *s;
}

/*
 * This is a cross-vCPU call (i.e. another vCPU resetting the flags of
 * the target vCPU).
 * We must take tlb_c.lock to avoid racing with another vCPU update. The only
 * thing actually updated is the target TLB entry ->addr_write flags.
 */
void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        unsigned int i;
        unsigned int n = tlb_n_entries(&env_tlb(env)->f[mmu_idx]);

        for (i = 0; i < n; i++) {
            tlb_reset_dirty_range_locked(&env_tlb(env)->f[mmu_idx].table[i],
                                         start1, length);
        }

        for (i = 0; i < CPU_VTLB_SIZE; i++) {
            tlb_reset_dirty_range_locked(&env_tlb(env)->d[mmu_idx].vtable[i],
                                         start1, length);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);
}

/* Called with tlb_c.lock held */
static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry,
                                         target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}

/*
 * Update the TLB entries for virtual page 'vaddr' so that writes are no
 * longer trapped for dirty tracking; called once the page has been dirtied.
 */
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    vaddr &= TARGET_PAGE_MASK;
    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, vaddr), vaddr);
    }

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_set_dirty1_locked(&env_tlb(env)->d[mmu_idx].vtable[k], vaddr);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);
}

/*
 * Our TLB does not support large pages, so remember the area covered by
 * large pages and trigger a full TLB flush if these are invalidated.
 */
static void tlb_add_large_page(CPUArchState *env, int mmu_idx,
                               target_ulong vaddr, target_ulong size)
{
    target_ulong lp_addr = env_tlb(env)->d[mmu_idx].large_page_addr;
    target_ulong lp_mask = ~(size - 1);

    if (lp_addr == (target_ulong)-1) {
        /* No previous large page.  */
        lp_addr = vaddr;
    } else {
        /*
         * Extend the existing region to include the new page.
         * This is a compromise between unnecessary flushes and
         * the cost of maintaining a full variable size TLB.
         */
        lp_mask &= env_tlb(env)->d[mmu_idx].large_page_mask;
        while (((lp_addr ^ vaddr) & lp_mask) != 0) {
            lp_mask <<= 1;
        }
    }
    env_tlb(env)->d[mmu_idx].large_page_addr = lp_addr & lp_mask;
    env_tlb(env)->d[mmu_idx].large_page_mask = lp_mask;
}

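/*
 * Worked example with illustrative numbers: if mmu_idx already tracks a
 * 2MB page at 0x40000000 (lp_mask == 0xffe00000) and another 2MB page is
 * added at 0x40300000, then (lp_addr ^ vaddr) == 0x00300000.  The first
 * mask test leaves 0x00200000, so lp_mask widens to 0xffc00000; the test
 * now passes and the tracked region becomes the 4MB span at 0x40000000.
 */
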
/*
 * Add a new TLB entry. At most one entry for a given virtual address
 * is permitted. Only a single TARGET_PAGE_SIZE region is mapped; the
 * supplied size is only used by tlb_flush_page.
 *
 * Called from TCG-generated code, which is under an RCU read-side
 * critical section.
 */
void tlb_set_page_full(CPUState *cpu, int mmu_idx,
                       target_ulong vaddr, CPUTLBEntryFull *full)
{
    CPUArchState *env = cpu->env_ptr;
    CPUTLB *tlb = env_tlb(env);
    CPUTLBDesc *desc = &tlb->d[mmu_idx];
    MemoryRegionSection *section;
    unsigned int index;
    target_ulong address;
    target_ulong write_address;
    uintptr_t addend;
    CPUTLBEntry *te, tn;
    hwaddr iotlb, xlat, sz, paddr_page;
    target_ulong vaddr_page;
    int asidx, wp_flags, prot;
    bool is_ram, is_romd;

    assert_cpu_is_self(cpu);

    if (full->lg_page_size <= TARGET_PAGE_BITS) {
        sz = TARGET_PAGE_SIZE;
    } else {
        sz = (hwaddr)1 << full->lg_page_size;
        tlb_add_large_page(env, mmu_idx, vaddr, sz);
    }
    vaddr_page = vaddr & TARGET_PAGE_MASK;
    paddr_page = full->phys_addr & TARGET_PAGE_MASK;

    prot = full->prot;
    asidx = cpu_asidx_from_attrs(cpu, full->attrs);
    section = address_space_translate_for_iotlb(cpu, asidx, paddr_page,
                                                &xlat, &sz, full->attrs, &prot);
    assert(sz >= TARGET_PAGE_SIZE);

    tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
              " prot=%x idx=%d\n",
              vaddr, full->phys_addr, prot, mmu_idx);

    address = vaddr_page;
    if (full->lg_page_size < TARGET_PAGE_BITS) {
        /* Repeat the MMU check and TLB fill on every access.  */
        address |= TLB_INVALID_MASK;
    }
    if (full->attrs.byte_swap) {
        address |= TLB_BSWAP;
    }

    is_ram = memory_region_is_ram(section->mr);
    is_romd = memory_region_is_romd(section->mr);

    if (is_ram || is_romd) {
        /* RAM and ROMD both have associated host memory. */
        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
    } else {
        /* I/O does not; force the host address to NULL. */
        addend = 0;
    }

    write_address = address;
    if (is_ram) {
        iotlb = memory_region_get_ram_addr(section->mr) + xlat;
        /*
         * Computing is_clean is expensive; avoid all that unless
         * the page is actually writable.
         */
        if (prot & PAGE_WRITE) {
            if (section->readonly) {
                write_address |= TLB_DISCARD_WRITE;
            } else if (cpu_physical_memory_is_clean(iotlb)) {
                write_address |= TLB_NOTDIRTY;
            }
        }
    } else {
        /* I/O or ROMD */
        iotlb = memory_region_section_get_iotlb(cpu, section) + xlat;
        /*
         * Writes to romd devices must go through MMIO to enable write.
         * Reads to romd devices go through the ram_ptr found above,
         * but of course reads to I/O must go through MMIO.
         */
        write_address |= TLB_MMIO;
        if (!is_romd) {
            address = write_address;
        }
    }

    wp_flags = cpu_watchpoint_address_matches(cpu, vaddr_page,
                                              TARGET_PAGE_SIZE);

    index = tlb_index(env, mmu_idx, vaddr_page);
    te = tlb_entry(env, mmu_idx, vaddr_page);

    /*
     * Hold the TLB lock for the rest of the function. We could acquire/release
     * the lock several times in the function, but it is faster to amortize the
     * acquisition cost by acquiring it just once. Note that this leads to
     * a longer critical section, but this is not a concern since the TLB lock
     * is unlikely to be contended.
     */
    qemu_spin_lock(&tlb->c.lock);

    /* Note that the tlb is no longer clean.  */
    tlb->c.dirty |= 1 << mmu_idx;

    /* Make sure there's no cached translation for the new page.  */
    tlb_flush_vtlb_page_locked(env, mmu_idx, vaddr_page);

    /*
     * Only evict the old entry to the victim tlb if it's for a
     * different page; otherwise just overwrite the stale data.
     */
    if (!tlb_hit_page_anyprot(te, vaddr_page) && !tlb_entry_is_empty(te)) {
        unsigned vidx = desc->vindex++ % CPU_VTLB_SIZE;
        CPUTLBEntry *tv = &desc->vtable[vidx];

        /* Evict the old entry into the victim tlb.  */
        copy_tlb_helper_locked(tv, te);
        desc->vfulltlb[vidx] = desc->fulltlb[index];
        tlb_n_used_entries_dec(env, mmu_idx);
    }

    /* refill the tlb */
    /*
     * At this point iotlb contains a physical section number in the lower
     * TARGET_PAGE_BITS, and either
     *  + the ram_addr_t of the page base of the target RAM (RAM)
     *  + the offset within section->mr of the page base (I/O, ROMD)
     * We subtract the vaddr_page (which is page aligned and thus won't
     * disturb the low bits) to give an offset which can be added to the
     * (non-page-aligned) vaddr of the eventual memory access to get
     * the MemoryRegion offset for the access. Note that the vaddr we
     * subtract here is that of the page base, and not the same as the
     * vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
     */
    desc->fulltlb[index] = *full;
    desc->fulltlb[index].xlat_section = iotlb - vaddr_page;
    desc->fulltlb[index].phys_addr = paddr_page;
    desc->fulltlb[index].prot = prot;

    /* Now calculate the new entry */
    tn.addend = addend - vaddr_page;
    if (prot & PAGE_READ) {
        tn.addr_read = address;
        if (wp_flags & BP_MEM_READ) {
            tn.addr_read |= TLB_WATCHPOINT;
        }
    } else {
        tn.addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        tn.addr_code = address;
    } else {
        tn.addr_code = -1;
    }

    tn.addr_write = -1;
    if (prot & PAGE_WRITE) {
        tn.addr_write = write_address;
        if (prot & PAGE_WRITE_INV) {
            tn.addr_write |= TLB_INVALID_MASK;
        }
        if (wp_flags & BP_MEM_WRITE) {
            tn.addr_write |= TLB_WATCHPOINT;
        }
    }

    copy_tlb_helper_locked(te, &tn);
    tlb_n_used_entries_inc(env, mmu_idx);
    qemu_spin_unlock(&tlb->c.lock);
}

void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs, int prot,
                             int mmu_idx, target_ulong size)
{
    CPUTLBEntryFull full = {
        .phys_addr = paddr,
        .attrs = attrs,
        .prot = prot,
        .lg_page_size = ctz64(size)
    };

    assert(is_power_of_2(size));
    tlb_set_page_full(cpu, mmu_idx, vaddr, &full);
}

void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
                            prot, mmu_idx, size);
}

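/*
 * Illustrative sketch (not part of the build): the shape of a target's
 * tcg_ops->tlb_fill hook installing a translation with tlb_set_page().
 * The hook name and the identity mapping are assumptions for exposition;
 * real targets walk their page tables to produce paddr and prot.
 */
#if 0
bool example_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                          MMUAccessType access_type, int mmu_idx,
                          bool probe, uintptr_t retaddr)
{
    /* Identity-map the page with full permissions. */
    tlb_set_page(cs, address & TARGET_PAGE_MASK,
                 address & TARGET_PAGE_MASK,
                 PAGE_READ | PAGE_WRITE | PAGE_EXEC,
                 mmu_idx, TARGET_PAGE_SIZE);
    return true;
}
#endif
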
/*
 * Note: tlb_fill() can trigger a resize of the TLB. This means that all of the
 * caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must
 * be discarded and looked up again (e.g. via tlb_entry()).
 */
static void tlb_fill(CPUState *cpu, target_ulong addr, int size,
                     MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
    bool ok;

    /*
     * This is not a probe, so the only valid return is success; failure
     * should result in exception + longjmp to the cpu loop.
     */
    ok = cpu->cc->tcg_ops->tlb_fill(cpu, addr, size,
                                    access_type, mmu_idx, false, retaddr);
    assert(ok);
}

static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
                                        MMUAccessType access_type,
                                        int mmu_idx, uintptr_t retaddr)
{
    cpu->cc->tcg_ops->do_unaligned_access(cpu, addr, access_type,
                                          mmu_idx, retaddr);
}

static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr,
                                          vaddr addr, unsigned size,
                                          MMUAccessType access_type,
                                          int mmu_idx, MemTxAttrs attrs,
                                          MemTxResult response,
                                          uintptr_t retaddr)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (!cpu->ignore_memory_transaction_failures &&
        cc->tcg_ops->do_transaction_failed) {
        cc->tcg_ops->do_transaction_failed(cpu, physaddr, addr, size,
                                           access_type, mmu_idx, attrs,
                                           response, retaddr);
    }
}

static uint64_t io_readx(CPUArchState *env, CPUTLBEntryFull *full,
                         int mmu_idx, target_ulong addr, uintptr_t retaddr,
                         MMUAccessType access_type, MemOp op)
{
    CPUState *cpu = env_cpu(env);
    hwaddr mr_offset;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    uint64_t val;
    bool locked = false;
    MemTxResult r;

    section = iotlb_to_section(cpu, full->xlat_section, full->attrs);
    mr = section->mr;
    mr_offset = (full->xlat_section & TARGET_PAGE_MASK) + addr;
    cpu->mem_io_pc = retaddr;
    if (!cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    if (!qemu_mutex_iothread_locked()) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    r = memory_region_dispatch_read(mr, mr_offset, &val, op, full->attrs);
    if (r != MEMTX_OK) {
        hwaddr physaddr = mr_offset +
            section->offset_within_address_space -
            section->offset_within_region;

        cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), access_type,
                               mmu_idx, full->attrs, r, retaddr);
    }
    if (locked) {
        qemu_mutex_unlock_iothread();
    }

    return val;
}

/*
 * Save a potentially trashed CPUTLBEntryFull for later lookup by plugin.
 * This is read by tlb_plugin_lookup if the fulltlb entry doesn't match,
 * which can happen when io_writex's side effects change the memory layout.
 */
static void save_iotlb_data(CPUState *cs, MemoryRegionSection *section,
                            hwaddr mr_offset)
{
#ifdef CONFIG_PLUGIN
    SavedIOTLB *saved = &cs->saved_iotlb;
    saved->section = section;
    saved->mr_offset = mr_offset;
#endif
}

static void io_writex(CPUArchState *env, CPUTLBEntryFull *full,
                      int mmu_idx, uint64_t val, target_ulong addr,
                      uintptr_t retaddr, MemOp op)
{
    CPUState *cpu = env_cpu(env);
    hwaddr mr_offset;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    bool locked = false;
    MemTxResult r;

    section = iotlb_to_section(cpu, full->xlat_section, full->attrs);
    mr = section->mr;
    mr_offset = (full->xlat_section & TARGET_PAGE_MASK) + addr;
    if (!cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }
    cpu->mem_io_pc = retaddr;

    /*
     * The memory_region_dispatch may trigger a flush/resize
     * so for plugins we save the iotlb_data just in case.
     */
    save_iotlb_data(cpu, section, mr_offset);

    if (!qemu_mutex_iothread_locked()) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    r = memory_region_dispatch_write(mr, mr_offset, val, op, full->attrs);
    if (r != MEMTX_OK) {
        hwaddr physaddr = mr_offset +
            section->offset_within_address_space -
            section->offset_within_region;

        cpu_transaction_failed(cpu, physaddr, addr, memop_size(op),
                               MMU_DATA_STORE, mmu_idx, full->attrs, r,
                               retaddr);
    }
    if (locked) {
        qemu_mutex_unlock_iothread();
    }
}

static inline target_ulong tlb_read_ofs(CPUTLBEntry *entry, size_t ofs)
{
#if TCG_OVERSIZED_GUEST
    return *(target_ulong *)((uintptr_t)entry + ofs);
#else
    /* ofs might correspond to .addr_write, so use qatomic_read */
    return qatomic_read((target_ulong *)((uintptr_t)entry + ofs));
#endif
}

/*
 * Return true if ADDR is present in the victim tlb, and has been copied
 * back to the main tlb.
 */
static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
                           size_t elt_ofs, target_ulong page)
{
    size_t vidx;

    assert_cpu_is_self(env_cpu(env));
    for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
        CPUTLBEntry *vtlb = &env_tlb(env)->d[mmu_idx].vtable[vidx];
        target_ulong cmp;

        /* elt_ofs might correspond to .addr_write, so use qatomic_read */
#if TCG_OVERSIZED_GUEST
        cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);
#else
        cmp = qatomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs));
#endif

        if (cmp == page) {
            /* Found entry in victim tlb, swap tlb and iotlb.  */
            CPUTLBEntry tmptlb, *tlb = &env_tlb(env)->f[mmu_idx].table[index];

            qemu_spin_lock(&env_tlb(env)->c.lock);
            copy_tlb_helper_locked(&tmptlb, tlb);
            copy_tlb_helper_locked(tlb, vtlb);
            copy_tlb_helper_locked(vtlb, &tmptlb);
            qemu_spin_unlock(&env_tlb(env)->c.lock);

            CPUTLBEntryFull *f1 = &env_tlb(env)->d[mmu_idx].fulltlb[index];
            CPUTLBEntryFull *f2 = &env_tlb(env)->d[mmu_idx].vfulltlb[vidx];
            CPUTLBEntryFull tmpf;
            tmpf = *f1; *f1 = *f2; *f2 = tmpf;
            return true;
        }
    }
    return false;
}

/* Macro to call the above, with local variables from the use context.  */
#define VICTIM_TLB_HIT(TY, ADDR) \
  victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
                 (ADDR) & TARGET_PAGE_MASK)

static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
                           CPUTLBEntryFull *full, uintptr_t retaddr)
{
    ram_addr_t ram_addr = mem_vaddr + full->xlat_section;

    trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size);

    if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
        struct page_collection *pages
            = page_collection_lock(ram_addr, ram_addr + size);
        tb_invalidate_phys_page_fast(pages, ram_addr, size, retaddr);
        page_collection_unlock(pages);
    }

    /*
     * Set both VGA and migration bits for simplicity and to remove
     * the notdirty callback faster.
     */
    cpu_physical_memory_set_dirty_range(ram_addr, size, DIRTY_CLIENTS_NOCODE);

    /* We remove the notdirty callback only if the code has been flushed. */
    if (!cpu_physical_memory_is_clean(ram_addr)) {
        trace_memory_notdirty_set_dirty(mem_vaddr);
        tlb_set_dirty(cpu, mem_vaddr);
    }
}

static int probe_access_internal(CPUArchState *env, target_ulong addr,
                                 int fault_size, MMUAccessType access_type,
                                 int mmu_idx, bool nonfault,
                                 void **phost, CPUTLBEntryFull **pfull,
                                 uintptr_t retaddr)
{
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
    target_ulong tlb_addr, page_addr;
    size_t elt_ofs;
    int flags;

    switch (access_type) {
    case MMU_DATA_LOAD:
        elt_ofs = offsetof(CPUTLBEntry, addr_read);
        break;
    case MMU_DATA_STORE:
        elt_ofs = offsetof(CPUTLBEntry, addr_write);
        break;
    case MMU_INST_FETCH:
        elt_ofs = offsetof(CPUTLBEntry, addr_code);
        break;
    default:
        g_assert_not_reached();
    }
    tlb_addr = tlb_read_ofs(entry, elt_ofs);

    flags = TLB_FLAGS_MASK;
    page_addr = addr & TARGET_PAGE_MASK;
    if (!tlb_hit_page(tlb_addr, page_addr)) {
        if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, page_addr)) {
            CPUState *cs = env_cpu(env);

            if (!cs->cc->tcg_ops->tlb_fill(cs, addr, fault_size, access_type,
                                           mmu_idx, nonfault, retaddr)) {
                /* Non-faulting page table read failed.  */
                *phost = NULL;
                *pfull = NULL;
                return TLB_INVALID_MASK;
            }

            /* TLB resize via tlb_fill may have moved the entry.  */
            index = tlb_index(env, mmu_idx, addr);
            entry = tlb_entry(env, mmu_idx, addr);

            /*
             * With PAGE_WRITE_INV, we set TLB_INVALID_MASK immediately,
             * to force the next access through tlb_fill.  We've just
             * called tlb_fill, so we know that this entry *is* valid.
             */
            flags &= ~TLB_INVALID_MASK;
        }
        tlb_addr = tlb_read_ofs(entry, elt_ofs);
    }
    flags &= tlb_addr;

    *pfull = &env_tlb(env)->d[mmu_idx].fulltlb[index];

    /* Fold all "mmio-like" bits into TLB_MMIO.  This is not RAM.  */
    if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY))) {
        *phost = NULL;
        return TLB_MMIO;
    }

    /* Everything else is RAM. */
    *phost = (void *)((uintptr_t)addr + entry->addend);
    return flags;
}

int probe_access_full(CPUArchState *env, target_ulong addr,
                      MMUAccessType access_type, int mmu_idx,
                      bool nonfault, void **phost, CPUTLBEntryFull **pfull,
                      uintptr_t retaddr)
{
    int flags = probe_access_internal(env, addr, 0, access_type, mmu_idx,
                                      nonfault, phost, pfull, retaddr);

    /* Handle clean RAM pages.  */
    if (unlikely(flags & TLB_NOTDIRTY)) {
        notdirty_write(env_cpu(env), addr, 1, *pfull, retaddr);
        flags &= ~TLB_NOTDIRTY;
    }

    return flags;
}

int probe_access_flags(CPUArchState *env, target_ulong addr,
                       MMUAccessType access_type, int mmu_idx,
                       bool nonfault, void **phost, uintptr_t retaddr)
{
    CPUTLBEntryFull *full;

    return probe_access_full(env, addr, access_type, mmu_idx,
                             nonfault, phost, &full, retaddr);
}

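/*
 * Illustrative sketch (not part of the build): how a target helper might
 * probe non-faulting before an optional load; addr, mmu_idx and retaddr
 * are assumed to come from the caller's context.
 */
#if 0
    void *host;
    int flags = probe_access_flags(env, addr, MMU_DATA_LOAD, mmu_idx,
                                   true, &host, retaddr);

    if (flags & TLB_INVALID_MASK) {
        /* Non-faulting page table walk failed: suppress the access. */
    } else if (host && !(flags & (TLB_MMIO | TLB_WATCHPOINT))) {
        /* Plain RAM: the value may be loaded via 'host' directly. */
    }
#endif
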
void *probe_access(CPUArchState *env, target_ulong addr, int size,
                   MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
    CPUTLBEntryFull *full;
    void *host;
    int flags;

    g_assert(-(addr | TARGET_PAGE_MASK) >= size);

    flags = probe_access_internal(env, addr, size, access_type, mmu_idx,
                                  false, &host, &full, retaddr);

    /* Per the interface, size == 0 merely faults the access. */
    if (size == 0) {
        return NULL;
    }

    if (unlikely(flags & (TLB_NOTDIRTY | TLB_WATCHPOINT))) {
        /* Handle watchpoints.  */
        if (flags & TLB_WATCHPOINT) {
            int wp_access = (access_type == MMU_DATA_STORE
                             ? BP_MEM_WRITE : BP_MEM_READ);
            cpu_check_watchpoint(env_cpu(env), addr, size,
                                 full->attrs, wp_access, retaddr);
        }

        /* Handle clean RAM pages.  */
        if (flags & TLB_NOTDIRTY) {
            notdirty_write(env_cpu(env), addr, 1, full, retaddr);
        }
    }

    return host;
}

void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
                        MMUAccessType access_type, int mmu_idx)
{
    CPUTLBEntryFull *full;
    void *host;
    int flags;

    flags = probe_access_internal(env, addr, 0, access_type,
                                  mmu_idx, true, &host, &full, 0);

    /* No combination of flags is expected by the caller. */
    return flags ? NULL : host;
}

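/*
 * Illustrative sketch (not part of the build): tlb_vaddr_to_host() never
 * faults, so callers must handle a NULL result and fall back to a slow
 * path; the page-sized memset is exposition only.
 */
#if 0
    char *p = tlb_vaddr_to_host(env, addr, MMU_DATA_STORE, mmu_idx);

    if (p) {
        memset(p, 0, TARGET_PAGE_SIZE);    /* direct host access */
    } else {
        /* Not plain resident RAM: use the per-byte store helpers. */
    }
#endif
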
/*
 * Return a ram_addr_t for the virtual address for execution.
 *
 * Return -1 if we can't translate and execute from an entire page
 * of RAM.  This will force us to execute by loading and translating
 * one insn at a time, without caching.
 *
 * NOTE: This function will trigger an exception if the page is
 * not executable.
 */
tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
                                        void **hostp)
{
    CPUTLBEntryFull *full;
    void *p;

    (void)probe_access_internal(env, addr, 1, MMU_INST_FETCH,
                                cpu_mmu_index(env, true), false, &p, &full, 0);
    if (p == NULL) {
        return -1;
    }
    if (hostp) {
        *hostp = p;
    }
    return qemu_ram_addr_from_host_nofail(p);
}

#ifdef CONFIG_PLUGIN
/*
 * Perform a TLB lookup and populate the qemu_plugin_hwaddr structure.
 * This should be a hot path as we will have just looked this path up
 * in the softmmu lookup code (or helper). We don't handle re-fills or
 * checking the victim table. This is purely informational.
 *
 * This almost never fails as the memory access being instrumented
 * should have just filled the TLB. The one corner case is io_writex,
 * which can cause TLB flushes and potential resizing of the TLBs,
 * losing the information we need. In those cases we need to recover
 * data from a copy of the CPUTLBEntryFull. As long as this always occurs
 * from the same thread (which a mem callback will be), this is safe.
 */

bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr, int mmu_idx,
                       bool is_store, struct qemu_plugin_hwaddr *data)
{
    CPUArchState *env = cpu->env_ptr;
    CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    target_ulong tlb_addr = is_store ? tlb_addr_write(tlbe) : tlbe->addr_read;

    if (likely(tlb_hit(tlb_addr, addr))) {
        /* We must have an iotlb entry for MMIO */
        if (tlb_addr & TLB_MMIO) {
            CPUTLBEntryFull *full;
            full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
            data->is_io = true;
            data->v.io.section =
                iotlb_to_section(cpu, full->xlat_section, full->attrs);
            data->v.io.offset = (full->xlat_section & TARGET_PAGE_MASK) + addr;
        } else {
            data->is_io = false;
            data->v.ram.hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
        }
        return true;
    } else {
        SavedIOTLB *saved = &cpu->saved_iotlb;
        data->is_io = true;
        data->v.io.section = saved->section;
        data->v.io.offset = saved->mr_offset;
        return true;
    }
}

#endif

/*
 * Probe for an atomic operation.  Do not allow unaligned or I/O
 * operations to proceed.  Return the host address.
 *
 * @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE.
 */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
                               MemOpIdx oi, int size, int prot,
                               uintptr_t retaddr)
{
    uintptr_t mmu_idx = get_mmuidx(oi);
    MemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    uintptr_t index;
    CPUTLBEntry *tlbe;
    target_ulong tlb_addr;
    void *hostaddr;

    tcg_debug_assert(mmu_idx < NB_MMU_MODES);

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* Enforce guest required alignment.  */
    if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
        /* ??? Maybe indicate atomic op to cpu_unaligned_access */
        cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* Enforce qemu required alignment.  */
    if (unlikely(addr & (size - 1))) {
        /*
         * We get here if guest alignment was not requested, or was not
         * enforced by cpu_unaligned_access above.  We might widen the
         * access and emulate, but for now mark an exception and exit
         * the cpu loop.
         */
        goto stop_the_world;
    }

    index = tlb_index(env, mmu_idx, addr);
    tlbe = tlb_entry(env, mmu_idx, addr);

    /* Check TLB entry and enforce page permissions.  */
    if (prot & PAGE_WRITE) {
        tlb_addr = tlb_addr_write(tlbe);
        if (!tlb_hit(tlb_addr, addr)) {
            if (!VICTIM_TLB_HIT(addr_write, addr)) {
                tlb_fill(env_cpu(env), addr, size,
                         MMU_DATA_STORE, mmu_idx, retaddr);
                index = tlb_index(env, mmu_idx, addr);
                tlbe = tlb_entry(env, mmu_idx, addr);
            }
            tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
        }

        /* Let the guest notice RMW on a write-only page.  */
        if ((prot & PAGE_READ) &&
            unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) {
            tlb_fill(env_cpu(env), addr, size,
                     MMU_DATA_LOAD, mmu_idx, retaddr);
            /*
             * Since we don't support reads and writes to different addresses,
             * and we do have the proper page loaded for write, this shouldn't
             * ever return.  But just in case, handle via stop-the-world.
             */
            goto stop_the_world;
        }
    } else /* if (prot & PAGE_READ) */ {
        tlb_addr = tlbe->addr_read;
        if (!tlb_hit(tlb_addr, addr)) {
            if (!VICTIM_TLB_HIT(addr_read, addr)) {
                tlb_fill(env_cpu(env), addr, size,
                         MMU_DATA_LOAD, mmu_idx, retaddr);
                index = tlb_index(env, mmu_idx, addr);
                tlbe = tlb_entry(env, mmu_idx, addr);
            }
            tlb_addr = tlbe->addr_read & ~TLB_INVALID_MASK;
        }
    }
182908dff435SRichard Henderson 
183055df6fcfSPeter Maydell     /* Notice an IO access or a needs-MMU-lookup access */
183130d7e098SRichard Henderson     if (unlikely(tlb_addr & TLB_MMIO)) {
1832d9bb58e5SYang Zhong         /* There's really nothing that can be done to
1833d9bb58e5SYang Zhong            support this apart from stop-the-world.  */
1834d9bb58e5SYang Zhong         goto stop_the_world;
1835d9bb58e5SYang Zhong     }
1836d9bb58e5SYang Zhong 
183734d49937SPeter Maydell     hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
183834d49937SPeter Maydell 
183934d49937SPeter Maydell     if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
184008dff435SRichard Henderson         notdirty_write(env_cpu(env), addr, size,
184125d3ec58SRichard Henderson                        &env_tlb(env)->d[mmu_idx].fulltlb[index], retaddr);
184234d49937SPeter Maydell     }
184334d49937SPeter Maydell 
184434d49937SPeter Maydell     return hostaddr;
1845d9bb58e5SYang Zhong 
1846d9bb58e5SYang Zhong  stop_the_world:
184729a0af61SRichard Henderson     cpu_loop_exit_atomic(env_cpu(env), retaddr);
1848d9bb58e5SYang Zhong }
1849d9bb58e5SYang Zhong 
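/*
 * In outline, the atomic helpers generated from atomic_template.h
 * consume the host pointer returned above roughly as follows (a
 * sketch, not the verbatim template):
 *
 *     DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
 *                                          PAGE_READ | PAGE_WRITE, retaddr);
 *     DATA_TYPE ret = qatomic_cmpxchg__nocheck(haddr, cmpv, newv);
 *     ATOMIC_MMU_CLEANUP;
 */
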
1850eed56642SAlex Bennée /*
1851f83bcecbSRichard Henderson  * Verify that we have passed the correct MemOp to the correct function.
1852f83bcecbSRichard Henderson  *
1853f83bcecbSRichard Henderson  * In the case of the helper_*_mmu functions, we will have done this by
1854f83bcecbSRichard Henderson  * using the MemOp to look up the helper during code generation.
1855f83bcecbSRichard Henderson  *
1856f83bcecbSRichard Henderson  * In the case of the cpu_*_mmu functions, this is up to the caller.
1857f83bcecbSRichard Henderson  * We could present one function to target code, and dispatch based on
1858f83bcecbSRichard Henderson  * the MemOp, but so far we have worked hard to avoid an indirect function
1859f83bcecbSRichard Henderson  * call along the memory path.
1860f83bcecbSRichard Henderson  */
1861f83bcecbSRichard Henderson static void validate_memop(MemOpIdx oi, MemOp expected)
1862f83bcecbSRichard Henderson {
1863f83bcecbSRichard Henderson #ifdef CONFIG_DEBUG_TCG
1864f83bcecbSRichard Henderson     MemOp have = get_memop(oi) & (MO_SIZE | MO_BSWAP);
1865f83bcecbSRichard Henderson     assert(have == expected);
1866f83bcecbSRichard Henderson #endif
1867f83bcecbSRichard Henderson }
1868f83bcecbSRichard Henderson 
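/*
 * For illustration: get_memop() recovers the MemOp from a MemOpIdx,
 * so for an oi built as make_memop_idx(MO_LEUL, mmu_idx),
 *
 *     validate_memop(oi, MO_LEUL);   // passes
 *     validate_memop(oi, MO_BEUL);   // asserts under CONFIG_DEBUG_TCG
 *
 * the second call would trip the assertion, since the byte-swap bits
 * disagree with the function being validated.
 */
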
1869f83bcecbSRichard Henderson /*
1870eed56642SAlex Bennée  * Load Helpers
1871eed56642SAlex Bennée  *
1872eed56642SAlex Bennée  * We support two different access types. SOFTMMU_CODE_ACCESS is
1873eed56642SAlex Bennée  * specifically for reading instructions from system memory. It is
1874eed56642SAlex Bennée  * called by the translation loop and in some helpers where the code
1875eed56642SAlex Bennée  * is disassembled. It shouldn't be called directly by guest code.
1876eed56642SAlex Bennée  */
1877d9bb58e5SYang Zhong 
18782dd92606SRichard Henderson typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr,
18799002ffcbSRichard Henderson                                 MemOpIdx oi, uintptr_t retaddr);
18802dd92606SRichard Henderson 
1881c6b716cdSRichard Henderson static inline uint64_t QEMU_ALWAYS_INLINE
188280d9d1c6SRichard Henderson load_memop(const void *haddr, MemOp op)
188380d9d1c6SRichard Henderson {
188480d9d1c6SRichard Henderson     switch (op) {
188580d9d1c6SRichard Henderson     case MO_UB:
188680d9d1c6SRichard Henderson         return ldub_p(haddr);
188780d9d1c6SRichard Henderson     case MO_BEUW:
188880d9d1c6SRichard Henderson         return lduw_be_p(haddr);
188980d9d1c6SRichard Henderson     case MO_LEUW:
189080d9d1c6SRichard Henderson         return lduw_le_p(haddr);
189180d9d1c6SRichard Henderson     case MO_BEUL:
189280d9d1c6SRichard Henderson         return (uint32_t)ldl_be_p(haddr);
189380d9d1c6SRichard Henderson     case MO_LEUL:
189480d9d1c6SRichard Henderson         return (uint32_t)ldl_le_p(haddr);
1895fc313c64SFrédéric Pétrot     case MO_BEUQ:
189680d9d1c6SRichard Henderson         return ldq_be_p(haddr);
1897fc313c64SFrédéric Pétrot     case MO_LEUQ:
189880d9d1c6SRichard Henderson         return ldq_le_p(haddr);
189980d9d1c6SRichard Henderson     default:
190080d9d1c6SRichard Henderson         qemu_build_not_reached();
190180d9d1c6SRichard Henderson     }
190280d9d1c6SRichard Henderson }
190380d9d1c6SRichard Henderson 
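/*
 * Every caller passes a compile-time-constant op, so the switch above
 * folds to a single host load; e.g. load_memop(haddr, MO_LEUW) reduces
 * to lduw_le_p(haddr).  The qemu_build_not_reached() default enforces
 * this by failing the build if the call survives optimization.
 */
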
190480d9d1c6SRichard Henderson static inline uint64_t QEMU_ALWAYS_INLINE
19059002ffcbSRichard Henderson load_helper(CPUArchState *env, target_ulong addr, MemOpIdx oi,
1906be5c4787STony Nguyen             uintptr_t retaddr, MemOp op, bool code_read,
19072dd92606SRichard Henderson             FullLoadHelper *full_load)
1908eed56642SAlex Bennée {
1909eed56642SAlex Bennée     const size_t tlb_off = code_read ?
1910eed56642SAlex Bennée         offsetof(CPUTLBEntry, addr_code) : offsetof(CPUTLBEntry, addr_read);
1911f1be3696SRichard Henderson     const MMUAccessType access_type =
1912f1be3696SRichard Henderson         code_read ? MMU_INST_FETCH : MMU_DATA_LOAD;
1913b826044fSRichard Henderson     const unsigned a_bits = get_alignment_bits(get_memop(oi));
1914b826044fSRichard Henderson     const size_t size = memop_size(op);
1915b826044fSRichard Henderson     uintptr_t mmu_idx = get_mmuidx(oi);
1916b826044fSRichard Henderson     uintptr_t index;
1917b826044fSRichard Henderson     CPUTLBEntry *entry;
1918b826044fSRichard Henderson     target_ulong tlb_addr;
1919eed56642SAlex Bennée     void *haddr;
1920eed56642SAlex Bennée     uint64_t res;
1921b826044fSRichard Henderson 
1922b826044fSRichard Henderson     tcg_debug_assert(mmu_idx < NB_MMU_MODES);
1923d9bb58e5SYang Zhong 
1924eed56642SAlex Bennée     /* Handle CPU specific unaligned behaviour */
1925eed56642SAlex Bennée     if (addr & ((1 << a_bits) - 1)) {
192629a0af61SRichard Henderson         cpu_unaligned_access(env_cpu(env), addr, access_type,
1927eed56642SAlex Bennée                              mmu_idx, retaddr);
1928eed56642SAlex Bennée     }
1929eed56642SAlex Bennée 
1930b826044fSRichard Henderson     index = tlb_index(env, mmu_idx, addr);
1931b826044fSRichard Henderson     entry = tlb_entry(env, mmu_idx, addr);
1932b826044fSRichard Henderson     tlb_addr = code_read ? entry->addr_code : entry->addr_read;
1933b826044fSRichard Henderson 
1934eed56642SAlex Bennée     /* If the TLB entry is for a different page, reload and try again.  */
1935eed56642SAlex Bennée     if (!tlb_hit(tlb_addr, addr)) {
1936eed56642SAlex Bennée         if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
1937eed56642SAlex Bennée                             addr & TARGET_PAGE_MASK)) {
193829a0af61SRichard Henderson             tlb_fill(env_cpu(env), addr, size,
1939f1be3696SRichard Henderson                      access_type, mmu_idx, retaddr);
1940eed56642SAlex Bennée             index = tlb_index(env, mmu_idx, addr);
1941eed56642SAlex Bennée             entry = tlb_entry(env, mmu_idx, addr);
1942eed56642SAlex Bennée         }
1943eed56642SAlex Bennée         tlb_addr = code_read ? entry->addr_code : entry->addr_read;
194430d7e098SRichard Henderson         tlb_addr &= ~TLB_INVALID_MASK;
1945eed56642SAlex Bennée     }
1946eed56642SAlex Bennée 
194750b107c5SRichard Henderson     /* Handle anything that isn't just a straight memory access.  */
1948eed56642SAlex Bennée     if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
194925d3ec58SRichard Henderson         CPUTLBEntryFull *full;
19505b87b3e6SRichard Henderson         bool need_swap;
195150b107c5SRichard Henderson 
195250b107c5SRichard Henderson         /* For anything that is unaligned, recurse through full_load.  */
1953eed56642SAlex Bennée         if ((addr & (size - 1)) != 0) {
1954eed56642SAlex Bennée             goto do_unaligned_access;
1955eed56642SAlex Bennée         }
195650b107c5SRichard Henderson 
195725d3ec58SRichard Henderson         full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
195850b107c5SRichard Henderson 
195950b107c5SRichard Henderson         /* Handle watchpoints.  */
196050b107c5SRichard Henderson         if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
196150b107c5SRichard Henderson             /* On watchpoint hit, this will longjmp out.  */
196250b107c5SRichard Henderson             cpu_check_watchpoint(env_cpu(env), addr, size,
196325d3ec58SRichard Henderson                                  full->attrs, BP_MEM_READ, retaddr);
19645b87b3e6SRichard Henderson         }
196550b107c5SRichard Henderson 
19665b87b3e6SRichard Henderson         need_swap = size > 1 && (tlb_addr & TLB_BSWAP);
196750b107c5SRichard Henderson 
196850b107c5SRichard Henderson         /* Handle I/O access.  */
19695b87b3e6SRichard Henderson         if (likely(tlb_addr & TLB_MMIO)) {
197025d3ec58SRichard Henderson             return io_readx(env, full, mmu_idx, addr, retaddr,
19715b87b3e6SRichard Henderson                             access_type, op ^ (need_swap * MO_BSWAP));
19725b87b3e6SRichard Henderson         }
19735b87b3e6SRichard Henderson 
19745b87b3e6SRichard Henderson         haddr = (void *)((uintptr_t)addr + entry->addend);
19755b87b3e6SRichard Henderson 
19765b87b3e6SRichard Henderson         /*
19775b87b3e6SRichard Henderson          * Keep these two load_memop separate to ensure that the compiler
19785b87b3e6SRichard Henderson          * is able to fold the entire function to a single instruction.
19795b87b3e6SRichard Henderson          * There is a build-time assert inside to remind you of this.  ;-)
19805b87b3e6SRichard Henderson          */
19815b87b3e6SRichard Henderson         if (unlikely(need_swap)) {
19825b87b3e6SRichard Henderson             return load_memop(haddr, op ^ MO_BSWAP);
19835b87b3e6SRichard Henderson         }
19845b87b3e6SRichard Henderson         return load_memop(haddr, op);
1985eed56642SAlex Bennée     }
1986eed56642SAlex Bennée 
1987eed56642SAlex Bennée     /* Handle slow unaligned access (it spans two pages or IO).  */
1988eed56642SAlex Bennée     if (size > 1
1989eed56642SAlex Bennée         && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
1990eed56642SAlex Bennée                     >= TARGET_PAGE_SIZE)) {
1991eed56642SAlex Bennée         target_ulong addr1, addr2;
19928c79b288SAlex Bennée         uint64_t r1, r2;
1993eed56642SAlex Bennée         unsigned shift;
1994eed56642SAlex Bennée     do_unaligned_access:
1995ab7a2009SAlex Bennée         addr1 = addr & ~((target_ulong)size - 1);
1996eed56642SAlex Bennée         addr2 = addr1 + size;
19972dd92606SRichard Henderson         r1 = full_load(env, addr1, oi, retaddr);
19982dd92606SRichard Henderson         r2 = full_load(env, addr2, oi, retaddr);
1999eed56642SAlex Bennée         shift = (addr & (size - 1)) * 8;
2000eed56642SAlex Bennée 
2001be5c4787STony Nguyen         if (memop_big_endian(op)) {
2002eed56642SAlex Bennée             /* Big-endian combine.  */
2003eed56642SAlex Bennée             res = (r1 << shift) | (r2 >> ((size * 8) - shift));
2004eed56642SAlex Bennée         } else {
2005eed56642SAlex Bennée             /* Little-endian combine.  */
2006eed56642SAlex Bennée             res = (r1 >> shift) | (r2 << ((size * 8) - shift));
2007eed56642SAlex Bennée         }
2008eed56642SAlex Bennée         return res & MAKE_64BIT_MASK(0, size * 8);
2009eed56642SAlex Bennée     }
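    /*
     * Worked example of the combine above: for a 4-byte little-endian
     * load with (addr & 3) == 1, r1 holds bytes [0..3] and r2 holds
     * bytes [4..7] of the enclosing aligned pair, shift == 8, and
     *     res = (r1 >> 8) | (r2 << 24)
     * leaves bytes [1..4] in the low 32 bits before the final mask.
     */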
2010eed56642SAlex Bennée 
2011eed56642SAlex Bennée     haddr = (void *)((uintptr_t)addr + entry->addend);
201280d9d1c6SRichard Henderson     return load_memop(haddr, op);
2013eed56642SAlex Bennée }
2014eed56642SAlex Bennée 
2015eed56642SAlex Bennée /*
2016eed56642SAlex Bennée  * For the benefit of TCG generated code, we want to avoid the
2017eed56642SAlex Bennée  * complication of ABI-specific return type promotion and always
2018eed56642SAlex Bennée  * return a value extended to the register size of the host. This is
2019eed56642SAlex Bennée  * tcg_target_long, except in the case of a 32-bit host and 64-bit
2020eed56642SAlex Bennée  * data, and for that we always have uint64_t.
2021eed56642SAlex Bennée  *
2022eed56642SAlex Bennée  * We don't bother with this widened value for SOFTMMU_CODE_ACCESS.
2023eed56642SAlex Bennée  */
2024eed56642SAlex Bennée 
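/*
 * Concretely: for the 16-bit value 0x8000, helper_le_lduw_mmu()
 * returns 0x0000000000008000 on a 64-bit host, while the signed
 * variant helper_le_ldsw_mmu() below returns 0xffffffffffff8000.
 */
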
20252dd92606SRichard Henderson static uint64_t full_ldub_mmu(CPUArchState *env, target_ulong addr,
20269002ffcbSRichard Henderson                               MemOpIdx oi, uintptr_t retaddr)
20272dd92606SRichard Henderson {
2028f83bcecbSRichard Henderson     validate_memop(oi, MO_UB);
2029be5c4787STony Nguyen     return load_helper(env, addr, oi, retaddr, MO_UB, false, full_ldub_mmu);
20302dd92606SRichard Henderson }
20312dd92606SRichard Henderson 
2032fc1bc777SRichard Henderson tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
20339002ffcbSRichard Henderson                                      MemOpIdx oi, uintptr_t retaddr)
2034eed56642SAlex Bennée {
20352dd92606SRichard Henderson     return full_ldub_mmu(env, addr, oi, retaddr);
20362dd92606SRichard Henderson }
20372dd92606SRichard Henderson 
20382dd92606SRichard Henderson static uint64_t full_le_lduw_mmu(CPUArchState *env, target_ulong addr,
20399002ffcbSRichard Henderson                                  MemOpIdx oi, uintptr_t retaddr)
20402dd92606SRichard Henderson {
2041f83bcecbSRichard Henderson     validate_memop(oi, MO_LEUW);
2042be5c4787STony Nguyen     return load_helper(env, addr, oi, retaddr, MO_LEUW, false,
20432dd92606SRichard Henderson                        full_le_lduw_mmu);
2044eed56642SAlex Bennée }
2045eed56642SAlex Bennée 
2046fc1bc777SRichard Henderson tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
20479002ffcbSRichard Henderson                                     MemOpIdx oi, uintptr_t retaddr)
2048eed56642SAlex Bennée {
20492dd92606SRichard Henderson     return full_le_lduw_mmu(env, addr, oi, retaddr);
20502dd92606SRichard Henderson }
20512dd92606SRichard Henderson 
20522dd92606SRichard Henderson static uint64_t full_be_lduw_mmu(CPUArchState *env, target_ulong addr,
20539002ffcbSRichard Henderson                                  MemOpIdx oi, uintptr_t retaddr)
20542dd92606SRichard Henderson {
2055f83bcecbSRichard Henderson     validate_memop(oi, MO_BEUW);
2056be5c4787STony Nguyen     return load_helper(env, addr, oi, retaddr, MO_BEUW, false,
20572dd92606SRichard Henderson                        full_be_lduw_mmu);
2058eed56642SAlex Bennée }
2059eed56642SAlex Bennée 
2060fc1bc777SRichard Henderson tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
20619002ffcbSRichard Henderson                                     MemOpIdx oi, uintptr_t retaddr)
2062eed56642SAlex Bennée {
20632dd92606SRichard Henderson     return full_be_lduw_mmu(env, addr, oi, retaddr);
20642dd92606SRichard Henderson }
20652dd92606SRichard Henderson 
20662dd92606SRichard Henderson static uint64_t full_le_ldul_mmu(CPUArchState *env, target_ulong addr,
20679002ffcbSRichard Henderson                                  MemOpIdx oi, uintptr_t retaddr)
20682dd92606SRichard Henderson {
2069f83bcecbSRichard Henderson     validate_memop(oi, MO_LEUL);
2070be5c4787STony Nguyen     return load_helper(env, addr, oi, retaddr, MO_LEUL, false,
20712dd92606SRichard Henderson                        full_le_ldul_mmu);
2072eed56642SAlex Bennée }
2073eed56642SAlex Bennée 
2074fc1bc777SRichard Henderson tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
20759002ffcbSRichard Henderson                                     MemOpIdx oi, uintptr_t retaddr)
2076eed56642SAlex Bennée {
20772dd92606SRichard Henderson     return full_le_ldul_mmu(env, addr, oi, retaddr);
20782dd92606SRichard Henderson }
20792dd92606SRichard Henderson 
20802dd92606SRichard Henderson static uint64_t full_be_ldul_mmu(CPUArchState *env, target_ulong addr,
20819002ffcbSRichard Henderson                                  MemOpIdx oi, uintptr_t retaddr)
20822dd92606SRichard Henderson {
2083f83bcecbSRichard Henderson     validate_memop(oi, MO_BEUL);
2084be5c4787STony Nguyen     return load_helper(env, addr, oi, retaddr, MO_BEUL, false,
20852dd92606SRichard Henderson                        full_be_ldul_mmu);
2086eed56642SAlex Bennée }
2087eed56642SAlex Bennée 
2088fc1bc777SRichard Henderson tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
20899002ffcbSRichard Henderson                                     MemOpIdx oi, uintptr_t retaddr)
2090eed56642SAlex Bennée {
20912dd92606SRichard Henderson     return full_be_ldul_mmu(env, addr, oi, retaddr);
2092eed56642SAlex Bennée }
2093eed56642SAlex Bennée 
2094fc1bc777SRichard Henderson uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
20959002ffcbSRichard Henderson                            MemOpIdx oi, uintptr_t retaddr)
2096eed56642SAlex Bennée {
2097fc313c64SFrédéric Pétrot     validate_memop(oi, MO_LEUQ);
2098fc313c64SFrédéric Pétrot     return load_helper(env, addr, oi, retaddr, MO_LEUQ, false,
20992dd92606SRichard Henderson                        helper_le_ldq_mmu);
2100eed56642SAlex Bennée }
2101eed56642SAlex Bennée 
2102fc1bc777SRichard Henderson uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
21039002ffcbSRichard Henderson                            MemOpIdx oi, uintptr_t retaddr)
2104eed56642SAlex Bennée {
2105fc313c64SFrédéric Pétrot     validate_memop(oi, MO_BEUQ);
2106fc313c64SFrédéric Pétrot     return load_helper(env, addr, oi, retaddr, MO_BEUQ, false,
21072dd92606SRichard Henderson                        helper_be_ldq_mmu);
2108eed56642SAlex Bennée }
2109eed56642SAlex Bennée 
2110eed56642SAlex Bennée /*
2111eed56642SAlex Bennée  * Provide signed versions of the load routines as well.  We can of course
2112eed56642SAlex Bennée  * avoid this for 64-bit data, or for 32-bit data on a 32-bit host.
2113eed56642SAlex Bennée  */
2114eed56642SAlex Bennée 
2115eed56642SAlex Bennée 
2116eed56642SAlex Bennée tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
21179002ffcbSRichard Henderson                                      MemOpIdx oi, uintptr_t retaddr)
2118eed56642SAlex Bennée {
2119eed56642SAlex Bennée     return (int8_t)helper_ret_ldub_mmu(env, addr, oi, retaddr);
2120eed56642SAlex Bennée }
2121eed56642SAlex Bennée 
2122eed56642SAlex Bennée tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
21239002ffcbSRichard Henderson                                     MemOpIdx oi, uintptr_t retaddr)
2124eed56642SAlex Bennée {
2125eed56642SAlex Bennée     return (int16_t)helper_le_lduw_mmu(env, addr, oi, retaddr);
2126eed56642SAlex Bennée }
2127eed56642SAlex Bennée 
2128eed56642SAlex Bennée tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
21299002ffcbSRichard Henderson                                     MemOpIdx oi, uintptr_t retaddr)
2130eed56642SAlex Bennée {
2131eed56642SAlex Bennée     return (int16_t)helper_be_lduw_mmu(env, addr, oi, retaddr);
2132eed56642SAlex Bennée }
2133eed56642SAlex Bennée 
2134eed56642SAlex Bennée tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
21359002ffcbSRichard Henderson                                     MemOpIdx oi, uintptr_t retaddr)
2136eed56642SAlex Bennée {
2137eed56642SAlex Bennée     return (int32_t)helper_le_ldul_mmu(env, addr, oi, retaddr);
2138eed56642SAlex Bennée }
2139eed56642SAlex Bennée 
2140eed56642SAlex Bennée tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
21419002ffcbSRichard Henderson                                     MemOpIdx oi, uintptr_t retaddr)
2142eed56642SAlex Bennée {
2143eed56642SAlex Bennée     return (int32_t)helper_be_ldul_mmu(env, addr, oi, retaddr);
2144eed56642SAlex Bennée }
2145eed56642SAlex Bennée 
2146eed56642SAlex Bennée /*
2147d03f1408SRichard Henderson  * Load helpers for cpu_ldst.h.
2148d03f1408SRichard Henderson  */
2149d03f1408SRichard Henderson 
2150d03f1408SRichard Henderson static inline uint64_t cpu_load_helper(CPUArchState *env, abi_ptr addr,
2151f83bcecbSRichard Henderson                                        MemOpIdx oi, uintptr_t retaddr,
2152f83bcecbSRichard Henderson                                        FullLoadHelper *full_load)
2153d03f1408SRichard Henderson {
2154d03f1408SRichard Henderson     uint64_t ret;
2155d03f1408SRichard Henderson 
2156d03f1408SRichard Henderson     ret = full_load(env, addr, oi, retaddr);
215737aff087SRichard Henderson     qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
2158d03f1408SRichard Henderson     return ret;
2159d03f1408SRichard Henderson }
2160d03f1408SRichard Henderson 
2161f83bcecbSRichard Henderson uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr, MemOpIdx oi, uintptr_t ra)
2162d03f1408SRichard Henderson {
2163f83bcecbSRichard Henderson     return cpu_load_helper(env, addr, oi, ra, full_ldub_mmu);
2164d03f1408SRichard Henderson }
2165d03f1408SRichard Henderson 
2166f83bcecbSRichard Henderson uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr addr,
2167f83bcecbSRichard Henderson                         MemOpIdx oi, uintptr_t ra)
2168d03f1408SRichard Henderson {
2169f83bcecbSRichard Henderson     return cpu_load_helper(env, addr, oi, ra, full_be_lduw_mmu);
2170d03f1408SRichard Henderson }
2171d03f1408SRichard Henderson 
2172f83bcecbSRichard Henderson uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr addr,
2173f83bcecbSRichard Henderson                         MemOpIdx oi, uintptr_t ra)
2174d03f1408SRichard Henderson {
2175f83bcecbSRichard Henderson     return cpu_load_helper(env, addr, oi, ra, full_be_ldul_mmu);
2176d03f1408SRichard Henderson }
2177d03f1408SRichard Henderson 
2178f83bcecbSRichard Henderson uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr,
2179f83bcecbSRichard Henderson                         MemOpIdx oi, uintptr_t ra)
2180d03f1408SRichard Henderson {
218146697cb9SRichard Henderson     return cpu_load_helper(env, addr, oi, ra, helper_be_ldq_mmu);
2182d03f1408SRichard Henderson }
2183d03f1408SRichard Henderson 
2184f83bcecbSRichard Henderson uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr addr,
2185f83bcecbSRichard Henderson                         MemOpIdx oi, uintptr_t ra)
2186d03f1408SRichard Henderson {
2187f83bcecbSRichard Henderson     return cpu_load_helper(env, addr, oi, ra, full_le_lduw_mmu);
2188d03f1408SRichard Henderson }
2189d03f1408SRichard Henderson 
2190f83bcecbSRichard Henderson uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr addr,
2191f83bcecbSRichard Henderson                         MemOpIdx oi, uintptr_t ra)
2192d03f1408SRichard Henderson {
2193f83bcecbSRichard Henderson     return cpu_load_helper(env, addr, oi, ra, full_le_ldul_mmu);
2194b9e60257SRichard Henderson }
2195b9e60257SRichard Henderson 
2196f83bcecbSRichard Henderson uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr,
2197f83bcecbSRichard Henderson                         MemOpIdx oi, uintptr_t ra)
2198b9e60257SRichard Henderson {
2199f83bcecbSRichard Henderson     return cpu_load_helper(env, addr, oi, ra, helper_le_ldq_mmu);
2200cfe04a4bSRichard Henderson }
2201cfe04a4bSRichard Henderson 
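/*
 * A target helper might invoke these as follows (a sketch; the mmu
 * index and address are whatever the caller's context supplies):
 *
 *     MemOpIdx oi = make_memop_idx(MO_LEUL, cpu_mmu_index(env, false));
 *     uint32_t val = cpu_ldl_le_mmu(env, addr, oi, GETPC());
 *
 * Passing GETPC() as the return address lets tlb_fill() unwind the
 * host state and attribute any fault to the guest instruction that
 * entered the helper.
 */
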
2202d03f1408SRichard Henderson /*
2203eed56642SAlex Bennée  * Store Helpers
2204eed56642SAlex Bennée  */
2205eed56642SAlex Bennée 
2206c6b716cdSRichard Henderson static inline void QEMU_ALWAYS_INLINE
220780d9d1c6SRichard Henderson store_memop(void *haddr, uint64_t val, MemOp op)
220880d9d1c6SRichard Henderson {
220980d9d1c6SRichard Henderson     switch (op) {
221080d9d1c6SRichard Henderson     case MO_UB:
221180d9d1c6SRichard Henderson         stb_p(haddr, val);
221280d9d1c6SRichard Henderson         break;
221380d9d1c6SRichard Henderson     case MO_BEUW:
221480d9d1c6SRichard Henderson         stw_be_p(haddr, val);
221580d9d1c6SRichard Henderson         break;
221680d9d1c6SRichard Henderson     case MO_LEUW:
221780d9d1c6SRichard Henderson         stw_le_p(haddr, val);
221880d9d1c6SRichard Henderson         break;
221980d9d1c6SRichard Henderson     case MO_BEUL:
222080d9d1c6SRichard Henderson         stl_be_p(haddr, val);
222180d9d1c6SRichard Henderson         break;
222280d9d1c6SRichard Henderson     case MO_LEUL:
222380d9d1c6SRichard Henderson         stl_le_p(haddr, val);
222480d9d1c6SRichard Henderson         break;
2225fc313c64SFrédéric Pétrot     case MO_BEUQ:
222680d9d1c6SRichard Henderson         stq_be_p(haddr, val);
222780d9d1c6SRichard Henderson         break;
2228fc313c64SFrédéric Pétrot     case MO_LEUQ:
222980d9d1c6SRichard Henderson         stq_le_p(haddr, val);
223080d9d1c6SRichard Henderson         break;
223180d9d1c6SRichard Henderson     default:
223280d9d1c6SRichard Henderson         qemu_build_not_reached();
223380d9d1c6SRichard Henderson     }
223480d9d1c6SRichard Henderson }
223580d9d1c6SRichard Henderson 
2236f83bcecbSRichard Henderson static void full_stb_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
2237f83bcecbSRichard Henderson                          MemOpIdx oi, uintptr_t retaddr);
2238f83bcecbSRichard Henderson 
22396b8b622eSRichard Henderson static void __attribute__((noinline))
22406b8b622eSRichard Henderson store_helper_unaligned(CPUArchState *env, target_ulong addr, uint64_t val,
22416b8b622eSRichard Henderson                        uintptr_t retaddr, size_t size, uintptr_t mmu_idx,
22426b8b622eSRichard Henderson                        bool big_endian)
22436b8b622eSRichard Henderson {
22446b8b622eSRichard Henderson     const size_t tlb_off = offsetof(CPUTLBEntry, addr_write);
22456b8b622eSRichard Henderson     uintptr_t index, index2;
22466b8b622eSRichard Henderson     CPUTLBEntry *entry, *entry2;
2247b0f650f0SIlya Leoshkevich     target_ulong page1, page2, tlb_addr, tlb_addr2;
22489002ffcbSRichard Henderson     MemOpIdx oi;
22496b8b622eSRichard Henderson     size_t size2;
22506b8b622eSRichard Henderson     int i;
22516b8b622eSRichard Henderson 
22526b8b622eSRichard Henderson     /*
22536b8b622eSRichard Henderson      * Ensure the second page is in the TLB.  Note that the first page
22546b8b622eSRichard Henderson      * is already guaranteed to be filled, and that the second page
2255b0f650f0SIlya Leoshkevich      * cannot evict the first.  An exception to this rule is PAGE_WRITE_INV
2256b0f650f0SIlya Leoshkevich      * handling: the first page could have evicted itself.
22576b8b622eSRichard Henderson      */
2258b0f650f0SIlya Leoshkevich     page1 = addr & TARGET_PAGE_MASK;
22596b8b622eSRichard Henderson     page2 = (addr + size) & TARGET_PAGE_MASK;
22606b8b622eSRichard Henderson     size2 = (addr + size) & ~TARGET_PAGE_MASK;
22616b8b622eSRichard Henderson     index2 = tlb_index(env, mmu_idx, page2);
22626b8b622eSRichard Henderson     entry2 = tlb_entry(env, mmu_idx, page2);
22636b8b622eSRichard Henderson 
22646b8b622eSRichard Henderson     tlb_addr2 = tlb_addr_write(entry2);
2265b0f650f0SIlya Leoshkevich     if (page1 != page2 && !tlb_hit_page(tlb_addr2, page2)) {
22666b8b622eSRichard Henderson         if (!victim_tlb_hit(env, mmu_idx, index2, tlb_off, page2)) {
22676b8b622eSRichard Henderson             tlb_fill(env_cpu(env), page2, size2, MMU_DATA_STORE,
22686b8b622eSRichard Henderson                      mmu_idx, retaddr);
22696b8b622eSRichard Henderson             index2 = tlb_index(env, mmu_idx, page2);
22706b8b622eSRichard Henderson             entry2 = tlb_entry(env, mmu_idx, page2);
22716b8b622eSRichard Henderson         }
22726b8b622eSRichard Henderson         tlb_addr2 = tlb_addr_write(entry2);
22736b8b622eSRichard Henderson     }
22746b8b622eSRichard Henderson 
22756b8b622eSRichard Henderson     index = tlb_index(env, mmu_idx, addr);
22766b8b622eSRichard Henderson     entry = tlb_entry(env, mmu_idx, addr);
22776b8b622eSRichard Henderson     tlb_addr = tlb_addr_write(entry);
22786b8b622eSRichard Henderson 
22796b8b622eSRichard Henderson     /*
22806b8b622eSRichard Henderson      * Handle watchpoints.  Since this may trap, all checks
22816b8b622eSRichard Henderson      * must happen before any store.
22826b8b622eSRichard Henderson      */
22836b8b622eSRichard Henderson     if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
22846b8b622eSRichard Henderson         cpu_check_watchpoint(env_cpu(env), addr, size - size2,
228525d3ec58SRichard Henderson                              env_tlb(env)->d[mmu_idx].fulltlb[index].attrs,
22866b8b622eSRichard Henderson                              BP_MEM_WRITE, retaddr);
22876b8b622eSRichard Henderson     }
22886b8b622eSRichard Henderson     if (unlikely(tlb_addr2 & TLB_WATCHPOINT)) {
22896b8b622eSRichard Henderson         cpu_check_watchpoint(env_cpu(env), page2, size2,
229025d3ec58SRichard Henderson                              env_tlb(env)->d[mmu_idx].fulltlb[index2].attrs,
22916b8b622eSRichard Henderson                              BP_MEM_WRITE, retaddr);
22926b8b622eSRichard Henderson     }
22936b8b622eSRichard Henderson 
22946b8b622eSRichard Henderson     /*
22956b8b622eSRichard Henderson      * XXX: not efficient, but simple.
22966b8b622eSRichard Henderson      * This loop must go in the forward direction to avoid issues
22976b8b622eSRichard Henderson      * with self-modifying code on 64-bit Windows.
22986b8b622eSRichard Henderson      */
22996b8b622eSRichard Henderson     oi = make_memop_idx(MO_UB, mmu_idx);
23006b8b622eSRichard Henderson     if (big_endian) {
23016b8b622eSRichard Henderson         for (i = 0; i < size; ++i) {
23026b8b622eSRichard Henderson             /* Big-endian extract.  */
23036b8b622eSRichard Henderson             uint8_t val8 = val >> (((size - 1) * 8) - (i * 8));
2304f83bcecbSRichard Henderson             full_stb_mmu(env, addr + i, val8, oi, retaddr);
23056b8b622eSRichard Henderson         }
23066b8b622eSRichard Henderson     } else {
23076b8b622eSRichard Henderson         for (i = 0; i < size; ++i) {
23086b8b622eSRichard Henderson             /* Little-endian extract.  */
23096b8b622eSRichard Henderson             uint8_t val8 = val >> (i * 8);
2310f83bcecbSRichard Henderson             full_stb_mmu(env, addr + i, val8, oi, retaddr);
23116b8b622eSRichard Henderson         }
23126b8b622eSRichard Henderson     }
23136b8b622eSRichard Henderson }
23146b8b622eSRichard Henderson 
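/*
 * Worked example of the extract loops above: storing the 32-bit value
 * 0x11223344 big-endian writes 0x11 at addr + 0 (val >> 24), then
 * 0x22, 0x33, 0x44 at the following bytes; the little-endian loop
 * writes 0x44 at addr + 0 (val >> 0) instead.
 */
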
231580d9d1c6SRichard Henderson static inline void QEMU_ALWAYS_INLINE
23164601f8d1SRichard Henderson store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
23179002ffcbSRichard Henderson              MemOpIdx oi, uintptr_t retaddr, MemOp op)
2318eed56642SAlex Bennée {
2319eed56642SAlex Bennée     const size_t tlb_off = offsetof(CPUTLBEntry, addr_write);
2320b826044fSRichard Henderson     const unsigned a_bits = get_alignment_bits(get_memop(oi));
2321b826044fSRichard Henderson     const size_t size = memop_size(op);
2322b826044fSRichard Henderson     uintptr_t mmu_idx = get_mmuidx(oi);
2323b826044fSRichard Henderson     uintptr_t index;
2324b826044fSRichard Henderson     CPUTLBEntry *entry;
2325b826044fSRichard Henderson     target_ulong tlb_addr;
2326eed56642SAlex Bennée     void *haddr;
2327b826044fSRichard Henderson 
2328b826044fSRichard Henderson     tcg_debug_assert(mmu_idx < NB_MMU_MODES);
2329eed56642SAlex Bennée 
2330eed56642SAlex Bennée     /* Handle CPU specific unaligned behaviour */
2331eed56642SAlex Bennée     if (addr & ((1 << a_bits) - 1)) {
233229a0af61SRichard Henderson         cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
2333eed56642SAlex Bennée                              mmu_idx, retaddr);
2334eed56642SAlex Bennée     }
2335eed56642SAlex Bennée 
2336b826044fSRichard Henderson     index = tlb_index(env, mmu_idx, addr);
2337b826044fSRichard Henderson     entry = tlb_entry(env, mmu_idx, addr);
2338b826044fSRichard Henderson     tlb_addr = tlb_addr_write(entry);
2339b826044fSRichard Henderson 
2340eed56642SAlex Bennée     /* If the TLB entry is for a different page, reload and try again.  */
2341eed56642SAlex Bennée     if (!tlb_hit(tlb_addr, addr)) {
2342eed56642SAlex Bennée         if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
2343eed56642SAlex Bennée             addr & TARGET_PAGE_MASK)) {
234429a0af61SRichard Henderson             tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE,
2345eed56642SAlex Bennée                      mmu_idx, retaddr);
2346eed56642SAlex Bennée             index = tlb_index(env, mmu_idx, addr);
2347eed56642SAlex Bennée             entry = tlb_entry(env, mmu_idx, addr);
2348eed56642SAlex Bennée         }
2349eed56642SAlex Bennée         tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK;
2350eed56642SAlex Bennée     }
2351eed56642SAlex Bennée 
235250b107c5SRichard Henderson     /* Handle anything that isn't just a straight memory access.  */
2353eed56642SAlex Bennée     if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
235425d3ec58SRichard Henderson         CPUTLBEntryFull *full;
23555b87b3e6SRichard Henderson         bool need_swap;
235650b107c5SRichard Henderson 
235750b107c5SRichard Henderson         /* For anything that is unaligned, recurse through byte stores.  */
2358eed56642SAlex Bennée         if ((addr & (size - 1)) != 0) {
2359eed56642SAlex Bennée             goto do_unaligned_access;
2360eed56642SAlex Bennée         }
236150b107c5SRichard Henderson 
236225d3ec58SRichard Henderson         full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
236350b107c5SRichard Henderson 
236450b107c5SRichard Henderson         /* Handle watchpoints.  */
236550b107c5SRichard Henderson         if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
236650b107c5SRichard Henderson             /* On watchpoint hit, this will longjmp out.  */
236750b107c5SRichard Henderson             cpu_check_watchpoint(env_cpu(env), addr, size,
236825d3ec58SRichard Henderson                                  full->attrs, BP_MEM_WRITE, retaddr);
23695b87b3e6SRichard Henderson         }
237050b107c5SRichard Henderson 
23715b87b3e6SRichard Henderson         need_swap = size > 1 && (tlb_addr & TLB_BSWAP);
237250b107c5SRichard Henderson 
237350b107c5SRichard Henderson         /* Handle I/O access.  */
237408565552SRichard Henderson         if (tlb_addr & TLB_MMIO) {
237525d3ec58SRichard Henderson             io_writex(env, full, mmu_idx, val, addr, retaddr,
23765b87b3e6SRichard Henderson                       op ^ (need_swap * MO_BSWAP));
23775b87b3e6SRichard Henderson             return;
23785b87b3e6SRichard Henderson         }
23795b87b3e6SRichard Henderson 
23807b0d792cSRichard Henderson         /* Ignore writes to ROM.  */
23817b0d792cSRichard Henderson         if (unlikely(tlb_addr & TLB_DISCARD_WRITE)) {
23827b0d792cSRichard Henderson             return;
23837b0d792cSRichard Henderson         }
23847b0d792cSRichard Henderson 
238508565552SRichard Henderson         /* Handle clean RAM pages.  */
238608565552SRichard Henderson         if (tlb_addr & TLB_NOTDIRTY) {
238725d3ec58SRichard Henderson             notdirty_write(env_cpu(env), addr, size, full, retaddr);
238808565552SRichard Henderson         }
238908565552SRichard Henderson 
2390707526adSRichard Henderson         haddr = (void *)((uintptr_t)addr + entry->addend);
239108565552SRichard Henderson 
23925b87b3e6SRichard Henderson         /*
23935b87b3e6SRichard Henderson          * Keep these two store_memop separate to ensure that the compiler
23945b87b3e6SRichard Henderson          * is able to fold the entire function to a single instruction.
23955b87b3e6SRichard Henderson          * There is a build-time assert inside to remind you of this.  ;-)
23965b87b3e6SRichard Henderson          */
23975b87b3e6SRichard Henderson         if (unlikely(need_swap)) {
23985b87b3e6SRichard Henderson             store_memop(haddr, val, op ^ MO_BSWAP);
23995b87b3e6SRichard Henderson         } else {
24005b87b3e6SRichard Henderson             store_memop(haddr, val, op);
24015b87b3e6SRichard Henderson         }
2402eed56642SAlex Bennée         return;
2403eed56642SAlex Bennée     }
2404eed56642SAlex Bennée 
2405eed56642SAlex Bennée     /* Handle slow unaligned access (it spans two pages or IO).  */
2406eed56642SAlex Bennée     if (size > 1
2407eed56642SAlex Bennée         && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
2408eed56642SAlex Bennée                      >= TARGET_PAGE_SIZE)) {
2409eed56642SAlex Bennée     do_unaligned_access:
24106b8b622eSRichard Henderson         store_helper_unaligned(env, addr, val, retaddr, size,
24116b8b622eSRichard Henderson                                mmu_idx, memop_big_endian(op));
2412eed56642SAlex Bennée         return;
2413eed56642SAlex Bennée     }
2414eed56642SAlex Bennée 
2415eed56642SAlex Bennée     haddr = (void *)((uintptr_t)addr + entry->addend);
241680d9d1c6SRichard Henderson     store_memop(haddr, val, op);
2417eed56642SAlex Bennée }
2418eed56642SAlex Bennée 
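/*
 * A concrete case of the need_swap handling above: a MO_LEUL store
 * that hits a page marked TLB_BSWAP is performed as
 * store_memop(haddr, val, MO_LEUL ^ MO_BSWAP), i.e. MO_BEUL, so the
 * bytes land in memory in the reverse of normal little-endian order.
 */
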
2419f83bcecbSRichard Henderson static void __attribute__((noinline))
2420f83bcecbSRichard Henderson full_stb_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
24219002ffcbSRichard Henderson              MemOpIdx oi, uintptr_t retaddr)
2422eed56642SAlex Bennée {
2423f83bcecbSRichard Henderson     validate_memop(oi, MO_UB);
2424be5c4787STony Nguyen     store_helper(env, addr, val, oi, retaddr, MO_UB);
2425eed56642SAlex Bennée }
2426eed56642SAlex Bennée 
2427f83bcecbSRichard Henderson void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
2428f83bcecbSRichard Henderson                         MemOpIdx oi, uintptr_t retaddr)
2429f83bcecbSRichard Henderson {
2430f83bcecbSRichard Henderson     full_stb_mmu(env, addr, val, oi, retaddr);
2431f83bcecbSRichard Henderson }
2432f83bcecbSRichard Henderson 
2433f83bcecbSRichard Henderson static void full_le_stw_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
2434f83bcecbSRichard Henderson                             MemOpIdx oi, uintptr_t retaddr)
2435f83bcecbSRichard Henderson {
2436f83bcecbSRichard Henderson     validate_memop(oi, MO_LEUW);
2437f83bcecbSRichard Henderson     store_helper(env, addr, val, oi, retaddr, MO_LEUW);
2438f83bcecbSRichard Henderson }
2439f83bcecbSRichard Henderson 
2440fc1bc777SRichard Henderson void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
24419002ffcbSRichard Henderson                        MemOpIdx oi, uintptr_t retaddr)
2442eed56642SAlex Bennée {
2443f83bcecbSRichard Henderson     full_le_stw_mmu(env, addr, val, oi, retaddr);
2444f83bcecbSRichard Henderson }
2445f83bcecbSRichard Henderson 
2446f83bcecbSRichard Henderson static void full_be_stw_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
2447f83bcecbSRichard Henderson                             MemOpIdx oi, uintptr_t retaddr)
2448f83bcecbSRichard Henderson {
2449f83bcecbSRichard Henderson     validate_memop(oi, MO_BEUW);
2450f83bcecbSRichard Henderson     store_helper(env, addr, val, oi, retaddr, MO_BEUW);
2451eed56642SAlex Bennée }
2452eed56642SAlex Bennée 
2453fc1bc777SRichard Henderson void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
24549002ffcbSRichard Henderson                        MemOpIdx oi, uintptr_t retaddr)
2455eed56642SAlex Bennée {
2456f83bcecbSRichard Henderson     full_be_stw_mmu(env, addr, val, oi, retaddr);
2457f83bcecbSRichard Henderson }
2458f83bcecbSRichard Henderson 
2459f83bcecbSRichard Henderson static void full_le_stl_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
2460f83bcecbSRichard Henderson                             MemOpIdx oi, uintptr_t retaddr)
2461f83bcecbSRichard Henderson {
2462f83bcecbSRichard Henderson     validate_memop(oi, MO_LEUL);
2463f83bcecbSRichard Henderson     store_helper(env, addr, val, oi, retaddr, MO_LEUL);
2464eed56642SAlex Bennée }
2465eed56642SAlex Bennée 
2466fc1bc777SRichard Henderson void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
24679002ffcbSRichard Henderson                        MemOpIdx oi, uintptr_t retaddr)
2468eed56642SAlex Bennée {
2469f83bcecbSRichard Henderson     full_le_stl_mmu(env, addr, val, oi, retaddr);
2470f83bcecbSRichard Henderson }
2471f83bcecbSRichard Henderson 
2472f83bcecbSRichard Henderson static void full_be_stl_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
2473f83bcecbSRichard Henderson                             MemOpIdx oi, uintptr_t retaddr)
2474f83bcecbSRichard Henderson {
2475f83bcecbSRichard Henderson     validate_memop(oi, MO_BEUL);
2476f83bcecbSRichard Henderson     store_helper(env, addr, val, oi, retaddr, MO_BEUL);
2477eed56642SAlex Bennée }
2478eed56642SAlex Bennée 
2479fc1bc777SRichard Henderson void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
24809002ffcbSRichard Henderson                        MemOpIdx oi, uintptr_t retaddr)
2481eed56642SAlex Bennée {
2482f83bcecbSRichard Henderson     full_be_stl_mmu(env, addr, val, oi, retaddr);
2483eed56642SAlex Bennée }
2484eed56642SAlex Bennée 
2485fc1bc777SRichard Henderson void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
24869002ffcbSRichard Henderson                        MemOpIdx oi, uintptr_t retaddr)
2487eed56642SAlex Bennée {
2488fc313c64SFrédéric Pétrot     validate_memop(oi, MO_LEUQ);
2489fc313c64SFrédéric Pétrot     store_helper(env, addr, val, oi, retaddr, MO_LEUQ);
2490eed56642SAlex Bennée }
2491eed56642SAlex Bennée 
2492fc1bc777SRichard Henderson void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
24939002ffcbSRichard Henderson                        MemOpIdx oi, uintptr_t retaddr)
2494eed56642SAlex Bennée {
2495fc313c64SFrédéric Pétrot     validate_memop(oi, MO_BEUQ);
2496fc313c64SFrédéric Pétrot     store_helper(env, addr, val, oi, retaddr, MO_BEUQ);
2497eed56642SAlex Bennée }
2498d9bb58e5SYang Zhong 
2499d03f1408SRichard Henderson /*
2500d03f1408SRichard Henderson  * Store Helpers for cpu_ldst.h
2501d03f1408SRichard Henderson  */
2502d03f1408SRichard Henderson 
2503f83bcecbSRichard Henderson typedef void FullStoreHelper(CPUArchState *env, target_ulong addr,
2504f83bcecbSRichard Henderson                              uint64_t val, MemOpIdx oi, uintptr_t retaddr);
2505f83bcecbSRichard Henderson 
2506f83bcecbSRichard Henderson static inline void cpu_store_helper(CPUArchState *env, target_ulong addr,
2507f83bcecbSRichard Henderson                                     uint64_t val, MemOpIdx oi, uintptr_t ra,
2508f83bcecbSRichard Henderson                                     FullStoreHelper *full_store)
2509d03f1408SRichard Henderson {
2510f83bcecbSRichard Henderson     full_store(env, addr, val, oi, ra);
251137aff087SRichard Henderson     qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
2512d03f1408SRichard Henderson }
2513d03f1408SRichard Henderson 
2514f83bcecbSRichard Henderson void cpu_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
2515f83bcecbSRichard Henderson                  MemOpIdx oi, uintptr_t retaddr)
2516d03f1408SRichard Henderson {
2517f83bcecbSRichard Henderson     cpu_store_helper(env, addr, val, oi, retaddr, full_stb_mmu);
2518d03f1408SRichard Henderson }
2519d03f1408SRichard Henderson 
2520f83bcecbSRichard Henderson void cpu_stw_be_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
2521f83bcecbSRichard Henderson                     MemOpIdx oi, uintptr_t retaddr)
2522d03f1408SRichard Henderson {
2523f83bcecbSRichard Henderson     cpu_store_helper(env, addr, val, oi, retaddr, full_be_stw_mmu);
2524d03f1408SRichard Henderson }
2525d03f1408SRichard Henderson 
2526f83bcecbSRichard Henderson void cpu_stl_be_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
2527f83bcecbSRichard Henderson                     MemOpIdx oi, uintptr_t retaddr)
2528d03f1408SRichard Henderson {
2529f83bcecbSRichard Henderson     cpu_store_helper(env, addr, val, oi, retaddr, full_be_stl_mmu);
2530d03f1408SRichard Henderson }
2531d03f1408SRichard Henderson 
2532f83bcecbSRichard Henderson void cpu_stq_be_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
2533f83bcecbSRichard Henderson                     MemOpIdx oi, uintptr_t retaddr)
2534d03f1408SRichard Henderson {
2535f83bcecbSRichard Henderson     cpu_store_helper(env, addr, val, oi, retaddr, helper_be_stq_mmu);
2536b9e60257SRichard Henderson }
2537b9e60257SRichard Henderson 
2538f83bcecbSRichard Henderson void cpu_stw_le_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
2539f83bcecbSRichard Henderson                     MemOpIdx oi, uintptr_t retaddr)
2540b9e60257SRichard Henderson {
2541f83bcecbSRichard Henderson     cpu_store_helper(env, addr, val, oi, retaddr, full_le_stw_mmu);
2542b9e60257SRichard Henderson }
2543b9e60257SRichard Henderson 
2544f83bcecbSRichard Henderson void cpu_stl_le_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
2545f83bcecbSRichard Henderson                     MemOpIdx oi, uintptr_t retaddr)
2546b9e60257SRichard Henderson {
2547f83bcecbSRichard Henderson     cpu_store_helper(env, addr, val, oi, retaddr, full_le_stl_mmu);
2548b9e60257SRichard Henderson }
2549b9e60257SRichard Henderson 
2550f83bcecbSRichard Henderson void cpu_stq_le_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
2551f83bcecbSRichard Henderson                     MemOpIdx oi, uintptr_t retaddr)
2552b9e60257SRichard Henderson {
2553f83bcecbSRichard Henderson     cpu_store_helper(env, addr, val, oi, retaddr, helper_le_stq_mmu);
2554d03f1408SRichard Henderson }
2555d03f1408SRichard Henderson 
2556f83bcecbSRichard Henderson #include "ldst_common.c.inc"
2557cfe04a4bSRichard Henderson 
2558be9568b4SRichard Henderson /*
2559be9568b4SRichard Henderson  * First set of functions passes in OI and RETADDR.
2560be9568b4SRichard Henderson  * This makes them callable from other helpers.
2561be9568b4SRichard Henderson  */
2562d9bb58e5SYang Zhong 
2563d9bb58e5SYang Zhong #define ATOMIC_NAME(X) \
2564be9568b4SRichard Henderson     glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)
2565a754f7f3SRichard Henderson 
2566707526adSRichard Henderson #define ATOMIC_MMU_CLEANUP
2567d9bb58e5SYang Zhong 
2568139c1837SPaolo Bonzini #include "atomic_common.c.inc"
2569d9bb58e5SYang Zhong 
2570d9bb58e5SYang Zhong #define DATA_SIZE 1
2571d9bb58e5SYang Zhong #include "atomic_template.h"
2572d9bb58e5SYang Zhong 
2573d9bb58e5SYang Zhong #define DATA_SIZE 2
2574d9bb58e5SYang Zhong #include "atomic_template.h"
2575d9bb58e5SYang Zhong 
2576d9bb58e5SYang Zhong #define DATA_SIZE 4
2577d9bb58e5SYang Zhong #include "atomic_template.h"
2578d9bb58e5SYang Zhong 
2579d9bb58e5SYang Zhong #ifdef CONFIG_ATOMIC64
2580d9bb58e5SYang Zhong #define DATA_SIZE 8
2581d9bb58e5SYang Zhong #include "atomic_template.h"
2582d9bb58e5SYang Zhong #endif
2583d9bb58e5SYang Zhong 
2584e6cd4bb5SRichard Henderson #if HAVE_CMPXCHG128 || HAVE_ATOMIC128
2585d9bb58e5SYang Zhong #define DATA_SIZE 16
2586d9bb58e5SYang Zhong #include "atomic_template.h"
2587d9bb58e5SYang Zhong #endif
2588d9bb58e5SYang Zhong 
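/*
 * Each DATA_SIZE expansion above instantiates the full set of atomic
 * helpers for that width via ATOMIC_NAME(); e.g. DATA_SIZE 2 yields
 * cpu_atomic_cmpxchgw_le_mmu() and cpu_atomic_cmpxchgw_be_mmu(),
 * among others.
 */
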
2589d9bb58e5SYang Zhong /* Code access functions.  */
2590d9bb58e5SYang Zhong 
2591fc4120a3SRichard Henderson static uint64_t full_ldub_code(CPUArchState *env, target_ulong addr,
25929002ffcbSRichard Henderson                                MemOpIdx oi, uintptr_t retaddr)
25932dd92606SRichard Henderson {
2594fc4120a3SRichard Henderson     return load_helper(env, addr, oi, retaddr, MO_8, true, full_ldub_code);
25952dd92606SRichard Henderson }
25962dd92606SRichard Henderson 
2597fc4120a3SRichard Henderson uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr)
2598eed56642SAlex Bennée {
25999002ffcbSRichard Henderson     MemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(env, true));
2600fc4120a3SRichard Henderson     return full_ldub_code(env, addr, oi, 0);
26012dd92606SRichard Henderson }
26022dd92606SRichard Henderson 
2603fc4120a3SRichard Henderson static uint64_t full_lduw_code(CPUArchState *env, target_ulong addr,
26049002ffcbSRichard Henderson                                MemOpIdx oi, uintptr_t retaddr)
26054cef72d0SAlex Bennée {
2606fc4120a3SRichard Henderson     return load_helper(env, addr, oi, retaddr, MO_TEUW, true, full_lduw_code);
26074cef72d0SAlex Bennée }
26084cef72d0SAlex Bennée 
2609fc4120a3SRichard Henderson uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr)
26102dd92606SRichard Henderson {
26119002ffcbSRichard Henderson     MemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(env, true));
2612fc4120a3SRichard Henderson     return full_lduw_code(env, addr, oi, 0);
2613eed56642SAlex Bennée }
2614d9bb58e5SYang Zhong 
2615fc4120a3SRichard Henderson static uint64_t full_ldl_code(CPUArchState *env, target_ulong addr,
26169002ffcbSRichard Henderson                               MemOpIdx oi, uintptr_t retaddr)
2617eed56642SAlex Bennée {
2618fc4120a3SRichard Henderson     return load_helper(env, addr, oi, retaddr, MO_TEUL, true, full_ldl_code);
26192dd92606SRichard Henderson }
26202dd92606SRichard Henderson 
2621fc4120a3SRichard Henderson uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr)
26224cef72d0SAlex Bennée {
26239002ffcbSRichard Henderson     MemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, true));
2624fc4120a3SRichard Henderson     return full_ldl_code(env, addr, oi, 0);
26254cef72d0SAlex Bennée }
26264cef72d0SAlex Bennée 
2627fc4120a3SRichard Henderson static uint64_t full_ldq_code(CPUArchState *env, target_ulong addr,
26289002ffcbSRichard Henderson                               MemOpIdx oi, uintptr_t retaddr)
26292dd92606SRichard Henderson {
2630fc313c64SFrédéric Pétrot     return load_helper(env, addr, oi, retaddr, MO_TEUQ, true, full_ldq_code);
2631eed56642SAlex Bennée }
2632d9bb58e5SYang Zhong 
2633fc4120a3SRichard Henderson uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr)
2634eed56642SAlex Bennée {
2635fc313c64SFrédéric Pétrot     MemOpIdx oi = make_memop_idx(MO_TEUQ, cpu_mmu_index(env, true));
2636fc4120a3SRichard Henderson     return full_ldq_code(env, addr, oi, 0);
2637eed56642SAlex Bennée }
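
/*
 * These code-access routines back instruction fetch during
 * translation.  A target decoder might fetch an opcode with,
 * e.g. (illustrative, not from this file):
 *
 *     uint32_t insn = cpu_ldl_code(env, s->base.pc_next);
 *
 * The zero retaddr reflects that these are called from the
 * translator rather than from generated code, so there is no host
 * return address to unwind on a TLB fill.
 */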
2638