/*
 *  Common CPU TLB handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "hw/core/tcg-cpu-ops.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
#include "qemu/error-report.h"
#include "exec/log.h"
#include "exec/helper-proto-common.h"
#include "qemu/atomic.h"
#include "qemu/atomic128.h"
#include "exec/translate-all.h"
#include "trace.h"
#include "tb-hash.h"
#include "internal.h"
#ifdef CONFIG_PLUGIN
#include "qemu/plugin-memory.h"
#endif
#include "tcg/tcg-ldst.h"
#include "tcg/oversized-guest.h"

/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif

#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)

#define assert_cpu_is_self(cpu) do {                              \
        if (DEBUG_TLB_GATE) {                                     \
            g_assert(!(cpu)->created || qemu_cpu_is_self(cpu));   \
        }                                                         \
    } while (0)

/* run_on_cpu_data.target_ptr should always be big enough for a
 * target_ulong even on 32 bit builds */
QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));

/* We currently can't handle more than 16 bits in the MMUIDX bitmask. */
QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
#define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)
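/*
 * Illustrative note: each bit of an idxmap names one MMU mode, so e.g.
 * (1 << mmu_idx_a) | (1 << mmu_idx_b) requests a flush of exactly those
 * two modes, while ALL_MMUIDX_BITS requests all of them.
 */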

static inline size_t tlb_n_entries(CPUTLBDescFast *fast)
{
    return (fast->mask >> CPU_TLB_ENTRY_BITS) + 1;
}

static inline size_t sizeof_tlb(CPUTLBDescFast *fast)
{
    return fast->mask + (1 << CPU_TLB_ENTRY_BITS);
}
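
/*
 * Worked example of the mask encoding (numbers illustrative): a TLB of
 * 256 entries is stored as fast->mask = (256 - 1) << CPU_TLB_ENTRY_BITS,
 * so tlb_n_entries() recovers (mask >> CPU_TLB_ENTRY_BITS) + 1 == 256,
 * and sizeof_tlb() gives 256 << CPU_TLB_ENTRY_BITS bytes.
 */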

static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
                             size_t max_entries)
{
    desc->window_begin_ns = ns;
    desc->window_max_entries = max_entries;
}

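/*
 * Note: tb_jmp_cache_hash_page() maps a page to a run of TB_JMP_PAGE_SIZE
 * consecutive buckets, so clearing that run below drops every cached TB
 * whose hash falls within the flushed page.
 */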
static void tb_jmp_cache_clear_page(CPUState *cpu, vaddr page_addr)
{
    CPUJumpCache *jc = cpu->tb_jmp_cache;
    int i, i0;

    if (unlikely(!jc)) {
        return;
    }

    i0 = tb_jmp_cache_hash_page(page_addr);
    for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
        qatomic_set(&jc->array[i0 + i].tb, NULL);
    }
}

/**
 * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
 * @desc: The CPUTLBDesc portion of the TLB
 * @fast: The CPUTLBDescFast portion of the same TLB
 *
 * Called with tlb_lock held.
 *
 * We have two main constraints when resizing a TLB: (1) we only resize it
 * on a TLB flush (otherwise we'd have to take a perf hit by either rehashing
 * the array or unnecessarily flushing it), which means we do not control how
 * frequently the resizing can occur; (2) we don't have access to the guest's
 * future scheduling decisions, and therefore have to decide the magnitude of
 * the resize based on past observations.
 *
 * In general, a memory-hungry process can benefit greatly from an appropriately
 * sized TLB, since a guest TLB miss is very expensive. This doesn't mean that
 * we just have to make the TLB as large as possible; while an oversized TLB
 * results in minimal TLB miss rates, it also takes longer to be flushed
 * (flushes can be _very_ frequent), and the reduced locality can also hurt
 * performance.
 *
 * To achieve near-optimal performance for all kinds of workloads, we:
 *
 * 1. Aggressively increase the size of the TLB when the use rate of the
 * TLB being flushed is high, since it is likely that in the near future this
 * memory-hungry process will execute again, and its memory hungriness will
 * probably be similar.
 *
 * 2. Slowly reduce the size of the TLB as the use rate declines over a
 * reasonably large time window. The rationale is that if in such a time window
 * we have not observed a high TLB use rate, it is likely that we won't observe
 * it in the near future. In that case, once a time window expires we downsize
 * the TLB to match the maximum use rate observed in the window.
 *
 * 3. Try to keep the maximum use rate in a time window in the 30-70% range,
 * since in that range performance is likely near-optimal. Recall that the TLB
 * is direct mapped, so we want the use rate to be low (or at least not too
 * high), since otherwise we are likely to have a significant amount of
 * conflict misses.
 */
static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast,
                                  int64_t now)
{
    size_t old_size = tlb_n_entries(fast);
    size_t rate;
    size_t new_size = old_size;
    int64_t window_len_ms = 100;
    int64_t window_len_ns = window_len_ms * 1000 * 1000;
    bool window_expired = now > desc->window_begin_ns + window_len_ns;

    if (desc->n_used_entries > desc->window_max_entries) {
        desc->window_max_entries = desc->n_used_entries;
    }
    rate = desc->window_max_entries * 100 / old_size;

    if (rate > 70) {
        new_size = MIN(old_size << 1, 1 << CPU_TLB_DYN_MAX_BITS);
    } else if (rate < 30 && window_expired) {
        size_t ceil = pow2ceil(desc->window_max_entries);
        size_t expected_rate = desc->window_max_entries * 100 / ceil;

        /*
         * Avoid undersizing when the max number of entries seen is just below
         * a pow2. For instance, if max_entries == 1025, the expected use rate
         * would be 1025/2048==50%. However, if max_entries == 1023, we'd get
         * 1023/1024==99.9% use rate, so we'd likely end up doubling the size
         * later. Thus, make sure that the expected use rate remains below 70%.
         * (and since we double the size, that means the lowest rate we'd
         * expect to get is 35%, which is still in the 30-70% range where
         * we consider that the size is appropriate.)
         */
        if (expected_rate > 70) {
            ceil *= 2;
        }
        new_size = MAX(ceil, 1 << CPU_TLB_DYN_MIN_BITS);
    }

    if (new_size == old_size) {
        if (window_expired) {
            tlb_window_reset(desc, now, desc->n_used_entries);
        }
        return;
    }

    g_free(fast->table);
    g_free(desc->fulltlb);

    tlb_window_reset(desc, now, 0);
    /* desc->n_used_entries is cleared by the caller */
    fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
    fast->table = g_try_new(CPUTLBEntry, new_size);
    desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size);

    /*
     * If the allocations fail, try smaller sizes. We just freed some
     * memory, so going back to half of new_size has a good chance of working.
     * Increased memory pressure elsewhere in the system might cause the
     * allocations to fail though, so we progressively reduce the allocation
     * size, aborting if we cannot even allocate the smallest TLB we support.
     */
    while (fast->table == NULL || desc->fulltlb == NULL) {
        if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
            error_report("%s: %s", __func__, strerror(errno));
            abort();
        }
        new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS);
        fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;

        g_free(fast->table);
        g_free(desc->fulltlb);
        fast->table = g_try_new(CPUTLBEntry, new_size);
        desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size);
    }
}
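
/*
 * Worked example (numbers illustrative): with old_size == 1024 and
 * window_max_entries == 800, rate == 78, so the TLB doubles to 2048
 * (capped at 1 << CPU_TLB_DYN_MAX_BITS).  With window_max_entries == 150
 * and the 100 ms window expired, rate == 14 and expected_rate ==
 * 150 * 100 / 256 == 58, so the TLB shrinks to pow2ceil(150) == 256.
 */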

static void tlb_mmu_flush_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast)
{
    desc->n_used_entries = 0;
    desc->large_page_addr = -1;
    desc->large_page_mask = -1;
    desc->vindex = 0;
    memset(fast->table, -1, sizeof_tlb(fast));
    memset(desc->vtable, -1, sizeof(desc->vtable));
}

static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx,
                                        int64_t now)
{
    CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
    CPUTLBDescFast *fast = &env_tlb(env)->f[mmu_idx];

    tlb_mmu_resize_locked(desc, fast, now);
    tlb_mmu_flush_locked(desc, fast);
}

static void tlb_mmu_init(CPUTLBDesc *desc, CPUTLBDescFast *fast, int64_t now)
{
    size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;

    tlb_window_reset(desc, now, 0);
    desc->n_used_entries = 0;
    fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
    fast->table = g_new(CPUTLBEntry, n_entries);
    desc->fulltlb = g_new(CPUTLBEntryFull, n_entries);
    tlb_mmu_flush_locked(desc, fast);
}
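
/*
 * Note: a freshly initialized TLB starts at 1 << CPU_TLB_DYN_DEFAULT_BITS
 * entries; subsequent flushes let tlb_mmu_resize_locked() grow or shrink
 * it between 1 << CPU_TLB_DYN_MIN_BITS and 1 << CPU_TLB_DYN_MAX_BITS.
 */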

static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
{
    env_tlb(env)->d[mmu_idx].n_used_entries++;
}

static inline void tlb_n_used_entries_dec(CPUArchState *env, uintptr_t mmu_idx)
{
    env_tlb(env)->d[mmu_idx].n_used_entries--;
}

void tlb_init(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;
    int64_t now = get_clock_realtime();
    int i;

    qemu_spin_init(&env_tlb(env)->c.lock);

    /* All tlbs are initialized flushed. */
    env_tlb(env)->c.dirty = 0;

    for (i = 0; i < NB_MMU_MODES; i++) {
        tlb_mmu_init(&env_tlb(env)->d[i], &env_tlb(env)->f[i], now);
    }
}

void tlb_destroy(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;
    int i;

    qemu_spin_destroy(&env_tlb(env)->c.lock);
    for (i = 0; i < NB_MMU_MODES; i++) {
        CPUTLBDesc *desc = &env_tlb(env)->d[i];
        CPUTLBDescFast *fast = &env_tlb(env)->f[i];

        g_free(fast->table);
        g_free(desc->fulltlb);
    }
}

/* flush_all_helper: run fn across all cpus other than src
 *
 * Callers that need a synchronisation point queue the src cpu's helper
 * separately as "safe" work via async_safe_run_on_cpu, so that all
 * queued work is finished before execution starts again.
 */
static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
                             run_on_cpu_data d)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, fn, d);
        }
    }
}

void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
{
    CPUState *cpu;
    size_t full = 0, part = 0, elide = 0;

    CPU_FOREACH(cpu) {
        CPUArchState *env = cpu->env_ptr;

        full += qatomic_read(&env_tlb(env)->c.full_flush_count);
        part += qatomic_read(&env_tlb(env)->c.part_flush_count);
        elide += qatomic_read(&env_tlb(env)->c.elide_flush_count);
    }
    *pfull = full;
    *ppart = part;
    *pelide = elide;
}
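
/*
 * Note: the three counters summed above are maintained by
 * tlb_flush_by_mmuidx_async_work() below: "full" counts flushes of every
 * mmu_idx at once, "part" counts per-mmu_idx flushes, and "elide" counts
 * requested flushes skipped because the TLB was already clean.
 */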

static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    uint16_t asked = data.host_int;
    uint16_t all_dirty, work, to_clean;
    int64_t now = get_clock_realtime();

    assert_cpu_is_self(cpu);

    tlb_debug("mmu_idx:0x%04" PRIx16 "\n", asked);

    qemu_spin_lock(&env_tlb(env)->c.lock);

    all_dirty = env_tlb(env)->c.dirty;
    to_clean = asked & all_dirty;
    all_dirty &= ~to_clean;
    env_tlb(env)->c.dirty = all_dirty;

    for (work = to_clean; work != 0; work &= work - 1) {
        int mmu_idx = ctz32(work);
        tlb_flush_one_mmuidx_locked(env, mmu_idx, now);
    }

    qemu_spin_unlock(&env_tlb(env)->c.lock);

    tcg_flush_jmp_cache(cpu);

    if (to_clean == ALL_MMUIDX_BITS) {
        qatomic_set(&env_tlb(env)->c.full_flush_count,
                   env_tlb(env)->c.full_flush_count + 1);
    } else {
        qatomic_set(&env_tlb(env)->c.part_flush_count,
                   env_tlb(env)->c.part_flush_count + ctpop16(to_clean));
        if (to_clean != asked) {
            qatomic_set(&env_tlb(env)->c.elide_flush_count,
                       env_tlb(env)->c.elide_flush_count +
                       ctpop16(asked & ~to_clean));
        }
    }
}
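
/*
 * Worked example (values illustrative): with asked == 0x7 and
 * c.dirty == 0x5, to_clean == 0x5, so mmu_idx 0 and 2 are flushed,
 * part_flush_count grows by 2, and elide_flush_count grows by 1 for the
 * already-clean mmu_idx 1.  The "work &= work - 1" loop clears the
 * lowest set bit on each iteration.
 */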

void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
    tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);

    if (cpu->created && !qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
                         RUN_ON_CPU_HOST_INT(idxmap));
    } else {
        tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap));
    }
}

void tlb_flush(CPUState *cpu)
{
    tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS);
}

void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus(src_cpu, ALL_MMUIDX_BITS);
}

void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS);
}

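/*
 * Note: @mask widens the match from one page to a naturally aligned range
 * of pages.  It is then clipped to TARGET_PAGE_MASK | TLB_INVALID_MASK so
 * that the TLB_* flag bits in the comparators are ignored, while a set
 * TLB_INVALID_MASK bit still prevents an invalidated entry from matching.
 */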
static bool tlb_hit_page_mask_anyprot(CPUTLBEntry *tlb_entry,
                                      vaddr page, vaddr mask)
{
    page &= mask;
    mask &= TARGET_PAGE_MASK | TLB_INVALID_MASK;

    return (page == (tlb_entry->addr_read & mask) ||
            page == (tlb_addr_write(tlb_entry) & mask) ||
            page == (tlb_entry->addr_code & mask));
}

static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry, vaddr page)
{
    return tlb_hit_page_mask_anyprot(tlb_entry, page, -1);
}

/**
 * tlb_entry_is_empty - return true if the entry is not in use
 * @te: pointer to CPUTLBEntry
 */
static inline bool tlb_entry_is_empty(const CPUTLBEntry *te)
{
    return te->addr_read == -1 && te->addr_write == -1 && te->addr_code == -1;
}
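
/*
 * Note: -1 is the pattern written by the memset()s in tlb_mmu_flush_locked()
 * and tlb_flush_entry_mask_locked() below, so "all comparators == -1" is
 * exactly the state of a flushed entry.
 */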

/* Called with tlb_c.lock held */
static bool tlb_flush_entry_mask_locked(CPUTLBEntry *tlb_entry,
                                        vaddr page,
                                        vaddr mask)
{
    if (tlb_hit_page_mask_anyprot(tlb_entry, page, mask)) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
        return true;
    }
    return false;
}

static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry, vaddr page)
{
    return tlb_flush_entry_mask_locked(tlb_entry, page, -1);
}

/* Called with tlb_c.lock held */
static void tlb_flush_vtlb_page_mask_locked(CPUArchState *env, int mmu_idx,
                                            vaddr page,
                                            vaddr mask)
{
    CPUTLBDesc *d = &env_tlb(env)->d[mmu_idx];
    int k;

    assert_cpu_is_self(env_cpu(env));
    for (k = 0; k < CPU_VTLB_SIZE; k++) {
        if (tlb_flush_entry_mask_locked(&d->vtable[k], page, mask)) {
            tlb_n_used_entries_dec(env, mmu_idx);
        }
    }
}

static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx,
                                              vaddr page)
{
    tlb_flush_vtlb_page_mask_locked(env, mmu_idx, page, -1);
}

static void tlb_flush_page_locked(CPUArchState *env, int midx, vaddr page)
{
    vaddr lp_addr = env_tlb(env)->d[midx].large_page_addr;
    vaddr lp_mask = env_tlb(env)->d[midx].large_page_mask;

    /* Check if we need to flush due to large pages.  */
    if ((page & lp_mask) == lp_addr) {
        tlb_debug("forcing full flush midx %d (%"
                  VADDR_PRIx "/%" VADDR_PRIx ")\n",
                  midx, lp_addr, lp_mask);
        tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
    } else {
        if (tlb_flush_entry_locked(tlb_entry(env, midx, page), page)) {
            tlb_n_used_entries_dec(env, midx);
        }
        tlb_flush_vtlb_page_locked(env, midx, page);
    }
}

/**
 * tlb_flush_page_by_mmuidx_async_0:
 * @cpu: cpu on which to flush
 * @addr: page of virtual address to flush
 * @idxmap: set of mmu_idx to flush
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, flush one page
 * at @addr from the tlbs indicated by @idxmap from @cpu.
 */
static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu,
                                             vaddr addr,
                                             uint16_t idxmap)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("page addr: %" VADDR_PRIx " mmu_map:0x%x\n", addr, idxmap);

    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if ((idxmap >> mmu_idx) & 1) {
            tlb_flush_page_locked(env, mmu_idx, addr);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);

    /*
     * Discard jump cache entries for any tb which might potentially
     * overlap the flushed page, which includes the previous.
     */
    tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
    tb_jmp_cache_clear_page(cpu, addr);
}

/**
 * tlb_flush_page_by_mmuidx_async_1:
 * @cpu: cpu on which to flush
 * @data: encoded addr + idxmap
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, called through
 * async_run_on_cpu.  The idxmap parameter is encoded in the page
 * offset of the target_ptr field.  This limits the set of mmu_idx
 * that can be passed via this method.
 */
static void tlb_flush_page_by_mmuidx_async_1(CPUState *cpu,
                                             run_on_cpu_data data)
{
    vaddr addr_and_idxmap = data.target_ptr;
    vaddr addr = addr_and_idxmap & TARGET_PAGE_MASK;
    uint16_t idxmap = addr_and_idxmap & ~TARGET_PAGE_MASK;

    tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
}

typedef struct {
    vaddr addr;
    uint16_t idxmap;
} TLBFlushPageByMMUIdxData;

/**
 * tlb_flush_page_by_mmuidx_async_2:
 * @cpu: cpu on which to flush
 * @data: allocated addr + idxmap
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, called through
 * async_run_on_cpu.  The addr+idxmap parameters are stored in a
 * TLBFlushPageByMMUIdxData structure that has been allocated
 * specifically for this helper.  Free the structure when done.
 */
static void tlb_flush_page_by_mmuidx_async_2(CPUState *cpu,
                                             run_on_cpu_data data)
{
    TLBFlushPageByMMUIdxData *d = data.host_ptr;

    tlb_flush_page_by_mmuidx_async_0(cpu, d->addr, d->idxmap);
    g_free(d);
}

void tlb_flush_page_by_mmuidx(CPUState *cpu, vaddr addr, uint16_t idxmap)
{
    tlb_debug("addr: %" VADDR_PRIx " mmu_idx:%" PRIx16 "\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    if (qemu_cpu_is_self(cpu)) {
        tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
    } else if (idxmap < TARGET_PAGE_SIZE) {
        /*
         * Most targets have only a few mmu_idx.  In the case where
         * we can stuff idxmap into the low TARGET_PAGE_BITS, avoid
         * allocating memory for this operation.
         */
        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        TLBFlushPageByMMUIdxData *d = g_new(TLBFlushPageByMMUIdxData, 1);

        /* Otherwise allocate a structure, freed by the worker.  */
        d->addr = addr;
        d->idxmap = idxmap;
        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_2,
                         RUN_ON_CPU_HOST_PTR(d));
    }
}
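
/*
 * Encoding illustration (addresses hypothetical): with 4 KiB pages,
 * addr == 0x1234f000 and idxmap == 0x3 travel through async_1 as the
 * single value 0x1234f003; the worker splits them apart again with
 * TARGET_PAGE_MASK.  This only works while idxmap < TARGET_PAGE_SIZE,
 * hence the fallback to the allocated TLBFlushPageByMMUIdxData.
 */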

void tlb_flush_page(CPUState *cpu, vaddr addr)
{
    tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS);
}
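
/*
 * Usage sketch (illustrative): a target's TLB-invalidate-by-address
 * operation typically calls
 *
 *     tlb_flush_page(env_cpu(env), addr);
 *
 * to drop the page from every mmu_idx of the calling vCPU.
 */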

void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, vaddr addr,
                                       uint16_t idxmap)
{
    tlb_debug("addr: %" VADDR_PRIx " mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    /*
     * Allocate memory to hold addr+idxmap only when needed.
     * See tlb_flush_page_by_mmuidx for details.
     */
    if (idxmap < TARGET_PAGE_SIZE) {
        flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        CPUState *dst_cpu;

        /* Allocate a separate data block for each destination cpu.  */
        CPU_FOREACH(dst_cpu) {
            if (dst_cpu != src_cpu) {
                TLBFlushPageByMMUIdxData *d
                    = g_new(TLBFlushPageByMMUIdxData, 1);

                d->addr = addr;
                d->idxmap = idxmap;
                async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
                                 RUN_ON_CPU_HOST_PTR(d));
            }
        }
    }

    tlb_flush_page_by_mmuidx_async_0(src_cpu, addr, idxmap);
}

void tlb_flush_page_all_cpus(CPUState *src, vaddr addr)
{
    tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS);
}

void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                              vaddr addr,
                                              uint16_t idxmap)
{
    tlb_debug("addr: %" VADDR_PRIx " mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    /*
     * Allocate memory to hold addr+idxmap only when needed.
     * See tlb_flush_page_by_mmuidx for details.
     */
    if (idxmap < TARGET_PAGE_SIZE) {
        flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
        async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                              RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        CPUState *dst_cpu;
        TLBFlushPageByMMUIdxData *d;

        /* Allocate a separate data block for each destination cpu.  */
        CPU_FOREACH(dst_cpu) {
            if (dst_cpu != src_cpu) {
                d = g_new(TLBFlushPageByMMUIdxData, 1);
                d->addr = addr;
                d->idxmap = idxmap;
                async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
                                 RUN_ON_CPU_HOST_PTR(d));
            }
        }

        d = g_new(TLBFlushPageByMMUIdxData, 1);
        d->addr = addr;
        d->idxmap = idxmap;
        async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_2,
                              RUN_ON_CPU_HOST_PTR(d));
    }
}

void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr)
{
    tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
}

static void tlb_flush_range_locked(CPUArchState *env, int midx,
                                   vaddr addr, vaddr len,
                                   unsigned bits)
{
    CPUTLBDesc *d = &env_tlb(env)->d[midx];
    CPUTLBDescFast *f = &env_tlb(env)->f[midx];
    vaddr mask = MAKE_64BIT_MASK(0, bits);

    /*
     * If @bits is smaller than the tlb size, there may be multiple entries
     * within the TLB; otherwise all addresses that match under @mask hit
     * the same TLB entry.
     * TODO: Perhaps allow bits to be a few bits less than the size.
     * For now, just flush the entire TLB.
     *
     * If @len is larger than the tlb size, then it will take longer to
     * test all of the entries in the TLB than it will to flush it all.
     */
    if (mask < f->mask || len > f->mask) {
        tlb_debug("forcing full flush midx %d ("
                  "%" VADDR_PRIx "/%" VADDR_PRIx "+%" VADDR_PRIx ")\n",
                  midx, addr, mask, len);
        tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
        return;
    }

    /*
     * Check if we need to flush due to large pages.
     * Because large_page_mask contains all 1's from the msb,
     * we only need to test the end of the range.
     */
    if (((addr + len - 1) & d->large_page_mask) == d->large_page_addr) {
        tlb_debug("forcing full flush midx %d ("
                  "%" VADDR_PRIx "/%" VADDR_PRIx ")\n",
                  midx, d->large_page_addr, d->large_page_mask);
        tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
        return;
    }

    for (vaddr i = 0; i < len; i += TARGET_PAGE_SIZE) {
        vaddr page = addr + i;
        CPUTLBEntry *entry = tlb_entry(env, midx, page);

        if (tlb_flush_entry_mask_locked(entry, page, mask)) {
            tlb_n_used_entries_dec(env, midx);
        }
        tlb_flush_vtlb_page_mask_locked(env, midx, page, mask);
    }
}
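
/*
 * Illustration (numbers hypothetical): flushing len == 4 pages with all
 * address bits significant walks just those 4 entries, whereas a request
 * with fewer significant bits than the table index (mask < f->mask) or
 * longer than the table (len > f->mask) falls back to flushing the whole
 * mmu_idx.
 */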
7603ab6e68cSRichard Henderson 
7613ab6e68cSRichard Henderson typedef struct {
762732d5487SAnton Johansson     vaddr addr;
763732d5487SAnton Johansson     vaddr len;
7643ab6e68cSRichard Henderson     uint16_t idxmap;
7653ab6e68cSRichard Henderson     uint16_t bits;
7663960a59fSRichard Henderson } TLBFlushRangeData;
7673ab6e68cSRichard Henderson 
7686be48e45SRichard Henderson static void tlb_flush_range_by_mmuidx_async_0(CPUState *cpu,
7693960a59fSRichard Henderson                                               TLBFlushRangeData d)
7703ab6e68cSRichard Henderson {
7713ab6e68cSRichard Henderson     CPUArchState *env = cpu->env_ptr;
7723ab6e68cSRichard Henderson     int mmu_idx;
7733ab6e68cSRichard Henderson 
7743ab6e68cSRichard Henderson     assert_cpu_is_self(cpu);
7753ab6e68cSRichard Henderson 
776732d5487SAnton Johansson     tlb_debug("range: %" VADDR_PRIx "/%u+%" VADDR_PRIx " mmu_map:0x%x\n",
7773c4ddec1SRichard Henderson               d.addr, d.bits, d.len, d.idxmap);
7783ab6e68cSRichard Henderson 
7793ab6e68cSRichard Henderson     qemu_spin_lock(&env_tlb(env)->c.lock);
7803ab6e68cSRichard Henderson     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
7813ab6e68cSRichard Henderson         if ((d.idxmap >> mmu_idx) & 1) {
7823c4ddec1SRichard Henderson             tlb_flush_range_locked(env, mmu_idx, d.addr, d.len, d.bits);
7833ab6e68cSRichard Henderson         }
7843ab6e68cSRichard Henderson     }
7853ab6e68cSRichard Henderson     qemu_spin_unlock(&env_tlb(env)->c.lock);
7863ab6e68cSRichard Henderson 
787cfc2a2d6SIdan Horowitz     /*
788cfc2a2d6SIdan Horowitz      * If the length is larger than the jump cache size, then it will take
789cfc2a2d6SIdan Horowitz      * longer to clear each entry individually than it will to clear it all.
790cfc2a2d6SIdan Horowitz      */
791cfc2a2d6SIdan Horowitz     if (d.len >= (TARGET_PAGE_SIZE * TB_JMP_CACHE_SIZE)) {
792a976a99aSRichard Henderson         tcg_flush_jmp_cache(cpu);
793cfc2a2d6SIdan Horowitz         return;
794cfc2a2d6SIdan Horowitz     }
795cfc2a2d6SIdan Horowitz 
7961d41a79bSRichard Henderson     /*
7971d41a79bSRichard Henderson      * Discard jump cache entries for any tb which might potentially
7981d41a79bSRichard Henderson      * overlap the flushed pages, which includes the previous.
7991d41a79bSRichard Henderson      */
8001d41a79bSRichard Henderson     d.addr -= TARGET_PAGE_SIZE;
801732d5487SAnton Johansson     for (vaddr i = 0, n = d.len / TARGET_PAGE_SIZE + 1; i < n; i++) {
8021d41a79bSRichard Henderson         tb_jmp_cache_clear_page(cpu, d.addr);
8031d41a79bSRichard Henderson         d.addr += TARGET_PAGE_SIZE;
8043c4ddec1SRichard Henderson     }
8053ab6e68cSRichard Henderson }
8063ab6e68cSRichard Henderson 
807206a583dSRichard Henderson static void tlb_flush_range_by_mmuidx_async_1(CPUState *cpu,
8083ab6e68cSRichard Henderson                                               run_on_cpu_data data)
8093ab6e68cSRichard Henderson {
8103960a59fSRichard Henderson     TLBFlushRangeData *d = data.host_ptr;
8116be48e45SRichard Henderson     tlb_flush_range_by_mmuidx_async_0(cpu, *d);
8123ab6e68cSRichard Henderson     g_free(d);
8133ab6e68cSRichard Henderson }
8143ab6e68cSRichard Henderson 
815732d5487SAnton Johansson void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
816732d5487SAnton Johansson                                vaddr len, uint16_t idxmap,
817e5b1921bSRichard Henderson                                unsigned bits)
8183ab6e68cSRichard Henderson {
8193960a59fSRichard Henderson     TLBFlushRangeData d;
8203ab6e68cSRichard Henderson 
821e5b1921bSRichard Henderson     /*
822e5b1921bSRichard Henderson      * If all bits are significant, and len is small,
823e5b1921bSRichard Henderson      * this devolves to tlb_flush_page.
824e5b1921bSRichard Henderson      */
825e5b1921bSRichard Henderson     if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
8263ab6e68cSRichard Henderson         tlb_flush_page_by_mmuidx(cpu, addr, idxmap);
8273ab6e68cSRichard Henderson         return;
8283ab6e68cSRichard Henderson     }
8293ab6e68cSRichard Henderson     /* If no page bits are significant, this devolves to tlb_flush. */
8303ab6e68cSRichard Henderson     if (bits < TARGET_PAGE_BITS) {
8313ab6e68cSRichard Henderson         tlb_flush_by_mmuidx(cpu, idxmap);
8323ab6e68cSRichard Henderson         return;
8333ab6e68cSRichard Henderson     }
8343ab6e68cSRichard Henderson 
8353ab6e68cSRichard Henderson     /* This should already be page aligned */
8363ab6e68cSRichard Henderson     d.addr = addr & TARGET_PAGE_MASK;
837e5b1921bSRichard Henderson     d.len = len;
8383ab6e68cSRichard Henderson     d.idxmap = idxmap;
8393ab6e68cSRichard Henderson     d.bits = bits;
8403ab6e68cSRichard Henderson 
8413ab6e68cSRichard Henderson     if (qemu_cpu_is_self(cpu)) {
8426be48e45SRichard Henderson         tlb_flush_range_by_mmuidx_async_0(cpu, d);
8433ab6e68cSRichard Henderson     } else {
8443ab6e68cSRichard Henderson         /* Otherwise allocate a structure, freed by the worker.  */
8453960a59fSRichard Henderson         TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
846206a583dSRichard Henderson         async_run_on_cpu(cpu, tlb_flush_range_by_mmuidx_async_1,
8473ab6e68cSRichard Henderson                          RUN_ON_CPU_HOST_PTR(p));
8483ab6e68cSRichard Henderson     }
8493ab6e68cSRichard Henderson }
8503ab6e68cSRichard Henderson 
851732d5487SAnton Johansson void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, vaddr addr,
852e5b1921bSRichard Henderson                                    uint16_t idxmap, unsigned bits)
853e5b1921bSRichard Henderson {
854e5b1921bSRichard Henderson     tlb_flush_range_by_mmuidx(cpu, addr, TARGET_PAGE_SIZE, idxmap, bits);
855e5b1921bSRichard Henderson }
856e5b1921bSRichard Henderson 
857600b819fSRichard Henderson void tlb_flush_range_by_mmuidx_all_cpus(CPUState *src_cpu,
858732d5487SAnton Johansson                                         vaddr addr, vaddr len,
859600b819fSRichard Henderson                                         uint16_t idxmap, unsigned bits)
8603ab6e68cSRichard Henderson {
8613960a59fSRichard Henderson     TLBFlushRangeData d;
862d34e4d1aSRichard Henderson     CPUState *dst_cpu;
8633ab6e68cSRichard Henderson 
864600b819fSRichard Henderson     /*
865600b819fSRichard Henderson      * If all bits are significant, and len is small,
866600b819fSRichard Henderson      * this devolves to tlb_flush_page.
867600b819fSRichard Henderson      */
868600b819fSRichard Henderson     if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
8693ab6e68cSRichard Henderson         tlb_flush_page_by_mmuidx_all_cpus(src_cpu, addr, idxmap);
8703ab6e68cSRichard Henderson         return;
8713ab6e68cSRichard Henderson     }
8723ab6e68cSRichard Henderson     /* If no page bits are significant, this devolves to tlb_flush. */
8733ab6e68cSRichard Henderson     if (bits < TARGET_PAGE_BITS) {
8743ab6e68cSRichard Henderson         tlb_flush_by_mmuidx_all_cpus(src_cpu, idxmap);
8753ab6e68cSRichard Henderson         return;
8763ab6e68cSRichard Henderson     }
8773ab6e68cSRichard Henderson 
8783ab6e68cSRichard Henderson     /* This should already be page aligned */
8793ab6e68cSRichard Henderson     d.addr = addr & TARGET_PAGE_MASK;
880600b819fSRichard Henderson     d.len = len;
8813ab6e68cSRichard Henderson     d.idxmap = idxmap;
8823ab6e68cSRichard Henderson     d.bits = bits;
8833ab6e68cSRichard Henderson 
8843ab6e68cSRichard Henderson     /* Allocate a separate data block for each destination cpu.  */
8853ab6e68cSRichard Henderson     CPU_FOREACH(dst_cpu) {
8863ab6e68cSRichard Henderson         if (dst_cpu != src_cpu) {
8873960a59fSRichard Henderson             TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
8883ab6e68cSRichard Henderson             async_run_on_cpu(dst_cpu,
889206a583dSRichard Henderson                              tlb_flush_range_by_mmuidx_async_1,
8903ab6e68cSRichard Henderson                              RUN_ON_CPU_HOST_PTR(p));
8913ab6e68cSRichard Henderson         }
8923ab6e68cSRichard Henderson     }
8933ab6e68cSRichard Henderson 
8946be48e45SRichard Henderson     tlb_flush_range_by_mmuidx_async_0(src_cpu, d);
8953ab6e68cSRichard Henderson }
8963ab6e68cSRichard Henderson 
897600b819fSRichard Henderson void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu,
898732d5487SAnton Johansson                                             vaddr addr, uint16_t idxmap,
899732d5487SAnton Johansson                                             unsigned bits)
900600b819fSRichard Henderson {
901600b819fSRichard Henderson     tlb_flush_range_by_mmuidx_all_cpus(src_cpu, addr, TARGET_PAGE_SIZE,
902600b819fSRichard Henderson                                        idxmap, bits);
903600b819fSRichard Henderson }
904600b819fSRichard Henderson 
905c13b27d8SRichard Henderson void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
906732d5487SAnton Johansson                                                vaddr addr,
907732d5487SAnton Johansson                                                vaddr len,
9083ab6e68cSRichard Henderson                                                uint16_t idxmap,
9093ab6e68cSRichard Henderson                                                unsigned bits)
9103ab6e68cSRichard Henderson {
911d34e4d1aSRichard Henderson     TLBFlushRangeData d, *p;
912d34e4d1aSRichard Henderson     CPUState *dst_cpu;
9133ab6e68cSRichard Henderson 
914c13b27d8SRichard Henderson     /*
915c13b27d8SRichard Henderson      * If all bits are significant, and len is small,
916c13b27d8SRichard Henderson      * this devolves to tlb_flush_page.
917c13b27d8SRichard Henderson      */
918c13b27d8SRichard Henderson     if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
9193ab6e68cSRichard Henderson         tlb_flush_page_by_mmuidx_all_cpus_synced(src_cpu, addr, idxmap);
9203ab6e68cSRichard Henderson         return;
9213ab6e68cSRichard Henderson     }
9223ab6e68cSRichard Henderson     /* If no page bits are significant, this devolves to tlb_flush. */
9233ab6e68cSRichard Henderson     if (bits < TARGET_PAGE_BITS) {
9243ab6e68cSRichard Henderson         tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, idxmap);
9253ab6e68cSRichard Henderson         return;
9263ab6e68cSRichard Henderson     }
9273ab6e68cSRichard Henderson 
9283ab6e68cSRichard Henderson     /* This should already be page aligned */
9293ab6e68cSRichard Henderson     d.addr = addr & TARGET_PAGE_MASK;
930c13b27d8SRichard Henderson     d.len = len;
9313ab6e68cSRichard Henderson     d.idxmap = idxmap;
9323ab6e68cSRichard Henderson     d.bits = bits;
9333ab6e68cSRichard Henderson 
9343ab6e68cSRichard Henderson     /* Allocate a separate data block for each destination cpu.  */
9353ab6e68cSRichard Henderson     CPU_FOREACH(dst_cpu) {
9363ab6e68cSRichard Henderson         if (dst_cpu != src_cpu) {
9376d244788SRichard Henderson             p = g_memdup(&d, sizeof(d));
938206a583dSRichard Henderson             async_run_on_cpu(dst_cpu, tlb_flush_range_by_mmuidx_async_1,
9393ab6e68cSRichard Henderson                              RUN_ON_CPU_HOST_PTR(p));
9403ab6e68cSRichard Henderson         }
9413ab6e68cSRichard Henderson     }
9423ab6e68cSRichard Henderson 
9436d244788SRichard Henderson     p = g_memdup(&d, sizeof(d));
944206a583dSRichard Henderson     async_safe_run_on_cpu(src_cpu, tlb_flush_range_by_mmuidx_async_1,
9453ab6e68cSRichard Henderson                           RUN_ON_CPU_HOST_PTR(p));
9463ab6e68cSRichard Henderson }
9473ab6e68cSRichard Henderson 
948c13b27d8SRichard Henderson void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
949732d5487SAnton Johansson                                                    vaddr addr,
950c13b27d8SRichard Henderson                                                    uint16_t idxmap,
951c13b27d8SRichard Henderson                                                    unsigned bits)
952c13b27d8SRichard Henderson {
953c13b27d8SRichard Henderson     tlb_flush_range_by_mmuidx_all_cpus_synced(src_cpu, addr, TARGET_PAGE_SIZE,
954c13b27d8SRichard Henderson                                               idxmap, bits);
955c13b27d8SRichard Henderson }
956c13b27d8SRichard Henderson 
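/*
 * Illustrative sketch (not part of this file): how a hypothetical
 * target might invalidate a four-page region in two MMU indexes on
 * every vCPU and wait for completion.  The index mask and function
 * name are assumptions for the example.
 */
#if 0
static void example_flush_four_pages(CPUState *cs, vaddr base)
{
    uint16_t example_idxmap = (1 << 0) | (1 << 1);  /* hypothetical */

    /* bits == TARGET_LONG_BITS: all address bits are significant. */
    tlb_flush_range_by_mmuidx_all_cpus_synced(cs, base & TARGET_PAGE_MASK,
                                              4 * TARGET_PAGE_SIZE,
                                              example_idxmap,
                                              TARGET_LONG_BITS);
}
#endif
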
957d9bb58e5SYang Zhong /* update the TLBs so that writes to code in the physical page
958d9bb58e5SYang Zhong    'ram_addr' can be detected */
959d9bb58e5SYang Zhong void tlb_protect_code(ram_addr_t ram_addr)
960d9bb58e5SYang Zhong {
96193b99616SRichard Henderson     cpu_physical_memory_test_and_clear_dirty(ram_addr & TARGET_PAGE_MASK,
96293b99616SRichard Henderson                                              TARGET_PAGE_SIZE,
963d9bb58e5SYang Zhong                                              DIRTY_MEMORY_CODE);
964d9bb58e5SYang Zhong }
965d9bb58e5SYang Zhong 
966d9bb58e5SYang Zhong /* update the TLB so that writes in the physical page 'ram_addr' are no
967d9bb58e5SYang Zhong    longer tested for self-modifying code */
968d9bb58e5SYang Zhong void tlb_unprotect_code(ram_addr_t ram_addr)
969d9bb58e5SYang Zhong {
970d9bb58e5SYang Zhong     cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
971d9bb58e5SYang Zhong }
972d9bb58e5SYang Zhong 
973d9bb58e5SYang Zhong 
974d9bb58e5SYang Zhong /*
975d9bb58e5SYang Zhong  * Dirty write flag handling
976d9bb58e5SYang Zhong  *
977d9bb58e5SYang Zhong  * When the TCG code writes to a location it looks up the address in
978d9bb58e5SYang Zhong  * the TLB and uses that data to compute the final address. If any of
979d9bb58e5SYang Zhong  * the lower bits of the address are set then the slow path is forced.
980d9bb58e5SYang Zhong  * There are a number of reasons to do this but for normal RAM the
981d9bb58e5SYang Zhong  * most usual is detecting writes to code regions which may invalidate
982d9bb58e5SYang Zhong  * generated code.
983d9bb58e5SYang Zhong  *
98471aec354SEmilio G. Cota  * Other vCPUs might be reading their TLBs during guest execution, so we update
985d73415a3SStefan Hajnoczi  * te->addr_write with qatomic_set. We don't need to worry about this for
98671aec354SEmilio G. Cota  * oversized guests as MTTCG is disabled for them.
987d9bb58e5SYang Zhong  *
98853d28455SRichard Henderson  * Called with tlb_c.lock held.
989d9bb58e5SYang Zhong  */
99071aec354SEmilio G. Cota static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
99171aec354SEmilio G. Cota                                          uintptr_t start, uintptr_t length)
992d9bb58e5SYang Zhong {
993d9bb58e5SYang Zhong     uintptr_t addr = tlb_entry->addr_write;
994d9bb58e5SYang Zhong 
9957b0d792cSRichard Henderson     if ((addr & (TLB_INVALID_MASK | TLB_MMIO |
9967b0d792cSRichard Henderson                  TLB_DISCARD_WRITE | TLB_NOTDIRTY)) == 0) {
997d9bb58e5SYang Zhong         addr &= TARGET_PAGE_MASK;
998d9bb58e5SYang Zhong         addr += tlb_entry->addend;
999d9bb58e5SYang Zhong         if ((addr - start) < length) {
1000238f4380SRichard Henderson #if TARGET_LONG_BITS == 32
1001238f4380SRichard Henderson             uint32_t *ptr_write = (uint32_t *)&tlb_entry->addr_write;
1002238f4380SRichard Henderson             ptr_write += HOST_BIG_ENDIAN;
1003238f4380SRichard Henderson             qatomic_set(ptr_write, *ptr_write | TLB_NOTDIRTY);
1004238f4380SRichard Henderson #elif TCG_OVERSIZED_GUEST
100571aec354SEmilio G. Cota             tlb_entry->addr_write |= TLB_NOTDIRTY;
1006d9bb58e5SYang Zhong #else
1007d73415a3SStefan Hajnoczi             qatomic_set(&tlb_entry->addr_write,
100871aec354SEmilio G. Cota                         tlb_entry->addr_write | TLB_NOTDIRTY);
1009d9bb58e5SYang Zhong #endif
1010d9bb58e5SYang Zhong         }
101171aec354SEmilio G. Cota     }
101271aec354SEmilio G. Cota }
101371aec354SEmilio G. Cota 
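/*
 * Illustrative sketch (not built): a simplified C rendering of the
 * comparison the fast path performs.  Because flag bits live in the
 * low bits of addr_write, OR-ing in TLB_NOTDIRTY above makes this
 * equality test fail, diverting the next store to the slow path where
 * the dirty-memory bookkeeping can run.
 */
#if 0
static bool example_store_fast_path_hits(CPUTLBEntry *ent, vaddr addr)
{
    return tlb_hit(tlb_addr_write(ent), addr);
}
#endif
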
101471aec354SEmilio G. Cota /*
101553d28455SRichard Henderson  * Called with tlb_c.lock held.
101671aec354SEmilio G. Cota  * Called only from the vCPU context, i.e. the TLB's owner thread.
101771aec354SEmilio G. Cota  */
101871aec354SEmilio G. Cota static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s)
101971aec354SEmilio G. Cota {
102071aec354SEmilio G. Cota     *d = *s;
102171aec354SEmilio G. Cota }
1022d9bb58e5SYang Zhong 
1023d9bb58e5SYang Zhong /* This is a cross vCPU call (i.e. another vCPU resetting the flags of
102471aec354SEmilio G. Cota  * the target vCPU).
102553d28455SRichard Henderson  * We must take tlb_c.lock to avoid racing with another vCPU update. The only
102671aec354SEmilio G. Cota  * thing actually updated is the target TLB entry ->addr_write flags.
1027d9bb58e5SYang Zhong  */
1028d9bb58e5SYang Zhong void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
1029d9bb58e5SYang Zhong {
1030d9bb58e5SYang Zhong     CPUArchState *env;
1031d9bb58e5SYang Zhong 
1032d9bb58e5SYang Zhong     int mmu_idx;
1033d9bb58e5SYang Zhong 
1034d9bb58e5SYang Zhong     env = cpu->env_ptr;
1035a40ec84eSRichard Henderson     qemu_spin_lock(&env_tlb(env)->c.lock);
1036d9bb58e5SYang Zhong     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1037d9bb58e5SYang Zhong         unsigned int i;
1038722a1c1eSRichard Henderson         unsigned int n = tlb_n_entries(&env_tlb(env)->f[mmu_idx]);
1039d9bb58e5SYang Zhong 
104086e1eff8SEmilio G. Cota         for (i = 0; i < n; i++) {
1041a40ec84eSRichard Henderson             tlb_reset_dirty_range_locked(&env_tlb(env)->f[mmu_idx].table[i],
1042a40ec84eSRichard Henderson                                          start1, length);
1043d9bb58e5SYang Zhong         }
1044d9bb58e5SYang Zhong 
1045d9bb58e5SYang Zhong         for (i = 0; i < CPU_VTLB_SIZE; i++) {
1046a40ec84eSRichard Henderson             tlb_reset_dirty_range_locked(&env_tlb(env)->d[mmu_idx].vtable[i],
1047a40ec84eSRichard Henderson                                          start1, length);
1048d9bb58e5SYang Zhong         }
1049d9bb58e5SYang Zhong     }
1050a40ec84eSRichard Henderson     qemu_spin_unlock(&env_tlb(env)->c.lock);
1051d9bb58e5SYang Zhong }
1052d9bb58e5SYang Zhong 
105353d28455SRichard Henderson /* Called with tlb_c.lock held */
105471aec354SEmilio G. Cota static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry,
1055732d5487SAnton Johansson                                          vaddr addr)
1056d9bb58e5SYang Zhong {
1057732d5487SAnton Johansson     if (tlb_entry->addr_write == (addr | TLB_NOTDIRTY)) {
1058732d5487SAnton Johansson         tlb_entry->addr_write = addr;
1059d9bb58e5SYang Zhong     }
1060d9bb58e5SYang Zhong }
1061d9bb58e5SYang Zhong 
1062d9bb58e5SYang Zhong /* update the TLB corresponding to the virtual page 'addr'
1063d9bb58e5SYang Zhong    so that it is no longer dirty */
1064732d5487SAnton Johansson void tlb_set_dirty(CPUState *cpu, vaddr addr)
1065d9bb58e5SYang Zhong {
1066d9bb58e5SYang Zhong     CPUArchState *env = cpu->env_ptr;
1067d9bb58e5SYang Zhong     int mmu_idx;
1068d9bb58e5SYang Zhong 
1069d9bb58e5SYang Zhong     assert_cpu_is_self(cpu);
1070d9bb58e5SYang Zhong 
1071732d5487SAnton Johansson     addr &= TARGET_PAGE_MASK;
1072a40ec84eSRichard Henderson     qemu_spin_lock(&env_tlb(env)->c.lock);
1073d9bb58e5SYang Zhong     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1074732d5487SAnton Johansson         tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, addr), addr);
1075d9bb58e5SYang Zhong     }
1076d9bb58e5SYang Zhong 
1077d9bb58e5SYang Zhong     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1078d9bb58e5SYang Zhong         int k;
1079d9bb58e5SYang Zhong         for (k = 0; k < CPU_VTLB_SIZE; k++) {
1080732d5487SAnton Johansson             tlb_set_dirty1_locked(&env_tlb(env)->d[mmu_idx].vtable[k], addr);
1081d9bb58e5SYang Zhong         }
1082d9bb58e5SYang Zhong     }
1083a40ec84eSRichard Henderson     qemu_spin_unlock(&env_tlb(env)->c.lock);
1084d9bb58e5SYang Zhong }
1085d9bb58e5SYang Zhong 
1086d9bb58e5SYang Zhong /* Our TLB does not support large pages, so remember the area covered by
1087d9bb58e5SYang Zhong    large pages and trigger a full TLB flush if these are invalidated.  */
10881308e026SRichard Henderson static void tlb_add_large_page(CPUArchState *env, int mmu_idx,
1089732d5487SAnton Johansson                                vaddr addr, uint64_t size)
1090d9bb58e5SYang Zhong {
1091732d5487SAnton Johansson     vaddr lp_addr = env_tlb(env)->d[mmu_idx].large_page_addr;
1092732d5487SAnton Johansson     vaddr lp_mask = ~(size - 1);
1093d9bb58e5SYang Zhong 
1094732d5487SAnton Johansson     if (lp_addr == (vaddr)-1) {
10951308e026SRichard Henderson         /* No previous large page.  */
1096732d5487SAnton Johansson         lp_addr = addr;
10971308e026SRichard Henderson     } else {
1098d9bb58e5SYang Zhong         /* Extend the existing region to include the new page.
10991308e026SRichard Henderson            This is a compromise between unnecessary flushes and
11001308e026SRichard Henderson            the cost of maintaining a full variable size TLB.  */
1101a40ec84eSRichard Henderson         lp_mask &= env_tlb(env)->d[mmu_idx].large_page_mask;
1102732d5487SAnton Johansson         while (((lp_addr ^ addr) & lp_mask) != 0) {
11031308e026SRichard Henderson             lp_mask <<= 1;
1104d9bb58e5SYang Zhong         }
11051308e026SRichard Henderson     }
1106a40ec84eSRichard Henderson     env_tlb(env)->d[mmu_idx].large_page_addr = lp_addr & lp_mask;
1107a40ec84eSRichard Henderson     env_tlb(env)->d[mmu_idx].large_page_mask = lp_mask;
1108d9bb58e5SYang Zhong }
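
/*
 * Worked example for the coalescing loop above: with one 2MB page
 * recorded at 0x200000 and a new 2MB page at 0x600000, lp_mask starts
 * with the low 21 bits clear and is widened twice, leaving the single
 * tracked region [0, 8MB) covering both pages.  Invalidating any page
 * inside it then costs a full flush, which is the compromise the
 * comment above describes.
 */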
1109d9bb58e5SYang Zhong 
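/*
 * tlb_set_compare: set up one comparator of a TLB entry
 * @full: CPUTLBEntryFull that receives the slow-path flags
 * @ent: CPUTLBEntry whose comparator for @access_type is written
 * @address: page-aligned guest virtual address
 * @flags: TLB_* flags for this access type
 * @access_type: which of fetch/load/store to configure
 * @enable: if false, store -1 so the comparator never matches
 *
 * Flags that fit in the comparator (TLB_FLAGS_MASK) are folded into
 * the address; remaining TLB_SLOW_FLAGS_MASK bits are stored in @full
 * and advertised with TLB_FORCE_SLOW so the slow path re-merges them.
 */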
111058e8f1f6SRichard Henderson static inline void tlb_set_compare(CPUTLBEntryFull *full, CPUTLBEntry *ent,
111158e8f1f6SRichard Henderson                                    target_ulong address, int flags,
111258e8f1f6SRichard Henderson                                    MMUAccessType access_type, bool enable)
111358e8f1f6SRichard Henderson {
111458e8f1f6SRichard Henderson     if (enable) {
111558e8f1f6SRichard Henderson         address |= flags & TLB_FLAGS_MASK;
111658e8f1f6SRichard Henderson         flags &= TLB_SLOW_FLAGS_MASK;
111758e8f1f6SRichard Henderson         if (flags) {
111858e8f1f6SRichard Henderson             address |= TLB_FORCE_SLOW;
111958e8f1f6SRichard Henderson         }
112058e8f1f6SRichard Henderson     } else {
112158e8f1f6SRichard Henderson         address = -1;
112258e8f1f6SRichard Henderson         flags = 0;
112358e8f1f6SRichard Henderson     }
112458e8f1f6SRichard Henderson     ent->addr_idx[access_type] = address;
112558e8f1f6SRichard Henderson     full->slow_flags[access_type] = flags;
112658e8f1f6SRichard Henderson }
112758e8f1f6SRichard Henderson 
112840473689SRichard Henderson /*
112940473689SRichard Henderson  * Add a new TLB entry. At most one entry for a given virtual address
1130d9bb58e5SYang Zhong  * is permitted. Only a single TARGET_PAGE_SIZE region is mapped; the
1131d9bb58e5SYang Zhong  * supplied size is only used by tlb_flush_page.
1132d9bb58e5SYang Zhong  *
1133d9bb58e5SYang Zhong  * Called from TCG-generated code, which is under an RCU read-side
1134d9bb58e5SYang Zhong  * critical section.
1135d9bb58e5SYang Zhong  */
113640473689SRichard Henderson void tlb_set_page_full(CPUState *cpu, int mmu_idx,
1137732d5487SAnton Johansson                        vaddr addr, CPUTLBEntryFull *full)
1138d9bb58e5SYang Zhong {
1139d9bb58e5SYang Zhong     CPUArchState *env = cpu->env_ptr;
1140a40ec84eSRichard Henderson     CPUTLB *tlb = env_tlb(env);
1141a40ec84eSRichard Henderson     CPUTLBDesc *desc = &tlb->d[mmu_idx];
1142d9bb58e5SYang Zhong     MemoryRegionSection *section;
114358e8f1f6SRichard Henderson     unsigned int index, read_flags, write_flags;
1144d9bb58e5SYang Zhong     uintptr_t addend;
114568fea038SRichard Henderson     CPUTLBEntry *te, tn;
114655df6fcfSPeter Maydell     hwaddr iotlb, xlat, sz, paddr_page;
1147732d5487SAnton Johansson     vaddr addr_page;
114840473689SRichard Henderson     int asidx, wp_flags, prot;
11498f5db641SRichard Henderson     bool is_ram, is_romd;
1150d9bb58e5SYang Zhong 
1151d9bb58e5SYang Zhong     assert_cpu_is_self(cpu);
115255df6fcfSPeter Maydell 
115340473689SRichard Henderson     if (full->lg_page_size <= TARGET_PAGE_BITS) {
115455df6fcfSPeter Maydell         sz = TARGET_PAGE_SIZE;
115555df6fcfSPeter Maydell     } else {
115640473689SRichard Henderson         sz = (hwaddr)1 << full->lg_page_size;
1157732d5487SAnton Johansson         tlb_add_large_page(env, mmu_idx, addr, sz);
115855df6fcfSPeter Maydell     }
1159732d5487SAnton Johansson     addr_page = addr & TARGET_PAGE_MASK;
116040473689SRichard Henderson     paddr_page = full->phys_addr & TARGET_PAGE_MASK;
116155df6fcfSPeter Maydell 
116240473689SRichard Henderson     prot = full->prot;
116340473689SRichard Henderson     asidx = cpu_asidx_from_attrs(cpu, full->attrs);
116455df6fcfSPeter Maydell     section = address_space_translate_for_iotlb(cpu, asidx, paddr_page,
116540473689SRichard Henderson                                                 &xlat, &sz, full->attrs, &prot);
1166d9bb58e5SYang Zhong     assert(sz >= TARGET_PAGE_SIZE);
1167d9bb58e5SYang Zhong 
1168732d5487SAnton Johansson     tlb_debug("vaddr=%" VADDR_PRIx " paddr=0x" HWADDR_FMT_plx
1169d9bb58e5SYang Zhong               " prot=%x idx=%d\n",
1170732d5487SAnton Johansson               addr, full->phys_addr, prot, mmu_idx);
1171d9bb58e5SYang Zhong 
117258e8f1f6SRichard Henderson     read_flags = 0;
117340473689SRichard Henderson     if (full->lg_page_size < TARGET_PAGE_BITS) {
117430d7e098SRichard Henderson         /* Repeat the MMU check and TLB fill on every access.  */
117558e8f1f6SRichard Henderson         read_flags |= TLB_INVALID_MASK;
117655df6fcfSPeter Maydell     }
117740473689SRichard Henderson     if (full->attrs.byte_swap) {
117858e8f1f6SRichard Henderson         read_flags |= TLB_BSWAP;
1179a26fc6f5STony Nguyen     }
11808f5db641SRichard Henderson 
11818f5db641SRichard Henderson     is_ram = memory_region_is_ram(section->mr);
11828f5db641SRichard Henderson     is_romd = memory_region_is_romd(section->mr);
11838f5db641SRichard Henderson 
11848f5db641SRichard Henderson     if (is_ram || is_romd) {
11858f5db641SRichard Henderson         /* RAM and ROMD both have associated host memory. */
1186d9bb58e5SYang Zhong         addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
11878f5db641SRichard Henderson     } else {
11888f5db641SRichard Henderson         /* I/O does not; force the host address to NULL. */
11898f5db641SRichard Henderson         addend = 0;
1190d9bb58e5SYang Zhong     }
1191d9bb58e5SYang Zhong 
119258e8f1f6SRichard Henderson     write_flags = read_flags;
11938f5db641SRichard Henderson     if (is_ram) {
11948f5db641SRichard Henderson         iotlb = memory_region_get_ram_addr(section->mr) + xlat;
11958f5db641SRichard Henderson         /*
11968f5db641SRichard Henderson          * Computing is_clean is expensive; avoid all that unless
11978f5db641SRichard Henderson          * the page is actually writable.
11988f5db641SRichard Henderson          */
11998f5db641SRichard Henderson         if (prot & PAGE_WRITE) {
12008f5db641SRichard Henderson             if (section->readonly) {
120158e8f1f6SRichard Henderson                 write_flags |= TLB_DISCARD_WRITE;
12028f5db641SRichard Henderson             } else if (cpu_physical_memory_is_clean(iotlb)) {
120358e8f1f6SRichard Henderson                 write_flags |= TLB_NOTDIRTY;
12048f5db641SRichard Henderson             }
12058f5db641SRichard Henderson         }
12068f5db641SRichard Henderson     } else {
12078f5db641SRichard Henderson         /* I/O or ROMD */
12088f5db641SRichard Henderson         iotlb = memory_region_section_get_iotlb(cpu, section) + xlat;
12098f5db641SRichard Henderson         /*
12108f5db641SRichard Henderson          * Writes to romd devices must go through MMIO to enable write.
12118f5db641SRichard Henderson          * Reads to romd devices go through the ram_ptr found above,
12128f5db641SRichard Henderson          * but of course reads to I/O must go through MMIO.
12138f5db641SRichard Henderson          */
121458e8f1f6SRichard Henderson         write_flags |= TLB_MMIO;
12158f5db641SRichard Henderson         if (!is_romd) {
121658e8f1f6SRichard Henderson             read_flags = write_flags;
12178f5db641SRichard Henderson         }
12188f5db641SRichard Henderson     }
12198f5db641SRichard Henderson 
1220732d5487SAnton Johansson     wp_flags = cpu_watchpoint_address_matches(cpu, addr_page,
122150b107c5SRichard Henderson                                               TARGET_PAGE_SIZE);
1222d9bb58e5SYang Zhong 
1223732d5487SAnton Johansson     index = tlb_index(env, mmu_idx, addr_page);
1224732d5487SAnton Johansson     te = tlb_entry(env, mmu_idx, addr_page);
1225d9bb58e5SYang Zhong 
122668fea038SRichard Henderson     /*
122771aec354SEmilio G. Cota      * Hold the TLB lock for the rest of the function. We could acquire/release
122871aec354SEmilio G. Cota      * the lock several times in the function, but it is faster to amortize the
122971aec354SEmilio G. Cota      * acquisition cost by acquiring it just once. Note that this leads to
123071aec354SEmilio G. Cota      * a longer critical section, but this is not a concern since the TLB lock
123171aec354SEmilio G. Cota      * is unlikely to be contended.
123271aec354SEmilio G. Cota      */
1233a40ec84eSRichard Henderson     qemu_spin_lock(&tlb->c.lock);
123471aec354SEmilio G. Cota 
12353d1523ceSRichard Henderson     /* Note that the tlb is no longer clean.  */
1236a40ec84eSRichard Henderson     tlb->c.dirty |= 1 << mmu_idx;
12373d1523ceSRichard Henderson 
123871aec354SEmilio G. Cota     /* Make sure there's no cached translation for the new page.  */
1239732d5487SAnton Johansson     tlb_flush_vtlb_page_locked(env, mmu_idx, addr_page);
124071aec354SEmilio G. Cota 
124171aec354SEmilio G. Cota     /*
124268fea038SRichard Henderson      * Only evict the old entry to the victim tlb if it's for a
124368fea038SRichard Henderson      * different page; otherwise just overwrite the stale data.
124468fea038SRichard Henderson      */
1245732d5487SAnton Johansson     if (!tlb_hit_page_anyprot(te, addr_page) && !tlb_entry_is_empty(te)) {
1246a40ec84eSRichard Henderson         unsigned vidx = desc->vindex++ % CPU_VTLB_SIZE;
1247a40ec84eSRichard Henderson         CPUTLBEntry *tv = &desc->vtable[vidx];
124868fea038SRichard Henderson 
124968fea038SRichard Henderson         /* Evict the old entry into the victim tlb.  */
125071aec354SEmilio G. Cota         copy_tlb_helper_locked(tv, te);
125125d3ec58SRichard Henderson         desc->vfulltlb[vidx] = desc->fulltlb[index];
125286e1eff8SEmilio G. Cota         tlb_n_used_entries_dec(env, mmu_idx);
125368fea038SRichard Henderson     }
1254d9bb58e5SYang Zhong 
1255d9bb58e5SYang Zhong     /* refill the tlb */
1256ace41090SPeter Maydell     /*
1257ace41090SPeter Maydell      * At this point iotlb contains a physical section number in the lower
1258ace41090SPeter Maydell      * TARGET_PAGE_BITS, and either
12598f5db641SRichard Henderson      *  + the ram_addr_t of the page base of the target RAM (RAM)
12608f5db641SRichard Henderson      *  + the offset within section->mr of the page base (I/O, ROMD)
126158e8f1f6SRichard Henderson      * We subtract addr_page (which is page aligned and thus won't
1262ace41090SPeter Maydell      * disturb the low bits) to give an offset which can be added to the
1263ace41090SPeter Maydell      * (non-page-aligned) vaddr of the eventual memory access to get
1264ace41090SPeter Maydell      * the MemoryRegion offset for the access. Note that the vaddr we
1265ace41090SPeter Maydell      * subtract here is that of the page base, and not the same as the
1266ace41090SPeter Maydell      * vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
1267ace41090SPeter Maydell      */
126840473689SRichard Henderson     desc->fulltlb[index] = *full;
126958e8f1f6SRichard Henderson     full = &desc->fulltlb[index];
127058e8f1f6SRichard Henderson     full->xlat_section = iotlb - addr_page;
127158e8f1f6SRichard Henderson     full->phys_addr = paddr_page;
1272d9bb58e5SYang Zhong 
1273d9bb58e5SYang Zhong     /* Now calculate the new entry */
1274732d5487SAnton Johansson     tn.addend = addend - addr_page;
127558e8f1f6SRichard Henderson 
127658e8f1f6SRichard Henderson     tlb_set_compare(full, &tn, addr_page, read_flags,
127758e8f1f6SRichard Henderson                     MMU_INST_FETCH, prot & PAGE_EXEC);
127858e8f1f6SRichard Henderson 
127950b107c5SRichard Henderson     if (wp_flags & BP_MEM_READ) {
128058e8f1f6SRichard Henderson         read_flags |= TLB_WATCHPOINT;
128150b107c5SRichard Henderson     }
128258e8f1f6SRichard Henderson     tlb_set_compare(full, &tn, addr_page, read_flags,
128358e8f1f6SRichard Henderson                     MMU_DATA_LOAD, prot & PAGE_READ);
1284d9bb58e5SYang Zhong 
1285f52bfb12SDavid Hildenbrand     if (prot & PAGE_WRITE_INV) {
128658e8f1f6SRichard Henderson         write_flags |= TLB_INVALID_MASK;
1287f52bfb12SDavid Hildenbrand     }
128850b107c5SRichard Henderson     if (wp_flags & BP_MEM_WRITE) {
128958e8f1f6SRichard Henderson         write_flags |= TLB_WATCHPOINT;
129050b107c5SRichard Henderson     }
129158e8f1f6SRichard Henderson     tlb_set_compare(full, &tn, addr_page, write_flags,
129258e8f1f6SRichard Henderson                     MMU_DATA_STORE, prot & PAGE_WRITE);
1293d9bb58e5SYang Zhong 
129471aec354SEmilio G. Cota     copy_tlb_helper_locked(te, &tn);
129586e1eff8SEmilio G. Cota     tlb_n_used_entries_inc(env, mmu_idx);
1296a40ec84eSRichard Henderson     qemu_spin_unlock(&tlb->c.lock);
1297d9bb58e5SYang Zhong }
1298d9bb58e5SYang Zhong 
1299732d5487SAnton Johansson void tlb_set_page_with_attrs(CPUState *cpu, vaddr addr,
130040473689SRichard Henderson                              hwaddr paddr, MemTxAttrs attrs, int prot,
1301732d5487SAnton Johansson                              int mmu_idx, uint64_t size)
130240473689SRichard Henderson {
130340473689SRichard Henderson     CPUTLBEntryFull full = {
130440473689SRichard Henderson         .phys_addr = paddr,
130540473689SRichard Henderson         .attrs = attrs,
130640473689SRichard Henderson         .prot = prot,
130740473689SRichard Henderson         .lg_page_size = ctz64(size)
130840473689SRichard Henderson     };
130940473689SRichard Henderson 
131040473689SRichard Henderson     assert(is_power_of_2(size));
1311732d5487SAnton Johansson     tlb_set_page_full(cpu, mmu_idx, addr, &full);
131240473689SRichard Henderson }
131340473689SRichard Henderson 
1314732d5487SAnton Johansson void tlb_set_page(CPUState *cpu, vaddr addr,
1315d9bb58e5SYang Zhong                   hwaddr paddr, int prot,
1316732d5487SAnton Johansson                   int mmu_idx, uint64_t size)
1317d9bb58e5SYang Zhong {
1318732d5487SAnton Johansson     tlb_set_page_with_attrs(cpu, addr, paddr, MEMTXATTRS_UNSPECIFIED,
1319d9bb58e5SYang Zhong                             prot, mmu_idx, size);
1320d9bb58e5SYang Zhong }
1321d9bb58e5SYang Zhong 
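/*
 * Minimal sketch (hypothetical target code, not part of this file):
 * a tcg_ops->tlb_fill hook for a target whose virtual addresses map
 * 1:1 to physical, installing a fully-permissive page.  The function
 * name and the identity translation are assumptions for illustration.
 */
#if 0
static bool example_tlb_fill(CPUState *cs, vaddr addr, int size,
                             MMUAccessType access_type, int mmu_idx,
                             bool probe, uintptr_t retaddr)
{
    tlb_set_page_with_attrs(cs, addr & TARGET_PAGE_MASK,
                            addr & TARGET_PAGE_MASK,  /* paddr == vaddr */
                            MEMTXATTRS_UNSPECIFIED,
                            PAGE_READ | PAGE_WRITE | PAGE_EXEC,
                            mmu_idx, TARGET_PAGE_SIZE);
    return true;
}
#endif
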
1322c319dc13SRichard Henderson /*
1323c319dc13SRichard Henderson  * Note: tlb_fill() can trigger a resize of the TLB. This means that all of the
1324c319dc13SRichard Henderson  * caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must
1325c319dc13SRichard Henderson  * be discarded and looked up again (e.g. via tlb_entry()).
1326c319dc13SRichard Henderson  */
1327732d5487SAnton Johansson static void tlb_fill(CPUState *cpu, vaddr addr, int size,
1328c319dc13SRichard Henderson                      MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
1329c319dc13SRichard Henderson {
1330c319dc13SRichard Henderson     bool ok;
1331c319dc13SRichard Henderson 
1332c319dc13SRichard Henderson     /*
1333c319dc13SRichard Henderson      * This is not a probe, so only valid return is success; failure
1334c319dc13SRichard Henderson      * should result in exception + longjmp to the cpu loop.
1335c319dc13SRichard Henderson      */
13368810ee2aSAlex Bennée     ok = cpu->cc->tcg_ops->tlb_fill(cpu, addr, size,
1337e124536fSEduardo Habkost                                     access_type, mmu_idx, false, retaddr);
1338c319dc13SRichard Henderson     assert(ok);
1339c319dc13SRichard Henderson }
1340c319dc13SRichard Henderson 
134178271684SClaudio Fontana static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
134278271684SClaudio Fontana                                         MMUAccessType access_type,
134378271684SClaudio Fontana                                         int mmu_idx, uintptr_t retaddr)
134478271684SClaudio Fontana {
13458810ee2aSAlex Bennée     cpu->cc->tcg_ops->do_unaligned_access(cpu, addr, access_type,
13468810ee2aSAlex Bennée                                           mmu_idx, retaddr);
134778271684SClaudio Fontana }
134878271684SClaudio Fontana 
134978271684SClaudio Fontana static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr,
135078271684SClaudio Fontana                                           vaddr addr, unsigned size,
135178271684SClaudio Fontana                                           MMUAccessType access_type,
135278271684SClaudio Fontana                                           int mmu_idx, MemTxAttrs attrs,
135378271684SClaudio Fontana                                           MemTxResult response,
135478271684SClaudio Fontana                                           uintptr_t retaddr)
135578271684SClaudio Fontana {
135678271684SClaudio Fontana     CPUClass *cc = CPU_GET_CLASS(cpu);
135778271684SClaudio Fontana 
135878271684SClaudio Fontana     if (!cpu->ignore_memory_transaction_failures &&
135978271684SClaudio Fontana         cc->tcg_ops->do_transaction_failed) {
136078271684SClaudio Fontana         cc->tcg_ops->do_transaction_failed(cpu, physaddr, addr, size,
136178271684SClaudio Fontana                                            access_type, mmu_idx, attrs,
136278271684SClaudio Fontana                                            response, retaddr);
136378271684SClaudio Fontana     }
136478271684SClaudio Fontana }
136578271684SClaudio Fontana 
136625d3ec58SRichard Henderson static uint64_t io_readx(CPUArchState *env, CPUTLBEntryFull *full,
1367732d5487SAnton Johansson                          int mmu_idx, vaddr addr, uintptr_t retaddr,
1368be5c4787STony Nguyen                          MMUAccessType access_type, MemOp op)
1369d9bb58e5SYang Zhong {
137029a0af61SRichard Henderson     CPUState *cpu = env_cpu(env);
13712d54f194SPeter Maydell     hwaddr mr_offset;
13722d54f194SPeter Maydell     MemoryRegionSection *section;
13732d54f194SPeter Maydell     MemoryRegion *mr;
1374d9bb58e5SYang Zhong     uint64_t val;
137504e3aabdSPeter Maydell     MemTxResult r;
1376d9bb58e5SYang Zhong 
137725d3ec58SRichard Henderson     section = iotlb_to_section(cpu, full->xlat_section, full->attrs);
13782d54f194SPeter Maydell     mr = section->mr;
137925d3ec58SRichard Henderson     mr_offset = (full->xlat_section & TARGET_PAGE_MASK) + addr;
1380d9bb58e5SYang Zhong     cpu->mem_io_pc = retaddr;
138108565552SRichard Henderson     if (!cpu->can_do_io) {
1382d9bb58e5SYang Zhong         cpu_io_recompile(cpu, retaddr);
1383d9bb58e5SYang Zhong     }
1384d9bb58e5SYang Zhong 
138561b59fb2SRichard Henderson     {
138661b59fb2SRichard Henderson         QEMU_IOTHREAD_LOCK_GUARD();
138725d3ec58SRichard Henderson         r = memory_region_dispatch_read(mr, mr_offset, &val, op, full->attrs);
138861b59fb2SRichard Henderson     }
138961b59fb2SRichard Henderson 
139004e3aabdSPeter Maydell     if (r != MEMTX_OK) {
13912d54f194SPeter Maydell         hwaddr physaddr = mr_offset +
13922d54f194SPeter Maydell             section->offset_within_address_space -
13932d54f194SPeter Maydell             section->offset_within_region;
13942d54f194SPeter Maydell 
1395be5c4787STony Nguyen         cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), access_type,
139625d3ec58SRichard Henderson                                mmu_idx, full->attrs, r, retaddr);
139704e3aabdSPeter Maydell     }
1398d9bb58e5SYang Zhong     return val;
1399d9bb58e5SYang Zhong }
1400d9bb58e5SYang Zhong 
14012f3a57eeSAlex Bennée /*
140225d3ec58SRichard Henderson  * Save a potentially trashed CPUTLBEntryFull for later lookup by plugin.
140325d3ec58SRichard Henderson  * This is read by tlb_plugin_lookup if the fulltlb entry doesn't match
1404570ef309SAlex Bennée  * because of the side effect of io_writex changing the memory layout.
14052f3a57eeSAlex Bennée  */
140637523ff7SRichard Henderson static void save_iotlb_data(CPUState *cs, MemoryRegionSection *section,
140737523ff7SRichard Henderson                             hwaddr mr_offset)
14082f3a57eeSAlex Bennée {
14092f3a57eeSAlex Bennée #ifdef CONFIG_PLUGIN
14102f3a57eeSAlex Bennée     SavedIOTLB *saved = &cs->saved_iotlb;
14112f3a57eeSAlex Bennée     saved->section = section;
14122f3a57eeSAlex Bennée     saved->mr_offset = mr_offset;
14132f3a57eeSAlex Bennée #endif
14142f3a57eeSAlex Bennée }
14152f3a57eeSAlex Bennée 
141625d3ec58SRichard Henderson static void io_writex(CPUArchState *env, CPUTLBEntryFull *full,
1417732d5487SAnton Johansson                       int mmu_idx, uint64_t val, vaddr addr,
1418be5c4787STony Nguyen                       uintptr_t retaddr, MemOp op)
1419d9bb58e5SYang Zhong {
142029a0af61SRichard Henderson     CPUState *cpu = env_cpu(env);
14212d54f194SPeter Maydell     hwaddr mr_offset;
14222d54f194SPeter Maydell     MemoryRegionSection *section;
14232d54f194SPeter Maydell     MemoryRegion *mr;
142404e3aabdSPeter Maydell     MemTxResult r;
1425d9bb58e5SYang Zhong 
142625d3ec58SRichard Henderson     section = iotlb_to_section(cpu, full->xlat_section, full->attrs);
14272d54f194SPeter Maydell     mr = section->mr;
142825d3ec58SRichard Henderson     mr_offset = (full->xlat_section & TARGET_PAGE_MASK) + addr;
142908565552SRichard Henderson     if (!cpu->can_do_io) {
1430d9bb58e5SYang Zhong         cpu_io_recompile(cpu, retaddr);
1431d9bb58e5SYang Zhong     }
1432d9bb58e5SYang Zhong     cpu->mem_io_pc = retaddr;
1433d9bb58e5SYang Zhong 
14342f3a57eeSAlex Bennée     /*
14352f3a57eeSAlex Bennée      * The memory_region_dispatch may trigger a flush/resize
14362f3a57eeSAlex Bennée      * so for plugins we save the iotlb_data just in case.
14372f3a57eeSAlex Bennée      */
143837523ff7SRichard Henderson     save_iotlb_data(cpu, section, mr_offset);
14392f3a57eeSAlex Bennée 
144061b59fb2SRichard Henderson     {
144161b59fb2SRichard Henderson         QEMU_IOTHREAD_LOCK_GUARD();
144225d3ec58SRichard Henderson         r = memory_region_dispatch_write(mr, mr_offset, val, op, full->attrs);
144361b59fb2SRichard Henderson     }
144461b59fb2SRichard Henderson 
144504e3aabdSPeter Maydell     if (r != MEMTX_OK) {
14462d54f194SPeter Maydell         hwaddr physaddr = mr_offset +
14472d54f194SPeter Maydell             section->offset_within_address_space -
14482d54f194SPeter Maydell             section->offset_within_region;
14492d54f194SPeter Maydell 
1450be5c4787STony Nguyen         cpu_transaction_failed(cpu, physaddr, addr, memop_size(op),
145125d3ec58SRichard Henderson                                MMU_DATA_STORE, mmu_idx, full->attrs, r,
1452be5c4787STony Nguyen                                retaddr);
145304e3aabdSPeter Maydell     }
1454d9bb58e5SYang Zhong }
1455d9bb58e5SYang Zhong 
1456d9bb58e5SYang Zhong /* Return true if PAGE is present in the victim tlb, and has been copied
1457d9bb58e5SYang Zhong    back to the main tlb.  */
1458d9bb58e5SYang Zhong static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
1459732d5487SAnton Johansson                            MMUAccessType access_type, vaddr page)
1460d9bb58e5SYang Zhong {
1461d9bb58e5SYang Zhong     size_t vidx;
146271aec354SEmilio G. Cota 
146329a0af61SRichard Henderson     assert_cpu_is_self(env_cpu(env));
1464d9bb58e5SYang Zhong     for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
1465a40ec84eSRichard Henderson         CPUTLBEntry *vtlb = &env_tlb(env)->d[mmu_idx].vtable[vidx];
14669e39de98SAnton Johansson         uint64_t cmp = tlb_read_idx(vtlb, access_type);
1467d9bb58e5SYang Zhong 
1468d9bb58e5SYang Zhong         if (cmp == page) {
1469d9bb58e5SYang Zhong             /* Found entry in victim tlb, swap tlb and iotlb.  */
1470a40ec84eSRichard Henderson             CPUTLBEntry tmptlb, *tlb = &env_tlb(env)->f[mmu_idx].table[index];
1471d9bb58e5SYang Zhong 
1472a40ec84eSRichard Henderson             qemu_spin_lock(&env_tlb(env)->c.lock);
147371aec354SEmilio G. Cota             copy_tlb_helper_locked(&tmptlb, tlb);
147471aec354SEmilio G. Cota             copy_tlb_helper_locked(tlb, vtlb);
147571aec354SEmilio G. Cota             copy_tlb_helper_locked(vtlb, &tmptlb);
1476a40ec84eSRichard Henderson             qemu_spin_unlock(&env_tlb(env)->c.lock);
1477d9bb58e5SYang Zhong 
147825d3ec58SRichard Henderson             CPUTLBEntryFull *f1 = &env_tlb(env)->d[mmu_idx].fulltlb[index];
147925d3ec58SRichard Henderson             CPUTLBEntryFull *f2 = &env_tlb(env)->d[mmu_idx].vfulltlb[vidx];
148025d3ec58SRichard Henderson             CPUTLBEntryFull tmpf;
148125d3ec58SRichard Henderson             tmpf = *f1; *f1 = *f2; *f2 = tmpf;
1482d9bb58e5SYang Zhong             return true;
1483d9bb58e5SYang Zhong         }
1484d9bb58e5SYang Zhong     }
1485d9bb58e5SYang Zhong     return false;
1486d9bb58e5SYang Zhong }
1487d9bb58e5SYang Zhong 
1488707526adSRichard Henderson static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
148925d3ec58SRichard Henderson                            CPUTLBEntryFull *full, uintptr_t retaddr)
1490707526adSRichard Henderson {
149125d3ec58SRichard Henderson     ram_addr_t ram_addr = mem_vaddr + full->xlat_section;
1492707526adSRichard Henderson 
1493707526adSRichard Henderson     trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size);
1494707526adSRichard Henderson 
1495707526adSRichard Henderson     if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
1496f349e92eSPhilippe Mathieu-Daudé         tb_invalidate_phys_range_fast(ram_addr, size, retaddr);
1497707526adSRichard Henderson     }
1498707526adSRichard Henderson 
1499707526adSRichard Henderson     /*
1500707526adSRichard Henderson      * Set both VGA and migration bits for simplicity and to remove
1501707526adSRichard Henderson      * the notdirty callback faster.
1502707526adSRichard Henderson      */
1503707526adSRichard Henderson     cpu_physical_memory_set_dirty_range(ram_addr, size, DIRTY_CLIENTS_NOCODE);
1504707526adSRichard Henderson 
1505707526adSRichard Henderson     /* We remove the notdirty callback only if the code has been flushed. */
1506707526adSRichard Henderson     if (!cpu_physical_memory_is_clean(ram_addr)) {
1507707526adSRichard Henderson         trace_memory_notdirty_set_dirty(mem_vaddr);
1508707526adSRichard Henderson         tlb_set_dirty(cpu, mem_vaddr);
1509707526adSRichard Henderson     }
1510707526adSRichard Henderson }
1511707526adSRichard Henderson 
15124f8f4127SAnton Johansson static int probe_access_internal(CPUArchState *env, vaddr addr,
1513069cfe77SRichard Henderson                                  int fault_size, MMUAccessType access_type,
1514069cfe77SRichard Henderson                                  int mmu_idx, bool nonfault,
1515af803a4fSRichard Henderson                                  void **phost, CPUTLBEntryFull **pfull,
1516*6d03226bSAlex Bennée                                  uintptr_t retaddr, bool check_mem_cbs)
1517d9bb58e5SYang Zhong {
1518383beda9SRichard Henderson     uintptr_t index = tlb_index(env, mmu_idx, addr);
1519383beda9SRichard Henderson     CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
15209e39de98SAnton Johansson     uint64_t tlb_addr = tlb_read_idx(entry, access_type);
15214f8f4127SAnton Johansson     vaddr page_addr = addr & TARGET_PAGE_MASK;
152258e8f1f6SRichard Henderson     int flags = TLB_FLAGS_MASK & ~TLB_FORCE_SLOW;
1523*6d03226bSAlex Bennée     bool force_mmio = check_mem_cbs && cpu_plugin_mem_cbs_enabled(env_cpu(env));
152458e8f1f6SRichard Henderson     CPUTLBEntryFull *full;
1525ca86cf32SDavid Hildenbrand 
1526069cfe77SRichard Henderson     if (!tlb_hit_page(tlb_addr, page_addr)) {
15270b3c75adSRichard Henderson         if (!victim_tlb_hit(env, mmu_idx, index, access_type, page_addr)) {
1528069cfe77SRichard Henderson             CPUState *cs = env_cpu(env);
1529069cfe77SRichard Henderson 
15308810ee2aSAlex Bennée             if (!cs->cc->tcg_ops->tlb_fill(cs, addr, fault_size, access_type,
1531069cfe77SRichard Henderson                                            mmu_idx, nonfault, retaddr)) {
1532069cfe77SRichard Henderson                 /* Non-faulting page table read failed.  */
1533069cfe77SRichard Henderson                 *phost = NULL;
1534af803a4fSRichard Henderson                 *pfull = NULL;
1535069cfe77SRichard Henderson                 return TLB_INVALID_MASK;
1536069cfe77SRichard Henderson             }
1537069cfe77SRichard Henderson 
153803a98189SDavid Hildenbrand             /* TLB resize via tlb_fill may have moved the entry.  */
1539af803a4fSRichard Henderson             index = tlb_index(env, mmu_idx, addr);
154003a98189SDavid Hildenbrand             entry = tlb_entry(env, mmu_idx, addr);
1541c3c8bf57SRichard Henderson 
1542c3c8bf57SRichard Henderson             /*
1543c3c8bf57SRichard Henderson              * With PAGE_WRITE_INV, we set TLB_INVALID_MASK immediately,
1544c3c8bf57SRichard Henderson              * to force the next access through tlb_fill.  We've just
1545c3c8bf57SRichard Henderson              * called tlb_fill, so we know that this entry *is* valid.
1546c3c8bf57SRichard Henderson              */
1547c3c8bf57SRichard Henderson             flags &= ~TLB_INVALID_MASK;
1548d9bb58e5SYang Zhong         }
15490b3c75adSRichard Henderson         tlb_addr = tlb_read_idx(entry, access_type);
155003a98189SDavid Hildenbrand     }
1551c3c8bf57SRichard Henderson     flags &= tlb_addr;
155203a98189SDavid Hildenbrand 
155358e8f1f6SRichard Henderson     *pfull = full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
155458e8f1f6SRichard Henderson     flags |= full->slow_flags[access_type];
1555af803a4fSRichard Henderson 
1556069cfe77SRichard Henderson     /* Fold all "mmio-like" bits into TLB_MMIO.  This is not RAM.  */
1557*6d03226bSAlex Bennée     if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY)) ||
1558*6d03226bSAlex Bennée         (access_type != MMU_INST_FETCH && force_mmio)) {
1560069cfe77SRichard Henderson         *phost = NULL;
1561069cfe77SRichard Henderson         return TLB_MMIO;
1562fef39ccdSDavid Hildenbrand     }
1563fef39ccdSDavid Hildenbrand 
1564069cfe77SRichard Henderson     /* Everything else is RAM. */
1565069cfe77SRichard Henderson     *phost = (void *)((uintptr_t)addr + entry->addend);
1566069cfe77SRichard Henderson     return flags;
1567069cfe77SRichard Henderson }
1568069cfe77SRichard Henderson 
15694f8f4127SAnton Johansson int probe_access_full(CPUArchState *env, vaddr addr, int size,
1570069cfe77SRichard Henderson                       MMUAccessType access_type, int mmu_idx,
1571af803a4fSRichard Henderson                       bool nonfault, void **phost, CPUTLBEntryFull **pfull,
1572af803a4fSRichard Henderson                       uintptr_t retaddr)
1573069cfe77SRichard Henderson {
1574d507e6c5SRichard Henderson     int flags = probe_access_internal(env, addr, size, access_type, mmu_idx,
1575*6d03226bSAlex Bennée                                       nonfault, phost, pfull, retaddr, true);
1576069cfe77SRichard Henderson 
1577069cfe77SRichard Henderson     /* Handle clean RAM pages.  */
1578069cfe77SRichard Henderson     if (unlikely(flags & TLB_NOTDIRTY)) {
1579af803a4fSRichard Henderson         notdirty_write(env_cpu(env), addr, 1, *pfull, retaddr);
1580069cfe77SRichard Henderson         flags &= ~TLB_NOTDIRTY;
1581069cfe77SRichard Henderson     }
1582069cfe77SRichard Henderson 
1583069cfe77SRichard Henderson     return flags;
1584069cfe77SRichard Henderson }
1585069cfe77SRichard Henderson 
1586*6d03226bSAlex Bennée int probe_access_full_mmu(CPUArchState *env, vaddr addr, int size,
1587*6d03226bSAlex Bennée                           MMUAccessType access_type, int mmu_idx,
1588*6d03226bSAlex Bennée                           void **phost, CPUTLBEntryFull **pfull)
1589*6d03226bSAlex Bennée {
1590*6d03226bSAlex Bennée     void *discard_phost;
1591*6d03226bSAlex Bennée     CPUTLBEntryFull *discard_tlb;
1592*6d03226bSAlex Bennée 
1593*6d03226bSAlex Bennée     /* privately handle users that don't need full results */
1594*6d03226bSAlex Bennée     phost = phost ? phost : &discard_phost;
1595*6d03226bSAlex Bennée     pfull = pfull ? pfull : &discard_tlb;
1596*6d03226bSAlex Bennée 
1597*6d03226bSAlex Bennée     int flags = probe_access_internal(env, addr, size, access_type, mmu_idx,
1598*6d03226bSAlex Bennée                                       true, phost, pfull, 0, false);
1599*6d03226bSAlex Bennée 
1600*6d03226bSAlex Bennée     /* Handle clean RAM pages.  */
1601*6d03226bSAlex Bennée     if (unlikely(flags & TLB_NOTDIRTY)) {
1602*6d03226bSAlex Bennée         notdirty_write(env_cpu(env), addr, 1, *pfull, 0);
1603*6d03226bSAlex Bennée         flags &= ~TLB_NOTDIRTY;
1604*6d03226bSAlex Bennée     }
1605*6d03226bSAlex Bennée 
1606*6d03226bSAlex Bennée     return flags;
1607*6d03226bSAlex Bennée }
1608*6d03226bSAlex Bennée 
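/*
 * Usage sketch (hypothetical, not built): a target page-table walker
 * reading an 8-byte PTE without triggering plugin memory callbacks.
 * Returning 0 for MMIO or unmapped PTEs is a simplification; a real
 * walker would fault or fall back to an MMIO read.
 */
#if 0
static uint64_t example_read_pte(CPUArchState *env, vaddr pte_addr)
{
    void *host;
    int flags = probe_access_full_mmu(env, pte_addr, 8, MMU_DATA_LOAD,
                                      cpu_mmu_index(env, false),
                                      &host, NULL);

    if (flags & (TLB_INVALID_MASK | TLB_MMIO)) {
        return 0;
    }
    return ldq_p(host);
}
#endif
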
16094f8f4127SAnton Johansson int probe_access_flags(CPUArchState *env, vaddr addr, int size,
1610af803a4fSRichard Henderson                        MMUAccessType access_type, int mmu_idx,
1611af803a4fSRichard Henderson                        bool nonfault, void **phost, uintptr_t retaddr)
1612af803a4fSRichard Henderson {
1613af803a4fSRichard Henderson     CPUTLBEntryFull *full;
16141770b2f2SDaniel Henrique Barboza     int flags;
1615af803a4fSRichard Henderson 
16161770b2f2SDaniel Henrique Barboza     g_assert(-(addr | TARGET_PAGE_MASK) >= size);
16171770b2f2SDaniel Henrique Barboza 
16181770b2f2SDaniel Henrique Barboza     flags = probe_access_internal(env, addr, size, access_type, mmu_idx,
1619*6d03226bSAlex Bennée                                   nonfault, phost, &full, retaddr, true);
16201770b2f2SDaniel Henrique Barboza 
16211770b2f2SDaniel Henrique Barboza     /* Handle clean RAM pages. */
16221770b2f2SDaniel Henrique Barboza     if (unlikely(flags & TLB_NOTDIRTY)) {
16231770b2f2SDaniel Henrique Barboza         notdirty_write(env_cpu(env), addr, 1, full, retaddr);
16241770b2f2SDaniel Henrique Barboza         flags &= ~TLB_NOTDIRTY;
16251770b2f2SDaniel Henrique Barboza     }
16261770b2f2SDaniel Henrique Barboza 
16271770b2f2SDaniel Henrique Barboza     return flags;
1628af803a4fSRichard Henderson }
1629af803a4fSRichard Henderson 
16304f8f4127SAnton Johansson void *probe_access(CPUArchState *env, vaddr addr, int size,
1631069cfe77SRichard Henderson                    MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
1632069cfe77SRichard Henderson {
1633af803a4fSRichard Henderson     CPUTLBEntryFull *full;
1634069cfe77SRichard Henderson     void *host;
1635069cfe77SRichard Henderson     int flags;
1636069cfe77SRichard Henderson 
1637069cfe77SRichard Henderson     g_assert(-(addr | TARGET_PAGE_MASK) >= size);
1638069cfe77SRichard Henderson 
1639069cfe77SRichard Henderson     flags = probe_access_internal(env, addr, size, access_type, mmu_idx,
1640*6d03226bSAlex Bennée                                   false, &host, &full, retaddr, true);
1641069cfe77SRichard Henderson 
1642069cfe77SRichard Henderson     /* Per the interface, size == 0 merely faults the access. */
1643069cfe77SRichard Henderson     if (size == 0) {
164473bc0bd4SRichard Henderson         return NULL;
164573bc0bd4SRichard Henderson     }
164673bc0bd4SRichard Henderson 
1647069cfe77SRichard Henderson     if (unlikely(flags & (TLB_NOTDIRTY | TLB_WATCHPOINT))) {
164803a98189SDavid Hildenbrand         /* Handle watchpoints.  */
1649069cfe77SRichard Henderson         if (flags & TLB_WATCHPOINT) {
1650069cfe77SRichard Henderson             int wp_access = (access_type == MMU_DATA_STORE
1651069cfe77SRichard Henderson                              ? BP_MEM_WRITE : BP_MEM_READ);
165203a98189SDavid Hildenbrand             cpu_check_watchpoint(env_cpu(env), addr, size,
165325d3ec58SRichard Henderson                                  full->attrs, wp_access, retaddr);
1654d9bb58e5SYang Zhong         }
1655fef39ccdSDavid Hildenbrand 
165673bc0bd4SRichard Henderson         /* Handle clean RAM pages.  */
1657069cfe77SRichard Henderson         if (flags & TLB_NOTDIRTY) {
165825d3ec58SRichard Henderson             notdirty_write(env_cpu(env), addr, 1, full, retaddr);
165973bc0bd4SRichard Henderson         }
1660fef39ccdSDavid Hildenbrand     }
1661fef39ccdSDavid Hildenbrand 
1662069cfe77SRichard Henderson     return host;
1663d9bb58e5SYang Zhong }
1664d9bb58e5SYang Zhong 
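/*
 * Usage sketch (hypothetical helper, not part of this file): obtain a
 * host pointer for an 8-byte, page-contained guest store, faulting via
 * the usual tlb_fill path if the page is unmapped or not writable.
 */
#if 0
static void example_store8(CPUArchState *env, vaddr addr,
                           uint64_t val, uintptr_t ra)
{
    void *host = probe_access(env, addr, 8, MMU_DATA_STORE,
                              cpu_mmu_index(env, false), ra);

    if (host) {
        memcpy(host, &val, 8);  /* direct RAM access, host byte order */
    }
    /* host == NULL means MMIO; a real helper would use the slow path. */
}
#endif
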
16654811e909SRichard Henderson void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
16664811e909SRichard Henderson                         MMUAccessType access_type, int mmu_idx)
16674811e909SRichard Henderson {
1668af803a4fSRichard Henderson     CPUTLBEntryFull *full;
1669069cfe77SRichard Henderson     void *host;
1670069cfe77SRichard Henderson     int flags;
16714811e909SRichard Henderson 
1672069cfe77SRichard Henderson     flags = probe_access_internal(env, addr, 0, access_type,
1673*6d03226bSAlex Bennée                                   mmu_idx, true, &host, &full, 0, false);
1674069cfe77SRichard Henderson 
1675069cfe77SRichard Henderson     /* No combination of flags is expected by the caller. */
1676069cfe77SRichard Henderson     return flags ? NULL : host;
16774811e909SRichard Henderson }
16784811e909SRichard Henderson 
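/*
 * Usage sketch (hypothetical, not built): peek at one byte of guest
 * memory without faulting.  NULL means the page is unmapped, MMIO, or
 * otherwise not plain RAM; the zero fallback is a made-up choice.
 */
#if 0
static uint8_t example_peek_byte(CPUArchState *env, abi_ptr addr)
{
    uint8_t *p = tlb_vaddr_to_host(env, addr, MMU_DATA_LOAD,
                                   cpu_mmu_index(env, false));
    return p ? *p : 0;
}
#endif
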
16797e0d9973SRichard Henderson /*
16807e0d9973SRichard Henderson  * Return a ram_addr_t for the virtual address for execution.
16817e0d9973SRichard Henderson  *
16827e0d9973SRichard Henderson  * Return -1 if we can't translate and execute from an entire page
16837e0d9973SRichard Henderson  * of RAM.  This will force us to execute by loading and translating
16847e0d9973SRichard Henderson  * one insn at a time, without caching.
16857e0d9973SRichard Henderson  *
16867e0d9973SRichard Henderson  * NOTE: This function will trigger an exception if the page is
16877e0d9973SRichard Henderson  * not executable.
16887e0d9973SRichard Henderson  */
16894f8f4127SAnton Johansson tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
16907e0d9973SRichard Henderson                                         void **hostp)
16917e0d9973SRichard Henderson {
1692af803a4fSRichard Henderson     CPUTLBEntryFull *full;
16937e0d9973SRichard Henderson     void *p;
16947e0d9973SRichard Henderson 
16957e0d9973SRichard Henderson     (void)probe_access_internal(env, addr, 1, MMU_INST_FETCH,
1696*6d03226bSAlex Bennée                                 cpu_mmu_index(env, true), false,
1697*6d03226bSAlex Bennée                                 &p, &full, 0, false);
16987e0d9973SRichard Henderson     if (p == NULL) {
16997e0d9973SRichard Henderson         return -1;
17007e0d9973SRichard Henderson     }
1701ac01ec6fSWeiwei Li 
1702ac01ec6fSWeiwei Li     if (full->lg_page_size < TARGET_PAGE_BITS) {
1703ac01ec6fSWeiwei Li         return -1;
1704ac01ec6fSWeiwei Li     }
1705ac01ec6fSWeiwei Li 
17067e0d9973SRichard Henderson     if (hostp) {
17077e0d9973SRichard Henderson         *hostp = p;
17087e0d9973SRichard Henderson     }
17097e0d9973SRichard Henderson     return qemu_ram_addr_from_host_nofail(p);
17107e0d9973SRichard Henderson }
17117e0d9973SRichard Henderson 
1712cdfac37bSRichard Henderson /* Load/store with atomicity primitives. */
1713cdfac37bSRichard Henderson #include "ldst_atomicity.c.inc"
1714cdfac37bSRichard Henderson 
1715235537faSAlex Bennée #ifdef CONFIG_PLUGIN
1716235537faSAlex Bennée /*
1717235537faSAlex Bennée  * Perform a TLB lookup and populate the qemu_plugin_hwaddr structure.
1718235537faSAlex Bennée  * This should be a hot path, as we will have just looked this address up
1719235537faSAlex Bennée  * in the softmmu lookup code (or helper). We don't handle re-fills or
1720235537faSAlex Bennée  * check the victim table. This is purely informational.
1721235537faSAlex Bennée  *
17222f3a57eeSAlex Bennée  * This almost never fails, as the memory access being instrumented
17232f3a57eeSAlex Bennée  * should have just filled the TLB. The one corner case is io_writex,
17242f3a57eeSAlex Bennée  * which can cause TLB flushes and potential resizing of the TLBs,
1725570ef309SAlex Bennée  * losing the information we need. In those cases we recover the data
172625d3ec58SRichard Henderson  * from a saved copy of the CPUTLBEntryFull. As long as this always
1727570ef309SAlex Bennée  * occurs on the same thread (which a mem callback will be), this is safe.
1728235537faSAlex Bennée  */
1729235537faSAlex Bennée 
1730732d5487SAnton Johansson bool tlb_plugin_lookup(CPUState *cpu, vaddr addr, int mmu_idx,
1731235537faSAlex Bennée                        bool is_store, struct qemu_plugin_hwaddr *data)
1732235537faSAlex Bennée {
1733235537faSAlex Bennée     CPUArchState *env = cpu->env_ptr;
1734235537faSAlex Bennée     CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
1735235537faSAlex Bennée     uintptr_t index = tlb_index(env, mmu_idx, addr);
17369e39de98SAnton Johansson     uint64_t tlb_addr = is_store ? tlb_addr_write(tlbe) : tlbe->addr_read;
1737235537faSAlex Bennée 
1738235537faSAlex Bennée     if (likely(tlb_hit(tlb_addr, addr))) {
1739235537faSAlex Bennée         /* We must have an iotlb entry for MMIO */
1740235537faSAlex Bennée         if (tlb_addr & TLB_MMIO) {
174125d3ec58SRichard Henderson             CPUTLBEntryFull *full;
174225d3ec58SRichard Henderson             full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
1743235537faSAlex Bennée             data->is_io = true;
174425d3ec58SRichard Henderson             data->v.io.section =
174525d3ec58SRichard Henderson                 iotlb_to_section(cpu, full->xlat_section, full->attrs);
174625d3ec58SRichard Henderson             data->v.io.offset = (full->xlat_section & TARGET_PAGE_MASK) + addr;
1747235537faSAlex Bennée         } else {
1748235537faSAlex Bennée             data->is_io = false;
17492d932039SAlex Bennée             data->v.ram.hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
1750235537faSAlex Bennée         }
1751235537faSAlex Bennée         return true;
17522f3a57eeSAlex Bennée     } else {
17532f3a57eeSAlex Bennée         SavedIOTLB *saved = &cpu->saved_iotlb;
17542f3a57eeSAlex Bennée         data->is_io = true;
17552f3a57eeSAlex Bennée         data->v.io.section = saved->section;
17562f3a57eeSAlex Bennée         data->v.io.offset = saved->mr_offset;
17572f3a57eeSAlex Bennée         return true;
1758235537faSAlex Bennée     }
1759235537faSAlex Bennée }
1760235537faSAlex Bennée 
1761235537faSAlex Bennée #endif
1762235537faSAlex Bennée 
176308dff435SRichard Henderson /*
17648cfdacaaSRichard Henderson  * Probe for a load/store operation.
17658cfdacaaSRichard Henderson  * Return the host address and flags in the MMULookupPageData.
17668cfdacaaSRichard Henderson  */
17678cfdacaaSRichard Henderson 
17688cfdacaaSRichard Henderson typedef struct MMULookupPageData {
17698cfdacaaSRichard Henderson     CPUTLBEntryFull *full;
17708cfdacaaSRichard Henderson     void *haddr;
1771fb2c53cbSAnton Johansson     vaddr addr;
17728cfdacaaSRichard Henderson     int flags;
17738cfdacaaSRichard Henderson     int size;
17748cfdacaaSRichard Henderson } MMULookupPageData;
17758cfdacaaSRichard Henderson 
17768cfdacaaSRichard Henderson typedef struct MMULookupLocals {
17778cfdacaaSRichard Henderson     MMULookupPageData page[2];
17788cfdacaaSRichard Henderson     MemOp memop;
17798cfdacaaSRichard Henderson     int mmu_idx;
17808cfdacaaSRichard Henderson } MMULookupLocals;
17818cfdacaaSRichard Henderson 
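/*
 * Note: page[0] always describes the start of the access; page[1] is
 * used only when the access crosses a page boundary, and its size
 * remains 0 otherwise.
 */
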
17828cfdacaaSRichard Henderson /**
17838cfdacaaSRichard Henderson  * mmu_lookup1: translate one page
17848cfdacaaSRichard Henderson  * @env: cpu context
17858cfdacaaSRichard Henderson  * @data: lookup parameters
17868cfdacaaSRichard Henderson  * @mmu_idx: virtual address context
17878cfdacaaSRichard Henderson  * @access_type: load/store/code
17888cfdacaaSRichard Henderson  * @ra: return address into tcg generated code, or 0
17898cfdacaaSRichard Henderson  *
17908cfdacaaSRichard Henderson  * Resolve the translation for the one page at @data.addr, filling in
17918cfdacaaSRichard Henderson  * the rest of @data with the results.  If the translation fails,
17928cfdacaaSRichard Henderson  * tlb_fill will longjmp out.  Return true if the softmmu tlb for
17938cfdacaaSRichard Henderson  * @mmu_idx may have resized.
17948cfdacaaSRichard Henderson  */
17958cfdacaaSRichard Henderson static bool mmu_lookup1(CPUArchState *env, MMULookupPageData *data,
17968cfdacaaSRichard Henderson                         int mmu_idx, MMUAccessType access_type, uintptr_t ra)
17978cfdacaaSRichard Henderson {
1798fb2c53cbSAnton Johansson     vaddr addr = data->addr;
17998cfdacaaSRichard Henderson     uintptr_t index = tlb_index(env, mmu_idx, addr);
18008cfdacaaSRichard Henderson     CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
18019e39de98SAnton Johansson     uint64_t tlb_addr = tlb_read_idx(entry, access_type);
18028cfdacaaSRichard Henderson     bool maybe_resized = false;
180358e8f1f6SRichard Henderson     CPUTLBEntryFull *full;
180458e8f1f6SRichard Henderson     int flags;
18058cfdacaaSRichard Henderson 
18068cfdacaaSRichard Henderson     /* If the TLB entry is for a different page, reload and try again.  */
18078cfdacaaSRichard Henderson     if (!tlb_hit(tlb_addr, addr)) {
18088cfdacaaSRichard Henderson         if (!victim_tlb_hit(env, mmu_idx, index, access_type,
18098cfdacaaSRichard Henderson                             addr & TARGET_PAGE_MASK)) {
18108cfdacaaSRichard Henderson             tlb_fill(env_cpu(env), addr, data->size, access_type, mmu_idx, ra);
18118cfdacaaSRichard Henderson             maybe_resized = true;
18128cfdacaaSRichard Henderson             index = tlb_index(env, mmu_idx, addr);
18138cfdacaaSRichard Henderson             entry = tlb_entry(env, mmu_idx, addr);
18148cfdacaaSRichard Henderson         }
18158cfdacaaSRichard Henderson         tlb_addr = tlb_read_idx(entry, access_type) & ~TLB_INVALID_MASK;
18168cfdacaaSRichard Henderson     }
18178cfdacaaSRichard Henderson 
181858e8f1f6SRichard Henderson     full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
181958e8f1f6SRichard Henderson     flags = tlb_addr & (TLB_FLAGS_MASK & ~TLB_FORCE_SLOW);
182058e8f1f6SRichard Henderson     flags |= full->slow_flags[access_type];
182158e8f1f6SRichard Henderson 
182258e8f1f6SRichard Henderson     data->full = full;
182358e8f1f6SRichard Henderson     data->flags = flags;
18248cfdacaaSRichard Henderson     /* Compute haddr speculatively; depending on flags it might be invalid. */
18258cfdacaaSRichard Henderson     data->haddr = (void *)((uintptr_t)addr + entry->addend);
18268cfdacaaSRichard Henderson 
18278cfdacaaSRichard Henderson     return maybe_resized;
18288cfdacaaSRichard Henderson }
18298cfdacaaSRichard Henderson 
18308cfdacaaSRichard Henderson /**
18318cfdacaaSRichard Henderson  * mmu_watch_or_dirty
18328cfdacaaSRichard Henderson  * @env: cpu context
18338cfdacaaSRichard Henderson  * @data: lookup parameters
18348cfdacaaSRichard Henderson  * @access_type: load/store/code
18358cfdacaaSRichard Henderson  * @ra: return address into tcg generated code, or 0
18368cfdacaaSRichard Henderson  *
18378cfdacaaSRichard Henderson  * Trigger watchpoints for @data.addr:@data.size;
18388cfdacaaSRichard Henderson  * record writes to protected clean pages.
18398cfdacaaSRichard Henderson  */
18408cfdacaaSRichard Henderson static void mmu_watch_or_dirty(CPUArchState *env, MMULookupPageData *data,
18418cfdacaaSRichard Henderson                                MMUAccessType access_type, uintptr_t ra)
18428cfdacaaSRichard Henderson {
18438cfdacaaSRichard Henderson     CPUTLBEntryFull *full = data->full;
1844fb2c53cbSAnton Johansson     vaddr addr = data->addr;
18458cfdacaaSRichard Henderson     int flags = data->flags;
18468cfdacaaSRichard Henderson     int size = data->size;
18478cfdacaaSRichard Henderson 
18488cfdacaaSRichard Henderson     /* On watchpoint hit, this will longjmp out.  */
18498cfdacaaSRichard Henderson     if (flags & TLB_WATCHPOINT) {
18508cfdacaaSRichard Henderson         int wp = access_type == MMU_DATA_STORE ? BP_MEM_WRITE : BP_MEM_READ;
18518cfdacaaSRichard Henderson         cpu_check_watchpoint(env_cpu(env), addr, size, full->attrs, wp, ra);
18528cfdacaaSRichard Henderson         flags &= ~TLB_WATCHPOINT;
18538cfdacaaSRichard Henderson     }
18548cfdacaaSRichard Henderson 
18558cfdacaaSRichard Henderson     /* Note that notdirty is only set for writes. */
18568cfdacaaSRichard Henderson     if (flags & TLB_NOTDIRTY) {
18578cfdacaaSRichard Henderson         notdirty_write(env_cpu(env), addr, size, full, ra);
18588cfdacaaSRichard Henderson         flags &= ~TLB_NOTDIRTY;
18598cfdacaaSRichard Henderson     }
18608cfdacaaSRichard Henderson     data->flags = flags;
18618cfdacaaSRichard Henderson }
18628cfdacaaSRichard Henderson 
18638cfdacaaSRichard Henderson /**
18648cfdacaaSRichard Henderson  * mmu_lookup: translate page(s)
18658cfdacaaSRichard Henderson  * @env: cpu context
18668cfdacaaSRichard Henderson  * @addr: virtual address
18678cfdacaaSRichard Henderson  * @oi: combined mmu_idx and MemOp
18688cfdacaaSRichard Henderson  * @ra: return address into tcg generated code, or 0
18698cfdacaaSRichard Henderson  * @access_type: load/store/code
18708cfdacaaSRichard Henderson  * @l: output result
18718cfdacaaSRichard Henderson  *
18728cfdacaaSRichard Henderson  * Resolve the translation for the page(s) beginning at @addr, for MemOp.size
18738cfdacaaSRichard Henderson  * bytes.  Return true if the lookup crosses a page boundary.
18748cfdacaaSRichard Henderson  */
1875fb2c53cbSAnton Johansson static bool mmu_lookup(CPUArchState *env, vaddr addr, MemOpIdx oi,
18768cfdacaaSRichard Henderson                        uintptr_t ra, MMUAccessType type, MMULookupLocals *l)
18778cfdacaaSRichard Henderson {
18788cfdacaaSRichard Henderson     unsigned a_bits;
18798cfdacaaSRichard Henderson     bool crosspage;
18808cfdacaaSRichard Henderson     int flags;
18818cfdacaaSRichard Henderson 
18828cfdacaaSRichard Henderson     l->memop = get_memop(oi);
18838cfdacaaSRichard Henderson     l->mmu_idx = get_mmuidx(oi);
18848cfdacaaSRichard Henderson 
18858cfdacaaSRichard Henderson     tcg_debug_assert(l->mmu_idx < NB_MMU_MODES);
18868cfdacaaSRichard Henderson 
18878cfdacaaSRichard Henderson     /* Handle CPU specific unaligned behaviour */
18888cfdacaaSRichard Henderson     a_bits = get_alignment_bits(l->memop);
18898cfdacaaSRichard Henderson     if (addr & ((1 << a_bits) - 1)) {
18908cfdacaaSRichard Henderson         cpu_unaligned_access(env_cpu(env), addr, type, l->mmu_idx, ra);
18918cfdacaaSRichard Henderson     }
18928cfdacaaSRichard Henderson 
18938cfdacaaSRichard Henderson     l->page[0].addr = addr;
18948cfdacaaSRichard Henderson     l->page[0].size = memop_size(l->memop);
18958cfdacaaSRichard Henderson     l->page[1].addr = (addr + l->page[0].size - 1) & TARGET_PAGE_MASK;
18968cfdacaaSRichard Henderson     l->page[1].size = 0;
18978cfdacaaSRichard Henderson     crosspage = (addr ^ l->page[1].addr) & TARGET_PAGE_MASK;
18988cfdacaaSRichard Henderson 
18998cfdacaaSRichard Henderson     if (likely(!crosspage)) {
19008cfdacaaSRichard Henderson         mmu_lookup1(env, &l->page[0], l->mmu_idx, type, ra);
19018cfdacaaSRichard Henderson 
19028cfdacaaSRichard Henderson         flags = l->page[0].flags;
19038cfdacaaSRichard Henderson         if (unlikely(flags & (TLB_WATCHPOINT | TLB_NOTDIRTY))) {
19048cfdacaaSRichard Henderson             mmu_watch_or_dirty(env, &l->page[0], type, ra);
19058cfdacaaSRichard Henderson         }
19068cfdacaaSRichard Henderson         if (unlikely(flags & TLB_BSWAP)) {
19078cfdacaaSRichard Henderson             l->memop ^= MO_BSWAP;
19088cfdacaaSRichard Henderson         }
19098cfdacaaSRichard Henderson     } else {
19108cfdacaaSRichard Henderson         /* Finish computing the page-crossing split. */
19118cfdacaaSRichard Henderson         int size0 = l->page[1].addr - addr;
19128cfdacaaSRichard Henderson         l->page[1].size = l->page[0].size - size0;
19138cfdacaaSRichard Henderson         l->page[0].size = size0;
19148cfdacaaSRichard Henderson 
19158cfdacaaSRichard Henderson         /*
19168cfdacaaSRichard Henderson          * Lookup both pages, recognizing exceptions from either.  If the
19178cfdacaaSRichard Henderson          * second lookup potentially resized, refresh first CPUTLBEntryFull.
19188cfdacaaSRichard Henderson          */
19198cfdacaaSRichard Henderson         mmu_lookup1(env, &l->page[0], l->mmu_idx, type, ra);
19208cfdacaaSRichard Henderson         if (mmu_lookup1(env, &l->page[1], l->mmu_idx, type, ra)) {
19218cfdacaaSRichard Henderson             uintptr_t index = tlb_index(env, l->mmu_idx, addr);
19228cfdacaaSRichard Henderson             l->page[0].full = &env_tlb(env)->d[l->mmu_idx].fulltlb[index];
19238cfdacaaSRichard Henderson         }
19248cfdacaaSRichard Henderson 
19258cfdacaaSRichard Henderson         flags = l->page[0].flags | l->page[1].flags;
19268cfdacaaSRichard Henderson         if (unlikely(flags & (TLB_WATCHPOINT | TLB_NOTDIRTY))) {
19278cfdacaaSRichard Henderson             mmu_watch_or_dirty(env, &l->page[0], type, ra);
19288cfdacaaSRichard Henderson             mmu_watch_or_dirty(env, &l->page[1], type, ra);
19298cfdacaaSRichard Henderson         }
19308cfdacaaSRichard Henderson 
19318cfdacaaSRichard Henderson         /*
19328cfdacaaSRichard Henderson          * Since target/sparc is the only user of TLB_BSWAP, and all
19338cfdacaaSRichard Henderson          * Sparc accesses are aligned, any treatment across two pages
19348cfdacaaSRichard Henderson          * would be arbitrary.  Refuse it until there's a use.
19358cfdacaaSRichard Henderson          */
19368cfdacaaSRichard Henderson         tcg_debug_assert((flags & TLB_BSWAP) == 0);
19378cfdacaaSRichard Henderson     }
19388cfdacaaSRichard Henderson 
19398cfdacaaSRichard Henderson     return crosspage;
19408cfdacaaSRichard Henderson }
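
/*
 * A minimal sketch of the page-crossing split computed above, assuming
 * a hypothetical 4 KiB page; ex_split_access is illustrative, not a
 * QEMU API.  E.g. a 4-byte access at 0xffe yields 2 bytes on the first
 * page and 2 on the second.
 */
#include <stdint.h>

#define EX_PAGE_MASK (~(uint64_t)0xfff)    /* hypothetical 4 KiB pages */

static int ex_split_access(uint64_t addr, int size, int *size1)
{
    /* Page address of the last byte of the access. */
    uint64_t page1 = (addr + size - 1) & EX_PAGE_MASK;

    if (((addr ^ page1) & EX_PAGE_MASK) == 0) {
        *size1 = 0;                        /* contained within one page */
        return size;
    }
    *size1 = (int)(addr + size - page1);   /* bytes on the second page */
    return (int)(page1 - addr);            /* bytes on the first page */
}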
19418cfdacaaSRichard Henderson 
19428cfdacaaSRichard Henderson /*
194308dff435SRichard Henderson  * Probe for an atomic operation.  Do not allow unaligned or I/O
194408dff435SRichard Henderson  * operations to proceed.  Return the host address.
194508dff435SRichard Henderson  */
1946b0326eb9SAnton Johansson static void *atomic_mmu_lookup(CPUArchState *env, vaddr addr, MemOpIdx oi,
1947b0326eb9SAnton Johansson                                int size, uintptr_t retaddr)
1948d9bb58e5SYang Zhong {
1949b826044fSRichard Henderson     uintptr_t mmu_idx = get_mmuidx(oi);
195014776ab5STony Nguyen     MemOp mop = get_memop(oi);
1951d9bb58e5SYang Zhong     int a_bits = get_alignment_bits(mop);
195208dff435SRichard Henderson     uintptr_t index;
195308dff435SRichard Henderson     CPUTLBEntry *tlbe;
1954b0326eb9SAnton Johansson     vaddr tlb_addr;
195534d49937SPeter Maydell     void *hostaddr;
1956417aeaffSRichard Henderson     CPUTLBEntryFull *full;
1957d9bb58e5SYang Zhong 
1958b826044fSRichard Henderson     tcg_debug_assert(mmu_idx < NB_MMU_MODES);
1959b826044fSRichard Henderson 
1960d9bb58e5SYang Zhong     /* Adjust the given return address.  */
1961d9bb58e5SYang Zhong     retaddr -= GETPC_ADJ;
1962d9bb58e5SYang Zhong 
1963d9bb58e5SYang Zhong     /* Enforce guest required alignment.  */
1964d9bb58e5SYang Zhong     if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
1965d9bb58e5SYang Zhong         /* ??? Maybe indicate atomic op to cpu_unaligned_access */
196629a0af61SRichard Henderson         cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
1967d9bb58e5SYang Zhong                              mmu_idx, retaddr);
1968d9bb58e5SYang Zhong     }
1969d9bb58e5SYang Zhong 
1970d9bb58e5SYang Zhong     /* Enforce qemu required alignment.  */
197108dff435SRichard Henderson     if (unlikely(addr & (size - 1))) {
1972d9bb58e5SYang Zhong         /* We get here if guest alignment was not requested,
1973d9bb58e5SYang Zhong            or was not enforced by cpu_unaligned_access above.
1974d9bb58e5SYang Zhong            We might widen the access and emulate, but for now
1975d9bb58e5SYang Zhong            mark an exception and exit the cpu loop.  */
1976d9bb58e5SYang Zhong         goto stop_the_world;
1977d9bb58e5SYang Zhong     }
1978d9bb58e5SYang Zhong 
197908dff435SRichard Henderson     index = tlb_index(env, mmu_idx, addr);
198008dff435SRichard Henderson     tlbe = tlb_entry(env, mmu_idx, addr);
198108dff435SRichard Henderson 
1982d9bb58e5SYang Zhong     /* Check TLB entry and enforce page permissions.  */
198308dff435SRichard Henderson     tlb_addr = tlb_addr_write(tlbe);
1984334692bcSPeter Maydell     if (!tlb_hit(tlb_addr, addr)) {
19850b3c75adSRichard Henderson         if (!victim_tlb_hit(env, mmu_idx, index, MMU_DATA_STORE,
19860b3c75adSRichard Henderson                             addr & TARGET_PAGE_MASK)) {
198708dff435SRichard Henderson             tlb_fill(env_cpu(env), addr, size,
198808dff435SRichard Henderson                      MMU_DATA_STORE, mmu_idx, retaddr);
19896d967cb8SEmilio G. Cota             index = tlb_index(env, mmu_idx, addr);
19906d967cb8SEmilio G. Cota             tlbe = tlb_entry(env, mmu_idx, addr);
1991d9bb58e5SYang Zhong         }
1992403f290cSEmilio G. Cota         tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
1993d9bb58e5SYang Zhong     }
1994d9bb58e5SYang Zhong 
1995417aeaffSRichard Henderson     /*
1996417aeaffSRichard Henderson      * Let the guest notice RMW on a write-only page.
1997417aeaffSRichard Henderson      * We have just verified that the page is writable.
1998417aeaffSRichard Henderson      * Subpage lookups may have left TLB_INVALID_MASK set,
1999417aeaffSRichard Henderson      * but addr_read will only be -1 if PAGE_READ was unset.
2000417aeaffSRichard Henderson      */
2001417aeaffSRichard Henderson     if (unlikely(tlbe->addr_read == -1)) {
20027bedee32SRichard Henderson         tlb_fill(env_cpu(env), addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
200308dff435SRichard Henderson         /*
2004417aeaffSRichard Henderson          * Since we don't support reads and writes to different
2005417aeaffSRichard Henderson          * addresses, and we do have the proper page loaded for
2006417aeaffSRichard Henderson          * write, this shouldn't ever return.  But just in case,
2007417aeaffSRichard Henderson          * handle via stop-the-world.
200808dff435SRichard Henderson          */
200908dff435SRichard Henderson         goto stop_the_world;
201008dff435SRichard Henderson     }
2011187ba694SRichard Henderson     /* Collect tlb flags for read. */
2012417aeaffSRichard Henderson     tlb_addr |= tlbe->addr_read;
201308dff435SRichard Henderson 
201455df6fcfSPeter Maydell     /* Notice an I/O access or a needs-MMU-lookup access.  */
20150953674eSRichard Henderson     if (unlikely(tlb_addr & (TLB_MMIO | TLB_DISCARD_WRITE))) {
2016d9bb58e5SYang Zhong         /* There's really nothing that can be done to
2017d9bb58e5SYang Zhong            support this apart from stop-the-world.  */
2018d9bb58e5SYang Zhong         goto stop_the_world;
2019d9bb58e5SYang Zhong     }
2020d9bb58e5SYang Zhong 
202134d49937SPeter Maydell     hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
2022417aeaffSRichard Henderson     full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
202334d49937SPeter Maydell 
202434d49937SPeter Maydell     if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
2025417aeaffSRichard Henderson         notdirty_write(env_cpu(env), addr, size, full, retaddr);
2026417aeaffSRichard Henderson     }
2027417aeaffSRichard Henderson 
2028187ba694SRichard Henderson     if (unlikely(tlb_addr & TLB_FORCE_SLOW)) {
2029187ba694SRichard Henderson         int wp_flags = 0;
2030187ba694SRichard Henderson 
2031187ba694SRichard Henderson         if (full->slow_flags[MMU_DATA_STORE] & TLB_WATCHPOINT) {
2032187ba694SRichard Henderson             wp_flags |= BP_MEM_WRITE;
2033187ba694SRichard Henderson         }
2034187ba694SRichard Henderson         if (full->slow_flags[MMU_DATA_LOAD] & TLB_WATCHPOINT) {
2035187ba694SRichard Henderson             wp_flags |= BP_MEM_READ;
2036187ba694SRichard Henderson         }
2037187ba694SRichard Henderson         if (wp_flags) {
2038187ba694SRichard Henderson             cpu_check_watchpoint(env_cpu(env), addr, size,
2039187ba694SRichard Henderson                                  full->attrs, wp_flags, retaddr);
2040187ba694SRichard Henderson         }
204134d49937SPeter Maydell     }
204234d49937SPeter Maydell 
204334d49937SPeter Maydell     return hostaddr;
2044d9bb58e5SYang Zhong 
2045d9bb58e5SYang Zhong  stop_the_world:
204629a0af61SRichard Henderson     cpu_loop_exit_atomic(env_cpu(env), retaddr);
2047d9bb58e5SYang Zhong }
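
/*
 * A minimal sketch of the two alignment checks above; the ex_* helpers
 * are illustrative, not QEMU APIs.  The guest check uses the MemOp's
 * required alignment (2^a_bits bytes), while qemu additionally requires
 * natural alignment for the atomic operation itself.
 */
#include <stdbool.h>
#include <stdint.h>

static bool ex_guest_misaligned(uint64_t addr, int a_bits)
{
    return a_bits > 0 && (addr & (((uint64_t)1 << a_bits) - 1)) != 0;
}

static bool ex_qemu_misaligned(uint64_t addr, int size)
{
    return (addr & (size - 1)) != 0;   /* size is a power of two */
}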
2048d9bb58e5SYang Zhong 
2049eed56642SAlex Bennée /*
2050eed56642SAlex Bennée  * Load Helpers
2051eed56642SAlex Bennée  *
2052eed56642SAlex Bennée  * We support two different access types. SOFTMMU_CODE_ACCESS is
2053eed56642SAlex Bennée  * specifically for reading instructions from system memory. It is
2054eed56642SAlex Bennée  * called by the translation loop and in some helpers where the code
2055eed56642SAlex Bennée  * is disassembled. It shouldn't be called directly by guest code.
2056cdfac37bSRichard Henderson  *
2057eed56642SAlex Bennée  * For the benefit of TCG generated code, we want to avoid the
2058eed56642SAlex Bennée  * complication of ABI-specific return type promotion and always
2059eed56642SAlex Bennée  * return a value extended to the register size of the host. This is
2060eed56642SAlex Bennée  * tcg_target_long, except in the case of a 32-bit host and 64-bit
2061eed56642SAlex Bennée  * data, and for that we always have uint64_t.
2062eed56642SAlex Bennée  *
2063eed56642SAlex Bennée  * We don't bother with this widened value for SOFTMMU_CODE_ACCESS.
2064eed56642SAlex Bennée  */
2065eed56642SAlex Bennée 
20668cfdacaaSRichard Henderson /**
20678cfdacaaSRichard Henderson  * do_ld_mmio_beN:
20688cfdacaaSRichard Henderson  * @env: cpu context
20698cfdacaaSRichard Henderson  * @p: translation parameters
20708cfdacaaSRichard Henderson  * @ret_be: accumulated data
20718cfdacaaSRichard Henderson  * @mmu_idx: virtual address context
20728cfdacaaSRichard Henderson  * @ra: return address into tcg generated code, or 0
20738cfdacaaSRichard Henderson  *
20748cfdacaaSRichard Henderson  * Load @p->size bytes from @p->addr, which is memory-mapped i/o.
20758cfdacaaSRichard Henderson  * The bytes are concatenated in big-endian order with @ret_be.
20768cfdacaaSRichard Henderson  */
20778cfdacaaSRichard Henderson static uint64_t do_ld_mmio_beN(CPUArchState *env, MMULookupPageData *p,
20788cfdacaaSRichard Henderson                                uint64_t ret_be, int mmu_idx,
20798cfdacaaSRichard Henderson                                MMUAccessType type, uintptr_t ra)
20802dd92606SRichard Henderson {
20818cfdacaaSRichard Henderson     CPUTLBEntryFull *full = p->full;
2082fb2c53cbSAnton Johansson     vaddr addr = p->addr;
20838cfdacaaSRichard Henderson     int i, size = p->size;
20848cfdacaaSRichard Henderson 
20858cfdacaaSRichard Henderson     QEMU_IOTHREAD_LOCK_GUARD();
20868cfdacaaSRichard Henderson     for (i = 0; i < size; i++) {
20878cfdacaaSRichard Henderson         uint8_t x = io_readx(env, full, mmu_idx, addr + i, ra, type, MO_UB);
20888cfdacaaSRichard Henderson         ret_be = (ret_be << 8) | x;
20898cfdacaaSRichard Henderson     }
20908cfdacaaSRichard Henderson     return ret_be;
20918cfdacaaSRichard Henderson }
20928cfdacaaSRichard Henderson 
20938cfdacaaSRichard Henderson /**
20948cfdacaaSRichard Henderson  * do_ld_bytes_beN
20958cfdacaaSRichard Henderson  * @p: translation parameters
20968cfdacaaSRichard Henderson  * @ret_be: accumulated data
20978cfdacaaSRichard Henderson  *
20988cfdacaaSRichard Henderson  * Load @p->size bytes from @p->haddr, which is RAM.
20998cfdacaaSRichard Henderson  * The bytes are concatenated in big-endian order with @ret_be.
21008cfdacaaSRichard Henderson  */
21018cfdacaaSRichard Henderson static uint64_t do_ld_bytes_beN(MMULookupPageData *p, uint64_t ret_be)
21028cfdacaaSRichard Henderson {
21038cfdacaaSRichard Henderson     uint8_t *haddr = p->haddr;
21048cfdacaaSRichard Henderson     int i, size = p->size;
21058cfdacaaSRichard Henderson 
21068cfdacaaSRichard Henderson     for (i = 0; i < size; i++) {
21078cfdacaaSRichard Henderson         ret_be = (ret_be << 8) | haddr[i];
21088cfdacaaSRichard Henderson     }
21098cfdacaaSRichard Henderson     return ret_be;
21108cfdacaaSRichard Henderson }
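
/*
 * Both the MMIO path and the RAM path above build the result with the
 * same big-endian accumulation; a minimal sketch (ex_accum_beN is
 * illustrative, not a QEMU API).  Feeding bytes 0x12 0x34 0x56 with
 * ret_be == 0 yields 0x123456; a previous partial result simply shifts
 * further left.
 */
#include <stdint.h>

static uint64_t ex_accum_beN(const uint8_t *bytes, int size, uint64_t ret_be)
{
    for (int i = 0; i < size; i++) {
        ret_be = (ret_be << 8) | bytes[i];
    }
    return ret_be;
}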
21118cfdacaaSRichard Henderson 
2112cdfac37bSRichard Henderson /**
2113cdfac37bSRichard Henderson  * do_ld_parts_beN
2114cdfac37bSRichard Henderson  * @p: translation parameters
2115cdfac37bSRichard Henderson  * @ret_be: accumulated data
2116cdfac37bSRichard Henderson  *
2117cdfac37bSRichard Henderson  * As do_ld_bytes_beN, but atomically on each aligned part.
2118cdfac37bSRichard Henderson  */
2119cdfac37bSRichard Henderson static uint64_t do_ld_parts_beN(MMULookupPageData *p, uint64_t ret_be)
2120cdfac37bSRichard Henderson {
2121cdfac37bSRichard Henderson     void *haddr = p->haddr;
2122cdfac37bSRichard Henderson     int size = p->size;
2123cdfac37bSRichard Henderson 
2124cdfac37bSRichard Henderson     do {
2125cdfac37bSRichard Henderson         uint64_t x;
2126cdfac37bSRichard Henderson         int n;
2127cdfac37bSRichard Henderson 
2128cdfac37bSRichard Henderson         /*
2129cdfac37bSRichard Henderson          * Find minimum of alignment and size.
2130cdfac37bSRichard Henderson          * This is slightly stronger than required by MO_ATOM_SUBALIGN, which
2131cdfac37bSRichard Henderson          * would have only checked the low bits of addr|size once at the start,
2132cdfac37bSRichard Henderson          * but is just as easy.
2133cdfac37bSRichard Henderson          */
2134cdfac37bSRichard Henderson         switch (((uintptr_t)haddr | size) & 7) {
2135cdfac37bSRichard Henderson         case 4:
2136cdfac37bSRichard Henderson             x = cpu_to_be32(load_atomic4(haddr));
2137cdfac37bSRichard Henderson             ret_be = (ret_be << 32) | x;
2138cdfac37bSRichard Henderson             n = 4;
2139cdfac37bSRichard Henderson             break;
2140cdfac37bSRichard Henderson         case 2:
2141cdfac37bSRichard Henderson         case 6:
2142cdfac37bSRichard Henderson             x = cpu_to_be16(load_atomic2(haddr));
2143cdfac37bSRichard Henderson             ret_be = (ret_be << 16) | x;
2144cdfac37bSRichard Henderson             n = 2;
2145cdfac37bSRichard Henderson             break;
2146cdfac37bSRichard Henderson         default:
2147cdfac37bSRichard Henderson             x = *(uint8_t *)haddr;
2148cdfac37bSRichard Henderson             ret_be = (ret_be << 8) | x;
2149cdfac37bSRichard Henderson             n = 1;
2150cdfac37bSRichard Henderson             break;
2151cdfac37bSRichard Henderson         case 0:
2152cdfac37bSRichard Henderson             g_assert_not_reached();
2153cdfac37bSRichard Henderson         }
2154cdfac37bSRichard Henderson         haddr += n;
2155cdfac37bSRichard Henderson         size -= n;
2156cdfac37bSRichard Henderson     } while (size != 0);
2157cdfac37bSRichard Henderson     return ret_be;
2158cdfac37bSRichard Henderson }
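
/*
 * A minimal sketch of how the switch above picks the widest atomic
 * chunk from the combined low bits of address and remaining size
 * (ex_chunk_size is illustrative, not a QEMU API).  E.g. haddr % 8 == 2
 * with 6 bytes left gives (2 | 6) & 7 == 6, a 2-byte chunk; the next
 * iteration sees haddr % 8 == 4 with 4 bytes left, (4 | 4) & 7 == 4,
 * a 4-byte chunk.  The real switch asserts that the fully 8-aligned
 * case cannot occur here.
 */
#include <stdint.h>

static int ex_chunk_size(uintptr_t haddr, int size)
{
    switch ((haddr | (uintptr_t)size) & 7) {
    case 4:
        return 4;   /* 4-aligned address, multiple of 4 bytes remaining */
    case 2:
    case 6:
        return 2;   /* 2-aligned address, multiple of 2 bytes remaining */
    default:
        return 1;   /* odd address or odd byte count */
    }
}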
2159cdfac37bSRichard Henderson 
2160cdfac37bSRichard Henderson /**
2161cdfac37bSRichard Henderson  * do_ld_whole_be4
2162cdfac37bSRichard Henderson  * @p: translation parameters
2163cdfac37bSRichard Henderson  * @ret_be: accumulated data
2164cdfac37bSRichard Henderson  *
2165cdfac37bSRichard Henderson  * As do_ld_bytes_beN, but with one atomic load.
2166cdfac37bSRichard Henderson  * Four aligned bytes are guaranteed to cover the load.
2167cdfac37bSRichard Henderson  */
2168cdfac37bSRichard Henderson static uint64_t do_ld_whole_be4(MMULookupPageData *p, uint64_t ret_be)
2169cdfac37bSRichard Henderson {
2170cdfac37bSRichard Henderson     int o = p->addr & 3;
2171cdfac37bSRichard Henderson     uint32_t x = load_atomic4(p->haddr - o);
2172cdfac37bSRichard Henderson 
2173cdfac37bSRichard Henderson     x = cpu_to_be32(x);
2174cdfac37bSRichard Henderson     x <<= o * 8;
2175cdfac37bSRichard Henderson     x >>= (4 - p->size) * 8;
2176cdfac37bSRichard Henderson     return (ret_be << (p->size * 8)) | x;
2177cdfac37bSRichard Henderson }
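
/*
 * A worked sketch of the shift dance above (ex_extract_be4 is
 * illustrative, not a QEMU API).  With offset o == 1 and size == 2 the
 * aligned word, viewed big-endian, is 0xB0B1B2B3: shifting left by
 * o * 8 discards B0, shifting right by (4 - size) * 8 discards B3,
 * leaving 0x0000B1B2 to be merged below the accumulated bytes.
 */
#include <stdint.h>

static uint64_t ex_extract_be4(uint32_t aligned_be, int o, int size,
                               uint64_t ret_be)
{
    uint32_t x = aligned_be;

    x <<= o * 8;            /* drop the o bytes before the data */
    x >>= (4 - size) * 8;   /* drop the bytes after the data */
    return (ret_be << (size * 8)) | x;
}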
2178cdfac37bSRichard Henderson 
2179cdfac37bSRichard Henderson /**
2180cdfac37bSRichard Henderson  * do_ld_whole_be8
2181cdfac37bSRichard Henderson  * @p: translation parameters
2182cdfac37bSRichard Henderson  * @ret_be: accumulated data
2183cdfac37bSRichard Henderson  *
2184cdfac37bSRichard Henderson  * As do_ld_bytes_beN, but with one atomic load.
2185cdfac37bSRichard Henderson  * Eight aligned bytes are guaranteed to cover the load.
2186cdfac37bSRichard Henderson  */
2187cdfac37bSRichard Henderson static uint64_t do_ld_whole_be8(CPUArchState *env, uintptr_t ra,
2188cdfac37bSRichard Henderson                                 MMULookupPageData *p, uint64_t ret_be)
2189cdfac37bSRichard Henderson {
2190cdfac37bSRichard Henderson     int o = p->addr & 7;
2191cdfac37bSRichard Henderson     uint64_t x = load_atomic8_or_exit(env, ra, p->haddr - o);
2192cdfac37bSRichard Henderson 
2193cdfac37bSRichard Henderson     x = cpu_to_be64(x);
2194cdfac37bSRichard Henderson     x <<= o * 8;
2195cdfac37bSRichard Henderson     x >>= (8 - p->size) * 8;
2196cdfac37bSRichard Henderson     return (ret_be << (p->size * 8)) | x;
2197cdfac37bSRichard Henderson }
2198cdfac37bSRichard Henderson 
219935c653c4SRichard Henderson /**
220035c653c4SRichard Henderson  * do_ld_whole_be16
220135c653c4SRichard Henderson  * @p: translation parameters
220235c653c4SRichard Henderson  * @ret_be: accumulated data
220335c653c4SRichard Henderson  *
220435c653c4SRichard Henderson  * As do_ld_bytes_beN, but with one atomic load.
220535c653c4SRichard Henderson  * 16 aligned bytes are guaranteed to cover the load.
220635c653c4SRichard Henderson  */
220735c653c4SRichard Henderson static Int128 do_ld_whole_be16(CPUArchState *env, uintptr_t ra,
220835c653c4SRichard Henderson                                MMULookupPageData *p, uint64_t ret_be)
220935c653c4SRichard Henderson {
221035c653c4SRichard Henderson     int o = p->addr & 15;
221135c653c4SRichard Henderson     Int128 x, y = load_atomic16_or_exit(env, ra, p->haddr - o);
221235c653c4SRichard Henderson     int size = p->size;
221335c653c4SRichard Henderson 
221435c653c4SRichard Henderson     if (!HOST_BIG_ENDIAN) {
221535c653c4SRichard Henderson         y = bswap128(y);
221635c653c4SRichard Henderson     }
221735c653c4SRichard Henderson     y = int128_lshift(y, o * 8);
221835c653c4SRichard Henderson     y = int128_urshift(y, (16 - size) * 8);
221935c653c4SRichard Henderson     x = int128_make64(ret_be);
222035c653c4SRichard Henderson     x = int128_lshift(x, size * 8);
222135c653c4SRichard Henderson     return int128_or(x, y);
222235c653c4SRichard Henderson }
222335c653c4SRichard Henderson 
22248cfdacaaSRichard Henderson /*
22258cfdacaaSRichard Henderson  * Wrapper for the above.
22268cfdacaaSRichard Henderson  */
22278cfdacaaSRichard Henderson static uint64_t do_ld_beN(CPUArchState *env, MMULookupPageData *p,
2228cdfac37bSRichard Henderson                           uint64_t ret_be, int mmu_idx, MMUAccessType type,
2229cdfac37bSRichard Henderson                           MemOp mop, uintptr_t ra)
22308cfdacaaSRichard Henderson {
2231cdfac37bSRichard Henderson     MemOp atom;
2232cdfac37bSRichard Henderson     unsigned tmp, half_size;
2233cdfac37bSRichard Henderson 
22348cfdacaaSRichard Henderson     if (unlikely(p->flags & TLB_MMIO)) {
22358cfdacaaSRichard Henderson         return do_ld_mmio_beN(env, p, ret_be, mmu_idx, type, ra);
2236cdfac37bSRichard Henderson     }
2237cdfac37bSRichard Henderson 
2238cdfac37bSRichard Henderson     /*
2239cdfac37bSRichard Henderson      * It is a given that we cross a page and therefore there is no
2240cdfac37bSRichard Henderson      * atomicity for the load as a whole, but subobjects may need attention.
2241cdfac37bSRichard Henderson      */
2242cdfac37bSRichard Henderson     atom = mop & MO_ATOM_MASK;
2243cdfac37bSRichard Henderson     switch (atom) {
2244cdfac37bSRichard Henderson     case MO_ATOM_SUBALIGN:
2245cdfac37bSRichard Henderson         return do_ld_parts_beN(p, ret_be);
2246cdfac37bSRichard Henderson 
2247cdfac37bSRichard Henderson     case MO_ATOM_IFALIGN_PAIR:
2248cdfac37bSRichard Henderson     case MO_ATOM_WITHIN16_PAIR:
2249cdfac37bSRichard Henderson         tmp = mop & MO_SIZE;
2250cdfac37bSRichard Henderson         tmp = tmp ? tmp - 1 : 0;
2251cdfac37bSRichard Henderson         half_size = 1 << tmp;
2252cdfac37bSRichard Henderson         if (atom == MO_ATOM_IFALIGN_PAIR
2253cdfac37bSRichard Henderson             ? p->size == half_size
2254cdfac37bSRichard Henderson             : p->size >= half_size) {
2255cdfac37bSRichard Henderson             if (!HAVE_al8_fast && p->size < 4) {
2256cdfac37bSRichard Henderson                 return do_ld_whole_be4(p, ret_be);
22578cfdacaaSRichard Henderson             } else {
2258cdfac37bSRichard Henderson                 return do_ld_whole_be8(env, ra, p, ret_be);
2259cdfac37bSRichard Henderson             }
2260cdfac37bSRichard Henderson         }
2261cdfac37bSRichard Henderson         /* fall through */
2262cdfac37bSRichard Henderson 
2263cdfac37bSRichard Henderson     case MO_ATOM_IFALIGN:
2264cdfac37bSRichard Henderson     case MO_ATOM_WITHIN16:
2265cdfac37bSRichard Henderson     case MO_ATOM_NONE:
22668cfdacaaSRichard Henderson         return do_ld_bytes_beN(p, ret_be);
2267cdfac37bSRichard Henderson 
2268cdfac37bSRichard Henderson     default:
2269cdfac37bSRichard Henderson         g_assert_not_reached();
22708cfdacaaSRichard Henderson     }
22718cfdacaaSRichard Henderson }
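
/*
 * A minimal sketch of the half_size computation above (ex_half_size is
 * illustrative, not a QEMU API).  For a 2^n-byte access the "pair"
 * atomicity modes only require each 2^(n-1)-byte half to be atomic, so
 * MO_64 (n == 3) yields half_size == 4.
 */
static unsigned ex_half_size(unsigned n)   /* n is the MO_SIZE field */
{
    unsigned tmp = n ? n - 1 : 0;
    return 1u << tmp;   /* MO_8 -> 1, MO_16 -> 1, MO_32 -> 2, MO_64 -> 4 */
}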
22728cfdacaaSRichard Henderson 
227335c653c4SRichard Henderson /*
227435c653c4SRichard Henderson  * Wrapper for the above, for 8 < size < 16.
227535c653c4SRichard Henderson  */
227635c653c4SRichard Henderson static Int128 do_ld16_beN(CPUArchState *env, MMULookupPageData *p,
227735c653c4SRichard Henderson                           uint64_t a, int mmu_idx, MemOp mop, uintptr_t ra)
227835c653c4SRichard Henderson {
227935c653c4SRichard Henderson     int size = p->size;
228035c653c4SRichard Henderson     uint64_t b;
228135c653c4SRichard Henderson     MemOp atom;
228235c653c4SRichard Henderson 
228335c653c4SRichard Henderson     if (unlikely(p->flags & TLB_MMIO)) {
228435c653c4SRichard Henderson         p->size = size - 8;
228535c653c4SRichard Henderson         a = do_ld_mmio_beN(env, p, a, mmu_idx, MMU_DATA_LOAD, ra);
228635c653c4SRichard Henderson         p->addr += p->size;
228735c653c4SRichard Henderson         p->size = 8;
228835c653c4SRichard Henderson         b = do_ld_mmio_beN(env, p, 0, mmu_idx, MMU_DATA_LOAD, ra);
228935c653c4SRichard Henderson         return int128_make128(b, a);
229035c653c4SRichard Henderson     }
229135c653c4SRichard Henderson 
229235c653c4SRichard Henderson     /*
229335c653c4SRichard Henderson      * It is a given that we cross a page and therefore there is no
229435c653c4SRichard Henderson      * atomicity for the load as a whole, but subobjects may need attention.
229535c653c4SRichard Henderson      */
229635c653c4SRichard Henderson     atom = mop & MO_ATOM_MASK;
229735c653c4SRichard Henderson     switch (atom) {
229835c653c4SRichard Henderson     case MO_ATOM_SUBALIGN:
229935c653c4SRichard Henderson         p->size = size - 8;
230035c653c4SRichard Henderson         a = do_ld_parts_beN(p, a);
230135c653c4SRichard Henderson         p->haddr += size - 8;
230235c653c4SRichard Henderson         p->size = 8;
230335c653c4SRichard Henderson         b = do_ld_parts_beN(p, 0);
230435c653c4SRichard Henderson         break;
230535c653c4SRichard Henderson 
230635c653c4SRichard Henderson     case MO_ATOM_WITHIN16_PAIR:
230735c653c4SRichard Henderson         /* Since size > 8, this is the half that must be atomic. */
230835c653c4SRichard Henderson         return do_ld_whole_be16(env, ra, p, a);
230935c653c4SRichard Henderson 
231035c653c4SRichard Henderson     case MO_ATOM_IFALIGN_PAIR:
231135c653c4SRichard Henderson         /*
231235c653c4SRichard Henderson          * Since size > 8, both halves are misaligned,
231335c653c4SRichard Henderson          * and so neither is atomic.
231435c653c4SRichard Henderson          */
231535c653c4SRichard Henderson     case MO_ATOM_IFALIGN:
231635c653c4SRichard Henderson     case MO_ATOM_WITHIN16:
231735c653c4SRichard Henderson     case MO_ATOM_NONE:
231835c653c4SRichard Henderson         p->size = size - 8;
231935c653c4SRichard Henderson         a = do_ld_bytes_beN(p, a);
232035c653c4SRichard Henderson         b = ldq_be_p(p->haddr + size - 8);
232135c653c4SRichard Henderson         break;
232235c653c4SRichard Henderson 
232335c653c4SRichard Henderson     default:
232435c653c4SRichard Henderson         g_assert_not_reached();
232535c653c4SRichard Henderson     }
232635c653c4SRichard Henderson 
232735c653c4SRichard Henderson     return int128_make128(b, a);
232835c653c4SRichard Henderson }
232935c653c4SRichard Henderson 
23308cfdacaaSRichard Henderson static uint8_t do_ld_1(CPUArchState *env, MMULookupPageData *p, int mmu_idx,
23318cfdacaaSRichard Henderson                        MMUAccessType type, uintptr_t ra)
23328cfdacaaSRichard Henderson {
23338cfdacaaSRichard Henderson     if (unlikely(p->flags & TLB_MMIO)) {
23348cfdacaaSRichard Henderson         return io_readx(env, p->full, mmu_idx, p->addr, ra, type, MO_UB);
23358cfdacaaSRichard Henderson     } else {
23368cfdacaaSRichard Henderson         return *(uint8_t *)p->haddr;
23378cfdacaaSRichard Henderson     }
23388cfdacaaSRichard Henderson }
23398cfdacaaSRichard Henderson 
23408cfdacaaSRichard Henderson static uint16_t do_ld_2(CPUArchState *env, MMULookupPageData *p, int mmu_idx,
23418cfdacaaSRichard Henderson                         MMUAccessType type, MemOp memop, uintptr_t ra)
23428cfdacaaSRichard Henderson {
23438cfdacaaSRichard Henderson     uint64_t ret;
23448cfdacaaSRichard Henderson 
23458cfdacaaSRichard Henderson     if (unlikely(p->flags & TLB_MMIO)) {
23468cfdacaaSRichard Henderson         return io_readx(env, p->full, mmu_idx, p->addr, ra, type, memop);
23478cfdacaaSRichard Henderson     }
23488cfdacaaSRichard Henderson 
23498cfdacaaSRichard Henderson     /* Perform the load host endian, then swap if necessary. */
2350cdfac37bSRichard Henderson     ret = load_atom_2(env, ra, p->haddr, memop);
23518cfdacaaSRichard Henderson     if (memop & MO_BSWAP) {
23528cfdacaaSRichard Henderson         ret = bswap16(ret);
23538cfdacaaSRichard Henderson     }
23548cfdacaaSRichard Henderson     return ret;
23558cfdacaaSRichard Henderson }
23568cfdacaaSRichard Henderson 
23578cfdacaaSRichard Henderson static uint32_t do_ld_4(CPUArchState *env, MMULookupPageData *p, int mmu_idx,
23588cfdacaaSRichard Henderson                         MMUAccessType type, MemOp memop, uintptr_t ra)
23598cfdacaaSRichard Henderson {
23608cfdacaaSRichard Henderson     uint32_t ret;
23618cfdacaaSRichard Henderson 
23628cfdacaaSRichard Henderson     if (unlikely(p->flags & TLB_MMIO)) {
23638cfdacaaSRichard Henderson         return io_readx(env, p->full, mmu_idx, p->addr, ra, type, memop);
23648cfdacaaSRichard Henderson     }
23658cfdacaaSRichard Henderson 
23668cfdacaaSRichard Henderson     /* Perform the load host endian. */
2367cdfac37bSRichard Henderson     ret = load_atom_4(env, ra, p->haddr, memop);
23688cfdacaaSRichard Henderson     if (memop & MO_BSWAP) {
23698cfdacaaSRichard Henderson         ret = bswap32(ret);
23708cfdacaaSRichard Henderson     }
23718cfdacaaSRichard Henderson     return ret;
23728cfdacaaSRichard Henderson }
23738cfdacaaSRichard Henderson 
23748cfdacaaSRichard Henderson static uint64_t do_ld_8(CPUArchState *env, MMULookupPageData *p, int mmu_idx,
23758cfdacaaSRichard Henderson                         MMUAccessType type, MemOp memop, uintptr_t ra)
23768cfdacaaSRichard Henderson {
23778cfdacaaSRichard Henderson     uint64_t ret;
23788cfdacaaSRichard Henderson 
23798cfdacaaSRichard Henderson     if (unlikely(p->flags & TLB_MMIO)) {
23808cfdacaaSRichard Henderson         return io_readx(env, p->full, mmu_idx, p->addr, ra, type, memop);
23818cfdacaaSRichard Henderson     }
23828cfdacaaSRichard Henderson 
23838cfdacaaSRichard Henderson     /* Perform the load host endian. */
2384cdfac37bSRichard Henderson     ret = load_atom_8(env, ra, p->haddr, memop);
23858cfdacaaSRichard Henderson     if (memop & MO_BSWAP) {
23868cfdacaaSRichard Henderson         ret = bswap64(ret);
23878cfdacaaSRichard Henderson     }
23888cfdacaaSRichard Henderson     return ret;
23898cfdacaaSRichard Henderson }
23908cfdacaaSRichard Henderson 
2391fb2c53cbSAnton Johansson static uint8_t do_ld1_mmu(CPUArchState *env, vaddr addr, MemOpIdx oi,
23928cfdacaaSRichard Henderson                           uintptr_t ra, MMUAccessType access_type)
23938cfdacaaSRichard Henderson {
23948cfdacaaSRichard Henderson     MMULookupLocals l;
23958cfdacaaSRichard Henderson     bool crosspage;
23968cfdacaaSRichard Henderson 
2397f86e8f3dSRichard Henderson     cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
23988cfdacaaSRichard Henderson     crosspage = mmu_lookup(env, addr, oi, ra, access_type, &l);
23998cfdacaaSRichard Henderson     tcg_debug_assert(!crosspage);
24008cfdacaaSRichard Henderson 
24018cfdacaaSRichard Henderson     return do_ld_1(env, &l.page[0], l.mmu_idx, access_type, ra);
24022dd92606SRichard Henderson }
24032dd92606SRichard Henderson 
240424e46e6cSRichard Henderson tcg_target_ulong helper_ldub_mmu(CPUArchState *env, uint64_t addr,
24059002ffcbSRichard Henderson                                  MemOpIdx oi, uintptr_t retaddr)
2406eed56642SAlex Bennée {
24070cadc1edSRichard Henderson     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_8);
24088cfdacaaSRichard Henderson     return do_ld1_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD);
24092dd92606SRichard Henderson }
24102dd92606SRichard Henderson 
2411fb2c53cbSAnton Johansson static uint16_t do_ld2_mmu(CPUArchState *env, vaddr addr, MemOpIdx oi,
24128cfdacaaSRichard Henderson                            uintptr_t ra, MMUAccessType access_type)
24132dd92606SRichard Henderson {
24148cfdacaaSRichard Henderson     MMULookupLocals l;
24158cfdacaaSRichard Henderson     bool crosspage;
24168cfdacaaSRichard Henderson     uint16_t ret;
24178cfdacaaSRichard Henderson     uint8_t a, b;
24188cfdacaaSRichard Henderson 
2419f86e8f3dSRichard Henderson     cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
24208cfdacaaSRichard Henderson     crosspage = mmu_lookup(env, addr, oi, ra, access_type, &l);
24218cfdacaaSRichard Henderson     if (likely(!crosspage)) {
24228cfdacaaSRichard Henderson         return do_ld_2(env, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
24238cfdacaaSRichard Henderson     }
24248cfdacaaSRichard Henderson 
24258cfdacaaSRichard Henderson     a = do_ld_1(env, &l.page[0], l.mmu_idx, access_type, ra);
24268cfdacaaSRichard Henderson     b = do_ld_1(env, &l.page[1], l.mmu_idx, access_type, ra);
24278cfdacaaSRichard Henderson 
24288cfdacaaSRichard Henderson     if ((l.memop & MO_BSWAP) == MO_LE) {
24298cfdacaaSRichard Henderson         ret = a | (b << 8);
24308cfdacaaSRichard Henderson     } else {
24318cfdacaaSRichard Henderson         ret = b | (a << 8);
24328cfdacaaSRichard Henderson     }
24338cfdacaaSRichard Henderson     return ret;
2434eed56642SAlex Bennée }
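
/*
 * A minimal sketch of the two-byte reassembly above (ex_combine2 is
 * illustrative, not a QEMU API).  Byte a comes from the lower address,
 * so it is the low byte of a little-endian value and the high byte of
 * a big-endian one.
 */
#include <stdbool.h>
#include <stdint.h>

static uint16_t ex_combine2(uint8_t a, uint8_t b, bool little_endian)
{
    return little_endian ? (uint16_t)(a | (b << 8))
                         : (uint16_t)(b | (a << 8));
}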
2435eed56642SAlex Bennée 
243624e46e6cSRichard Henderson tcg_target_ulong helper_lduw_mmu(CPUArchState *env, uint64_t addr,
24379002ffcbSRichard Henderson                                  MemOpIdx oi, uintptr_t retaddr)
2438eed56642SAlex Bennée {
24390cadc1edSRichard Henderson     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
24408cfdacaaSRichard Henderson     return do_ld2_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD);
24412dd92606SRichard Henderson }
24422dd92606SRichard Henderson 
2443fb2c53cbSAnton Johansson static uint32_t do_ld4_mmu(CPUArchState *env, vaddr addr, MemOpIdx oi,
24448cfdacaaSRichard Henderson                            uintptr_t ra, MMUAccessType access_type)
24452dd92606SRichard Henderson {
24468cfdacaaSRichard Henderson     MMULookupLocals l;
24478cfdacaaSRichard Henderson     bool crosspage;
24488cfdacaaSRichard Henderson     uint32_t ret;
24498cfdacaaSRichard Henderson 
2450f86e8f3dSRichard Henderson     cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
24518cfdacaaSRichard Henderson     crosspage = mmu_lookup(env, addr, oi, ra, access_type, &l);
24528cfdacaaSRichard Henderson     if (likely(!crosspage)) {
24538cfdacaaSRichard Henderson         return do_ld_4(env, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
24548cfdacaaSRichard Henderson     }
24558cfdacaaSRichard Henderson 
2456cdfac37bSRichard Henderson     ret = do_ld_beN(env, &l.page[0], 0, l.mmu_idx, access_type, l.memop, ra);
2457cdfac37bSRichard Henderson     ret = do_ld_beN(env, &l.page[1], ret, l.mmu_idx, access_type, l.memop, ra);
24588cfdacaaSRichard Henderson     if ((l.memop & MO_BSWAP) == MO_LE) {
24598cfdacaaSRichard Henderson         ret = bswap32(ret);
24608cfdacaaSRichard Henderson     }
24618cfdacaaSRichard Henderson     return ret;
2462eed56642SAlex Bennée }
2463eed56642SAlex Bennée 
246424e46e6cSRichard Henderson tcg_target_ulong helper_ldul_mmu(CPUArchState *env, uint64_t addr,
24659002ffcbSRichard Henderson                                  MemOpIdx oi, uintptr_t retaddr)
2466eed56642SAlex Bennée {
24670cadc1edSRichard Henderson     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
24688cfdacaaSRichard Henderson     return do_ld4_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD);
24698cfdacaaSRichard Henderson }
24708cfdacaaSRichard Henderson 
2471fb2c53cbSAnton Johansson static uint64_t do_ld8_mmu(CPUArchState *env, vaddr addr, MemOpIdx oi,
24728cfdacaaSRichard Henderson                            uintptr_t ra, MMUAccessType access_type)
24738cfdacaaSRichard Henderson {
24748cfdacaaSRichard Henderson     MMULookupLocals l;
24758cfdacaaSRichard Henderson     bool crosspage;
24768cfdacaaSRichard Henderson     uint64_t ret;
24778cfdacaaSRichard Henderson 
2478f86e8f3dSRichard Henderson     cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
24798cfdacaaSRichard Henderson     crosspage = mmu_lookup(env, addr, oi, ra, access_type, &l);
24808cfdacaaSRichard Henderson     if (likely(!crosspage)) {
24818cfdacaaSRichard Henderson         return do_ld_8(env, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
24828cfdacaaSRichard Henderson     }
24838cfdacaaSRichard Henderson 
2484cdfac37bSRichard Henderson     ret = do_ld_beN(env, &l.page[0], 0, l.mmu_idx, access_type, l.memop, ra);
2485cdfac37bSRichard Henderson     ret = do_ld_beN(env, &l.page[1], ret, l.mmu_idx, access_type, l.memop, ra);
24868cfdacaaSRichard Henderson     if ((l.memop & MO_BSWAP) == MO_LE) {
24878cfdacaaSRichard Henderson         ret = bswap64(ret);
24888cfdacaaSRichard Henderson     }
24898cfdacaaSRichard Henderson     return ret;
2490eed56642SAlex Bennée }
2491eed56642SAlex Bennée 
249224e46e6cSRichard Henderson uint64_t helper_ldq_mmu(CPUArchState *env, uint64_t addr,
24939002ffcbSRichard Henderson                         MemOpIdx oi, uintptr_t retaddr)
2494eed56642SAlex Bennée {
24950cadc1edSRichard Henderson     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
24968cfdacaaSRichard Henderson     return do_ld8_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD);
2497eed56642SAlex Bennée }
2498eed56642SAlex Bennée 
2499eed56642SAlex Bennée /*
2500eed56642SAlex Bennée  * Provide signed versions of the load routines as well.  We can of course
2501eed56642SAlex Bennée  * avoid this for 64-bit data, or for 32-bit data on 32-bit host.
2502eed56642SAlex Bennée  */
2503eed56642SAlex Bennée 
250424e46e6cSRichard Henderson tcg_target_ulong helper_ldsb_mmu(CPUArchState *env, uint64_t addr,
25059002ffcbSRichard Henderson                                  MemOpIdx oi, uintptr_t retaddr)
2506eed56642SAlex Bennée {
25070cadc1edSRichard Henderson     return (int8_t)helper_ldub_mmu(env, addr, oi, retaddr);
2508eed56642SAlex Bennée }
2509eed56642SAlex Bennée 
251024e46e6cSRichard Henderson tcg_target_ulong helper_ldsw_mmu(CPUArchState *env, uint64_t addr,
25119002ffcbSRichard Henderson                                  MemOpIdx oi, uintptr_t retaddr)
2512eed56642SAlex Bennée {
25130cadc1edSRichard Henderson     return (int16_t)helper_lduw_mmu(env, addr, oi, retaddr);
2514eed56642SAlex Bennée }
2515eed56642SAlex Bennée 
251624e46e6cSRichard Henderson tcg_target_ulong helper_ldsl_mmu(CPUArchState *env, uint64_t addr,
25179002ffcbSRichard Henderson                                  MemOpIdx oi, uintptr_t retaddr)
2518eed56642SAlex Bennée {
25190cadc1edSRichard Henderson     return (int32_t)helper_ldul_mmu(env, addr, oi, retaddr);
2520eed56642SAlex Bennée }
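
/*
 * A minimal sketch of the sign extension performed by the casts above
 * (ex_sign_extend_8 is illustrative, not a QEMU API): narrowing to a
 * signed type and widening back yields the register-sized signed value.
 * E.g. the unsigned byte 0x80 becomes 0xffffffffffffff80.
 */
#include <stdint.h>

static uint64_t ex_sign_extend_8(uint64_t loaded)
{
    return (uint64_t)(int64_t)(int8_t)loaded;
}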
2521eed56642SAlex Bennée 
2522fb2c53cbSAnton Johansson static Int128 do_ld16_mmu(CPUArchState *env, vaddr addr,
252335c653c4SRichard Henderson                           MemOpIdx oi, uintptr_t ra)
252435c653c4SRichard Henderson {
252535c653c4SRichard Henderson     MMULookupLocals l;
252635c653c4SRichard Henderson     bool crosspage;
252735c653c4SRichard Henderson     uint64_t a, b;
252835c653c4SRichard Henderson     Int128 ret;
252935c653c4SRichard Henderson     int first;
253035c653c4SRichard Henderson 
2531f86e8f3dSRichard Henderson     cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
253235c653c4SRichard Henderson     crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD, &l);
253335c653c4SRichard Henderson     if (likely(!crosspage)) {
253435c653c4SRichard Henderson         /* Perform the load host endian. */
253535c653c4SRichard Henderson         if (unlikely(l.page[0].flags & TLB_MMIO)) {
253635c653c4SRichard Henderson             QEMU_IOTHREAD_LOCK_GUARD();
253735c653c4SRichard Henderson             a = io_readx(env, l.page[0].full, l.mmu_idx, addr,
253835c653c4SRichard Henderson                          ra, MMU_DATA_LOAD, MO_64);
253935c653c4SRichard Henderson             b = io_readx(env, l.page[0].full, l.mmu_idx, addr + 8,
254035c653c4SRichard Henderson                          ra, MMU_DATA_LOAD, MO_64);
254135c653c4SRichard Henderson             ret = int128_make128(HOST_BIG_ENDIAN ? b : a,
254235c653c4SRichard Henderson                                  HOST_BIG_ENDIAN ? a : b);
254335c653c4SRichard Henderson         } else {
254435c653c4SRichard Henderson             ret = load_atom_16(env, ra, l.page[0].haddr, l.memop);
254535c653c4SRichard Henderson         }
254635c653c4SRichard Henderson         if (l.memop & MO_BSWAP) {
254735c653c4SRichard Henderson             ret = bswap128(ret);
254835c653c4SRichard Henderson         }
254935c653c4SRichard Henderson         return ret;
255035c653c4SRichard Henderson     }
255135c653c4SRichard Henderson 
255235c653c4SRichard Henderson     first = l.page[0].size;
255335c653c4SRichard Henderson     if (first == 8) {
255435c653c4SRichard Henderson         MemOp mop8 = (l.memop & ~MO_SIZE) | MO_64;
255535c653c4SRichard Henderson 
255635c653c4SRichard Henderson         a = do_ld_8(env, &l.page[0], l.mmu_idx, MMU_DATA_LOAD, mop8, ra);
255735c653c4SRichard Henderson         b = do_ld_8(env, &l.page[1], l.mmu_idx, MMU_DATA_LOAD, mop8, ra);
255835c653c4SRichard Henderson         if ((mop8 & MO_BSWAP) == MO_LE) {
255935c653c4SRichard Henderson             ret = int128_make128(a, b);
256035c653c4SRichard Henderson         } else {
256135c653c4SRichard Henderson             ret = int128_make128(b, a);
256235c653c4SRichard Henderson         }
256335c653c4SRichard Henderson         return ret;
256435c653c4SRichard Henderson     }
256535c653c4SRichard Henderson 
256635c653c4SRichard Henderson     if (first < 8) {
256735c653c4SRichard Henderson         a = do_ld_beN(env, &l.page[0], 0, l.mmu_idx,
256835c653c4SRichard Henderson                       MMU_DATA_LOAD, l.memop, ra);
256935c653c4SRichard Henderson         ret = do_ld16_beN(env, &l.page[1], a, l.mmu_idx, l.memop, ra);
257035c653c4SRichard Henderson     } else {
257135c653c4SRichard Henderson         ret = do_ld16_beN(env, &l.page[0], 0, l.mmu_idx, l.memop, ra);
257235c653c4SRichard Henderson         b = int128_getlo(ret);
257335c653c4SRichard Henderson         ret = int128_lshift(ret, l.page[1].size * 8);
257435c653c4SRichard Henderson         a = int128_gethi(ret);
257535c653c4SRichard Henderson         b = do_ld_beN(env, &l.page[1], b, l.mmu_idx,
257635c653c4SRichard Henderson                       MMU_DATA_LOAD, l.memop, ra);
257735c653c4SRichard Henderson         ret = int128_make128(b, a);
257835c653c4SRichard Henderson     }
257935c653c4SRichard Henderson     if ((l.memop & MO_BSWAP) == MO_LE) {
258035c653c4SRichard Henderson         ret = bswap128(ret);
258135c653c4SRichard Henderson     }
258235c653c4SRichard Henderson     return ret;
258335c653c4SRichard Henderson }
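
/*
 * A minimal sketch of the value that the crosspage recombination above
 * produces, using the compiler's unsigned __int128 (a GCC/Clang
 * extension) in place of QEMU's Int128 helpers; ex_ld16_crosspage is
 * illustrative, not a QEMU API.  Bytes from both pages are concatenated
 * big-endian first; bswap128 is applied afterwards when the MemOp is
 * little-endian.
 */
#include <stdint.h>

static unsigned __int128 ex_ld16_crosspage(const uint8_t *p0, int n0,
                                           const uint8_t *p1, int n1)
{
    unsigned __int128 val = 0;   /* n0 + n1 == 16 */
    int i;

    for (i = 0; i < n0; i++) {
        val = (val << 8) | p0[i];
    }
    for (i = 0; i < n1; i++) {
        val = (val << 8) | p1[i];
    }
    return val;
}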
258435c653c4SRichard Henderson 
258524e46e6cSRichard Henderson Int128 helper_ld16_mmu(CPUArchState *env, uint64_t addr,
258635c653c4SRichard Henderson                        uint32_t oi, uintptr_t retaddr)
258735c653c4SRichard Henderson {
258835c653c4SRichard Henderson     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
258935c653c4SRichard Henderson     return do_ld16_mmu(env, addr, oi, retaddr);
259035c653c4SRichard Henderson }
259135c653c4SRichard Henderson 
2592e570597aSRichard Henderson Int128 helper_ld_i128(CPUArchState *env, uint64_t addr, uint32_t oi)
259335c653c4SRichard Henderson {
259435c653c4SRichard Henderson     return helper_ld16_mmu(env, addr, oi, GETPC());
259535c653c4SRichard Henderson }
259635c653c4SRichard Henderson 
2597eed56642SAlex Bennée /*
2598d03f1408SRichard Henderson  * Load helpers for cpu_ldst.h.
2599d03f1408SRichard Henderson  */
2600d03f1408SRichard Henderson 
26018cfdacaaSRichard Henderson static void plugin_load_cb(CPUArchState *env, abi_ptr addr, MemOpIdx oi)
2602d03f1408SRichard Henderson {
260337aff087SRichard Henderson     qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
2604d03f1408SRichard Henderson }
2605d03f1408SRichard Henderson 
2606f83bcecbSRichard Henderson uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr, MemOpIdx oi, uintptr_t ra)
2607d03f1408SRichard Henderson {
26088cfdacaaSRichard Henderson     uint8_t ret;
26098cfdacaaSRichard Henderson 
26100cadc1edSRichard Henderson     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_UB);
26118cfdacaaSRichard Henderson     ret = do_ld1_mmu(env, addr, oi, ra, MMU_DATA_LOAD);
26128cfdacaaSRichard Henderson     plugin_load_cb(env, addr, oi);
26138cfdacaaSRichard Henderson     return ret;
2614d03f1408SRichard Henderson }
2615d03f1408SRichard Henderson 
2616fbea7a40SRichard Henderson uint16_t cpu_ldw_mmu(CPUArchState *env, abi_ptr addr,
2617f83bcecbSRichard Henderson                      MemOpIdx oi, uintptr_t ra)
2618d03f1408SRichard Henderson {
26198cfdacaaSRichard Henderson     uint16_t ret;
26208cfdacaaSRichard Henderson 
2621fbea7a40SRichard Henderson     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
26228cfdacaaSRichard Henderson     ret = do_ld2_mmu(env, addr, oi, ra, MMU_DATA_LOAD);
26238cfdacaaSRichard Henderson     plugin_load_cb(env, addr, oi);
26248cfdacaaSRichard Henderson     return ret;
2625d03f1408SRichard Henderson }
2626d03f1408SRichard Henderson 
2627fbea7a40SRichard Henderson uint32_t cpu_ldl_mmu(CPUArchState *env, abi_ptr addr,
2628f83bcecbSRichard Henderson                      MemOpIdx oi, uintptr_t ra)
2629d03f1408SRichard Henderson {
26308cfdacaaSRichard Henderson     uint32_t ret;
26318cfdacaaSRichard Henderson 
2632fbea7a40SRichard Henderson     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
26338cfdacaaSRichard Henderson     ret = do_ld4_mmu(env, addr, oi, ra, MMU_DATA_LOAD);
26348cfdacaaSRichard Henderson     plugin_load_cb(env, addr, oi);
26358cfdacaaSRichard Henderson     return ret;
2636d03f1408SRichard Henderson }
2637d03f1408SRichard Henderson 
2638fbea7a40SRichard Henderson uint64_t cpu_ldq_mmu(CPUArchState *env, abi_ptr addr,
2639f83bcecbSRichard Henderson                      MemOpIdx oi, uintptr_t ra)
2640d03f1408SRichard Henderson {
26418cfdacaaSRichard Henderson     uint64_t ret;
26428cfdacaaSRichard Henderson 
2643fbea7a40SRichard Henderson     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
26448cfdacaaSRichard Henderson     ret = do_ld8_mmu(env, addr, oi, ra, MMU_DATA_LOAD);
26458cfdacaaSRichard Henderson     plugin_load_cb(env, addr, oi);
26468cfdacaaSRichard Henderson     return ret;
2647d03f1408SRichard Henderson }
2648d03f1408SRichard Henderson 
2649fbea7a40SRichard Henderson Int128 cpu_ld16_mmu(CPUArchState *env, abi_ptr addr,
2650cb48f365SRichard Henderson                     MemOpIdx oi, uintptr_t ra)
2651cb48f365SRichard Henderson {
265235c653c4SRichard Henderson     Int128 ret;
2653cb48f365SRichard Henderson 
2654fbea7a40SRichard Henderson     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
265535c653c4SRichard Henderson     ret = do_ld16_mmu(env, addr, oi, ra);
265635c653c4SRichard Henderson     plugin_load_cb(env, addr, oi);
265735c653c4SRichard Henderson     return ret;
2658cb48f365SRichard Henderson }
2659cb48f365SRichard Henderson 
2660d03f1408SRichard Henderson /*
2661eed56642SAlex Bennée  * Store Helpers
2662eed56642SAlex Bennée  */
2663eed56642SAlex Bennée 
266459213461SRichard Henderson /**
266559213461SRichard Henderson  * do_st_mmio_leN:
266659213461SRichard Henderson  * @env: cpu context
266759213461SRichard Henderson  * @p: translation parameters
266859213461SRichard Henderson  * @val_le: data to store
266959213461SRichard Henderson  * @mmu_idx: virtual address context
267059213461SRichard Henderson  * @ra: return address into tcg generated code, or 0
267159213461SRichard Henderson  *
267259213461SRichard Henderson  * Store @p->size bytes at @p->addr, which is memory-mapped i/o.
267359213461SRichard Henderson  * The bytes to store are extracted in little-endian order from @val_le;
267459213461SRichard Henderson  * return the bytes of @val_le beyond @p->size that have not been stored.
267559213461SRichard Henderson  */
267659213461SRichard Henderson static uint64_t do_st_mmio_leN(CPUArchState *env, MMULookupPageData *p,
267759213461SRichard Henderson                                uint64_t val_le, int mmu_idx, uintptr_t ra)
26786b8b622eSRichard Henderson {
267959213461SRichard Henderson     CPUTLBEntryFull *full = p->full;
2680fb2c53cbSAnton Johansson     vaddr addr = p->addr;
268159213461SRichard Henderson     int i, size = p->size;
26826b8b622eSRichard Henderson 
268359213461SRichard Henderson     QEMU_IOTHREAD_LOCK_GUARD();
268459213461SRichard Henderson     for (i = 0; i < size; i++, val_le >>= 8) {
268559213461SRichard Henderson         io_writex(env, full, mmu_idx, val_le, addr + i, ra, MO_UB);
268659213461SRichard Henderson     }
268759213461SRichard Henderson     return val_le;
268859213461SRichard Henderson }
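
/*
 * A minimal sketch of the little-endian extraction above (ex_store_leN
 * is illustrative, not a QEMU API).  Each stored byte is peeled off the
 * low end of val_le, so the return value is exactly the unstored
 * remainder, ready to continue on the second page.
 */
#include <stdint.h>

static uint64_t ex_store_leN(uint8_t *dst, int size, uint64_t val_le)
{
    for (int i = 0; i < size; i++, val_le >>= 8) {
        dst[i] = (uint8_t)val_le;
    }
    return val_le;
}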
268959213461SRichard Henderson 
26906b8b622eSRichard Henderson /*
269159213461SRichard Henderson  * Wrapper for the above.
26926b8b622eSRichard Henderson  */
269359213461SRichard Henderson static uint64_t do_st_leN(CPUArchState *env, MMULookupPageData *p,
26945b36f268SRichard Henderson                           uint64_t val_le, int mmu_idx,
26955b36f268SRichard Henderson                           MemOp mop, uintptr_t ra)
269659213461SRichard Henderson {
26975b36f268SRichard Henderson     MemOp atom;
26985b36f268SRichard Henderson     unsigned tmp, half_size;
26995b36f268SRichard Henderson 
270059213461SRichard Henderson     if (unlikely(p->flags & TLB_MMIO)) {
270159213461SRichard Henderson         return do_st_mmio_leN(env, p, val_le, mmu_idx, ra);
270259213461SRichard Henderson     } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
270359213461SRichard Henderson         return val_le >> (p->size * 8);
27045b36f268SRichard Henderson     }
27055b36f268SRichard Henderson 
27065b36f268SRichard Henderson     /*
27075b36f268SRichard Henderson      * It is a given that we cross a page and therefore there is no atomicity
27085b36f268SRichard Henderson      * for the store as a whole, but subobjects may need attention.
27095b36f268SRichard Henderson      */
27105b36f268SRichard Henderson     atom = mop & MO_ATOM_MASK;
27115b36f268SRichard Henderson     switch (atom) {
27125b36f268SRichard Henderson     case MO_ATOM_SUBALIGN:
27135b36f268SRichard Henderson         return store_parts_leN(p->haddr, p->size, val_le);
27145b36f268SRichard Henderson 
27155b36f268SRichard Henderson     case MO_ATOM_IFALIGN_PAIR:
27165b36f268SRichard Henderson     case MO_ATOM_WITHIN16_PAIR:
27175b36f268SRichard Henderson         tmp = mop & MO_SIZE;
27185b36f268SRichard Henderson         tmp = tmp ? tmp - 1 : 0;
27195b36f268SRichard Henderson         half_size = 1 << tmp;
27205b36f268SRichard Henderson         if (atom == MO_ATOM_IFALIGN_PAIR
27215b36f268SRichard Henderson             ? p->size == half_size
27225b36f268SRichard Henderson             : p->size >= half_size) {
27235b36f268SRichard Henderson             if (!HAVE_al8_fast && p->size <= 4) {
27245b36f268SRichard Henderson                 return store_whole_le4(p->haddr, p->size, val_le);
27255b36f268SRichard Henderson             } else if (HAVE_al8) {
27265b36f268SRichard Henderson                 return store_whole_le8(p->haddr, p->size, val_le);
27276b8b622eSRichard Henderson             } else {
27285b36f268SRichard Henderson                 cpu_loop_exit_atomic(env_cpu(env), ra);
27295b36f268SRichard Henderson             }
27305b36f268SRichard Henderson         }
27315b36f268SRichard Henderson         /* fall through */
27325b36f268SRichard Henderson 
27335b36f268SRichard Henderson     case MO_ATOM_IFALIGN:
27345b36f268SRichard Henderson     case MO_ATOM_WITHIN16:
27355b36f268SRichard Henderson     case MO_ATOM_NONE:
27365b36f268SRichard Henderson         return store_bytes_leN(p->haddr, p->size, val_le);
27375b36f268SRichard Henderson 
27385b36f268SRichard Henderson     default:
27395b36f268SRichard Henderson         g_assert_not_reached();
27406b8b622eSRichard Henderson     }
27416b8b622eSRichard Henderson }
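
/*
 * A sketch of the idea behind the store_whole_le* helpers used above,
 * which are implemented elsewhere in the tree: a partial store into an
 * aligned word is made atomic by merging the new bytes into the old
 * word with a compare-and-swap loop.  ex_store_whole_le8 and its
 * signature are assumptions for illustration only, using C11 atomics
 * and a little-endian host.
 */
#include <stdatomic.h>
#include <stdint.h>

static void ex_store_whole_le8(_Atomic uint64_t *aligned, int off, int size,
                               uint64_t val_le)
{
    uint64_t mask = size == 8 ? ~(uint64_t)0
                              : (((uint64_t)1 << (size * 8)) - 1) << (off * 8);
    uint64_t ins = (val_le << (off * 8)) & mask;
    uint64_t old = atomic_load_explicit(aligned, memory_order_relaxed);

    while (!atomic_compare_exchange_weak(aligned, &old,
                                         (old & ~mask) | ins)) {
        /* On failure, old has been reloaded; retry the merge. */
    }
}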
27426b8b622eSRichard Henderson 
274335c653c4SRichard Henderson /*
274435c653c4SRichard Henderson  * Wrapper for the above, for 8 < size < 16.
274535c653c4SRichard Henderson  */
274635c653c4SRichard Henderson static uint64_t do_st16_leN(CPUArchState *env, MMULookupPageData *p,
274735c653c4SRichard Henderson                             Int128 val_le, int mmu_idx,
274835c653c4SRichard Henderson                             MemOp mop, uintptr_t ra)
274935c653c4SRichard Henderson {
275035c653c4SRichard Henderson     int size = p->size;
275135c653c4SRichard Henderson     MemOp atom;
275235c653c4SRichard Henderson 
275335c653c4SRichard Henderson     if (unlikely(p->flags & TLB_MMIO)) {
275435c653c4SRichard Henderson         p->size = 8;
275535c653c4SRichard Henderson         do_st_mmio_leN(env, p, int128_getlo(val_le), mmu_idx, ra);
275635c653c4SRichard Henderson         p->size = size - 8;
275735c653c4SRichard Henderson         p->addr += 8;
275835c653c4SRichard Henderson         return do_st_mmio_leN(env, p, int128_gethi(val_le), mmu_idx, ra);
275935c653c4SRichard Henderson     } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
276035c653c4SRichard Henderson         return int128_gethi(val_le) >> ((size - 8) * 8);
276135c653c4SRichard Henderson     }
276235c653c4SRichard Henderson 
276335c653c4SRichard Henderson     /*
276435c653c4SRichard Henderson      * It is a given that we cross a page and therefore there is no atomicity
276535c653c4SRichard Henderson      * for the store as a whole, but subobjects may need attention.
276635c653c4SRichard Henderson      */
276735c653c4SRichard Henderson     atom = mop & MO_ATOM_MASK;
276835c653c4SRichard Henderson     switch (atom) {
276935c653c4SRichard Henderson     case MO_ATOM_SUBALIGN:
277035c653c4SRichard Henderson         store_parts_leN(p->haddr, 8, int128_getlo(val_le));
277135c653c4SRichard Henderson         return store_parts_leN(p->haddr + 8, p->size - 8,
277235c653c4SRichard Henderson                                int128_gethi(val_le));
277335c653c4SRichard Henderson 
277435c653c4SRichard Henderson     case MO_ATOM_WITHIN16_PAIR:
277535c653c4SRichard Henderson         /* Since size > 8, this is the half that must be atomic. */
27768dc24ff4SRichard Henderson         if (!HAVE_ATOMIC128_RW) {
277735c653c4SRichard Henderson             cpu_loop_exit_atomic(env_cpu(env), ra);
277835c653c4SRichard Henderson         }
277935c653c4SRichard Henderson         return store_whole_le16(p->haddr, p->size, val_le);
278035c653c4SRichard Henderson 
278135c653c4SRichard Henderson     case MO_ATOM_IFALIGN_PAIR:
278235c653c4SRichard Henderson         /*
278335c653c4SRichard Henderson          * Since size > 8, both halves are misaligned,
278435c653c4SRichard Henderson          * and so neither is atomic.
278535c653c4SRichard Henderson          */
278635c653c4SRichard Henderson     case MO_ATOM_IFALIGN:
27872be6a486SRichard Henderson     case MO_ATOM_WITHIN16:
278835c653c4SRichard Henderson     case MO_ATOM_NONE:
278935c653c4SRichard Henderson         stq_le_p(p->haddr, int128_getlo(val_le));
279035c653c4SRichard Henderson         return store_bytes_leN(p->haddr + 8, p->size - 8,
279135c653c4SRichard Henderson                                int128_gethi(val_le));
279235c653c4SRichard Henderson 
279335c653c4SRichard Henderson     default:
279435c653c4SRichard Henderson         g_assert_not_reached();
279535c653c4SRichard Henderson     }
279635c653c4SRichard Henderson }
279735c653c4SRichard Henderson 
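/*
 * Illustrative sketch, not part of the original file: in little-endian
 * byte order the low 8 bytes of an Int128 precede the high bytes, which
 * is why the MO_ATOM_SUBALIGN path above stores int128_getlo() at haddr
 * and int128_gethi() at haddr + 8.  A minimal model, assuming a
 * little-endian host so that memcpy() preserves byte order:
 */
#if 0
static void example_split_le16(uint8_t *dst, int size, Int128 val_le)
{
    uint64_t lo = int128_getlo(val_le);
    uint64_t hi = int128_gethi(val_le);

    memcpy(dst, &lo, 8);               /* bytes 0..7: low half */
    memcpy(dst + 8, &hi, size - 8);    /* bytes 8..size-1: high half */
}
#endif
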
279859213461SRichard Henderson static void do_st_1(CPUArchState *env, MMULookupPageData *p, uint8_t val,
279959213461SRichard Henderson                     int mmu_idx, uintptr_t ra)
2800eed56642SAlex Bennée {
280159213461SRichard Henderson     if (unlikely(p->flags & TLB_MMIO)) {
280259213461SRichard Henderson         io_writex(env, p->full, mmu_idx, val, p->addr, ra, MO_UB);
280359213461SRichard Henderson     } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
280459213461SRichard Henderson         /* nothing */
28055b87b3e6SRichard Henderson     } else {
280659213461SRichard Henderson         *(uint8_t *)p->haddr = val;
28075b87b3e6SRichard Henderson     }
2808eed56642SAlex Bennée }
2809eed56642SAlex Bennée 
281059213461SRichard Henderson static void do_st_2(CPUArchState *env, MMULookupPageData *p, uint16_t val,
281159213461SRichard Henderson                     int mmu_idx, MemOp memop, uintptr_t ra)
2812eed56642SAlex Bennée {
281359213461SRichard Henderson     if (unlikely(p->flags & TLB_MMIO)) {
281459213461SRichard Henderson         io_writex(env, p->full, mmu_idx, val, p->addr, ra, memop);
281559213461SRichard Henderson     } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
281659213461SRichard Henderson         /* nothing */
281759213461SRichard Henderson     } else {
281859213461SRichard Henderson         /* Swap to host endian if necessary, then store. */
281959213461SRichard Henderson         if (memop & MO_BSWAP) {
282059213461SRichard Henderson             val = bswap16(val);
282159213461SRichard Henderson         }
28225b36f268SRichard Henderson         store_atom_2(env, ra, p->haddr, memop, val);
282359213461SRichard Henderson     }
282459213461SRichard Henderson }
282559213461SRichard Henderson 
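/*
 * Illustrative note, not part of the original file: MO_BSWAP is set in a
 * MemOp exactly when the access endianness differs from the host's, so
 * the test above swaps only when guest and host byte order disagree.
 * The same pattern in isolation:
 */
#if 0
static uint16_t example_to_host_order(uint16_t val, MemOp memop)
{
    return (memop & MO_BSWAP) ? bswap16(val) : val;
}
#endif
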
282659213461SRichard Henderson static void do_st_4(CPUArchState *env, MMULookupPageData *p, uint32_t val,
282759213461SRichard Henderson                     int mmu_idx, MemOp memop, uintptr_t ra)
282859213461SRichard Henderson {
282959213461SRichard Henderson     if (unlikely(p->flags & TLB_MMIO)) {
283059213461SRichard Henderson         io_writex(env, p->full, mmu_idx, val, p->addr, ra, memop);
283159213461SRichard Henderson     } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
283259213461SRichard Henderson         /* nothing */
283359213461SRichard Henderson     } else {
283459213461SRichard Henderson         /* Swap to host endian if necessary, then store. */
283559213461SRichard Henderson         if (memop & MO_BSWAP) {
283659213461SRichard Henderson             val = bswap32(val);
283759213461SRichard Henderson         }
28385b36f268SRichard Henderson         store_atom_4(env, ra, p->haddr, memop, val);
283959213461SRichard Henderson     }
284059213461SRichard Henderson }
284159213461SRichard Henderson 
284259213461SRichard Henderson static void do_st_8(CPUArchState *env, MMULookupPageData *p, uint64_t val,
284359213461SRichard Henderson                     int mmu_idx, MemOp memop, uintptr_t ra)
284459213461SRichard Henderson {
284559213461SRichard Henderson     if (unlikely(p->flags & TLB_MMIO)) {
284659213461SRichard Henderson         io_writex(env, p->full, mmu_idx, val, p->addr, ra, memop);
284759213461SRichard Henderson     } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
284859213461SRichard Henderson         /* nothing */
284959213461SRichard Henderson     } else {
285059213461SRichard Henderson         /* Swap to host endian if necessary, then store. */
285159213461SRichard Henderson         if (memop & MO_BSWAP) {
285259213461SRichard Henderson             val = bswap64(val);
285359213461SRichard Henderson         }
28545b36f268SRichard Henderson         store_atom_8(env, ra, p->haddr, memop, val);
285559213461SRichard Henderson     }
2856eed56642SAlex Bennée }
2857eed56642SAlex Bennée 
285824e46e6cSRichard Henderson void helper_stb_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
285959213461SRichard Henderson                     MemOpIdx oi, uintptr_t ra)
2860f83bcecbSRichard Henderson {
286159213461SRichard Henderson     MMULookupLocals l;
286259213461SRichard Henderson     bool crosspage;
286359213461SRichard Henderson 
28640cadc1edSRichard Henderson     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_8);
2865f86e8f3dSRichard Henderson     cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
286659213461SRichard Henderson     crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l);
286759213461SRichard Henderson     tcg_debug_assert(!crosspage);
286859213461SRichard Henderson 
286959213461SRichard Henderson     do_st_1(env, &l.page[0], val, l.mmu_idx, ra);
2870f83bcecbSRichard Henderson }
2871f83bcecbSRichard Henderson 
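/*
 * Illustrative sketch, not part of the original file: a MemOpIdx packs
 * the MemOp and the mmu index into a single value; get_memop() in the
 * assertion above is one of its accessors.  A hypothetical round trip:
 */
#if 0
MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
assert(get_memop(oi) == MO_UB);
assert(get_mmuidx(oi) == mmu_idx);
#endif
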
2872fb2c53cbSAnton Johansson static void do_st2_mmu(CPUArchState *env, vaddr addr, uint16_t val,
287359213461SRichard Henderson                        MemOpIdx oi, uintptr_t ra)
2874f83bcecbSRichard Henderson {
287559213461SRichard Henderson     MMULookupLocals l;
287659213461SRichard Henderson     bool crosspage;
287759213461SRichard Henderson     uint8_t a, b;
287859213461SRichard Henderson 
2879f86e8f3dSRichard Henderson     cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
288059213461SRichard Henderson     crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l);
288159213461SRichard Henderson     if (likely(!crosspage)) {
288259213461SRichard Henderson         do_st_2(env, &l.page[0], val, l.mmu_idx, l.memop, ra);
288359213461SRichard Henderson         return;
288459213461SRichard Henderson     }
288559213461SRichard Henderson 
288659213461SRichard Henderson     if ((l.memop & MO_BSWAP) == MO_LE) {
288759213461SRichard Henderson         a = val, b = val >> 8;
288859213461SRichard Henderson     } else {
288959213461SRichard Henderson         b = val, a = val >> 8;
289059213461SRichard Henderson     }
289159213461SRichard Henderson     do_st_1(env, &l.page[0], a, l.mmu_idx, ra);
289259213461SRichard Henderson     do_st_1(env, &l.page[1], b, l.mmu_idx, ra);
2893f83bcecbSRichard Henderson }
2894f83bcecbSRichard Henderson 
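/*
 * Worked example, not part of the original file: a big-endian 16-bit
 * store of 0x1234 whose first byte is the last byte of page 0 takes the
 * crosspage path above with a = 0x12 and b = 0x34, so page 0 receives
 * the high byte and page 1 the low byte, preserving guest byte order.
 */
#if 0
uint16_t val = 0x1234;      /* MO_BE, crosses a page boundary */
uint8_t a = val >> 8;       /* 0x12 -> last byte of page 0 */
uint8_t b = val;            /* 0x34 -> first byte of page 1 */
#endif
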
289524e46e6cSRichard Henderson void helper_stw_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
28969002ffcbSRichard Henderson                     MemOpIdx oi, uintptr_t retaddr)
2897eed56642SAlex Bennée {
28980cadc1edSRichard Henderson     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
289959213461SRichard Henderson     do_st2_mmu(env, addr, val, oi, retaddr);
2900f83bcecbSRichard Henderson }
2901f83bcecbSRichard Henderson 
2902fb2c53cbSAnton Johansson static void do_st4_mmu(CPUArchState *env, vaddr addr, uint32_t val,
290359213461SRichard Henderson                        MemOpIdx oi, uintptr_t ra)
2904f83bcecbSRichard Henderson {
290559213461SRichard Henderson     MMULookupLocals l;
290659213461SRichard Henderson     bool crosspage;
290759213461SRichard Henderson 
2908f86e8f3dSRichard Henderson     cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
290959213461SRichard Henderson     crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l);
291059213461SRichard Henderson     if (likely(!crosspage)) {
291159213461SRichard Henderson         do_st_4(env, &l.page[0], val, l.mmu_idx, l.memop, ra);
291259213461SRichard Henderson         return;
291359213461SRichard Henderson     }
291459213461SRichard Henderson 
291559213461SRichard Henderson     /* Swap to little endian for simplicity, then store by bytes. */
291659213461SRichard Henderson     if ((l.memop & MO_BSWAP) != MO_LE) {
291759213461SRichard Henderson         val = bswap32(val);
291859213461SRichard Henderson     }
29195b36f268SRichard Henderson     val = do_st_leN(env, &l.page[0], val, l.mmu_idx, l.memop, ra);
29205b36f268SRichard Henderson     (void) do_st_leN(env, &l.page[1], val, l.mmu_idx, l.memop, ra);
2921eed56642SAlex Bennée }
2922eed56642SAlex Bennée 
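/*
 * Worked example, not part of the original file: after normalising to
 * little-endian, a 4-byte store with three bytes left on page 0 writes
 * bytes 0..2 there; do_st_leN() returns the value shifted down by the
 * bytes written, and the second call stores the remainder on page 1.
 */
#if 0
uint32_t val = 0x04030201;  /* little-endian byte order: 01 02 03 04 */
/* page 0 (3 bytes): 01 02 03; do_st_leN returns 0x00000004 */
/* page 1 (1 byte):  04 */
#endif
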
292324e46e6cSRichard Henderson void helper_stl_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
29249002ffcbSRichard Henderson                     MemOpIdx oi, uintptr_t retaddr)
2925eed56642SAlex Bennée {
29260cadc1edSRichard Henderson     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
292759213461SRichard Henderson     do_st4_mmu(env, addr, val, oi, retaddr);
292859213461SRichard Henderson }
292959213461SRichard Henderson 
2930fb2c53cbSAnton Johansson static void do_st8_mmu(CPUArchState *env, vaddr addr, uint64_t val,
293159213461SRichard Henderson                        MemOpIdx oi, uintptr_t ra)
293259213461SRichard Henderson {
293359213461SRichard Henderson     MMULookupLocals l;
293459213461SRichard Henderson     bool crosspage;
293559213461SRichard Henderson 
2936f86e8f3dSRichard Henderson     cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
293759213461SRichard Henderson     crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l);
293859213461SRichard Henderson     if (likely(!crosspage)) {
293959213461SRichard Henderson         do_st_8(env, &l.page[0], val, l.mmu_idx, l.memop, ra);
294059213461SRichard Henderson         return;
294159213461SRichard Henderson     }
294259213461SRichard Henderson 
294359213461SRichard Henderson     /* Swap to little endian for simplicity, then store by bytes. */
294459213461SRichard Henderson     if ((l.memop & MO_BSWAP) != MO_LE) {
294559213461SRichard Henderson         val = bswap64(val);
294659213461SRichard Henderson     }
29475b36f268SRichard Henderson     val = do_st_leN(env, &l.page[0], val, l.mmu_idx, l.memop, ra);
29485b36f268SRichard Henderson     (void) do_st_leN(env, &l.page[1], val, l.mmu_idx, l.memop, ra);
2949eed56642SAlex Bennée }
2950eed56642SAlex Bennée 
295124e46e6cSRichard Henderson void helper_stq_mmu(CPUArchState *env, uint64_t addr, uint64_t val,
29529002ffcbSRichard Henderson                     MemOpIdx oi, uintptr_t retaddr)
2953eed56642SAlex Bennée {
29540cadc1edSRichard Henderson     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
295559213461SRichard Henderson     do_st8_mmu(env, addr, val, oi, retaddr);
2956eed56642SAlex Bennée }
2957d9bb58e5SYang Zhong 
2958fb2c53cbSAnton Johansson static void do_st16_mmu(CPUArchState *env, vaddr addr, Int128 val,
295935c653c4SRichard Henderson                         MemOpIdx oi, uintptr_t ra)
296035c653c4SRichard Henderson {
296135c653c4SRichard Henderson     MMULookupLocals l;
296235c653c4SRichard Henderson     bool crosspage;
296335c653c4SRichard Henderson     uint64_t a, b;
296435c653c4SRichard Henderson     int first;
296535c653c4SRichard Henderson 
2966f86e8f3dSRichard Henderson     cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
296735c653c4SRichard Henderson     crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l);
296835c653c4SRichard Henderson     if (likely(!crosspage)) {
296935c653c4SRichard Henderson         /* Swap to host endian if necessary, then store. */
297035c653c4SRichard Henderson         if (l.memop & MO_BSWAP) {
297135c653c4SRichard Henderson             val = bswap128(val);
297235c653c4SRichard Henderson         }
297335c653c4SRichard Henderson         if (unlikely(l.page[0].flags & TLB_MMIO)) {
297435c653c4SRichard Henderson             QEMU_IOTHREAD_LOCK_GUARD();
297535c653c4SRichard Henderson             if (HOST_BIG_ENDIAN) {
297635c653c4SRichard Henderson                 b = int128_getlo(val), a = int128_gethi(val);
297735c653c4SRichard Henderson             } else {
297835c653c4SRichard Henderson                 a = int128_getlo(val), b = int128_gethi(val);
297935c653c4SRichard Henderson             }
298035c653c4SRichard Henderson             io_writex(env, l.page[0].full, l.mmu_idx, a, addr, ra, MO_64);
298135c653c4SRichard Henderson             io_writex(env, l.page[0].full, l.mmu_idx, b, addr + 8, ra, MO_64);
298235c653c4SRichard Henderson         } else if (unlikely(l.page[0].flags & TLB_DISCARD_WRITE)) {
298335c653c4SRichard Henderson             /* nothing */
298435c653c4SRichard Henderson         } else {
298535c653c4SRichard Henderson             store_atom_16(env, ra, l.page[0].haddr, l.memop, val);
298635c653c4SRichard Henderson         }
298735c653c4SRichard Henderson         return;
298835c653c4SRichard Henderson     }
298935c653c4SRichard Henderson 
299035c653c4SRichard Henderson     first = l.page[0].size;
299135c653c4SRichard Henderson     if (first == 8) {
299235c653c4SRichard Henderson         MemOp mop8 = (l.memop & ~(MO_SIZE | MO_BSWAP)) | MO_64;
299335c653c4SRichard Henderson 
299435c653c4SRichard Henderson         if (l.memop & MO_BSWAP) {
299535c653c4SRichard Henderson             val = bswap128(val);
299635c653c4SRichard Henderson         }
299735c653c4SRichard Henderson         if (HOST_BIG_ENDIAN) {
299835c653c4SRichard Henderson             b = int128_getlo(val), a = int128_gethi(val);
299935c653c4SRichard Henderson         } else {
300035c653c4SRichard Henderson             a = int128_getlo(val), b = int128_gethi(val);
300135c653c4SRichard Henderson         }
300235c653c4SRichard Henderson         do_st_8(env, &l.page[0], a, l.mmu_idx, mop8, ra);
300335c653c4SRichard Henderson         do_st_8(env, &l.page[1], b, l.mmu_idx, mop8, ra);
300435c653c4SRichard Henderson         return;
300535c653c4SRichard Henderson     }
300635c653c4SRichard Henderson 
300735c653c4SRichard Henderson     if ((l.memop & MO_BSWAP) != MO_LE) {
300835c653c4SRichard Henderson         val = bswap128(val);
300935c653c4SRichard Henderson     }
301035c653c4SRichard Henderson     if (first < 8) {
301135c653c4SRichard Henderson         do_st_leN(env, &l.page[0], int128_getlo(val), l.mmu_idx, l.memop, ra);
301235c653c4SRichard Henderson         val = int128_urshift(val, first * 8);
301335c653c4SRichard Henderson         do_st16_leN(env, &l.page[1], val, l.mmu_idx, l.memop, ra);
301435c653c4SRichard Henderson     } else {
301535c653c4SRichard Henderson         b = do_st16_leN(env, &l.page[0], val, l.mmu_idx, l.memop, ra);
301635c653c4SRichard Henderson         do_st_leN(env, &l.page[1], b, l.mmu_idx, l.memop, ra);
301735c653c4SRichard Henderson     }
301835c653c4SRichard Henderson }
301935c653c4SRichard Henderson 
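/*
 * Illustrative note, not part of the original file: in the first == 8
 * case above the value has already been swapped to host order, so mop8
 * clears MO_BSWAP, forces the size to MO_64 for each half, and keeps
 * the MO_ATOM_* bits so each 8-byte store retains its atomicity
 * requirement.  For example (values hypothetical):
 */
#if 0
/* MO_128 | MO_BE | MO_ATOM_WITHIN16 becomes MO_64 | MO_ATOM_WITHIN16 */
MemOp mop8 = (memop & ~(MO_SIZE | MO_BSWAP)) | MO_64;
#endif
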
302024e46e6cSRichard Henderson void helper_st16_mmu(CPUArchState *env, uint64_t addr, Int128 val,
302135c653c4SRichard Henderson                      MemOpIdx oi, uintptr_t retaddr)
302235c653c4SRichard Henderson {
302335c653c4SRichard Henderson     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
302435c653c4SRichard Henderson     do_st16_mmu(env, addr, val, oi, retaddr);
302535c653c4SRichard Henderson }
302635c653c4SRichard Henderson 
3027e570597aSRichard Henderson void helper_st_i128(CPUArchState *env, uint64_t addr, Int128 val, MemOpIdx oi)
302835c653c4SRichard Henderson {
302935c653c4SRichard Henderson     helper_st16_mmu(env, addr, val, oi, GETPC());
303035c653c4SRichard Henderson }
303135c653c4SRichard Henderson 
3032d03f1408SRichard Henderson /*
3033d03f1408SRichard Henderson  * Store Helpers for cpu_ldst.h
3034d03f1408SRichard Henderson  */
3035d03f1408SRichard Henderson 
303659213461SRichard Henderson static void plugin_store_cb(CPUArchState *env, abi_ptr addr, MemOpIdx oi)
3037d03f1408SRichard Henderson {
303837aff087SRichard Henderson     qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
3039d03f1408SRichard Henderson }
3040d03f1408SRichard Henderson 
3041f83bcecbSRichard Henderson void cpu_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
3042f83bcecbSRichard Henderson                  MemOpIdx oi, uintptr_t retaddr)
3043d03f1408SRichard Henderson {
30440cadc1edSRichard Henderson     helper_stb_mmu(env, addr, val, oi, retaddr);
304559213461SRichard Henderson     plugin_store_cb(env, addr, oi);
3046d03f1408SRichard Henderson }
3047d03f1408SRichard Henderson 
3048fbea7a40SRichard Henderson void cpu_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
3049f83bcecbSRichard Henderson                  MemOpIdx oi, uintptr_t retaddr)
3050d03f1408SRichard Henderson {
3051fbea7a40SRichard Henderson     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
30520cadc1edSRichard Henderson     do_st2_mmu(env, addr, val, oi, retaddr);
305359213461SRichard Henderson     plugin_store_cb(env, addr, oi);
3054d03f1408SRichard Henderson }
3055d03f1408SRichard Henderson 
3056fbea7a40SRichard Henderson void cpu_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
3057f83bcecbSRichard Henderson                  MemOpIdx oi, uintptr_t retaddr)
3058d03f1408SRichard Henderson {
3059fbea7a40SRichard Henderson     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
30600cadc1edSRichard Henderson     do_st4_mmu(env, addr, val, oi, retaddr);
306159213461SRichard Henderson     plugin_store_cb(env, addr, oi);
3062d03f1408SRichard Henderson }
3063d03f1408SRichard Henderson 
3064fbea7a40SRichard Henderson void cpu_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
3065f83bcecbSRichard Henderson                  MemOpIdx oi, uintptr_t retaddr)
3066d03f1408SRichard Henderson {
3067fbea7a40SRichard Henderson     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
30680cadc1edSRichard Henderson     do_st8_mmu(env, addr, val, oi, retaddr);
306959213461SRichard Henderson     plugin_store_cb(env, addr, oi);
3070b9e60257SRichard Henderson }
3071b9e60257SRichard Henderson 
3072fbea7a40SRichard Henderson void cpu_st16_mmu(CPUArchState *env, target_ulong addr, Int128 val,
3073f83bcecbSRichard Henderson                   MemOpIdx oi, uintptr_t retaddr)
3074b9e60257SRichard Henderson {
3075fbea7a40SRichard Henderson     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
307635c653c4SRichard Henderson     do_st16_mmu(env, addr, val, oi, retaddr);
307735c653c4SRichard Henderson     plugin_store_cb(env, addr, oi);
3078cb48f365SRichard Henderson }
3079cb48f365SRichard Henderson 
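/*
 * Usage sketch, not part of the original file: a target helper might
 * issue a 32-bit guest-endian store through these wrappers roughly as
 * follows (addr, val and mmu_idx are hypothetical):
 */
#if 0
MemOpIdx oi = make_memop_idx(MO_TEUL, mmu_idx);
cpu_stl_mmu(env, addr, val, oi, GETPC());
#endif
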
3080f83bcecbSRichard Henderson #include "ldst_common.c.inc"
3081cfe04a4bSRichard Henderson 
3082be9568b4SRichard Henderson /*
3083be9568b4SRichard Henderson  * The first set of generated functions passes in OI and RETADDR;
3084be9568b4SRichard Henderson  * this makes them callable from other helpers.
3085be9568b4SRichard Henderson  */
3086d9bb58e5SYang Zhong 
3087d9bb58e5SYang Zhong #define ATOMIC_NAME(X) \
3088be9568b4SRichard Henderson     glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)
3089a754f7f3SRichard Henderson 
3090707526adSRichard Henderson #define ATOMIC_MMU_CLEANUP
3091d9bb58e5SYang Zhong 
3092139c1837SPaolo Bonzini #include "atomic_common.c.inc"
3093d9bb58e5SYang Zhong 
3094d9bb58e5SYang Zhong #define DATA_SIZE 1
3095d9bb58e5SYang Zhong #include "atomic_template.h"
3096d9bb58e5SYang Zhong 
3097d9bb58e5SYang Zhong #define DATA_SIZE 2
3098d9bb58e5SYang Zhong #include "atomic_template.h"
3099d9bb58e5SYang Zhong 
3100d9bb58e5SYang Zhong #define DATA_SIZE 4
3101d9bb58e5SYang Zhong #include "atomic_template.h"
3102d9bb58e5SYang Zhong 
3103d9bb58e5SYang Zhong #ifdef CONFIG_ATOMIC64
3104d9bb58e5SYang Zhong #define DATA_SIZE 8
3105d9bb58e5SYang Zhong #include "atomic_template.h"
3106d9bb58e5SYang Zhong #endif
3107d9bb58e5SYang Zhong 
31084deb39ebSRichard Henderson #if defined(CONFIG_ATOMIC128) || defined(CONFIG_CMPXCHG128)
3109d9bb58e5SYang Zhong #define DATA_SIZE 16
3110d9bb58e5SYang Zhong #include "atomic_template.h"
3111d9bb58e5SYang Zhong #endif
3112d9bb58e5SYang Zhong 
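/*
 * Illustrative note, not part of the original file: each inclusion of
 * atomic_template.h above instantiates helpers named by ATOMIC_NAME,
 * e.g. cpu_atomic_cmpxchgl_le_mmu() for DATA_SIZE 4.  A hedged usage
 * sketch for a 32-bit little-endian compare-and-swap (addr, cmpv, newv
 * and mmu_idx are hypothetical):
 */
#if 0
uint32_t old = cpu_atomic_cmpxchgl_le_mmu(env, addr, cmpv, newv,
                                          make_memop_idx(MO_LEUL, mmu_idx),
                                          GETPC());
#endif
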
3113d9bb58e5SYang Zhong /* Code access functions.  */
3114d9bb58e5SYang Zhong 
3115fc4120a3SRichard Henderson uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr)
3116eed56642SAlex Bennée {
31179002ffcbSRichard Henderson     MemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(env, true));
31188cfdacaaSRichard Henderson     return do_ld1_mmu(env, addr, oi, 0, MMU_INST_FETCH);
31194cef72d0SAlex Bennée }
31204cef72d0SAlex Bennée 
3121fc4120a3SRichard Henderson uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr)
31222dd92606SRichard Henderson {
31239002ffcbSRichard Henderson     MemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(env, true));
31248cfdacaaSRichard Henderson     return do_ld2_mmu(env, addr, oi, 0, MMU_INST_FETCH);
31252dd92606SRichard Henderson }
31262dd92606SRichard Henderson 
3127fc4120a3SRichard Henderson uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr)
31284cef72d0SAlex Bennée {
31299002ffcbSRichard Henderson     MemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, true));
31308cfdacaaSRichard Henderson     return do_ld4_mmu(env, addr, oi, 0, MMU_INST_FETCH);
3131eed56642SAlex Bennée }
3132d9bb58e5SYang Zhong 
3133fc4120a3SRichard Henderson uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr)
3134eed56642SAlex Bennée {
3135fc313c64SFrédéric Pétrot     MemOpIdx oi = make_memop_idx(MO_TEUQ, cpu_mmu_index(env, true));
31368cfdacaaSRichard Henderson     return do_ld8_mmu(env, addr, oi, 0, MMU_INST_FETCH);
3137eed56642SAlex Bennée }
313828990626SRichard Henderson 
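/*
 * Usage sketch, not part of the original file: translators typically
 * fetch guest instructions through these functions, e.g. a 32-bit
 * opcode at the current program counter (pc is hypothetical):
 */
#if 0
uint32_t insn = cpu_ldl_code(env, pc);
#endif
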
313928990626SRichard Henderson uint8_t cpu_ldb_code_mmu(CPUArchState *env, abi_ptr addr,
314028990626SRichard Henderson                          MemOpIdx oi, uintptr_t retaddr)
314128990626SRichard Henderson {
31428cfdacaaSRichard Henderson     return do_ld1_mmu(env, addr, oi, retaddr, MMU_INST_FETCH);
314328990626SRichard Henderson }
314428990626SRichard Henderson 
314528990626SRichard Henderson uint16_t cpu_ldw_code_mmu(CPUArchState *env, abi_ptr addr,
314628990626SRichard Henderson                           MemOpIdx oi, uintptr_t retaddr)
314728990626SRichard Henderson {
31488cfdacaaSRichard Henderson     return do_ld2_mmu(env, addr, oi, retaddr, MMU_INST_FETCH);
314928990626SRichard Henderson }
315028990626SRichard Henderson 
315128990626SRichard Henderson uint32_t cpu_ldl_code_mmu(CPUArchState *env, abi_ptr addr,
315228990626SRichard Henderson                           MemOpIdx oi, uintptr_t retaddr)
315328990626SRichard Henderson {
31548cfdacaaSRichard Henderson     return do_ld4_mmu(env, addr, oi, retaddr, MMU_INST_FETCH);
315528990626SRichard Henderson }
315628990626SRichard Henderson 
315728990626SRichard Henderson uint64_t cpu_ldq_code_mmu(CPUArchState *env, abi_ptr addr,
315828990626SRichard Henderson                           MemOpIdx oi, uintptr_t retaddr)
315928990626SRichard Henderson {
31608cfdacaaSRichard Henderson     return do_ld8_mmu(env, addr, oi, retaddr, MMU_INST_FETCH);
316128990626SRichard Henderson }
3162