/* xref: /openbmc/qemu/accel/tcg/cputlb.c (revision 6046f6e94d8d530ecc28176232479889abbee47e) */
/*
 *  Common CPU TLB handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "hw/core/tcg-cpu-ops.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
#include "qemu/error-report.h"
#include "exec/log.h"
#include "exec/helper-proto-common.h"
#include "qemu/atomic.h"
#include "qemu/atomic128.h"
#include "exec/translate-all.h"
#include "trace.h"
#include "tb-hash.h"
#include "internal-common.h"
#include "internal-target.h"
#ifdef CONFIG_PLUGIN
#include "qemu/plugin-memory.h"
#endif
#include "tcg/tcg-ldst.h"
#include "tcg/oversized-guest.h"

/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif

#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)

#define assert_cpu_is_self(cpu) do {                              \
        if (DEBUG_TLB_GATE) {                                     \
            g_assert(!(cpu)->created || qemu_cpu_is_self(cpu));   \
        }                                                         \
    } while (0)

/* run_on_cpu_data.target_ptr should always be big enough for a
 * vaddr even on 32 bit builds
 */
QEMU_BUILD_BUG_ON(sizeof(vaddr) > sizeof(run_on_cpu_data));

/* We currently can't handle more than 16 bits in the MMUIDX bitmask.
 */
QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
#define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)
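/* For instance, NB_MMU_MODES == 4 would give ALL_MMUIDX_BITS == 0xf. */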

static inline size_t tlb_n_entries(CPUTLBDescFast *fast)
{
    return (fast->mask >> CPU_TLB_ENTRY_BITS) + 1;
}

static inline size_t sizeof_tlb(CPUTLBDescFast *fast)
{
    return fast->mask + (1 << CPU_TLB_ENTRY_BITS);
}

static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
                             size_t max_entries)
{
    desc->window_begin_ns = ns;
    desc->window_max_entries = max_entries;
}

static void tb_jmp_cache_clear_page(CPUState *cpu, vaddr page_addr)
{
    CPUJumpCache *jc = cpu->tb_jmp_cache;
    int i, i0;

    if (unlikely(!jc)) {
        return;
    }

    i0 = tb_jmp_cache_hash_page(page_addr);
    for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
        qatomic_set(&jc->array[i0 + i].tb, NULL);
    }
}

/**
 * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
 * @desc: The CPUTLBDesc portion of the TLB
 * @fast: The CPUTLBDescFast portion of the same TLB
 *
 * Called with tlb_c.lock held.
 *
 * We have two main constraints when resizing a TLB: (1) we only resize it
 * on a TLB flush (otherwise we'd have to take a perf hit by either rehashing
 * the array or unnecessarily flushing it), which means we do not control how
 * frequently the resizing can occur; (2) we don't have access to the guest's
 * future scheduling decisions, and therefore have to decide the magnitude of
 * the resize based on past observations.
 *
 * In general, a memory-hungry process can benefit greatly from an appropriately
 * sized TLB, since a guest TLB miss is very expensive. This doesn't mean that
 * we just have to make the TLB as large as possible; while an oversized TLB
 * results in minimal TLB miss rates, it also takes longer to be flushed
 * (flushes can be _very_ frequent), and the reduced locality can also hurt
 * performance.
 *
 * To achieve near-optimal performance for all kinds of workloads, we:
 *
 * 1. Aggressively increase the size of the TLB when the use rate of the
 * TLB being flushed is high, since it is likely that in the near future this
 * memory-hungry process will execute again, and its memory hungriness will
 * probably be similar.
 *
 * 2. Slowly reduce the size of the TLB as the use rate declines over a
 * reasonably large time window. The rationale is that if in such a time window
 * we have not observed a high TLB use rate, it is likely that we won't observe
 * it in the near future. In that case, once a time window expires we downsize
 * the TLB to match the maximum use rate observed in the window.
 *
 * 3. Try to keep the maximum use rate in a time window in the 30-70% range,
 * since in that range performance is likely near-optimal. Recall that the TLB
 * is direct mapped, so we want the use rate to be low (or at least not too
 * high), since otherwise we are likely to have a significant amount of
 * conflict misses.
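 *
 * Worked example (illustrative numbers, not taken from the source):
 * with old_size == 1024 and a window maximum of 768 used entries, the
 * use rate is 768 * 100 / 1024 == 75%, so the TLB doubles to 2048
 * entries (capped at CPU_TLB_DYN_MAX_BITS) at the next flush.  If
 * instead the window maximum was 256 (a 25% rate) and the window has
 * expired, pow2ceil(256) == 256 would imply a 100% expected rate, so
 * the target is doubled to 512 entries for an expected 50% rate,
 * subject to the CPU_TLB_DYN_MIN_BITS floor.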
 */
static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast,
                                  int64_t now)
{
    size_t old_size = tlb_n_entries(fast);
    size_t rate;
    size_t new_size = old_size;
    int64_t window_len_ms = 100;
    int64_t window_len_ns = window_len_ms * 1000 * 1000;
    bool window_expired = now > desc->window_begin_ns + window_len_ns;

    if (desc->n_used_entries > desc->window_max_entries) {
        desc->window_max_entries = desc->n_used_entries;
    }
    rate = desc->window_max_entries * 100 / old_size;

    if (rate > 70) {
        new_size = MIN(old_size << 1, 1 << CPU_TLB_DYN_MAX_BITS);
    } else if (rate < 30 && window_expired) {
        size_t ceil = pow2ceil(desc->window_max_entries);
        size_t expected_rate = desc->window_max_entries * 100 / ceil;

        /*
         * Avoid undersizing when the max number of entries seen is just below
         * a pow2. For instance, if max_entries == 1025, the expected use rate
         * would be 1025/2048==50%. However, if max_entries == 1023, we'd get
         * 1023/1024==99.9% use rate, so we'd likely end up doubling the size
         * later. Thus, make sure that the expected use rate remains below 70%.
         * (and since we double the size, that means the lowest rate we'd
         * expect to get is 35%, which is still in the 30-70% range where
         * we consider that the size is appropriate.)
         */
        if (expected_rate > 70) {
            ceil *= 2;
        }
        new_size = MAX(ceil, 1 << CPU_TLB_DYN_MIN_BITS);
    }

    if (new_size == old_size) {
        if (window_expired) {
            tlb_window_reset(desc, now, desc->n_used_entries);
        }
        return;
    }

    g_free(fast->table);
    g_free(desc->fulltlb);

    tlb_window_reset(desc, now, 0);
    /* desc->n_used_entries is cleared by the caller */
    fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
    fast->table = g_try_new(CPUTLBEntry, new_size);
    desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size);

    /*
     * If the allocations fail, try smaller sizes. We just freed some
     * memory, so going back to half of new_size has a good chance of working.
     * Increased memory pressure elsewhere in the system might cause the
     * allocations to fail though, so we progressively reduce the allocation
     * size, aborting if we cannot even allocate the smallest TLB we support.
     */
    while (fast->table == NULL || desc->fulltlb == NULL) {
        if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
            error_report("%s: %s", __func__, strerror(errno));
            abort();
        }
        new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS);
        fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;

        g_free(fast->table);
        g_free(desc->fulltlb);
        fast->table = g_try_new(CPUTLBEntry, new_size);
        desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size);
    }
}

static void tlb_mmu_flush_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast)
{
    desc->n_used_entries = 0;
    desc->large_page_addr = -1;
    desc->large_page_mask = -1;
    desc->vindex = 0;
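    /*
     * Setting every byte to 0xff makes addr_read/addr_write/addr_code
     * equal to -1, which tlb_entry_is_empty() below treats as unused.
     */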
    memset(fast->table, -1, sizeof_tlb(fast));
    memset(desc->vtable, -1, sizeof(desc->vtable));
}

static void tlb_flush_one_mmuidx_locked(CPUState *cpu, int mmu_idx,
                                        int64_t now)
{
    CPUTLBDesc *desc = &cpu->neg.tlb.d[mmu_idx];
    CPUTLBDescFast *fast = &cpu->neg.tlb.f[mmu_idx];

    tlb_mmu_resize_locked(desc, fast, now);
    tlb_mmu_flush_locked(desc, fast);
}

static void tlb_mmu_init(CPUTLBDesc *desc, CPUTLBDescFast *fast, int64_t now)
{
    size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;

    tlb_window_reset(desc, now, 0);
    desc->n_used_entries = 0;
    fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
    fast->table = g_new(CPUTLBEntry, n_entries);
    desc->fulltlb = g_new(CPUTLBEntryFull, n_entries);
    tlb_mmu_flush_locked(desc, fast);
}

static inline void tlb_n_used_entries_inc(CPUState *cpu, uintptr_t mmu_idx)
{
    cpu->neg.tlb.d[mmu_idx].n_used_entries++;
}

static inline void tlb_n_used_entries_dec(CPUState *cpu, uintptr_t mmu_idx)
{
    cpu->neg.tlb.d[mmu_idx].n_used_entries--;
}

void tlb_init(CPUState *cpu)
{
    int64_t now = get_clock_realtime();
    int i;

    qemu_spin_init(&cpu->neg.tlb.c.lock);

    /* All tlbs are initialized flushed. */
    cpu->neg.tlb.c.dirty = 0;

    for (i = 0; i < NB_MMU_MODES; i++) {
        tlb_mmu_init(&cpu->neg.tlb.d[i], &cpu->neg.tlb.f[i], now);
    }
}

void tlb_destroy(CPUState *cpu)
{
    int i;

    qemu_spin_destroy(&cpu->neg.tlb.c.lock);
    for (i = 0; i < NB_MMU_MODES; i++) {
        CPUTLBDesc *desc = &cpu->neg.tlb.d[i];
        CPUTLBDescFast *fast = &cpu->neg.tlb.f[i];

        g_free(fast->table);
        g_free(desc->fulltlb);
    }
}

/* flush_all_helper: run fn across all cpus
 *
 * Queues fn as asynchronous work on every cpu other than src; the
 * caller is responsible for running fn on src itself.  The *_synced
 * variants do so by queueing the src cpu's helper as "safe" work,
 * creating a synchronisation point where all queued work is finished
 * before execution starts again.
 */
static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
                             run_on_cpu_data d)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, fn, d);
        }
    }
}

static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
{
    uint16_t asked = data.host_int;
    uint16_t all_dirty, work, to_clean;
    int64_t now = get_clock_realtime();

    assert_cpu_is_self(cpu);

    tlb_debug("mmu_idx:0x%04" PRIx16 "\n", asked);

    qemu_spin_lock(&cpu->neg.tlb.c.lock);

    all_dirty = cpu->neg.tlb.c.dirty;
    to_clean = asked & all_dirty;
    all_dirty &= ~to_clean;
    cpu->neg.tlb.c.dirty = all_dirty;

    for (work = to_clean; work != 0; work &= work - 1) {
        int mmu_idx = ctz32(work);
        tlb_flush_one_mmuidx_locked(cpu, mmu_idx, now);
    }

    qemu_spin_unlock(&cpu->neg.tlb.c.lock);

    tcg_flush_jmp_cache(cpu);

    if (to_clean == ALL_MMUIDX_BITS) {
        qatomic_set(&cpu->neg.tlb.c.full_flush_count,
                    cpu->neg.tlb.c.full_flush_count + 1);
    } else {
        qatomic_set(&cpu->neg.tlb.c.part_flush_count,
                    cpu->neg.tlb.c.part_flush_count + ctpop16(to_clean));
        if (to_clean != asked) {
            qatomic_set(&cpu->neg.tlb.c.elide_flush_count,
                        cpu->neg.tlb.c.elide_flush_count +
                        ctpop16(asked & ~to_clean));
        }
    }
}

void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
    tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);

    if (cpu->created && !qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
                         RUN_ON_CPU_HOST_INT(idxmap));
    } else {
        tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap));
    }
}
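
/*
 * Illustrative usage (hypothetical mmu_idx, not from this file): a
 * target that wants to flush a single translation regime would pass a
 * one-bit mask, e.g.
 *
 *     tlb_flush_by_mmuidx(cpu, 1 << mmu_idx);
 *
 * tlb_flush() below is simply the ALL_MMUIDX_BITS case.
 */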

void tlb_flush(CPUState *cpu)
{
    tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS);
}

void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus(src_cpu, ALL_MMUIDX_BITS);
}

void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS);
}

static bool tlb_hit_page_mask_anyprot(CPUTLBEntry *tlb_entry,
                                      vaddr page, vaddr mask)
{
    page &= mask;
    mask &= TARGET_PAGE_MASK | TLB_INVALID_MASK;

    return (page == (tlb_entry->addr_read & mask) ||
            page == (tlb_addr_write(tlb_entry) & mask) ||
            page == (tlb_entry->addr_code & mask));
}

static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry, vaddr page)
{
    return tlb_hit_page_mask_anyprot(tlb_entry, page, -1);
}

/**
 * tlb_entry_is_empty - return true if the entry is not in use
 * @te: pointer to CPUTLBEntry
 */
static inline bool tlb_entry_is_empty(const CPUTLBEntry *te)
{
    return te->addr_read == -1 && te->addr_write == -1 && te->addr_code == -1;
}

/* Called with tlb_c.lock held */
static bool tlb_flush_entry_mask_locked(CPUTLBEntry *tlb_entry,
                                        vaddr page,
                                        vaddr mask)
{
    if (tlb_hit_page_mask_anyprot(tlb_entry, page, mask)) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
        return true;
    }
    return false;
}

static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry, vaddr page)
{
    return tlb_flush_entry_mask_locked(tlb_entry, page, -1);
}

/* Called with tlb_c.lock held */
static void tlb_flush_vtlb_page_mask_locked(CPUState *cpu, int mmu_idx,
                                            vaddr page,
                                            vaddr mask)
{
    CPUTLBDesc *d = &cpu->neg.tlb.d[mmu_idx];
    int k;

    assert_cpu_is_self(cpu);
    for (k = 0; k < CPU_VTLB_SIZE; k++) {
        if (tlb_flush_entry_mask_locked(&d->vtable[k], page, mask)) {
            tlb_n_used_entries_dec(cpu, mmu_idx);
        }
    }
}

static inline void tlb_flush_vtlb_page_locked(CPUState *cpu, int mmu_idx,
                                              vaddr page)
{
    tlb_flush_vtlb_page_mask_locked(cpu, mmu_idx, page, -1);
}

static void tlb_flush_page_locked(CPUState *cpu, int midx, vaddr page)
{
    vaddr lp_addr = cpu->neg.tlb.d[midx].large_page_addr;
    vaddr lp_mask = cpu->neg.tlb.d[midx].large_page_mask;

    /* Check if we need to flush due to large pages.  */
    if ((page & lp_mask) == lp_addr) {
        tlb_debug("forcing full flush midx %d (%016"
                  VADDR_PRIx "/%016" VADDR_PRIx ")\n",
                  midx, lp_addr, lp_mask);
        tlb_flush_one_mmuidx_locked(cpu, midx, get_clock_realtime());
    } else {
        if (tlb_flush_entry_locked(tlb_entry(cpu, midx, page), page)) {
            tlb_n_used_entries_dec(cpu, midx);
        }
        tlb_flush_vtlb_page_locked(cpu, midx, page);
    }
}

/**
 * tlb_flush_page_by_mmuidx_async_0:
 * @cpu: cpu on which to flush
 * @addr: page of virtual address to flush
 * @idxmap: set of mmu_idx to flush
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, flush one page
 * at @addr from the tlbs indicated by @idxmap from @cpu.
 */
static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu,
                                             vaddr addr,
                                             uint16_t idxmap)
{
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("page addr: %016" VADDR_PRIx " mmu_map:0x%x\n", addr, idxmap);

    qemu_spin_lock(&cpu->neg.tlb.c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if ((idxmap >> mmu_idx) & 1) {
            tlb_flush_page_locked(cpu, mmu_idx, addr);
        }
    }
    qemu_spin_unlock(&cpu->neg.tlb.c.lock);

    /*
     * Discard jump cache entries for any tb which might potentially
     * overlap the flushed page, which includes the previous.
     */
    tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
    tb_jmp_cache_clear_page(cpu, addr);
}

/**
 * tlb_flush_page_by_mmuidx_async_1:
 * @cpu: cpu on which to flush
 * @data: encoded addr + idxmap
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, called through
 * async_run_on_cpu.  The idxmap parameter is encoded in the page
 * offset of the target_ptr field.  This limits the set of mmu_idx
 * that can be passed via this method.
 */
static void tlb_flush_page_by_mmuidx_async_1(CPUState *cpu,
                                             run_on_cpu_data data)
{
    vaddr addr_and_idxmap = data.target_ptr;
    vaddr addr = addr_and_idxmap & TARGET_PAGE_MASK;
    uint16_t idxmap = addr_and_idxmap & ~TARGET_PAGE_MASK;

    tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
}

typedef struct {
    vaddr addr;
    uint16_t idxmap;
} TLBFlushPageByMMUIdxData;

/**
 * tlb_flush_page_by_mmuidx_async_2:
 * @cpu: cpu on which to flush
 * @data: allocated addr + idxmap
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, called through
 * async_run_on_cpu.  The addr+idxmap parameters are stored in a
 * TLBFlushPageByMMUIdxData structure that has been allocated
 * specifically for this helper.  Free the structure when done.
 */
static void tlb_flush_page_by_mmuidx_async_2(CPUState *cpu,
                                             run_on_cpu_data data)
{
    TLBFlushPageByMMUIdxData *d = data.host_ptr;

    tlb_flush_page_by_mmuidx_async_0(cpu, d->addr, d->idxmap);
    g_free(d);
}

void tlb_flush_page_by_mmuidx(CPUState *cpu, vaddr addr, uint16_t idxmap)
{
    tlb_debug("addr: %016" VADDR_PRIx " mmu_idx:%" PRIx16 "\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    if (qemu_cpu_is_self(cpu)) {
        tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
    } else if (idxmap < TARGET_PAGE_SIZE) {
        /*
         * Most targets have only a few mmu_idx.  In the case where
         * we can stuff idxmap into the low TARGET_PAGE_BITS, avoid
         * allocating memory for this operation.
         */
        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        TLBFlushPageByMMUIdxData *d = g_new(TLBFlushPageByMMUIdxData, 1);

        /* Otherwise allocate a structure, freed by the worker.  */
        d->addr = addr;
        d->idxmap = idxmap;
        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_2,
                         RUN_ON_CPU_HOST_PTR(d));
    }
}
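
/*
 * Illustrative example of the encoding above (values are hypothetical):
 * with TARGET_PAGE_BITS == 12, any idxmap below 0x1000 fits in the page
 * offset, so addr == 0x7f0000 and idxmap == 0x3 travel as the single
 * value 0x7f0003 and are split apart again by
 * tlb_flush_page_by_mmuidx_async_1.
 */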

void tlb_flush_page(CPUState *cpu, vaddr addr)
{
    tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS);
}

void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, vaddr addr,
                                       uint16_t idxmap)
{
    tlb_debug("addr: %016" VADDR_PRIx " mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    /*
     * Allocate memory to hold addr+idxmap only when needed.
     * See tlb_flush_page_by_mmuidx for details.
     */
    if (idxmap < TARGET_PAGE_SIZE) {
        flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        CPUState *dst_cpu;

        /* Allocate a separate data block for each destination cpu.  */
        CPU_FOREACH(dst_cpu) {
            if (dst_cpu != src_cpu) {
                TLBFlushPageByMMUIdxData *d
                    = g_new(TLBFlushPageByMMUIdxData, 1);

                d->addr = addr;
                d->idxmap = idxmap;
                async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
                                 RUN_ON_CPU_HOST_PTR(d));
            }
        }
    }

    tlb_flush_page_by_mmuidx_async_0(src_cpu, addr, idxmap);
}

void tlb_flush_page_all_cpus(CPUState *src, vaddr addr)
{
    tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS);
}

void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                              vaddr addr,
                                              uint16_t idxmap)
{
    tlb_debug("addr: %016" VADDR_PRIx " mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    /*
     * Allocate memory to hold addr+idxmap only when needed.
     * See tlb_flush_page_by_mmuidx for details.
     */
    if (idxmap < TARGET_PAGE_SIZE) {
        flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
        async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                              RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        CPUState *dst_cpu;
        TLBFlushPageByMMUIdxData *d;

        /* Allocate a separate data block for each destination cpu.  */
        CPU_FOREACH(dst_cpu) {
            if (dst_cpu != src_cpu) {
                d = g_new(TLBFlushPageByMMUIdxData, 1);
                d->addr = addr;
                d->idxmap = idxmap;
                async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
                                 RUN_ON_CPU_HOST_PTR(d));
            }
        }

        d = g_new(TLBFlushPageByMMUIdxData, 1);
        d->addr = addr;
        d->idxmap = idxmap;
        async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_2,
                              RUN_ON_CPU_HOST_PTR(d));
    }
}

void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr)
{
    tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
}

static void tlb_flush_range_locked(CPUState *cpu, int midx,
                                   vaddr addr, vaddr len,
                                   unsigned bits)
{
    CPUTLBDesc *d = &cpu->neg.tlb.d[midx];
    CPUTLBDescFast *f = &cpu->neg.tlb.f[midx];
    vaddr mask = MAKE_64BIT_MASK(0, bits);

    /*
     * If @bits is smaller than the tlb size, there may be multiple entries
     * within the TLB; otherwise all addresses that match under @mask hit
     * the same TLB entry.
     * TODO: Perhaps allow bits to be a few bits less than the size.
     * For now, just flush the entire TLB.
     *
     * If @len is larger than the tlb size, then it will take longer to
     * test all of the entries in the TLB than it will to flush it all.
     */
    if (mask < f->mask || len > f->mask) {
        tlb_debug("forcing full flush midx %d ("
                  "%016" VADDR_PRIx "/%016" VADDR_PRIx "+%016" VADDR_PRIx ")\n",
                  midx, addr, mask, len);
        tlb_flush_one_mmuidx_locked(cpu, midx, get_clock_realtime());
        return;
    }

    /*
     * Check if we need to flush due to large pages.
     * Because large_page_mask contains all 1's from the msb,
     * we only need to test the end of the range.
     */
    if (((addr + len - 1) & d->large_page_mask) == d->large_page_addr) {
        tlb_debug("forcing full flush midx %d ("
                  "%016" VADDR_PRIx "/%016" VADDR_PRIx ")\n",
                  midx, d->large_page_addr, d->large_page_mask);
        tlb_flush_one_mmuidx_locked(cpu, midx, get_clock_realtime());
        return;
    }

    for (vaddr i = 0; i < len; i += TARGET_PAGE_SIZE) {
        vaddr page = addr + i;
        CPUTLBEntry *entry = tlb_entry(cpu, midx, page);

        if (tlb_flush_entry_mask_locked(entry, page, mask)) {
            tlb_n_used_entries_dec(cpu, midx);
        }
        tlb_flush_vtlb_page_mask_locked(cpu, midx, page, mask);
    }
}

typedef struct {
    vaddr addr;
    vaddr len;
    uint16_t idxmap;
    uint16_t bits;
} TLBFlushRangeData;

static void tlb_flush_range_by_mmuidx_async_0(CPUState *cpu,
                                              TLBFlushRangeData d)
{
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("range: %016" VADDR_PRIx "/%u+%016" VADDR_PRIx " mmu_map:0x%x\n",
              d.addr, d.bits, d.len, d.idxmap);

    qemu_spin_lock(&cpu->neg.tlb.c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if ((d.idxmap >> mmu_idx) & 1) {
            tlb_flush_range_locked(cpu, mmu_idx, d.addr, d.len, d.bits);
        }
    }
    qemu_spin_unlock(&cpu->neg.tlb.c.lock);

    /*
     * If the length is larger than the jump cache size, then it will take
     * longer to clear each entry individually than it will to clear it all.
     */
    if (d.len >= (TARGET_PAGE_SIZE * TB_JMP_CACHE_SIZE)) {
        tcg_flush_jmp_cache(cpu);
        return;
    }

    /*
     * Discard jump cache entries for any tb which might potentially
     * overlap the flushed pages, which includes the previous.
     */
    d.addr -= TARGET_PAGE_SIZE;
    for (vaddr i = 0, n = d.len / TARGET_PAGE_SIZE + 1; i < n; i++) {
        tb_jmp_cache_clear_page(cpu, d.addr);
        d.addr += TARGET_PAGE_SIZE;
    }
}

static void tlb_flush_range_by_mmuidx_async_1(CPUState *cpu,
                                              run_on_cpu_data data)
{
    TLBFlushRangeData *d = data.host_ptr;
    tlb_flush_range_by_mmuidx_async_0(cpu, *d);
    g_free(d);
}

void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
                               vaddr len, uint16_t idxmap,
                               unsigned bits)
{
    TLBFlushRangeData d;

    /*
     * If all bits are significant, and len is small,
     * this devolves to tlb_flush_page.
     */
    if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
        tlb_flush_page_by_mmuidx(cpu, addr, idxmap);
        return;
    }
    /* If no page bits are significant, this devolves to tlb_flush. */
    if (bits < TARGET_PAGE_BITS) {
        tlb_flush_by_mmuidx(cpu, idxmap);
        return;
    }

    /* This should already be page aligned */
    d.addr = addr & TARGET_PAGE_MASK;
    d.len = len;
    d.idxmap = idxmap;
    d.bits = bits;

    if (qemu_cpu_is_self(cpu)) {
        tlb_flush_range_by_mmuidx_async_0(cpu, d);
    } else {
        /* Otherwise allocate a structure, freed by the worker.  */
        TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
        async_run_on_cpu(cpu, tlb_flush_range_by_mmuidx_async_1,
                         RUN_ON_CPU_HOST_PTR(p));
    }
}
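
/*
 * Illustrative usage (hypothetical values): flushing a 64 KiB region
 * from all regimes with every address bit significant would be
 *
 *     tlb_flush_range_by_mmuidx(cpu, addr, 0x10000, ALL_MMUIDX_BITS,
 *                               TARGET_LONG_BITS);
 *
 * A single page with fewer significant bits (e.g. a target that
 * ignores high tag bits) is the tlb_flush_page_bits_by_mmuidx() case
 * below.
 */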

void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, vaddr addr,
                                   uint16_t idxmap, unsigned bits)
{
    tlb_flush_range_by_mmuidx(cpu, addr, TARGET_PAGE_SIZE, idxmap, bits);
}

void tlb_flush_range_by_mmuidx_all_cpus(CPUState *src_cpu,
                                        vaddr addr, vaddr len,
                                        uint16_t idxmap, unsigned bits)
{
    TLBFlushRangeData d;
    CPUState *dst_cpu;

    /*
     * If all bits are significant, and len is small,
     * this devolves to tlb_flush_page.
     */
    if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
        tlb_flush_page_by_mmuidx_all_cpus(src_cpu, addr, idxmap);
        return;
    }
    /* If no page bits are significant, this devolves to tlb_flush. */
    if (bits < TARGET_PAGE_BITS) {
        tlb_flush_by_mmuidx_all_cpus(src_cpu, idxmap);
        return;
    }

    /* This should already be page aligned */
    d.addr = addr & TARGET_PAGE_MASK;
    d.len = len;
    d.idxmap = idxmap;
    d.bits = bits;

    /* Allocate a separate data block for each destination cpu.  */
    CPU_FOREACH(dst_cpu) {
        if (dst_cpu != src_cpu) {
            TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
            async_run_on_cpu(dst_cpu,
                             tlb_flush_range_by_mmuidx_async_1,
                             RUN_ON_CPU_HOST_PTR(p));
        }
    }

    tlb_flush_range_by_mmuidx_async_0(src_cpu, d);
}

void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu,
                                            vaddr addr, uint16_t idxmap,
                                            unsigned bits)
{
    tlb_flush_range_by_mmuidx_all_cpus(src_cpu, addr, TARGET_PAGE_SIZE,
                                       idxmap, bits);
}

void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                               vaddr addr,
                                               vaddr len,
                                               uint16_t idxmap,
                                               unsigned bits)
{
    TLBFlushRangeData d, *p;
    CPUState *dst_cpu;

    /*
     * If all bits are significant, and len is small,
     * this devolves to tlb_flush_page.
     */
    if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
        tlb_flush_page_by_mmuidx_all_cpus_synced(src_cpu, addr, idxmap);
        return;
    }
    /* If no page bits are significant, this devolves to tlb_flush. */
    if (bits < TARGET_PAGE_BITS) {
        tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, idxmap);
        return;
    }

    /* This should already be page aligned */
    d.addr = addr & TARGET_PAGE_MASK;
    d.len = len;
    d.idxmap = idxmap;
    d.bits = bits;

    /* Allocate a separate data block for each destination cpu.  */
    CPU_FOREACH(dst_cpu) {
        if (dst_cpu != src_cpu) {
            p = g_memdup(&d, sizeof(d));
            async_run_on_cpu(dst_cpu, tlb_flush_range_by_mmuidx_async_1,
                             RUN_ON_CPU_HOST_PTR(p));
        }
    }

    p = g_memdup(&d, sizeof(d));
    async_safe_run_on_cpu(src_cpu, tlb_flush_range_by_mmuidx_async_1,
                          RUN_ON_CPU_HOST_PTR(p));
}

void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                                   vaddr addr,
                                                   uint16_t idxmap,
                                                   unsigned bits)
{
    tlb_flush_range_by_mmuidx_all_cpus_synced(src_cpu, addr, TARGET_PAGE_SIZE,
                                              idxmap, bits);
}

937d9bb58e5SYang Zhong /* update the TLBs so that writes to code in the RAM page 'ram_addr'
938d9bb58e5SYang Zhong    can be detected */
939d9bb58e5SYang Zhong void tlb_protect_code(ram_addr_t ram_addr)
940d9bb58e5SYang Zhong {
94193b99616SRichard Henderson     cpu_physical_memory_test_and_clear_dirty(ram_addr & TARGET_PAGE_MASK,
94293b99616SRichard Henderson                                              TARGET_PAGE_SIZE,
943d9bb58e5SYang Zhong                                              DIRTY_MEMORY_CODE);
944d9bb58e5SYang Zhong }
945d9bb58e5SYang Zhong 
946d9bb58e5SYang Zhong /* update the TLB so that writes in physical page 'ram_addr' are no longer
947d9bb58e5SYang Zhong    tested for self-modifying code */
948d9bb58e5SYang Zhong void tlb_unprotect_code(ram_addr_t ram_addr)
949d9bb58e5SYang Zhong {
950d9bb58e5SYang Zhong     cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
951d9bb58e5SYang Zhong }
952d9bb58e5SYang Zhong 
953d9bb58e5SYang Zhong 
954d9bb58e5SYang Zhong /*
955d9bb58e5SYang Zhong  * Dirty write flag handling
956d9bb58e5SYang Zhong  *
957d9bb58e5SYang Zhong  * When the TCG code writes to a location it looks up the address in
958d9bb58e5SYang Zhong  * the TLB and uses that data to compute the final address. If any of
959d9bb58e5SYang Zhong  * the lower bits of the address are set then the slow path is forced.
960d9bb58e5SYang Zhong  * There are a number of reasons to do this but for normal RAM the
961d9bb58e5SYang Zhong  * most usual is detecting writes to code regions which may invalidate
962d9bb58e5SYang Zhong  * generated code.
963d9bb58e5SYang Zhong  *
96471aec354SEmilio G. Cota  * Other vCPUs might be reading their TLBs during guest execution, so we update
965d73415a3SStefan Hajnoczi  * te->addr_write with qatomic_set. We don't need to worry about this for
96671aec354SEmilio G. Cota  * oversized guests as MTTCG is disabled for them.
967d9bb58e5SYang Zhong  *
96853d28455SRichard Henderson  * Called with tlb_c.lock held.
969d9bb58e5SYang Zhong  */
97071aec354SEmilio G. Cota static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
97171aec354SEmilio G. Cota                                          uintptr_t start, uintptr_t length)
972d9bb58e5SYang Zhong {
973d9bb58e5SYang Zhong     uintptr_t addr = tlb_entry->addr_write;
974d9bb58e5SYang Zhong 
9757b0d792cSRichard Henderson     if ((addr & (TLB_INVALID_MASK | TLB_MMIO |
9767b0d792cSRichard Henderson                  TLB_DISCARD_WRITE | TLB_NOTDIRTY)) == 0) {
977d9bb58e5SYang Zhong         addr &= TARGET_PAGE_MASK;
978d9bb58e5SYang Zhong         addr += tlb_entry->addend;
979d9bb58e5SYang Zhong         if ((addr - start) < length) {
980238f4380SRichard Henderson #if TARGET_LONG_BITS == 32
981238f4380SRichard Henderson             uint32_t *ptr_write = (uint32_t *)&tlb_entry->addr_write;
982238f4380SRichard Henderson             ptr_write += HOST_BIG_ENDIAN;
983238f4380SRichard Henderson             qatomic_set(ptr_write, *ptr_write | TLB_NOTDIRTY);
984238f4380SRichard Henderson #elif TCG_OVERSIZED_GUEST
98571aec354SEmilio G. Cota             tlb_entry->addr_write |= TLB_NOTDIRTY;
986d9bb58e5SYang Zhong #else
987d73415a3SStefan Hajnoczi             qatomic_set(&tlb_entry->addr_write,
98871aec354SEmilio G. Cota                         tlb_entry->addr_write | TLB_NOTDIRTY);
989d9bb58e5SYang Zhong #endif
990d9bb58e5SYang Zhong         }
99171aec354SEmilio G. Cota     }
99271aec354SEmilio G. Cota }
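
/*
 * Illustrative sketch, not part of the build: why OR-ing TLB_NOTDIRTY
 * into addr_write above is enough to force the store slow path.  The
 * fast path compares the page-aligned access address against the
 * whole comparator, low bits included, so any flag bit left set makes
 * the comparison fail; the slow path then calls notdirty_write().
 * This is a simplified form of the check done by generated code.
 */
#ifdef CPUTLB_EXAMPLES
static bool example_store_fast_hit(CPUTLBEntry *ent, vaddr addr)
{
    return tlb_addr_write(ent) == (addr & TARGET_PAGE_MASK);
}
#endif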
99371aec354SEmilio G. Cota 
99471aec354SEmilio G. Cota /*
99553d28455SRichard Henderson  * Called with tlb_c.lock held.
99671aec354SEmilio G. Cota  * Called only from the vCPU context, i.e. the TLB's owner thread.
99771aec354SEmilio G. Cota  */
99871aec354SEmilio G. Cota static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s)
99971aec354SEmilio G. Cota {
100071aec354SEmilio G. Cota     *d = *s;
100171aec354SEmilio G. Cota }
1002d9bb58e5SYang Zhong 
1003d9bb58e5SYang Zhong /* This is a cross vCPU call (i.e. another vCPU resetting the flags of
100471aec354SEmilio G. Cota  * the target vCPU).
100553d28455SRichard Henderson  * We must take tlb_c.lock to avoid racing with another vCPU update. The only
100671aec354SEmilio G. Cota  * thing actually updated is the target TLB entry ->addr_write flags.
1007d9bb58e5SYang Zhong  */
1008d9bb58e5SYang Zhong void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
1009d9bb58e5SYang Zhong {
1010d9bb58e5SYang Zhong     int mmu_idx;
1011d9bb58e5SYang Zhong 
101210b32e2cSAnton Johansson     qemu_spin_lock(&cpu->neg.tlb.c.lock);
1013d9bb58e5SYang Zhong     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1014d9bb58e5SYang Zhong         unsigned int i;
101510b32e2cSAnton Johansson         unsigned int n = tlb_n_entries(&cpu->neg.tlb.f[mmu_idx]);
1016d9bb58e5SYang Zhong 
101786e1eff8SEmilio G. Cota         for (i = 0; i < n; i++) {
101810b32e2cSAnton Johansson             tlb_reset_dirty_range_locked(&cpu->neg.tlb.f[mmu_idx].table[i],
1019a40ec84eSRichard Henderson                                          start1, length);
1020d9bb58e5SYang Zhong         }
1021d9bb58e5SYang Zhong 
1022d9bb58e5SYang Zhong         for (i = 0; i < CPU_VTLB_SIZE; i++) {
102310b32e2cSAnton Johansson             tlb_reset_dirty_range_locked(&cpu->neg.tlb.d[mmu_idx].vtable[i],
1024a40ec84eSRichard Henderson                                          start1, length);
1025d9bb58e5SYang Zhong         }
1026d9bb58e5SYang Zhong     }
102710b32e2cSAnton Johansson     qemu_spin_unlock(&cpu->neg.tlb.c.lock);
1028d9bb58e5SYang Zhong }
1029d9bb58e5SYang Zhong 
103053d28455SRichard Henderson /* Called with tlb_c.lock held */
103171aec354SEmilio G. Cota static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry,
1032732d5487SAnton Johansson                                          vaddr addr)
1033d9bb58e5SYang Zhong {
1034732d5487SAnton Johansson     if (tlb_entry->addr_write == (addr | TLB_NOTDIRTY)) {
1035732d5487SAnton Johansson         tlb_entry->addr_write = addr;
1036d9bb58e5SYang Zhong     }
1037d9bb58e5SYang Zhong }
1038d9bb58e5SYang Zhong 
1039d9bb58e5SYang Zhong /* update the TLB corresponding to virtual page 'addr'
1040d9bb58e5SYang Zhong    so that it is no longer dirty */
1041732d5487SAnton Johansson void tlb_set_dirty(CPUState *cpu, vaddr addr)
1042d9bb58e5SYang Zhong {
1043d9bb58e5SYang Zhong     int mmu_idx;
1044d9bb58e5SYang Zhong 
1045d9bb58e5SYang Zhong     assert_cpu_is_self(cpu);
1046d9bb58e5SYang Zhong 
1047732d5487SAnton Johansson     addr &= TARGET_PAGE_MASK;
104810b32e2cSAnton Johansson     qemu_spin_lock(&cpu->neg.tlb.c.lock);
1049d9bb58e5SYang Zhong     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
105010b32e2cSAnton Johansson         tlb_set_dirty1_locked(tlb_entry(cpu, mmu_idx, addr), addr);
1051d9bb58e5SYang Zhong     }
1052d9bb58e5SYang Zhong 
1053d9bb58e5SYang Zhong     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1054d9bb58e5SYang Zhong         int k;
1055d9bb58e5SYang Zhong         for (k = 0; k < CPU_VTLB_SIZE; k++) {
105610b32e2cSAnton Johansson             tlb_set_dirty1_locked(&cpu->neg.tlb.d[mmu_idx].vtable[k], addr);
1057d9bb58e5SYang Zhong         }
1058d9bb58e5SYang Zhong     }
105910b32e2cSAnton Johansson     qemu_spin_unlock(&cpu->neg.tlb.c.lock);
1060d9bb58e5SYang Zhong }
1061d9bb58e5SYang Zhong 
1062d9bb58e5SYang Zhong /* Our TLB does not support large pages, so remember the area covered by
1063d9bb58e5SYang Zhong    large pages and trigger a full TLB flush if these are invalidated.  */
106410b32e2cSAnton Johansson static void tlb_add_large_page(CPUState *cpu, int mmu_idx,
1065732d5487SAnton Johansson                                vaddr addr, uint64_t size)
1066d9bb58e5SYang Zhong {
106710b32e2cSAnton Johansson     vaddr lp_addr = cpu->neg.tlb.d[mmu_idx].large_page_addr;
1068732d5487SAnton Johansson     vaddr lp_mask = ~(size - 1);
1069d9bb58e5SYang Zhong 
1070732d5487SAnton Johansson     if (lp_addr == (vaddr)-1) {
10711308e026SRichard Henderson         /* No previous large page.  */
1072732d5487SAnton Johansson         lp_addr = addr;
10731308e026SRichard Henderson     } else {
1074d9bb58e5SYang Zhong         /* Extend the existing region to include the new page.
10751308e026SRichard Henderson            This is a compromise between unnecessary flushes and
10761308e026SRichard Henderson            the cost of maintaining a full variable size TLB.  */
107710b32e2cSAnton Johansson         lp_mask &= cpu->neg.tlb.d[mmu_idx].large_page_mask;
1078732d5487SAnton Johansson         while (((lp_addr ^ addr) & lp_mask) != 0) {
10791308e026SRichard Henderson             lp_mask <<= 1;
1080d9bb58e5SYang Zhong         }
10811308e026SRichard Henderson     }
108210b32e2cSAnton Johansson     cpu->neg.tlb.d[mmu_idx].large_page_addr = lp_addr & lp_mask;
108310b32e2cSAnton Johansson     cpu->neg.tlb.d[mmu_idx].large_page_mask = lp_mask;
1084d9bb58e5SYang Zhong }
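
/*
 * Worked example for the widening loop above, with invented addresses:
 * an existing 2 MiB large page at 0x40000000 and a new page at
 * 0x40400000 differ in bit 22, so lp_mask shifts twice, from
 * ~(2 MiB - 1) to ~(8 MiB - 1), and the recorded region becomes the
 * 8 MiB block covering both pages.
 */
#ifdef CPUTLB_EXAMPLES
static vaddr example_widen_mask(vaddr lp_addr, vaddr addr, vaddr lp_mask)
{
    /* The same loop as tlb_add_large_page(). */
    while (((lp_addr ^ addr) & lp_mask) != 0) {
        lp_mask <<= 1;
    }
    return lp_mask;
}
#endif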
1085d9bb58e5SYang Zhong 
108658e8f1f6SRichard Henderson static inline void tlb_set_compare(CPUTLBEntryFull *full, CPUTLBEntry *ent,
1087d712b116SAnton Johansson                                    vaddr address, int flags,
108858e8f1f6SRichard Henderson                                    MMUAccessType access_type, bool enable)
108958e8f1f6SRichard Henderson {
109058e8f1f6SRichard Henderson     if (enable) {
109158e8f1f6SRichard Henderson         address |= flags & TLB_FLAGS_MASK;
109258e8f1f6SRichard Henderson         flags &= TLB_SLOW_FLAGS_MASK;
109358e8f1f6SRichard Henderson         if (flags) {
109458e8f1f6SRichard Henderson             address |= TLB_FORCE_SLOW;
109558e8f1f6SRichard Henderson         }
109658e8f1f6SRichard Henderson     } else {
109758e8f1f6SRichard Henderson         address = -1;
109858e8f1f6SRichard Henderson         flags = 0;
109958e8f1f6SRichard Henderson     }
110058e8f1f6SRichard Henderson     ent->addr_idx[access_type] = address;
110158e8f1f6SRichard Henderson     full->slow_flags[access_type] = flags;
110258e8f1f6SRichard Henderson }
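
/*
 * Minimal sketch, not part of the build: how tlb_set_compare() is
 * driven for a hypothetical read-only, non-executable page.  With
 * enable clear, the comparator is set to -1, which no page-aligned
 * address can ever match.
 */
#ifdef CPUTLB_EXAMPLES
static void example_read_only_entry(CPUTLBEntryFull *full, CPUTLBEntry *ent,
                                    vaddr page)
{
    tlb_set_compare(full, ent, page, 0, MMU_DATA_LOAD, true);
    tlb_set_compare(full, ent, page, 0, MMU_DATA_STORE, false);
    tlb_set_compare(full, ent, page, 0, MMU_INST_FETCH, false);
}
#endif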
110358e8f1f6SRichard Henderson 
110440473689SRichard Henderson /*
110540473689SRichard Henderson  * Add a new TLB entry. At most one entry for a given virtual address
1106d9bb58e5SYang Zhong  * is permitted. Only a single TARGET_PAGE_SIZE region is mapped; the
1107d9bb58e5SYang Zhong  * supplied size is only used by tlb_flush_page.
1108d9bb58e5SYang Zhong  *
1109d9bb58e5SYang Zhong  * Called from TCG-generated code, which is under an RCU read-side
1110d9bb58e5SYang Zhong  * critical section.
1111d9bb58e5SYang Zhong  */
111240473689SRichard Henderson void tlb_set_page_full(CPUState *cpu, int mmu_idx,
1113732d5487SAnton Johansson                        vaddr addr, CPUTLBEntryFull *full)
1114d9bb58e5SYang Zhong {
111510b32e2cSAnton Johansson     CPUTLB *tlb = &cpu->neg.tlb;
1116a40ec84eSRichard Henderson     CPUTLBDesc *desc = &tlb->d[mmu_idx];
1117d9bb58e5SYang Zhong     MemoryRegionSection *section;
111858e8f1f6SRichard Henderson     unsigned int index, read_flags, write_flags;
1119d9bb58e5SYang Zhong     uintptr_t addend;
112068fea038SRichard Henderson     CPUTLBEntry *te, tn;
112155df6fcfSPeter Maydell     hwaddr iotlb, xlat, sz, paddr_page;
1122732d5487SAnton Johansson     vaddr addr_page;
112340473689SRichard Henderson     int asidx, wp_flags, prot;
11248f5db641SRichard Henderson     bool is_ram, is_romd;
1125d9bb58e5SYang Zhong 
1126d9bb58e5SYang Zhong     assert_cpu_is_self(cpu);
112755df6fcfSPeter Maydell 
112840473689SRichard Henderson     if (full->lg_page_size <= TARGET_PAGE_BITS) {
112955df6fcfSPeter Maydell         sz = TARGET_PAGE_SIZE;
113055df6fcfSPeter Maydell     } else {
113140473689SRichard Henderson         sz = (hwaddr)1 << full->lg_page_size;
113210b32e2cSAnton Johansson         tlb_add_large_page(cpu, mmu_idx, addr, sz);
113355df6fcfSPeter Maydell     }
1134732d5487SAnton Johansson     addr_page = addr & TARGET_PAGE_MASK;
113540473689SRichard Henderson     paddr_page = full->phys_addr & TARGET_PAGE_MASK;
113655df6fcfSPeter Maydell 
113740473689SRichard Henderson     prot = full->prot;
113840473689SRichard Henderson     asidx = cpu_asidx_from_attrs(cpu, full->attrs);
113955df6fcfSPeter Maydell     section = address_space_translate_for_iotlb(cpu, asidx, paddr_page,
114040473689SRichard Henderson                                                 &xlat, &sz, full->attrs, &prot);
1141d9bb58e5SYang Zhong     assert(sz >= TARGET_PAGE_SIZE);
1142d9bb58e5SYang Zhong 
11438c605cf1SAnton Johansson     tlb_debug("vaddr=%016" VADDR_PRIx " paddr=0x" HWADDR_FMT_plx
1144d9bb58e5SYang Zhong               " prot=%x idx=%d\n",
1145732d5487SAnton Johansson               addr, full->phys_addr, prot, mmu_idx);
1146d9bb58e5SYang Zhong 
114758e8f1f6SRichard Henderson     read_flags = 0;
114840473689SRichard Henderson     if (full->lg_page_size < TARGET_PAGE_BITS) {
114930d7e098SRichard Henderson         /* Repeat the MMU check and TLB fill on every access.  */
115058e8f1f6SRichard Henderson         read_flags |= TLB_INVALID_MASK;
115155df6fcfSPeter Maydell     }
115240473689SRichard Henderson     if (full->attrs.byte_swap) {
115358e8f1f6SRichard Henderson         read_flags |= TLB_BSWAP;
1154a26fc6f5STony Nguyen     }
11558f5db641SRichard Henderson 
11568f5db641SRichard Henderson     is_ram = memory_region_is_ram(section->mr);
11578f5db641SRichard Henderson     is_romd = memory_region_is_romd(section->mr);
11588f5db641SRichard Henderson 
11598f5db641SRichard Henderson     if (is_ram || is_romd) {
11608f5db641SRichard Henderson         /* RAM and ROMD both have associated host memory. */
1161d9bb58e5SYang Zhong         addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
11628f5db641SRichard Henderson     } else {
11638f5db641SRichard Henderson         /* I/O does not; force the host address to NULL. */
11648f5db641SRichard Henderson         addend = 0;
1165d9bb58e5SYang Zhong     }
1166d9bb58e5SYang Zhong 
116758e8f1f6SRichard Henderson     write_flags = read_flags;
11688f5db641SRichard Henderson     if (is_ram) {
11698f5db641SRichard Henderson         iotlb = memory_region_get_ram_addr(section->mr) + xlat;
1170dff1ab68SLIU Zhiwei         assert(!(iotlb & ~TARGET_PAGE_MASK));
11718f5db641SRichard Henderson         /*
11728f5db641SRichard Henderson          * Computing is_clean is expensive; avoid all that unless
11738f5db641SRichard Henderson          * the page is actually writable.
11748f5db641SRichard Henderson          */
11758f5db641SRichard Henderson         if (prot & PAGE_WRITE) {
11768f5db641SRichard Henderson             if (section->readonly) {
117758e8f1f6SRichard Henderson                 write_flags |= TLB_DISCARD_WRITE;
11788f5db641SRichard Henderson             } else if (cpu_physical_memory_is_clean(iotlb)) {
117958e8f1f6SRichard Henderson                 write_flags |= TLB_NOTDIRTY;
11808f5db641SRichard Henderson             }
11818f5db641SRichard Henderson         }
11828f5db641SRichard Henderson     } else {
11838f5db641SRichard Henderson         /* I/O or ROMD */
11848f5db641SRichard Henderson         iotlb = memory_region_section_get_iotlb(cpu, section) + xlat;
11858f5db641SRichard Henderson         /*
11868f5db641SRichard Henderson          * Writes to romd devices must go through MMIO to enable write.
11878f5db641SRichard Henderson          * Reads to romd devices go through the ram_ptr found above,
11888f5db641SRichard Henderson          * but of course reads to I/O must go through MMIO.
11898f5db641SRichard Henderson          */
119058e8f1f6SRichard Henderson         write_flags |= TLB_MMIO;
11918f5db641SRichard Henderson         if (!is_romd) {
119258e8f1f6SRichard Henderson             read_flags = write_flags;
11938f5db641SRichard Henderson         }
11948f5db641SRichard Henderson     }
11958f5db641SRichard Henderson 
1196732d5487SAnton Johansson     wp_flags = cpu_watchpoint_address_matches(cpu, addr_page,
119750b107c5SRichard Henderson                                               TARGET_PAGE_SIZE);
1198d9bb58e5SYang Zhong 
119910b32e2cSAnton Johansson     index = tlb_index(cpu, mmu_idx, addr_page);
120010b32e2cSAnton Johansson     te = tlb_entry(cpu, mmu_idx, addr_page);
1201d9bb58e5SYang Zhong 
120268fea038SRichard Henderson     /*
120371aec354SEmilio G. Cota      * Hold the TLB lock for the rest of the function. We could acquire/release
120471aec354SEmilio G. Cota      * the lock several times in the function, but it is faster to amortize the
120571aec354SEmilio G. Cota      * acquisition cost by acquiring it just once. Note that this leads to
120671aec354SEmilio G. Cota      * a longer critical section, but this is not a concern since the TLB lock
120771aec354SEmilio G. Cota      * is unlikely to be contended.
120871aec354SEmilio G. Cota      */
1209a40ec84eSRichard Henderson     qemu_spin_lock(&tlb->c.lock);
121071aec354SEmilio G. Cota 
12113d1523ceSRichard Henderson     /* Note that the tlb is no longer clean.  */
1212a40ec84eSRichard Henderson     tlb->c.dirty |= 1 << mmu_idx;
12133d1523ceSRichard Henderson 
121471aec354SEmilio G. Cota     /* Make sure there's no cached translation for the new page.  */
121510b32e2cSAnton Johansson     tlb_flush_vtlb_page_locked(cpu, mmu_idx, addr_page);
121671aec354SEmilio G. Cota 
121771aec354SEmilio G. Cota     /*
121868fea038SRichard Henderson      * Only evict the old entry to the victim tlb if it's for a
121968fea038SRichard Henderson      * different page; otherwise just overwrite the stale data.
122068fea038SRichard Henderson      */
1221732d5487SAnton Johansson     if (!tlb_hit_page_anyprot(te, addr_page) && !tlb_entry_is_empty(te)) {
1222a40ec84eSRichard Henderson         unsigned vidx = desc->vindex++ % CPU_VTLB_SIZE;
1223a40ec84eSRichard Henderson         CPUTLBEntry *tv = &desc->vtable[vidx];
122468fea038SRichard Henderson 
122568fea038SRichard Henderson         /* Evict the old entry into the victim tlb.  */
122671aec354SEmilio G. Cota         copy_tlb_helper_locked(tv, te);
122725d3ec58SRichard Henderson         desc->vfulltlb[vidx] = desc->fulltlb[index];
122810b32e2cSAnton Johansson         tlb_n_used_entries_dec(cpu, mmu_idx);
122968fea038SRichard Henderson     }
1230d9bb58e5SYang Zhong 
1231d9bb58e5SYang Zhong     /* refill the tlb */
1232ace41090SPeter Maydell     /*
1233dff1ab68SLIU Zhiwei      * When memory region is ram, iotlb contains a TARGET_PAGE_BITS
1234dff1ab68SLIU Zhiwei      * aligned ram_addr_t of the page base of the target RAM.
1235dff1ab68SLIU Zhiwei      * Otherwise, iotlb contains
1236dff1ab68SLIU Zhiwei      *  - a physical section number in the lower TARGET_PAGE_BITS
1237dff1ab68SLIU Zhiwei      *  - the offset within section->mr of the page base (I/O, ROMD) with the
1238dff1ab68SLIU Zhiwei      *    TARGET_PAGE_BITS masked off.
123958e8f1f6SRichard Henderson      * We subtract addr_page (which is page aligned and thus won't
1240ace41090SPeter Maydell      * disturb the low bits) to give an offset which can be added to the
1241ace41090SPeter Maydell      * (non-page-aligned) vaddr of the eventual memory access to get
1242ace41090SPeter Maydell      * the MemoryRegion offset for the access. Note that the vaddr we
1243ace41090SPeter Maydell      * subtract here is that of the page base, and not the same as the
1244fb3cb376SRichard Henderson      * vaddr we add back in io_prepare()/get_page_addr_code().
1245ace41090SPeter Maydell      */
124640473689SRichard Henderson     desc->fulltlb[index] = *full;
124758e8f1f6SRichard Henderson     full = &desc->fulltlb[index];
124858e8f1f6SRichard Henderson     full->xlat_section = iotlb - addr_page;
124958e8f1f6SRichard Henderson     full->phys_addr = paddr_page;
1250d9bb58e5SYang Zhong 
1251d9bb58e5SYang Zhong     /* Now calculate the new entry */
1252732d5487SAnton Johansson     tn.addend = addend - addr_page;
125358e8f1f6SRichard Henderson 
125458e8f1f6SRichard Henderson     tlb_set_compare(full, &tn, addr_page, read_flags,
125558e8f1f6SRichard Henderson                     MMU_INST_FETCH, prot & PAGE_EXEC);
125658e8f1f6SRichard Henderson 
125750b107c5SRichard Henderson     if (wp_flags & BP_MEM_READ) {
125858e8f1f6SRichard Henderson         read_flags |= TLB_WATCHPOINT;
125950b107c5SRichard Henderson     }
126058e8f1f6SRichard Henderson     tlb_set_compare(full, &tn, addr_page, read_flags,
126158e8f1f6SRichard Henderson                     MMU_DATA_LOAD, prot & PAGE_READ);
1262d9bb58e5SYang Zhong 
1263f52bfb12SDavid Hildenbrand     if (prot & PAGE_WRITE_INV) {
126458e8f1f6SRichard Henderson         write_flags |= TLB_INVALID_MASK;
1265f52bfb12SDavid Hildenbrand     }
126650b107c5SRichard Henderson     if (wp_flags & BP_MEM_WRITE) {
126758e8f1f6SRichard Henderson         write_flags |= TLB_WATCHPOINT;
126850b107c5SRichard Henderson     }
126958e8f1f6SRichard Henderson     tlb_set_compare(full, &tn, addr_page, write_flags,
127058e8f1f6SRichard Henderson                     MMU_DATA_STORE, prot & PAGE_WRITE);
1271d9bb58e5SYang Zhong 
127271aec354SEmilio G. Cota     copy_tlb_helper_locked(te, &tn);
127310b32e2cSAnton Johansson     tlb_n_used_entries_inc(cpu, mmu_idx);
1274a40ec84eSRichard Henderson     qemu_spin_unlock(&tlb->c.lock);
1275d9bb58e5SYang Zhong }
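
/*
 * Usage sketch, not part of the build: the shape of a target's
 * tcg_ops->tlb_fill hook.  The identity translation and the full RWX
 * protection are invented; a real hook walks the guest page tables
 * first.  tlb_set_page_full() copies the structure, so stack storage
 * is sufficient.
 */
#ifdef CPUTLB_EXAMPLES
static bool example_tlb_fill(CPUState *cs, vaddr addr, int size,
                             MMUAccessType access_type, int mmu_idx,
                             bool probe, uintptr_t retaddr)
{
    CPUTLBEntryFull full = {
        .phys_addr = addr & TARGET_PAGE_MASK,
        .attrs = MEMTXATTRS_UNSPECIFIED,
        .prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC,
        .lg_page_size = TARGET_PAGE_BITS,
    };

    tlb_set_page_full(cs, mmu_idx, addr, &full);
    return true;
}
#endif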
1276d9bb58e5SYang Zhong 
1277732d5487SAnton Johansson void tlb_set_page_with_attrs(CPUState *cpu, vaddr addr,
127840473689SRichard Henderson                              hwaddr paddr, MemTxAttrs attrs, int prot,
1279732d5487SAnton Johansson                              int mmu_idx, uint64_t size)
128040473689SRichard Henderson {
128140473689SRichard Henderson     CPUTLBEntryFull full = {
128240473689SRichard Henderson         .phys_addr = paddr,
128340473689SRichard Henderson         .attrs = attrs,
128440473689SRichard Henderson         .prot = prot,
128540473689SRichard Henderson         .lg_page_size = ctz64(size)
128640473689SRichard Henderson     };
128740473689SRichard Henderson 
128840473689SRichard Henderson     assert(is_power_of_2(size));
1289732d5487SAnton Johansson     tlb_set_page_full(cpu, mmu_idx, addr, &full);
129040473689SRichard Henderson }
129140473689SRichard Henderson 
1292732d5487SAnton Johansson void tlb_set_page(CPUState *cpu, vaddr addr,
1293d9bb58e5SYang Zhong                   hwaddr paddr, int prot,
1294732d5487SAnton Johansson                   int mmu_idx, uint64_t size)
1295d9bb58e5SYang Zhong {
1296732d5487SAnton Johansson     tlb_set_page_with_attrs(cpu, addr, paddr, MEMTXATTRS_UNSPECIFIED,
1297d9bb58e5SYang Zhong                             prot, mmu_idx, size);
1298d9bb58e5SYang Zhong }
1299d9bb58e5SYang Zhong 
1300c319dc13SRichard Henderson /*
1301c319dc13SRichard Henderson  * Note: tlb_fill() can trigger a resize of the TLB. This means that all of the
1302c319dc13SRichard Henderson  * caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must
1303c319dc13SRichard Henderson  * be discarded and looked up again (e.g. via tlb_entry()).
1304c319dc13SRichard Henderson  */
1305732d5487SAnton Johansson static void tlb_fill(CPUState *cpu, vaddr addr, int size,
1306c319dc13SRichard Henderson                      MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
1307c319dc13SRichard Henderson {
1308c319dc13SRichard Henderson     bool ok;
1309c319dc13SRichard Henderson 
1310c319dc13SRichard Henderson     /*
1311c319dc13SRichard Henderson      * This is not a probe, so only valid return is success; failure
1312c319dc13SRichard Henderson      * should result in exception + longjmp to the cpu loop.
1313c319dc13SRichard Henderson      */
13148810ee2aSAlex Bennée     ok = cpu->cc->tcg_ops->tlb_fill(cpu, addr, size,
1315e124536fSEduardo Habkost                                     access_type, mmu_idx, false, retaddr);
1316c319dc13SRichard Henderson     assert(ok);
1317c319dc13SRichard Henderson }
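
/*
 * Sketch, not part of the build, of the re-fetch pattern the comment
 * above requires: any CPUTLBEntry pointer held across a tlb_fill()
 * call must be looked up again, exactly as probe_access_internal()
 * and mmu_lookup1() do below.
 */
#ifdef CPUTLB_EXAMPLES
static CPUTLBEntry *example_refetch(CPUState *cs, vaddr addr, int mmu_idx,
                                    uintptr_t ra)
{
    CPUTLBEntry *entry = tlb_entry(cs, mmu_idx, addr);

    if (!tlb_hit(tlb_addr_write(entry), addr)) {
        tlb_fill(cs, addr, 1, MMU_DATA_STORE, mmu_idx, ra);
        /* The fill may have resized the table; the old pointer is stale. */
        entry = tlb_entry(cs, mmu_idx, addr);
    }
    return entry;
}
#endif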
1318c319dc13SRichard Henderson 
131978271684SClaudio Fontana static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
132078271684SClaudio Fontana                                         MMUAccessType access_type,
132178271684SClaudio Fontana                                         int mmu_idx, uintptr_t retaddr)
132278271684SClaudio Fontana {
13238810ee2aSAlex Bennée     cpu->cc->tcg_ops->do_unaligned_access(cpu, addr, access_type,
13248810ee2aSAlex Bennée                                           mmu_idx, retaddr);
132578271684SClaudio Fontana }
132678271684SClaudio Fontana 
1327fb3cb376SRichard Henderson static MemoryRegionSection *
1328d50ef446SAnton Johansson io_prepare(hwaddr *out_offset, CPUState *cpu, hwaddr xlat,
1329fb3cb376SRichard Henderson            MemTxAttrs attrs, vaddr addr, uintptr_t retaddr)
1330d9bb58e5SYang Zhong {
13312d54f194SPeter Maydell     MemoryRegionSection *section;
1332fb3cb376SRichard Henderson     hwaddr mr_offset;
1333d9bb58e5SYang Zhong 
1334fb3cb376SRichard Henderson     section = iotlb_to_section(cpu, xlat, attrs);
1335fb3cb376SRichard Henderson     mr_offset = (xlat & TARGET_PAGE_MASK) + addr;
1336d9bb58e5SYang Zhong     cpu->mem_io_pc = retaddr;
1337464dacf6SRichard Henderson     if (!cpu->neg.can_do_io) {
1338d9bb58e5SYang Zhong         cpu_io_recompile(cpu, retaddr);
1339d9bb58e5SYang Zhong     }
1340d9bb58e5SYang Zhong 
1341fb3cb376SRichard Henderson     *out_offset = mr_offset;
1342fb3cb376SRichard Henderson     return section;
1343fb3cb376SRichard Henderson }
1344fb3cb376SRichard Henderson 
1345d50ef446SAnton Johansson static void io_failed(CPUState *cpu, CPUTLBEntryFull *full, vaddr addr,
1346fb3cb376SRichard Henderson                       unsigned size, MMUAccessType access_type, int mmu_idx,
13470e114440SRichard Henderson                       MemTxResult response, uintptr_t retaddr)
1348fb3cb376SRichard Henderson {
1349d50ef446SAnton Johansson     if (!cpu->ignore_memory_transaction_failures
1350d50ef446SAnton Johansson         && cpu->cc->tcg_ops->do_transaction_failed) {
13510e114440SRichard Henderson         hwaddr physaddr = full->phys_addr | (addr & ~TARGET_PAGE_MASK);
1352bef0c216SRichard Henderson 
1353d50ef446SAnton Johansson         cpu->cc->tcg_ops->do_transaction_failed(cpu, physaddr, addr, size,
1354bef0c216SRichard Henderson                                                 access_type, mmu_idx,
1355bef0c216SRichard Henderson                                                 full->attrs, response, retaddr);
1356bef0c216SRichard Henderson     }
1357bef0c216SRichard Henderson }
1358fb3cb376SRichard Henderson 
1359d9bb58e5SYang Zhong /* Return true if ADDR is present in the victim tlb, and has been copied
1360d9bb58e5SYang Zhong    back to the main tlb.  */
136110b32e2cSAnton Johansson static bool victim_tlb_hit(CPUState *cpu, size_t mmu_idx, size_t index,
1362732d5487SAnton Johansson                            MMUAccessType access_type, vaddr page)
1363d9bb58e5SYang Zhong {
1364d9bb58e5SYang Zhong     size_t vidx;
136571aec354SEmilio G. Cota 
136610b32e2cSAnton Johansson     assert_cpu_is_self(cpu);
1367d9bb58e5SYang Zhong     for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
136810b32e2cSAnton Johansson         CPUTLBEntry *vtlb = &cpu->neg.tlb.d[mmu_idx].vtable[vidx];
13699e39de98SAnton Johansson         uint64_t cmp = tlb_read_idx(vtlb, access_type);
1370d9bb58e5SYang Zhong 
1371d9bb58e5SYang Zhong         if (cmp == page) {
1372d9bb58e5SYang Zhong             /* Found entry in victim tlb, swap tlb and iotlb.  */
137310b32e2cSAnton Johansson             CPUTLBEntry tmptlb, *tlb = &cpu->neg.tlb.f[mmu_idx].table[index];
1374d9bb58e5SYang Zhong 
137510b32e2cSAnton Johansson             qemu_spin_lock(&cpu->neg.tlb.c.lock);
137671aec354SEmilio G. Cota             copy_tlb_helper_locked(&tmptlb, tlb);
137771aec354SEmilio G. Cota             copy_tlb_helper_locked(tlb, vtlb);
137871aec354SEmilio G. Cota             copy_tlb_helper_locked(vtlb, &tmptlb);
137910b32e2cSAnton Johansson             qemu_spin_unlock(&cpu->neg.tlb.c.lock);
1380d9bb58e5SYang Zhong 
138110b32e2cSAnton Johansson             CPUTLBEntryFull *f1 = &cpu->neg.tlb.d[mmu_idx].fulltlb[index];
138210b32e2cSAnton Johansson             CPUTLBEntryFull *f2 = &cpu->neg.tlb.d[mmu_idx].vfulltlb[vidx];
138325d3ec58SRichard Henderson             CPUTLBEntryFull tmpf;
138425d3ec58SRichard Henderson             tmpf = *f1; *f1 = *f2; *f2 = tmpf;
1385d9bb58e5SYang Zhong             return true;
1386d9bb58e5SYang Zhong         }
1387d9bb58e5SYang Zhong     }
1388d9bb58e5SYang Zhong     return false;
1389d9bb58e5SYang Zhong }
1390d9bb58e5SYang Zhong 
1391707526adSRichard Henderson static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
139225d3ec58SRichard Henderson                            CPUTLBEntryFull *full, uintptr_t retaddr)
1393707526adSRichard Henderson {
139425d3ec58SRichard Henderson     ram_addr_t ram_addr = mem_vaddr + full->xlat_section;
1395707526adSRichard Henderson 
1396707526adSRichard Henderson     trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size);
1397707526adSRichard Henderson 
1398707526adSRichard Henderson     if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
1399f349e92eSPhilippe Mathieu-Daudé         tb_invalidate_phys_range_fast(ram_addr, size, retaddr);
1400707526adSRichard Henderson     }
1401707526adSRichard Henderson 
1402707526adSRichard Henderson     /*
1403707526adSRichard Henderson      * Set both VGA and migration bits for simplicity and to remove
1404707526adSRichard Henderson      * the notdirty callback faster.
1405707526adSRichard Henderson      */
1406707526adSRichard Henderson     cpu_physical_memory_set_dirty_range(ram_addr, size, DIRTY_CLIENTS_NOCODE);
1407707526adSRichard Henderson 
1408707526adSRichard Henderson     /* We remove the notdirty callback only if the code has been flushed. */
1409707526adSRichard Henderson     if (!cpu_physical_memory_is_clean(ram_addr)) {
1410707526adSRichard Henderson         trace_memory_notdirty_set_dirty(mem_vaddr);
1411707526adSRichard Henderson         tlb_set_dirty(cpu, mem_vaddr);
1412707526adSRichard Henderson     }
1413707526adSRichard Henderson }
1414707526adSRichard Henderson 
14155afec1c6SAnton Johansson static int probe_access_internal(CPUState *cpu, vaddr addr,
1416069cfe77SRichard Henderson                                  int fault_size, MMUAccessType access_type,
1417069cfe77SRichard Henderson                                  int mmu_idx, bool nonfault,
1418af803a4fSRichard Henderson                                  void **phost, CPUTLBEntryFull **pfull,
14196d03226bSAlex Bennée                                  uintptr_t retaddr, bool check_mem_cbs)
1420d9bb58e5SYang Zhong {
14215afec1c6SAnton Johansson     uintptr_t index = tlb_index(cpu, mmu_idx, addr);
14225afec1c6SAnton Johansson     CPUTLBEntry *entry = tlb_entry(cpu, mmu_idx, addr);
14239e39de98SAnton Johansson     uint64_t tlb_addr = tlb_read_idx(entry, access_type);
14244f8f4127SAnton Johansson     vaddr page_addr = addr & TARGET_PAGE_MASK;
142558e8f1f6SRichard Henderson     int flags = TLB_FLAGS_MASK & ~TLB_FORCE_SLOW;
14265afec1c6SAnton Johansson     bool force_mmio = check_mem_cbs && cpu_plugin_mem_cbs_enabled(cpu);
142758e8f1f6SRichard Henderson     CPUTLBEntryFull *full;
1428ca86cf32SDavid Hildenbrand 
1429069cfe77SRichard Henderson     if (!tlb_hit_page(tlb_addr, page_addr)) {
14305afec1c6SAnton Johansson         if (!victim_tlb_hit(cpu, mmu_idx, index, access_type, page_addr)) {
14315afec1c6SAnton Johansson             if (!cpu->cc->tcg_ops->tlb_fill(cpu, addr, fault_size, access_type,
1432069cfe77SRichard Henderson                                             mmu_idx, nonfault, retaddr)) {
1433069cfe77SRichard Henderson                 /* Non-faulting page table read failed.  */
1434069cfe77SRichard Henderson                 *phost = NULL;
1435af803a4fSRichard Henderson                 *pfull = NULL;
1436069cfe77SRichard Henderson                 return TLB_INVALID_MASK;
1437069cfe77SRichard Henderson             }
1438069cfe77SRichard Henderson 
143903a98189SDavid Hildenbrand             /* TLB resize via tlb_fill may have moved the entry.  */
14405afec1c6SAnton Johansson             index = tlb_index(cpu, mmu_idx, addr);
14415afec1c6SAnton Johansson             entry = tlb_entry(cpu, mmu_idx, addr);
1442c3c8bf57SRichard Henderson 
1443c3c8bf57SRichard Henderson             /*
1444c3c8bf57SRichard Henderson              * With PAGE_WRITE_INV, we set TLB_INVALID_MASK immediately,
1445c3c8bf57SRichard Henderson              * to force the next access through tlb_fill.  We've just
1446c3c8bf57SRichard Henderson              * called tlb_fill, so we know that this entry *is* valid.
1447c3c8bf57SRichard Henderson              */
1448c3c8bf57SRichard Henderson             flags &= ~TLB_INVALID_MASK;
1449d9bb58e5SYang Zhong         }
14500b3c75adSRichard Henderson         tlb_addr = tlb_read_idx(entry, access_type);
145103a98189SDavid Hildenbrand     }
1452c3c8bf57SRichard Henderson     flags &= tlb_addr;
145303a98189SDavid Hildenbrand 
14545afec1c6SAnton Johansson     *pfull = full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index];
145558e8f1f6SRichard Henderson     flags |= full->slow_flags[access_type];
1456af803a4fSRichard Henderson 
1457069cfe77SRichard Henderson     /* Fold all "mmio-like" bits into TLB_MMIO.  This is not RAM.  */
14586d03226bSAlex Bennée     if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY))
14596d03226bSAlex Bennée         || (access_type != MMU_INST_FETCH && force_mmio)) {
1461069cfe77SRichard Henderson         *phost = NULL;
1462069cfe77SRichard Henderson         return TLB_MMIO;
1463fef39ccdSDavid Hildenbrand     }
1464fef39ccdSDavid Hildenbrand 
1465069cfe77SRichard Henderson     /* Everything else is RAM. */
1466069cfe77SRichard Henderson     *phost = (void *)((uintptr_t)addr + entry->addend);
1467069cfe77SRichard Henderson     return flags;
1468069cfe77SRichard Henderson }
1469069cfe77SRichard Henderson 
14704f8f4127SAnton Johansson int probe_access_full(CPUArchState *env, vaddr addr, int size,
1471069cfe77SRichard Henderson                       MMUAccessType access_type, int mmu_idx,
1472af803a4fSRichard Henderson                       bool nonfault, void **phost, CPUTLBEntryFull **pfull,
1473af803a4fSRichard Henderson                       uintptr_t retaddr)
1474069cfe77SRichard Henderson {
14755afec1c6SAnton Johansson     int flags = probe_access_internal(env_cpu(env), addr, size, access_type,
14765afec1c6SAnton Johansson                                       mmu_idx, nonfault, phost, pfull, retaddr,
14775afec1c6SAnton Johansson                                       true);
1478069cfe77SRichard Henderson 
1479069cfe77SRichard Henderson     /* Handle clean RAM pages.  */
1480069cfe77SRichard Henderson     if (unlikely(flags & TLB_NOTDIRTY)) {
1481af803a4fSRichard Henderson         notdirty_write(env_cpu(env), addr, 1, *pfull, retaddr);
1482069cfe77SRichard Henderson         flags &= ~TLB_NOTDIRTY;
1483069cfe77SRichard Henderson     }
1484069cfe77SRichard Henderson 
1485069cfe77SRichard Henderson     return flags;
1486069cfe77SRichard Henderson }
1487069cfe77SRichard Henderson 
14886d03226bSAlex Bennée int probe_access_full_mmu(CPUArchState *env, vaddr addr, int size,
14896d03226bSAlex Bennée                           MMUAccessType access_type, int mmu_idx,
14906d03226bSAlex Bennée                           void **phost, CPUTLBEntryFull **pfull)
14916d03226bSAlex Bennée {
14926d03226bSAlex Bennée     void *discard_phost;
14936d03226bSAlex Bennée     CPUTLBEntryFull *discard_tlb;
14946d03226bSAlex Bennée 
14956d03226bSAlex Bennée     /* privately handle users that don't need full results */
14966d03226bSAlex Bennée     phost = phost ? phost : &discard_phost;
14976d03226bSAlex Bennée     pfull = pfull ? pfull : &discard_tlb;
14986d03226bSAlex Bennée 
14995afec1c6SAnton Johansson     int flags = probe_access_internal(env_cpu(env), addr, size, access_type,
15005afec1c6SAnton Johansson                                       mmu_idx, true, phost, pfull, 0, false);
15016d03226bSAlex Bennée 
15026d03226bSAlex Bennée     /* Handle clean RAM pages.  */
15036d03226bSAlex Bennée     if (unlikely(flags & TLB_NOTDIRTY)) {
15046d03226bSAlex Bennée         notdirty_write(env_cpu(env), addr, 1, *pfull, 0);
15056d03226bSAlex Bennée         flags &= ~TLB_NOTDIRTY;
15066d03226bSAlex Bennée     }
15076d03226bSAlex Bennée 
15086d03226bSAlex Bennée     return flags;
15096d03226bSAlex Bennée }
15106d03226bSAlex Bennée 
15114f8f4127SAnton Johansson int probe_access_flags(CPUArchState *env, vaddr addr, int size,
1512af803a4fSRichard Henderson                        MMUAccessType access_type, int mmu_idx,
1513af803a4fSRichard Henderson                        bool nonfault, void **phost, uintptr_t retaddr)
1514af803a4fSRichard Henderson {
1515af803a4fSRichard Henderson     CPUTLBEntryFull *full;
15161770b2f2SDaniel Henrique Barboza     int flags;
1517af803a4fSRichard Henderson 
15181770b2f2SDaniel Henrique Barboza     g_assert(-(addr | TARGET_PAGE_MASK) >= size);
15191770b2f2SDaniel Henrique Barboza 
15205afec1c6SAnton Johansson     flags = probe_access_internal(env_cpu(env), addr, size, access_type,
15215afec1c6SAnton Johansson                                   mmu_idx, nonfault, phost, &full, retaddr,
15225afec1c6SAnton Johansson                                   true);
15231770b2f2SDaniel Henrique Barboza 
15241770b2f2SDaniel Henrique Barboza     /* Handle clean RAM pages. */
15251770b2f2SDaniel Henrique Barboza     if (unlikely(flags & TLB_NOTDIRTY)) {
15261770b2f2SDaniel Henrique Barboza         notdirty_write(env_cpu(env), addr, 1, full, retaddr);
15271770b2f2SDaniel Henrique Barboza         flags &= ~TLB_NOTDIRTY;
15281770b2f2SDaniel Henrique Barboza     }
15291770b2f2SDaniel Henrique Barboza 
15301770b2f2SDaniel Henrique Barboza     return flags;
1531af803a4fSRichard Henderson }
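
/*
 * Usage sketch, not part of the build: a non-faulting probe from a
 * hypothetical target helper.  Clean-RAM (TLB_NOTDIRTY) handling was
 * already done above, so any remaining flag means the byte cannot be
 * stored through the host pointer directly.
 */
#ifdef CPUTLB_EXAMPLES
static bool example_try_host_store(CPUArchState *env, vaddr addr,
                                   uint8_t val, int mmu_idx, uintptr_t ra)
{
    void *host;
    int flags = probe_access_flags(env, addr, 1, MMU_DATA_STORE,
                                   mmu_idx, true, &host, ra);

    if (flags) {
        return false;       /* invalid, MMIO, watchpoint...: slow path */
    }
    *(uint8_t *)host = val; /* plain dirty RAM: store directly */
    return true;
}
#endif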
1532af803a4fSRichard Henderson 
15334f8f4127SAnton Johansson void *probe_access(CPUArchState *env, vaddr addr, int size,
1534069cfe77SRichard Henderson                    MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
1535069cfe77SRichard Henderson {
1536af803a4fSRichard Henderson     CPUTLBEntryFull *full;
1537069cfe77SRichard Henderson     void *host;
1538069cfe77SRichard Henderson     int flags;
1539069cfe77SRichard Henderson 
1540069cfe77SRichard Henderson     g_assert(-(addr | TARGET_PAGE_MASK) >= size);
1541069cfe77SRichard Henderson 
15425afec1c6SAnton Johansson     flags = probe_access_internal(env_cpu(env), addr, size, access_type,
15435afec1c6SAnton Johansson                                   mmu_idx, false, &host, &full, retaddr,
15445afec1c6SAnton Johansson                                   true);
1545069cfe77SRichard Henderson 
1546069cfe77SRichard Henderson     /* Per the interface, size == 0 merely faults the access. */
1547069cfe77SRichard Henderson     if (size == 0) {
154873bc0bd4SRichard Henderson         return NULL;
154973bc0bd4SRichard Henderson     }
155073bc0bd4SRichard Henderson 
1551069cfe77SRichard Henderson     if (unlikely(flags & (TLB_NOTDIRTY | TLB_WATCHPOINT))) {
155203a98189SDavid Hildenbrand         /* Handle watchpoints.  */
1553069cfe77SRichard Henderson         if (flags & TLB_WATCHPOINT) {
1554069cfe77SRichard Henderson             int wp_access = (access_type == MMU_DATA_STORE
1555069cfe77SRichard Henderson                              ? BP_MEM_WRITE : BP_MEM_READ);
155603a98189SDavid Hildenbrand             cpu_check_watchpoint(env_cpu(env), addr, size,
155725d3ec58SRichard Henderson                                  full->attrs, wp_access, retaddr);
1558d9bb58e5SYang Zhong         }
1559fef39ccdSDavid Hildenbrand 
156073bc0bd4SRichard Henderson         /* Handle clean RAM pages.  */
1561069cfe77SRichard Henderson         if (flags & TLB_NOTDIRTY) {
156225d3ec58SRichard Henderson             notdirty_write(env_cpu(env), addr, 1, full, retaddr);
156373bc0bd4SRichard Henderson         }
1564fef39ccdSDavid Hildenbrand     }
1565fef39ccdSDavid Hildenbrand 
1566069cfe77SRichard Henderson     return host;
1567d9bb58e5SYang Zhong }
1568d9bb58e5SYang Zhong 
15694811e909SRichard Henderson void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
15704811e909SRichard Henderson                         MMUAccessType access_type, int mmu_idx)
15714811e909SRichard Henderson {
1572af803a4fSRichard Henderson     CPUTLBEntryFull *full;
1573069cfe77SRichard Henderson     void *host;
1574069cfe77SRichard Henderson     int flags;
15754811e909SRichard Henderson 
15765afec1c6SAnton Johansson     flags = probe_access_internal(env_cpu(env), addr, 0, access_type,
15776d03226bSAlex Bennée                                   mmu_idx, true, &host, &full, 0, false);
1578069cfe77SRichard Henderson 
1579069cfe77SRichard Henderson     /* No combination of flags is expected by the caller. */
1580069cfe77SRichard Henderson     return flags ? NULL : host;
15814811e909SRichard Henderson }
15824811e909SRichard Henderson 
15837e0d9973SRichard Henderson /*
15847e0d9973SRichard Henderson  * Return a ram_addr_t for the virtual address for execution.
15857e0d9973SRichard Henderson  *
15867e0d9973SRichard Henderson  * Return -1 if we can't translate and execute from an entire page
15877e0d9973SRichard Henderson  * of RAM.  This will force us to execute by loading and translating
15887e0d9973SRichard Henderson  * one insn at a time, without caching.
15897e0d9973SRichard Henderson  *
15907e0d9973SRichard Henderson  * NOTE: This function will trigger an exception if the page is
15917e0d9973SRichard Henderson  * not executable.
15927e0d9973SRichard Henderson  */
15934f8f4127SAnton Johansson tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
15947e0d9973SRichard Henderson                                         void **hostp)
15957e0d9973SRichard Henderson {
1596af803a4fSRichard Henderson     CPUTLBEntryFull *full;
15977e0d9973SRichard Henderson     void *p;
15987e0d9973SRichard Henderson 
15995afec1c6SAnton Johansson     (void)probe_access_internal(env_cpu(env), addr, 1, MMU_INST_FETCH,
16006d03226bSAlex Bennée                                 cpu_mmu_index(env, true), false,
16016d03226bSAlex Bennée                                 &p, &full, 0, false);
16027e0d9973SRichard Henderson     if (p == NULL) {
16037e0d9973SRichard Henderson         return -1;
16047e0d9973SRichard Henderson     }
1605ac01ec6fSWeiwei Li 
1606ac01ec6fSWeiwei Li     if (full->lg_page_size < TARGET_PAGE_BITS) {
1607ac01ec6fSWeiwei Li         return -1;
1608ac01ec6fSWeiwei Li     }
1609ac01ec6fSWeiwei Li 
16107e0d9973SRichard Henderson     if (hostp) {
16117e0d9973SRichard Henderson         *hostp = p;
16127e0d9973SRichard Henderson     }
16137e0d9973SRichard Henderson     return qemu_ram_addr_from_host_nofail(p);
16147e0d9973SRichard Henderson }
16157e0d9973SRichard Henderson 
1616cdfac37bSRichard Henderson /* Load/store with atomicity primitives. */
1617cdfac37bSRichard Henderson #include "ldst_atomicity.c.inc"
1618cdfac37bSRichard Henderson 
1619235537faSAlex Bennée #ifdef CONFIG_PLUGIN
1620235537faSAlex Bennée /*
1621235537faSAlex Bennée  * Perform a TLB lookup and populate the qemu_plugin_hwaddr structure.
1622235537faSAlex Bennée  * This should be a hot path, as we will have just looked this address up
1623235537faSAlex Bennée  * in the softmmu lookup code (or helper). We don't handle re-fills or
1624235537faSAlex Bennée  * checking the victim table. This is purely informational.
1625235537faSAlex Bennée  *
1626da6aef48SRichard Henderson  * The one corner case is i/o write, which can cause changes to the
1627da6aef48SRichard Henderson  * address space.  Those changes, and the corresponding tlb flush,
1628da6aef48SRichard Henderson  * should be delayed until the next TB, so even then this ought not fail.
1629da6aef48SRichard Henderson  * But check, just in case.
1630235537faSAlex Bennée  */
1631732d5487SAnton Johansson bool tlb_plugin_lookup(CPUState *cpu, vaddr addr, int mmu_idx,
1632235537faSAlex Bennée                        bool is_store, struct qemu_plugin_hwaddr *data)
1633235537faSAlex Bennée {
163410b32e2cSAnton Johansson     CPUTLBEntry *tlbe = tlb_entry(cpu, mmu_idx, addr);
163510b32e2cSAnton Johansson     uintptr_t index = tlb_index(cpu, mmu_idx, addr);
1636da6aef48SRichard Henderson     MMUAccessType access_type = is_store ? MMU_DATA_STORE : MMU_DATA_LOAD;
1637da6aef48SRichard Henderson     uint64_t tlb_addr = tlb_read_idx(tlbe, access_type);
1638405c02d8SRichard Henderson     CPUTLBEntryFull *full;
1639235537faSAlex Bennée 
1640da6aef48SRichard Henderson     if (unlikely(!tlb_hit(tlb_addr, addr))) {
1641da6aef48SRichard Henderson         return false;
1642da6aef48SRichard Henderson     }
1643da6aef48SRichard Henderson 
164410b32e2cSAnton Johansson     full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index];
1645405c02d8SRichard Henderson     data->phys_addr = full->phys_addr | (addr & ~TARGET_PAGE_MASK);
1646405c02d8SRichard Henderson 
1647235537faSAlex Bennée     /* We must have an iotlb entry for MMIO */
1648235537faSAlex Bennée     if (tlb_addr & TLB_MMIO) {
1649405c02d8SRichard Henderson         MemoryRegionSection *section =
1650405c02d8SRichard Henderson             iotlb_to_section(cpu, full->xlat_section & ~TARGET_PAGE_MASK,
1651405c02d8SRichard Henderson                              full->attrs);
1652235537faSAlex Bennée         data->is_io = true;
1653405c02d8SRichard Henderson         data->mr = section->mr;
1654235537faSAlex Bennée     } else {
1655235537faSAlex Bennée         data->is_io = false;
1656405c02d8SRichard Henderson         data->mr = NULL;
1657235537faSAlex Bennée     }
1658235537faSAlex Bennée     return true;
1659235537faSAlex Bennée }
1660235537faSAlex Bennée #endif
1661235537faSAlex Bennée 
166208dff435SRichard Henderson /*
16638cfdacaaSRichard Henderson  * Probe for a load/store operation.
16648cfdacaaSRichard Henderson  * Return the host address in @haddr and the TLB flags in @flags.
16658cfdacaaSRichard Henderson  */
16668cfdacaaSRichard Henderson 
16678cfdacaaSRichard Henderson typedef struct MMULookupPageData {
16688cfdacaaSRichard Henderson     CPUTLBEntryFull *full;
16698cfdacaaSRichard Henderson     void *haddr;
1670fb2c53cbSAnton Johansson     vaddr addr;
16718cfdacaaSRichard Henderson     int flags;
16728cfdacaaSRichard Henderson     int size;
16738cfdacaaSRichard Henderson } MMULookupPageData;
16748cfdacaaSRichard Henderson 
16758cfdacaaSRichard Henderson typedef struct MMULookupLocals {
16768cfdacaaSRichard Henderson     MMULookupPageData page[2];
16778cfdacaaSRichard Henderson     MemOp memop;
16788cfdacaaSRichard Henderson     int mmu_idx;
16798cfdacaaSRichard Henderson } MMULookupLocals;
16808cfdacaaSRichard Henderson 
16818cfdacaaSRichard Henderson /**
16828cfdacaaSRichard Henderson  * mmu_lookup1: translate one page
1683d50ef446SAnton Johansson  * @cpu: generic cpu state
16848cfdacaaSRichard Henderson  * @data: lookup parameters
16858cfdacaaSRichard Henderson  * @mmu_idx: virtual address context
16868cfdacaaSRichard Henderson  * @access_type: load/store/code
16878cfdacaaSRichard Henderson  * @ra: return address into tcg generated code, or 0
16888cfdacaaSRichard Henderson  *
16898cfdacaaSRichard Henderson  * Resolve the translation for the one page at @data.addr, filling in
16908cfdacaaSRichard Henderson  * the rest of @data with the results.  If the translation fails,
16918cfdacaaSRichard Henderson  * tlb_fill will longjmp out.  Return true if the softmmu tlb for
16928cfdacaaSRichard Henderson  * @mmu_idx may have resized.
16938cfdacaaSRichard Henderson  */
1694d50ef446SAnton Johansson static bool mmu_lookup1(CPUState *cpu, MMULookupPageData *data,
16958cfdacaaSRichard Henderson                         int mmu_idx, MMUAccessType access_type, uintptr_t ra)
16968cfdacaaSRichard Henderson {
1697fb2c53cbSAnton Johansson     vaddr addr = data->addr;
1698d50ef446SAnton Johansson     uintptr_t index = tlb_index(cpu, mmu_idx, addr);
1699d50ef446SAnton Johansson     CPUTLBEntry *entry = tlb_entry(cpu, mmu_idx, addr);
17009e39de98SAnton Johansson     uint64_t tlb_addr = tlb_read_idx(entry, access_type);
17018cfdacaaSRichard Henderson     bool maybe_resized = false;
170258e8f1f6SRichard Henderson     CPUTLBEntryFull *full;
170358e8f1f6SRichard Henderson     int flags;
17048cfdacaaSRichard Henderson 
17058cfdacaaSRichard Henderson     /* If the TLB entry is for a different page, reload and try again.  */
17068cfdacaaSRichard Henderson     if (!tlb_hit(tlb_addr, addr)) {
1707d50ef446SAnton Johansson         if (!victim_tlb_hit(cpu, mmu_idx, index, access_type,
17088cfdacaaSRichard Henderson                             addr & TARGET_PAGE_MASK)) {
1709d50ef446SAnton Johansson             tlb_fill(cpu, addr, data->size, access_type, mmu_idx, ra);
17108cfdacaaSRichard Henderson             maybe_resized = true;
1711d50ef446SAnton Johansson             index = tlb_index(cpu, mmu_idx, addr);
1712d50ef446SAnton Johansson             entry = tlb_entry(cpu, mmu_idx, addr);
17138cfdacaaSRichard Henderson         }
17148cfdacaaSRichard Henderson         tlb_addr = tlb_read_idx(entry, access_type) & ~TLB_INVALID_MASK;
17158cfdacaaSRichard Henderson     }
17168cfdacaaSRichard Henderson 
1717d50ef446SAnton Johansson     full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index];
171858e8f1f6SRichard Henderson     flags = tlb_addr & (TLB_FLAGS_MASK & ~TLB_FORCE_SLOW);
171958e8f1f6SRichard Henderson     flags |= full->slow_flags[access_type];
172058e8f1f6SRichard Henderson 
172158e8f1f6SRichard Henderson     data->full = full;
172258e8f1f6SRichard Henderson     data->flags = flags;
17238cfdacaaSRichard Henderson     /* Compute haddr speculatively; depending on flags it might be invalid. */
17248cfdacaaSRichard Henderson     data->haddr = (void *)((uintptr_t)addr + entry->addend);
17258cfdacaaSRichard Henderson 
17268cfdacaaSRichard Henderson     return maybe_resized;
17278cfdacaaSRichard Henderson }
17288cfdacaaSRichard Henderson 
17298cfdacaaSRichard Henderson /**
17308cfdacaaSRichard Henderson  * mmu_watch_or_dirty
1731d50ef446SAnton Johansson  * @cpu: generic cpu state
17328cfdacaaSRichard Henderson  * @data: lookup parameters
17338cfdacaaSRichard Henderson  * @access_type: load/store/code
17348cfdacaaSRichard Henderson  * @ra: return address into tcg generated code, or 0
17358cfdacaaSRichard Henderson  *
17368cfdacaaSRichard Henderson  * Trigger watchpoints for @data.addr:@data.size;
17378cfdacaaSRichard Henderson  * record writes to protected clean pages.
17388cfdacaaSRichard Henderson  */
1739d50ef446SAnton Johansson static void mmu_watch_or_dirty(CPUState *cpu, MMULookupPageData *data,
17408cfdacaaSRichard Henderson                                MMUAccessType access_type, uintptr_t ra)
17418cfdacaaSRichard Henderson {
17428cfdacaaSRichard Henderson     CPUTLBEntryFull *full = data->full;
1743fb2c53cbSAnton Johansson     vaddr addr = data->addr;
17448cfdacaaSRichard Henderson     int flags = data->flags;
17458cfdacaaSRichard Henderson     int size = data->size;
17468cfdacaaSRichard Henderson 
17478cfdacaaSRichard Henderson     /* On watchpoint hit, this will longjmp out.  */
17488cfdacaaSRichard Henderson     if (flags & TLB_WATCHPOINT) {
17498cfdacaaSRichard Henderson         int wp = access_type == MMU_DATA_STORE ? BP_MEM_WRITE : BP_MEM_READ;
1750d50ef446SAnton Johansson         cpu_check_watchpoint(cpu, addr, size, full->attrs, wp, ra);
17518cfdacaaSRichard Henderson         flags &= ~TLB_WATCHPOINT;
17528cfdacaaSRichard Henderson     }
17538cfdacaaSRichard Henderson 
17548cfdacaaSRichard Henderson     /* Note that notdirty is only set for writes. */
17558cfdacaaSRichard Henderson     if (flags & TLB_NOTDIRTY) {
1756d50ef446SAnton Johansson         notdirty_write(cpu, addr, size, full, ra);
17578cfdacaaSRichard Henderson         flags &= ~TLB_NOTDIRTY;
17588cfdacaaSRichard Henderson     }
17598cfdacaaSRichard Henderson     data->flags = flags;
17608cfdacaaSRichard Henderson }
17618cfdacaaSRichard Henderson 
17628cfdacaaSRichard Henderson /**
17638cfdacaaSRichard Henderson  * mmu_lookup: translate page(s)
1764d50ef446SAnton Johansson  * @cpu: generic cpu state
17658cfdacaaSRichard Henderson  * @addr: virtual address
17668cfdacaaSRichard Henderson  * @oi: combined mmu_idx and MemOp
17678cfdacaaSRichard Henderson  * @ra: return address into tcg generated code, or 0
17688cfdacaaSRichard Henderson  * @access_type: load/store/code
17698cfdacaaSRichard Henderson  * @l: output result
17708cfdacaaSRichard Henderson  *
17718cfdacaaSRichard Henderson  * Resolve the translation for the page(s) beginning at @addr, for MemOp.size
17728cfdacaaSRichard Henderson  * bytes.  Return true if the lookup crosses a page boundary.
17738cfdacaaSRichard Henderson  */
1774d50ef446SAnton Johansson static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
17758cfdacaaSRichard Henderson                        uintptr_t ra, MMUAccessType type, MMULookupLocals *l)
17768cfdacaaSRichard Henderson {
17778cfdacaaSRichard Henderson     unsigned a_bits;
17788cfdacaaSRichard Henderson     bool crosspage;
17798cfdacaaSRichard Henderson     int flags;
17808cfdacaaSRichard Henderson 
17818cfdacaaSRichard Henderson     l->memop = get_memop(oi);
17828cfdacaaSRichard Henderson     l->mmu_idx = get_mmuidx(oi);
17838cfdacaaSRichard Henderson 
17848cfdacaaSRichard Henderson     tcg_debug_assert(l->mmu_idx < NB_MMU_MODES);
17858cfdacaaSRichard Henderson 
17868cfdacaaSRichard Henderson     /* Handle CPU specific unaligned behaviour */
17878cfdacaaSRichard Henderson     a_bits = get_alignment_bits(l->memop);
17888cfdacaaSRichard Henderson     if (addr & ((1 << a_bits) - 1)) {
1789d50ef446SAnton Johansson         cpu_unaligned_access(cpu, addr, type, l->mmu_idx, ra);
17908cfdacaaSRichard Henderson     }
17918cfdacaaSRichard Henderson 
17928cfdacaaSRichard Henderson     l->page[0].addr = addr;
17938cfdacaaSRichard Henderson     l->page[0].size = memop_size(l->memop);
17948cfdacaaSRichard Henderson     l->page[1].addr = (addr + l->page[0].size - 1) & TARGET_PAGE_MASK;
17958cfdacaaSRichard Henderson     l->page[1].size = 0;
17968cfdacaaSRichard Henderson     crosspage = (addr ^ l->page[1].addr) & TARGET_PAGE_MASK;
17978cfdacaaSRichard Henderson 
17988cfdacaaSRichard Henderson     if (likely(!crosspage)) {
1799d50ef446SAnton Johansson         mmu_lookup1(cpu, &l->page[0], l->mmu_idx, type, ra);
18008cfdacaaSRichard Henderson 
18018cfdacaaSRichard Henderson         flags = l->page[0].flags;
18028cfdacaaSRichard Henderson         if (unlikely(flags & (TLB_WATCHPOINT | TLB_NOTDIRTY))) {
1803d50ef446SAnton Johansson             mmu_watch_or_dirty(cpu, &l->page[0], type, ra);
18048cfdacaaSRichard Henderson         }
18058cfdacaaSRichard Henderson         if (unlikely(flags & TLB_BSWAP)) {
18068cfdacaaSRichard Henderson             l->memop ^= MO_BSWAP;
18078cfdacaaSRichard Henderson         }
18088cfdacaaSRichard Henderson     } else {
18098cfdacaaSRichard Henderson         /* Finish compute of page crossing. */
18108cfdacaaSRichard Henderson         int size0 = l->page[1].addr - addr;
18118cfdacaaSRichard Henderson         l->page[1].size = l->page[0].size - size0;
18128cfdacaaSRichard Henderson         l->page[0].size = size0;
18138cfdacaaSRichard Henderson 
18148cfdacaaSRichard Henderson         /*
18158cfdacaaSRichard Henderson          * Lookup both pages, recognizing exceptions from either.  If the
18168cfdacaaSRichard Henderson          * second lookup potentially resized, refresh first CPUTLBEntryFull.
18178cfdacaaSRichard Henderson          */
1818d50ef446SAnton Johansson         mmu_lookup1(cpu, &l->page[0], l->mmu_idx, type, ra);
1819d50ef446SAnton Johansson         if (mmu_lookup1(cpu, &l->page[1], l->mmu_idx, type, ra)) {
1820d50ef446SAnton Johansson             uintptr_t index = tlb_index(cpu, l->mmu_idx, addr);
1821d50ef446SAnton Johansson             l->page[0].full = &cpu->neg.tlb.d[l->mmu_idx].fulltlb[index];
18228cfdacaaSRichard Henderson         }
18238cfdacaaSRichard Henderson 
18248cfdacaaSRichard Henderson         flags = l->page[0].flags | l->page[1].flags;
18258cfdacaaSRichard Henderson         if (unlikely(flags & (TLB_WATCHPOINT | TLB_NOTDIRTY))) {
1826d50ef446SAnton Johansson             mmu_watch_or_dirty(cpu, &l->page[0], type, ra);
1827d50ef446SAnton Johansson             mmu_watch_or_dirty(cpu, &l->page[1], type, ra);
18288cfdacaaSRichard Henderson         }
18298cfdacaaSRichard Henderson 
18308cfdacaaSRichard Henderson         /*
18318cfdacaaSRichard Henderson          * Since target/sparc is the only user of TLB_BSWAP, and all
18328cfdacaaSRichard Henderson          * Sparc accesses are aligned, any treatment across two pages
18338cfdacaaSRichard Henderson          * would be arbitrary.  Refuse it until there's a use.
18348cfdacaaSRichard Henderson          */
18358cfdacaaSRichard Henderson         tcg_debug_assert((flags & TLB_BSWAP) == 0);
18368cfdacaaSRichard Henderson     }
18378cfdacaaSRichard Henderson 
18388cfdacaaSRichard Henderson     return crosspage;
18398cfdacaaSRichard Henderson }
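
/*
 * Worked example for the cross-page split above, with invented numbers
 * and 4 KiB pages: a 4-byte access at addr 0x1ffe gives page[1].addr
 * 0x2000 and size0 = 2, so page[0] covers bytes 0x1ffe..0x1fff and
 * page[1] covers bytes 0x2000..0x2001.  Both pages are translated
 * before any watchpoint or notdirty processing runs.
 */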
18408cfdacaaSRichard Henderson 
18418cfdacaaSRichard Henderson /*
184308dff435SRichard Henderson  * Probe for an atomic operation.  Do not allow unaligned or I/O
184408dff435SRichard Henderson  * operations to proceed.  Return the host address.
184408dff435SRichard Henderson  */
1845d560225fSAnton Johansson static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
1846b0326eb9SAnton Johansson                                int size, uintptr_t retaddr)
1847d9bb58e5SYang Zhong {
1848b826044fSRichard Henderson     uintptr_t mmu_idx = get_mmuidx(oi);
184914776ab5STony Nguyen     MemOp mop = get_memop(oi);
1850d9bb58e5SYang Zhong     int a_bits = get_alignment_bits(mop);
185108dff435SRichard Henderson     uintptr_t index;
185208dff435SRichard Henderson     CPUTLBEntry *tlbe;
1853b0326eb9SAnton Johansson     vaddr tlb_addr;
185434d49937SPeter Maydell     void *hostaddr;
1855417aeaffSRichard Henderson     CPUTLBEntryFull *full;
1856d9bb58e5SYang Zhong 
1857b826044fSRichard Henderson     tcg_debug_assert(mmu_idx < NB_MMU_MODES);
1858b826044fSRichard Henderson 
1859d9bb58e5SYang Zhong     /* Adjust the given return address.  */
1860d9bb58e5SYang Zhong     retaddr -= GETPC_ADJ;
1861d9bb58e5SYang Zhong 
1862d9bb58e5SYang Zhong     /* Enforce guest required alignment.  */
1863d9bb58e5SYang Zhong     if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
1864d9bb58e5SYang Zhong         /* ??? Maybe indicate atomic op to cpu_unaligned_access */
1865d560225fSAnton Johansson         cpu_unaligned_access(cpu, addr, MMU_DATA_STORE,
1866d9bb58e5SYang Zhong                              mmu_idx, retaddr);
1867d9bb58e5SYang Zhong     }
1868d9bb58e5SYang Zhong 
1869d9bb58e5SYang Zhong     /* Enforce qemu required alignment.  */
187008dff435SRichard Henderson     if (unlikely(addr & (size - 1))) {
1871d9bb58e5SYang Zhong         /* We get here if guest alignment was not requested,
1872d9bb58e5SYang Zhong            or was not enforced by cpu_unaligned_access above.
1873d9bb58e5SYang Zhong            We might widen the access and emulate, but for now
1874d9bb58e5SYang Zhong            mark an exception and exit the cpu loop.  */
1875d9bb58e5SYang Zhong         goto stop_the_world;
1876d9bb58e5SYang Zhong     }
1877d9bb58e5SYang Zhong 
1878d560225fSAnton Johansson     index = tlb_index(cpu, mmu_idx, addr);
1879d560225fSAnton Johansson     tlbe = tlb_entry(cpu, mmu_idx, addr);
188008dff435SRichard Henderson 
1881d9bb58e5SYang Zhong     /* Check TLB entry and enforce page permissions.  */
188208dff435SRichard Henderson     tlb_addr = tlb_addr_write(tlbe);
1883334692bcSPeter Maydell     if (!tlb_hit(tlb_addr, addr)) {
1884d560225fSAnton Johansson         if (!victim_tlb_hit(cpu, mmu_idx, index, MMU_DATA_STORE,
18850b3c75adSRichard Henderson                             addr & TARGET_PAGE_MASK)) {
1886d560225fSAnton Johansson             tlb_fill(cpu, addr, size,
188708dff435SRichard Henderson                      MMU_DATA_STORE, mmu_idx, retaddr);
1888d560225fSAnton Johansson             index = tlb_index(cpu, mmu_idx, addr);
1889d560225fSAnton Johansson             tlbe = tlb_entry(cpu, mmu_idx, addr);
1890d9bb58e5SYang Zhong         }
1891403f290cSEmilio G. Cota         tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
1892d9bb58e5SYang Zhong     }
1893d9bb58e5SYang Zhong 
1894417aeaffSRichard Henderson     /*
1895417aeaffSRichard Henderson      * Let the guest notice RMW on a write-only page.
1896417aeaffSRichard Henderson      * We have just verified that the page is writable.
1897417aeaffSRichard Henderson      * Subpage lookups may have left TLB_INVALID_MASK set,
1898417aeaffSRichard Henderson      * but addr_read will only be -1 if PAGE_READ was unset.
1899417aeaffSRichard Henderson      */
1900417aeaffSRichard Henderson     if (unlikely(tlbe->addr_read == -1)) {
1901d560225fSAnton Johansson         tlb_fill(cpu, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
190208dff435SRichard Henderson         /*
1903417aeaffSRichard Henderson          * Since we don't support reads and writes to different
1904417aeaffSRichard Henderson          * addresses, and we do have the proper page loaded for
1905417aeaffSRichard Henderson          * write, this shouldn't ever return.  But just in case,
1906417aeaffSRichard Henderson          * handle via stop-the-world.
190708dff435SRichard Henderson          */
190808dff435SRichard Henderson         goto stop_the_world;
190908dff435SRichard Henderson     }
1910187ba694SRichard Henderson     /* Collect tlb flags for read. */
1911417aeaffSRichard Henderson     tlb_addr |= tlbe->addr_read;
191208dff435SRichard Henderson 
191355df6fcfSPeter Maydell     /* Notice an I/O access or a needs-MMU-lookup access.  */
19140953674eSRichard Henderson     if (unlikely(tlb_addr & (TLB_MMIO | TLB_DISCARD_WRITE))) {
1915d9bb58e5SYang Zhong         /* There's really nothing that can be done to
1916d9bb58e5SYang Zhong            support this apart from stop-the-world.  */
1917d9bb58e5SYang Zhong         goto stop_the_world;
1918d9bb58e5SYang Zhong     }
1919d9bb58e5SYang Zhong 
192034d49937SPeter Maydell     hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
1921d560225fSAnton Johansson     full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index];
192234d49937SPeter Maydell 
192334d49937SPeter Maydell     if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
1924d560225fSAnton Johansson         notdirty_write(cpu, addr, size, full, retaddr);
1925417aeaffSRichard Henderson     }
1926417aeaffSRichard Henderson 
1927187ba694SRichard Henderson     if (unlikely(tlb_addr & TLB_FORCE_SLOW)) {
1928187ba694SRichard Henderson         int wp_flags = 0;
1929187ba694SRichard Henderson 
1930187ba694SRichard Henderson         if (full->slow_flags[MMU_DATA_STORE] & TLB_WATCHPOINT) {
1931187ba694SRichard Henderson             wp_flags |= BP_MEM_WRITE;
1932187ba694SRichard Henderson         }
1933187ba694SRichard Henderson         if (full->slow_flags[MMU_DATA_LOAD] & TLB_WATCHPOINT) {
1934187ba694SRichard Henderson             wp_flags |= BP_MEM_READ;
1935187ba694SRichard Henderson         }
1936187ba694SRichard Henderson         if (wp_flags) {
1937d560225fSAnton Johansson             cpu_check_watchpoint(cpu, addr, size,
1938187ba694SRichard Henderson                                  full->attrs, wp_flags, retaddr);
1939187ba694SRichard Henderson         }
194034d49937SPeter Maydell     }
194134d49937SPeter Maydell 
194234d49937SPeter Maydell     return hostaddr;
1943d9bb58e5SYang Zhong 
1944d9bb58e5SYang Zhong  stop_the_world:
1945d560225fSAnton Johansson     cpu_loop_exit_atomic(cpu, retaddr);
1946d9bb58e5SYang Zhong }
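
/*
 * Illustrative note: the "qemu required alignment" test above relies on
 * the access size being a power of two, so addr & (size - 1) is nonzero
 * exactly when addr is not size-aligned.  E.g. for an 8-byte atomic:
 *
 *     0x1000 & 7   // == 0: aligned, the lookup proceeds
 *     0x1003 & 7   // == 3: misaligned, stop-the-world fallback
 */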
1947d9bb58e5SYang Zhong 
1948eed56642SAlex Bennée /*
1949eed56642SAlex Bennée  * Load Helpers
1950eed56642SAlex Bennée  *
1951eed56642SAlex Bennée  * We support two different access types. SOFTMMU_CODE_ACCESS is
1952eed56642SAlex Bennée  * specifically for reading instructions from system memory. It is
1953eed56642SAlex Bennée  * called by the translation loop and in some helpers where the code
1954eed56642SAlex Bennée  * is disassembled. It shouldn't be called directly by guest code.
1955cdfac37bSRichard Henderson  *
1956eed56642SAlex Bennée  * For the benefit of TCG generated code, we want to avoid the
1957eed56642SAlex Bennée  * complication of ABI-specific return type promotion and always
1958eed56642SAlex Bennée  * return a value extended to the register size of the host. This is
1959eed56642SAlex Bennée  * tcg_target_long, except in the case of a 32-bit host and 64-bit
1960eed56642SAlex Bennée  * data, and for that we always have uint64_t.
1961eed56642SAlex Bennée  *
1962eed56642SAlex Bennée  * We don't bother with this widened value for SOFTMMU_CODE_ACCESS.
1963eed56642SAlex Bennée  */
1964eed56642SAlex Bennée 
19658cfdacaaSRichard Henderson /**
19668cfdacaaSRichard Henderson  * int_ld_mmio_beN:
1967d50ef446SAnton Johansson  * @cpu: generic cpu state
19681966855eSRichard Henderson  * @full: page parameters
19698cfdacaaSRichard Henderson  * @ret_be: accumulated data
19701966855eSRichard Henderson  * @addr: virtual address
19711966855eSRichard Henderson  * @size: number of bytes
19728cfdacaaSRichard Henderson  * @mmu_idx: virtual address context
19738cfdacaaSRichard Henderson  * @ra: return address into tcg generated code, or 0
19741966855eSRichard Henderson  * Context: iothread lock held
19758cfdacaaSRichard Henderson  *
19761966855eSRichard Henderson  * Load @size bytes from @addr, which is memory-mapped i/o.
19778cfdacaaSRichard Henderson  * The bytes are concatenated in big-endian order with @ret_be.
19788cfdacaaSRichard Henderson  */
1979d50ef446SAnton Johansson static uint64_t int_ld_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full,
19801966855eSRichard Henderson                                 uint64_t ret_be, vaddr addr, int size,
19818bf67267SRichard Henderson                                 int mmu_idx, MMUAccessType type, uintptr_t ra,
19828bf67267SRichard Henderson                                 MemoryRegion *mr, hwaddr mr_offset)
19832dd92606SRichard Henderson {
1984190aba80SRichard Henderson     do {
198513e61747SRichard Henderson         MemOp this_mop;
198613e61747SRichard Henderson         unsigned this_size;
198713e61747SRichard Henderson         uint64_t val;
198813e61747SRichard Henderson         MemTxResult r;
198913e61747SRichard Henderson 
1990190aba80SRichard Henderson         /* Read aligned pieces up to 8 bytes. */
199113e61747SRichard Henderson         this_mop = ctz32(size | (int)addr | 8);
199213e61747SRichard Henderson         this_size = 1 << this_mop;
199313e61747SRichard Henderson         this_mop |= MO_BE;
199413e61747SRichard Henderson 
19958bf67267SRichard Henderson         r = memory_region_dispatch_read(mr, mr_offset, &val,
19968bf67267SRichard Henderson                                         this_mop, full->attrs);
199713e61747SRichard Henderson         if (unlikely(r != MEMTX_OK)) {
1998d50ef446SAnton Johansson             io_failed(cpu, full, addr, this_size, type, mmu_idx, r, ra);
19998cfdacaaSRichard Henderson         }
200013e61747SRichard Henderson         if (this_size == 8) {
200113e61747SRichard Henderson             return val;
200213e61747SRichard Henderson         }
200313e61747SRichard Henderson 
200413e61747SRichard Henderson         ret_be = (ret_be << (this_size * 8)) | val;
200513e61747SRichard Henderson         addr += this_size;
200613e61747SRichard Henderson         mr_offset += this_size;
200713e61747SRichard Henderson         size -= this_size;
2008190aba80SRichard Henderson     } while (size);
200913e61747SRichard Henderson 
20108cfdacaaSRichard Henderson     return ret_be;
20118cfdacaaSRichard Henderson }
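
/*
 * Illustrative sketch of the chunking rule above, with the GCC/Clang
 * __builtin_ctz standing in for ctz32 (an assumption made only for a
 * standalone example).  ctz32(size | addr | 8) is the log2 of the
 * largest naturally aligned power-of-two piece, capped at 8 bytes:
 *
 *     __builtin_ctz(5 | 0x1003 | 8)    // == 0: 1-byte access
 *     __builtin_ctz(4 | 0x1004 | 8)    // == 2: 4-byte access
 *     __builtin_ctz(16 | 0x1000 | 8)   // == 3: 8-byte access (the cap)
 */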
20128cfdacaaSRichard Henderson 
2013d50ef446SAnton Johansson static uint64_t do_ld_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full,
20148bf67267SRichard Henderson                                uint64_t ret_be, vaddr addr, int size,
20158bf67267SRichard Henderson                                int mmu_idx, MMUAccessType type, uintptr_t ra)
20168bf67267SRichard Henderson {
20178bf67267SRichard Henderson     MemoryRegionSection *section;
20188bf67267SRichard Henderson     MemoryRegion *mr;
20198bf67267SRichard Henderson     hwaddr mr_offset;
20208bf67267SRichard Henderson     MemTxAttrs attrs;
20218bf67267SRichard Henderson     uint64_t ret;
20228bf67267SRichard Henderson 
20238bf67267SRichard Henderson     tcg_debug_assert(size > 0 && size <= 8);
20248bf67267SRichard Henderson 
20258bf67267SRichard Henderson     attrs = full->attrs;
2026d50ef446SAnton Johansson     section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
20278bf67267SRichard Henderson     mr = section->mr;
20288bf67267SRichard Henderson 
20298bf67267SRichard Henderson     qemu_mutex_lock_iothread();
2030d50ef446SAnton Johansson     ret = int_ld_mmio_beN(cpu, full, ret_be, addr, size, mmu_idx,
20318bf67267SRichard Henderson                           type, ra, mr, mr_offset);
20328bf67267SRichard Henderson     qemu_mutex_unlock_iothread();
20338bf67267SRichard Henderson 
20348bf67267SRichard Henderson     return ret;
20358bf67267SRichard Henderson }
20368bf67267SRichard Henderson 
2037d50ef446SAnton Johansson static Int128 do_ld16_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full,
20388bf67267SRichard Henderson                                uint64_t ret_be, vaddr addr, int size,
20398bf67267SRichard Henderson                                int mmu_idx, uintptr_t ra)
20408bf67267SRichard Henderson {
20418bf67267SRichard Henderson     MemoryRegionSection *section;
20428bf67267SRichard Henderson     MemoryRegion *mr;
20438bf67267SRichard Henderson     hwaddr mr_offset;
20448bf67267SRichard Henderson     MemTxAttrs attrs;
20458bf67267SRichard Henderson     uint64_t a, b;
20468bf67267SRichard Henderson 
20478bf67267SRichard Henderson     tcg_debug_assert(size > 8 && size <= 16);
20488bf67267SRichard Henderson 
20498bf67267SRichard Henderson     attrs = full->attrs;
2050d50ef446SAnton Johansson     section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
20518bf67267SRichard Henderson     mr = section->mr;
20528bf67267SRichard Henderson 
20538bf67267SRichard Henderson     qemu_mutex_lock_iothread();
2054d50ef446SAnton Johansson     a = int_ld_mmio_beN(cpu, full, ret_be, addr, size - 8, mmu_idx,
20558bf67267SRichard Henderson                         MMU_DATA_LOAD, ra, mr, mr_offset);
2056d50ef446SAnton Johansson     b = int_ld_mmio_beN(cpu, full, ret_be, addr + size - 8, 8, mmu_idx,
20578bf67267SRichard Henderson                         MMU_DATA_LOAD, ra, mr, mr_offset + size - 8);
20588bf67267SRichard Henderson     qemu_mutex_unlock_iothread();
20598bf67267SRichard Henderson 
20608bf67267SRichard Henderson     return int128_make128(b, a);
20618bf67267SRichard Henderson }
20628bf67267SRichard Henderson 
20638cfdacaaSRichard Henderson /**
20648cfdacaaSRichard Henderson  * do_ld_bytes_beN
20658cfdacaaSRichard Henderson  * @p: translation parameters
20668cfdacaaSRichard Henderson  * @ret_be: accumulated data
20678cfdacaaSRichard Henderson  *
20688cfdacaaSRichard Henderson  * Load @p->size bytes from @p->haddr, which is RAM.
20698cfdacaaSRichard Henderson  * The bytes are concatenated in big-endian order with @ret_be.
20708cfdacaaSRichard Henderson  */
20718cfdacaaSRichard Henderson static uint64_t do_ld_bytes_beN(MMULookupPageData *p, uint64_t ret_be)
20728cfdacaaSRichard Henderson {
20738cfdacaaSRichard Henderson     uint8_t *haddr = p->haddr;
20748cfdacaaSRichard Henderson     int i, size = p->size;
20758cfdacaaSRichard Henderson 
20768cfdacaaSRichard Henderson     for (i = 0; i < size; i++) {
20778cfdacaaSRichard Henderson         ret_be = (ret_be << 8) | haddr[i];
20788cfdacaaSRichard Henderson     }
20798cfdacaaSRichard Henderson     return ret_be;
20808cfdacaaSRichard Henderson }
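
/*
 * Illustrative: folding the bytes {0x12, 0x34} into ret_be = 0xab with
 * the loop above appends them at the low end, big-endian fashion:
 *
 *     ((0xab << 8 | 0x12) << 8) | 0x34   // == 0xab1234
 */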
20818cfdacaaSRichard Henderson 
2082cdfac37bSRichard Henderson /**
2083cdfac37bSRichard Henderson  * do_ld_parts_beN
2084cdfac37bSRichard Henderson  * @p: translation parameters
2085cdfac37bSRichard Henderson  * @ret_be: accumulated data
2086cdfac37bSRichard Henderson  *
2087cdfac37bSRichard Henderson  * As do_ld_bytes_beN, but atomically on each aligned part.
2088cdfac37bSRichard Henderson  */
2089cdfac37bSRichard Henderson static uint64_t do_ld_parts_beN(MMULookupPageData *p, uint64_t ret_be)
2090cdfac37bSRichard Henderson {
2091cdfac37bSRichard Henderson     void *haddr = p->haddr;
2092cdfac37bSRichard Henderson     int size = p->size;
2093cdfac37bSRichard Henderson 
2094cdfac37bSRichard Henderson     do {
2095cdfac37bSRichard Henderson         uint64_t x;
2096cdfac37bSRichard Henderson         int n;
2097cdfac37bSRichard Henderson 
2098cdfac37bSRichard Henderson         /*
2099cdfac37bSRichard Henderson          * Find minimum of alignment and size.
2100cdfac37bSRichard Henderson          * This is slightly stronger than required by MO_ATOM_SUBALIGN, which
2101cdfac37bSRichard Henderson          * would have only checked the low bits of addr|size once at the start,
2102cdfac37bSRichard Henderson          * but is just as easy.
2103cdfac37bSRichard Henderson          */
2104cdfac37bSRichard Henderson         switch (((uintptr_t)haddr | size) & 7) {
2105cdfac37bSRichard Henderson         case 4:
2106cdfac37bSRichard Henderson             x = cpu_to_be32(load_atomic4(haddr));
2107cdfac37bSRichard Henderson             ret_be = (ret_be << 32) | x;
2108cdfac37bSRichard Henderson             n = 4;
2109cdfac37bSRichard Henderson             break;
2110cdfac37bSRichard Henderson         case 2:
2111cdfac37bSRichard Henderson         case 6:
2112cdfac37bSRichard Henderson             x = cpu_to_be16(load_atomic2(haddr));
2113cdfac37bSRichard Henderson             ret_be = (ret_be << 16) | x;
2114cdfac37bSRichard Henderson             n = 2;
2115cdfac37bSRichard Henderson             break;
2116cdfac37bSRichard Henderson         default:
2117cdfac37bSRichard Henderson             x = *(uint8_t *)haddr;
2118cdfac37bSRichard Henderson             ret_be = (ret_be << 8) | x;
2119cdfac37bSRichard Henderson             n = 1;
2120cdfac37bSRichard Henderson             break;
2121cdfac37bSRichard Henderson         case 0:
2122cdfac37bSRichard Henderson             g_assert_not_reached();
2123cdfac37bSRichard Henderson         }
2124cdfac37bSRichard Henderson         haddr += n;
2125cdfac37bSRichard Henderson         size -= n;
2126cdfac37bSRichard Henderson     } while (size != 0);
2127cdfac37bSRichard Henderson     return ret_be;
2128cdfac37bSRichard Henderson }
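
/*
 * Illustrative walk through the switch above: with haddr ending in ...5
 * and size = 3, the first pass sees (5 | 3) & 7 == 7 and copies a single
 * byte; the second sees (6 | 2) & 7 == 6 and finishes with one 2-byte
 * atomic load.  Every piece is thus read with its natural atomicity.
 */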
2129cdfac37bSRichard Henderson 
2130cdfac37bSRichard Henderson /**
2131cdfac37bSRichard Henderson  * do_ld_whole_be4
2132cdfac37bSRichard Henderson  * @p: translation parameters
2133cdfac37bSRichard Henderson  * @ret_be: accumulated data
2134cdfac37bSRichard Henderson  *
2135cdfac37bSRichard Henderson  * As do_ld_bytes_beN, but with one atomic load.
2136cdfac37bSRichard Henderson  * Four aligned bytes are guaranteed to cover the load.
2137cdfac37bSRichard Henderson  */
2138cdfac37bSRichard Henderson static uint64_t do_ld_whole_be4(MMULookupPageData *p, uint64_t ret_be)
2139cdfac37bSRichard Henderson {
2140cdfac37bSRichard Henderson     int o = p->addr & 3;
2141cdfac37bSRichard Henderson     uint32_t x = load_atomic4(p->haddr - o);
2142cdfac37bSRichard Henderson 
2143cdfac37bSRichard Henderson     x = cpu_to_be32(x);
2144cdfac37bSRichard Henderson     x <<= o * 8;
2145cdfac37bSRichard Henderson     x >>= (4 - p->size) * 8;
2146cdfac37bSRichard Henderson     return (ret_be << (p->size * 8)) | x;
2147cdfac37bSRichard Henderson }
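
/*
 * Worked example for the extraction above: memory holds B0 B1 B2 B3 at
 * the aligned address and the access wants size = 2 bytes at offset
 * o = 1.  After cpu_to_be32, x reads as the integer B0B1B2B3;
 * x <<= 8 drops B0, then x >>= 16 drops B3, leaving (B1 << 8) | B2.
 */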
2148cdfac37bSRichard Henderson 
2149cdfac37bSRichard Henderson /**
2150cdfac37bSRichard Henderson  * do_ld_whole_be8
2151cdfac37bSRichard Henderson  * @p: translation parameters
2152cdfac37bSRichard Henderson  * @ret_be: accumulated data
2153cdfac37bSRichard Henderson  *
2154cdfac37bSRichard Henderson  * As do_ld_bytes_beN, but with one atomic load.
2155cdfac37bSRichard Henderson  * Eight aligned bytes are guaranteed to cover the load.
2156cdfac37bSRichard Henderson  */
2157d50ef446SAnton Johansson static uint64_t do_ld_whole_be8(CPUState *cpu, uintptr_t ra,
2158cdfac37bSRichard Henderson                                 MMULookupPageData *p, uint64_t ret_be)
2159cdfac37bSRichard Henderson {
2160cdfac37bSRichard Henderson     int o = p->addr & 7;
216173fda56fSAnton Johansson     uint64_t x = load_atomic8_or_exit(cpu, ra, p->haddr - o);
2162cdfac37bSRichard Henderson 
2163cdfac37bSRichard Henderson     x = cpu_to_be64(x);
2164cdfac37bSRichard Henderson     x <<= o * 8;
2165cdfac37bSRichard Henderson     x >>= (8 - p->size) * 8;
2166cdfac37bSRichard Henderson     return (ret_be << (p->size * 8)) | x;
2167cdfac37bSRichard Henderson }
2168cdfac37bSRichard Henderson 
216935c653c4SRichard Henderson /**
217035c653c4SRichard Henderson  * do_ld_whole_be16
217135c653c4SRichard Henderson  * @p: translation parameters
217235c653c4SRichard Henderson  * @ret_be: accumulated data
217335c653c4SRichard Henderson  *
217435c653c4SRichard Henderson  * As do_ld_bytes_beN, but with one atomic load.
217535c653c4SRichard Henderson  * 16 aligned bytes are guaranteed to cover the load.
217635c653c4SRichard Henderson  */
2177d50ef446SAnton Johansson static Int128 do_ld_whole_be16(CPUState *cpu, uintptr_t ra,
217835c653c4SRichard Henderson                                MMULookupPageData *p, uint64_t ret_be)
217935c653c4SRichard Henderson {
218035c653c4SRichard Henderson     int o = p->addr & 15;
218173fda56fSAnton Johansson     Int128 x, y = load_atomic16_or_exit(cpu, ra, p->haddr - o);
218235c653c4SRichard Henderson     int size = p->size;
218335c653c4SRichard Henderson 
218435c653c4SRichard Henderson     if (!HOST_BIG_ENDIAN) {
218535c653c4SRichard Henderson         y = bswap128(y);
218635c653c4SRichard Henderson     }
218735c653c4SRichard Henderson     y = int128_lshift(y, o * 8);
218835c653c4SRichard Henderson     y = int128_urshift(y, (16 - size) * 8);
218935c653c4SRichard Henderson     x = int128_make64(ret_be);
219035c653c4SRichard Henderson     x = int128_lshift(x, size * 8);
219135c653c4SRichard Henderson     return int128_or(x, y);
219235c653c4SRichard Henderson }
219335c653c4SRichard Henderson 
21948cfdacaaSRichard Henderson /*
21958cfdacaaSRichard Henderson  * Wrapper for the above.
21968cfdacaaSRichard Henderson  */
2197d50ef446SAnton Johansson static uint64_t do_ld_beN(CPUState *cpu, MMULookupPageData *p,
2198cdfac37bSRichard Henderson                           uint64_t ret_be, int mmu_idx, MMUAccessType type,
2199cdfac37bSRichard Henderson                           MemOp mop, uintptr_t ra)
22008cfdacaaSRichard Henderson {
2201cdfac37bSRichard Henderson     MemOp atom;
2202cdfac37bSRichard Henderson     unsigned tmp, half_size;
2203cdfac37bSRichard Henderson 
22048cfdacaaSRichard Henderson     if (unlikely(p->flags & TLB_MMIO)) {
2205d50ef446SAnton Johansson         return do_ld_mmio_beN(cpu, p->full, ret_be, p->addr, p->size,
22061966855eSRichard Henderson                               mmu_idx, type, ra);
2207cdfac37bSRichard Henderson     }
2208cdfac37bSRichard Henderson 
2209cdfac37bSRichard Henderson     /*
2210cdfac37bSRichard Henderson      * It is a given that we cross a page and therefore there is no
2211cdfac37bSRichard Henderson      * atomicity for the load as a whole, but subobjects may need attention.
2212cdfac37bSRichard Henderson      */
2213cdfac37bSRichard Henderson     atom = mop & MO_ATOM_MASK;
2214cdfac37bSRichard Henderson     switch (atom) {
2215cdfac37bSRichard Henderson     case MO_ATOM_SUBALIGN:
2216cdfac37bSRichard Henderson         return do_ld_parts_beN(p, ret_be);
2217cdfac37bSRichard Henderson 
2218cdfac37bSRichard Henderson     case MO_ATOM_IFALIGN_PAIR:
2219cdfac37bSRichard Henderson     case MO_ATOM_WITHIN16_PAIR:
2220cdfac37bSRichard Henderson         tmp = mop & MO_SIZE;
2221cdfac37bSRichard Henderson         tmp = tmp ? tmp - 1 : 0;
2222cdfac37bSRichard Henderson         half_size = 1 << tmp;
2223cdfac37bSRichard Henderson         if (atom == MO_ATOM_IFALIGN_PAIR
2224cdfac37bSRichard Henderson             ? p->size == half_size
2225cdfac37bSRichard Henderson             : p->size >= half_size) {
2226cdfac37bSRichard Henderson             if (!HAVE_al8_fast && p->size < 4) {
2227cdfac37bSRichard Henderson                 return do_ld_whole_be4(p, ret_be);
22288cfdacaaSRichard Henderson             } else {
2229d50ef446SAnton Johansson                 return do_ld_whole_be8(cpu, ra, p, ret_be);
2230cdfac37bSRichard Henderson             }
2231cdfac37bSRichard Henderson         }
2232cdfac37bSRichard Henderson         /* fall through */
2233cdfac37bSRichard Henderson 
2234cdfac37bSRichard Henderson     case MO_ATOM_IFALIGN:
2235cdfac37bSRichard Henderson     case MO_ATOM_WITHIN16:
2236cdfac37bSRichard Henderson     case MO_ATOM_NONE:
22378cfdacaaSRichard Henderson         return do_ld_bytes_beN(p, ret_be);
2238cdfac37bSRichard Henderson 
2239cdfac37bSRichard Henderson     default:
2240cdfac37bSRichard Henderson         g_assert_not_reached();
22418cfdacaaSRichard Henderson     }
22428cfdacaaSRichard Henderson }
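
/*
 * Illustrative: an 8-byte load (MO_64) has half_size = 4 above.  Under
 * MO_ATOM_IFALIGN_PAIR only an exact 4+4 page split leaves two naturally
 * aligned halves to load atomically; a 3+5 split has no aligned
 * subobject and falls through to the byte loop.  Under
 * MO_ATOM_WITHIN16_PAIR the half contained in this page must be atomic
 * whenever p->size >= half_size, hence the >= test.
 */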
22438cfdacaaSRichard Henderson 
224435c653c4SRichard Henderson /*
224535c653c4SRichard Henderson  * Wrapper for the above, for 8 < size < 16.
224635c653c4SRichard Henderson  */
2247d50ef446SAnton Johansson static Int128 do_ld16_beN(CPUState *cpu, MMULookupPageData *p,
224835c653c4SRichard Henderson                           uint64_t a, int mmu_idx, MemOp mop, uintptr_t ra)
224935c653c4SRichard Henderson {
225035c653c4SRichard Henderson     int size = p->size;
225135c653c4SRichard Henderson     uint64_t b;
225235c653c4SRichard Henderson     MemOp atom;
225335c653c4SRichard Henderson 
225435c653c4SRichard Henderson     if (unlikely(p->flags & TLB_MMIO)) {
2255d50ef446SAnton Johansson         return do_ld16_mmio_beN(cpu, p->full, a, p->addr, size, mmu_idx, ra);
225635c653c4SRichard Henderson     }
225735c653c4SRichard Henderson 
225835c653c4SRichard Henderson     /*
225935c653c4SRichard Henderson      * It is a given that we cross a page and therefore there is no
226035c653c4SRichard Henderson      * atomicity for the load as a whole, but subobjects may need attention.
226135c653c4SRichard Henderson      */
226235c653c4SRichard Henderson     atom = mop & MO_ATOM_MASK;
226335c653c4SRichard Henderson     switch (atom) {
226435c653c4SRichard Henderson     case MO_ATOM_SUBALIGN:
226535c653c4SRichard Henderson         p->size = size - 8;
226635c653c4SRichard Henderson         a = do_ld_parts_beN(p, a);
226735c653c4SRichard Henderson         p->haddr += size - 8;
226835c653c4SRichard Henderson         p->size = 8;
226935c653c4SRichard Henderson         b = do_ld_parts_beN(p, 0);
227035c653c4SRichard Henderson         break;
227135c653c4SRichard Henderson 
227235c653c4SRichard Henderson     case MO_ATOM_WITHIN16_PAIR:
227335c653c4SRichard Henderson         /* Since size > 8, this is the half that must be atomic. */
2274d50ef446SAnton Johansson         return do_ld_whole_be16(cpu, ra, p, a);
227535c653c4SRichard Henderson 
227635c653c4SRichard Henderson     case MO_ATOM_IFALIGN_PAIR:
227735c653c4SRichard Henderson         /*
227835c653c4SRichard Henderson          * Since size > 8, both halves are misaligned,
227935c653c4SRichard Henderson          * and so neither is atomic.
228035c653c4SRichard Henderson          */
228135c653c4SRichard Henderson     case MO_ATOM_IFALIGN:
228235c653c4SRichard Henderson     case MO_ATOM_WITHIN16:
228335c653c4SRichard Henderson     case MO_ATOM_NONE:
228435c653c4SRichard Henderson         p->size = size - 8;
228535c653c4SRichard Henderson         a = do_ld_bytes_beN(p, a);
228635c653c4SRichard Henderson         b = ldq_be_p(p->haddr + size - 8);
228735c653c4SRichard Henderson         break;
228835c653c4SRichard Henderson 
228935c653c4SRichard Henderson     default:
229035c653c4SRichard Henderson         g_assert_not_reached();
229135c653c4SRichard Henderson     }
229235c653c4SRichard Henderson 
229335c653c4SRichard Henderson     return int128_make128(b, a);
229435c653c4SRichard Henderson }
229535c653c4SRichard Henderson 
2296d50ef446SAnton Johansson static uint8_t do_ld_1(CPUState *cpu, MMULookupPageData *p, int mmu_idx,
22978cfdacaaSRichard Henderson                        MMUAccessType type, uintptr_t ra)
22988cfdacaaSRichard Henderson {
22998cfdacaaSRichard Henderson     if (unlikely(p->flags & TLB_MMIO)) {
2300d50ef446SAnton Johansson         return do_ld_mmio_beN(cpu, p->full, 0, p->addr, 1, mmu_idx, type, ra);
23018cfdacaaSRichard Henderson     } else {
23028cfdacaaSRichard Henderson         return *(uint8_t *)p->haddr;
23038cfdacaaSRichard Henderson     }
23048cfdacaaSRichard Henderson }
23058cfdacaaSRichard Henderson 
2306d50ef446SAnton Johansson static uint16_t do_ld_2(CPUState *cpu, MMULookupPageData *p, int mmu_idx,
23078cfdacaaSRichard Henderson                         MMUAccessType type, MemOp memop, uintptr_t ra)
23088cfdacaaSRichard Henderson {
2309f7eaf9d7SRichard Henderson     uint16_t ret;
23108cfdacaaSRichard Henderson 
23118cfdacaaSRichard Henderson     if (unlikely(p->flags & TLB_MMIO)) {
2312d50ef446SAnton Johansson         ret = do_ld_mmio_beN(cpu, p->full, 0, p->addr, 2, mmu_idx, type, ra);
2313f7eaf9d7SRichard Henderson         if ((memop & MO_BSWAP) == MO_LE) {
2314f7eaf9d7SRichard Henderson             ret = bswap16(ret);
23158cfdacaaSRichard Henderson         }
2316f7eaf9d7SRichard Henderson     } else {
23178cfdacaaSRichard Henderson         /* Perform the load host endian, then swap if necessary. */
231873fda56fSAnton Johansson         ret = load_atom_2(cpu, ra, p->haddr, memop);
23198cfdacaaSRichard Henderson         if (memop & MO_BSWAP) {
23208cfdacaaSRichard Henderson             ret = bswap16(ret);
23218cfdacaaSRichard Henderson         }
2322f7eaf9d7SRichard Henderson     }
23238cfdacaaSRichard Henderson     return ret;
23248cfdacaaSRichard Henderson }
23258cfdacaaSRichard Henderson 
2326d50ef446SAnton Johansson static uint32_t do_ld_4(CPUState *cpu, MMULookupPageData *p, int mmu_idx,
23278cfdacaaSRichard Henderson                         MMUAccessType type, MemOp memop, uintptr_t ra)
23288cfdacaaSRichard Henderson {
23298cfdacaaSRichard Henderson     uint32_t ret;
23308cfdacaaSRichard Henderson 
23318cfdacaaSRichard Henderson     if (unlikely(p->flags & TLB_MMIO)) {
2332d50ef446SAnton Johansson         ret = do_ld_mmio_beN(cpu, p->full, 0, p->addr, 4, mmu_idx, type, ra);
2333f7eaf9d7SRichard Henderson         if ((memop & MO_BSWAP) == MO_LE) {
2334f7eaf9d7SRichard Henderson             ret = bswap32(ret);
23358cfdacaaSRichard Henderson         }
2336f7eaf9d7SRichard Henderson     } else {
23378cfdacaaSRichard Henderson         /* Perform the load host endian. */
233873fda56fSAnton Johansson         ret = load_atom_4(cpu, ra, p->haddr, memop);
23398cfdacaaSRichard Henderson         if (memop & MO_BSWAP) {
23408cfdacaaSRichard Henderson             ret = bswap32(ret);
23418cfdacaaSRichard Henderson         }
2342f7eaf9d7SRichard Henderson     }
23438cfdacaaSRichard Henderson     return ret;
23448cfdacaaSRichard Henderson }
23458cfdacaaSRichard Henderson 
2346d50ef446SAnton Johansson static uint64_t do_ld_8(CPUState *cpu, MMULookupPageData *p, int mmu_idx,
23478cfdacaaSRichard Henderson                         MMUAccessType type, MemOp memop, uintptr_t ra)
23488cfdacaaSRichard Henderson {
23498cfdacaaSRichard Henderson     uint64_t ret;
23508cfdacaaSRichard Henderson 
23518cfdacaaSRichard Henderson     if (unlikely(p->flags & TLB_MMIO)) {
2352d50ef446SAnton Johansson         ret = do_ld_mmio_beN(cpu, p->full, 0, p->addr, 8, mmu_idx, type, ra);
2353f7eaf9d7SRichard Henderson         if ((memop & MO_BSWAP) == MO_LE) {
2354f7eaf9d7SRichard Henderson             ret = bswap64(ret);
23558cfdacaaSRichard Henderson         }
2356f7eaf9d7SRichard Henderson     } else {
23578cfdacaaSRichard Henderson         /* Perform the load host endian. */
235873fda56fSAnton Johansson         ret = load_atom_8(cpu, ra, p->haddr, memop);
23598cfdacaaSRichard Henderson         if (memop & MO_BSWAP) {
23608cfdacaaSRichard Henderson             ret = bswap64(ret);
23618cfdacaaSRichard Henderson         }
2362f7eaf9d7SRichard Henderson     }
23638cfdacaaSRichard Henderson     return ret;
23648cfdacaaSRichard Henderson }
23658cfdacaaSRichard Henderson 
2366d50ef446SAnton Johansson static uint8_t do_ld1_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
23678cfdacaaSRichard Henderson                           uintptr_t ra, MMUAccessType access_type)
23688cfdacaaSRichard Henderson {
23698cfdacaaSRichard Henderson     MMULookupLocals l;
23708cfdacaaSRichard Henderson     bool crosspage;
23718cfdacaaSRichard Henderson 
2372f86e8f3dSRichard Henderson     cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
2373d50ef446SAnton Johansson     crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l);
23748cfdacaaSRichard Henderson     tcg_debug_assert(!crosspage);
23758cfdacaaSRichard Henderson 
2376d50ef446SAnton Johansson     return do_ld_1(cpu, &l.page[0], l.mmu_idx, access_type, ra);
23772dd92606SRichard Henderson }
23782dd92606SRichard Henderson 
2379d50ef446SAnton Johansson static uint16_t do_ld2_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
23808cfdacaaSRichard Henderson                            uintptr_t ra, MMUAccessType access_type)
23812dd92606SRichard Henderson {
23828cfdacaaSRichard Henderson     MMULookupLocals l;
23838cfdacaaSRichard Henderson     bool crosspage;
23848cfdacaaSRichard Henderson     uint16_t ret;
23858cfdacaaSRichard Henderson     uint8_t a, b;
23868cfdacaaSRichard Henderson 
2387f86e8f3dSRichard Henderson     cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
2388d50ef446SAnton Johansson     crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l);
23898cfdacaaSRichard Henderson     if (likely(!crosspage)) {
2390d50ef446SAnton Johansson         return do_ld_2(cpu, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
23918cfdacaaSRichard Henderson     }
23928cfdacaaSRichard Henderson 
2393d50ef446SAnton Johansson     a = do_ld_1(cpu, &l.page[0], l.mmu_idx, access_type, ra);
2394d50ef446SAnton Johansson     b = do_ld_1(cpu, &l.page[1], l.mmu_idx, access_type, ra);
23958cfdacaaSRichard Henderson 
23968cfdacaaSRichard Henderson     if ((l.memop & MO_BSWAP) == MO_LE) {
23978cfdacaaSRichard Henderson         ret = a | (b << 8);
23988cfdacaaSRichard Henderson     } else {
23998cfdacaaSRichard Henderson         ret = b | (a << 8);
24008cfdacaaSRichard Henderson     }
24018cfdacaaSRichard Henderson     return ret;
2402eed56642SAlex Bennée }
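
/*
 * Illustrative: for a cross-page 2-byte load with byte a = 0x12 at addr
 * and byte b = 0x34 at addr + 1, the recombination above yields 0x3412
 * for MO_LE (a | b << 8) and 0x1234 for MO_BE (b | a << 8).
 */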
2403eed56642SAlex Bennée 
2404d50ef446SAnton Johansson static uint32_t do_ld4_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
24058cfdacaaSRichard Henderson                            uintptr_t ra, MMUAccessType access_type)
24062dd92606SRichard Henderson {
24078cfdacaaSRichard Henderson     MMULookupLocals l;
24088cfdacaaSRichard Henderson     bool crosspage;
24098cfdacaaSRichard Henderson     uint32_t ret;
24108cfdacaaSRichard Henderson 
2411f86e8f3dSRichard Henderson     cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
2412d50ef446SAnton Johansson     crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l);
24138cfdacaaSRichard Henderson     if (likely(!crosspage)) {
2414d50ef446SAnton Johansson         return do_ld_4(cpu, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
24158cfdacaaSRichard Henderson     }
24168cfdacaaSRichard Henderson 
2417d50ef446SAnton Johansson     ret = do_ld_beN(cpu, &l.page[0], 0, l.mmu_idx, access_type, l.memop, ra);
2418d50ef446SAnton Johansson     ret = do_ld_beN(cpu, &l.page[1], ret, l.mmu_idx, access_type, l.memop, ra);
24198cfdacaaSRichard Henderson     if ((l.memop & MO_BSWAP) == MO_LE) {
24208cfdacaaSRichard Henderson         ret = bswap32(ret);
24218cfdacaaSRichard Henderson     }
24228cfdacaaSRichard Henderson     return ret;
2423eed56642SAlex Bennée }
2424eed56642SAlex Bennée 
2425d50ef446SAnton Johansson static uint64_t do_ld8_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
24268cfdacaaSRichard Henderson                            uintptr_t ra, MMUAccessType access_type)
24278cfdacaaSRichard Henderson {
24288cfdacaaSRichard Henderson     MMULookupLocals l;
24298cfdacaaSRichard Henderson     bool crosspage;
24308cfdacaaSRichard Henderson     uint64_t ret;
24318cfdacaaSRichard Henderson 
2432f86e8f3dSRichard Henderson     cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
2433d50ef446SAnton Johansson     crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l);
24348cfdacaaSRichard Henderson     if (likely(!crosspage)) {
2435d50ef446SAnton Johansson         return do_ld_8(cpu, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
24368cfdacaaSRichard Henderson     }
24378cfdacaaSRichard Henderson 
2438d50ef446SAnton Johansson     ret = do_ld_beN(cpu, &l.page[0], 0, l.mmu_idx, access_type, l.memop, ra);
2439d50ef446SAnton Johansson     ret = do_ld_beN(cpu, &l.page[1], ret, l.mmu_idx, access_type, l.memop, ra);
24408cfdacaaSRichard Henderson     if ((l.memop & MO_BSWAP) == MO_LE) {
24418cfdacaaSRichard Henderson         ret = bswap64(ret);
24428cfdacaaSRichard Henderson     }
24438cfdacaaSRichard Henderson     return ret;
2444eed56642SAlex Bennée }
2445eed56642SAlex Bennée 
2446d50ef446SAnton Johansson static Int128 do_ld16_mmu(CPUState *cpu, vaddr addr,
244735c653c4SRichard Henderson                           MemOpIdx oi, uintptr_t ra)
244835c653c4SRichard Henderson {
244935c653c4SRichard Henderson     MMULookupLocals l;
245035c653c4SRichard Henderson     bool crosspage;
245135c653c4SRichard Henderson     uint64_t a, b;
245235c653c4SRichard Henderson     Int128 ret;
245335c653c4SRichard Henderson     int first;
245435c653c4SRichard Henderson 
2455f86e8f3dSRichard Henderson     cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
2456d50ef446SAnton Johansson     crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_LOAD, &l);
245735c653c4SRichard Henderson     if (likely(!crosspage)) {
245835c653c4SRichard Henderson         if (unlikely(l.page[0].flags & TLB_MMIO)) {
2459d50ef446SAnton Johansson             ret = do_ld16_mmio_beN(cpu, l.page[0].full, 0, addr, 16,
24608bf67267SRichard Henderson                                    l.mmu_idx, ra);
2461f7eaf9d7SRichard Henderson             if ((l.memop & MO_BSWAP) == MO_LE) {
2462f7eaf9d7SRichard Henderson                 ret = bswap128(ret);
246335c653c4SRichard Henderson             }
2464f7eaf9d7SRichard Henderson         } else {
2465f7eaf9d7SRichard Henderson             /* Perform the load host endian. */
246673fda56fSAnton Johansson             ret = load_atom_16(cpu, ra, l.page[0].haddr, l.memop);
246735c653c4SRichard Henderson             if (l.memop & MO_BSWAP) {
246835c653c4SRichard Henderson                 ret = bswap128(ret);
246935c653c4SRichard Henderson             }
2470f7eaf9d7SRichard Henderson         }
247135c653c4SRichard Henderson         return ret;
247235c653c4SRichard Henderson     }
247335c653c4SRichard Henderson 
247435c653c4SRichard Henderson     first = l.page[0].size;
247535c653c4SRichard Henderson     if (first == 8) {
247635c653c4SRichard Henderson         MemOp mop8 = (l.memop & ~MO_SIZE) | MO_64;
247735c653c4SRichard Henderson 
2478d50ef446SAnton Johansson         a = do_ld_8(cpu, &l.page[0], l.mmu_idx, MMU_DATA_LOAD, mop8, ra);
2479d50ef446SAnton Johansson         b = do_ld_8(cpu, &l.page[1], l.mmu_idx, MMU_DATA_LOAD, mop8, ra);
248035c653c4SRichard Henderson         if ((mop8 & MO_BSWAP) == MO_LE) {
248135c653c4SRichard Henderson             ret = int128_make128(a, b);
248235c653c4SRichard Henderson         } else {
248335c653c4SRichard Henderson             ret = int128_make128(b, a);
248435c653c4SRichard Henderson         }
248535c653c4SRichard Henderson         return ret;
248635c653c4SRichard Henderson     }
248735c653c4SRichard Henderson 
248835c653c4SRichard Henderson     if (first < 8) {
2489d50ef446SAnton Johansson         a = do_ld_beN(cpu, &l.page[0], 0, l.mmu_idx,
249035c653c4SRichard Henderson                       MMU_DATA_LOAD, l.memop, ra);
2491d50ef446SAnton Johansson         ret = do_ld16_beN(cpu, &l.page[1], a, l.mmu_idx, l.memop, ra);
249235c653c4SRichard Henderson     } else {
2493d50ef446SAnton Johansson         ret = do_ld16_beN(cpu, &l.page[0], 0, l.mmu_idx, l.memop, ra);
249435c653c4SRichard Henderson         b = int128_getlo(ret);
249535c653c4SRichard Henderson         ret = int128_lshift(ret, l.page[1].size * 8);
249635c653c4SRichard Henderson         a = int128_gethi(ret);
2497d50ef446SAnton Johansson         b = do_ld_beN(cpu, &l.page[1], b, l.mmu_idx,
249835c653c4SRichard Henderson                       MMU_DATA_LOAD, l.memop, ra);
249935c653c4SRichard Henderson         ret = int128_make128(b, a);
250035c653c4SRichard Henderson     }
250135c653c4SRichard Henderson     if ((l.memop & MO_BSWAP) == MO_LE) {
250235c653c4SRichard Henderson         ret = bswap128(ret);
250335c653c4SRichard Henderson     }
250435c653c4SRichard Henderson     return ret;
250535c653c4SRichard Henderson }
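
/*
 * Illustrative: on an even 8+8 split of a little-endian 16-byte load,
 * the first page supplies the low half of the Int128 and the second
 * page the high half; big-endian is the mirror image.  The uneven
 * splits are recombined through do_ld_beN/do_ld16_beN as shown above.
 */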
250635c653c4SRichard Henderson 
2507d03f1408SRichard Henderson /*
2508eed56642SAlex Bennée  * Store Helpers
2509eed56642SAlex Bennée  */
2510eed56642SAlex Bennée 
251159213461SRichard Henderson /**
251259213461SRichard Henderson  * int_st_mmio_leN:
2513d50ef446SAnton Johansson  * @cpu: generic cpu state
25141966855eSRichard Henderson  * @full: page parameters
251559213461SRichard Henderson  * @val_le: data to store
25161966855eSRichard Henderson  * @addr: virtual address
25171966855eSRichard Henderson  * @size: number of bytes
251859213461SRichard Henderson  * @mmu_idx: virtual address context
251959213461SRichard Henderson  * @ra: return address into tcg generated code, or 0
25201966855eSRichard Henderson  * Context: iothread lock held
252159213461SRichard Henderson  *
25221966855eSRichard Henderson  * Store @size bytes at @addr, which is memory-mapped i/o.
252359213461SRichard Henderson  * The bytes to store are extracted in little-endian order from @val_le;
252459213461SRichard Henderson  * return the bytes of @val_le beyond @size that have not been stored.
252559213461SRichard Henderson  */
2526d50ef446SAnton Johansson static uint64_t int_st_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full,
25271966855eSRichard Henderson                                 uint64_t val_le, vaddr addr, int size,
25281f9823ceSRichard Henderson                                 int mmu_idx, uintptr_t ra,
25291f9823ceSRichard Henderson                                 MemoryRegion *mr, hwaddr mr_offset)
25306b8b622eSRichard Henderson {
2531190aba80SRichard Henderson     do {
25325646d6a7SRichard Henderson         MemOp this_mop;
25335646d6a7SRichard Henderson         unsigned this_size;
25345646d6a7SRichard Henderson         MemTxResult r;
25355646d6a7SRichard Henderson 
2536190aba80SRichard Henderson         /* Store aligned pieces up to 8 bytes. */
25375646d6a7SRichard Henderson         this_mop = ctz32(size | (int)addr | 8);
25385646d6a7SRichard Henderson         this_size = 1 << this_mop;
25395646d6a7SRichard Henderson         this_mop |= MO_LE;
25405646d6a7SRichard Henderson 
25415646d6a7SRichard Henderson         r = memory_region_dispatch_write(mr, mr_offset, val_le,
25421f9823ceSRichard Henderson                                          this_mop, full->attrs);
25435646d6a7SRichard Henderson         if (unlikely(r != MEMTX_OK)) {
2544d50ef446SAnton Johansson             io_failed(cpu, full, addr, this_size, MMU_DATA_STORE,
25455646d6a7SRichard Henderson                       mmu_idx, r, ra);
254659213461SRichard Henderson         }
25475646d6a7SRichard Henderson         if (this_size == 8) {
25485646d6a7SRichard Henderson             return 0;
25495646d6a7SRichard Henderson         }
25505646d6a7SRichard Henderson 
25515646d6a7SRichard Henderson         val_le >>= this_size * 8;
25525646d6a7SRichard Henderson         addr += this_size;
25535646d6a7SRichard Henderson         mr_offset += this_size;
25545646d6a7SRichard Henderson         size -= this_size;
2555190aba80SRichard Henderson     } while (size);
2556190aba80SRichard Henderson 
255759213461SRichard Henderson     return val_le;
255859213461SRichard Henderson }
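
/*
 * Worked example for the store loop above: val_le = 0x11223344 at
 * addr = 0x1002 with size = 4 issues a 2-byte write of 0x3344, shifts
 * val_le down to 0x1122, and finishes with a second 2-byte write at
 * addr = 0x1004.
 */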
255959213461SRichard Henderson 
2560d50ef446SAnton Johansson static uint64_t do_st_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full,
25611f9823ceSRichard Henderson                                uint64_t val_le, vaddr addr, int size,
25621f9823ceSRichard Henderson                                int mmu_idx, uintptr_t ra)
25631f9823ceSRichard Henderson {
25641f9823ceSRichard Henderson     MemoryRegionSection *section;
25651f9823ceSRichard Henderson     hwaddr mr_offset;
25661f9823ceSRichard Henderson     MemoryRegion *mr;
25671f9823ceSRichard Henderson     MemTxAttrs attrs;
25681f9823ceSRichard Henderson     uint64_t ret;
25691f9823ceSRichard Henderson 
25701f9823ceSRichard Henderson     tcg_debug_assert(size > 0 && size <= 8);
25711f9823ceSRichard Henderson 
25721f9823ceSRichard Henderson     attrs = full->attrs;
2573d50ef446SAnton Johansson     section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
25741f9823ceSRichard Henderson     mr = section->mr;
25751f9823ceSRichard Henderson 
25761f9823ceSRichard Henderson     qemu_mutex_lock_iothread();
2577d50ef446SAnton Johansson     ret = int_st_mmio_leN(cpu, full, val_le, addr, size, mmu_idx,
25781f9823ceSRichard Henderson                           ra, mr, mr_offset);
25791f9823ceSRichard Henderson     qemu_mutex_unlock_iothread();
25801f9823ceSRichard Henderson 
25811f9823ceSRichard Henderson     return ret;
25821f9823ceSRichard Henderson }
25831f9823ceSRichard Henderson 
2584d50ef446SAnton Johansson static uint64_t do_st16_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full,
25851f9823ceSRichard Henderson                                  Int128 val_le, vaddr addr, int size,
25861f9823ceSRichard Henderson                                  int mmu_idx, uintptr_t ra)
25871f9823ceSRichard Henderson {
25881f9823ceSRichard Henderson     MemoryRegionSection *section;
25891f9823ceSRichard Henderson     MemoryRegion *mr;
25901f9823ceSRichard Henderson     hwaddr mr_offset;
25911f9823ceSRichard Henderson     MemTxAttrs attrs;
25921f9823ceSRichard Henderson     uint64_t ret;
25931f9823ceSRichard Henderson 
25941f9823ceSRichard Henderson     tcg_debug_assert(size > 8 && size <= 16);
25951f9823ceSRichard Henderson 
25961f9823ceSRichard Henderson     attrs = full->attrs;
2597d50ef446SAnton Johansson     section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
25981f9823ceSRichard Henderson     mr = section->mr;
25991f9823ceSRichard Henderson 
26001f9823ceSRichard Henderson     qemu_mutex_lock_iothread();
2601d50ef446SAnton Johansson     int_st_mmio_leN(cpu, full, int128_getlo(val_le), addr, 8,
26021f9823ceSRichard Henderson                     mmu_idx, ra, mr, mr_offset);
2603d50ef446SAnton Johansson     ret = int_st_mmio_leN(cpu, full, int128_gethi(val_le), addr + 8,
26041f9823ceSRichard Henderson                           size - 8, mmu_idx, ra, mr, mr_offset + 8);
26051f9823ceSRichard Henderson     qemu_mutex_unlock_iothread();
26061f9823ceSRichard Henderson 
26071f9823ceSRichard Henderson     return ret;
26081f9823ceSRichard Henderson }
26091f9823ceSRichard Henderson 
26106b8b622eSRichard Henderson /*
261159213461SRichard Henderson  * Wrapper for the above.
26126b8b622eSRichard Henderson  */
2613d50ef446SAnton Johansson static uint64_t do_st_leN(CPUState *cpu, MMULookupPageData *p,
26145b36f268SRichard Henderson                           uint64_t val_le, int mmu_idx,
26155b36f268SRichard Henderson                           MemOp mop, uintptr_t ra)
261659213461SRichard Henderson {
26175b36f268SRichard Henderson     MemOp atom;
26185b36f268SRichard Henderson     unsigned tmp, half_size;
26195b36f268SRichard Henderson 
262059213461SRichard Henderson     if (unlikely(p->flags & TLB_MMIO)) {
2621d50ef446SAnton Johansson         return do_st_mmio_leN(cpu, p->full, val_le, p->addr,
26221966855eSRichard Henderson                               p->size, mmu_idx, ra);
262359213461SRichard Henderson     } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
262459213461SRichard Henderson         return val_le >> (p->size * 8);
26255b36f268SRichard Henderson     }
26265b36f268SRichard Henderson 
26275b36f268SRichard Henderson     /*
26285b36f268SRichard Henderson      * It is a given that we cross a page and therefore there is no atomicity
26295b36f268SRichard Henderson      * for the store as a whole, but subobjects may need attention.
26305b36f268SRichard Henderson      */
26315b36f268SRichard Henderson     atom = mop & MO_ATOM_MASK;
26325b36f268SRichard Henderson     switch (atom) {
26335b36f268SRichard Henderson     case MO_ATOM_SUBALIGN:
26345b36f268SRichard Henderson         return store_parts_leN(p->haddr, p->size, val_le);
26355b36f268SRichard Henderson 
26365b36f268SRichard Henderson     case MO_ATOM_IFALIGN_PAIR:
26375b36f268SRichard Henderson     case MO_ATOM_WITHIN16_PAIR:
26385b36f268SRichard Henderson         tmp = mop & MO_SIZE;
26395b36f268SRichard Henderson         tmp = tmp ? tmp - 1 : 0;
26405b36f268SRichard Henderson         half_size = 1 << tmp;
26415b36f268SRichard Henderson         if (atom == MO_ATOM_IFALIGN_PAIR
26425b36f268SRichard Henderson             ? p->size == half_size
26435b36f268SRichard Henderson             : p->size >= half_size) {
26445b36f268SRichard Henderson             if (!HAVE_al8_fast && p->size <= 4) {
26455b36f268SRichard Henderson                 return store_whole_le4(p->haddr, p->size, val_le);
26465b36f268SRichard Henderson             } else if (HAVE_al8) {
26475b36f268SRichard Henderson                 return store_whole_le8(p->haddr, p->size, val_le);
26486b8b622eSRichard Henderson             } else {
2649d50ef446SAnton Johansson                 cpu_loop_exit_atomic(cpu, ra);
26505b36f268SRichard Henderson             }
26515b36f268SRichard Henderson         }
26525b36f268SRichard Henderson         /* fall through */
26535b36f268SRichard Henderson 
26545b36f268SRichard Henderson     case MO_ATOM_IFALIGN:
26555b36f268SRichard Henderson     case MO_ATOM_WITHIN16:
26565b36f268SRichard Henderson     case MO_ATOM_NONE:
26575b36f268SRichard Henderson         return store_bytes_leN(p->haddr, p->size, val_le);
26585b36f268SRichard Henderson 
26595b36f268SRichard Henderson     default:
26605b36f268SRichard Henderson         g_assert_not_reached();
26616b8b622eSRichard Henderson     }
26626b8b622eSRichard Henderson }
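
/*
 * Illustrative: a 4-byte store of val_le = 0x11223344 split 1+3 across
 * pages writes 0x44 through page[0] and returns 0x112233; the caller
 * then feeds that return value to the page[1] half, so the result is
 * always "whatever remains to be stored".
 */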
26636b8b622eSRichard Henderson 
266435c653c4SRichard Henderson /*
266535c653c4SRichard Henderson  * Wrapper for the above, for 8 < size < 16.
266635c653c4SRichard Henderson  */
2667d50ef446SAnton Johansson static uint64_t do_st16_leN(CPUState *cpu, MMULookupPageData *p,
266835c653c4SRichard Henderson                             Int128 val_le, int mmu_idx,
266935c653c4SRichard Henderson                             MemOp mop, uintptr_t ra)
267035c653c4SRichard Henderson {
267135c653c4SRichard Henderson     int size = p->size;
267235c653c4SRichard Henderson     MemOp atom;
267335c653c4SRichard Henderson 
267435c653c4SRichard Henderson     if (unlikely(p->flags & TLB_MMIO)) {
2675d50ef446SAnton Johansson         return do_st16_mmio_leN(cpu, p->full, val_le, p->addr,
26761f9823ceSRichard Henderson                                 size, mmu_idx, ra);
267735c653c4SRichard Henderson     } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
267835c653c4SRichard Henderson         return int128_gethi(val_le) >> ((size - 8) * 8);
267935c653c4SRichard Henderson     }
268035c653c4SRichard Henderson 
268135c653c4SRichard Henderson     /*
268235c653c4SRichard Henderson      * It is a given that we cross a page and therefore there is no atomicity
268335c653c4SRichard Henderson      * for the store as a whole, but subobjects may need attention.
268435c653c4SRichard Henderson      */
268535c653c4SRichard Henderson     atom = mop & MO_ATOM_MASK;
268635c653c4SRichard Henderson     switch (atom) {
268735c653c4SRichard Henderson     case MO_ATOM_SUBALIGN:
268835c653c4SRichard Henderson         store_parts_leN(p->haddr, 8, int128_getlo(val_le));
268935c653c4SRichard Henderson         return store_parts_leN(p->haddr + 8, p->size - 8,
269035c653c4SRichard Henderson                                int128_gethi(val_le));
269135c653c4SRichard Henderson 
269235c653c4SRichard Henderson     case MO_ATOM_WITHIN16_PAIR:
269335c653c4SRichard Henderson         /* Since size > 8, this is the half that must be atomic. */
2694*6046f6e9SRichard Henderson         if (!HAVE_CMPXCHG128) {
2695d50ef446SAnton Johansson             cpu_loop_exit_atomic(cpu, ra);
269635c653c4SRichard Henderson         }
269735c653c4SRichard Henderson         return store_whole_le16(p->haddr, p->size, val_le);
269835c653c4SRichard Henderson 
269935c653c4SRichard Henderson     case MO_ATOM_IFALIGN_PAIR:
270035c653c4SRichard Henderson         /*
270135c653c4SRichard Henderson          * Since size > 8, both halves are misaligned,
270235c653c4SRichard Henderson          * and so neither is atomic.
270335c653c4SRichard Henderson          */
270435c653c4SRichard Henderson     case MO_ATOM_IFALIGN:
27052be6a486SRichard Henderson     case MO_ATOM_WITHIN16:
270635c653c4SRichard Henderson     case MO_ATOM_NONE:
270735c653c4SRichard Henderson         stq_le_p(p->haddr, int128_getlo(val_le));
270835c653c4SRichard Henderson         return store_bytes_leN(p->haddr + 8, p->size - 8,
270935c653c4SRichard Henderson                                int128_gethi(val_le));
271035c653c4SRichard Henderson 
271135c653c4SRichard Henderson     default:
271235c653c4SRichard Henderson         g_assert_not_reached();
271335c653c4SRichard Henderson     }
271435c653c4SRichard Henderson }
271535c653c4SRichard Henderson 
2716d50ef446SAnton Johansson static void do_st_1(CPUState *cpu, MMULookupPageData *p, uint8_t val,
271759213461SRichard Henderson                     int mmu_idx, uintptr_t ra)
2718eed56642SAlex Bennée {
271959213461SRichard Henderson     if (unlikely(p->flags & TLB_MMIO)) {
2720d50ef446SAnton Johansson         do_st_mmio_leN(cpu, p->full, val, p->addr, 1, mmu_idx, ra);
272159213461SRichard Henderson     } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
272259213461SRichard Henderson         /* nothing */
27235b87b3e6SRichard Henderson     } else {
272459213461SRichard Henderson         *(uint8_t *)p->haddr = val;
27255b87b3e6SRichard Henderson     }
2726eed56642SAlex Bennée }
2727eed56642SAlex Bennée 
2728d50ef446SAnton Johansson static void do_st_2(CPUState *cpu, MMULookupPageData *p, uint16_t val,
272959213461SRichard Henderson                     int mmu_idx, MemOp memop, uintptr_t ra)
2730eed56642SAlex Bennée {
273159213461SRichard Henderson     if (unlikely(p->flags & TLB_MMIO)) {
2732f7eaf9d7SRichard Henderson         if ((memop & MO_BSWAP) != MO_LE) {
2733f7eaf9d7SRichard Henderson             val = bswap16(val);
2734f7eaf9d7SRichard Henderson         }
2735d50ef446SAnton Johansson         do_st_mmio_leN(cpu, p->full, val, p->addr, 2, mmu_idx, ra);
273659213461SRichard Henderson     } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
273759213461SRichard Henderson         /* nothing */
273859213461SRichard Henderson     } else {
273959213461SRichard Henderson         /* Swap to host endian if necessary, then store. */
274059213461SRichard Henderson         if (memop & MO_BSWAP) {
274159213461SRichard Henderson             val = bswap16(val);
274259213461SRichard Henderson         }
274373fda56fSAnton Johansson         store_atom_2(cpu, ra, p->haddr, memop, val);
274459213461SRichard Henderson     }
274559213461SRichard Henderson }
274659213461SRichard Henderson 
2747d50ef446SAnton Johansson static void do_st_4(CPUState *cpu, MMULookupPageData *p, uint32_t val,
274859213461SRichard Henderson                     int mmu_idx, MemOp memop, uintptr_t ra)
274959213461SRichard Henderson {
275059213461SRichard Henderson     if (unlikely(p->flags & TLB_MMIO)) {
2751f7eaf9d7SRichard Henderson         if ((memop & MO_BSWAP) != MO_LE) {
2752f7eaf9d7SRichard Henderson             val = bswap32(val);
2753f7eaf9d7SRichard Henderson         }
2754d50ef446SAnton Johansson         do_st_mmio_leN(cpu, p->full, val, p->addr, 4, mmu_idx, ra);
275559213461SRichard Henderson     } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
275659213461SRichard Henderson         /* nothing */
275759213461SRichard Henderson     } else {
275859213461SRichard Henderson         /* Swap to host endian if necessary, then store. */
275959213461SRichard Henderson         if (memop & MO_BSWAP) {
276059213461SRichard Henderson             val = bswap32(val);
276159213461SRichard Henderson         }
276273fda56fSAnton Johansson         store_atom_4(cpu, ra, p->haddr, memop, val);
276359213461SRichard Henderson     }
276459213461SRichard Henderson }
276559213461SRichard Henderson 
2766d50ef446SAnton Johansson static void do_st_8(CPUState *cpu, MMULookupPageData *p, uint64_t val,
276759213461SRichard Henderson                     int mmu_idx, MemOp memop, uintptr_t ra)
276859213461SRichard Henderson {
276959213461SRichard Henderson     if (unlikely(p->flags & TLB_MMIO)) {
2770f7eaf9d7SRichard Henderson         if ((memop & MO_BSWAP) != MO_LE) {
2771f7eaf9d7SRichard Henderson             val = bswap64(val);
2772f7eaf9d7SRichard Henderson         }
2773d50ef446SAnton Johansson         do_st_mmio_leN(cpu, p->full, val, p->addr, 8, mmu_idx, ra);
277459213461SRichard Henderson     } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
277559213461SRichard Henderson         /* nothing */
277659213461SRichard Henderson     } else {
277759213461SRichard Henderson         /* Swap to host endian if necessary, then store. */
277859213461SRichard Henderson         if (memop & MO_BSWAP) {
277959213461SRichard Henderson             val = bswap64(val);
278059213461SRichard Henderson         }
278173fda56fSAnton Johansson         store_atom_8(cpu, ra, p->haddr, memop, val);
278259213461SRichard Henderson     }
2783eed56642SAlex Bennée }
2784eed56642SAlex Bennée 
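/*
 * The do_st_2/4/8 helpers above share one dispatch shape.  On the MMIO
 * path the value is canonicalized to little-endian first, because
 * do_st_mmio_leN() emits bytes least-significant first as the address
 * increases, so only a big-endian access ((memop & MO_BSWAP) != MO_LE)
 * needs the swap.  On a TLB_DISCARD_WRITE page the store is silently
 * dropped.  Otherwise the value is swapped to host endianness and
 * store_atom_{2,4,8}() writes it through the host pointer p->haddr
 * with the atomicity that memop requires.
 */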
2785e20f73fbSAnton Johansson static void do_st1_mmu(CPUState *cpu, vaddr addr, uint8_t val,
278659213461SRichard Henderson                        MemOpIdx oi, uintptr_t ra)
2787f83bcecbSRichard Henderson {
278859213461SRichard Henderson     MMULookupLocals l;
278959213461SRichard Henderson     bool crosspage;
279059213461SRichard Henderson 
2791f86e8f3dSRichard Henderson     cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
2792e20f73fbSAnton Johansson     crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
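    /* A single byte can never cross a page boundary. */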
279359213461SRichard Henderson     tcg_debug_assert(!crosspage);
279459213461SRichard Henderson 
2795e20f73fbSAnton Johansson     do_st_1(cpu, &l.page[0], val, l.mmu_idx, ra);
2796e20f73fbSAnton Johansson }
2797e20f73fbSAnton Johansson 
2798d50ef446SAnton Johansson static void do_st2_mmu(CPUState *cpu, vaddr addr, uint16_t val,
279959213461SRichard Henderson                        MemOpIdx oi, uintptr_t ra)
2800f83bcecbSRichard Henderson {
280159213461SRichard Henderson     MMULookupLocals l;
280259213461SRichard Henderson     bool crosspage;
280359213461SRichard Henderson     uint8_t a, b;
280459213461SRichard Henderson 
2805f86e8f3dSRichard Henderson     cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
2806d50ef446SAnton Johansson     crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
280759213461SRichard Henderson     if (likely(!crosspage)) {
2808d50ef446SAnton Johansson         do_st_2(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
280959213461SRichard Henderson         return;
281059213461SRichard Henderson     }
281159213461SRichard Henderson 
281259213461SRichard Henderson     if ((l.memop & MO_BSWAP) == MO_LE) {
281359213461SRichard Henderson         a = val, b = val >> 8;
281459213461SRichard Henderson     } else {
281559213461SRichard Henderson         b = val, a = val >> 8;
281659213461SRichard Henderson     }
2817d50ef446SAnton Johansson     do_st_1(cpu, &l.page[0], a, l.mmu_idx, ra);
2818d50ef446SAnton Johansson     do_st_1(cpu, &l.page[1], b, l.mmu_idx, ra);
2819f83bcecbSRichard Henderson }
2820f83bcecbSRichard Henderson 
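/*
 * Worked example for the cross-page split above: storing val = 0x1234.
 * For a little-endian access, a = 0x34 lands in the last byte of page[0]
 * (the lower address) and b = 0x12 in the first byte of page[1]; for
 * big-endian the roles are reversed.  Either way guest memory ends up
 * byte-for-byte identical to a non-crossing store of the same value.
 */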
2821d50ef446SAnton Johansson static void do_st4_mmu(CPUState *cpu, vaddr addr, uint32_t val,
282259213461SRichard Henderson                        MemOpIdx oi, uintptr_t ra)
2823f83bcecbSRichard Henderson {
282459213461SRichard Henderson     MMULookupLocals l;
282559213461SRichard Henderson     bool crosspage;
282659213461SRichard Henderson 
2827f86e8f3dSRichard Henderson     cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
2828d50ef446SAnton Johansson     crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
282959213461SRichard Henderson     if (likely(!crosspage)) {
2830d50ef446SAnton Johansson         do_st_4(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
283159213461SRichard Henderson         return;
283259213461SRichard Henderson     }
283359213461SRichard Henderson 
283459213461SRichard Henderson     /* Swap to little endian for simplicity, then store by bytes. */
283559213461SRichard Henderson     if ((l.memop & MO_BSWAP) != MO_LE) {
283659213461SRichard Henderson         val = bswap32(val);
283759213461SRichard Henderson     }
2838d50ef446SAnton Johansson     val = do_st_leN(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
2839d50ef446SAnton Johansson     (void) do_st_leN(cpu, &l.page[1], val, l.mmu_idx, l.memop, ra);
2840eed56642SAlex Bennée }
2841eed56642SAlex Bennée 
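/*
 * Note the chaining above: do_st_leN() (defined earlier in this file)
 * stores page[0].size bytes of the little-endian value and returns the
 * value shifted down by the bytes consumed, so the second call sees only
 * the bytes destined for page[1].  For a 4-byte store with one byte on
 * page[0], the first call writes the low byte and returns val >> 8, and
 * the second writes the remaining three; its return value is unused,
 * hence the (void) cast.
 */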
2842d50ef446SAnton Johansson static void do_st8_mmu(CPUState *cpu, vaddr addr, uint64_t val,
284359213461SRichard Henderson                        MemOpIdx oi, uintptr_t ra)
284459213461SRichard Henderson {
284559213461SRichard Henderson     MMULookupLocals l;
284659213461SRichard Henderson     bool crosspage;
284759213461SRichard Henderson 
2848f86e8f3dSRichard Henderson     cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
2849d50ef446SAnton Johansson     crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
285059213461SRichard Henderson     if (likely(!crosspage)) {
2851d50ef446SAnton Johansson         do_st_8(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
285259213461SRichard Henderson         return;
285359213461SRichard Henderson     }
285459213461SRichard Henderson 
285559213461SRichard Henderson     /* Swap to little endian for simplicity, then store by bytes. */
285659213461SRichard Henderson     if ((l.memop & MO_BSWAP) != MO_LE) {
285759213461SRichard Henderson         val = bswap64(val);
285859213461SRichard Henderson     }
2859d50ef446SAnton Johansson     val = do_st_leN(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
2860d50ef446SAnton Johansson     (void) do_st_leN(cpu, &l.page[1], val, l.mmu_idx, l.memop, ra);
2861eed56642SAlex Bennée }
2862eed56642SAlex Bennée 
2863d50ef446SAnton Johansson static void do_st16_mmu(CPUState *cpu, vaddr addr, Int128 val,
286435c653c4SRichard Henderson                         MemOpIdx oi, uintptr_t ra)
286535c653c4SRichard Henderson {
286635c653c4SRichard Henderson     MMULookupLocals l;
286735c653c4SRichard Henderson     bool crosspage;
286835c653c4SRichard Henderson     uint64_t a, b;
286935c653c4SRichard Henderson     int first;
287035c653c4SRichard Henderson 
2871f86e8f3dSRichard Henderson     cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
2872d50ef446SAnton Johansson     crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
287335c653c4SRichard Henderson     if (likely(!crosspage)) {
2874f7eaf9d7SRichard Henderson         if (unlikely(l.page[0].flags & TLB_MMIO)) {
2875f7eaf9d7SRichard Henderson             if ((l.memop & MO_BSWAP) != MO_LE) {
2876f7eaf9d7SRichard Henderson                 val = bswap128(val);
2877f7eaf9d7SRichard Henderson             }
2878d50ef446SAnton Johansson             do_st16_mmio_leN(cpu, l.page[0].full, val, addr, 16, l.mmu_idx, ra);
2879f7eaf9d7SRichard Henderson         } else if (unlikely(l.page[0].flags & TLB_DISCARD_WRITE)) {
2880f7eaf9d7SRichard Henderson             /* nothing */
2881f7eaf9d7SRichard Henderson         } else {
288235c653c4SRichard Henderson             /* Swap to host endian if necessary, then store. */
288335c653c4SRichard Henderson             if (l.memop & MO_BSWAP) {
288435c653c4SRichard Henderson                 val = bswap128(val);
288535c653c4SRichard Henderson             }
288673fda56fSAnton Johansson             store_atom_16(cpu, ra, l.page[0].haddr, l.memop, val);
288735c653c4SRichard Henderson         }
288835c653c4SRichard Henderson         return;
288935c653c4SRichard Henderson     }
289035c653c4SRichard Henderson 
289135c653c4SRichard Henderson     first = l.page[0].size;
289235c653c4SRichard Henderson     if (first == 8) {
289335c653c4SRichard Henderson         MemOp mop8 = (l.memop & ~(MO_SIZE | MO_BSWAP)) | MO_64;
289435c653c4SRichard Henderson 
289535c653c4SRichard Henderson         if (l.memop & MO_BSWAP) {
289635c653c4SRichard Henderson             val = bswap128(val);
289735c653c4SRichard Henderson         }
289835c653c4SRichard Henderson         if (HOST_BIG_ENDIAN) {
289935c653c4SRichard Henderson             b = int128_getlo(val), a = int128_gethi(val);
290035c653c4SRichard Henderson         } else {
290135c653c4SRichard Henderson             a = int128_getlo(val), b = int128_gethi(val);
290235c653c4SRichard Henderson         }
2903d50ef446SAnton Johansson         do_st_8(cpu, &l.page[0], a, l.mmu_idx, mop8, ra);
2904d50ef446SAnton Johansson         do_st_8(cpu, &l.page[1], b, l.mmu_idx, mop8, ra);
290535c653c4SRichard Henderson         return;
290635c653c4SRichard Henderson     }
290735c653c4SRichard Henderson 
290835c653c4SRichard Henderson     if ((l.memop & MO_BSWAP) != MO_LE) {
290935c653c4SRichard Henderson         val = bswap128(val);
291035c653c4SRichard Henderson     }
291135c653c4SRichard Henderson     if (first < 8) {
2912d50ef446SAnton Johansson         do_st_leN(cpu, &l.page[0], int128_getlo(val), l.mmu_idx, l.memop, ra);
291335c653c4SRichard Henderson         val = int128_urshift(val, first * 8);
2914d50ef446SAnton Johansson         do_st16_leN(cpu, &l.page[1], val, l.mmu_idx, l.memop, ra);
291535c653c4SRichard Henderson     } else {
2916d50ef446SAnton Johansson         b = do_st16_leN(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
2917d50ef446SAnton Johansson         do_st_leN(cpu, &l.page[1], b, l.mmu_idx, l.memop, ra);
291835c653c4SRichard Henderson     }
291935c653c4SRichard Henderson }
292035c653c4SRichard Henderson 
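/*
 * Summary of the cross-page cases above, by the size of the page[0]
 * fragment ("first"):
 *   first == 8: an exact 64/64 split, so each half goes through
 *               do_st_8() with mop8 (size forced to MO_64, MO_BSWAP
 *               cleared because the swap was already applied);
 *   first <  8: do_st_leN() stores part of the low quad on page[0],
 *               the 128-bit value is shifted right by first * 8, and
 *               do_st16_leN() stores the rest on page[1];
 *   first >  8: do_st16_leN() covers page[0] and returns the leftover
 *               high bytes, which do_st_leN() then stores on page[1].
 */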
2921f83bcecbSRichard Henderson #include "ldst_common.c.inc"
2922cfe04a4bSRichard Henderson 
2923be9568b4SRichard Henderson /*
2924be9568b4SRichard Henderson  * First set of functions passes in OI and RETADDR.
2925be9568b4SRichard Henderson  * This makes them callable from other helpers.
2926be9568b4SRichard Henderson  */
2927d9bb58e5SYang Zhong 
2928d9bb58e5SYang Zhong #define ATOMIC_NAME(X) \
2929be9568b4SRichard Henderson     glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)
2930a754f7f3SRichard Henderson 
2931707526adSRichard Henderson #define ATOMIC_MMU_CLEANUP
2932d9bb58e5SYang Zhong 
2933139c1837SPaolo Bonzini #include "atomic_common.c.inc"
2934d9bb58e5SYang Zhong 
2935d9bb58e5SYang Zhong #define DATA_SIZE 1
2936d9bb58e5SYang Zhong #include "atomic_template.h"
2937d9bb58e5SYang Zhong 
2938d9bb58e5SYang Zhong #define DATA_SIZE 2
2939d9bb58e5SYang Zhong #include "atomic_template.h"
2940d9bb58e5SYang Zhong 
2941d9bb58e5SYang Zhong #define DATA_SIZE 4
2942d9bb58e5SYang Zhong #include "atomic_template.h"
2943d9bb58e5SYang Zhong 
2944d9bb58e5SYang Zhong #ifdef CONFIG_ATOMIC64
2945d9bb58e5SYang Zhong #define DATA_SIZE 8
2946d9bb58e5SYang Zhong #include "atomic_template.h"
2947d9bb58e5SYang Zhong #endif
2948d9bb58e5SYang Zhong 
294976f9d6adSRichard Henderson #if defined(CONFIG_ATOMIC128) || HAVE_CMPXCHG128
2950d9bb58e5SYang Zhong #define DATA_SIZE 16
2951d9bb58e5SYang Zhong #include "atomic_template.h"
2952d9bb58e5SYang Zhong #endif
2953d9bb58e5SYang Zhong 
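/*
 * Each inclusion of atomic_template.h above consumes one DATA_SIZE
 * definition and expands, via ATOMIC_NAME(), into the cpu_atomic_*_mmu
 * helpers of that width.  As an illustration, with DATA_SIZE 4 the
 * template's SUFFIX is 'l' and END is _le/_be, so
 *
 *     ATOMIC_NAME(cmpxchg) -> cpu_atomic_cmpxchgl_le_mmu
 *                             cpu_atomic_cmpxchgl_be_mmu
 *
 * ATOMIC_MMU_CLEANUP expands to nothing here because the softmmu
 * lookup leaves nothing to release.
 */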
2954d9bb58e5SYang Zhong /* Code access functions.  */
2955d9bb58e5SYang Zhong 
2956fc4120a3SRichard Henderson uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr)
2957eed56642SAlex Bennée {
29589002ffcbSRichard Henderson     MemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(env, true));
2959d50ef446SAnton Johansson     return do_ld1_mmu(env_cpu(env), addr, oi, 0, MMU_INST_FETCH);
29604cef72d0SAlex Bennée }
29614cef72d0SAlex Bennée 
2962fc4120a3SRichard Henderson uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr)
29632dd92606SRichard Henderson {
29649002ffcbSRichard Henderson     MemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(env, true));
2965d50ef446SAnton Johansson     return do_ld2_mmu(env_cpu(env), addr, oi, 0, MMU_INST_FETCH);
29662dd92606SRichard Henderson }
29672dd92606SRichard Henderson 
2968fc4120a3SRichard Henderson uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr)
29694cef72d0SAlex Bennée {
29709002ffcbSRichard Henderson     MemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, true));
2971d50ef446SAnton Johansson     return do_ld4_mmu(env_cpu(env), addr, oi, 0, MMU_INST_FETCH);
2972eed56642SAlex Bennée }
2973d9bb58e5SYang Zhong 
2974fc4120a3SRichard Henderson uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr)
2975eed56642SAlex Bennée {
2976fc313c64SFrédéric Pétrot     MemOpIdx oi = make_memop_idx(MO_TEUQ, cpu_mmu_index(env, true));
2977d50ef446SAnton Johansson     return do_ld8_mmu(env_cpu(env), addr, oi, 0, MMU_INST_FETCH);
2978eed56642SAlex Bennée }
297928990626SRichard Henderson 
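/*
 * Typical use (illustrative, not from this file): a target translator
 * fetching the next opcode,
 *
 *     uint32_t insn = cpu_ldl_code(env, ctx->base.pc_next);
 *
 * The 'true' argument to cpu_mmu_index() selects the instruction-fetch
 * MMU index, and MMU_INST_FETCH makes the slow path check execute
 * rather than read permission.
 */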
298028990626SRichard Henderson uint8_t cpu_ldb_code_mmu(CPUArchState *env, abi_ptr addr,
298128990626SRichard Henderson                          MemOpIdx oi, uintptr_t retaddr)
298228990626SRichard Henderson {
2983d50ef446SAnton Johansson     return do_ld1_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH);
298428990626SRichard Henderson }
298528990626SRichard Henderson 
298628990626SRichard Henderson uint16_t cpu_ldw_code_mmu(CPUArchState *env, abi_ptr addr,
298728990626SRichard Henderson                           MemOpIdx oi, uintptr_t retaddr)
298828990626SRichard Henderson {
2989d50ef446SAnton Johansson     return do_ld2_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH);
299028990626SRichard Henderson }
299128990626SRichard Henderson 
299228990626SRichard Henderson uint32_t cpu_ldl_code_mmu(CPUArchState *env, abi_ptr addr,
299328990626SRichard Henderson                           MemOpIdx oi, uintptr_t retaddr)
299428990626SRichard Henderson {
2995d50ef446SAnton Johansson     return do_ld4_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH);
299628990626SRichard Henderson }
299728990626SRichard Henderson 
299828990626SRichard Henderson uint64_t cpu_ldq_code_mmu(CPUArchState *env, abi_ptr addr,
299928990626SRichard Henderson                           MemOpIdx oi, uintptr_t retaddr)
300028990626SRichard Henderson {
3001d50ef446SAnton Johansson     return do_ld8_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH);
300228990626SRichard Henderson }
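/*
 * Unlike the fixed-index wrappers above, the *_code_mmu variants let
 * the caller supply the MemOpIdx and unwind address, e.g. (illustrative)
 * for a fetch on behalf of a non-default mmu index:
 *
 *     MemOpIdx oi = make_memop_idx(MO_TEUL, mmu_idx);
 *     uint32_t insn = cpu_ldl_code_mmu(env, addr, oi, retaddr);
 */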