xref: /openbmc/qemu/accel/tcg/cputlb.c (revision aacfd8bbaf99444f84b408e6b052651fb8056c41)
1d9bb58e5SYang Zhong /*
2d9bb58e5SYang Zhong  *  Common CPU TLB handling
3d9bb58e5SYang Zhong  *
4d9bb58e5SYang Zhong  *  Copyright (c) 2003 Fabrice Bellard
5d9bb58e5SYang Zhong  *
6d9bb58e5SYang Zhong  * This library is free software; you can redistribute it and/or
7d9bb58e5SYang Zhong  * modify it under the terms of the GNU Lesser General Public
8d9bb58e5SYang Zhong  * License as published by the Free Software Foundation; either
9fb0343d5SThomas Huth  * version 2.1 of the License, or (at your option) any later version.
10d9bb58e5SYang Zhong  *
11d9bb58e5SYang Zhong  * This library is distributed in the hope that it will be useful,
12d9bb58e5SYang Zhong  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13d9bb58e5SYang Zhong  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14d9bb58e5SYang Zhong  * Lesser General Public License for more details.
15d9bb58e5SYang Zhong  *
16d9bb58e5SYang Zhong  * You should have received a copy of the GNU Lesser General Public
17d9bb58e5SYang Zhong  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18d9bb58e5SYang Zhong  */
19d9bb58e5SYang Zhong 
20d9bb58e5SYang Zhong #include "qemu/osdep.h"
21d9bb58e5SYang Zhong #include "qemu/main-loop.h"
2278271684SClaudio Fontana #include "hw/core/tcg-cpu-ops.h"
23d9bb58e5SYang Zhong #include "exec/exec-all.h"
24d9bb58e5SYang Zhong #include "exec/memory.h"
25d9bb58e5SYang Zhong #include "exec/cpu_ldst.h"
26d9bb58e5SYang Zhong #include "exec/cputlb.h"
27f4f826c0SPhilippe Mathieu-Daudé #include "exec/tb-flush.h"
28d9bb58e5SYang Zhong #include "exec/memory-internal.h"
29d9bb58e5SYang Zhong #include "exec/ram_addr.h"
30*aacfd8bbSPhilippe Mathieu-Daudé #include "exec/mmu-access-type.h"
31*aacfd8bbSPhilippe Mathieu-Daudé #include "exec/tlb-common.h"
32*aacfd8bbSPhilippe Mathieu-Daudé #include "exec/vaddr.h"
33d9bb58e5SYang Zhong #include "tcg/tcg.h"
34d9bb58e5SYang Zhong #include "qemu/error-report.h"
35d9bb58e5SYang Zhong #include "exec/log.h"
36c213ee2dSRichard Henderson #include "exec/helper-proto-common.h"
37d9bb58e5SYang Zhong #include "qemu/atomic.h"
38e6cd4bb5SRichard Henderson #include "qemu/atomic128.h"
393b9bd3f4SPaolo Bonzini #include "exec/translate-all.h"
4051807763SPhilippe Mathieu-Daudé #include "trace.h"
41e5ceadffSPhilippe Mathieu-Daudé #include "tb-hash.h"
4243e7a2d3SPhilippe Mathieu-Daudé #include "internal-common.h"
434c268d6dSPhilippe Mathieu-Daudé #include "internal-target.h"
44235537faSAlex Bennée #ifdef CONFIG_PLUGIN
45235537faSAlex Bennée #include "qemu/plugin-memory.h"
46235537faSAlex Bennée #endif
47d2ba8026SRichard Henderson #include "tcg/tcg-ldst.h"
4870f168f8SRichard Henderson #include "tcg/oversized-guest.h"
49d9bb58e5SYang Zhong 
50d9bb58e5SYang Zhong /* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
51d9bb58e5SYang Zhong /* #define DEBUG_TLB */
52d9bb58e5SYang Zhong /* #define DEBUG_TLB_LOG */
53d9bb58e5SYang Zhong 
54d9bb58e5SYang Zhong #ifdef DEBUG_TLB
55d9bb58e5SYang Zhong # define DEBUG_TLB_GATE 1
56d9bb58e5SYang Zhong # ifdef DEBUG_TLB_LOG
57d9bb58e5SYang Zhong #  define DEBUG_TLB_LOG_GATE 1
58d9bb58e5SYang Zhong # else
59d9bb58e5SYang Zhong #  define DEBUG_TLB_LOG_GATE 0
60d9bb58e5SYang Zhong # endif
61d9bb58e5SYang Zhong #else
62d9bb58e5SYang Zhong # define DEBUG_TLB_GATE 0
63d9bb58e5SYang Zhong # define DEBUG_TLB_LOG_GATE 0
64d9bb58e5SYang Zhong #endif
65d9bb58e5SYang Zhong 
66d9bb58e5SYang Zhong #define tlb_debug(fmt, ...) do { \
67d9bb58e5SYang Zhong     if (DEBUG_TLB_LOG_GATE) { \
68d9bb58e5SYang Zhong         qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
69d9bb58e5SYang Zhong                       ## __VA_ARGS__); \
70d9bb58e5SYang Zhong     } else if (DEBUG_TLB_GATE) { \
71d9bb58e5SYang Zhong         fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
72d9bb58e5SYang Zhong     } \
73d9bb58e5SYang Zhong } while (0)
74d9bb58e5SYang Zhong 
75ea9025cbSEmilio G. Cota #define assert_cpu_is_self(cpu) do {                              \
76d9bb58e5SYang Zhong         if (DEBUG_TLB_GATE) {                                     \
77ea9025cbSEmilio G. Cota             g_assert(!(cpu)->created || qemu_cpu_is_self(cpu));   \
78d9bb58e5SYang Zhong         }                                                         \
79d9bb58e5SYang Zhong     } while (0)
80d9bb58e5SYang Zhong 
81d9bb58e5SYang Zhong /* run_on_cpu_data.target_ptr should always be big enough for a
82e79f8142SAnton Johansson  * vaddr even on 32 bit builds
83e79f8142SAnton Johansson  */
84e79f8142SAnton Johansson QEMU_BUILD_BUG_ON(sizeof(vaddr) > sizeof(run_on_cpu_data));
85d9bb58e5SYang Zhong 
86d9bb58e5SYang Zhong /* We currently can't handle more than 16 bits in the MMUIDX bitmask.
87d9bb58e5SYang Zhong  */
88d9bb58e5SYang Zhong QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
89d9bb58e5SYang Zhong #define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)
90d9bb58e5SYang Zhong 
91722a1c1eSRichard Henderson static inline size_t tlb_n_entries(CPUTLBDescFast *fast)
927a1efe1bSRichard Henderson {
93722a1c1eSRichard Henderson     return (fast->mask >> CPU_TLB_ENTRY_BITS) + 1;
947a1efe1bSRichard Henderson }
957a1efe1bSRichard Henderson 
96722a1c1eSRichard Henderson static inline size_t sizeof_tlb(CPUTLBDescFast *fast)
9786e1eff8SEmilio G. Cota {
98722a1c1eSRichard Henderson     return fast->mask + (1 << CPU_TLB_ENTRY_BITS);
9986e1eff8SEmilio G. Cota }
10086e1eff8SEmilio G. Cota 
101*aacfd8bbSPhilippe Mathieu-Daudé static inline uint64_t tlb_read_idx(const CPUTLBEntry *entry,
102*aacfd8bbSPhilippe Mathieu-Daudé                                     MMUAccessType access_type)
103*aacfd8bbSPhilippe Mathieu-Daudé {
104*aacfd8bbSPhilippe Mathieu-Daudé     /* Do not rearrange the CPUTLBEntry structure members. */
105*aacfd8bbSPhilippe Mathieu-Daudé     QEMU_BUILD_BUG_ON(offsetof(CPUTLBEntry, addr_read) !=
106*aacfd8bbSPhilippe Mathieu-Daudé                       MMU_DATA_LOAD * sizeof(uint64_t));
107*aacfd8bbSPhilippe Mathieu-Daudé     QEMU_BUILD_BUG_ON(offsetof(CPUTLBEntry, addr_write) !=
108*aacfd8bbSPhilippe Mathieu-Daudé                       MMU_DATA_STORE * sizeof(uint64_t));
109*aacfd8bbSPhilippe Mathieu-Daudé     QEMU_BUILD_BUG_ON(offsetof(CPUTLBEntry, addr_code) !=
110*aacfd8bbSPhilippe Mathieu-Daudé                       MMU_INST_FETCH * sizeof(uint64_t));
111*aacfd8bbSPhilippe Mathieu-Daudé 
112*aacfd8bbSPhilippe Mathieu-Daudé #if TARGET_LONG_BITS == 32
113*aacfd8bbSPhilippe Mathieu-Daudé     /* Use qatomic_read, in case of addr_write; only care about low bits. */
114*aacfd8bbSPhilippe Mathieu-Daudé     const uint32_t *ptr = (uint32_t *)&entry->addr_idx[access_type];
115*aacfd8bbSPhilippe Mathieu-Daudé     ptr += HOST_BIG_ENDIAN;
116*aacfd8bbSPhilippe Mathieu-Daudé     return qatomic_read(ptr);
117*aacfd8bbSPhilippe Mathieu-Daudé #else
118*aacfd8bbSPhilippe Mathieu-Daudé     const uint64_t *ptr = &entry->addr_idx[access_type];
119*aacfd8bbSPhilippe Mathieu-Daudé # if TCG_OVERSIZED_GUEST
120*aacfd8bbSPhilippe Mathieu-Daudé     return *ptr;
121*aacfd8bbSPhilippe Mathieu-Daudé # else
122*aacfd8bbSPhilippe Mathieu-Daudé     /* ofs might correspond to .addr_write, so use qatomic_read */
123*aacfd8bbSPhilippe Mathieu-Daudé     return qatomic_read(ptr);
124*aacfd8bbSPhilippe Mathieu-Daudé # endif
125*aacfd8bbSPhilippe Mathieu-Daudé #endif
126*aacfd8bbSPhilippe Mathieu-Daudé }
127*aacfd8bbSPhilippe Mathieu-Daudé 
128*aacfd8bbSPhilippe Mathieu-Daudé static inline uint64_t tlb_addr_write(const CPUTLBEntry *entry)
129*aacfd8bbSPhilippe Mathieu-Daudé {
130*aacfd8bbSPhilippe Mathieu-Daudé     return tlb_read_idx(entry, MMU_DATA_STORE);
131*aacfd8bbSPhilippe Mathieu-Daudé }
132*aacfd8bbSPhilippe Mathieu-Daudé 
133*aacfd8bbSPhilippe Mathieu-Daudé /* Find the TLB index corresponding to the mmu_idx + address pair.  */
134*aacfd8bbSPhilippe Mathieu-Daudé static inline uintptr_t tlb_index(CPUState *cpu, uintptr_t mmu_idx,
135*aacfd8bbSPhilippe Mathieu-Daudé                                   vaddr addr)
136*aacfd8bbSPhilippe Mathieu-Daudé {
137*aacfd8bbSPhilippe Mathieu-Daudé     uintptr_t size_mask = cpu->neg.tlb.f[mmu_idx].mask >> CPU_TLB_ENTRY_BITS;
138*aacfd8bbSPhilippe Mathieu-Daudé 
139*aacfd8bbSPhilippe Mathieu-Daudé     return (addr >> TARGET_PAGE_BITS) & size_mask;
140*aacfd8bbSPhilippe Mathieu-Daudé }
141*aacfd8bbSPhilippe Mathieu-Daudé 
142*aacfd8bbSPhilippe Mathieu-Daudé /* Find the TLB entry corresponding to the mmu_idx + address pair.  */
143*aacfd8bbSPhilippe Mathieu-Daudé static inline CPUTLBEntry *tlb_entry(CPUState *cpu, uintptr_t mmu_idx,
144*aacfd8bbSPhilippe Mathieu-Daudé                                      vaddr addr)
145*aacfd8bbSPhilippe Mathieu-Daudé {
146*aacfd8bbSPhilippe Mathieu-Daudé     return &cpu->neg.tlb.f[mmu_idx].table[tlb_index(cpu, mmu_idx, addr)];
147*aacfd8bbSPhilippe Mathieu-Daudé }
148*aacfd8bbSPhilippe Mathieu-Daudé 
14979e42085SRichard Henderson static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
15086e1eff8SEmilio G. Cota                              size_t max_entries)
15186e1eff8SEmilio G. Cota {
15279e42085SRichard Henderson     desc->window_begin_ns = ns;
15379e42085SRichard Henderson     desc->window_max_entries = max_entries;
15486e1eff8SEmilio G. Cota }
15586e1eff8SEmilio G. Cota 
15606f3831cSAnton Johansson static void tb_jmp_cache_clear_page(CPUState *cpu, vaddr page_addr)
1570f4abea8SRichard Henderson {
158a976a99aSRichard Henderson     CPUJumpCache *jc = cpu->tb_jmp_cache;
15999ab4d50SEric Auger     int i, i0;
1600f4abea8SRichard Henderson 
16199ab4d50SEric Auger     if (unlikely(!jc)) {
16299ab4d50SEric Auger         return;
16399ab4d50SEric Auger     }
16499ab4d50SEric Auger 
16599ab4d50SEric Auger     i0 = tb_jmp_cache_hash_page(page_addr);
1660f4abea8SRichard Henderson     for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
167a976a99aSRichard Henderson         qatomic_set(&jc->array[i0 + i].tb, NULL);
1680f4abea8SRichard Henderson     }
1690f4abea8SRichard Henderson }
1700f4abea8SRichard Henderson 
17186e1eff8SEmilio G. Cota /**
17286e1eff8SEmilio G. Cota  * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
17371ccd47bSRichard Henderson  * @desc: The CPUTLBDesc portion of the TLB
17471ccd47bSRichard Henderson  * @fast: The CPUTLBDescFast portion of the same TLB
17586e1eff8SEmilio G. Cota  *
17686e1eff8SEmilio G. Cota  * Called with tlb_lock_held.
17786e1eff8SEmilio G. Cota  *
17886e1eff8SEmilio G. Cota  * We have two main constraints when resizing a TLB: (1) we only resize it
17986e1eff8SEmilio G. Cota  * on a TLB flush (otherwise we'd have to take a perf hit by either rehashing
18086e1eff8SEmilio G. Cota  * the array or unnecessarily flushing it), which means we do not control how
18186e1eff8SEmilio G. Cota  * frequently the resizing can occur; (2) we don't have access to the guest's
18286e1eff8SEmilio G. Cota  * future scheduling decisions, and therefore have to decide the magnitude of
18386e1eff8SEmilio G. Cota  * the resize based on past observations.
18486e1eff8SEmilio G. Cota  *
18586e1eff8SEmilio G. Cota  * In general, a memory-hungry process can benefit greatly from an appropriately
18686e1eff8SEmilio G. Cota  * sized TLB, since a guest TLB miss is very expensive. This doesn't mean that
18786e1eff8SEmilio G. Cota  * we just have to make the TLB as large as possible; while an oversized TLB
18886e1eff8SEmilio G. Cota  * results in minimal TLB miss rates, it also takes longer to be flushed
18986e1eff8SEmilio G. Cota  * (flushes can be _very_ frequent), and the reduced locality can also hurt
19086e1eff8SEmilio G. Cota  * performance.
19186e1eff8SEmilio G. Cota  *
19286e1eff8SEmilio G. Cota  * To achieve near-optimal performance for all kinds of workloads, we:
19386e1eff8SEmilio G. Cota  *
19486e1eff8SEmilio G. Cota  * 1. Aggressively increase the size of the TLB when the use rate of the
19586e1eff8SEmilio G. Cota  * TLB being flushed is high, since it is likely that in the near future this
19686e1eff8SEmilio G. Cota  * memory-hungry process will execute again, and its memory hungriness will
19786e1eff8SEmilio G. Cota  * probably be similar.
19886e1eff8SEmilio G. Cota  *
19986e1eff8SEmilio G. Cota  * 2. Slowly reduce the size of the TLB as the use rate declines over a
20086e1eff8SEmilio G. Cota  * reasonably large time window. The rationale is that if in such a time window
20186e1eff8SEmilio G. Cota  * we have not observed a high TLB use rate, it is likely that we won't observe
20286e1eff8SEmilio G. Cota  * it in the near future. In that case, once a time window expires we downsize
20386e1eff8SEmilio G. Cota  * the TLB to match the maximum use rate observed in the window.
20486e1eff8SEmilio G. Cota  *
20586e1eff8SEmilio G. Cota  * 3. Try to keep the maximum use rate in a time window in the 30-70% range,
20686e1eff8SEmilio G. Cota  * since in that range performance is likely near-optimal. Recall that the TLB
20786e1eff8SEmilio G. Cota  * is direct mapped, so we want the use rate to be low (or at least not too
20886e1eff8SEmilio G. Cota  * high), since otherwise we are likely to have a significant amount of
20986e1eff8SEmilio G. Cota  * conflict misses.
21086e1eff8SEmilio G. Cota  */
2113c3959f2SRichard Henderson static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast,
2123c3959f2SRichard Henderson                                   int64_t now)
21386e1eff8SEmilio G. Cota {
21471ccd47bSRichard Henderson     size_t old_size = tlb_n_entries(fast);
21586e1eff8SEmilio G. Cota     size_t rate;
21686e1eff8SEmilio G. Cota     size_t new_size = old_size;
21786e1eff8SEmilio G. Cota     int64_t window_len_ms = 100;
21886e1eff8SEmilio G. Cota     int64_t window_len_ns = window_len_ms * 1000 * 1000;
21979e42085SRichard Henderson     bool window_expired = now > desc->window_begin_ns + window_len_ns;
22086e1eff8SEmilio G. Cota 
22179e42085SRichard Henderson     if (desc->n_used_entries > desc->window_max_entries) {
22279e42085SRichard Henderson         desc->window_max_entries = desc->n_used_entries;
22386e1eff8SEmilio G. Cota     }
22479e42085SRichard Henderson     rate = desc->window_max_entries * 100 / old_size;
22586e1eff8SEmilio G. Cota 
22686e1eff8SEmilio G. Cota     if (rate > 70) {
22786e1eff8SEmilio G. Cota         new_size = MIN(old_size << 1, 1 << CPU_TLB_DYN_MAX_BITS);
22886e1eff8SEmilio G. Cota     } else if (rate < 30 && window_expired) {
22979e42085SRichard Henderson         size_t ceil = pow2ceil(desc->window_max_entries);
23079e42085SRichard Henderson         size_t expected_rate = desc->window_max_entries * 100 / ceil;
23186e1eff8SEmilio G. Cota 
23286e1eff8SEmilio G. Cota         /*
23386e1eff8SEmilio G. Cota          * Avoid undersizing when the max number of entries seen is just below
23486e1eff8SEmilio G. Cota          * a pow2. For instance, if max_entries == 1025, the expected use rate
23586e1eff8SEmilio G. Cota          * would be 1025/2048==50%. However, if max_entries == 1023, we'd get
23686e1eff8SEmilio G. Cota          * 1023/1024==99.9% use rate, so we'd likely end up doubling the size
23786e1eff8SEmilio G. Cota          * later. Thus, make sure that the expected use rate remains below 70%.
23886e1eff8SEmilio G. Cota          * (and since we double the size, that means the lowest rate we'd
23986e1eff8SEmilio G. Cota          * expect to get is 35%, which is still in the 30-70% range where
24086e1eff8SEmilio G. Cota          * we consider that the size is appropriate.)
24186e1eff8SEmilio G. Cota          */
24286e1eff8SEmilio G. Cota         if (expected_rate > 70) {
24386e1eff8SEmilio G. Cota             ceil *= 2;
24486e1eff8SEmilio G. Cota         }
24586e1eff8SEmilio G. Cota         new_size = MAX(ceil, 1 << CPU_TLB_DYN_MIN_BITS);
24686e1eff8SEmilio G. Cota     }
24786e1eff8SEmilio G. Cota 
24886e1eff8SEmilio G. Cota     if (new_size == old_size) {
24986e1eff8SEmilio G. Cota         if (window_expired) {
25079e42085SRichard Henderson             tlb_window_reset(desc, now, desc->n_used_entries);
25186e1eff8SEmilio G. Cota         }
25286e1eff8SEmilio G. Cota         return;
25386e1eff8SEmilio G. Cota     }
25486e1eff8SEmilio G. Cota 
25571ccd47bSRichard Henderson     g_free(fast->table);
25625d3ec58SRichard Henderson     g_free(desc->fulltlb);
25786e1eff8SEmilio G. Cota 
25879e42085SRichard Henderson     tlb_window_reset(desc, now, 0);
25986e1eff8SEmilio G. Cota     /* desc->n_used_entries is cleared by the caller */
26071ccd47bSRichard Henderson     fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
26171ccd47bSRichard Henderson     fast->table = g_try_new(CPUTLBEntry, new_size);
26225d3ec58SRichard Henderson     desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size);
26371ccd47bSRichard Henderson 
26486e1eff8SEmilio G. Cota     /*
26586e1eff8SEmilio G. Cota      * If the allocations fail, try smaller sizes. We just freed some
26686e1eff8SEmilio G. Cota      * memory, so going back to half of new_size has a good chance of working.
26786e1eff8SEmilio G. Cota      * Increased memory pressure elsewhere in the system might cause the
26886e1eff8SEmilio G. Cota      * allocations to fail though, so we progressively reduce the allocation
26986e1eff8SEmilio G. Cota      * size, aborting if we cannot even allocate the smallest TLB we support.
27086e1eff8SEmilio G. Cota      */
27125d3ec58SRichard Henderson     while (fast->table == NULL || desc->fulltlb == NULL) {
27286e1eff8SEmilio G. Cota         if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
27386e1eff8SEmilio G. Cota             error_report("%s: %s", __func__, strerror(errno));
27486e1eff8SEmilio G. Cota             abort();
27586e1eff8SEmilio G. Cota         }
27686e1eff8SEmilio G. Cota         new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS);
27771ccd47bSRichard Henderson         fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
27886e1eff8SEmilio G. Cota 
27971ccd47bSRichard Henderson         g_free(fast->table);
28025d3ec58SRichard Henderson         g_free(desc->fulltlb);
28171ccd47bSRichard Henderson         fast->table = g_try_new(CPUTLBEntry, new_size);
28225d3ec58SRichard Henderson         desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size);
28386e1eff8SEmilio G. Cota     }
28486e1eff8SEmilio G. Cota }
28586e1eff8SEmilio G. Cota 
286bbf021b0SRichard Henderson static void tlb_mmu_flush_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast)
28786e1eff8SEmilio G. Cota {
2885c948e31SRichard Henderson     desc->n_used_entries = 0;
2895c948e31SRichard Henderson     desc->large_page_addr = -1;
2905c948e31SRichard Henderson     desc->large_page_mask = -1;
2915c948e31SRichard Henderson     desc->vindex = 0;
2925c948e31SRichard Henderson     memset(fast->table, -1, sizeof_tlb(fast));
2935c948e31SRichard Henderson     memset(desc->vtable, -1, sizeof(desc->vtable));
29486e1eff8SEmilio G. Cota }
29586e1eff8SEmilio G. Cota 
29610b32e2cSAnton Johansson static void tlb_flush_one_mmuidx_locked(CPUState *cpu, int mmu_idx,
2973c3959f2SRichard Henderson                                         int64_t now)
298bbf021b0SRichard Henderson {
29910b32e2cSAnton Johansson     CPUTLBDesc *desc = &cpu->neg.tlb.d[mmu_idx];
30010b32e2cSAnton Johansson     CPUTLBDescFast *fast = &cpu->neg.tlb.f[mmu_idx];
301bbf021b0SRichard Henderson 
3023c3959f2SRichard Henderson     tlb_mmu_resize_locked(desc, fast, now);
303bbf021b0SRichard Henderson     tlb_mmu_flush_locked(desc, fast);
304bbf021b0SRichard Henderson }
305bbf021b0SRichard Henderson 
30656e89f76SRichard Henderson static void tlb_mmu_init(CPUTLBDesc *desc, CPUTLBDescFast *fast, int64_t now)
30756e89f76SRichard Henderson {
30856e89f76SRichard Henderson     size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;
30956e89f76SRichard Henderson 
31056e89f76SRichard Henderson     tlb_window_reset(desc, now, 0);
31156e89f76SRichard Henderson     desc->n_used_entries = 0;
31256e89f76SRichard Henderson     fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
31356e89f76SRichard Henderson     fast->table = g_new(CPUTLBEntry, n_entries);
31425d3ec58SRichard Henderson     desc->fulltlb = g_new(CPUTLBEntryFull, n_entries);
3153c16304aSRichard Henderson     tlb_mmu_flush_locked(desc, fast);
31656e89f76SRichard Henderson }
31756e89f76SRichard Henderson 
31810b32e2cSAnton Johansson static inline void tlb_n_used_entries_inc(CPUState *cpu, uintptr_t mmu_idx)
31986e1eff8SEmilio G. Cota {
32010b32e2cSAnton Johansson     cpu->neg.tlb.d[mmu_idx].n_used_entries++;
32186e1eff8SEmilio G. Cota }
32286e1eff8SEmilio G. Cota 
32310b32e2cSAnton Johansson static inline void tlb_n_used_entries_dec(CPUState *cpu, uintptr_t mmu_idx)
32486e1eff8SEmilio G. Cota {
32510b32e2cSAnton Johansson     cpu->neg.tlb.d[mmu_idx].n_used_entries--;
32686e1eff8SEmilio G. Cota }
32786e1eff8SEmilio G. Cota 
3285005e253SEmilio G. Cota void tlb_init(CPUState *cpu)
3295005e253SEmilio G. Cota {
33056e89f76SRichard Henderson     int64_t now = get_clock_realtime();
33156e89f76SRichard Henderson     int i;
33271aec354SEmilio G. Cota 
33310b32e2cSAnton Johansson     qemu_spin_init(&cpu->neg.tlb.c.lock);
3343d1523ceSRichard Henderson 
3353c16304aSRichard Henderson     /* All tlbs are initialized flushed. */
33610b32e2cSAnton Johansson     cpu->neg.tlb.c.dirty = 0;
33786e1eff8SEmilio G. Cota 
33856e89f76SRichard Henderson     for (i = 0; i < NB_MMU_MODES; i++) {
33910b32e2cSAnton Johansson         tlb_mmu_init(&cpu->neg.tlb.d[i], &cpu->neg.tlb.f[i], now);
34056e89f76SRichard Henderson     }
3415005e253SEmilio G. Cota }
3425005e253SEmilio G. Cota 
343816d9be5SEmilio G. Cota void tlb_destroy(CPUState *cpu)
344816d9be5SEmilio G. Cota {
345816d9be5SEmilio G. Cota     int i;
346816d9be5SEmilio G. Cota 
34710b32e2cSAnton Johansson     qemu_spin_destroy(&cpu->neg.tlb.c.lock);
348816d9be5SEmilio G. Cota     for (i = 0; i < NB_MMU_MODES; i++) {
34910b32e2cSAnton Johansson         CPUTLBDesc *desc = &cpu->neg.tlb.d[i];
35010b32e2cSAnton Johansson         CPUTLBDescFast *fast = &cpu->neg.tlb.f[i];
351816d9be5SEmilio G. Cota 
352816d9be5SEmilio G. Cota         g_free(fast->table);
35325d3ec58SRichard Henderson         g_free(desc->fulltlb);
354816d9be5SEmilio G. Cota     }
355816d9be5SEmilio G. Cota }
356816d9be5SEmilio G. Cota 
357d9bb58e5SYang Zhong /* flush_all_helper: run fn across all cpus
358d9bb58e5SYang Zhong  *
359d9bb58e5SYang Zhong  * If the wait flag is set then the src cpu's helper will be queued as
360d9bb58e5SYang Zhong  * "safe" work and the loop exited creating a synchronisation point
361d9bb58e5SYang Zhong  * where all queued work will be finished before execution starts
362d9bb58e5SYang Zhong  * again.
363d9bb58e5SYang Zhong  */
364d9bb58e5SYang Zhong static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
365d9bb58e5SYang Zhong                              run_on_cpu_data d)
366d9bb58e5SYang Zhong {
367d9bb58e5SYang Zhong     CPUState *cpu;
368d9bb58e5SYang Zhong 
369d9bb58e5SYang Zhong     CPU_FOREACH(cpu) {
370d9bb58e5SYang Zhong         if (cpu != src) {
371d9bb58e5SYang Zhong             async_run_on_cpu(cpu, fn, d);
372d9bb58e5SYang Zhong         }
373d9bb58e5SYang Zhong     }
374d9bb58e5SYang Zhong }
375d9bb58e5SYang Zhong 
376d9bb58e5SYang Zhong static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
377d9bb58e5SYang Zhong {
3783d1523ceSRichard Henderson     uint16_t asked = data.host_int;
3793d1523ceSRichard Henderson     uint16_t all_dirty, work, to_clean;
3803c3959f2SRichard Henderson     int64_t now = get_clock_realtime();
381d9bb58e5SYang Zhong 
382d9bb58e5SYang Zhong     assert_cpu_is_self(cpu);
383d9bb58e5SYang Zhong 
3843d1523ceSRichard Henderson     tlb_debug("mmu_idx:0x%04" PRIx16 "\n", asked);
385d9bb58e5SYang Zhong 
38610b32e2cSAnton Johansson     qemu_spin_lock(&cpu->neg.tlb.c.lock);
38760a2ad7dSRichard Henderson 
38810b32e2cSAnton Johansson     all_dirty = cpu->neg.tlb.c.dirty;
3893d1523ceSRichard Henderson     to_clean = asked & all_dirty;
3903d1523ceSRichard Henderson     all_dirty &= ~to_clean;
39110b32e2cSAnton Johansson     cpu->neg.tlb.c.dirty = all_dirty;
3923d1523ceSRichard Henderson 
3933d1523ceSRichard Henderson     for (work = to_clean; work != 0; work &= work - 1) {
3943d1523ceSRichard Henderson         int mmu_idx = ctz32(work);
39510b32e2cSAnton Johansson         tlb_flush_one_mmuidx_locked(cpu, mmu_idx, now);
396d9bb58e5SYang Zhong     }
3973d1523ceSRichard Henderson 
39810b32e2cSAnton Johansson     qemu_spin_unlock(&cpu->neg.tlb.c.lock);
399d9bb58e5SYang Zhong 
400a976a99aSRichard Henderson     tcg_flush_jmp_cache(cpu);
40164f2674bSRichard Henderson 
4023d1523ceSRichard Henderson     if (to_clean == ALL_MMUIDX_BITS) {
40310b32e2cSAnton Johansson         qatomic_set(&cpu->neg.tlb.c.full_flush_count,
40410b32e2cSAnton Johansson                     cpu->neg.tlb.c.full_flush_count + 1);
405e09de0a2SRichard Henderson     } else {
40610b32e2cSAnton Johansson         qatomic_set(&cpu->neg.tlb.c.part_flush_count,
40710b32e2cSAnton Johansson                     cpu->neg.tlb.c.part_flush_count + ctpop16(to_clean));
4083d1523ceSRichard Henderson         if (to_clean != asked) {
40910b32e2cSAnton Johansson             qatomic_set(&cpu->neg.tlb.c.elide_flush_count,
41010b32e2cSAnton Johansson                         cpu->neg.tlb.c.elide_flush_count +
4113d1523ceSRichard Henderson                         ctpop16(asked & ~to_clean));
4123d1523ceSRichard Henderson         }
41364f2674bSRichard Henderson     }
414d9bb58e5SYang Zhong }
415d9bb58e5SYang Zhong 
416d9bb58e5SYang Zhong void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
417d9bb58e5SYang Zhong {
418d9bb58e5SYang Zhong     tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);
419d9bb58e5SYang Zhong 
42064f2674bSRichard Henderson     if (cpu->created && !qemu_cpu_is_self(cpu)) {
421d9bb58e5SYang Zhong         async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
422ab651105SRichard Henderson                          RUN_ON_CPU_HOST_INT(idxmap));
423d9bb58e5SYang Zhong     } else {
42460a2ad7dSRichard Henderson         tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap));
425d9bb58e5SYang Zhong     }
426d9bb58e5SYang Zhong }
427d9bb58e5SYang Zhong 
42864f2674bSRichard Henderson void tlb_flush(CPUState *cpu)
42964f2674bSRichard Henderson {
43064f2674bSRichard Henderson     tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS);
43164f2674bSRichard Henderson }
43264f2674bSRichard Henderson 
433d9bb58e5SYang Zhong void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
434d9bb58e5SYang Zhong {
435d9bb58e5SYang Zhong     const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;
436d9bb58e5SYang Zhong 
437d9bb58e5SYang Zhong     tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);
438d9bb58e5SYang Zhong 
439d9bb58e5SYang Zhong     flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
440d9bb58e5SYang Zhong     fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
441d9bb58e5SYang Zhong }
442d9bb58e5SYang Zhong 
44364f2674bSRichard Henderson void tlb_flush_all_cpus(CPUState *src_cpu)
44464f2674bSRichard Henderson {
44564f2674bSRichard Henderson     tlb_flush_by_mmuidx_all_cpus(src_cpu, ALL_MMUIDX_BITS);
44664f2674bSRichard Henderson }
44764f2674bSRichard Henderson 
44864f2674bSRichard Henderson void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap)
449d9bb58e5SYang Zhong {
450d9bb58e5SYang Zhong     const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;
451d9bb58e5SYang Zhong 
452d9bb58e5SYang Zhong     tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);
453d9bb58e5SYang Zhong 
454d9bb58e5SYang Zhong     flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
455d9bb58e5SYang Zhong     async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
456d9bb58e5SYang Zhong }
457d9bb58e5SYang Zhong 
45864f2674bSRichard Henderson void tlb_flush_all_cpus_synced(CPUState *src_cpu)
45964f2674bSRichard Henderson {
46064f2674bSRichard Henderson     tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS);
46164f2674bSRichard Henderson }
46264f2674bSRichard Henderson 
4633ab6e68cSRichard Henderson static bool tlb_hit_page_mask_anyprot(CPUTLBEntry *tlb_entry,
464732d5487SAnton Johansson                                       vaddr page, vaddr mask)
4653ab6e68cSRichard Henderson {
4663ab6e68cSRichard Henderson     page &= mask;
4673ab6e68cSRichard Henderson     mask &= TARGET_PAGE_MASK | TLB_INVALID_MASK;
4683ab6e68cSRichard Henderson 
4693ab6e68cSRichard Henderson     return (page == (tlb_entry->addr_read & mask) ||
4703ab6e68cSRichard Henderson             page == (tlb_addr_write(tlb_entry) & mask) ||
4713ab6e68cSRichard Henderson             page == (tlb_entry->addr_code & mask));
4723ab6e68cSRichard Henderson }
4733ab6e68cSRichard Henderson 
474732d5487SAnton Johansson static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry, vaddr page)
475d9bb58e5SYang Zhong {
4763ab6e68cSRichard Henderson     return tlb_hit_page_mask_anyprot(tlb_entry, page, -1);
47768fea038SRichard Henderson }
47868fea038SRichard Henderson 
4793cea94bbSEmilio G. Cota /**
4803cea94bbSEmilio G. Cota  * tlb_entry_is_empty - return true if the entry is not in use
4813cea94bbSEmilio G. Cota  * @te: pointer to CPUTLBEntry
4823cea94bbSEmilio G. Cota  */
4833cea94bbSEmilio G. Cota static inline bool tlb_entry_is_empty(const CPUTLBEntry *te)
4843cea94bbSEmilio G. Cota {
4853cea94bbSEmilio G. Cota     return te->addr_read == -1 && te->addr_write == -1 && te->addr_code == -1;
4863cea94bbSEmilio G. Cota }
4873cea94bbSEmilio G. Cota 
48853d28455SRichard Henderson /* Called with tlb_c.lock held */
4893ab6e68cSRichard Henderson static bool tlb_flush_entry_mask_locked(CPUTLBEntry *tlb_entry,
490732d5487SAnton Johansson                                         vaddr page,
491732d5487SAnton Johansson                                         vaddr mask)
49268fea038SRichard Henderson {
4933ab6e68cSRichard Henderson     if (tlb_hit_page_mask_anyprot(tlb_entry, page, mask)) {
494d9bb58e5SYang Zhong         memset(tlb_entry, -1, sizeof(*tlb_entry));
49586e1eff8SEmilio G. Cota         return true;
496d9bb58e5SYang Zhong     }
49786e1eff8SEmilio G. Cota     return false;
498d9bb58e5SYang Zhong }
499d9bb58e5SYang Zhong 
500732d5487SAnton Johansson static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry, vaddr page)
50168fea038SRichard Henderson {
5023ab6e68cSRichard Henderson     return tlb_flush_entry_mask_locked(tlb_entry, page, -1);
5033ab6e68cSRichard Henderson }
5043ab6e68cSRichard Henderson 
5053ab6e68cSRichard Henderson /* Called with tlb_c.lock held */
50610b32e2cSAnton Johansson static void tlb_flush_vtlb_page_mask_locked(CPUState *cpu, int mmu_idx,
507732d5487SAnton Johansson                                             vaddr page,
508732d5487SAnton Johansson                                             vaddr mask)
5093ab6e68cSRichard Henderson {
51010b32e2cSAnton Johansson     CPUTLBDesc *d = &cpu->neg.tlb.d[mmu_idx];
51168fea038SRichard Henderson     int k;
51271aec354SEmilio G. Cota 
51310b32e2cSAnton Johansson     assert_cpu_is_self(cpu);
51468fea038SRichard Henderson     for (k = 0; k < CPU_VTLB_SIZE; k++) {
5153ab6e68cSRichard Henderson         if (tlb_flush_entry_mask_locked(&d->vtable[k], page, mask)) {
51610b32e2cSAnton Johansson             tlb_n_used_entries_dec(cpu, mmu_idx);
51786e1eff8SEmilio G. Cota         }
51868fea038SRichard Henderson     }
51968fea038SRichard Henderson }
52068fea038SRichard Henderson 
52110b32e2cSAnton Johansson static inline void tlb_flush_vtlb_page_locked(CPUState *cpu, int mmu_idx,
522732d5487SAnton Johansson                                               vaddr page)
5233ab6e68cSRichard Henderson {
52410b32e2cSAnton Johansson     tlb_flush_vtlb_page_mask_locked(cpu, mmu_idx, page, -1);
5253ab6e68cSRichard Henderson }
5263ab6e68cSRichard Henderson 
52710b32e2cSAnton Johansson static void tlb_flush_page_locked(CPUState *cpu, int midx, vaddr page)
5281308e026SRichard Henderson {
52910b32e2cSAnton Johansson     vaddr lp_addr = cpu->neg.tlb.d[midx].large_page_addr;
53010b32e2cSAnton Johansson     vaddr lp_mask = cpu->neg.tlb.d[midx].large_page_mask;
5311308e026SRichard Henderson 
5321308e026SRichard Henderson     /* Check if we need to flush due to large pages.  */
5331308e026SRichard Henderson     if ((page & lp_mask) == lp_addr) {
5348c605cf1SAnton Johansson         tlb_debug("forcing full flush midx %d (%016"
5358c605cf1SAnton Johansson                   VADDR_PRIx "/%016" VADDR_PRIx ")\n",
5361308e026SRichard Henderson                   midx, lp_addr, lp_mask);
53710b32e2cSAnton Johansson         tlb_flush_one_mmuidx_locked(cpu, midx, get_clock_realtime());
5381308e026SRichard Henderson     } else {
53910b32e2cSAnton Johansson         if (tlb_flush_entry_locked(tlb_entry(cpu, midx, page), page)) {
54010b32e2cSAnton Johansson             tlb_n_used_entries_dec(cpu, midx);
54186e1eff8SEmilio G. Cota         }
54210b32e2cSAnton Johansson         tlb_flush_vtlb_page_locked(cpu, midx, page);
5431308e026SRichard Henderson     }
5441308e026SRichard Henderson }
5451308e026SRichard Henderson 
5467b7d00e0SRichard Henderson /**
5477b7d00e0SRichard Henderson  * tlb_flush_page_by_mmuidx_async_0:
5487b7d00e0SRichard Henderson  * @cpu: cpu on which to flush
5497b7d00e0SRichard Henderson  * @addr: page of virtual address to flush
5507b7d00e0SRichard Henderson  * @idxmap: set of mmu_idx to flush
5517b7d00e0SRichard Henderson  *
5527b7d00e0SRichard Henderson  * Helper for tlb_flush_page_by_mmuidx and friends, flush one page
5537b7d00e0SRichard Henderson  * at @addr from the tlbs indicated by @idxmap from @cpu.
554d9bb58e5SYang Zhong  */
5557b7d00e0SRichard Henderson static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu,
556732d5487SAnton Johansson                                              vaddr addr,
5577b7d00e0SRichard Henderson                                              uint16_t idxmap)
558d9bb58e5SYang Zhong {
559d9bb58e5SYang Zhong     int mmu_idx;
560d9bb58e5SYang Zhong 
561d9bb58e5SYang Zhong     assert_cpu_is_self(cpu);
562d9bb58e5SYang Zhong 
5638c605cf1SAnton Johansson     tlb_debug("page addr: %016" VADDR_PRIx " mmu_map:0x%x\n", addr, idxmap);
564d9bb58e5SYang Zhong 
56510b32e2cSAnton Johansson     qemu_spin_lock(&cpu->neg.tlb.c.lock);
566d9bb58e5SYang Zhong     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
5677b7d00e0SRichard Henderson         if ((idxmap >> mmu_idx) & 1) {
56810b32e2cSAnton Johansson             tlb_flush_page_locked(cpu, mmu_idx, addr);
569d9bb58e5SYang Zhong         }
570d9bb58e5SYang Zhong     }
57110b32e2cSAnton Johansson     qemu_spin_unlock(&cpu->neg.tlb.c.lock);
572d9bb58e5SYang Zhong 
5731d41a79bSRichard Henderson     /*
5741d41a79bSRichard Henderson      * Discard jump cache entries for any tb which might potentially
5751d41a79bSRichard Henderson      * overlap the flushed page, which includes the previous.
5761d41a79bSRichard Henderson      */
5771d41a79bSRichard Henderson     tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
5781d41a79bSRichard Henderson     tb_jmp_cache_clear_page(cpu, addr);
579d9bb58e5SYang Zhong }
580d9bb58e5SYang Zhong 
5817b7d00e0SRichard Henderson /**
5827b7d00e0SRichard Henderson  * tlb_flush_page_by_mmuidx_async_1:
5837b7d00e0SRichard Henderson  * @cpu: cpu on which to flush
5847b7d00e0SRichard Henderson  * @data: encoded addr + idxmap
5857b7d00e0SRichard Henderson  *
5867b7d00e0SRichard Henderson  * Helper for tlb_flush_page_by_mmuidx and friends, called through
5877b7d00e0SRichard Henderson  * async_run_on_cpu.  The idxmap parameter is encoded in the page
5887b7d00e0SRichard Henderson  * offset of the target_ptr field.  This limits the set of mmu_idx
5897b7d00e0SRichard Henderson  * that can be passed via this method.
5907b7d00e0SRichard Henderson  */
5917b7d00e0SRichard Henderson static void tlb_flush_page_by_mmuidx_async_1(CPUState *cpu,
5927b7d00e0SRichard Henderson                                              run_on_cpu_data data)
5937b7d00e0SRichard Henderson {
594732d5487SAnton Johansson     vaddr addr_and_idxmap = data.target_ptr;
595732d5487SAnton Johansson     vaddr addr = addr_and_idxmap & TARGET_PAGE_MASK;
5967b7d00e0SRichard Henderson     uint16_t idxmap = addr_and_idxmap & ~TARGET_PAGE_MASK;
5977b7d00e0SRichard Henderson 
5987b7d00e0SRichard Henderson     tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
5997b7d00e0SRichard Henderson }
6007b7d00e0SRichard Henderson 
6017b7d00e0SRichard Henderson typedef struct {
602732d5487SAnton Johansson     vaddr addr;
6037b7d00e0SRichard Henderson     uint16_t idxmap;
6047b7d00e0SRichard Henderson } TLBFlushPageByMMUIdxData;
6057b7d00e0SRichard Henderson 
6067b7d00e0SRichard Henderson /**
6077b7d00e0SRichard Henderson  * tlb_flush_page_by_mmuidx_async_2:
6087b7d00e0SRichard Henderson  * @cpu: cpu on which to flush
6097b7d00e0SRichard Henderson  * @data: allocated addr + idxmap
6107b7d00e0SRichard Henderson  *
6117b7d00e0SRichard Henderson  * Helper for tlb_flush_page_by_mmuidx and friends, called through
6127b7d00e0SRichard Henderson  * async_run_on_cpu.  The addr+idxmap parameters are stored in a
6137b7d00e0SRichard Henderson  * TLBFlushPageByMMUIdxData structure that has been allocated
6147b7d00e0SRichard Henderson  * specifically for this helper.  Free the structure when done.
6157b7d00e0SRichard Henderson  */
6167b7d00e0SRichard Henderson static void tlb_flush_page_by_mmuidx_async_2(CPUState *cpu,
6177b7d00e0SRichard Henderson                                              run_on_cpu_data data)
6187b7d00e0SRichard Henderson {
6197b7d00e0SRichard Henderson     TLBFlushPageByMMUIdxData *d = data.host_ptr;
6207b7d00e0SRichard Henderson 
6217b7d00e0SRichard Henderson     tlb_flush_page_by_mmuidx_async_0(cpu, d->addr, d->idxmap);
6227b7d00e0SRichard Henderson     g_free(d);
6237b7d00e0SRichard Henderson }
6247b7d00e0SRichard Henderson 
625732d5487SAnton Johansson void tlb_flush_page_by_mmuidx(CPUState *cpu, vaddr addr, uint16_t idxmap)
626d9bb58e5SYang Zhong {
6278c605cf1SAnton Johansson     tlb_debug("addr: %016" VADDR_PRIx " mmu_idx:%" PRIx16 "\n", addr, idxmap);
628d9bb58e5SYang Zhong 
629d9bb58e5SYang Zhong     /* This should already be page aligned */
6307b7d00e0SRichard Henderson     addr &= TARGET_PAGE_MASK;
631d9bb58e5SYang Zhong 
6327b7d00e0SRichard Henderson     if (qemu_cpu_is_self(cpu)) {
6337b7d00e0SRichard Henderson         tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
6347b7d00e0SRichard Henderson     } else if (idxmap < TARGET_PAGE_SIZE) {
6357b7d00e0SRichard Henderson         /*
6367b7d00e0SRichard Henderson          * Most targets have only a few mmu_idx.  In the case where
6377b7d00e0SRichard Henderson          * we can stuff idxmap into the low TARGET_PAGE_BITS, avoid
6387b7d00e0SRichard Henderson          * allocating memory for this operation.
6397b7d00e0SRichard Henderson          */
6407b7d00e0SRichard Henderson         async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_1,
6417b7d00e0SRichard Henderson                          RUN_ON_CPU_TARGET_PTR(addr | idxmap));
642d9bb58e5SYang Zhong     } else {
6437b7d00e0SRichard Henderson         TLBFlushPageByMMUIdxData *d = g_new(TLBFlushPageByMMUIdxData, 1);
6447b7d00e0SRichard Henderson 
6457b7d00e0SRichard Henderson         /* Otherwise allocate a structure, freed by the worker.  */
6467b7d00e0SRichard Henderson         d->addr = addr;
6477b7d00e0SRichard Henderson         d->idxmap = idxmap;
6487b7d00e0SRichard Henderson         async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_2,
6497b7d00e0SRichard Henderson                          RUN_ON_CPU_HOST_PTR(d));
650d9bb58e5SYang Zhong     }
651d9bb58e5SYang Zhong }
652d9bb58e5SYang Zhong 
653732d5487SAnton Johansson void tlb_flush_page(CPUState *cpu, vaddr addr)
654f8144c6cSRichard Henderson {
655f8144c6cSRichard Henderson     tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS);
656f8144c6cSRichard Henderson }
657f8144c6cSRichard Henderson 
658732d5487SAnton Johansson void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, vaddr addr,
659d9bb58e5SYang Zhong                                        uint16_t idxmap)
660d9bb58e5SYang Zhong {
6618c605cf1SAnton Johansson     tlb_debug("addr: %016" VADDR_PRIx " mmu_idx:%"PRIx16"\n", addr, idxmap);
662d9bb58e5SYang Zhong 
663d9bb58e5SYang Zhong     /* This should already be page aligned */
6647b7d00e0SRichard Henderson     addr &= TARGET_PAGE_MASK;
665d9bb58e5SYang Zhong 
6667b7d00e0SRichard Henderson     /*
6677b7d00e0SRichard Henderson      * Allocate memory to hold addr+idxmap only when needed.
6687b7d00e0SRichard Henderson      * See tlb_flush_page_by_mmuidx for details.
6697b7d00e0SRichard Henderson      */
6707b7d00e0SRichard Henderson     if (idxmap < TARGET_PAGE_SIZE) {
6717b7d00e0SRichard Henderson         flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
6727b7d00e0SRichard Henderson                          RUN_ON_CPU_TARGET_PTR(addr | idxmap));
6737b7d00e0SRichard Henderson     } else {
6747b7d00e0SRichard Henderson         CPUState *dst_cpu;
6757b7d00e0SRichard Henderson 
6767b7d00e0SRichard Henderson         /* Allocate a separate data block for each destination cpu.  */
6777b7d00e0SRichard Henderson         CPU_FOREACH(dst_cpu) {
6787b7d00e0SRichard Henderson             if (dst_cpu != src_cpu) {
6797b7d00e0SRichard Henderson                 TLBFlushPageByMMUIdxData *d
6807b7d00e0SRichard Henderson                     = g_new(TLBFlushPageByMMUIdxData, 1);
6817b7d00e0SRichard Henderson 
6827b7d00e0SRichard Henderson                 d->addr = addr;
6837b7d00e0SRichard Henderson                 d->idxmap = idxmap;
6847b7d00e0SRichard Henderson                 async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
6857b7d00e0SRichard Henderson                                  RUN_ON_CPU_HOST_PTR(d));
6867b7d00e0SRichard Henderson             }
6877b7d00e0SRichard Henderson         }
6887b7d00e0SRichard Henderson     }
6897b7d00e0SRichard Henderson 
6907b7d00e0SRichard Henderson     tlb_flush_page_by_mmuidx_async_0(src_cpu, addr, idxmap);
691d9bb58e5SYang Zhong }
692d9bb58e5SYang Zhong 
693732d5487SAnton Johansson void tlb_flush_page_all_cpus(CPUState *src, vaddr addr)
694f8144c6cSRichard Henderson {
695f8144c6cSRichard Henderson     tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS);
696f8144c6cSRichard Henderson }
697f8144c6cSRichard Henderson 
698d9bb58e5SYang Zhong void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
699732d5487SAnton Johansson                                               vaddr addr,
700d9bb58e5SYang Zhong                                               uint16_t idxmap)
701d9bb58e5SYang Zhong {
7028c605cf1SAnton Johansson     tlb_debug("addr: %016" VADDR_PRIx " mmu_idx:%"PRIx16"\n", addr, idxmap);
703d9bb58e5SYang Zhong 
704d9bb58e5SYang Zhong     /* This should already be page aligned */
7057b7d00e0SRichard Henderson     addr &= TARGET_PAGE_MASK;
706d9bb58e5SYang Zhong 
7077b7d00e0SRichard Henderson     /*
7087b7d00e0SRichard Henderson      * Allocate memory to hold addr+idxmap only when needed.
7097b7d00e0SRichard Henderson      * See tlb_flush_page_by_mmuidx for details.
7107b7d00e0SRichard Henderson      */
7117b7d00e0SRichard Henderson     if (idxmap < TARGET_PAGE_SIZE) {
7127b7d00e0SRichard Henderson         flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
7137b7d00e0SRichard Henderson                          RUN_ON_CPU_TARGET_PTR(addr | idxmap));
7147b7d00e0SRichard Henderson         async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_1,
7157b7d00e0SRichard Henderson                               RUN_ON_CPU_TARGET_PTR(addr | idxmap));
7167b7d00e0SRichard Henderson     } else {
7177b7d00e0SRichard Henderson         CPUState *dst_cpu;
7187b7d00e0SRichard Henderson         TLBFlushPageByMMUIdxData *d;
7197b7d00e0SRichard Henderson 
7207b7d00e0SRichard Henderson         /* Allocate a separate data block for each destination cpu.  */
7217b7d00e0SRichard Henderson         CPU_FOREACH(dst_cpu) {
7227b7d00e0SRichard Henderson             if (dst_cpu != src_cpu) {
7237b7d00e0SRichard Henderson                 d = g_new(TLBFlushPageByMMUIdxData, 1);
7247b7d00e0SRichard Henderson                 d->addr = addr;
7257b7d00e0SRichard Henderson                 d->idxmap = idxmap;
7267b7d00e0SRichard Henderson                 async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
7277b7d00e0SRichard Henderson                                  RUN_ON_CPU_HOST_PTR(d));
7287b7d00e0SRichard Henderson             }
7297b7d00e0SRichard Henderson         }
7307b7d00e0SRichard Henderson 
7317b7d00e0SRichard Henderson         d = g_new(TLBFlushPageByMMUIdxData, 1);
7327b7d00e0SRichard Henderson         d->addr = addr;
7337b7d00e0SRichard Henderson         d->idxmap = idxmap;
7347b7d00e0SRichard Henderson         async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_2,
7357b7d00e0SRichard Henderson                               RUN_ON_CPU_HOST_PTR(d));
7367b7d00e0SRichard Henderson     }
737d9bb58e5SYang Zhong }
738d9bb58e5SYang Zhong 
739732d5487SAnton Johansson void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr)
740d9bb58e5SYang Zhong {
741f8144c6cSRichard Henderson     tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
742d9bb58e5SYang Zhong }
743d9bb58e5SYang Zhong 
74410b32e2cSAnton Johansson static void tlb_flush_range_locked(CPUState *cpu, int midx,
745732d5487SAnton Johansson                                    vaddr addr, vaddr len,
7463c4ddec1SRichard Henderson                                    unsigned bits)
7473ab6e68cSRichard Henderson {
74810b32e2cSAnton Johansson     CPUTLBDesc *d = &cpu->neg.tlb.d[midx];
74910b32e2cSAnton Johansson     CPUTLBDescFast *f = &cpu->neg.tlb.f[midx];
750732d5487SAnton Johansson     vaddr mask = MAKE_64BIT_MASK(0, bits);
7513ab6e68cSRichard Henderson 
7523ab6e68cSRichard Henderson     /*
7533ab6e68cSRichard Henderson      * If @bits is smaller than the tlb size, there may be multiple entries
7543ab6e68cSRichard Henderson      * within the TLB; otherwise all addresses that match under @mask hit
7553ab6e68cSRichard Henderson      * the same TLB entry.
7563ab6e68cSRichard Henderson      * TODO: Perhaps allow bits to be a few bits less than the size.
7573ab6e68cSRichard Henderson      * For now, just flush the entire TLB.
7583c4ddec1SRichard Henderson      *
7593c4ddec1SRichard Henderson      * If @len is larger than the tlb size, then it will take longer to
7603c4ddec1SRichard Henderson      * test all of the entries in the TLB than it will to flush it all.
7613ab6e68cSRichard Henderson      */
7623c4ddec1SRichard Henderson     if (mask < f->mask || len > f->mask) {
7633ab6e68cSRichard Henderson         tlb_debug("forcing full flush midx %d ("
7648c605cf1SAnton Johansson                   "%016" VADDR_PRIx "/%016" VADDR_PRIx "+%016" VADDR_PRIx ")\n",
7653c4ddec1SRichard Henderson                   midx, addr, mask, len);
76610b32e2cSAnton Johansson         tlb_flush_one_mmuidx_locked(cpu, midx, get_clock_realtime());
7673ab6e68cSRichard Henderson         return;
7683ab6e68cSRichard Henderson     }
7693ab6e68cSRichard Henderson 
7703c4ddec1SRichard Henderson     /*
7713c4ddec1SRichard Henderson      * Check if we need to flush due to large pages.
7723c4ddec1SRichard Henderson      * Because large_page_mask contains all 1's from the msb,
7733c4ddec1SRichard Henderson      * we only need to test the end of the range.
7743c4ddec1SRichard Henderson      */
7753c4ddec1SRichard Henderson     if (((addr + len - 1) & d->large_page_mask) == d->large_page_addr) {
7763ab6e68cSRichard Henderson         tlb_debug("forcing full flush midx %d ("
7778c605cf1SAnton Johansson                   "%016" VADDR_PRIx "/%016" VADDR_PRIx ")\n",
7783ab6e68cSRichard Henderson                   midx, d->large_page_addr, d->large_page_mask);
77910b32e2cSAnton Johansson         tlb_flush_one_mmuidx_locked(cpu, midx, get_clock_realtime());
7803ab6e68cSRichard Henderson         return;
7813ab6e68cSRichard Henderson     }
7823ab6e68cSRichard Henderson 
783732d5487SAnton Johansson     for (vaddr i = 0; i < len; i += TARGET_PAGE_SIZE) {
784732d5487SAnton Johansson         vaddr page = addr + i;
78510b32e2cSAnton Johansson         CPUTLBEntry *entry = tlb_entry(cpu, midx, page);
7863c4ddec1SRichard Henderson 
7873c4ddec1SRichard Henderson         if (tlb_flush_entry_mask_locked(entry, page, mask)) {
78810b32e2cSAnton Johansson             tlb_n_used_entries_dec(cpu, midx);
7893ab6e68cSRichard Henderson         }
79010b32e2cSAnton Johansson         tlb_flush_vtlb_page_mask_locked(cpu, midx, page, mask);
7913ab6e68cSRichard Henderson     }
7923c4ddec1SRichard Henderson }
7933ab6e68cSRichard Henderson 
7943ab6e68cSRichard Henderson typedef struct {
795732d5487SAnton Johansson     vaddr addr;
796732d5487SAnton Johansson     vaddr len;
7973ab6e68cSRichard Henderson     uint16_t idxmap;
7983ab6e68cSRichard Henderson     uint16_t bits;
7993960a59fSRichard Henderson } TLBFlushRangeData;
8003ab6e68cSRichard Henderson 
8016be48e45SRichard Henderson static void tlb_flush_range_by_mmuidx_async_0(CPUState *cpu,
8023960a59fSRichard Henderson                                               TLBFlushRangeData d)
8033ab6e68cSRichard Henderson {
8043ab6e68cSRichard Henderson     int mmu_idx;
8053ab6e68cSRichard Henderson 
8063ab6e68cSRichard Henderson     assert_cpu_is_self(cpu);
8073ab6e68cSRichard Henderson 
8088c605cf1SAnton Johansson     tlb_debug("range: %016" VADDR_PRIx "/%u+%016" VADDR_PRIx " mmu_map:0x%x\n",
8093c4ddec1SRichard Henderson               d.addr, d.bits, d.len, d.idxmap);
8103ab6e68cSRichard Henderson 
81110b32e2cSAnton Johansson     qemu_spin_lock(&cpu->neg.tlb.c.lock);
8123ab6e68cSRichard Henderson     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
8133ab6e68cSRichard Henderson         if ((d.idxmap >> mmu_idx) & 1) {
81410b32e2cSAnton Johansson             tlb_flush_range_locked(cpu, mmu_idx, d.addr, d.len, d.bits);
8153ab6e68cSRichard Henderson         }
8163ab6e68cSRichard Henderson     }
81710b32e2cSAnton Johansson     qemu_spin_unlock(&cpu->neg.tlb.c.lock);
8183ab6e68cSRichard Henderson 
819cfc2a2d6SIdan Horowitz     /*
820cfc2a2d6SIdan Horowitz      * If the length is larger than the jump cache size, then it will take
821cfc2a2d6SIdan Horowitz      * longer to clear each entry individually than it will to clear it all.
822cfc2a2d6SIdan Horowitz      */
823cfc2a2d6SIdan Horowitz     if (d.len >= (TARGET_PAGE_SIZE * TB_JMP_CACHE_SIZE)) {
824a976a99aSRichard Henderson         tcg_flush_jmp_cache(cpu);
825cfc2a2d6SIdan Horowitz         return;
826cfc2a2d6SIdan Horowitz     }
827cfc2a2d6SIdan Horowitz 
8281d41a79bSRichard Henderson     /*
8291d41a79bSRichard Henderson      * Discard jump cache entries for any tb which might potentially
8301d41a79bSRichard Henderson      * overlap the flushed pages, which includes the previous.
8311d41a79bSRichard Henderson      */
8321d41a79bSRichard Henderson     d.addr -= TARGET_PAGE_SIZE;
833732d5487SAnton Johansson     for (vaddr i = 0, n = d.len / TARGET_PAGE_SIZE + 1; i < n; i++) {
8341d41a79bSRichard Henderson         tb_jmp_cache_clear_page(cpu, d.addr);
8351d41a79bSRichard Henderson         d.addr += TARGET_PAGE_SIZE;
8363c4ddec1SRichard Henderson     }
8373ab6e68cSRichard Henderson }
8383ab6e68cSRichard Henderson 
839206a583dSRichard Henderson static void tlb_flush_range_by_mmuidx_async_1(CPUState *cpu,
8403ab6e68cSRichard Henderson                                               run_on_cpu_data data)
8413ab6e68cSRichard Henderson {
8423960a59fSRichard Henderson     TLBFlushRangeData *d = data.host_ptr;
8436be48e45SRichard Henderson     tlb_flush_range_by_mmuidx_async_0(cpu, *d);
8443ab6e68cSRichard Henderson     g_free(d);
8453ab6e68cSRichard Henderson }
8463ab6e68cSRichard Henderson 
847732d5487SAnton Johansson void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
848732d5487SAnton Johansson                                vaddr len, uint16_t idxmap,
849e5b1921bSRichard Henderson                                unsigned bits)
8503ab6e68cSRichard Henderson {
8513960a59fSRichard Henderson     TLBFlushRangeData d;
8523ab6e68cSRichard Henderson 
853e5b1921bSRichard Henderson     /*
854e5b1921bSRichard Henderson      * If all bits are significant, and len is small,
855e5b1921bSRichard Henderson      * this devolves to tlb_flush_page.
856e5b1921bSRichard Henderson      */
857e5b1921bSRichard Henderson     if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
8583ab6e68cSRichard Henderson         tlb_flush_page_by_mmuidx(cpu, addr, idxmap);
8593ab6e68cSRichard Henderson         return;
8603ab6e68cSRichard Henderson     }
8613ab6e68cSRichard Henderson     /* If no page bits are significant, this devolves to tlb_flush. */
8623ab6e68cSRichard Henderson     if (bits < TARGET_PAGE_BITS) {
8633ab6e68cSRichard Henderson         tlb_flush_by_mmuidx(cpu, idxmap);
8643ab6e68cSRichard Henderson         return;
8653ab6e68cSRichard Henderson     }
8663ab6e68cSRichard Henderson 
8673ab6e68cSRichard Henderson     /* This should already be page aligned */
8683ab6e68cSRichard Henderson     d.addr = addr & TARGET_PAGE_MASK;
869e5b1921bSRichard Henderson     d.len = len;
8703ab6e68cSRichard Henderson     d.idxmap = idxmap;
8713ab6e68cSRichard Henderson     d.bits = bits;
8723ab6e68cSRichard Henderson 
8733ab6e68cSRichard Henderson     if (qemu_cpu_is_self(cpu)) {
8746be48e45SRichard Henderson         tlb_flush_range_by_mmuidx_async_0(cpu, d);
8753ab6e68cSRichard Henderson     } else {
8763ab6e68cSRichard Henderson         /* Otherwise allocate a structure, freed by the worker.  */
8773960a59fSRichard Henderson         TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
878206a583dSRichard Henderson         async_run_on_cpu(cpu, tlb_flush_range_by_mmuidx_async_1,
8793ab6e68cSRichard Henderson                          RUN_ON_CPU_HOST_PTR(p));
8803ab6e68cSRichard Henderson     }
8813ab6e68cSRichard Henderson }
8823ab6e68cSRichard Henderson 
883732d5487SAnton Johansson void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, vaddr addr,
884e5b1921bSRichard Henderson                                    uint16_t idxmap, unsigned bits)
885e5b1921bSRichard Henderson {
886e5b1921bSRichard Henderson     tlb_flush_range_by_mmuidx(cpu, addr, TARGET_PAGE_SIZE, idxmap, bits);
887e5b1921bSRichard Henderson }
888e5b1921bSRichard Henderson 
889600b819fSRichard Henderson void tlb_flush_range_by_mmuidx_all_cpus(CPUState *src_cpu,
890732d5487SAnton Johansson                                         vaddr addr, vaddr len,
891600b819fSRichard Henderson                                         uint16_t idxmap, unsigned bits)
8923ab6e68cSRichard Henderson {
8933960a59fSRichard Henderson     TLBFlushRangeData d;
894d34e4d1aSRichard Henderson     CPUState *dst_cpu;
8953ab6e68cSRichard Henderson 
896600b819fSRichard Henderson     /*
897600b819fSRichard Henderson      * If all bits are significant, and len is small,
898600b819fSRichard Henderson      * this devolves to tlb_flush_page.
899600b819fSRichard Henderson      */
900600b819fSRichard Henderson     if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
9013ab6e68cSRichard Henderson         tlb_flush_page_by_mmuidx_all_cpus(src_cpu, addr, idxmap);
9023ab6e68cSRichard Henderson         return;
9033ab6e68cSRichard Henderson     }
9043ab6e68cSRichard Henderson     /* If no page bits are significant, this devolves to tlb_flush. */
9053ab6e68cSRichard Henderson     if (bits < TARGET_PAGE_BITS) {
9063ab6e68cSRichard Henderson         tlb_flush_by_mmuidx_all_cpus(src_cpu, idxmap);
9073ab6e68cSRichard Henderson         return;
9083ab6e68cSRichard Henderson     }
9093ab6e68cSRichard Henderson 
9103ab6e68cSRichard Henderson     /* This should already be page aligned */
9113ab6e68cSRichard Henderson     d.addr = addr & TARGET_PAGE_MASK;
912600b819fSRichard Henderson     d.len = len;
9133ab6e68cSRichard Henderson     d.idxmap = idxmap;
9143ab6e68cSRichard Henderson     d.bits = bits;
9153ab6e68cSRichard Henderson 
9163ab6e68cSRichard Henderson     /* Allocate a separate data block for each destination cpu.  */
9173ab6e68cSRichard Henderson     CPU_FOREACH(dst_cpu) {
9183ab6e68cSRichard Henderson         if (dst_cpu != src_cpu) {
9193960a59fSRichard Henderson             TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
9203ab6e68cSRichard Henderson             async_run_on_cpu(dst_cpu,
921206a583dSRichard Henderson                              tlb_flush_range_by_mmuidx_async_1,
9223ab6e68cSRichard Henderson                              RUN_ON_CPU_HOST_PTR(p));
9233ab6e68cSRichard Henderson         }
9243ab6e68cSRichard Henderson     }
9253ab6e68cSRichard Henderson 
9266be48e45SRichard Henderson     tlb_flush_range_by_mmuidx_async_0(src_cpu, d);
9273ab6e68cSRichard Henderson }
9283ab6e68cSRichard Henderson 
929600b819fSRichard Henderson void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu,
930732d5487SAnton Johansson                                             vaddr addr, uint16_t idxmap,
931732d5487SAnton Johansson                                             unsigned bits)
932600b819fSRichard Henderson {
933600b819fSRichard Henderson     tlb_flush_range_by_mmuidx_all_cpus(src_cpu, addr, TARGET_PAGE_SIZE,
934600b819fSRichard Henderson                                        idxmap, bits);
935600b819fSRichard Henderson }
936600b819fSRichard Henderson 
937c13b27d8SRichard Henderson void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
938732d5487SAnton Johansson                                                vaddr addr,
939732d5487SAnton Johansson                                                vaddr len,
9403ab6e68cSRichard Henderson                                                uint16_t idxmap,
9413ab6e68cSRichard Henderson                                                unsigned bits)
9423ab6e68cSRichard Henderson {
943d34e4d1aSRichard Henderson     TLBFlushRangeData d, *p;
944d34e4d1aSRichard Henderson     CPUState *dst_cpu;
9453ab6e68cSRichard Henderson 
946c13b27d8SRichard Henderson     /*
947c13b27d8SRichard Henderson      * If all bits are significant, and len is small,
948c13b27d8SRichard Henderson      * this devolves to tlb_flush_page.
949c13b27d8SRichard Henderson      */
950c13b27d8SRichard Henderson     if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
9513ab6e68cSRichard Henderson         tlb_flush_page_by_mmuidx_all_cpus_synced(src_cpu, addr, idxmap);
9523ab6e68cSRichard Henderson         return;
9533ab6e68cSRichard Henderson     }
9543ab6e68cSRichard Henderson     /* If no page bits are significant, this devolves to tlb_flush. */
9553ab6e68cSRichard Henderson     if (bits < TARGET_PAGE_BITS) {
9563ab6e68cSRichard Henderson         tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, idxmap);
9573ab6e68cSRichard Henderson         return;
9583ab6e68cSRichard Henderson     }
9593ab6e68cSRichard Henderson 
9603ab6e68cSRichard Henderson     /* This should already be page aligned */
9613ab6e68cSRichard Henderson     d.addr = addr & TARGET_PAGE_MASK;
962c13b27d8SRichard Henderson     d.len = len;
9633ab6e68cSRichard Henderson     d.idxmap = idxmap;
9643ab6e68cSRichard Henderson     d.bits = bits;
9653ab6e68cSRichard Henderson 
9663ab6e68cSRichard Henderson     /* Allocate a separate data block for each destination cpu.  */
9673ab6e68cSRichard Henderson     CPU_FOREACH(dst_cpu) {
9683ab6e68cSRichard Henderson         if (dst_cpu != src_cpu) {
9696d244788SRichard Henderson             p = g_memdup(&d, sizeof(d));
970206a583dSRichard Henderson             async_run_on_cpu(dst_cpu, tlb_flush_range_by_mmuidx_async_1,
9713ab6e68cSRichard Henderson                              RUN_ON_CPU_HOST_PTR(p));
9723ab6e68cSRichard Henderson         }
9733ab6e68cSRichard Henderson     }
9743ab6e68cSRichard Henderson 
9756d244788SRichard Henderson     p = g_memdup(&d, sizeof(d));
976206a583dSRichard Henderson     async_safe_run_on_cpu(src_cpu, tlb_flush_range_by_mmuidx_async_1,
9773ab6e68cSRichard Henderson                           RUN_ON_CPU_HOST_PTR(p));
9783ab6e68cSRichard Henderson }
9793ab6e68cSRichard Henderson 
980c13b27d8SRichard Henderson void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
981732d5487SAnton Johansson                                                    vaddr addr,
982c13b27d8SRichard Henderson                                                    uint16_t idxmap,
983c13b27d8SRichard Henderson                                                    unsigned bits)
984c13b27d8SRichard Henderson {
985c13b27d8SRichard Henderson     tlb_flush_range_by_mmuidx_all_cpus_synced(src_cpu, addr, TARGET_PAGE_SIZE,
986c13b27d8SRichard Henderson                                               idxmap, bits);
987c13b27d8SRichard Henderson }
988c13b27d8SRichard Henderson 
989d9bb58e5SYang Zhong /* update the TLBs so that writes to code in the virtual page 'addr'
990d9bb58e5SYang Zhong    can be detected */
991d9bb58e5SYang Zhong void tlb_protect_code(ram_addr_t ram_addr)
992d9bb58e5SYang Zhong {
99393b99616SRichard Henderson     cpu_physical_memory_test_and_clear_dirty(ram_addr & TARGET_PAGE_MASK,
99493b99616SRichard Henderson                                              TARGET_PAGE_SIZE,
995d9bb58e5SYang Zhong                                              DIRTY_MEMORY_CODE);
996d9bb58e5SYang Zhong }
997d9bb58e5SYang Zhong 
998d9bb58e5SYang Zhong /* update the TLB so that writes in physical page 'phys_addr' are no longer
999d9bb58e5SYang Zhong    tested for self modifying code */
1000d9bb58e5SYang Zhong void tlb_unprotect_code(ram_addr_t ram_addr)
1001d9bb58e5SYang Zhong {
1002d9bb58e5SYang Zhong     cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
1003d9bb58e5SYang Zhong }
1004d9bb58e5SYang Zhong 
1005d9bb58e5SYang Zhong 
1006d9bb58e5SYang Zhong /*
1007d9bb58e5SYang Zhong  * Dirty write flag handling
1008d9bb58e5SYang Zhong  *
1009d9bb58e5SYang Zhong  * When the TCG code writes to a location it looks up the address in
1010d9bb58e5SYang Zhong  * the TLB and uses that data to compute the final address. If any of
1011d9bb58e5SYang Zhong  * the lower bits of the address are set then the slow path is forced.
1012d9bb58e5SYang Zhong  * There are a number of reasons to do this but for normal RAM the
1013d9bb58e5SYang Zhong  * most usual is detecting writes to code regions which may invalidate
1014d9bb58e5SYang Zhong  * generated code.
1015d9bb58e5SYang Zhong  *
101671aec354SEmilio G. Cota  * Other vCPUs might be reading their TLBs during guest execution, so we update
1017d73415a3SStefan Hajnoczi  * te->addr_write with qatomic_set. We don't need to worry about this for
101871aec354SEmilio G. Cota  * oversized guests as MTTCG is disabled for them.
1019d9bb58e5SYang Zhong  *
102053d28455SRichard Henderson  * Called with tlb_c.lock held.
1021d9bb58e5SYang Zhong  */
102271aec354SEmilio G. Cota static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
102371aec354SEmilio G. Cota                                          uintptr_t start, uintptr_t length)
1024d9bb58e5SYang Zhong {
1025d9bb58e5SYang Zhong     uintptr_t addr = tlb_entry->addr_write;
1026d9bb58e5SYang Zhong 
10277b0d792cSRichard Henderson     if ((addr & (TLB_INVALID_MASK | TLB_MMIO |
10287b0d792cSRichard Henderson                  TLB_DISCARD_WRITE | TLB_NOTDIRTY)) == 0) {
1029d9bb58e5SYang Zhong         addr &= TARGET_PAGE_MASK;
1030d9bb58e5SYang Zhong         addr += tlb_entry->addend;
1031d9bb58e5SYang Zhong         if ((addr - start) < length) {
1032238f4380SRichard Henderson #if TARGET_LONG_BITS == 32
1033238f4380SRichard Henderson             uint32_t *ptr_write = (uint32_t *)&tlb_entry->addr_write;
1034238f4380SRichard Henderson             ptr_write += HOST_BIG_ENDIAN;
1035238f4380SRichard Henderson             qatomic_set(ptr_write, *ptr_write | TLB_NOTDIRTY);
1036238f4380SRichard Henderson #elif TCG_OVERSIZED_GUEST
103771aec354SEmilio G. Cota             tlb_entry->addr_write |= TLB_NOTDIRTY;
1038d9bb58e5SYang Zhong #else
1039d73415a3SStefan Hajnoczi             qatomic_set(&tlb_entry->addr_write,
104071aec354SEmilio G. Cota                         tlb_entry->addr_write | TLB_NOTDIRTY);
1041d9bb58e5SYang Zhong #endif
1042d9bb58e5SYang Zhong         }
104371aec354SEmilio G. Cota     }
104471aec354SEmilio G. Cota }
104571aec354SEmilio G. Cota 
104671aec354SEmilio G. Cota /*
104753d28455SRichard Henderson  * Called with tlb_c.lock held.
104871aec354SEmilio G. Cota  * Called only from the vCPU context, i.e. the TLB's owner thread.
104971aec354SEmilio G. Cota  */
105071aec354SEmilio G. Cota static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s)
105171aec354SEmilio G. Cota {
105271aec354SEmilio G. Cota     *d = *s;
105371aec354SEmilio G. Cota }
1054d9bb58e5SYang Zhong 
1055d9bb58e5SYang Zhong /* This is a cross vCPU call (i.e. another vCPU resetting the flags of
105671aec354SEmilio G. Cota  * the target vCPU).
105753d28455SRichard Henderson  * We must take tlb_c.lock to avoid racing with another vCPU update. The only
105871aec354SEmilio G. Cota  * thing actually updated is the target TLB entry ->addr_write flags.
1059d9bb58e5SYang Zhong  */
1060d9bb58e5SYang Zhong void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
1061d9bb58e5SYang Zhong {
1062d9bb58e5SYang Zhong     int mmu_idx;
1063d9bb58e5SYang Zhong 
106410b32e2cSAnton Johansson     qemu_spin_lock(&cpu->neg.tlb.c.lock);
1065d9bb58e5SYang Zhong     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1066d9bb58e5SYang Zhong         unsigned int i;
106710b32e2cSAnton Johansson         unsigned int n = tlb_n_entries(&cpu->neg.tlb.f[mmu_idx]);
1068d9bb58e5SYang Zhong 
106986e1eff8SEmilio G. Cota         for (i = 0; i < n; i++) {
107010b32e2cSAnton Johansson             tlb_reset_dirty_range_locked(&cpu->neg.tlb.f[mmu_idx].table[i],
1071a40ec84eSRichard Henderson                                          start1, length);
1072d9bb58e5SYang Zhong         }
1073d9bb58e5SYang Zhong 
1074d9bb58e5SYang Zhong         for (i = 0; i < CPU_VTLB_SIZE; i++) {
107510b32e2cSAnton Johansson             tlb_reset_dirty_range_locked(&cpu->neg.tlb.d[mmu_idx].vtable[i],
1076a40ec84eSRichard Henderson                                          start1, length);
1077d9bb58e5SYang Zhong         }
1078d9bb58e5SYang Zhong     }
107910b32e2cSAnton Johansson     qemu_spin_unlock(&cpu->neg.tlb.c.lock);
1080d9bb58e5SYang Zhong }
1081d9bb58e5SYang Zhong 
108253d28455SRichard Henderson /* Called with tlb_c.lock held */
108371aec354SEmilio G. Cota static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry,
1084732d5487SAnton Johansson                                          vaddr addr)
1085d9bb58e5SYang Zhong {
1086732d5487SAnton Johansson     if (tlb_entry->addr_write == (addr | TLB_NOTDIRTY)) {
1087732d5487SAnton Johansson         tlb_entry->addr_write = addr;
1088d9bb58e5SYang Zhong     }
1089d9bb58e5SYang Zhong }
1090d9bb58e5SYang Zhong 
1091d9bb58e5SYang Zhong /* update the TLB corresponding to virtual page vaddr
1092d9bb58e5SYang Zhong    so that it is no longer dirty */
109351579d40SPhilippe Mathieu-Daudé static void tlb_set_dirty(CPUState *cpu, vaddr addr)
1094d9bb58e5SYang Zhong {
1095d9bb58e5SYang Zhong     int mmu_idx;
1096d9bb58e5SYang Zhong 
1097d9bb58e5SYang Zhong     assert_cpu_is_self(cpu);
1098d9bb58e5SYang Zhong 
1099732d5487SAnton Johansson     addr &= TARGET_PAGE_MASK;
110010b32e2cSAnton Johansson     qemu_spin_lock(&cpu->neg.tlb.c.lock);
1101d9bb58e5SYang Zhong     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
110210b32e2cSAnton Johansson         tlb_set_dirty1_locked(tlb_entry(cpu, mmu_idx, addr), addr);
1103d9bb58e5SYang Zhong     }
1104d9bb58e5SYang Zhong 
1105d9bb58e5SYang Zhong     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1106d9bb58e5SYang Zhong         int k;
1107d9bb58e5SYang Zhong         for (k = 0; k < CPU_VTLB_SIZE; k++) {
110810b32e2cSAnton Johansson             tlb_set_dirty1_locked(&cpu->neg.tlb.d[mmu_idx].vtable[k], addr);
1109d9bb58e5SYang Zhong         }
1110d9bb58e5SYang Zhong     }
111110b32e2cSAnton Johansson     qemu_spin_unlock(&cpu->neg.tlb.c.lock);
1112d9bb58e5SYang Zhong }
1113d9bb58e5SYang Zhong 
1114d9bb58e5SYang Zhong /* Our TLB does not support large pages, so remember the area covered by
1115d9bb58e5SYang Zhong    large pages and trigger a full TLB flush if these are invalidated.  */
111610b32e2cSAnton Johansson static void tlb_add_large_page(CPUState *cpu, int mmu_idx,
1117732d5487SAnton Johansson                                vaddr addr, uint64_t size)
1118d9bb58e5SYang Zhong {
111910b32e2cSAnton Johansson     vaddr lp_addr = cpu->neg.tlb.d[mmu_idx].large_page_addr;
1120732d5487SAnton Johansson     vaddr lp_mask = ~(size - 1);
1121d9bb58e5SYang Zhong 
1122732d5487SAnton Johansson     if (lp_addr == (vaddr)-1) {
11231308e026SRichard Henderson         /* No previous large page.  */
1124732d5487SAnton Johansson         lp_addr = addr;
11251308e026SRichard Henderson     } else {
1126d9bb58e5SYang Zhong         /* Extend the existing region to include the new page.
11271308e026SRichard Henderson            This is a compromise between unnecessary flushes and
11281308e026SRichard Henderson            the cost of maintaining a full variable size TLB.  */
112910b32e2cSAnton Johansson         lp_mask &= cpu->neg.tlb.d[mmu_idx].large_page_mask;
1130732d5487SAnton Johansson         while (((lp_addr ^ addr) & lp_mask) != 0) {
11311308e026SRichard Henderson             lp_mask <<= 1;
1132d9bb58e5SYang Zhong         }
11331308e026SRichard Henderson     }
113410b32e2cSAnton Johansson     cpu->neg.tlb.d[mmu_idx].large_page_addr = lp_addr & lp_mask;
113510b32e2cSAnton Johansson     cpu->neg.tlb.d[mmu_idx].large_page_mask = lp_mask;
1136d9bb58e5SYang Zhong }
1137d9bb58e5SYang Zhong 
113858e8f1f6SRichard Henderson static inline void tlb_set_compare(CPUTLBEntryFull *full, CPUTLBEntry *ent,
1139d712b116SAnton Johansson                                    vaddr address, int flags,
114058e8f1f6SRichard Henderson                                    MMUAccessType access_type, bool enable)
114158e8f1f6SRichard Henderson {
114258e8f1f6SRichard Henderson     if (enable) {
114358e8f1f6SRichard Henderson         address |= flags & TLB_FLAGS_MASK;
114458e8f1f6SRichard Henderson         flags &= TLB_SLOW_FLAGS_MASK;
114558e8f1f6SRichard Henderson         if (flags) {
114658e8f1f6SRichard Henderson             address |= TLB_FORCE_SLOW;
114758e8f1f6SRichard Henderson         }
114858e8f1f6SRichard Henderson     } else {
114958e8f1f6SRichard Henderson         address = -1;
115058e8f1f6SRichard Henderson         flags = 0;
115158e8f1f6SRichard Henderson     }
115258e8f1f6SRichard Henderson     ent->addr_idx[access_type] = address;
115358e8f1f6SRichard Henderson     full->slow_flags[access_type] = flags;
115458e8f1f6SRichard Henderson }
115558e8f1f6SRichard Henderson 
115640473689SRichard Henderson /*
115740473689SRichard Henderson  * Add a new TLB entry. At most one entry for a given virtual address
1158d9bb58e5SYang Zhong  * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
1159d9bb58e5SYang Zhong  * supplied size is only used by tlb_flush_page.
1160d9bb58e5SYang Zhong  *
1161d9bb58e5SYang Zhong  * Called from TCG-generated code, which is under an RCU read-side
1162d9bb58e5SYang Zhong  * critical section.
1163d9bb58e5SYang Zhong  */
116440473689SRichard Henderson void tlb_set_page_full(CPUState *cpu, int mmu_idx,
1165732d5487SAnton Johansson                        vaddr addr, CPUTLBEntryFull *full)
1166d9bb58e5SYang Zhong {
116710b32e2cSAnton Johansson     CPUTLB *tlb = &cpu->neg.tlb;
1168a40ec84eSRichard Henderson     CPUTLBDesc *desc = &tlb->d[mmu_idx];
1169d9bb58e5SYang Zhong     MemoryRegionSection *section;
117058e8f1f6SRichard Henderson     unsigned int index, read_flags, write_flags;
1171d9bb58e5SYang Zhong     uintptr_t addend;
117268fea038SRichard Henderson     CPUTLBEntry *te, tn;
117355df6fcfSPeter Maydell     hwaddr iotlb, xlat, sz, paddr_page;
1174732d5487SAnton Johansson     vaddr addr_page;
117540473689SRichard Henderson     int asidx, wp_flags, prot;
11768f5db641SRichard Henderson     bool is_ram, is_romd;
1177d9bb58e5SYang Zhong 
1178d9bb58e5SYang Zhong     assert_cpu_is_self(cpu);
117955df6fcfSPeter Maydell 
118040473689SRichard Henderson     if (full->lg_page_size <= TARGET_PAGE_BITS) {
118155df6fcfSPeter Maydell         sz = TARGET_PAGE_SIZE;
118255df6fcfSPeter Maydell     } else {
118340473689SRichard Henderson         sz = (hwaddr)1 << full->lg_page_size;
118410b32e2cSAnton Johansson         tlb_add_large_page(cpu, mmu_idx, addr, sz);
118555df6fcfSPeter Maydell     }
1186732d5487SAnton Johansson     addr_page = addr & TARGET_PAGE_MASK;
118740473689SRichard Henderson     paddr_page = full->phys_addr & TARGET_PAGE_MASK;
118855df6fcfSPeter Maydell 
118940473689SRichard Henderson     prot = full->prot;
119040473689SRichard Henderson     asidx = cpu_asidx_from_attrs(cpu, full->attrs);
119155df6fcfSPeter Maydell     section = address_space_translate_for_iotlb(cpu, asidx, paddr_page,
119240473689SRichard Henderson                                                 &xlat, &sz, full->attrs, &prot);
1193d9bb58e5SYang Zhong     assert(sz >= TARGET_PAGE_SIZE);
1194d9bb58e5SYang Zhong 
11958c605cf1SAnton Johansson     tlb_debug("vaddr=%016" VADDR_PRIx " paddr=0x" HWADDR_FMT_plx
1196d9bb58e5SYang Zhong               " prot=%x idx=%d\n",
1197732d5487SAnton Johansson               addr, full->phys_addr, prot, mmu_idx);
1198d9bb58e5SYang Zhong 
1199a0ff4a87SRichard Henderson     read_flags = full->tlb_fill_flags;
120040473689SRichard Henderson     if (full->lg_page_size < TARGET_PAGE_BITS) {
120130d7e098SRichard Henderson         /* Repeat the MMU check and TLB fill on every access.  */
120258e8f1f6SRichard Henderson         read_flags |= TLB_INVALID_MASK;
120355df6fcfSPeter Maydell     }
12048f5db641SRichard Henderson 
12058f5db641SRichard Henderson     is_ram = memory_region_is_ram(section->mr);
12068f5db641SRichard Henderson     is_romd = memory_region_is_romd(section->mr);
12078f5db641SRichard Henderson 
12088f5db641SRichard Henderson     if (is_ram || is_romd) {
12098f5db641SRichard Henderson         /* RAM and ROMD both have associated host memory. */
1210d9bb58e5SYang Zhong         addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
12118f5db641SRichard Henderson     } else {
12128f5db641SRichard Henderson         /* I/O does not; force the host address to NULL. */
12138f5db641SRichard Henderson         addend = 0;
1214d9bb58e5SYang Zhong     }
1215d9bb58e5SYang Zhong 
121658e8f1f6SRichard Henderson     write_flags = read_flags;
12178f5db641SRichard Henderson     if (is_ram) {
12188f5db641SRichard Henderson         iotlb = memory_region_get_ram_addr(section->mr) + xlat;
1219dff1ab68SLIU Zhiwei         assert(!(iotlb & ~TARGET_PAGE_MASK));
12208f5db641SRichard Henderson         /*
12218f5db641SRichard Henderson          * Computing is_clean is expensive; avoid all that unless
12228f5db641SRichard Henderson          * the page is actually writable.
12238f5db641SRichard Henderson          */
12248f5db641SRichard Henderson         if (prot & PAGE_WRITE) {
12258f5db641SRichard Henderson             if (section->readonly) {
122658e8f1f6SRichard Henderson                 write_flags |= TLB_DISCARD_WRITE;
12278f5db641SRichard Henderson             } else if (cpu_physical_memory_is_clean(iotlb)) {
122858e8f1f6SRichard Henderson                 write_flags |= TLB_NOTDIRTY;
12298f5db641SRichard Henderson             }
12308f5db641SRichard Henderson         }
12318f5db641SRichard Henderson     } else {
12328f5db641SRichard Henderson         /* I/O or ROMD */
12338f5db641SRichard Henderson         iotlb = memory_region_section_get_iotlb(cpu, section) + xlat;
12348f5db641SRichard Henderson         /*
12358f5db641SRichard Henderson          * Writes to romd devices must go through MMIO to enable write.
12368f5db641SRichard Henderson          * Reads to romd devices go through the ram_ptr found above,
12378f5db641SRichard Henderson          * but of course reads to I/O must go through MMIO.
12388f5db641SRichard Henderson          */
123958e8f1f6SRichard Henderson         write_flags |= TLB_MMIO;
12408f5db641SRichard Henderson         if (!is_romd) {
124158e8f1f6SRichard Henderson             read_flags = write_flags;
12428f5db641SRichard Henderson         }
12438f5db641SRichard Henderson     }
12448f5db641SRichard Henderson 
1245732d5487SAnton Johansson     wp_flags = cpu_watchpoint_address_matches(cpu, addr_page,
124650b107c5SRichard Henderson                                               TARGET_PAGE_SIZE);
1247d9bb58e5SYang Zhong 
124810b32e2cSAnton Johansson     index = tlb_index(cpu, mmu_idx, addr_page);
124910b32e2cSAnton Johansson     te = tlb_entry(cpu, mmu_idx, addr_page);
1250d9bb58e5SYang Zhong 
125168fea038SRichard Henderson     /*
125271aec354SEmilio G. Cota      * Hold the TLB lock for the rest of the function. We could acquire/release
125371aec354SEmilio G. Cota      * the lock several times in the function, but it is faster to amortize the
125471aec354SEmilio G. Cota      * acquisition cost by acquiring it just once. Note that this leads to
125571aec354SEmilio G. Cota      * a longer critical section, but this is not a concern since the TLB lock
125671aec354SEmilio G. Cota      * is unlikely to be contended.
125771aec354SEmilio G. Cota      */
1258a40ec84eSRichard Henderson     qemu_spin_lock(&tlb->c.lock);
125971aec354SEmilio G. Cota 
12603d1523ceSRichard Henderson     /* Note that the tlb is no longer clean.  */
1261a40ec84eSRichard Henderson     tlb->c.dirty |= 1 << mmu_idx;
12623d1523ceSRichard Henderson 
126371aec354SEmilio G. Cota     /* Make sure there's no cached translation for the new page.  */
126410b32e2cSAnton Johansson     tlb_flush_vtlb_page_locked(cpu, mmu_idx, addr_page);
126571aec354SEmilio G. Cota 
126671aec354SEmilio G. Cota     /*
126768fea038SRichard Henderson      * Only evict the old entry to the victim tlb if it's for a
126868fea038SRichard Henderson      * different page; otherwise just overwrite the stale data.
126968fea038SRichard Henderson      */
1270732d5487SAnton Johansson     if (!tlb_hit_page_anyprot(te, addr_page) && !tlb_entry_is_empty(te)) {
1271a40ec84eSRichard Henderson         unsigned vidx = desc->vindex++ % CPU_VTLB_SIZE;
1272a40ec84eSRichard Henderson         CPUTLBEntry *tv = &desc->vtable[vidx];
127368fea038SRichard Henderson 
127468fea038SRichard Henderson         /* Evict the old entry into the victim tlb.  */
127571aec354SEmilio G. Cota         copy_tlb_helper_locked(tv, te);
127625d3ec58SRichard Henderson         desc->vfulltlb[vidx] = desc->fulltlb[index];
127710b32e2cSAnton Johansson         tlb_n_used_entries_dec(cpu, mmu_idx);
127868fea038SRichard Henderson     }
1279d9bb58e5SYang Zhong 
1280d9bb58e5SYang Zhong     /* refill the tlb */
1281ace41090SPeter Maydell     /*
1282dff1ab68SLIU Zhiwei      * When memory region is ram, iotlb contains a TARGET_PAGE_BITS
1283dff1ab68SLIU Zhiwei      * aligned ram_addr_t of the page base of the target RAM.
1284dff1ab68SLIU Zhiwei      * Otherwise, iotlb contains
1285dff1ab68SLIU Zhiwei      *  - a physical section number in the lower TARGET_PAGE_BITS
1286dff1ab68SLIU Zhiwei      *  - the offset within section->mr of the page base (I/O, ROMD) with the
1287dff1ab68SLIU Zhiwei      *    TARGET_PAGE_BITS masked off.
128858e8f1f6SRichard Henderson      * We subtract addr_page (which is page aligned and thus won't
1289ace41090SPeter Maydell      * disturb the low bits) to give an offset which can be added to the
1290ace41090SPeter Maydell      * (non-page-aligned) vaddr of the eventual memory access to get
1291ace41090SPeter Maydell      * the MemoryRegion offset for the access. Note that the vaddr we
1292ace41090SPeter Maydell      * subtract here is that of the page base, and not the same as the
1293fb3cb376SRichard Henderson      * vaddr we add back in io_prepare()/get_page_addr_code().
1294ace41090SPeter Maydell      */
129540473689SRichard Henderson     desc->fulltlb[index] = *full;
129658e8f1f6SRichard Henderson     full = &desc->fulltlb[index];
129758e8f1f6SRichard Henderson     full->xlat_section = iotlb - addr_page;
129858e8f1f6SRichard Henderson     full->phys_addr = paddr_page;
1299d9bb58e5SYang Zhong 
1300d9bb58e5SYang Zhong     /* Now calculate the new entry */
1301732d5487SAnton Johansson     tn.addend = addend - addr_page;
130258e8f1f6SRichard Henderson 
130358e8f1f6SRichard Henderson     tlb_set_compare(full, &tn, addr_page, read_flags,
130458e8f1f6SRichard Henderson                     MMU_INST_FETCH, prot & PAGE_EXEC);
130558e8f1f6SRichard Henderson 
130650b107c5SRichard Henderson     if (wp_flags & BP_MEM_READ) {
130758e8f1f6SRichard Henderson         read_flags |= TLB_WATCHPOINT;
130850b107c5SRichard Henderson     }
130958e8f1f6SRichard Henderson     tlb_set_compare(full, &tn, addr_page, read_flags,
131058e8f1f6SRichard Henderson                     MMU_DATA_LOAD, prot & PAGE_READ);
1311d9bb58e5SYang Zhong 
1312f52bfb12SDavid Hildenbrand     if (prot & PAGE_WRITE_INV) {
131358e8f1f6SRichard Henderson         write_flags |= TLB_INVALID_MASK;
1314f52bfb12SDavid Hildenbrand     }
131550b107c5SRichard Henderson     if (wp_flags & BP_MEM_WRITE) {
131658e8f1f6SRichard Henderson         write_flags |= TLB_WATCHPOINT;
131750b107c5SRichard Henderson     }
131858e8f1f6SRichard Henderson     tlb_set_compare(full, &tn, addr_page, write_flags,
131958e8f1f6SRichard Henderson                     MMU_DATA_STORE, prot & PAGE_WRITE);
1320d9bb58e5SYang Zhong 
132171aec354SEmilio G. Cota     copy_tlb_helper_locked(te, &tn);
132210b32e2cSAnton Johansson     tlb_n_used_entries_inc(cpu, mmu_idx);
1323a40ec84eSRichard Henderson     qemu_spin_unlock(&tlb->c.lock);
1324d9bb58e5SYang Zhong }
1325d9bb58e5SYang Zhong 
1326732d5487SAnton Johansson void tlb_set_page_with_attrs(CPUState *cpu, vaddr addr,
132740473689SRichard Henderson                              hwaddr paddr, MemTxAttrs attrs, int prot,
1328732d5487SAnton Johansson                              int mmu_idx, uint64_t size)
132940473689SRichard Henderson {
133040473689SRichard Henderson     CPUTLBEntryFull full = {
133140473689SRichard Henderson         .phys_addr = paddr,
133240473689SRichard Henderson         .attrs = attrs,
133340473689SRichard Henderson         .prot = prot,
133440473689SRichard Henderson         .lg_page_size = ctz64(size)
133540473689SRichard Henderson     };
133640473689SRichard Henderson 
133740473689SRichard Henderson     assert(is_power_of_2(size));
1338732d5487SAnton Johansson     tlb_set_page_full(cpu, mmu_idx, addr, &full);
133940473689SRichard Henderson }
134040473689SRichard Henderson 
1341732d5487SAnton Johansson void tlb_set_page(CPUState *cpu, vaddr addr,
1342d9bb58e5SYang Zhong                   hwaddr paddr, int prot,
1343732d5487SAnton Johansson                   int mmu_idx, uint64_t size)
1344d9bb58e5SYang Zhong {
1345732d5487SAnton Johansson     tlb_set_page_with_attrs(cpu, addr, paddr, MEMTXATTRS_UNSPECIFIED,
1346d9bb58e5SYang Zhong                             prot, mmu_idx, size);
1347d9bb58e5SYang Zhong }
1348d9bb58e5SYang Zhong 
1349c319dc13SRichard Henderson /*
1350c319dc13SRichard Henderson  * Note: tlb_fill() can trigger a resize of the TLB. This means that all of the
1351c319dc13SRichard Henderson  * caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must
1352c319dc13SRichard Henderson  * be discarded and looked up again (e.g. via tlb_entry()).
1353c319dc13SRichard Henderson  */
1354732d5487SAnton Johansson static void tlb_fill(CPUState *cpu, vaddr addr, int size,
1355c319dc13SRichard Henderson                      MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
1356c319dc13SRichard Henderson {
1357c319dc13SRichard Henderson     bool ok;
1358c319dc13SRichard Henderson 
1359c319dc13SRichard Henderson     /*
1360c319dc13SRichard Henderson      * This is not a probe, so only valid return is success; failure
1361c319dc13SRichard Henderson      * should result in exception + longjmp to the cpu loop.
1362c319dc13SRichard Henderson      */
13638810ee2aSAlex Bennée     ok = cpu->cc->tcg_ops->tlb_fill(cpu, addr, size,
1364e124536fSEduardo Habkost                                     access_type, mmu_idx, false, retaddr);
1365c319dc13SRichard Henderson     assert(ok);
1366c319dc13SRichard Henderson }
1367c319dc13SRichard Henderson 
136878271684SClaudio Fontana static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
136978271684SClaudio Fontana                                         MMUAccessType access_type,
137078271684SClaudio Fontana                                         int mmu_idx, uintptr_t retaddr)
137178271684SClaudio Fontana {
13728810ee2aSAlex Bennée     cpu->cc->tcg_ops->do_unaligned_access(cpu, addr, access_type,
13738810ee2aSAlex Bennée                                           mmu_idx, retaddr);
137478271684SClaudio Fontana }
137578271684SClaudio Fontana 
1376fb3cb376SRichard Henderson static MemoryRegionSection *
1377d50ef446SAnton Johansson io_prepare(hwaddr *out_offset, CPUState *cpu, hwaddr xlat,
1378fb3cb376SRichard Henderson            MemTxAttrs attrs, vaddr addr, uintptr_t retaddr)
1379d9bb58e5SYang Zhong {
13802d54f194SPeter Maydell     MemoryRegionSection *section;
1381fb3cb376SRichard Henderson     hwaddr mr_offset;
1382d9bb58e5SYang Zhong 
1383fb3cb376SRichard Henderson     section = iotlb_to_section(cpu, xlat, attrs);
1384fb3cb376SRichard Henderson     mr_offset = (xlat & TARGET_PAGE_MASK) + addr;
1385d9bb58e5SYang Zhong     cpu->mem_io_pc = retaddr;
1386464dacf6SRichard Henderson     if (!cpu->neg.can_do_io) {
1387d9bb58e5SYang Zhong         cpu_io_recompile(cpu, retaddr);
1388d9bb58e5SYang Zhong     }
1389d9bb58e5SYang Zhong 
1390fb3cb376SRichard Henderson     *out_offset = mr_offset;
1391fb3cb376SRichard Henderson     return section;
1392fb3cb376SRichard Henderson }
1393fb3cb376SRichard Henderson 
1394d50ef446SAnton Johansson static void io_failed(CPUState *cpu, CPUTLBEntryFull *full, vaddr addr,
1395fb3cb376SRichard Henderson                       unsigned size, MMUAccessType access_type, int mmu_idx,
13960e114440SRichard Henderson                       MemTxResult response, uintptr_t retaddr)
1397fb3cb376SRichard Henderson {
1398d50ef446SAnton Johansson     if (!cpu->ignore_memory_transaction_failures
1399d50ef446SAnton Johansson         && cpu->cc->tcg_ops->do_transaction_failed) {
14000e114440SRichard Henderson         hwaddr physaddr = full->phys_addr | (addr & ~TARGET_PAGE_MASK);
1401bef0c216SRichard Henderson 
1402d50ef446SAnton Johansson         cpu->cc->tcg_ops->do_transaction_failed(cpu, physaddr, addr, size,
1403bef0c216SRichard Henderson                                                 access_type, mmu_idx,
1404bef0c216SRichard Henderson                                                 full->attrs, response, retaddr);
1405bef0c216SRichard Henderson     }
1406bef0c216SRichard Henderson }
1407fb3cb376SRichard Henderson 
1408d9bb58e5SYang Zhong /* Return true if ADDR is present in the victim tlb, and has been copied
1409d9bb58e5SYang Zhong    back to the main tlb.  */
141010b32e2cSAnton Johansson static bool victim_tlb_hit(CPUState *cpu, size_t mmu_idx, size_t index,
1411732d5487SAnton Johansson                            MMUAccessType access_type, vaddr page)
1412d9bb58e5SYang Zhong {
1413d9bb58e5SYang Zhong     size_t vidx;
141471aec354SEmilio G. Cota 
141510b32e2cSAnton Johansson     assert_cpu_is_self(cpu);
1416d9bb58e5SYang Zhong     for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
141710b32e2cSAnton Johansson         CPUTLBEntry *vtlb = &cpu->neg.tlb.d[mmu_idx].vtable[vidx];
14189e39de98SAnton Johansson         uint64_t cmp = tlb_read_idx(vtlb, access_type);
1419d9bb58e5SYang Zhong 
1420d9bb58e5SYang Zhong         if (cmp == page) {
1421d9bb58e5SYang Zhong             /* Found entry in victim tlb, swap tlb and iotlb.  */
142210b32e2cSAnton Johansson             CPUTLBEntry tmptlb, *tlb = &cpu->neg.tlb.f[mmu_idx].table[index];
1423d9bb58e5SYang Zhong 
142410b32e2cSAnton Johansson             qemu_spin_lock(&cpu->neg.tlb.c.lock);
142571aec354SEmilio G. Cota             copy_tlb_helper_locked(&tmptlb, tlb);
142671aec354SEmilio G. Cota             copy_tlb_helper_locked(tlb, vtlb);
142771aec354SEmilio G. Cota             copy_tlb_helper_locked(vtlb, &tmptlb);
142810b32e2cSAnton Johansson             qemu_spin_unlock(&cpu->neg.tlb.c.lock);
1429d9bb58e5SYang Zhong 
143010b32e2cSAnton Johansson             CPUTLBEntryFull *f1 = &cpu->neg.tlb.d[mmu_idx].fulltlb[index];
143110b32e2cSAnton Johansson             CPUTLBEntryFull *f2 = &cpu->neg.tlb.d[mmu_idx].vfulltlb[vidx];
143225d3ec58SRichard Henderson             CPUTLBEntryFull tmpf;
143325d3ec58SRichard Henderson             tmpf = *f1; *f1 = *f2; *f2 = tmpf;
1434d9bb58e5SYang Zhong             return true;
1435d9bb58e5SYang Zhong         }
1436d9bb58e5SYang Zhong     }
1437d9bb58e5SYang Zhong     return false;
1438d9bb58e5SYang Zhong }
1439d9bb58e5SYang Zhong 
1440707526adSRichard Henderson static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
144125d3ec58SRichard Henderson                            CPUTLBEntryFull *full, uintptr_t retaddr)
1442707526adSRichard Henderson {
144325d3ec58SRichard Henderson     ram_addr_t ram_addr = mem_vaddr + full->xlat_section;
1444707526adSRichard Henderson 
1445707526adSRichard Henderson     trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size);
1446707526adSRichard Henderson 
1447707526adSRichard Henderson     if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
1448f349e92eSPhilippe Mathieu-Daudé         tb_invalidate_phys_range_fast(ram_addr, size, retaddr);
1449707526adSRichard Henderson     }
1450707526adSRichard Henderson 
1451707526adSRichard Henderson     /*
1452707526adSRichard Henderson      * Set both VGA and migration bits for simplicity and to remove
1453707526adSRichard Henderson      * the notdirty callback faster.
1454707526adSRichard Henderson      */
1455707526adSRichard Henderson     cpu_physical_memory_set_dirty_range(ram_addr, size, DIRTY_CLIENTS_NOCODE);
1456707526adSRichard Henderson 
1457707526adSRichard Henderson     /* We remove the notdirty callback only if the code has been flushed. */
1458707526adSRichard Henderson     if (!cpu_physical_memory_is_clean(ram_addr)) {
1459707526adSRichard Henderson         trace_memory_notdirty_set_dirty(mem_vaddr);
1460707526adSRichard Henderson         tlb_set_dirty(cpu, mem_vaddr);
1461707526adSRichard Henderson     }
1462707526adSRichard Henderson }
1463707526adSRichard Henderson 
14645afec1c6SAnton Johansson static int probe_access_internal(CPUState *cpu, vaddr addr,
1465069cfe77SRichard Henderson                                  int fault_size, MMUAccessType access_type,
1466069cfe77SRichard Henderson                                  int mmu_idx, bool nonfault,
1467af803a4fSRichard Henderson                                  void **phost, CPUTLBEntryFull **pfull,
14686d03226bSAlex Bennée                                  uintptr_t retaddr, bool check_mem_cbs)
1469d9bb58e5SYang Zhong {
14705afec1c6SAnton Johansson     uintptr_t index = tlb_index(cpu, mmu_idx, addr);
14715afec1c6SAnton Johansson     CPUTLBEntry *entry = tlb_entry(cpu, mmu_idx, addr);
14729e39de98SAnton Johansson     uint64_t tlb_addr = tlb_read_idx(entry, access_type);
14734f8f4127SAnton Johansson     vaddr page_addr = addr & TARGET_PAGE_MASK;
147458e8f1f6SRichard Henderson     int flags = TLB_FLAGS_MASK & ~TLB_FORCE_SLOW;
14755afec1c6SAnton Johansson     bool force_mmio = check_mem_cbs && cpu_plugin_mem_cbs_enabled(cpu);
147658e8f1f6SRichard Henderson     CPUTLBEntryFull *full;
1477ca86cf32SDavid Hildenbrand 
1478069cfe77SRichard Henderson     if (!tlb_hit_page(tlb_addr, page_addr)) {
14795afec1c6SAnton Johansson         if (!victim_tlb_hit(cpu, mmu_idx, index, access_type, page_addr)) {
14805afec1c6SAnton Johansson             if (!cpu->cc->tcg_ops->tlb_fill(cpu, addr, fault_size, access_type,
1481069cfe77SRichard Henderson                                             mmu_idx, nonfault, retaddr)) {
1482069cfe77SRichard Henderson                 /* Non-faulting page table read failed.  */
1483069cfe77SRichard Henderson                 *phost = NULL;
1484af803a4fSRichard Henderson                 *pfull = NULL;
1485069cfe77SRichard Henderson                 return TLB_INVALID_MASK;
1486069cfe77SRichard Henderson             }
1487069cfe77SRichard Henderson 
148803a98189SDavid Hildenbrand             /* TLB resize via tlb_fill may have moved the entry.  */
14895afec1c6SAnton Johansson             index = tlb_index(cpu, mmu_idx, addr);
14905afec1c6SAnton Johansson             entry = tlb_entry(cpu, mmu_idx, addr);
1491c3c8bf57SRichard Henderson 
1492c3c8bf57SRichard Henderson             /*
1493c3c8bf57SRichard Henderson              * With PAGE_WRITE_INV, we set TLB_INVALID_MASK immediately,
1494c3c8bf57SRichard Henderson              * to force the next access through tlb_fill.  We've just
1495c3c8bf57SRichard Henderson              * called tlb_fill, so we know that this entry *is* valid.
1496c3c8bf57SRichard Henderson              */
1497c3c8bf57SRichard Henderson             flags &= ~TLB_INVALID_MASK;
1498d9bb58e5SYang Zhong         }
14990b3c75adSRichard Henderson         tlb_addr = tlb_read_idx(entry, access_type);
150003a98189SDavid Hildenbrand     }
1501c3c8bf57SRichard Henderson     flags &= tlb_addr;
150203a98189SDavid Hildenbrand 
15035afec1c6SAnton Johansson     *pfull = full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index];
150458e8f1f6SRichard Henderson     flags |= full->slow_flags[access_type];
1505af803a4fSRichard Henderson 
1506069cfe77SRichard Henderson     /* Fold all "mmio-like" bits into TLB_MMIO.  This is not RAM.  */
150749fa457cSRichard Henderson     if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY | TLB_CHECK_ALIGNED))
150849fa457cSRichard Henderson         || (access_type != MMU_INST_FETCH && force_mmio)) {
1509069cfe77SRichard Henderson         *phost = NULL;
1510069cfe77SRichard Henderson         return TLB_MMIO;
1511fef39ccdSDavid Hildenbrand     }
1512fef39ccdSDavid Hildenbrand 
1513069cfe77SRichard Henderson     /* Everything else is RAM. */
1514069cfe77SRichard Henderson     *phost = (void *)((uintptr_t)addr + entry->addend);
1515069cfe77SRichard Henderson     return flags;
1516069cfe77SRichard Henderson }
1517069cfe77SRichard Henderson 
15184f8f4127SAnton Johansson int probe_access_full(CPUArchState *env, vaddr addr, int size,
1519069cfe77SRichard Henderson                       MMUAccessType access_type, int mmu_idx,
1520af803a4fSRichard Henderson                       bool nonfault, void **phost, CPUTLBEntryFull **pfull,
1521af803a4fSRichard Henderson                       uintptr_t retaddr)
1522069cfe77SRichard Henderson {
15235afec1c6SAnton Johansson     int flags = probe_access_internal(env_cpu(env), addr, size, access_type,
15245afec1c6SAnton Johansson                                       mmu_idx, nonfault, phost, pfull, retaddr,
15255afec1c6SAnton Johansson                                       true);
1526069cfe77SRichard Henderson 
1527069cfe77SRichard Henderson     /* Handle clean RAM pages.  */
1528069cfe77SRichard Henderson     if (unlikely(flags & TLB_NOTDIRTY)) {
1529e2faabeeSJessica Clarke         int dirtysize = size == 0 ? 1 : size;
1530e2faabeeSJessica Clarke         notdirty_write(env_cpu(env), addr, dirtysize, *pfull, retaddr);
1531069cfe77SRichard Henderson         flags &= ~TLB_NOTDIRTY;
1532069cfe77SRichard Henderson     }
1533069cfe77SRichard Henderson 
1534069cfe77SRichard Henderson     return flags;
1535069cfe77SRichard Henderson }
1536069cfe77SRichard Henderson 
15376d03226bSAlex Bennée int probe_access_full_mmu(CPUArchState *env, vaddr addr, int size,
15386d03226bSAlex Bennée                           MMUAccessType access_type, int mmu_idx,
15396d03226bSAlex Bennée                           void **phost, CPUTLBEntryFull **pfull)
15406d03226bSAlex Bennée {
15416d03226bSAlex Bennée     void *discard_phost;
15426d03226bSAlex Bennée     CPUTLBEntryFull *discard_tlb;
15436d03226bSAlex Bennée 
15446d03226bSAlex Bennée     /* privately handle users that don't need full results */
15456d03226bSAlex Bennée     phost = phost ? phost : &discard_phost;
15466d03226bSAlex Bennée     pfull = pfull ? pfull : &discard_tlb;
15476d03226bSAlex Bennée 
15485afec1c6SAnton Johansson     int flags = probe_access_internal(env_cpu(env), addr, size, access_type,
15495afec1c6SAnton Johansson                                       mmu_idx, true, phost, pfull, 0, false);
15506d03226bSAlex Bennée 
15516d03226bSAlex Bennée     /* Handle clean RAM pages.  */
15526d03226bSAlex Bennée     if (unlikely(flags & TLB_NOTDIRTY)) {
1553e2faabeeSJessica Clarke         int dirtysize = size == 0 ? 1 : size;
1554e2faabeeSJessica Clarke         notdirty_write(env_cpu(env), addr, dirtysize, *pfull, 0);
15556d03226bSAlex Bennée         flags &= ~TLB_NOTDIRTY;
15566d03226bSAlex Bennée     }
15576d03226bSAlex Bennée 
15586d03226bSAlex Bennée     return flags;
15596d03226bSAlex Bennée }
15606d03226bSAlex Bennée 
15614f8f4127SAnton Johansson int probe_access_flags(CPUArchState *env, vaddr addr, int size,
1562af803a4fSRichard Henderson                        MMUAccessType access_type, int mmu_idx,
1563af803a4fSRichard Henderson                        bool nonfault, void **phost, uintptr_t retaddr)
1564af803a4fSRichard Henderson {
1565af803a4fSRichard Henderson     CPUTLBEntryFull *full;
15661770b2f2SDaniel Henrique Barboza     int flags;
1567af803a4fSRichard Henderson 
15681770b2f2SDaniel Henrique Barboza     g_assert(-(addr | TARGET_PAGE_MASK) >= size);
15691770b2f2SDaniel Henrique Barboza 
15705afec1c6SAnton Johansson     flags = probe_access_internal(env_cpu(env), addr, size, access_type,
15715afec1c6SAnton Johansson                                   mmu_idx, nonfault, phost, &full, retaddr,
15725afec1c6SAnton Johansson                                   true);
15731770b2f2SDaniel Henrique Barboza 
15741770b2f2SDaniel Henrique Barboza     /* Handle clean RAM pages. */
15751770b2f2SDaniel Henrique Barboza     if (unlikely(flags & TLB_NOTDIRTY)) {
1576e2faabeeSJessica Clarke         int dirtysize = size == 0 ? 1 : size;
1577e2faabeeSJessica Clarke         notdirty_write(env_cpu(env), addr, dirtysize, full, retaddr);
15781770b2f2SDaniel Henrique Barboza         flags &= ~TLB_NOTDIRTY;
15791770b2f2SDaniel Henrique Barboza     }
15801770b2f2SDaniel Henrique Barboza 
15811770b2f2SDaniel Henrique Barboza     return flags;
1582af803a4fSRichard Henderson }
1583af803a4fSRichard Henderson 
15844f8f4127SAnton Johansson void *probe_access(CPUArchState *env, vaddr addr, int size,
1585069cfe77SRichard Henderson                    MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
1586069cfe77SRichard Henderson {
1587af803a4fSRichard Henderson     CPUTLBEntryFull *full;
1588069cfe77SRichard Henderson     void *host;
1589069cfe77SRichard Henderson     int flags;
1590069cfe77SRichard Henderson 
1591069cfe77SRichard Henderson     g_assert(-(addr | TARGET_PAGE_MASK) >= size);
1592069cfe77SRichard Henderson 
15935afec1c6SAnton Johansson     flags = probe_access_internal(env_cpu(env), addr, size, access_type,
15945afec1c6SAnton Johansson                                   mmu_idx, false, &host, &full, retaddr,
15955afec1c6SAnton Johansson                                   true);
1596069cfe77SRichard Henderson 
1597069cfe77SRichard Henderson     /* Per the interface, size == 0 merely faults the access. */
1598069cfe77SRichard Henderson     if (size == 0) {
159973bc0bd4SRichard Henderson         return NULL;
160073bc0bd4SRichard Henderson     }
160173bc0bd4SRichard Henderson 
1602069cfe77SRichard Henderson     if (unlikely(flags & (TLB_NOTDIRTY | TLB_WATCHPOINT))) {
160303a98189SDavid Hildenbrand         /* Handle watchpoints.  */
1604069cfe77SRichard Henderson         if (flags & TLB_WATCHPOINT) {
1605069cfe77SRichard Henderson             int wp_access = (access_type == MMU_DATA_STORE
1606069cfe77SRichard Henderson                              ? BP_MEM_WRITE : BP_MEM_READ);
160703a98189SDavid Hildenbrand             cpu_check_watchpoint(env_cpu(env), addr, size,
160825d3ec58SRichard Henderson                                  full->attrs, wp_access, retaddr);
1609d9bb58e5SYang Zhong         }
1610fef39ccdSDavid Hildenbrand 
161173bc0bd4SRichard Henderson         /* Handle clean RAM pages.  */
1612069cfe77SRichard Henderson         if (flags & TLB_NOTDIRTY) {
1613e2faabeeSJessica Clarke             notdirty_write(env_cpu(env), addr, size, full, retaddr);
161473bc0bd4SRichard Henderson         }
1615fef39ccdSDavid Hildenbrand     }
1616fef39ccdSDavid Hildenbrand 
1617069cfe77SRichard Henderson     return host;
1618d9bb58e5SYang Zhong }
1619d9bb58e5SYang Zhong 
16204811e909SRichard Henderson void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
16214811e909SRichard Henderson                         MMUAccessType access_type, int mmu_idx)
16224811e909SRichard Henderson {
1623af803a4fSRichard Henderson     CPUTLBEntryFull *full;
1624069cfe77SRichard Henderson     void *host;
1625069cfe77SRichard Henderson     int flags;
16264811e909SRichard Henderson 
16275afec1c6SAnton Johansson     flags = probe_access_internal(env_cpu(env), addr, 0, access_type,
16286d03226bSAlex Bennée                                   mmu_idx, true, &host, &full, 0, false);
1629069cfe77SRichard Henderson 
1630069cfe77SRichard Henderson     /* No combination of flags are expected by the caller. */
1631069cfe77SRichard Henderson     return flags ? NULL : host;
16324811e909SRichard Henderson }
16334811e909SRichard Henderson 
16347e0d9973SRichard Henderson /*
16357e0d9973SRichard Henderson  * Return a ram_addr_t for the virtual address for execution.
16367e0d9973SRichard Henderson  *
16377e0d9973SRichard Henderson  * Return -1 if we can't translate and execute from an entire page
16387e0d9973SRichard Henderson  * of RAM.  This will force us to execute by loading and translating
16397e0d9973SRichard Henderson  * one insn at a time, without caching.
16407e0d9973SRichard Henderson  *
16417e0d9973SRichard Henderson  * NOTE: This function will trigger an exception if the page is
16427e0d9973SRichard Henderson  * not executable.
16437e0d9973SRichard Henderson  */
16444f8f4127SAnton Johansson tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
16457e0d9973SRichard Henderson                                         void **hostp)
16467e0d9973SRichard Henderson {
1647af803a4fSRichard Henderson     CPUTLBEntryFull *full;
16487e0d9973SRichard Henderson     void *p;
16497e0d9973SRichard Henderson 
16505afec1c6SAnton Johansson     (void)probe_access_internal(env_cpu(env), addr, 1, MMU_INST_FETCH,
16513b916140SRichard Henderson                                 cpu_mmu_index(env_cpu(env), true), false,
16526d03226bSAlex Bennée                                 &p, &full, 0, false);
16537e0d9973SRichard Henderson     if (p == NULL) {
16547e0d9973SRichard Henderson         return -1;
16557e0d9973SRichard Henderson     }
1656ac01ec6fSWeiwei Li 
1657ac01ec6fSWeiwei Li     if (full->lg_page_size < TARGET_PAGE_BITS) {
1658ac01ec6fSWeiwei Li         return -1;
1659ac01ec6fSWeiwei Li     }
1660ac01ec6fSWeiwei Li 
16617e0d9973SRichard Henderson     if (hostp) {
16627e0d9973SRichard Henderson         *hostp = p;
16637e0d9973SRichard Henderson     }
16647e0d9973SRichard Henderson     return qemu_ram_addr_from_host_nofail(p);
16657e0d9973SRichard Henderson }
16667e0d9973SRichard Henderson 
1667cdfac37bSRichard Henderson /* Load/store with atomicity primitives. */
1668cdfac37bSRichard Henderson #include "ldst_atomicity.c.inc"
1669cdfac37bSRichard Henderson 
1670235537faSAlex Bennée #ifdef CONFIG_PLUGIN
1671235537faSAlex Bennée /*
1672235537faSAlex Bennée  * Perform a TLB lookup and populate the qemu_plugin_hwaddr structure.
1673235537faSAlex Bennée  * This should be a hot path as we will have just looked this path up
1674235537faSAlex Bennée  * in the softmmu lookup code (or helper). We don't handle re-fills or
1675235537faSAlex Bennée  * checking the victim table. This is purely informational.
1676235537faSAlex Bennée  *
1677da6aef48SRichard Henderson  * The one corner case is i/o write, which can cause changes to the
1678da6aef48SRichard Henderson  * address space.  Those changes, and the corresponding tlb flush,
1679da6aef48SRichard Henderson  * should be delayed until the next TB, so even then this ought not fail.
1680da6aef48SRichard Henderson  * But check, Just in Case.
1681235537faSAlex Bennée  */
1682732d5487SAnton Johansson bool tlb_plugin_lookup(CPUState *cpu, vaddr addr, int mmu_idx,
1683235537faSAlex Bennée                        bool is_store, struct qemu_plugin_hwaddr *data)
1684235537faSAlex Bennée {
168510b32e2cSAnton Johansson     CPUTLBEntry *tlbe = tlb_entry(cpu, mmu_idx, addr);
168610b32e2cSAnton Johansson     uintptr_t index = tlb_index(cpu, mmu_idx, addr);
1687da6aef48SRichard Henderson     MMUAccessType access_type = is_store ? MMU_DATA_STORE : MMU_DATA_LOAD;
1688da6aef48SRichard Henderson     uint64_t tlb_addr = tlb_read_idx(tlbe, access_type);
1689405c02d8SRichard Henderson     CPUTLBEntryFull *full;
1690235537faSAlex Bennée 
1691da6aef48SRichard Henderson     if (unlikely(!tlb_hit(tlb_addr, addr))) {
1692da6aef48SRichard Henderson         return false;
1693da6aef48SRichard Henderson     }
1694da6aef48SRichard Henderson 
169510b32e2cSAnton Johansson     full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index];
1696405c02d8SRichard Henderson     data->phys_addr = full->phys_addr | (addr & ~TARGET_PAGE_MASK);
1697405c02d8SRichard Henderson 
1698235537faSAlex Bennée     /* We must have an iotlb entry for MMIO */
1699235537faSAlex Bennée     if (tlb_addr & TLB_MMIO) {
1700405c02d8SRichard Henderson         MemoryRegionSection *section =
1701405c02d8SRichard Henderson             iotlb_to_section(cpu, full->xlat_section & ~TARGET_PAGE_MASK,
1702405c02d8SRichard Henderson                              full->attrs);
1703235537faSAlex Bennée         data->is_io = true;
1704405c02d8SRichard Henderson         data->mr = section->mr;
1705235537faSAlex Bennée     } else {
1706235537faSAlex Bennée         data->is_io = false;
1707405c02d8SRichard Henderson         data->mr = NULL;
1708235537faSAlex Bennée     }
1709235537faSAlex Bennée     return true;
1710235537faSAlex Bennée }
1711235537faSAlex Bennée #endif
1712235537faSAlex Bennée 
171308dff435SRichard Henderson /*
17148cfdacaaSRichard Henderson  * Probe for a load/store operation.
17158cfdacaaSRichard Henderson  * Return the host address and into @flags.
17168cfdacaaSRichard Henderson  */
17178cfdacaaSRichard Henderson 
17188cfdacaaSRichard Henderson typedef struct MMULookupPageData {
17198cfdacaaSRichard Henderson     CPUTLBEntryFull *full;
17208cfdacaaSRichard Henderson     void *haddr;
1721fb2c53cbSAnton Johansson     vaddr addr;
17228cfdacaaSRichard Henderson     int flags;
17238cfdacaaSRichard Henderson     int size;
17248cfdacaaSRichard Henderson } MMULookupPageData;
17258cfdacaaSRichard Henderson 
17268cfdacaaSRichard Henderson typedef struct MMULookupLocals {
17278cfdacaaSRichard Henderson     MMULookupPageData page[2];
17288cfdacaaSRichard Henderson     MemOp memop;
17298cfdacaaSRichard Henderson     int mmu_idx;
17308cfdacaaSRichard Henderson } MMULookupLocals;
17318cfdacaaSRichard Henderson 
17328cfdacaaSRichard Henderson /**
17338cfdacaaSRichard Henderson  * mmu_lookup1: translate one page
1734d50ef446SAnton Johansson  * @cpu: generic cpu state
17358cfdacaaSRichard Henderson  * @data: lookup parameters
17368cfdacaaSRichard Henderson  * @mmu_idx: virtual address context
17378cfdacaaSRichard Henderson  * @access_type: load/store/code
17388cfdacaaSRichard Henderson  * @ra: return address into tcg generated code, or 0
17398cfdacaaSRichard Henderson  *
17408cfdacaaSRichard Henderson  * Resolve the translation for the one page at @data.addr, filling in
17418cfdacaaSRichard Henderson  * the rest of @data with the results.  If the translation fails,
17428cfdacaaSRichard Henderson  * tlb_fill will longjmp out.  Return true if the softmmu tlb for
17438cfdacaaSRichard Henderson  * @mmu_idx may have resized.
17448cfdacaaSRichard Henderson  */
1745d50ef446SAnton Johansson static bool mmu_lookup1(CPUState *cpu, MMULookupPageData *data,
17468cfdacaaSRichard Henderson                         int mmu_idx, MMUAccessType access_type, uintptr_t ra)
17478cfdacaaSRichard Henderson {
1748fb2c53cbSAnton Johansson     vaddr addr = data->addr;
1749d50ef446SAnton Johansson     uintptr_t index = tlb_index(cpu, mmu_idx, addr);
1750d50ef446SAnton Johansson     CPUTLBEntry *entry = tlb_entry(cpu, mmu_idx, addr);
17519e39de98SAnton Johansson     uint64_t tlb_addr = tlb_read_idx(entry, access_type);
17528cfdacaaSRichard Henderson     bool maybe_resized = false;
175358e8f1f6SRichard Henderson     CPUTLBEntryFull *full;
175458e8f1f6SRichard Henderson     int flags;
17558cfdacaaSRichard Henderson 
17568cfdacaaSRichard Henderson     /* If the TLB entry is for a different page, reload and try again.  */
17578cfdacaaSRichard Henderson     if (!tlb_hit(tlb_addr, addr)) {
1758d50ef446SAnton Johansson         if (!victim_tlb_hit(cpu, mmu_idx, index, access_type,
17598cfdacaaSRichard Henderson                             addr & TARGET_PAGE_MASK)) {
1760d50ef446SAnton Johansson             tlb_fill(cpu, addr, data->size, access_type, mmu_idx, ra);
17618cfdacaaSRichard Henderson             maybe_resized = true;
1762d50ef446SAnton Johansson             index = tlb_index(cpu, mmu_idx, addr);
1763d50ef446SAnton Johansson             entry = tlb_entry(cpu, mmu_idx, addr);
17648cfdacaaSRichard Henderson         }
17658cfdacaaSRichard Henderson         tlb_addr = tlb_read_idx(entry, access_type) & ~TLB_INVALID_MASK;
17668cfdacaaSRichard Henderson     }
17678cfdacaaSRichard Henderson 
1768d50ef446SAnton Johansson     full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index];
176958e8f1f6SRichard Henderson     flags = tlb_addr & (TLB_FLAGS_MASK & ~TLB_FORCE_SLOW);
177058e8f1f6SRichard Henderson     flags |= full->slow_flags[access_type];
177158e8f1f6SRichard Henderson 
177258e8f1f6SRichard Henderson     data->full = full;
177358e8f1f6SRichard Henderson     data->flags = flags;
17748cfdacaaSRichard Henderson     /* Compute haddr speculatively; depending on flags it might be invalid. */
17758cfdacaaSRichard Henderson     data->haddr = (void *)((uintptr_t)addr + entry->addend);
17768cfdacaaSRichard Henderson 
17778cfdacaaSRichard Henderson     return maybe_resized;
17788cfdacaaSRichard Henderson }
17798cfdacaaSRichard Henderson 
17808cfdacaaSRichard Henderson /**
17818cfdacaaSRichard Henderson  * mmu_watch_or_dirty
1782d50ef446SAnton Johansson  * @cpu: generic cpu state
17838cfdacaaSRichard Henderson  * @data: lookup parameters
17848cfdacaaSRichard Henderson  * @access_type: load/store/code
17858cfdacaaSRichard Henderson  * @ra: return address into tcg generated code, or 0
17868cfdacaaSRichard Henderson  *
17878cfdacaaSRichard Henderson  * Trigger watchpoints for @data.addr:@data.size;
17888cfdacaaSRichard Henderson  * record writes to protected clean pages.
17898cfdacaaSRichard Henderson  */
1790d50ef446SAnton Johansson static void mmu_watch_or_dirty(CPUState *cpu, MMULookupPageData *data,
17918cfdacaaSRichard Henderson                                MMUAccessType access_type, uintptr_t ra)
17928cfdacaaSRichard Henderson {
17938cfdacaaSRichard Henderson     CPUTLBEntryFull *full = data->full;
1794fb2c53cbSAnton Johansson     vaddr addr = data->addr;
17958cfdacaaSRichard Henderson     int flags = data->flags;
17968cfdacaaSRichard Henderson     int size = data->size;
17978cfdacaaSRichard Henderson 
17988cfdacaaSRichard Henderson     /* On watchpoint hit, this will longjmp out.  */
17998cfdacaaSRichard Henderson     if (flags & TLB_WATCHPOINT) {
18008cfdacaaSRichard Henderson         int wp = access_type == MMU_DATA_STORE ? BP_MEM_WRITE : BP_MEM_READ;
1801d50ef446SAnton Johansson         cpu_check_watchpoint(cpu, addr, size, full->attrs, wp, ra);
18028cfdacaaSRichard Henderson         flags &= ~TLB_WATCHPOINT;
18038cfdacaaSRichard Henderson     }
18048cfdacaaSRichard Henderson 
18058cfdacaaSRichard Henderson     /* Note that notdirty is only set for writes. */
18068cfdacaaSRichard Henderson     if (flags & TLB_NOTDIRTY) {
1807d50ef446SAnton Johansson         notdirty_write(cpu, addr, size, full, ra);
18088cfdacaaSRichard Henderson         flags &= ~TLB_NOTDIRTY;
18098cfdacaaSRichard Henderson     }
18108cfdacaaSRichard Henderson     data->flags = flags;
18118cfdacaaSRichard Henderson }
18128cfdacaaSRichard Henderson 
18138cfdacaaSRichard Henderson /**
18148cfdacaaSRichard Henderson  * mmu_lookup: translate page(s)
1815d50ef446SAnton Johansson  * @cpu: generic cpu state
18168cfdacaaSRichard Henderson  * @addr: virtual address
18178cfdacaaSRichard Henderson  * @oi: combined mmu_idx and MemOp
18188cfdacaaSRichard Henderson  * @ra: return address into tcg generated code, or 0
18198cfdacaaSRichard Henderson  * @access_type: load/store/code
18208cfdacaaSRichard Henderson  * @l: output result
18218cfdacaaSRichard Henderson  *
18228cfdacaaSRichard Henderson  * Resolve the translation for the page(s) beginning at @addr, for MemOp.size
18238cfdacaaSRichard Henderson  * bytes.  Return true if the lookup crosses a page boundary.
18248cfdacaaSRichard Henderson  */
1825d50ef446SAnton Johansson static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
18268cfdacaaSRichard Henderson                        uintptr_t ra, MMUAccessType type, MMULookupLocals *l)
18278cfdacaaSRichard Henderson {
18288cfdacaaSRichard Henderson     unsigned a_bits;
18298cfdacaaSRichard Henderson     bool crosspage;
18308cfdacaaSRichard Henderson     int flags;
18318cfdacaaSRichard Henderson 
18328cfdacaaSRichard Henderson     l->memop = get_memop(oi);
18338cfdacaaSRichard Henderson     l->mmu_idx = get_mmuidx(oi);
18348cfdacaaSRichard Henderson 
18358cfdacaaSRichard Henderson     tcg_debug_assert(l->mmu_idx < NB_MMU_MODES);
18368cfdacaaSRichard Henderson 
18378cfdacaaSRichard Henderson     /* Handle CPU specific unaligned behaviour */
18388cfdacaaSRichard Henderson     a_bits = get_alignment_bits(l->memop);
18398cfdacaaSRichard Henderson     if (addr & ((1 << a_bits) - 1)) {
1840d50ef446SAnton Johansson         cpu_unaligned_access(cpu, addr, type, l->mmu_idx, ra);
18418cfdacaaSRichard Henderson     }
18428cfdacaaSRichard Henderson 
18438cfdacaaSRichard Henderson     l->page[0].addr = addr;
18448cfdacaaSRichard Henderson     l->page[0].size = memop_size(l->memop);
18458cfdacaaSRichard Henderson     l->page[1].addr = (addr + l->page[0].size - 1) & TARGET_PAGE_MASK;
18468cfdacaaSRichard Henderson     l->page[1].size = 0;
18478cfdacaaSRichard Henderson     crosspage = (addr ^ l->page[1].addr) & TARGET_PAGE_MASK;
18488cfdacaaSRichard Henderson 
18498cfdacaaSRichard Henderson     if (likely(!crosspage)) {
1850d50ef446SAnton Johansson         mmu_lookup1(cpu, &l->page[0], l->mmu_idx, type, ra);
18518cfdacaaSRichard Henderson 
18528cfdacaaSRichard Henderson         flags = l->page[0].flags;
18538cfdacaaSRichard Henderson         if (unlikely(flags & (TLB_WATCHPOINT | TLB_NOTDIRTY))) {
1854d50ef446SAnton Johansson             mmu_watch_or_dirty(cpu, &l->page[0], type, ra);
18558cfdacaaSRichard Henderson         }
18568cfdacaaSRichard Henderson         if (unlikely(flags & TLB_BSWAP)) {
18578cfdacaaSRichard Henderson             l->memop ^= MO_BSWAP;
18588cfdacaaSRichard Henderson         }
18598cfdacaaSRichard Henderson     } else {
18608cfdacaaSRichard Henderson         /* Finish compute of page crossing. */
18618cfdacaaSRichard Henderson         int size0 = l->page[1].addr - addr;
18628cfdacaaSRichard Henderson         l->page[1].size = l->page[0].size - size0;
18638cfdacaaSRichard Henderson         l->page[0].size = size0;
18648cfdacaaSRichard Henderson 
18658cfdacaaSRichard Henderson         /*
18668cfdacaaSRichard Henderson          * Lookup both pages, recognizing exceptions from either.  If the
18678cfdacaaSRichard Henderson          * second lookup potentially resized, refresh first CPUTLBEntryFull.
18688cfdacaaSRichard Henderson          */
1869d50ef446SAnton Johansson         mmu_lookup1(cpu, &l->page[0], l->mmu_idx, type, ra);
1870d50ef446SAnton Johansson         if (mmu_lookup1(cpu, &l->page[1], l->mmu_idx, type, ra)) {
1871d50ef446SAnton Johansson             uintptr_t index = tlb_index(cpu, l->mmu_idx, addr);
1872d50ef446SAnton Johansson             l->page[0].full = &cpu->neg.tlb.d[l->mmu_idx].fulltlb[index];
18738cfdacaaSRichard Henderson         }
18748cfdacaaSRichard Henderson 
18758cfdacaaSRichard Henderson         flags = l->page[0].flags | l->page[1].flags;
18768cfdacaaSRichard Henderson         if (unlikely(flags & (TLB_WATCHPOINT | TLB_NOTDIRTY))) {
1877d50ef446SAnton Johansson             mmu_watch_or_dirty(cpu, &l->page[0], type, ra);
1878d50ef446SAnton Johansson             mmu_watch_or_dirty(cpu, &l->page[1], type, ra);
18798cfdacaaSRichard Henderson         }
18808cfdacaaSRichard Henderson 
18818cfdacaaSRichard Henderson         /*
18828cfdacaaSRichard Henderson          * Since target/sparc is the only user of TLB_BSWAP, and all
18838cfdacaaSRichard Henderson          * Sparc accesses are aligned, any treatment across two pages
18848cfdacaaSRichard Henderson          * would be arbitrary.  Refuse it until there's a use.
18858cfdacaaSRichard Henderson          */
18868cfdacaaSRichard Henderson         tcg_debug_assert((flags & TLB_BSWAP) == 0);
18878cfdacaaSRichard Henderson     }
18888cfdacaaSRichard Henderson 
188949fa457cSRichard Henderson     /*
189049fa457cSRichard Henderson      * This alignment check differs from the one above, in that this is
189149fa457cSRichard Henderson      * based on the atomicity of the operation. The intended use case is
189249fa457cSRichard Henderson      * the ARM memory type field of each PTE, where access to pages with
189349fa457cSRichard Henderson      * Device memory type require alignment.
189449fa457cSRichard Henderson      */
189549fa457cSRichard Henderson     if (unlikely(flags & TLB_CHECK_ALIGNED)) {
189649fa457cSRichard Henderson         MemOp size = l->memop & MO_SIZE;
189749fa457cSRichard Henderson 
189849fa457cSRichard Henderson         switch (l->memop & MO_ATOM_MASK) {
189949fa457cSRichard Henderson         case MO_ATOM_NONE:
190049fa457cSRichard Henderson             size = MO_8;
190149fa457cSRichard Henderson             break;
190249fa457cSRichard Henderson         case MO_ATOM_IFALIGN_PAIR:
190349fa457cSRichard Henderson         case MO_ATOM_WITHIN16_PAIR:
190449fa457cSRichard Henderson             size = size ? size - 1 : 0;
190549fa457cSRichard Henderson             break;
190649fa457cSRichard Henderson         default:
190749fa457cSRichard Henderson             break;
190849fa457cSRichard Henderson         }
190949fa457cSRichard Henderson         if (addr & ((1 << size) - 1)) {
191049fa457cSRichard Henderson             cpu_unaligned_access(cpu, addr, type, l->mmu_idx, ra);
191149fa457cSRichard Henderson         }
191249fa457cSRichard Henderson     }
191349fa457cSRichard Henderson 
19148cfdacaaSRichard Henderson     return crosspage;
19158cfdacaaSRichard Henderson }
19168cfdacaaSRichard Henderson 
19178cfdacaaSRichard Henderson /*
191808dff435SRichard Henderson  * Probe for an atomic operation.  Do not allow unaligned operations,
191908dff435SRichard Henderson  * or io operations to proceed.  Return the host address.
192008dff435SRichard Henderson  */
1921d560225fSAnton Johansson static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
1922b0326eb9SAnton Johansson                                int size, uintptr_t retaddr)
1923d9bb58e5SYang Zhong {
1924b826044fSRichard Henderson     uintptr_t mmu_idx = get_mmuidx(oi);
192514776ab5STony Nguyen     MemOp mop = get_memop(oi);
1926d9bb58e5SYang Zhong     int a_bits = get_alignment_bits(mop);
192708dff435SRichard Henderson     uintptr_t index;
192808dff435SRichard Henderson     CPUTLBEntry *tlbe;
1929b0326eb9SAnton Johansson     vaddr tlb_addr;
193034d49937SPeter Maydell     void *hostaddr;
1931417aeaffSRichard Henderson     CPUTLBEntryFull *full;
1932d9bb58e5SYang Zhong 
1933b826044fSRichard Henderson     tcg_debug_assert(mmu_idx < NB_MMU_MODES);
1934b826044fSRichard Henderson 
1935d9bb58e5SYang Zhong     /* Adjust the given return address.  */
1936d9bb58e5SYang Zhong     retaddr -= GETPC_ADJ;
1937d9bb58e5SYang Zhong 
1938d9bb58e5SYang Zhong     /* Enforce guest required alignment.  */
1939d9bb58e5SYang Zhong     if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
1940d9bb58e5SYang Zhong         /* ??? Maybe indicate atomic op to cpu_unaligned_access */
1941d560225fSAnton Johansson         cpu_unaligned_access(cpu, addr, MMU_DATA_STORE,
1942d9bb58e5SYang Zhong                              mmu_idx, retaddr);
1943d9bb58e5SYang Zhong     }
1944d9bb58e5SYang Zhong 
1945d9bb58e5SYang Zhong     /* Enforce qemu required alignment.  */
194608dff435SRichard Henderson     if (unlikely(addr & (size - 1))) {
1947d9bb58e5SYang Zhong         /* We get here if guest alignment was not requested,
1948d9bb58e5SYang Zhong            or was not enforced by cpu_unaligned_access above.
1949d9bb58e5SYang Zhong            We might widen the access and emulate, but for now
1950d9bb58e5SYang Zhong            mark an exception and exit the cpu loop.  */
1951d9bb58e5SYang Zhong         goto stop_the_world;
1952d9bb58e5SYang Zhong     }
1953d9bb58e5SYang Zhong 
1954d560225fSAnton Johansson     index = tlb_index(cpu, mmu_idx, addr);
1955d560225fSAnton Johansson     tlbe = tlb_entry(cpu, mmu_idx, addr);
195608dff435SRichard Henderson 
1957d9bb58e5SYang Zhong     /* Check TLB entry and enforce page permissions.  */
195808dff435SRichard Henderson     tlb_addr = tlb_addr_write(tlbe);
1959334692bcSPeter Maydell     if (!tlb_hit(tlb_addr, addr)) {
1960d560225fSAnton Johansson         if (!victim_tlb_hit(cpu, mmu_idx, index, MMU_DATA_STORE,
19610b3c75adSRichard Henderson                             addr & TARGET_PAGE_MASK)) {
1962d560225fSAnton Johansson             tlb_fill(cpu, addr, size,
196308dff435SRichard Henderson                      MMU_DATA_STORE, mmu_idx, retaddr);
1964d560225fSAnton Johansson             index = tlb_index(cpu, mmu_idx, addr);
1965d560225fSAnton Johansson             tlbe = tlb_entry(cpu, mmu_idx, addr);
1966d9bb58e5SYang Zhong         }
1967403f290cSEmilio G. Cota         tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
1968d9bb58e5SYang Zhong     }
1969d9bb58e5SYang Zhong 
1970417aeaffSRichard Henderson     /*
1971417aeaffSRichard Henderson      * Let the guest notice RMW on a write-only page.
1972417aeaffSRichard Henderson      * We have just verified that the page is writable.
1973417aeaffSRichard Henderson      * Subpage lookups may have left TLB_INVALID_MASK set,
1974417aeaffSRichard Henderson      * but addr_read will only be -1 if PAGE_READ was unset.
1975417aeaffSRichard Henderson      */
1976417aeaffSRichard Henderson     if (unlikely(tlbe->addr_read == -1)) {
1977d560225fSAnton Johansson         tlb_fill(cpu, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
197808dff435SRichard Henderson         /*
1979417aeaffSRichard Henderson          * Since we don't support reads and writes to different
1980417aeaffSRichard Henderson          * addresses, and we do have the proper page loaded for
1981417aeaffSRichard Henderson          * write, this shouldn't ever return.  But just in case,
1982417aeaffSRichard Henderson          * handle via stop-the-world.
198308dff435SRichard Henderson          */
198408dff435SRichard Henderson         goto stop_the_world;
198508dff435SRichard Henderson     }
1986187ba694SRichard Henderson     /* Collect tlb flags for read. */
1987417aeaffSRichard Henderson     tlb_addr |= tlbe->addr_read;
198808dff435SRichard Henderson 
198955df6fcfSPeter Maydell     /* Notice an IO access or a needs-MMU-lookup access */
19900953674eSRichard Henderson     if (unlikely(tlb_addr & (TLB_MMIO | TLB_DISCARD_WRITE))) {
1991d9bb58e5SYang Zhong         /* There's really nothing that can be done to
1992d9bb58e5SYang Zhong            support this apart from stop-the-world.  */
1993d9bb58e5SYang Zhong         goto stop_the_world;
1994d9bb58e5SYang Zhong     }
1995d9bb58e5SYang Zhong 
199634d49937SPeter Maydell     hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
1997d560225fSAnton Johansson     full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index];
199834d49937SPeter Maydell 
199934d49937SPeter Maydell     if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
2000d560225fSAnton Johansson         notdirty_write(cpu, addr, size, full, retaddr);
2001417aeaffSRichard Henderson     }
2002417aeaffSRichard Henderson 
2003187ba694SRichard Henderson     if (unlikely(tlb_addr & TLB_FORCE_SLOW)) {
2004187ba694SRichard Henderson         int wp_flags = 0;
2005187ba694SRichard Henderson 
2006187ba694SRichard Henderson         if (full->slow_flags[MMU_DATA_STORE] & TLB_WATCHPOINT) {
2007187ba694SRichard Henderson             wp_flags |= BP_MEM_WRITE;
2008187ba694SRichard Henderson         }
2009187ba694SRichard Henderson         if (full->slow_flags[MMU_DATA_LOAD] & TLB_WATCHPOINT) {
2010187ba694SRichard Henderson             wp_flags |= BP_MEM_READ;
2011187ba694SRichard Henderson         }
2012187ba694SRichard Henderson         if (wp_flags) {
2013d560225fSAnton Johansson             cpu_check_watchpoint(cpu, addr, size,
2014187ba694SRichard Henderson                                  full->attrs, wp_flags, retaddr);
2015187ba694SRichard Henderson         }
201634d49937SPeter Maydell     }
201734d49937SPeter Maydell 
201834d49937SPeter Maydell     return hostaddr;
2019d9bb58e5SYang Zhong 
2020d9bb58e5SYang Zhong  stop_the_world:
2021d560225fSAnton Johansson     cpu_loop_exit_atomic(cpu, retaddr);
2022d9bb58e5SYang Zhong }
2023d9bb58e5SYang Zhong 
2024eed56642SAlex Bennée /*
2025eed56642SAlex Bennée  * Load Helpers
2026eed56642SAlex Bennée  *
2027eed56642SAlex Bennée  * We support two different access types. SOFTMMU_CODE_ACCESS is
2028eed56642SAlex Bennée  * specifically for reading instructions from system memory. It is
2029eed56642SAlex Bennée  * called by the translation loop and in some helpers where the code
2030eed56642SAlex Bennée  * is disassembled. It shouldn't be called directly by guest code.
2031cdfac37bSRichard Henderson  *
2032eed56642SAlex Bennée  * For the benefit of TCG generated code, we want to avoid the
2033eed56642SAlex Bennée  * complication of ABI-specific return type promotion and always
2034eed56642SAlex Bennée  * return a value extended to the register size of the host. This is
2035eed56642SAlex Bennée  * tcg_target_long, except in the case of a 32-bit host and 64-bit
2036eed56642SAlex Bennée  * data, and for that we always have uint64_t.
2037eed56642SAlex Bennée  *
2038eed56642SAlex Bennée  * We don't bother with this widened value for SOFTMMU_CODE_ACCESS.
2039eed56642SAlex Bennée  */
2040eed56642SAlex Bennée 
20418cfdacaaSRichard Henderson /**
20428cfdacaaSRichard Henderson  * do_ld_mmio_beN:
2043d50ef446SAnton Johansson  * @cpu: generic cpu state
20441966855eSRichard Henderson  * @full: page parameters
20458cfdacaaSRichard Henderson  * @ret_be: accumulated data
20461966855eSRichard Henderson  * @addr: virtual address
20471966855eSRichard Henderson  * @size: number of bytes
20488cfdacaaSRichard Henderson  * @mmu_idx: virtual address context
20498cfdacaaSRichard Henderson  * @ra: return address into tcg generated code, or 0
2050a4a411fbSStefan Hajnoczi  * Context: BQL held
20518cfdacaaSRichard Henderson  *
20521966855eSRichard Henderson  * Load @size bytes from @addr, which is memory-mapped i/o.
20538cfdacaaSRichard Henderson  * The bytes are concatenated in big-endian order with @ret_be.
20548cfdacaaSRichard Henderson  */
2055d50ef446SAnton Johansson static uint64_t int_ld_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full,
20561966855eSRichard Henderson                                 uint64_t ret_be, vaddr addr, int size,
20578bf67267SRichard Henderson                                 int mmu_idx, MMUAccessType type, uintptr_t ra,
20588bf67267SRichard Henderson                                 MemoryRegion *mr, hwaddr mr_offset)
20592dd92606SRichard Henderson {
2060190aba80SRichard Henderson     do {
206113e61747SRichard Henderson         MemOp this_mop;
206213e61747SRichard Henderson         unsigned this_size;
206313e61747SRichard Henderson         uint64_t val;
206413e61747SRichard Henderson         MemTxResult r;
206513e61747SRichard Henderson 
2066190aba80SRichard Henderson         /* Read aligned pieces up to 8 bytes. */
206713e61747SRichard Henderson         this_mop = ctz32(size | (int)addr | 8);
206813e61747SRichard Henderson         this_size = 1 << this_mop;
206913e61747SRichard Henderson         this_mop |= MO_BE;
207013e61747SRichard Henderson 
20718bf67267SRichard Henderson         r = memory_region_dispatch_read(mr, mr_offset, &val,
20728bf67267SRichard Henderson                                         this_mop, full->attrs);
207313e61747SRichard Henderson         if (unlikely(r != MEMTX_OK)) {
2074d50ef446SAnton Johansson             io_failed(cpu, full, addr, this_size, type, mmu_idx, r, ra);
20758cfdacaaSRichard Henderson         }
207613e61747SRichard Henderson         if (this_size == 8) {
207713e61747SRichard Henderson             return val;
207813e61747SRichard Henderson         }
207913e61747SRichard Henderson 
208013e61747SRichard Henderson         ret_be = (ret_be << (this_size * 8)) | val;
208113e61747SRichard Henderson         addr += this_size;
208213e61747SRichard Henderson         mr_offset += this_size;
208313e61747SRichard Henderson         size -= this_size;
2084190aba80SRichard Henderson     } while (size);
208513e61747SRichard Henderson 
20868cfdacaaSRichard Henderson     return ret_be;
20878cfdacaaSRichard Henderson }
20888cfdacaaSRichard Henderson 
2089d50ef446SAnton Johansson static uint64_t do_ld_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full,
20908bf67267SRichard Henderson                                uint64_t ret_be, vaddr addr, int size,
20918bf67267SRichard Henderson                                int mmu_idx, MMUAccessType type, uintptr_t ra)
20928bf67267SRichard Henderson {
20938bf67267SRichard Henderson     MemoryRegionSection *section;
20948bf67267SRichard Henderson     MemoryRegion *mr;
20958bf67267SRichard Henderson     hwaddr mr_offset;
20968bf67267SRichard Henderson     MemTxAttrs attrs;
20978bf67267SRichard Henderson 
20988bf67267SRichard Henderson     tcg_debug_assert(size > 0 && size <= 8);
20998bf67267SRichard Henderson 
21008bf67267SRichard Henderson     attrs = full->attrs;
2101d50ef446SAnton Johansson     section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
21028bf67267SRichard Henderson     mr = section->mr;
21038bf67267SRichard Henderson 
21046aba908dSJonathan Cameron     BQL_LOCK_GUARD();
21056aba908dSJonathan Cameron     return int_ld_mmio_beN(cpu, full, ret_be, addr, size, mmu_idx,
21068bf67267SRichard Henderson                            type, ra, mr, mr_offset);
21078bf67267SRichard Henderson }
21088bf67267SRichard Henderson 
2109d50ef446SAnton Johansson static Int128 do_ld16_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full,
21108bf67267SRichard Henderson                                uint64_t ret_be, vaddr addr, int size,
21118bf67267SRichard Henderson                                int mmu_idx, uintptr_t ra)
21128bf67267SRichard Henderson {
21138bf67267SRichard Henderson     MemoryRegionSection *section;
21148bf67267SRichard Henderson     MemoryRegion *mr;
21158bf67267SRichard Henderson     hwaddr mr_offset;
21168bf67267SRichard Henderson     MemTxAttrs attrs;
21178bf67267SRichard Henderson     uint64_t a, b;
21188bf67267SRichard Henderson 
21198bf67267SRichard Henderson     tcg_debug_assert(size > 8 && size <= 16);
21208bf67267SRichard Henderson 
21218bf67267SRichard Henderson     attrs = full->attrs;
2122d50ef446SAnton Johansson     section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
21238bf67267SRichard Henderson     mr = section->mr;
21248bf67267SRichard Henderson 
21256aba908dSJonathan Cameron     BQL_LOCK_GUARD();
2126d50ef446SAnton Johansson     a = int_ld_mmio_beN(cpu, full, ret_be, addr, size - 8, mmu_idx,
21278bf67267SRichard Henderson                         MMU_DATA_LOAD, ra, mr, mr_offset);
2128d50ef446SAnton Johansson     b = int_ld_mmio_beN(cpu, full, ret_be, addr + size - 8, 8, mmu_idx,
21298bf67267SRichard Henderson                         MMU_DATA_LOAD, ra, mr, mr_offset + size - 8);
21308bf67267SRichard Henderson     return int128_make128(b, a);
21318bf67267SRichard Henderson }
21328bf67267SRichard Henderson 
21338cfdacaaSRichard Henderson /**
21348cfdacaaSRichard Henderson  * do_ld_bytes_beN
21358cfdacaaSRichard Henderson  * @p: translation parameters
21368cfdacaaSRichard Henderson  * @ret_be: accumulated data
21378cfdacaaSRichard Henderson  *
21388cfdacaaSRichard Henderson  * Load @p->size bytes from @p->haddr, which is RAM.
21398cfdacaaSRichard Henderson  * The bytes to concatenated in big-endian order with @ret_be.
21408cfdacaaSRichard Henderson  */
21418cfdacaaSRichard Henderson static uint64_t do_ld_bytes_beN(MMULookupPageData *p, uint64_t ret_be)
21428cfdacaaSRichard Henderson {
21438cfdacaaSRichard Henderson     uint8_t *haddr = p->haddr;
21448cfdacaaSRichard Henderson     int i, size = p->size;
21458cfdacaaSRichard Henderson 
21468cfdacaaSRichard Henderson     for (i = 0; i < size; i++) {
21478cfdacaaSRichard Henderson         ret_be = (ret_be << 8) | haddr[i];
21488cfdacaaSRichard Henderson     }
21498cfdacaaSRichard Henderson     return ret_be;
21508cfdacaaSRichard Henderson }
21518cfdacaaSRichard Henderson 
2152cdfac37bSRichard Henderson /**
2153cdfac37bSRichard Henderson  * do_ld_parts_beN
2154cdfac37bSRichard Henderson  * @p: translation parameters
2155cdfac37bSRichard Henderson  * @ret_be: accumulated data
2156cdfac37bSRichard Henderson  *
2157cdfac37bSRichard Henderson  * As do_ld_bytes_beN, but atomically on each aligned part.
2158cdfac37bSRichard Henderson  */
2159cdfac37bSRichard Henderson static uint64_t do_ld_parts_beN(MMULookupPageData *p, uint64_t ret_be)
2160cdfac37bSRichard Henderson {
2161cdfac37bSRichard Henderson     void *haddr = p->haddr;
2162cdfac37bSRichard Henderson     int size = p->size;
2163cdfac37bSRichard Henderson 
2164cdfac37bSRichard Henderson     do {
2165cdfac37bSRichard Henderson         uint64_t x;
2166cdfac37bSRichard Henderson         int n;
2167cdfac37bSRichard Henderson 
2168cdfac37bSRichard Henderson         /*
2169cdfac37bSRichard Henderson          * Find minimum of alignment and size.
2170cdfac37bSRichard Henderson          * This is slightly stronger than required by MO_ATOM_SUBALIGN, which
2171cdfac37bSRichard Henderson          * would have only checked the low bits of addr|size once at the start,
2172cdfac37bSRichard Henderson          * but is just as easy.
2173cdfac37bSRichard Henderson          */
2174cdfac37bSRichard Henderson         switch (((uintptr_t)haddr | size) & 7) {
2175cdfac37bSRichard Henderson         case 4:
2176cdfac37bSRichard Henderson             x = cpu_to_be32(load_atomic4(haddr));
2177cdfac37bSRichard Henderson             ret_be = (ret_be << 32) | x;
2178cdfac37bSRichard Henderson             n = 4;
2179cdfac37bSRichard Henderson             break;
2180cdfac37bSRichard Henderson         case 2:
2181cdfac37bSRichard Henderson         case 6:
2182cdfac37bSRichard Henderson             x = cpu_to_be16(load_atomic2(haddr));
2183cdfac37bSRichard Henderson             ret_be = (ret_be << 16) | x;
2184cdfac37bSRichard Henderson             n = 2;
2185cdfac37bSRichard Henderson             break;
2186cdfac37bSRichard Henderson         default:
2187cdfac37bSRichard Henderson             x = *(uint8_t *)haddr;
2188cdfac37bSRichard Henderson             ret_be = (ret_be << 8) | x;
2189cdfac37bSRichard Henderson             n = 1;
2190cdfac37bSRichard Henderson             break;
2191cdfac37bSRichard Henderson         case 0:
2192cdfac37bSRichard Henderson             g_assert_not_reached();
2193cdfac37bSRichard Henderson         }
2194cdfac37bSRichard Henderson         haddr += n;
2195cdfac37bSRichard Henderson         size -= n;
2196cdfac37bSRichard Henderson     } while (size != 0);
2197cdfac37bSRichard Henderson     return ret_be;
2198cdfac37bSRichard Henderson }
2199cdfac37bSRichard Henderson 
2200cdfac37bSRichard Henderson /**
2201cdfac37bSRichard Henderson  * do_ld_parts_be4
2202cdfac37bSRichard Henderson  * @p: translation parameters
2203cdfac37bSRichard Henderson  * @ret_be: accumulated data
2204cdfac37bSRichard Henderson  *
2205cdfac37bSRichard Henderson  * As do_ld_bytes_beN, but with one atomic load.
2206cdfac37bSRichard Henderson  * Four aligned bytes are guaranteed to cover the load.
2207cdfac37bSRichard Henderson  */
2208cdfac37bSRichard Henderson static uint64_t do_ld_whole_be4(MMULookupPageData *p, uint64_t ret_be)
2209cdfac37bSRichard Henderson {
2210cdfac37bSRichard Henderson     int o = p->addr & 3;
2211cdfac37bSRichard Henderson     uint32_t x = load_atomic4(p->haddr - o);
2212cdfac37bSRichard Henderson 
2213cdfac37bSRichard Henderson     x = cpu_to_be32(x);
2214cdfac37bSRichard Henderson     x <<= o * 8;
2215cdfac37bSRichard Henderson     x >>= (4 - p->size) * 8;
2216cdfac37bSRichard Henderson     return (ret_be << (p->size * 8)) | x;
2217cdfac37bSRichard Henderson }
2218cdfac37bSRichard Henderson 
2219cdfac37bSRichard Henderson /**
2220cdfac37bSRichard Henderson  * do_ld_parts_be8
2221cdfac37bSRichard Henderson  * @p: translation parameters
2222cdfac37bSRichard Henderson  * @ret_be: accumulated data
2223cdfac37bSRichard Henderson  *
2224cdfac37bSRichard Henderson  * As do_ld_bytes_beN, but with one atomic load.
2225cdfac37bSRichard Henderson  * Eight aligned bytes are guaranteed to cover the load.
2226cdfac37bSRichard Henderson  */
2227d50ef446SAnton Johansson static uint64_t do_ld_whole_be8(CPUState *cpu, uintptr_t ra,
2228cdfac37bSRichard Henderson                                 MMULookupPageData *p, uint64_t ret_be)
2229cdfac37bSRichard Henderson {
2230cdfac37bSRichard Henderson     int o = p->addr & 7;
223173fda56fSAnton Johansson     uint64_t x = load_atomic8_or_exit(cpu, ra, p->haddr - o);
2232cdfac37bSRichard Henderson 
2233cdfac37bSRichard Henderson     x = cpu_to_be64(x);
2234cdfac37bSRichard Henderson     x <<= o * 8;
2235cdfac37bSRichard Henderson     x >>= (8 - p->size) * 8;
2236cdfac37bSRichard Henderson     return (ret_be << (p->size * 8)) | x;
2237cdfac37bSRichard Henderson }
2238cdfac37bSRichard Henderson 
223935c653c4SRichard Henderson /**
224035c653c4SRichard Henderson  * do_ld_parts_be16
224135c653c4SRichard Henderson  * @p: translation parameters
224235c653c4SRichard Henderson  * @ret_be: accumulated data
224335c653c4SRichard Henderson  *
224435c653c4SRichard Henderson  * As do_ld_bytes_beN, but with one atomic load.
224535c653c4SRichard Henderson  * 16 aligned bytes are guaranteed to cover the load.
224635c653c4SRichard Henderson  */
2247d50ef446SAnton Johansson static Int128 do_ld_whole_be16(CPUState *cpu, uintptr_t ra,
224835c653c4SRichard Henderson                                MMULookupPageData *p, uint64_t ret_be)
224935c653c4SRichard Henderson {
225035c653c4SRichard Henderson     int o = p->addr & 15;
225173fda56fSAnton Johansson     Int128 x, y = load_atomic16_or_exit(cpu, ra, p->haddr - o);
225235c653c4SRichard Henderson     int size = p->size;
225335c653c4SRichard Henderson 
225435c653c4SRichard Henderson     if (!HOST_BIG_ENDIAN) {
225535c653c4SRichard Henderson         y = bswap128(y);
225635c653c4SRichard Henderson     }
225735c653c4SRichard Henderson     y = int128_lshift(y, o * 8);
225835c653c4SRichard Henderson     y = int128_urshift(y, (16 - size) * 8);
225935c653c4SRichard Henderson     x = int128_make64(ret_be);
226035c653c4SRichard Henderson     x = int128_lshift(x, size * 8);
226135c653c4SRichard Henderson     return int128_or(x, y);
226235c653c4SRichard Henderson }
226335c653c4SRichard Henderson 
22648cfdacaaSRichard Henderson /*
22658cfdacaaSRichard Henderson  * Wrapper for the above.
22668cfdacaaSRichard Henderson  */
2267d50ef446SAnton Johansson static uint64_t do_ld_beN(CPUState *cpu, MMULookupPageData *p,
2268cdfac37bSRichard Henderson                           uint64_t ret_be, int mmu_idx, MMUAccessType type,
2269cdfac37bSRichard Henderson                           MemOp mop, uintptr_t ra)
22708cfdacaaSRichard Henderson {
2271cdfac37bSRichard Henderson     MemOp atom;
2272cdfac37bSRichard Henderson     unsigned tmp, half_size;
2273cdfac37bSRichard Henderson 
22748cfdacaaSRichard Henderson     if (unlikely(p->flags & TLB_MMIO)) {
2275d50ef446SAnton Johansson         return do_ld_mmio_beN(cpu, p->full, ret_be, p->addr, p->size,
22761966855eSRichard Henderson                               mmu_idx, type, ra);
2277cdfac37bSRichard Henderson     }
2278cdfac37bSRichard Henderson 
2279cdfac37bSRichard Henderson     /*
2280cdfac37bSRichard Henderson      * It is a given that we cross a page and therefore there is no
2281cdfac37bSRichard Henderson      * atomicity for the load as a whole, but subobjects may need attention.
2282cdfac37bSRichard Henderson      */
2283cdfac37bSRichard Henderson     atom = mop & MO_ATOM_MASK;
2284cdfac37bSRichard Henderson     switch (atom) {
2285cdfac37bSRichard Henderson     case MO_ATOM_SUBALIGN:
2286cdfac37bSRichard Henderson         return do_ld_parts_beN(p, ret_be);
2287cdfac37bSRichard Henderson 
2288cdfac37bSRichard Henderson     case MO_ATOM_IFALIGN_PAIR:
2289cdfac37bSRichard Henderson     case MO_ATOM_WITHIN16_PAIR:
2290cdfac37bSRichard Henderson         tmp = mop & MO_SIZE;
2291cdfac37bSRichard Henderson         tmp = tmp ? tmp - 1 : 0;
2292cdfac37bSRichard Henderson         half_size = 1 << tmp;
2293cdfac37bSRichard Henderson         if (atom == MO_ATOM_IFALIGN_PAIR
2294cdfac37bSRichard Henderson             ? p->size == half_size
2295cdfac37bSRichard Henderson             : p->size >= half_size) {
2296cdfac37bSRichard Henderson             if (!HAVE_al8_fast && p->size < 4) {
2297cdfac37bSRichard Henderson                 return do_ld_whole_be4(p, ret_be);
22988cfdacaaSRichard Henderson             } else {
2299d50ef446SAnton Johansson                 return do_ld_whole_be8(cpu, ra, p, ret_be);
2300cdfac37bSRichard Henderson             }
2301cdfac37bSRichard Henderson         }
2302cdfac37bSRichard Henderson         /* fall through */
2303cdfac37bSRichard Henderson 
2304cdfac37bSRichard Henderson     case MO_ATOM_IFALIGN:
2305cdfac37bSRichard Henderson     case MO_ATOM_WITHIN16:
2306cdfac37bSRichard Henderson     case MO_ATOM_NONE:
23078cfdacaaSRichard Henderson         return do_ld_bytes_beN(p, ret_be);
2308cdfac37bSRichard Henderson 
2309cdfac37bSRichard Henderson     default:
2310cdfac37bSRichard Henderson         g_assert_not_reached();
23118cfdacaaSRichard Henderson     }
23128cfdacaaSRichard Henderson }
23138cfdacaaSRichard Henderson 
231435c653c4SRichard Henderson /*
231535c653c4SRichard Henderson  * Wrapper for the above, for 8 < size < 16.
231635c653c4SRichard Henderson  */
2317d50ef446SAnton Johansson static Int128 do_ld16_beN(CPUState *cpu, MMULookupPageData *p,
231835c653c4SRichard Henderson                           uint64_t a, int mmu_idx, MemOp mop, uintptr_t ra)
231935c653c4SRichard Henderson {
232035c653c4SRichard Henderson     int size = p->size;
232135c653c4SRichard Henderson     uint64_t b;
232235c653c4SRichard Henderson     MemOp atom;
232335c653c4SRichard Henderson 
232435c653c4SRichard Henderson     if (unlikely(p->flags & TLB_MMIO)) {
2325d50ef446SAnton Johansson         return do_ld16_mmio_beN(cpu, p->full, a, p->addr, size, mmu_idx, ra);
232635c653c4SRichard Henderson     }
232735c653c4SRichard Henderson 
232835c653c4SRichard Henderson     /*
232935c653c4SRichard Henderson      * It is a given that we cross a page and therefore there is no
233035c653c4SRichard Henderson      * atomicity for the load as a whole, but subobjects may need attention.
233135c653c4SRichard Henderson      */
233235c653c4SRichard Henderson     atom = mop & MO_ATOM_MASK;
233335c653c4SRichard Henderson     switch (atom) {
233435c653c4SRichard Henderson     case MO_ATOM_SUBALIGN:
233535c653c4SRichard Henderson         p->size = size - 8;
233635c653c4SRichard Henderson         a = do_ld_parts_beN(p, a);
233735c653c4SRichard Henderson         p->haddr += size - 8;
233835c653c4SRichard Henderson         p->size = 8;
233935c653c4SRichard Henderson         b = do_ld_parts_beN(p, 0);
234035c653c4SRichard Henderson         break;
234135c653c4SRichard Henderson 
234235c653c4SRichard Henderson     case MO_ATOM_WITHIN16_PAIR:
234335c653c4SRichard Henderson         /* Since size > 8, this is the half that must be atomic. */
2344d50ef446SAnton Johansson         return do_ld_whole_be16(cpu, ra, p, a);
234535c653c4SRichard Henderson 
234635c653c4SRichard Henderson     case MO_ATOM_IFALIGN_PAIR:
234735c653c4SRichard Henderson         /*
234835c653c4SRichard Henderson          * Since size > 8, both halves are misaligned,
234935c653c4SRichard Henderson          * and so neither is atomic.
235035c653c4SRichard Henderson          */
235135c653c4SRichard Henderson     case MO_ATOM_IFALIGN:
235235c653c4SRichard Henderson     case MO_ATOM_WITHIN16:
235335c653c4SRichard Henderson     case MO_ATOM_NONE:
235435c653c4SRichard Henderson         p->size = size - 8;
235535c653c4SRichard Henderson         a = do_ld_bytes_beN(p, a);
235635c653c4SRichard Henderson         b = ldq_be_p(p->haddr + size - 8);
235735c653c4SRichard Henderson         break;
235835c653c4SRichard Henderson 
235935c653c4SRichard Henderson     default:
236035c653c4SRichard Henderson         g_assert_not_reached();
236135c653c4SRichard Henderson     }
236235c653c4SRichard Henderson 
236335c653c4SRichard Henderson     return int128_make128(b, a);
236435c653c4SRichard Henderson }
236535c653c4SRichard Henderson 
2366d50ef446SAnton Johansson static uint8_t do_ld_1(CPUState *cpu, MMULookupPageData *p, int mmu_idx,
23678cfdacaaSRichard Henderson                        MMUAccessType type, uintptr_t ra)
23688cfdacaaSRichard Henderson {
23698cfdacaaSRichard Henderson     if (unlikely(p->flags & TLB_MMIO)) {
2370d50ef446SAnton Johansson         return do_ld_mmio_beN(cpu, p->full, 0, p->addr, 1, mmu_idx, type, ra);
23718cfdacaaSRichard Henderson     } else {
23728cfdacaaSRichard Henderson         return *(uint8_t *)p->haddr;
23738cfdacaaSRichard Henderson     }
23748cfdacaaSRichard Henderson }
23758cfdacaaSRichard Henderson 
2376d50ef446SAnton Johansson static uint16_t do_ld_2(CPUState *cpu, MMULookupPageData *p, int mmu_idx,
23778cfdacaaSRichard Henderson                         MMUAccessType type, MemOp memop, uintptr_t ra)
23788cfdacaaSRichard Henderson {
2379f7eaf9d7SRichard Henderson     uint16_t ret;
23808cfdacaaSRichard Henderson 
23818cfdacaaSRichard Henderson     if (unlikely(p->flags & TLB_MMIO)) {
2382d50ef446SAnton Johansson         ret = do_ld_mmio_beN(cpu, p->full, 0, p->addr, 2, mmu_idx, type, ra);
2383f7eaf9d7SRichard Henderson         if ((memop & MO_BSWAP) == MO_LE) {
2384f7eaf9d7SRichard Henderson             ret = bswap16(ret);
23858cfdacaaSRichard Henderson         }
2386f7eaf9d7SRichard Henderson     } else {
23878cfdacaaSRichard Henderson         /* Perform the load host endian, then swap if necessary. */
238873fda56fSAnton Johansson         ret = load_atom_2(cpu, ra, p->haddr, memop);
23898cfdacaaSRichard Henderson         if (memop & MO_BSWAP) {
23908cfdacaaSRichard Henderson             ret = bswap16(ret);
23918cfdacaaSRichard Henderson         }
2392f7eaf9d7SRichard Henderson     }
23938cfdacaaSRichard Henderson     return ret;
23948cfdacaaSRichard Henderson }
23958cfdacaaSRichard Henderson 
2396d50ef446SAnton Johansson static uint32_t do_ld_4(CPUState *cpu, MMULookupPageData *p, int mmu_idx,
23978cfdacaaSRichard Henderson                         MMUAccessType type, MemOp memop, uintptr_t ra)
23988cfdacaaSRichard Henderson {
23998cfdacaaSRichard Henderson     uint32_t ret;
24008cfdacaaSRichard Henderson 
24018cfdacaaSRichard Henderson     if (unlikely(p->flags & TLB_MMIO)) {
2402d50ef446SAnton Johansson         ret = do_ld_mmio_beN(cpu, p->full, 0, p->addr, 4, mmu_idx, type, ra);
2403f7eaf9d7SRichard Henderson         if ((memop & MO_BSWAP) == MO_LE) {
2404f7eaf9d7SRichard Henderson             ret = bswap32(ret);
24058cfdacaaSRichard Henderson         }
2406f7eaf9d7SRichard Henderson     } else {
24078cfdacaaSRichard Henderson         /* Perform the load host endian. */
240873fda56fSAnton Johansson         ret = load_atom_4(cpu, ra, p->haddr, memop);
24098cfdacaaSRichard Henderson         if (memop & MO_BSWAP) {
24108cfdacaaSRichard Henderson             ret = bswap32(ret);
24118cfdacaaSRichard Henderson         }
2412f7eaf9d7SRichard Henderson     }
24138cfdacaaSRichard Henderson     return ret;
24148cfdacaaSRichard Henderson }
24158cfdacaaSRichard Henderson 
2416d50ef446SAnton Johansson static uint64_t do_ld_8(CPUState *cpu, MMULookupPageData *p, int mmu_idx,
24178cfdacaaSRichard Henderson                         MMUAccessType type, MemOp memop, uintptr_t ra)
24188cfdacaaSRichard Henderson {
24198cfdacaaSRichard Henderson     uint64_t ret;
24208cfdacaaSRichard Henderson 
24218cfdacaaSRichard Henderson     if (unlikely(p->flags & TLB_MMIO)) {
2422d50ef446SAnton Johansson         ret = do_ld_mmio_beN(cpu, p->full, 0, p->addr, 8, mmu_idx, type, ra);
2423f7eaf9d7SRichard Henderson         if ((memop & MO_BSWAP) == MO_LE) {
2424f7eaf9d7SRichard Henderson             ret = bswap64(ret);
24258cfdacaaSRichard Henderson         }
2426f7eaf9d7SRichard Henderson     } else {
24278cfdacaaSRichard Henderson         /* Perform the load host endian. */
242873fda56fSAnton Johansson         ret = load_atom_8(cpu, ra, p->haddr, memop);
24298cfdacaaSRichard Henderson         if (memop & MO_BSWAP) {
24308cfdacaaSRichard Henderson             ret = bswap64(ret);
24318cfdacaaSRichard Henderson         }
2432f7eaf9d7SRichard Henderson     }
24338cfdacaaSRichard Henderson     return ret;
24348cfdacaaSRichard Henderson }
24358cfdacaaSRichard Henderson 
2436d50ef446SAnton Johansson static uint8_t do_ld1_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
24378cfdacaaSRichard Henderson                           uintptr_t ra, MMUAccessType access_type)
24388cfdacaaSRichard Henderson {
24398cfdacaaSRichard Henderson     MMULookupLocals l;
24408cfdacaaSRichard Henderson     bool crosspage;
24418cfdacaaSRichard Henderson 
2442f86e8f3dSRichard Henderson     cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
2443d50ef446SAnton Johansson     crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l);
24448cfdacaaSRichard Henderson     tcg_debug_assert(!crosspage);
24458cfdacaaSRichard Henderson 
2446d50ef446SAnton Johansson     return do_ld_1(cpu, &l.page[0], l.mmu_idx, access_type, ra);
24472dd92606SRichard Henderson }
24482dd92606SRichard Henderson 
2449d50ef446SAnton Johansson static uint16_t do_ld2_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
24508cfdacaaSRichard Henderson                            uintptr_t ra, MMUAccessType access_type)
24512dd92606SRichard Henderson {
24528cfdacaaSRichard Henderson     MMULookupLocals l;
24538cfdacaaSRichard Henderson     bool crosspage;
24548cfdacaaSRichard Henderson     uint16_t ret;
24558cfdacaaSRichard Henderson     uint8_t a, b;
24568cfdacaaSRichard Henderson 
2457f86e8f3dSRichard Henderson     cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
2458d50ef446SAnton Johansson     crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l);
24598cfdacaaSRichard Henderson     if (likely(!crosspage)) {
2460d50ef446SAnton Johansson         return do_ld_2(cpu, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
24618cfdacaaSRichard Henderson     }
24628cfdacaaSRichard Henderson 
2463d50ef446SAnton Johansson     a = do_ld_1(cpu, &l.page[0], l.mmu_idx, access_type, ra);
2464d50ef446SAnton Johansson     b = do_ld_1(cpu, &l.page[1], l.mmu_idx, access_type, ra);
24658cfdacaaSRichard Henderson 
24668cfdacaaSRichard Henderson     if ((l.memop & MO_BSWAP) == MO_LE) {
24678cfdacaaSRichard Henderson         ret = a | (b << 8);
24688cfdacaaSRichard Henderson     } else {
24698cfdacaaSRichard Henderson         ret = b | (a << 8);
24708cfdacaaSRichard Henderson     }
24718cfdacaaSRichard Henderson     return ret;
2472eed56642SAlex Bennée }
2473eed56642SAlex Bennée 
2474d50ef446SAnton Johansson static uint32_t do_ld4_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
24758cfdacaaSRichard Henderson                            uintptr_t ra, MMUAccessType access_type)
24762dd92606SRichard Henderson {
24778cfdacaaSRichard Henderson     MMULookupLocals l;
24788cfdacaaSRichard Henderson     bool crosspage;
24798cfdacaaSRichard Henderson     uint32_t ret;
24808cfdacaaSRichard Henderson 
2481f86e8f3dSRichard Henderson     cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
2482d50ef446SAnton Johansson     crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l);
24838cfdacaaSRichard Henderson     if (likely(!crosspage)) {
2484d50ef446SAnton Johansson         return do_ld_4(cpu, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
24858cfdacaaSRichard Henderson     }
24868cfdacaaSRichard Henderson 
2487d50ef446SAnton Johansson     ret = do_ld_beN(cpu, &l.page[0], 0, l.mmu_idx, access_type, l.memop, ra);
2488d50ef446SAnton Johansson     ret = do_ld_beN(cpu, &l.page[1], ret, l.mmu_idx, access_type, l.memop, ra);
24898cfdacaaSRichard Henderson     if ((l.memop & MO_BSWAP) == MO_LE) {
24908cfdacaaSRichard Henderson         ret = bswap32(ret);
24918cfdacaaSRichard Henderson     }
24928cfdacaaSRichard Henderson     return ret;
2493eed56642SAlex Bennée }
2494eed56642SAlex Bennée 
2495d50ef446SAnton Johansson static uint64_t do_ld8_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
24968cfdacaaSRichard Henderson                            uintptr_t ra, MMUAccessType access_type)
24978cfdacaaSRichard Henderson {
24988cfdacaaSRichard Henderson     MMULookupLocals l;
24998cfdacaaSRichard Henderson     bool crosspage;
25008cfdacaaSRichard Henderson     uint64_t ret;
25018cfdacaaSRichard Henderson 
2502f86e8f3dSRichard Henderson     cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
2503d50ef446SAnton Johansson     crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l);
25048cfdacaaSRichard Henderson     if (likely(!crosspage)) {
2505d50ef446SAnton Johansson         return do_ld_8(cpu, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
25068cfdacaaSRichard Henderson     }
25078cfdacaaSRichard Henderson 
2508d50ef446SAnton Johansson     ret = do_ld_beN(cpu, &l.page[0], 0, l.mmu_idx, access_type, l.memop, ra);
2509d50ef446SAnton Johansson     ret = do_ld_beN(cpu, &l.page[1], ret, l.mmu_idx, access_type, l.memop, ra);
25108cfdacaaSRichard Henderson     if ((l.memop & MO_BSWAP) == MO_LE) {
25118cfdacaaSRichard Henderson         ret = bswap64(ret);
25128cfdacaaSRichard Henderson     }
25138cfdacaaSRichard Henderson     return ret;
2514eed56642SAlex Bennée }
2515eed56642SAlex Bennée 
2516d50ef446SAnton Johansson static Int128 do_ld16_mmu(CPUState *cpu, vaddr addr,
251735c653c4SRichard Henderson                           MemOpIdx oi, uintptr_t ra)
251835c653c4SRichard Henderson {
251935c653c4SRichard Henderson     MMULookupLocals l;
252035c653c4SRichard Henderson     bool crosspage;
252135c653c4SRichard Henderson     uint64_t a, b;
252235c653c4SRichard Henderson     Int128 ret;
252335c653c4SRichard Henderson     int first;
252435c653c4SRichard Henderson 
2525f86e8f3dSRichard Henderson     cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
2526d50ef446SAnton Johansson     crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_LOAD, &l);
252735c653c4SRichard Henderson     if (likely(!crosspage)) {
252835c653c4SRichard Henderson         if (unlikely(l.page[0].flags & TLB_MMIO)) {
2529d50ef446SAnton Johansson             ret = do_ld16_mmio_beN(cpu, l.page[0].full, 0, addr, 16,
25308bf67267SRichard Henderson                                    l.mmu_idx, ra);
2531f7eaf9d7SRichard Henderson             if ((l.memop & MO_BSWAP) == MO_LE) {
2532f7eaf9d7SRichard Henderson                 ret = bswap128(ret);
253335c653c4SRichard Henderson             }
2534f7eaf9d7SRichard Henderson         } else {
2535f7eaf9d7SRichard Henderson             /* Perform the load host endian. */
253673fda56fSAnton Johansson             ret = load_atom_16(cpu, ra, l.page[0].haddr, l.memop);
253735c653c4SRichard Henderson             if (l.memop & MO_BSWAP) {
253835c653c4SRichard Henderson                 ret = bswap128(ret);
253935c653c4SRichard Henderson             }
2540f7eaf9d7SRichard Henderson         }
254135c653c4SRichard Henderson         return ret;
254235c653c4SRichard Henderson     }
254335c653c4SRichard Henderson 
254435c653c4SRichard Henderson     first = l.page[0].size;
254535c653c4SRichard Henderson     if (first == 8) {
254635c653c4SRichard Henderson         MemOp mop8 = (l.memop & ~MO_SIZE) | MO_64;
254735c653c4SRichard Henderson 
2548d50ef446SAnton Johansson         a = do_ld_8(cpu, &l.page[0], l.mmu_idx, MMU_DATA_LOAD, mop8, ra);
2549d50ef446SAnton Johansson         b = do_ld_8(cpu, &l.page[1], l.mmu_idx, MMU_DATA_LOAD, mop8, ra);
255035c653c4SRichard Henderson         if ((mop8 & MO_BSWAP) == MO_LE) {
255135c653c4SRichard Henderson             ret = int128_make128(a, b);
255235c653c4SRichard Henderson         } else {
255335c653c4SRichard Henderson             ret = int128_make128(b, a);
255435c653c4SRichard Henderson         }
255535c653c4SRichard Henderson         return ret;
255635c653c4SRichard Henderson     }
255735c653c4SRichard Henderson 
255835c653c4SRichard Henderson     if (first < 8) {
2559d50ef446SAnton Johansson         a = do_ld_beN(cpu, &l.page[0], 0, l.mmu_idx,
256035c653c4SRichard Henderson                       MMU_DATA_LOAD, l.memop, ra);
2561d50ef446SAnton Johansson         ret = do_ld16_beN(cpu, &l.page[1], a, l.mmu_idx, l.memop, ra);
256235c653c4SRichard Henderson     } else {
2563d50ef446SAnton Johansson         ret = do_ld16_beN(cpu, &l.page[0], 0, l.mmu_idx, l.memop, ra);
256435c653c4SRichard Henderson         b = int128_getlo(ret);
256535c653c4SRichard Henderson         ret = int128_lshift(ret, l.page[1].size * 8);
256635c653c4SRichard Henderson         a = int128_gethi(ret);
2567d50ef446SAnton Johansson         b = do_ld_beN(cpu, &l.page[1], b, l.mmu_idx,
256835c653c4SRichard Henderson                       MMU_DATA_LOAD, l.memop, ra);
256935c653c4SRichard Henderson         ret = int128_make128(b, a);
257035c653c4SRichard Henderson     }
257135c653c4SRichard Henderson     if ((l.memop & MO_BSWAP) == MO_LE) {
257235c653c4SRichard Henderson         ret = bswap128(ret);
257335c653c4SRichard Henderson     }
257435c653c4SRichard Henderson     return ret;
257535c653c4SRichard Henderson }
257635c653c4SRichard Henderson 
2577d03f1408SRichard Henderson /*
2578eed56642SAlex Bennée  * Store Helpers
2579eed56642SAlex Bennée  */
2580eed56642SAlex Bennée 
258159213461SRichard Henderson /**
258259213461SRichard Henderson  * do_st_mmio_leN:
2583d50ef446SAnton Johansson  * @cpu: generic cpu state
25841966855eSRichard Henderson  * @full: page parameters
258559213461SRichard Henderson  * @val_le: data to store
25861966855eSRichard Henderson  * @addr: virtual address
25871966855eSRichard Henderson  * @size: number of bytes
258859213461SRichard Henderson  * @mmu_idx: virtual address context
258959213461SRichard Henderson  * @ra: return address into tcg generated code, or 0
2590a4a411fbSStefan Hajnoczi  * Context: BQL held
259159213461SRichard Henderson  *
25921966855eSRichard Henderson  * Store @size bytes at @addr, which is memory-mapped i/o.
259359213461SRichard Henderson  * The bytes to store are extracted in little-endian order from @val_le;
259459213461SRichard Henderson  * return the bytes of @val_le beyond @p->size that have not been stored.
259559213461SRichard Henderson  */
2596d50ef446SAnton Johansson static uint64_t int_st_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full,
25971966855eSRichard Henderson                                 uint64_t val_le, vaddr addr, int size,
25981f9823ceSRichard Henderson                                 int mmu_idx, uintptr_t ra,
25991f9823ceSRichard Henderson                                 MemoryRegion *mr, hwaddr mr_offset)
26006b8b622eSRichard Henderson {
2601190aba80SRichard Henderson     do {
26025646d6a7SRichard Henderson         MemOp this_mop;
26035646d6a7SRichard Henderson         unsigned this_size;
26045646d6a7SRichard Henderson         MemTxResult r;
26055646d6a7SRichard Henderson 
2606190aba80SRichard Henderson         /* Store aligned pieces up to 8 bytes. */
26075646d6a7SRichard Henderson         this_mop = ctz32(size | (int)addr | 8);
26085646d6a7SRichard Henderson         this_size = 1 << this_mop;
26095646d6a7SRichard Henderson         this_mop |= MO_LE;
26105646d6a7SRichard Henderson 
26115646d6a7SRichard Henderson         r = memory_region_dispatch_write(mr, mr_offset, val_le,
26121f9823ceSRichard Henderson                                          this_mop, full->attrs);
26135646d6a7SRichard Henderson         if (unlikely(r != MEMTX_OK)) {
2614d50ef446SAnton Johansson             io_failed(cpu, full, addr, this_size, MMU_DATA_STORE,
26155646d6a7SRichard Henderson                       mmu_idx, r, ra);
261659213461SRichard Henderson         }
26175646d6a7SRichard Henderson         if (this_size == 8) {
26185646d6a7SRichard Henderson             return 0;
26195646d6a7SRichard Henderson         }
26205646d6a7SRichard Henderson 
26215646d6a7SRichard Henderson         val_le >>= this_size * 8;
26225646d6a7SRichard Henderson         addr += this_size;
26235646d6a7SRichard Henderson         mr_offset += this_size;
26245646d6a7SRichard Henderson         size -= this_size;
2625190aba80SRichard Henderson     } while (size);
2626190aba80SRichard Henderson 
262759213461SRichard Henderson     return val_le;
262859213461SRichard Henderson }
262959213461SRichard Henderson 
2630d50ef446SAnton Johansson static uint64_t do_st_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full,
26311f9823ceSRichard Henderson                                uint64_t val_le, vaddr addr, int size,
26321f9823ceSRichard Henderson                                int mmu_idx, uintptr_t ra)
26331f9823ceSRichard Henderson {
26341f9823ceSRichard Henderson     MemoryRegionSection *section;
26351f9823ceSRichard Henderson     hwaddr mr_offset;
26361f9823ceSRichard Henderson     MemoryRegion *mr;
26371f9823ceSRichard Henderson     MemTxAttrs attrs;
26381f9823ceSRichard Henderson 
26391f9823ceSRichard Henderson     tcg_debug_assert(size > 0 && size <= 8);
26401f9823ceSRichard Henderson 
26411f9823ceSRichard Henderson     attrs = full->attrs;
2642d50ef446SAnton Johansson     section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
26431f9823ceSRichard Henderson     mr = section->mr;
26441f9823ceSRichard Henderson 
26456aba908dSJonathan Cameron     BQL_LOCK_GUARD();
26466aba908dSJonathan Cameron     return int_st_mmio_leN(cpu, full, val_le, addr, size, mmu_idx,
26471f9823ceSRichard Henderson                            ra, mr, mr_offset);
26481f9823ceSRichard Henderson }
26491f9823ceSRichard Henderson 
2650d50ef446SAnton Johansson static uint64_t do_st16_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full,
26511f9823ceSRichard Henderson                                  Int128 val_le, vaddr addr, int size,
26521f9823ceSRichard Henderson                                  int mmu_idx, uintptr_t ra)
26531f9823ceSRichard Henderson {
26541f9823ceSRichard Henderson     MemoryRegionSection *section;
26551f9823ceSRichard Henderson     MemoryRegion *mr;
26561f9823ceSRichard Henderson     hwaddr mr_offset;
26571f9823ceSRichard Henderson     MemTxAttrs attrs;
26581f9823ceSRichard Henderson 
26591f9823ceSRichard Henderson     tcg_debug_assert(size > 8 && size <= 16);
26601f9823ceSRichard Henderson 
26611f9823ceSRichard Henderson     attrs = full->attrs;
2662d50ef446SAnton Johansson     section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
26631f9823ceSRichard Henderson     mr = section->mr;
26641f9823ceSRichard Henderson 
26656aba908dSJonathan Cameron     BQL_LOCK_GUARD();
2666d50ef446SAnton Johansson     int_st_mmio_leN(cpu, full, int128_getlo(val_le), addr, 8,
26671f9823ceSRichard Henderson                     mmu_idx, ra, mr, mr_offset);
26686aba908dSJonathan Cameron     return int_st_mmio_leN(cpu, full, int128_gethi(val_le), addr + 8,
26691f9823ceSRichard Henderson                            size - 8, mmu_idx, ra, mr, mr_offset + 8);
26701f9823ceSRichard Henderson }
26711f9823ceSRichard Henderson 
26726b8b622eSRichard Henderson /*
267359213461SRichard Henderson  * Wrapper for the above.
26746b8b622eSRichard Henderson  */
2675d50ef446SAnton Johansson static uint64_t do_st_leN(CPUState *cpu, MMULookupPageData *p,
26765b36f268SRichard Henderson                           uint64_t val_le, int mmu_idx,
26775b36f268SRichard Henderson                           MemOp mop, uintptr_t ra)
267859213461SRichard Henderson {
26795b36f268SRichard Henderson     MemOp atom;
26805b36f268SRichard Henderson     unsigned tmp, half_size;
26815b36f268SRichard Henderson 
268259213461SRichard Henderson     if (unlikely(p->flags & TLB_MMIO)) {
2683d50ef446SAnton Johansson         return do_st_mmio_leN(cpu, p->full, val_le, p->addr,
26841966855eSRichard Henderson                               p->size, mmu_idx, ra);
268559213461SRichard Henderson     } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
268659213461SRichard Henderson         return val_le >> (p->size * 8);
26875b36f268SRichard Henderson     }
26885b36f268SRichard Henderson 
26895b36f268SRichard Henderson     /*
26905b36f268SRichard Henderson      * It is a given that we cross a page and therefore there is no atomicity
26915b36f268SRichard Henderson      * for the store as a whole, but subobjects may need attention.
26925b36f268SRichard Henderson      */
26935b36f268SRichard Henderson     atom = mop & MO_ATOM_MASK;
26945b36f268SRichard Henderson     switch (atom) {
26955b36f268SRichard Henderson     case MO_ATOM_SUBALIGN:
26965b36f268SRichard Henderson         return store_parts_leN(p->haddr, p->size, val_le);
26975b36f268SRichard Henderson 
26985b36f268SRichard Henderson     case MO_ATOM_IFALIGN_PAIR:
26995b36f268SRichard Henderson     case MO_ATOM_WITHIN16_PAIR:
27005b36f268SRichard Henderson         tmp = mop & MO_SIZE;
27015b36f268SRichard Henderson         tmp = tmp ? tmp - 1 : 0;
27025b36f268SRichard Henderson         half_size = 1 << tmp;
27035b36f268SRichard Henderson         if (atom == MO_ATOM_IFALIGN_PAIR
27045b36f268SRichard Henderson             ? p->size == half_size
27055b36f268SRichard Henderson             : p->size >= half_size) {
27065b36f268SRichard Henderson             if (!HAVE_al8_fast && p->size <= 4) {
27075b36f268SRichard Henderson                 return store_whole_le4(p->haddr, p->size, val_le);
27085b36f268SRichard Henderson             } else if (HAVE_al8) {
27095b36f268SRichard Henderson                 return store_whole_le8(p->haddr, p->size, val_le);
27106b8b622eSRichard Henderson             } else {
2711d50ef446SAnton Johansson                 cpu_loop_exit_atomic(cpu, ra);
27125b36f268SRichard Henderson             }
27135b36f268SRichard Henderson         }
27145b36f268SRichard Henderson         /* fall through */
27155b36f268SRichard Henderson 
27165b36f268SRichard Henderson     case MO_ATOM_IFALIGN:
27175b36f268SRichard Henderson     case MO_ATOM_WITHIN16:
27185b36f268SRichard Henderson     case MO_ATOM_NONE:
27195b36f268SRichard Henderson         return store_bytes_leN(p->haddr, p->size, val_le);
27205b36f268SRichard Henderson 
27215b36f268SRichard Henderson     default:
27225b36f268SRichard Henderson         g_assert_not_reached();
27236b8b622eSRichard Henderson     }
27246b8b622eSRichard Henderson }
27256b8b622eSRichard Henderson 
272635c653c4SRichard Henderson /*
272735c653c4SRichard Henderson  * Wrapper for the above, for 8 < size < 16.
272835c653c4SRichard Henderson  */
2729d50ef446SAnton Johansson static uint64_t do_st16_leN(CPUState *cpu, MMULookupPageData *p,
273035c653c4SRichard Henderson                             Int128 val_le, int mmu_idx,
273135c653c4SRichard Henderson                             MemOp mop, uintptr_t ra)
273235c653c4SRichard Henderson {
273335c653c4SRichard Henderson     int size = p->size;
273435c653c4SRichard Henderson     MemOp atom;
273535c653c4SRichard Henderson 
273635c653c4SRichard Henderson     if (unlikely(p->flags & TLB_MMIO)) {
2737d50ef446SAnton Johansson         return do_st16_mmio_leN(cpu, p->full, val_le, p->addr,
27381f9823ceSRichard Henderson                                 size, mmu_idx, ra);
273935c653c4SRichard Henderson     } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
274035c653c4SRichard Henderson         return int128_gethi(val_le) >> ((size - 8) * 8);
274135c653c4SRichard Henderson     }
274235c653c4SRichard Henderson 
274335c653c4SRichard Henderson     /*
274435c653c4SRichard Henderson      * It is a given that we cross a page and therefore there is no atomicity
274535c653c4SRichard Henderson      * for the store as a whole, but subobjects may need attention.
274635c653c4SRichard Henderson      */
274735c653c4SRichard Henderson     atom = mop & MO_ATOM_MASK;
274835c653c4SRichard Henderson     switch (atom) {
274935c653c4SRichard Henderson     case MO_ATOM_SUBALIGN:
275035c653c4SRichard Henderson         store_parts_leN(p->haddr, 8, int128_getlo(val_le));
275135c653c4SRichard Henderson         return store_parts_leN(p->haddr + 8, p->size - 8,
275235c653c4SRichard Henderson                                int128_gethi(val_le));
275335c653c4SRichard Henderson 
275435c653c4SRichard Henderson     case MO_ATOM_WITHIN16_PAIR:
275535c653c4SRichard Henderson         /* Since size > 8, this is the half that must be atomic. */
27566046f6e9SRichard Henderson         if (!HAVE_CMPXCHG128) {
2757d50ef446SAnton Johansson             cpu_loop_exit_atomic(cpu, ra);
275835c653c4SRichard Henderson         }
275935c653c4SRichard Henderson         return store_whole_le16(p->haddr, p->size, val_le);
276035c653c4SRichard Henderson 
276135c653c4SRichard Henderson     case MO_ATOM_IFALIGN_PAIR:
276235c653c4SRichard Henderson         /*
276335c653c4SRichard Henderson          * Since size > 8, both halves are misaligned,
276435c653c4SRichard Henderson          * and so neither is atomic.
276535c653c4SRichard Henderson          */
276635c653c4SRichard Henderson     case MO_ATOM_IFALIGN:
27672be6a486SRichard Henderson     case MO_ATOM_WITHIN16:
276835c653c4SRichard Henderson     case MO_ATOM_NONE:
276935c653c4SRichard Henderson         stq_le_p(p->haddr, int128_getlo(val_le));
277035c653c4SRichard Henderson         return store_bytes_leN(p->haddr + 8, p->size - 8,
277135c653c4SRichard Henderson                                int128_gethi(val_le));
277235c653c4SRichard Henderson 
277335c653c4SRichard Henderson     default:
277435c653c4SRichard Henderson         g_assert_not_reached();
277535c653c4SRichard Henderson     }
277635c653c4SRichard Henderson }
277735c653c4SRichard Henderson 
2778d50ef446SAnton Johansson static void do_st_1(CPUState *cpu, MMULookupPageData *p, uint8_t val,
277959213461SRichard Henderson                     int mmu_idx, uintptr_t ra)
2780eed56642SAlex Bennée {
278159213461SRichard Henderson     if (unlikely(p->flags & TLB_MMIO)) {
2782d50ef446SAnton Johansson         do_st_mmio_leN(cpu, p->full, val, p->addr, 1, mmu_idx, ra);
278359213461SRichard Henderson     } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
278459213461SRichard Henderson         /* nothing */
27855b87b3e6SRichard Henderson     } else {
278659213461SRichard Henderson         *(uint8_t *)p->haddr = val;
27875b87b3e6SRichard Henderson     }
2788eed56642SAlex Bennée }
2789eed56642SAlex Bennée 
2790d50ef446SAnton Johansson static void do_st_2(CPUState *cpu, MMULookupPageData *p, uint16_t val,
279159213461SRichard Henderson                     int mmu_idx, MemOp memop, uintptr_t ra)
2792eed56642SAlex Bennée {
279359213461SRichard Henderson     if (unlikely(p->flags & TLB_MMIO)) {
2794f7eaf9d7SRichard Henderson         if ((memop & MO_BSWAP) != MO_LE) {
2795f7eaf9d7SRichard Henderson             val = bswap16(val);
2796f7eaf9d7SRichard Henderson         }
2797d50ef446SAnton Johansson         do_st_mmio_leN(cpu, p->full, val, p->addr, 2, mmu_idx, ra);
279859213461SRichard Henderson     } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
279959213461SRichard Henderson         /* nothing */
280059213461SRichard Henderson     } else {
280159213461SRichard Henderson         /* Swap to host endian if necessary, then store. */
280259213461SRichard Henderson         if (memop & MO_BSWAP) {
280359213461SRichard Henderson             val = bswap16(val);
280459213461SRichard Henderson         }
280573fda56fSAnton Johansson         store_atom_2(cpu, ra, p->haddr, memop, val);
280659213461SRichard Henderson     }
280759213461SRichard Henderson }
280859213461SRichard Henderson 
2809d50ef446SAnton Johansson static void do_st_4(CPUState *cpu, MMULookupPageData *p, uint32_t val,
281059213461SRichard Henderson                     int mmu_idx, MemOp memop, uintptr_t ra)
281159213461SRichard Henderson {
281259213461SRichard Henderson     if (unlikely(p->flags & TLB_MMIO)) {
2813f7eaf9d7SRichard Henderson         if ((memop & MO_BSWAP) != MO_LE) {
2814f7eaf9d7SRichard Henderson             val = bswap32(val);
2815f7eaf9d7SRichard Henderson         }
2816d50ef446SAnton Johansson         do_st_mmio_leN(cpu, p->full, val, p->addr, 4, mmu_idx, ra);
281759213461SRichard Henderson     } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
281859213461SRichard Henderson         /* nothing */
281959213461SRichard Henderson     } else {
282059213461SRichard Henderson         /* Swap to host endian if necessary, then store. */
282159213461SRichard Henderson         if (memop & MO_BSWAP) {
282259213461SRichard Henderson             val = bswap32(val);
282359213461SRichard Henderson         }
282473fda56fSAnton Johansson         store_atom_4(cpu, ra, p->haddr, memop, val);
282559213461SRichard Henderson     }
282659213461SRichard Henderson }
282759213461SRichard Henderson 
2828d50ef446SAnton Johansson static void do_st_8(CPUState *cpu, MMULookupPageData *p, uint64_t val,
282959213461SRichard Henderson                     int mmu_idx, MemOp memop, uintptr_t ra)
283059213461SRichard Henderson {
283159213461SRichard Henderson     if (unlikely(p->flags & TLB_MMIO)) {
2832f7eaf9d7SRichard Henderson         if ((memop & MO_BSWAP) != MO_LE) {
2833f7eaf9d7SRichard Henderson             val = bswap64(val);
2834f7eaf9d7SRichard Henderson         }
2835d50ef446SAnton Johansson         do_st_mmio_leN(cpu, p->full, val, p->addr, 8, mmu_idx, ra);
283659213461SRichard Henderson     } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
283759213461SRichard Henderson         /* nothing */
283859213461SRichard Henderson     } else {
283959213461SRichard Henderson         /* Swap to host endian if necessary, then store. */
284059213461SRichard Henderson         if (memop & MO_BSWAP) {
284159213461SRichard Henderson             val = bswap64(val);
284259213461SRichard Henderson         }
284373fda56fSAnton Johansson         store_atom_8(cpu, ra, p->haddr, memop, val);
284459213461SRichard Henderson     }
2845eed56642SAlex Bennée }
2846eed56642SAlex Bennée 
2847e20f73fbSAnton Johansson static void do_st1_mmu(CPUState *cpu, vaddr addr, uint8_t val,
284859213461SRichard Henderson                        MemOpIdx oi, uintptr_t ra)
2849f83bcecbSRichard Henderson {
285059213461SRichard Henderson     MMULookupLocals l;
285159213461SRichard Henderson     bool crosspage;
285259213461SRichard Henderson 
2853f86e8f3dSRichard Henderson     cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
2854e20f73fbSAnton Johansson     crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
285559213461SRichard Henderson     tcg_debug_assert(!crosspage);
285659213461SRichard Henderson 
2857e20f73fbSAnton Johansson     do_st_1(cpu, &l.page[0], val, l.mmu_idx, ra);
2858e20f73fbSAnton Johansson }
2859e20f73fbSAnton Johansson 
2860d50ef446SAnton Johansson static void do_st2_mmu(CPUState *cpu, vaddr addr, uint16_t val,
286159213461SRichard Henderson                        MemOpIdx oi, uintptr_t ra)
2862f83bcecbSRichard Henderson {
286359213461SRichard Henderson     MMULookupLocals l;
286459213461SRichard Henderson     bool crosspage;
286559213461SRichard Henderson     uint8_t a, b;
286659213461SRichard Henderson 
2867f86e8f3dSRichard Henderson     cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
2868d50ef446SAnton Johansson     crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
286959213461SRichard Henderson     if (likely(!crosspage)) {
2870d50ef446SAnton Johansson         do_st_2(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
287159213461SRichard Henderson         return;
287259213461SRichard Henderson     }
287359213461SRichard Henderson 
287459213461SRichard Henderson     if ((l.memop & MO_BSWAP) == MO_LE) {
287559213461SRichard Henderson         a = val, b = val >> 8;
287659213461SRichard Henderson     } else {
287759213461SRichard Henderson         b = val, a = val >> 8;
287859213461SRichard Henderson     }
2879d50ef446SAnton Johansson     do_st_1(cpu, &l.page[0], a, l.mmu_idx, ra);
2880d50ef446SAnton Johansson     do_st_1(cpu, &l.page[1], b, l.mmu_idx, ra);
2881f83bcecbSRichard Henderson }
2882f83bcecbSRichard Henderson 
2883d50ef446SAnton Johansson static void do_st4_mmu(CPUState *cpu, vaddr addr, uint32_t val,
288459213461SRichard Henderson                        MemOpIdx oi, uintptr_t ra)
2885f83bcecbSRichard Henderson {
288659213461SRichard Henderson     MMULookupLocals l;
288759213461SRichard Henderson     bool crosspage;
288859213461SRichard Henderson 
2889f86e8f3dSRichard Henderson     cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
2890d50ef446SAnton Johansson     crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
289159213461SRichard Henderson     if (likely(!crosspage)) {
2892d50ef446SAnton Johansson         do_st_4(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
289359213461SRichard Henderson         return;
289459213461SRichard Henderson     }
289559213461SRichard Henderson 
289659213461SRichard Henderson     /* Swap to little endian for simplicity, then store by bytes. */
289759213461SRichard Henderson     if ((l.memop & MO_BSWAP) != MO_LE) {
289859213461SRichard Henderson         val = bswap32(val);
289959213461SRichard Henderson     }
2900d50ef446SAnton Johansson     val = do_st_leN(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
2901d50ef446SAnton Johansson     (void) do_st_leN(cpu, &l.page[1], val, l.mmu_idx, l.memop, ra);
2902eed56642SAlex Bennée }
2903eed56642SAlex Bennée 
2904d50ef446SAnton Johansson static void do_st8_mmu(CPUState *cpu, vaddr addr, uint64_t val,
290559213461SRichard Henderson                        MemOpIdx oi, uintptr_t ra)
290659213461SRichard Henderson {
290759213461SRichard Henderson     MMULookupLocals l;
290859213461SRichard Henderson     bool crosspage;
290959213461SRichard Henderson 
2910f86e8f3dSRichard Henderson     cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
2911d50ef446SAnton Johansson     crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
291259213461SRichard Henderson     if (likely(!crosspage)) {
2913d50ef446SAnton Johansson         do_st_8(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
291459213461SRichard Henderson         return;
291559213461SRichard Henderson     }
291659213461SRichard Henderson 
291759213461SRichard Henderson     /* Swap to little endian for simplicity, then store by bytes. */
291859213461SRichard Henderson     if ((l.memop & MO_BSWAP) != MO_LE) {
291959213461SRichard Henderson         val = bswap64(val);
292059213461SRichard Henderson     }
2921d50ef446SAnton Johansson     val = do_st_leN(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
2922d50ef446SAnton Johansson     (void) do_st_leN(cpu, &l.page[1], val, l.mmu_idx, l.memop, ra);
2923eed56642SAlex Bennée }
2924eed56642SAlex Bennée 
2925d50ef446SAnton Johansson static void do_st16_mmu(CPUState *cpu, vaddr addr, Int128 val,
292635c653c4SRichard Henderson                         MemOpIdx oi, uintptr_t ra)
292735c653c4SRichard Henderson {
292835c653c4SRichard Henderson     MMULookupLocals l;
292935c653c4SRichard Henderson     bool crosspage;
293035c653c4SRichard Henderson     uint64_t a, b;
293135c653c4SRichard Henderson     int first;
293235c653c4SRichard Henderson 
2933f86e8f3dSRichard Henderson     cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
2934d50ef446SAnton Johansson     crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
293535c653c4SRichard Henderson     if (likely(!crosspage)) {
2936f7eaf9d7SRichard Henderson         if (unlikely(l.page[0].flags & TLB_MMIO)) {
2937f7eaf9d7SRichard Henderson             if ((l.memop & MO_BSWAP) != MO_LE) {
2938f7eaf9d7SRichard Henderson                 val = bswap128(val);
2939f7eaf9d7SRichard Henderson             }
2940d50ef446SAnton Johansson             do_st16_mmio_leN(cpu, l.page[0].full, val, addr, 16, l.mmu_idx, ra);
2941f7eaf9d7SRichard Henderson         } else if (unlikely(l.page[0].flags & TLB_DISCARD_WRITE)) {
2942f7eaf9d7SRichard Henderson             /* nothing */
2943f7eaf9d7SRichard Henderson         } else {
294435c653c4SRichard Henderson             /* Swap to host endian if necessary, then store. */
294535c653c4SRichard Henderson             if (l.memop & MO_BSWAP) {
294635c653c4SRichard Henderson                 val = bswap128(val);
294735c653c4SRichard Henderson             }
294873fda56fSAnton Johansson             store_atom_16(cpu, ra, l.page[0].haddr, l.memop, val);
294935c653c4SRichard Henderson         }
295035c653c4SRichard Henderson         return;
295135c653c4SRichard Henderson     }
295235c653c4SRichard Henderson 
295335c653c4SRichard Henderson     first = l.page[0].size;
295435c653c4SRichard Henderson     if (first == 8) {
295535c653c4SRichard Henderson         MemOp mop8 = (l.memop & ~(MO_SIZE | MO_BSWAP)) | MO_64;
295635c653c4SRichard Henderson 
295735c653c4SRichard Henderson         if (l.memop & MO_BSWAP) {
295835c653c4SRichard Henderson             val = bswap128(val);
295935c653c4SRichard Henderson         }
296035c653c4SRichard Henderson         if (HOST_BIG_ENDIAN) {
296135c653c4SRichard Henderson             b = int128_getlo(val), a = int128_gethi(val);
296235c653c4SRichard Henderson         } else {
296335c653c4SRichard Henderson             a = int128_getlo(val), b = int128_gethi(val);
296435c653c4SRichard Henderson         }
2965d50ef446SAnton Johansson         do_st_8(cpu, &l.page[0], a, l.mmu_idx, mop8, ra);
2966d50ef446SAnton Johansson         do_st_8(cpu, &l.page[1], b, l.mmu_idx, mop8, ra);
296735c653c4SRichard Henderson         return;
296835c653c4SRichard Henderson     }
296935c653c4SRichard Henderson 
297035c653c4SRichard Henderson     if ((l.memop & MO_BSWAP) != MO_LE) {
297135c653c4SRichard Henderson         val = bswap128(val);
297235c653c4SRichard Henderson     }
297335c653c4SRichard Henderson     if (first < 8) {
2974d50ef446SAnton Johansson         do_st_leN(cpu, &l.page[0], int128_getlo(val), l.mmu_idx, l.memop, ra);
297535c653c4SRichard Henderson         val = int128_urshift(val, first * 8);
2976d50ef446SAnton Johansson         do_st16_leN(cpu, &l.page[1], val, l.mmu_idx, l.memop, ra);
297735c653c4SRichard Henderson     } else {
2978d50ef446SAnton Johansson         b = do_st16_leN(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
2979d50ef446SAnton Johansson         do_st_leN(cpu, &l.page[1], b, l.mmu_idx, l.memop, ra);
298035c653c4SRichard Henderson     }
298135c653c4SRichard Henderson }
298235c653c4SRichard Henderson 
2983f83bcecbSRichard Henderson #include "ldst_common.c.inc"
2984cfe04a4bSRichard Henderson 
2985be9568b4SRichard Henderson /*
2986be9568b4SRichard Henderson  * First set of functions passes in OI and RETADDR.
2987be9568b4SRichard Henderson  * This makes them callable from other helpers.
2988be9568b4SRichard Henderson  */
2989d9bb58e5SYang Zhong 
2990d9bb58e5SYang Zhong #define ATOMIC_NAME(X) \
2991be9568b4SRichard Henderson     glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)
2992a754f7f3SRichard Henderson 
2993707526adSRichard Henderson #define ATOMIC_MMU_CLEANUP
2994d9bb58e5SYang Zhong 
2995139c1837SPaolo Bonzini #include "atomic_common.c.inc"
2996d9bb58e5SYang Zhong 
2997d9bb58e5SYang Zhong #define DATA_SIZE 1
2998d9bb58e5SYang Zhong #include "atomic_template.h"
2999d9bb58e5SYang Zhong 
3000d9bb58e5SYang Zhong #define DATA_SIZE 2
3001d9bb58e5SYang Zhong #include "atomic_template.h"
3002d9bb58e5SYang Zhong 
3003d9bb58e5SYang Zhong #define DATA_SIZE 4
3004d9bb58e5SYang Zhong #include "atomic_template.h"
3005d9bb58e5SYang Zhong 
3006d9bb58e5SYang Zhong #ifdef CONFIG_ATOMIC64
3007d9bb58e5SYang Zhong #define DATA_SIZE 8
3008d9bb58e5SYang Zhong #include "atomic_template.h"
3009d9bb58e5SYang Zhong #endif
3010d9bb58e5SYang Zhong 
301176f9d6adSRichard Henderson #if defined(CONFIG_ATOMIC128) || HAVE_CMPXCHG128
3012d9bb58e5SYang Zhong #define DATA_SIZE 16
3013d9bb58e5SYang Zhong #include "atomic_template.h"
3014d9bb58e5SYang Zhong #endif
3015d9bb58e5SYang Zhong 
3016d9bb58e5SYang Zhong /* Code access functions.  */
3017d9bb58e5SYang Zhong 
3018fc4120a3SRichard Henderson uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr)
3019eed56642SAlex Bennée {
30203b916140SRichard Henderson     CPUState *cs = env_cpu(env);
30213b916140SRichard Henderson     MemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(cs, true));
30223b916140SRichard Henderson     return do_ld1_mmu(cs, addr, oi, 0, MMU_INST_FETCH);
30234cef72d0SAlex Bennée }
30244cef72d0SAlex Bennée 
3025fc4120a3SRichard Henderson uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr)
30262dd92606SRichard Henderson {
30273b916140SRichard Henderson     CPUState *cs = env_cpu(env);
30283b916140SRichard Henderson     MemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(cs, true));
30293b916140SRichard Henderson     return do_ld2_mmu(cs, addr, oi, 0, MMU_INST_FETCH);
30302dd92606SRichard Henderson }
30312dd92606SRichard Henderson 
3032fc4120a3SRichard Henderson uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr)
30334cef72d0SAlex Bennée {
30343b916140SRichard Henderson     CPUState *cs = env_cpu(env);
30353b916140SRichard Henderson     MemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(cs, true));
30363b916140SRichard Henderson     return do_ld4_mmu(cs, addr, oi, 0, MMU_INST_FETCH);
3037eed56642SAlex Bennée }
3038d9bb58e5SYang Zhong 
3039fc4120a3SRichard Henderson uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr)
3040eed56642SAlex Bennée {
30413b916140SRichard Henderson     CPUState *cs = env_cpu(env);
30423b916140SRichard Henderson     MemOpIdx oi = make_memop_idx(MO_TEUQ, cpu_mmu_index(cs, true));
30433b916140SRichard Henderson     return do_ld8_mmu(cs, addr, oi, 0, MMU_INST_FETCH);
3044eed56642SAlex Bennée }
304528990626SRichard Henderson 
304628990626SRichard Henderson uint8_t cpu_ldb_code_mmu(CPUArchState *env, abi_ptr addr,
304728990626SRichard Henderson                          MemOpIdx oi, uintptr_t retaddr)
304828990626SRichard Henderson {
3049d50ef446SAnton Johansson     return do_ld1_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH);
305028990626SRichard Henderson }
305128990626SRichard Henderson 
305228990626SRichard Henderson uint16_t cpu_ldw_code_mmu(CPUArchState *env, abi_ptr addr,
305328990626SRichard Henderson                           MemOpIdx oi, uintptr_t retaddr)
305428990626SRichard Henderson {
3055d50ef446SAnton Johansson     return do_ld2_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH);
305628990626SRichard Henderson }
305728990626SRichard Henderson 
305828990626SRichard Henderson uint32_t cpu_ldl_code_mmu(CPUArchState *env, abi_ptr addr,
305928990626SRichard Henderson                           MemOpIdx oi, uintptr_t retaddr)
306028990626SRichard Henderson {
3061d50ef446SAnton Johansson     return do_ld4_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH);
306228990626SRichard Henderson }
306328990626SRichard Henderson 
306428990626SRichard Henderson uint64_t cpu_ldq_code_mmu(CPUArchState *env, abi_ptr addr,
306528990626SRichard Henderson                           MemOpIdx oi, uintptr_t retaddr)
306628990626SRichard Henderson {
3067d50ef446SAnton Johansson     return do_ld8_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH);
306828990626SRichard Henderson }
3069