xref: /openbmc/qemu/accel/tcg/cputlb.c (revision 79e4208506651660b866f536616a5f8f3175f909)
/*
 *  Common CPU TLB handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
#include "qemu/error-report.h"
#include "exec/log.h"
#include "exec/helper-proto.h"
#include "qemu/atomic.h"
#include "qemu/atomic128.h"

/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif

#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)
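
/*
 * Note that the gates above are compile-time constants: when the debug
 * options are disabled, the dead branches are eliminated by the compiler
 * while the debug statements still get syntax- and type-checked.
 */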

#define assert_cpu_is_self(cpu) do {                              \
        if (DEBUG_TLB_GATE) {                                     \
            g_assert(!(cpu)->created || qemu_cpu_is_self(cpu));   \
        }                                                         \
    } while (0)

/* run_on_cpu_data.target_ptr should always be big enough for a
 * target_ulong even on 32 bit builds */
QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));

/* We currently can't handle more than 16 bits in the MMUIDX bitmask.
 */
QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
#define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)

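/*
 * env->tlb_mask[mmu_idx] stores (n_entries - 1) << CPU_TLB_ENTRY_BITS
 * (see tlb_dyn_init below), i.e. the byte offset of the last entry in
 * the table, so adding the size of one entry yields the size of the
 * whole table in bytes.
 */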
static inline size_t sizeof_tlb(CPUArchState *env, uintptr_t mmu_idx)
{
    return env->tlb_mask[mmu_idx] + (1 << CPU_TLB_ENTRY_BITS);
}

static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
                             size_t max_entries)
{
    desc->window_begin_ns = ns;
    desc->window_max_entries = max_entries;
}

static void tlb_dyn_init(CPUArchState *env)
{
    int i;

    for (i = 0; i < NB_MMU_MODES; i++) {
        CPUTLBDesc *desc = &env->tlb_d[i];
        size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;

        tlb_window_reset(desc, get_clock_realtime(), 0);
        desc->n_used_entries = 0;
        env->tlb_mask[i] = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
        env->tlb_table[i] = g_new(CPUTLBEntry, n_entries);
        env->iotlb[i] = g_new(CPUIOTLBEntry, n_entries);
    }
}

/**
 * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
 * @env: CPU that owns the TLB
 * @mmu_idx: MMU index of the TLB
 *
 * Called with tlb_c.lock held.
 *
 * We have two main constraints when resizing a TLB: (1) we only resize it
 * on a TLB flush (otherwise we'd have to take a perf hit by either rehashing
 * the array or unnecessarily flushing it), which means we do not control how
 * frequently the resizing can occur; (2) we don't have access to the guest's
 * future scheduling decisions, and therefore have to decide the magnitude of
 * the resize based on past observations.
 *
 * In general, a memory-hungry process can benefit greatly from an appropriately
 * sized TLB, since a guest TLB miss is very expensive. This doesn't mean that
 * we just have to make the TLB as large as possible; while an oversized TLB
 * results in minimal TLB miss rates, it also takes longer to be flushed
 * (flushes can be _very_ frequent), and the reduced locality can also hurt
 * performance.
 *
 * To achieve near-optimal performance for all kinds of workloads, we:
 *
 * 1. Aggressively increase the size of the TLB when the use rate of the
 * TLB being flushed is high, since it is likely that in the near future this
 * memory-hungry process will execute again, and its memory hungriness will
 * probably be similar.
 *
 * 2. Slowly reduce the size of the TLB as the use rate declines over a
 * reasonably large time window. The rationale is that if in such a time window
 * we have not observed a high TLB use rate, it is likely that we won't observe
 * it in the near future. In that case, once a time window expires we downsize
 * the TLB to match the maximum use rate observed in the window.
 *
 * 3. Try to keep the maximum use rate in a time window in the 30-70% range,
 * since in that range performance is likely near-optimal. Recall that the TLB
 * is direct mapped, so we want the use rate to be low (or at least not too
 * high), since otherwise we are likely to have a significant number of
 * conflict misses.
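 *
 * As a worked example (hypothetical numbers): with a 1024-entry TLB
 * whose maximum use over the last window was 200 entries, the use rate
 * is 19%. If the window has expired we downsize: pow2ceil(200) == 256
 * would give an expected rate of 200/256 == 78%, which is above 70%,
 * so the target is doubled to 512 entries.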
 */
static void tlb_mmu_resize_locked(CPUArchState *env, int mmu_idx)
{
    CPUTLBDesc *desc = &env->tlb_d[mmu_idx];
    size_t old_size = tlb_n_entries(env, mmu_idx);
    size_t rate;
    size_t new_size = old_size;
    int64_t now = get_clock_realtime();
    int64_t window_len_ms = 100;
    int64_t window_len_ns = window_len_ms * 1000 * 1000;
    bool window_expired = now > desc->window_begin_ns + window_len_ns;

    if (desc->n_used_entries > desc->window_max_entries) {
        desc->window_max_entries = desc->n_used_entries;
    }
    rate = desc->window_max_entries * 100 / old_size;

    if (rate > 70) {
        new_size = MIN(old_size << 1, 1 << CPU_TLB_DYN_MAX_BITS);
    } else if (rate < 30 && window_expired) {
        size_t ceil = pow2ceil(desc->window_max_entries);
        size_t expected_rate = desc->window_max_entries * 100 / ceil;

        /*
         * Avoid undersizing when the max number of entries seen is just below
         * a pow2. For instance, if max_entries == 1025, the expected use rate
         * would be 1025/2048==50%. However, if max_entries == 1023, we'd get
         * 1023/1024==99.9% use rate, so we'd likely end up doubling the size
         * later. Thus, make sure that the expected use rate remains below 70%.
         * (and since we double the size, that means the lowest rate we'd
         * expect to get is 35%, which is still in the 30-70% range where
         * we consider that the size is appropriate.)
         */
        if (expected_rate > 70) {
            ceil *= 2;
        }
        new_size = MAX(ceil, 1 << CPU_TLB_DYN_MIN_BITS);
    }

    if (new_size == old_size) {
        if (window_expired) {
            tlb_window_reset(desc, now, desc->n_used_entries);
        }
        return;
    }

    g_free(env->tlb_table[mmu_idx]);
    g_free(env->iotlb[mmu_idx]);

    tlb_window_reset(desc, now, 0);
    /* desc->n_used_entries is cleared by the caller */
    env->tlb_mask[mmu_idx] = (new_size - 1) << CPU_TLB_ENTRY_BITS;
    env->tlb_table[mmu_idx] = g_try_new(CPUTLBEntry, new_size);
    env->iotlb[mmu_idx] = g_try_new(CPUIOTLBEntry, new_size);
    /*
     * If the allocations fail, try smaller sizes. We just freed some
     * memory, so going back to half of new_size has a good chance of working.
     * Increased memory pressure elsewhere in the system might cause the
     * allocations to fail though, so we progressively reduce the allocation
     * size, aborting if we cannot even allocate the smallest TLB we support.
     */
    while (env->tlb_table[mmu_idx] == NULL || env->iotlb[mmu_idx] == NULL) {
        if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
            error_report("%s: %s", __func__, strerror(errno));
            abort();
        }
        new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS);
        env->tlb_mask[mmu_idx] = (new_size - 1) << CPU_TLB_ENTRY_BITS;

        g_free(env->tlb_table[mmu_idx]);
        g_free(env->iotlb[mmu_idx]);
        env->tlb_table[mmu_idx] = g_try_new(CPUTLBEntry, new_size);
        env->iotlb[mmu_idx] = g_try_new(CPUIOTLBEntry, new_size);
    }
}

static inline void tlb_table_flush_by_mmuidx(CPUArchState *env, int mmu_idx)
{
    tlb_mmu_resize_locked(env, mmu_idx);
    memset(env->tlb_table[mmu_idx], -1, sizeof_tlb(env, mmu_idx));
    env->tlb_d[mmu_idx].n_used_entries = 0;
}

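/* The two helpers below are called with tlb_c.lock held.  */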
static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
{
    env->tlb_d[mmu_idx].n_used_entries++;
}

static inline void tlb_n_used_entries_dec(CPUArchState *env, uintptr_t mmu_idx)
{
    env->tlb_d[mmu_idx].n_used_entries--;
}

void tlb_init(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;

    qemu_spin_init(&env->tlb_c.lock);

    /* Ensure that cpu_reset performs a full flush.  */
    env->tlb_c.dirty = ALL_MMUIDX_BITS;

    tlb_dyn_init(env);
}

/* flush_all_helper: run fn asynchronously on all cpus other than src
 *
 * The caller is responsible for running fn on src itself: either
 * directly, or queued as "safe" work (as the _synced variants do),
 * the latter creating a synchronisation point where all queued work
 * is finished before execution starts again.
 */
static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
                             run_on_cpu_data d)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, fn, d);
        }
    }
}

void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
{
    CPUState *cpu;
    size_t full = 0, part = 0, elide = 0;

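    /*
     * The counters are updated with atomic_set by the owning vCPU;
     * reading them with atomic_read avoids torn reads when sampling
     * from outside that thread.
     */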
    CPU_FOREACH(cpu) {
        CPUArchState *env = cpu->env_ptr;

        full += atomic_read(&env->tlb_c.full_flush_count);
        part += atomic_read(&env->tlb_c.part_flush_count);
        elide += atomic_read(&env->tlb_c.elide_flush_count);
    }
    *pfull = full;
    *ppart = part;
    *pelide = elide;
}

static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
{
    tlb_table_flush_by_mmuidx(env, mmu_idx);
    memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0]));
    env->tlb_d[mmu_idx].large_page_addr = -1;
    env->tlb_d[mmu_idx].large_page_mask = -1;
    env->tlb_d[mmu_idx].vindex = 0;
}

static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    uint16_t asked = data.host_int;
    uint16_t all_dirty, work, to_clean;

    assert_cpu_is_self(cpu);

    tlb_debug("mmu_idx:0x%04" PRIx16 "\n", asked);

    qemu_spin_lock(&env->tlb_c.lock);

    all_dirty = env->tlb_c.dirty;
    to_clean = asked & all_dirty;
    all_dirty &= ~to_clean;
    env->tlb_c.dirty = all_dirty;

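    /*
     * Flush each requested mmu_idx that is actually dirty;
     * "work &= work - 1" clears the lowest set bit on each iteration.
     */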
    for (work = to_clean; work != 0; work &= work - 1) {
        int mmu_idx = ctz32(work);
        tlb_flush_one_mmuidx_locked(env, mmu_idx);
    }

    qemu_spin_unlock(&env->tlb_c.lock);

    cpu_tb_jmp_cache_clear(cpu);

    if (to_clean == ALL_MMUIDX_BITS) {
        atomic_set(&env->tlb_c.full_flush_count,
                   env->tlb_c.full_flush_count + 1);
    } else {
        atomic_set(&env->tlb_c.part_flush_count,
                   env->tlb_c.part_flush_count + ctpop16(to_clean));
        if (to_clean != asked) {
            atomic_set(&env->tlb_c.elide_flush_count,
                       env->tlb_c.elide_flush_count +
                       ctpop16(asked & ~to_clean));
        }
    }
}

void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
    tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);

    if (cpu->created && !qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
                         RUN_ON_CPU_HOST_INT(idxmap));
    } else {
        tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap));
    }
}

void tlb_flush(CPUState *cpu)
{
    tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS);
}

void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus(src_cpu, ALL_MMUIDX_BITS);
}

void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS);
}

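/*
 * To summarise the flush entry points above: tlb_flush_by_mmuidx()
 * flushes the given CPU only, running the work on that CPU's thread
 * (queued asynchronously when called from another thread); the
 * _all_cpus variant also queues the flush on every other CPU and runs
 * it synchronously on the source; the _all_cpus_synced variant instead
 * queues the source's flush as "safe" work, creating a synchronisation
 * point where all queued work finishes before execution starts again.
 * tlb_flush() and friends are the same operations with all MMU indexes
 * selected.
 */
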
static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry,
                                        target_ulong page)
{
    return tlb_hit_page(tlb_entry->addr_read, page) ||
           tlb_hit_page(tlb_addr_write(tlb_entry), page) ||
           tlb_hit_page(tlb_entry->addr_code, page);
}

/**
 * tlb_entry_is_empty - return true if the entry is not in use
 * @te: pointer to CPUTLBEntry
 */
static inline bool tlb_entry_is_empty(const CPUTLBEntry *te)
{
    return te->addr_read == -1 && te->addr_write == -1 && te->addr_code == -1;
}

/* Called with tlb_c.lock held.  Returns true if the entry was flushed.  */
static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry,
                                          target_ulong page)
{
    if (tlb_hit_page_anyprot(tlb_entry, page)) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
        return true;
    }
    return false;
}

/* Called with tlb_c.lock held */
static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx,
                                              target_ulong page)
{
    int k;

    assert_cpu_is_self(ENV_GET_CPU(env));
    for (k = 0; k < CPU_VTLB_SIZE; k++) {
        if (tlb_flush_entry_locked(&env->tlb_v_table[mmu_idx][k], page)) {
            tlb_n_used_entries_dec(env, mmu_idx);
        }
    }
}

static void tlb_flush_page_locked(CPUArchState *env, int midx,
                                  target_ulong page)
{
    target_ulong lp_addr = env->tlb_d[midx].large_page_addr;
    target_ulong lp_mask = env->tlb_d[midx].large_page_mask;

    /* Check if we need to flush due to large pages.  */
    if ((page & lp_mask) == lp_addr) {
        tlb_debug("forcing full flush midx %d ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  midx, lp_addr, lp_mask);
        tlb_flush_one_mmuidx_locked(env, midx);
    } else {
        if (tlb_flush_entry_locked(tlb_entry(env, midx, page), page)) {
            tlb_n_used_entries_dec(env, midx);
        }
        tlb_flush_vtlb_page_locked(env, midx, page);
    }
}

/* Since we hijack the bottom bits of the page address to carry an
 * mmuidx bitmask, fail the build if the bitmask cannot fit.
 */
QEMU_BUILD_BUG_ON(NB_MMU_MODES > TARGET_PAGE_BITS_MIN);

static void tlb_flush_page_by_mmuidx_async_work(CPUState *cpu,
                                                run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    target_ulong addr_and_mmuidx = (target_ulong) data.target_ptr;
    target_ulong addr = addr_and_mmuidx & TARGET_PAGE_MASK;
    unsigned long mmu_idx_bitmap = addr_and_mmuidx & ALL_MMUIDX_BITS;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("page addr:" TARGET_FMT_lx " mmu_map:0x%lx\n",
              addr, mmu_idx_bitmap);

    qemu_spin_lock(&env->tlb_c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if (test_bit(mmu_idx, &mmu_idx_bitmap)) {
            tlb_flush_page_locked(env, mmu_idx, addr);
        }
    }
    qemu_spin_unlock(&env->tlb_c.lock);

    tb_flush_jmp_cache(cpu, addr);
}

void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
{
    target_ulong addr_and_mmu_idx;

    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap);

    /* This should already be page aligned */
    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
    addr_and_mmu_idx |= idxmap;

    if (!qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_work,
                         RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    } else {
        tlb_flush_page_by_mmuidx_async_work(
            cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    }
}

void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
    tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS);
}

void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr,
                                       uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_page_by_mmuidx_async_work;
    target_ulong addr_and_mmu_idx;

    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
    addr_and_mmu_idx |= idxmap;

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    fn(src_cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
}

void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
{
    tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS);
}

void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                              target_ulong addr,
                                              uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_page_by_mmuidx_async_work;
    target_ulong addr_and_mmu_idx;

    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
    addr_and_mmu_idx |= idxmap;

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
}

void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr)
{
    tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
}

/* update the TLBs so that writes to code in the physical RAM page
   'ram_addr' can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
                                             DIRTY_MEMORY_CODE);
}

/* update the TLB so that writes in physical page 'ram_addr' are no longer
   tested for self modifying code */
void tlb_unprotect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}


/*
 * Dirty write flag handling
 *
 * When the TCG code writes to a location it looks up the address in
 * the TLB and uses that data to compute the final address. If any of
 * the lower bits of the address are set then the slow path is forced.
 * There are a number of reasons to do this but for normal RAM the
 * most usual is detecting writes to code regions which may invalidate
 * generated code.
 *
 * Other vCPUs might be reading their TLBs during guest execution, so we update
 * te->addr_write with atomic_set. We don't need to worry about this for
 * oversized guests as MTTCG is disabled for them.
 *
 * Called with tlb_c.lock held.
 */
static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
                                         uintptr_t start, uintptr_t length)
{
    uintptr_t addr = tlb_entry->addr_write;

    if ((addr & (TLB_INVALID_MASK | TLB_MMIO | TLB_NOTDIRTY)) == 0) {
        addr &= TARGET_PAGE_MASK;
        addr += tlb_entry->addend;
        if ((addr - start) < length) {
#if TCG_OVERSIZED_GUEST
            tlb_entry->addr_write |= TLB_NOTDIRTY;
#else
            atomic_set(&tlb_entry->addr_write,
                       tlb_entry->addr_write | TLB_NOTDIRTY);
#endif
        }
    }
}

/*
 * Called with tlb_c.lock held.
 * Called only from the vCPU context, i.e. the TLB's owner thread.
 */
static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s)
{
    *d = *s;
}

/* This is a cross vCPU call (i.e. another vCPU resetting the flags of
 * the target vCPU).
 * We must take tlb_c.lock to avoid racing with another vCPU update. The only
 * thing actually updated is the target TLB entry ->addr_write flags.
 */
void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
{
    CPUArchState *env;

    int mmu_idx;

    env = cpu->env_ptr;
    qemu_spin_lock(&env->tlb_c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        unsigned int i;
        unsigned int n = tlb_n_entries(env, mmu_idx);

        for (i = 0; i < n; i++) {
            tlb_reset_dirty_range_locked(&env->tlb_table[mmu_idx][i], start1,
                                         length);
        }

        for (i = 0; i < CPU_VTLB_SIZE; i++) {
            tlb_reset_dirty_range_locked(&env->tlb_v_table[mmu_idx][i], start1,
                                         length);
        }
    }
    qemu_spin_unlock(&env->tlb_c.lock);
}

/* Called with tlb_c.lock held */
static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry,
                                         target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    vaddr &= TARGET_PAGE_MASK;
    qemu_spin_lock(&env->tlb_c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, vaddr), vaddr);
    }

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_set_dirty1_locked(&env->tlb_v_table[mmu_idx][k], vaddr);
        }
    }
    qemu_spin_unlock(&env->tlb_c.lock);
}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUArchState *env, int mmu_idx,
                               target_ulong vaddr, target_ulong size)
{
    target_ulong lp_addr = env->tlb_d[mmu_idx].large_page_addr;
    target_ulong lp_mask = ~(size - 1);

    if (lp_addr == (target_ulong)-1) {
        /* No previous large page.  */
        lp_addr = vaddr;
    } else {
        /* Extend the existing region to include the new page.
           This is a compromise between unnecessary flushes and
           the cost of maintaining a full variable size TLB.  */
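        /*
         * For instance (hypothetical values), 1MB large pages tracked
         * at 0x100000 and 0x200000 widen lp_mask until both fall in
         * one region, ending up as the single region 0x0/~0x3fffff.
         */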
        lp_mask &= env->tlb_d[mmu_idx].large_page_mask;
        while (((lp_addr ^ vaddr) & lp_mask) != 0) {
            lp_mask <<= 1;
        }
    }
    env->tlb_d[mmu_idx].large_page_addr = lp_addr & lp_mask;
    env->tlb_d[mmu_idx].large_page_mask = lp_mask;
}

/* Add a new TLB entry. At most one entry for a given virtual address
 * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
 * supplied size is only used by tlb_flush_page.
 *
 * Called from TCG-generated code, which is under an RCU read-side
 * critical section.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs, int prot,
                             int mmu_idx, target_ulong size)
{
    CPUArchState *env = cpu->env_ptr;
    MemoryRegionSection *section;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    uintptr_t addend;
    CPUTLBEntry *te, tn;
    hwaddr iotlb, xlat, sz, paddr_page;
    target_ulong vaddr_page;
    int asidx = cpu_asidx_from_attrs(cpu, attrs);

    assert_cpu_is_self(cpu);

    if (size <= TARGET_PAGE_SIZE) {
        sz = TARGET_PAGE_SIZE;
    } else {
        tlb_add_large_page(env, mmu_idx, vaddr, size);
        sz = size;
    }
    vaddr_page = vaddr & TARGET_PAGE_MASK;
    paddr_page = paddr & TARGET_PAGE_MASK;

    section = address_space_translate_for_iotlb(cpu, asidx, paddr_page,
                                                &xlat, &sz, attrs, &prot);
    assert(sz >= TARGET_PAGE_SIZE);

    tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
              " prot=%x idx=%d\n",
              vaddr, paddr, prot, mmu_idx);

    address = vaddr_page;
    if (size < TARGET_PAGE_SIZE) {
        /*
         * Slow-path the TLB entries; we will repeat the MMU check and TLB
         * fill on every access.
         */
        address |= TLB_RECHECK;
    }
    if (!memory_region_is_ram(section->mr) &&
        !memory_region_is_romd(section->mr)) {
        /* IO memory case */
        address |= TLB_MMIO;
        addend = 0;
    } else {
        /* TLB_MMIO for rom/romd handled below */
        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
    }

    code_address = address;
    iotlb = memory_region_section_get_iotlb(cpu, section, vaddr_page,
                                            paddr_page, xlat, prot, &address);

    index = tlb_index(env, mmu_idx, vaddr_page);
    te = tlb_entry(env, mmu_idx, vaddr_page);

    /*
     * Hold the TLB lock for the rest of the function. We could acquire/release
     * the lock several times in the function, but it is faster to amortize the
     * acquisition cost by acquiring it just once. Note that this leads to
     * a longer critical section, but this is not a concern since the TLB lock
     * is unlikely to be contended.
     */
    qemu_spin_lock(&env->tlb_c.lock);

    /* Note that the tlb is no longer clean.  */
    env->tlb_c.dirty |= 1 << mmu_idx;

    /* Make sure there's no cached translation for the new page.  */
    tlb_flush_vtlb_page_locked(env, mmu_idx, vaddr_page);

    /*
     * Only evict the old entry to the victim tlb if it's for a
     * different page; otherwise just overwrite the stale data.
     */
    if (!tlb_hit_page_anyprot(te, vaddr_page) && !tlb_entry_is_empty(te)) {
        unsigned vidx = env->tlb_d[mmu_idx].vindex++ % CPU_VTLB_SIZE;
        CPUTLBEntry *tv = &env->tlb_v_table[mmu_idx][vidx];

        /* Evict the old entry into the victim tlb.  */
        copy_tlb_helper_locked(tv, te);
        env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index];
        tlb_n_used_entries_dec(env, mmu_idx);
    }

    /* refill the tlb */
    /*
     * At this point iotlb contains a physical section number in the lower
     * TARGET_PAGE_BITS, and either
     *  + the ram_addr_t of the page base of the target RAM (if NOTDIRTY or ROM)
     *  + the offset within section->mr of the page base (otherwise)
     * We subtract the vaddr_page (which is page aligned and thus won't
     * disturb the low bits) to give an offset which can be added to the
     * (non-page-aligned) vaddr of the eventual memory access to get
     * the MemoryRegion offset for the access. Note that the vaddr we
     * subtract here is that of the page base, and not the same as the
     * vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
     */
    env->iotlb[mmu_idx][index].addr = iotlb - vaddr_page;
    env->iotlb[mmu_idx][index].attrs = attrs;

    /* Now calculate the new entry */
    tn.addend = addend - vaddr_page;
    if (prot & PAGE_READ) {
        tn.addr_read = address;
    } else {
        tn.addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        tn.addr_code = code_address;
    } else {
        tn.addr_code = -1;
    }

    tn.addr_write = -1;
    if (prot & PAGE_WRITE) {
        if ((memory_region_is_ram(section->mr) && section->readonly)
            || memory_region_is_romd(section->mr)) {
            /* Write access calls the I/O callback.  */
            tn.addr_write = address | TLB_MMIO;
        } else if (memory_region_is_ram(section->mr)
                   && cpu_physical_memory_is_clean(
                       memory_region_get_ram_addr(section->mr) + xlat)) {
            tn.addr_write = address | TLB_NOTDIRTY;
        } else {
            tn.addr_write = address;
        }
        if (prot & PAGE_WRITE_INV) {
            tn.addr_write |= TLB_INVALID_MASK;
        }
    }

    copy_tlb_helper_locked(te, &tn);
    tlb_n_used_entries_inc(env, mmu_idx);
    qemu_spin_unlock(&env->tlb_c.lock);
}

/* Add a new TLB entry, but without specifying the memory
 * transaction attributes to be used.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
                            prot, mmu_idx, size);
}

static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    ram_addr = qemu_ram_addr_from_host(ptr);
    if (ram_addr == RAM_ADDR_INVALID) {
        error_report("Bad ram pointer %p", ptr);
        abort();
    }
    return ram_addr;
}

/*
 * Note: tlb_fill() can trigger a resize of the TLB. This means that all of the
 * caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must
 * be discarded and looked up again (e.g. via tlb_entry()).
 */
static void tlb_fill(CPUState *cpu, target_ulong addr, int size,
                     MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    bool ok;

    /*
     * This is not a probe, so only valid return is success; failure
     * should result in exception + longjmp to the cpu loop.
     */
    ok = cc->tlb_fill(cpu, addr, size, access_type, mmu_idx, false, retaddr);
    assert(ok);
}

static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                         int mmu_idx, target_ulong addr, uintptr_t retaddr,
                         MMUAccessType access_type, int size)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr mr_offset;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    uint64_t val;
    bool locked = false;
    MemTxResult r;

    section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
    mr = section->mr;
    mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
    cpu->mem_io_pc = retaddr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;
    cpu->mem_io_access_type = access_type;

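    /*
     * Devices that require the big QEMU lock are dispatched with it
     * held; take it here only if the caller does not already hold it.
     */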
9008b812533SAlex Bennée     if (mr->global_locking && !qemu_mutex_iothread_locked()) {
901d9bb58e5SYang Zhong         qemu_mutex_lock_iothread();
902d9bb58e5SYang Zhong         locked = true;
903d9bb58e5SYang Zhong     }
9042d54f194SPeter Maydell     r = memory_region_dispatch_read(mr, mr_offset,
90504e3aabdSPeter Maydell                                     &val, size, iotlbentry->attrs);
90604e3aabdSPeter Maydell     if (r != MEMTX_OK) {
9072d54f194SPeter Maydell         hwaddr physaddr = mr_offset +
9082d54f194SPeter Maydell             section->offset_within_address_space -
9092d54f194SPeter Maydell             section->offset_within_region;
9102d54f194SPeter Maydell 
911dbea78a4SPeter Maydell         cpu_transaction_failed(cpu, physaddr, addr, size, access_type,
91204e3aabdSPeter Maydell                                mmu_idx, iotlbentry->attrs, r, retaddr);
91304e3aabdSPeter Maydell     }
914d9bb58e5SYang Zhong     if (locked) {
915d9bb58e5SYang Zhong         qemu_mutex_unlock_iothread();
916d9bb58e5SYang Zhong     }
917d9bb58e5SYang Zhong 
918d9bb58e5SYang Zhong     return val;
919d9bb58e5SYang Zhong }
920d9bb58e5SYang Zhong 
921d9bb58e5SYang Zhong static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
922f1be3696SRichard Henderson                       int mmu_idx, uint64_t val, target_ulong addr,
923f1be3696SRichard Henderson                       uintptr_t retaddr, int size)
924d9bb58e5SYang Zhong {
925d9bb58e5SYang Zhong     CPUState *cpu = ENV_GET_CPU(env);
9262d54f194SPeter Maydell     hwaddr mr_offset;
9272d54f194SPeter Maydell     MemoryRegionSection *section;
9282d54f194SPeter Maydell     MemoryRegion *mr;
929d9bb58e5SYang Zhong     bool locked = false;
93004e3aabdSPeter Maydell     MemTxResult r;
931d9bb58e5SYang Zhong 
9322d54f194SPeter Maydell     section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
9332d54f194SPeter Maydell     mr = section->mr;
9342d54f194SPeter Maydell     mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
935d9bb58e5SYang Zhong     if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
936d9bb58e5SYang Zhong         cpu_io_recompile(cpu, retaddr);
937d9bb58e5SYang Zhong     }
938d9bb58e5SYang Zhong     cpu->mem_io_vaddr = addr;
939d9bb58e5SYang Zhong     cpu->mem_io_pc = retaddr;
940d9bb58e5SYang Zhong 
9418b812533SAlex Bennée     if (mr->global_locking && !qemu_mutex_iothread_locked()) {
942d9bb58e5SYang Zhong         qemu_mutex_lock_iothread();
943d9bb58e5SYang Zhong         locked = true;
944d9bb58e5SYang Zhong     }
9452d54f194SPeter Maydell     r = memory_region_dispatch_write(mr, mr_offset,
94604e3aabdSPeter Maydell                                      val, size, iotlbentry->attrs);
94704e3aabdSPeter Maydell     if (r != MEMTX_OK) {
9482d54f194SPeter Maydell         hwaddr physaddr = mr_offset +
9492d54f194SPeter Maydell             section->offset_within_address_space -
9502d54f194SPeter Maydell             section->offset_within_region;
9512d54f194SPeter Maydell 
95204e3aabdSPeter Maydell         cpu_transaction_failed(cpu, physaddr, addr, size, MMU_DATA_STORE,
95304e3aabdSPeter Maydell                                mmu_idx, iotlbentry->attrs, r, retaddr);
95404e3aabdSPeter Maydell     }
955d9bb58e5SYang Zhong     if (locked) {
956d9bb58e5SYang Zhong         qemu_mutex_unlock_iothread();
957d9bb58e5SYang Zhong     }
958d9bb58e5SYang Zhong }
959d9bb58e5SYang Zhong 
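/*
 * Illustrative sketch (not part of the original file): the BQL pattern
 * used by io_readx() and io_writex() above.  A MemoryRegion with
 * mr->global_locking set may only be dispatched to while the iothread
 * mutex is held, so the helpers take the lock on demand and release it
 * only if they were the ones to acquire it.  The hypothetical function
 * below shows the same idiom in isolation.
 */
static MemTxResult __attribute__((unused))
example_locked_dispatch_write(MemoryRegion *mr, hwaddr offset,
                              uint64_t val, unsigned size, MemTxAttrs attrs)
{
    bool locked = false;
    MemTxResult r;

    if (mr->global_locking && !qemu_mutex_iothread_locked()) {
        qemu_mutex_lock_iothread();
        locked = true;              /* remember that *we* took the lock */
    }
    r = memory_region_dispatch_write(mr, offset, val, size, attrs);
    if (locked) {
        qemu_mutex_unlock_iothread();
    }
    return r;
}
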
9604811e909SRichard Henderson static inline target_ulong tlb_read_ofs(CPUTLBEntry *entry, size_t ofs)
9614811e909SRichard Henderson {
9624811e909SRichard Henderson #if TCG_OVERSIZED_GUEST
9634811e909SRichard Henderson     return *(target_ulong *)((uintptr_t)entry + ofs);
9644811e909SRichard Henderson #else
9654811e909SRichard Henderson     /* ofs might correspond to .addr_write, so use atomic_read */
9664811e909SRichard Henderson     return atomic_read((target_ulong *)((uintptr_t)entry + ofs));
9674811e909SRichard Henderson #endif
9684811e909SRichard Henderson }
9694811e909SRichard Henderson 
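/*
 * Illustrative example (not in the original): tlb_read_ofs() lets a
 * caller select the comparator field at run time.  Reading the write
 * comparator of an entry:
 *
 *     target_ulong w = tlb_read_ofs(entry,
 *                                   offsetof(CPUTLBEntry, addr_write));
 *
 * is equivalent to tlb_addr_write(entry), including the atomic_read
 * used when another vCPU may be updating the entry concurrently.
 */
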
970d9bb58e5SYang Zhong /* Return true if ADDR is present in the victim tlb, and has been copied
971d9bb58e5SYang Zhong    back to the main tlb.  */
972d9bb58e5SYang Zhong static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
973d9bb58e5SYang Zhong                            size_t elt_ofs, target_ulong page)
974d9bb58e5SYang Zhong {
975d9bb58e5SYang Zhong     size_t vidx;
97671aec354SEmilio G. Cota 
97771aec354SEmilio G. Cota     assert_cpu_is_self(ENV_GET_CPU(env));
978d9bb58e5SYang Zhong     for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
979d9bb58e5SYang Zhong         CPUTLBEntry *vtlb = &env->tlb_v_table[mmu_idx][vidx];
9804811e909SRichard Henderson         target_ulong cmp = tlb_read_ofs(vtlb, elt_ofs);
981d9bb58e5SYang Zhong 
982d9bb58e5SYang Zhong         if (cmp == page) {
983d9bb58e5SYang Zhong             /* Found entry in victim tlb, swap tlb and iotlb.  */
984d9bb58e5SYang Zhong             CPUTLBEntry tmptlb, *tlb = &env->tlb_table[mmu_idx][index];
985d9bb58e5SYang Zhong 
98653d28455SRichard Henderson             qemu_spin_lock(&env->tlb_c.lock);
98771aec354SEmilio G. Cota             copy_tlb_helper_locked(&tmptlb, tlb);
98871aec354SEmilio G. Cota             copy_tlb_helper_locked(tlb, vtlb);
98971aec354SEmilio G. Cota             copy_tlb_helper_locked(vtlb, &tmptlb);
99053d28455SRichard Henderson             qemu_spin_unlock(&env->tlb_c.lock);
991d9bb58e5SYang Zhong 
992d9bb58e5SYang Zhong             CPUIOTLBEntry tmpio, *io = &env->iotlb[mmu_idx][index];
993d9bb58e5SYang Zhong             CPUIOTLBEntry *vio = &env->iotlb_v[mmu_idx][vidx];
994d9bb58e5SYang Zhong             tmpio = *io; *io = *vio; *vio = tmpio;
995d9bb58e5SYang Zhong             return true;
996d9bb58e5SYang Zhong         }
997d9bb58e5SYang Zhong     }
998d9bb58e5SYang Zhong     return false;
999d9bb58e5SYang Zhong }
1000d9bb58e5SYang Zhong 
1001d9bb58e5SYang Zhong /* Macro to call the above, with local variables from the use context.  */
1002d9bb58e5SYang Zhong #define VICTIM_TLB_HIT(TY, ADDR) \
1003d9bb58e5SYang Zhong   victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
1004d9bb58e5SYang Zhong                  (ADDR) & TARGET_PAGE_MASK)
1005d9bb58e5SYang Zhong 
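/*
 * Illustrative usage (not in the original): VICTIM_TLB_HIT expects the
 * locals "env", "mmu_idx" and "index" to be in scope, matching the
 * refill sequence used throughout this file:
 *
 *     if (!tlb_hit(tlb_addr_write(entry), addr)) {
 *         if (!VICTIM_TLB_HIT(addr_write, addr)) {
 *             tlb_fill(ENV_GET_CPU(env), addr, size, MMU_DATA_STORE,
 *                      mmu_idx, retaddr);
 *         }
 *         entry = tlb_entry(env, mmu_idx, addr);
 *     }
 *
 * where the final tlb_entry() refetch allows for the TLB having been
 * resized by tlb_fill().
 */
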
1006f2553f04SKONRAD Frederic /* NOTE: this function can trigger an exception */
1007f2553f04SKONRAD Frederic /* NOTE2: the returned address is not exactly the physical address: it
1008f2553f04SKONRAD Frederic  * is actually a ram_addr_t (in system mode; the user mode emulation
1009f2553f04SKONRAD Frederic  * version of this function returns a guest virtual address).
1010f2553f04SKONRAD Frederic  */
1011f2553f04SKONRAD Frederic tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
1012f2553f04SKONRAD Frederic {
1013383beda9SRichard Henderson     uintptr_t mmu_idx = cpu_mmu_index(env, true);
1014383beda9SRichard Henderson     uintptr_t index = tlb_index(env, mmu_idx, addr);
1015383beda9SRichard Henderson     CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
1016f2553f04SKONRAD Frederic     void *p;
1017f2553f04SKONRAD Frederic 
1018383beda9SRichard Henderson     if (unlikely(!tlb_hit(entry->addr_code, addr))) {
1019b493ccf1SPeter Maydell         if (!VICTIM_TLB_HIT(addr_code, addr)) {
102098670d47SLaurent Vivier             tlb_fill(ENV_GET_CPU(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0);
10216d967cb8SEmilio G. Cota             index = tlb_index(env, mmu_idx, addr);
10226d967cb8SEmilio G. Cota             entry = tlb_entry(env, mmu_idx, addr);
102371b9a453SKONRAD Frederic         }
1024383beda9SRichard Henderson         assert(tlb_hit(entry->addr_code, addr));
1025f2553f04SKONRAD Frederic     }
102655df6fcfSPeter Maydell 
1027383beda9SRichard Henderson     if (unlikely(entry->addr_code & (TLB_RECHECK | TLB_MMIO))) {
102855df6fcfSPeter Maydell         /*
102955a7cb14SPeter Maydell          * Return -1 if we can't translate and execute from an entire
103055a7cb14SPeter Maydell          * page of RAM here, which will cause us to execute by loading
103155a7cb14SPeter Maydell          * and translating one insn at a time, without caching:
103255a7cb14SPeter Maydell          *  - TLB_RECHECK: means the MMU protection covers a smaller range
103355a7cb14SPeter Maydell          *    than a target page, so we must redo the MMU check every insn
103455a7cb14SPeter Maydell          *  - TLB_MMIO: region is not backed by RAM
103555df6fcfSPeter Maydell          */
103620cb6ae4SPeter Maydell         return -1;
103755df6fcfSPeter Maydell     }
103855df6fcfSPeter Maydell 
1039383beda9SRichard Henderson     p = (void *)((uintptr_t)addr + entry->addend);
1040f2553f04SKONRAD Frederic     return qemu_ram_addr_from_host_nofail(p);
1041f2553f04SKONRAD Frederic }
1042f2553f04SKONRAD Frederic 
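/*
 * Illustrative sketch (not in the original): a hypothetical caller of
 * get_page_addr_code() must treat -1 as "do not cache":
 */
static void __attribute__((unused))
example_check_code_page(CPUArchState *env, target_ulong pc)
{
    tb_page_addr_t phys_page = get_page_addr_code(env, pc);

    if (phys_page == (tb_page_addr_t)-1) {
        /* MMIO or sub-page protection: translate and execute one
           insn at a time, without caching a TB for this page.  */
    } else {
        /* phys_page is the ram_addr_t used to hash and cache the TB. */
    }
}
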
1043d9bb58e5SYang Zhong /* Probe for whether the specified guest write access is permitted.
1044d9bb58e5SYang Zhong  * If it is not permitted then an exception will be taken in the same
1045d9bb58e5SYang Zhong  * way as if this were a real write access (and we will not return).
1046d9bb58e5SYang Zhong  * Otherwise the function will return, and there will be a valid
1047d9bb58e5SYang Zhong  * entry in the TLB for this access.
1048d9bb58e5SYang Zhong  */
104998670d47SLaurent Vivier void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx,
1050d9bb58e5SYang Zhong                  uintptr_t retaddr)
1051d9bb58e5SYang Zhong {
1052383beda9SRichard Henderson     uintptr_t index = tlb_index(env, mmu_idx, addr);
1053383beda9SRichard Henderson     CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
1054d9bb58e5SYang Zhong 
1055403f290cSEmilio G. Cota     if (!tlb_hit(tlb_addr_write(entry), addr)) {
1056d9bb58e5SYang Zhong         /* TLB entry is for a different page */
1057d9bb58e5SYang Zhong         if (!VICTIM_TLB_HIT(addr_write, addr)) {
105898670d47SLaurent Vivier             tlb_fill(ENV_GET_CPU(env), addr, size, MMU_DATA_STORE,
105998670d47SLaurent Vivier                      mmu_idx, retaddr);
1060d9bb58e5SYang Zhong         }
1061d9bb58e5SYang Zhong     }
1062d9bb58e5SYang Zhong }
1063d9bb58e5SYang Zhong 
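/*
 * Illustrative sketch (not in the original; the names below are
 * hypothetical): probe_write() suits target helpers that must know,
 * before modifying any state, that a store will succeed.  Note this
 * simple sketch ignores the case of the range crossing a page
 * boundary, which would need a second probe.
 */
static void __attribute__((unused))
example_store_pair(CPUArchState *env, target_ulong addr,
                   uint64_t lo, uint64_t hi, uintptr_t ra)
{
    int mmu_idx = cpu_mmu_index(env, false);
    TCGMemOpIdx oi = make_memop_idx(MO_LEQ, mmu_idx);

    /* Takes the write fault now, before either store, if the page
       containing addr is not writable; does not return in that case. */
    probe_write(env, addr, 16, mmu_idx, ra);

    helper_le_stq_mmu(env, addr, lo, oi, ra);
    helper_le_stq_mmu(env, addr + 8, hi, oi, ra);
}
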
10644811e909SRichard Henderson void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
10654811e909SRichard Henderson                         MMUAccessType access_type, int mmu_idx)
10664811e909SRichard Henderson {
10674811e909SRichard Henderson     CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
10684811e909SRichard Henderson     uintptr_t tlb_addr, page;
10694811e909SRichard Henderson     size_t elt_ofs;
10704811e909SRichard Henderson 
10714811e909SRichard Henderson     switch (access_type) {
10724811e909SRichard Henderson     case MMU_DATA_LOAD:
10734811e909SRichard Henderson         elt_ofs = offsetof(CPUTLBEntry, addr_read);
10744811e909SRichard Henderson         break;
10754811e909SRichard Henderson     case MMU_DATA_STORE:
10764811e909SRichard Henderson         elt_ofs = offsetof(CPUTLBEntry, addr_write);
10774811e909SRichard Henderson         break;
10784811e909SRichard Henderson     case MMU_INST_FETCH:
10794811e909SRichard Henderson         elt_ofs = offsetof(CPUTLBEntry, addr_code);
10804811e909SRichard Henderson         break;
10814811e909SRichard Henderson     default:
10824811e909SRichard Henderson         g_assert_not_reached();
10834811e909SRichard Henderson     }
10844811e909SRichard Henderson 
10854811e909SRichard Henderson     page = addr & TARGET_PAGE_MASK;
10864811e909SRichard Henderson     tlb_addr = tlb_read_ofs(entry, elt_ofs);
10874811e909SRichard Henderson 
10884811e909SRichard Henderson     if (!tlb_hit_page(tlb_addr, page)) {
10894811e909SRichard Henderson         uintptr_t index = tlb_index(env, mmu_idx, addr);
10904811e909SRichard Henderson 
10914811e909SRichard Henderson         if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, page)) {
10924811e909SRichard Henderson             CPUState *cs = ENV_GET_CPU(env);
10934811e909SRichard Henderson             CPUClass *cc = CPU_GET_CLASS(cs);
10944811e909SRichard Henderson 
10954811e909SRichard Henderson             if (!cc->tlb_fill(cs, addr, 0, access_type, mmu_idx, true, 0)) {
10964811e909SRichard Henderson                 /* Non-faulting page table read failed.  */
10974811e909SRichard Henderson                 return NULL;
10984811e909SRichard Henderson             }
10994811e909SRichard Henderson 
11004811e909SRichard Henderson             /* TLB resize via tlb_fill may have moved the entry.  */
11014811e909SRichard Henderson             entry = tlb_entry(env, mmu_idx, addr);
11024811e909SRichard Henderson         }
11034811e909SRichard Henderson         tlb_addr = tlb_read_ofs(entry, elt_ofs);
11044811e909SRichard Henderson     }
11054811e909SRichard Henderson 
11064811e909SRichard Henderson     if (tlb_addr & ~TARGET_PAGE_MASK) {
11074811e909SRichard Henderson         /* IO access */
11084811e909SRichard Henderson         return NULL;
11094811e909SRichard Henderson     }
11104811e909SRichard Henderson 
11114811e909SRichard Henderson     return (void *)((uintptr_t)addr + entry->addend);
11124811e909SRichard Henderson }
11134811e909SRichard Henderson 
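/*
 * Illustrative sketch (not in the original; the function is
 * hypothetical): tlb_vaddr_to_host() provides a fast path for helpers
 * that want to touch guest memory directly when it is plain RAM, with
 * NULL signalling "use the slow path" (an I/O region, or a failed
 * non-faulting page table read).
 */
static uint32_t __attribute__((unused))
example_ldl_fast(CPUArchState *env, target_ulong addr, int mmu_idx,
                 uintptr_t ra)
{
    void *host = tlb_vaddr_to_host(env, addr, MMU_DATA_LOAD, mmu_idx);

    if (host) {
        return ldl_le_p(host);          /* plain RAM: direct access */
    }
    /* Fall back to the full softmmu load helper.  */
    return helper_le_ldul_mmu(env, addr,
                              make_memop_idx(MO_LEUL, mmu_idx), ra);
}
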
1114d9bb58e5SYang Zhong /* Probe for a read-modify-write atomic operation.  Do not allow unaligned
1115d9bb58e5SYang Zhong  * or I/O operations to proceed.  Return the host address.  */
1116d9bb58e5SYang Zhong static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
111734d49937SPeter Maydell                                TCGMemOpIdx oi, uintptr_t retaddr,
111834d49937SPeter Maydell                                NotDirtyInfo *ndi)
1119d9bb58e5SYang Zhong {
1120d9bb58e5SYang Zhong     size_t mmu_idx = get_mmuidx(oi);
1121383beda9SRichard Henderson     uintptr_t index = tlb_index(env, mmu_idx, addr);
1122383beda9SRichard Henderson     CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
1123403f290cSEmilio G. Cota     target_ulong tlb_addr = tlb_addr_write(tlbe);
1124d9bb58e5SYang Zhong     TCGMemOp mop = get_memop(oi);
1125d9bb58e5SYang Zhong     int a_bits = get_alignment_bits(mop);
1126d9bb58e5SYang Zhong     int s_bits = mop & MO_SIZE;
112734d49937SPeter Maydell     void *hostaddr;
1128d9bb58e5SYang Zhong 
1129d9bb58e5SYang Zhong     /* Adjust the given return address.  */
1130d9bb58e5SYang Zhong     retaddr -= GETPC_ADJ;
1131d9bb58e5SYang Zhong 
1132d9bb58e5SYang Zhong     /* Enforce guest required alignment.  */
1133d9bb58e5SYang Zhong     if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
1134d9bb58e5SYang Zhong         /* ??? Maybe indicate atomic op to cpu_unaligned_access */
1135d9bb58e5SYang Zhong         cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
1136d9bb58e5SYang Zhong                              mmu_idx, retaddr);
1137d9bb58e5SYang Zhong     }
1138d9bb58e5SYang Zhong 
1139d9bb58e5SYang Zhong     /* Enforce qemu required alignment.  */
1140d9bb58e5SYang Zhong     if (unlikely(addr & ((1 << s_bits) - 1))) {
1141d9bb58e5SYang Zhong         /* We get here if guest alignment was not requested,
1142d9bb58e5SYang Zhong            or was not enforced by cpu_unaligned_access above.
1143d9bb58e5SYang Zhong            We might widen the access and emulate, but for now
1144d9bb58e5SYang Zhong            raise an exception and exit the cpu loop.  */
1145d9bb58e5SYang Zhong         goto stop_the_world;
1146d9bb58e5SYang Zhong     }
1147d9bb58e5SYang Zhong 
1148d9bb58e5SYang Zhong     /* Check TLB entry and enforce page permissions.  */
1149334692bcSPeter Maydell     if (!tlb_hit(tlb_addr, addr)) {
1150d9bb58e5SYang Zhong         if (!VICTIM_TLB_HIT(addr_write, addr)) {
115198670d47SLaurent Vivier             tlb_fill(ENV_GET_CPU(env), addr, 1 << s_bits, MMU_DATA_STORE,
115298670d47SLaurent Vivier                      mmu_idx, retaddr);
11536d967cb8SEmilio G. Cota             index = tlb_index(env, mmu_idx, addr);
11546d967cb8SEmilio G. Cota             tlbe = tlb_entry(env, mmu_idx, addr);
1155d9bb58e5SYang Zhong         }
1156403f290cSEmilio G. Cota         tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
1157d9bb58e5SYang Zhong     }
1158d9bb58e5SYang Zhong 
115955df6fcfSPeter Maydell     /* Notice an IO access or a needs-MMU-lookup access */
116055df6fcfSPeter Maydell     if (unlikely(tlb_addr & (TLB_MMIO | TLB_RECHECK))) {
1161d9bb58e5SYang Zhong         /* There's really nothing that can be done to
1162d9bb58e5SYang Zhong            support this apart from stop-the-world.  */
1163d9bb58e5SYang Zhong         goto stop_the_world;
1164d9bb58e5SYang Zhong     }
1165d9bb58e5SYang Zhong 
1166d9bb58e5SYang Zhong     /* Let the guest notice RMW on a write-only page.  */
116734d49937SPeter Maydell     if (unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) {
116898670d47SLaurent Vivier         tlb_fill(ENV_GET_CPU(env), addr, 1 << s_bits, MMU_DATA_LOAD,
116998670d47SLaurent Vivier                  mmu_idx, retaddr);
1170d9bb58e5SYang Zhong         /* Since we don't support reads and writes to different addresses,
1171d9bb58e5SYang Zhong            and we do have the proper page loaded for write, this shouldn't
1172d9bb58e5SYang Zhong            ever return.  But just in case, handle via stop-the-world.  */
1173d9bb58e5SYang Zhong         goto stop_the_world;
1174d9bb58e5SYang Zhong     }
1175d9bb58e5SYang Zhong 
117634d49937SPeter Maydell     hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
117734d49937SPeter Maydell 
117834d49937SPeter Maydell     ndi->active = false;
117934d49937SPeter Maydell     if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
118034d49937SPeter Maydell         ndi->active = true;
118134d49937SPeter Maydell         memory_notdirty_write_prepare(ndi, ENV_GET_CPU(env), addr,
118234d49937SPeter Maydell                                       qemu_ram_addr_from_host_nofail(hostaddr),
118334d49937SPeter Maydell                                       1 << s_bits);
118434d49937SPeter Maydell     }
118534d49937SPeter Maydell 
118634d49937SPeter Maydell     return hostaddr;
1187d9bb58e5SYang Zhong 
1188d9bb58e5SYang Zhong  stop_the_world:
1189d9bb58e5SYang Zhong     cpu_loop_exit_atomic(ENV_GET_CPU(env), retaddr);
1190d9bb58e5SYang Zhong }
1191d9bb58e5SYang Zhong 
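/*
 * Note (not in the original): cpu_loop_exit_atomic() raises
 * EXCP_ATOMIC, after which the outer execution loop replays the
 * operation with every other vCPU excluded (cpu_exec_step_atomic),
 * so the stop-the-world cases above are correct, merely slow.
 */
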
1192d9bb58e5SYang Zhong #ifdef TARGET_WORDS_BIGENDIAN
1193eed56642SAlex Bennée #define NEED_BE_BSWAP 0
1194eed56642SAlex Bennée #define NEED_LE_BSWAP 1
1195d9bb58e5SYang Zhong #else
1196eed56642SAlex Bennée #define NEED_BE_BSWAP 1
1197eed56642SAlex Bennée #define NEED_LE_BSWAP 0
1198d9bb58e5SYang Zhong #endif
1199d9bb58e5SYang Zhong 
1200eed56642SAlex Bennée /*
1201eed56642SAlex Bennée  * Byte Swap Helper
1202eed56642SAlex Bennée  *
1203eed56642SAlex Bennée  * All of this should be optimized away as dead code at compile
1204eed56642SAlex Bennée  * time, depending on the build configuration and the access type.
1205eed56642SAlex Bennée  */
1206d9bb58e5SYang Zhong 
1207eed56642SAlex Bennée static inline uint64_t handle_bswap(uint64_t val, int size, bool big_endian)
1208eed56642SAlex Bennée {
1209eed56642SAlex Bennée     if ((big_endian && NEED_BE_BSWAP) || (!big_endian && NEED_LE_BSWAP)) {
1210eed56642SAlex Bennée         switch (size) {
1211eed56642SAlex Bennée         case 1: return val;
1212eed56642SAlex Bennée         case 2: return bswap16(val);
1213eed56642SAlex Bennée         case 4: return bswap32(val);
1214eed56642SAlex Bennée         case 8: return bswap64(val);
1215eed56642SAlex Bennée         default:
1216eed56642SAlex Bennée             g_assert_not_reached();
1217eed56642SAlex Bennée         }
1218eed56642SAlex Bennée     } else {
1219eed56642SAlex Bennée         return val;
1220eed56642SAlex Bennée     }
1221eed56642SAlex Bennée }
1222d9bb58e5SYang Zhong 
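/*
 * Worked example (not in the original): when TARGET_WORDS_BIGENDIAN is
 * undefined, NEED_BE_BSWAP is 1, so a 4-byte big-endian access yields
 *
 *     handle_bswap(0x12345678, 4, true) == bswap32(0x12345678)
 *                                       == 0x78563412
 *
 * while little-endian accesses are returned unchanged; with
 * TARGET_WORDS_BIGENDIAN defined the roles are reversed.  The endian
 * test is a compile-time constant, so the untaken arm folds away.
 */
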
1223eed56642SAlex Bennée /*
1224eed56642SAlex Bennée  * Load Helpers
1225eed56642SAlex Bennée  *
1226eed56642SAlex Bennée  * We support two different access types. SOFTMMU_CODE_ACCESS is
1227eed56642SAlex Bennée  * specifically for reading instructions from system memory. It is
1228eed56642SAlex Bennée  * used by the translation loop and in some helpers where the code
1229eed56642SAlex Bennée  * is disassembled; it shouldn't be used directly by guest code.
1230eed56642SAlex Bennée  */
1231d9bb58e5SYang Zhong 
12322dd92606SRichard Henderson typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr,
12332dd92606SRichard Henderson                                 TCGMemOpIdx oi, uintptr_t retaddr);
12342dd92606SRichard Henderson 
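/*
 * Note (not in the original): load_helper() is always_inline and only
 * ever invoked with compile-time-constant size/endian/code_read
 * arguments, so each full_*_mmu wrapper below becomes a specialized
 * copy.  The full_load callback exists so the unaligned slow path can
 * re-enter the same specialization for its two aligned sub-accesses.
 */
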
12352dd92606SRichard Henderson static inline uint64_t __attribute__((always_inline))
12362dd92606SRichard Henderson load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
12372dd92606SRichard Henderson             uintptr_t retaddr, size_t size, bool big_endian, bool code_read,
12382dd92606SRichard Henderson             FullLoadHelper *full_load)
1239eed56642SAlex Bennée {
1240eed56642SAlex Bennée     uintptr_t mmu_idx = get_mmuidx(oi);
1241eed56642SAlex Bennée     uintptr_t index = tlb_index(env, mmu_idx, addr);
1242eed56642SAlex Bennée     CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
1243eed56642SAlex Bennée     target_ulong tlb_addr = code_read ? entry->addr_code : entry->addr_read;
1244eed56642SAlex Bennée     const size_t tlb_off = code_read ?
1245eed56642SAlex Bennée         offsetof(CPUTLBEntry, addr_code) : offsetof(CPUTLBEntry, addr_read);
1246f1be3696SRichard Henderson     const MMUAccessType access_type =
1247f1be3696SRichard Henderson         code_read ? MMU_INST_FETCH : MMU_DATA_LOAD;
1248eed56642SAlex Bennée     unsigned a_bits = get_alignment_bits(get_memop(oi));
1249eed56642SAlex Bennée     void *haddr;
1250eed56642SAlex Bennée     uint64_t res;
1251d9bb58e5SYang Zhong 
1252eed56642SAlex Bennée     /* Handle CPU specific unaligned behaviour */
1253eed56642SAlex Bennée     if (addr & ((1 << a_bits) - 1)) {
1254f1be3696SRichard Henderson         cpu_unaligned_access(ENV_GET_CPU(env), addr, access_type,
1255eed56642SAlex Bennée                              mmu_idx, retaddr);
1256eed56642SAlex Bennée     }
1257eed56642SAlex Bennée 
1258eed56642SAlex Bennée     /* If the TLB entry is for a different page, reload and try again.  */
1259eed56642SAlex Bennée     if (!tlb_hit(tlb_addr, addr)) {
1260eed56642SAlex Bennée         if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
1261eed56642SAlex Bennée                             addr & TARGET_PAGE_MASK)) {
1262eed56642SAlex Bennée             tlb_fill(ENV_GET_CPU(env), addr, size,
1263f1be3696SRichard Henderson                      access_type, mmu_idx, retaddr);
1264eed56642SAlex Bennée             index = tlb_index(env, mmu_idx, addr);
1265eed56642SAlex Bennée             entry = tlb_entry(env, mmu_idx, addr);
1266eed56642SAlex Bennée         }
1267eed56642SAlex Bennée         tlb_addr = code_read ? entry->addr_code : entry->addr_read;
1268eed56642SAlex Bennée     }
1269eed56642SAlex Bennée 
1270eed56642SAlex Bennée     /* Handle an IO access.  */
1271eed56642SAlex Bennée     if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
1272eed56642SAlex Bennée         if ((addr & (size - 1)) != 0) {
1273eed56642SAlex Bennée             goto do_unaligned_access;
1274eed56642SAlex Bennée         }
1275eed56642SAlex Bennée 
1276f1be3696SRichard Henderson         if (tlb_addr & TLB_RECHECK) {
1277f1be3696SRichard Henderson             /*
1278f1be3696SRichard Henderson              * This is a TLB_RECHECK access, where the MMU protection
1279f1be3696SRichard Henderson              * covers a smaller range than a target page, and we must
1280f1be3696SRichard Henderson              * repeat the MMU check here. This tlb_fill() call might
1281f1be3696SRichard Henderson              * longjump out if this access should cause a guest exception.
1282f1be3696SRichard Henderson              */
1283f1be3696SRichard Henderson             tlb_fill(ENV_GET_CPU(env), addr, size,
1284f1be3696SRichard Henderson                      access_type, mmu_idx, retaddr);
1285f1be3696SRichard Henderson             index = tlb_index(env, mmu_idx, addr);
1286f1be3696SRichard Henderson             entry = tlb_entry(env, mmu_idx, addr);
1287f1be3696SRichard Henderson 
1288f1be3696SRichard Henderson             tlb_addr = code_read ? entry->addr_code : entry->addr_read;
1289f1be3696SRichard Henderson             tlb_addr &= ~TLB_RECHECK;
1290f1be3696SRichard Henderson             if (!(tlb_addr & ~TARGET_PAGE_MASK)) {
1291f1be3696SRichard Henderson                 /* RAM access */
1292f1be3696SRichard Henderson                 goto do_aligned_access;
1293f1be3696SRichard Henderson             }
1294f1be3696SRichard Henderson         }
1295f1be3696SRichard Henderson 
1296f1be3696SRichard Henderson         res = io_readx(env, &env->iotlb[mmu_idx][index], mmu_idx, addr,
1297f1be3696SRichard Henderson                        retaddr, access_type, size);
1298f1be3696SRichard Henderson         return handle_bswap(res, size, big_endian);
1299eed56642SAlex Bennée     }
1300eed56642SAlex Bennée 
1301eed56642SAlex Bennée     /* Handle slow unaligned access (it spans two pages or IO).  */
1302eed56642SAlex Bennée     if (size > 1
1303eed56642SAlex Bennée         && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
1304eed56642SAlex Bennée                     >= TARGET_PAGE_SIZE)) {
1305eed56642SAlex Bennée         target_ulong addr1, addr2;
1306eed56642SAlex Bennée         tcg_target_ulong r1, r2;
1307eed56642SAlex Bennée         unsigned shift;
1308eed56642SAlex Bennée     do_unaligned_access:
1309eed56642SAlex Bennée         addr1 = addr & ~(size - 1);
1310eed56642SAlex Bennée         addr2 = addr1 + size;
13112dd92606SRichard Henderson         r1 = full_load(env, addr1, oi, retaddr);
13122dd92606SRichard Henderson         r2 = full_load(env, addr2, oi, retaddr);
1313eed56642SAlex Bennée         shift = (addr & (size - 1)) * 8;
1314eed56642SAlex Bennée 
1315eed56642SAlex Bennée         if (big_endian) {
1316eed56642SAlex Bennée             /* Big-endian combine.  */
1317eed56642SAlex Bennée             res = (r1 << shift) | (r2 >> ((size * 8) - shift));
1318eed56642SAlex Bennée         } else {
1319eed56642SAlex Bennée             /* Little-endian combine.  */
1320eed56642SAlex Bennée             res = (r1 >> shift) | (r2 << ((size * 8) - shift));
1321eed56642SAlex Bennée         }
1322eed56642SAlex Bennée         return res & MAKE_64BIT_MASK(0, size * 8);
1323eed56642SAlex Bennée     }
1324eed56642SAlex Bennée 
1325f1be3696SRichard Henderson  do_aligned_access:
1326eed56642SAlex Bennée     haddr = (void *)((uintptr_t)addr + entry->addend);
1327eed56642SAlex Bennée     switch (size) {
1328eed56642SAlex Bennée     case 1:
1329eed56642SAlex Bennée         res = ldub_p(haddr);
1330eed56642SAlex Bennée         break;
1331eed56642SAlex Bennée     case 2:
1332eed56642SAlex Bennée         if (big_endian) {
1333eed56642SAlex Bennée             res = lduw_be_p(haddr);
1334eed56642SAlex Bennée         } else {
1335eed56642SAlex Bennée             res = lduw_le_p(haddr);
1336eed56642SAlex Bennée         }
1337eed56642SAlex Bennée         break;
1338eed56642SAlex Bennée     case 4:
1339eed56642SAlex Bennée         if (big_endian) {
1340eed56642SAlex Bennée             res = (uint32_t)ldl_be_p(haddr);
1341eed56642SAlex Bennée         } else {
1342eed56642SAlex Bennée             res = (uint32_t)ldl_le_p(haddr);
1343eed56642SAlex Bennée         }
1344eed56642SAlex Bennée         break;
1345eed56642SAlex Bennée     case 8:
1346eed56642SAlex Bennée         if (big_endian) {
1347eed56642SAlex Bennée             res = ldq_be_p(haddr);
1348eed56642SAlex Bennée         } else {
1349eed56642SAlex Bennée             res = ldq_le_p(haddr);
1350eed56642SAlex Bennée         }
1351eed56642SAlex Bennée         break;
1352eed56642SAlex Bennée     default:
1353eed56642SAlex Bennée         g_assert_not_reached();
1354eed56642SAlex Bennée     }
1355eed56642SAlex Bennée 
1356eed56642SAlex Bennée     return res;
1357eed56642SAlex Bennée }
1358eed56642SAlex Bennée 
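/*
 * Worked example for the unaligned slow path above (not in the
 * original): a little-endian 4-byte load with (addr & 3) == 2 reads
 * the aligned words r1 at addr1 = addr & ~3 and r2 at addr1 + 4, with
 * shift = 16.  Then
 *
 *     res = (r1 >> 16) | (r2 << 16)
 *
 * takes the top two bytes of r1 as the low half and the bottom two
 * bytes of r2 as the high half, and MAKE_64BIT_MASK(0, 32) trims the
 * result back to 32 bits.
 */
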
1359eed56642SAlex Bennée /*
1360eed56642SAlex Bennée  * For the benefit of TCG generated code, we want to avoid the
1361eed56642SAlex Bennée  * complication of ABI-specific return type promotion and always
1362eed56642SAlex Bennée  * return a value extended to the register size of the host. This is
1363eed56642SAlex Bennée  * tcg_target_long, except in the case of a 32-bit host and 64-bit
1364eed56642SAlex Bennée  * data, in which case we always use uint64_t.
1365eed56642SAlex Bennée  *
1366eed56642SAlex Bennée  * We don't bother with this widened value for SOFTMMU_CODE_ACCESS.
1367eed56642SAlex Bennée  */
1368eed56642SAlex Bennée 
13692dd92606SRichard Henderson static uint64_t full_ldub_mmu(CPUArchState *env, target_ulong addr,
13702dd92606SRichard Henderson                               TCGMemOpIdx oi, uintptr_t retaddr)
13712dd92606SRichard Henderson {
13722dd92606SRichard Henderson     return load_helper(env, addr, oi, retaddr, 1, false, false,
13732dd92606SRichard Henderson                        full_ldub_mmu);
13742dd92606SRichard Henderson }
13752dd92606SRichard Henderson 
1376fc1bc777SRichard Henderson tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
1377fc1bc777SRichard Henderson                                      TCGMemOpIdx oi, uintptr_t retaddr)
1378eed56642SAlex Bennée {
13792dd92606SRichard Henderson     return full_ldub_mmu(env, addr, oi, retaddr);
13802dd92606SRichard Henderson }
13812dd92606SRichard Henderson 
13822dd92606SRichard Henderson static uint64_t full_le_lduw_mmu(CPUArchState *env, target_ulong addr,
13832dd92606SRichard Henderson                                  TCGMemOpIdx oi, uintptr_t retaddr)
13842dd92606SRichard Henderson {
13852dd92606SRichard Henderson     return load_helper(env, addr, oi, retaddr, 2, false, false,
13862dd92606SRichard Henderson                        full_le_lduw_mmu);
1387eed56642SAlex Bennée }
1388eed56642SAlex Bennée 
1389fc1bc777SRichard Henderson tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
1390fc1bc777SRichard Henderson                                     TCGMemOpIdx oi, uintptr_t retaddr)
1391eed56642SAlex Bennée {
13922dd92606SRichard Henderson     return full_le_lduw_mmu(env, addr, oi, retaddr);
13932dd92606SRichard Henderson }
13942dd92606SRichard Henderson 
13952dd92606SRichard Henderson static uint64_t full_be_lduw_mmu(CPUArchState *env, target_ulong addr,
13962dd92606SRichard Henderson                                  TCGMemOpIdx oi, uintptr_t retaddr)
13972dd92606SRichard Henderson {
13982dd92606SRichard Henderson     return load_helper(env, addr, oi, retaddr, 2, true, false,
13992dd92606SRichard Henderson                        full_be_lduw_mmu);
1400eed56642SAlex Bennée }
1401eed56642SAlex Bennée 
1402fc1bc777SRichard Henderson tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
1403fc1bc777SRichard Henderson                                     TCGMemOpIdx oi, uintptr_t retaddr)
1404eed56642SAlex Bennée {
14052dd92606SRichard Henderson     return full_be_lduw_mmu(env, addr, oi, retaddr);
14062dd92606SRichard Henderson }
14072dd92606SRichard Henderson 
14082dd92606SRichard Henderson static uint64_t full_le_ldul_mmu(CPUArchState *env, target_ulong addr,
14092dd92606SRichard Henderson                                  TCGMemOpIdx oi, uintptr_t retaddr)
14102dd92606SRichard Henderson {
14112dd92606SRichard Henderson     return load_helper(env, addr, oi, retaddr, 4, false, false,
14122dd92606SRichard Henderson                        full_le_ldul_mmu);
1413eed56642SAlex Bennée }
1414eed56642SAlex Bennée 
1415fc1bc777SRichard Henderson tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
1416fc1bc777SRichard Henderson                                     TCGMemOpIdx oi, uintptr_t retaddr)
1417eed56642SAlex Bennée {
14182dd92606SRichard Henderson     return full_le_ldul_mmu(env, addr, oi, retaddr);
14192dd92606SRichard Henderson }
14202dd92606SRichard Henderson 
14212dd92606SRichard Henderson static uint64_t full_be_ldul_mmu(CPUArchState *env, target_ulong addr,
14222dd92606SRichard Henderson                                  TCGMemOpIdx oi, uintptr_t retaddr)
14232dd92606SRichard Henderson {
14242dd92606SRichard Henderson     return load_helper(env, addr, oi, retaddr, 4, true, false,
14252dd92606SRichard Henderson                        full_be_ldul_mmu);
1426eed56642SAlex Bennée }
1427eed56642SAlex Bennée 
1428fc1bc777SRichard Henderson tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
1429fc1bc777SRichard Henderson                                     TCGMemOpIdx oi, uintptr_t retaddr)
1430eed56642SAlex Bennée {
14312dd92606SRichard Henderson     return full_be_ldul_mmu(env, addr, oi, retaddr);
1432eed56642SAlex Bennée }
1433eed56642SAlex Bennée 
1434fc1bc777SRichard Henderson uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
1435fc1bc777SRichard Henderson                            TCGMemOpIdx oi, uintptr_t retaddr)
1436eed56642SAlex Bennée {
14372dd92606SRichard Henderson     return load_helper(env, addr, oi, retaddr, 8, false, false,
14382dd92606SRichard Henderson                        helper_le_ldq_mmu);
1439eed56642SAlex Bennée }
1440eed56642SAlex Bennée 
1441fc1bc777SRichard Henderson uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
1442fc1bc777SRichard Henderson                            TCGMemOpIdx oi, uintptr_t retaddr)
1443eed56642SAlex Bennée {
14442dd92606SRichard Henderson     return load_helper(env, addr, oi, retaddr, 8, true, false,
14452dd92606SRichard Henderson                        helper_be_ldq_mmu);
1446eed56642SAlex Bennée }
1447eed56642SAlex Bennée 
1448eed56642SAlex Bennée /*
1449eed56642SAlex Bennée  * Provide signed versions of the load routines as well.  We can of course
1450eed56642SAlex Bennée  * avoid this for 64-bit data, or for 32-bit data on a 32-bit host.
1451eed56642SAlex Bennée  */
1452eed56642SAlex Bennée 
1453eed56642SAlex Bennée 
1454eed56642SAlex Bennée tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
1455eed56642SAlex Bennée                                      TCGMemOpIdx oi, uintptr_t retaddr)
1456eed56642SAlex Bennée {
1457eed56642SAlex Bennée     return (int8_t)helper_ret_ldub_mmu(env, addr, oi, retaddr);
1458eed56642SAlex Bennée }
1459eed56642SAlex Bennée 
1460eed56642SAlex Bennée tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
1461eed56642SAlex Bennée                                     TCGMemOpIdx oi, uintptr_t retaddr)
1462eed56642SAlex Bennée {
1463eed56642SAlex Bennée     return (int16_t)helper_le_lduw_mmu(env, addr, oi, retaddr);
1464eed56642SAlex Bennée }
1465eed56642SAlex Bennée 
1466eed56642SAlex Bennée tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
1467eed56642SAlex Bennée                                     TCGMemOpIdx oi, uintptr_t retaddr)
1468eed56642SAlex Bennée {
1469eed56642SAlex Bennée     return (int16_t)helper_be_lduw_mmu(env, addr, oi, retaddr);
1470eed56642SAlex Bennée }
1471eed56642SAlex Bennée 
1472eed56642SAlex Bennée tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
1473eed56642SAlex Bennée                                     TCGMemOpIdx oi, uintptr_t retaddr)
1474eed56642SAlex Bennée {
1475eed56642SAlex Bennée     return (int32_t)helper_le_ldul_mmu(env, addr, oi, retaddr);
1476eed56642SAlex Bennée }
1477eed56642SAlex Bennée 
1478eed56642SAlex Bennée tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
1479eed56642SAlex Bennée                                     TCGMemOpIdx oi, uintptr_t retaddr)
1480eed56642SAlex Bennée {
1481eed56642SAlex Bennée     return (int32_t)helper_be_ldul_mmu(env, addr, oi, retaddr);
1482eed56642SAlex Bennée }
1483eed56642SAlex Bennée 
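/*
 * Worked example (not in the original): loading the byte 0xFF via
 * helper_ret_ldsb_mmu gives (int8_t)0xFF == -1, and the implicit
 * conversion back to tcg_target_ulong sign-extends that to all-ones
 * across the full host register, as the signed memops require.
 */
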
1484eed56642SAlex Bennée /*
1485eed56642SAlex Bennée  * Store Helpers
1486eed56642SAlex Bennée  */
1487eed56642SAlex Bennée 
14884601f8d1SRichard Henderson static inline void __attribute__((always_inline))
14894601f8d1SRichard Henderson store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
14904601f8d1SRichard Henderson              TCGMemOpIdx oi, uintptr_t retaddr, size_t size, bool big_endian)
1491eed56642SAlex Bennée {
1492eed56642SAlex Bennée     uintptr_t mmu_idx = get_mmuidx(oi);
1493eed56642SAlex Bennée     uintptr_t index = tlb_index(env, mmu_idx, addr);
1494eed56642SAlex Bennée     CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
1495eed56642SAlex Bennée     target_ulong tlb_addr = tlb_addr_write(entry);
1496eed56642SAlex Bennée     const size_t tlb_off = offsetof(CPUTLBEntry, addr_write);
1497eed56642SAlex Bennée     unsigned a_bits = get_alignment_bits(get_memop(oi));
1498eed56642SAlex Bennée     void *haddr;
1499eed56642SAlex Bennée 
1500eed56642SAlex Bennée     /* Handle CPU specific unaligned behaviour */
1501eed56642SAlex Bennée     if (addr & ((1 << a_bits) - 1)) {
1502eed56642SAlex Bennée         cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
1503eed56642SAlex Bennée                              mmu_idx, retaddr);
1504eed56642SAlex Bennée     }
1505eed56642SAlex Bennée 
1506eed56642SAlex Bennée     /* If the TLB entry is for a different page, reload and try again.  */
1507eed56642SAlex Bennée     if (!tlb_hit(tlb_addr, addr)) {
1508eed56642SAlex Bennée         if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
1509eed56642SAlex Bennée             addr & TARGET_PAGE_MASK)) {
1510eed56642SAlex Bennée             tlb_fill(ENV_GET_CPU(env), addr, size, MMU_DATA_STORE,
1511eed56642SAlex Bennée                      mmu_idx, retaddr);
1512eed56642SAlex Bennée             index = tlb_index(env, mmu_idx, addr);
1513eed56642SAlex Bennée             entry = tlb_entry(env, mmu_idx, addr);
1514eed56642SAlex Bennée         }
1515eed56642SAlex Bennée         tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK;
1516eed56642SAlex Bennée     }
1517eed56642SAlex Bennée 
1518eed56642SAlex Bennée     /* Handle an IO access.  */
1519eed56642SAlex Bennée     if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
1520eed56642SAlex Bennée         if ((addr & (size - 1)) != 0) {
1521eed56642SAlex Bennée             goto do_unaligned_access;
1522eed56642SAlex Bennée         }
1523eed56642SAlex Bennée 
1524f1be3696SRichard Henderson         if (tlb_addr & TLB_RECHECK) {
1525f1be3696SRichard Henderson             /*
1526f1be3696SRichard Henderson              * This is a TLB_RECHECK access, where the MMU protection
1527f1be3696SRichard Henderson              * covers a smaller range than a target page, and we must
1528f1be3696SRichard Henderson              * repeat the MMU check here. This tlb_fill() call might
1529f1be3696SRichard Henderson              * longjump out if this access should cause a guest exception.
1530f1be3696SRichard Henderson              */
1531f1be3696SRichard Henderson             tlb_fill(ENV_GET_CPU(env), addr, size, MMU_DATA_STORE,
1532f1be3696SRichard Henderson                      mmu_idx, retaddr);
1533f1be3696SRichard Henderson             index = tlb_index(env, mmu_idx, addr);
1534f1be3696SRichard Henderson             entry = tlb_entry(env, mmu_idx, addr);
1535f1be3696SRichard Henderson 
1536f1be3696SRichard Henderson             tlb_addr = tlb_addr_write(entry);
1537f1be3696SRichard Henderson             tlb_addr &= ~TLB_RECHECK;
1538f1be3696SRichard Henderson             if (!(tlb_addr & ~TARGET_PAGE_MASK)) {
1539f1be3696SRichard Henderson                 /* RAM access */
1540f1be3696SRichard Henderson                 goto do_aligned_access;
1541f1be3696SRichard Henderson             }
1542f1be3696SRichard Henderson         }
1543f1be3696SRichard Henderson 
1544f1be3696SRichard Henderson         io_writex(env, &env->iotlb[mmu_idx][index], mmu_idx,
1545eed56642SAlex Bennée                   handle_bswap(val, size, big_endian),
1546f1be3696SRichard Henderson                   addr, retaddr, size);
1547eed56642SAlex Bennée         return;
1548eed56642SAlex Bennée     }
1549eed56642SAlex Bennée 
1550eed56642SAlex Bennée     /* Handle slow unaligned access (it spans two pages or IO).  */
1551eed56642SAlex Bennée     if (size > 1
1552eed56642SAlex Bennée         && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
1553eed56642SAlex Bennée                      >= TARGET_PAGE_SIZE)) {
1554eed56642SAlex Bennée         int i;
1555eed56642SAlex Bennée         uintptr_t index2;
1556eed56642SAlex Bennée         CPUTLBEntry *entry2;
1557eed56642SAlex Bennée         target_ulong page2, tlb_addr2;
1558eed56642SAlex Bennée     do_unaligned_access:
1559eed56642SAlex Bennée         /*
1560eed56642SAlex Bennée          * Ensure the second page is in the TLB.  Note that the first page
1561eed56642SAlex Bennée          * is already guaranteed to be filled, and that the second page
1562eed56642SAlex Bennée          * cannot evict the first.
1563eed56642SAlex Bennée          */
1564eed56642SAlex Bennée         page2 = (addr + size) & TARGET_PAGE_MASK;
1565eed56642SAlex Bennée         index2 = tlb_index(env, mmu_idx, page2);
1566eed56642SAlex Bennée         entry2 = tlb_entry(env, mmu_idx, page2);
1567eed56642SAlex Bennée         tlb_addr2 = tlb_addr_write(entry2);
1568eed56642SAlex Bennée         if (!tlb_hit_page(tlb_addr2, page2)
1569eed56642SAlex Bennée             && !victim_tlb_hit(env, mmu_idx, index2, tlb_off,
1570eed56642SAlex Bennée                                page2 & TARGET_PAGE_MASK)) {
1571eed56642SAlex Bennée             tlb_fill(ENV_GET_CPU(env), page2, size, MMU_DATA_STORE,
1572eed56642SAlex Bennée                      mmu_idx, retaddr);
1573eed56642SAlex Bennée         }
1574eed56642SAlex Bennée 
1575eed56642SAlex Bennée         /*
1576eed56642SAlex Bennée          * XXX: not efficient, but simple.
1577eed56642SAlex Bennée          * This loop must go in the forward direction to avoid issues
1578eed56642SAlex Bennée          * with self-modifying code on 64-bit Windows.
1579eed56642SAlex Bennée          */
1580eed56642SAlex Bennée         for (i = 0; i < size; ++i) {
1581eed56642SAlex Bennée             uint8_t val8;
1582eed56642SAlex Bennée             if (big_endian) {
1583eed56642SAlex Bennée                 /* Big-endian extract.  */
1584eed56642SAlex Bennée                 val8 = val >> (((size - 1) * 8) - (i * 8));
1585eed56642SAlex Bennée             } else {
1586eed56642SAlex Bennée                 /* Little-endian extract.  */
1587eed56642SAlex Bennée                 val8 = val >> (i * 8);
1588eed56642SAlex Bennée             }
15894601f8d1SRichard Henderson             helper_ret_stb_mmu(env, addr + i, val8, oi, retaddr);
1590eed56642SAlex Bennée         }
1591eed56642SAlex Bennée         return;
1592eed56642SAlex Bennée     }
1593eed56642SAlex Bennée 
1594f1be3696SRichard Henderson  do_aligned_access:
1595eed56642SAlex Bennée     haddr = (void *)((uintptr_t)addr + entry->addend);
1596eed56642SAlex Bennée     switch (size) {
1597eed56642SAlex Bennée     case 1:
1598eed56642SAlex Bennée         stb_p(haddr, val);
1599eed56642SAlex Bennée         break;
1600eed56642SAlex Bennée     case 2:
1601eed56642SAlex Bennée         if (big_endian) {
1602eed56642SAlex Bennée             stw_be_p(haddr, val);
1603eed56642SAlex Bennée         } else {
1604eed56642SAlex Bennée             stw_le_p(haddr, val);
1605eed56642SAlex Bennée         }
1606eed56642SAlex Bennée         break;
1607eed56642SAlex Bennée     case 4:
1608eed56642SAlex Bennée         if (big_endian) {
1609eed56642SAlex Bennée             stl_be_p(haddr, val);
1610eed56642SAlex Bennée         } else {
1611eed56642SAlex Bennée             stl_le_p(haddr, val);
1612eed56642SAlex Bennée         }
1613eed56642SAlex Bennée         break;
1614eed56642SAlex Bennée     case 8:
1615eed56642SAlex Bennée         if (big_endian) {
1616eed56642SAlex Bennée             stq_be_p(haddr, val);
1617eed56642SAlex Bennée         } else {
1618eed56642SAlex Bennée             stq_le_p(haddr, val);
1619eed56642SAlex Bennée         }
1620eed56642SAlex Bennée         break;
1621eed56642SAlex Bennée     default:
1622eed56642SAlex Bennée         g_assert_not_reached();
1623eed56642SAlex Bennée         break;
1624eed56642SAlex Bennée     }
1625eed56642SAlex Bennée }
1626eed56642SAlex Bennée 
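/*
 * Worked example for the byte-by-byte unaligned store above (not in
 * the original): storing the 2-byte value 0xAABB writes, in forward
 * order,
 *
 *     big-endian:    byte 0 = 0xAA (val >> 8), byte 1 = 0xBB (val >> 0)
 *     little-endian: byte 0 = 0xBB (val >> 0), byte 1 = 0xAA (val >> 8)
 *
 * so guest memory ends up with the same layout an aligned store of
 * the same value would have produced.
 */
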
1627fc1bc777SRichard Henderson void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
1628eed56642SAlex Bennée                         TCGMemOpIdx oi, uintptr_t retaddr)
1629eed56642SAlex Bennée {
1630eed56642SAlex Bennée     store_helper(env, addr, val, oi, retaddr, 1, false);
1631eed56642SAlex Bennée }
1632eed56642SAlex Bennée 
1633fc1bc777SRichard Henderson void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
1634eed56642SAlex Bennée                        TCGMemOpIdx oi, uintptr_t retaddr)
1635eed56642SAlex Bennée {
1636eed56642SAlex Bennée     store_helper(env, addr, val, oi, retaddr, 2, false);
1637eed56642SAlex Bennée }
1638eed56642SAlex Bennée 
1639fc1bc777SRichard Henderson void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
1640eed56642SAlex Bennée                        TCGMemOpIdx oi, uintptr_t retaddr)
1641eed56642SAlex Bennée {
1642eed56642SAlex Bennée     store_helper(env, addr, val, oi, retaddr, 2, true);
1643eed56642SAlex Bennée }
1644eed56642SAlex Bennée 
1645fc1bc777SRichard Henderson void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
1646eed56642SAlex Bennée                        TCGMemOpIdx oi, uintptr_t retaddr)
1647eed56642SAlex Bennée {
1648eed56642SAlex Bennée     store_helper(env, addr, val, oi, retaddr, 4, false);
1649eed56642SAlex Bennée }
1650eed56642SAlex Bennée 
1651fc1bc777SRichard Henderson void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
1652eed56642SAlex Bennée                        TCGMemOpIdx oi, uintptr_t retaddr)
1653eed56642SAlex Bennée {
1654eed56642SAlex Bennée     store_helper(env, addr, val, oi, retaddr, 4, true);
1655eed56642SAlex Bennée }
1656eed56642SAlex Bennée 
1657fc1bc777SRichard Henderson void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
1658eed56642SAlex Bennée                        TCGMemOpIdx oi, uintptr_t retaddr)
1659eed56642SAlex Bennée {
1660eed56642SAlex Bennée     store_helper(env, addr, val, oi, retaddr, 8, false);
1661eed56642SAlex Bennée }
1662eed56642SAlex Bennée 
1663fc1bc777SRichard Henderson void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
1664eed56642SAlex Bennée                        TCGMemOpIdx oi, uintptr_t retaddr)
1665eed56642SAlex Bennée {
1666eed56642SAlex Bennée     store_helper(env, addr, val, oi, retaddr, 8, true);
1667eed56642SAlex Bennée }
1668d9bb58e5SYang Zhong 
1669d9bb58e5SYang Zhong /* The first set of helpers allows OI and RETADDR to be passed in,
1670d9bb58e5SYang Zhong    which makes them callable from other helpers.  */
1671d9bb58e5SYang Zhong 
1672d9bb58e5SYang Zhong #define EXTRA_ARGS     , TCGMemOpIdx oi, uintptr_t retaddr
1673d9bb58e5SYang Zhong #define ATOMIC_NAME(X) \
1674d9bb58e5SYang Zhong     HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
167534d49937SPeter Maydell #define ATOMIC_MMU_DECLS NotDirtyInfo ndi
167634d49937SPeter Maydell #define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, retaddr, &ndi)
167734d49937SPeter Maydell #define ATOMIC_MMU_CLEANUP                              \
167834d49937SPeter Maydell     do {                                                \
167934d49937SPeter Maydell         if (unlikely(ndi.active)) {                     \
168034d49937SPeter Maydell             memory_notdirty_write_complete(&ndi);       \
168134d49937SPeter Maydell         }                                               \
168234d49937SPeter Maydell     } while (0)
1683d9bb58e5SYang Zhong 
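/*
 * Illustrative sketch (not in the original): with the macros above,
 * each inclusion of "atomic_template.h" expands to helpers of roughly
 * this shape (hypothetical expansion for a 4-byte little-endian
 * cmpxchg):
 *
 *     uint32_t helper_atomic_cmpxchgl_le_mmu(CPUArchState *env,
 *                                            target_ulong addr,
 *                                            uint32_t cmpv,
 *                                            uint32_t newv,
 *                                            TCGMemOpIdx oi,
 *                                            uintptr_t retaddr)
 *     {
 *         ATOMIC_MMU_DECLS;                    // NotDirtyInfo ndi
 *         uint32_t *haddr = ATOMIC_MMU_LOOKUP; // may not return
 *         uint32_t ret = atomic_cmpxchg__nocheck(haddr, cmpv, newv);
 *         ATOMIC_MMU_CLEANUP;                  // complete notdirty write
 *         return ret;
 *     }
 */
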
1684d9bb58e5SYang Zhong #define DATA_SIZE 1
1685d9bb58e5SYang Zhong #include "atomic_template.h"
1686d9bb58e5SYang Zhong 
1687d9bb58e5SYang Zhong #define DATA_SIZE 2
1688d9bb58e5SYang Zhong #include "atomic_template.h"
1689d9bb58e5SYang Zhong 
1690d9bb58e5SYang Zhong #define DATA_SIZE 4
1691d9bb58e5SYang Zhong #include "atomic_template.h"
1692d9bb58e5SYang Zhong 
1693d9bb58e5SYang Zhong #ifdef CONFIG_ATOMIC64
1694d9bb58e5SYang Zhong #define DATA_SIZE 8
1695d9bb58e5SYang Zhong #include "atomic_template.h"
1696d9bb58e5SYang Zhong #endif
1697d9bb58e5SYang Zhong 
1698e6cd4bb5SRichard Henderson #if HAVE_CMPXCHG128 || HAVE_ATOMIC128
1699d9bb58e5SYang Zhong #define DATA_SIZE 16
1700d9bb58e5SYang Zhong #include "atomic_template.h"
1701d9bb58e5SYang Zhong #endif
1702d9bb58e5SYang Zhong 
1703d9bb58e5SYang Zhong /* Second set of helpers are directly callable from TCG as helpers.  */
1704d9bb58e5SYang Zhong 
1705d9bb58e5SYang Zhong #undef EXTRA_ARGS
1706d9bb58e5SYang Zhong #undef ATOMIC_NAME
1707d9bb58e5SYang Zhong #undef ATOMIC_MMU_LOOKUP
1708d9bb58e5SYang Zhong #define EXTRA_ARGS         , TCGMemOpIdx oi
1709d9bb58e5SYang Zhong #define ATOMIC_NAME(X)     HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
171034d49937SPeter Maydell #define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, GETPC(), &ndi)
1711d9bb58e5SYang Zhong 
1712d9bb58e5SYang Zhong #define DATA_SIZE 1
1713d9bb58e5SYang Zhong #include "atomic_template.h"
1714d9bb58e5SYang Zhong 
1715d9bb58e5SYang Zhong #define DATA_SIZE 2
1716d9bb58e5SYang Zhong #include "atomic_template.h"
1717d9bb58e5SYang Zhong 
1718d9bb58e5SYang Zhong #define DATA_SIZE 4
1719d9bb58e5SYang Zhong #include "atomic_template.h"
1720d9bb58e5SYang Zhong 
1721d9bb58e5SYang Zhong #ifdef CONFIG_ATOMIC64
1722d9bb58e5SYang Zhong #define DATA_SIZE 8
1723d9bb58e5SYang Zhong #include "atomic_template.h"
1724d9bb58e5SYang Zhong #endif
1725d9bb58e5SYang Zhong 
1726d9bb58e5SYang Zhong /* Code access functions.  */
1727d9bb58e5SYang Zhong 
17282dd92606SRichard Henderson static uint64_t full_ldub_cmmu(CPUArchState *env, target_ulong addr,
17292dd92606SRichard Henderson                                TCGMemOpIdx oi, uintptr_t retaddr)
17302dd92606SRichard Henderson {
17312dd92606SRichard Henderson     return load_helper(env, addr, oi, retaddr, 1, false, true,
17322dd92606SRichard Henderson                        full_ldub_cmmu);
17332dd92606SRichard Henderson }
17342dd92606SRichard Henderson 
1735fc1bc777SRichard Henderson uint8_t helper_ret_ldb_cmmu(CPUArchState *env, target_ulong addr,
1736fc1bc777SRichard Henderson                             TCGMemOpIdx oi, uintptr_t retaddr)
1737eed56642SAlex Bennée {
17382dd92606SRichard Henderson     return full_ldub_cmmu(env, addr, oi, retaddr);
17392dd92606SRichard Henderson }
17402dd92606SRichard Henderson 
17412dd92606SRichard Henderson static uint64_t full_le_lduw_cmmu(CPUArchState *env, target_ulong addr,
17422dd92606SRichard Henderson                                   TCGMemOpIdx oi, uintptr_t retaddr)
17432dd92606SRichard Henderson {
17442dd92606SRichard Henderson     return load_helper(env, addr, oi, retaddr, 2, false, true,
17452dd92606SRichard Henderson                        full_le_lduw_cmmu);
1746eed56642SAlex Bennée }
1747d9bb58e5SYang Zhong 
1748fc1bc777SRichard Henderson uint16_t helper_le_ldw_cmmu(CPUArchState *env, target_ulong addr,
1749fc1bc777SRichard Henderson                             TCGMemOpIdx oi, uintptr_t retaddr)
1750eed56642SAlex Bennée {
17512dd92606SRichard Henderson     return full_le_lduw_cmmu(env, addr, oi, retaddr);
17522dd92606SRichard Henderson }
17532dd92606SRichard Henderson 
17542dd92606SRichard Henderson static uint64_t full_be_lduw_cmmu(CPUArchState *env, target_ulong addr,
17552dd92606SRichard Henderson                                   TCGMemOpIdx oi, uintptr_t retaddr)
17562dd92606SRichard Henderson {
17572dd92606SRichard Henderson     return load_helper(env, addr, oi, retaddr, 2, true, true,
17582dd92606SRichard Henderson                        full_be_lduw_cmmu);
1759eed56642SAlex Bennée }
1760d9bb58e5SYang Zhong 
1761fc1bc777SRichard Henderson uint16_t helper_be_ldw_cmmu(CPUArchState *env, target_ulong addr,
1762fc1bc777SRichard Henderson                             TCGMemOpIdx oi, uintptr_t retaddr)
1763eed56642SAlex Bennée {
17642dd92606SRichard Henderson     return full_be_lduw_cmmu(env, addr, oi, retaddr);
17652dd92606SRichard Henderson }
17662dd92606SRichard Henderson 
17672dd92606SRichard Henderson static uint64_t full_le_ldul_cmmu(CPUArchState *env, target_ulong addr,
17682dd92606SRichard Henderson                                   TCGMemOpIdx oi, uintptr_t retaddr)
17692dd92606SRichard Henderson {
17702dd92606SRichard Henderson     return load_helper(env, addr, oi, retaddr, 4, false, true,
17712dd92606SRichard Henderson                        full_le_ldul_cmmu);
1772eed56642SAlex Bennée }
1773d9bb58e5SYang Zhong 
1774fc1bc777SRichard Henderson uint32_t helper_le_ldl_cmmu(CPUArchState *env, target_ulong addr,
1775fc1bc777SRichard Henderson                             TCGMemOpIdx oi, uintptr_t retaddr)
1776eed56642SAlex Bennée {
17772dd92606SRichard Henderson     return full_le_ldul_cmmu(env, addr, oi, retaddr);
17782dd92606SRichard Henderson }
17792dd92606SRichard Henderson 
17802dd92606SRichard Henderson static uint64_t full_be_ldul_cmmu(CPUArchState *env, target_ulong addr,
17812dd92606SRichard Henderson                                   TCGMemOpIdx oi, uintptr_t retaddr)
17822dd92606SRichard Henderson {
17832dd92606SRichard Henderson     return load_helper(env, addr, oi, retaddr, 4, true, true,
17842dd92606SRichard Henderson                        full_be_ldul_cmmu);
1785eed56642SAlex Bennée }
1786d9bb58e5SYang Zhong 
1787fc1bc777SRichard Henderson uint32_t helper_be_ldl_cmmu(CPUArchState *env, target_ulong addr,
1788fc1bc777SRichard Henderson                             TCGMemOpIdx oi, uintptr_t retaddr)
1789eed56642SAlex Bennée {
17902dd92606SRichard Henderson     return full_be_ldul_cmmu(env, addr, oi, retaddr);
1791eed56642SAlex Bennée }
1792eed56642SAlex Bennée 
1793fc1bc777SRichard Henderson uint64_t helper_le_ldq_cmmu(CPUArchState *env, target_ulong addr,
1794fc1bc777SRichard Henderson                             TCGMemOpIdx oi, uintptr_t retaddr)
1795eed56642SAlex Bennée {
17962dd92606SRichard Henderson     return load_helper(env, addr, oi, retaddr, 8, false, true,
17972dd92606SRichard Henderson                        helper_le_ldq_cmmu);
1798eed56642SAlex Bennée }
1799eed56642SAlex Bennée 
1800fc1bc777SRichard Henderson uint64_t helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr,
1801fc1bc777SRichard Henderson                             TCGMemOpIdx oi, uintptr_t retaddr)
1802eed56642SAlex Bennée {
18032dd92606SRichard Henderson     return load_helper(env, addr, oi, retaddr, 8, true, true,
18042dd92606SRichard Henderson                        helper_be_ldq_cmmu);
1805eed56642SAlex Bennée }
1806