xref: /openbmc/qemu/accel/tcg/cputlb.c (revision 7b7d00e0a714e0bdcd4c8a76f0927e1c8f1b2121)
/*
 *  Common CPU TLB handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
#include "qemu/error-report.h"
#include "exec/log.h"
#include "exec/helper-proto.h"
#include "qemu/atomic.h"
#include "qemu/atomic128.h"
#include "translate-all.h"
#include "trace-root.h"
#include "trace/mem.h"
#ifdef CONFIG_PLUGIN
#include "qemu/plugin-memory.h"
#endif

/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif

#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)

#define assert_cpu_is_self(cpu) do {                              \
        if (DEBUG_TLB_GATE) {                                     \
            g_assert(!(cpu)->created || qemu_cpu_is_self(cpu));   \
        }                                                         \
    } while (0)

/* run_on_cpu_data.target_ptr should always be big enough for a
 * target_ulong even on 32-bit builds */
QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));

/* We currently can't handle more than 16 bits in the MMUIDX bitmask. */
QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
#define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)
static inline size_t sizeof_tlb(CPUArchState *env, uintptr_t mmu_idx)
{
    return env_tlb(env)->f[mmu_idx].mask + (1 << CPU_TLB_ENTRY_BITS);
}

static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
                             size_t max_entries)
{
    desc->window_begin_ns = ns;
    desc->window_max_entries = max_entries;
}

static void tlb_dyn_init(CPUArchState *env)
{
    int i;

    for (i = 0; i < NB_MMU_MODES; i++) {
        CPUTLBDesc *desc = &env_tlb(env)->d[i];
        size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;

        tlb_window_reset(desc, get_clock_realtime(), 0);
        desc->n_used_entries = 0;
        env_tlb(env)->f[i].mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
        env_tlb(env)->f[i].table = g_new(CPUTLBEntry, n_entries);
        env_tlb(env)->d[i].iotlb = g_new(CPUIOTLBEntry, n_entries);
    }
}
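
/*
 * Worked example of the mask encoding above (a sketch only; the exact
 * constants are build-dependent, e.g. CPU_TLB_ENTRY_BITS is 5 when
 * sizeof(CPUTLBEntry) == 32).  With CPU_TLB_DYN_DEFAULT_BITS == 8:
 *
 *     n_entries    == 256
 *     f[i].mask    == (256 - 1) << 5 == 0x1fe0
 *     sizeof_tlb() == 0x1fe0 + (1 << 5) == 0x2000 == 256 * 32 bytes
 *
 * Storing the mask pre-shifted by CPU_TLB_ENTRY_BITS lets the TCG fast
 * path turn a guest address into a byte offset into f[i].table with one
 * shift and one AND, rather than recomputing the table size per lookup.
 */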

/**
 * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
 * @env: CPU that owns the TLB
 * @mmu_idx: MMU index of the TLB
 *
 * Called with tlb_c.lock held.
 *
 * We have two main constraints when resizing a TLB: (1) we only resize it
 * on a TLB flush (otherwise we'd have to take a perf hit by either rehashing
 * the array or unnecessarily flushing it), which means we do not control how
 * frequently the resizing can occur; (2) we don't have access to the guest's
 * future scheduling decisions, and therefore have to decide the magnitude of
 * the resize based on past observations.
 *
 * In general, a memory-hungry process can benefit greatly from an appropriately
 * sized TLB, since a guest TLB miss is very expensive. This doesn't mean that
 * we just have to make the TLB as large as possible; while an oversized TLB
 * results in minimal TLB miss rates, it also takes longer to be flushed
 * (flushes can be _very_ frequent), and the reduced locality can also hurt
 * performance.
 *
 * To achieve near-optimal performance for all kinds of workloads, we:
 *
 * 1. Aggressively increase the size of the TLB when the use rate of the
 * TLB being flushed is high, since it is likely that in the near future this
 * memory-hungry process will execute again, and its memory hungriness will
 * probably be similar.
 *
 * 2. Slowly reduce the size of the TLB as the use rate declines over a
 * reasonably large time window. The rationale is that if in such a time window
 * we have not observed a high TLB use rate, it is likely that we won't observe
 * it in the near future. In that case, once a time window expires we downsize
 * the TLB to match the maximum use rate observed in the window.
 *
 * 3. Try to keep the maximum use rate in a time window in the 30-70% range,
 * since in that range performance is likely near-optimal. Recall that the TLB
 * is direct mapped, so we want the use rate to be low (or at least not too
 * high), since otherwise we are likely to have a significant amount of
 * conflict misses.
 */
static void tlb_mmu_resize_locked(CPUArchState *env, int mmu_idx)
{
    CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
    size_t old_size = tlb_n_entries(env, mmu_idx);
    size_t rate;
    size_t new_size = old_size;
    int64_t now = get_clock_realtime();
    int64_t window_len_ms = 100;
    int64_t window_len_ns = window_len_ms * 1000 * 1000;
    bool window_expired = now > desc->window_begin_ns + window_len_ns;

    if (desc->n_used_entries > desc->window_max_entries) {
        desc->window_max_entries = desc->n_used_entries;
    }
    rate = desc->window_max_entries * 100 / old_size;

    if (rate > 70) {
        new_size = MIN(old_size << 1, 1 << CPU_TLB_DYN_MAX_BITS);
    } else if (rate < 30 && window_expired) {
        size_t ceil = pow2ceil(desc->window_max_entries);
        size_t expected_rate = desc->window_max_entries * 100 / ceil;

        /*
         * Avoid undersizing when the max number of entries seen is just below
         * a pow2. For instance, if max_entries == 1025, the expected use rate
         * would be 1025/2048==50%. However, if max_entries == 1023, we'd get
         * 1023/1024==99.9% use rate, so we'd likely end up doubling the size
         * later. Thus, make sure that the expected use rate remains below 70%.
         * (and since we double the size, that means the lowest rate we'd
         * expect to get is 35%, which is still in the 30-70% range where
         * we consider that the size is appropriate.)
         */
        if (expected_rate > 70) {
            ceil *= 2;
        }
        new_size = MAX(ceil, 1 << CPU_TLB_DYN_MIN_BITS);
    }

    if (new_size == old_size) {
        if (window_expired) {
            tlb_window_reset(desc, now, desc->n_used_entries);
        }
        return;
    }

    g_free(env_tlb(env)->f[mmu_idx].table);
    g_free(env_tlb(env)->d[mmu_idx].iotlb);

    tlb_window_reset(desc, now, 0);
    /* desc->n_used_entries is cleared by the caller */
    env_tlb(env)->f[mmu_idx].mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
    env_tlb(env)->f[mmu_idx].table = g_try_new(CPUTLBEntry, new_size);
    env_tlb(env)->d[mmu_idx].iotlb = g_try_new(CPUIOTLBEntry, new_size);
    /*
     * If the allocations fail, try smaller sizes. We just freed some
     * memory, so going back to half of new_size has a good chance of working.
     * Increased memory pressure elsewhere in the system might cause the
     * allocations to fail though, so we progressively reduce the allocation
     * size, aborting if we cannot even allocate the smallest TLB we support.
     */
    while (env_tlb(env)->f[mmu_idx].table == NULL ||
           env_tlb(env)->d[mmu_idx].iotlb == NULL) {
        if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
            error_report("%s: %s", __func__, strerror(errno));
            abort();
        }
        new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS);
        env_tlb(env)->f[mmu_idx].mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;

        g_free(env_tlb(env)->f[mmu_idx].table);
        g_free(env_tlb(env)->d[mmu_idx].iotlb);
        env_tlb(env)->f[mmu_idx].table = g_try_new(CPUTLBEntry, new_size);
        env_tlb(env)->d[mmu_idx].iotlb = g_try_new(CPUIOTLBEntry, new_size);
    }
}
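
/*
 * Illustrative walk-through of the sizing heuristic above (numbers
 * invented for the example):
 *
 *   - old_size == 1024, window_max_entries == 768: rate == 75 > 70, so
 *     the TLB doubles to 2048 entries (capped by CPU_TLB_DYN_MAX_BITS).
 *
 *   - old_size == 1024, window_max_entries == 100, window expired:
 *     rate == 9 < 30; pow2ceil(100) == 128 and expected_rate ==
 *     100 * 100 / 128 == 78 > 70, so ceil doubles to 256 and the TLB
 *     shrinks to MAX(256, 1 << CPU_TLB_DYN_MIN_BITS).
 */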

static inline void tlb_table_flush_by_mmuidx(CPUArchState *env, int mmu_idx)
{
    tlb_mmu_resize_locked(env, mmu_idx);
    memset(env_tlb(env)->f[mmu_idx].table, -1, sizeof_tlb(env, mmu_idx));
    env_tlb(env)->d[mmu_idx].n_used_entries = 0;
}

static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
{
    env_tlb(env)->d[mmu_idx].n_used_entries++;
}

static inline void tlb_n_used_entries_dec(CPUArchState *env, uintptr_t mmu_idx)
{
    env_tlb(env)->d[mmu_idx].n_used_entries--;
}

void tlb_init(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;

    qemu_spin_init(&env_tlb(env)->c.lock);

    /* Ensure that cpu_reset performs a full flush.  */
    env_tlb(env)->c.dirty = ALL_MMUIDX_BITS;

    tlb_dyn_init(env);
}

/* flush_all_helper: run fn across all cpus
 *
 * The helper is queued via async_run_on_cpu on every cpu except @src;
 * the caller is responsible for running fn on @src itself, either
 * directly or, for the "synced" variants, as "safe" work queued via
 * async_safe_run_on_cpu, which creates a synchronisation point where
 * all queued work will be finished before execution starts again.
 */
static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
                             run_on_cpu_data d)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, fn, d);
        }
    }
}
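
/*
 * The callers below pair flush_all_helper() with one of two patterns
 * for the source vCPU:
 *
 *     flush_all_helper(src, fn, data);
 *     fn(src, data);                         // run locally, no sync point
 *
 *     flush_all_helper(src, fn, data);
 *     async_safe_run_on_cpu(src, fn, data);  // the "synced" variants
 */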

void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
{
    CPUState *cpu;
    size_t full = 0, part = 0, elide = 0;

    CPU_FOREACH(cpu) {
        CPUArchState *env = cpu->env_ptr;

        full += atomic_read(&env_tlb(env)->c.full_flush_count);
        part += atomic_read(&env_tlb(env)->c.part_flush_count);
        elide += atomic_read(&env_tlb(env)->c.elide_flush_count);
    }
    *pfull = full;
    *ppart = part;
    *pelide = elide;
}

static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
{
    tlb_table_flush_by_mmuidx(env, mmu_idx);
    env_tlb(env)->d[mmu_idx].large_page_addr = -1;
    env_tlb(env)->d[mmu_idx].large_page_mask = -1;
    env_tlb(env)->d[mmu_idx].vindex = 0;
    memset(env_tlb(env)->d[mmu_idx].vtable, -1,
           sizeof(env_tlb(env)->d[0].vtable));
}

static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    uint16_t asked = data.host_int;
    uint16_t all_dirty, work, to_clean;

    assert_cpu_is_self(cpu);

    tlb_debug("mmu_idx:0x%04" PRIx16 "\n", asked);

    qemu_spin_lock(&env_tlb(env)->c.lock);

    all_dirty = env_tlb(env)->c.dirty;
    to_clean = asked & all_dirty;
    all_dirty &= ~to_clean;
    env_tlb(env)->c.dirty = all_dirty;

    for (work = to_clean; work != 0; work &= work - 1) {
        int mmu_idx = ctz32(work);
        tlb_flush_one_mmuidx_locked(env, mmu_idx);
    }

    qemu_spin_unlock(&env_tlb(env)->c.lock);

    cpu_tb_jmp_cache_clear(cpu);

    if (to_clean == ALL_MMUIDX_BITS) {
        atomic_set(&env_tlb(env)->c.full_flush_count,
                   env_tlb(env)->c.full_flush_count + 1);
    } else {
        atomic_set(&env_tlb(env)->c.part_flush_count,
                   env_tlb(env)->c.part_flush_count + ctpop16(to_clean));
        if (to_clean != asked) {
            atomic_set(&env_tlb(env)->c.elide_flush_count,
                       env_tlb(env)->c.elide_flush_count +
                       ctpop16(asked & ~to_clean));
        }
    }
}
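
/*
 * The work loop above visits exactly the set bits of to_clean: for
 * example, with to_clean == 0b1010 the first iteration sees
 * ctz32(0b1010) == 1 and flushes mmu_idx 1; "work &= work - 1" clears
 * the lowest set bit, so the second iteration sees 0b1000 and flushes
 * mmu_idx 3, after which work == 0 and the loop exits.
 */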

void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
    tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);

    if (cpu->created && !qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
                         RUN_ON_CPU_HOST_INT(idxmap));
    } else {
        tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap));
    }
}

void tlb_flush(CPUState *cpu)
{
    tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS);
}

void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus(src_cpu, ALL_MMUIDX_BITS);
}

void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS);
}

static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry,
                                        target_ulong page)
{
    return tlb_hit_page(tlb_entry->addr_read, page) ||
           tlb_hit_page(tlb_addr_write(tlb_entry), page) ||
           tlb_hit_page(tlb_entry->addr_code, page);
}

/**
 * tlb_entry_is_empty - return true if the entry is not in use
 * @te: pointer to CPUTLBEntry
 */
static inline bool tlb_entry_is_empty(const CPUTLBEntry *te)
{
    return te->addr_read == -1 && te->addr_write == -1 && te->addr_code == -1;
}

/* Called with tlb_c.lock held */
static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry,
                                          target_ulong page)
{
    if (tlb_hit_page_anyprot(tlb_entry, page)) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
        return true;
    }
    return false;
}

/* Called with tlb_c.lock held */
static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx,
                                              target_ulong page)
{
    CPUTLBDesc *d = &env_tlb(env)->d[mmu_idx];
    int k;

    assert_cpu_is_self(env_cpu(env));
    for (k = 0; k < CPU_VTLB_SIZE; k++) {
        if (tlb_flush_entry_locked(&d->vtable[k], page)) {
            tlb_n_used_entries_dec(env, mmu_idx);
        }
    }
}

static void tlb_flush_page_locked(CPUArchState *env, int midx,
                                  target_ulong page)
{
    target_ulong lp_addr = env_tlb(env)->d[midx].large_page_addr;
    target_ulong lp_mask = env_tlb(env)->d[midx].large_page_mask;

    /* Check if we need to flush due to large pages.  */
    if ((page & lp_mask) == lp_addr) {
        tlb_debug("forcing full flush midx %d ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  midx, lp_addr, lp_mask);
        tlb_flush_one_mmuidx_locked(env, midx);
    } else {
        if (tlb_flush_entry_locked(tlb_entry(env, midx, page), page)) {
            tlb_n_used_entries_dec(env, midx);
        }
        tlb_flush_vtlb_page_locked(env, midx, page);
    }
}

/**
 * tlb_flush_page_by_mmuidx_async_0:
 * @cpu: cpu on which to flush
 * @addr: page of virtual address to flush
 * @idxmap: set of mmu_idx to flush
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, flush one page
 * at @addr from the tlbs indicated by @idxmap from @cpu.
 */
static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu,
                                             target_ulong addr,
                                             uint16_t idxmap)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("page addr:" TARGET_FMT_lx " mmu_map:0x%x\n", addr, idxmap);

    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if ((idxmap >> mmu_idx) & 1) {
            tlb_flush_page_locked(env, mmu_idx, addr);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);

    tb_flush_jmp_cache(cpu, addr);
}

/**
 * tlb_flush_page_by_mmuidx_async_1:
 * @cpu: cpu on which to flush
 * @data: encoded addr + idxmap
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, called through
 * async_run_on_cpu.  The idxmap parameter is encoded in the page
 * offset of the target_ptr field.  This limits the set of mmu_idx
 * that can be passed via this method.
 */
static void tlb_flush_page_by_mmuidx_async_1(CPUState *cpu,
                                             run_on_cpu_data data)
{
    target_ulong addr_and_idxmap = (target_ulong) data.target_ptr;
    target_ulong addr = addr_and_idxmap & TARGET_PAGE_MASK;
    uint16_t idxmap = addr_and_idxmap & ~TARGET_PAGE_MASK;

    tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
}
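
/*
 * Encoding example for the scheme above (assuming TARGET_PAGE_BITS ==
 * 12, i.e. 4K pages): flushing page 0x12345000 for the mmu_idx set
 * {0, 2} packs as 0x12345000 | 0x5 == 0x12345005, and the decode
 * recovers addr == 0x12345000 and idxmap == 0x5 via TARGET_PAGE_MASK.
 * This only works while idxmap < TARGET_PAGE_SIZE, which the callers
 * below check before choosing this path.
 */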

typedef struct {
    target_ulong addr;
    uint16_t idxmap;
} TLBFlushPageByMMUIdxData;

/**
 * tlb_flush_page_by_mmuidx_async_2:
 * @cpu: cpu on which to flush
 * @data: allocated addr + idxmap
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, called through
 * async_run_on_cpu.  The addr+idxmap parameters are stored in a
 * TLBFlushPageByMMUIdxData structure that has been allocated
 * specifically for this helper.  Free the structure when done.
 */
static void tlb_flush_page_by_mmuidx_async_2(CPUState *cpu,
                                             run_on_cpu_data data)
{
    TLBFlushPageByMMUIdxData *d = data.host_ptr;

    tlb_flush_page_by_mmuidx_async_0(cpu, d->addr, d->idxmap);
    g_free(d);
}

void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
{
    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    if (qemu_cpu_is_self(cpu)) {
        tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
    } else if (idxmap < TARGET_PAGE_SIZE) {
        /*
         * Most targets have only a few mmu_idx.  In the case where
         * we can stuff idxmap into the low TARGET_PAGE_BITS, avoid
         * allocating memory for this operation.
         */
        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        TLBFlushPageByMMUIdxData *d = g_new(TLBFlushPageByMMUIdxData, 1);

        /* Otherwise allocate a structure, freed by the worker.  */
        d->addr = addr;
        d->idxmap = idxmap;
        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_2,
                         RUN_ON_CPU_HOST_PTR(d));
    }
}
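
/*
 * Typical use: a target invalidating one guest page in every MMU mode
 * would call
 *
 *     tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS);
 *
 * which is exactly what tlb_flush_page() below does.
 */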

void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
    tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS);
}

void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr,
                                       uint16_t idxmap)
{
    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    /*
     * Allocate memory to hold addr+idxmap only when needed.
     * See tlb_flush_page_by_mmuidx for details.
     */
    if (idxmap < TARGET_PAGE_SIZE) {
        flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        CPUState *dst_cpu;

        /* Allocate a separate data block for each destination cpu.  */
        CPU_FOREACH(dst_cpu) {
            if (dst_cpu != src_cpu) {
                TLBFlushPageByMMUIdxData *d
                    = g_new(TLBFlushPageByMMUIdxData, 1);

                d->addr = addr;
                d->idxmap = idxmap;
                async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
                                 RUN_ON_CPU_HOST_PTR(d));
            }
        }
    }

    tlb_flush_page_by_mmuidx_async_0(src_cpu, addr, idxmap);
}

void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
{
    tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS);
}

void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                              target_ulong addr,
                                              uint16_t idxmap)
{
    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    /*
     * Allocate memory to hold addr+idxmap only when needed.
     * See tlb_flush_page_by_mmuidx for details.
     */
    if (idxmap < TARGET_PAGE_SIZE) {
        flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
        async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                              RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        CPUState *dst_cpu;
        TLBFlushPageByMMUIdxData *d;

        /* Allocate a separate data block for each destination cpu.  */
        CPU_FOREACH(dst_cpu) {
            if (dst_cpu != src_cpu) {
                d = g_new(TLBFlushPageByMMUIdxData, 1);
                d->addr = addr;
                d->idxmap = idxmap;
                async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
                                 RUN_ON_CPU_HOST_PTR(d));
            }
        }

        d = g_new(TLBFlushPageByMMUIdxData, 1);
        d->addr = addr;
        d->idxmap = idxmap;
        async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_2,
                              RUN_ON_CPU_HOST_PTR(d));
    }
}

void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr)
{
    tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
}

/* update the TLBs so that writes to code in the physical page 'ram_addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
                                             DIRTY_MEMORY_CODE);
}

/* update the TLB so that writes in physical page 'ram_addr' are no longer
   tested for self-modifying code */
void tlb_unprotect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}


/*
 * Dirty write flag handling
 *
 * When the TCG code writes to a location it looks up the address in
 * the TLB and uses that data to compute the final address. If any of
 * the lower bits of the address are set then the slow path is forced.
 * There are a number of reasons to do this but for normal RAM the
 * most usual is detecting writes to code regions which may invalidate
 * generated code.
 *
 * Other vCPUs might be reading their TLBs during guest execution, so we update
 * te->addr_write with atomic_set. We don't need to worry about this for
 * oversized guests as MTTCG is disabled for them.
 *
 * Called with tlb_c.lock held.
 */
static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
                                         uintptr_t start, uintptr_t length)
{
    uintptr_t addr = tlb_entry->addr_write;

    if ((addr & (TLB_INVALID_MASK | TLB_MMIO |
                 TLB_DISCARD_WRITE | TLB_NOTDIRTY)) == 0) {
        addr &= TARGET_PAGE_MASK;
        addr += tlb_entry->addend;
        if ((addr - start) < length) {
#if TCG_OVERSIZED_GUEST
            tlb_entry->addr_write |= TLB_NOTDIRTY;
#else
            atomic_set(&tlb_entry->addr_write,
                       tlb_entry->addr_write | TLB_NOTDIRTY);
#endif
        }
    }
}
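
/*
 * Example of the low-bit flag trick described above (invented value):
 * a clean, writable RAM page might have addr_write == 0x00007000;
 * after this function runs it becomes 0x00007000 | TLB_NOTDIRTY, which
 * no longer compares equal to any page-aligned target address, so the
 * TCG fast path misses and the write is routed through the notdirty
 * slow path instead.
 */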

/*
 * Called with tlb_c.lock held.
 * Called only from the vCPU context, i.e. the TLB's owner thread.
 */
static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s)
{
    *d = *s;
}

/* This is a cross-vCPU call (i.e. another vCPU resetting the flags of
 * the target vCPU).
 * We must take tlb_c.lock to avoid racing with another vCPU update. The only
 * thing actually updated is the target TLB entry ->addr_write flags.
 */
void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
{
    CPUArchState *env;

    int mmu_idx;

    env = cpu->env_ptr;
    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        unsigned int i;
        unsigned int n = tlb_n_entries(env, mmu_idx);

        for (i = 0; i < n; i++) {
            tlb_reset_dirty_range_locked(&env_tlb(env)->f[mmu_idx].table[i],
                                         start1, length);
        }

        for (i = 0; i < CPU_VTLB_SIZE; i++) {
            tlb_reset_dirty_range_locked(&env_tlb(env)->d[mmu_idx].vtable[i],
                                         start1, length);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);
}

/* Called with tlb_c.lock held */
static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry,
                                         target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    vaddr &= TARGET_PAGE_MASK;
    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, vaddr), vaddr);
    }

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_set_dirty1_locked(&env_tlb(env)->d[mmu_idx].vtable[k], vaddr);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);
}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full flush of that mmu_idx if these are
   invalidated.  */
static void tlb_add_large_page(CPUArchState *env, int mmu_idx,
                               target_ulong vaddr, target_ulong size)
{
    target_ulong lp_addr = env_tlb(env)->d[mmu_idx].large_page_addr;
    target_ulong lp_mask = ~(size - 1);

    if (lp_addr == (target_ulong)-1) {
        /* No previous large page.  */
        lp_addr = vaddr;
    } else {
        /* Extend the existing region to include the new page.
           This is a compromise between unnecessary flushes and
           the cost of maintaining a full variable size TLB.  */
        lp_mask &= env_tlb(env)->d[mmu_idx].large_page_mask;
        while (((lp_addr ^ vaddr) & lp_mask) != 0) {
            lp_mask <<= 1;
        }
    }
    env_tlb(env)->d[mmu_idx].large_page_addr = lp_addr & lp_mask;
    env_tlb(env)->d[mmu_idx].large_page_mask = lp_mask;
}
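
/*
 * Widening example (invented values, 2MB pages): an existing tracked
 * region 0x40000000/0xffe00000 and a new 2MB page at 0x40400000 differ
 * in bit 22, so the loop widens lp_mask to 0xff800000 and the tracked
 * region becomes the 8MB range 0x40000000-0x407fffff, covering both.
 */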
793d9bb58e5SYang Zhong 
794d9bb58e5SYang Zhong /* Add a new TLB entry. At most one entry for a given virtual address
795d9bb58e5SYang Zhong  * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
796d9bb58e5SYang Zhong  * supplied size is only used by tlb_flush_page.
797d9bb58e5SYang Zhong  *
798d9bb58e5SYang Zhong  * Called from TCG-generated code, which is under an RCU read-side
799d9bb58e5SYang Zhong  * critical section.
800d9bb58e5SYang Zhong  */
801d9bb58e5SYang Zhong void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
802d9bb58e5SYang Zhong                              hwaddr paddr, MemTxAttrs attrs, int prot,
803d9bb58e5SYang Zhong                              int mmu_idx, target_ulong size)
804d9bb58e5SYang Zhong {
805d9bb58e5SYang Zhong     CPUArchState *env = cpu->env_ptr;
806a40ec84eSRichard Henderson     CPUTLB *tlb = env_tlb(env);
807a40ec84eSRichard Henderson     CPUTLBDesc *desc = &tlb->d[mmu_idx];
808d9bb58e5SYang Zhong     MemoryRegionSection *section;
809d9bb58e5SYang Zhong     unsigned int index;
810d9bb58e5SYang Zhong     target_ulong address;
8118f5db641SRichard Henderson     target_ulong write_address;
812d9bb58e5SYang Zhong     uintptr_t addend;
81368fea038SRichard Henderson     CPUTLBEntry *te, tn;
81455df6fcfSPeter Maydell     hwaddr iotlb, xlat, sz, paddr_page;
81555df6fcfSPeter Maydell     target_ulong vaddr_page;
816d9bb58e5SYang Zhong     int asidx = cpu_asidx_from_attrs(cpu, attrs);
81750b107c5SRichard Henderson     int wp_flags;
8188f5db641SRichard Henderson     bool is_ram, is_romd;
819d9bb58e5SYang Zhong 
820d9bb58e5SYang Zhong     assert_cpu_is_self(cpu);
82155df6fcfSPeter Maydell 
8221308e026SRichard Henderson     if (size <= TARGET_PAGE_SIZE) {
82355df6fcfSPeter Maydell         sz = TARGET_PAGE_SIZE;
82455df6fcfSPeter Maydell     } else {
8251308e026SRichard Henderson         tlb_add_large_page(env, mmu_idx, vaddr, size);
826d9bb58e5SYang Zhong         sz = size;
82755df6fcfSPeter Maydell     }
82855df6fcfSPeter Maydell     vaddr_page = vaddr & TARGET_PAGE_MASK;
82955df6fcfSPeter Maydell     paddr_page = paddr & TARGET_PAGE_MASK;
83055df6fcfSPeter Maydell 
83155df6fcfSPeter Maydell     section = address_space_translate_for_iotlb(cpu, asidx, paddr_page,
83255df6fcfSPeter Maydell                                                 &xlat, &sz, attrs, &prot);
833d9bb58e5SYang Zhong     assert(sz >= TARGET_PAGE_SIZE);
834d9bb58e5SYang Zhong 
835d9bb58e5SYang Zhong     tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
836d9bb58e5SYang Zhong               " prot=%x idx=%d\n",
837d9bb58e5SYang Zhong               vaddr, paddr, prot, mmu_idx);
838d9bb58e5SYang Zhong 
83955df6fcfSPeter Maydell     address = vaddr_page;
84055df6fcfSPeter Maydell     if (size < TARGET_PAGE_SIZE) {
84130d7e098SRichard Henderson         /* Repeat the MMU check and TLB fill on every access.  */
84230d7e098SRichard Henderson         address |= TLB_INVALID_MASK;
84355df6fcfSPeter Maydell     }
844a26fc6f5STony Nguyen     if (attrs.byte_swap) {
8455b87b3e6SRichard Henderson         address |= TLB_BSWAP;
846a26fc6f5STony Nguyen     }
8478f5db641SRichard Henderson 
8488f5db641SRichard Henderson     is_ram = memory_region_is_ram(section->mr);
8498f5db641SRichard Henderson     is_romd = memory_region_is_romd(section->mr);
8508f5db641SRichard Henderson 
8518f5db641SRichard Henderson     if (is_ram || is_romd) {
8528f5db641SRichard Henderson         /* RAM and ROMD both have associated host memory. */
853d9bb58e5SYang Zhong         addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
8548f5db641SRichard Henderson     } else {
8558f5db641SRichard Henderson         /* I/O does not; force the host address to NULL. */
8568f5db641SRichard Henderson         addend = 0;
857d9bb58e5SYang Zhong     }
858d9bb58e5SYang Zhong 
8598f5db641SRichard Henderson     write_address = address;
8608f5db641SRichard Henderson     if (is_ram) {
8618f5db641SRichard Henderson         iotlb = memory_region_get_ram_addr(section->mr) + xlat;
8628f5db641SRichard Henderson         /*
8638f5db641SRichard Henderson          * Computing is_clean is expensive; avoid all that unless
8648f5db641SRichard Henderson          * the page is actually writable.
8658f5db641SRichard Henderson          */
8668f5db641SRichard Henderson         if (prot & PAGE_WRITE) {
8678f5db641SRichard Henderson             if (section->readonly) {
8688f5db641SRichard Henderson                 write_address |= TLB_DISCARD_WRITE;
8698f5db641SRichard Henderson             } else if (cpu_physical_memory_is_clean(iotlb)) {
8708f5db641SRichard Henderson                 write_address |= TLB_NOTDIRTY;
8718f5db641SRichard Henderson             }
8728f5db641SRichard Henderson         }
8738f5db641SRichard Henderson     } else {
8748f5db641SRichard Henderson         /* I/O or ROMD */
8758f5db641SRichard Henderson         iotlb = memory_region_section_get_iotlb(cpu, section) + xlat;
8768f5db641SRichard Henderson         /*
8778f5db641SRichard Henderson          * Writes to romd devices must go through MMIO to enable write.
8788f5db641SRichard Henderson          * Reads to romd devices go through the ram_ptr found above,
8798f5db641SRichard Henderson          * but of course reads to I/O must go through MMIO.
8808f5db641SRichard Henderson          */
8818f5db641SRichard Henderson         write_address |= TLB_MMIO;
8828f5db641SRichard Henderson         if (!is_romd) {
8838f5db641SRichard Henderson             address = write_address;
8848f5db641SRichard Henderson         }
8858f5db641SRichard Henderson     }
8868f5db641SRichard Henderson 
88750b107c5SRichard Henderson     wp_flags = cpu_watchpoint_address_matches(cpu, vaddr_page,
88850b107c5SRichard Henderson                                               TARGET_PAGE_SIZE);
889d9bb58e5SYang Zhong 
890383beda9SRichard Henderson     index = tlb_index(env, mmu_idx, vaddr_page);
891383beda9SRichard Henderson     te = tlb_entry(env, mmu_idx, vaddr_page);
892d9bb58e5SYang Zhong 
89368fea038SRichard Henderson     /*
89471aec354SEmilio G. Cota      * Hold the TLB lock for the rest of the function. We could acquire/release
89571aec354SEmilio G. Cota      * the lock several times in the function, but it is faster to amortize the
89671aec354SEmilio G. Cota      * acquisition cost by acquiring it just once. Note that this leads to
89771aec354SEmilio G. Cota      * a longer critical section, but this is not a concern since the TLB lock
89871aec354SEmilio G. Cota      * is unlikely to be contended.
89971aec354SEmilio G. Cota      */
900a40ec84eSRichard Henderson     qemu_spin_lock(&tlb->c.lock);
90171aec354SEmilio G. Cota 
9023d1523ceSRichard Henderson     /* Note that the tlb is no longer clean.  */
903a40ec84eSRichard Henderson     tlb->c.dirty |= 1 << mmu_idx;
9043d1523ceSRichard Henderson 
90571aec354SEmilio G. Cota     /* Make sure there's no cached translation for the new page.  */
90671aec354SEmilio G. Cota     tlb_flush_vtlb_page_locked(env, mmu_idx, vaddr_page);
90771aec354SEmilio G. Cota 
90871aec354SEmilio G. Cota     /*
90968fea038SRichard Henderson      * Only evict the old entry to the victim tlb if it's for a
91068fea038SRichard Henderson      * different page; otherwise just overwrite the stale data.
91168fea038SRichard Henderson      */
9123cea94bbSEmilio G. Cota     if (!tlb_hit_page_anyprot(te, vaddr_page) && !tlb_entry_is_empty(te)) {
913a40ec84eSRichard Henderson         unsigned vidx = desc->vindex++ % CPU_VTLB_SIZE;
914a40ec84eSRichard Henderson         CPUTLBEntry *tv = &desc->vtable[vidx];
91568fea038SRichard Henderson 
91668fea038SRichard Henderson         /* Evict the old entry into the victim tlb.  */
91771aec354SEmilio G. Cota         copy_tlb_helper_locked(tv, te);
918a40ec84eSRichard Henderson         desc->viotlb[vidx] = desc->iotlb[index];
91986e1eff8SEmilio G. Cota         tlb_n_used_entries_dec(env, mmu_idx);
92068fea038SRichard Henderson     }
921d9bb58e5SYang Zhong 
922d9bb58e5SYang Zhong     /* refill the tlb */
923ace41090SPeter Maydell     /*
924ace41090SPeter Maydell      * At this point iotlb contains a physical section number in the lower
925ace41090SPeter Maydell      * TARGET_PAGE_BITS, and either
9268f5db641SRichard Henderson      *  + the ram_addr_t of the page base of the target RAM (RAM)
9278f5db641SRichard Henderson      *  + the offset within section->mr of the page base (I/O, ROMD)
92855df6fcfSPeter Maydell      * We subtract the vaddr_page (which is page aligned and thus won't
929ace41090SPeter Maydell      * disturb the low bits) to give an offset which can be added to the
930ace41090SPeter Maydell      * (non-page-aligned) vaddr of the eventual memory access to get
931ace41090SPeter Maydell      * the MemoryRegion offset for the access. Note that the vaddr we
932ace41090SPeter Maydell      * subtract here is that of the page base, and not the same as the
933ace41090SPeter Maydell      * vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
934ace41090SPeter Maydell      */
935a40ec84eSRichard Henderson     desc->iotlb[index].addr = iotlb - vaddr_page;
936a40ec84eSRichard Henderson     desc->iotlb[index].attrs = attrs;
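
    /*
     * Worked example (illustrative, assuming 4K target pages): if the
     * page base lies at offset 0x5000 within section->mr and vaddr_page
     * is 0x4000, then iotlb == 0x5000 | section_index and we store
     * 0x1000 | section_index here.  io_readx/io_writex below recover the
     * MemoryRegion offset via (addr_stored & TARGET_PAGE_MASK) + vaddr,
     * e.g. 0x1000 + 0x4028 == 0x5028 for an access at vaddr 0x4028.
     */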
937d9bb58e5SYang Zhong 
938d9bb58e5SYang Zhong     /* Now calculate the new entry */
93955df6fcfSPeter Maydell     tn.addend = addend - vaddr_page;
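
    /*
     * Illustration: addend is chosen so that host == guest + addend for
     * every vaddr inside this page.  If the guest page at vaddr_page
     * 0x4000 were backed by host memory at (hypothetical) 0x7f0012340000,
     * tn.addend would be 0x7f0012340000 - 0x4000, and a later access at
     * guest vaddr 0x4028 would yield host address 0x7f0012340028.
     */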
940d9bb58e5SYang Zhong     if (prot & PAGE_READ) {
941d9bb58e5SYang Zhong         tn.addr_read = address;
94250b107c5SRichard Henderson         if (wp_flags & BP_MEM_READ) {
94350b107c5SRichard Henderson             tn.addr_read |= TLB_WATCHPOINT;
94450b107c5SRichard Henderson         }
945d9bb58e5SYang Zhong     } else {
946d9bb58e5SYang Zhong         tn.addr_read = -1;
947d9bb58e5SYang Zhong     }
948d9bb58e5SYang Zhong 
949d9bb58e5SYang Zhong     if (prot & PAGE_EXEC) {
9508f5db641SRichard Henderson         tn.addr_code = address;
951d9bb58e5SYang Zhong     } else {
952d9bb58e5SYang Zhong         tn.addr_code = -1;
953d9bb58e5SYang Zhong     }
954d9bb58e5SYang Zhong 
955d9bb58e5SYang Zhong     tn.addr_write = -1;
956d9bb58e5SYang Zhong     if (prot & PAGE_WRITE) {
9578f5db641SRichard Henderson         tn.addr_write = write_address;
958f52bfb12SDavid Hildenbrand         if (prot & PAGE_WRITE_INV) {
959f52bfb12SDavid Hildenbrand             tn.addr_write |= TLB_INVALID_MASK;
960f52bfb12SDavid Hildenbrand         }
96150b107c5SRichard Henderson         if (wp_flags & BP_MEM_WRITE) {
96250b107c5SRichard Henderson             tn.addr_write |= TLB_WATCHPOINT;
96350b107c5SRichard Henderson         }
964d9bb58e5SYang Zhong     }
965d9bb58e5SYang Zhong 
96671aec354SEmilio G. Cota     copy_tlb_helper_locked(te, &tn);
96786e1eff8SEmilio G. Cota     tlb_n_used_entries_inc(env, mmu_idx);
968a40ec84eSRichard Henderson     qemu_spin_unlock(&tlb->c.lock);
969d9bb58e5SYang Zhong }
970d9bb58e5SYang Zhong 
971d9bb58e5SYang Zhong /* Add a new TLB entry, but without specifying the memory
972d9bb58e5SYang Zhong  * transaction attributes to be used.
973d9bb58e5SYang Zhong  */
974d9bb58e5SYang Zhong void tlb_set_page(CPUState *cpu, target_ulong vaddr,
975d9bb58e5SYang Zhong                   hwaddr paddr, int prot,
976d9bb58e5SYang Zhong                   int mmu_idx, target_ulong size)
977d9bb58e5SYang Zhong {
978d9bb58e5SYang Zhong     tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
979d9bb58e5SYang Zhong                             prot, mmu_idx, size);
980d9bb58e5SYang Zhong }
981d9bb58e5SYang Zhong 
982d9bb58e5SYang Zhong static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
983d9bb58e5SYang Zhong {
984d9bb58e5SYang Zhong     ram_addr_t ram_addr;
985d9bb58e5SYang Zhong 
986d9bb58e5SYang Zhong     ram_addr = qemu_ram_addr_from_host(ptr);
987d9bb58e5SYang Zhong     if (ram_addr == RAM_ADDR_INVALID) {
988d9bb58e5SYang Zhong         error_report("Bad ram pointer %p", ptr);
989d9bb58e5SYang Zhong         abort();
990d9bb58e5SYang Zhong     }
991d9bb58e5SYang Zhong     return ram_addr;
992d9bb58e5SYang Zhong }
993d9bb58e5SYang Zhong 
994c319dc13SRichard Henderson /*
995c319dc13SRichard Henderson  * Note: tlb_fill() can trigger a resize of the TLB. This means that all of the
996c319dc13SRichard Henderson  * caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must
997c319dc13SRichard Henderson  * be discarded and looked up again (e.g. via tlb_entry()).
998c319dc13SRichard Henderson  */
999c319dc13SRichard Henderson static void tlb_fill(CPUState *cpu, target_ulong addr, int size,
1000c319dc13SRichard Henderson                      MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
1001c319dc13SRichard Henderson {
1002c319dc13SRichard Henderson     CPUClass *cc = CPU_GET_CLASS(cpu);
1003c319dc13SRichard Henderson     bool ok;
1004c319dc13SRichard Henderson 
1005c319dc13SRichard Henderson     /*
1006c319dc13SRichard Henderson      * This is not a probe, so only valid return is success; failure
1007c319dc13SRichard Henderson      * should result in exception + longjmp to the cpu loop.
1008c319dc13SRichard Henderson      */
1009c319dc13SRichard Henderson     ok = cc->tlb_fill(cpu, addr, size, access_type, mmu_idx, false, retaddr);
1010c319dc13SRichard Henderson     assert(ok);
1011c319dc13SRichard Henderson }
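
/*
 * A minimal sketch (illustrative only) of the pattern the callers below
 * follow, per the warning above: cached index/entry values must be
 * re-fetched after tlb_fill(), since a resize may have reallocated the
 * TLB table:
 *
 *     if (!tlb_hit(tlb_addr, addr)) {
 *         tlb_fill(env_cpu(env), addr, size, access_type, mmu_idx, ra);
 *         index = tlb_index(env, mmu_idx, addr);
 *         entry = tlb_entry(env, mmu_idx, addr);
 *     }
 */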
1012c319dc13SRichard Henderson 
1013d9bb58e5SYang Zhong static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
1014f1be3696SRichard Henderson                          int mmu_idx, target_ulong addr, uintptr_t retaddr,
1015be5c4787STony Nguyen                          MMUAccessType access_type, MemOp op)
1016d9bb58e5SYang Zhong {
101729a0af61SRichard Henderson     CPUState *cpu = env_cpu(env);
10182d54f194SPeter Maydell     hwaddr mr_offset;
10192d54f194SPeter Maydell     MemoryRegionSection *section;
10202d54f194SPeter Maydell     MemoryRegion *mr;
1021d9bb58e5SYang Zhong     uint64_t val;
1022d9bb58e5SYang Zhong     bool locked = false;
102304e3aabdSPeter Maydell     MemTxResult r;
1024d9bb58e5SYang Zhong 
10252d54f194SPeter Maydell     section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
10262d54f194SPeter Maydell     mr = section->mr;
10272d54f194SPeter Maydell     mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
1028d9bb58e5SYang Zhong     cpu->mem_io_pc = retaddr;
102908565552SRichard Henderson     if (!cpu->can_do_io) {
1030d9bb58e5SYang Zhong         cpu_io_recompile(cpu, retaddr);
1031d9bb58e5SYang Zhong     }
1032d9bb58e5SYang Zhong 
10338b812533SAlex Bennée     if (mr->global_locking && !qemu_mutex_iothread_locked()) {
1034d9bb58e5SYang Zhong         qemu_mutex_lock_iothread();
1035d9bb58e5SYang Zhong         locked = true;
1036d9bb58e5SYang Zhong     }
1037be5c4787STony Nguyen     r = memory_region_dispatch_read(mr, mr_offset, &val, op, iotlbentry->attrs);
103804e3aabdSPeter Maydell     if (r != MEMTX_OK) {
10392d54f194SPeter Maydell         hwaddr physaddr = mr_offset +
10402d54f194SPeter Maydell             section->offset_within_address_space -
10412d54f194SPeter Maydell             section->offset_within_region;
10422d54f194SPeter Maydell 
1043be5c4787STony Nguyen         cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), access_type,
104404e3aabdSPeter Maydell                                mmu_idx, iotlbentry->attrs, r, retaddr);
104504e3aabdSPeter Maydell     }
1046d9bb58e5SYang Zhong     if (locked) {
1047d9bb58e5SYang Zhong         qemu_mutex_unlock_iothread();
1048d9bb58e5SYang Zhong     }
1049d9bb58e5SYang Zhong 
1050d9bb58e5SYang Zhong     return val;
1051d9bb58e5SYang Zhong }
1052d9bb58e5SYang Zhong 
1053d9bb58e5SYang Zhong static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
1054f1be3696SRichard Henderson                       int mmu_idx, uint64_t val, target_ulong addr,
1055be5c4787STony Nguyen                       uintptr_t retaddr, MemOp op)
1056d9bb58e5SYang Zhong {
105729a0af61SRichard Henderson     CPUState *cpu = env_cpu(env);
10582d54f194SPeter Maydell     hwaddr mr_offset;
10592d54f194SPeter Maydell     MemoryRegionSection *section;
10602d54f194SPeter Maydell     MemoryRegion *mr;
1061d9bb58e5SYang Zhong     bool locked = false;
106204e3aabdSPeter Maydell     MemTxResult r;
1063d9bb58e5SYang Zhong 
10642d54f194SPeter Maydell     section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
10652d54f194SPeter Maydell     mr = section->mr;
10662d54f194SPeter Maydell     mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
106708565552SRichard Henderson     if (!cpu->can_do_io) {
1068d9bb58e5SYang Zhong         cpu_io_recompile(cpu, retaddr);
1069d9bb58e5SYang Zhong     }
1070d9bb58e5SYang Zhong     cpu->mem_io_pc = retaddr;
1071d9bb58e5SYang Zhong 
10728b812533SAlex Bennée     if (mr->global_locking && !qemu_mutex_iothread_locked()) {
1073d9bb58e5SYang Zhong         qemu_mutex_lock_iothread();
1074d9bb58e5SYang Zhong         locked = true;
1075d9bb58e5SYang Zhong     }
1076be5c4787STony Nguyen     r = memory_region_dispatch_write(mr, mr_offset, val, op, iotlbentry->attrs);
107704e3aabdSPeter Maydell     if (r != MEMTX_OK) {
10782d54f194SPeter Maydell         hwaddr physaddr = mr_offset +
10792d54f194SPeter Maydell             section->offset_within_address_space -
10802d54f194SPeter Maydell             section->offset_within_region;
10812d54f194SPeter Maydell 
1082be5c4787STony Nguyen         cpu_transaction_failed(cpu, physaddr, addr, memop_size(op),
1083be5c4787STony Nguyen                                MMU_DATA_STORE, mmu_idx, iotlbentry->attrs, r,
1084be5c4787STony Nguyen                                retaddr);
108504e3aabdSPeter Maydell     }
1086d9bb58e5SYang Zhong     if (locked) {
1087d9bb58e5SYang Zhong         qemu_mutex_unlock_iothread();
1088d9bb58e5SYang Zhong     }
1089d9bb58e5SYang Zhong }
1090d9bb58e5SYang Zhong 
10914811e909SRichard Henderson static inline target_ulong tlb_read_ofs(CPUTLBEntry *entry, size_t ofs)
10924811e909SRichard Henderson {
10934811e909SRichard Henderson #if TCG_OVERSIZED_GUEST
10944811e909SRichard Henderson     return *(target_ulong *)((uintptr_t)entry + ofs);
10954811e909SRichard Henderson #else
10964811e909SRichard Henderson     /* ofs might correspond to .addr_write, so use atomic_read */
10974811e909SRichard Henderson     return atomic_read((target_ulong *)((uintptr_t)entry + ofs));
10984811e909SRichard Henderson #endif
10994811e909SRichard Henderson }
11004811e909SRichard Henderson 
1101d9bb58e5SYang Zhong /* Return true if ADDR is present in the victim tlb, and has been copied
1102d9bb58e5SYang Zhong    back to the main tlb.  */
1103d9bb58e5SYang Zhong static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
1104d9bb58e5SYang Zhong                            size_t elt_ofs, target_ulong page)
1105d9bb58e5SYang Zhong {
1106d9bb58e5SYang Zhong     size_t vidx;
110771aec354SEmilio G. Cota 
110829a0af61SRichard Henderson     assert_cpu_is_self(env_cpu(env));
1109d9bb58e5SYang Zhong     for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
1110a40ec84eSRichard Henderson         CPUTLBEntry *vtlb = &env_tlb(env)->d[mmu_idx].vtable[vidx];
1111a40ec84eSRichard Henderson         target_ulong cmp;
1112a40ec84eSRichard Henderson 
1113a40ec84eSRichard Henderson         /* elt_ofs might correspond to .addr_write, so use atomic_read */
1114a40ec84eSRichard Henderson #if TCG_OVERSIZED_GUEST
1115a40ec84eSRichard Henderson         cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);
1116a40ec84eSRichard Henderson #else
1117a40ec84eSRichard Henderson         cmp = atomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs));
1118a40ec84eSRichard Henderson #endif
1119d9bb58e5SYang Zhong 
1120d9bb58e5SYang Zhong         if (cmp == page) {
1121d9bb58e5SYang Zhong             /* Found entry in victim tlb, swap tlb and iotlb.  */
1122a40ec84eSRichard Henderson             CPUTLBEntry tmptlb, *tlb = &env_tlb(env)->f[mmu_idx].table[index];
1123d9bb58e5SYang Zhong 
1124a40ec84eSRichard Henderson             qemu_spin_lock(&env_tlb(env)->c.lock);
112571aec354SEmilio G. Cota             copy_tlb_helper_locked(&tmptlb, tlb);
112671aec354SEmilio G. Cota             copy_tlb_helper_locked(tlb, vtlb);
112771aec354SEmilio G. Cota             copy_tlb_helper_locked(vtlb, &tmptlb);
1128a40ec84eSRichard Henderson             qemu_spin_unlock(&env_tlb(env)->c.lock);
1129d9bb58e5SYang Zhong 
1130a40ec84eSRichard Henderson             CPUIOTLBEntry tmpio, *io = &env_tlb(env)->d[mmu_idx].iotlb[index];
1131a40ec84eSRichard Henderson             CPUIOTLBEntry *vio = &env_tlb(env)->d[mmu_idx].viotlb[vidx];
1132d9bb58e5SYang Zhong             tmpio = *io; *io = *vio; *vio = tmpio;
1133d9bb58e5SYang Zhong             return true;
1134d9bb58e5SYang Zhong         }
1135d9bb58e5SYang Zhong     }
1136d9bb58e5SYang Zhong     return false;
1137d9bb58e5SYang Zhong }
1138d9bb58e5SYang Zhong 
1139d9bb58e5SYang Zhong /* Macro to call the above, with local variables from the use context.  */
1140d9bb58e5SYang Zhong #define VICTIM_TLB_HIT(TY, ADDR) \
1141d9bb58e5SYang Zhong   victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
1142d9bb58e5SYang Zhong                  (ADDR) & TARGET_PAGE_MASK)
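
/*
 * For example (illustrative expansion), the store-path probe
 *
 *     VICTIM_TLB_HIT(addr_write, addr)
 *
 * expands to
 *
 *     victim_tlb_hit(env, mmu_idx, index,
 *                    offsetof(CPUTLBEntry, addr_write),
 *                    (addr) & TARGET_PAGE_MASK)
 *
 * picking up the env/mmu_idx/index variables in scope at the call site.
 */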
1143d9bb58e5SYang Zhong 
114430d7e098SRichard Henderson /*
114530d7e098SRichard Henderson  * Return a ram_addr_t for the virtual address for execution.
114630d7e098SRichard Henderson  *
114730d7e098SRichard Henderson  * Return -1 if we can't translate and execute from an entire page
114830d7e098SRichard Henderson  * of RAM.  This will force us to execute by loading and translating
114930d7e098SRichard Henderson  * one insn at a time, without caching.
115030d7e098SRichard Henderson  *
115130d7e098SRichard Henderson  * NOTE: This function will trigger an exception if the page is
115230d7e098SRichard Henderson  * not executable.
1153f2553f04SKONRAD Frederic  */
11544b2190daSEmilio G. Cota tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
11554b2190daSEmilio G. Cota                                         void **hostp)
1156f2553f04SKONRAD Frederic {
1157383beda9SRichard Henderson     uintptr_t mmu_idx = cpu_mmu_index(env, true);
1158383beda9SRichard Henderson     uintptr_t index = tlb_index(env, mmu_idx, addr);
1159383beda9SRichard Henderson     CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
1160f2553f04SKONRAD Frederic     void *p;
1161f2553f04SKONRAD Frederic 
1162383beda9SRichard Henderson     if (unlikely(!tlb_hit(entry->addr_code, addr))) {
1163b493ccf1SPeter Maydell         if (!VICTIM_TLB_HIT(addr_code, addr)) {
116429a0af61SRichard Henderson             tlb_fill(env_cpu(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0);
11656d967cb8SEmilio G. Cota             index = tlb_index(env, mmu_idx, addr);
11666d967cb8SEmilio G. Cota             entry = tlb_entry(env, mmu_idx, addr);
116730d7e098SRichard Henderson 
116830d7e098SRichard Henderson             if (unlikely(entry->addr_code & TLB_INVALID_MASK)) {
116930d7e098SRichard Henderson                 /*
117030d7e098SRichard Henderson                  * The MMU protection covers a smaller range than a target
117130d7e098SRichard Henderson                  * page, so we must redo the MMU check for every insn.
117230d7e098SRichard Henderson                  */
117330d7e098SRichard Henderson                 return -1;
117430d7e098SRichard Henderson             }
117571b9a453SKONRAD Frederic         }
1176383beda9SRichard Henderson         assert(tlb_hit(entry->addr_code, addr));
1177f2553f04SKONRAD Frederic     }
117855df6fcfSPeter Maydell 
117930d7e098SRichard Henderson     if (unlikely(entry->addr_code & TLB_MMIO)) {
118030d7e098SRichard Henderson         /* The region is not backed by RAM.  */
11814b2190daSEmilio G. Cota         if (hostp) {
11824b2190daSEmilio G. Cota             *hostp = NULL;
11834b2190daSEmilio G. Cota         }
118420cb6ae4SPeter Maydell         return -1;
118555df6fcfSPeter Maydell     }
118655df6fcfSPeter Maydell 
1187383beda9SRichard Henderson     p = (void *)((uintptr_t)addr + entry->addend);
11884b2190daSEmilio G. Cota     if (hostp) {
11894b2190daSEmilio G. Cota         *hostp = p;
11904b2190daSEmilio G. Cota     }
1191f2553f04SKONRAD Frederic     return qemu_ram_addr_from_host_nofail(p);
1192f2553f04SKONRAD Frederic }
1193f2553f04SKONRAD Frederic 
11944b2190daSEmilio G. Cota tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
11954b2190daSEmilio G. Cota {
11964b2190daSEmilio G. Cota     return get_page_addr_code_hostp(env, addr, NULL);
11974b2190daSEmilio G. Cota }
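
/*
 * Usage sketch (illustrative): the translator treats a -1 return as
 * "execute without caching", generating a single-instruction TB that is
 * retranslated every time:
 *
 *     tb_page_addr_t phys_pc = get_page_addr_code(env, pc);
 *     if (phys_pc == -1) {
 *         ... generate an uncached, one-insn TB ...
 *     }
 */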
11984b2190daSEmilio G. Cota 
1199707526adSRichard Henderson static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
1200707526adSRichard Henderson                            CPUIOTLBEntry *iotlbentry, uintptr_t retaddr)
1201707526adSRichard Henderson {
1202707526adSRichard Henderson     ram_addr_t ram_addr = mem_vaddr + iotlbentry->addr;
1203707526adSRichard Henderson 
1204707526adSRichard Henderson     trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size);
1205707526adSRichard Henderson 
1206707526adSRichard Henderson     if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
1207707526adSRichard Henderson         struct page_collection *pages
1208707526adSRichard Henderson             = page_collection_lock(ram_addr, ram_addr + size);
12095a7c27bbSRichard Henderson         tb_invalidate_phys_page_fast(pages, ram_addr, size, retaddr);
1210707526adSRichard Henderson         page_collection_unlock(pages);
1211707526adSRichard Henderson     }
1212707526adSRichard Henderson 
1213707526adSRichard Henderson     /*
1214707526adSRichard Henderson      * Set both VGA and migration bits for simplicity and to remove
1215707526adSRichard Henderson      * the notdirty callback faster.
1216707526adSRichard Henderson      */
1217707526adSRichard Henderson     cpu_physical_memory_set_dirty_range(ram_addr, size, DIRTY_CLIENTS_NOCODE);
1218707526adSRichard Henderson 
1219707526adSRichard Henderson     /* We remove the notdirty callback only if the code has been flushed. */
1220707526adSRichard Henderson     if (!cpu_physical_memory_is_clean(ram_addr)) {
1221707526adSRichard Henderson         trace_memory_notdirty_set_dirty(mem_vaddr);
1222707526adSRichard Henderson         tlb_set_dirty(cpu, mem_vaddr);
1223707526adSRichard Henderson     }
1224707526adSRichard Henderson }
1225707526adSRichard Henderson 
1226c25c283dSDavid Hildenbrand /*
1227c25c283dSDavid Hildenbrand  * Probe for whether the specified guest access is permitted. If it is not
1228c25c283dSDavid Hildenbrand  * permitted then an exception will be taken in the same way as if this
1229c25c283dSDavid Hildenbrand  * were a real access (and we will not return).
1230fef39ccdSDavid Hildenbrand  * If the size is 0 or the page requires I/O access, this returns NULL;
1231fef39ccdSDavid Hildenbrand  * otherwise it returns the host address of the access, as tlb_vaddr_to_host() does.
1232d9bb58e5SYang Zhong  */
1233c25c283dSDavid Hildenbrand void *probe_access(CPUArchState *env, target_ulong addr, int size,
1234c25c283dSDavid Hildenbrand                    MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
1235d9bb58e5SYang Zhong {
1236383beda9SRichard Henderson     uintptr_t index = tlb_index(env, mmu_idx, addr);
1237383beda9SRichard Henderson     CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
1238c25c283dSDavid Hildenbrand     target_ulong tlb_addr;
1239c25c283dSDavid Hildenbrand     size_t elt_ofs;
1240c25c283dSDavid Hildenbrand     int wp_access;
1241d9bb58e5SYang Zhong 
1242ca86cf32SDavid Hildenbrand     g_assert(-(addr | TARGET_PAGE_MASK) >= size);
1243ca86cf32SDavid Hildenbrand 
1244c25c283dSDavid Hildenbrand     switch (access_type) {
1245c25c283dSDavid Hildenbrand     case MMU_DATA_LOAD:
1246c25c283dSDavid Hildenbrand         elt_ofs = offsetof(CPUTLBEntry, addr_read);
1247c25c283dSDavid Hildenbrand         wp_access = BP_MEM_READ;
1248c25c283dSDavid Hildenbrand         break;
1249c25c283dSDavid Hildenbrand     case MMU_DATA_STORE:
1250c25c283dSDavid Hildenbrand         elt_ofs = offsetof(CPUTLBEntry, addr_write);
1251c25c283dSDavid Hildenbrand         wp_access = BP_MEM_WRITE;
1252c25c283dSDavid Hildenbrand         break;
1253c25c283dSDavid Hildenbrand     case MMU_INST_FETCH:
1254c25c283dSDavid Hildenbrand         elt_ofs = offsetof(CPUTLBEntry, addr_code);
1255c25c283dSDavid Hildenbrand         wp_access = BP_MEM_READ;
1256c25c283dSDavid Hildenbrand         break;
1257c25c283dSDavid Hildenbrand     default:
1258c25c283dSDavid Hildenbrand         g_assert_not_reached();
1259c25c283dSDavid Hildenbrand     }
1260c25c283dSDavid Hildenbrand     tlb_addr = tlb_read_ofs(entry, elt_ofs);
1261c25c283dSDavid Hildenbrand 
126203a98189SDavid Hildenbrand     if (unlikely(!tlb_hit(tlb_addr, addr))) {
1263c25c283dSDavid Hildenbrand         if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs,
1264c25c283dSDavid Hildenbrand                             addr & TARGET_PAGE_MASK)) {
1265c25c283dSDavid Hildenbrand             tlb_fill(env_cpu(env), addr, size, access_type, mmu_idx, retaddr);
126603a98189SDavid Hildenbrand             /* TLB resize via tlb_fill may have moved the entry. */
126703a98189SDavid Hildenbrand             index = tlb_index(env, mmu_idx, addr);
126803a98189SDavid Hildenbrand             entry = tlb_entry(env, mmu_idx, addr);
1269d9bb58e5SYang Zhong         }
1270c25c283dSDavid Hildenbrand         tlb_addr = tlb_read_ofs(entry, elt_ofs);
127103a98189SDavid Hildenbrand     }
127203a98189SDavid Hildenbrand 
1273fef39ccdSDavid Hildenbrand     if (!size) {
1274fef39ccdSDavid Hildenbrand         return NULL;
1275fef39ccdSDavid Hildenbrand     }
1276fef39ccdSDavid Hildenbrand 
127773bc0bd4SRichard Henderson     if (unlikely(tlb_addr & TLB_FLAGS_MASK)) {
127873bc0bd4SRichard Henderson         CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
127973bc0bd4SRichard Henderson 
128073bc0bd4SRichard Henderson         /* Reject I/O access, or other required slow-path.  */
128173bc0bd4SRichard Henderson         if (tlb_addr & (TLB_MMIO | TLB_BSWAP | TLB_DISCARD_WRITE)) {
128273bc0bd4SRichard Henderson             return NULL;
128373bc0bd4SRichard Henderson         }
128473bc0bd4SRichard Henderson 
128503a98189SDavid Hildenbrand         /* Handle watchpoints.  */
1286fef39ccdSDavid Hildenbrand         if (tlb_addr & TLB_WATCHPOINT) {
128703a98189SDavid Hildenbrand             cpu_check_watchpoint(env_cpu(env), addr, size,
128873bc0bd4SRichard Henderson                                  iotlbentry->attrs, wp_access, retaddr);
1289d9bb58e5SYang Zhong         }
1290fef39ccdSDavid Hildenbrand 
129173bc0bd4SRichard Henderson         /* Handle clean RAM pages.  */
129273bc0bd4SRichard Henderson         if (tlb_addr & TLB_NOTDIRTY) {
129373bc0bd4SRichard Henderson             notdirty_write(env_cpu(env), addr, size, iotlbentry, retaddr);
129473bc0bd4SRichard Henderson         }
1295fef39ccdSDavid Hildenbrand     }
1296fef39ccdSDavid Hildenbrand 
1297fef39ccdSDavid Hildenbrand     return (void *)((uintptr_t)addr + entry->addend);
1298d9bb58e5SYang Zhong }
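
/*
 * A minimal usage sketch (hypothetical helper, not part of this file):
 * probe the whole in-page destination up front so that a faulting store
 * cannot leave a partial update behind, then prefer direct host access
 * when probe_access() hands back a pointer.
 */
static inline void example_zero_bytes(CPUArchState *env, target_ulong dest,
                                      int len, int mmu_idx, uintptr_t ra)
{
    void *host;
    int i;

    /* len must stay within one page; probe_access() asserts this. */
    host = probe_access(env, dest, len, MMU_DATA_STORE, mmu_idx, ra);
    if (host) {
        /* Plain RAM: write through the host pointer directly. */
        memset(host, 0, len);
    } else {
        /* I/O or another slow path, or len == 0: per-byte target stores. */
        for (i = 0; i < len; i++) {
            cpu_stb_mmuidx_ra(env, dest + i, 0, mmu_idx, ra);
        }
    }
}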
1299d9bb58e5SYang Zhong 
13004811e909SRichard Henderson void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
13014811e909SRichard Henderson                         MMUAccessType access_type, int mmu_idx)
13024811e909SRichard Henderson {
13034811e909SRichard Henderson     CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
13047f445c8cSRichard Henderson     target_ulong tlb_addr, page;
13054811e909SRichard Henderson     size_t elt_ofs;
13064811e909SRichard Henderson 
13074811e909SRichard Henderson     switch (access_type) {
13084811e909SRichard Henderson     case MMU_DATA_LOAD:
13094811e909SRichard Henderson         elt_ofs = offsetof(CPUTLBEntry, addr_read);
13104811e909SRichard Henderson         break;
13114811e909SRichard Henderson     case MMU_DATA_STORE:
13124811e909SRichard Henderson         elt_ofs = offsetof(CPUTLBEntry, addr_write);
13134811e909SRichard Henderson         break;
13144811e909SRichard Henderson     case MMU_INST_FETCH:
13154811e909SRichard Henderson         elt_ofs = offsetof(CPUTLBEntry, addr_code);
13164811e909SRichard Henderson         break;
13174811e909SRichard Henderson     default:
13184811e909SRichard Henderson         g_assert_not_reached();
13194811e909SRichard Henderson     }
13204811e909SRichard Henderson 
13214811e909SRichard Henderson     page = addr & TARGET_PAGE_MASK;
13224811e909SRichard Henderson     tlb_addr = tlb_read_ofs(entry, elt_ofs);
13234811e909SRichard Henderson 
13244811e909SRichard Henderson     if (!tlb_hit_page(tlb_addr, page)) {
13254811e909SRichard Henderson         uintptr_t index = tlb_index(env, mmu_idx, addr);
13264811e909SRichard Henderson 
13274811e909SRichard Henderson         if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, page)) {
132829a0af61SRichard Henderson             CPUState *cs = env_cpu(env);
13294811e909SRichard Henderson             CPUClass *cc = CPU_GET_CLASS(cs);
13304811e909SRichard Henderson 
13314811e909SRichard Henderson             if (!cc->tlb_fill(cs, addr, 0, access_type, mmu_idx, true, 0)) {
13324811e909SRichard Henderson                 /* Non-faulting page table read failed.  */
13334811e909SRichard Henderson                 return NULL;
13344811e909SRichard Henderson             }
13354811e909SRichard Henderson 
13364811e909SRichard Henderson             /* TLB resize via tlb_fill may have moved the entry.  */
13374811e909SRichard Henderson             entry = tlb_entry(env, mmu_idx, addr);
13384811e909SRichard Henderson         }
13394811e909SRichard Henderson         tlb_addr = tlb_read_ofs(entry, elt_ofs);
13404811e909SRichard Henderson     }
13414811e909SRichard Henderson 
13424811e909SRichard Henderson     if (tlb_addr & ~TARGET_PAGE_MASK) {
13434811e909SRichard Henderson         /* I/O access, or another slow-path flag is set.  */
13444811e909SRichard Henderson         return NULL;
13454811e909SRichard Henderson     }
13464811e909SRichard Henderson 
13474811e909SRichard Henderson     return (void *)((uintptr_t)addr + entry->addend);
13484811e909SRichard Henderson }
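
/*
 * A minimal sketch (hypothetical helper): use this non-faulting lookup
 * to take a fast path only when the page is already resident and
 * directly addressable, without risking a guest exception.
 */
static inline bool example_load_u8_fast(CPUArchState *env, abi_ptr addr,
                                        int mmu_idx, uint8_t *val)
{
    uint8_t *host = tlb_vaddr_to_host(env, addr, MMU_DATA_LOAD, mmu_idx);

    if (host == NULL) {
        /* Not mapped, or an I/O page: caller must use the slow path. */
        return false;
    }
    *val = *host;
    return true;
}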
13494811e909SRichard Henderson 
1351235537faSAlex Bennée #ifdef CONFIG_PLUGIN
1352235537faSAlex Bennée /*
1353235537faSAlex Bennée  * Perform a TLB lookup and populate the qemu_plugin_hwaddr structure.
1354235537faSAlex Bennée  * This should be a hot path as we will have just looked this address up
1355235537faSAlex Bennée  * in the softmmu lookup code (or helper). We don't handle re-fills or
1356235537faSAlex Bennée  * check the victim table. This is purely informational.
1357235537faSAlex Bennée  *
1358235537faSAlex Bennée  * This should never fail as the memory access being instrumented
1359235537faSAlex Bennée  * should have just filled the TLB.
1360235537faSAlex Bennée  */
1362235537faSAlex Bennée bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr, int mmu_idx,
1363235537faSAlex Bennée                        bool is_store, struct qemu_plugin_hwaddr *data)
1364235537faSAlex Bennée {
1365235537faSAlex Bennée     CPUArchState *env = cpu->env_ptr;
1366235537faSAlex Bennée     CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
1367235537faSAlex Bennée     uintptr_t index = tlb_index(env, mmu_idx, addr);
1368235537faSAlex Bennée     target_ulong tlb_addr = is_store ? tlb_addr_write(tlbe) : tlbe->addr_read;
1369235537faSAlex Bennée 
1370235537faSAlex Bennée     if (likely(tlb_hit(tlb_addr, addr))) {
1371235537faSAlex Bennée         /* We must have an iotlb entry for MMIO */
1372235537faSAlex Bennée         if (tlb_addr & TLB_MMIO) {
1373235537faSAlex Bennée             CPUIOTLBEntry *iotlbentry;
1374235537faSAlex Bennée             iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
1375235537faSAlex Bennée             data->is_io = true;
1376235537faSAlex Bennée             data->v.io.section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
1377235537faSAlex Bennée             data->v.io.offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
1378235537faSAlex Bennée         } else {
1379235537faSAlex Bennée             data->is_io = false;
1380235537faSAlex Bennée             data->v.ram.hostaddr = addr + tlbe->addend;
1381235537faSAlex Bennée         }
1382235537faSAlex Bennée         return true;
1383235537faSAlex Bennée     }
1384235537faSAlex Bennée     return false;
1385235537faSAlex Bennée }
1386235537faSAlex Bennée 
1387235537faSAlex Bennée #endif
1388235537faSAlex Bennée 
1389d9bb58e5SYang Zhong /* Probe for a read-modify-write atomic operation.  Do not allow unaligned
1390d9bb58e5SYang Zhong  * or I/O operations to proceed.  Return the host address.  */
1391d9bb58e5SYang Zhong static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
1392707526adSRichard Henderson                                TCGMemOpIdx oi, uintptr_t retaddr)
1393d9bb58e5SYang Zhong {
1394d9bb58e5SYang Zhong     size_t mmu_idx = get_mmuidx(oi);
1395383beda9SRichard Henderson     uintptr_t index = tlb_index(env, mmu_idx, addr);
1396383beda9SRichard Henderson     CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
1397403f290cSEmilio G. Cota     target_ulong tlb_addr = tlb_addr_write(tlbe);
139814776ab5STony Nguyen     MemOp mop = get_memop(oi);
1399d9bb58e5SYang Zhong     int a_bits = get_alignment_bits(mop);
1400d9bb58e5SYang Zhong     int s_bits = mop & MO_SIZE;
140134d49937SPeter Maydell     void *hostaddr;
1402d9bb58e5SYang Zhong 
1403d9bb58e5SYang Zhong     /* Adjust the given return address.  */
1404d9bb58e5SYang Zhong     retaddr -= GETPC_ADJ;
1405d9bb58e5SYang Zhong 
1406d9bb58e5SYang Zhong     /* Enforce guest required alignment.  */
1407d9bb58e5SYang Zhong     if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
1408d9bb58e5SYang Zhong         /* ??? Maybe indicate atomic op to cpu_unaligned_access */
140929a0af61SRichard Henderson         cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
1410d9bb58e5SYang Zhong                              mmu_idx, retaddr);
1411d9bb58e5SYang Zhong     }
1412d9bb58e5SYang Zhong 
1413d9bb58e5SYang Zhong     /* Enforce qemu required alignment.  */
1414d9bb58e5SYang Zhong     if (unlikely(addr & ((1 << s_bits) - 1))) {
1415d9bb58e5SYang Zhong         /* We get here if guest alignment was not requested,
1416d9bb58e5SYang Zhong            or was not enforced by cpu_unaligned_access above.
1417d9bb58e5SYang Zhong            We might widen the access and emulate, but for now
1418d9bb58e5SYang Zhong            mark an exception and exit the cpu loop.  */
1419d9bb58e5SYang Zhong         goto stop_the_world;
1420d9bb58e5SYang Zhong     }
1421d9bb58e5SYang Zhong 
1422d9bb58e5SYang Zhong     /* Check TLB entry and enforce page permissions.  */
1423334692bcSPeter Maydell     if (!tlb_hit(tlb_addr, addr)) {
1424d9bb58e5SYang Zhong         if (!VICTIM_TLB_HIT(addr_write, addr)) {
142529a0af61SRichard Henderson             tlb_fill(env_cpu(env), addr, 1 << s_bits, MMU_DATA_STORE,
142698670d47SLaurent Vivier                      mmu_idx, retaddr);
14276d967cb8SEmilio G. Cota             index = tlb_index(env, mmu_idx, addr);
14286d967cb8SEmilio G. Cota             tlbe = tlb_entry(env, mmu_idx, addr);
1429d9bb58e5SYang Zhong         }
1430403f290cSEmilio G. Cota         tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
1431d9bb58e5SYang Zhong     }
1432d9bb58e5SYang Zhong 
143355df6fcfSPeter Maydell     /* Notice an I/O access.  */
143430d7e098SRichard Henderson     if (unlikely(tlb_addr & TLB_MMIO)) {
1435d9bb58e5SYang Zhong         /* There's really nothing that can be done to
1436d9bb58e5SYang Zhong            support this apart from stop-the-world.  */
1437d9bb58e5SYang Zhong         goto stop_the_world;
1438d9bb58e5SYang Zhong     }
1439d9bb58e5SYang Zhong 
1440d9bb58e5SYang Zhong     /* Let the guest notice RMW on a write-only page.  */
144134d49937SPeter Maydell     if (unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) {
144229a0af61SRichard Henderson         tlb_fill(env_cpu(env), addr, 1 << s_bits, MMU_DATA_LOAD,
144398670d47SLaurent Vivier                  mmu_idx, retaddr);
1444d9bb58e5SYang Zhong         /* Since we don't support reads and writes to different addresses,
1445d9bb58e5SYang Zhong            and we do have the proper page loaded for write, this shouldn't
1446d9bb58e5SYang Zhong            ever return.  But just in case, handle via stop-the-world.  */
1447d9bb58e5SYang Zhong         goto stop_the_world;
1448d9bb58e5SYang Zhong     }
1449d9bb58e5SYang Zhong 
145034d49937SPeter Maydell     hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
145134d49937SPeter Maydell 
145234d49937SPeter Maydell     if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
1453707526adSRichard Henderson         notdirty_write(env_cpu(env), addr, 1 << s_bits,
1454707526adSRichard Henderson                        &env_tlb(env)->d[mmu_idx].iotlb[index], retaddr);
145534d49937SPeter Maydell     }
145634d49937SPeter Maydell 
145734d49937SPeter Maydell     return hostaddr;
1458d9bb58e5SYang Zhong 
1459d9bb58e5SYang Zhong  stop_the_world:
146029a0af61SRichard Henderson     cpu_loop_exit_atomic(env_cpu(env), retaddr);
1461d9bb58e5SYang Zhong }
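
/*
 * Rough sketch of how the atomic helpers (generated from
 * atomic_template.h) use this lookup; DATA_TYPE stands for the
 * per-width type the template defines:
 *
 *     DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, retaddr);
 *     DATA_TYPE ret = atomic_cmpxchg__nocheck(haddr, cmpv, newv);
 *
 * Once a host address is in hand, the operation itself is an ordinary
 * host atomic on that address.
 */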
1462d9bb58e5SYang Zhong 
1463eed56642SAlex Bennée /*
1464eed56642SAlex Bennée  * Load Helpers
1465eed56642SAlex Bennée  *
1466eed56642SAlex Bennée  * We support two different access types. SOFTMMU_CODE_ACCESS is
1467eed56642SAlex Bennée  * specifically for reading instructions from system memory. It is
1468eed56642SAlex Bennée  * called by the translation loop and in some helpers where the code
1469eed56642SAlex Bennée  * is disassembled. It shouldn't be called directly by guest code.
1470eed56642SAlex Bennée  */
1471d9bb58e5SYang Zhong 
14722dd92606SRichard Henderson typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr,
14732dd92606SRichard Henderson                                 TCGMemOpIdx oi, uintptr_t retaddr);
14742dd92606SRichard Henderson 
1475c6b716cdSRichard Henderson static inline uint64_t QEMU_ALWAYS_INLINE
147680d9d1c6SRichard Henderson load_memop(const void *haddr, MemOp op)
147780d9d1c6SRichard Henderson {
147880d9d1c6SRichard Henderson     switch (op) {
147980d9d1c6SRichard Henderson     case MO_UB:
148080d9d1c6SRichard Henderson         return ldub_p(haddr);
148180d9d1c6SRichard Henderson     case MO_BEUW:
148280d9d1c6SRichard Henderson         return lduw_be_p(haddr);
148380d9d1c6SRichard Henderson     case MO_LEUW:
148480d9d1c6SRichard Henderson         return lduw_le_p(haddr);
148580d9d1c6SRichard Henderson     case MO_BEUL:
148680d9d1c6SRichard Henderson         return (uint32_t)ldl_be_p(haddr);
148780d9d1c6SRichard Henderson     case MO_LEUL:
148880d9d1c6SRichard Henderson         return (uint32_t)ldl_le_p(haddr);
148980d9d1c6SRichard Henderson     case MO_BEQ:
149080d9d1c6SRichard Henderson         return ldq_be_p(haddr);
149180d9d1c6SRichard Henderson     case MO_LEQ:
149280d9d1c6SRichard Henderson         return ldq_le_p(haddr);
149380d9d1c6SRichard Henderson     default:
149480d9d1c6SRichard Henderson         qemu_build_not_reached();
149580d9d1c6SRichard Henderson     }
149680d9d1c6SRichard Henderson }
149780d9d1c6SRichard Henderson 
149880d9d1c6SRichard Henderson static inline uint64_t QEMU_ALWAYS_INLINE
14992dd92606SRichard Henderson load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
1500be5c4787STony Nguyen             uintptr_t retaddr, MemOp op, bool code_read,
15012dd92606SRichard Henderson             FullLoadHelper *full_load)
1502eed56642SAlex Bennée {
1503eed56642SAlex Bennée     uintptr_t mmu_idx = get_mmuidx(oi);
1504eed56642SAlex Bennée     uintptr_t index = tlb_index(env, mmu_idx, addr);
1505eed56642SAlex Bennée     CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
1506eed56642SAlex Bennée     target_ulong tlb_addr = code_read ? entry->addr_code : entry->addr_read;
1507eed56642SAlex Bennée     const size_t tlb_off = code_read ?
1508eed56642SAlex Bennée         offsetof(CPUTLBEntry, addr_code) : offsetof(CPUTLBEntry, addr_read);
1509f1be3696SRichard Henderson     const MMUAccessType access_type =
1510f1be3696SRichard Henderson         code_read ? MMU_INST_FETCH : MMU_DATA_LOAD;
1511eed56642SAlex Bennée     unsigned a_bits = get_alignment_bits(get_memop(oi));
1512eed56642SAlex Bennée     void *haddr;
1513eed56642SAlex Bennée     uint64_t res;
1514be5c4787STony Nguyen     size_t size = memop_size(op);
1515d9bb58e5SYang Zhong 
1516eed56642SAlex Bennée     /* Handle CPU specific unaligned behaviour */
1517eed56642SAlex Bennée     if (addr & ((1 << a_bits) - 1)) {
151829a0af61SRichard Henderson         cpu_unaligned_access(env_cpu(env), addr, access_type,
1519eed56642SAlex Bennée                              mmu_idx, retaddr);
1520eed56642SAlex Bennée     }
1521eed56642SAlex Bennée 
1522eed56642SAlex Bennée     /* If the TLB entry is for a different page, reload and try again.  */
1523eed56642SAlex Bennée     if (!tlb_hit(tlb_addr, addr)) {
1524eed56642SAlex Bennée         if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
1525eed56642SAlex Bennée                             addr & TARGET_PAGE_MASK)) {
152629a0af61SRichard Henderson             tlb_fill(env_cpu(env), addr, size,
1527f1be3696SRichard Henderson                      access_type, mmu_idx, retaddr);
1528eed56642SAlex Bennée             index = tlb_index(env, mmu_idx, addr);
1529eed56642SAlex Bennée             entry = tlb_entry(env, mmu_idx, addr);
1530eed56642SAlex Bennée         }
1531eed56642SAlex Bennée         tlb_addr = code_read ? entry->addr_code : entry->addr_read;
153230d7e098SRichard Henderson         tlb_addr &= ~TLB_INVALID_MASK;
1533eed56642SAlex Bennée     }
1534eed56642SAlex Bennée 
153550b107c5SRichard Henderson     /* Handle anything that isn't just a straight memory access.  */
1536eed56642SAlex Bennée     if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
153750b107c5SRichard Henderson         CPUIOTLBEntry *iotlbentry;
15385b87b3e6SRichard Henderson         bool need_swap;
153950b107c5SRichard Henderson 
154050b107c5SRichard Henderson         /* For anything that is unaligned, recurse through full_load.  */
1541eed56642SAlex Bennée         if ((addr & (size - 1)) != 0) {
1542eed56642SAlex Bennée             goto do_unaligned_access;
1543eed56642SAlex Bennée         }
154450b107c5SRichard Henderson 
154550b107c5SRichard Henderson         iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
154650b107c5SRichard Henderson 
154750b107c5SRichard Henderson         /* Handle watchpoints.  */
154850b107c5SRichard Henderson         if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
154950b107c5SRichard Henderson             /* On watchpoint hit, this will longjmp out.  */
155050b107c5SRichard Henderson             cpu_check_watchpoint(env_cpu(env), addr, size,
155150b107c5SRichard Henderson                                  iotlbentry->attrs, BP_MEM_READ, retaddr);
15525b87b3e6SRichard Henderson         }
155350b107c5SRichard Henderson 
15545b87b3e6SRichard Henderson         need_swap = size > 1 && (tlb_addr & TLB_BSWAP);
155550b107c5SRichard Henderson 
155650b107c5SRichard Henderson         /* Handle I/O access.  */
15575b87b3e6SRichard Henderson         if (likely(tlb_addr & TLB_MMIO)) {
15585b87b3e6SRichard Henderson             return io_readx(env, iotlbentry, mmu_idx, addr, retaddr,
15595b87b3e6SRichard Henderson                             access_type, op ^ (need_swap * MO_BSWAP));
15605b87b3e6SRichard Henderson         }
15615b87b3e6SRichard Henderson 
15625b87b3e6SRichard Henderson         haddr = (void *)((uintptr_t)addr + entry->addend);
15635b87b3e6SRichard Henderson 
15645b87b3e6SRichard Henderson         /*
15655b87b3e6SRichard Henderson          * Keep these two load_memop separate to ensure that the compiler
15665b87b3e6SRichard Henderson          * is able to fold the entire function to a single instruction.
15675b87b3e6SRichard Henderson          * There is a build-time assert inside to remind you of this.  ;-)
15685b87b3e6SRichard Henderson          */
15695b87b3e6SRichard Henderson         if (unlikely(need_swap)) {
15705b87b3e6SRichard Henderson             return load_memop(haddr, op ^ MO_BSWAP);
15715b87b3e6SRichard Henderson         }
15725b87b3e6SRichard Henderson         return load_memop(haddr, op);
1573eed56642SAlex Bennée     }
1574eed56642SAlex Bennée 
1575eed56642SAlex Bennée     /* Handle slow unaligned access (it spans two pages or IO).  */
1576eed56642SAlex Bennée     if (size > 1
1577eed56642SAlex Bennée         && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
1578eed56642SAlex Bennée                     >= TARGET_PAGE_SIZE)) {
1579eed56642SAlex Bennée         target_ulong addr1, addr2;
15808c79b288SAlex Bennée         uint64_t r1, r2;
1581eed56642SAlex Bennée         unsigned shift;
1582eed56642SAlex Bennée     do_unaligned_access:
1583ab7a2009SAlex Bennée         addr1 = addr & ~((target_ulong)size - 1);
1584eed56642SAlex Bennée         addr2 = addr1 + size;
15852dd92606SRichard Henderson         r1 = full_load(env, addr1, oi, retaddr);
15862dd92606SRichard Henderson         r2 = full_load(env, addr2, oi, retaddr);
1587eed56642SAlex Bennée         shift = (addr & (size - 1)) * 8;
1588eed56642SAlex Bennée 
1589be5c4787STony Nguyen         if (memop_big_endian(op)) {
1590eed56642SAlex Bennée             /* Big-endian combine.  */
1591eed56642SAlex Bennée             res = (r1 << shift) | (r2 >> ((size * 8) - shift));
1592eed56642SAlex Bennée         } else {
1593eed56642SAlex Bennée             /* Little-endian combine.  */
1594eed56642SAlex Bennée             res = (r1 >> shift) | (r2 << ((size * 8) - shift));
1595eed56642SAlex Bennée         }
1596eed56642SAlex Bennée         return res & MAKE_64BIT_MASK(0, size * 8);
1597eed56642SAlex Bennée     }
1598eed56642SAlex Bennée 
1599eed56642SAlex Bennée     haddr = (void *)((uintptr_t)addr + entry->addend);
160080d9d1c6SRichard Henderson     return load_memop(haddr, op);
1601eed56642SAlex Bennée }
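
/*
 * Worked example for the cross-page combine above (illustrative:
 * little-endian, 4-byte load, 4K pages).  For addr == 0x0ffe with bytes
 * 00 11 22 33 44 55 at 0x0ffc..0x1001: addr1 == 0x0ffc, addr2 == 0x1000,
 * shift == 16, r1 == 0x33221100 and r2 ends in 0x5544, so
 *
 *     res = (r1 >> 16) | (r2 << 16)        then masked to 32 bits
 *         = 0x55443322
 *
 * which is exactly the little-endian value of bytes 22 33 44 55.
 */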
1602eed56642SAlex Bennée 
1603eed56642SAlex Bennée /*
1604eed56642SAlex Bennée  * For the benefit of TCG generated code, we want to avoid the
1605eed56642SAlex Bennée  * complication of ABI-specific return type promotion and always
1606eed56642SAlex Bennée  * return a value extended to the register size of the host. This is
1607eed56642SAlex Bennée  * tcg_target_long, except in the case of a 32-bit host and 64-bit
1608eed56642SAlex Bennée  * data, and for that we always have uint64_t.
1609eed56642SAlex Bennée  *
1610eed56642SAlex Bennée  * We don't bother with this widened value for SOFTMMU_CODE_ACCESS.
1611eed56642SAlex Bennée  */
1612eed56642SAlex Bennée 
16132dd92606SRichard Henderson static uint64_t full_ldub_mmu(CPUArchState *env, target_ulong addr,
16142dd92606SRichard Henderson                               TCGMemOpIdx oi, uintptr_t retaddr)
16152dd92606SRichard Henderson {
1616be5c4787STony Nguyen     return load_helper(env, addr, oi, retaddr, MO_UB, false, full_ldub_mmu);
16172dd92606SRichard Henderson }
16182dd92606SRichard Henderson 
1619fc1bc777SRichard Henderson tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
1620fc1bc777SRichard Henderson                                      TCGMemOpIdx oi, uintptr_t retaddr)
1621eed56642SAlex Bennée {
16222dd92606SRichard Henderson     return full_ldub_mmu(env, addr, oi, retaddr);
16232dd92606SRichard Henderson }
16242dd92606SRichard Henderson 
16252dd92606SRichard Henderson static uint64_t full_le_lduw_mmu(CPUArchState *env, target_ulong addr,
16262dd92606SRichard Henderson                                  TCGMemOpIdx oi, uintptr_t retaddr)
16272dd92606SRichard Henderson {
1628be5c4787STony Nguyen     return load_helper(env, addr, oi, retaddr, MO_LEUW, false,
16292dd92606SRichard Henderson                        full_le_lduw_mmu);
1630eed56642SAlex Bennée }
1631eed56642SAlex Bennée 
1632fc1bc777SRichard Henderson tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
1633fc1bc777SRichard Henderson                                     TCGMemOpIdx oi, uintptr_t retaddr)
1634eed56642SAlex Bennée {
16352dd92606SRichard Henderson     return full_le_lduw_mmu(env, addr, oi, retaddr);
16362dd92606SRichard Henderson }
16372dd92606SRichard Henderson 
16382dd92606SRichard Henderson static uint64_t full_be_lduw_mmu(CPUArchState *env, target_ulong addr,
16392dd92606SRichard Henderson                                  TCGMemOpIdx oi, uintptr_t retaddr)
16402dd92606SRichard Henderson {
1641be5c4787STony Nguyen     return load_helper(env, addr, oi, retaddr, MO_BEUW, false,
16422dd92606SRichard Henderson                        full_be_lduw_mmu);
1643eed56642SAlex Bennée }
1644eed56642SAlex Bennée 
1645fc1bc777SRichard Henderson tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
1646fc1bc777SRichard Henderson                                     TCGMemOpIdx oi, uintptr_t retaddr)
1647eed56642SAlex Bennée {
16482dd92606SRichard Henderson     return full_be_lduw_mmu(env, addr, oi, retaddr);
16492dd92606SRichard Henderson }
16502dd92606SRichard Henderson 
16512dd92606SRichard Henderson static uint64_t full_le_ldul_mmu(CPUArchState *env, target_ulong addr,
16522dd92606SRichard Henderson                                  TCGMemOpIdx oi, uintptr_t retaddr)
16532dd92606SRichard Henderson {
1654be5c4787STony Nguyen     return load_helper(env, addr, oi, retaddr, MO_LEUL, false,
16552dd92606SRichard Henderson                        full_le_ldul_mmu);
1656eed56642SAlex Bennée }
1657eed56642SAlex Bennée 
1658fc1bc777SRichard Henderson tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
1659fc1bc777SRichard Henderson                                     TCGMemOpIdx oi, uintptr_t retaddr)
1660eed56642SAlex Bennée {
16612dd92606SRichard Henderson     return full_le_ldul_mmu(env, addr, oi, retaddr);
16622dd92606SRichard Henderson }
16632dd92606SRichard Henderson 
16642dd92606SRichard Henderson static uint64_t full_be_ldul_mmu(CPUArchState *env, target_ulong addr,
16652dd92606SRichard Henderson                                  TCGMemOpIdx oi, uintptr_t retaddr)
16662dd92606SRichard Henderson {
1667be5c4787STony Nguyen     return load_helper(env, addr, oi, retaddr, MO_BEUL, false,
16682dd92606SRichard Henderson                        full_be_ldul_mmu);
1669eed56642SAlex Bennée }
1670eed56642SAlex Bennée 
1671fc1bc777SRichard Henderson tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
1672fc1bc777SRichard Henderson                                     TCGMemOpIdx oi, uintptr_t retaddr)
1673eed56642SAlex Bennée {
16742dd92606SRichard Henderson     return full_be_ldul_mmu(env, addr, oi, retaddr);
1675eed56642SAlex Bennée }
1676eed56642SAlex Bennée 
1677fc1bc777SRichard Henderson uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
1678fc1bc777SRichard Henderson                            TCGMemOpIdx oi, uintptr_t retaddr)
1679eed56642SAlex Bennée {
1680be5c4787STony Nguyen     return load_helper(env, addr, oi, retaddr, MO_LEQ, false,
16812dd92606SRichard Henderson                        helper_le_ldq_mmu);
1682eed56642SAlex Bennée }
1683eed56642SAlex Bennée 
1684fc1bc777SRichard Henderson uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
1685fc1bc777SRichard Henderson                            TCGMemOpIdx oi, uintptr_t retaddr)
1686eed56642SAlex Bennée {
1687be5c4787STony Nguyen     return load_helper(env, addr, oi, retaddr, MO_BEQ, false,
16882dd92606SRichard Henderson                        helper_be_ldq_mmu);
1689eed56642SAlex Bennée }
1690eed56642SAlex Bennée 
1691eed56642SAlex Bennée /*
1692eed56642SAlex Bennée  * Provide signed versions of the load routines as well.  We can of course
1693eed56642SAlex Bennée  * avoid this for 64-bit data, or for 32-bit data on a 32-bit host.
1694eed56642SAlex Bennée  */
1695eed56642SAlex Bennée 
1696eed56642SAlex Bennée 
1697eed56642SAlex Bennée tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
1698eed56642SAlex Bennée                                      TCGMemOpIdx oi, uintptr_t retaddr)
1699eed56642SAlex Bennée {
1700eed56642SAlex Bennée     return (int8_t)helper_ret_ldub_mmu(env, addr, oi, retaddr);
1701eed56642SAlex Bennée }
1702eed56642SAlex Bennée 
1703eed56642SAlex Bennée tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
1704eed56642SAlex Bennée                                     TCGMemOpIdx oi, uintptr_t retaddr)
1705eed56642SAlex Bennée {
1706eed56642SAlex Bennée     return (int16_t)helper_le_lduw_mmu(env, addr, oi, retaddr);
1707eed56642SAlex Bennée }
1708eed56642SAlex Bennée 
1709eed56642SAlex Bennée tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
1710eed56642SAlex Bennée                                     TCGMemOpIdx oi, uintptr_t retaddr)
1711eed56642SAlex Bennée {
1712eed56642SAlex Bennée     return (int16_t)helper_be_lduw_mmu(env, addr, oi, retaddr);
1713eed56642SAlex Bennée }
1714eed56642SAlex Bennée 
1715eed56642SAlex Bennée tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
1716eed56642SAlex Bennée                                     TCGMemOpIdx oi, uintptr_t retaddr)
1717eed56642SAlex Bennée {
1718eed56642SAlex Bennée     return (int32_t)helper_le_ldul_mmu(env, addr, oi, retaddr);
1719eed56642SAlex Bennée }
1720eed56642SAlex Bennée 
1721eed56642SAlex Bennée tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
1722eed56642SAlex Bennée                                     TCGMemOpIdx oi, uintptr_t retaddr)
1723eed56642SAlex Bennée {
1724eed56642SAlex Bennée     return (int32_t)helper_be_ldul_mmu(env, addr, oi, retaddr);
1725eed56642SAlex Bennée }
1726eed56642SAlex Bennée 
1727eed56642SAlex Bennée /*
1728d03f1408SRichard Henderson  * Load helpers for cpu_ldst.h.
1729d03f1408SRichard Henderson  */
1730d03f1408SRichard Henderson 
1731d03f1408SRichard Henderson static inline uint64_t cpu_load_helper(CPUArchState *env, abi_ptr addr,
1732d03f1408SRichard Henderson                                        int mmu_idx, uintptr_t retaddr,
1733d03f1408SRichard Henderson                                        MemOp op, FullLoadHelper *full_load)
1734d03f1408SRichard Henderson {
1735d03f1408SRichard Henderson     uint16_t meminfo;
1736d03f1408SRichard Henderson     TCGMemOpIdx oi;
1737d03f1408SRichard Henderson     uint64_t ret;
1738d03f1408SRichard Henderson 
1739d03f1408SRichard Henderson     meminfo = trace_mem_get_info(op, mmu_idx, false);
1740d03f1408SRichard Henderson     trace_guest_mem_before_exec(env_cpu(env), addr, meminfo);
1741d03f1408SRichard Henderson 
1742d03f1408SRichard Henderson     op &= ~MO_SIGN;
1743d03f1408SRichard Henderson     oi = make_memop_idx(op, mmu_idx);
1744d03f1408SRichard Henderson     ret = full_load(env, addr, oi, retaddr);
1745d03f1408SRichard Henderson 
1746d03f1408SRichard Henderson     qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, meminfo);
1747d03f1408SRichard Henderson 
1748d03f1408SRichard Henderson     return ret;
1749d03f1408SRichard Henderson }
1750d03f1408SRichard Henderson 
1751d03f1408SRichard Henderson uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr,
1752d03f1408SRichard Henderson                             int mmu_idx, uintptr_t ra)
1753d03f1408SRichard Henderson {
1754d03f1408SRichard Henderson     return cpu_load_helper(env, addr, mmu_idx, ra, MO_UB, full_ldub_mmu);
1755d03f1408SRichard Henderson }
1756d03f1408SRichard Henderson 
1757d03f1408SRichard Henderson int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr,
1758d03f1408SRichard Henderson                        int mmu_idx, uintptr_t ra)
1759d03f1408SRichard Henderson {
1760d03f1408SRichard Henderson     return (int8_t)cpu_load_helper(env, addr, mmu_idx, ra, MO_SB,
1761d03f1408SRichard Henderson                                    full_ldub_mmu);
1762d03f1408SRichard Henderson }
1763d03f1408SRichard Henderson 
1764d03f1408SRichard Henderson uint32_t cpu_lduw_mmuidx_ra(CPUArchState *env, abi_ptr addr,
1765d03f1408SRichard Henderson                             int mmu_idx, uintptr_t ra)
1766d03f1408SRichard Henderson {
1767d03f1408SRichard Henderson     return cpu_load_helper(env, addr, mmu_idx, ra, MO_TEUW,
1768d03f1408SRichard Henderson                            MO_TE == MO_LE
1769d03f1408SRichard Henderson                            ? full_le_lduw_mmu : full_be_lduw_mmu);
1770d03f1408SRichard Henderson }
1771d03f1408SRichard Henderson 
1772d03f1408SRichard Henderson int cpu_ldsw_mmuidx_ra(CPUArchState *env, abi_ptr addr,
1773d03f1408SRichard Henderson                        int mmu_idx, uintptr_t ra)
1774d03f1408SRichard Henderson {
1775d03f1408SRichard Henderson     return (int16_t)cpu_load_helper(env, addr, mmu_idx, ra, MO_TESW,
1776d03f1408SRichard Henderson                                     MO_TE == MO_LE
1777d03f1408SRichard Henderson                                     ? full_le_lduw_mmu : full_be_lduw_mmu);
1778d03f1408SRichard Henderson }
1779d03f1408SRichard Henderson 
1780d03f1408SRichard Henderson uint32_t cpu_ldl_mmuidx_ra(CPUArchState *env, abi_ptr addr,
1781d03f1408SRichard Henderson                            int mmu_idx, uintptr_t ra)
1782d03f1408SRichard Henderson {
1783d03f1408SRichard Henderson     return cpu_load_helper(env, addr, mmu_idx, ra, MO_TEUL,
1784d03f1408SRichard Henderson                            MO_TE == MO_LE
1785d03f1408SRichard Henderson                            ? full_le_ldul_mmu : full_be_ldul_mmu);
1786d03f1408SRichard Henderson }
1787d03f1408SRichard Henderson 
1788d03f1408SRichard Henderson uint64_t cpu_ldq_mmuidx_ra(CPUArchState *env, abi_ptr addr,
1789d03f1408SRichard Henderson                            int mmu_idx, uintptr_t ra)
1790d03f1408SRichard Henderson {
1791d03f1408SRichard Henderson     return cpu_load_helper(env, addr, mmu_idx, ra, MO_TEQ,
1792d03f1408SRichard Henderson                            MO_TE == MO_LE
1793d03f1408SRichard Henderson                            ? helper_le_ldq_mmu : helper_be_ldq_mmu);
1794d03f1408SRichard Henderson }
1795d03f1408SRichard Henderson 
1796cfe04a4bSRichard Henderson uint32_t cpu_ldub_data_ra(CPUArchState *env, target_ulong ptr,
1797cfe04a4bSRichard Henderson                           uintptr_t retaddr)
1798cfe04a4bSRichard Henderson {
1799cfe04a4bSRichard Henderson     return cpu_ldub_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
1800cfe04a4bSRichard Henderson }
1801cfe04a4bSRichard Henderson 
1802cfe04a4bSRichard Henderson int cpu_ldsb_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr)
1803cfe04a4bSRichard Henderson {
1804cfe04a4bSRichard Henderson     return cpu_ldsb_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
1805cfe04a4bSRichard Henderson }
1806cfe04a4bSRichard Henderson 
1807cfe04a4bSRichard Henderson uint32_t cpu_lduw_data_ra(CPUArchState *env, target_ulong ptr,
1808cfe04a4bSRichard Henderson                           uintptr_t retaddr)
1809cfe04a4bSRichard Henderson {
1810cfe04a4bSRichard Henderson     return cpu_lduw_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
1811cfe04a4bSRichard Henderson }
1812cfe04a4bSRichard Henderson 
1813cfe04a4bSRichard Henderson int cpu_ldsw_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr)
1814cfe04a4bSRichard Henderson {
1815cfe04a4bSRichard Henderson     return cpu_ldsw_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
1816cfe04a4bSRichard Henderson }
1817cfe04a4bSRichard Henderson 
1818cfe04a4bSRichard Henderson uint32_t cpu_ldl_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr)
1819cfe04a4bSRichard Henderson {
1820cfe04a4bSRichard Henderson     return cpu_ldl_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
1821cfe04a4bSRichard Henderson }
1822cfe04a4bSRichard Henderson 
1823cfe04a4bSRichard Henderson uint64_t cpu_ldq_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr)
1824cfe04a4bSRichard Henderson {
1825cfe04a4bSRichard Henderson     return cpu_ldq_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
1826cfe04a4bSRichard Henderson }
1827cfe04a4bSRichard Henderson 
1828cfe04a4bSRichard Henderson uint32_t cpu_ldub_data(CPUArchState *env, target_ulong ptr)
1829cfe04a4bSRichard Henderson {
1830cfe04a4bSRichard Henderson     return cpu_ldub_data_ra(env, ptr, 0);
1831cfe04a4bSRichard Henderson }
1832cfe04a4bSRichard Henderson 
1833cfe04a4bSRichard Henderson int cpu_ldsb_data(CPUArchState *env, target_ulong ptr)
1834cfe04a4bSRichard Henderson {
1835cfe04a4bSRichard Henderson     return cpu_ldsb_data_ra(env, ptr, 0);
1836cfe04a4bSRichard Henderson }
1837cfe04a4bSRichard Henderson 
1838cfe04a4bSRichard Henderson uint32_t cpu_lduw_data(CPUArchState *env, target_ulong ptr)
1839cfe04a4bSRichard Henderson {
1840cfe04a4bSRichard Henderson     return cpu_lduw_data_ra(env, ptr, 0);
1841cfe04a4bSRichard Henderson }
1842cfe04a4bSRichard Henderson 
1843cfe04a4bSRichard Henderson int cpu_ldsw_data(CPUArchState *env, target_ulong ptr)
1844cfe04a4bSRichard Henderson {
1845cfe04a4bSRichard Henderson     return cpu_ldsw_data_ra(env, ptr, 0);
1846cfe04a4bSRichard Henderson }
1847cfe04a4bSRichard Henderson 
1848cfe04a4bSRichard Henderson uint32_t cpu_ldl_data(CPUArchState *env, target_ulong ptr)
1849cfe04a4bSRichard Henderson {
1850cfe04a4bSRichard Henderson     return cpu_ldl_data_ra(env, ptr, 0);
1851cfe04a4bSRichard Henderson }
1852cfe04a4bSRichard Henderson 
1853cfe04a4bSRichard Henderson uint64_t cpu_ldq_data(CPUArchState *env, target_ulong ptr)
1854cfe04a4bSRichard Henderson {
1855cfe04a4bSRichard Henderson     return cpu_ldq_data_ra(env, ptr, 0);
1856cfe04a4bSRichard Henderson }
1857cfe04a4bSRichard Henderson 
1858d03f1408SRichard Henderson /*
1859eed56642SAlex Bennée  * Store Helpers
1860eed56642SAlex Bennée  */
1861eed56642SAlex Bennée 
1862c6b716cdSRichard Henderson static inline void QEMU_ALWAYS_INLINE
186380d9d1c6SRichard Henderson store_memop(void *haddr, uint64_t val, MemOp op)
186480d9d1c6SRichard Henderson {
186580d9d1c6SRichard Henderson     switch (op) {
186680d9d1c6SRichard Henderson     case MO_UB:
186780d9d1c6SRichard Henderson         stb_p(haddr, val);
186880d9d1c6SRichard Henderson         break;
186980d9d1c6SRichard Henderson     case MO_BEUW:
187080d9d1c6SRichard Henderson         stw_be_p(haddr, val);
187180d9d1c6SRichard Henderson         break;
187280d9d1c6SRichard Henderson     case MO_LEUW:
187380d9d1c6SRichard Henderson         stw_le_p(haddr, val);
187480d9d1c6SRichard Henderson         break;
187580d9d1c6SRichard Henderson     case MO_BEUL:
187680d9d1c6SRichard Henderson         stl_be_p(haddr, val);
187780d9d1c6SRichard Henderson         break;
187880d9d1c6SRichard Henderson     case MO_LEUL:
187980d9d1c6SRichard Henderson         stl_le_p(haddr, val);
188080d9d1c6SRichard Henderson         break;
188180d9d1c6SRichard Henderson     case MO_BEQ:
188280d9d1c6SRichard Henderson         stq_be_p(haddr, val);
188380d9d1c6SRichard Henderson         break;
188480d9d1c6SRichard Henderson     case MO_LEQ:
188580d9d1c6SRichard Henderson         stq_le_p(haddr, val);
188680d9d1c6SRichard Henderson         break;
188780d9d1c6SRichard Henderson     default:
188880d9d1c6SRichard Henderson         qemu_build_not_reached();
188980d9d1c6SRichard Henderson     }
189080d9d1c6SRichard Henderson }
189180d9d1c6SRichard Henderson 
189280d9d1c6SRichard Henderson static inline void QEMU_ALWAYS_INLINE
18934601f8d1SRichard Henderson store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
1894be5c4787STony Nguyen              TCGMemOpIdx oi, uintptr_t retaddr, MemOp op)
1895eed56642SAlex Bennée {
1896eed56642SAlex Bennée     uintptr_t mmu_idx = get_mmuidx(oi);
1897eed56642SAlex Bennée     uintptr_t index = tlb_index(env, mmu_idx, addr);
1898eed56642SAlex Bennée     CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
1899eed56642SAlex Bennée     target_ulong tlb_addr = tlb_addr_write(entry);
1900eed56642SAlex Bennée     const size_t tlb_off = offsetof(CPUTLBEntry, addr_write);
1901eed56642SAlex Bennée     unsigned a_bits = get_alignment_bits(get_memop(oi));
1902eed56642SAlex Bennée     void *haddr;
1903be5c4787STony Nguyen     size_t size = memop_size(op);
1904eed56642SAlex Bennée 
1905eed56642SAlex Bennée     /* Handle CPU-specific unaligned behaviour.  */
1906eed56642SAlex Bennée     if (addr & ((1 << a_bits) - 1)) {
190729a0af61SRichard Henderson         cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
1908eed56642SAlex Bennée                              mmu_idx, retaddr);
1909eed56642SAlex Bennée     }
1910eed56642SAlex Bennée 
1911eed56642SAlex Bennée     /* If the TLB entry is for a different page, reload and try again.  */
1912eed56642SAlex Bennée     if (!tlb_hit(tlb_addr, addr)) {
1913eed56642SAlex Bennée         if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
1914eed56642SAlex Bennée             addr & TARGET_PAGE_MASK)) {
191529a0af61SRichard Henderson             tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE,
1916eed56642SAlex Bennée                      mmu_idx, retaddr);
1917eed56642SAlex Bennée             index = tlb_index(env, mmu_idx, addr);
1918eed56642SAlex Bennée             entry = tlb_entry(env, mmu_idx, addr);
1919eed56642SAlex Bennée         }
1920eed56642SAlex Bennée         tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK;
1921eed56642SAlex Bennée     }
1922eed56642SAlex Bennée 
192350b107c5SRichard Henderson     /* Handle anything that isn't just a straight memory access.  */
1924eed56642SAlex Bennée     if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
192550b107c5SRichard Henderson         CPUIOTLBEntry *iotlbentry;
19265b87b3e6SRichard Henderson         bool need_swap;
192750b107c5SRichard Henderson 
192850b107c5SRichard Henderson         /* For anything that is unaligned, recurse through byte stores.  */
1929eed56642SAlex Bennée         if ((addr & (size - 1)) != 0) {
1930eed56642SAlex Bennée             goto do_unaligned_access;
1931eed56642SAlex Bennée         }
193250b107c5SRichard Henderson 
193350b107c5SRichard Henderson         iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
193450b107c5SRichard Henderson 
193550b107c5SRichard Henderson         /* Handle watchpoints.  */
193650b107c5SRichard Henderson         if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
193750b107c5SRichard Henderson             /* On watchpoint hit, this will longjmp out.  */
193850b107c5SRichard Henderson             cpu_check_watchpoint(env_cpu(env), addr, size,
193950b107c5SRichard Henderson                                  iotlbentry->attrs, BP_MEM_WRITE, retaddr);
19405b87b3e6SRichard Henderson         }
194150b107c5SRichard Henderson 
19425b87b3e6SRichard Henderson         need_swap = size > 1 && (tlb_addr & TLB_BSWAP);
194350b107c5SRichard Henderson 
194450b107c5SRichard Henderson         /* Handle I/O access.  */
194508565552SRichard Henderson         if (tlb_addr & TLB_MMIO) {
19465b87b3e6SRichard Henderson             io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr,
19475b87b3e6SRichard Henderson                       op ^ (need_swap * MO_BSWAP));
19485b87b3e6SRichard Henderson             return;
19495b87b3e6SRichard Henderson         }
19505b87b3e6SRichard Henderson 
19517b0d792cSRichard Henderson         /* Ignore writes to ROM.  */
19527b0d792cSRichard Henderson         if (unlikely(tlb_addr & TLB_DISCARD_WRITE)) {
19537b0d792cSRichard Henderson             return;
19547b0d792cSRichard Henderson         }
19557b0d792cSRichard Henderson 
195608565552SRichard Henderson         /* Handle clean RAM pages.  */
195708565552SRichard Henderson         if (tlb_addr & TLB_NOTDIRTY) {
1958707526adSRichard Henderson             notdirty_write(env_cpu(env), addr, size, iotlbentry, retaddr);
195908565552SRichard Henderson         }
196008565552SRichard Henderson 
1961707526adSRichard Henderson         haddr = (void *)((uintptr_t)addr + entry->addend);
196208565552SRichard Henderson 
19635b87b3e6SRichard Henderson         /*
19645b87b3e6SRichard Henderson          * Keep these two store_memop separate to ensure that the compiler
19655b87b3e6SRichard Henderson          * is able to fold the entire function to a single instruction.
19665b87b3e6SRichard Henderson          * There is a build-time assert inside to remind you of this.  ;-)
19675b87b3e6SRichard Henderson          */
19685b87b3e6SRichard Henderson         if (unlikely(need_swap)) {
19695b87b3e6SRichard Henderson             store_memop(haddr, val, op ^ MO_BSWAP);
19705b87b3e6SRichard Henderson         } else {
19715b87b3e6SRichard Henderson             store_memop(haddr, val, op);
19725b87b3e6SRichard Henderson         }
1973eed56642SAlex Bennée         return;
1974eed56642SAlex Bennée     }
1975eed56642SAlex Bennée 
1976eed56642SAlex Bennée     /* Handle slow unaligned access (it spans two pages or IO).  */
1977eed56642SAlex Bennée     if (size > 1
1978eed56642SAlex Bennée         && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
1979eed56642SAlex Bennée                      >= TARGET_PAGE_SIZE)) {
1980eed56642SAlex Bennée         int i;
1981eed56642SAlex Bennée         uintptr_t index2;
1982eed56642SAlex Bennée         CPUTLBEntry *entry2;
1983eed56642SAlex Bennée         target_ulong page2, tlb_addr2;
19848f7cd2adSRichard Henderson         size_t size2;
19858f7cd2adSRichard Henderson 
1986eed56642SAlex Bennée     do_unaligned_access:
1987eed56642SAlex Bennée         /*
1988eed56642SAlex Bennée          * Ensure the second page is in the TLB.  Note that the first page
1989eed56642SAlex Bennée          * is already guaranteed to be filled, and that the second page
1990eed56642SAlex Bennée          * cannot evict the first.
1991eed56642SAlex Bennée          */
1992eed56642SAlex Bennée         page2 = (addr + size) & TARGET_PAGE_MASK;
19938f7cd2adSRichard Henderson         size2 = (addr + size) & ~TARGET_PAGE_MASK;
1994eed56642SAlex Bennée         index2 = tlb_index(env, mmu_idx, page2);
1995eed56642SAlex Bennée         entry2 = tlb_entry(env, mmu_idx, page2);
1996eed56642SAlex Bennée         tlb_addr2 = tlb_addr_write(entry2);
199750b107c5SRichard Henderson         if (!tlb_hit_page(tlb_addr2, page2)) {
199850b107c5SRichard Henderson             if (!victim_tlb_hit(env, mmu_idx, index2, tlb_off, page2)) {
19998f7cd2adSRichard Henderson                 tlb_fill(env_cpu(env), page2, size2, MMU_DATA_STORE,
2000eed56642SAlex Bennée                          mmu_idx, retaddr);
200150b107c5SRichard Henderson                 index2 = tlb_index(env, mmu_idx, page2);
200250b107c5SRichard Henderson                 entry2 = tlb_entry(env, mmu_idx, page2);
200350b107c5SRichard Henderson             }
200450b107c5SRichard Henderson             tlb_addr2 = tlb_addr_write(entry2);
200550b107c5SRichard Henderson         }
200650b107c5SRichard Henderson 
200750b107c5SRichard Henderson         /*
200850b107c5SRichard Henderson          * Handle watchpoints.  Since this may trap, all checks
200950b107c5SRichard Henderson          * must happen before any store.
201050b107c5SRichard Henderson          */
201150b107c5SRichard Henderson         if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
201250b107c5SRichard Henderson             cpu_check_watchpoint(env_cpu(env), addr, size - size2,
201350b107c5SRichard Henderson                                  env_tlb(env)->d[mmu_idx].iotlb[index].attrs,
201450b107c5SRichard Henderson                                  BP_MEM_WRITE, retaddr);
201550b107c5SRichard Henderson         }
201650b107c5SRichard Henderson         if (unlikely(tlb_addr2 & TLB_WATCHPOINT)) {
201750b107c5SRichard Henderson             cpu_check_watchpoint(env_cpu(env), page2, size2,
201850b107c5SRichard Henderson                                  env_tlb(env)->d[mmu_idx].iotlb[index2].attrs,
201950b107c5SRichard Henderson                                  BP_MEM_WRITE, retaddr);
2020eed56642SAlex Bennée         }
2021eed56642SAlex Bennée 
2022eed56642SAlex Bennée         /*
2023eed56642SAlex Bennée          * XXX: not efficient, but simple.
2024eed56642SAlex Bennée          * This loop must go in the forward direction to avoid issues
2025eed56642SAlex Bennée          * with self-modifying code on 64-bit Windows guests.
2026eed56642SAlex Bennée          */
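        /*
         * Worked example (editorial): storing val = 0x11223344 as a
         * 4-byte big-endian access extracts val8 = 0x11 at i = 0
         * (shift 24) down to 0x44 at i = 3 (shift 0); the
         * little-endian branch emits 0x44 first instead.
         */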
2027eed56642SAlex Bennée         for (i = 0; i < size; ++i) {
2028eed56642SAlex Bennée             uint8_t val8;
2029be5c4787STony Nguyen             if (memop_big_endian(op)) {
2030eed56642SAlex Bennée                 /* Big-endian extract.  */
2031eed56642SAlex Bennée                 val8 = val >> (((size - 1) * 8) - (i * 8));
2032eed56642SAlex Bennée             } else {
2033eed56642SAlex Bennée                 /* Little-endian extract.  */
2034eed56642SAlex Bennée                 val8 = val >> (i * 8);
2035eed56642SAlex Bennée             }
20364601f8d1SRichard Henderson             helper_ret_stb_mmu(env, addr + i, val8, oi, retaddr);
2037eed56642SAlex Bennée         }
2038eed56642SAlex Bennée         return;
2039eed56642SAlex Bennée     }
2040eed56642SAlex Bennée 
2041eed56642SAlex Bennée     haddr = (void *)((uintptr_t)addr + entry->addend);
204280d9d1c6SRichard Henderson     store_memop(haddr, val, op);
2043eed56642SAlex Bennée }
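
/*
 * Summary of the paths above: an aligned TLB hit stores directly via
 * store_memop; any TLB_* flag diverts through the watchpoint, MMIO,
 * discarded-write or not-dirty handling; a page-spanning access falls
 * back to byte-wise recursion through helper_ret_stb_mmu.
 */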
2044eed56642SAlex Bennée 
2045fc1bc777SRichard Henderson void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
2046eed56642SAlex Bennée                         TCGMemOpIdx oi, uintptr_t retaddr)
2047eed56642SAlex Bennée {
2048be5c4787STony Nguyen     store_helper(env, addr, val, oi, retaddr, MO_UB);
2049eed56642SAlex Bennée }
2050eed56642SAlex Bennée 
2051fc1bc777SRichard Henderson void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
2052eed56642SAlex Bennée                        TCGMemOpIdx oi, uintptr_t retaddr)
2053eed56642SAlex Bennée {
2054be5c4787STony Nguyen     store_helper(env, addr, val, oi, retaddr, MO_LEUW);
2055eed56642SAlex Bennée }
2056eed56642SAlex Bennée 
2057fc1bc777SRichard Henderson void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
2058eed56642SAlex Bennée                        TCGMemOpIdx oi, uintptr_t retaddr)
2059eed56642SAlex Bennée {
2060be5c4787STony Nguyen     store_helper(env, addr, val, oi, retaddr, MO_BEUW);
2061eed56642SAlex Bennée }
2062eed56642SAlex Bennée 
2063fc1bc777SRichard Henderson void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
2064eed56642SAlex Bennée                        TCGMemOpIdx oi, uintptr_t retaddr)
2065eed56642SAlex Bennée {
2066be5c4787STony Nguyen     store_helper(env, addr, val, oi, retaddr, MO_LEUL);
2067eed56642SAlex Bennée }
2068eed56642SAlex Bennée 
2069fc1bc777SRichard Henderson void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
2070eed56642SAlex Bennée                        TCGMemOpIdx oi, uintptr_t retaddr)
2071eed56642SAlex Bennée {
2072be5c4787STony Nguyen     store_helper(env, addr, val, oi, retaddr, MO_BEUL);
2073eed56642SAlex Bennée }
2074eed56642SAlex Bennée 
2075fc1bc777SRichard Henderson void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
2076eed56642SAlex Bennée                        TCGMemOpIdx oi, uintptr_t retaddr)
2077eed56642SAlex Bennée {
2078be5c4787STony Nguyen     store_helper(env, addr, val, oi, retaddr, MO_LEQ);
2079eed56642SAlex Bennée }
2080eed56642SAlex Bennée 
2081fc1bc777SRichard Henderson void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
2082eed56642SAlex Bennée                        TCGMemOpIdx oi, uintptr_t retaddr)
2083eed56642SAlex Bennée {
2084be5c4787STony Nguyen     store_helper(env, addr, val, oi, retaddr, MO_BEQ);
2085eed56642SAlex Bennée }
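
/*
 * A hedged sketch of direct use: a caller outside generated code packs
 * the MemOp and mmu index into a TCGMemOpIdx first, exactly as
 * cpu_store_helper below does:
 *
 *     TCGMemOpIdx oi = make_memop_idx(MO_LEUL, mmu_idx);
 *     helper_le_stl_mmu(env, addr, val, oi, GETPC());
 */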
2086d9bb58e5SYang Zhong 
2087d03f1408SRichard Henderson /*
2088d03f1408SRichard Henderson  * Store Helpers for cpu_ldst.h
2089d03f1408SRichard Henderson  */
2090d03f1408SRichard Henderson 
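/*
 * These wrappers mirror the load side above: the memory trace event
 * fires before the access, the plugin callback runs after it
 * completes, and the MemOp/mmu-index pair is packed into a
 * TCGMemOpIdx for store_helper.
 */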
2091d03f1408SRichard Henderson static inline void QEMU_ALWAYS_INLINE
2092d03f1408SRichard Henderson cpu_store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
2093d03f1408SRichard Henderson                  int mmu_idx, uintptr_t retaddr, MemOp op)
2094d03f1408SRichard Henderson {
2095d03f1408SRichard Henderson     TCGMemOpIdx oi;
2096d03f1408SRichard Henderson     uint16_t meminfo;
2097d03f1408SRichard Henderson 
2098d03f1408SRichard Henderson     meminfo = trace_mem_get_info(op, mmu_idx, true);
2099d03f1408SRichard Henderson     trace_guest_mem_before_exec(env_cpu(env), addr, meminfo);
2100d03f1408SRichard Henderson 
2101d03f1408SRichard Henderson     oi = make_memop_idx(op, mmu_idx);
2102d03f1408SRichard Henderson     store_helper(env, addr, val, oi, retaddr, op);
2103d03f1408SRichard Henderson 
2104d03f1408SRichard Henderson     qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, meminfo);
2105d03f1408SRichard Henderson }
2106d03f1408SRichard Henderson 
2107d03f1408SRichard Henderson void cpu_stb_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
2108d03f1408SRichard Henderson                        int mmu_idx, uintptr_t retaddr)
2109d03f1408SRichard Henderson {
2110d03f1408SRichard Henderson     cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_UB);
2111d03f1408SRichard Henderson }
2112d03f1408SRichard Henderson 
2113d03f1408SRichard Henderson void cpu_stw_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
2114d03f1408SRichard Henderson                        int mmu_idx, uintptr_t retaddr)
2115d03f1408SRichard Henderson {
2116d03f1408SRichard Henderson     cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_TEUW);
2117d03f1408SRichard Henderson }
2118d03f1408SRichard Henderson 
2119d03f1408SRichard Henderson void cpu_stl_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
2120d03f1408SRichard Henderson                        int mmu_idx, uintptr_t retaddr)
2121d03f1408SRichard Henderson {
2122d03f1408SRichard Henderson     cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_TEUL);
2123d03f1408SRichard Henderson }
2124d03f1408SRichard Henderson 
2125d03f1408SRichard Henderson void cpu_stq_mmuidx_ra(CPUArchState *env, target_ulong addr, uint64_t val,
2126d03f1408SRichard Henderson                        int mmu_idx, uintptr_t retaddr)
2127d03f1408SRichard Henderson {
2128d03f1408SRichard Henderson     cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_TEQ);
2129d03f1408SRichard Henderson }
2130d03f1408SRichard Henderson 
2131cfe04a4bSRichard Henderson void cpu_stb_data_ra(CPUArchState *env, target_ulong ptr,
2132cfe04a4bSRichard Henderson                      uint32_t val, uintptr_t retaddr)
2133cfe04a4bSRichard Henderson {
2134cfe04a4bSRichard Henderson     cpu_stb_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
2135cfe04a4bSRichard Henderson }
2136cfe04a4bSRichard Henderson 
2137cfe04a4bSRichard Henderson void cpu_stw_data_ra(CPUArchState *env, target_ulong ptr,
2138cfe04a4bSRichard Henderson                      uint32_t val, uintptr_t retaddr)
2139cfe04a4bSRichard Henderson {
2140cfe04a4bSRichard Henderson     cpu_stw_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
2141cfe04a4bSRichard Henderson }
2142cfe04a4bSRichard Henderson 
2143cfe04a4bSRichard Henderson void cpu_stl_data_ra(CPUArchState *env, target_ulong ptr,
2144cfe04a4bSRichard Henderson                      uint32_t val, uintptr_t retaddr)
2145cfe04a4bSRichard Henderson {
2146cfe04a4bSRichard Henderson     cpu_stl_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
2147cfe04a4bSRichard Henderson }
2148cfe04a4bSRichard Henderson 
2149cfe04a4bSRichard Henderson void cpu_stq_data_ra(CPUArchState *env, target_ulong ptr,
2150cfe04a4bSRichard Henderson                      uint64_t val, uintptr_t retaddr)
2151cfe04a4bSRichard Henderson {
2152cfe04a4bSRichard Henderson     cpu_stq_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
2153cfe04a4bSRichard Henderson }
2154cfe04a4bSRichard Henderson 
2155cfe04a4bSRichard Henderson void cpu_stb_data(CPUArchState *env, target_ulong ptr, uint32_t val)
2156cfe04a4bSRichard Henderson {
2157cfe04a4bSRichard Henderson     cpu_stb_data_ra(env, ptr, val, 0);
2158cfe04a4bSRichard Henderson }
2159cfe04a4bSRichard Henderson 
2160cfe04a4bSRichard Henderson void cpu_stw_data(CPUArchState *env, target_ulong ptr, uint32_t val)
2161cfe04a4bSRichard Henderson {
2162cfe04a4bSRichard Henderson     cpu_stw_data_ra(env, ptr, val, 0);
2163cfe04a4bSRichard Henderson }
2164cfe04a4bSRichard Henderson 
2165cfe04a4bSRichard Henderson void cpu_stl_data(CPUArchState *env, target_ulong ptr, uint32_t val)
2166cfe04a4bSRichard Henderson {
2167cfe04a4bSRichard Henderson     cpu_stl_data_ra(env, ptr, val, 0);
2168cfe04a4bSRichard Henderson }
2169cfe04a4bSRichard Henderson 
2170cfe04a4bSRichard Henderson void cpu_stq_data(CPUArchState *env, target_ulong ptr, uint64_t val)
2171cfe04a4bSRichard Henderson {
2172cfe04a4bSRichard Henderson     cpu_stq_data_ra(env, ptr, val, 0);
2173cfe04a4bSRichard Henderson }
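
/*
 * Editorial note: the *_data variants pass retaddr == 0, meaning the
 * access is not attributed to any particular translated instruction;
 * helpers running under generated code should use the *_ra variants
 * with GETPC() instead.
 */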
2174cfe04a4bSRichard Henderson 
2175d9bb58e5SYang Zhong /* The first set of helpers allows OI and RETADDR to be passed in,
2176d9bb58e5SYang Zhong    which makes them callable from other helpers.  */
2177d9bb58e5SYang Zhong 
2178d9bb58e5SYang Zhong #define EXTRA_ARGS     , TCGMemOpIdx oi, uintptr_t retaddr
2179d9bb58e5SYang Zhong #define ATOMIC_NAME(X) \
2180d9bb58e5SYang Zhong     HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
2181707526adSRichard Henderson #define ATOMIC_MMU_DECLS
2182707526adSRichard Henderson #define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, retaddr)
2183707526adSRichard Henderson #define ATOMIC_MMU_CLEANUP
2184504f73f7SAlex Bennée #define ATOMIC_MMU_IDX   get_mmuidx(oi)
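/*
 * Editorial illustration: with DATA_SIZE 4, the template below expands
 * ATOMIC_NAME(cmpxchg) to, e.g., helper_atomic_cmpxchgl_le_mmu, whose
 * trailing oi/retaddr arguments come from EXTRA_ARGS above.
 */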
2185d9bb58e5SYang Zhong 
2186cfec3885SEmilio G. Cota #include "atomic_common.inc.c"
2187d9bb58e5SYang Zhong 
2188d9bb58e5SYang Zhong #define DATA_SIZE 1
2189d9bb58e5SYang Zhong #include "atomic_template.h"
2190d9bb58e5SYang Zhong 
2191d9bb58e5SYang Zhong #define DATA_SIZE 2
2192d9bb58e5SYang Zhong #include "atomic_template.h"
2193d9bb58e5SYang Zhong 
2194d9bb58e5SYang Zhong #define DATA_SIZE 4
2195d9bb58e5SYang Zhong #include "atomic_template.h"
2196d9bb58e5SYang Zhong 
2197d9bb58e5SYang Zhong #ifdef CONFIG_ATOMIC64
2198d9bb58e5SYang Zhong #define DATA_SIZE 8
2199d9bb58e5SYang Zhong #include "atomic_template.h"
2200d9bb58e5SYang Zhong #endif
2201d9bb58e5SYang Zhong 
2202e6cd4bb5SRichard Henderson #if HAVE_CMPXCHG128 || HAVE_ATOMIC128
2203d9bb58e5SYang Zhong #define DATA_SIZE 16
2204d9bb58e5SYang Zhong #include "atomic_template.h"
2205d9bb58e5SYang Zhong #endif
2206d9bb58e5SYang Zhong 
2207d9bb58e5SYang Zhong /* The second set of helpers is directly callable from TCG as helpers.  */
2208d9bb58e5SYang Zhong 
2209d9bb58e5SYang Zhong #undef EXTRA_ARGS
2210d9bb58e5SYang Zhong #undef ATOMIC_NAME
2211d9bb58e5SYang Zhong #undef ATOMIC_MMU_LOOKUP
2212d9bb58e5SYang Zhong #define EXTRA_ARGS         , TCGMemOpIdx oi
2213d9bb58e5SYang Zhong #define ATOMIC_NAME(X)     HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
2214707526adSRichard Henderson #define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, GETPC())
2215d9bb58e5SYang Zhong 
2216d9bb58e5SYang Zhong #define DATA_SIZE 1
2217d9bb58e5SYang Zhong #include "atomic_template.h"
2218d9bb58e5SYang Zhong 
2219d9bb58e5SYang Zhong #define DATA_SIZE 2
2220d9bb58e5SYang Zhong #include "atomic_template.h"
2221d9bb58e5SYang Zhong 
2222d9bb58e5SYang Zhong #define DATA_SIZE 4
2223d9bb58e5SYang Zhong #include "atomic_template.h"
2224d9bb58e5SYang Zhong 
2225d9bb58e5SYang Zhong #ifdef CONFIG_ATOMIC64
2226d9bb58e5SYang Zhong #define DATA_SIZE 8
2227d9bb58e5SYang Zhong #include "atomic_template.h"
2228d9bb58e5SYang Zhong #endif
2229504f73f7SAlex Bennée #undef ATOMIC_MMU_IDX
2230d9bb58e5SYang Zhong 
2231d9bb58e5SYang Zhong /* Code access functions.  */
2232d9bb58e5SYang Zhong 
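/*
 * Code fetches use cpu_mmu_index(env, true), and each full_*_code
 * wrapper hands itself to load_helper so that the slow path can
 * recurse through the same entry point when it must split or retry
 * the access.
 */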
2233fc4120a3SRichard Henderson static uint64_t full_ldub_code(CPUArchState *env, target_ulong addr,
22342dd92606SRichard Henderson                                TCGMemOpIdx oi, uintptr_t retaddr)
22352dd92606SRichard Henderson {
2236fc4120a3SRichard Henderson     return load_helper(env, addr, oi, retaddr, MO_8, true, full_ldub_code);
22372dd92606SRichard Henderson }
22382dd92606SRichard Henderson 
2239fc4120a3SRichard Henderson uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr)
2240eed56642SAlex Bennée {
2241fc4120a3SRichard Henderson     TCGMemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(env, true));
2242fc4120a3SRichard Henderson     return full_ldub_code(env, addr, oi, 0);
22432dd92606SRichard Henderson }
22442dd92606SRichard Henderson 
2245fc4120a3SRichard Henderson static uint64_t full_lduw_code(CPUArchState *env, target_ulong addr,
22464cef72d0SAlex Bennée                                TCGMemOpIdx oi, uintptr_t retaddr)
22474cef72d0SAlex Bennée {
2248fc4120a3SRichard Henderson     return load_helper(env, addr, oi, retaddr, MO_TEUW, true, full_lduw_code);
22494cef72d0SAlex Bennée }
22504cef72d0SAlex Bennée 
2251fc4120a3SRichard Henderson uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr)
22522dd92606SRichard Henderson {
2253fc4120a3SRichard Henderson     TCGMemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(env, true));
2254fc4120a3SRichard Henderson     return full_lduw_code(env, addr, oi, 0);
2255eed56642SAlex Bennée }
2256d9bb58e5SYang Zhong 
2257fc4120a3SRichard Henderson static uint64_t full_ldl_code(CPUArchState *env, target_ulong addr,
2258fc1bc777SRichard Henderson                               TCGMemOpIdx oi, uintptr_t retaddr)
2259eed56642SAlex Bennée {
2260fc4120a3SRichard Henderson     return load_helper(env, addr, oi, retaddr, MO_TEUL, true, full_ldl_code);
22612dd92606SRichard Henderson }
22622dd92606SRichard Henderson 
2263fc4120a3SRichard Henderson uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr)
22644cef72d0SAlex Bennée {
2265fc4120a3SRichard Henderson     TCGMemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, true));
2266fc4120a3SRichard Henderson     return full_ldl_code(env, addr, oi, 0);
22674cef72d0SAlex Bennée }
22684cef72d0SAlex Bennée 
2269fc4120a3SRichard Henderson static uint64_t full_ldq_code(CPUArchState *env, target_ulong addr,
22702dd92606SRichard Henderson                               TCGMemOpIdx oi, uintptr_t retaddr)
22712dd92606SRichard Henderson {
2272fc4120a3SRichard Henderson     return load_helper(env, addr, oi, retaddr, MO_TEQ, true, full_ldq_code);
2273eed56642SAlex Bennée }
2274d9bb58e5SYang Zhong 
2275fc4120a3SRichard Henderson uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr)
2276eed56642SAlex Bennée {
2277fc4120a3SRichard Henderson     TCGMemOpIdx oi = make_memop_idx(MO_TEQ, cpu_mmu_index(env, true));
2278fc4120a3SRichard Henderson     return full_ldq_code(env, addr, oi, 0);
2279eed56642SAlex Bennée }
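
/*
 * Editorial sketch: a target translator typically fetches opcodes with
 * these, e.g. for a fixed 32-bit encoding:
 *
 *     uint32_t insn = cpu_ldl_code(env, dc->base.pc_next);
 *
 * where "dc" stands for the target's DisasContext and is shown here
 * only for illustration.
 */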