/*
 * Common CPU TLB handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "hw/core/tcg-cpu-ops.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
#include "qemu/error-report.h"
#include "exec/log.h"
#include "exec/helper-proto.h"
#include "qemu/atomic.h"
#include "qemu/atomic128.h"
#include "exec/translate-all.h"
#include "trace.h"
#include "tb-hash.h"
#include "internal.h"
#ifdef CONFIG_PLUGIN
#include "qemu/plugin-memory.h"
#endif
#include "tcg/tcg-ldst.h"

/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif

#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)

#define assert_cpu_is_self(cpu) do {                              \
        if (DEBUG_TLB_GATE) {                                     \
            g_assert(!(cpu)->created || qemu_cpu_is_self(cpu));   \
        }                                                         \
    } while (0)

/* run_on_cpu_data.target_ptr should always be big enough for a
 * target_ulong even on 32 bit builds */
QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));

/* We currently can't handle more than 16 bits in the MMUIDX bitmask. */
QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
#define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)

static inline size_t tlb_n_entries(CPUTLBDescFast *fast)
{
    return (fast->mask >> CPU_TLB_ENTRY_BITS) + 1;
}

static inline size_t sizeof_tlb(CPUTLBDescFast *fast)
{
    return fast->mask + (1 << CPU_TLB_ENTRY_BITS);
}

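/*
 * Illustrative example of the fast->mask encoding above (values assumed,
 * not taken from any particular build): if CPU_TLB_ENTRY_BITS == 5,
 * i.e. 32-byte entries, a 256-entry TLB stores
 * fast->mask == (256 - 1) << 5 == 8160, from which tlb_n_entries()
 * recovers (8160 >> 5) + 1 == 256 and sizeof_tlb() gives
 * 8160 + 32 == 8192 bytes.
 */
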
static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
                             size_t max_entries)
{
    desc->window_begin_ns = ns;
    desc->window_max_entries = max_entries;
}

static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
{
    CPUJumpCache *jc = cpu->tb_jmp_cache;
    int i, i0;

    if (unlikely(!jc)) {
        return;
    }

    i0 = tb_jmp_cache_hash_page(page_addr);
    for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
        qatomic_set(&jc->array[i0 + i].tb, NULL);
    }
}

/**
 * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
 * @desc: The CPUTLBDesc portion of the TLB
 * @fast: The CPUTLBDescFast portion of the same TLB
 *
 * Called with tlb_lock held.
 *
 * We have two main constraints when resizing a TLB: (1) we only resize it
 * on a TLB flush (otherwise we'd have to take a perf hit by either rehashing
 * the array or unnecessarily flushing it), which means we do not control how
 * frequently the resizing can occur; (2) we don't have access to the guest's
 * future scheduling decisions, and therefore have to decide the magnitude of
 * the resize based on past observations.
 *
 * In general, a memory-hungry process can benefit greatly from an appropriately
 * sized TLB, since a guest TLB miss is very expensive. This doesn't mean that
 * we just have to make the TLB as large as possible; while an oversized TLB
 * results in minimal TLB miss rates, it also takes longer to be flushed
 * (flushes can be _very_ frequent), and the reduced locality can also hurt
 * performance.
 *
 * To achieve near-optimal performance for all kinds of workloads, we:
 *
 * 1. Aggressively increase the size of the TLB when the use rate of the
 * TLB being flushed is high, since it is likely that in the near future this
 * memory-hungry process will execute again, and its memory hungriness will
 * probably be similar.
 *
 * 2. Slowly reduce the size of the TLB as the use rate declines over a
 * reasonably large time window. The rationale is that if in such a time window
 * we have not observed a high TLB use rate, it is likely that we won't observe
 * it in the near future. In that case, once a time window expires we downsize
 * the TLB to match the maximum use rate observed in the window.
 *
 * 3. Try to keep the maximum use rate in a time window in the 30-70% range,
 * since in that range performance is likely near-optimal. Recall that the TLB
 * is direct mapped, so we want the use rate to be low (or at least not too
 * high), since otherwise we are likely to have a significant amount of
 * conflict misses.
 */
static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast,
                                  int64_t now)
{
    size_t old_size = tlb_n_entries(fast);
    size_t rate;
    size_t new_size = old_size;
    int64_t window_len_ms = 100;
    int64_t window_len_ns = window_len_ms * 1000 * 1000;
    bool window_expired = now > desc->window_begin_ns + window_len_ns;

    if (desc->n_used_entries > desc->window_max_entries) {
        desc->window_max_entries = desc->n_used_entries;
    }
    rate = desc->window_max_entries * 100 / old_size;

    if (rate > 70) {
        new_size = MIN(old_size << 1, 1 << CPU_TLB_DYN_MAX_BITS);
    } else if (rate < 30 && window_expired) {
        size_t ceil = pow2ceil(desc->window_max_entries);
        size_t expected_rate = desc->window_max_entries * 100 / ceil;

        /*
         * Avoid undersizing when the max number of entries seen is just below
         * a pow2. For instance, if max_entries == 1025, the expected use rate
         * would be 1025/2048==50%. However, if max_entries == 1023, we'd get
         * 1023/1024==99.9% use rate, so we'd likely end up doubling the size
         * later. Thus, make sure that the expected use rate remains below 70%.
         * (and since we double the size, that means the lowest rate we'd
         * expect to get is 35%, which is still in the 30-70% range where
         * we consider that the size is appropriate.)
         */
        if (expected_rate > 70) {
            ceil *= 2;
        }
        new_size = MAX(ceil, 1 << CPU_TLB_DYN_MIN_BITS);
    }

    if (new_size == old_size) {
        if (window_expired) {
            tlb_window_reset(desc, now, desc->n_used_entries);
        }
        return;
    }

    g_free(fast->table);
    g_free(desc->fulltlb);

    tlb_window_reset(desc, now, 0);
    /* desc->n_used_entries is cleared by the caller */
    fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
    fast->table = g_try_new(CPUTLBEntry, new_size);
    desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size);

    /*
     * If the allocations fail, try smaller sizes. We just freed some
     * memory, so going back to half of new_size has a good chance of working.
     * Increased memory pressure elsewhere in the system might cause the
     * allocations to fail though, so we progressively reduce the allocation
     * size, aborting if we cannot even allocate the smallest TLB we support.
     */
    while (fast->table == NULL || desc->fulltlb == NULL) {
        if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
            error_report("%s: %s", __func__, strerror(errno));
            abort();
        }
        new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS);
        fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;

        g_free(fast->table);
        g_free(desc->fulltlb);
        fast->table = g_try_new(CPUTLBEntry, new_size);
        desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size);
    }
}

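/*
 * Worked example of the sizing decision above (illustrative numbers):
 * with old_size == 1024 and window_max_entries == 800, rate == 78, so
 * the table doubles to 2048 entries (bounded by CPU_TLB_DYN_MAX_BITS).
 * With window_max_entries == 200 and an expired window, rate == 19 and
 * pow2ceil(200) == 256; the expected rate 200 * 100 / 256 == 78 exceeds
 * 70, so ceil doubles to 512 and the table shrinks to
 * MAX(512, 1 << CPU_TLB_DYN_MIN_BITS) entries.
 */
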
static void tlb_mmu_flush_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast)
{
    desc->n_used_entries = 0;
    desc->large_page_addr = -1;
    desc->large_page_mask = -1;
    desc->vindex = 0;
    memset(fast->table, -1, sizeof_tlb(fast));
    memset(desc->vtable, -1, sizeof(desc->vtable));
}

static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx,
                                        int64_t now)
{
    CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
    CPUTLBDescFast *fast = &env_tlb(env)->f[mmu_idx];

    tlb_mmu_resize_locked(desc, fast, now);
    tlb_mmu_flush_locked(desc, fast);
}

static void tlb_mmu_init(CPUTLBDesc *desc, CPUTLBDescFast *fast, int64_t now)
{
    size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;

    tlb_window_reset(desc, now, 0);
    desc->n_used_entries = 0;
    fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
    fast->table = g_new(CPUTLBEntry, n_entries);
    desc->fulltlb = g_new(CPUTLBEntryFull, n_entries);
    tlb_mmu_flush_locked(desc, fast);
}

static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
{
    env_tlb(env)->d[mmu_idx].n_used_entries++;
}

static inline void tlb_n_used_entries_dec(CPUArchState *env, uintptr_t mmu_idx)
{
    env_tlb(env)->d[mmu_idx].n_used_entries--;
}

void tlb_init(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;
    int64_t now = get_clock_realtime();
    int i;

    qemu_spin_init(&env_tlb(env)->c.lock);

    /* All tlbs are initialized flushed. */
    env_tlb(env)->c.dirty = 0;

    for (i = 0; i < NB_MMU_MODES; i++) {
        tlb_mmu_init(&env_tlb(env)->d[i], &env_tlb(env)->f[i], now);
    }
}

void tlb_destroy(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;
    int i;

    qemu_spin_destroy(&env_tlb(env)->c.lock);
    for (i = 0; i < NB_MMU_MODES; i++) {
        CPUTLBDesc *desc = &env_tlb(env)->d[i];
        CPUTLBDescFast *fast = &env_tlb(env)->f[i];

        g_free(fast->table);
        g_free(desc->fulltlb);
    }
}

/* flush_all_helper: run fn across all cpus
 *
 * All cpus other than @src are flushed asynchronously. If the caller
 * also queues fn as "safe" work on @src (see the *_synced variants
 * below), this creates a synchronisation point where all queued work
 * will be finished before execution starts again.
 */
static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
                             run_on_cpu_data d)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, fn, d);
        }
    }
}

void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
{
    CPUState *cpu;
    size_t full = 0, part = 0, elide = 0;

    CPU_FOREACH(cpu) {
        CPUArchState *env = cpu->env_ptr;

        full += qatomic_read(&env_tlb(env)->c.full_flush_count);
        part += qatomic_read(&env_tlb(env)->c.part_flush_count);
        elide += qatomic_read(&env_tlb(env)->c.elide_flush_count);
    }
    *pfull = full;
    *ppart = part;
    *pelide = elide;
}

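/*
 * Worked example for the flush bookkeeping below (illustrative values):
 * with asked == 0x0007 and c.dirty == 0x0005, to_clean == 0x0005, so
 * mmu_idx 0 and 2 are flushed and counted as partial flushes, mmu_idx 1
 * was already clean and is counted as an elided flush, and c.dirty
 * drops to 0.
 */
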
static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    uint16_t asked = data.host_int;
    uint16_t all_dirty, work, to_clean;
    int64_t now = get_clock_realtime();

    assert_cpu_is_self(cpu);

    tlb_debug("mmu_idx:0x%04" PRIx16 "\n", asked);

    qemu_spin_lock(&env_tlb(env)->c.lock);

    all_dirty = env_tlb(env)->c.dirty;
    to_clean = asked & all_dirty;
    all_dirty &= ~to_clean;
    env_tlb(env)->c.dirty = all_dirty;

    for (work = to_clean; work != 0; work &= work - 1) {
        int mmu_idx = ctz32(work);
        tlb_flush_one_mmuidx_locked(env, mmu_idx, now);
    }

    qemu_spin_unlock(&env_tlb(env)->c.lock);

    tcg_flush_jmp_cache(cpu);

    if (to_clean == ALL_MMUIDX_BITS) {
        qatomic_set(&env_tlb(env)->c.full_flush_count,
                    env_tlb(env)->c.full_flush_count + 1);
    } else {
        qatomic_set(&env_tlb(env)->c.part_flush_count,
                    env_tlb(env)->c.part_flush_count + ctpop16(to_clean));
        if (to_clean != asked) {
            qatomic_set(&env_tlb(env)->c.elide_flush_count,
                        env_tlb(env)->c.elide_flush_count +
                        ctpop16(asked & ~to_clean));
        }
    }
}

void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
    tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);

    if (cpu->created && !qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
                         RUN_ON_CPU_HOST_INT(idxmap));
    } else {
        tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap));
    }
}

void tlb_flush(CPUState *cpu)
{
    tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS);
}

void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus(src_cpu, ALL_MMUIDX_BITS);
}

void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS);
}

static bool tlb_hit_page_mask_anyprot(CPUTLBEntry *tlb_entry,
                                      target_ulong page, target_ulong mask)
{
    page &= mask;
    mask &= TARGET_PAGE_MASK | TLB_INVALID_MASK;

    return (page == (tlb_entry->addr_read & mask) ||
            page == (tlb_addr_write(tlb_entry) & mask) ||
            page == (tlb_entry->addr_code & mask));
}

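/*
 * Illustrative behaviour of the mask comparison above (not a use in the
 * code): a call with mask == -1 reduces to an exact match on the bits
 * in TARGET_PAGE_MASK | TLB_INVALID_MASK. Keeping TLB_INVALID_MASK in
 * the comparison means an entry whose address has been marked invalid
 * can no longer match a clean page-aligned address, so it is treated
 * as a miss.
 */
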
static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry,
                                        target_ulong page)
{
    return tlb_hit_page_mask_anyprot(tlb_entry, page, -1);
}

/**
 * tlb_entry_is_empty - return true if the entry is not in use
 * @te: pointer to CPUTLBEntry
 */
static inline bool tlb_entry_is_empty(const CPUTLBEntry *te)
{
    return te->addr_read == -1 && te->addr_write == -1 && te->addr_code == -1;
}

/* Called with tlb_c.lock held */
static bool tlb_flush_entry_mask_locked(CPUTLBEntry *tlb_entry,
                                        target_ulong page,
                                        target_ulong mask)
{
    if (tlb_hit_page_mask_anyprot(tlb_entry, page, mask)) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
        return true;
    }
    return false;
}

static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry,
                                          target_ulong page)
{
    return tlb_flush_entry_mask_locked(tlb_entry, page, -1);
}

/* Called with tlb_c.lock held */
static void tlb_flush_vtlb_page_mask_locked(CPUArchState *env, int mmu_idx,
                                            target_ulong page,
                                            target_ulong mask)
{
    CPUTLBDesc *d = &env_tlb(env)->d[mmu_idx];
    int k;

    assert_cpu_is_self(env_cpu(env));
    for (k = 0; k < CPU_VTLB_SIZE; k++) {
        if (tlb_flush_entry_mask_locked(&d->vtable[k], page, mask)) {
            tlb_n_used_entries_dec(env, mmu_idx);
        }
    }
}

static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx,
                                              target_ulong page)
{
    tlb_flush_vtlb_page_mask_locked(env, mmu_idx, page, -1);
}

static void tlb_flush_page_locked(CPUArchState *env, int midx,
                                  target_ulong page)
{
    target_ulong lp_addr = env_tlb(env)->d[midx].large_page_addr;
    target_ulong lp_mask = env_tlb(env)->d[midx].large_page_mask;

    /* Check if we need to flush due to large pages. */
    if ((page & lp_mask) == lp_addr) {
        tlb_debug("forcing full flush midx %d ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  midx, lp_addr, lp_mask);
        tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
    } else {
        if (tlb_flush_entry_locked(tlb_entry(env, midx, page), page)) {
            tlb_n_used_entries_dec(env, midx);
        }
        tlb_flush_vtlb_page_locked(env, midx, page);
    }
}

/**
 * tlb_flush_page_by_mmuidx_async_0:
 * @cpu: cpu on which to flush
 * @addr: page of virtual address to flush
 * @idxmap: set of mmu_idx to flush
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, flush one page
 * at @addr from the tlbs indicated by @idxmap from @cpu.
 */
static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu,
                                             target_ulong addr,
                                             uint16_t idxmap)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("page addr:" TARGET_FMT_lx " mmu_map:0x%x\n", addr, idxmap);

    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if ((idxmap >> mmu_idx) & 1) {
            tlb_flush_page_locked(env, mmu_idx, addr);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);

    /*
     * Discard jump cache entries for any tb which might potentially
     * overlap the flushed page, which includes the previous.
     */
    tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
    tb_jmp_cache_clear_page(cpu, addr);
}

/**
 * tlb_flush_page_by_mmuidx_async_1:
 * @cpu: cpu on which to flush
 * @data: encoded addr + idxmap
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, called through
 * async_run_on_cpu. The idxmap parameter is encoded in the page
 * offset of the target_ptr field. This limits the set of mmu_idx
 * that can be passed via this method.
 */
static void tlb_flush_page_by_mmuidx_async_1(CPUState *cpu,
                                             run_on_cpu_data data)
{
    target_ulong addr_and_idxmap = (target_ulong) data.target_ptr;
    target_ulong addr = addr_and_idxmap & TARGET_PAGE_MASK;
    uint16_t idxmap = addr_and_idxmap & ~TARGET_PAGE_MASK;

    tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
}

typedef struct {
    target_ulong addr;
    uint16_t idxmap;
} TLBFlushPageByMMUIdxData;

/**
 * tlb_flush_page_by_mmuidx_async_2:
 * @cpu: cpu on which to flush
 * @data: allocated addr + idxmap
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, called through
 * async_run_on_cpu. The addr+idxmap parameters are stored in a
 * TLBFlushPageByMMUIdxData structure that has been allocated
 * specifically for this helper. Free the structure when done.
 */
static void tlb_flush_page_by_mmuidx_async_2(CPUState *cpu,
                                             run_on_cpu_data data)
{
    TLBFlushPageByMMUIdxData *d = data.host_ptr;

    tlb_flush_page_by_mmuidx_async_0(cpu, d->addr, d->idxmap);
    g_free(d);
}

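/*
 * Illustrative encoding (4KiB target pages assumed): with
 * TARGET_PAGE_BITS == 12, an idxmap of 0x003f fits below
 * TARGET_PAGE_SIZE, so addr | idxmap packs both arguments into the
 * single pointer-sized payload consumed by async_1 above, which splits
 * them again with TARGET_PAGE_MASK. A larger idxmap must take the
 * heap-allocated TLBFlushPageByMMUIdxData path via async_2.
 */
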
void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
{
    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    if (qemu_cpu_is_self(cpu)) {
        tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
    } else if (idxmap < TARGET_PAGE_SIZE) {
        /*
         * Most targets have only a few mmu_idx. In the case where
         * we can stuff idxmap into the low TARGET_PAGE_BITS, avoid
         * allocating memory for this operation.
         */
        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        TLBFlushPageByMMUIdxData *d = g_new(TLBFlushPageByMMUIdxData, 1);

        /* Otherwise allocate a structure, freed by the worker. */
        d->addr = addr;
        d->idxmap = idxmap;
        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_2,
                         RUN_ON_CPU_HOST_PTR(d));
    }
}

void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
    tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS);
}

void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr,
                                       uint16_t idxmap)
{
    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    /*
     * Allocate memory to hold addr+idxmap only when needed.
     * See tlb_flush_page_by_mmuidx for details.
     */
    if (idxmap < TARGET_PAGE_SIZE) {
        flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        CPUState *dst_cpu;

        /* Allocate a separate data block for each destination cpu. */
        CPU_FOREACH(dst_cpu) {
            if (dst_cpu != src_cpu) {
                TLBFlushPageByMMUIdxData *d
                    = g_new(TLBFlushPageByMMUIdxData, 1);

                d->addr = addr;
                d->idxmap = idxmap;
                async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
                                 RUN_ON_CPU_HOST_PTR(d));
            }
        }
    }

    tlb_flush_page_by_mmuidx_async_0(src_cpu, addr, idxmap);
}

void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
{
    tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS);
}

void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                              target_ulong addr,
                                              uint16_t idxmap)
{
    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    /*
     * Allocate memory to hold addr+idxmap only when needed.
     * See tlb_flush_page_by_mmuidx for details.
     */
    if (idxmap < TARGET_PAGE_SIZE) {
        flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
        async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                              RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        CPUState *dst_cpu;
        TLBFlushPageByMMUIdxData *d;

        /* Allocate a separate data block for each destination cpu. */
        CPU_FOREACH(dst_cpu) {
            if (dst_cpu != src_cpu) {
                d = g_new(TLBFlushPageByMMUIdxData, 1);
                d->addr = addr;
                d->idxmap = idxmap;
                async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
                                 RUN_ON_CPU_HOST_PTR(d));
            }
        }

        d = g_new(TLBFlushPageByMMUIdxData, 1);
        d->addr = addr;
        d->idxmap = idxmap;
        async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_2,
                              RUN_ON_CPU_HOST_PTR(d));
    }
}

void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr)
{
    tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
}

static void tlb_flush_range_locked(CPUArchState *env, int midx,
                                   target_ulong addr, target_ulong len,
                                   unsigned bits)
{
    CPUTLBDesc *d = &env_tlb(env)->d[midx];
    CPUTLBDescFast *f = &env_tlb(env)->f[midx];
    target_ulong mask = MAKE_64BIT_MASK(0, bits);

    /*
     * If @bits is smaller than the tlb size, there may be multiple entries
     * within the TLB; otherwise all addresses that match under @mask hit
     * the same TLB entry.
     * TODO: Perhaps allow bits to be a few bits less than the size.
     * For now, just flush the entire TLB.
     *
     * If @len is larger than the tlb size, then it will take longer to
     * test all of the entries in the TLB than it will to flush it all.
     */
    if (mask < f->mask || len > f->mask) {
        tlb_debug("forcing full flush midx %d ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx "+" TARGET_FMT_lx ")\n",
                  midx, addr, mask, len);
        tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
        return;
    }

    /*
     * Check if we need to flush due to large pages.
     * Because large_page_mask contains all 1's from the msb,
     * we only need to test the end of the range.
     */
    if (((addr + len - 1) & d->large_page_mask) == d->large_page_addr) {
        tlb_debug("forcing full flush midx %d ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  midx, d->large_page_addr, d->large_page_mask);
        tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
        return;
    }

    for (target_ulong i = 0; i < len; i += TARGET_PAGE_SIZE) {
        target_ulong page = addr + i;
        CPUTLBEntry *entry = tlb_entry(env, midx, page);

        if (tlb_flush_entry_mask_locked(entry, page, mask)) {
            tlb_n_used_entries_dec(env, midx);
        }
        tlb_flush_vtlb_page_mask_locked(env, midx, page, mask);
    }
}

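/*
 * Illustrative numbers for the full-flush heuristic above (values
 * assumed): with 4KiB pages, flushing a 2MiB range walks 512 pages,
 * while a small direct-mapped TLB holds far fewer entries; once @len
 * exceeds f->mask, testing every page is expected to cost more than
 * simply flushing the whole mmu_idx, so the code punts to
 * tlb_flush_one_mmuidx_locked.
 */
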
typedef struct {
    target_ulong addr;
    target_ulong len;
    uint16_t idxmap;
    uint16_t bits;
} TLBFlushRangeData;

static void tlb_flush_range_by_mmuidx_async_0(CPUState *cpu,
                                              TLBFlushRangeData d)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("range:" TARGET_FMT_lx "/%u+" TARGET_FMT_lx " mmu_map:0x%x\n",
              d.addr, d.bits, d.len, d.idxmap);

    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if ((d.idxmap >> mmu_idx) & 1) {
            tlb_flush_range_locked(env, mmu_idx, d.addr, d.len, d.bits);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);

    /*
     * If the length is larger than the jump cache size, then it will take
     * longer to clear each entry individually than it will to clear it all.
     */
    if (d.len >= (TARGET_PAGE_SIZE * TB_JMP_CACHE_SIZE)) {
        tcg_flush_jmp_cache(cpu);
        return;
    }

    /*
     * Discard jump cache entries for any tb which might potentially
     * overlap the flushed pages, which includes the previous.
     */
    d.addr -= TARGET_PAGE_SIZE;
    for (target_ulong i = 0, n = d.len / TARGET_PAGE_SIZE + 1; i < n; i++) {
        tb_jmp_cache_clear_page(cpu, d.addr);
        d.addr += TARGET_PAGE_SIZE;
    }
}

static void tlb_flush_range_by_mmuidx_async_1(CPUState *cpu,
                                              run_on_cpu_data data)
{
    TLBFlushRangeData *d = data.host_ptr;
    tlb_flush_range_by_mmuidx_async_0(cpu, *d);
    g_free(d);
}

void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr,
                               target_ulong len, uint16_t idxmap,
                               unsigned bits)
{
    TLBFlushRangeData d;

    /*
     * If all bits are significant, and len is small,
     * this devolves to tlb_flush_page.
     */
    if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
        tlb_flush_page_by_mmuidx(cpu, addr, idxmap);
        return;
    }
    /* If no page bits are significant, this devolves to tlb_flush. */
    if (bits < TARGET_PAGE_BITS) {
        tlb_flush_by_mmuidx(cpu, idxmap);
        return;
    }

    /* This should already be page aligned */
    d.addr = addr & TARGET_PAGE_MASK;
    d.len = len;
    d.idxmap = idxmap;
    d.bits = bits;

    if (qemu_cpu_is_self(cpu)) {
        tlb_flush_range_by_mmuidx_async_0(cpu, d);
    } else {
        /* Otherwise allocate a structure, freed by the worker. */
        TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
        async_run_on_cpu(cpu, tlb_flush_range_by_mmuidx_async_1,
                         RUN_ON_CPU_HOST_PTR(p));
    }
}

void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr,
                                   uint16_t idxmap, unsigned bits)
{
    tlb_flush_range_by_mmuidx(cpu, addr, TARGET_PAGE_SIZE, idxmap, bits);
}

void tlb_flush_range_by_mmuidx_all_cpus(CPUState *src_cpu,
                                        target_ulong addr, target_ulong len,
                                        uint16_t idxmap, unsigned bits)
{
    TLBFlushRangeData d;
    CPUState *dst_cpu;

    /*
     * If all bits are significant, and len is small,
     * this devolves to tlb_flush_page.
     */
    if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
        tlb_flush_page_by_mmuidx_all_cpus(src_cpu, addr, idxmap);
        return;
    }
    /* If no page bits are significant, this devolves to tlb_flush. */
    if (bits < TARGET_PAGE_BITS) {
        tlb_flush_by_mmuidx_all_cpus(src_cpu, idxmap);
        return;
    }

    /* This should already be page aligned */
    d.addr = addr & TARGET_PAGE_MASK;
    d.len = len;
    d.idxmap = idxmap;
    d.bits = bits;

    /* Allocate a separate data block for each destination cpu. */
    CPU_FOREACH(dst_cpu) {
        if (dst_cpu != src_cpu) {
            TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
            async_run_on_cpu(dst_cpu,
                             tlb_flush_range_by_mmuidx_async_1,
                             RUN_ON_CPU_HOST_PTR(p));
        }
    }

    tlb_flush_range_by_mmuidx_async_0(src_cpu, d);
}

void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu,
                                            target_ulong addr,
                                            uint16_t idxmap, unsigned bits)
{
    tlb_flush_range_by_mmuidx_all_cpus(src_cpu, addr, TARGET_PAGE_SIZE,
                                       idxmap, bits);
}

void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                               target_ulong addr,
                                               target_ulong len,
                                               uint16_t idxmap,
                                               unsigned bits)
{
    TLBFlushRangeData d, *p;
    CPUState *dst_cpu;

    /*
     * If all bits are significant, and len is small,
     * this devolves to tlb_flush_page.
     */
    if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
        tlb_flush_page_by_mmuidx_all_cpus_synced(src_cpu, addr, idxmap);
        return;
    }
    /* If no page bits are significant, this devolves to tlb_flush. */
    if (bits < TARGET_PAGE_BITS) {
        tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, idxmap);
        return;
    }

    /* This should already be page aligned */
    d.addr = addr & TARGET_PAGE_MASK;
    d.len = len;
    d.idxmap = idxmap;
    d.bits = bits;

    /* Allocate a separate data block for each destination cpu. */
    CPU_FOREACH(dst_cpu) {
        if (dst_cpu != src_cpu) {
            p = g_memdup(&d, sizeof(d));
            async_run_on_cpu(dst_cpu, tlb_flush_range_by_mmuidx_async_1,
                             RUN_ON_CPU_HOST_PTR(p));
        }
    }

    p = g_memdup(&d, sizeof(d));
    async_safe_run_on_cpu(src_cpu, tlb_flush_range_by_mmuidx_async_1,
                          RUN_ON_CPU_HOST_PTR(p));
}

void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                                   target_ulong addr,
                                                   uint16_t idxmap,
                                                   unsigned bits)
{
    tlb_flush_range_by_mmuidx_all_cpus_synced(src_cpu, addr, TARGET_PAGE_SIZE,
                                              idxmap, bits);
}

/* update the TLBs so that writes to code in the physical page 'ram_addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_test_and_clear_dirty(ram_addr & TARGET_PAGE_MASK,
                                             TARGET_PAGE_SIZE,
                                             DIRTY_MEMORY_CODE);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
void tlb_unprotect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}


/*
 * Dirty write flag handling
 *
 * When the TCG code writes to a location it looks up the address in
 * the TLB and uses that data to compute the final address. If any of
 * the lower bits of the address are set then the slow path is forced.
 * There are a number of reasons to do this but for normal RAM the
 * most usual is detecting writes to code regions which may invalidate
 * generated code.
 *
 * Other vCPUs might be reading their TLBs during guest execution, so we update
 * te->addr_write with qatomic_set. We don't need to worry about this for
 * oversized guests as MTTCG is disabled for them.
 *
 * Called with tlb_c.lock held.
 */
static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
                                         uintptr_t start, uintptr_t length)
{
    uintptr_t addr = tlb_entry->addr_write;

    if ((addr & (TLB_INVALID_MASK | TLB_MMIO |
                 TLB_DISCARD_WRITE | TLB_NOTDIRTY)) == 0) {
        addr &= TARGET_PAGE_MASK;
        addr += tlb_entry->addend;
        if ((addr - start) < length) {
#if TCG_OVERSIZED_GUEST
            tlb_entry->addr_write |= TLB_NOTDIRTY;
#else
            qatomic_set(&tlb_entry->addr_write,
                        tlb_entry->addr_write | TLB_NOTDIRTY);
#endif
        }
    }
}

/*
 * Called with tlb_c.lock held.
 * Called only from the vCPU context, i.e. the TLB's owner thread.
 */
static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s)
{
    *d = *s;
}

/* This is a cross vCPU call (i.e. another vCPU resetting the flags of
 * the target vCPU).
 * We must take tlb_c.lock to avoid racing with another vCPU update. The only
 * thing actually updated is the target TLB entry ->addr_write flags.
 */
void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
{
    CPUArchState *env;

    int mmu_idx;

    env = cpu->env_ptr;
    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        unsigned int i;
        unsigned int n = tlb_n_entries(&env_tlb(env)->f[mmu_idx]);

        for (i = 0; i < n; i++) {
            tlb_reset_dirty_range_locked(&env_tlb(env)->f[mmu_idx].table[i],
                                         start1, length);
        }

        for (i = 0; i < CPU_VTLB_SIZE; i++) {
            tlb_reset_dirty_range_locked(&env_tlb(env)->d[mmu_idx].vtable[i],
                                         start1, length);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);
}

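/*
 * Illustrative consequence of the flag handling above (addresses
 * assumed): once addr_write holds e.g. (0x40002000 | TLB_NOTDIRTY), the
 * page-aligned comparison in the generated fast path can never match,
 * so every store to that page takes the slow path until tlb_set_dirty()
 * below clears the flag again.
 */
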
Cota target_ulong vaddr) 1054d9bb58e5SYang Zhong { 1055d9bb58e5SYang Zhong if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) { 1056d9bb58e5SYang Zhong tlb_entry->addr_write = vaddr; 1057d9bb58e5SYang Zhong } 1058d9bb58e5SYang Zhong } 1059d9bb58e5SYang Zhong 1060d9bb58e5SYang Zhong /* update the TLB corresponding to virtual page vaddr 1061d9bb58e5SYang Zhong so that it is no longer dirty */ 1062d9bb58e5SYang Zhong void tlb_set_dirty(CPUState *cpu, target_ulong vaddr) 1063d9bb58e5SYang Zhong { 1064d9bb58e5SYang Zhong CPUArchState *env = cpu->env_ptr; 1065d9bb58e5SYang Zhong int mmu_idx; 1066d9bb58e5SYang Zhong 1067d9bb58e5SYang Zhong assert_cpu_is_self(cpu); 1068d9bb58e5SYang Zhong 1069d9bb58e5SYang Zhong vaddr &= TARGET_PAGE_MASK; 1070a40ec84eSRichard Henderson qemu_spin_lock(&env_tlb(env)->c.lock); 1071d9bb58e5SYang Zhong for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { 1072383beda9SRichard Henderson tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, vaddr), vaddr); 1073d9bb58e5SYang Zhong } 1074d9bb58e5SYang Zhong 1075d9bb58e5SYang Zhong for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { 1076d9bb58e5SYang Zhong int k; 1077d9bb58e5SYang Zhong for (k = 0; k < CPU_VTLB_SIZE; k++) { 1078a40ec84eSRichard Henderson tlb_set_dirty1_locked(&env_tlb(env)->d[mmu_idx].vtable[k], vaddr); 1079d9bb58e5SYang Zhong } 1080d9bb58e5SYang Zhong } 1081a40ec84eSRichard Henderson qemu_spin_unlock(&env_tlb(env)->c.lock); 1082d9bb58e5SYang Zhong } 1083d9bb58e5SYang Zhong 1084d9bb58e5SYang Zhong /* Our TLB does not support large pages, so remember the area covered by 1085d9bb58e5SYang Zhong large pages and trigger a full TLB flush if these are invalidated. */ 10861308e026SRichard Henderson static void tlb_add_large_page(CPUArchState *env, int mmu_idx, 10871308e026SRichard Henderson target_ulong vaddr, target_ulong size) 1088d9bb58e5SYang Zhong { 1089a40ec84eSRichard Henderson target_ulong lp_addr = env_tlb(env)->d[mmu_idx].large_page_addr; 10901308e026SRichard Henderson target_ulong lp_mask = ~(size - 1); 1091d9bb58e5SYang Zhong 10921308e026SRichard Henderson if (lp_addr == (target_ulong)-1) { 10931308e026SRichard Henderson /* No previous large page. */ 10941308e026SRichard Henderson lp_addr = vaddr; 10951308e026SRichard Henderson } else { 1096d9bb58e5SYang Zhong /* Extend the existing region to include the new page. 10971308e026SRichard Henderson This is a compromise between unnecessary flushes and 10981308e026SRichard Henderson the cost of maintaining a full variable-size TLB. */ 1099a40ec84eSRichard Henderson lp_mask &= env_tlb(env)->d[mmu_idx].large_page_mask; 11001308e026SRichard Henderson while (((lp_addr ^ vaddr) & lp_mask) != 0) { 11011308e026SRichard Henderson lp_mask <<= 1; 1102d9bb58e5SYang Zhong } 11031308e026SRichard Henderson } 1104a40ec84eSRichard Henderson env_tlb(env)->d[mmu_idx].large_page_addr = lp_addr & lp_mask; 1105a40ec84eSRichard Henderson env_tlb(env)->d[mmu_idx].large_page_mask = lp_mask; 1106d9bb58e5SYang Zhong } 1107d9bb58e5SYang Zhong 110840473689SRichard Henderson /* 110940473689SRichard Henderson * Add a new TLB entry. At most one entry for a given virtual address 1110d9bb58e5SYang Zhong * is permitted. Only a single TARGET_PAGE_SIZE region is mapped; the 1111d9bb58e5SYang Zhong * supplied size is only used by tlb_flush_page. 1112d9bb58e5SYang Zhong * 1113d9bb58e5SYang Zhong * Called from TCG-generated code, which is under an RCU read-side 1114d9bb58e5SYang Zhong * critical section.
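 * A target's tlb_fill hook typically builds a CPUTLBEntryFull and ends
 * with a call such as (illustrative sketch, not code from this file):
 *
 *     CPUTLBEntryFull full = {
 *         .phys_addr = paddr, .attrs = attrs,
 *         .prot = prot, .lg_page_size = TARGET_PAGE_BITS,
 *     };
 *     tlb_set_page_full(cs, mmu_idx, vaddr & TARGET_PAGE_MASK, &full);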
1115d9bb58e5SYang Zhong */ 111640473689SRichard Henderson void tlb_set_page_full(CPUState *cpu, int mmu_idx, 111740473689SRichard Henderson target_ulong vaddr, CPUTLBEntryFull *full) 1118d9bb58e5SYang Zhong { 1119d9bb58e5SYang Zhong CPUArchState *env = cpu->env_ptr; 1120a40ec84eSRichard Henderson CPUTLB *tlb = env_tlb(env); 1121a40ec84eSRichard Henderson CPUTLBDesc *desc = &tlb->d[mmu_idx]; 1122d9bb58e5SYang Zhong MemoryRegionSection *section; 1123d9bb58e5SYang Zhong unsigned int index; 1124d9bb58e5SYang Zhong target_ulong address; 11258f5db641SRichard Henderson target_ulong write_address; 1126d9bb58e5SYang Zhong uintptr_t addend; 112768fea038SRichard Henderson CPUTLBEntry *te, tn; 112855df6fcfSPeter Maydell hwaddr iotlb, xlat, sz, paddr_page; 112955df6fcfSPeter Maydell target_ulong vaddr_page; 113040473689SRichard Henderson int asidx, wp_flags, prot; 11318f5db641SRichard Henderson bool is_ram, is_romd; 1132d9bb58e5SYang Zhong 1133d9bb58e5SYang Zhong assert_cpu_is_self(cpu); 113455df6fcfSPeter Maydell 113540473689SRichard Henderson if (full->lg_page_size <= TARGET_PAGE_BITS) { 113655df6fcfSPeter Maydell sz = TARGET_PAGE_SIZE; 113755df6fcfSPeter Maydell } else { 113840473689SRichard Henderson sz = (hwaddr)1 << full->lg_page_size; 113940473689SRichard Henderson tlb_add_large_page(env, mmu_idx, vaddr, sz); 114055df6fcfSPeter Maydell } 114155df6fcfSPeter Maydell vaddr_page = vaddr & TARGET_PAGE_MASK; 114240473689SRichard Henderson paddr_page = full->phys_addr & TARGET_PAGE_MASK; 114355df6fcfSPeter Maydell 114440473689SRichard Henderson prot = full->prot; 114540473689SRichard Henderson asidx = cpu_asidx_from_attrs(cpu, full->attrs); 114655df6fcfSPeter Maydell section = address_space_translate_for_iotlb(cpu, asidx, paddr_page, 114740473689SRichard Henderson &xlat, &sz, full->attrs, &prot); 1148d9bb58e5SYang Zhong assert(sz >= TARGET_PAGE_SIZE); 1149d9bb58e5SYang Zhong 1150883f2c59SPhilippe Mathieu-Daudé tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" HWADDR_FMT_plx 1151d9bb58e5SYang Zhong " prot=%x idx=%d\n", 115240473689SRichard Henderson vaddr, full->phys_addr, prot, mmu_idx); 1153d9bb58e5SYang Zhong 115455df6fcfSPeter Maydell address = vaddr_page; 115540473689SRichard Henderson if (full->lg_page_size < TARGET_PAGE_BITS) { 115630d7e098SRichard Henderson /* Repeat the MMU check and TLB fill on every access. */ 115730d7e098SRichard Henderson address |= TLB_INVALID_MASK; 115855df6fcfSPeter Maydell } 115940473689SRichard Henderson if (full->attrs.byte_swap) { 11605b87b3e6SRichard Henderson address |= TLB_BSWAP; 1161a26fc6f5STony Nguyen } 11628f5db641SRichard Henderson 11638f5db641SRichard Henderson is_ram = memory_region_is_ram(section->mr); 11648f5db641SRichard Henderson is_romd = memory_region_is_romd(section->mr); 11658f5db641SRichard Henderson 11668f5db641SRichard Henderson if (is_ram || is_romd) { 11678f5db641SRichard Henderson /* RAM and ROMD both have associated host memory. */ 1168d9bb58e5SYang Zhong addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat; 11698f5db641SRichard Henderson } else { 11708f5db641SRichard Henderson /* I/O does not; force the host address to NULL. 
*/ 11718f5db641SRichard Henderson addend = 0; 1172d9bb58e5SYang Zhong } 1173d9bb58e5SYang Zhong 11748f5db641SRichard Henderson write_address = address; 11758f5db641SRichard Henderson if (is_ram) { 11768f5db641SRichard Henderson iotlb = memory_region_get_ram_addr(section->mr) + xlat; 11778f5db641SRichard Henderson /* 11788f5db641SRichard Henderson * Computing is_clean is expensive; avoid all that unless 11798f5db641SRichard Henderson * the page is actually writable. 11808f5db641SRichard Henderson */ 11818f5db641SRichard Henderson if (prot & PAGE_WRITE) { 11828f5db641SRichard Henderson if (section->readonly) { 11838f5db641SRichard Henderson write_address |= TLB_DISCARD_WRITE; 11848f5db641SRichard Henderson } else if (cpu_physical_memory_is_clean(iotlb)) { 11858f5db641SRichard Henderson write_address |= TLB_NOTDIRTY; 11868f5db641SRichard Henderson } 11878f5db641SRichard Henderson } 11888f5db641SRichard Henderson } else { 11898f5db641SRichard Henderson /* I/O or ROMD */ 11908f5db641SRichard Henderson iotlb = memory_region_section_get_iotlb(cpu, section) + xlat; 11918f5db641SRichard Henderson /* 11928f5db641SRichard Henderson * Writes to romd devices must go through MMIO to enable write. 11938f5db641SRichard Henderson * Reads to romd devices go through the ram_ptr found above, 11948f5db641SRichard Henderson * but of course reads to I/O must go through MMIO. 11958f5db641SRichard Henderson */ 11968f5db641SRichard Henderson write_address |= TLB_MMIO; 11978f5db641SRichard Henderson if (!is_romd) { 11988f5db641SRichard Henderson address = write_address; 11998f5db641SRichard Henderson } 12008f5db641SRichard Henderson } 12018f5db641SRichard Henderson 120250b107c5SRichard Henderson wp_flags = cpu_watchpoint_address_matches(cpu, vaddr_page, 120350b107c5SRichard Henderson TARGET_PAGE_SIZE); 1204d9bb58e5SYang Zhong 1205383beda9SRichard Henderson index = tlb_index(env, mmu_idx, vaddr_page); 1206383beda9SRichard Henderson te = tlb_entry(env, mmu_idx, vaddr_page); 1207d9bb58e5SYang Zhong 120868fea038SRichard Henderson /* 120971aec354SEmilio G. Cota * Hold the TLB lock for the rest of the function. We could acquire/release 121071aec354SEmilio G. Cota * the lock several times in the function, but it is faster to amortize the 121171aec354SEmilio G. Cota * acquisition cost by acquiring it just once. Note that this leads to 121271aec354SEmilio G. Cota * a longer critical section, but this is not a concern since the TLB lock 121371aec354SEmilio G. Cota * is unlikely to be contended. 121471aec354SEmilio G. Cota */ 1215a40ec84eSRichard Henderson qemu_spin_lock(&tlb->c.lock); 121671aec354SEmilio G. Cota 12173d1523ceSRichard Henderson /* Note that the tlb is no longer clean. */ 1218a40ec84eSRichard Henderson tlb->c.dirty |= 1 << mmu_idx; 12193d1523ceSRichard Henderson 122071aec354SEmilio G. Cota /* Make sure there's no cached translation for the new page. */ 122171aec354SEmilio G. Cota tlb_flush_vtlb_page_locked(env, mmu_idx, vaddr_page); 122271aec354SEmilio G. Cota 122371aec354SEmilio G. Cota /* 122468fea038SRichard Henderson * Only evict the old entry to the victim tlb if it's for a 122568fea038SRichard Henderson * different page; otherwise just overwrite the stale data. 122668fea038SRichard Henderson */ 12273cea94bbSEmilio G. 
Cota if (!tlb_hit_page_anyprot(te, vaddr_page) && !tlb_entry_is_empty(te)) { 1228a40ec84eSRichard Henderson unsigned vidx = desc->vindex++ % CPU_VTLB_SIZE; 1229a40ec84eSRichard Henderson CPUTLBEntry *tv = &desc->vtable[vidx]; 123068fea038SRichard Henderson 123168fea038SRichard Henderson /* Evict the old entry into the victim tlb. */ 123271aec354SEmilio G. Cota copy_tlb_helper_locked(tv, te); 123325d3ec58SRichard Henderson desc->vfulltlb[vidx] = desc->fulltlb[index]; 123486e1eff8SEmilio G. Cota tlb_n_used_entries_dec(env, mmu_idx); 123568fea038SRichard Henderson } 1236d9bb58e5SYang Zhong 1237d9bb58e5SYang Zhong /* refill the tlb */ 1238ace41090SPeter Maydell /* 1239ace41090SPeter Maydell * At this point iotlb contains a physical section number in the lower 1240ace41090SPeter Maydell * TARGET_PAGE_BITS, and either 12418f5db641SRichard Henderson * + the ram_addr_t of the page base of the target RAM (RAM) 12428f5db641SRichard Henderson * + the offset within section->mr of the page base (I/O, ROMD) 124355df6fcfSPeter Maydell * We subtract the vaddr_page (which is page aligned and thus won't 1244ace41090SPeter Maydell * disturb the low bits) to give an offset which can be added to the 1245ace41090SPeter Maydell * (non-page-aligned) vaddr of the eventual memory access to get 1246ace41090SPeter Maydell * the MemoryRegion offset for the access. Note that the vaddr we 1247ace41090SPeter Maydell * subtract here is that of the page base, and not the same as the 1248ace41090SPeter Maydell * vaddr we add back in io_readx()/io_writex()/get_page_addr_code(). 1249ace41090SPeter Maydell */ 125040473689SRichard Henderson desc->fulltlb[index] = *full; 125125d3ec58SRichard Henderson desc->fulltlb[index].xlat_section = iotlb - vaddr_page; 125240473689SRichard Henderson desc->fulltlb[index].phys_addr = paddr_page; 125340473689SRichard Henderson desc->fulltlb[index].prot = prot; 1254d9bb58e5SYang Zhong 1255d9bb58e5SYang Zhong /* Now calculate the new entry */ 125655df6fcfSPeter Maydell tn.addend = addend - vaddr_page; 1257d9bb58e5SYang Zhong if (prot & PAGE_READ) { 1258d9bb58e5SYang Zhong tn.addr_read = address; 125950b107c5SRichard Henderson if (wp_flags & BP_MEM_READ) { 126050b107c5SRichard Henderson tn.addr_read |= TLB_WATCHPOINT; 126150b107c5SRichard Henderson } 1262d9bb58e5SYang Zhong } else { 1263d9bb58e5SYang Zhong tn.addr_read = -1; 1264d9bb58e5SYang Zhong } 1265d9bb58e5SYang Zhong 1266d9bb58e5SYang Zhong if (prot & PAGE_EXEC) { 12678f5db641SRichard Henderson tn.addr_code = address; 1268d9bb58e5SYang Zhong } else { 1269d9bb58e5SYang Zhong tn.addr_code = -1; 1270d9bb58e5SYang Zhong } 1271d9bb58e5SYang Zhong 1272d9bb58e5SYang Zhong tn.addr_write = -1; 1273d9bb58e5SYang Zhong if (prot & PAGE_WRITE) { 12748f5db641SRichard Henderson tn.addr_write = write_address; 1275f52bfb12SDavid Hildenbrand if (prot & PAGE_WRITE_INV) { 1276f52bfb12SDavid Hildenbrand tn.addr_write |= TLB_INVALID_MASK; 1277f52bfb12SDavid Hildenbrand } 127850b107c5SRichard Henderson if (wp_flags & BP_MEM_WRITE) { 127950b107c5SRichard Henderson tn.addr_write |= TLB_WATCHPOINT; 128050b107c5SRichard Henderson } 1281d9bb58e5SYang Zhong } 1282d9bb58e5SYang Zhong 128371aec354SEmilio G. Cota copy_tlb_helper_locked(te, &tn); 128486e1eff8SEmilio G. 
Cota tlb_n_used_entries_inc(env, mmu_idx); 1285a40ec84eSRichard Henderson qemu_spin_unlock(&tlb->c.lock); 1286d9bb58e5SYang Zhong } 1287d9bb58e5SYang Zhong 128840473689SRichard Henderson void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr, 128940473689SRichard Henderson hwaddr paddr, MemTxAttrs attrs, int prot, 129040473689SRichard Henderson int mmu_idx, target_ulong size) 129140473689SRichard Henderson { 129240473689SRichard Henderson CPUTLBEntryFull full = { 129340473689SRichard Henderson .phys_addr = paddr, 129440473689SRichard Henderson .attrs = attrs, 129540473689SRichard Henderson .prot = prot, 129640473689SRichard Henderson .lg_page_size = ctz64(size) 129740473689SRichard Henderson }; 129840473689SRichard Henderson 129940473689SRichard Henderson assert(is_power_of_2(size)); 130040473689SRichard Henderson tlb_set_page_full(cpu, mmu_idx, vaddr, &full); 130140473689SRichard Henderson } 130240473689SRichard Henderson 1303d9bb58e5SYang Zhong void tlb_set_page(CPUState *cpu, target_ulong vaddr, 1304d9bb58e5SYang Zhong hwaddr paddr, int prot, 1305d9bb58e5SYang Zhong int mmu_idx, target_ulong size) 1306d9bb58e5SYang Zhong { 1307d9bb58e5SYang Zhong tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED, 1308d9bb58e5SYang Zhong prot, mmu_idx, size); 1309d9bb58e5SYang Zhong } 1310d9bb58e5SYang Zhong 1311c319dc13SRichard Henderson /* 1312c319dc13SRichard Henderson * Note: tlb_fill() can trigger a resize of the TLB. This means that all of the 1313c319dc13SRichard Henderson * caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must 1314c319dc13SRichard Henderson * be discarded and looked up again (e.g. via tlb_entry()). 1315c319dc13SRichard Henderson */ 1316c319dc13SRichard Henderson static void tlb_fill(CPUState *cpu, target_ulong addr, int size, 1317c319dc13SRichard Henderson MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) 1318c319dc13SRichard Henderson { 1319c319dc13SRichard Henderson bool ok; 1320c319dc13SRichard Henderson 1321c319dc13SRichard Henderson /* 1322c319dc13SRichard Henderson * This is not a probe, so the only valid return is success; failure 1323c319dc13SRichard Henderson * should result in exception + longjmp to the cpu loop.
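 * (A non-faulting lookup goes through probe_access_internal below instead,
 * which passes its nonfault argument through to the same tcg_ops->tlb_fill
 * hook and acts on the boolean result rather than asserting it.)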
1324c319dc13SRichard Henderson */ 13258810ee2aSAlex Bennée ok = cpu->cc->tcg_ops->tlb_fill(cpu, addr, size, 1326e124536fSEduardo Habkost access_type, mmu_idx, false, retaddr); 1327c319dc13SRichard Henderson assert(ok); 1328c319dc13SRichard Henderson } 1329c319dc13SRichard Henderson 133078271684SClaudio Fontana static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr, 133178271684SClaudio Fontana MMUAccessType access_type, 133278271684SClaudio Fontana int mmu_idx, uintptr_t retaddr) 133378271684SClaudio Fontana { 13348810ee2aSAlex Bennée cpu->cc->tcg_ops->do_unaligned_access(cpu, addr, access_type, 13358810ee2aSAlex Bennée mmu_idx, retaddr); 133678271684SClaudio Fontana } 133778271684SClaudio Fontana 133878271684SClaudio Fontana static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr, 133978271684SClaudio Fontana vaddr addr, unsigned size, 134078271684SClaudio Fontana MMUAccessType access_type, 134178271684SClaudio Fontana int mmu_idx, MemTxAttrs attrs, 134278271684SClaudio Fontana MemTxResult response, 134378271684SClaudio Fontana uintptr_t retaddr) 134478271684SClaudio Fontana { 134578271684SClaudio Fontana CPUClass *cc = CPU_GET_CLASS(cpu); 134678271684SClaudio Fontana 134778271684SClaudio Fontana if (!cpu->ignore_memory_transaction_failures && 134878271684SClaudio Fontana cc->tcg_ops->do_transaction_failed) { 134978271684SClaudio Fontana cc->tcg_ops->do_transaction_failed(cpu, physaddr, addr, size, 135078271684SClaudio Fontana access_type, mmu_idx, attrs, 135178271684SClaudio Fontana response, retaddr); 135278271684SClaudio Fontana } 135378271684SClaudio Fontana } 135478271684SClaudio Fontana 135525d3ec58SRichard Henderson static uint64_t io_readx(CPUArchState *env, CPUTLBEntryFull *full, 1356f1be3696SRichard Henderson int mmu_idx, target_ulong addr, uintptr_t retaddr, 1357be5c4787STony Nguyen MMUAccessType access_type, MemOp op) 1358d9bb58e5SYang Zhong { 135929a0af61SRichard Henderson CPUState *cpu = env_cpu(env); 13602d54f194SPeter Maydell hwaddr mr_offset; 13612d54f194SPeter Maydell MemoryRegionSection *section; 13622d54f194SPeter Maydell MemoryRegion *mr; 1363d9bb58e5SYang Zhong uint64_t val; 136404e3aabdSPeter Maydell MemTxResult r; 1365d9bb58e5SYang Zhong 136625d3ec58SRichard Henderson section = iotlb_to_section(cpu, full->xlat_section, full->attrs); 13672d54f194SPeter Maydell mr = section->mr; 136825d3ec58SRichard Henderson mr_offset = (full->xlat_section & TARGET_PAGE_MASK) + addr; 1369d9bb58e5SYang Zhong cpu->mem_io_pc = retaddr; 137008565552SRichard Henderson if (!cpu->can_do_io) { 1371d9bb58e5SYang Zhong cpu_io_recompile(cpu, retaddr); 1372d9bb58e5SYang Zhong } 1373d9bb58e5SYang Zhong 137461b59fb2SRichard Henderson { 137561b59fb2SRichard Henderson QEMU_IOTHREAD_LOCK_GUARD(); 137625d3ec58SRichard Henderson r = memory_region_dispatch_read(mr, mr_offset, &val, op, full->attrs); 137761b59fb2SRichard Henderson } 137861b59fb2SRichard Henderson 137904e3aabdSPeter Maydell if (r != MEMTX_OK) { 13802d54f194SPeter Maydell hwaddr physaddr = mr_offset + 13812d54f194SPeter Maydell section->offset_within_address_space - 13822d54f194SPeter Maydell section->offset_within_region; 13832d54f194SPeter Maydell 1384be5c4787STony Nguyen cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), access_type, 138525d3ec58SRichard Henderson mmu_idx, full->attrs, r, retaddr); 138604e3aabdSPeter Maydell } 1387d9bb58e5SYang Zhong return val; 1388d9bb58e5SYang Zhong } 1389d9bb58e5SYang Zhong 13902f3a57eeSAlex Bennée /* 139125d3ec58SRichard Henderson * Save a potentially trashed 
CPUTLBEntryFull for later lookup by plugin. 139225d3ec58SRichard Henderson * This is read by tlb_plugin_lookup if the fulltlb entry doesn't match 1393570ef309SAlex Bennée * because of the side effect of io_writex changing memory layout. 13942f3a57eeSAlex Bennée */ 139537523ff7SRichard Henderson static void save_iotlb_data(CPUState *cs, MemoryRegionSection *section, 139637523ff7SRichard Henderson hwaddr mr_offset) 13972f3a57eeSAlex Bennée { 13982f3a57eeSAlex Bennée #ifdef CONFIG_PLUGIN 13992f3a57eeSAlex Bennée SavedIOTLB *saved = &cs->saved_iotlb; 14002f3a57eeSAlex Bennée saved->section = section; 14012f3a57eeSAlex Bennée saved->mr_offset = mr_offset; 14022f3a57eeSAlex Bennée #endif 14032f3a57eeSAlex Bennée } 14042f3a57eeSAlex Bennée 140525d3ec58SRichard Henderson static void io_writex(CPUArchState *env, CPUTLBEntryFull *full, 1406f1be3696SRichard Henderson int mmu_idx, uint64_t val, target_ulong addr, 1407be5c4787STony Nguyen uintptr_t retaddr, MemOp op) 1408d9bb58e5SYang Zhong { 140929a0af61SRichard Henderson CPUState *cpu = env_cpu(env); 14102d54f194SPeter Maydell hwaddr mr_offset; 14112d54f194SPeter Maydell MemoryRegionSection *section; 14122d54f194SPeter Maydell MemoryRegion *mr; 141304e3aabdSPeter Maydell MemTxResult r; 1414d9bb58e5SYang Zhong 141525d3ec58SRichard Henderson section = iotlb_to_section(cpu, full->xlat_section, full->attrs); 14162d54f194SPeter Maydell mr = section->mr; 141725d3ec58SRichard Henderson mr_offset = (full->xlat_section & TARGET_PAGE_MASK) + addr; 141808565552SRichard Henderson if (!cpu->can_do_io) { 1419d9bb58e5SYang Zhong cpu_io_recompile(cpu, retaddr); 1420d9bb58e5SYang Zhong } 1421d9bb58e5SYang Zhong cpu->mem_io_pc = retaddr; 1422d9bb58e5SYang Zhong 14232f3a57eeSAlex Bennée /* 14242f3a57eeSAlex Bennée * The memory_region_dispatch may trigger a flush/resize 14252f3a57eeSAlex Bennée * so for plugins we save the iotlb_data just in case. 14262f3a57eeSAlex Bennée */ 142737523ff7SRichard Henderson save_iotlb_data(cpu, section, mr_offset); 14282f3a57eeSAlex Bennée 142961b59fb2SRichard Henderson { 143061b59fb2SRichard Henderson QEMU_IOTHREAD_LOCK_GUARD(); 143125d3ec58SRichard Henderson r = memory_region_dispatch_write(mr, mr_offset, val, op, full->attrs); 143261b59fb2SRichard Henderson } 143361b59fb2SRichard Henderson 143404e3aabdSPeter Maydell if (r != MEMTX_OK) { 14352d54f194SPeter Maydell hwaddr physaddr = mr_offset + 14362d54f194SPeter Maydell section->offset_within_address_space - 14372d54f194SPeter Maydell section->offset_within_region; 14382d54f194SPeter Maydell 1439be5c4787STony Nguyen cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), 144025d3ec58SRichard Henderson MMU_DATA_STORE, mmu_idx, full->attrs, r, 1441be5c4787STony Nguyen retaddr); 144204e3aabdSPeter Maydell } 1443d9bb58e5SYang Zhong } 1444d9bb58e5SYang Zhong 14454811e909SRichard Henderson static inline target_ulong tlb_read_ofs(CPUTLBEntry *entry, size_t ofs) 14464811e909SRichard Henderson { 14474811e909SRichard Henderson #if TCG_OVERSIZED_GUEST 14484811e909SRichard Henderson return *(target_ulong *)((uintptr_t)entry + ofs); 14494811e909SRichard Henderson #else 1450d73415a3SStefan Hajnoczi /* ofs might correspond to .addr_write, so use qatomic_read */ 1451d73415a3SStefan Hajnoczi return qatomic_read((target_ulong *)((uintptr_t)entry + ofs)); 14524811e909SRichard Henderson #endif 14534811e909SRichard Henderson } 14544811e909SRichard Henderson 1455d9bb58e5SYang Zhong /* Return true if ADDR is present in the victim tlb, and has been copied 1456d9bb58e5SYang Zhong back to the main tlb. 
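 Callers normally go through the VICTIM_TLB_HIT macro defined below,
 e.g. VICTIM_TLB_HIT(addr_write, addr) as used by atomic_mmu_lookup.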
*/ 1457d9bb58e5SYang Zhong static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index, 1458d9bb58e5SYang Zhong size_t elt_ofs, target_ulong page) 1459d9bb58e5SYang Zhong { 1460d9bb58e5SYang Zhong size_t vidx; 146171aec354SEmilio G. Cota 146229a0af61SRichard Henderson assert_cpu_is_self(env_cpu(env)); 1463d9bb58e5SYang Zhong for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) { 1464a40ec84eSRichard Henderson CPUTLBEntry *vtlb = &env_tlb(env)->d[mmu_idx].vtable[vidx]; 1465a40ec84eSRichard Henderson target_ulong cmp; 1466a40ec84eSRichard Henderson 1467d73415a3SStefan Hajnoczi /* elt_ofs might correspond to .addr_write, so use qatomic_read */ 1468a40ec84eSRichard Henderson #if TCG_OVERSIZED_GUEST 1469a40ec84eSRichard Henderson cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs); 1470a40ec84eSRichard Henderson #else 1471d73415a3SStefan Hajnoczi cmp = qatomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs)); 1472a40ec84eSRichard Henderson #endif 1473d9bb58e5SYang Zhong 1474d9bb58e5SYang Zhong if (cmp == page) { 1475d9bb58e5SYang Zhong /* Found entry in victim tlb, swap tlb and iotlb. */ 1476a40ec84eSRichard Henderson CPUTLBEntry tmptlb, *tlb = &env_tlb(env)->f[mmu_idx].table[index]; 1477d9bb58e5SYang Zhong 1478a40ec84eSRichard Henderson qemu_spin_lock(&env_tlb(env)->c.lock); 147971aec354SEmilio G. Cota copy_tlb_helper_locked(&tmptlb, tlb); 148071aec354SEmilio G. Cota copy_tlb_helper_locked(tlb, vtlb); 148171aec354SEmilio G. Cota copy_tlb_helper_locked(vtlb, &tmptlb); 1482a40ec84eSRichard Henderson qemu_spin_unlock(&env_tlb(env)->c.lock); 1483d9bb58e5SYang Zhong 148425d3ec58SRichard Henderson CPUTLBEntryFull *f1 = &env_tlb(env)->d[mmu_idx].fulltlb[index]; 148525d3ec58SRichard Henderson CPUTLBEntryFull *f2 = &env_tlb(env)->d[mmu_idx].vfulltlb[vidx]; 148625d3ec58SRichard Henderson CPUTLBEntryFull tmpf; 148725d3ec58SRichard Henderson tmpf = *f1; *f1 = *f2; *f2 = tmpf; 1488d9bb58e5SYang Zhong return true; 1489d9bb58e5SYang Zhong } 1490d9bb58e5SYang Zhong } 1491d9bb58e5SYang Zhong return false; 1492d9bb58e5SYang Zhong } 1493d9bb58e5SYang Zhong 1494d9bb58e5SYang Zhong /* Macro to call the above, with local variables from the use context. */ 1495d9bb58e5SYang Zhong #define VICTIM_TLB_HIT(TY, ADDR) \ 1496d9bb58e5SYang Zhong victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \ 1497d9bb58e5SYang Zhong (ADDR) & TARGET_PAGE_MASK) 1498d9bb58e5SYang Zhong 1499707526adSRichard Henderson static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size, 150025d3ec58SRichard Henderson CPUTLBEntryFull *full, uintptr_t retaddr) 1501707526adSRichard Henderson { 150225d3ec58SRichard Henderson ram_addr_t ram_addr = mem_vaddr + full->xlat_section; 1503707526adSRichard Henderson 1504707526adSRichard Henderson trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size); 1505707526adSRichard Henderson 1506707526adSRichard Henderson if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) { 1507f349e92eSPhilippe Mathieu-Daudé tb_invalidate_phys_range_fast(ram_addr, size, retaddr); 1508707526adSRichard Henderson } 1509707526adSRichard Henderson 1510707526adSRichard Henderson /* 1511707526adSRichard Henderson * Set both VGA and migration bits for simplicity and to remove 1512707526adSRichard Henderson * the notdirty callback faster. 
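 * (DIRTY_CLIENTS_NOCODE below covers every dirty-memory client except
 * DIRTY_MEMORY_CODE, i.e. the VGA and migration dirty bitmaps.)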
1513707526adSRichard Henderson */ 1514707526adSRichard Henderson cpu_physical_memory_set_dirty_range(ram_addr, size, DIRTY_CLIENTS_NOCODE); 1515707526adSRichard Henderson 1516707526adSRichard Henderson /* We remove the notdirty callback only if the code has been flushed. */ 1517707526adSRichard Henderson if (!cpu_physical_memory_is_clean(ram_addr)) { 1518707526adSRichard Henderson trace_memory_notdirty_set_dirty(mem_vaddr); 1519707526adSRichard Henderson tlb_set_dirty(cpu, mem_vaddr); 1520707526adSRichard Henderson } 1521707526adSRichard Henderson } 1522707526adSRichard Henderson 1523069cfe77SRichard Henderson static int probe_access_internal(CPUArchState *env, target_ulong addr, 1524069cfe77SRichard Henderson int fault_size, MMUAccessType access_type, 1525069cfe77SRichard Henderson int mmu_idx, bool nonfault, 1526af803a4fSRichard Henderson void **phost, CPUTLBEntryFull **pfull, 1527af803a4fSRichard Henderson uintptr_t retaddr) 1528d9bb58e5SYang Zhong { 1529383beda9SRichard Henderson uintptr_t index = tlb_index(env, mmu_idx, addr); 1530383beda9SRichard Henderson CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); 1531069cfe77SRichard Henderson target_ulong tlb_addr, page_addr; 1532c25c283dSDavid Hildenbrand size_t elt_ofs; 1533069cfe77SRichard Henderson int flags; 1534ca86cf32SDavid Hildenbrand 1535c25c283dSDavid Hildenbrand switch (access_type) { 1536c25c283dSDavid Hildenbrand case MMU_DATA_LOAD: 1537c25c283dSDavid Hildenbrand elt_ofs = offsetof(CPUTLBEntry, addr_read); 1538c25c283dSDavid Hildenbrand break; 1539c25c283dSDavid Hildenbrand case MMU_DATA_STORE: 1540c25c283dSDavid Hildenbrand elt_ofs = offsetof(CPUTLBEntry, addr_write); 1541c25c283dSDavid Hildenbrand break; 1542c25c283dSDavid Hildenbrand case MMU_INST_FETCH: 1543c25c283dSDavid Hildenbrand elt_ofs = offsetof(CPUTLBEntry, addr_code); 1544c25c283dSDavid Hildenbrand break; 1545c25c283dSDavid Hildenbrand default: 1546c25c283dSDavid Hildenbrand g_assert_not_reached(); 1547c25c283dSDavid Hildenbrand } 1548c25c283dSDavid Hildenbrand tlb_addr = tlb_read_ofs(entry, elt_ofs); 1549c25c283dSDavid Hildenbrand 1550c3c8bf57SRichard Henderson flags = TLB_FLAGS_MASK; 1551069cfe77SRichard Henderson page_addr = addr & TARGET_PAGE_MASK; 1552069cfe77SRichard Henderson if (!tlb_hit_page(tlb_addr, page_addr)) { 1553069cfe77SRichard Henderson if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, page_addr)) { 1554069cfe77SRichard Henderson CPUState *cs = env_cpu(env); 1555069cfe77SRichard Henderson 15568810ee2aSAlex Bennée if (!cs->cc->tcg_ops->tlb_fill(cs, addr, fault_size, access_type, 1557069cfe77SRichard Henderson mmu_idx, nonfault, retaddr)) { 1558069cfe77SRichard Henderson /* Non-faulting page table read failed. */ 1559069cfe77SRichard Henderson *phost = NULL; 1560af803a4fSRichard Henderson *pfull = NULL; 1561069cfe77SRichard Henderson return TLB_INVALID_MASK; 1562069cfe77SRichard Henderson } 1563069cfe77SRichard Henderson 156403a98189SDavid Hildenbrand /* TLB resize via tlb_fill may have moved the entry. */ 1565af803a4fSRichard Henderson index = tlb_index(env, mmu_idx, addr); 156603a98189SDavid Hildenbrand entry = tlb_entry(env, mmu_idx, addr); 1567c3c8bf57SRichard Henderson 1568c3c8bf57SRichard Henderson /* 1569c3c8bf57SRichard Henderson * With PAGE_WRITE_INV, we set TLB_INVALID_MASK immediately, 1570c3c8bf57SRichard Henderson * to force the next access through tlb_fill. We've just 1571c3c8bf57SRichard Henderson * called tlb_fill, so we know that this entry *is* valid. 
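 * Clearing TLB_INVALID_MASK from the flags below therefore cannot hide
 * a stale entry; the surviving flag bits are still and-ed with the
 * freshly re-read tlb_addr.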
1572c3c8bf57SRichard Henderson */ 1573c3c8bf57SRichard Henderson flags &= ~TLB_INVALID_MASK; 1574d9bb58e5SYang Zhong } 1575c25c283dSDavid Hildenbrand tlb_addr = tlb_read_ofs(entry, elt_ofs); 157603a98189SDavid Hildenbrand } 1577c3c8bf57SRichard Henderson flags &= tlb_addr; 157803a98189SDavid Hildenbrand 1579af803a4fSRichard Henderson *pfull = &env_tlb(env)->d[mmu_idx].fulltlb[index]; 1580af803a4fSRichard Henderson 1581069cfe77SRichard Henderson /* Fold all "mmio-like" bits into TLB_MMIO. This is not RAM. */ 1582069cfe77SRichard Henderson if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY))) { 1583069cfe77SRichard Henderson *phost = NULL; 1584069cfe77SRichard Henderson return TLB_MMIO; 1585fef39ccdSDavid Hildenbrand } 1586fef39ccdSDavid Hildenbrand 1587069cfe77SRichard Henderson /* Everything else is RAM. */ 1588069cfe77SRichard Henderson *phost = (void *)((uintptr_t)addr + entry->addend); 1589069cfe77SRichard Henderson return flags; 1590069cfe77SRichard Henderson } 1591069cfe77SRichard Henderson 1592af803a4fSRichard Henderson int probe_access_full(CPUArchState *env, target_ulong addr, 1593069cfe77SRichard Henderson MMUAccessType access_type, int mmu_idx, 1594af803a4fSRichard Henderson bool nonfault, void **phost, CPUTLBEntryFull **pfull, 1595af803a4fSRichard Henderson uintptr_t retaddr) 1596069cfe77SRichard Henderson { 1597af803a4fSRichard Henderson int flags = probe_access_internal(env, addr, 0, access_type, mmu_idx, 1598af803a4fSRichard Henderson nonfault, phost, pfull, retaddr); 1599069cfe77SRichard Henderson 1600069cfe77SRichard Henderson /* Handle clean RAM pages. */ 1601069cfe77SRichard Henderson if (unlikely(flags & TLB_NOTDIRTY)) { 1602af803a4fSRichard Henderson notdirty_write(env_cpu(env), addr, 1, *pfull, retaddr); 1603069cfe77SRichard Henderson flags &= ~TLB_NOTDIRTY; 1604069cfe77SRichard Henderson } 1605069cfe77SRichard Henderson 1606069cfe77SRichard Henderson return flags; 1607069cfe77SRichard Henderson } 1608069cfe77SRichard Henderson 1609af803a4fSRichard Henderson int probe_access_flags(CPUArchState *env, target_ulong addr, 1610af803a4fSRichard Henderson MMUAccessType access_type, int mmu_idx, 1611af803a4fSRichard Henderson bool nonfault, void **phost, uintptr_t retaddr) 1612af803a4fSRichard Henderson { 1613af803a4fSRichard Henderson CPUTLBEntryFull *full; 1614af803a4fSRichard Henderson 1615af803a4fSRichard Henderson return probe_access_full(env, addr, access_type, mmu_idx, 1616af803a4fSRichard Henderson nonfault, phost, &full, retaddr); 1617af803a4fSRichard Henderson } 1618af803a4fSRichard Henderson 1619069cfe77SRichard Henderson void *probe_access(CPUArchState *env, target_ulong addr, int size, 1620069cfe77SRichard Henderson MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) 1621069cfe77SRichard Henderson { 1622af803a4fSRichard Henderson CPUTLBEntryFull *full; 1623069cfe77SRichard Henderson void *host; 1624069cfe77SRichard Henderson int flags; 1625069cfe77SRichard Henderson 1626069cfe77SRichard Henderson g_assert(-(addr | TARGET_PAGE_MASK) >= size); 1627069cfe77SRichard Henderson 1628069cfe77SRichard Henderson flags = probe_access_internal(env, addr, size, access_type, mmu_idx, 1629af803a4fSRichard Henderson false, &host, &full, retaddr); 1630069cfe77SRichard Henderson 1631069cfe77SRichard Henderson /* Per the interface, size == 0 merely faults the access. 
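 For example (illustrative): probe_access(env, addr, 0, MMU_DATA_STORE,
 mmu_idx, ra) checks that the page is writable, raising the guest
 exception if it is not, while returning no usable host pointer.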
*/ 1632069cfe77SRichard Henderson if (size == 0) { 163373bc0bd4SRichard Henderson return NULL; 163473bc0bd4SRichard Henderson } 163573bc0bd4SRichard Henderson 1636069cfe77SRichard Henderson if (unlikely(flags & (TLB_NOTDIRTY | TLB_WATCHPOINT))) { 163703a98189SDavid Hildenbrand /* Handle watchpoints. */ 1638069cfe77SRichard Henderson if (flags & TLB_WATCHPOINT) { 1639069cfe77SRichard Henderson int wp_access = (access_type == MMU_DATA_STORE 1640069cfe77SRichard Henderson ? BP_MEM_WRITE : BP_MEM_READ); 164103a98189SDavid Hildenbrand cpu_check_watchpoint(env_cpu(env), addr, size, 164225d3ec58SRichard Henderson full->attrs, wp_access, retaddr); 1643d9bb58e5SYang Zhong } 1644fef39ccdSDavid Hildenbrand 164573bc0bd4SRichard Henderson /* Handle clean RAM pages. */ 1646069cfe77SRichard Henderson if (flags & TLB_NOTDIRTY) { 164725d3ec58SRichard Henderson notdirty_write(env_cpu(env), addr, 1, full, retaddr); 164873bc0bd4SRichard Henderson } 1649fef39ccdSDavid Hildenbrand } 1650fef39ccdSDavid Hildenbrand 1651069cfe77SRichard Henderson return host; 1652d9bb58e5SYang Zhong } 1653d9bb58e5SYang Zhong 16544811e909SRichard Henderson void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr, 16554811e909SRichard Henderson MMUAccessType access_type, int mmu_idx) 16564811e909SRichard Henderson { 1657af803a4fSRichard Henderson CPUTLBEntryFull *full; 1658069cfe77SRichard Henderson void *host; 1659069cfe77SRichard Henderson int flags; 16604811e909SRichard Henderson 1661069cfe77SRichard Henderson flags = probe_access_internal(env, addr, 0, access_type, 1662af803a4fSRichard Henderson mmu_idx, true, &host, &full, 0); 1663069cfe77SRichard Henderson 1664069cfe77SRichard Henderson /* No combination of flags is expected by the caller. */ 1665069cfe77SRichard Henderson return flags ? NULL : host; 16664811e909SRichard Henderson } 16674811e909SRichard Henderson 16687e0d9973SRichard Henderson /* 16697e0d9973SRichard Henderson * Return a ram_addr_t for the virtual address for execution. 16707e0d9973SRichard Henderson * 16717e0d9973SRichard Henderson * Return -1 if we can't translate and execute from an entire page 16727e0d9973SRichard Henderson * of RAM. This will force us to execute by loading and translating 16737e0d9973SRichard Henderson * one insn at a time, without caching. 16747e0d9973SRichard Henderson * 16757e0d9973SRichard Henderson * NOTE: This function will trigger an exception if the page is 16767e0d9973SRichard Henderson * not executable. 16777e0d9973SRichard Henderson */ 16787e0d9973SRichard Henderson tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr, 16797e0d9973SRichard Henderson void **hostp) 16807e0d9973SRichard Henderson { 1681af803a4fSRichard Henderson CPUTLBEntryFull *full; 16827e0d9973SRichard Henderson void *p; 16837e0d9973SRichard Henderson 16847e0d9973SRichard Henderson (void)probe_access_internal(env, addr, 1, MMU_INST_FETCH, 1685af803a4fSRichard Henderson cpu_mmu_index(env, true), false, &p, &full, 0); 16867e0d9973SRichard Henderson if (p == NULL) { 16877e0d9973SRichard Henderson return -1; 16887e0d9973SRichard Henderson } 16897e0d9973SRichard Henderson if (hostp) { 16907e0d9973SRichard Henderson *hostp = p; 16917e0d9973SRichard Henderson } 16927e0d9973SRichard Henderson return qemu_ram_addr_from_host_nofail(p); 16937e0d9973SRichard Henderson } 16947e0d9973SRichard Henderson 1695235537faSAlex Bennée #ifdef CONFIG_PLUGIN 1696235537faSAlex Bennée /* 1697235537faSAlex Bennée * Perform a TLB lookup and populate the qemu_plugin_hwaddr structure.
1698235537faSAlex Bennée * This should be a hot path as we will have just looked this path up 1699235537faSAlex Bennée * in the softmmu lookup code (or helper). We don't handle re-fills or 1700235537faSAlex Bennée * check the victim table. This is purely informational. 1701235537faSAlex Bennée * 17022f3a57eeSAlex Bennée * This almost never fails as the memory access being instrumented 17032f3a57eeSAlex Bennée * should have just filled the TLB. The one corner case is io_writex 17042f3a57eeSAlex Bennée * which can cause TLB flushes and potential resizing of the TLBs, 1705570ef309SAlex Bennée * losing the information we need. In those cases we need to recover 170625d3ec58SRichard Henderson * data from a copy of the CPUTLBEntryFull. As long as this always occurs 1707570ef309SAlex Bennée * from the same thread (which a mem callback will be), this is safe. 1708235537faSAlex Bennée */ 1709235537faSAlex Bennée 1710235537faSAlex Bennée bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr, int mmu_idx, 1711235537faSAlex Bennée bool is_store, struct qemu_plugin_hwaddr *data) 1712235537faSAlex Bennée { 1713235537faSAlex Bennée CPUArchState *env = cpu->env_ptr; 1714235537faSAlex Bennée CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr); 1715235537faSAlex Bennée uintptr_t index = tlb_index(env, mmu_idx, addr); 1716235537faSAlex Bennée target_ulong tlb_addr = is_store ? tlb_addr_write(tlbe) : tlbe->addr_read; 1717235537faSAlex Bennée 1718235537faSAlex Bennée if (likely(tlb_hit(tlb_addr, addr))) { 1719235537faSAlex Bennée /* We must have an iotlb entry for MMIO */ 1720235537faSAlex Bennée if (tlb_addr & TLB_MMIO) { 172125d3ec58SRichard Henderson CPUTLBEntryFull *full; 172225d3ec58SRichard Henderson full = &env_tlb(env)->d[mmu_idx].fulltlb[index]; 1723235537faSAlex Bennée data->is_io = true; 172425d3ec58SRichard Henderson data->v.io.section = 172525d3ec58SRichard Henderson iotlb_to_section(cpu, full->xlat_section, full->attrs); 172625d3ec58SRichard Henderson data->v.io.offset = (full->xlat_section & TARGET_PAGE_MASK) + addr; 1727235537faSAlex Bennée } else { 1728235537faSAlex Bennée data->is_io = false; 17292d932039SAlex Bennée data->v.ram.hostaddr = (void *)((uintptr_t)addr + tlbe->addend); 1730235537faSAlex Bennée } 1731235537faSAlex Bennée return true; 17322f3a57eeSAlex Bennée } else { 17332f3a57eeSAlex Bennée SavedIOTLB *saved = &cpu->saved_iotlb; 17342f3a57eeSAlex Bennée data->is_io = true; 17352f3a57eeSAlex Bennée data->v.io.section = saved->section; 17362f3a57eeSAlex Bennée data->v.io.offset = saved->mr_offset; 17372f3a57eeSAlex Bennée return true; 1738235537faSAlex Bennée } 1739235537faSAlex Bennée } 1740235537faSAlex Bennée 1741235537faSAlex Bennée #endif 1742235537faSAlex Bennée 174308dff435SRichard Henderson /* 174408dff435SRichard Henderson * Probe for an atomic operation. Do not allow unaligned operations, 174508dff435SRichard Henderson * or I/O operations to proceed. Return the host address. 174608dff435SRichard Henderson * 174708dff435SRichard Henderson * @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE.
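 * For example (illustrative of the lookup macros in atomic_template.h),
 * a cmpxchg helper probes with PAGE_READ | PAGE_WRITE, while a plain
 * atomic store probes with PAGE_WRITE alone.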
174808dff435SRichard Henderson */ 1749d9bb58e5SYang Zhong static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, 17509002ffcbSRichard Henderson MemOpIdx oi, int size, int prot, 175108dff435SRichard Henderson uintptr_t retaddr) 1752d9bb58e5SYang Zhong { 1753b826044fSRichard Henderson uintptr_t mmu_idx = get_mmuidx(oi); 175414776ab5STony Nguyen MemOp mop = get_memop(oi); 1755d9bb58e5SYang Zhong int a_bits = get_alignment_bits(mop); 175608dff435SRichard Henderson uintptr_t index; 175708dff435SRichard Henderson CPUTLBEntry *tlbe; 175808dff435SRichard Henderson target_ulong tlb_addr; 175934d49937SPeter Maydell void *hostaddr; 1760d9bb58e5SYang Zhong 1761b826044fSRichard Henderson tcg_debug_assert(mmu_idx < NB_MMU_MODES); 1762b826044fSRichard Henderson 1763d9bb58e5SYang Zhong /* Adjust the given return address. */ 1764d9bb58e5SYang Zhong retaddr -= GETPC_ADJ; 1765d9bb58e5SYang Zhong 1766d9bb58e5SYang Zhong /* Enforce guest required alignment. */ 1767d9bb58e5SYang Zhong if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) { 1768d9bb58e5SYang Zhong /* ??? Maybe indicate atomic op to cpu_unaligned_access */ 176929a0af61SRichard Henderson cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE, 1770d9bb58e5SYang Zhong mmu_idx, retaddr); 1771d9bb58e5SYang Zhong } 1772d9bb58e5SYang Zhong 1773d9bb58e5SYang Zhong /* Enforce qemu required alignment. */ 177408dff435SRichard Henderson if (unlikely(addr & (size - 1))) { 1775d9bb58e5SYang Zhong /* We get here if guest alignment was not requested, 1776d9bb58e5SYang Zhong or was not enforced by cpu_unaligned_access above. 1777d9bb58e5SYang Zhong We might widen the access and emulate, but for now 1778d9bb58e5SYang Zhong mark an exception and exit the cpu loop. */ 1779d9bb58e5SYang Zhong goto stop_the_world; 1780d9bb58e5SYang Zhong } 1781d9bb58e5SYang Zhong 178208dff435SRichard Henderson index = tlb_index(env, mmu_idx, addr); 178308dff435SRichard Henderson tlbe = tlb_entry(env, mmu_idx, addr); 178408dff435SRichard Henderson 1785d9bb58e5SYang Zhong /* Check TLB entry and enforce page permissions. */ 178608dff435SRichard Henderson if (prot & PAGE_WRITE) { 178708dff435SRichard Henderson tlb_addr = tlb_addr_write(tlbe); 1788334692bcSPeter Maydell if (!tlb_hit(tlb_addr, addr)) { 1789d9bb58e5SYang Zhong if (!VICTIM_TLB_HIT(addr_write, addr)) { 179008dff435SRichard Henderson tlb_fill(env_cpu(env), addr, size, 179108dff435SRichard Henderson MMU_DATA_STORE, mmu_idx, retaddr); 17926d967cb8SEmilio G. Cota index = tlb_index(env, mmu_idx, addr); 17936d967cb8SEmilio G. Cota tlbe = tlb_entry(env, mmu_idx, addr); 1794d9bb58e5SYang Zhong } 1795403f290cSEmilio G. Cota tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK; 1796d9bb58e5SYang Zhong } 1797d9bb58e5SYang Zhong 179808dff435SRichard Henderson /* Let the guest notice RMW on a write-only page. */ 179908dff435SRichard Henderson if ((prot & PAGE_READ) && 180008dff435SRichard Henderson unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) { 180108dff435SRichard Henderson tlb_fill(env_cpu(env), addr, size, 180208dff435SRichard Henderson MMU_DATA_LOAD, mmu_idx, retaddr); 180308dff435SRichard Henderson /* 180408dff435SRichard Henderson * Since we don't support reads and writes to different addresses, 180508dff435SRichard Henderson * and we do have the proper page loaded for write, this shouldn't 180608dff435SRichard Henderson * ever return. But just in case, handle via stop-the-world. 
180708dff435SRichard Henderson */ 180808dff435SRichard Henderson goto stop_the_world; 180908dff435SRichard Henderson } 181008dff435SRichard Henderson } else /* if (prot & PAGE_READ) */ { 181108dff435SRichard Henderson tlb_addr = tlbe->addr_read; 181208dff435SRichard Henderson if (!tlb_hit(tlb_addr, addr)) { 181308dff435SRichard Henderson if (!VICTIM_TLB_HIT(addr_read, addr)) { 181408dff435SRichard Henderson tlb_fill(env_cpu(env), addr, size, 181508dff435SRichard Henderson MMU_DATA_LOAD, mmu_idx, retaddr); 181608dff435SRichard Henderson index = tlb_index(env, mmu_idx, addr); 181708dff435SRichard Henderson tlbe = tlb_entry(env, mmu_idx, addr); 181808dff435SRichard Henderson } 181908dff435SRichard Henderson tlb_addr = tlbe->addr_read & ~TLB_INVALID_MASK; 182008dff435SRichard Henderson } 182108dff435SRichard Henderson } 182208dff435SRichard Henderson 182355df6fcfSPeter Maydell /* Notice an IO access or a needs-MMU-lookup access */ 182430d7e098SRichard Henderson if (unlikely(tlb_addr & TLB_MMIO)) { 1825d9bb58e5SYang Zhong /* There's really nothing that can be done to 1826d9bb58e5SYang Zhong support this apart from stop-the-world. */ 1827d9bb58e5SYang Zhong goto stop_the_world; 1828d9bb58e5SYang Zhong } 1829d9bb58e5SYang Zhong 183034d49937SPeter Maydell hostaddr = (void *)((uintptr_t)addr + tlbe->addend); 183134d49937SPeter Maydell 183234d49937SPeter Maydell if (unlikely(tlb_addr & TLB_NOTDIRTY)) { 183308dff435SRichard Henderson notdirty_write(env_cpu(env), addr, size, 183425d3ec58SRichard Henderson &env_tlb(env)->d[mmu_idx].fulltlb[index], retaddr); 183534d49937SPeter Maydell } 183634d49937SPeter Maydell 183734d49937SPeter Maydell return hostaddr; 1838d9bb58e5SYang Zhong 1839d9bb58e5SYang Zhong stop_the_world: 184029a0af61SRichard Henderson cpu_loop_exit_atomic(env_cpu(env), retaddr); 1841d9bb58e5SYang Zhong } 1842d9bb58e5SYang Zhong 1843eed56642SAlex Bennée /* 1844f83bcecbSRichard Henderson * Verify that we have passed the correct MemOp to the correct function. 1845f83bcecbSRichard Henderson * 1846f83bcecbSRichard Henderson * In the case of the helper_*_mmu functions, we will have done this by 1847f83bcecbSRichard Henderson * using the MemOp to look up the helper during code generation. 1848f83bcecbSRichard Henderson * 1849f83bcecbSRichard Henderson * In the case of the cpu_*_mmu functions, this is up to the caller. 1850f83bcecbSRichard Henderson * We could present one function to target code, and dispatch based on 1851f83bcecbSRichard Henderson * the MemOp, but so far we have worked hard to avoid an indirect function 1852f83bcecbSRichard Henderson * call along the memory path. 1853f83bcecbSRichard Henderson */ 1854f83bcecbSRichard Henderson static void validate_memop(MemOpIdx oi, MemOp expected) 1855f83bcecbSRichard Henderson { 1856f83bcecbSRichard Henderson #ifdef CONFIG_DEBUG_TCG 1857f83bcecbSRichard Henderson MemOp have = get_memop(oi) & (MO_SIZE | MO_BSWAP); 1858f83bcecbSRichard Henderson assert(have == expected); 1859f83bcecbSRichard Henderson #endif 1860f83bcecbSRichard Henderson } 1861f83bcecbSRichard Henderson 1862f83bcecbSRichard Henderson /* 1863eed56642SAlex Bennée * Load Helpers 1864eed56642SAlex Bennée * 1865eed56642SAlex Bennée * We support two different access types. SOFTMMU_CODE_ACCESS is 1866eed56642SAlex Bennée * specifically for reading instructions from system memory. It is 1867eed56642SAlex Bennée * called by the translation loop and in some helpers where the code 1868eed56642SAlex Bennée * is disassembled. It shouldn't be called directly by guest code.
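 * A MemOpIdx packs a MemOp together with the mmu_idx, so a direct call
 * looks like (illustrative):
 *
 *     MemOpIdx oi = make_memop_idx(MO_LEUL, mmu_idx);
 *     uint32_t val = helper_le_ldul_mmu(env, addr, oi, GETPC());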
1869eed56642SAlex Bennée */ 1870d9bb58e5SYang Zhong 18712dd92606SRichard Henderson typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr, 18729002ffcbSRichard Henderson MemOpIdx oi, uintptr_t retaddr); 18732dd92606SRichard Henderson 1874c6b716cdSRichard Henderson static inline uint64_t QEMU_ALWAYS_INLINE 187580d9d1c6SRichard Henderson load_memop(const void *haddr, MemOp op) 187680d9d1c6SRichard Henderson { 187780d9d1c6SRichard Henderson switch (op) { 187880d9d1c6SRichard Henderson case MO_UB: 187980d9d1c6SRichard Henderson return ldub_p(haddr); 188080d9d1c6SRichard Henderson case MO_BEUW: 188180d9d1c6SRichard Henderson return lduw_be_p(haddr); 188280d9d1c6SRichard Henderson case MO_LEUW: 188380d9d1c6SRichard Henderson return lduw_le_p(haddr); 188480d9d1c6SRichard Henderson case MO_BEUL: 188580d9d1c6SRichard Henderson return (uint32_t)ldl_be_p(haddr); 188680d9d1c6SRichard Henderson case MO_LEUL: 188780d9d1c6SRichard Henderson return (uint32_t)ldl_le_p(haddr); 1888fc313c64SFrédéric Pétrot case MO_BEUQ: 188980d9d1c6SRichard Henderson return ldq_be_p(haddr); 1890fc313c64SFrédéric Pétrot case MO_LEUQ: 189180d9d1c6SRichard Henderson return ldq_le_p(haddr); 189280d9d1c6SRichard Henderson default: 189380d9d1c6SRichard Henderson qemu_build_not_reached(); 189480d9d1c6SRichard Henderson } 189580d9d1c6SRichard Henderson } 189680d9d1c6SRichard Henderson 189780d9d1c6SRichard Henderson static inline uint64_t QEMU_ALWAYS_INLINE 18989002ffcbSRichard Henderson load_helper(CPUArchState *env, target_ulong addr, MemOpIdx oi, 1899be5c4787STony Nguyen uintptr_t retaddr, MemOp op, bool code_read, 19002dd92606SRichard Henderson FullLoadHelper *full_load) 1901eed56642SAlex Bennée { 1902eed56642SAlex Bennée const size_t tlb_off = code_read ? 1903eed56642SAlex Bennée offsetof(CPUTLBEntry, addr_code) : offsetof(CPUTLBEntry, addr_read); 1904f1be3696SRichard Henderson const MMUAccessType access_type = 1905f1be3696SRichard Henderson code_read ? MMU_INST_FETCH : MMU_DATA_LOAD; 1906b826044fSRichard Henderson const unsigned a_bits = get_alignment_bits(get_memop(oi)); 1907b826044fSRichard Henderson const size_t size = memop_size(op); 1908b826044fSRichard Henderson uintptr_t mmu_idx = get_mmuidx(oi); 1909b826044fSRichard Henderson uintptr_t index; 1910b826044fSRichard Henderson CPUTLBEntry *entry; 1911b826044fSRichard Henderson target_ulong tlb_addr; 1912eed56642SAlex Bennée void *haddr; 1913eed56642SAlex Bennée uint64_t res; 1914b826044fSRichard Henderson 1915b826044fSRichard Henderson tcg_debug_assert(mmu_idx < NB_MMU_MODES); 1916d9bb58e5SYang Zhong 1917eed56642SAlex Bennée /* Handle CPU specific unaligned behaviour */ 1918eed56642SAlex Bennée if (addr & ((1 << a_bits) - 1)) { 191929a0af61SRichard Henderson cpu_unaligned_access(env_cpu(env), addr, access_type, 1920eed56642SAlex Bennée mmu_idx, retaddr); 1921eed56642SAlex Bennée } 1922eed56642SAlex Bennée 1923b826044fSRichard Henderson index = tlb_index(env, mmu_idx, addr); 1924b826044fSRichard Henderson entry = tlb_entry(env, mmu_idx, addr); 1925b826044fSRichard Henderson tlb_addr = code_read ? entry->addr_code : entry->addr_read; 1926b826044fSRichard Henderson 1927eed56642SAlex Bennée /* If the TLB entry is for a different page, reload and try again. 
*/ 1928eed56642SAlex Bennée if (!tlb_hit(tlb_addr, addr)) { 1929eed56642SAlex Bennée if (!victim_tlb_hit(env, mmu_idx, index, tlb_off, 1930eed56642SAlex Bennée addr & TARGET_PAGE_MASK)) { 193129a0af61SRichard Henderson tlb_fill(env_cpu(env), addr, size, 1932f1be3696SRichard Henderson access_type, mmu_idx, retaddr); 1933eed56642SAlex Bennée index = tlb_index(env, mmu_idx, addr); 1934eed56642SAlex Bennée entry = tlb_entry(env, mmu_idx, addr); 1935eed56642SAlex Bennée } 1936eed56642SAlex Bennée tlb_addr = code_read ? entry->addr_code : entry->addr_read; 193730d7e098SRichard Henderson tlb_addr &= ~TLB_INVALID_MASK; 1938eed56642SAlex Bennée } 1939eed56642SAlex Bennée 194050b107c5SRichard Henderson /* Handle anything that isn't just a straight memory access. */ 1941eed56642SAlex Bennée if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) { 194225d3ec58SRichard Henderson CPUTLBEntryFull *full; 19435b87b3e6SRichard Henderson bool need_swap; 194450b107c5SRichard Henderson 194550b107c5SRichard Henderson /* For anything that is unaligned, recurse through full_load. */ 1946eed56642SAlex Bennée if ((addr & (size - 1)) != 0) { 1947eed56642SAlex Bennée goto do_unaligned_access; 1948eed56642SAlex Bennée } 194950b107c5SRichard Henderson 195025d3ec58SRichard Henderson full = &env_tlb(env)->d[mmu_idx].fulltlb[index]; 195150b107c5SRichard Henderson 195250b107c5SRichard Henderson /* Handle watchpoints. */ 195350b107c5SRichard Henderson if (unlikely(tlb_addr & TLB_WATCHPOINT)) { 195450b107c5SRichard Henderson /* On watchpoint hit, this will longjmp out. */ 195550b107c5SRichard Henderson cpu_check_watchpoint(env_cpu(env), addr, size, 195625d3ec58SRichard Henderson full->attrs, BP_MEM_READ, retaddr); 19575b87b3e6SRichard Henderson } 195850b107c5SRichard Henderson 19595b87b3e6SRichard Henderson need_swap = size > 1 && (tlb_addr & TLB_BSWAP); 196050b107c5SRichard Henderson 196150b107c5SRichard Henderson /* Handle I/O access. */ 19625b87b3e6SRichard Henderson if (likely(tlb_addr & TLB_MMIO)) { 196325d3ec58SRichard Henderson return io_readx(env, full, mmu_idx, addr, retaddr, 19645b87b3e6SRichard Henderson access_type, op ^ (need_swap * MO_BSWAP)); 19655b87b3e6SRichard Henderson } 19665b87b3e6SRichard Henderson 19675b87b3e6SRichard Henderson haddr = (void *)((uintptr_t)addr + entry->addend); 19685b87b3e6SRichard Henderson 19695b87b3e6SRichard Henderson /* 19705b87b3e6SRichard Henderson * Keep these two load_memop separate to ensure that the compiler 19715b87b3e6SRichard Henderson * is able to fold the entire function to a single instruction. 19725b87b3e6SRichard Henderson * There is a build-time assert inside to remind you of this. ;-) 19735b87b3e6SRichard Henderson */ 19745b87b3e6SRichard Henderson if (unlikely(need_swap)) { 19755b87b3e6SRichard Henderson return load_memop(haddr, op ^ MO_BSWAP); 19765b87b3e6SRichard Henderson } 19775b87b3e6SRichard Henderson return load_memop(haddr, op); 1978eed56642SAlex Bennée } 1979eed56642SAlex Bennée 1980eed56642SAlex Bennée /* Handle slow unaligned access (it spans two pages or IO). 
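 Worked example: a 4-byte little-endian load taking this path with
 (addr & 3) == 2 computes addr1 = addr & ~3, addr2 = addr1 + 4 and
 shift = 16, so the result is (r1 >> 16) | (r2 << 16), masked to 32 bits.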
*/ 1981eed56642SAlex Bennée if (size > 1 1982eed56642SAlex Bennée && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1 1983eed56642SAlex Bennée >= TARGET_PAGE_SIZE)) { 1984eed56642SAlex Bennée target_ulong addr1, addr2; 19858c79b288SAlex Bennée uint64_t r1, r2; 1986eed56642SAlex Bennée unsigned shift; 1987eed56642SAlex Bennée do_unaligned_access: 1988ab7a2009SAlex Bennée addr1 = addr & ~((target_ulong)size - 1); 1989eed56642SAlex Bennée addr2 = addr1 + size; 19902dd92606SRichard Henderson r1 = full_load(env, addr1, oi, retaddr); 19912dd92606SRichard Henderson r2 = full_load(env, addr2, oi, retaddr); 1992eed56642SAlex Bennée shift = (addr & (size - 1)) * 8; 1993eed56642SAlex Bennée 1994be5c4787STony Nguyen if (memop_big_endian(op)) { 1995eed56642SAlex Bennée /* Big-endian combine. */ 1996eed56642SAlex Bennée res = (r1 << shift) | (r2 >> ((size * 8) - shift)); 1997eed56642SAlex Bennée } else { 1998eed56642SAlex Bennée /* Little-endian combine. */ 1999eed56642SAlex Bennée res = (r1 >> shift) | (r2 << ((size * 8) - shift)); 2000eed56642SAlex Bennée } 2001eed56642SAlex Bennée return res & MAKE_64BIT_MASK(0, size * 8); 2002eed56642SAlex Bennée } 2003eed56642SAlex Bennée 2004eed56642SAlex Bennée haddr = (void *)((uintptr_t)addr + entry->addend); 200580d9d1c6SRichard Henderson return load_memop(haddr, op); 2006eed56642SAlex Bennée } 2007eed56642SAlex Bennée 2008eed56642SAlex Bennée /* 2009eed56642SAlex Bennée * For the benefit of TCG generated code, we want to avoid the 2010eed56642SAlex Bennée * complication of ABI-specific return type promotion and always 2011eed56642SAlex Bennée * return a value extended to the register size of the host. This is 2012eed56642SAlex Bennée * tcg_target_long, except in the case of a 32-bit host and 64-bit 2013eed56642SAlex Bennée * data, and for that we always have uint64_t. 2014eed56642SAlex Bennée * 2015eed56642SAlex Bennée * We don't bother with this widened value for SOFTMMU_CODE_ACCESS. 
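 * For example, helper_le_lduw_mmu returns its 16-bit result zero-extended
 * in a tcg_target_ulong, while helper_le_ldsw_mmu below returns the same
 * value sign-extended.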
2016eed56642SAlex Bennée */ 2017eed56642SAlex Bennée 20182dd92606SRichard Henderson static uint64_t full_ldub_mmu(CPUArchState *env, target_ulong addr, 20199002ffcbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 20202dd92606SRichard Henderson { 2021f83bcecbSRichard Henderson validate_memop(oi, MO_UB); 2022be5c4787STony Nguyen return load_helper(env, addr, oi, retaddr, MO_UB, false, full_ldub_mmu); 20232dd92606SRichard Henderson } 20242dd92606SRichard Henderson 2025fc1bc777SRichard Henderson tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr, 20269002ffcbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 2027eed56642SAlex Bennée { 20282dd92606SRichard Henderson return full_ldub_mmu(env, addr, oi, retaddr); 20292dd92606SRichard Henderson } 20302dd92606SRichard Henderson 20312dd92606SRichard Henderson static uint64_t full_le_lduw_mmu(CPUArchState *env, target_ulong addr, 20329002ffcbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 20332dd92606SRichard Henderson { 2034f83bcecbSRichard Henderson validate_memop(oi, MO_LEUW); 2035be5c4787STony Nguyen return load_helper(env, addr, oi, retaddr, MO_LEUW, false, 20362dd92606SRichard Henderson full_le_lduw_mmu); 2037eed56642SAlex Bennée } 2038eed56642SAlex Bennée 2039fc1bc777SRichard Henderson tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr, 20409002ffcbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 2041eed56642SAlex Bennée { 20422dd92606SRichard Henderson return full_le_lduw_mmu(env, addr, oi, retaddr); 20432dd92606SRichard Henderson } 20442dd92606SRichard Henderson 20452dd92606SRichard Henderson static uint64_t full_be_lduw_mmu(CPUArchState *env, target_ulong addr, 20469002ffcbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 20472dd92606SRichard Henderson { 2048f83bcecbSRichard Henderson validate_memop(oi, MO_BEUW); 2049be5c4787STony Nguyen return load_helper(env, addr, oi, retaddr, MO_BEUW, false, 20502dd92606SRichard Henderson full_be_lduw_mmu); 2051eed56642SAlex Bennée } 2052eed56642SAlex Bennée 2053fc1bc777SRichard Henderson tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr, 20549002ffcbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 2055eed56642SAlex Bennée { 20562dd92606SRichard Henderson return full_be_lduw_mmu(env, addr, oi, retaddr); 20572dd92606SRichard Henderson } 20582dd92606SRichard Henderson 20592dd92606SRichard Henderson static uint64_t full_le_ldul_mmu(CPUArchState *env, target_ulong addr, 20609002ffcbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 20612dd92606SRichard Henderson { 2062f83bcecbSRichard Henderson validate_memop(oi, MO_LEUL); 2063be5c4787STony Nguyen return load_helper(env, addr, oi, retaddr, MO_LEUL, false, 20642dd92606SRichard Henderson full_le_ldul_mmu); 2065eed56642SAlex Bennée } 2066eed56642SAlex Bennée 2067fc1bc777SRichard Henderson tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr, 20689002ffcbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 2069eed56642SAlex Bennée { 20702dd92606SRichard Henderson return full_le_ldul_mmu(env, addr, oi, retaddr); 20712dd92606SRichard Henderson } 20722dd92606SRichard Henderson 20732dd92606SRichard Henderson static uint64_t full_be_ldul_mmu(CPUArchState *env, target_ulong addr, 20749002ffcbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 20752dd92606SRichard Henderson { 2076f83bcecbSRichard Henderson validate_memop(oi, MO_BEUL); 2077be5c4787STony Nguyen return load_helper(env, addr, oi, retaddr, MO_BEUL, false, 20782dd92606SRichard Henderson full_be_ldul_mmu); 

tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
                                    MemOpIdx oi, uintptr_t retaddr)
{
    return full_be_ldul_mmu(env, addr, oi, retaddr);
}

uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
                           MemOpIdx oi, uintptr_t retaddr)
{
    validate_memop(oi, MO_LEUQ);
    return load_helper(env, addr, oi, retaddr, MO_LEUQ, false,
                       helper_le_ldq_mmu);
}

uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
                           MemOpIdx oi, uintptr_t retaddr)
{
    validate_memop(oi, MO_BEUQ);
    return load_helper(env, addr, oi, retaddr, MO_BEUQ, false,
                       helper_be_ldq_mmu);
}

/*
 * Provide signed versions of the load routines as well.  We can of course
 * avoid this for 64-bit data, or for 32-bit data on 32-bit host.
 */

tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
                                     MemOpIdx oi, uintptr_t retaddr)
{
    return (int8_t)helper_ret_ldub_mmu(env, addr, oi, retaddr);
}

tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
                                    MemOpIdx oi, uintptr_t retaddr)
{
    return (int16_t)helper_le_lduw_mmu(env, addr, oi, retaddr);
}

tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
                                    MemOpIdx oi, uintptr_t retaddr)
{
    return (int16_t)helper_be_lduw_mmu(env, addr, oi, retaddr);
}

tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
                                    MemOpIdx oi, uintptr_t retaddr)
{
    return (int32_t)helper_le_ldul_mmu(env, addr, oi, retaddr);
}

tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
                                    MemOpIdx oi, uintptr_t retaddr)
{
    return (int32_t)helper_be_ldul_mmu(env, addr, oi, retaddr);
}
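/*
 * How the casts above work (added note): the unsigned helper returns a
 * zero-extended uint64_t; casting it to int8_t/int16_t/int32_t keeps
 * only the low bits, and converting that signed value back to
 * tcg_target_ulong for the return reproduces the sign-extended bit
 * pattern, e.g.
 *
 *     return (int16_t)helper_le_lduw_mmu(env, addr, oi, retaddr);
 *
 * This relies only on the two's-complement conversion behaviour of the
 * supported compilers; no separate memory access path is needed for
 * signed loads.
 */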

/*
 * Load helpers for cpu_ldst.h.
 */

static inline uint64_t cpu_load_helper(CPUArchState *env, abi_ptr addr,
                                       MemOpIdx oi, uintptr_t retaddr,
                                       FullLoadHelper *full_load)
{
    uint64_t ret;

    ret = full_load(env, addr, oi, retaddr);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr, MemOpIdx oi, uintptr_t ra)
{
    return cpu_load_helper(env, addr, oi, ra, full_ldub_mmu);
}

uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    return cpu_load_helper(env, addr, oi, ra, full_be_lduw_mmu);
}

uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    return cpu_load_helper(env, addr, oi, ra, full_be_ldul_mmu);
}

uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    return cpu_load_helper(env, addr, oi, ra, helper_be_ldq_mmu);
}

uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    return cpu_load_helper(env, addr, oi, ra, full_le_lduw_mmu);
}

uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    return cpu_load_helper(env, addr, oi, ra, full_le_ldul_mmu);
}

uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    return cpu_load_helper(env, addr, oi, ra, helper_le_ldq_mmu);
}

Int128 cpu_ld16_be_mmu(CPUArchState *env, abi_ptr addr,
                       MemOpIdx oi, uintptr_t ra)
{
    MemOp mop = get_memop(oi);
    int mmu_idx = get_mmuidx(oi);
    MemOpIdx new_oi;
    unsigned a_bits;
    uint64_t h, l;

    tcg_debug_assert((mop & (MO_BSWAP|MO_SSIZE)) == (MO_BE|MO_128));
    a_bits = get_alignment_bits(mop);

    /* Handle CPU specific unaligned behaviour */
    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_LOAD,
                             mmu_idx, ra);
    }

    /* Construct an unaligned 64-bit replacement MemOpIdx. */
    mop = (mop & ~(MO_SIZE | MO_AMASK)) | MO_64 | MO_UNALN;
    new_oi = make_memop_idx(mop, mmu_idx);

    h = helper_be_ldq_mmu(env, addr, new_oi, ra);
    l = helper_be_ldq_mmu(env, addr + 8, new_oi, ra);

    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return int128_make128(l, h);
}
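/*
 * Hypothetical caller, for illustration only: a target helper wanting a
 * big-endian 16-byte load builds the MemOpIdx itself, e.g.
 *
 *     MemOpIdx oi = make_memop_idx(MO_BE | MO_128 | MO_ALIGN, mmu_idx);
 *     Int128 v = cpu_ld16_be_mmu(env, addr, oi, GETPC());
 *
 * Because the value is fetched as two separate 8-byte loads, this path
 * makes no single-copy atomicity guarantee for the full 16 bytes.
 */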

Int128 cpu_ld16_le_mmu(CPUArchState *env, abi_ptr addr,
                       MemOpIdx oi, uintptr_t ra)
{
    MemOp mop = get_memop(oi);
    int mmu_idx = get_mmuidx(oi);
    MemOpIdx new_oi;
    unsigned a_bits;
    uint64_t h, l;

    tcg_debug_assert((mop & (MO_BSWAP|MO_SSIZE)) == (MO_LE|MO_128));
    a_bits = get_alignment_bits(mop);

    /* Handle CPU specific unaligned behaviour */
    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_LOAD,
                             mmu_idx, ra);
    }

    /* Construct an unaligned 64-bit replacement MemOpIdx. */
    mop = (mop & ~(MO_SIZE | MO_AMASK)) | MO_64 | MO_UNALN;
    new_oi = make_memop_idx(mop, mmu_idx);

    l = helper_le_ldq_mmu(env, addr, new_oi, ra);
    h = helper_le_ldq_mmu(env, addr + 8, new_oi, ra);

    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return int128_make128(l, h);
}

/*
 * Store Helpers
 */

static inline void QEMU_ALWAYS_INLINE
store_memop(void *haddr, uint64_t val, MemOp op)
{
    switch (op) {
    case MO_UB:
        stb_p(haddr, val);
        break;
    case MO_BEUW:
        stw_be_p(haddr, val);
        break;
    case MO_LEUW:
        stw_le_p(haddr, val);
        break;
    case MO_BEUL:
        stl_be_p(haddr, val);
        break;
    case MO_LEUL:
        stl_le_p(haddr, val);
        break;
    case MO_BEUQ:
        stq_be_p(haddr, val);
        break;
    case MO_LEUQ:
        stq_le_p(haddr, val);
        break;
    default:
        qemu_build_not_reached();
    }
}
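/*
 * Added design note: store_memop is always called with a
 * compile-time-constant op on the fast path, so with QEMU_ALWAYS_INLINE
 * the switch folds down to the single matching st*_p call, and
 * qemu_build_not_reached() turns any call that fails to fold into a
 * build-time error instead of a runtime trap.
 */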

static void full_stb_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                         MemOpIdx oi, uintptr_t retaddr);

static void __attribute__((noinline))
store_helper_unaligned(CPUArchState *env, target_ulong addr, uint64_t val,
                       uintptr_t retaddr, size_t size, uintptr_t mmu_idx,
                       bool big_endian)
{
    const size_t tlb_off = offsetof(CPUTLBEntry, addr_write);
    uintptr_t index, index2;
    CPUTLBEntry *entry, *entry2;
    target_ulong page1, page2, tlb_addr, tlb_addr2;
    MemOpIdx oi;
    size_t size2;
    int i;

    /*
     * Ensure the second page is in the TLB.  Note that the first page
     * is already guaranteed to be filled, and that the second page
     * cannot evict the first.  An exception to this rule is PAGE_WRITE_INV
     * handling: the first page could have evicted itself.
     */
    page1 = addr & TARGET_PAGE_MASK;
    page2 = (addr + size) & TARGET_PAGE_MASK;
    size2 = (addr + size) & ~TARGET_PAGE_MASK;
    index2 = tlb_index(env, mmu_idx, page2);
    entry2 = tlb_entry(env, mmu_idx, page2);

    tlb_addr2 = tlb_addr_write(entry2);
    if (page1 != page2 && !tlb_hit_page(tlb_addr2, page2)) {
        if (!victim_tlb_hit(env, mmu_idx, index2, tlb_off, page2)) {
            tlb_fill(env_cpu(env), page2, size2, MMU_DATA_STORE,
                     mmu_idx, retaddr);
            index2 = tlb_index(env, mmu_idx, page2);
            entry2 = tlb_entry(env, mmu_idx, page2);
        }
        tlb_addr2 = tlb_addr_write(entry2);
    }

    index = tlb_index(env, mmu_idx, addr);
    entry = tlb_entry(env, mmu_idx, addr);
    tlb_addr = tlb_addr_write(entry);

    /*
     * Handle watchpoints.  Since this may trap, all checks
     * must happen before any store.
     */
    if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
        cpu_check_watchpoint(env_cpu(env), addr, size - size2,
                             env_tlb(env)->d[mmu_idx].fulltlb[index].attrs,
                             BP_MEM_WRITE, retaddr);
    }
    if (unlikely(tlb_addr2 & TLB_WATCHPOINT)) {
        cpu_check_watchpoint(env_cpu(env), page2, size2,
                             env_tlb(env)->d[mmu_idx].fulltlb[index2].attrs,
                             BP_MEM_WRITE, retaddr);
    }

    /*
     * XXX: not efficient, but simple.
     * This loop must go in the forward direction to avoid issues
     * with self-modifying code in Windows 64-bit.
     */
    oi = make_memop_idx(MO_UB, mmu_idx);
    if (big_endian) {
        for (i = 0; i < size; ++i) {
            /* Big-endian extract.  */
            uint8_t val8 = val >> (((size - 1) * 8) - (i * 8));
            full_stb_mmu(env, addr + i, val8, oi, retaddr);
        }
    } else {
        for (i = 0; i < size; ++i) {
            /* Little-endian extract.  */
            uint8_t val8 = val >> (i * 8);
            full_stb_mmu(env, addr + i, val8, oi, retaddr);
        }
    }
}
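/*
 * Worked example for the extract loops above (illustrative): storing
 * the 32-bit value 0x11223344 big-endian writes bytes in ascending
 * guest-address order 0x11, 0x22, 0x33, 0x44, since iteration i shifts
 * by (size - 1 - i) * 8; the little-endian loop shifts by i * 8 and so
 * writes 0x44, 0x33, 0x22, 0x11.
 */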

static inline void QEMU_ALWAYS_INLINE
store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
             MemOpIdx oi, uintptr_t retaddr, MemOp op)
{
    const size_t tlb_off = offsetof(CPUTLBEntry, addr_write);
    const unsigned a_bits = get_alignment_bits(get_memop(oi));
    const size_t size = memop_size(op);
    uintptr_t mmu_idx = get_mmuidx(oi);
    uintptr_t index;
    CPUTLBEntry *entry;
    target_ulong tlb_addr;
    void *haddr;

    tcg_debug_assert(mmu_idx < NB_MMU_MODES);

    /* Handle CPU specific unaligned behaviour */
    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    index = tlb_index(env, mmu_idx, addr);
    entry = tlb_entry(env, mmu_idx, addr);
    tlb_addr = tlb_addr_write(entry);

    /* If the TLB entry is for a different page, reload and try again. */
    if (!tlb_hit(tlb_addr, addr)) {
        if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
                            addr & TARGET_PAGE_MASK)) {
            tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE,
                     mmu_idx, retaddr);
            index = tlb_index(env, mmu_idx, addr);
            entry = tlb_entry(env, mmu_idx, addr);
        }
        tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK;
    }

    /* Handle anything that isn't just a straight memory access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUTLBEntryFull *full;
        bool need_swap;

        /* For anything that is unaligned, recurse through byte stores.  */
        if ((addr & (size - 1)) != 0) {
            goto do_unaligned_access;
        }

        full = &env_tlb(env)->d[mmu_idx].fulltlb[index];

        /* Handle watchpoints.  */
        if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
            /* On watchpoint hit, this will longjmp out.  */
            cpu_check_watchpoint(env_cpu(env), addr, size,
                                 full->attrs, BP_MEM_WRITE, retaddr);
        }
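        /*
         * Added note: the tests in this block all key off status bits
         * that the TLB machinery packs into the sub-page bits of
         * tlb_addr (TLB_WATCHPOINT, TLB_MMIO, TLB_NOTDIRTY, TLB_BSWAP,
         * ...), which is why the single "tlb_addr & ~TARGET_PAGE_MASK"
         * check above is enough to divert every special case off the
         * fast path.
         */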

        need_swap = size > 1 && (tlb_addr & TLB_BSWAP);

        /* Handle I/O access.  */
        if (tlb_addr & TLB_MMIO) {
            io_writex(env, full, mmu_idx, val, addr, retaddr,
                      op ^ (need_swap * MO_BSWAP));
            return;
        }

        /* Ignore writes to ROM.  */
        if (unlikely(tlb_addr & TLB_DISCARD_WRITE)) {
            return;
        }

        /* Handle clean RAM pages.  */
        if (tlb_addr & TLB_NOTDIRTY) {
            notdirty_write(env_cpu(env), addr, size, full, retaddr);
        }

        haddr = (void *)((uintptr_t)addr + entry->addend);

        /*
         * Keep these two store_memop separate to ensure that the compiler
         * is able to fold the entire function to a single instruction.
         * There is a build-time assert inside to remind you of this.  ;-)
         */
        if (unlikely(need_swap)) {
            store_memop(haddr, val, op ^ MO_BSWAP);
        } else {
            store_memop(haddr, val, op);
        }
        return;
    }
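    /*
     * Added note on the byte-swap arithmetic above: MemOp keeps the
     * endianness in the single MO_BSWAP bit, so XOR-ing with MO_BSWAP
     * flips the byte order, and multiplying by the bool need_swap makes
     * the XOR a no-op when no swap is wanted; a branchless selection of
     * the byte order.
     */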

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (size > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
                    >= TARGET_PAGE_SIZE)) {
    do_unaligned_access:
        store_helper_unaligned(env, addr, val, retaddr, size,
                               mmu_idx, memop_big_endian(op));
        return;
    }

    haddr = (void *)((uintptr_t)addr + entry->addend);
    store_memop(haddr, val, op);
}

static void __attribute__((noinline))
full_stb_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
             MemOpIdx oi, uintptr_t retaddr)
{
    validate_memop(oi, MO_UB);
    store_helper(env, addr, val, oi, retaddr, MO_UB);
}

void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
                        MemOpIdx oi, uintptr_t retaddr)
{
    full_stb_mmu(env, addr, val, oi, retaddr);
}

static void full_le_stw_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                            MemOpIdx oi, uintptr_t retaddr)
{
    validate_memop(oi, MO_LEUW);
    store_helper(env, addr, val, oi, retaddr, MO_LEUW);
}

void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                       MemOpIdx oi, uintptr_t retaddr)
{
    full_le_stw_mmu(env, addr, val, oi, retaddr);
}

static void full_be_stw_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                            MemOpIdx oi, uintptr_t retaddr)
{
    validate_memop(oi, MO_BEUW);
    store_helper(env, addr, val, oi, retaddr, MO_BEUW);
}

void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                       MemOpIdx oi, uintptr_t retaddr)
{
    full_be_stw_mmu(env, addr, val, oi, retaddr);
}

static void full_le_stl_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                            MemOpIdx oi, uintptr_t retaddr)
{
    validate_memop(oi, MO_LEUL);
    store_helper(env, addr, val, oi, retaddr, MO_LEUL);
}

void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                       MemOpIdx oi, uintptr_t retaddr)
{
    full_le_stl_mmu(env, addr, val, oi, retaddr);
}

static void full_be_stl_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                            MemOpIdx oi, uintptr_t retaddr)
{
    validate_memop(oi, MO_BEUL);
    store_helper(env, addr, val, oi, retaddr, MO_BEUL);
}

void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                       MemOpIdx oi, uintptr_t retaddr)
{
    full_be_stl_mmu(env, addr, val, oi, retaddr);
}

void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                       MemOpIdx oi, uintptr_t retaddr)
{
    validate_memop(oi, MO_LEUQ);
    store_helper(env, addr, val, oi, retaddr, MO_LEUQ);
}

void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                       MemOpIdx oi, uintptr_t retaddr)
{
    validate_memop(oi, MO_BEUQ);
    store_helper(env, addr, val, oi, retaddr, MO_BEUQ);
}

/*
 * Store Helpers for cpu_ldst.h
 */

typedef void FullStoreHelper(CPUArchState *env, target_ulong addr,
                             uint64_t val, MemOpIdx oi, uintptr_t retaddr);

static inline void cpu_store_helper(CPUArchState *env, target_ulong addr,
                                    uint64_t val, MemOpIdx oi, uintptr_t ra,
                                    FullStoreHelper *full_store)
{
    full_store(env, addr, val, oi, ra);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
                 MemOpIdx oi, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, oi, retaddr, full_stb_mmu);
}

void cpu_stw_be_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                    MemOpIdx oi, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, oi, retaddr, full_be_stw_mmu);
}

void cpu_stl_be_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                    MemOpIdx oi, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, oi, retaddr, full_be_stl_mmu);
}
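/*
 * Hypothetical usage, for illustration only: a target helper performing
 * an aligned little-endian 32-bit store through this interface would do
 *
 *     MemOpIdx oi = make_memop_idx(MO_LEUL | MO_ALIGN,
 *                                  cpu_mmu_index(env, false));
 *     cpu_stl_le_mmu(env, addr, val, oi, GETPC());
 *
 * Note the plugin callback in cpu_store_helper runs only after the
 * store has completed, so a store that faults is never reported to
 * plugins.
 */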

void cpu_stq_be_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                    MemOpIdx oi, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, oi, retaddr, helper_be_stq_mmu);
}

void cpu_stw_le_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                    MemOpIdx oi, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, oi, retaddr, full_le_stw_mmu);
}

void cpu_stl_le_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                    MemOpIdx oi, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, oi, retaddr, full_le_stl_mmu);
}

void cpu_stq_le_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                    MemOpIdx oi, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, oi, retaddr, helper_le_stq_mmu);
}

void cpu_st16_be_mmu(CPUArchState *env, abi_ptr addr, Int128 val,
                     MemOpIdx oi, uintptr_t ra)
{
    MemOp mop = get_memop(oi);
    int mmu_idx = get_mmuidx(oi);
    MemOpIdx new_oi;
    unsigned a_bits;

    tcg_debug_assert((mop & (MO_BSWAP|MO_SSIZE)) == (MO_BE|MO_128));
    a_bits = get_alignment_bits(mop);

    /* Handle CPU specific unaligned behaviour */
    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
                             mmu_idx, ra);
    }

    /* Construct an unaligned 64-bit replacement MemOpIdx. */
    mop = (mop & ~(MO_SIZE | MO_AMASK)) | MO_64 | MO_UNALN;
    new_oi = make_memop_idx(mop, mmu_idx);

    helper_be_stq_mmu(env, addr, int128_gethi(val), new_oi, ra);
    helper_be_stq_mmu(env, addr + 8, int128_getlo(val), new_oi, ra);

    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_st16_le_mmu(CPUArchState *env, abi_ptr addr, Int128 val,
                     MemOpIdx oi, uintptr_t ra)
{
    MemOp mop = get_memop(oi);
    int mmu_idx = get_mmuidx(oi);
    MemOpIdx new_oi;
    unsigned a_bits;

    tcg_debug_assert((mop & (MO_BSWAP|MO_SSIZE)) == (MO_LE|MO_128));
    a_bits = get_alignment_bits(mop);

    /* Handle CPU specific unaligned behaviour */
    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
                             mmu_idx, ra);
    }

    /* Construct an unaligned 64-bit replacement MemOpIdx. */
    mop = (mop & ~(MO_SIZE | MO_AMASK)) | MO_64 | MO_UNALN;
    new_oi = make_memop_idx(mop, mmu_idx);

    helper_le_stq_mmu(env, addr, int128_getlo(val), new_oi, ra);
    helper_le_stq_mmu(env, addr + 8, int128_gethi(val), new_oi, ra);

    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

#include "ldst_common.c.inc"
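/*
 * Added note (an assumption about the include): ldst_common.c.inc is
 * expected to build the still more convenient data-access wrappers,
 * such as the cpu_ldub_data_ra() family, on top of the MemOpIdx-based
 * functions defined above.
 */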

/*
 * First set of functions passes in OI and RETADDR.
 * This makes them callable from other helpers.
 */

#define ATOMIC_NAME(X) \
    glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)

#define ATOMIC_MMU_CLEANUP

#include "atomic_common.c.inc"

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#if HAVE_CMPXCHG128 || HAVE_ATOMIC128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif

/* Code access functions.  */

static uint64_t full_ldub_code(CPUArchState *env, target_ulong addr,
                               MemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_8, true, full_ldub_code);
}

uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr)
{
    MemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(env, true));
    return full_ldub_code(env, addr, oi, 0);
}

static uint64_t full_lduw_code(CPUArchState *env, target_ulong addr,
                               MemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_TEUW, true, full_lduw_code);
}

uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr)
{
    MemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(env, true));
    return full_lduw_code(env, addr, oi, 0);
}

static uint64_t full_ldl_code(CPUArchState *env, target_ulong addr,
                              MemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_TEUL, true, full_ldl_code);
}

uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr)
{
    MemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, true));
    return full_ldl_code(env, addr, oi, 0);
}
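/*
 * Sketch of the expansion above (illustrative): each inclusion of
 * atomic_template.h consumes the current DATA_SIZE and, via ATOMIC_NAME,
 * stamps out one family of entry points.  For DATA_SIZE 4 this yields
 * names along the lines of
 *
 *     uint32_t cpu_atomic_cmpxchgl_le_mmu(CPUArchState *env,
 *                                         target_ulong addr,
 *                                         uint32_t cmpv, uint32_t newv,
 *                                         MemOpIdx oi, uintptr_t retaddr);
 *
 * with SUFFIX supplying the size letter and END the _le/_be byte order.
 */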

static uint64_t full_ldq_code(CPUArchState *env, target_ulong addr,
                              MemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_TEUQ, true, full_ldq_code);
}

uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr)
{
    MemOpIdx oi = make_memop_idx(MO_TEUQ, cpu_mmu_index(env, true));
    return full_ldq_code(env, addr, oi, 0);
}
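/*
 * Added note: the code-access readers pass retaddr == 0 because they
 * are called from the translator rather than from the middle of an
 * executing translation block, so there is no host return address to
 * unwind; cpu_mmu_index(env, true) selects the instruction-fetch MMU
 * index for the lookup.
 */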