1d9bb58e5SYang Zhong /* 2d9bb58e5SYang Zhong * Common CPU TLB handling 3d9bb58e5SYang Zhong * 4d9bb58e5SYang Zhong * Copyright (c) 2003 Fabrice Bellard 5d9bb58e5SYang Zhong * 6d9bb58e5SYang Zhong * This library is free software; you can redistribute it and/or 7d9bb58e5SYang Zhong * modify it under the terms of the GNU Lesser General Public 8d9bb58e5SYang Zhong * License as published by the Free Software Foundation; either 9fb0343d5SThomas Huth * version 2.1 of the License, or (at your option) any later version. 10d9bb58e5SYang Zhong * 11d9bb58e5SYang Zhong * This library is distributed in the hope that it will be useful, 12d9bb58e5SYang Zhong * but WITHOUT ANY WARRANTY; without even the implied warranty of 13d9bb58e5SYang Zhong * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14d9bb58e5SYang Zhong * Lesser General Public License for more details. 15d9bb58e5SYang Zhong * 16d9bb58e5SYang Zhong * You should have received a copy of the GNU Lesser General Public 17d9bb58e5SYang Zhong * License along with this library; if not, see <http://www.gnu.org/licenses/>. 18d9bb58e5SYang Zhong */ 19d9bb58e5SYang Zhong 20d9bb58e5SYang Zhong #include "qemu/osdep.h" 21d9bb58e5SYang Zhong #include "qemu/main-loop.h" 2278271684SClaudio Fontana #include "hw/core/tcg-cpu-ops.h" 23d9bb58e5SYang Zhong #include "exec/exec-all.h" 24d9bb58e5SYang Zhong #include "exec/memory.h" 25d9bb58e5SYang Zhong #include "exec/cpu_ldst.h" 26d9bb58e5SYang Zhong #include "exec/cputlb.h" 27d9bb58e5SYang Zhong #include "exec/memory-internal.h" 28d9bb58e5SYang Zhong #include "exec/ram_addr.h" 29d9bb58e5SYang Zhong #include "tcg/tcg.h" 30d9bb58e5SYang Zhong #include "qemu/error-report.h" 31d9bb58e5SYang Zhong #include "exec/log.h" 32d9bb58e5SYang Zhong #include "exec/helper-proto.h" 33d9bb58e5SYang Zhong #include "qemu/atomic.h" 34e6cd4bb5SRichard Henderson #include "qemu/atomic128.h" 353b9bd3f4SPaolo Bonzini #include "exec/translate-all.h" 3651807763SPhilippe Mathieu-Daudé #include "trace.h" 37e5ceadffSPhilippe Mathieu-Daudé #include "tb-hash.h" 3865269192SPhilippe Mathieu-Daudé #include "internal.h" 39235537faSAlex Bennée #ifdef CONFIG_PLUGIN 40235537faSAlex Bennée #include "qemu/plugin-memory.h" 41235537faSAlex Bennée #endif 42d2ba8026SRichard Henderson #include "tcg/tcg-ldst.h" 43d9bb58e5SYang Zhong 44d9bb58e5SYang Zhong /* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */ 45d9bb58e5SYang Zhong /* #define DEBUG_TLB */ 46d9bb58e5SYang Zhong /* #define DEBUG_TLB_LOG */ 47d9bb58e5SYang Zhong 48d9bb58e5SYang Zhong #ifdef DEBUG_TLB 49d9bb58e5SYang Zhong # define DEBUG_TLB_GATE 1 50d9bb58e5SYang Zhong # ifdef DEBUG_TLB_LOG 51d9bb58e5SYang Zhong # define DEBUG_TLB_LOG_GATE 1 52d9bb58e5SYang Zhong # else 53d9bb58e5SYang Zhong # define DEBUG_TLB_LOG_GATE 0 54d9bb58e5SYang Zhong # endif 55d9bb58e5SYang Zhong #else 56d9bb58e5SYang Zhong # define DEBUG_TLB_GATE 0 57d9bb58e5SYang Zhong # define DEBUG_TLB_LOG_GATE 0 58d9bb58e5SYang Zhong #endif 59d9bb58e5SYang Zhong 60d9bb58e5SYang Zhong #define tlb_debug(fmt, ...) do { \ 61d9bb58e5SYang Zhong if (DEBUG_TLB_LOG_GATE) { \ 62d9bb58e5SYang Zhong qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \ 63d9bb58e5SYang Zhong ## __VA_ARGS__); \ 64d9bb58e5SYang Zhong } else if (DEBUG_TLB_GATE) { \ 65d9bb58e5SYang Zhong fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \ 66d9bb58e5SYang Zhong } \ 67d9bb58e5SYang Zhong } while (0) 68d9bb58e5SYang Zhong 69ea9025cbSEmilio G. 
Cota #define assert_cpu_is_self(cpu) do { \ 70d9bb58e5SYang Zhong if (DEBUG_TLB_GATE) { \ 71ea9025cbSEmilio G. Cota g_assert(!(cpu)->created || qemu_cpu_is_self(cpu)); \ 72d9bb58e5SYang Zhong } \ 73d9bb58e5SYang Zhong } while (0) 74d9bb58e5SYang Zhong 75d9bb58e5SYang Zhong /* run_on_cpu_data.target_ptr should always be big enough for a 76d9bb58e5SYang Zhong * target_ulong even on 32 bit builds */ 77d9bb58e5SYang Zhong QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data)); 78d9bb58e5SYang Zhong 79d9bb58e5SYang Zhong /* We currently can't handle more than 16 bits in the MMUIDX bitmask. 80d9bb58e5SYang Zhong */ 81d9bb58e5SYang Zhong QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16); 82d9bb58e5SYang Zhong #define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1) 83d9bb58e5SYang Zhong 84722a1c1eSRichard Henderson static inline size_t tlb_n_entries(CPUTLBDescFast *fast) 857a1efe1bSRichard Henderson { 86722a1c1eSRichard Henderson return (fast->mask >> CPU_TLB_ENTRY_BITS) + 1; 877a1efe1bSRichard Henderson } 887a1efe1bSRichard Henderson 89722a1c1eSRichard Henderson static inline size_t sizeof_tlb(CPUTLBDescFast *fast) 9086e1eff8SEmilio G. Cota { 91722a1c1eSRichard Henderson return fast->mask + (1 << CPU_TLB_ENTRY_BITS); 9286e1eff8SEmilio G. Cota } 9386e1eff8SEmilio G. Cota 9479e42085SRichard Henderson static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns, 9586e1eff8SEmilio G. Cota size_t max_entries) 9686e1eff8SEmilio G. Cota { 9779e42085SRichard Henderson desc->window_begin_ns = ns; 9879e42085SRichard Henderson desc->window_max_entries = max_entries; 9986e1eff8SEmilio G. Cota } 10086e1eff8SEmilio G. Cota 1010f4abea8SRichard Henderson static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr) 1020f4abea8SRichard Henderson { 103a976a99aSRichard Henderson CPUJumpCache *jc = cpu->tb_jmp_cache; 10499ab4d50SEric Auger int i, i0; 1050f4abea8SRichard Henderson 10699ab4d50SEric Auger if (unlikely(!jc)) { 10799ab4d50SEric Auger return; 10899ab4d50SEric Auger } 10999ab4d50SEric Auger 11099ab4d50SEric Auger i0 = tb_jmp_cache_hash_page(page_addr); 1110f4abea8SRichard Henderson for (i = 0; i < TB_JMP_PAGE_SIZE; i++) { 112a976a99aSRichard Henderson qatomic_set(&jc->array[i0 + i].tb, NULL); 1130f4abea8SRichard Henderson } 1140f4abea8SRichard Henderson } 1150f4abea8SRichard Henderson 11686e1eff8SEmilio G. Cota /** 11786e1eff8SEmilio G. Cota * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary 11871ccd47bSRichard Henderson * @desc: The CPUTLBDesc portion of the TLB 11971ccd47bSRichard Henderson * @fast: The CPUTLBDescFast portion of the same TLB 12086e1eff8SEmilio G. Cota * 12186e1eff8SEmilio G. Cota * Called with tlb_lock_held. 12286e1eff8SEmilio G. Cota * 12386e1eff8SEmilio G. Cota * We have two main constraints when resizing a TLB: (1) we only resize it 12486e1eff8SEmilio G. Cota * on a TLB flush (otherwise we'd have to take a perf hit by either rehashing 12586e1eff8SEmilio G. Cota * the array or unnecessarily flushing it), which means we do not control how 12686e1eff8SEmilio G. Cota * frequently the resizing can occur; (2) we don't have access to the guest's 12786e1eff8SEmilio G. Cota * future scheduling decisions, and therefore have to decide the magnitude of 12886e1eff8SEmilio G. Cota * the resize based on past observations. 12986e1eff8SEmilio G. Cota * 13086e1eff8SEmilio G. Cota * In general, a memory-hungry process can benefit greatly from an appropriately 13186e1eff8SEmilio G. Cota * sized TLB, since a guest TLB miss is very expensive. 
This doesn't mean that 13286e1eff8SEmilio G. Cota * we just have to make the TLB as large as possible; while an oversized TLB 13386e1eff8SEmilio G. Cota * results in minimal TLB miss rates, it also takes longer to be flushed 13486e1eff8SEmilio G. Cota * (flushes can be _very_ frequent), and the reduced locality can also hurt 13586e1eff8SEmilio G. Cota * performance. 13686e1eff8SEmilio G. Cota * 13786e1eff8SEmilio G. Cota * To achieve near-optimal performance for all kinds of workloads, we: 13886e1eff8SEmilio G. Cota * 13986e1eff8SEmilio G. Cota * 1. Aggressively increase the size of the TLB when the use rate of the 14086e1eff8SEmilio G. Cota * TLB being flushed is high, since it is likely that in the near future this 14186e1eff8SEmilio G. Cota * memory-hungry process will execute again, and its memory hungriness will 14286e1eff8SEmilio G. Cota * probably be similar. 14386e1eff8SEmilio G. Cota * 14486e1eff8SEmilio G. Cota * 2. Slowly reduce the size of the TLB as the use rate declines over a 14586e1eff8SEmilio G. Cota * reasonably large time window. The rationale is that if in such a time window 14686e1eff8SEmilio G. Cota * we have not observed a high TLB use rate, it is likely that we won't observe 14786e1eff8SEmilio G. Cota * it in the near future. In that case, once a time window expires we downsize 14886e1eff8SEmilio G. Cota * the TLB to match the maximum use rate observed in the window. 14986e1eff8SEmilio G. Cota * 15086e1eff8SEmilio G. Cota * 3. Try to keep the maximum use rate in a time window in the 30-70% range, 15186e1eff8SEmilio G. Cota * since in that range performance is likely near-optimal. Recall that the TLB 15286e1eff8SEmilio G. Cota * is direct mapped, so we want the use rate to be low (or at least not too 15386e1eff8SEmilio G. Cota * high), since otherwise we are likely to have a significant amount of 15486e1eff8SEmilio G. Cota * conflict misses. 15586e1eff8SEmilio G. Cota */ 1563c3959f2SRichard Henderson static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast, 1573c3959f2SRichard Henderson int64_t now) 15886e1eff8SEmilio G. Cota { 15971ccd47bSRichard Henderson size_t old_size = tlb_n_entries(fast); 16086e1eff8SEmilio G. Cota size_t rate; 16186e1eff8SEmilio G. Cota size_t new_size = old_size; 16286e1eff8SEmilio G. Cota int64_t window_len_ms = 100; 16386e1eff8SEmilio G. Cota int64_t window_len_ns = window_len_ms * 1000 * 1000; 16479e42085SRichard Henderson bool window_expired = now > desc->window_begin_ns + window_len_ns; 16586e1eff8SEmilio G. Cota 16679e42085SRichard Henderson if (desc->n_used_entries > desc->window_max_entries) { 16779e42085SRichard Henderson desc->window_max_entries = desc->n_used_entries; 16886e1eff8SEmilio G. Cota } 16979e42085SRichard Henderson rate = desc->window_max_entries * 100 / old_size; 17086e1eff8SEmilio G. Cota 17186e1eff8SEmilio G. Cota if (rate > 70) { 17286e1eff8SEmilio G. Cota new_size = MIN(old_size << 1, 1 << CPU_TLB_DYN_MAX_BITS); 17386e1eff8SEmilio G. Cota } else if (rate < 30 && window_expired) { 17479e42085SRichard Henderson size_t ceil = pow2ceil(desc->window_max_entries); 17579e42085SRichard Henderson size_t expected_rate = desc->window_max_entries * 100 / ceil; 17686e1eff8SEmilio G. Cota 17786e1eff8SEmilio G. Cota /* 17886e1eff8SEmilio G. Cota * Avoid undersizing when the max number of entries seen is just below 17986e1eff8SEmilio G. Cota * a pow2. For instance, if max_entries == 1025, the expected use rate 18086e1eff8SEmilio G. Cota * would be 1025/2048==50%. 
However, if max_entries == 1023, we'd get 18186e1eff8SEmilio G. Cota * 1023/1024==99.9% use rate, so we'd likely end up doubling the size 18286e1eff8SEmilio G. Cota * later. Thus, make sure that the expected use rate remains below 70%. 18386e1eff8SEmilio G. Cota * (and since we double the size, that means the lowest rate we'd 18486e1eff8SEmilio G. Cota * expect to get is 35%, which is still in the 30-70% range where 18586e1eff8SEmilio G. Cota * we consider that the size is appropriate.) 18686e1eff8SEmilio G. Cota */ 18786e1eff8SEmilio G. Cota if (expected_rate > 70) { 18886e1eff8SEmilio G. Cota ceil *= 2; 18986e1eff8SEmilio G. Cota } 19086e1eff8SEmilio G. Cota new_size = MAX(ceil, 1 << CPU_TLB_DYN_MIN_BITS); 19186e1eff8SEmilio G. Cota } 19286e1eff8SEmilio G. Cota 19386e1eff8SEmilio G. Cota if (new_size == old_size) { 19486e1eff8SEmilio G. Cota if (window_expired) { 19579e42085SRichard Henderson tlb_window_reset(desc, now, desc->n_used_entries); 19686e1eff8SEmilio G. Cota } 19786e1eff8SEmilio G. Cota return; 19886e1eff8SEmilio G. Cota } 19986e1eff8SEmilio G. Cota 20071ccd47bSRichard Henderson g_free(fast->table); 20125d3ec58SRichard Henderson g_free(desc->fulltlb); 20286e1eff8SEmilio G. Cota 20379e42085SRichard Henderson tlb_window_reset(desc, now, 0); 20486e1eff8SEmilio G. Cota /* desc->n_used_entries is cleared by the caller */ 20571ccd47bSRichard Henderson fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS; 20671ccd47bSRichard Henderson fast->table = g_try_new(CPUTLBEntry, new_size); 20725d3ec58SRichard Henderson desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size); 20871ccd47bSRichard Henderson 20986e1eff8SEmilio G. Cota /* 21086e1eff8SEmilio G. Cota * If the allocations fail, try smaller sizes. We just freed some 21186e1eff8SEmilio G. Cota * memory, so going back to half of new_size has a good chance of working. 21286e1eff8SEmilio G. Cota * Increased memory pressure elsewhere in the system might cause the 21386e1eff8SEmilio G. Cota * allocations to fail though, so we progressively reduce the allocation 21486e1eff8SEmilio G. Cota * size, aborting if we cannot even allocate the smallest TLB we support. 21586e1eff8SEmilio G. Cota */ 21625d3ec58SRichard Henderson while (fast->table == NULL || desc->fulltlb == NULL) { 21786e1eff8SEmilio G. Cota if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) { 21886e1eff8SEmilio G. Cota error_report("%s: %s", __func__, strerror(errno)); 21986e1eff8SEmilio G. Cota abort(); 22086e1eff8SEmilio G. Cota } 22186e1eff8SEmilio G. Cota new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS); 22271ccd47bSRichard Henderson fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS; 22386e1eff8SEmilio G. Cota 22471ccd47bSRichard Henderson g_free(fast->table); 22525d3ec58SRichard Henderson g_free(desc->fulltlb); 22671ccd47bSRichard Henderson fast->table = g_try_new(CPUTLBEntry, new_size); 22725d3ec58SRichard Henderson desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size); 22886e1eff8SEmilio G. Cota } 22986e1eff8SEmilio G. Cota } 23086e1eff8SEmilio G. Cota 231bbf021b0SRichard Henderson static void tlb_mmu_flush_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast) 23286e1eff8SEmilio G. Cota { 2335c948e31SRichard Henderson desc->n_used_entries = 0; 2345c948e31SRichard Henderson desc->large_page_addr = -1; 2355c948e31SRichard Henderson desc->large_page_mask = -1; 2365c948e31SRichard Henderson desc->vindex = 0; 2375c948e31SRichard Henderson memset(fast->table, -1, sizeof_tlb(fast)); 2385c948e31SRichard Henderson memset(desc->vtable, -1, sizeof(desc->vtable)); 23986e1eff8SEmilio G. 
Cota } 24086e1eff8SEmilio G. Cota 2413c3959f2SRichard Henderson static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx, 2423c3959f2SRichard Henderson int64_t now) 243bbf021b0SRichard Henderson { 244bbf021b0SRichard Henderson CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx]; 245bbf021b0SRichard Henderson CPUTLBDescFast *fast = &env_tlb(env)->f[mmu_idx]; 246bbf021b0SRichard Henderson 2473c3959f2SRichard Henderson tlb_mmu_resize_locked(desc, fast, now); 248bbf021b0SRichard Henderson tlb_mmu_flush_locked(desc, fast); 249bbf021b0SRichard Henderson } 250bbf021b0SRichard Henderson 25156e89f76SRichard Henderson static void tlb_mmu_init(CPUTLBDesc *desc, CPUTLBDescFast *fast, int64_t now) 25256e89f76SRichard Henderson { 25356e89f76SRichard Henderson size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS; 25456e89f76SRichard Henderson 25556e89f76SRichard Henderson tlb_window_reset(desc, now, 0); 25656e89f76SRichard Henderson desc->n_used_entries = 0; 25756e89f76SRichard Henderson fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS; 25856e89f76SRichard Henderson fast->table = g_new(CPUTLBEntry, n_entries); 25925d3ec58SRichard Henderson desc->fulltlb = g_new(CPUTLBEntryFull, n_entries); 2603c16304aSRichard Henderson tlb_mmu_flush_locked(desc, fast); 26156e89f76SRichard Henderson } 26256e89f76SRichard Henderson 26386e1eff8SEmilio G. Cota static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx) 26486e1eff8SEmilio G. Cota { 265a40ec84eSRichard Henderson env_tlb(env)->d[mmu_idx].n_used_entries++; 26686e1eff8SEmilio G. Cota } 26786e1eff8SEmilio G. Cota 26886e1eff8SEmilio G. Cota static inline void tlb_n_used_entries_dec(CPUArchState *env, uintptr_t mmu_idx) 26986e1eff8SEmilio G. Cota { 270a40ec84eSRichard Henderson env_tlb(env)->d[mmu_idx].n_used_entries--; 27186e1eff8SEmilio G. Cota } 27286e1eff8SEmilio G. Cota 2735005e253SEmilio G. Cota void tlb_init(CPUState *cpu) 2745005e253SEmilio G. Cota { 27571aec354SEmilio G. Cota CPUArchState *env = cpu->env_ptr; 27656e89f76SRichard Henderson int64_t now = get_clock_realtime(); 27756e89f76SRichard Henderson int i; 27871aec354SEmilio G. Cota 279a40ec84eSRichard Henderson qemu_spin_init(&env_tlb(env)->c.lock); 2803d1523ceSRichard Henderson 2813c16304aSRichard Henderson /* All tlbs are initialized flushed. */ 2823c16304aSRichard Henderson env_tlb(env)->c.dirty = 0; 28386e1eff8SEmilio G. Cota 28456e89f76SRichard Henderson for (i = 0; i < NB_MMU_MODES; i++) { 28556e89f76SRichard Henderson tlb_mmu_init(&env_tlb(env)->d[i], &env_tlb(env)->f[i], now); 28656e89f76SRichard Henderson } 2875005e253SEmilio G. Cota } 2885005e253SEmilio G. Cota 289816d9be5SEmilio G. Cota void tlb_destroy(CPUState *cpu) 290816d9be5SEmilio G. Cota { 291816d9be5SEmilio G. Cota CPUArchState *env = cpu->env_ptr; 292816d9be5SEmilio G. Cota int i; 293816d9be5SEmilio G. Cota 294816d9be5SEmilio G. Cota qemu_spin_destroy(&env_tlb(env)->c.lock); 295816d9be5SEmilio G. Cota for (i = 0; i < NB_MMU_MODES; i++) { 296816d9be5SEmilio G. Cota CPUTLBDesc *desc = &env_tlb(env)->d[i]; 297816d9be5SEmilio G. Cota CPUTLBDescFast *fast = &env_tlb(env)->f[i]; 298816d9be5SEmilio G. Cota 299816d9be5SEmilio G. Cota g_free(fast->table); 30025d3ec58SRichard Henderson g_free(desc->fulltlb); 301816d9be5SEmilio G. Cota } 302816d9be5SEmilio G. Cota } 303816d9be5SEmilio G. 
Cota 304d9bb58e5SYang Zhong /* flush_all_helper: run fn across all cpus 305d9bb58e5SYang Zhong * 306d9bb58e5SYang Zhong * If the wait flag is set then the src cpu's helper will be queued as 307d9bb58e5SYang Zhong * "safe" work and the loop exited creating a synchronisation point 308d9bb58e5SYang Zhong * where all queued work will be finished before execution starts 309d9bb58e5SYang Zhong * again. 310d9bb58e5SYang Zhong */ 311d9bb58e5SYang Zhong static void flush_all_helper(CPUState *src, run_on_cpu_func fn, 312d9bb58e5SYang Zhong run_on_cpu_data d) 313d9bb58e5SYang Zhong { 314d9bb58e5SYang Zhong CPUState *cpu; 315d9bb58e5SYang Zhong 316d9bb58e5SYang Zhong CPU_FOREACH(cpu) { 317d9bb58e5SYang Zhong if (cpu != src) { 318d9bb58e5SYang Zhong async_run_on_cpu(cpu, fn, d); 319d9bb58e5SYang Zhong } 320d9bb58e5SYang Zhong } 321d9bb58e5SYang Zhong } 322d9bb58e5SYang Zhong 323e09de0a2SRichard Henderson void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide) 32483974cf4SEmilio G. Cota { 32583974cf4SEmilio G. Cota CPUState *cpu; 326e09de0a2SRichard Henderson size_t full = 0, part = 0, elide = 0; 32783974cf4SEmilio G. Cota 32883974cf4SEmilio G. Cota CPU_FOREACH(cpu) { 32983974cf4SEmilio G. Cota CPUArchState *env = cpu->env_ptr; 33083974cf4SEmilio G. Cota 331d73415a3SStefan Hajnoczi full += qatomic_read(&env_tlb(env)->c.full_flush_count); 332d73415a3SStefan Hajnoczi part += qatomic_read(&env_tlb(env)->c.part_flush_count); 333d73415a3SStefan Hajnoczi elide += qatomic_read(&env_tlb(env)->c.elide_flush_count); 33483974cf4SEmilio G. Cota } 335e09de0a2SRichard Henderson *pfull = full; 336e09de0a2SRichard Henderson *ppart = part; 337e09de0a2SRichard Henderson *pelide = elide; 33883974cf4SEmilio G. Cota } 339d9bb58e5SYang Zhong 340d9bb58e5SYang Zhong static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data) 341d9bb58e5SYang Zhong { 342d9bb58e5SYang Zhong CPUArchState *env = cpu->env_ptr; 3433d1523ceSRichard Henderson uint16_t asked = data.host_int; 3443d1523ceSRichard Henderson uint16_t all_dirty, work, to_clean; 3453c3959f2SRichard Henderson int64_t now = get_clock_realtime(); 346d9bb58e5SYang Zhong 347d9bb58e5SYang Zhong assert_cpu_is_self(cpu); 348d9bb58e5SYang Zhong 3493d1523ceSRichard Henderson tlb_debug("mmu_idx:0x%04" PRIx16 "\n", asked); 350d9bb58e5SYang Zhong 351a40ec84eSRichard Henderson qemu_spin_lock(&env_tlb(env)->c.lock); 35260a2ad7dSRichard Henderson 353a40ec84eSRichard Henderson all_dirty = env_tlb(env)->c.dirty; 3543d1523ceSRichard Henderson to_clean = asked & all_dirty; 3553d1523ceSRichard Henderson all_dirty &= ~to_clean; 356a40ec84eSRichard Henderson env_tlb(env)->c.dirty = all_dirty; 3573d1523ceSRichard Henderson 3583d1523ceSRichard Henderson for (work = to_clean; work != 0; work &= work - 1) { 3593d1523ceSRichard Henderson int mmu_idx = ctz32(work); 3603c3959f2SRichard Henderson tlb_flush_one_mmuidx_locked(env, mmu_idx, now); 361d9bb58e5SYang Zhong } 3623d1523ceSRichard Henderson 363a40ec84eSRichard Henderson qemu_spin_unlock(&env_tlb(env)->c.lock); 364d9bb58e5SYang Zhong 365a976a99aSRichard Henderson tcg_flush_jmp_cache(cpu); 36664f2674bSRichard Henderson 3673d1523ceSRichard Henderson if (to_clean == ALL_MMUIDX_BITS) { 368d73415a3SStefan Hajnoczi qatomic_set(&env_tlb(env)->c.full_flush_count, 369a40ec84eSRichard Henderson env_tlb(env)->c.full_flush_count + 1); 370e09de0a2SRichard Henderson } else { 371d73415a3SStefan Hajnoczi qatomic_set(&env_tlb(env)->c.part_flush_count, 372a40ec84eSRichard Henderson env_tlb(env)->c.part_flush_count + ctpop16(to_clean)); 
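        /*
         * Worked example, for illustration only: with asked == 0x6 and
         * c.dirty == 0x5 on entry, to_clean == 0x4, so the loop above
         * flushed exactly one mmu_idx (part_flush_count += 1), while the
         * bit that was asked for but already clean (0x2) is recorded as
         * an elided flush just below.
         */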
3733d1523ceSRichard Henderson if (to_clean != asked) { 374d73415a3SStefan Hajnoczi qatomic_set(&env_tlb(env)->c.elide_flush_count, 375a40ec84eSRichard Henderson env_tlb(env)->c.elide_flush_count + 3763d1523ceSRichard Henderson ctpop16(asked & ~to_clean)); 3773d1523ceSRichard Henderson } 37864f2674bSRichard Henderson } 379d9bb58e5SYang Zhong } 380d9bb58e5SYang Zhong 381d9bb58e5SYang Zhong void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap) 382d9bb58e5SYang Zhong { 383d9bb58e5SYang Zhong tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap); 384d9bb58e5SYang Zhong 38564f2674bSRichard Henderson if (cpu->created && !qemu_cpu_is_self(cpu)) { 386d9bb58e5SYang Zhong async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work, 387ab651105SRichard Henderson RUN_ON_CPU_HOST_INT(idxmap)); 388d9bb58e5SYang Zhong } else { 38960a2ad7dSRichard Henderson tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap)); 390d9bb58e5SYang Zhong } 391d9bb58e5SYang Zhong } 392d9bb58e5SYang Zhong 39364f2674bSRichard Henderson void tlb_flush(CPUState *cpu) 39464f2674bSRichard Henderson { 39564f2674bSRichard Henderson tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS); 39664f2674bSRichard Henderson } 39764f2674bSRichard Henderson 398d9bb58e5SYang Zhong void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap) 399d9bb58e5SYang Zhong { 400d9bb58e5SYang Zhong const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work; 401d9bb58e5SYang Zhong 402d9bb58e5SYang Zhong tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap); 403d9bb58e5SYang Zhong 404d9bb58e5SYang Zhong flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap)); 405d9bb58e5SYang Zhong fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap)); 406d9bb58e5SYang Zhong } 407d9bb58e5SYang Zhong 40864f2674bSRichard Henderson void tlb_flush_all_cpus(CPUState *src_cpu) 40964f2674bSRichard Henderson { 41064f2674bSRichard Henderson tlb_flush_by_mmuidx_all_cpus(src_cpu, ALL_MMUIDX_BITS); 41164f2674bSRichard Henderson } 41264f2674bSRichard Henderson 41364f2674bSRichard Henderson void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap) 414d9bb58e5SYang Zhong { 415d9bb58e5SYang Zhong const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work; 416d9bb58e5SYang Zhong 417d9bb58e5SYang Zhong tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap); 418d9bb58e5SYang Zhong 419d9bb58e5SYang Zhong flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap)); 420d9bb58e5SYang Zhong async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap)); 421d9bb58e5SYang Zhong } 422d9bb58e5SYang Zhong 42364f2674bSRichard Henderson void tlb_flush_all_cpus_synced(CPUState *src_cpu) 42464f2674bSRichard Henderson { 42564f2674bSRichard Henderson tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS); 42664f2674bSRichard Henderson } 42764f2674bSRichard Henderson 4283ab6e68cSRichard Henderson static bool tlb_hit_page_mask_anyprot(CPUTLBEntry *tlb_entry, 4293ab6e68cSRichard Henderson target_ulong page, target_ulong mask) 4303ab6e68cSRichard Henderson { 4313ab6e68cSRichard Henderson page &= mask; 4323ab6e68cSRichard Henderson mask &= TARGET_PAGE_MASK | TLB_INVALID_MASK; 4333ab6e68cSRichard Henderson 4343ab6e68cSRichard Henderson return (page == (tlb_entry->addr_read & mask) || 4353ab6e68cSRichard Henderson page == (tlb_addr_write(tlb_entry) & mask) || 4363ab6e68cSRichard Henderson page == (tlb_entry->addr_code & mask)); 4373ab6e68cSRichard Henderson } 4383ab6e68cSRichard Henderson 43968fea038SRichard Henderson static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry, 44068fea038SRichard Henderson target_ulong page) 
441d9bb58e5SYang Zhong { 4423ab6e68cSRichard Henderson return tlb_hit_page_mask_anyprot(tlb_entry, page, -1); 44368fea038SRichard Henderson } 44468fea038SRichard Henderson 4453cea94bbSEmilio G. Cota /** 4463cea94bbSEmilio G. Cota * tlb_entry_is_empty - return true if the entry is not in use 4473cea94bbSEmilio G. Cota * @te: pointer to CPUTLBEntry 4483cea94bbSEmilio G. Cota */ 4493cea94bbSEmilio G. Cota static inline bool tlb_entry_is_empty(const CPUTLBEntry *te) 4503cea94bbSEmilio G. Cota { 4513cea94bbSEmilio G. Cota return te->addr_read == -1 && te->addr_write == -1 && te->addr_code == -1; 4523cea94bbSEmilio G. Cota } 4533cea94bbSEmilio G. Cota 45453d28455SRichard Henderson /* Called with tlb_c.lock held */ 4553ab6e68cSRichard Henderson static bool tlb_flush_entry_mask_locked(CPUTLBEntry *tlb_entry, 4563ab6e68cSRichard Henderson target_ulong page, 4573ab6e68cSRichard Henderson target_ulong mask) 45868fea038SRichard Henderson { 4593ab6e68cSRichard Henderson if (tlb_hit_page_mask_anyprot(tlb_entry, page, mask)) { 460d9bb58e5SYang Zhong memset(tlb_entry, -1, sizeof(*tlb_entry)); 46186e1eff8SEmilio G. Cota return true; 462d9bb58e5SYang Zhong } 46386e1eff8SEmilio G. Cota return false; 464d9bb58e5SYang Zhong } 465d9bb58e5SYang Zhong 4663ab6e68cSRichard Henderson static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry, 46768fea038SRichard Henderson target_ulong page) 46868fea038SRichard Henderson { 4693ab6e68cSRichard Henderson return tlb_flush_entry_mask_locked(tlb_entry, page, -1); 4703ab6e68cSRichard Henderson } 4713ab6e68cSRichard Henderson 4723ab6e68cSRichard Henderson /* Called with tlb_c.lock held */ 4733ab6e68cSRichard Henderson static void tlb_flush_vtlb_page_mask_locked(CPUArchState *env, int mmu_idx, 4743ab6e68cSRichard Henderson target_ulong page, 4753ab6e68cSRichard Henderson target_ulong mask) 4763ab6e68cSRichard Henderson { 477a40ec84eSRichard Henderson CPUTLBDesc *d = &env_tlb(env)->d[mmu_idx]; 47868fea038SRichard Henderson int k; 47971aec354SEmilio G. Cota 48029a0af61SRichard Henderson assert_cpu_is_self(env_cpu(env)); 48168fea038SRichard Henderson for (k = 0; k < CPU_VTLB_SIZE; k++) { 4823ab6e68cSRichard Henderson if (tlb_flush_entry_mask_locked(&d->vtable[k], page, mask)) { 48386e1eff8SEmilio G. Cota tlb_n_used_entries_dec(env, mmu_idx); 48486e1eff8SEmilio G. Cota } 48568fea038SRichard Henderson } 48668fea038SRichard Henderson } 48768fea038SRichard Henderson 4883ab6e68cSRichard Henderson static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx, 4893ab6e68cSRichard Henderson target_ulong page) 4903ab6e68cSRichard Henderson { 4913ab6e68cSRichard Henderson tlb_flush_vtlb_page_mask_locked(env, mmu_idx, page, -1); 4923ab6e68cSRichard Henderson } 4933ab6e68cSRichard Henderson 4941308e026SRichard Henderson static void tlb_flush_page_locked(CPUArchState *env, int midx, 4951308e026SRichard Henderson target_ulong page) 4961308e026SRichard Henderson { 497a40ec84eSRichard Henderson target_ulong lp_addr = env_tlb(env)->d[midx].large_page_addr; 498a40ec84eSRichard Henderson target_ulong lp_mask = env_tlb(env)->d[midx].large_page_mask; 4991308e026SRichard Henderson 5001308e026SRichard Henderson /* Check if we need to flush due to large pages. 
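       (Illustration: if a 2 MiB large page was recorded, lp_mask is
       ~(2 MiB - 1), so any page matching lp_addr under that mask lies
       inside the tracked region; individual entries of that mapping are
       not tracked, hence the full per-mmuidx flush below.)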
*/ 5011308e026SRichard Henderson if ((page & lp_mask) == lp_addr) { 5021308e026SRichard Henderson tlb_debug("forcing full flush midx %d (" 5031308e026SRichard Henderson TARGET_FMT_lx "/" TARGET_FMT_lx ")\n", 5041308e026SRichard Henderson midx, lp_addr, lp_mask); 5053c3959f2SRichard Henderson tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime()); 5061308e026SRichard Henderson } else { 50786e1eff8SEmilio G. Cota if (tlb_flush_entry_locked(tlb_entry(env, midx, page), page)) { 50886e1eff8SEmilio G. Cota tlb_n_used_entries_dec(env, midx); 50986e1eff8SEmilio G. Cota } 5101308e026SRichard Henderson tlb_flush_vtlb_page_locked(env, midx, page); 5111308e026SRichard Henderson } 5121308e026SRichard Henderson } 5131308e026SRichard Henderson 5147b7d00e0SRichard Henderson /** 5157b7d00e0SRichard Henderson * tlb_flush_page_by_mmuidx_async_0: 5167b7d00e0SRichard Henderson * @cpu: cpu on which to flush 5177b7d00e0SRichard Henderson * @addr: page of virtual address to flush 5187b7d00e0SRichard Henderson * @idxmap: set of mmu_idx to flush 5197b7d00e0SRichard Henderson * 5207b7d00e0SRichard Henderson * Helper for tlb_flush_page_by_mmuidx and friends, flush one page 5217b7d00e0SRichard Henderson * at @addr from the tlbs indicated by @idxmap from @cpu. 522d9bb58e5SYang Zhong */ 5237b7d00e0SRichard Henderson static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu, 5247b7d00e0SRichard Henderson target_ulong addr, 5257b7d00e0SRichard Henderson uint16_t idxmap) 526d9bb58e5SYang Zhong { 527d9bb58e5SYang Zhong CPUArchState *env = cpu->env_ptr; 528d9bb58e5SYang Zhong int mmu_idx; 529d9bb58e5SYang Zhong 530d9bb58e5SYang Zhong assert_cpu_is_self(cpu); 531d9bb58e5SYang Zhong 5327b7d00e0SRichard Henderson tlb_debug("page addr:" TARGET_FMT_lx " mmu_map:0x%x\n", addr, idxmap); 533d9bb58e5SYang Zhong 534a40ec84eSRichard Henderson qemu_spin_lock(&env_tlb(env)->c.lock); 535d9bb58e5SYang Zhong for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { 5367b7d00e0SRichard Henderson if ((idxmap >> mmu_idx) & 1) { 5371308e026SRichard Henderson tlb_flush_page_locked(env, mmu_idx, addr); 538d9bb58e5SYang Zhong } 539d9bb58e5SYang Zhong } 540a40ec84eSRichard Henderson qemu_spin_unlock(&env_tlb(env)->c.lock); 541d9bb58e5SYang Zhong 5421d41a79bSRichard Henderson /* 5431d41a79bSRichard Henderson * Discard jump cache entries for any tb which might potentially 5441d41a79bSRichard Henderson * overlap the flushed page, which includes the previous. 5451d41a79bSRichard Henderson */ 5461d41a79bSRichard Henderson tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE); 5471d41a79bSRichard Henderson tb_jmp_cache_clear_page(cpu, addr); 548d9bb58e5SYang Zhong } 549d9bb58e5SYang Zhong 5507b7d00e0SRichard Henderson /** 5517b7d00e0SRichard Henderson * tlb_flush_page_by_mmuidx_async_1: 5527b7d00e0SRichard Henderson * @cpu: cpu on which to flush 5537b7d00e0SRichard Henderson * @data: encoded addr + idxmap 5547b7d00e0SRichard Henderson * 5557b7d00e0SRichard Henderson * Helper for tlb_flush_page_by_mmuidx and friends, called through 5567b7d00e0SRichard Henderson * async_run_on_cpu. The idxmap parameter is encoded in the page 5577b7d00e0SRichard Henderson * offset of the target_ptr field. This limits the set of mmu_idx 5587b7d00e0SRichard Henderson * that can be passed via this method. 
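 *
 * As an illustration (assuming 4 KiB target pages, i.e. TARGET_PAGE_BITS
 * of 12): a page-aligned addr of 0x1000 with idxmap 0x3 is packed by the
 * caller as RUN_ON_CPU_TARGET_PTR(0x1003) and split apart again here
 * using TARGET_PAGE_MASK, which is why this path is only taken when
 * idxmap < TARGET_PAGE_SIZE.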
5597b7d00e0SRichard Henderson */ 5607b7d00e0SRichard Henderson static void tlb_flush_page_by_mmuidx_async_1(CPUState *cpu, 5617b7d00e0SRichard Henderson run_on_cpu_data data) 5627b7d00e0SRichard Henderson { 5637b7d00e0SRichard Henderson target_ulong addr_and_idxmap = (target_ulong) data.target_ptr; 5647b7d00e0SRichard Henderson target_ulong addr = addr_and_idxmap & TARGET_PAGE_MASK; 5657b7d00e0SRichard Henderson uint16_t idxmap = addr_and_idxmap & ~TARGET_PAGE_MASK; 5667b7d00e0SRichard Henderson 5677b7d00e0SRichard Henderson tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap); 5687b7d00e0SRichard Henderson } 5697b7d00e0SRichard Henderson 5707b7d00e0SRichard Henderson typedef struct { 5717b7d00e0SRichard Henderson target_ulong addr; 5727b7d00e0SRichard Henderson uint16_t idxmap; 5737b7d00e0SRichard Henderson } TLBFlushPageByMMUIdxData; 5747b7d00e0SRichard Henderson 5757b7d00e0SRichard Henderson /** 5767b7d00e0SRichard Henderson * tlb_flush_page_by_mmuidx_async_2: 5777b7d00e0SRichard Henderson * @cpu: cpu on which to flush 5787b7d00e0SRichard Henderson * @data: allocated addr + idxmap 5797b7d00e0SRichard Henderson * 5807b7d00e0SRichard Henderson * Helper for tlb_flush_page_by_mmuidx and friends, called through 5817b7d00e0SRichard Henderson * async_run_on_cpu. The addr+idxmap parameters are stored in a 5827b7d00e0SRichard Henderson * TLBFlushPageByMMUIdxData structure that has been allocated 5837b7d00e0SRichard Henderson * specifically for this helper. Free the structure when done. 5847b7d00e0SRichard Henderson */ 5857b7d00e0SRichard Henderson static void tlb_flush_page_by_mmuidx_async_2(CPUState *cpu, 5867b7d00e0SRichard Henderson run_on_cpu_data data) 5877b7d00e0SRichard Henderson { 5887b7d00e0SRichard Henderson TLBFlushPageByMMUIdxData *d = data.host_ptr; 5897b7d00e0SRichard Henderson 5907b7d00e0SRichard Henderson tlb_flush_page_by_mmuidx_async_0(cpu, d->addr, d->idxmap); 5917b7d00e0SRichard Henderson g_free(d); 5927b7d00e0SRichard Henderson } 5937b7d00e0SRichard Henderson 594d9bb58e5SYang Zhong void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap) 595d9bb58e5SYang Zhong { 596d9bb58e5SYang Zhong tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap); 597d9bb58e5SYang Zhong 598d9bb58e5SYang Zhong /* This should already be page aligned */ 5997b7d00e0SRichard Henderson addr &= TARGET_PAGE_MASK; 600d9bb58e5SYang Zhong 6017b7d00e0SRichard Henderson if (qemu_cpu_is_self(cpu)) { 6027b7d00e0SRichard Henderson tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap); 6037b7d00e0SRichard Henderson } else if (idxmap < TARGET_PAGE_SIZE) { 6047b7d00e0SRichard Henderson /* 6057b7d00e0SRichard Henderson * Most targets have only a few mmu_idx. In the case where 6067b7d00e0SRichard Henderson * we can stuff idxmap into the low TARGET_PAGE_BITS, avoid 6077b7d00e0SRichard Henderson * allocating memory for this operation. 6087b7d00e0SRichard Henderson */ 6097b7d00e0SRichard Henderson async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_1, 6107b7d00e0SRichard Henderson RUN_ON_CPU_TARGET_PTR(addr | idxmap)); 611d9bb58e5SYang Zhong } else { 6127b7d00e0SRichard Henderson TLBFlushPageByMMUIdxData *d = g_new(TLBFlushPageByMMUIdxData, 1); 6137b7d00e0SRichard Henderson 6147b7d00e0SRichard Henderson /* Otherwise allocate a structure, freed by the worker. 
*/ 6157b7d00e0SRichard Henderson d->addr = addr; 6167b7d00e0SRichard Henderson d->idxmap = idxmap; 6177b7d00e0SRichard Henderson async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_2, 6187b7d00e0SRichard Henderson RUN_ON_CPU_HOST_PTR(d)); 619d9bb58e5SYang Zhong } 620d9bb58e5SYang Zhong } 621d9bb58e5SYang Zhong 622f8144c6cSRichard Henderson void tlb_flush_page(CPUState *cpu, target_ulong addr) 623f8144c6cSRichard Henderson { 624f8144c6cSRichard Henderson tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS); 625f8144c6cSRichard Henderson } 626f8144c6cSRichard Henderson 627d9bb58e5SYang Zhong void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr, 628d9bb58e5SYang Zhong uint16_t idxmap) 629d9bb58e5SYang Zhong { 630d9bb58e5SYang Zhong tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap); 631d9bb58e5SYang Zhong 632d9bb58e5SYang Zhong /* This should already be page aligned */ 6337b7d00e0SRichard Henderson addr &= TARGET_PAGE_MASK; 634d9bb58e5SYang Zhong 6357b7d00e0SRichard Henderson /* 6367b7d00e0SRichard Henderson * Allocate memory to hold addr+idxmap only when needed. 6377b7d00e0SRichard Henderson * See tlb_flush_page_by_mmuidx for details. 6387b7d00e0SRichard Henderson */ 6397b7d00e0SRichard Henderson if (idxmap < TARGET_PAGE_SIZE) { 6407b7d00e0SRichard Henderson flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1, 6417b7d00e0SRichard Henderson RUN_ON_CPU_TARGET_PTR(addr | idxmap)); 6427b7d00e0SRichard Henderson } else { 6437b7d00e0SRichard Henderson CPUState *dst_cpu; 6447b7d00e0SRichard Henderson 6457b7d00e0SRichard Henderson /* Allocate a separate data block for each destination cpu. */ 6467b7d00e0SRichard Henderson CPU_FOREACH(dst_cpu) { 6477b7d00e0SRichard Henderson if (dst_cpu != src_cpu) { 6487b7d00e0SRichard Henderson TLBFlushPageByMMUIdxData *d 6497b7d00e0SRichard Henderson = g_new(TLBFlushPageByMMUIdxData, 1); 6507b7d00e0SRichard Henderson 6517b7d00e0SRichard Henderson d->addr = addr; 6527b7d00e0SRichard Henderson d->idxmap = idxmap; 6537b7d00e0SRichard Henderson async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2, 6547b7d00e0SRichard Henderson RUN_ON_CPU_HOST_PTR(d)); 6557b7d00e0SRichard Henderson } 6567b7d00e0SRichard Henderson } 6577b7d00e0SRichard Henderson } 6587b7d00e0SRichard Henderson 6597b7d00e0SRichard Henderson tlb_flush_page_by_mmuidx_async_0(src_cpu, addr, idxmap); 660d9bb58e5SYang Zhong } 661d9bb58e5SYang Zhong 662f8144c6cSRichard Henderson void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr) 663f8144c6cSRichard Henderson { 664f8144c6cSRichard Henderson tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS); 665f8144c6cSRichard Henderson } 666f8144c6cSRichard Henderson 667d9bb58e5SYang Zhong void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu, 668d9bb58e5SYang Zhong target_ulong addr, 669d9bb58e5SYang Zhong uint16_t idxmap) 670d9bb58e5SYang Zhong { 671d9bb58e5SYang Zhong tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap); 672d9bb58e5SYang Zhong 673d9bb58e5SYang Zhong /* This should already be page aligned */ 6747b7d00e0SRichard Henderson addr &= TARGET_PAGE_MASK; 675d9bb58e5SYang Zhong 6767b7d00e0SRichard Henderson /* 6777b7d00e0SRichard Henderson * Allocate memory to hold addr+idxmap only when needed. 6787b7d00e0SRichard Henderson * See tlb_flush_page_by_mmuidx for details. 
6797b7d00e0SRichard Henderson */ 6807b7d00e0SRichard Henderson if (idxmap < TARGET_PAGE_SIZE) { 6817b7d00e0SRichard Henderson flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1, 6827b7d00e0SRichard Henderson RUN_ON_CPU_TARGET_PTR(addr | idxmap)); 6837b7d00e0SRichard Henderson async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_1, 6847b7d00e0SRichard Henderson RUN_ON_CPU_TARGET_PTR(addr | idxmap)); 6857b7d00e0SRichard Henderson } else { 6867b7d00e0SRichard Henderson CPUState *dst_cpu; 6877b7d00e0SRichard Henderson TLBFlushPageByMMUIdxData *d; 6887b7d00e0SRichard Henderson 6897b7d00e0SRichard Henderson /* Allocate a separate data block for each destination cpu. */ 6907b7d00e0SRichard Henderson CPU_FOREACH(dst_cpu) { 6917b7d00e0SRichard Henderson if (dst_cpu != src_cpu) { 6927b7d00e0SRichard Henderson d = g_new(TLBFlushPageByMMUIdxData, 1); 6937b7d00e0SRichard Henderson d->addr = addr; 6947b7d00e0SRichard Henderson d->idxmap = idxmap; 6957b7d00e0SRichard Henderson async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2, 6967b7d00e0SRichard Henderson RUN_ON_CPU_HOST_PTR(d)); 6977b7d00e0SRichard Henderson } 6987b7d00e0SRichard Henderson } 6997b7d00e0SRichard Henderson 7007b7d00e0SRichard Henderson d = g_new(TLBFlushPageByMMUIdxData, 1); 7017b7d00e0SRichard Henderson d->addr = addr; 7027b7d00e0SRichard Henderson d->idxmap = idxmap; 7037b7d00e0SRichard Henderson async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_2, 7047b7d00e0SRichard Henderson RUN_ON_CPU_HOST_PTR(d)); 7057b7d00e0SRichard Henderson } 706d9bb58e5SYang Zhong } 707d9bb58e5SYang Zhong 708f8144c6cSRichard Henderson void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr) 709d9bb58e5SYang Zhong { 710f8144c6cSRichard Henderson tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS); 711d9bb58e5SYang Zhong } 712d9bb58e5SYang Zhong 7133c4ddec1SRichard Henderson static void tlb_flush_range_locked(CPUArchState *env, int midx, 7143c4ddec1SRichard Henderson target_ulong addr, target_ulong len, 7153c4ddec1SRichard Henderson unsigned bits) 7163ab6e68cSRichard Henderson { 7173ab6e68cSRichard Henderson CPUTLBDesc *d = &env_tlb(env)->d[midx]; 7183ab6e68cSRichard Henderson CPUTLBDescFast *f = &env_tlb(env)->f[midx]; 7193ab6e68cSRichard Henderson target_ulong mask = MAKE_64BIT_MASK(0, bits); 7203ab6e68cSRichard Henderson 7213ab6e68cSRichard Henderson /* 7223ab6e68cSRichard Henderson * If @bits is smaller than the tlb size, there may be multiple entries 7233ab6e68cSRichard Henderson * within the TLB; otherwise all addresses that match under @mask hit 7243ab6e68cSRichard Henderson * the same TLB entry. 7253ab6e68cSRichard Henderson * TODO: Perhaps allow bits to be a few bits less than the size. 7263ab6e68cSRichard Henderson * For now, just flush the entire TLB. 7273c4ddec1SRichard Henderson * 7283c4ddec1SRichard Henderson * If @len is larger than the tlb size, then it will take longer to 7293c4ddec1SRichard Henderson * test all of the entries in the TLB than it will to flush it all. 
7303ab6e68cSRichard Henderson */ 7313c4ddec1SRichard Henderson if (mask < f->mask || len > f->mask) { 7323ab6e68cSRichard Henderson tlb_debug("forcing full flush midx %d (" 7333c4ddec1SRichard Henderson TARGET_FMT_lx "/" TARGET_FMT_lx "+" TARGET_FMT_lx ")\n", 7343c4ddec1SRichard Henderson midx, addr, mask, len); 7353ab6e68cSRichard Henderson tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime()); 7363ab6e68cSRichard Henderson return; 7373ab6e68cSRichard Henderson } 7383ab6e68cSRichard Henderson 7393c4ddec1SRichard Henderson /* 7403c4ddec1SRichard Henderson * Check if we need to flush due to large pages. 7413c4ddec1SRichard Henderson * Because large_page_mask contains all 1's from the msb, 7423c4ddec1SRichard Henderson * we only need to test the end of the range. 7433c4ddec1SRichard Henderson */ 7443c4ddec1SRichard Henderson if (((addr + len - 1) & d->large_page_mask) == d->large_page_addr) { 7453ab6e68cSRichard Henderson tlb_debug("forcing full flush midx %d (" 7463ab6e68cSRichard Henderson TARGET_FMT_lx "/" TARGET_FMT_lx ")\n", 7473ab6e68cSRichard Henderson midx, d->large_page_addr, d->large_page_mask); 7483ab6e68cSRichard Henderson tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime()); 7493ab6e68cSRichard Henderson return; 7503ab6e68cSRichard Henderson } 7513ab6e68cSRichard Henderson 7523c4ddec1SRichard Henderson for (target_ulong i = 0; i < len; i += TARGET_PAGE_SIZE) { 7533c4ddec1SRichard Henderson target_ulong page = addr + i; 7543c4ddec1SRichard Henderson CPUTLBEntry *entry = tlb_entry(env, midx, page); 7553c4ddec1SRichard Henderson 7563c4ddec1SRichard Henderson if (tlb_flush_entry_mask_locked(entry, page, mask)) { 7573ab6e68cSRichard Henderson tlb_n_used_entries_dec(env, midx); 7583ab6e68cSRichard Henderson } 7593ab6e68cSRichard Henderson tlb_flush_vtlb_page_mask_locked(env, midx, page, mask); 7603ab6e68cSRichard Henderson } 7613c4ddec1SRichard Henderson } 7623ab6e68cSRichard Henderson 7633ab6e68cSRichard Henderson typedef struct { 7643ab6e68cSRichard Henderson target_ulong addr; 7653c4ddec1SRichard Henderson target_ulong len; 7663ab6e68cSRichard Henderson uint16_t idxmap; 7673ab6e68cSRichard Henderson uint16_t bits; 7683960a59fSRichard Henderson } TLBFlushRangeData; 7693ab6e68cSRichard Henderson 7706be48e45SRichard Henderson static void tlb_flush_range_by_mmuidx_async_0(CPUState *cpu, 7713960a59fSRichard Henderson TLBFlushRangeData d) 7723ab6e68cSRichard Henderson { 7733ab6e68cSRichard Henderson CPUArchState *env = cpu->env_ptr; 7743ab6e68cSRichard Henderson int mmu_idx; 7753ab6e68cSRichard Henderson 7763ab6e68cSRichard Henderson assert_cpu_is_self(cpu); 7773ab6e68cSRichard Henderson 7783c4ddec1SRichard Henderson tlb_debug("range:" TARGET_FMT_lx "/%u+" TARGET_FMT_lx " mmu_map:0x%x\n", 7793c4ddec1SRichard Henderson d.addr, d.bits, d.len, d.idxmap); 7803ab6e68cSRichard Henderson 7813ab6e68cSRichard Henderson qemu_spin_lock(&env_tlb(env)->c.lock); 7823ab6e68cSRichard Henderson for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { 7833ab6e68cSRichard Henderson if ((d.idxmap >> mmu_idx) & 1) { 7843c4ddec1SRichard Henderson tlb_flush_range_locked(env, mmu_idx, d.addr, d.len, d.bits); 7853ab6e68cSRichard Henderson } 7863ab6e68cSRichard Henderson } 7873ab6e68cSRichard Henderson qemu_spin_unlock(&env_tlb(env)->c.lock); 7883ab6e68cSRichard Henderson 789cfc2a2d6SIdan Horowitz /* 790cfc2a2d6SIdan Horowitz * If the length is larger than the jump cache size, then it will take 791cfc2a2d6SIdan Horowitz * longer to clear each entry individually than it will to clear it all. 
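 * (For a rough sense of scale, assuming 4 KiB target pages and a
 * 4096-entry jump cache, any flushed range of 16 MiB or more takes the
 * full-flush path below.)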
792cfc2a2d6SIdan Horowitz */ 793cfc2a2d6SIdan Horowitz if (d.len >= (TARGET_PAGE_SIZE * TB_JMP_CACHE_SIZE)) { 794a976a99aSRichard Henderson tcg_flush_jmp_cache(cpu); 795cfc2a2d6SIdan Horowitz return; 796cfc2a2d6SIdan Horowitz } 797cfc2a2d6SIdan Horowitz 7981d41a79bSRichard Henderson /* 7991d41a79bSRichard Henderson * Discard jump cache entries for any tb which might potentially 8001d41a79bSRichard Henderson * overlap the flushed pages, which includes the previous. 8011d41a79bSRichard Henderson */ 8021d41a79bSRichard Henderson d.addr -= TARGET_PAGE_SIZE; 8031d41a79bSRichard Henderson for (target_ulong i = 0, n = d.len / TARGET_PAGE_SIZE + 1; i < n; i++) { 8041d41a79bSRichard Henderson tb_jmp_cache_clear_page(cpu, d.addr); 8051d41a79bSRichard Henderson d.addr += TARGET_PAGE_SIZE; 8063c4ddec1SRichard Henderson } 8073ab6e68cSRichard Henderson } 8083ab6e68cSRichard Henderson 809206a583dSRichard Henderson static void tlb_flush_range_by_mmuidx_async_1(CPUState *cpu, 8103ab6e68cSRichard Henderson run_on_cpu_data data) 8113ab6e68cSRichard Henderson { 8123960a59fSRichard Henderson TLBFlushRangeData *d = data.host_ptr; 8136be48e45SRichard Henderson tlb_flush_range_by_mmuidx_async_0(cpu, *d); 8143ab6e68cSRichard Henderson g_free(d); 8153ab6e68cSRichard Henderson } 8163ab6e68cSRichard Henderson 817e5b1921bSRichard Henderson void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr, 818e5b1921bSRichard Henderson target_ulong len, uint16_t idxmap, 819e5b1921bSRichard Henderson unsigned bits) 8203ab6e68cSRichard Henderson { 8213960a59fSRichard Henderson TLBFlushRangeData d; 8223ab6e68cSRichard Henderson 823e5b1921bSRichard Henderson /* 824e5b1921bSRichard Henderson * If all bits are significant, and len is small, 825e5b1921bSRichard Henderson * this devolves to tlb_flush_page. 826e5b1921bSRichard Henderson */ 827e5b1921bSRichard Henderson if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) { 8283ab6e68cSRichard Henderson tlb_flush_page_by_mmuidx(cpu, addr, idxmap); 8293ab6e68cSRichard Henderson return; 8303ab6e68cSRichard Henderson } 8313ab6e68cSRichard Henderson /* If no page bits are significant, this devolves to tlb_flush. */ 8323ab6e68cSRichard Henderson if (bits < TARGET_PAGE_BITS) { 8333ab6e68cSRichard Henderson tlb_flush_by_mmuidx(cpu, idxmap); 8343ab6e68cSRichard Henderson return; 8353ab6e68cSRichard Henderson } 8363ab6e68cSRichard Henderson 8373ab6e68cSRichard Henderson /* This should already be page aligned */ 8383ab6e68cSRichard Henderson d.addr = addr & TARGET_PAGE_MASK; 839e5b1921bSRichard Henderson d.len = len; 8403ab6e68cSRichard Henderson d.idxmap = idxmap; 8413ab6e68cSRichard Henderson d.bits = bits; 8423ab6e68cSRichard Henderson 8433ab6e68cSRichard Henderson if (qemu_cpu_is_self(cpu)) { 8446be48e45SRichard Henderson tlb_flush_range_by_mmuidx_async_0(cpu, d); 8453ab6e68cSRichard Henderson } else { 8463ab6e68cSRichard Henderson /* Otherwise allocate a structure, freed by the worker. 
*/ 8473960a59fSRichard Henderson TLBFlushRangeData *p = g_memdup(&d, sizeof(d)); 848206a583dSRichard Henderson async_run_on_cpu(cpu, tlb_flush_range_by_mmuidx_async_1, 8493ab6e68cSRichard Henderson RUN_ON_CPU_HOST_PTR(p)); 8503ab6e68cSRichard Henderson } 8513ab6e68cSRichard Henderson } 8523ab6e68cSRichard Henderson 853e5b1921bSRichard Henderson void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr, 854e5b1921bSRichard Henderson uint16_t idxmap, unsigned bits) 855e5b1921bSRichard Henderson { 856e5b1921bSRichard Henderson tlb_flush_range_by_mmuidx(cpu, addr, TARGET_PAGE_SIZE, idxmap, bits); 857e5b1921bSRichard Henderson } 858e5b1921bSRichard Henderson 859600b819fSRichard Henderson void tlb_flush_range_by_mmuidx_all_cpus(CPUState *src_cpu, 860600b819fSRichard Henderson target_ulong addr, target_ulong len, 861600b819fSRichard Henderson uint16_t idxmap, unsigned bits) 8623ab6e68cSRichard Henderson { 8633960a59fSRichard Henderson TLBFlushRangeData d; 864d34e4d1aSRichard Henderson CPUState *dst_cpu; 8653ab6e68cSRichard Henderson 866600b819fSRichard Henderson /* 867600b819fSRichard Henderson * If all bits are significant, and len is small, 868600b819fSRichard Henderson * this devolves to tlb_flush_page. 869600b819fSRichard Henderson */ 870600b819fSRichard Henderson if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) { 8713ab6e68cSRichard Henderson tlb_flush_page_by_mmuidx_all_cpus(src_cpu, addr, idxmap); 8723ab6e68cSRichard Henderson return; 8733ab6e68cSRichard Henderson } 8743ab6e68cSRichard Henderson /* If no page bits are significant, this devolves to tlb_flush. */ 8753ab6e68cSRichard Henderson if (bits < TARGET_PAGE_BITS) { 8763ab6e68cSRichard Henderson tlb_flush_by_mmuidx_all_cpus(src_cpu, idxmap); 8773ab6e68cSRichard Henderson return; 8783ab6e68cSRichard Henderson } 8793ab6e68cSRichard Henderson 8803ab6e68cSRichard Henderson /* This should already be page aligned */ 8813ab6e68cSRichard Henderson d.addr = addr & TARGET_PAGE_MASK; 882600b819fSRichard Henderson d.len = len; 8833ab6e68cSRichard Henderson d.idxmap = idxmap; 8843ab6e68cSRichard Henderson d.bits = bits; 8853ab6e68cSRichard Henderson 8863ab6e68cSRichard Henderson /* Allocate a separate data block for each destination cpu. 
*/ 8873ab6e68cSRichard Henderson CPU_FOREACH(dst_cpu) { 8883ab6e68cSRichard Henderson if (dst_cpu != src_cpu) { 8893960a59fSRichard Henderson TLBFlushRangeData *p = g_memdup(&d, sizeof(d)); 8903ab6e68cSRichard Henderson async_run_on_cpu(dst_cpu, 891206a583dSRichard Henderson tlb_flush_range_by_mmuidx_async_1, 8923ab6e68cSRichard Henderson RUN_ON_CPU_HOST_PTR(p)); 8933ab6e68cSRichard Henderson } 8943ab6e68cSRichard Henderson } 8953ab6e68cSRichard Henderson 8966be48e45SRichard Henderson tlb_flush_range_by_mmuidx_async_0(src_cpu, d); 8973ab6e68cSRichard Henderson } 8983ab6e68cSRichard Henderson 899600b819fSRichard Henderson void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu, 900600b819fSRichard Henderson target_ulong addr, 901600b819fSRichard Henderson uint16_t idxmap, unsigned bits) 902600b819fSRichard Henderson { 903600b819fSRichard Henderson tlb_flush_range_by_mmuidx_all_cpus(src_cpu, addr, TARGET_PAGE_SIZE, 904600b819fSRichard Henderson idxmap, bits); 905600b819fSRichard Henderson } 906600b819fSRichard Henderson 907c13b27d8SRichard Henderson void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu, 9083ab6e68cSRichard Henderson target_ulong addr, 909c13b27d8SRichard Henderson target_ulong len, 9103ab6e68cSRichard Henderson uint16_t idxmap, 9113ab6e68cSRichard Henderson unsigned bits) 9123ab6e68cSRichard Henderson { 913d34e4d1aSRichard Henderson TLBFlushRangeData d, *p; 914d34e4d1aSRichard Henderson CPUState *dst_cpu; 9153ab6e68cSRichard Henderson 916c13b27d8SRichard Henderson /* 917c13b27d8SRichard Henderson * If all bits are significant, and len is small, 918c13b27d8SRichard Henderson * this devolves to tlb_flush_page. 919c13b27d8SRichard Henderson */ 920c13b27d8SRichard Henderson if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) { 9213ab6e68cSRichard Henderson tlb_flush_page_by_mmuidx_all_cpus_synced(src_cpu, addr, idxmap); 9223ab6e68cSRichard Henderson return; 9233ab6e68cSRichard Henderson } 9243ab6e68cSRichard Henderson /* If no page bits are significant, this devolves to tlb_flush. */ 9253ab6e68cSRichard Henderson if (bits < TARGET_PAGE_BITS) { 9263ab6e68cSRichard Henderson tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, idxmap); 9273ab6e68cSRichard Henderson return; 9283ab6e68cSRichard Henderson } 9293ab6e68cSRichard Henderson 9303ab6e68cSRichard Henderson /* This should already be page aligned */ 9313ab6e68cSRichard Henderson d.addr = addr & TARGET_PAGE_MASK; 932c13b27d8SRichard Henderson d.len = len; 9333ab6e68cSRichard Henderson d.idxmap = idxmap; 9343ab6e68cSRichard Henderson d.bits = bits; 9353ab6e68cSRichard Henderson 9363ab6e68cSRichard Henderson /* Allocate a separate data block for each destination cpu. 
*/ 9373ab6e68cSRichard Henderson CPU_FOREACH(dst_cpu) { 9383ab6e68cSRichard Henderson if (dst_cpu != src_cpu) { 9396d244788SRichard Henderson p = g_memdup(&d, sizeof(d)); 940206a583dSRichard Henderson async_run_on_cpu(dst_cpu, tlb_flush_range_by_mmuidx_async_1, 9413ab6e68cSRichard Henderson RUN_ON_CPU_HOST_PTR(p)); 9423ab6e68cSRichard Henderson } 9433ab6e68cSRichard Henderson } 9443ab6e68cSRichard Henderson 9456d244788SRichard Henderson p = g_memdup(&d, sizeof(d)); 946206a583dSRichard Henderson async_safe_run_on_cpu(src_cpu, tlb_flush_range_by_mmuidx_async_1, 9473ab6e68cSRichard Henderson RUN_ON_CPU_HOST_PTR(p)); 9483ab6e68cSRichard Henderson } 9493ab6e68cSRichard Henderson 950c13b27d8SRichard Henderson void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu, 951c13b27d8SRichard Henderson target_ulong addr, 952c13b27d8SRichard Henderson uint16_t idxmap, 953c13b27d8SRichard Henderson unsigned bits) 954c13b27d8SRichard Henderson { 955c13b27d8SRichard Henderson tlb_flush_range_by_mmuidx_all_cpus_synced(src_cpu, addr, TARGET_PAGE_SIZE, 956c13b27d8SRichard Henderson idxmap, bits); 957c13b27d8SRichard Henderson } 958c13b27d8SRichard Henderson 959d9bb58e5SYang Zhong /* update the TLBs so that writes to code in the virtual page 'addr' 960d9bb58e5SYang Zhong can be detected */ 961d9bb58e5SYang Zhong void tlb_protect_code(ram_addr_t ram_addr) 962d9bb58e5SYang Zhong { 96393b99616SRichard Henderson cpu_physical_memory_test_and_clear_dirty(ram_addr & TARGET_PAGE_MASK, 96493b99616SRichard Henderson TARGET_PAGE_SIZE, 965d9bb58e5SYang Zhong DIRTY_MEMORY_CODE); 966d9bb58e5SYang Zhong } 967d9bb58e5SYang Zhong 968d9bb58e5SYang Zhong /* update the TLB so that writes in physical page 'phys_addr' are no longer 969d9bb58e5SYang Zhong tested for self modifying code */ 970d9bb58e5SYang Zhong void tlb_unprotect_code(ram_addr_t ram_addr) 971d9bb58e5SYang Zhong { 972d9bb58e5SYang Zhong cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE); 973d9bb58e5SYang Zhong } 974d9bb58e5SYang Zhong 975d9bb58e5SYang Zhong 976d9bb58e5SYang Zhong /* 977d9bb58e5SYang Zhong * Dirty write flag handling 978d9bb58e5SYang Zhong * 979d9bb58e5SYang Zhong * When the TCG code writes to a location it looks up the address in 980d9bb58e5SYang Zhong * the TLB and uses that data to compute the final address. If any of 981d9bb58e5SYang Zhong * the lower bits of the address are set then the slow path is forced. 982d9bb58e5SYang Zhong * There are a number of reasons to do this but for normal RAM the 983d9bb58e5SYang Zhong * most usual is detecting writes to code regions which may invalidate 984d9bb58e5SYang Zhong * generated code. 985d9bb58e5SYang Zhong * 98671aec354SEmilio G. Cota * Other vCPUs might be reading their TLBs during guest execution, so we update 987d73415a3SStefan Hajnoczi * te->addr_write with qatomic_set. We don't need to worry about this for 98871aec354SEmilio G. Cota * oversized guests as MTTCG is disabled for them. 989d9bb58e5SYang Zhong * 99053d28455SRichard Henderson * Called with tlb_c.lock held. 991d9bb58e5SYang Zhong */ 99271aec354SEmilio G. Cota static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry, 99371aec354SEmilio G. 
Cota uintptr_t start, uintptr_t length) 994d9bb58e5SYang Zhong { 995d9bb58e5SYang Zhong uintptr_t addr = tlb_entry->addr_write; 996d9bb58e5SYang Zhong 9977b0d792cSRichard Henderson if ((addr & (TLB_INVALID_MASK | TLB_MMIO | 9987b0d792cSRichard Henderson TLB_DISCARD_WRITE | TLB_NOTDIRTY)) == 0) { 999d9bb58e5SYang Zhong addr &= TARGET_PAGE_MASK; 1000d9bb58e5SYang Zhong addr += tlb_entry->addend; 1001d9bb58e5SYang Zhong if ((addr - start) < length) { 1002d9bb58e5SYang Zhong #if TCG_OVERSIZED_GUEST 100371aec354SEmilio G. Cota tlb_entry->addr_write |= TLB_NOTDIRTY; 1004d9bb58e5SYang Zhong #else 1005d73415a3SStefan Hajnoczi qatomic_set(&tlb_entry->addr_write, 100671aec354SEmilio G. Cota tlb_entry->addr_write | TLB_NOTDIRTY); 1007d9bb58e5SYang Zhong #endif 1008d9bb58e5SYang Zhong } 100971aec354SEmilio G. Cota } 101071aec354SEmilio G. Cota } 101171aec354SEmilio G. Cota 101271aec354SEmilio G. Cota /* 101353d28455SRichard Henderson * Called with tlb_c.lock held. 101471aec354SEmilio G. Cota * Called only from the vCPU context, i.e. the TLB's owner thread. 101571aec354SEmilio G. Cota */ 101671aec354SEmilio G. Cota static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s) 101771aec354SEmilio G. Cota { 101871aec354SEmilio G. Cota *d = *s; 101971aec354SEmilio G. Cota } 1020d9bb58e5SYang Zhong 1021d9bb58e5SYang Zhong /* This is a cross vCPU call (i.e. another vCPU resetting the flags of 102271aec354SEmilio G. Cota * the target vCPU). 102353d28455SRichard Henderson * We must take tlb_c.lock to avoid racing with another vCPU update. The only 102471aec354SEmilio G. Cota * thing actually updated is the target TLB entry ->addr_write flags. 1025d9bb58e5SYang Zhong */ 1026d9bb58e5SYang Zhong void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length) 1027d9bb58e5SYang Zhong { 1028d9bb58e5SYang Zhong CPUArchState *env; 1029d9bb58e5SYang Zhong 1030d9bb58e5SYang Zhong int mmu_idx; 1031d9bb58e5SYang Zhong 1032d9bb58e5SYang Zhong env = cpu->env_ptr; 1033a40ec84eSRichard Henderson qemu_spin_lock(&env_tlb(env)->c.lock); 1034d9bb58e5SYang Zhong for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { 1035d9bb58e5SYang Zhong unsigned int i; 1036722a1c1eSRichard Henderson unsigned int n = tlb_n_entries(&env_tlb(env)->f[mmu_idx]); 1037d9bb58e5SYang Zhong 103886e1eff8SEmilio G. Cota for (i = 0; i < n; i++) { 1039a40ec84eSRichard Henderson tlb_reset_dirty_range_locked(&env_tlb(env)->f[mmu_idx].table[i], 1040a40ec84eSRichard Henderson start1, length); 1041d9bb58e5SYang Zhong } 1042d9bb58e5SYang Zhong 1043d9bb58e5SYang Zhong for (i = 0; i < CPU_VTLB_SIZE; i++) { 1044a40ec84eSRichard Henderson tlb_reset_dirty_range_locked(&env_tlb(env)->d[mmu_idx].vtable[i], 1045a40ec84eSRichard Henderson start1, length); 1046d9bb58e5SYang Zhong } 1047d9bb58e5SYang Zhong } 1048a40ec84eSRichard Henderson qemu_spin_unlock(&env_tlb(env)->c.lock); 1049d9bb58e5SYang Zhong } 1050d9bb58e5SYang Zhong 105153d28455SRichard Henderson /* Called with tlb_c.lock held */ 105271aec354SEmilio G. Cota static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry, 105371aec354SEmilio G. 
Cota target_ulong vaddr) 1054d9bb58e5SYang Zhong { 1055d9bb58e5SYang Zhong if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) { 1056d9bb58e5SYang Zhong tlb_entry->addr_write = vaddr; 1057d9bb58e5SYang Zhong } 1058d9bb58e5SYang Zhong } 1059d9bb58e5SYang Zhong 1060d9bb58e5SYang Zhong /* update the TLB corresponding to virtual page vaddr 1061d9bb58e5SYang Zhong so that it is no longer dirty */ 1062d9bb58e5SYang Zhong void tlb_set_dirty(CPUState *cpu, target_ulong vaddr) 1063d9bb58e5SYang Zhong { 1064d9bb58e5SYang Zhong CPUArchState *env = cpu->env_ptr; 1065d9bb58e5SYang Zhong int mmu_idx; 1066d9bb58e5SYang Zhong 1067d9bb58e5SYang Zhong assert_cpu_is_self(cpu); 1068d9bb58e5SYang Zhong 1069d9bb58e5SYang Zhong vaddr &= TARGET_PAGE_MASK; 1070a40ec84eSRichard Henderson qemu_spin_lock(&env_tlb(env)->c.lock); 1071d9bb58e5SYang Zhong for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { 1072383beda9SRichard Henderson tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, vaddr), vaddr); 1073d9bb58e5SYang Zhong } 1074d9bb58e5SYang Zhong 1075d9bb58e5SYang Zhong for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { 1076d9bb58e5SYang Zhong int k; 1077d9bb58e5SYang Zhong for (k = 0; k < CPU_VTLB_SIZE; k++) { 1078a40ec84eSRichard Henderson tlb_set_dirty1_locked(&env_tlb(env)->d[mmu_idx].vtable[k], vaddr); 1079d9bb58e5SYang Zhong } 1080d9bb58e5SYang Zhong } 1081a40ec84eSRichard Henderson qemu_spin_unlock(&env_tlb(env)->c.lock); 1082d9bb58e5SYang Zhong } 1083d9bb58e5SYang Zhong 1084d9bb58e5SYang Zhong /* Our TLB does not support large pages, so remember the area covered by 1085d9bb58e5SYang Zhong large pages and trigger a full TLB flush if these are invalidated. */ 10861308e026SRichard Henderson static void tlb_add_large_page(CPUArchState *env, int mmu_idx, 10871308e026SRichard Henderson target_ulong vaddr, target_ulong size) 1088d9bb58e5SYang Zhong { 1089a40ec84eSRichard Henderson target_ulong lp_addr = env_tlb(env)->d[mmu_idx].large_page_addr; 10901308e026SRichard Henderson target_ulong lp_mask = ~(size - 1); 1091d9bb58e5SYang Zhong 10921308e026SRichard Henderson if (lp_addr == (target_ulong)-1) { 10931308e026SRichard Henderson /* No previous large page. */ 10941308e026SRichard Henderson lp_addr = vaddr; 10951308e026SRichard Henderson } else { 1096d9bb58e5SYang Zhong /* Extend the existing region to include the new page. 10971308e026SRichard Henderson This is a compromise between unnecessary flushes and 10981308e026SRichard Henderson the cost of maintaining a full variable size TLB. */ 1099a40ec84eSRichard Henderson lp_mask &= env_tlb(env)->d[mmu_idx].large_page_mask; 11001308e026SRichard Henderson while (((lp_addr ^ vaddr) & lp_mask) != 0) { 11011308e026SRichard Henderson lp_mask <<= 1; 1102d9bb58e5SYang Zhong } 11031308e026SRichard Henderson } 1104a40ec84eSRichard Henderson env_tlb(env)->d[mmu_idx].large_page_addr = lp_addr & lp_mask; 1105a40ec84eSRichard Henderson env_tlb(env)->d[mmu_idx].large_page_mask = lp_mask; 1106d9bb58e5SYang Zhong } 1107d9bb58e5SYang Zhong 110840473689SRichard Henderson /* 110940473689SRichard Henderson * Add a new TLB entry. At most one entry for a given virtual address 1110d9bb58e5SYang Zhong * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the 1111d9bb58e5SYang Zhong * supplied size is only used by tlb_flush_page. 1112d9bb58e5SYang Zhong * 1113d9bb58e5SYang Zhong * Called from TCG-generated code, which is under an RCU read-side 1114d9bb58e5SYang Zhong * critical section. 
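 *
 * For example (an illustrative sketch only; cs, paddr and prot are
 * placeholders and the field values are assumptions, not taken from any
 * particular target), a target's tlb_fill hook that has resolved a
 * translation builds a CPUTLBEntryFull and installs it for the page:
 *
 *     CPUTLBEntryFull full = {
 *         .phys_addr    = paddr & TARGET_PAGE_MASK,
 *         .attrs        = MEMTXATTRS_UNSPECIFIED,
 *         .prot         = prot,
 *         .lg_page_size = TARGET_PAGE_BITS,
 *     };
 *     tlb_set_page_full(cs, mmu_idx, vaddr & TARGET_PAGE_MASK, &full);
 *
 * tlb_set_page_with_attrs() below is the equivalent wrapper that builds
 * the CPUTLBEntryFull from discrete arguments.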
1115d9bb58e5SYang Zhong */ 111640473689SRichard Henderson void tlb_set_page_full(CPUState *cpu, int mmu_idx, 111740473689SRichard Henderson target_ulong vaddr, CPUTLBEntryFull *full) 1118d9bb58e5SYang Zhong { 1119d9bb58e5SYang Zhong CPUArchState *env = cpu->env_ptr; 1120a40ec84eSRichard Henderson CPUTLB *tlb = env_tlb(env); 1121a40ec84eSRichard Henderson CPUTLBDesc *desc = &tlb->d[mmu_idx]; 1122d9bb58e5SYang Zhong MemoryRegionSection *section; 1123d9bb58e5SYang Zhong unsigned int index; 1124d9bb58e5SYang Zhong target_ulong address; 11258f5db641SRichard Henderson target_ulong write_address; 1126d9bb58e5SYang Zhong uintptr_t addend; 112768fea038SRichard Henderson CPUTLBEntry *te, tn; 112855df6fcfSPeter Maydell hwaddr iotlb, xlat, sz, paddr_page; 112955df6fcfSPeter Maydell target_ulong vaddr_page; 113040473689SRichard Henderson int asidx, wp_flags, prot; 11318f5db641SRichard Henderson bool is_ram, is_romd; 1132d9bb58e5SYang Zhong 1133d9bb58e5SYang Zhong assert_cpu_is_self(cpu); 113455df6fcfSPeter Maydell 113540473689SRichard Henderson if (full->lg_page_size <= TARGET_PAGE_BITS) { 113655df6fcfSPeter Maydell sz = TARGET_PAGE_SIZE; 113755df6fcfSPeter Maydell } else { 113840473689SRichard Henderson sz = (hwaddr)1 << full->lg_page_size; 113940473689SRichard Henderson tlb_add_large_page(env, mmu_idx, vaddr, sz); 114055df6fcfSPeter Maydell } 114155df6fcfSPeter Maydell vaddr_page = vaddr & TARGET_PAGE_MASK; 114240473689SRichard Henderson paddr_page = full->phys_addr & TARGET_PAGE_MASK; 114355df6fcfSPeter Maydell 114440473689SRichard Henderson prot = full->prot; 114540473689SRichard Henderson asidx = cpu_asidx_from_attrs(cpu, full->attrs); 114655df6fcfSPeter Maydell section = address_space_translate_for_iotlb(cpu, asidx, paddr_page, 114740473689SRichard Henderson &xlat, &sz, full->attrs, &prot); 1148d9bb58e5SYang Zhong assert(sz >= TARGET_PAGE_SIZE); 1149d9bb58e5SYang Zhong 1150883f2c59SPhilippe Mathieu-Daudé tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" HWADDR_FMT_plx 1151d9bb58e5SYang Zhong " prot=%x idx=%d\n", 115240473689SRichard Henderson vaddr, full->phys_addr, prot, mmu_idx); 1153d9bb58e5SYang Zhong 115455df6fcfSPeter Maydell address = vaddr_page; 115540473689SRichard Henderson if (full->lg_page_size < TARGET_PAGE_BITS) { 115630d7e098SRichard Henderson /* Repeat the MMU check and TLB fill on every access. */ 115730d7e098SRichard Henderson address |= TLB_INVALID_MASK; 115855df6fcfSPeter Maydell } 115940473689SRichard Henderson if (full->attrs.byte_swap) { 11605b87b3e6SRichard Henderson address |= TLB_BSWAP; 1161a26fc6f5STony Nguyen } 11628f5db641SRichard Henderson 11638f5db641SRichard Henderson is_ram = memory_region_is_ram(section->mr); 11648f5db641SRichard Henderson is_romd = memory_region_is_romd(section->mr); 11658f5db641SRichard Henderson 11668f5db641SRichard Henderson if (is_ram || is_romd) { 11678f5db641SRichard Henderson /* RAM and ROMD both have associated host memory. */ 1168d9bb58e5SYang Zhong addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat; 11698f5db641SRichard Henderson } else { 11708f5db641SRichard Henderson /* I/O does not; force the host address to NULL. 
*/ 11718f5db641SRichard Henderson addend = 0; 1172d9bb58e5SYang Zhong } 1173d9bb58e5SYang Zhong 11748f5db641SRichard Henderson write_address = address; 11758f5db641SRichard Henderson if (is_ram) { 11768f5db641SRichard Henderson iotlb = memory_region_get_ram_addr(section->mr) + xlat; 11778f5db641SRichard Henderson /* 11788f5db641SRichard Henderson * Computing is_clean is expensive; avoid all that unless 11798f5db641SRichard Henderson * the page is actually writable. 11808f5db641SRichard Henderson */ 11818f5db641SRichard Henderson if (prot & PAGE_WRITE) { 11828f5db641SRichard Henderson if (section->readonly) { 11838f5db641SRichard Henderson write_address |= TLB_DISCARD_WRITE; 11848f5db641SRichard Henderson } else if (cpu_physical_memory_is_clean(iotlb)) { 11858f5db641SRichard Henderson write_address |= TLB_NOTDIRTY; 11868f5db641SRichard Henderson } 11878f5db641SRichard Henderson } 11888f5db641SRichard Henderson } else { 11898f5db641SRichard Henderson /* I/O or ROMD */ 11908f5db641SRichard Henderson iotlb = memory_region_section_get_iotlb(cpu, section) + xlat; 11918f5db641SRichard Henderson /* 11928f5db641SRichard Henderson * Writes to romd devices must go through MMIO to enable write. 11938f5db641SRichard Henderson * Reads to romd devices go through the ram_ptr found above, 11948f5db641SRichard Henderson * but of course reads to I/O must go through MMIO. 11958f5db641SRichard Henderson */ 11968f5db641SRichard Henderson write_address |= TLB_MMIO; 11978f5db641SRichard Henderson if (!is_romd) { 11988f5db641SRichard Henderson address = write_address; 11998f5db641SRichard Henderson } 12008f5db641SRichard Henderson } 12018f5db641SRichard Henderson 120250b107c5SRichard Henderson wp_flags = cpu_watchpoint_address_matches(cpu, vaddr_page, 120350b107c5SRichard Henderson TARGET_PAGE_SIZE); 1204d9bb58e5SYang Zhong 1205383beda9SRichard Henderson index = tlb_index(env, mmu_idx, vaddr_page); 1206383beda9SRichard Henderson te = tlb_entry(env, mmu_idx, vaddr_page); 1207d9bb58e5SYang Zhong 120868fea038SRichard Henderson /* 120971aec354SEmilio G. Cota * Hold the TLB lock for the rest of the function. We could acquire/release 121071aec354SEmilio G. Cota * the lock several times in the function, but it is faster to amortize the 121171aec354SEmilio G. Cota * acquisition cost by acquiring it just once. Note that this leads to 121271aec354SEmilio G. Cota * a longer critical section, but this is not a concern since the TLB lock 121371aec354SEmilio G. Cota * is unlikely to be contended. 121471aec354SEmilio G. Cota */ 1215a40ec84eSRichard Henderson qemu_spin_lock(&tlb->c.lock); 121671aec354SEmilio G. Cota 12173d1523ceSRichard Henderson /* Note that the tlb is no longer clean. */ 1218a40ec84eSRichard Henderson tlb->c.dirty |= 1 << mmu_idx; 12193d1523ceSRichard Henderson 122071aec354SEmilio G. Cota /* Make sure there's no cached translation for the new page. */ 122171aec354SEmilio G. Cota tlb_flush_vtlb_page_locked(env, mmu_idx, vaddr_page); 122271aec354SEmilio G. Cota 122371aec354SEmilio G. Cota /* 122468fea038SRichard Henderson * Only evict the old entry to the victim tlb if it's for a 122568fea038SRichard Henderson * different page; otherwise just overwrite the stale data. 122668fea038SRichard Henderson */ 12273cea94bbSEmilio G. 
Cota if (!tlb_hit_page_anyprot(te, vaddr_page) && !tlb_entry_is_empty(te)) { 1228a40ec84eSRichard Henderson unsigned vidx = desc->vindex++ % CPU_VTLB_SIZE; 1229a40ec84eSRichard Henderson CPUTLBEntry *tv = &desc->vtable[vidx]; 123068fea038SRichard Henderson 123168fea038SRichard Henderson /* Evict the old entry into the victim tlb. */ 123271aec354SEmilio G. Cota copy_tlb_helper_locked(tv, te); 123325d3ec58SRichard Henderson desc->vfulltlb[vidx] = desc->fulltlb[index]; 123486e1eff8SEmilio G. Cota tlb_n_used_entries_dec(env, mmu_idx); 123568fea038SRichard Henderson } 1236d9bb58e5SYang Zhong 1237d9bb58e5SYang Zhong /* refill the tlb */ 1238ace41090SPeter Maydell /* 1239ace41090SPeter Maydell * At this point iotlb contains a physical section number in the lower 1240ace41090SPeter Maydell * TARGET_PAGE_BITS, and either 12418f5db641SRichard Henderson * + the ram_addr_t of the page base of the target RAM (RAM) 12428f5db641SRichard Henderson * + the offset within section->mr of the page base (I/O, ROMD) 124355df6fcfSPeter Maydell * We subtract the vaddr_page (which is page aligned and thus won't 1244ace41090SPeter Maydell * disturb the low bits) to give an offset which can be added to the 1245ace41090SPeter Maydell * (non-page-aligned) vaddr of the eventual memory access to get 1246ace41090SPeter Maydell * the MemoryRegion offset for the access. Note that the vaddr we 1247ace41090SPeter Maydell * subtract here is that of the page base, and not the same as the 1248ace41090SPeter Maydell * vaddr we add back in io_readx()/io_writex()/get_page_addr_code(). 1249ace41090SPeter Maydell */ 125040473689SRichard Henderson desc->fulltlb[index] = *full; 125125d3ec58SRichard Henderson desc->fulltlb[index].xlat_section = iotlb - vaddr_page; 125240473689SRichard Henderson desc->fulltlb[index].phys_addr = paddr_page; 1253d9bb58e5SYang Zhong 1254d9bb58e5SYang Zhong /* Now calculate the new entry */ 125555df6fcfSPeter Maydell tn.addend = addend - vaddr_page; 1256d9bb58e5SYang Zhong if (prot & PAGE_READ) { 1257d9bb58e5SYang Zhong tn.addr_read = address; 125850b107c5SRichard Henderson if (wp_flags & BP_MEM_READ) { 125950b107c5SRichard Henderson tn.addr_read |= TLB_WATCHPOINT; 126050b107c5SRichard Henderson } 1261d9bb58e5SYang Zhong } else { 1262d9bb58e5SYang Zhong tn.addr_read = -1; 1263d9bb58e5SYang Zhong } 1264d9bb58e5SYang Zhong 1265d9bb58e5SYang Zhong if (prot & PAGE_EXEC) { 12668f5db641SRichard Henderson tn.addr_code = address; 1267d9bb58e5SYang Zhong } else { 1268d9bb58e5SYang Zhong tn.addr_code = -1; 1269d9bb58e5SYang Zhong } 1270d9bb58e5SYang Zhong 1271d9bb58e5SYang Zhong tn.addr_write = -1; 1272d9bb58e5SYang Zhong if (prot & PAGE_WRITE) { 12738f5db641SRichard Henderson tn.addr_write = write_address; 1274f52bfb12SDavid Hildenbrand if (prot & PAGE_WRITE_INV) { 1275f52bfb12SDavid Hildenbrand tn.addr_write |= TLB_INVALID_MASK; 1276f52bfb12SDavid Hildenbrand } 127750b107c5SRichard Henderson if (wp_flags & BP_MEM_WRITE) { 127850b107c5SRichard Henderson tn.addr_write |= TLB_WATCHPOINT; 127950b107c5SRichard Henderson } 1280d9bb58e5SYang Zhong } 1281d9bb58e5SYang Zhong 128271aec354SEmilio G. Cota copy_tlb_helper_locked(te, &tn); 128386e1eff8SEmilio G. 
Cota tlb_n_used_entries_inc(env, mmu_idx); 1284a40ec84eSRichard Henderson qemu_spin_unlock(&tlb->c.lock); 1285d9bb58e5SYang Zhong } 1286d9bb58e5SYang Zhong 128740473689SRichard Henderson void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr, 128840473689SRichard Henderson hwaddr paddr, MemTxAttrs attrs, int prot, 128940473689SRichard Henderson int mmu_idx, target_ulong size) 129040473689SRichard Henderson { 129140473689SRichard Henderson CPUTLBEntryFull full = { 129240473689SRichard Henderson .phys_addr = paddr, 129340473689SRichard Henderson .attrs = attrs, 129440473689SRichard Henderson .prot = prot, 129540473689SRichard Henderson .lg_page_size = ctz64(size) 129640473689SRichard Henderson }; 129740473689SRichard Henderson 129840473689SRichard Henderson assert(is_power_of_2(size)); 129940473689SRichard Henderson tlb_set_page_full(cpu, mmu_idx, vaddr, &full); 130040473689SRichard Henderson } 130140473689SRichard Henderson 1302d9bb58e5SYang Zhong void tlb_set_page(CPUState *cpu, target_ulong vaddr, 1303d9bb58e5SYang Zhong hwaddr paddr, int prot, 1304d9bb58e5SYang Zhong int mmu_idx, target_ulong size) 1305d9bb58e5SYang Zhong { 1306d9bb58e5SYang Zhong tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED, 1307d9bb58e5SYang Zhong prot, mmu_idx, size); 1308d9bb58e5SYang Zhong } 1309d9bb58e5SYang Zhong 1310c319dc13SRichard Henderson /* 1311c319dc13SRichard Henderson * Note: tlb_fill() can trigger a resize of the TLB. This means that all of the 1312c319dc13SRichard Henderson * caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must 1313c319dc13SRichard Henderson * be discarded and looked up again (e.g. via tlb_entry()). 1314c319dc13SRichard Henderson */ 1315c319dc13SRichard Henderson static void tlb_fill(CPUState *cpu, target_ulong addr, int size, 1316c319dc13SRichard Henderson MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) 1317c319dc13SRichard Henderson { 1318c319dc13SRichard Henderson bool ok; 1319c319dc13SRichard Henderson 1320c319dc13SRichard Henderson /* 1321c319dc13SRichard Henderson * This is not a probe, so only valid return is success; failure 1322c319dc13SRichard Henderson * should result in exception + longjmp to the cpu loop. 
1323c319dc13SRichard Henderson */ 13248810ee2aSAlex Bennée ok = cpu->cc->tcg_ops->tlb_fill(cpu, addr, size, 1325e124536fSEduardo Habkost access_type, mmu_idx, false, retaddr); 1326c319dc13SRichard Henderson assert(ok); 1327c319dc13SRichard Henderson } 1328c319dc13SRichard Henderson 132978271684SClaudio Fontana static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr, 133078271684SClaudio Fontana MMUAccessType access_type, 133178271684SClaudio Fontana int mmu_idx, uintptr_t retaddr) 133278271684SClaudio Fontana { 13338810ee2aSAlex Bennée cpu->cc->tcg_ops->do_unaligned_access(cpu, addr, access_type, 13348810ee2aSAlex Bennée mmu_idx, retaddr); 133578271684SClaudio Fontana } 133678271684SClaudio Fontana 133778271684SClaudio Fontana static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr, 133878271684SClaudio Fontana vaddr addr, unsigned size, 133978271684SClaudio Fontana MMUAccessType access_type, 134078271684SClaudio Fontana int mmu_idx, MemTxAttrs attrs, 134178271684SClaudio Fontana MemTxResult response, 134278271684SClaudio Fontana uintptr_t retaddr) 134378271684SClaudio Fontana { 134478271684SClaudio Fontana CPUClass *cc = CPU_GET_CLASS(cpu); 134578271684SClaudio Fontana 134678271684SClaudio Fontana if (!cpu->ignore_memory_transaction_failures && 134778271684SClaudio Fontana cc->tcg_ops->do_transaction_failed) { 134878271684SClaudio Fontana cc->tcg_ops->do_transaction_failed(cpu, physaddr, addr, size, 134978271684SClaudio Fontana access_type, mmu_idx, attrs, 135078271684SClaudio Fontana response, retaddr); 135178271684SClaudio Fontana } 135278271684SClaudio Fontana } 135378271684SClaudio Fontana 135425d3ec58SRichard Henderson static uint64_t io_readx(CPUArchState *env, CPUTLBEntryFull *full, 1355f1be3696SRichard Henderson int mmu_idx, target_ulong addr, uintptr_t retaddr, 1356be5c4787STony Nguyen MMUAccessType access_type, MemOp op) 1357d9bb58e5SYang Zhong { 135829a0af61SRichard Henderson CPUState *cpu = env_cpu(env); 13592d54f194SPeter Maydell hwaddr mr_offset; 13602d54f194SPeter Maydell MemoryRegionSection *section; 13612d54f194SPeter Maydell MemoryRegion *mr; 1362d9bb58e5SYang Zhong uint64_t val; 136304e3aabdSPeter Maydell MemTxResult r; 1364d9bb58e5SYang Zhong 136525d3ec58SRichard Henderson section = iotlb_to_section(cpu, full->xlat_section, full->attrs); 13662d54f194SPeter Maydell mr = section->mr; 136725d3ec58SRichard Henderson mr_offset = (full->xlat_section & TARGET_PAGE_MASK) + addr; 1368d9bb58e5SYang Zhong cpu->mem_io_pc = retaddr; 136908565552SRichard Henderson if (!cpu->can_do_io) { 1370d9bb58e5SYang Zhong cpu_io_recompile(cpu, retaddr); 1371d9bb58e5SYang Zhong } 1372d9bb58e5SYang Zhong 137361b59fb2SRichard Henderson { 137461b59fb2SRichard Henderson QEMU_IOTHREAD_LOCK_GUARD(); 137525d3ec58SRichard Henderson r = memory_region_dispatch_read(mr, mr_offset, &val, op, full->attrs); 137661b59fb2SRichard Henderson } 137761b59fb2SRichard Henderson 137804e3aabdSPeter Maydell if (r != MEMTX_OK) { 13792d54f194SPeter Maydell hwaddr physaddr = mr_offset + 13802d54f194SPeter Maydell section->offset_within_address_space - 13812d54f194SPeter Maydell section->offset_within_region; 13822d54f194SPeter Maydell 1383be5c4787STony Nguyen cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), access_type, 138425d3ec58SRichard Henderson mmu_idx, full->attrs, r, retaddr); 138504e3aabdSPeter Maydell } 1386d9bb58e5SYang Zhong return val; 1387d9bb58e5SYang Zhong } 1388d9bb58e5SYang Zhong 13892f3a57eeSAlex Bennée /* 139025d3ec58SRichard Henderson * Save a potentially trashed 
CPUTLBEntryFull for later lookup by plugin. 139125d3ec58SRichard Henderson * This is read by tlb_plugin_lookup if the fulltlb entry doesn't match 1392570ef309SAlex Bennée * because of the side effect of io_writex changing memory layout. 13932f3a57eeSAlex Bennée */ 139437523ff7SRichard Henderson static void save_iotlb_data(CPUState *cs, MemoryRegionSection *section, 139537523ff7SRichard Henderson hwaddr mr_offset) 13962f3a57eeSAlex Bennée { 13972f3a57eeSAlex Bennée #ifdef CONFIG_PLUGIN 13982f3a57eeSAlex Bennée SavedIOTLB *saved = &cs->saved_iotlb; 13992f3a57eeSAlex Bennée saved->section = section; 14002f3a57eeSAlex Bennée saved->mr_offset = mr_offset; 14012f3a57eeSAlex Bennée #endif 14022f3a57eeSAlex Bennée } 14032f3a57eeSAlex Bennée 140425d3ec58SRichard Henderson static void io_writex(CPUArchState *env, CPUTLBEntryFull *full, 1405f1be3696SRichard Henderson int mmu_idx, uint64_t val, target_ulong addr, 1406be5c4787STony Nguyen uintptr_t retaddr, MemOp op) 1407d9bb58e5SYang Zhong { 140829a0af61SRichard Henderson CPUState *cpu = env_cpu(env); 14092d54f194SPeter Maydell hwaddr mr_offset; 14102d54f194SPeter Maydell MemoryRegionSection *section; 14112d54f194SPeter Maydell MemoryRegion *mr; 141204e3aabdSPeter Maydell MemTxResult r; 1413d9bb58e5SYang Zhong 141425d3ec58SRichard Henderson section = iotlb_to_section(cpu, full->xlat_section, full->attrs); 14152d54f194SPeter Maydell mr = section->mr; 141625d3ec58SRichard Henderson mr_offset = (full->xlat_section & TARGET_PAGE_MASK) + addr; 141708565552SRichard Henderson if (!cpu->can_do_io) { 1418d9bb58e5SYang Zhong cpu_io_recompile(cpu, retaddr); 1419d9bb58e5SYang Zhong } 1420d9bb58e5SYang Zhong cpu->mem_io_pc = retaddr; 1421d9bb58e5SYang Zhong 14222f3a57eeSAlex Bennée /* 14232f3a57eeSAlex Bennée * The memory_region_dispatch may trigger a flush/resize 14242f3a57eeSAlex Bennée * so for plugins we save the iotlb_data just in case. 14252f3a57eeSAlex Bennée */ 142637523ff7SRichard Henderson save_iotlb_data(cpu, section, mr_offset); 14272f3a57eeSAlex Bennée 142861b59fb2SRichard Henderson { 142961b59fb2SRichard Henderson QEMU_IOTHREAD_LOCK_GUARD(); 143025d3ec58SRichard Henderson r = memory_region_dispatch_write(mr, mr_offset, val, op, full->attrs); 143161b59fb2SRichard Henderson } 143261b59fb2SRichard Henderson 143304e3aabdSPeter Maydell if (r != MEMTX_OK) { 14342d54f194SPeter Maydell hwaddr physaddr = mr_offset + 14352d54f194SPeter Maydell section->offset_within_address_space - 14362d54f194SPeter Maydell section->offset_within_region; 14372d54f194SPeter Maydell 1438be5c4787STony Nguyen cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), 143925d3ec58SRichard Henderson MMU_DATA_STORE, mmu_idx, full->attrs, r, 1440be5c4787STony Nguyen retaddr); 144104e3aabdSPeter Maydell } 1442d9bb58e5SYang Zhong } 1443d9bb58e5SYang Zhong 1444d9bb58e5SYang Zhong /* Return true if ADDR is present in the victim tlb, and has been copied 1445d9bb58e5SYang Zhong back to the main tlb. */ 1446d9bb58e5SYang Zhong static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index, 14470b3c75adSRichard Henderson MMUAccessType access_type, target_ulong page) 1448d9bb58e5SYang Zhong { 1449d9bb58e5SYang Zhong size_t vidx; 145071aec354SEmilio G. 
Cota 145129a0af61SRichard Henderson assert_cpu_is_self(env_cpu(env)); 1452d9bb58e5SYang Zhong for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) { 1453a40ec84eSRichard Henderson CPUTLBEntry *vtlb = &env_tlb(env)->d[mmu_idx].vtable[vidx]; 14540b3c75adSRichard Henderson target_ulong cmp = tlb_read_idx(vtlb, access_type); 1455d9bb58e5SYang Zhong 1456d9bb58e5SYang Zhong if (cmp == page) { 1457d9bb58e5SYang Zhong /* Found entry in victim tlb, swap tlb and iotlb. */ 1458a40ec84eSRichard Henderson CPUTLBEntry tmptlb, *tlb = &env_tlb(env)->f[mmu_idx].table[index]; 1459d9bb58e5SYang Zhong 1460a40ec84eSRichard Henderson qemu_spin_lock(&env_tlb(env)->c.lock); 146171aec354SEmilio G. Cota copy_tlb_helper_locked(&tmptlb, tlb); 146271aec354SEmilio G. Cota copy_tlb_helper_locked(tlb, vtlb); 146371aec354SEmilio G. Cota copy_tlb_helper_locked(vtlb, &tmptlb); 1464a40ec84eSRichard Henderson qemu_spin_unlock(&env_tlb(env)->c.lock); 1465d9bb58e5SYang Zhong 146625d3ec58SRichard Henderson CPUTLBEntryFull *f1 = &env_tlb(env)->d[mmu_idx].fulltlb[index]; 146725d3ec58SRichard Henderson CPUTLBEntryFull *f2 = &env_tlb(env)->d[mmu_idx].vfulltlb[vidx]; 146825d3ec58SRichard Henderson CPUTLBEntryFull tmpf; 146925d3ec58SRichard Henderson tmpf = *f1; *f1 = *f2; *f2 = tmpf; 1470d9bb58e5SYang Zhong return true; 1471d9bb58e5SYang Zhong } 1472d9bb58e5SYang Zhong } 1473d9bb58e5SYang Zhong return false; 1474d9bb58e5SYang Zhong } 1475d9bb58e5SYang Zhong 1476707526adSRichard Henderson static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size, 147725d3ec58SRichard Henderson CPUTLBEntryFull *full, uintptr_t retaddr) 1478707526adSRichard Henderson { 147925d3ec58SRichard Henderson ram_addr_t ram_addr = mem_vaddr + full->xlat_section; 1480707526adSRichard Henderson 1481707526adSRichard Henderson trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size); 1482707526adSRichard Henderson 1483707526adSRichard Henderson if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) { 1484f349e92eSPhilippe Mathieu-Daudé tb_invalidate_phys_range_fast(ram_addr, size, retaddr); 1485707526adSRichard Henderson } 1486707526adSRichard Henderson 1487707526adSRichard Henderson /* 1488707526adSRichard Henderson * Set both VGA and migration bits for simplicity and to remove 1489707526adSRichard Henderson * the notdirty callback faster. 1490707526adSRichard Henderson */ 1491707526adSRichard Henderson cpu_physical_memory_set_dirty_range(ram_addr, size, DIRTY_CLIENTS_NOCODE); 1492707526adSRichard Henderson 1493707526adSRichard Henderson /* We remove the notdirty callback only if the code has been flushed. 
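 * (Concretely, "removing the callback" is the tlb_set_dirty() call below,
 *  which clears TLB_NOTDIRTY from the entry so that further writes to the
 *  page take the fast path.)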
*/ 1494707526adSRichard Henderson if (!cpu_physical_memory_is_clean(ram_addr)) { 1495707526adSRichard Henderson trace_memory_notdirty_set_dirty(mem_vaddr); 1496707526adSRichard Henderson tlb_set_dirty(cpu, mem_vaddr); 1497707526adSRichard Henderson } 1498707526adSRichard Henderson } 1499707526adSRichard Henderson 1500069cfe77SRichard Henderson static int probe_access_internal(CPUArchState *env, target_ulong addr, 1501069cfe77SRichard Henderson int fault_size, MMUAccessType access_type, 1502069cfe77SRichard Henderson int mmu_idx, bool nonfault, 1503af803a4fSRichard Henderson void **phost, CPUTLBEntryFull **pfull, 1504af803a4fSRichard Henderson uintptr_t retaddr) 1505d9bb58e5SYang Zhong { 1506383beda9SRichard Henderson uintptr_t index = tlb_index(env, mmu_idx, addr); 1507383beda9SRichard Henderson CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); 15080b3c75adSRichard Henderson target_ulong tlb_addr = tlb_read_idx(entry, access_type); 15090b3c75adSRichard Henderson target_ulong page_addr = addr & TARGET_PAGE_MASK; 15100b3c75adSRichard Henderson int flags = TLB_FLAGS_MASK; 1511ca86cf32SDavid Hildenbrand 1512069cfe77SRichard Henderson if (!tlb_hit_page(tlb_addr, page_addr)) { 15130b3c75adSRichard Henderson if (!victim_tlb_hit(env, mmu_idx, index, access_type, page_addr)) { 1514069cfe77SRichard Henderson CPUState *cs = env_cpu(env); 1515069cfe77SRichard Henderson 15168810ee2aSAlex Bennée if (!cs->cc->tcg_ops->tlb_fill(cs, addr, fault_size, access_type, 1517069cfe77SRichard Henderson mmu_idx, nonfault, retaddr)) { 1518069cfe77SRichard Henderson /* Non-faulting page table read failed. */ 1519069cfe77SRichard Henderson *phost = NULL; 1520af803a4fSRichard Henderson *pfull = NULL; 1521069cfe77SRichard Henderson return TLB_INVALID_MASK; 1522069cfe77SRichard Henderson } 1523069cfe77SRichard Henderson 152403a98189SDavid Hildenbrand /* TLB resize via tlb_fill may have moved the entry. */ 1525af803a4fSRichard Henderson index = tlb_index(env, mmu_idx, addr); 152603a98189SDavid Hildenbrand entry = tlb_entry(env, mmu_idx, addr); 1527c3c8bf57SRichard Henderson 1528c3c8bf57SRichard Henderson /* 1529c3c8bf57SRichard Henderson * With PAGE_WRITE_INV, we set TLB_INVALID_MASK immediately, 1530c3c8bf57SRichard Henderson * to force the next access through tlb_fill. We've just 1531c3c8bf57SRichard Henderson * called tlb_fill, so we know that this entry *is* valid. 1532c3c8bf57SRichard Henderson */ 1533c3c8bf57SRichard Henderson flags &= ~TLB_INVALID_MASK; 1534d9bb58e5SYang Zhong } 15350b3c75adSRichard Henderson tlb_addr = tlb_read_idx(entry, access_type); 153603a98189SDavid Hildenbrand } 1537c3c8bf57SRichard Henderson flags &= tlb_addr; 153803a98189SDavid Hildenbrand 1539af803a4fSRichard Henderson *pfull = &env_tlb(env)->d[mmu_idx].fulltlb[index]; 1540af803a4fSRichard Henderson 1541069cfe77SRichard Henderson /* Fold all "mmio-like" bits into TLB_MMIO. This is not RAM. */ 1542069cfe77SRichard Henderson if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY))) { 1543069cfe77SRichard Henderson *phost = NULL; 1544069cfe77SRichard Henderson return TLB_MMIO; 1545fef39ccdSDavid Hildenbrand } 1546fef39ccdSDavid Hildenbrand 1547069cfe77SRichard Henderson /* Everything else is RAM. 
*/ 1548069cfe77SRichard Henderson *phost = (void *)((uintptr_t)addr + entry->addend); 1549069cfe77SRichard Henderson return flags; 1550069cfe77SRichard Henderson } 1551069cfe77SRichard Henderson 1552d507e6c5SRichard Henderson int probe_access_full(CPUArchState *env, target_ulong addr, int size, 1553069cfe77SRichard Henderson MMUAccessType access_type, int mmu_idx, 1554af803a4fSRichard Henderson bool nonfault, void **phost, CPUTLBEntryFull **pfull, 1555af803a4fSRichard Henderson uintptr_t retaddr) 1556069cfe77SRichard Henderson { 1557d507e6c5SRichard Henderson int flags = probe_access_internal(env, addr, size, access_type, mmu_idx, 1558af803a4fSRichard Henderson nonfault, phost, pfull, retaddr); 1559069cfe77SRichard Henderson 1560069cfe77SRichard Henderson /* Handle clean RAM pages. */ 1561069cfe77SRichard Henderson if (unlikely(flags & TLB_NOTDIRTY)) { 1562af803a4fSRichard Henderson notdirty_write(env_cpu(env), addr, 1, *pfull, retaddr); 1563069cfe77SRichard Henderson flags &= ~TLB_NOTDIRTY; 1564069cfe77SRichard Henderson } 1565069cfe77SRichard Henderson 1566069cfe77SRichard Henderson return flags; 1567069cfe77SRichard Henderson } 1568069cfe77SRichard Henderson 15691770b2f2SDaniel Henrique Barboza int probe_access_flags(CPUArchState *env, target_ulong addr, int size, 1570af803a4fSRichard Henderson MMUAccessType access_type, int mmu_idx, 1571af803a4fSRichard Henderson bool nonfault, void **phost, uintptr_t retaddr) 1572af803a4fSRichard Henderson { 1573af803a4fSRichard Henderson CPUTLBEntryFull *full; 15741770b2f2SDaniel Henrique Barboza int flags; 1575af803a4fSRichard Henderson 15761770b2f2SDaniel Henrique Barboza g_assert(-(addr | TARGET_PAGE_MASK) >= size); 15771770b2f2SDaniel Henrique Barboza 15781770b2f2SDaniel Henrique Barboza flags = probe_access_internal(env, addr, size, access_type, mmu_idx, 1579af803a4fSRichard Henderson nonfault, phost, &full, retaddr); 15801770b2f2SDaniel Henrique Barboza 15811770b2f2SDaniel Henrique Barboza /* Handle clean RAM pages. */ 15821770b2f2SDaniel Henrique Barboza if (unlikely(flags & TLB_NOTDIRTY)) { 15831770b2f2SDaniel Henrique Barboza notdirty_write(env_cpu(env), addr, 1, full, retaddr); 15841770b2f2SDaniel Henrique Barboza flags &= ~TLB_NOTDIRTY; 15851770b2f2SDaniel Henrique Barboza } 15861770b2f2SDaniel Henrique Barboza 15871770b2f2SDaniel Henrique Barboza return flags; 1588af803a4fSRichard Henderson } 1589af803a4fSRichard Henderson 1590069cfe77SRichard Henderson void *probe_access(CPUArchState *env, target_ulong addr, int size, 1591069cfe77SRichard Henderson MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) 1592069cfe77SRichard Henderson { 1593af803a4fSRichard Henderson CPUTLBEntryFull *full; 1594069cfe77SRichard Henderson void *host; 1595069cfe77SRichard Henderson int flags; 1596069cfe77SRichard Henderson 1597069cfe77SRichard Henderson g_assert(-(addr | TARGET_PAGE_MASK) >= size); 1598069cfe77SRichard Henderson 1599069cfe77SRichard Henderson flags = probe_access_internal(env, addr, size, access_type, mmu_idx, 1600af803a4fSRichard Henderson false, &host, &full, retaddr); 1601069cfe77SRichard Henderson 1602069cfe77SRichard Henderson /* Per the interface, size == 0 merely faults the access. */ 1603069cfe77SRichard Henderson if (size == 0) { 160473bc0bd4SRichard Henderson return NULL; 160573bc0bd4SRichard Henderson } 160673bc0bd4SRichard Henderson 1607069cfe77SRichard Henderson if (unlikely(flags & (TLB_NOTDIRTY | TLB_WATCHPOINT))) { 160803a98189SDavid Hildenbrand /* Handle watchpoints. 
*/ 1609069cfe77SRichard Henderson if (flags & TLB_WATCHPOINT) { 1610069cfe77SRichard Henderson int wp_access = (access_type == MMU_DATA_STORE 1611069cfe77SRichard Henderson ? BP_MEM_WRITE : BP_MEM_READ); 161203a98189SDavid Hildenbrand cpu_check_watchpoint(env_cpu(env), addr, size, 161325d3ec58SRichard Henderson full->attrs, wp_access, retaddr); 1614d9bb58e5SYang Zhong } 1615fef39ccdSDavid Hildenbrand 161673bc0bd4SRichard Henderson /* Handle clean RAM pages. */ 1617069cfe77SRichard Henderson if (flags & TLB_NOTDIRTY) { 161825d3ec58SRichard Henderson notdirty_write(env_cpu(env), addr, 1, full, retaddr); 161973bc0bd4SRichard Henderson } 1620fef39ccdSDavid Hildenbrand } 1621fef39ccdSDavid Hildenbrand 1622069cfe77SRichard Henderson return host; 1623d9bb58e5SYang Zhong } 1624d9bb58e5SYang Zhong 16254811e909SRichard Henderson void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr, 16264811e909SRichard Henderson MMUAccessType access_type, int mmu_idx) 16274811e909SRichard Henderson { 1628af803a4fSRichard Henderson CPUTLBEntryFull *full; 1629069cfe77SRichard Henderson void *host; 1630069cfe77SRichard Henderson int flags; 16314811e909SRichard Henderson 1632069cfe77SRichard Henderson flags = probe_access_internal(env, addr, 0, access_type, 1633af803a4fSRichard Henderson mmu_idx, true, &host, &full, 0); 1634069cfe77SRichard Henderson 1635069cfe77SRichard Henderson /* No combination of flags are expected by the caller. */ 1636069cfe77SRichard Henderson return flags ? NULL : host; 16374811e909SRichard Henderson } 16384811e909SRichard Henderson 16397e0d9973SRichard Henderson /* 16407e0d9973SRichard Henderson * Return a ram_addr_t for the virtual address for execution. 16417e0d9973SRichard Henderson * 16427e0d9973SRichard Henderson * Return -1 if we can't translate and execute from an entire page 16437e0d9973SRichard Henderson * of RAM. This will force us to execute by loading and translating 16447e0d9973SRichard Henderson * one insn at a time, without caching. 16457e0d9973SRichard Henderson * 16467e0d9973SRichard Henderson * NOTE: This function will trigger an exception if the page is 16477e0d9973SRichard Henderson * not executable. 16487e0d9973SRichard Henderson */ 16497e0d9973SRichard Henderson tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr, 16507e0d9973SRichard Henderson void **hostp) 16517e0d9973SRichard Henderson { 1652af803a4fSRichard Henderson CPUTLBEntryFull *full; 16537e0d9973SRichard Henderson void *p; 16547e0d9973SRichard Henderson 16557e0d9973SRichard Henderson (void)probe_access_internal(env, addr, 1, MMU_INST_FETCH, 1656af803a4fSRichard Henderson cpu_mmu_index(env, true), false, &p, &full, 0); 16577e0d9973SRichard Henderson if (p == NULL) { 16587e0d9973SRichard Henderson return -1; 16597e0d9973SRichard Henderson } 1660ac01ec6fSWeiwei Li 1661ac01ec6fSWeiwei Li if (full->lg_page_size < TARGET_PAGE_BITS) { 1662ac01ec6fSWeiwei Li return -1; 1663ac01ec6fSWeiwei Li } 1664ac01ec6fSWeiwei Li 16657e0d9973SRichard Henderson if (hostp) { 16667e0d9973SRichard Henderson *hostp = p; 16677e0d9973SRichard Henderson } 16687e0d9973SRichard Henderson return qemu_ram_addr_from_host_nofail(p); 16697e0d9973SRichard Henderson } 16707e0d9973SRichard Henderson 1671*cdfac37bSRichard Henderson /* Load/store with atomicity primitives. 
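 * (The included ldst_atomicity.c.inc supplies helpers such as
 *  load_atomic2(), load_atomic4() and load_atomic8_or_exit(), which the
 *  byte-wise loaders below rely on.)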
*/ 1672*cdfac37bSRichard Henderson #include "ldst_atomicity.c.inc" 1673*cdfac37bSRichard Henderson 1674235537faSAlex Bennée #ifdef CONFIG_PLUGIN 1675235537faSAlex Bennée /* 1676235537faSAlex Bennée * Perform a TLB lookup and populate the qemu_plugin_hwaddr structure. 1677235537faSAlex Bennée * This should be a hot path as we will have just looked this path up 1678235537faSAlex Bennée * in the softmmu lookup code (or helper). We don't handle re-fills or 1679235537faSAlex Bennée * checking the victim table. This is purely informational. 1680235537faSAlex Bennée * 16812f3a57eeSAlex Bennée * This almost never fails as the memory access being instrumented 16822f3a57eeSAlex Bennée * should have just filled the TLB. The one corner case is io_writex 16832f3a57eeSAlex Bennée * which can cause TLB flushes and potential resizing of the TLBs 1684570ef309SAlex Bennée * losing the information we need. In those cases we need to recover 168525d3ec58SRichard Henderson * data from a copy of the CPUTLBEntryFull. As long as this always occurs 1686570ef309SAlex Bennée * from the same thread (which a mem callback will be) this is safe. 1687235537faSAlex Bennée */ 1688235537faSAlex Bennée 1689235537faSAlex Bennée bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr, int mmu_idx, 1690235537faSAlex Bennée bool is_store, struct qemu_plugin_hwaddr *data) 1691235537faSAlex Bennée { 1692235537faSAlex Bennée CPUArchState *env = cpu->env_ptr; 1693235537faSAlex Bennée CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr); 1694235537faSAlex Bennée uintptr_t index = tlb_index(env, mmu_idx, addr); 1695235537faSAlex Bennée target_ulong tlb_addr = is_store ? tlb_addr_write(tlbe) : tlbe->addr_read; 1696235537faSAlex Bennée 1697235537faSAlex Bennée if (likely(tlb_hit(tlb_addr, addr))) { 1698235537faSAlex Bennée /* We must have an iotlb entry for MMIO */ 1699235537faSAlex Bennée if (tlb_addr & TLB_MMIO) { 170025d3ec58SRichard Henderson CPUTLBEntryFull *full; 170125d3ec58SRichard Henderson full = &env_tlb(env)->d[mmu_idx].fulltlb[index]; 1702235537faSAlex Bennée data->is_io = true; 170325d3ec58SRichard Henderson data->v.io.section = 170425d3ec58SRichard Henderson iotlb_to_section(cpu, full->xlat_section, full->attrs); 170525d3ec58SRichard Henderson data->v.io.offset = (full->xlat_section & TARGET_PAGE_MASK) + addr; 1706235537faSAlex Bennée } else { 1707235537faSAlex Bennée data->is_io = false; 17082d932039SAlex Bennée data->v.ram.hostaddr = (void *)((uintptr_t)addr + tlbe->addend); 1709235537faSAlex Bennée } 1710235537faSAlex Bennée return true; 17112f3a57eeSAlex Bennée } else { 17122f3a57eeSAlex Bennée SavedIOTLB *saved = &cpu->saved_iotlb; 17132f3a57eeSAlex Bennée data->is_io = true; 17142f3a57eeSAlex Bennée data->v.io.section = saved->section; 17152f3a57eeSAlex Bennée data->v.io.offset = saved->mr_offset; 17162f3a57eeSAlex Bennée return true; 1717235537faSAlex Bennée } 1718235537faSAlex Bennée } 1719235537faSAlex Bennée 1720235537faSAlex Bennée #endif 1721235537faSAlex Bennée 172208dff435SRichard Henderson /* 17238cfdacaaSRichard Henderson * Probe for a load/store operation. 17248cfdacaaSRichard Henderson * Return the host address and into @flags. 
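 * (The results come back through the MMULookupPageData fields below: the
 *  host address in @haddr and the TLB_* flag bits in @flags.)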
17258cfdacaaSRichard Henderson */ 17268cfdacaaSRichard Henderson 17278cfdacaaSRichard Henderson typedef struct MMULookupPageData { 17288cfdacaaSRichard Henderson CPUTLBEntryFull *full; 17298cfdacaaSRichard Henderson void *haddr; 17308cfdacaaSRichard Henderson target_ulong addr; 17318cfdacaaSRichard Henderson int flags; 17328cfdacaaSRichard Henderson int size; 17338cfdacaaSRichard Henderson } MMULookupPageData; 17348cfdacaaSRichard Henderson 17358cfdacaaSRichard Henderson typedef struct MMULookupLocals { 17368cfdacaaSRichard Henderson MMULookupPageData page[2]; 17378cfdacaaSRichard Henderson MemOp memop; 17388cfdacaaSRichard Henderson int mmu_idx; 17398cfdacaaSRichard Henderson } MMULookupLocals; 17408cfdacaaSRichard Henderson 17418cfdacaaSRichard Henderson /** 17428cfdacaaSRichard Henderson * mmu_lookup1: translate one page 17438cfdacaaSRichard Henderson * @env: cpu context 17448cfdacaaSRichard Henderson * @data: lookup parameters 17458cfdacaaSRichard Henderson * @mmu_idx: virtual address context 17468cfdacaaSRichard Henderson * @access_type: load/store/code 17478cfdacaaSRichard Henderson * @ra: return address into tcg generated code, or 0 17488cfdacaaSRichard Henderson * 17498cfdacaaSRichard Henderson * Resolve the translation for the one page at @data.addr, filling in 17508cfdacaaSRichard Henderson * the rest of @data with the results. If the translation fails, 17518cfdacaaSRichard Henderson * tlb_fill will longjmp out. Return true if the softmmu tlb for 17528cfdacaaSRichard Henderson * @mmu_idx may have resized. 17538cfdacaaSRichard Henderson */ 17548cfdacaaSRichard Henderson static bool mmu_lookup1(CPUArchState *env, MMULookupPageData *data, 17558cfdacaaSRichard Henderson int mmu_idx, MMUAccessType access_type, uintptr_t ra) 17568cfdacaaSRichard Henderson { 17578cfdacaaSRichard Henderson target_ulong addr = data->addr; 17588cfdacaaSRichard Henderson uintptr_t index = tlb_index(env, mmu_idx, addr); 17598cfdacaaSRichard Henderson CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); 17608cfdacaaSRichard Henderson target_ulong tlb_addr = tlb_read_idx(entry, access_type); 17618cfdacaaSRichard Henderson bool maybe_resized = false; 17628cfdacaaSRichard Henderson 17638cfdacaaSRichard Henderson /* If the TLB entry is for a different page, reload and try again. */ 17648cfdacaaSRichard Henderson if (!tlb_hit(tlb_addr, addr)) { 17658cfdacaaSRichard Henderson if (!victim_tlb_hit(env, mmu_idx, index, access_type, 17668cfdacaaSRichard Henderson addr & TARGET_PAGE_MASK)) { 17678cfdacaaSRichard Henderson tlb_fill(env_cpu(env), addr, data->size, access_type, mmu_idx, ra); 17688cfdacaaSRichard Henderson maybe_resized = true; 17698cfdacaaSRichard Henderson index = tlb_index(env, mmu_idx, addr); 17708cfdacaaSRichard Henderson entry = tlb_entry(env, mmu_idx, addr); 17718cfdacaaSRichard Henderson } 17728cfdacaaSRichard Henderson tlb_addr = tlb_read_idx(entry, access_type) & ~TLB_INVALID_MASK; 17738cfdacaaSRichard Henderson } 17748cfdacaaSRichard Henderson 17758cfdacaaSRichard Henderson data->flags = tlb_addr & TLB_FLAGS_MASK; 17768cfdacaaSRichard Henderson data->full = &env_tlb(env)->d[mmu_idx].fulltlb[index]; 17778cfdacaaSRichard Henderson /* Compute haddr speculatively; depending on flags it might be invalid. 
*/ 17788cfdacaaSRichard Henderson data->haddr = (void *)((uintptr_t)addr + entry->addend); 17798cfdacaaSRichard Henderson 17808cfdacaaSRichard Henderson return maybe_resized; 17818cfdacaaSRichard Henderson } 17828cfdacaaSRichard Henderson 17838cfdacaaSRichard Henderson /** 17848cfdacaaSRichard Henderson * mmu_watch_or_dirty 17858cfdacaaSRichard Henderson * @env: cpu context 17868cfdacaaSRichard Henderson * @data: lookup parameters 17878cfdacaaSRichard Henderson * @access_type: load/store/code 17888cfdacaaSRichard Henderson * @ra: return address into tcg generated code, or 0 17898cfdacaaSRichard Henderson * 17908cfdacaaSRichard Henderson * Trigger watchpoints for @data.addr:@data.size; 17918cfdacaaSRichard Henderson * record writes to protected clean pages. 17928cfdacaaSRichard Henderson */ 17938cfdacaaSRichard Henderson static void mmu_watch_or_dirty(CPUArchState *env, MMULookupPageData *data, 17948cfdacaaSRichard Henderson MMUAccessType access_type, uintptr_t ra) 17958cfdacaaSRichard Henderson { 17968cfdacaaSRichard Henderson CPUTLBEntryFull *full = data->full; 17978cfdacaaSRichard Henderson target_ulong addr = data->addr; 17988cfdacaaSRichard Henderson int flags = data->flags; 17998cfdacaaSRichard Henderson int size = data->size; 18008cfdacaaSRichard Henderson 18018cfdacaaSRichard Henderson /* On watchpoint hit, this will longjmp out. */ 18028cfdacaaSRichard Henderson if (flags & TLB_WATCHPOINT) { 18038cfdacaaSRichard Henderson int wp = access_type == MMU_DATA_STORE ? BP_MEM_WRITE : BP_MEM_READ; 18048cfdacaaSRichard Henderson cpu_check_watchpoint(env_cpu(env), addr, size, full->attrs, wp, ra); 18058cfdacaaSRichard Henderson flags &= ~TLB_WATCHPOINT; 18068cfdacaaSRichard Henderson } 18078cfdacaaSRichard Henderson 18088cfdacaaSRichard Henderson /* Note that notdirty is only set for writes. */ 18098cfdacaaSRichard Henderson if (flags & TLB_NOTDIRTY) { 18108cfdacaaSRichard Henderson notdirty_write(env_cpu(env), addr, size, full, ra); 18118cfdacaaSRichard Henderson flags &= ~TLB_NOTDIRTY; 18128cfdacaaSRichard Henderson } 18138cfdacaaSRichard Henderson data->flags = flags; 18148cfdacaaSRichard Henderson } 18158cfdacaaSRichard Henderson 18168cfdacaaSRichard Henderson /** 18178cfdacaaSRichard Henderson * mmu_lookup: translate page(s) 18188cfdacaaSRichard Henderson * @env: cpu context 18198cfdacaaSRichard Henderson * @addr: virtual address 18208cfdacaaSRichard Henderson * @oi: combined mmu_idx and MemOp 18218cfdacaaSRichard Henderson * @ra: return address into tcg generated code, or 0 18228cfdacaaSRichard Henderson * @access_type: load/store/code 18238cfdacaaSRichard Henderson * @l: output result 18248cfdacaaSRichard Henderson * 18258cfdacaaSRichard Henderson * Resolve the translation for the page(s) beginning at @addr, for MemOp.size 18268cfdacaaSRichard Henderson * bytes. Return true if the lookup crosses a page boundary. 
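 *
 * For example, assuming 4KiB target pages, an 8-byte access whose address
 * ends in 0xffc crosses a page boundary and is split into two 4-byte
 * lookups: page[0] covers the last 4 bytes of the first page and page[1]
 * the first 4 bytes of the next.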
18278cfdacaaSRichard Henderson */ 18288cfdacaaSRichard Henderson static bool mmu_lookup(CPUArchState *env, target_ulong addr, MemOpIdx oi, 18298cfdacaaSRichard Henderson uintptr_t ra, MMUAccessType type, MMULookupLocals *l) 18308cfdacaaSRichard Henderson { 18318cfdacaaSRichard Henderson unsigned a_bits; 18328cfdacaaSRichard Henderson bool crosspage; 18338cfdacaaSRichard Henderson int flags; 18348cfdacaaSRichard Henderson 18358cfdacaaSRichard Henderson l->memop = get_memop(oi); 18368cfdacaaSRichard Henderson l->mmu_idx = get_mmuidx(oi); 18378cfdacaaSRichard Henderson 18388cfdacaaSRichard Henderson tcg_debug_assert(l->mmu_idx < NB_MMU_MODES); 18398cfdacaaSRichard Henderson 18408cfdacaaSRichard Henderson /* Handle CPU specific unaligned behaviour */ 18418cfdacaaSRichard Henderson a_bits = get_alignment_bits(l->memop); 18428cfdacaaSRichard Henderson if (addr & ((1 << a_bits) - 1)) { 18438cfdacaaSRichard Henderson cpu_unaligned_access(env_cpu(env), addr, type, l->mmu_idx, ra); 18448cfdacaaSRichard Henderson } 18458cfdacaaSRichard Henderson 18468cfdacaaSRichard Henderson l->page[0].addr = addr; 18478cfdacaaSRichard Henderson l->page[0].size = memop_size(l->memop); 18488cfdacaaSRichard Henderson l->page[1].addr = (addr + l->page[0].size - 1) & TARGET_PAGE_MASK; 18498cfdacaaSRichard Henderson l->page[1].size = 0; 18508cfdacaaSRichard Henderson crosspage = (addr ^ l->page[1].addr) & TARGET_PAGE_MASK; 18518cfdacaaSRichard Henderson 18528cfdacaaSRichard Henderson if (likely(!crosspage)) { 18538cfdacaaSRichard Henderson mmu_lookup1(env, &l->page[0], l->mmu_idx, type, ra); 18548cfdacaaSRichard Henderson 18558cfdacaaSRichard Henderson flags = l->page[0].flags; 18568cfdacaaSRichard Henderson if (unlikely(flags & (TLB_WATCHPOINT | TLB_NOTDIRTY))) { 18578cfdacaaSRichard Henderson mmu_watch_or_dirty(env, &l->page[0], type, ra); 18588cfdacaaSRichard Henderson } 18598cfdacaaSRichard Henderson if (unlikely(flags & TLB_BSWAP)) { 18608cfdacaaSRichard Henderson l->memop ^= MO_BSWAP; 18618cfdacaaSRichard Henderson } 18628cfdacaaSRichard Henderson } else { 18638cfdacaaSRichard Henderson /* Finish compute of page crossing. */ 18648cfdacaaSRichard Henderson int size0 = l->page[1].addr - addr; 18658cfdacaaSRichard Henderson l->page[1].size = l->page[0].size - size0; 18668cfdacaaSRichard Henderson l->page[0].size = size0; 18678cfdacaaSRichard Henderson 18688cfdacaaSRichard Henderson /* 18698cfdacaaSRichard Henderson * Lookup both pages, recognizing exceptions from either. If the 18708cfdacaaSRichard Henderson * second lookup potentially resized, refresh first CPUTLBEntryFull. 
18718cfdacaaSRichard Henderson */ 18728cfdacaaSRichard Henderson mmu_lookup1(env, &l->page[0], l->mmu_idx, type, ra); 18738cfdacaaSRichard Henderson if (mmu_lookup1(env, &l->page[1], l->mmu_idx, type, ra)) { 18748cfdacaaSRichard Henderson uintptr_t index = tlb_index(env, l->mmu_idx, addr); 18758cfdacaaSRichard Henderson l->page[0].full = &env_tlb(env)->d[l->mmu_idx].fulltlb[index]; 18768cfdacaaSRichard Henderson } 18778cfdacaaSRichard Henderson 18788cfdacaaSRichard Henderson flags = l->page[0].flags | l->page[1].flags; 18798cfdacaaSRichard Henderson if (unlikely(flags & (TLB_WATCHPOINT | TLB_NOTDIRTY))) { 18808cfdacaaSRichard Henderson mmu_watch_or_dirty(env, &l->page[0], type, ra); 18818cfdacaaSRichard Henderson mmu_watch_or_dirty(env, &l->page[1], type, ra); 18828cfdacaaSRichard Henderson } 18838cfdacaaSRichard Henderson 18848cfdacaaSRichard Henderson /* 18858cfdacaaSRichard Henderson * Since target/sparc is the only user of TLB_BSWAP, and all 18868cfdacaaSRichard Henderson * Sparc accesses are aligned, any treatment across two pages 18878cfdacaaSRichard Henderson * would be arbitrary. Refuse it until there's a use. 18888cfdacaaSRichard Henderson */ 18898cfdacaaSRichard Henderson tcg_debug_assert((flags & TLB_BSWAP) == 0); 18908cfdacaaSRichard Henderson } 18918cfdacaaSRichard Henderson 18928cfdacaaSRichard Henderson return crosspage; 18938cfdacaaSRichard Henderson } 18948cfdacaaSRichard Henderson 18958cfdacaaSRichard Henderson /* 189608dff435SRichard Henderson * Probe for an atomic operation. Do not allow unaligned operations, 189708dff435SRichard Henderson * or io operations to proceed. Return the host address. 189808dff435SRichard Henderson * 189908dff435SRichard Henderson * @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE. 190008dff435SRichard Henderson */ 1901d9bb58e5SYang Zhong static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, 19029002ffcbSRichard Henderson MemOpIdx oi, int size, int prot, 190308dff435SRichard Henderson uintptr_t retaddr) 1904d9bb58e5SYang Zhong { 1905b826044fSRichard Henderson uintptr_t mmu_idx = get_mmuidx(oi); 190614776ab5STony Nguyen MemOp mop = get_memop(oi); 1907d9bb58e5SYang Zhong int a_bits = get_alignment_bits(mop); 190808dff435SRichard Henderson uintptr_t index; 190908dff435SRichard Henderson CPUTLBEntry *tlbe; 191008dff435SRichard Henderson target_ulong tlb_addr; 191134d49937SPeter Maydell void *hostaddr; 1912417aeaffSRichard Henderson CPUTLBEntryFull *full; 1913d9bb58e5SYang Zhong 1914b826044fSRichard Henderson tcg_debug_assert(mmu_idx < NB_MMU_MODES); 1915b826044fSRichard Henderson 1916d9bb58e5SYang Zhong /* Adjust the given return address. */ 1917d9bb58e5SYang Zhong retaddr -= GETPC_ADJ; 1918d9bb58e5SYang Zhong 1919d9bb58e5SYang Zhong /* Enforce guest required alignment. */ 1920d9bb58e5SYang Zhong if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) { 1921d9bb58e5SYang Zhong /* ??? Maybe indicate atomic op to cpu_unaligned_access */ 192229a0af61SRichard Henderson cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE, 1923d9bb58e5SYang Zhong mmu_idx, retaddr); 1924d9bb58e5SYang Zhong } 1925d9bb58e5SYang Zhong 1926d9bb58e5SYang Zhong /* Enforce qemu required alignment. */ 192708dff435SRichard Henderson if (unlikely(addr & (size - 1))) { 1928d9bb58e5SYang Zhong /* We get here if guest alignment was not requested, 1929d9bb58e5SYang Zhong or was not enforced by cpu_unaligned_access above. 
1930d9bb58e5SYang Zhong We might widen the access and emulate, but for now 1931d9bb58e5SYang Zhong mark an exception and exit the cpu loop. */ 1932d9bb58e5SYang Zhong goto stop_the_world; 1933d9bb58e5SYang Zhong } 1934d9bb58e5SYang Zhong 193508dff435SRichard Henderson index = tlb_index(env, mmu_idx, addr); 193608dff435SRichard Henderson tlbe = tlb_entry(env, mmu_idx, addr); 193708dff435SRichard Henderson 1938d9bb58e5SYang Zhong /* Check TLB entry and enforce page permissions. */ 193908dff435SRichard Henderson if (prot & PAGE_WRITE) { 194008dff435SRichard Henderson tlb_addr = tlb_addr_write(tlbe); 1941334692bcSPeter Maydell if (!tlb_hit(tlb_addr, addr)) { 19420b3c75adSRichard Henderson if (!victim_tlb_hit(env, mmu_idx, index, MMU_DATA_STORE, 19430b3c75adSRichard Henderson addr & TARGET_PAGE_MASK)) { 194408dff435SRichard Henderson tlb_fill(env_cpu(env), addr, size, 194508dff435SRichard Henderson MMU_DATA_STORE, mmu_idx, retaddr); 19466d967cb8SEmilio G. Cota index = tlb_index(env, mmu_idx, addr); 19476d967cb8SEmilio G. Cota tlbe = tlb_entry(env, mmu_idx, addr); 1948d9bb58e5SYang Zhong } 1949403f290cSEmilio G. Cota tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK; 1950d9bb58e5SYang Zhong } 1951d9bb58e5SYang Zhong 1952417aeaffSRichard Henderson if (prot & PAGE_READ) { 1953417aeaffSRichard Henderson /* 1954417aeaffSRichard Henderson * Let the guest notice RMW on a write-only page. 1955417aeaffSRichard Henderson * We have just verified that the page is writable. 1956417aeaffSRichard Henderson * Subpage lookups may have left TLB_INVALID_MASK set, 1957417aeaffSRichard Henderson * but addr_read will only be -1 if PAGE_READ was unset. 1958417aeaffSRichard Henderson */ 1959417aeaffSRichard Henderson if (unlikely(tlbe->addr_read == -1)) { 196008dff435SRichard Henderson tlb_fill(env_cpu(env), addr, size, 196108dff435SRichard Henderson MMU_DATA_LOAD, mmu_idx, retaddr); 196208dff435SRichard Henderson /* 1963417aeaffSRichard Henderson * Since we don't support reads and writes to different 1964417aeaffSRichard Henderson * addresses, and we do have the proper page loaded for 1965417aeaffSRichard Henderson * write, this shouldn't ever return. But just in case, 1966417aeaffSRichard Henderson * handle via stop-the-world. 196708dff435SRichard Henderson */ 196808dff435SRichard Henderson goto stop_the_world; 196908dff435SRichard Henderson } 1970417aeaffSRichard Henderson /* Collect TLB_WATCHPOINT for read. 
*/ 1971417aeaffSRichard Henderson tlb_addr |= tlbe->addr_read; 1972417aeaffSRichard Henderson } 197308dff435SRichard Henderson } else /* if (prot & PAGE_READ) */ { 197408dff435SRichard Henderson tlb_addr = tlbe->addr_read; 197508dff435SRichard Henderson if (!tlb_hit(tlb_addr, addr)) { 19760b3c75adSRichard Henderson if (!victim_tlb_hit(env, mmu_idx, index, MMU_DATA_LOAD, 19770b3c75adSRichard Henderson addr & TARGET_PAGE_MASK)) { 197808dff435SRichard Henderson tlb_fill(env_cpu(env), addr, size, 197908dff435SRichard Henderson MMU_DATA_LOAD, mmu_idx, retaddr); 198008dff435SRichard Henderson index = tlb_index(env, mmu_idx, addr); 198108dff435SRichard Henderson tlbe = tlb_entry(env, mmu_idx, addr); 198208dff435SRichard Henderson } 198308dff435SRichard Henderson tlb_addr = tlbe->addr_read & ~TLB_INVALID_MASK; 198408dff435SRichard Henderson } 198508dff435SRichard Henderson } 198608dff435SRichard Henderson 198755df6fcfSPeter Maydell /* Notice an IO access or a needs-MMU-lookup access */ 19880953674eSRichard Henderson if (unlikely(tlb_addr & (TLB_MMIO | TLB_DISCARD_WRITE))) { 1989d9bb58e5SYang Zhong /* There's really nothing that can be done to 1990d9bb58e5SYang Zhong support this apart from stop-the-world. */ 1991d9bb58e5SYang Zhong goto stop_the_world; 1992d9bb58e5SYang Zhong } 1993d9bb58e5SYang Zhong 199434d49937SPeter Maydell hostaddr = (void *)((uintptr_t)addr + tlbe->addend); 1995417aeaffSRichard Henderson full = &env_tlb(env)->d[mmu_idx].fulltlb[index]; 199634d49937SPeter Maydell 199734d49937SPeter Maydell if (unlikely(tlb_addr & TLB_NOTDIRTY)) { 1998417aeaffSRichard Henderson notdirty_write(env_cpu(env), addr, size, full, retaddr); 1999417aeaffSRichard Henderson } 2000417aeaffSRichard Henderson 2001417aeaffSRichard Henderson if (unlikely(tlb_addr & TLB_WATCHPOINT)) { 2002417aeaffSRichard Henderson QEMU_BUILD_BUG_ON(PAGE_READ != BP_MEM_READ); 2003417aeaffSRichard Henderson QEMU_BUILD_BUG_ON(PAGE_WRITE != BP_MEM_WRITE); 2004417aeaffSRichard Henderson /* therefore prot == watchpoint bits */ 2005417aeaffSRichard Henderson cpu_check_watchpoint(env_cpu(env), addr, size, 2006417aeaffSRichard Henderson full->attrs, prot, retaddr); 200734d49937SPeter Maydell } 200834d49937SPeter Maydell 200934d49937SPeter Maydell return hostaddr; 2010d9bb58e5SYang Zhong 2011d9bb58e5SYang Zhong stop_the_world: 201229a0af61SRichard Henderson cpu_loop_exit_atomic(env_cpu(env), retaddr); 2013d9bb58e5SYang Zhong } 2014d9bb58e5SYang Zhong 2015eed56642SAlex Bennée /* 2016f83bcecbSRichard Henderson * Verify that we have passed the correct MemOp to the correct function. 2017f83bcecbSRichard Henderson * 2018f83bcecbSRichard Henderson * In the case of the helper_*_mmu functions, we will have done this by 2019f83bcecbSRichard Henderson * using the MemOp to look up the helper during code generation. 2020f83bcecbSRichard Henderson * 2021f83bcecbSRichard Henderson * In the case of the cpu_*_mmu functions, this is up to the caller. 2022f83bcecbSRichard Henderson * We could present one function to target code, and dispatch based on 2023f83bcecbSRichard Henderson * the MemOp, but so far we have worked hard to avoid an indirect function 2024f83bcecbSRichard Henderson * call along the memory path. 
2025f83bcecbSRichard Henderson */ 2026f83bcecbSRichard Henderson static void validate_memop(MemOpIdx oi, MemOp expected) 2027f83bcecbSRichard Henderson { 2028f83bcecbSRichard Henderson #ifdef CONFIG_DEBUG_TCG 2029f83bcecbSRichard Henderson MemOp have = get_memop(oi) & (MO_SIZE | MO_BSWAP); 2030f83bcecbSRichard Henderson assert(have == expected); 2031f83bcecbSRichard Henderson #endif 2032f83bcecbSRichard Henderson } 2033f83bcecbSRichard Henderson 2034f83bcecbSRichard Henderson /* 2035eed56642SAlex Bennée * Load Helpers 2036eed56642SAlex Bennée * 2037eed56642SAlex Bennée * We support two different access types. SOFTMMU_CODE_ACCESS is 2038eed56642SAlex Bennée * specifically for reading instructions from system memory. It is 2039eed56642SAlex Bennée * called by the translation loop and in some helpers where the code 2040eed56642SAlex Bennée * is disassembled. It shouldn't be called directly by guest code. 2041*cdfac37bSRichard Henderson * 2042eed56642SAlex Bennée * For the benefit of TCG generated code, we want to avoid the 2043eed56642SAlex Bennée * complication of ABI-specific return type promotion and always 2044eed56642SAlex Bennée * return a value extended to the register size of the host. This is 2045eed56642SAlex Bennée * tcg_target_long, except in the case of a 32-bit host and 64-bit 2046eed56642SAlex Bennée * data, and for that we always have uint64_t. 2047eed56642SAlex Bennée * 2048eed56642SAlex Bennée * We don't bother with this widened value for SOFTMMU_CODE_ACCESS. 2049eed56642SAlex Bennée */ 2050eed56642SAlex Bennée 20518cfdacaaSRichard Henderson /** 20528cfdacaaSRichard Henderson * do_ld_mmio_beN: 20538cfdacaaSRichard Henderson * @env: cpu context 20548cfdacaaSRichard Henderson * @p: translation parameters 20558cfdacaaSRichard Henderson * @ret_be: accumulated data 20568cfdacaaSRichard Henderson * @mmu_idx: virtual address context 20578cfdacaaSRichard Henderson * @ra: return address into tcg generated code, or 0 20588cfdacaaSRichard Henderson * 20598cfdacaaSRichard Henderson * Load @p->size bytes from @p->addr, which is memory-mapped i/o. 20608cfdacaaSRichard Henderson * The bytes are concatenated in big-endian order with @ret_be. 20618cfdacaaSRichard Henderson */ 20628cfdacaaSRichard Henderson static uint64_t do_ld_mmio_beN(CPUArchState *env, MMULookupPageData *p, 20638cfdacaaSRichard Henderson uint64_t ret_be, int mmu_idx, 20648cfdacaaSRichard Henderson MMUAccessType type, uintptr_t ra) 20652dd92606SRichard Henderson { 20668cfdacaaSRichard Henderson CPUTLBEntryFull *full = p->full; 20678cfdacaaSRichard Henderson target_ulong addr = p->addr; 20688cfdacaaSRichard Henderson int i, size = p->size; 20698cfdacaaSRichard Henderson 20708cfdacaaSRichard Henderson QEMU_IOTHREAD_LOCK_GUARD(); 20718cfdacaaSRichard Henderson for (i = 0; i < size; i++) { 20728cfdacaaSRichard Henderson uint8_t x = io_readx(env, full, mmu_idx, addr + i, ra, type, MO_UB); 20738cfdacaaSRichard Henderson ret_be = (ret_be << 8) | x; 20748cfdacaaSRichard Henderson } 20758cfdacaaSRichard Henderson return ret_be; 20768cfdacaaSRichard Henderson } 20778cfdacaaSRichard Henderson 20788cfdacaaSRichard Henderson /** 20798cfdacaaSRichard Henderson * do_ld_bytes_beN 20808cfdacaaSRichard Henderson * @p: translation parameters 20818cfdacaaSRichard Henderson * @ret_be: accumulated data 20828cfdacaaSRichard Henderson * 20838cfdacaaSRichard Henderson * Load @p->size bytes from @p->haddr, which is RAM. 20848cfdacaaSRichard Henderson * The bytes to concatenated in big-endian order with @ret_be. 
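 *
 * For example, loading the three bytes {0x12, 0x34, 0x56} this way
 * yields (ret_be << 24) | 0x123456.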
20858cfdacaaSRichard Henderson */ 20868cfdacaaSRichard Henderson static uint64_t do_ld_bytes_beN(MMULookupPageData *p, uint64_t ret_be) 20878cfdacaaSRichard Henderson { 20888cfdacaaSRichard Henderson uint8_t *haddr = p->haddr; 20898cfdacaaSRichard Henderson int i, size = p->size; 20908cfdacaaSRichard Henderson 20918cfdacaaSRichard Henderson for (i = 0; i < size; i++) { 20928cfdacaaSRichard Henderson ret_be = (ret_be << 8) | haddr[i]; 20938cfdacaaSRichard Henderson } 20948cfdacaaSRichard Henderson return ret_be; 20958cfdacaaSRichard Henderson } 20968cfdacaaSRichard Henderson 2097*cdfac37bSRichard Henderson /** 2098*cdfac37bSRichard Henderson * do_ld_parts_beN 2099*cdfac37bSRichard Henderson * @p: translation parameters 2100*cdfac37bSRichard Henderson * @ret_be: accumulated data 2101*cdfac37bSRichard Henderson * 2102*cdfac37bSRichard Henderson * As do_ld_bytes_beN, but atomically on each aligned part. 2103*cdfac37bSRichard Henderson */ 2104*cdfac37bSRichard Henderson static uint64_t do_ld_parts_beN(MMULookupPageData *p, uint64_t ret_be) 2105*cdfac37bSRichard Henderson { 2106*cdfac37bSRichard Henderson void *haddr = p->haddr; 2107*cdfac37bSRichard Henderson int size = p->size; 2108*cdfac37bSRichard Henderson 2109*cdfac37bSRichard Henderson do { 2110*cdfac37bSRichard Henderson uint64_t x; 2111*cdfac37bSRichard Henderson int n; 2112*cdfac37bSRichard Henderson 2113*cdfac37bSRichard Henderson /* 2114*cdfac37bSRichard Henderson * Find minimum of alignment and size. 2115*cdfac37bSRichard Henderson * This is slightly stronger than required by MO_ATOM_SUBALIGN, which 2116*cdfac37bSRichard Henderson * would have only checked the low bits of addr|size once at the start, 2117*cdfac37bSRichard Henderson * but is just as easy. 2118*cdfac37bSRichard Henderson */ 2119*cdfac37bSRichard Henderson switch (((uintptr_t)haddr | size) & 7) { 2120*cdfac37bSRichard Henderson case 4: 2121*cdfac37bSRichard Henderson x = cpu_to_be32(load_atomic4(haddr)); 2122*cdfac37bSRichard Henderson ret_be = (ret_be << 32) | x; 2123*cdfac37bSRichard Henderson n = 4; 2124*cdfac37bSRichard Henderson break; 2125*cdfac37bSRichard Henderson case 2: 2126*cdfac37bSRichard Henderson case 6: 2127*cdfac37bSRichard Henderson x = cpu_to_be16(load_atomic2(haddr)); 2128*cdfac37bSRichard Henderson ret_be = (ret_be << 16) | x; 2129*cdfac37bSRichard Henderson n = 2; 2130*cdfac37bSRichard Henderson break; 2131*cdfac37bSRichard Henderson default: 2132*cdfac37bSRichard Henderson x = *(uint8_t *)haddr; 2133*cdfac37bSRichard Henderson ret_be = (ret_be << 8) | x; 2134*cdfac37bSRichard Henderson n = 1; 2135*cdfac37bSRichard Henderson break; 2136*cdfac37bSRichard Henderson case 0: 2137*cdfac37bSRichard Henderson g_assert_not_reached(); 2138*cdfac37bSRichard Henderson } 2139*cdfac37bSRichard Henderson haddr += n; 2140*cdfac37bSRichard Henderson size -= n; 2141*cdfac37bSRichard Henderson } while (size != 0); 2142*cdfac37bSRichard Henderson return ret_be; 2143*cdfac37bSRichard Henderson } 2144*cdfac37bSRichard Henderson 2145*cdfac37bSRichard Henderson /** 2146*cdfac37bSRichard Henderson * do_ld_whole_be4 2147*cdfac37bSRichard Henderson * @p: translation parameters 2148*cdfac37bSRichard Henderson * @ret_be: accumulated data 2149*cdfac37bSRichard Henderson * 2150*cdfac37bSRichard Henderson * As do_ld_bytes_beN, but with one atomic load. 2151*cdfac37bSRichard Henderson * Four aligned bytes are guaranteed to cover the load. 
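 *
 * Illustrative example: for a 2-byte load at an address with offset o == 1
 * within its aligned word {b0 b1 b2 b3}, the whole word is loaded atomically,
 * converted so that b0 is the most significant byte, shifted left by 8 to
 * drop b0, then right by 16 so that only b1 b2 remain in the low bits before
 * being merged with ret_be.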
2152*cdfac37bSRichard Henderson */ 2153*cdfac37bSRichard Henderson static uint64_t do_ld_whole_be4(MMULookupPageData *p, uint64_t ret_be) 2154*cdfac37bSRichard Henderson { 2155*cdfac37bSRichard Henderson int o = p->addr & 3; 2156*cdfac37bSRichard Henderson uint32_t x = load_atomic4(p->haddr - o); 2157*cdfac37bSRichard Henderson 2158*cdfac37bSRichard Henderson x = cpu_to_be32(x); 2159*cdfac37bSRichard Henderson x <<= o * 8; 2160*cdfac37bSRichard Henderson x >>= (4 - p->size) * 8; 2161*cdfac37bSRichard Henderson return (ret_be << (p->size * 8)) | x; 2162*cdfac37bSRichard Henderson } 2163*cdfac37bSRichard Henderson 2164*cdfac37bSRichard Henderson /** 2165*cdfac37bSRichard Henderson * do_ld_whole_be8 2166*cdfac37bSRichard Henderson * @p: translation parameters 2167*cdfac37bSRichard Henderson * @ret_be: accumulated data 2168*cdfac37bSRichard Henderson * 2169*cdfac37bSRichard Henderson * As do_ld_bytes_beN, but with one atomic load. 2170*cdfac37bSRichard Henderson * Eight aligned bytes are guaranteed to cover the load. 2171*cdfac37bSRichard Henderson */ 2172*cdfac37bSRichard Henderson static uint64_t do_ld_whole_be8(CPUArchState *env, uintptr_t ra, 2173*cdfac37bSRichard Henderson MMULookupPageData *p, uint64_t ret_be) 2174*cdfac37bSRichard Henderson { 2175*cdfac37bSRichard Henderson int o = p->addr & 7; 2176*cdfac37bSRichard Henderson uint64_t x = load_atomic8_or_exit(env, ra, p->haddr - o); 2177*cdfac37bSRichard Henderson 2178*cdfac37bSRichard Henderson x = cpu_to_be64(x); 2179*cdfac37bSRichard Henderson x <<= o * 8; 2180*cdfac37bSRichard Henderson x >>= (8 - p->size) * 8; 2181*cdfac37bSRichard Henderson return (ret_be << (p->size * 8)) | x; 2182*cdfac37bSRichard Henderson } 2183*cdfac37bSRichard Henderson 21848cfdacaaSRichard Henderson /* 21858cfdacaaSRichard Henderson * Wrapper for the above. 21868cfdacaaSRichard Henderson */ 21878cfdacaaSRichard Henderson static uint64_t do_ld_beN(CPUArchState *env, MMULookupPageData *p, 2188*cdfac37bSRichard Henderson uint64_t ret_be, int mmu_idx, MMUAccessType type, 2189*cdfac37bSRichard Henderson MemOp mop, uintptr_t ra) 21908cfdacaaSRichard Henderson { 2191*cdfac37bSRichard Henderson MemOp atom; 2192*cdfac37bSRichard Henderson unsigned tmp, half_size; 2193*cdfac37bSRichard Henderson 21948cfdacaaSRichard Henderson if (unlikely(p->flags & TLB_MMIO)) { 21958cfdacaaSRichard Henderson return do_ld_mmio_beN(env, p, ret_be, mmu_idx, type, ra); 2196*cdfac37bSRichard Henderson } 2197*cdfac37bSRichard Henderson 2198*cdfac37bSRichard Henderson /* 2199*cdfac37bSRichard Henderson * It is a given that we cross a page and therefore there is no 2200*cdfac37bSRichard Henderson * atomicity for the load as a whole, but subobjects may need attention. 2201*cdfac37bSRichard Henderson */ 2202*cdfac37bSRichard Henderson atom = mop & MO_ATOM_MASK; 2203*cdfac37bSRichard Henderson switch (atom) { 2204*cdfac37bSRichard Henderson case MO_ATOM_SUBALIGN: 2205*cdfac37bSRichard Henderson return do_ld_parts_beN(p, ret_be); 2206*cdfac37bSRichard Henderson 2207*cdfac37bSRichard Henderson case MO_ATOM_IFALIGN_PAIR: 2208*cdfac37bSRichard Henderson case MO_ATOM_WITHIN16_PAIR: 2209*cdfac37bSRichard Henderson tmp = mop & MO_SIZE; 2210*cdfac37bSRichard Henderson tmp = tmp ? tmp - 1 : 0; 2211*cdfac37bSRichard Henderson half_size = 1 << tmp; 2212*cdfac37bSRichard Henderson if (atom == MO_ATOM_IFALIGN_PAIR 2213*cdfac37bSRichard Henderson ?
p->size == half_size 2214*cdfac37bSRichard Henderson : p->size >= half_size) { 2215*cdfac37bSRichard Henderson if (!HAVE_al8_fast && p->size < 4) { 2216*cdfac37bSRichard Henderson return do_ld_whole_be4(p, ret_be); 22178cfdacaaSRichard Henderson } else { 2218*cdfac37bSRichard Henderson return do_ld_whole_be8(env, ra, p, ret_be); 2219*cdfac37bSRichard Henderson } 2220*cdfac37bSRichard Henderson } 2221*cdfac37bSRichard Henderson /* fall through */ 2222*cdfac37bSRichard Henderson 2223*cdfac37bSRichard Henderson case MO_ATOM_IFALIGN: 2224*cdfac37bSRichard Henderson case MO_ATOM_WITHIN16: 2225*cdfac37bSRichard Henderson case MO_ATOM_NONE: 22268cfdacaaSRichard Henderson return do_ld_bytes_beN(p, ret_be); 2227*cdfac37bSRichard Henderson 2228*cdfac37bSRichard Henderson default: 2229*cdfac37bSRichard Henderson g_assert_not_reached(); 22308cfdacaaSRichard Henderson } 22318cfdacaaSRichard Henderson } 22328cfdacaaSRichard Henderson 22338cfdacaaSRichard Henderson static uint8_t do_ld_1(CPUArchState *env, MMULookupPageData *p, int mmu_idx, 22348cfdacaaSRichard Henderson MMUAccessType type, uintptr_t ra) 22358cfdacaaSRichard Henderson { 22368cfdacaaSRichard Henderson if (unlikely(p->flags & TLB_MMIO)) { 22378cfdacaaSRichard Henderson return io_readx(env, p->full, mmu_idx, p->addr, ra, type, MO_UB); 22388cfdacaaSRichard Henderson } else { 22398cfdacaaSRichard Henderson return *(uint8_t *)p->haddr; 22408cfdacaaSRichard Henderson } 22418cfdacaaSRichard Henderson } 22428cfdacaaSRichard Henderson 22438cfdacaaSRichard Henderson static uint16_t do_ld_2(CPUArchState *env, MMULookupPageData *p, int mmu_idx, 22448cfdacaaSRichard Henderson MMUAccessType type, MemOp memop, uintptr_t ra) 22458cfdacaaSRichard Henderson { 22468cfdacaaSRichard Henderson uint64_t ret; 22478cfdacaaSRichard Henderson 22488cfdacaaSRichard Henderson if (unlikely(p->flags & TLB_MMIO)) { 22498cfdacaaSRichard Henderson return io_readx(env, p->full, mmu_idx, p->addr, ra, type, memop); 22508cfdacaaSRichard Henderson } 22518cfdacaaSRichard Henderson 22528cfdacaaSRichard Henderson /* Perform the load host endian, then swap if necessary. */ 2253*cdfac37bSRichard Henderson ret = load_atom_2(env, ra, p->haddr, memop); 22548cfdacaaSRichard Henderson if (memop & MO_BSWAP) { 22558cfdacaaSRichard Henderson ret = bswap16(ret); 22568cfdacaaSRichard Henderson } 22578cfdacaaSRichard Henderson return ret; 22588cfdacaaSRichard Henderson } 22598cfdacaaSRichard Henderson 22608cfdacaaSRichard Henderson static uint32_t do_ld_4(CPUArchState *env, MMULookupPageData *p, int mmu_idx, 22618cfdacaaSRichard Henderson MMUAccessType type, MemOp memop, uintptr_t ra) 22628cfdacaaSRichard Henderson { 22638cfdacaaSRichard Henderson uint32_t ret; 22648cfdacaaSRichard Henderson 22658cfdacaaSRichard Henderson if (unlikely(p->flags & TLB_MMIO)) { 22668cfdacaaSRichard Henderson return io_readx(env, p->full, mmu_idx, p->addr, ra, type, memop); 22678cfdacaaSRichard Henderson } 22688cfdacaaSRichard Henderson 22698cfdacaaSRichard Henderson /* Perform the load host endian. 
*/ 2270*cdfac37bSRichard Henderson ret = load_atom_4(env, ra, p->haddr, memop); 22718cfdacaaSRichard Henderson if (memop & MO_BSWAP) { 22728cfdacaaSRichard Henderson ret = bswap32(ret); 22738cfdacaaSRichard Henderson } 22748cfdacaaSRichard Henderson return ret; 22758cfdacaaSRichard Henderson } 22768cfdacaaSRichard Henderson 22778cfdacaaSRichard Henderson static uint64_t do_ld_8(CPUArchState *env, MMULookupPageData *p, int mmu_idx, 22788cfdacaaSRichard Henderson MMUAccessType type, MemOp memop, uintptr_t ra) 22798cfdacaaSRichard Henderson { 22808cfdacaaSRichard Henderson uint64_t ret; 22818cfdacaaSRichard Henderson 22828cfdacaaSRichard Henderson if (unlikely(p->flags & TLB_MMIO)) { 22838cfdacaaSRichard Henderson return io_readx(env, p->full, mmu_idx, p->addr, ra, type, memop); 22848cfdacaaSRichard Henderson } 22858cfdacaaSRichard Henderson 22868cfdacaaSRichard Henderson /* Perform the load host endian. */ 2287*cdfac37bSRichard Henderson ret = load_atom_8(env, ra, p->haddr, memop); 22888cfdacaaSRichard Henderson if (memop & MO_BSWAP) { 22898cfdacaaSRichard Henderson ret = bswap64(ret); 22908cfdacaaSRichard Henderson } 22918cfdacaaSRichard Henderson return ret; 22928cfdacaaSRichard Henderson } 22938cfdacaaSRichard Henderson 22948cfdacaaSRichard Henderson static uint8_t do_ld1_mmu(CPUArchState *env, target_ulong addr, MemOpIdx oi, 22958cfdacaaSRichard Henderson uintptr_t ra, MMUAccessType access_type) 22968cfdacaaSRichard Henderson { 22978cfdacaaSRichard Henderson MMULookupLocals l; 22988cfdacaaSRichard Henderson bool crosspage; 22998cfdacaaSRichard Henderson 23008cfdacaaSRichard Henderson crosspage = mmu_lookup(env, addr, oi, ra, access_type, &l); 23018cfdacaaSRichard Henderson tcg_debug_assert(!crosspage); 23028cfdacaaSRichard Henderson 23038cfdacaaSRichard Henderson return do_ld_1(env, &l.page[0], l.mmu_idx, access_type, ra); 23042dd92606SRichard Henderson } 23052dd92606SRichard Henderson 2306fc1bc777SRichard Henderson tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr, 23079002ffcbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 2308eed56642SAlex Bennée { 23098cfdacaaSRichard Henderson validate_memop(oi, MO_UB); 23108cfdacaaSRichard Henderson return do_ld1_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD); 23112dd92606SRichard Henderson } 23122dd92606SRichard Henderson 23138cfdacaaSRichard Henderson static uint16_t do_ld2_mmu(CPUArchState *env, target_ulong addr, MemOpIdx oi, 23148cfdacaaSRichard Henderson uintptr_t ra, MMUAccessType access_type) 23152dd92606SRichard Henderson { 23168cfdacaaSRichard Henderson MMULookupLocals l; 23178cfdacaaSRichard Henderson bool crosspage; 23188cfdacaaSRichard Henderson uint16_t ret; 23198cfdacaaSRichard Henderson uint8_t a, b; 23208cfdacaaSRichard Henderson 23218cfdacaaSRichard Henderson crosspage = mmu_lookup(env, addr, oi, ra, access_type, &l); 23228cfdacaaSRichard Henderson if (likely(!crosspage)) { 23238cfdacaaSRichard Henderson return do_ld_2(env, &l.page[0], l.mmu_idx, access_type, l.memop, ra); 23248cfdacaaSRichard Henderson } 23258cfdacaaSRichard Henderson 23268cfdacaaSRichard Henderson a = do_ld_1(env, &l.page[0], l.mmu_idx, access_type, ra); 23278cfdacaaSRichard Henderson b = do_ld_1(env, &l.page[1], l.mmu_idx, access_type, ra); 23288cfdacaaSRichard Henderson 23298cfdacaaSRichard Henderson if ((l.memop & MO_BSWAP) == MO_LE) { 23308cfdacaaSRichard Henderson ret = a | (b << 8); 23318cfdacaaSRichard Henderson } else { 23328cfdacaaSRichard Henderson ret = b | (a << 8); 23338cfdacaaSRichard Henderson } 23348cfdacaaSRichard 
Henderson return ret; 2335eed56642SAlex Bennée } 2336eed56642SAlex Bennée 2337fc1bc777SRichard Henderson tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr, 23389002ffcbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 2339eed56642SAlex Bennée { 23408cfdacaaSRichard Henderson validate_memop(oi, MO_LEUW); 23418cfdacaaSRichard Henderson return do_ld2_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD); 2342eed56642SAlex Bennée } 2343eed56642SAlex Bennée 2344fc1bc777SRichard Henderson tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr, 23459002ffcbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 2346eed56642SAlex Bennée { 23478cfdacaaSRichard Henderson validate_memop(oi, MO_BEUW); 23488cfdacaaSRichard Henderson return do_ld2_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD); 23492dd92606SRichard Henderson } 23502dd92606SRichard Henderson 23518cfdacaaSRichard Henderson static uint32_t do_ld4_mmu(CPUArchState *env, target_ulong addr, MemOpIdx oi, 23528cfdacaaSRichard Henderson uintptr_t ra, MMUAccessType access_type) 23532dd92606SRichard Henderson { 23548cfdacaaSRichard Henderson MMULookupLocals l; 23558cfdacaaSRichard Henderson bool crosspage; 23568cfdacaaSRichard Henderson uint32_t ret; 23578cfdacaaSRichard Henderson 23588cfdacaaSRichard Henderson crosspage = mmu_lookup(env, addr, oi, ra, access_type, &l); 23598cfdacaaSRichard Henderson if (likely(!crosspage)) { 23608cfdacaaSRichard Henderson return do_ld_4(env, &l.page[0], l.mmu_idx, access_type, l.memop, ra); 23618cfdacaaSRichard Henderson } 23628cfdacaaSRichard Henderson 2363*cdfac37bSRichard Henderson ret = do_ld_beN(env, &l.page[0], 0, l.mmu_idx, access_type, l.memop, ra); 2364*cdfac37bSRichard Henderson ret = do_ld_beN(env, &l.page[1], ret, l.mmu_idx, access_type, l.memop, ra); 23658cfdacaaSRichard Henderson if ((l.memop & MO_BSWAP) == MO_LE) { 23668cfdacaaSRichard Henderson ret = bswap32(ret); 23678cfdacaaSRichard Henderson } 23688cfdacaaSRichard Henderson return ret; 2369eed56642SAlex Bennée } 2370eed56642SAlex Bennée 2371fc1bc777SRichard Henderson tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr, 23729002ffcbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 2373eed56642SAlex Bennée { 23748cfdacaaSRichard Henderson validate_memop(oi, MO_LEUL); 23758cfdacaaSRichard Henderson return do_ld4_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD); 2376eed56642SAlex Bennée } 2377eed56642SAlex Bennée 2378fc1bc777SRichard Henderson tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr, 23799002ffcbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 2380eed56642SAlex Bennée { 23818cfdacaaSRichard Henderson validate_memop(oi, MO_BEUL); 23828cfdacaaSRichard Henderson return do_ld4_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD); 23838cfdacaaSRichard Henderson } 23848cfdacaaSRichard Henderson 23858cfdacaaSRichard Henderson static uint64_t do_ld8_mmu(CPUArchState *env, target_ulong addr, MemOpIdx oi, 23868cfdacaaSRichard Henderson uintptr_t ra, MMUAccessType access_type) 23878cfdacaaSRichard Henderson { 23888cfdacaaSRichard Henderson MMULookupLocals l; 23898cfdacaaSRichard Henderson bool crosspage; 23908cfdacaaSRichard Henderson uint64_t ret; 23918cfdacaaSRichard Henderson 23928cfdacaaSRichard Henderson crosspage = mmu_lookup(env, addr, oi, ra, access_type, &l); 23938cfdacaaSRichard Henderson if (likely(!crosspage)) { 23948cfdacaaSRichard Henderson return do_ld_8(env, &l.page[0], l.mmu_idx, access_type, l.memop, ra); 23958cfdacaaSRichard Henderson } 23968cfdacaaSRichard Henderson 
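    /*
     * Cross-page path: both halves are accumulated big-endian, so page[0]
     * ends up in the high bytes and page[1] in the low bytes; a single
     * bswap64 below then suffices for little-endian MemOps.
     */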
2397*cdfac37bSRichard Henderson ret = do_ld_beN(env, &l.page[0], 0, l.mmu_idx, access_type, l.memop, ra); 2398*cdfac37bSRichard Henderson ret = do_ld_beN(env, &l.page[1], ret, l.mmu_idx, access_type, l.memop, ra); 23998cfdacaaSRichard Henderson if ((l.memop & MO_BSWAP) == MO_LE) { 24008cfdacaaSRichard Henderson ret = bswap64(ret); 24018cfdacaaSRichard Henderson } 24028cfdacaaSRichard Henderson return ret; 2403eed56642SAlex Bennée } 2404eed56642SAlex Bennée 2405fc1bc777SRichard Henderson uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr, 24069002ffcbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 2407eed56642SAlex Bennée { 2408fc313c64SFrédéric Pétrot validate_memop(oi, MO_LEUQ); 24098cfdacaaSRichard Henderson return do_ld8_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD); 2410eed56642SAlex Bennée } 2411eed56642SAlex Bennée 2412fc1bc777SRichard Henderson uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr, 24139002ffcbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 2414eed56642SAlex Bennée { 2415fc313c64SFrédéric Pétrot validate_memop(oi, MO_BEUQ); 24168cfdacaaSRichard Henderson return do_ld8_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD); 2417eed56642SAlex Bennée } 2418eed56642SAlex Bennée 2419eed56642SAlex Bennée /* 2420eed56642SAlex Bennée * Provide signed versions of the load routines as well. We can of course 2421eed56642SAlex Bennée * avoid this for 64-bit data, or for 32-bit data on 32-bit host. 2422eed56642SAlex Bennée */ 2423eed56642SAlex Bennée 2424eed56642SAlex Bennée 2425eed56642SAlex Bennée tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr, 24269002ffcbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 2427eed56642SAlex Bennée { 2428eed56642SAlex Bennée return (int8_t)helper_ret_ldub_mmu(env, addr, oi, retaddr); 2429eed56642SAlex Bennée } 2430eed56642SAlex Bennée 2431eed56642SAlex Bennée tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr, 24329002ffcbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 2433eed56642SAlex Bennée { 2434eed56642SAlex Bennée return (int16_t)helper_le_lduw_mmu(env, addr, oi, retaddr); 2435eed56642SAlex Bennée } 2436eed56642SAlex Bennée 2437eed56642SAlex Bennée tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr, 24389002ffcbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 2439eed56642SAlex Bennée { 2440eed56642SAlex Bennée return (int16_t)helper_be_lduw_mmu(env, addr, oi, retaddr); 2441eed56642SAlex Bennée } 2442eed56642SAlex Bennée 2443eed56642SAlex Bennée tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr, 24449002ffcbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 2445eed56642SAlex Bennée { 2446eed56642SAlex Bennée return (int32_t)helper_le_ldul_mmu(env, addr, oi, retaddr); 2447eed56642SAlex Bennée } 2448eed56642SAlex Bennée 2449eed56642SAlex Bennée tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr, 24509002ffcbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 2451eed56642SAlex Bennée { 2452eed56642SAlex Bennée return (int32_t)helper_be_ldul_mmu(env, addr, oi, retaddr); 2453eed56642SAlex Bennée } 2454eed56642SAlex Bennée 2455eed56642SAlex Bennée /* 2456d03f1408SRichard Henderson * Load helpers for cpu_ldst.h. 
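 *
 * These mirror the helper_*_mmu entry points above but additionally issue
 * the plugin memory callback once the access has completed.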
2457d03f1408SRichard Henderson */ 2458d03f1408SRichard Henderson 24598cfdacaaSRichard Henderson static void plugin_load_cb(CPUArchState *env, abi_ptr addr, MemOpIdx oi) 2460d03f1408SRichard Henderson { 246137aff087SRichard Henderson qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); 2462d03f1408SRichard Henderson } 2463d03f1408SRichard Henderson 2464f83bcecbSRichard Henderson uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr, MemOpIdx oi, uintptr_t ra) 2465d03f1408SRichard Henderson { 24668cfdacaaSRichard Henderson uint8_t ret; 24678cfdacaaSRichard Henderson 24688cfdacaaSRichard Henderson validate_memop(oi, MO_UB); 24698cfdacaaSRichard Henderson ret = do_ld1_mmu(env, addr, oi, ra, MMU_DATA_LOAD); 24708cfdacaaSRichard Henderson plugin_load_cb(env, addr, oi); 24718cfdacaaSRichard Henderson return ret; 2472d03f1408SRichard Henderson } 2473d03f1408SRichard Henderson 2474f83bcecbSRichard Henderson uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr addr, 2475f83bcecbSRichard Henderson MemOpIdx oi, uintptr_t ra) 2476d03f1408SRichard Henderson { 24778cfdacaaSRichard Henderson uint16_t ret; 24788cfdacaaSRichard Henderson 24798cfdacaaSRichard Henderson validate_memop(oi, MO_BEUW); 24808cfdacaaSRichard Henderson ret = do_ld2_mmu(env, addr, oi, ra, MMU_DATA_LOAD); 24818cfdacaaSRichard Henderson plugin_load_cb(env, addr, oi); 24828cfdacaaSRichard Henderson return ret; 2483d03f1408SRichard Henderson } 2484d03f1408SRichard Henderson 2485f83bcecbSRichard Henderson uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr addr, 2486f83bcecbSRichard Henderson MemOpIdx oi, uintptr_t ra) 2487d03f1408SRichard Henderson { 24888cfdacaaSRichard Henderson uint32_t ret; 24898cfdacaaSRichard Henderson 24908cfdacaaSRichard Henderson validate_memop(oi, MO_BEUL); 24918cfdacaaSRichard Henderson ret = do_ld4_mmu(env, addr, oi, ra, MMU_DATA_LOAD); 24928cfdacaaSRichard Henderson plugin_load_cb(env, addr, oi); 24938cfdacaaSRichard Henderson return ret; 2494d03f1408SRichard Henderson } 2495d03f1408SRichard Henderson 2496f83bcecbSRichard Henderson uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr, 2497f83bcecbSRichard Henderson MemOpIdx oi, uintptr_t ra) 2498d03f1408SRichard Henderson { 24998cfdacaaSRichard Henderson uint64_t ret; 25008cfdacaaSRichard Henderson 25018cfdacaaSRichard Henderson validate_memop(oi, MO_BEUQ); 25028cfdacaaSRichard Henderson ret = do_ld8_mmu(env, addr, oi, ra, MMU_DATA_LOAD); 25038cfdacaaSRichard Henderson plugin_load_cb(env, addr, oi); 25048cfdacaaSRichard Henderson return ret; 2505d03f1408SRichard Henderson } 2506d03f1408SRichard Henderson 2507f83bcecbSRichard Henderson uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr addr, 2508f83bcecbSRichard Henderson MemOpIdx oi, uintptr_t ra) 2509d03f1408SRichard Henderson { 25108cfdacaaSRichard Henderson uint16_t ret; 25118cfdacaaSRichard Henderson 25128cfdacaaSRichard Henderson validate_memop(oi, MO_LEUW); 25138cfdacaaSRichard Henderson ret = do_ld2_mmu(env, addr, oi, ra, MMU_DATA_LOAD); 25148cfdacaaSRichard Henderson plugin_load_cb(env, addr, oi); 25158cfdacaaSRichard Henderson return ret; 2516d03f1408SRichard Henderson } 2517d03f1408SRichard Henderson 2518f83bcecbSRichard Henderson uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr addr, 2519f83bcecbSRichard Henderson MemOpIdx oi, uintptr_t ra) 2520d03f1408SRichard Henderson { 25218cfdacaaSRichard Henderson uint32_t ret; 25228cfdacaaSRichard Henderson 25238cfdacaaSRichard Henderson validate_memop(oi, MO_LEUL); 25248cfdacaaSRichard Henderson ret = do_ld4_mmu(env, addr, oi, ra, 
MMU_DATA_LOAD); 25258cfdacaaSRichard Henderson plugin_load_cb(env, addr, oi); 25268cfdacaaSRichard Henderson return ret; 2527b9e60257SRichard Henderson } 2528b9e60257SRichard Henderson 2529f83bcecbSRichard Henderson uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr, 2530f83bcecbSRichard Henderson MemOpIdx oi, uintptr_t ra) 2531b9e60257SRichard Henderson { 25328cfdacaaSRichard Henderson uint64_t ret; 25338cfdacaaSRichard Henderson 25348cfdacaaSRichard Henderson validate_memop(oi, MO_LEUQ); 25358cfdacaaSRichard Henderson ret = do_ld8_mmu(env, addr, oi, ra, MMU_DATA_LOAD); 25368cfdacaaSRichard Henderson plugin_load_cb(env, addr, oi); 25378cfdacaaSRichard Henderson return ret; 2538cfe04a4bSRichard Henderson } 2539cfe04a4bSRichard Henderson 2540cb48f365SRichard Henderson Int128 cpu_ld16_be_mmu(CPUArchState *env, abi_ptr addr, 2541cb48f365SRichard Henderson MemOpIdx oi, uintptr_t ra) 2542cb48f365SRichard Henderson { 2543cb48f365SRichard Henderson MemOp mop = get_memop(oi); 2544cb48f365SRichard Henderson int mmu_idx = get_mmuidx(oi); 2545cb48f365SRichard Henderson MemOpIdx new_oi; 2546cb48f365SRichard Henderson unsigned a_bits; 2547cb48f365SRichard Henderson uint64_t h, l; 2548cb48f365SRichard Henderson 2549cb48f365SRichard Henderson tcg_debug_assert((mop & (MO_BSWAP|MO_SSIZE)) == (MO_BE|MO_128)); 2550cb48f365SRichard Henderson a_bits = get_alignment_bits(mop); 2551cb48f365SRichard Henderson 2552cb48f365SRichard Henderson /* Handle CPU specific unaligned behaviour */ 2553cb48f365SRichard Henderson if (addr & ((1 << a_bits) - 1)) { 2554cb48f365SRichard Henderson cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_LOAD, 2555cb48f365SRichard Henderson mmu_idx, ra); 2556cb48f365SRichard Henderson } 2557cb48f365SRichard Henderson 2558cb48f365SRichard Henderson /* Construct an unaligned 64-bit replacement MemOpIdx. */ 2559cb48f365SRichard Henderson mop = (mop & ~(MO_SIZE | MO_AMASK)) | MO_64 | MO_UNALN; 2560cb48f365SRichard Henderson new_oi = make_memop_idx(mop, mmu_idx); 2561cb48f365SRichard Henderson 2562cb48f365SRichard Henderson h = helper_be_ldq_mmu(env, addr, new_oi, ra); 2563cb48f365SRichard Henderson l = helper_be_ldq_mmu(env, addr + 8, new_oi, ra); 2564cb48f365SRichard Henderson 2565cb48f365SRichard Henderson qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); 2566cb48f365SRichard Henderson return int128_make128(l, h); 2567cb48f365SRichard Henderson } 2568cb48f365SRichard Henderson 2569cb48f365SRichard Henderson Int128 cpu_ld16_le_mmu(CPUArchState *env, abi_ptr addr, 2570cb48f365SRichard Henderson MemOpIdx oi, uintptr_t ra) 2571cb48f365SRichard Henderson { 2572cb48f365SRichard Henderson MemOp mop = get_memop(oi); 2573cb48f365SRichard Henderson int mmu_idx = get_mmuidx(oi); 2574cb48f365SRichard Henderson MemOpIdx new_oi; 2575cb48f365SRichard Henderson unsigned a_bits; 2576cb48f365SRichard Henderson uint64_t h, l; 2577cb48f365SRichard Henderson 2578cb48f365SRichard Henderson tcg_debug_assert((mop & (MO_BSWAP|MO_SSIZE)) == (MO_LE|MO_128)); 2579cb48f365SRichard Henderson a_bits = get_alignment_bits(mop); 2580cb48f365SRichard Henderson 2581cb48f365SRichard Henderson /* Handle CPU specific unaligned behaviour */ 2582cb48f365SRichard Henderson if (addr & ((1 << a_bits) - 1)) { 2583cb48f365SRichard Henderson cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_LOAD, 2584cb48f365SRichard Henderson mmu_idx, ra); 2585cb48f365SRichard Henderson } 2586cb48f365SRichard Henderson 2587cb48f365SRichard Henderson /* Construct an unaligned 64-bit replacement MemOpIdx. 
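 * For example, (MO_LE | MO_128 | MO_ALIGN_16) becomes
 * (MO_LE | MO_64 | MO_UNALN) with the same mmu_idx; alignment was already
 * checked above, so the two 8-byte halves can be loaded with
 * helper_le_ldq_mmu.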
*/ 2588cb48f365SRichard Henderson mop = (mop & ~(MO_SIZE | MO_AMASK)) | MO_64 | MO_UNALN; 2589cb48f365SRichard Henderson new_oi = make_memop_idx(mop, mmu_idx); 2590cb48f365SRichard Henderson 2591cb48f365SRichard Henderson l = helper_le_ldq_mmu(env, addr, new_oi, ra); 2592cb48f365SRichard Henderson h = helper_le_ldq_mmu(env, addr + 8, new_oi, ra); 2593cb48f365SRichard Henderson 2594cb48f365SRichard Henderson qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); 2595cb48f365SRichard Henderson return int128_make128(l, h); 2596cb48f365SRichard Henderson } 2597cb48f365SRichard Henderson 2598d03f1408SRichard Henderson /* 2599eed56642SAlex Bennée * Store Helpers 2600eed56642SAlex Bennée */ 2601eed56642SAlex Bennée 2602c6b716cdSRichard Henderson static inline void QEMU_ALWAYS_INLINE 260380d9d1c6SRichard Henderson store_memop(void *haddr, uint64_t val, MemOp op) 260480d9d1c6SRichard Henderson { 260580d9d1c6SRichard Henderson switch (op) { 260680d9d1c6SRichard Henderson case MO_UB: 260780d9d1c6SRichard Henderson stb_p(haddr, val); 260880d9d1c6SRichard Henderson break; 260980d9d1c6SRichard Henderson case MO_BEUW: 261080d9d1c6SRichard Henderson stw_be_p(haddr, val); 261180d9d1c6SRichard Henderson break; 261280d9d1c6SRichard Henderson case MO_LEUW: 261380d9d1c6SRichard Henderson stw_le_p(haddr, val); 261480d9d1c6SRichard Henderson break; 261580d9d1c6SRichard Henderson case MO_BEUL: 261680d9d1c6SRichard Henderson stl_be_p(haddr, val); 261780d9d1c6SRichard Henderson break; 261880d9d1c6SRichard Henderson case MO_LEUL: 261980d9d1c6SRichard Henderson stl_le_p(haddr, val); 262080d9d1c6SRichard Henderson break; 2621fc313c64SFrédéric Pétrot case MO_BEUQ: 262280d9d1c6SRichard Henderson stq_be_p(haddr, val); 262380d9d1c6SRichard Henderson break; 2624fc313c64SFrédéric Pétrot case MO_LEUQ: 262580d9d1c6SRichard Henderson stq_le_p(haddr, val); 262680d9d1c6SRichard Henderson break; 262780d9d1c6SRichard Henderson default: 262880d9d1c6SRichard Henderson qemu_build_not_reached(); 262980d9d1c6SRichard Henderson } 263080d9d1c6SRichard Henderson } 263180d9d1c6SRichard Henderson 263259213461SRichard Henderson /** 263359213461SRichard Henderson * do_st_mmio_leN: 263459213461SRichard Henderson * @env: cpu context 263559213461SRichard Henderson * @p: translation parameters 263659213461SRichard Henderson * @val_le: data to store 263759213461SRichard Henderson * @mmu_idx: virtual address context 263859213461SRichard Henderson * @ra: return address into tcg generated code, or 0 263959213461SRichard Henderson * 264059213461SRichard Henderson * Store @p->size bytes at @p->addr, which is memory-mapped i/o. 264159213461SRichard Henderson * The bytes to store are extracted in little-endian order from @val_le; 264259213461SRichard Henderson * return the bytes of @val_le beyond @p->size that have not been stored. 
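 *
 * For example, storing 3 of the 8 bytes of 0x1122334455667788 writes
 * 0x88, 0x77 and 0x66 to successive addresses and returns
 * 0x0000001122334455.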
264359213461SRichard Henderson */ 264459213461SRichard Henderson static uint64_t do_st_mmio_leN(CPUArchState *env, MMULookupPageData *p, 264559213461SRichard Henderson uint64_t val_le, int mmu_idx, uintptr_t ra) 26466b8b622eSRichard Henderson { 264759213461SRichard Henderson CPUTLBEntryFull *full = p->full; 264859213461SRichard Henderson target_ulong addr = p->addr; 264959213461SRichard Henderson int i, size = p->size; 26506b8b622eSRichard Henderson 265159213461SRichard Henderson QEMU_IOTHREAD_LOCK_GUARD(); 265259213461SRichard Henderson for (i = 0; i < size; i++, val_le >>= 8) { 265359213461SRichard Henderson io_writex(env, full, mmu_idx, val_le, addr + i, ra, MO_UB); 265459213461SRichard Henderson } 265559213461SRichard Henderson return val_le; 265659213461SRichard Henderson } 265759213461SRichard Henderson 265859213461SRichard Henderson /** 265959213461SRichard Henderson * do_st_bytes_leN: 266059213461SRichard Henderson * @p: translation parameters 266159213461SRichard Henderson * @val_le: data to store 266259213461SRichard Henderson * 266359213461SRichard Henderson * Store @p->size bytes at @p->haddr, which is RAM. 266459213461SRichard Henderson * The bytes to store are extracted in little-endian order from @val_le; 266559213461SRichard Henderson * return the bytes of @val_le beyond @p->size that have not been stored. 26666b8b622eSRichard Henderson */ 266759213461SRichard Henderson static uint64_t do_st_bytes_leN(MMULookupPageData *p, uint64_t val_le) 266859213461SRichard Henderson { 266959213461SRichard Henderson uint8_t *haddr = p->haddr; 267059213461SRichard Henderson int i, size = p->size; 26716b8b622eSRichard Henderson 267259213461SRichard Henderson for (i = 0; i < size; i++, val_le >>= 8) { 267359213461SRichard Henderson haddr[i] = val_le; 26746b8b622eSRichard Henderson } 267559213461SRichard Henderson return val_le; 26766b8b622eSRichard Henderson } 26776b8b622eSRichard Henderson 26786b8b622eSRichard Henderson /* 267959213461SRichard Henderson * Wrapper for the above. 
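 *
 * Dispatch on p->flags: TLB_MMIO stores byte by byte via io_writex,
 * TLB_DISCARD_WRITE simply discards the low p->size bytes, and plain RAM
 * is stored through do_st_bytes_leN.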
26806b8b622eSRichard Henderson */ 268159213461SRichard Henderson static uint64_t do_st_leN(CPUArchState *env, MMULookupPageData *p, 268259213461SRichard Henderson uint64_t val_le, int mmu_idx, uintptr_t ra) 268359213461SRichard Henderson { 268459213461SRichard Henderson if (unlikely(p->flags & TLB_MMIO)) { 268559213461SRichard Henderson return do_st_mmio_leN(env, p, val_le, mmu_idx, ra); 268659213461SRichard Henderson } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) { 268759213461SRichard Henderson return val_le >> (p->size * 8); 26886b8b622eSRichard Henderson } else { 268959213461SRichard Henderson return do_st_bytes_leN(p, val_le); 26906b8b622eSRichard Henderson } 26916b8b622eSRichard Henderson } 26926b8b622eSRichard Henderson 269359213461SRichard Henderson static void do_st_1(CPUArchState *env, MMULookupPageData *p, uint8_t val, 269459213461SRichard Henderson int mmu_idx, uintptr_t ra) 2695eed56642SAlex Bennée { 269659213461SRichard Henderson if (unlikely(p->flags & TLB_MMIO)) { 269759213461SRichard Henderson io_writex(env, p->full, mmu_idx, val, p->addr, ra, MO_UB); 269859213461SRichard Henderson } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) { 269959213461SRichard Henderson /* nothing */ 27005b87b3e6SRichard Henderson } else { 270159213461SRichard Henderson *(uint8_t *)p->haddr = val; 27025b87b3e6SRichard Henderson } 2703eed56642SAlex Bennée } 2704eed56642SAlex Bennée 270559213461SRichard Henderson static void do_st_2(CPUArchState *env, MMULookupPageData *p, uint16_t val, 270659213461SRichard Henderson int mmu_idx, MemOp memop, uintptr_t ra) 2707eed56642SAlex Bennée { 270859213461SRichard Henderson if (unlikely(p->flags & TLB_MMIO)) { 270959213461SRichard Henderson io_writex(env, p->full, mmu_idx, val, p->addr, ra, memop); 271059213461SRichard Henderson } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) { 271159213461SRichard Henderson /* nothing */ 271259213461SRichard Henderson } else { 271359213461SRichard Henderson /* Swap to host endian if necessary, then store. */ 271459213461SRichard Henderson if (memop & MO_BSWAP) { 271559213461SRichard Henderson val = bswap16(val); 271659213461SRichard Henderson } 271759213461SRichard Henderson store_memop(p->haddr, val, MO_UW); 271859213461SRichard Henderson } 271959213461SRichard Henderson } 272059213461SRichard Henderson 272159213461SRichard Henderson static void do_st_4(CPUArchState *env, MMULookupPageData *p, uint32_t val, 272259213461SRichard Henderson int mmu_idx, MemOp memop, uintptr_t ra) 272359213461SRichard Henderson { 272459213461SRichard Henderson if (unlikely(p->flags & TLB_MMIO)) { 272559213461SRichard Henderson io_writex(env, p->full, mmu_idx, val, p->addr, ra, memop); 272659213461SRichard Henderson } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) { 272759213461SRichard Henderson /* nothing */ 272859213461SRichard Henderson } else { 272959213461SRichard Henderson /* Swap to host endian if necessary, then store. 
*/ 273059213461SRichard Henderson if (memop & MO_BSWAP) { 273159213461SRichard Henderson val = bswap32(val); 273259213461SRichard Henderson } 273359213461SRichard Henderson store_memop(p->haddr, val, MO_UL); 273459213461SRichard Henderson } 273559213461SRichard Henderson } 273659213461SRichard Henderson 273759213461SRichard Henderson static void do_st_8(CPUArchState *env, MMULookupPageData *p, uint64_t val, 273859213461SRichard Henderson int mmu_idx, MemOp memop, uintptr_t ra) 273959213461SRichard Henderson { 274059213461SRichard Henderson if (unlikely(p->flags & TLB_MMIO)) { 274159213461SRichard Henderson io_writex(env, p->full, mmu_idx, val, p->addr, ra, memop); 274259213461SRichard Henderson } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) { 274359213461SRichard Henderson /* nothing */ 274459213461SRichard Henderson } else { 274559213461SRichard Henderson /* Swap to host endian if necessary, then store. */ 274659213461SRichard Henderson if (memop & MO_BSWAP) { 274759213461SRichard Henderson val = bswap64(val); 274859213461SRichard Henderson } 274959213461SRichard Henderson store_memop(p->haddr, val, MO_UQ); 275059213461SRichard Henderson } 2751eed56642SAlex Bennée } 2752eed56642SAlex Bennée 275335a0bd63SRichard Henderson void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint32_t val, 275459213461SRichard Henderson MemOpIdx oi, uintptr_t ra) 2755f83bcecbSRichard Henderson { 275659213461SRichard Henderson MMULookupLocals l; 275759213461SRichard Henderson bool crosspage; 275859213461SRichard Henderson 275959213461SRichard Henderson validate_memop(oi, MO_UB); 276059213461SRichard Henderson crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l); 276159213461SRichard Henderson tcg_debug_assert(!crosspage); 276259213461SRichard Henderson 276359213461SRichard Henderson do_st_1(env, &l.page[0], val, l.mmu_idx, ra); 2764f83bcecbSRichard Henderson } 2765f83bcecbSRichard Henderson 276659213461SRichard Henderson static void do_st2_mmu(CPUArchState *env, target_ulong addr, uint16_t val, 276759213461SRichard Henderson MemOpIdx oi, uintptr_t ra) 2768f83bcecbSRichard Henderson { 276959213461SRichard Henderson MMULookupLocals l; 277059213461SRichard Henderson bool crosspage; 277159213461SRichard Henderson uint8_t a, b; 277259213461SRichard Henderson 277359213461SRichard Henderson crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l); 277459213461SRichard Henderson if (likely(!crosspage)) { 277559213461SRichard Henderson do_st_2(env, &l.page[0], val, l.mmu_idx, l.memop, ra); 277659213461SRichard Henderson return; 277759213461SRichard Henderson } 277859213461SRichard Henderson 277959213461SRichard Henderson if ((l.memop & MO_BSWAP) == MO_LE) { 278059213461SRichard Henderson a = val, b = val >> 8; 278159213461SRichard Henderson } else { 278259213461SRichard Henderson b = val, a = val >> 8; 278359213461SRichard Henderson } 278459213461SRichard Henderson do_st_1(env, &l.page[0], a, l.mmu_idx, ra); 278559213461SRichard Henderson do_st_1(env, &l.page[1], b, l.mmu_idx, ra); 2786f83bcecbSRichard Henderson } 2787f83bcecbSRichard Henderson 278835a0bd63SRichard Henderson void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint32_t val, 27899002ffcbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 2790eed56642SAlex Bennée { 279159213461SRichard Henderson validate_memop(oi, MO_LEUW); 279259213461SRichard Henderson do_st2_mmu(env, addr, val, oi, retaddr); 2793eed56642SAlex Bennée } 2794eed56642SAlex Bennée 279535a0bd63SRichard Henderson void helper_be_stw_mmu(CPUArchState *env, 
target_ulong addr, uint32_t val, 27969002ffcbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 2797eed56642SAlex Bennée { 279859213461SRichard Henderson validate_memop(oi, MO_BEUW); 279959213461SRichard Henderson do_st2_mmu(env, addr, val, oi, retaddr); 2800f83bcecbSRichard Henderson } 2801f83bcecbSRichard Henderson 280259213461SRichard Henderson static void do_st4_mmu(CPUArchState *env, target_ulong addr, uint32_t val, 280359213461SRichard Henderson MemOpIdx oi, uintptr_t ra) 2804f83bcecbSRichard Henderson { 280559213461SRichard Henderson MMULookupLocals l; 280659213461SRichard Henderson bool crosspage; 280759213461SRichard Henderson 280859213461SRichard Henderson crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l); 280959213461SRichard Henderson if (likely(!crosspage)) { 281059213461SRichard Henderson do_st_4(env, &l.page[0], val, l.mmu_idx, l.memop, ra); 281159213461SRichard Henderson return; 281259213461SRichard Henderson } 281359213461SRichard Henderson 281459213461SRichard Henderson /* Swap to little endian for simplicity, then store by bytes. */ 281559213461SRichard Henderson if ((l.memop & MO_BSWAP) != MO_LE) { 281659213461SRichard Henderson val = bswap32(val); 281759213461SRichard Henderson } 281859213461SRichard Henderson val = do_st_leN(env, &l.page[0], val, l.mmu_idx, ra); 281959213461SRichard Henderson (void) do_st_leN(env, &l.page[1], val, l.mmu_idx, ra); 2820eed56642SAlex Bennée } 2821eed56642SAlex Bennée 2822fc1bc777SRichard Henderson void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, 28239002ffcbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 2824eed56642SAlex Bennée { 282559213461SRichard Henderson validate_memop(oi, MO_LEUL); 282659213461SRichard Henderson do_st4_mmu(env, addr, val, oi, retaddr); 2827eed56642SAlex Bennée } 2828eed56642SAlex Bennée 2829fc1bc777SRichard Henderson void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, 28309002ffcbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 2831eed56642SAlex Bennée { 283259213461SRichard Henderson validate_memop(oi, MO_BEUL); 283359213461SRichard Henderson do_st4_mmu(env, addr, val, oi, retaddr); 283459213461SRichard Henderson } 283559213461SRichard Henderson 283659213461SRichard Henderson static void do_st8_mmu(CPUArchState *env, target_ulong addr, uint64_t val, 283759213461SRichard Henderson MemOpIdx oi, uintptr_t ra) 283859213461SRichard Henderson { 283959213461SRichard Henderson MMULookupLocals l; 284059213461SRichard Henderson bool crosspage; 284159213461SRichard Henderson 284259213461SRichard Henderson crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l); 284359213461SRichard Henderson if (likely(!crosspage)) { 284459213461SRichard Henderson do_st_8(env, &l.page[0], val, l.mmu_idx, l.memop, ra); 284559213461SRichard Henderson return; 284659213461SRichard Henderson } 284759213461SRichard Henderson 284859213461SRichard Henderson /* Swap to little endian for simplicity, then store by bytes. 
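 * Keeping the value little-endian lets do_st_leN on page[0] shift out the
 * bytes it has already stored; the remainder it returns is then stored to
 * page[1].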
*/ 284959213461SRichard Henderson if ((l.memop & MO_BSWAP) != MO_LE) { 285059213461SRichard Henderson val = bswap64(val); 285159213461SRichard Henderson } 285259213461SRichard Henderson val = do_st_leN(env, &l.page[0], val, l.mmu_idx, ra); 285359213461SRichard Henderson (void) do_st_leN(env, &l.page[1], val, l.mmu_idx, ra); 2854eed56642SAlex Bennée } 2855eed56642SAlex Bennée 2856fc1bc777SRichard Henderson void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, 28579002ffcbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 2858eed56642SAlex Bennée { 2859fc313c64SFrédéric Pétrot validate_memop(oi, MO_LEUQ); 286059213461SRichard Henderson do_st8_mmu(env, addr, val, oi, retaddr); 2861eed56642SAlex Bennée } 2862eed56642SAlex Bennée 2863fc1bc777SRichard Henderson void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, 28649002ffcbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 2865eed56642SAlex Bennée { 2866fc313c64SFrédéric Pétrot validate_memop(oi, MO_BEUQ); 286759213461SRichard Henderson do_st8_mmu(env, addr, val, oi, retaddr); 2868eed56642SAlex Bennée } 2869d9bb58e5SYang Zhong 2870d03f1408SRichard Henderson /* 2871d03f1408SRichard Henderson * Store Helpers for cpu_ldst.h 2872d03f1408SRichard Henderson */ 2873d03f1408SRichard Henderson 287459213461SRichard Henderson static void plugin_store_cb(CPUArchState *env, abi_ptr addr, MemOpIdx oi) 2875d03f1408SRichard Henderson { 287637aff087SRichard Henderson qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); 2877d03f1408SRichard Henderson } 2878d03f1408SRichard Henderson 2879f83bcecbSRichard Henderson void cpu_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val, 2880f83bcecbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 2881d03f1408SRichard Henderson { 288259213461SRichard Henderson helper_ret_stb_mmu(env, addr, val, oi, retaddr); 288359213461SRichard Henderson plugin_store_cb(env, addr, oi); 2884d03f1408SRichard Henderson } 2885d03f1408SRichard Henderson 2886f83bcecbSRichard Henderson void cpu_stw_be_mmu(CPUArchState *env, target_ulong addr, uint16_t val, 2887f83bcecbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 2888d03f1408SRichard Henderson { 288959213461SRichard Henderson helper_be_stw_mmu(env, addr, val, oi, retaddr); 289059213461SRichard Henderson plugin_store_cb(env, addr, oi); 2891d03f1408SRichard Henderson } 2892d03f1408SRichard Henderson 2893f83bcecbSRichard Henderson void cpu_stl_be_mmu(CPUArchState *env, target_ulong addr, uint32_t val, 2894f83bcecbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 2895d03f1408SRichard Henderson { 289659213461SRichard Henderson helper_be_stl_mmu(env, addr, val, oi, retaddr); 289759213461SRichard Henderson plugin_store_cb(env, addr, oi); 2898d03f1408SRichard Henderson } 2899d03f1408SRichard Henderson 2900f83bcecbSRichard Henderson void cpu_stq_be_mmu(CPUArchState *env, target_ulong addr, uint64_t val, 2901f83bcecbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 2902d03f1408SRichard Henderson { 290359213461SRichard Henderson helper_be_stq_mmu(env, addr, val, oi, retaddr); 290459213461SRichard Henderson plugin_store_cb(env, addr, oi); 2905b9e60257SRichard Henderson } 2906b9e60257SRichard Henderson 2907f83bcecbSRichard Henderson void cpu_stw_le_mmu(CPUArchState *env, target_ulong addr, uint16_t val, 2908f83bcecbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 2909b9e60257SRichard Henderson { 291059213461SRichard Henderson helper_le_stw_mmu(env, addr, val, oi, retaddr); 291159213461SRichard Henderson plugin_store_cb(env, addr, oi); 
2912b9e60257SRichard Henderson } 2913b9e60257SRichard Henderson 2914f83bcecbSRichard Henderson void cpu_stl_le_mmu(CPUArchState *env, target_ulong addr, uint32_t val, 2915f83bcecbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 2916b9e60257SRichard Henderson { 291759213461SRichard Henderson helper_le_stl_mmu(env, addr, val, oi, retaddr); 291859213461SRichard Henderson plugin_store_cb(env, addr, oi); 2919b9e60257SRichard Henderson } 2920b9e60257SRichard Henderson 2921f83bcecbSRichard Henderson void cpu_stq_le_mmu(CPUArchState *env, target_ulong addr, uint64_t val, 2922f83bcecbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 2923b9e60257SRichard Henderson { 292459213461SRichard Henderson helper_le_stq_mmu(env, addr, val, oi, retaddr); 292559213461SRichard Henderson plugin_store_cb(env, addr, oi); 2926d03f1408SRichard Henderson } 2927d03f1408SRichard Henderson 2928cb48f365SRichard Henderson void cpu_st16_be_mmu(CPUArchState *env, abi_ptr addr, Int128 val, 2929cb48f365SRichard Henderson MemOpIdx oi, uintptr_t ra) 2930cb48f365SRichard Henderson { 2931cb48f365SRichard Henderson MemOp mop = get_memop(oi); 2932cb48f365SRichard Henderson int mmu_idx = get_mmuidx(oi); 2933cb48f365SRichard Henderson MemOpIdx new_oi; 2934cb48f365SRichard Henderson unsigned a_bits; 2935cb48f365SRichard Henderson 2936cb48f365SRichard Henderson tcg_debug_assert((mop & (MO_BSWAP|MO_SSIZE)) == (MO_BE|MO_128)); 2937cb48f365SRichard Henderson a_bits = get_alignment_bits(mop); 2938cb48f365SRichard Henderson 2939cb48f365SRichard Henderson /* Handle CPU specific unaligned behaviour */ 2940cb48f365SRichard Henderson if (addr & ((1 << a_bits) - 1)) { 2941cb48f365SRichard Henderson cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE, 2942cb48f365SRichard Henderson mmu_idx, ra); 2943cb48f365SRichard Henderson } 2944cb48f365SRichard Henderson 2945cb48f365SRichard Henderson /* Construct an unaligned 64-bit replacement MemOpIdx. */ 2946cb48f365SRichard Henderson mop = (mop & ~(MO_SIZE | MO_AMASK)) | MO_64 | MO_UNALN; 2947cb48f365SRichard Henderson new_oi = make_memop_idx(mop, mmu_idx); 2948cb48f365SRichard Henderson 2949cb48f365SRichard Henderson helper_be_stq_mmu(env, addr, int128_gethi(val), new_oi, ra); 2950cb48f365SRichard Henderson helper_be_stq_mmu(env, addr + 8, int128_getlo(val), new_oi, ra); 2951cb48f365SRichard Henderson 2952cb48f365SRichard Henderson qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); 2953cb48f365SRichard Henderson } 2954cb48f365SRichard Henderson 2955cb48f365SRichard Henderson void cpu_st16_le_mmu(CPUArchState *env, abi_ptr addr, Int128 val, 2956cb48f365SRichard Henderson MemOpIdx oi, uintptr_t ra) 2957cb48f365SRichard Henderson { 2958cb48f365SRichard Henderson MemOp mop = get_memop(oi); 2959cb48f365SRichard Henderson int mmu_idx = get_mmuidx(oi); 2960cb48f365SRichard Henderson MemOpIdx new_oi; 2961cb48f365SRichard Henderson unsigned a_bits; 2962cb48f365SRichard Henderson 2963cb48f365SRichard Henderson tcg_debug_assert((mop & (MO_BSWAP|MO_SSIZE)) == (MO_LE|MO_128)); 2964cb48f365SRichard Henderson a_bits = get_alignment_bits(mop); 2965cb48f365SRichard Henderson 2966cb48f365SRichard Henderson /* Handle CPU specific unaligned behaviour */ 2967cb48f365SRichard Henderson if (addr & ((1 << a_bits) - 1)) { 2968cb48f365SRichard Henderson cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE, 2969cb48f365SRichard Henderson mmu_idx, ra); 2970cb48f365SRichard Henderson } 2971cb48f365SRichard Henderson 2972cb48f365SRichard Henderson /* Construct an unaligned 64-bit replacement MemOpIdx. 
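 * As in cpu_ld16_le_mmu above: MO_128 and the alignment requirement are
 * replaced by MO_64 | MO_UNALN, and the Int128 is then written as two
 * separate 8-byte stores, so this path does not provide single-copy
 * atomicity for the full 16 bytes.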
*/ 2973cb48f365SRichard Henderson mop = (mop & ~(MO_SIZE | MO_AMASK)) | MO_64 | MO_UNALN; 2974cb48f365SRichard Henderson new_oi = make_memop_idx(mop, mmu_idx); 2975cb48f365SRichard Henderson 2976cb48f365SRichard Henderson helper_le_stq_mmu(env, addr, int128_getlo(val), new_oi, ra); 2977cb48f365SRichard Henderson helper_le_stq_mmu(env, addr + 8, int128_gethi(val), new_oi, ra); 2978cb48f365SRichard Henderson 2979cb48f365SRichard Henderson qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); 2980cb48f365SRichard Henderson } 2981cb48f365SRichard Henderson 2982f83bcecbSRichard Henderson #include "ldst_common.c.inc" 2983cfe04a4bSRichard Henderson 2984be9568b4SRichard Henderson /* 2985be9568b4SRichard Henderson * First set of functions passes in OI and RETADDR. 2986be9568b4SRichard Henderson * This makes them callable from other helpers. 2987be9568b4SRichard Henderson */ 2988d9bb58e5SYang Zhong 2989d9bb58e5SYang Zhong #define ATOMIC_NAME(X) \ 2990be9568b4SRichard Henderson glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu) 2991a754f7f3SRichard Henderson 2992707526adSRichard Henderson #define ATOMIC_MMU_CLEANUP 2993d9bb58e5SYang Zhong 2994139c1837SPaolo Bonzini #include "atomic_common.c.inc" 2995d9bb58e5SYang Zhong 2996d9bb58e5SYang Zhong #define DATA_SIZE 1 2997d9bb58e5SYang Zhong #include "atomic_template.h" 2998d9bb58e5SYang Zhong 2999d9bb58e5SYang Zhong #define DATA_SIZE 2 3000d9bb58e5SYang Zhong #include "atomic_template.h" 3001d9bb58e5SYang Zhong 3002d9bb58e5SYang Zhong #define DATA_SIZE 4 3003d9bb58e5SYang Zhong #include "atomic_template.h" 3004d9bb58e5SYang Zhong 3005d9bb58e5SYang Zhong #ifdef CONFIG_ATOMIC64 3006d9bb58e5SYang Zhong #define DATA_SIZE 8 3007d9bb58e5SYang Zhong #include "atomic_template.h" 3008d9bb58e5SYang Zhong #endif 3009d9bb58e5SYang Zhong 3010e6cd4bb5SRichard Henderson #if HAVE_CMPXCHG128 || HAVE_ATOMIC128 3011d9bb58e5SYang Zhong #define DATA_SIZE 16 3012d9bb58e5SYang Zhong #include "atomic_template.h" 3013d9bb58e5SYang Zhong #endif 3014d9bb58e5SYang Zhong 3015d9bb58e5SYang Zhong /* Code access functions. 
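 * These fetch guest instructions rather than data: they use MMU_INST_FETCH,
 * and the cpu_ld*_code variants build their MemOpIdx from
 * cpu_mmu_index(env, true).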
*/ 3016d9bb58e5SYang Zhong 3017fc4120a3SRichard Henderson uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr) 3018eed56642SAlex Bennée { 30199002ffcbSRichard Henderson MemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(env, true)); 30208cfdacaaSRichard Henderson return do_ld1_mmu(env, addr, oi, 0, MMU_INST_FETCH); 30214cef72d0SAlex Bennée } 30224cef72d0SAlex Bennée 3023fc4120a3SRichard Henderson uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr) 30242dd92606SRichard Henderson { 30259002ffcbSRichard Henderson MemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(env, true)); 30268cfdacaaSRichard Henderson return do_ld2_mmu(env, addr, oi, 0, MMU_INST_FETCH); 30272dd92606SRichard Henderson } 30282dd92606SRichard Henderson 3029fc4120a3SRichard Henderson uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr) 30304cef72d0SAlex Bennée { 30319002ffcbSRichard Henderson MemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, true)); 30328cfdacaaSRichard Henderson return do_ld4_mmu(env, addr, oi, 0, MMU_INST_FETCH); 3033eed56642SAlex Bennée } 3034d9bb58e5SYang Zhong 3035fc4120a3SRichard Henderson uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr) 3036eed56642SAlex Bennée { 3037fc313c64SFrédéric Pétrot MemOpIdx oi = make_memop_idx(MO_TEUQ, cpu_mmu_index(env, true)); 30388cfdacaaSRichard Henderson return do_ld8_mmu(env, addr, oi, 0, MMU_INST_FETCH); 3039eed56642SAlex Bennée } 304028990626SRichard Henderson 304128990626SRichard Henderson uint8_t cpu_ldb_code_mmu(CPUArchState *env, abi_ptr addr, 304228990626SRichard Henderson MemOpIdx oi, uintptr_t retaddr) 304328990626SRichard Henderson { 30448cfdacaaSRichard Henderson return do_ld1_mmu(env, addr, oi, retaddr, MMU_INST_FETCH); 304528990626SRichard Henderson } 304628990626SRichard Henderson 304728990626SRichard Henderson uint16_t cpu_ldw_code_mmu(CPUArchState *env, abi_ptr addr, 304828990626SRichard Henderson MemOpIdx oi, uintptr_t retaddr) 304928990626SRichard Henderson { 30508cfdacaaSRichard Henderson return do_ld2_mmu(env, addr, oi, retaddr, MMU_INST_FETCH); 305128990626SRichard Henderson } 305228990626SRichard Henderson 305328990626SRichard Henderson uint32_t cpu_ldl_code_mmu(CPUArchState *env, abi_ptr addr, 305428990626SRichard Henderson MemOpIdx oi, uintptr_t retaddr) 305528990626SRichard Henderson { 30568cfdacaaSRichard Henderson return do_ld4_mmu(env, addr, oi, retaddr, MMU_INST_FETCH); 305728990626SRichard Henderson } 305828990626SRichard Henderson 305928990626SRichard Henderson uint64_t cpu_ldq_code_mmu(CPUArchState *env, abi_ptr addr, 306028990626SRichard Henderson MemOpIdx oi, uintptr_t retaddr) 306128990626SRichard Henderson { 30628cfdacaaSRichard Henderson return do_ld8_mmu(env, addr, oi, retaddr, MMU_INST_FETCH); 306328990626SRichard Henderson } 3064