/*
 *  Common CPU TLB handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
#include "qemu/error-report.h"
#include "exec/log.h"
#include "exec/helper-proto.h"
#include "qemu/atomic.h"
#include "qemu/atomic128.h"
#include "translate-all.h"
#include "trace-root.h"
#include "trace/mem.h"
#ifdef CONFIG_PLUGIN
#include "qemu/plugin-memory.h"
#endif

/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif

#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)

#define assert_cpu_is_self(cpu) do {                              \
        if (DEBUG_TLB_GATE) {                                     \
            g_assert(!(cpu)->created || qemu_cpu_is_self(cpu));   \
        }                                                         \
    } while (0)

/* run_on_cpu_data.target_ptr should always be big enough for a
 * target_ulong even on 32 bit builds */
QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));

/* We currently can't handle more than 16 bits in the MMUIDX bitmask.
 */
QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
#define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)

static inline size_t tlb_n_entries(CPUTLBDescFast *fast)
{
    return (fast->mask >> CPU_TLB_ENTRY_BITS) + 1;
}

static inline size_t sizeof_tlb(CPUTLBDescFast *fast)
{
    return fast->mask + (1 << CPU_TLB_ENTRY_BITS);
}
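/*
 * Illustrative mask arithmetic (assuming 32-byte CPUTLBEntry, i.e.
 * CPU_TLB_ENTRY_BITS == 5): a 256-entry TLB stores
 * fast->mask == (256 - 1) << 5 == 0x1fe0, so tlb_n_entries()
 * recovers (0x1fe0 >> 5) + 1 == 256 entries and sizeof_tlb()
 * yields 0x1fe0 + 32 == 8192 bytes of table.
 */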
static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
                             size_t max_entries)
{
    desc->window_begin_ns = ns;
    desc->window_max_entries = max_entries;
}

/**
 * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
 * @desc: The CPUTLBDesc portion of the TLB
 * @fast: The CPUTLBDescFast portion of the same TLB
 *
 * Called with tlb_c.lock held.
 *
 * We have two main constraints when resizing a TLB: (1) we only resize it
 * on a TLB flush (otherwise we'd have to take a perf hit by either rehashing
 * the array or unnecessarily flushing it), which means we do not control how
 * frequently the resizing can occur; (2) we don't have access to the guest's
 * future scheduling decisions, and therefore have to decide the magnitude of
 * the resize based on past observations.
 *
 * In general, a memory-hungry process can benefit greatly from an appropriately
 * sized TLB, since a guest TLB miss is very expensive. This doesn't mean that
 * we just have to make the TLB as large as possible; while an oversized TLB
 * results in minimal TLB miss rates, it also takes longer to be flushed
 * (flushes can be _very_ frequent), and the reduced locality can also hurt
 * performance.
 *
 * To achieve near-optimal performance for all kinds of workloads, we:
 *
 * 1. Aggressively increase the size of the TLB when the use rate of the
 * TLB being flushed is high, since it is likely that in the near future this
 * memory-hungry process will execute again, and its memory hungriness will
 * probably be similar.
 *
 * 2. Slowly reduce the size of the TLB as the use rate declines over a
 * reasonably large time window. The rationale is that if in such a time window
 * we have not observed a high TLB use rate, it is likely that we won't observe
 * it in the near future. In that case, once a time window expires we downsize
 * the TLB to match the maximum use rate observed in the window.
 *
 * 3. Try to keep the maximum use rate in a time window in the 30-70% range,
 * since in that range performance is likely near-optimal. Recall that the TLB
 * is direct mapped, so we want the use rate to be low (or at least not too
 * high), since otherwise we are likely to have a significant amount of
 * conflict misses.
 */
static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast,
                                  int64_t now)
{
    size_t old_size = tlb_n_entries(fast);
    size_t rate;
    size_t new_size = old_size;
    int64_t window_len_ms = 100;
    int64_t window_len_ns = window_len_ms * 1000 * 1000;
    bool window_expired = now > desc->window_begin_ns + window_len_ns;

    if (desc->n_used_entries > desc->window_max_entries) {
        desc->window_max_entries = desc->n_used_entries;
    }
    rate = desc->window_max_entries * 100 / old_size;

    if (rate > 70) {
        new_size = MIN(old_size << 1, 1 << CPU_TLB_DYN_MAX_BITS);
    } else if (rate < 30 && window_expired) {
        size_t ceil = pow2ceil(desc->window_max_entries);
        size_t expected_rate = desc->window_max_entries * 100 / ceil;

        /*
         * Avoid undersizing when the max number of entries seen is just below
         * a pow2. For instance, if max_entries == 1025, the expected use rate
         * would be 1025/2048==50%. However, if max_entries == 1023, we'd get
         * 1023/1024==99.9% use rate, so we'd likely end up doubling the size
         * later. Thus, make sure that the expected use rate remains below 70%.
         * (and since we double the size, that means the lowest rate we'd
         * expect to get is 35%, which is still in the 30-70% range where
         * we consider that the size is appropriate.)
         */
        if (expected_rate > 70) {
            ceil *= 2;
        }
        new_size = MAX(ceil, 1 << CPU_TLB_DYN_MIN_BITS);
    }

    if (new_size == old_size) {
        if (window_expired) {
            tlb_window_reset(desc, now, desc->n_used_entries);
        }
        return;
    }

    g_free(fast->table);
    g_free(desc->iotlb);

    tlb_window_reset(desc, now, 0);
    /* desc->n_used_entries is cleared by the caller */
    fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
    fast->table = g_try_new(CPUTLBEntry, new_size);
    desc->iotlb = g_try_new(CPUIOTLBEntry, new_size);

    /*
     * If the allocations fail, try smaller sizes. We just freed some
     * memory, so going back to half of new_size has a good chance of working.
     * Increased memory pressure elsewhere in the system might cause the
     * allocations to fail though, so we progressively reduce the allocation
     * size, aborting if we cannot even allocate the smallest TLB we support.
     */
    while (fast->table == NULL || desc->iotlb == NULL) {
        if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
            error_report("%s: %s", __func__, strerror(errno));
            abort();
        }
        new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS);
        fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;

        g_free(fast->table);
        g_free(desc->iotlb);
        fast->table = g_try_new(CPUTLBEntry, new_size);
        desc->iotlb = g_try_new(CPUIOTLBEntry, new_size);
    }
}

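/*
 * A worked example of the policy above (illustrative numbers only):
 * with old_size == 1024 and window_max_entries == 768, the use rate
 * is 768 * 100 / 1024 == 75%, so the table doubles to 2048 entries.
 * If instead window_max_entries == 200 and the 100 ms window has
 * expired, rate == 19% and pow2ceil(200) == 256; the expected rate
 * 200 * 100 / 256 == 78% exceeds 70%, so ceil doubles to 512 and
 * the TLB shrinks to 512 entries.
 */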
static void tlb_mmu_flush_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast)
{
    desc->n_used_entries = 0;
    desc->large_page_addr = -1;
    desc->large_page_mask = -1;
    desc->vindex = 0;
    memset(fast->table, -1, sizeof_tlb(fast));
    memset(desc->vtable, -1, sizeof(desc->vtable));
}

static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx,
                                        int64_t now)
{
    CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
    CPUTLBDescFast *fast = &env_tlb(env)->f[mmu_idx];

    tlb_mmu_resize_locked(desc, fast, now);
    tlb_mmu_flush_locked(desc, fast);
}

static void tlb_mmu_init(CPUTLBDesc *desc, CPUTLBDescFast *fast, int64_t now)
{
    size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;

    tlb_window_reset(desc, now, 0);
    desc->n_used_entries = 0;
    fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
    fast->table = g_new(CPUTLBEntry, n_entries);
    desc->iotlb = g_new(CPUIOTLBEntry, n_entries);
    tlb_mmu_flush_locked(desc, fast);
}

static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
{
    env_tlb(env)->d[mmu_idx].n_used_entries++;
}

static inline void tlb_n_used_entries_dec(CPUArchState *env, uintptr_t mmu_idx)
{
    env_tlb(env)->d[mmu_idx].n_used_entries--;
}

void tlb_init(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;
    int64_t now = get_clock_realtime();
    int i;

    qemu_spin_init(&env_tlb(env)->c.lock);

    /* All TLBs are initialized flushed. */
    env_tlb(env)->c.dirty = 0;

    for (i = 0; i < NB_MMU_MODES; i++) {
        tlb_mmu_init(&env_tlb(env)->d[i], &env_tlb(env)->f[i], now);
    }
}

/* flush_all_helper: run fn across all cpus
 *
 * If the wait flag is set then the src cpu's helper will be queued as
 * "safe" work and the loop exited, creating a synchronisation point
 * where all queued work will be finished before execution starts
 * again.
 */
static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
                             run_on_cpu_data d)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, fn, d);
        }
    }
}

void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
{
    CPUState *cpu;
    size_t full = 0, part = 0, elide = 0;

    CPU_FOREACH(cpu) {
        CPUArchState *env = cpu->env_ptr;

        full += atomic_read(&env_tlb(env)->c.full_flush_count);
        part += atomic_read(&env_tlb(env)->c.part_flush_count);
        elide += atomic_read(&env_tlb(env)->c.elide_flush_count);
    }
    *pfull = full;
    *ppart = part;
    *pelide = elide;
}

static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    uint16_t asked = data.host_int;
    uint16_t all_dirty, work, to_clean;
    int64_t now = get_clock_realtime();

    assert_cpu_is_self(cpu);

    tlb_debug("mmu_idx:0x%04" PRIx16 "\n", asked);

    qemu_spin_lock(&env_tlb(env)->c.lock);

    all_dirty = env_tlb(env)->c.dirty;
    to_clean = asked & all_dirty;
    all_dirty &= ~to_clean;
    env_tlb(env)->c.dirty = all_dirty;

    for (work = to_clean; work != 0; work &= work - 1) {
        int mmu_idx = ctz32(work);
        tlb_flush_one_mmuidx_locked(env, mmu_idx, now);
    }

    qemu_spin_unlock(&env_tlb(env)->c.lock);

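    /*
     * The per-vCPU jump cache maps guest virtual addresses to TBs;
     * after a flush those mappings may be stale, so drop them along
     * with the TLB entries.
     */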
    cpu_tb_jmp_cache_clear(cpu);

    if (to_clean == ALL_MMUIDX_BITS) {
        atomic_set(&env_tlb(env)->c.full_flush_count,
                   env_tlb(env)->c.full_flush_count + 1);
    } else {
        atomic_set(&env_tlb(env)->c.part_flush_count,
                   env_tlb(env)->c.part_flush_count + ctpop16(to_clean));
        if (to_clean != asked) {
            atomic_set(&env_tlb(env)->c.elide_flush_count,
                       env_tlb(env)->c.elide_flush_count +
                       ctpop16(asked & ~to_clean));
        }
    }
}

void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
    tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);

    if (cpu->created && !qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
                         RUN_ON_CPU_HOST_INT(idxmap));
    } else {
        tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap));
    }
}

void tlb_flush(CPUState *cpu)
{
    tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS);
}

void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus(src_cpu, ALL_MMUIDX_BITS);
}

void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS);
}

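/*
 * A sketch of typical target usage (MMU_USER_IDX is a per-target
 * constant and only illustrative here): a write to an MMU control
 * register that invalidates every translation on all vCPUs would use
 *     tlb_flush_all_cpus_synced(cs);
 * while a local ASID change might only need
 *     tlb_flush_by_mmuidx(cs, 1 << MMU_USER_IDX);
 */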
static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry,
                                        target_ulong page)
{
    return tlb_hit_page(tlb_entry->addr_read, page) ||
           tlb_hit_page(tlb_addr_write(tlb_entry), page) ||
           tlb_hit_page(tlb_entry->addr_code, page);
}

/**
 * tlb_entry_is_empty - return true if the entry is not in use
 * @te: pointer to CPUTLBEntry
 */
static inline bool tlb_entry_is_empty(const CPUTLBEntry *te)
{
    return te->addr_read == -1 && te->addr_write == -1 && te->addr_code == -1;
}

/* Called with tlb_c.lock held */
static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry,
                                          target_ulong page)
{
    if (tlb_hit_page_anyprot(tlb_entry, page)) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
        return true;
    }
    return false;
}

/* Called with tlb_c.lock held */
static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx,
                                              target_ulong page)
{
    CPUTLBDesc *d = &env_tlb(env)->d[mmu_idx];
    int k;

    assert_cpu_is_self(env_cpu(env));
    for (k = 0; k < CPU_VTLB_SIZE; k++) {
        if (tlb_flush_entry_locked(&d->vtable[k], page)) {
            tlb_n_used_entries_dec(env, mmu_idx);
        }
    }
}

static void tlb_flush_page_locked(CPUArchState *env, int midx,
                                  target_ulong page)
{
    target_ulong lp_addr = env_tlb(env)->d[midx].large_page_addr;
    target_ulong lp_mask = env_tlb(env)->d[midx].large_page_mask;

    /* Check if we need to flush due to large pages.  */
    if ((page & lp_mask) == lp_addr) {
        tlb_debug("forcing full flush midx %d ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  midx, lp_addr, lp_mask);
        tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
    } else {
        if (tlb_flush_entry_locked(tlb_entry(env, midx, page), page)) {
            tlb_n_used_entries_dec(env, midx);
        }
        tlb_flush_vtlb_page_locked(env, midx, page);
    }
}

/**
 * tlb_flush_page_by_mmuidx_async_0:
 * @cpu: cpu on which to flush
 * @addr: page of virtual address to flush
 * @idxmap: set of mmu_idx to flush
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, flush one page
 * at @addr from the tlbs indicated by @idxmap from @cpu.
 */
static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu,
                                             target_ulong addr,
                                             uint16_t idxmap)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("page addr:" TARGET_FMT_lx " mmu_map:0x%x\n", addr, idxmap);

    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if ((idxmap >> mmu_idx) & 1) {
            tlb_flush_page_locked(env, mmu_idx, addr);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);

    tb_flush_jmp_cache(cpu, addr);
}

/**
 * tlb_flush_page_by_mmuidx_async_1:
 * @cpu: cpu on which to flush
 * @data: encoded addr + idxmap
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, called through
 * async_run_on_cpu.  The idxmap parameter is encoded in the page
 * offset of the target_ptr field.  This limits the set of mmu_idx
 * that can be passed via this method.
 */
static void tlb_flush_page_by_mmuidx_async_1(CPUState *cpu,
                                             run_on_cpu_data data)
{
    target_ulong addr_and_idxmap = (target_ulong) data.target_ptr;
    target_ulong addr = addr_and_idxmap & TARGET_PAGE_MASK;
    uint16_t idxmap = addr_and_idxmap & ~TARGET_PAGE_MASK;

    tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
}

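/*
 * An illustrative encoding (values are examples only): with
 * TARGET_PAGE_BITS == 12, a flush of page 0xcafe000 for mmu_idx 0
 * and 2 travels as the single value 0xcafe000 | 0x5, and the decode
 * above splits it back apart.  Any idxmap < TARGET_PAGE_SIZE fits.
 */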
typedef struct {
    target_ulong addr;
    uint16_t idxmap;
} TLBFlushPageByMMUIdxData;

/**
 * tlb_flush_page_by_mmuidx_async_2:
 * @cpu: cpu on which to flush
 * @data: allocated addr + idxmap
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, called through
 * async_run_on_cpu.  The addr+idxmap parameters are stored in a
 * TLBFlushPageByMMUIdxData structure that has been allocated
 * specifically for this helper.  Free the structure when done.
 */
static void tlb_flush_page_by_mmuidx_async_2(CPUState *cpu,
                                             run_on_cpu_data data)
{
    TLBFlushPageByMMUIdxData *d = data.host_ptr;

    tlb_flush_page_by_mmuidx_async_0(cpu, d->addr, d->idxmap);
    g_free(d);
}

void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
{
    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    if (qemu_cpu_is_self(cpu)) {
        tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
    } else if (idxmap < TARGET_PAGE_SIZE) {
        /*
         * Most targets have only a few mmu_idx.  In the case where
         * we can stuff idxmap into the low TARGET_PAGE_BITS, avoid
         * allocating memory for this operation.
         */
        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        TLBFlushPageByMMUIdxData *d = g_new(TLBFlushPageByMMUIdxData, 1);

        /* Otherwise allocate a structure, freed by the worker.  */
        d->addr = addr;
        d->idxmap = idxmap;
        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_2,
                         RUN_ON_CPU_HOST_PTR(d));
    }
}

void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
    tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS);
}

void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr,
                                       uint16_t idxmap)
{
    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    /*
     * Allocate memory to hold addr+idxmap only when needed.
     * See tlb_flush_page_by_mmuidx for details.
     */
    if (idxmap < TARGET_PAGE_SIZE) {
        flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        CPUState *dst_cpu;

        /* Allocate a separate data block for each destination cpu.  */
        CPU_FOREACH(dst_cpu) {
            if (dst_cpu != src_cpu) {
                TLBFlushPageByMMUIdxData *d
                    = g_new(TLBFlushPageByMMUIdxData, 1);

                d->addr = addr;
                d->idxmap = idxmap;
                async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
                                 RUN_ON_CPU_HOST_PTR(d));
            }
        }
    }

    tlb_flush_page_by_mmuidx_async_0(src_cpu, addr, idxmap);
}

void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
{
    tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS);
}

void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                              target_ulong addr,
                                              uint16_t idxmap)
{
    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    /*
     * Allocate memory to hold addr+idxmap only when needed.
     * See tlb_flush_page_by_mmuidx for details.
     */
    if (idxmap < TARGET_PAGE_SIZE) {
        flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
        async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                              RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        CPUState *dst_cpu;
        TLBFlushPageByMMUIdxData *d;

        /* Allocate a separate data block for each destination cpu.  */
        CPU_FOREACH(dst_cpu) {
            if (dst_cpu != src_cpu) {
                d = g_new(TLBFlushPageByMMUIdxData, 1);
                d->addr = addr;
                d->idxmap = idxmap;
                async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
                                 RUN_ON_CPU_HOST_PTR(d));
            }
        }

        d = g_new(TLBFlushPageByMMUIdxData, 1);
        d->addr = addr;
        d->idxmap = idxmap;
        async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_2,
                              RUN_ON_CPU_HOST_PTR(d));
    }
}

void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr)
{
    tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
}

/* update the TLBs so that writes to code in the physical page 'ram_addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
                                             DIRTY_MEMORY_CODE);
}

/* update the TLB so that writes in physical page 'ram_addr' are no longer
   tested for self-modifying code */
void tlb_unprotect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}


/*
 * Dirty write flag handling
 *
 * When the TCG code writes to a location it looks up the address in
 * the TLB and uses that data to compute the final address.  If any of
 * the lower bits of the address are set then the slow path is forced.
 * There are a number of reasons to do this but for normal RAM the
 * most usual is detecting writes to code regions which may invalidate
 * generated code.
 *
 * Other vCPUs might be reading their TLBs during guest execution, so we update
 * te->addr_write with atomic_set.  We don't need to worry about this for
 * oversized guests as MTTCG is disabled for them.
 *
 * Called with tlb_c.lock held.
 */
static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
                                         uintptr_t start, uintptr_t length)
{
    uintptr_t addr = tlb_entry->addr_write;

    if ((addr & (TLB_INVALID_MASK | TLB_MMIO |
                 TLB_DISCARD_WRITE | TLB_NOTDIRTY)) == 0) {
        addr &= TARGET_PAGE_MASK;
        addr += tlb_entry->addend;
        if ((addr - start) < length) {
#if TCG_OVERSIZED_GUEST
            tlb_entry->addr_write |= TLB_NOTDIRTY;
#else
            atomic_set(&tlb_entry->addr_write,
                       tlb_entry->addr_write | TLB_NOTDIRTY);
#endif
        }
    }
}

/*
 * Called with tlb_c.lock held.
 * Called only from the vCPU context, i.e. the TLB's owner thread.
 */
static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s)
{
    *d = *s;
}

/* This is a cross vCPU call (i.e. another vCPU resetting the flags of
 * the target vCPU).
 * We must take tlb_c.lock to avoid racing with another vCPU update.  The only
 * thing actually updated is the target TLB entry ->addr_write flags.
 */
void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
{
    CPUArchState *env;

    int mmu_idx;

    env = cpu->env_ptr;
    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        unsigned int i;
        unsigned int n = tlb_n_entries(&env_tlb(env)->f[mmu_idx]);

        for (i = 0; i < n; i++) {
            tlb_reset_dirty_range_locked(&env_tlb(env)->f[mmu_idx].table[i],
                                         start1, length);
        }

        for (i = 0; i < CPU_VTLB_SIZE; i++) {
            tlb_reset_dirty_range_locked(&env_tlb(env)->d[mmu_idx].vtable[i],
                                         start1, length);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);
}

/* Called with tlb_c.lock held */
static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry,
                                         target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    vaddr &= TARGET_PAGE_MASK;
    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, vaddr), vaddr);
    }

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_set_dirty1_locked(&env_tlb(env)->d[mmu_idx].vtable[k], vaddr);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);
}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUArchState *env, int mmu_idx,
                               target_ulong vaddr, target_ulong size)
{
    target_ulong lp_addr = env_tlb(env)->d[mmu_idx].large_page_addr;
    target_ulong lp_mask = ~(size - 1);

    if (lp_addr == (target_ulong)-1) {
        /* No previous large page.  */
        lp_addr = vaddr;
    } else {
        /* Extend the existing region to include the new page.
           This is a compromise between unnecessary flushes and
           the cost of maintaining a full variable size TLB.  */
        lp_mask &= env_tlb(env)->d[mmu_idx].large_page_mask;
        while (((lp_addr ^ vaddr) & lp_mask) != 0) {
            lp_mask <<= 1;
        }
    }
    env_tlb(env)->d[mmu_idx].large_page_addr = lp_addr & lp_mask;
    env_tlb(env)->d[mmu_idx].large_page_mask = lp_mask;
}

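/*
 * A worked example of the widening loop above (illustrative
 * addresses): with an existing 2 MB region recorded at 0x40000000
 * (lp_mask ~0x1fffff) and a new 2 MB page at 0x40250000, the
 * addresses differ in bit 21, so lp_mask shifts once to ~0x3fffff
 * and the recorded region becomes the 4 MB block at 0x40000000.
 */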
/* Add a new TLB entry. At most one entry for a given virtual address
 * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
 * supplied size is only used by tlb_flush_page.
 *
 * Called from TCG-generated code, which is under an RCU read-side
 * critical section.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs, int prot,
                             int mmu_idx, target_ulong size)
{
    CPUArchState *env = cpu->env_ptr;
    CPUTLB *tlb = env_tlb(env);
    CPUTLBDesc *desc = &tlb->d[mmu_idx];
    MemoryRegionSection *section;
    unsigned int index;
    target_ulong address;
    target_ulong write_address;
    uintptr_t addend;
    CPUTLBEntry *te, tn;
    hwaddr iotlb, xlat, sz, paddr_page;
    target_ulong vaddr_page;
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    int wp_flags;
    bool is_ram, is_romd;

    assert_cpu_is_self(cpu);

    if (size <= TARGET_PAGE_SIZE) {
        sz = TARGET_PAGE_SIZE;
    } else {
        tlb_add_large_page(env, mmu_idx, vaddr, size);
        sz = size;
    }
    vaddr_page = vaddr & TARGET_PAGE_MASK;
    paddr_page = paddr & TARGET_PAGE_MASK;

    section = address_space_translate_for_iotlb(cpu, asidx, paddr_page,
                                                &xlat, &sz, attrs, &prot);
    assert(sz >= TARGET_PAGE_SIZE);

    tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
              " prot=%x idx=%d\n",
              vaddr, paddr, prot, mmu_idx);

    address = vaddr_page;
    if (size < TARGET_PAGE_SIZE) {
        /* Repeat the MMU check and TLB fill on every access.  */
        address |= TLB_INVALID_MASK;
    }
    if (attrs.byte_swap) {
        address |= TLB_BSWAP;
    }

    is_ram = memory_region_is_ram(section->mr);
    is_romd = memory_region_is_romd(section->mr);

    if (is_ram || is_romd) {
        /* RAM and ROMD both have associated host memory. */
        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
    } else {
        /* I/O does not; force the host address to NULL. */
        addend = 0;
    }

    write_address = address;
    if (is_ram) {
        iotlb = memory_region_get_ram_addr(section->mr) + xlat;
        /*
         * Computing is_clean is expensive; avoid all that unless
         * the page is actually writable.
         */
        if (prot & PAGE_WRITE) {
            if (section->readonly) {
                write_address |= TLB_DISCARD_WRITE;
            } else if (cpu_physical_memory_is_clean(iotlb)) {
                write_address |= TLB_NOTDIRTY;
            }
        }
    } else {
        /* I/O or ROMD */
        iotlb = memory_region_section_get_iotlb(cpu, section) + xlat;
        /*
         * Writes to romd devices must go through MMIO to enable write.
         * Reads to romd devices go through the ram_ptr found above,
         * but of course reads to I/O must go through MMIO.
         */
        write_address |= TLB_MMIO;
        if (!is_romd) {
            address = write_address;
        }
    }

    wp_flags = cpu_watchpoint_address_matches(cpu, vaddr_page,
                                              TARGET_PAGE_SIZE);

    index = tlb_index(env, mmu_idx, vaddr_page);
    te = tlb_entry(env, mmu_idx, vaddr_page);

    /*
     * Hold the TLB lock for the rest of the function. We could acquire/release
     * the lock several times in the function, but it is faster to amortize the
     * acquisition cost by acquiring it just once. Note that this leads to
     * a longer critical section, but this is not a concern since the TLB lock
     * is unlikely to be contended.
     */
    qemu_spin_lock(&tlb->c.lock);

    /* Note that the tlb is no longer clean.  */
    tlb->c.dirty |= 1 << mmu_idx;

    /* Make sure there's no cached translation for the new page.  */
    tlb_flush_vtlb_page_locked(env, mmu_idx, vaddr_page);

    /*
     * Only evict the old entry to the victim tlb if it's for a
     * different page; otherwise just overwrite the stale data.
     */
    if (!tlb_hit_page_anyprot(te, vaddr_page) && !tlb_entry_is_empty(te)) {
        unsigned vidx = desc->vindex++ % CPU_VTLB_SIZE;
        CPUTLBEntry *tv = &desc->vtable[vidx];

        /* Evict the old entry into the victim tlb.  */
        copy_tlb_helper_locked(tv, te);
        desc->viotlb[vidx] = desc->iotlb[index];
        tlb_n_used_entries_dec(env, mmu_idx);
    }

    /* refill the tlb */
    /*
     * At this point iotlb contains a physical section number in the lower
     * TARGET_PAGE_BITS, and either
     *  + the ram_addr_t of the page base of the target RAM (RAM)
     *  + the offset within section->mr of the page base (I/O, ROMD)
     * We subtract the vaddr_page (which is page aligned and thus won't
     * disturb the low bits) to give an offset which can be added to the
     * (non-page-aligned) vaddr of the eventual memory access to get
     * the MemoryRegion offset for the access. Note that the vaddr we
     * subtract here is that of the page base, and not the same as the
     * vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
     */
    desc->iotlb[index].addr = iotlb - vaddr_page;
    desc->iotlb[index].attrs = attrs;

    /* Now calculate the new entry */
    tn.addend = addend - vaddr_page;
    if (prot & PAGE_READ) {
        tn.addr_read = address;
        if (wp_flags & BP_MEM_READ) {
            tn.addr_read |= TLB_WATCHPOINT;
        }
    } else {
        tn.addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        tn.addr_code = address;
    } else {
        tn.addr_code = -1;
    }

    tn.addr_write = -1;
    if (prot & PAGE_WRITE) {
        tn.addr_write = write_address;
        if (prot & PAGE_WRITE_INV) {
            tn.addr_write |= TLB_INVALID_MASK;
        }
        if (wp_flags & BP_MEM_WRITE) {
            tn.addr_write |= TLB_WATCHPOINT;
        }
    }

    copy_tlb_helper_locked(te, &tn);
    tlb_n_used_entries_inc(env, mmu_idx);
    qemu_spin_unlock(&tlb->c.lock);
}

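/*
 * A sketch of the usual caller (not a real target; names are
 * illustrative): a target's tlb_fill hook translates vaddr and then
 * installs the mapping with something like
 *     tlb_set_page_with_attrs(cs, vaddr & TARGET_PAGE_MASK, paddr,
 *                             attrs, prot, mmu_idx, TARGET_PAGE_SIZE);
 */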
/* Add a new TLB entry, but without specifying the memory
 * transaction attributes to be used.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
                            prot, mmu_idx, size);
}

static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    ram_addr = qemu_ram_addr_from_host(ptr);
    if (ram_addr == RAM_ADDR_INVALID) {
        error_report("Bad ram pointer %p", ptr);
        abort();
    }
    return ram_addr;
}

/*
 * Note: tlb_fill() can trigger a resize of the TLB. This means that all of the
 * caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must
 * be discarded and looked up again (e.g. via tlb_entry()).
 */
static void tlb_fill(CPUState *cpu, target_ulong addr, int size,
                     MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    bool ok;

    /*
     * This is not a probe, so only valid return is success; failure
     * should result in exception + longjmp to the cpu loop.
     */
    ok = cc->tlb_fill(cpu, addr, size, access_type, mmu_idx, false, retaddr);
    assert(ok);
}

static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                         int mmu_idx, target_ulong addr, uintptr_t retaddr,
                         MMUAccessType access_type, MemOp op)
{
    CPUState *cpu = env_cpu(env);
    hwaddr mr_offset;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    uint64_t val;
    bool locked = false;
    MemTxResult r;

    section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
    mr = section->mr;
    mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
    cpu->mem_io_pc = retaddr;
    if (!cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    if (mr->global_locking && !qemu_mutex_iothread_locked()) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    r = memory_region_dispatch_read(mr, mr_offset, &val, op, iotlbentry->attrs);
    if (r != MEMTX_OK) {
        hwaddr physaddr = mr_offset +
            section->offset_within_address_space -
            section->offset_within_region;

        cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), access_type,
                               mmu_idx, iotlbentry->attrs, r, retaddr);
    }
    if (locked) {
        qemu_mutex_unlock_iothread();
    }

    return val;
}

static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                      int mmu_idx, uint64_t val, target_ulong addr,
                      uintptr_t retaddr, MemOp op)
{
    CPUState *cpu = env_cpu(env);
    hwaddr mr_offset;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    bool locked = false;
    MemTxResult r;

    section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
    mr = section->mr;
    mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
    if (!cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }
    cpu->mem_io_pc = retaddr;

    if (mr->global_locking && !qemu_mutex_iothread_locked()) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
mr_offset, val, op, iotlbentry->attrs); 108504e3aabdSPeter Maydell if (r != MEMTX_OK) { 10862d54f194SPeter Maydell hwaddr physaddr = mr_offset + 10872d54f194SPeter Maydell section->offset_within_address_space - 10882d54f194SPeter Maydell section->offset_within_region; 10892d54f194SPeter Maydell 1090be5c4787STony Nguyen cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), 1091be5c4787STony Nguyen MMU_DATA_STORE, mmu_idx, iotlbentry->attrs, r, 1092be5c4787STony Nguyen retaddr); 109304e3aabdSPeter Maydell } 1094d9bb58e5SYang Zhong if (locked) { 1095d9bb58e5SYang Zhong qemu_mutex_unlock_iothread(); 1096d9bb58e5SYang Zhong } 1097d9bb58e5SYang Zhong } 1098d9bb58e5SYang Zhong 10994811e909SRichard Henderson static inline target_ulong tlb_read_ofs(CPUTLBEntry *entry, size_t ofs) 11004811e909SRichard Henderson { 11014811e909SRichard Henderson #if TCG_OVERSIZED_GUEST 11024811e909SRichard Henderson return *(target_ulong *)((uintptr_t)entry + ofs); 11034811e909SRichard Henderson #else 11044811e909SRichard Henderson /* ofs might correspond to .addr_write, so use atomic_read */ 11054811e909SRichard Henderson return atomic_read((target_ulong *)((uintptr_t)entry + ofs)); 11064811e909SRichard Henderson #endif 11074811e909SRichard Henderson } 11084811e909SRichard Henderson 1109d9bb58e5SYang Zhong /* Return true if ADDR is present in the victim tlb, and has been copied 1110d9bb58e5SYang Zhong back to the main tlb. */ 1111d9bb58e5SYang Zhong static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index, 1112d9bb58e5SYang Zhong size_t elt_ofs, target_ulong page) 1113d9bb58e5SYang Zhong { 1114d9bb58e5SYang Zhong size_t vidx; 111571aec354SEmilio G. Cota 111629a0af61SRichard Henderson assert_cpu_is_self(env_cpu(env)); 1117d9bb58e5SYang Zhong for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) { 1118a40ec84eSRichard Henderson CPUTLBEntry *vtlb = &env_tlb(env)->d[mmu_idx].vtable[vidx]; 1119a40ec84eSRichard Henderson target_ulong cmp; 1120a40ec84eSRichard Henderson 1121a40ec84eSRichard Henderson /* elt_ofs might correspond to .addr_write, so use atomic_read */ 1122a40ec84eSRichard Henderson #if TCG_OVERSIZED_GUEST 1123a40ec84eSRichard Henderson cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs); 1124a40ec84eSRichard Henderson #else 1125a40ec84eSRichard Henderson cmp = atomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs)); 1126a40ec84eSRichard Henderson #endif 1127d9bb58e5SYang Zhong 1128d9bb58e5SYang Zhong if (cmp == page) { 1129d9bb58e5SYang Zhong /* Found entry in victim tlb, swap tlb and iotlb. */ 1130a40ec84eSRichard Henderson CPUTLBEntry tmptlb, *tlb = &env_tlb(env)->f[mmu_idx].table[index]; 1131d9bb58e5SYang Zhong 1132a40ec84eSRichard Henderson qemu_spin_lock(&env_tlb(env)->c.lock); 113371aec354SEmilio G. Cota copy_tlb_helper_locked(&tmptlb, tlb); 113471aec354SEmilio G. Cota copy_tlb_helper_locked(tlb, vtlb); 113571aec354SEmilio G. Cota copy_tlb_helper_locked(vtlb, &tmptlb); 1136a40ec84eSRichard Henderson qemu_spin_unlock(&env_tlb(env)->c.lock); 1137d9bb58e5SYang Zhong 1138a40ec84eSRichard Henderson CPUIOTLBEntry tmpio, *io = &env_tlb(env)->d[mmu_idx].iotlb[index]; 1139a40ec84eSRichard Henderson CPUIOTLBEntry *vio = &env_tlb(env)->d[mmu_idx].viotlb[vidx]; 1140d9bb58e5SYang Zhong tmpio = *io; *io = *vio; *vio = tmpio; 1141d9bb58e5SYang Zhong return true; 1142d9bb58e5SYang Zhong } 1143d9bb58e5SYang Zhong } 1144d9bb58e5SYang Zhong return false; 1145d9bb58e5SYang Zhong } 1146d9bb58e5SYang Zhong 1147d9bb58e5SYang Zhong /* Macro to call the above, with local variables from the use context. 
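 * As a sketch of the idiom used throughout this file (env, mmu_idx,
 * index, size and retaddr are assumed to be in scope, as the macro
 * requires):
 *
 *     if (!VICTIM_TLB_HIT(addr_write, addr)) {
 *         tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE,
 *                  mmu_idx, retaddr);
 *     }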
*/ 1148d9bb58e5SYang Zhong #define VICTIM_TLB_HIT(TY, ADDR) \ 1149d9bb58e5SYang Zhong victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \ 1150d9bb58e5SYang Zhong (ADDR) & TARGET_PAGE_MASK) 1151d9bb58e5SYang Zhong 115230d7e098SRichard Henderson /* 115330d7e098SRichard Henderson * Return a ram_addr_t for the virtual address for execution. 115430d7e098SRichard Henderson * 115530d7e098SRichard Henderson * Return -1 if we can't translate and execute from an entire page 115630d7e098SRichard Henderson * of RAM. This will force us to execute by loading and translating 115730d7e098SRichard Henderson * one insn at a time, without caching. 115830d7e098SRichard Henderson * 115930d7e098SRichard Henderson * NOTE: This function will trigger an exception if the page is 116030d7e098SRichard Henderson * not executable. 1161f2553f04SKONRAD Frederic */ 11624b2190daSEmilio G. Cota tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr, 11634b2190daSEmilio G. Cota void **hostp) 1164f2553f04SKONRAD Frederic { 1165383beda9SRichard Henderson uintptr_t mmu_idx = cpu_mmu_index(env, true); 1166383beda9SRichard Henderson uintptr_t index = tlb_index(env, mmu_idx, addr); 1167383beda9SRichard Henderson CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); 1168f2553f04SKONRAD Frederic void *p; 1169f2553f04SKONRAD Frederic 1170383beda9SRichard Henderson if (unlikely(!tlb_hit(entry->addr_code, addr))) { 1171b493ccf1SPeter Maydell if (!VICTIM_TLB_HIT(addr_code, addr)) { 117229a0af61SRichard Henderson tlb_fill(env_cpu(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0); 11736d967cb8SEmilio G. Cota index = tlb_index(env, mmu_idx, addr); 11746d967cb8SEmilio G. Cota entry = tlb_entry(env, mmu_idx, addr); 117530d7e098SRichard Henderson 117630d7e098SRichard Henderson if (unlikely(entry->addr_code & TLB_INVALID_MASK)) { 117730d7e098SRichard Henderson /* 117830d7e098SRichard Henderson * The MMU protection covers a smaller range than a target 117930d7e098SRichard Henderson * page, so we must redo the MMU check for every insn. 118030d7e098SRichard Henderson */ 118130d7e098SRichard Henderson return -1; 118230d7e098SRichard Henderson } 118371b9a453SKONRAD Frederic } 1184383beda9SRichard Henderson assert(tlb_hit(entry->addr_code, addr)); 1185f2553f04SKONRAD Frederic } 118655df6fcfSPeter Maydell 118730d7e098SRichard Henderson if (unlikely(entry->addr_code & TLB_MMIO)) { 118830d7e098SRichard Henderson /* The region is not backed by RAM. */ 11894b2190daSEmilio G. Cota if (hostp) { 11904b2190daSEmilio G. Cota *hostp = NULL; 11914b2190daSEmilio G. Cota } 119220cb6ae4SPeter Maydell return -1; 119355df6fcfSPeter Maydell } 119455df6fcfSPeter Maydell 1195383beda9SRichard Henderson p = (void *)((uintptr_t)addr + entry->addend); 11964b2190daSEmilio G. Cota if (hostp) { 11974b2190daSEmilio G. Cota *hostp = p; 11984b2190daSEmilio G. Cota } 1199f2553f04SKONRAD Frederic return qemu_ram_addr_from_host_nofail(p); 1200f2553f04SKONRAD Frederic } 1201f2553f04SKONRAD Frederic 12024b2190daSEmilio G. Cota tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr) 12034b2190daSEmilio G. Cota { 12044b2190daSEmilio G. Cota return get_page_addr_code_hostp(env, addr, NULL); 12054b2190daSEmilio G. Cota } 12064b2190daSEmilio G. 
Cota 1207707526adSRichard Henderson static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size, 1208707526adSRichard Henderson CPUIOTLBEntry *iotlbentry, uintptr_t retaddr) 1209707526adSRichard Henderson { 1210707526adSRichard Henderson ram_addr_t ram_addr = mem_vaddr + iotlbentry->addr; 1211707526adSRichard Henderson 1212707526adSRichard Henderson trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size); 1213707526adSRichard Henderson 1214707526adSRichard Henderson if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) { 1215707526adSRichard Henderson struct page_collection *pages 1216707526adSRichard Henderson = page_collection_lock(ram_addr, ram_addr + size); 12175a7c27bbSRichard Henderson tb_invalidate_phys_page_fast(pages, ram_addr, size, retaddr); 1218707526adSRichard Henderson page_collection_unlock(pages); 1219707526adSRichard Henderson } 1220707526adSRichard Henderson 1221707526adSRichard Henderson /* 1222707526adSRichard Henderson * Set both VGA and migration bits for simplicity and to remove 1223707526adSRichard Henderson * the notdirty callback faster. 1224707526adSRichard Henderson */ 1225707526adSRichard Henderson cpu_physical_memory_set_dirty_range(ram_addr, size, DIRTY_CLIENTS_NOCODE); 1226707526adSRichard Henderson 1227707526adSRichard Henderson /* We remove the notdirty callback only if the code has been flushed. */ 1228707526adSRichard Henderson if (!cpu_physical_memory_is_clean(ram_addr)) { 1229707526adSRichard Henderson trace_memory_notdirty_set_dirty(mem_vaddr); 1230707526adSRichard Henderson tlb_set_dirty(cpu, mem_vaddr); 1231707526adSRichard Henderson } 1232707526adSRichard Henderson } 1233707526adSRichard Henderson 1234*069cfe77SRichard Henderson static int probe_access_internal(CPUArchState *env, target_ulong addr, 1235*069cfe77SRichard Henderson int fault_size, MMUAccessType access_type, 1236*069cfe77SRichard Henderson int mmu_idx, bool nonfault, 1237*069cfe77SRichard Henderson void **phost, uintptr_t retaddr) 1238d9bb58e5SYang Zhong { 1239383beda9SRichard Henderson uintptr_t index = tlb_index(env, mmu_idx, addr); 1240383beda9SRichard Henderson CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); 1241*069cfe77SRichard Henderson target_ulong tlb_addr, page_addr; 1242c25c283dSDavid Hildenbrand size_t elt_ofs; 1243*069cfe77SRichard Henderson int flags; 1244ca86cf32SDavid Hildenbrand 1245c25c283dSDavid Hildenbrand switch (access_type) { 1246c25c283dSDavid Hildenbrand case MMU_DATA_LOAD: 1247c25c283dSDavid Hildenbrand elt_ofs = offsetof(CPUTLBEntry, addr_read); 1248c25c283dSDavid Hildenbrand break; 1249c25c283dSDavid Hildenbrand case MMU_DATA_STORE: 1250c25c283dSDavid Hildenbrand elt_ofs = offsetof(CPUTLBEntry, addr_write); 1251c25c283dSDavid Hildenbrand break; 1252c25c283dSDavid Hildenbrand case MMU_INST_FETCH: 1253c25c283dSDavid Hildenbrand elt_ofs = offsetof(CPUTLBEntry, addr_code); 1254c25c283dSDavid Hildenbrand break; 1255c25c283dSDavid Hildenbrand default: 1256c25c283dSDavid Hildenbrand g_assert_not_reached(); 1257c25c283dSDavid Hildenbrand } 1258c25c283dSDavid Hildenbrand tlb_addr = tlb_read_ofs(entry, elt_ofs); 1259c25c283dSDavid Hildenbrand 1260*069cfe77SRichard Henderson page_addr = addr & TARGET_PAGE_MASK; 1261*069cfe77SRichard Henderson if (!tlb_hit_page(tlb_addr, page_addr)) { 1262*069cfe77SRichard Henderson if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, page_addr)) { 1263*069cfe77SRichard Henderson CPUState *cs = env_cpu(env); 1264*069cfe77SRichard Henderson CPUClass *cc = CPU_GET_CLASS(cs); 1265*069cfe77SRichard 
Henderson 1266*069cfe77SRichard Henderson if (!cc->tlb_fill(cs, addr, fault_size, access_type, 1267*069cfe77SRichard Henderson mmu_idx, nonfault, retaddr)) { 1268*069cfe77SRichard Henderson /* Non-faulting page table read failed. */ 1269*069cfe77SRichard Henderson *phost = NULL; 1270*069cfe77SRichard Henderson return TLB_INVALID_MASK; 1271*069cfe77SRichard Henderson } 1272*069cfe77SRichard Henderson 127303a98189SDavid Hildenbrand /* TLB resize via tlb_fill may have moved the entry. */ 127403a98189SDavid Hildenbrand entry = tlb_entry(env, mmu_idx, addr); 1275d9bb58e5SYang Zhong } 1276c25c283dSDavid Hildenbrand tlb_addr = tlb_read_ofs(entry, elt_ofs); 127703a98189SDavid Hildenbrand } 1278*069cfe77SRichard Henderson flags = tlb_addr & TLB_FLAGS_MASK; 127903a98189SDavid Hildenbrand 1280*069cfe77SRichard Henderson /* Fold all "mmio-like" bits into TLB_MMIO. This is not RAM. */ 1281*069cfe77SRichard Henderson if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY))) { 1282*069cfe77SRichard Henderson *phost = NULL; 1283*069cfe77SRichard Henderson return TLB_MMIO; 1284fef39ccdSDavid Hildenbrand } 1285fef39ccdSDavid Hildenbrand 1286*069cfe77SRichard Henderson /* Everything else is RAM. */ 1287*069cfe77SRichard Henderson *phost = (void *)((uintptr_t)addr + entry->addend); 1288*069cfe77SRichard Henderson return flags; 1289*069cfe77SRichard Henderson } 1290*069cfe77SRichard Henderson 1291*069cfe77SRichard Henderson int probe_access_flags(CPUArchState *env, target_ulong addr, 1292*069cfe77SRichard Henderson MMUAccessType access_type, int mmu_idx, 1293*069cfe77SRichard Henderson bool nonfault, void **phost, uintptr_t retaddr) 1294*069cfe77SRichard Henderson { 1295*069cfe77SRichard Henderson int flags; 1296*069cfe77SRichard Henderson 1297*069cfe77SRichard Henderson flags = probe_access_internal(env, addr, 0, access_type, mmu_idx, 1298*069cfe77SRichard Henderson nonfault, phost, retaddr); 1299*069cfe77SRichard Henderson 1300*069cfe77SRichard Henderson /* Handle clean RAM pages. */ 1301*069cfe77SRichard Henderson if (unlikely(flags & TLB_NOTDIRTY)) { 1302*069cfe77SRichard Henderson uintptr_t index = tlb_index(env, mmu_idx, addr); 130373bc0bd4SRichard Henderson CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index]; 130473bc0bd4SRichard Henderson 1305*069cfe77SRichard Henderson notdirty_write(env_cpu(env), addr, 1, iotlbentry, retaddr); 1306*069cfe77SRichard Henderson flags &= ~TLB_NOTDIRTY; 1307*069cfe77SRichard Henderson } 1308*069cfe77SRichard Henderson 1309*069cfe77SRichard Henderson return flags; 1310*069cfe77SRichard Henderson } 1311*069cfe77SRichard Henderson 1312*069cfe77SRichard Henderson void *probe_access(CPUArchState *env, target_ulong addr, int size, 1313*069cfe77SRichard Henderson MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) 1314*069cfe77SRichard Henderson { 1315*069cfe77SRichard Henderson void *host; 1316*069cfe77SRichard Henderson int flags; 1317*069cfe77SRichard Henderson 1318*069cfe77SRichard Henderson g_assert(-(addr | TARGET_PAGE_MASK) >= size); 1319*069cfe77SRichard Henderson 1320*069cfe77SRichard Henderson flags = probe_access_internal(env, addr, size, access_type, mmu_idx, 1321*069cfe77SRichard Henderson false, &host, retaddr); 1322*069cfe77SRichard Henderson 1323*069cfe77SRichard Henderson /* Per the interface, size == 0 merely faults the access. 
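 * A caller can exploit this to validate permissions up front without
 * needing the host pointer back; illustrative only, with mmu_idx and
 * retaddr taken from the caller's context:
 *
 *     probe_access(env, addr, 0, MMU_DATA_STORE, mmu_idx, retaddr);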
*/ 1324*069cfe77SRichard Henderson if (size == 0) { 132573bc0bd4SRichard Henderson return NULL; 132673bc0bd4SRichard Henderson } 132773bc0bd4SRichard Henderson 1328*069cfe77SRichard Henderson if (unlikely(flags & (TLB_NOTDIRTY | TLB_WATCHPOINT))) { 1329*069cfe77SRichard Henderson uintptr_t index = tlb_index(env, mmu_idx, addr); 1330*069cfe77SRichard Henderson CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index]; 1331*069cfe77SRichard Henderson 133203a98189SDavid Hildenbrand /* Handle watchpoints. */ 1333*069cfe77SRichard Henderson if (flags & TLB_WATCHPOINT) { 1334*069cfe77SRichard Henderson int wp_access = (access_type == MMU_DATA_STORE 1335*069cfe77SRichard Henderson ? BP_MEM_WRITE : BP_MEM_READ); 133603a98189SDavid Hildenbrand cpu_check_watchpoint(env_cpu(env), addr, size, 133773bc0bd4SRichard Henderson iotlbentry->attrs, wp_access, retaddr); 1338d9bb58e5SYang Zhong } 1339fef39ccdSDavid Hildenbrand 134073bc0bd4SRichard Henderson /* Handle clean RAM pages. */ 1341*069cfe77SRichard Henderson if (flags & TLB_NOTDIRTY) { 1342*069cfe77SRichard Henderson notdirty_write(env_cpu(env), addr, 1, iotlbentry, retaddr); 134373bc0bd4SRichard Henderson } 1344fef39ccdSDavid Hildenbrand } 1345fef39ccdSDavid Hildenbrand 1346*069cfe77SRichard Henderson return host; 1347d9bb58e5SYang Zhong } 1348d9bb58e5SYang Zhong 13494811e909SRichard Henderson void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr, 13504811e909SRichard Henderson MMUAccessType access_type, int mmu_idx) 13514811e909SRichard Henderson { 1352*069cfe77SRichard Henderson void *host; 1353*069cfe77SRichard Henderson int flags; 13544811e909SRichard Henderson 1355*069cfe77SRichard Henderson flags = probe_access_internal(env, addr, 0, access_type, 1356*069cfe77SRichard Henderson mmu_idx, true, &host, 0); 1357*069cfe77SRichard Henderson 1358*069cfe77SRichard Henderson /* No combination of flags is expected by the caller. */ 1359*069cfe77SRichard Henderson return flags ? NULL : host; 13604811e909SRichard Henderson } 13614811e909SRichard Henderson 1362235537faSAlex Bennée #ifdef CONFIG_PLUGIN 1363235537faSAlex Bennée /* 1364235537faSAlex Bennée * Perform a TLB lookup and populate the qemu_plugin_hwaddr structure. 1365235537faSAlex Bennée * This should be a hot path as we will have just looked this address up 1366235537faSAlex Bennée * in the softmmu lookup code (or helper). We don't handle re-fills or 1367235537faSAlex Bennée * check the victim table. This is purely informational. 1368235537faSAlex Bennée * 1369235537faSAlex Bennée * This should never fail as the memory access being instrumented 1370235537faSAlex Bennée * should have just filled the TLB. 1371235537faSAlex Bennée */ 1372235537faSAlex Bennée 1373235537faSAlex Bennée bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr, int mmu_idx, 1374235537faSAlex Bennée bool is_store, struct qemu_plugin_hwaddr *data) 1375235537faSAlex Bennée { 1376235537faSAlex Bennée CPUArchState *env = cpu->env_ptr; 1377235537faSAlex Bennée CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr); 1378235537faSAlex Bennée uintptr_t index = tlb_index(env, mmu_idx, addr); 1379235537faSAlex Bennée target_ulong tlb_addr = is_store ?
tlb_addr_write(tlbe) : tlbe->addr_read; 1380235537faSAlex Bennée 1381235537faSAlex Bennée if (likely(tlb_hit(tlb_addr, addr))) { 1382235537faSAlex Bennée /* We must have an iotlb entry for MMIO */ 1383235537faSAlex Bennée if (tlb_addr & TLB_MMIO) { 1384235537faSAlex Bennée CPUIOTLBEntry *iotlbentry; 1385235537faSAlex Bennée iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index]; 1386235537faSAlex Bennée data->is_io = true; 1387235537faSAlex Bennée data->v.io.section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs); 1388235537faSAlex Bennée data->v.io.offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr; 1389235537faSAlex Bennée } else { 1390235537faSAlex Bennée data->is_io = false; 1391235537faSAlex Bennée data->v.ram.hostaddr = addr + tlbe->addend; 1392235537faSAlex Bennée } 1393235537faSAlex Bennée return true; 1394235537faSAlex Bennée } 1395235537faSAlex Bennée return false; 1396235537faSAlex Bennée } 1397235537faSAlex Bennée 1398235537faSAlex Bennée #endif 1399235537faSAlex Bennée 1400d9bb58e5SYang Zhong /* Probe for a read-modify-write atomic operation. Do not allow unaligned 1401d9bb58e5SYang Zhong * operations, or io operations to proceed. Return the host address. */ 1402d9bb58e5SYang Zhong static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, 1403707526adSRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 1404d9bb58e5SYang Zhong { 1405d9bb58e5SYang Zhong size_t mmu_idx = get_mmuidx(oi); 1406383beda9SRichard Henderson uintptr_t index = tlb_index(env, mmu_idx, addr); 1407383beda9SRichard Henderson CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr); 1408403f290cSEmilio G. Cota target_ulong tlb_addr = tlb_addr_write(tlbe); 140914776ab5STony Nguyen MemOp mop = get_memop(oi); 1410d9bb58e5SYang Zhong int a_bits = get_alignment_bits(mop); 1411d9bb58e5SYang Zhong int s_bits = mop & MO_SIZE; 141234d49937SPeter Maydell void *hostaddr; 1413d9bb58e5SYang Zhong 1414d9bb58e5SYang Zhong /* Adjust the given return address. */ 1415d9bb58e5SYang Zhong retaddr -= GETPC_ADJ; 1416d9bb58e5SYang Zhong 1417d9bb58e5SYang Zhong /* Enforce guest required alignment. */ 1418d9bb58e5SYang Zhong if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) { 1419d9bb58e5SYang Zhong /* ??? Maybe indicate atomic op to cpu_unaligned_access */ 142029a0af61SRichard Henderson cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE, 1421d9bb58e5SYang Zhong mmu_idx, retaddr); 1422d9bb58e5SYang Zhong } 1423d9bb58e5SYang Zhong 1424d9bb58e5SYang Zhong /* Enforce qemu required alignment. */ 1425d9bb58e5SYang Zhong if (unlikely(addr & ((1 << s_bits) - 1))) { 1426d9bb58e5SYang Zhong /* We get here if guest alignment was not requested, 1427d9bb58e5SYang Zhong or was not enforced by cpu_unaligned_access above. 1428d9bb58e5SYang Zhong We might widen the access and emulate, but for now 1429d9bb58e5SYang Zhong mark an exception and exit the cpu loop. */ 1430d9bb58e5SYang Zhong goto stop_the_world; 1431d9bb58e5SYang Zhong } 1432d9bb58e5SYang Zhong 1433d9bb58e5SYang Zhong /* Check TLB entry and enforce page permissions. */ 1434334692bcSPeter Maydell if (!tlb_hit(tlb_addr, addr)) { 1435d9bb58e5SYang Zhong if (!VICTIM_TLB_HIT(addr_write, addr)) { 143629a0af61SRichard Henderson tlb_fill(env_cpu(env), addr, 1 << s_bits, MMU_DATA_STORE, 143798670d47SLaurent Vivier mmu_idx, retaddr); 14386d967cb8SEmilio G. Cota index = tlb_index(env, mmu_idx, addr); 14396d967cb8SEmilio G. Cota tlbe = tlb_entry(env, mmu_idx, addr); 1440d9bb58e5SYang Zhong } 1441403f290cSEmilio G. 
Cota tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK; 1442d9bb58e5SYang Zhong } 1443d9bb58e5SYang Zhong 144455df6fcfSPeter Maydell /* Notice an IO access or a needs-MMU-lookup access */ 144530d7e098SRichard Henderson if (unlikely(tlb_addr & TLB_MMIO)) { 1446d9bb58e5SYang Zhong /* There's really nothing that can be done to 1447d9bb58e5SYang Zhong support this apart from stop-the-world. */ 1448d9bb58e5SYang Zhong goto stop_the_world; 1449d9bb58e5SYang Zhong } 1450d9bb58e5SYang Zhong 1451d9bb58e5SYang Zhong /* Let the guest notice RMW on a write-only page. */ 145234d49937SPeter Maydell if (unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) { 145329a0af61SRichard Henderson tlb_fill(env_cpu(env), addr, 1 << s_bits, MMU_DATA_LOAD, 145498670d47SLaurent Vivier mmu_idx, retaddr); 1455d9bb58e5SYang Zhong /* Since we don't support reads and writes to different addresses, 1456d9bb58e5SYang Zhong and we do have the proper page loaded for write, this shouldn't 1457d9bb58e5SYang Zhong ever return. But just in case, handle via stop-the-world. */ 1458d9bb58e5SYang Zhong goto stop_the_world; 1459d9bb58e5SYang Zhong } 1460d9bb58e5SYang Zhong 146134d49937SPeter Maydell hostaddr = (void *)((uintptr_t)addr + tlbe->addend); 146234d49937SPeter Maydell 146334d49937SPeter Maydell if (unlikely(tlb_addr & TLB_NOTDIRTY)) { 1464707526adSRichard Henderson notdirty_write(env_cpu(env), addr, 1 << s_bits, 1465707526adSRichard Henderson &env_tlb(env)->d[mmu_idx].iotlb[index], retaddr); 146634d49937SPeter Maydell } 146734d49937SPeter Maydell 146834d49937SPeter Maydell return hostaddr; 1469d9bb58e5SYang Zhong 1470d9bb58e5SYang Zhong stop_the_world: 147129a0af61SRichard Henderson cpu_loop_exit_atomic(env_cpu(env), retaddr); 1472d9bb58e5SYang Zhong } 1473d9bb58e5SYang Zhong 1474eed56642SAlex Bennée /* 1475eed56642SAlex Bennée * Load Helpers 1476eed56642SAlex Bennée * 1477eed56642SAlex Bennée * We support two different access types. SOFTMMU_CODE_ACCESS is 1478eed56642SAlex Bennée * specifically for reading instructions from system memory. It is 1479eed56642SAlex Bennée * called by the translation loop and in some helpers where the code 1480eed56642SAlex Bennée * is disassembled. It shouldn't be called directly by guest code. 
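 *
 * For a data access, a typical call from a target helper looks like the
 * following sketch, where oi packs the MemOp and mmu index (as in
 * cpu_load_helper below) and GETPC() supplies the host return address
 * used for unwinding:
 *
 *     TCGMemOpIdx oi = make_memop_idx(MO_LEUL, mmu_idx);
 *     uint32_t val = helper_le_ldul_mmu(env, addr, oi, GETPC());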
1481eed56642SAlex Bennée */ 1482d9bb58e5SYang Zhong 14832dd92606SRichard Henderson typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr, 14842dd92606SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr); 14852dd92606SRichard Henderson 1486c6b716cdSRichard Henderson static inline uint64_t QEMU_ALWAYS_INLINE 148780d9d1c6SRichard Henderson load_memop(const void *haddr, MemOp op) 148880d9d1c6SRichard Henderson { 148980d9d1c6SRichard Henderson switch (op) { 149080d9d1c6SRichard Henderson case MO_UB: 149180d9d1c6SRichard Henderson return ldub_p(haddr); 149280d9d1c6SRichard Henderson case MO_BEUW: 149380d9d1c6SRichard Henderson return lduw_be_p(haddr); 149480d9d1c6SRichard Henderson case MO_LEUW: 149580d9d1c6SRichard Henderson return lduw_le_p(haddr); 149680d9d1c6SRichard Henderson case MO_BEUL: 149780d9d1c6SRichard Henderson return (uint32_t)ldl_be_p(haddr); 149880d9d1c6SRichard Henderson case MO_LEUL: 149980d9d1c6SRichard Henderson return (uint32_t)ldl_le_p(haddr); 150080d9d1c6SRichard Henderson case MO_BEQ: 150180d9d1c6SRichard Henderson return ldq_be_p(haddr); 150280d9d1c6SRichard Henderson case MO_LEQ: 150380d9d1c6SRichard Henderson return ldq_le_p(haddr); 150480d9d1c6SRichard Henderson default: 150580d9d1c6SRichard Henderson qemu_build_not_reached(); 150680d9d1c6SRichard Henderson } 150780d9d1c6SRichard Henderson } 150880d9d1c6SRichard Henderson 150980d9d1c6SRichard Henderson static inline uint64_t QEMU_ALWAYS_INLINE 15102dd92606SRichard Henderson load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, 1511be5c4787STony Nguyen uintptr_t retaddr, MemOp op, bool code_read, 15122dd92606SRichard Henderson FullLoadHelper *full_load) 1513eed56642SAlex Bennée { 1514eed56642SAlex Bennée uintptr_t mmu_idx = get_mmuidx(oi); 1515eed56642SAlex Bennée uintptr_t index = tlb_index(env, mmu_idx, addr); 1516eed56642SAlex Bennée CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); 1517eed56642SAlex Bennée target_ulong tlb_addr = code_read ? entry->addr_code : entry->addr_read; 1518eed56642SAlex Bennée const size_t tlb_off = code_read ? 1519eed56642SAlex Bennée offsetof(CPUTLBEntry, addr_code) : offsetof(CPUTLBEntry, addr_read); 1520f1be3696SRichard Henderson const MMUAccessType access_type = 1521f1be3696SRichard Henderson code_read ? MMU_INST_FETCH : MMU_DATA_LOAD; 1522eed56642SAlex Bennée unsigned a_bits = get_alignment_bits(get_memop(oi)); 1523eed56642SAlex Bennée void *haddr; 1524eed56642SAlex Bennée uint64_t res; 1525be5c4787STony Nguyen size_t size = memop_size(op); 1526d9bb58e5SYang Zhong 1527eed56642SAlex Bennée /* Handle CPU specific unaligned behaviour */ 1528eed56642SAlex Bennée if (addr & ((1 << a_bits) - 1)) { 152929a0af61SRichard Henderson cpu_unaligned_access(env_cpu(env), addr, access_type, 1530eed56642SAlex Bennée mmu_idx, retaddr); 1531eed56642SAlex Bennée } 1532eed56642SAlex Bennée 1533eed56642SAlex Bennée /* If the TLB entry is for a different page, reload and try again. */ 1534eed56642SAlex Bennée if (!tlb_hit(tlb_addr, addr)) { 1535eed56642SAlex Bennée if (!victim_tlb_hit(env, mmu_idx, index, tlb_off, 1536eed56642SAlex Bennée addr & TARGET_PAGE_MASK)) { 153729a0af61SRichard Henderson tlb_fill(env_cpu(env), addr, size, 1538f1be3696SRichard Henderson access_type, mmu_idx, retaddr); 1539eed56642SAlex Bennée index = tlb_index(env, mmu_idx, addr); 1540eed56642SAlex Bennée entry = tlb_entry(env, mmu_idx, addr); 1541eed56642SAlex Bennée } 1542eed56642SAlex Bennée tlb_addr = code_read ? 
entry->addr_code : entry->addr_read; 154330d7e098SRichard Henderson tlb_addr &= ~TLB_INVALID_MASK; 1544eed56642SAlex Bennée } 1545eed56642SAlex Bennée 154650b107c5SRichard Henderson /* Handle anything that isn't just a straight memory access. */ 1547eed56642SAlex Bennée if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) { 154850b107c5SRichard Henderson CPUIOTLBEntry *iotlbentry; 15495b87b3e6SRichard Henderson bool need_swap; 155050b107c5SRichard Henderson 155150b107c5SRichard Henderson /* For anything that is unaligned, recurse through full_load. */ 1552eed56642SAlex Bennée if ((addr & (size - 1)) != 0) { 1553eed56642SAlex Bennée goto do_unaligned_access; 1554eed56642SAlex Bennée } 155550b107c5SRichard Henderson 155650b107c5SRichard Henderson iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index]; 155750b107c5SRichard Henderson 155850b107c5SRichard Henderson /* Handle watchpoints. */ 155950b107c5SRichard Henderson if (unlikely(tlb_addr & TLB_WATCHPOINT)) { 156050b107c5SRichard Henderson /* On watchpoint hit, this will longjmp out. */ 156150b107c5SRichard Henderson cpu_check_watchpoint(env_cpu(env), addr, size, 156250b107c5SRichard Henderson iotlbentry->attrs, BP_MEM_READ, retaddr); 15635b87b3e6SRichard Henderson } 156450b107c5SRichard Henderson 15655b87b3e6SRichard Henderson need_swap = size > 1 && (tlb_addr & TLB_BSWAP); 156650b107c5SRichard Henderson 156750b107c5SRichard Henderson /* Handle I/O access. */ 15685b87b3e6SRichard Henderson if (likely(tlb_addr & TLB_MMIO)) { 15695b87b3e6SRichard Henderson return io_readx(env, iotlbentry, mmu_idx, addr, retaddr, 15705b87b3e6SRichard Henderson access_type, op ^ (need_swap * MO_BSWAP)); 15715b87b3e6SRichard Henderson } 15725b87b3e6SRichard Henderson 15735b87b3e6SRichard Henderson haddr = (void *)((uintptr_t)addr + entry->addend); 15745b87b3e6SRichard Henderson 15755b87b3e6SRichard Henderson /* 15765b87b3e6SRichard Henderson * Keep these two load_memop separate to ensure that the compiler 15775b87b3e6SRichard Henderson * is able to fold the entire function to a single instruction. 15785b87b3e6SRichard Henderson * There is a build-time assert inside to remind you of this. ;-) 15795b87b3e6SRichard Henderson */ 15805b87b3e6SRichard Henderson if (unlikely(need_swap)) { 15815b87b3e6SRichard Henderson return load_memop(haddr, op ^ MO_BSWAP); 15825b87b3e6SRichard Henderson } 15835b87b3e6SRichard Henderson return load_memop(haddr, op); 1584eed56642SAlex Bennée } 1585eed56642SAlex Bennée 1586eed56642SAlex Bennée /* Handle slow unaligned access (it spans two pages or IO). */ 1587eed56642SAlex Bennée if (size > 1 1588eed56642SAlex Bennée && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1 1589eed56642SAlex Bennée >= TARGET_PAGE_SIZE)) { 1590eed56642SAlex Bennée target_ulong addr1, addr2; 15918c79b288SAlex Bennée uint64_t r1, r2; 1592eed56642SAlex Bennée unsigned shift; 1593eed56642SAlex Bennée do_unaligned_access: 1594ab7a2009SAlex Bennée addr1 = addr & ~((target_ulong)size - 1); 1595eed56642SAlex Bennée addr2 = addr1 + size; 15962dd92606SRichard Henderson r1 = full_load(env, addr1, oi, retaddr); 15972dd92606SRichard Henderson r2 = full_load(env, addr2, oi, retaddr); 1598eed56642SAlex Bennée shift = (addr & (size - 1)) * 8; 1599eed56642SAlex Bennée 1600be5c4787STony Nguyen if (memop_big_endian(op)) { 1601eed56642SAlex Bennée /* Big-endian combine. */ 1602eed56642SAlex Bennée res = (r1 << shift) | (r2 >> ((size * 8) - shift)); 1603eed56642SAlex Bennée } else { 1604eed56642SAlex Bennée /* Little-endian combine. 
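 * For example, a 4-byte little-endian load with addr % 4 == 3 gives
 * shift == 24, so res = (r1 >> 24) | (r2 << 8): the top byte of the
 * first word supplies the lowest-addressed byte, and the mask below
 * trims the result to 32 bits.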
*/ 1605eed56642SAlex Bennée res = (r1 >> shift) | (r2 << ((size * 8) - shift)); 1606eed56642SAlex Bennée } 1607eed56642SAlex Bennée return res & MAKE_64BIT_MASK(0, size * 8); 1608eed56642SAlex Bennée } 1609eed56642SAlex Bennée 1610eed56642SAlex Bennée haddr = (void *)((uintptr_t)addr + entry->addend); 161180d9d1c6SRichard Henderson return load_memop(haddr, op); 1612eed56642SAlex Bennée } 1613eed56642SAlex Bennée 1614eed56642SAlex Bennée /* 1615eed56642SAlex Bennée * For the benefit of TCG generated code, we want to avoid the 1616eed56642SAlex Bennée * complication of ABI-specific return type promotion and always 1617eed56642SAlex Bennée * return a value extended to the register size of the host. This is 1618eed56642SAlex Bennée * tcg_target_long, except in the case of a 32-bit host and 64-bit 1619eed56642SAlex Bennée * data, and for that we always have uint64_t. 1620eed56642SAlex Bennée * 1621eed56642SAlex Bennée * We don't bother with this widened value for SOFTMMU_CODE_ACCESS. 1622eed56642SAlex Bennée */ 1623eed56642SAlex Bennée 16242dd92606SRichard Henderson static uint64_t full_ldub_mmu(CPUArchState *env, target_ulong addr, 16252dd92606SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 16262dd92606SRichard Henderson { 1627be5c4787STony Nguyen return load_helper(env, addr, oi, retaddr, MO_UB, false, full_ldub_mmu); 16282dd92606SRichard Henderson } 16292dd92606SRichard Henderson 1630fc1bc777SRichard Henderson tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr, 1631fc1bc777SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 1632eed56642SAlex Bennée { 16332dd92606SRichard Henderson return full_ldub_mmu(env, addr, oi, retaddr); 16342dd92606SRichard Henderson } 16352dd92606SRichard Henderson 16362dd92606SRichard Henderson static uint64_t full_le_lduw_mmu(CPUArchState *env, target_ulong addr, 16372dd92606SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 16382dd92606SRichard Henderson { 1639be5c4787STony Nguyen return load_helper(env, addr, oi, retaddr, MO_LEUW, false, 16402dd92606SRichard Henderson full_le_lduw_mmu); 1641eed56642SAlex Bennée } 1642eed56642SAlex Bennée 1643fc1bc777SRichard Henderson tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr, 1644fc1bc777SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 1645eed56642SAlex Bennée { 16462dd92606SRichard Henderson return full_le_lduw_mmu(env, addr, oi, retaddr); 16472dd92606SRichard Henderson } 16482dd92606SRichard Henderson 16492dd92606SRichard Henderson static uint64_t full_be_lduw_mmu(CPUArchState *env, target_ulong addr, 16502dd92606SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 16512dd92606SRichard Henderson { 1652be5c4787STony Nguyen return load_helper(env, addr, oi, retaddr, MO_BEUW, false, 16532dd92606SRichard Henderson full_be_lduw_mmu); 1654eed56642SAlex Bennée } 1655eed56642SAlex Bennée 1656fc1bc777SRichard Henderson tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr, 1657fc1bc777SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 1658eed56642SAlex Bennée { 16592dd92606SRichard Henderson return full_be_lduw_mmu(env, addr, oi, retaddr); 16602dd92606SRichard Henderson } 16612dd92606SRichard Henderson 16622dd92606SRichard Henderson static uint64_t full_le_ldul_mmu(CPUArchState *env, target_ulong addr, 16632dd92606SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 16642dd92606SRichard Henderson { 1665be5c4787STony Nguyen return load_helper(env, addr, oi, retaddr, MO_LEUL, false, 16662dd92606SRichard Henderson full_le_ldul_mmu); 
1667eed56642SAlex Bennée } 1668eed56642SAlex Bennée 1669fc1bc777SRichard Henderson tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr, 1670fc1bc777SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 1671eed56642SAlex Bennée { 16722dd92606SRichard Henderson return full_le_ldul_mmu(env, addr, oi, retaddr); 16732dd92606SRichard Henderson } 16742dd92606SRichard Henderson 16752dd92606SRichard Henderson static uint64_t full_be_ldul_mmu(CPUArchState *env, target_ulong addr, 16762dd92606SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 16772dd92606SRichard Henderson { 1678be5c4787STony Nguyen return load_helper(env, addr, oi, retaddr, MO_BEUL, false, 16792dd92606SRichard Henderson full_be_ldul_mmu); 1680eed56642SAlex Bennée } 1681eed56642SAlex Bennée 1682fc1bc777SRichard Henderson tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr, 1683fc1bc777SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 1684eed56642SAlex Bennée { 16852dd92606SRichard Henderson return full_be_ldul_mmu(env, addr, oi, retaddr); 1686eed56642SAlex Bennée } 1687eed56642SAlex Bennée 1688fc1bc777SRichard Henderson uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr, 1689fc1bc777SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 1690eed56642SAlex Bennée { 1691be5c4787STony Nguyen return load_helper(env, addr, oi, retaddr, MO_LEQ, false, 16922dd92606SRichard Henderson helper_le_ldq_mmu); 1693eed56642SAlex Bennée } 1694eed56642SAlex Bennée 1695fc1bc777SRichard Henderson uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr, 1696fc1bc777SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 1697eed56642SAlex Bennée { 1698be5c4787STony Nguyen return load_helper(env, addr, oi, retaddr, MO_BEQ, false, 16992dd92606SRichard Henderson helper_be_ldq_mmu); 1700eed56642SAlex Bennée } 1701eed56642SAlex Bennée 1702eed56642SAlex Bennée /* 1703eed56642SAlex Bennée * Provide signed versions of the load routines as well. We can of course 1704eed56642SAlex Bennée * avoid this for 64-bit data, or for 32-bit data on 32-bit host. 
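 *
 * Each of these is just a cast around the unsigned loader: e.g. loading
 * the byte 0xff through helper_ret_ldsb_mmu yields (int8_t)0xff == -1,
 * which the tcg_target_ulong return type then sign-extends to the full
 * host register width.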
1705eed56642SAlex Bennée */ 1706eed56642SAlex Bennée 1707eed56642SAlex Bennée 1708eed56642SAlex Bennée tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr, 1709eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 1710eed56642SAlex Bennée { 1711eed56642SAlex Bennée return (int8_t)helper_ret_ldub_mmu(env, addr, oi, retaddr); 1712eed56642SAlex Bennée } 1713eed56642SAlex Bennée 1714eed56642SAlex Bennée tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr, 1715eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 1716eed56642SAlex Bennée { 1717eed56642SAlex Bennée return (int16_t)helper_le_lduw_mmu(env, addr, oi, retaddr); 1718eed56642SAlex Bennée } 1719eed56642SAlex Bennée 1720eed56642SAlex Bennée tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr, 1721eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 1722eed56642SAlex Bennée { 1723eed56642SAlex Bennée return (int16_t)helper_be_lduw_mmu(env, addr, oi, retaddr); 1724eed56642SAlex Bennée } 1725eed56642SAlex Bennée 1726eed56642SAlex Bennée tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr, 1727eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 1728eed56642SAlex Bennée { 1729eed56642SAlex Bennée return (int32_t)helper_le_ldul_mmu(env, addr, oi, retaddr); 1730eed56642SAlex Bennée } 1731eed56642SAlex Bennée 1732eed56642SAlex Bennée tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr, 1733eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 1734eed56642SAlex Bennée { 1735eed56642SAlex Bennée return (int32_t)helper_be_ldul_mmu(env, addr, oi, retaddr); 1736eed56642SAlex Bennée } 1737eed56642SAlex Bennée 1738eed56642SAlex Bennée /* 1739d03f1408SRichard Henderson * Load helpers for cpu_ldst.h. 
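 *
 * These wrap the full_*_mmu loaders with tracing and plugin callbacks.
 * As an illustrative sketch (base and idx come from the caller's
 * context), a target might fetch a guest table entry with:
 *
 *     uint64_t ent = cpu_ldq_mmuidx_ra(env, base + 8 * idx, mmu_idx, GETPC());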
1740d03f1408SRichard Henderson */ 1741d03f1408SRichard Henderson 1742d03f1408SRichard Henderson static inline uint64_t cpu_load_helper(CPUArchState *env, abi_ptr addr, 1743d03f1408SRichard Henderson int mmu_idx, uintptr_t retaddr, 1744d03f1408SRichard Henderson MemOp op, FullLoadHelper *full_load) 1745d03f1408SRichard Henderson { 1746d03f1408SRichard Henderson uint16_t meminfo; 1747d03f1408SRichard Henderson TCGMemOpIdx oi; 1748d03f1408SRichard Henderson uint64_t ret; 1749d03f1408SRichard Henderson 1750d03f1408SRichard Henderson meminfo = trace_mem_get_info(op, mmu_idx, false); 1751d03f1408SRichard Henderson trace_guest_mem_before_exec(env_cpu(env), addr, meminfo); 1752d03f1408SRichard Henderson 1753d03f1408SRichard Henderson op &= ~MO_SIGN; 1754d03f1408SRichard Henderson oi = make_memop_idx(op, mmu_idx); 1755d03f1408SRichard Henderson ret = full_load(env, addr, oi, retaddr); 1756d03f1408SRichard Henderson 1757d03f1408SRichard Henderson qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, meminfo); 1758d03f1408SRichard Henderson 1759d03f1408SRichard Henderson return ret; 1760d03f1408SRichard Henderson } 1761d03f1408SRichard Henderson 1762d03f1408SRichard Henderson uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr, 1763d03f1408SRichard Henderson int mmu_idx, uintptr_t ra) 1764d03f1408SRichard Henderson { 1765d03f1408SRichard Henderson return cpu_load_helper(env, addr, mmu_idx, ra, MO_UB, full_ldub_mmu); 1766d03f1408SRichard Henderson } 1767d03f1408SRichard Henderson 1768d03f1408SRichard Henderson int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr, 1769d03f1408SRichard Henderson int mmu_idx, uintptr_t ra) 1770d03f1408SRichard Henderson { 1771d03f1408SRichard Henderson return (int8_t)cpu_load_helper(env, addr, mmu_idx, ra, MO_SB, 1772d03f1408SRichard Henderson full_ldub_mmu); 1773d03f1408SRichard Henderson } 1774d03f1408SRichard Henderson 1775d03f1408SRichard Henderson uint32_t cpu_lduw_mmuidx_ra(CPUArchState *env, abi_ptr addr, 1776d03f1408SRichard Henderson int mmu_idx, uintptr_t ra) 1777d03f1408SRichard Henderson { 1778d03f1408SRichard Henderson return cpu_load_helper(env, addr, mmu_idx, ra, MO_TEUW, 1779d03f1408SRichard Henderson MO_TE == MO_LE 1780d03f1408SRichard Henderson ? full_le_lduw_mmu : full_be_lduw_mmu); 1781d03f1408SRichard Henderson } 1782d03f1408SRichard Henderson 1783d03f1408SRichard Henderson int cpu_ldsw_mmuidx_ra(CPUArchState *env, abi_ptr addr, 1784d03f1408SRichard Henderson int mmu_idx, uintptr_t ra) 1785d03f1408SRichard Henderson { 1786d03f1408SRichard Henderson return (int16_t)cpu_load_helper(env, addr, mmu_idx, ra, MO_TESW, 1787d03f1408SRichard Henderson MO_TE == MO_LE 1788d03f1408SRichard Henderson ? full_le_lduw_mmu : full_be_lduw_mmu); 1789d03f1408SRichard Henderson } 1790d03f1408SRichard Henderson 1791d03f1408SRichard Henderson uint32_t cpu_ldl_mmuidx_ra(CPUArchState *env, abi_ptr addr, 1792d03f1408SRichard Henderson int mmu_idx, uintptr_t ra) 1793d03f1408SRichard Henderson { 1794d03f1408SRichard Henderson return cpu_load_helper(env, addr, mmu_idx, ra, MO_TEUL, 1795d03f1408SRichard Henderson MO_TE == MO_LE 1796d03f1408SRichard Henderson ? 
full_le_ldul_mmu : full_be_ldul_mmu); 1797d03f1408SRichard Henderson } 1798d03f1408SRichard Henderson 1799d03f1408SRichard Henderson uint64_t cpu_ldq_mmuidx_ra(CPUArchState *env, abi_ptr addr, 1800d03f1408SRichard Henderson int mmu_idx, uintptr_t ra) 1801d03f1408SRichard Henderson { 1802d03f1408SRichard Henderson return cpu_load_helper(env, addr, mmu_idx, ra, MO_TEQ, 1803d03f1408SRichard Henderson MO_TE == MO_LE 1804d03f1408SRichard Henderson ? helper_le_ldq_mmu : helper_be_ldq_mmu); 1805d03f1408SRichard Henderson } 1806d03f1408SRichard Henderson 1807cfe04a4bSRichard Henderson uint32_t cpu_ldub_data_ra(CPUArchState *env, target_ulong ptr, 1808cfe04a4bSRichard Henderson uintptr_t retaddr) 1809cfe04a4bSRichard Henderson { 1810cfe04a4bSRichard Henderson return cpu_ldub_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr); 1811cfe04a4bSRichard Henderson } 1812cfe04a4bSRichard Henderson 1813cfe04a4bSRichard Henderson int cpu_ldsb_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr) 1814cfe04a4bSRichard Henderson { 1815cfe04a4bSRichard Henderson return cpu_ldsb_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr); 1816cfe04a4bSRichard Henderson } 1817cfe04a4bSRichard Henderson 1818cfe04a4bSRichard Henderson uint32_t cpu_lduw_data_ra(CPUArchState *env, target_ulong ptr, 1819cfe04a4bSRichard Henderson uintptr_t retaddr) 1820cfe04a4bSRichard Henderson { 1821cfe04a4bSRichard Henderson return cpu_lduw_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr); 1822cfe04a4bSRichard Henderson } 1823cfe04a4bSRichard Henderson 1824cfe04a4bSRichard Henderson int cpu_ldsw_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr) 1825cfe04a4bSRichard Henderson { 1826cfe04a4bSRichard Henderson return cpu_ldsw_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr); 1827cfe04a4bSRichard Henderson } 1828cfe04a4bSRichard Henderson 1829cfe04a4bSRichard Henderson uint32_t cpu_ldl_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr) 1830cfe04a4bSRichard Henderson { 1831cfe04a4bSRichard Henderson return cpu_ldl_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr); 1832cfe04a4bSRichard Henderson } 1833cfe04a4bSRichard Henderson 1834cfe04a4bSRichard Henderson uint64_t cpu_ldq_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr) 1835cfe04a4bSRichard Henderson { 1836cfe04a4bSRichard Henderson return cpu_ldq_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr); 1837cfe04a4bSRichard Henderson } 1838cfe04a4bSRichard Henderson 1839cfe04a4bSRichard Henderson uint32_t cpu_ldub_data(CPUArchState *env, target_ulong ptr) 1840cfe04a4bSRichard Henderson { 1841cfe04a4bSRichard Henderson return cpu_ldub_data_ra(env, ptr, 0); 1842cfe04a4bSRichard Henderson } 1843cfe04a4bSRichard Henderson 1844cfe04a4bSRichard Henderson int cpu_ldsb_data(CPUArchState *env, target_ulong ptr) 1845cfe04a4bSRichard Henderson { 1846cfe04a4bSRichard Henderson return cpu_ldsb_data_ra(env, ptr, 0); 1847cfe04a4bSRichard Henderson } 1848cfe04a4bSRichard Henderson 1849cfe04a4bSRichard Henderson uint32_t cpu_lduw_data(CPUArchState *env, target_ulong ptr) 1850cfe04a4bSRichard Henderson { 1851cfe04a4bSRichard Henderson return cpu_lduw_data_ra(env, ptr, 0); 1852cfe04a4bSRichard Henderson } 1853cfe04a4bSRichard Henderson 1854cfe04a4bSRichard Henderson int cpu_ldsw_data(CPUArchState *env, target_ulong ptr) 1855cfe04a4bSRichard Henderson { 1856cfe04a4bSRichard Henderson return cpu_ldsw_data_ra(env, ptr, 0); 1857cfe04a4bSRichard Henderson } 1858cfe04a4bSRichard Henderson 1859cfe04a4bSRichard Henderson uint32_t 
cpu_ldl_data(CPUArchState *env, target_ulong ptr) 1860cfe04a4bSRichard Henderson { 1861cfe04a4bSRichard Henderson return cpu_ldl_data_ra(env, ptr, 0); 1862cfe04a4bSRichard Henderson } 1863cfe04a4bSRichard Henderson 1864cfe04a4bSRichard Henderson uint64_t cpu_ldq_data(CPUArchState *env, target_ulong ptr) 1865cfe04a4bSRichard Henderson { 1866cfe04a4bSRichard Henderson return cpu_ldq_data_ra(env, ptr, 0); 1867cfe04a4bSRichard Henderson } 1868cfe04a4bSRichard Henderson 1869d03f1408SRichard Henderson /* 1870eed56642SAlex Bennée * Store Helpers 1871eed56642SAlex Bennée */ 1872eed56642SAlex Bennée 1873c6b716cdSRichard Henderson static inline void QEMU_ALWAYS_INLINE 187480d9d1c6SRichard Henderson store_memop(void *haddr, uint64_t val, MemOp op) 187580d9d1c6SRichard Henderson { 187680d9d1c6SRichard Henderson switch (op) { 187780d9d1c6SRichard Henderson case MO_UB: 187880d9d1c6SRichard Henderson stb_p(haddr, val); 187980d9d1c6SRichard Henderson break; 188080d9d1c6SRichard Henderson case MO_BEUW: 188180d9d1c6SRichard Henderson stw_be_p(haddr, val); 188280d9d1c6SRichard Henderson break; 188380d9d1c6SRichard Henderson case MO_LEUW: 188480d9d1c6SRichard Henderson stw_le_p(haddr, val); 188580d9d1c6SRichard Henderson break; 188680d9d1c6SRichard Henderson case MO_BEUL: 188780d9d1c6SRichard Henderson stl_be_p(haddr, val); 188880d9d1c6SRichard Henderson break; 188980d9d1c6SRichard Henderson case MO_LEUL: 189080d9d1c6SRichard Henderson stl_le_p(haddr, val); 189180d9d1c6SRichard Henderson break; 189280d9d1c6SRichard Henderson case MO_BEQ: 189380d9d1c6SRichard Henderson stq_be_p(haddr, val); 189480d9d1c6SRichard Henderson break; 189580d9d1c6SRichard Henderson case MO_LEQ: 189680d9d1c6SRichard Henderson stq_le_p(haddr, val); 189780d9d1c6SRichard Henderson break; 189880d9d1c6SRichard Henderson default: 189980d9d1c6SRichard Henderson qemu_build_not_reached(); 190080d9d1c6SRichard Henderson } 190180d9d1c6SRichard Henderson } 190280d9d1c6SRichard Henderson 190380d9d1c6SRichard Henderson static inline void QEMU_ALWAYS_INLINE 19044601f8d1SRichard Henderson store_helper(CPUArchState *env, target_ulong addr, uint64_t val, 1905be5c4787STony Nguyen TCGMemOpIdx oi, uintptr_t retaddr, MemOp op) 1906eed56642SAlex Bennée { 1907eed56642SAlex Bennée uintptr_t mmu_idx = get_mmuidx(oi); 1908eed56642SAlex Bennée uintptr_t index = tlb_index(env, mmu_idx, addr); 1909eed56642SAlex Bennée CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); 1910eed56642SAlex Bennée target_ulong tlb_addr = tlb_addr_write(entry); 1911eed56642SAlex Bennée const size_t tlb_off = offsetof(CPUTLBEntry, addr_write); 1912eed56642SAlex Bennée unsigned a_bits = get_alignment_bits(get_memop(oi)); 1913eed56642SAlex Bennée void *haddr; 1914be5c4787STony Nguyen size_t size = memop_size(op); 1915eed56642SAlex Bennée 1916eed56642SAlex Bennée /* Handle CPU specific unaligned behaviour */ 1917eed56642SAlex Bennée if (addr & ((1 << a_bits) - 1)) { 191829a0af61SRichard Henderson cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE, 1919eed56642SAlex Bennée mmu_idx, retaddr); 1920eed56642SAlex Bennée } 1921eed56642SAlex Bennée 1922eed56642SAlex Bennée /* If the TLB entry is for a different page, reload and try again. 
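 * The victim TLB is tried first; only on a complete miss do we pay for
 * a full tlb_fill, after which index and entry must be recomputed
 * because the fill may have resized, and thus reallocated, the table.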
*/ 1923eed56642SAlex Bennée if (!tlb_hit(tlb_addr, addr)) { 1924eed56642SAlex Bennée if (!victim_tlb_hit(env, mmu_idx, index, tlb_off, 1925eed56642SAlex Bennée addr & TARGET_PAGE_MASK)) { 192629a0af61SRichard Henderson tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE, 1927eed56642SAlex Bennée mmu_idx, retaddr); 1928eed56642SAlex Bennée index = tlb_index(env, mmu_idx, addr); 1929eed56642SAlex Bennée entry = tlb_entry(env, mmu_idx, addr); 1930eed56642SAlex Bennée } 1931eed56642SAlex Bennée tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK; 1932eed56642SAlex Bennée } 1933eed56642SAlex Bennée 193450b107c5SRichard Henderson /* Handle anything that isn't just a straight memory access. */ 1935eed56642SAlex Bennée if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) { 193650b107c5SRichard Henderson CPUIOTLBEntry *iotlbentry; 19375b87b3e6SRichard Henderson bool need_swap; 193850b107c5SRichard Henderson 193950b107c5SRichard Henderson /* For anything that is unaligned, recurse through byte stores. */ 1940eed56642SAlex Bennée if ((addr & (size - 1)) != 0) { 1941eed56642SAlex Bennée goto do_unaligned_access; 1942eed56642SAlex Bennée } 194350b107c5SRichard Henderson 194450b107c5SRichard Henderson iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index]; 194550b107c5SRichard Henderson 194650b107c5SRichard Henderson /* Handle watchpoints. */ 194750b107c5SRichard Henderson if (unlikely(tlb_addr & TLB_WATCHPOINT)) { 194850b107c5SRichard Henderson /* On watchpoint hit, this will longjmp out. */ 194950b107c5SRichard Henderson cpu_check_watchpoint(env_cpu(env), addr, size, 195050b107c5SRichard Henderson iotlbentry->attrs, BP_MEM_WRITE, retaddr); 19515b87b3e6SRichard Henderson } 195250b107c5SRichard Henderson 19535b87b3e6SRichard Henderson need_swap = size > 1 && (tlb_addr & TLB_BSWAP); 195450b107c5SRichard Henderson 195550b107c5SRichard Henderson /* Handle I/O access. */ 195608565552SRichard Henderson if (tlb_addr & TLB_MMIO) { 19575b87b3e6SRichard Henderson io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr, 19585b87b3e6SRichard Henderson op ^ (need_swap * MO_BSWAP)); 19595b87b3e6SRichard Henderson return; 19605b87b3e6SRichard Henderson } 19615b87b3e6SRichard Henderson 19627b0d792cSRichard Henderson /* Ignore writes to ROM. */ 19637b0d792cSRichard Henderson if (unlikely(tlb_addr & TLB_DISCARD_WRITE)) { 19647b0d792cSRichard Henderson return; 19657b0d792cSRichard Henderson } 19667b0d792cSRichard Henderson 196708565552SRichard Henderson /* Handle clean RAM pages. */ 196808565552SRichard Henderson if (tlb_addr & TLB_NOTDIRTY) { 1969707526adSRichard Henderson notdirty_write(env_cpu(env), addr, size, iotlbentry, retaddr); 197008565552SRichard Henderson } 197108565552SRichard Henderson 1972707526adSRichard Henderson haddr = (void *)((uintptr_t)addr + entry->addend); 197308565552SRichard Henderson 19745b87b3e6SRichard Henderson /* 19755b87b3e6SRichard Henderson * Keep these two store_memop separate to ensure that the compiler 19765b87b3e6SRichard Henderson * is able to fold the entire function to a single instruction. 19775b87b3e6SRichard Henderson * There is a build-time assert inside to remind you of this. 
;-) 19785b87b3e6SRichard Henderson */ 19795b87b3e6SRichard Henderson if (unlikely(need_swap)) { 19805b87b3e6SRichard Henderson store_memop(haddr, val, op ^ MO_BSWAP); 19815b87b3e6SRichard Henderson } else { 19825b87b3e6SRichard Henderson store_memop(haddr, val, op); 19835b87b3e6SRichard Henderson } 1984eed56642SAlex Bennée return; 1985eed56642SAlex Bennée } 1986eed56642SAlex Bennée 1987eed56642SAlex Bennée /* Handle slow unaligned access (it spans two pages or IO). */ 1988eed56642SAlex Bennée if (size > 1 1989eed56642SAlex Bennée && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1 1990eed56642SAlex Bennée >= TARGET_PAGE_SIZE)) { 1991eed56642SAlex Bennée int i; 1992eed56642SAlex Bennée uintptr_t index2; 1993eed56642SAlex Bennée CPUTLBEntry *entry2; 1994eed56642SAlex Bennée target_ulong page2, tlb_addr2; 19958f7cd2adSRichard Henderson size_t size2; 19968f7cd2adSRichard Henderson 1997eed56642SAlex Bennée do_unaligned_access: 1998eed56642SAlex Bennée /* 1999eed56642SAlex Bennée * Ensure the second page is in the TLB. Note that the first page 2000eed56642SAlex Bennée * is already guaranteed to be filled, and that the second page 2001eed56642SAlex Bennée * cannot evict the first. 2002eed56642SAlex Bennée */ 2003eed56642SAlex Bennée page2 = (addr + size) & TARGET_PAGE_MASK; 20048f7cd2adSRichard Henderson size2 = (addr + size) & ~TARGET_PAGE_MASK; 2005eed56642SAlex Bennée index2 = tlb_index(env, mmu_idx, page2); 2006eed56642SAlex Bennée entry2 = tlb_entry(env, mmu_idx, page2); 2007eed56642SAlex Bennée tlb_addr2 = tlb_addr_write(entry2); 200850b107c5SRichard Henderson if (!tlb_hit_page(tlb_addr2, page2)) { 200950b107c5SRichard Henderson if (!victim_tlb_hit(env, mmu_idx, index2, tlb_off, page2)) { 20108f7cd2adSRichard Henderson tlb_fill(env_cpu(env), page2, size2, MMU_DATA_STORE, 2011eed56642SAlex Bennée mmu_idx, retaddr); 201250b107c5SRichard Henderson index2 = tlb_index(env, mmu_idx, page2); 201350b107c5SRichard Henderson entry2 = tlb_entry(env, mmu_idx, page2); 201450b107c5SRichard Henderson } 201550b107c5SRichard Henderson tlb_addr2 = tlb_addr_write(entry2); 201650b107c5SRichard Henderson } 201750b107c5SRichard Henderson 201850b107c5SRichard Henderson /* 201950b107c5SRichard Henderson * Handle watchpoints. Since this may trap, all checks 202050b107c5SRichard Henderson * must happen before any store. 202150b107c5SRichard Henderson */ 202250b107c5SRichard Henderson if (unlikely(tlb_addr & TLB_WATCHPOINT)) { 202350b107c5SRichard Henderson cpu_check_watchpoint(env_cpu(env), addr, size - size2, 202450b107c5SRichard Henderson env_tlb(env)->d[mmu_idx].iotlb[index].attrs, 202550b107c5SRichard Henderson BP_MEM_WRITE, retaddr); 202650b107c5SRichard Henderson } 202750b107c5SRichard Henderson if (unlikely(tlb_addr2 & TLB_WATCHPOINT)) { 202850b107c5SRichard Henderson cpu_check_watchpoint(env_cpu(env), page2, size2, 202950b107c5SRichard Henderson env_tlb(env)->d[mmu_idx].iotlb[index2].attrs, 203050b107c5SRichard Henderson BP_MEM_WRITE, retaddr); 2031eed56642SAlex Bennée } 2032eed56642SAlex Bennée 2033eed56642SAlex Bennée /* 2034eed56642SAlex Bennée * XXX: not efficient, but simple. 2035eed56642SAlex Bennée * This loop must go in the forward direction to avoid issues 2036eed56642SAlex Bennée * with self-modifying code in Windows 64-bit. 2037eed56642SAlex Bennée */ 2038eed56642SAlex Bennée for (i = 0; i < size; ++i) { 2039eed56642SAlex Bennée uint8_t val8; 2040be5c4787STony Nguyen if (memop_big_endian(op)) { 2041eed56642SAlex Bennée /* Big-endian extract. 
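 * E.g. storing val == 0x11223344 with size == 4: i == 0 extracts
 * val >> 24 == 0x11, so the most significant byte is written at the
 * lowest address, as big-endian requires.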
*/ 2042eed56642SAlex Bennée val8 = val >> (((size - 1) * 8) - (i * 8)); 2043eed56642SAlex Bennée } else { 2044eed56642SAlex Bennée /* Little-endian extract. */ 2045eed56642SAlex Bennée val8 = val >> (i * 8); 2046eed56642SAlex Bennée } 20474601f8d1SRichard Henderson helper_ret_stb_mmu(env, addr + i, val8, oi, retaddr); 2048eed56642SAlex Bennée } 2049eed56642SAlex Bennée return; 2050eed56642SAlex Bennée } 2051eed56642SAlex Bennée 2052eed56642SAlex Bennée haddr = (void *)((uintptr_t)addr + entry->addend); 205380d9d1c6SRichard Henderson store_memop(haddr, val, op); 2054eed56642SAlex Bennée } 2055eed56642SAlex Bennée 2056fc1bc777SRichard Henderson void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val, 2057eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 2058eed56642SAlex Bennée { 2059be5c4787STony Nguyen store_helper(env, addr, val, oi, retaddr, MO_UB); 2060eed56642SAlex Bennée } 2061eed56642SAlex Bennée 2062fc1bc777SRichard Henderson void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val, 2063eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 2064eed56642SAlex Bennée { 2065be5c4787STony Nguyen store_helper(env, addr, val, oi, retaddr, MO_LEUW); 2066eed56642SAlex Bennée } 2067eed56642SAlex Bennée 2068fc1bc777SRichard Henderson void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val, 2069eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 2070eed56642SAlex Bennée { 2071be5c4787STony Nguyen store_helper(env, addr, val, oi, retaddr, MO_BEUW); 2072eed56642SAlex Bennée } 2073eed56642SAlex Bennée 2074fc1bc777SRichard Henderson void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, 2075eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 2076eed56642SAlex Bennée { 2077be5c4787STony Nguyen store_helper(env, addr, val, oi, retaddr, MO_LEUL); 2078eed56642SAlex Bennée } 2079eed56642SAlex Bennée 2080fc1bc777SRichard Henderson void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, 2081eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 2082eed56642SAlex Bennée { 2083be5c4787STony Nguyen store_helper(env, addr, val, oi, retaddr, MO_BEUL); 2084eed56642SAlex Bennée } 2085eed56642SAlex Bennée 2086fc1bc777SRichard Henderson void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, 2087eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 2088eed56642SAlex Bennée { 2089be5c4787STony Nguyen store_helper(env, addr, val, oi, retaddr, MO_LEQ); 2090eed56642SAlex Bennée } 2091eed56642SAlex Bennée 2092fc1bc777SRichard Henderson void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, 2093eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 2094eed56642SAlex Bennée { 2095be5c4787STony Nguyen store_helper(env, addr, val, oi, retaddr, MO_BEQ); 2096eed56642SAlex Bennée } 2097d9bb58e5SYang Zhong 2098d03f1408SRichard Henderson /* 2099d03f1408SRichard Henderson * Store Helpers for cpu_ldst.h 2100d03f1408SRichard Henderson */ 2101d03f1408SRichard Henderson 2102d03f1408SRichard Henderson static inline void QEMU_ALWAYS_INLINE 2103d03f1408SRichard Henderson cpu_store_helper(CPUArchState *env, target_ulong addr, uint64_t val, 2104d03f1408SRichard Henderson int mmu_idx, uintptr_t retaddr, MemOp op) 2105d03f1408SRichard Henderson { 2106d03f1408SRichard Henderson TCGMemOpIdx oi; 2107d03f1408SRichard Henderson uint16_t meminfo; 2108d03f1408SRichard Henderson 2109d03f1408SRichard Henderson meminfo = trace_mem_get_info(op, mmu_idx, true); 2110d03f1408SRichard Henderson 
    trace_guest_mem_before_exec(env_cpu(env), addr, meminfo);

    oi = make_memop_idx(op, mmu_idx);
    store_helper(env, addr, val, oi, retaddr, op);

    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, meminfo);
}

void cpu_stb_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
                       int mmu_idx, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_UB);
}

void cpu_stw_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
                       int mmu_idx, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_TEUW);
}

void cpu_stl_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
                       int mmu_idx, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_TEUL);
}

void cpu_stq_mmuidx_ra(CPUArchState *env, target_ulong addr, uint64_t val,
                       int mmu_idx, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_TEQ);
}

void cpu_stb_data_ra(CPUArchState *env, target_ulong ptr,
                     uint32_t val, uintptr_t retaddr)
{
    cpu_stb_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
}

void cpu_stw_data_ra(CPUArchState *env, target_ulong ptr,
                     uint32_t val, uintptr_t retaddr)
{
    cpu_stw_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
}

void cpu_stl_data_ra(CPUArchState *env, target_ulong ptr,
                     uint32_t val, uintptr_t retaddr)
{
    cpu_stl_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
}

void cpu_stq_data_ra(CPUArchState *env, target_ulong ptr,
                     uint64_t val, uintptr_t retaddr)
{
    cpu_stq_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
}

void cpu_stb_data(CPUArchState *env, target_ulong ptr, uint32_t val)
{
    cpu_stb_data_ra(env, ptr, val, 0);
}

void cpu_stw_data(CPUArchState *env, target_ulong ptr, uint32_t val)
{
    cpu_stw_data_ra(env, ptr, val, 0);
}

void cpu_stl_data(CPUArchState *env, target_ulong ptr, uint32_t val)
{
    cpu_stl_data_ra(env, ptr, val, 0);
}

void cpu_stq_data(CPUArchState *env, target_ulong ptr, uint64_t val)
{
    cpu_stq_data_ra(env, ptr, val, 0);
}

/* The first set of helpers allows passing in of OI and RETADDR.  This makes
   them callable from other helpers.  */

#define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr
#define ATOMIC_NAME(X) \
    HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
#define ATOMIC_MMU_DECLS
#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, retaddr)
#define ATOMIC_MMU_CLEANUP
#define ATOMIC_MMU_IDX get_mmuidx(oi)

#include "atomic_common.inc.c"

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#if HAVE_CMPXCHG128 || HAVE_ATOMIC128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif

/* The second set of helpers is directly callable from TCG as helpers.  */

#undef EXTRA_ARGS
#undef ATOMIC_NAME
#undef ATOMIC_MMU_LOOKUP
#define EXTRA_ARGS , TCGMemOpIdx oi
#define ATOMIC_NAME(X) HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, GETPC())

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif
#undef ATOMIC_MMU_IDX

/* Code access functions.  */

static uint64_t full_ldub_code(CPUArchState *env, target_ulong addr,
                               TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_8, true, full_ldub_code);
}

uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr)
{
    TCGMemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(env, true));
    return full_ldub_code(env, addr, oi, 0);
}

static uint64_t full_lduw_code(CPUArchState *env, target_ulong addr,
                               TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_TEUW, true, full_lduw_code);
}

uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr)
{
    TCGMemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(env, true));
    return full_lduw_code(env, addr, oi, 0);
}

static uint64_t full_ldl_code(CPUArchState *env, target_ulong addr,
                              TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_TEUL, true, full_ldl_code);
}

uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr)
{
    TCGMemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, true));
    return full_ldl_code(env, addr, oi, 0);
}

static uint64_t full_ldq_code(CPUArchState *env, target_ulong addr,
                              TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_TEQ, true, full_ldq_code);
}

uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr)
{
    TCGMemOpIdx oi = make_memop_idx(MO_TEQ, cpu_mmu_index(env, true));
    return full_ldq_code(env, addr, oi, 0);
}
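
/*
 * Usage sketch (illustration only, not part of the upstream file): how a
 * target helper might combine the cpu_ldst.h store helpers defined above.
 * The function name "example_store_pair" and its semantics are hypothetical;
 * only the helpers it calls are the real entry points from this file.
 */
static inline void example_store_pair(CPUArchState *env, target_ulong addr,
                                      uint32_t lo, uint32_t hi,
                                      uintptr_t retaddr)
{
    /* cpu_stl_data_ra() stores through the current data MMU index,
       unwinding to RETADDR if the access faults.  */
    cpu_stl_data_ra(env, addr, lo, retaddr);

    /* cpu_stl_mmuidx_ra() lets the caller select the MMU index
       explicitly, e.g. when emulating a privileged store.  */
    cpu_stl_mmuidx_ra(env, addr + 4, hi, cpu_mmu_index(env, false), retaddr);
}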