/*
 * Common CPU TLB handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
#include "qemu/error-report.h"
#include "exec/log.h"
#include "exec/helper-proto.h"
#include "qemu/atomic.h"
#include "qemu/atomic128.h"
#include "translate-all.h"
#include "trace-root.h"
#include "trace/mem.h"
#ifdef CONFIG_PLUGIN
#include "qemu/plugin-memory.h"
#endif

/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif

#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)

#define assert_cpu_is_self(cpu) do {                              \
        if (DEBUG_TLB_GATE) {                                     \
            g_assert(!(cpu)->created || qemu_cpu_is_self(cpu));   \
        }                                                         \
    } while (0)

/* run_on_cpu_data.target_ptr should always be big enough for a
 * target_ulong even on 32 bit builds */
QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));

/* We currently can't handle more than 16 bits in the MMUIDX bitmask. */
QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
#define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)

static inline size_t tlb_n_entries(CPUTLBDescFast *fast)
{
    return (fast->mask >> CPU_TLB_ENTRY_BITS) + 1;
}

static inline size_t sizeof_tlb(CPUTLBDescFast *fast)
{
    return fast->mask + (1 << CPU_TLB_ENTRY_BITS);
}
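
/*
 * Layout note (the sizes below are illustrative, not normative):
 * fast->mask stores (n_entries - 1) pre-shifted by CPU_TLB_ENTRY_BITS,
 * so the fast path can turn an address into a table offset with a single
 * AND.  For example, with 1024 entries and 32-byte CPUTLBEntry
 * (CPU_TLB_ENTRY_BITS == 5):
 *
 *     fast->mask    == (1024 - 1) << 5   == 0x7fe0
 *     tlb_n_entries == (0x7fe0 >> 5) + 1 == 1024
 *     sizeof_tlb    == 0x7fe0 + (1 << 5) == 0x8000 (32 KiB)
 */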

static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
                             size_t max_entries)
{
    desc->window_begin_ns = ns;
    desc->window_max_entries = max_entries;
}

/**
 * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
 * @desc: The CPUTLBDesc portion of the TLB
 * @fast: The CPUTLBDescFast portion of the same TLB
 *
 * Called with tlb_lock held.
 *
 * We have two main constraints when resizing a TLB: (1) we only resize it
 * on a TLB flush (otherwise we'd have to take a perf hit by either rehashing
 * the array or unnecessarily flushing it), which means we do not control how
 * frequently the resizing can occur; (2) we don't have access to the guest's
 * future scheduling decisions, and therefore have to decide the magnitude of
 * the resize based on past observations.
 *
 * In general, a memory-hungry process can benefit greatly from an appropriately
 * sized TLB, since a guest TLB miss is very expensive. This doesn't mean that
 * we just have to make the TLB as large as possible; while an oversized TLB
 * results in minimal TLB miss rates, it also takes longer to be flushed
 * (flushes can be _very_ frequent), and the reduced locality can also hurt
 * performance.
 *
 * To achieve near-optimal performance for all kinds of workloads, we:
 *
 * 1. Aggressively increase the size of the TLB when the use rate of the
 * TLB being flushed is high, since it is likely that in the near future this
 * memory-hungry process will execute again, and its memory hungriness will
 * probably be similar.
 *
 * 2. Slowly reduce the size of the TLB as the use rate declines over a
 * reasonably large time window. The rationale is that if in such a time window
 * we have not observed a high TLB use rate, it is likely that we won't observe
 * it in the near future. In that case, once a time window expires we downsize
 * the TLB to match the maximum use rate observed in the window.
 *
 * 3. Try to keep the maximum use rate in a time window in the 30-70% range,
 * since in that range performance is likely near-optimal. Recall that the TLB
 * is direct mapped, so we want the use rate to be low (or at least not too
 * high), since otherwise we are likely to have a significant amount of
 * conflict misses.
 */
static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast,
                                  int64_t now)
{
    size_t old_size = tlb_n_entries(fast);
    size_t rate;
    size_t new_size = old_size;
    int64_t window_len_ms = 100;
    int64_t window_len_ns = window_len_ms * 1000 * 1000;
    bool window_expired = now > desc->window_begin_ns + window_len_ns;

    if (desc->n_used_entries > desc->window_max_entries) {
        desc->window_max_entries = desc->n_used_entries;
    }
    rate = desc->window_max_entries * 100 / old_size;

    if (rate > 70) {
        new_size = MIN(old_size << 1, 1 << CPU_TLB_DYN_MAX_BITS);
    } else if (rate < 30 && window_expired) {
        size_t ceil = pow2ceil(desc->window_max_entries);
        size_t expected_rate = desc->window_max_entries * 100 / ceil;

        /*
         * Avoid undersizing when the max number of entries seen is just below
         * a pow2. For instance, if max_entries == 1025, the expected use rate
         * would be 1025/2048==50%. However, if max_entries == 1023, we'd get
         * 1023/1024==99.9% use rate, so we'd likely end up doubling the size
         * later. Thus, make sure that the expected use rate remains below 70%.
         * (and since we double the size, that means the lowest rate we'd
         * expect to get is 35%, which is still in the 30-70% range where
         * we consider that the size is appropriate.)
         */
        if (expected_rate > 70) {
            ceil *= 2;
        }
        new_size = MAX(ceil, 1 << CPU_TLB_DYN_MIN_BITS);
    }

    if (new_size == old_size) {
        if (window_expired) {
            tlb_window_reset(desc, now, desc->n_used_entries);
        }
        return;
    }

    g_free(fast->table);
    g_free(desc->iotlb);

    tlb_window_reset(desc, now, 0);
    /* desc->n_used_entries is cleared by the caller */
    fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
    fast->table = g_try_new(CPUTLBEntry, new_size);
    desc->iotlb = g_try_new(CPUIOTLBEntry, new_size);

    /*
     * If the allocations fail, try smaller sizes. We just freed some
     * memory, so going back to half of new_size has a good chance of working.
     * Increased memory pressure elsewhere in the system might cause the
     * allocations to fail though, so we progressively reduce the allocation
     * size, aborting if we cannot even allocate the smallest TLB we support.
     */
    while (fast->table == NULL || desc->iotlb == NULL) {
        if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
            error_report("%s: %s", __func__, strerror(errno));
            abort();
        }
        new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS);
        fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;

        g_free(fast->table);
        g_free(desc->iotlb);
        fast->table = g_try_new(CPUTLBEntry, new_size);
        desc->iotlb = g_try_new(CPUIOTLBEntry, new_size);
    }
}
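
/*
 * A worked example of the sizing policy above (numbers are illustrative,
 * not taken from a real trace).  Assume old_size == 1024:
 *
 *   - window_max_entries == 768: rate == 75 > 70, so the TLB doubles to
 *     2048 entries (capped at 1 << CPU_TLB_DYN_MAX_BITS).
 *   - window_max_entries == 200 with the 100 ms window expired: rate == 19
 *     < 30; pow2ceil(200) == 256 gives expected_rate == 78 > 70, so ceil
 *     doubles to 512 and the TLB shrinks to 512 entries (floored at
 *     1 << CPU_TLB_DYN_MIN_BITS).
 *   - Any maximum use rate in the 30-70% band leaves the size unchanged.
 */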

static void tlb_mmu_flush_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast)
{
    desc->n_used_entries = 0;
    desc->large_page_addr = -1;
    desc->large_page_mask = -1;
    desc->vindex = 0;
    memset(fast->table, -1, sizeof_tlb(fast));
    memset(desc->vtable, -1, sizeof(desc->vtable));
}
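
/*
 * Filling both tables with 0xff bytes sets every comparator to -1, i.e.
 * addr_read == addr_write == addr_code == -1.  This is exactly the "not
 * in use" pattern tested by tlb_entry_is_empty() below, and it cannot
 * match any page-aligned virtual address, so empty entries never hit.
 */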

static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx,
                                        int64_t now)
{
    CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
    CPUTLBDescFast *fast = &env_tlb(env)->f[mmu_idx];

    tlb_mmu_resize_locked(desc, fast, now);
    tlb_mmu_flush_locked(desc, fast);
}

static void tlb_mmu_init(CPUTLBDesc *desc, CPUTLBDescFast *fast, int64_t now)
{
    size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;

    tlb_window_reset(desc, now, 0);
    desc->n_used_entries = 0;
    fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
    fast->table = g_new(CPUTLBEntry, n_entries);
    desc->iotlb = g_new(CPUIOTLBEntry, n_entries);
    tlb_mmu_flush_locked(desc, fast);
}

static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
{
    env_tlb(env)->d[mmu_idx].n_used_entries++;
}

static inline void tlb_n_used_entries_dec(CPUArchState *env, uintptr_t mmu_idx)
{
    env_tlb(env)->d[mmu_idx].n_used_entries--;
}

void tlb_init(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;
    int64_t now = get_clock_realtime();
    int i;

    qemu_spin_init(&env_tlb(env)->c.lock);

    /* All tlbs are initialized flushed. */
    env_tlb(env)->c.dirty = 0;

    for (i = 0; i < NB_MMU_MODES; i++) {
        tlb_mmu_init(&env_tlb(env)->d[i], &env_tlb(env)->f[i], now);
    }
}

void tlb_destroy(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;
    int i;

    qemu_spin_destroy(&env_tlb(env)->c.lock);
    for (i = 0; i < NB_MMU_MODES; i++) {
        CPUTLBDesc *desc = &env_tlb(env)->d[i];
        CPUTLBDescFast *fast = &env_tlb(env)->f[i];

        g_free(fast->table);
        g_free(desc->iotlb);
    }
}

/* flush_all_helper: run fn asynchronously on all cpus other than src.
 *
 * Callers that need a synchronisation point additionally queue the src
 * cpu's helper as "safe" work (see the *_all_cpus_synced variants), so
 * that all queued work is finished before execution starts again.
 */
static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
                             run_on_cpu_data d)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, fn, d);
        }
    }
}

void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
{
    CPUState *cpu;
    size_t full = 0, part = 0, elide = 0;

    CPU_FOREACH(cpu) {
        CPUArchState *env = cpu->env_ptr;

        full += atomic_read(&env_tlb(env)->c.full_flush_count);
        part += atomic_read(&env_tlb(env)->c.part_flush_count);
        elide += atomic_read(&env_tlb(env)->c.elide_flush_count);
    }
    *pfull = full;
    *ppart = part;
    *pelide = elide;
}

static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    uint16_t asked = data.host_int;
    uint16_t all_dirty, work, to_clean;
    int64_t now = get_clock_realtime();

    assert_cpu_is_self(cpu);

    tlb_debug("mmu_idx:0x%04" PRIx16 "\n", asked);

    qemu_spin_lock(&env_tlb(env)->c.lock);

    all_dirty = env_tlb(env)->c.dirty;
    to_clean = asked & all_dirty;
    all_dirty &= ~to_clean;
    env_tlb(env)->c.dirty = all_dirty;

    for (work = to_clean; work != 0; work &= work - 1) {
        int mmu_idx = ctz32(work);
        tlb_flush_one_mmuidx_locked(env, mmu_idx, now);
    }

    qemu_spin_unlock(&env_tlb(env)->c.lock);

    cpu_tb_jmp_cache_clear(cpu);

    if (to_clean == ALL_MMUIDX_BITS) {
        atomic_set(&env_tlb(env)->c.full_flush_count,
                   env_tlb(env)->c.full_flush_count + 1);
    } else {
        atomic_set(&env_tlb(env)->c.part_flush_count,
                   env_tlb(env)->c.part_flush_count + ctpop16(to_clean));
        if (to_clean != asked) {
            atomic_set(&env_tlb(env)->c.elide_flush_count,
                       env_tlb(env)->c.elide_flush_count +
                       ctpop16(asked & ~to_clean));
        }
    }
}
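
/*
 * Example of the bitmask bookkeeping above (values illustrative): with
 * asked == 0b0110 and c.dirty == 0b0101, to_clean == 0b0100, so only
 * mmu_idx 2 is actually flushed; the flush of mmu_idx 1 is elided because
 * it was already clean (counted in elide_flush_count), while mmu_idx 0
 * stays dirty for a later flush.  The "work &= work - 1" idiom clears
 * the lowest set bit on each iteration.
 */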

void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
    tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);

    if (cpu->created && !qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
                         RUN_ON_CPU_HOST_INT(idxmap));
    } else {
        tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap));
    }
}

void tlb_flush(CPUState *cpu)
{
    tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS);
}

void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus(src_cpu, ALL_MMUIDX_BITS);
}

void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS);
}

static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry,
                                        target_ulong page)
{
    return tlb_hit_page(tlb_entry->addr_read, page) ||
           tlb_hit_page(tlb_addr_write(tlb_entry), page) ||
           tlb_hit_page(tlb_entry->addr_code, page);
}

/**
 * tlb_entry_is_empty - return true if the entry is not in use
 * @te: pointer to CPUTLBEntry
 */
static inline bool tlb_entry_is_empty(const CPUTLBEntry *te)
{
    return te->addr_read == -1 && te->addr_write == -1 && te->addr_code == -1;
}

/* Called with tlb_c.lock held */
static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry,
                                          target_ulong page)
{
    if (tlb_hit_page_anyprot(tlb_entry, page)) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
        return true;
    }
    return false;
}

/* Called with tlb_c.lock held */
static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx,
                                              target_ulong page)
{
    CPUTLBDesc *d = &env_tlb(env)->d[mmu_idx];
    int k;

    assert_cpu_is_self(env_cpu(env));
    for (k = 0; k < CPU_VTLB_SIZE; k++) {
        if (tlb_flush_entry_locked(&d->vtable[k], page)) {
            tlb_n_used_entries_dec(env, mmu_idx);
        }
    }
}
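
/*
 * Summary of the structures handled above: besides the direct-mapped
 * "fast" table, each mmu_idx keeps a small victim TLB of CPU_VTLB_SIZE
 * entries (d->vtable) which is scanned linearly; entries displaced from
 * the fast table are parked there by tlb_set_page_with_attrs() below.
 */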

static void tlb_flush_page_locked(CPUArchState *env, int midx,
                                  target_ulong page)
{
    target_ulong lp_addr = env_tlb(env)->d[midx].large_page_addr;
    target_ulong lp_mask = env_tlb(env)->d[midx].large_page_mask;

    /* Check if we need to flush due to large pages.  */
    if ((page & lp_mask) == lp_addr) {
        tlb_debug("forcing full flush midx %d ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  midx, lp_addr, lp_mask);
        tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
    } else {
        if (tlb_flush_entry_locked(tlb_entry(env, midx, page), page)) {
            tlb_n_used_entries_dec(env, midx);
        }
        tlb_flush_vtlb_page_locked(env, midx, page);
    }
}

/**
 * tlb_flush_page_by_mmuidx_async_0:
 * @cpu: cpu on which to flush
 * @addr: page of virtual address to flush
 * @idxmap: set of mmu_idx to flush
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, flush one page
 * at @addr from the tlbs indicated by @idxmap from @cpu.
 */
static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu,
                                             target_ulong addr,
                                             uint16_t idxmap)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("page addr:" TARGET_FMT_lx " mmu_map:0x%x\n", addr, idxmap);

    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if ((idxmap >> mmu_idx) & 1) {
            tlb_flush_page_locked(env, mmu_idx, addr);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);

    tb_flush_jmp_cache(cpu, addr);
}

/**
 * tlb_flush_page_by_mmuidx_async_1:
 * @cpu: cpu on which to flush
 * @data: encoded addr + idxmap
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, called through
 * async_run_on_cpu.  The idxmap parameter is encoded in the page
 * offset of the target_ptr field.  This limits the set of mmu_idx
 * that can be passed via this method.
 */
static void tlb_flush_page_by_mmuidx_async_1(CPUState *cpu,
                                             run_on_cpu_data data)
{
    target_ulong addr_and_idxmap = (target_ulong) data.target_ptr;
    target_ulong addr = addr_and_idxmap & TARGET_PAGE_MASK;
    uint16_t idxmap = addr_and_idxmap & ~TARGET_PAGE_MASK;

    tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
}

typedef struct {
    target_ulong addr;
    uint16_t idxmap;
} TLBFlushPageByMMUIdxData;

/**
 * tlb_flush_page_by_mmuidx_async_2:
 * @cpu: cpu on which to flush
 * @data: allocated addr + idxmap
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, called through
 * async_run_on_cpu.  The addr+idxmap parameters are stored in a
 * TLBFlushPageByMMUIdxData structure that has been allocated
 * specifically for this helper.  Free the structure when done.
 */
static void tlb_flush_page_by_mmuidx_async_2(CPUState *cpu,
                                             run_on_cpu_data data)
{
    TLBFlushPageByMMUIdxData *d = data.host_ptr;

    tlb_flush_page_by_mmuidx_async_0(cpu, d->addr, d->idxmap);
    g_free(d);
}

void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
{
    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    if (qemu_cpu_is_self(cpu)) {
        tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
    } else if (idxmap < TARGET_PAGE_SIZE) {
        /*
         * Most targets have only a few mmu_idx.  In the case where
         * we can stuff idxmap into the low TARGET_PAGE_BITS, avoid
         * allocating memory for this operation.
         */
        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        TLBFlushPageByMMUIdxData *d = g_new(TLBFlushPageByMMUIdxData, 1);

        /* Otherwise allocate a structure, freed by the worker.  */
        d->addr = addr;
        d->idxmap = idxmap;
        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_2,
                         RUN_ON_CPU_HOST_PTR(d));
    }
}
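
/*
 * Illustration of the encoding used by the _async_1 path above: with
 * 4 KiB pages (TARGET_PAGE_BITS == 12) and idxmap == 0x0005, the call
 * packs (addr & TARGET_PAGE_MASK) | 0x0005 into one target_ulong; since
 * the page offset bits of the aligned address are zero, the worker can
 * split the two apart again.  An idxmap >= TARGET_PAGE_SIZE would not
 * fit and must take the heap-allocated _async_2 path instead.
 */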

void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
    tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS);
}

void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr,
                                       uint16_t idxmap)
{
    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    /*
     * Allocate memory to hold addr+idxmap only when needed.
     * See tlb_flush_page_by_mmuidx for details.
     */
    if (idxmap < TARGET_PAGE_SIZE) {
        flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        CPUState *dst_cpu;

        /* Allocate a separate data block for each destination cpu.  */
        CPU_FOREACH(dst_cpu) {
            if (dst_cpu != src_cpu) {
                TLBFlushPageByMMUIdxData *d
                    = g_new(TLBFlushPageByMMUIdxData, 1);

                d->addr = addr;
                d->idxmap = idxmap;
                async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
                                 RUN_ON_CPU_HOST_PTR(d));
            }
        }
    }

    tlb_flush_page_by_mmuidx_async_0(src_cpu, addr, idxmap);
}

void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
{
    tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS);
}

void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                              target_ulong addr,
                                              uint16_t idxmap)
{
    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    /*
     * Allocate memory to hold addr+idxmap only when needed.
     * See tlb_flush_page_by_mmuidx for details.
     */
    if (idxmap < TARGET_PAGE_SIZE) {
        flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
        async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                              RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        CPUState *dst_cpu;
        TLBFlushPageByMMUIdxData *d;

        /* Allocate a separate data block for each destination cpu.  */
        CPU_FOREACH(dst_cpu) {
            if (dst_cpu != src_cpu) {
                d = g_new(TLBFlushPageByMMUIdxData, 1);
                d->addr = addr;
                d->idxmap = idxmap;
                async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
                                 RUN_ON_CPU_HOST_PTR(d));
            }
        }

        d = g_new(TLBFlushPageByMMUIdxData, 1);
        d->addr = addr;
        d->idxmap = idxmap;
        async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_2,
                              RUN_ON_CPU_HOST_PTR(d));
    }
}

void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr)
{
    tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
                                             DIRTY_MEMORY_CODE);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
void tlb_unprotect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}

/*
 * Dirty write flag handling
 *
 * When the TCG code writes to a location it looks up the address in
 * the TLB and uses that data to compute the final address. If any of
 * the lower bits of the address are set then the slow path is forced.
 * There are a number of reasons to do this but for normal RAM the
 * most usual is detecting writes to code regions which may invalidate
 * generated code.
 *
 * Other vCPUs might be reading their TLBs during guest execution, so we update
 * te->addr_write with atomic_set. We don't need to worry about this for
 * oversized guests as MTTCG is disabled for them.
 *
 * Called with tlb_c.lock held.
 */
static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
                                         uintptr_t start, uintptr_t length)
{
    uintptr_t addr = tlb_entry->addr_write;

    if ((addr & (TLB_INVALID_MASK | TLB_MMIO |
                 TLB_DISCARD_WRITE | TLB_NOTDIRTY)) == 0) {
        addr &= TARGET_PAGE_MASK;
        addr += tlb_entry->addend;
        if ((addr - start) < length) {
#if TCG_OVERSIZED_GUEST
            tlb_entry->addr_write |= TLB_NOTDIRTY;
#else
            atomic_set(&tlb_entry->addr_write,
                       tlb_entry->addr_write | TLB_NOTDIRTY);
#endif
        }
    }
}

/*
 * Called with tlb_c.lock held.
 * Called only from the vCPU context, i.e. the TLB's owner thread.
 */
static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s)
{
    *d = *s;
}

/* This is a cross vCPU call (i.e. another vCPU resetting the flags of
 * the target vCPU).
 * We must take tlb_c.lock to avoid racing with another vCPU update. The only
 * thing actually updated is the target TLB entry ->addr_write flags.
 */
void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
{
    CPUArchState *env;

    int mmu_idx;

    env = cpu->env_ptr;
    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        unsigned int i;
        unsigned int n = tlb_n_entries(&env_tlb(env)->f[mmu_idx]);

        for (i = 0; i < n; i++) {
            tlb_reset_dirty_range_locked(&env_tlb(env)->f[mmu_idx].table[i],
                                         start1, length);
        }

        for (i = 0; i < CPU_VTLB_SIZE; i++) {
            tlb_reset_dirty_range_locked(&env_tlb(env)->d[mmu_idx].vtable[i],
                                         start1, length);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);
}
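
/*
 * The net effect of tlb_reset_dirty(): once TLB_NOTDIRTY is set in
 * addr_write, fast-path stores through that entry no longer match and
 * fall back to the slow path, where dirty-memory tracking can run.
 * tlb_set_dirty() below removes the flag again once the page has been
 * redirtied.
 */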

/* Called with tlb_c.lock held */
static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry,
                                         target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    vaddr &= TARGET_PAGE_MASK;
    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, vaddr), vaddr);
    }

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_set_dirty1_locked(&env_tlb(env)->d[mmu_idx].vtable[k], vaddr);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);
}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated. */
static void tlb_add_large_page(CPUArchState *env, int mmu_idx,
                               target_ulong vaddr, target_ulong size)
{
    target_ulong lp_addr = env_tlb(env)->d[mmu_idx].large_page_addr;
    target_ulong lp_mask = ~(size - 1);

    if (lp_addr == (target_ulong)-1) {
        /* No previous large page.  */
        lp_addr = vaddr;
    } else {
        /* Extend the existing region to include the new page.
           This is a compromise between unnecessary flushes and
           the cost of maintaining a full variable size TLB. */
        lp_mask &= env_tlb(env)->d[mmu_idx].large_page_mask;
        while (((lp_addr ^ vaddr) & lp_mask) != 0) {
            lp_mask <<= 1;
        }
    }
    env_tlb(env)->d[mmu_idx].large_page_addr = lp_addr & lp_mask;
    env_tlb(env)->d[mmu_idx].large_page_mask = lp_mask;
}
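
/*
 * Example of the mask widening above (addresses illustrative): with a
 * 2 MiB page already recorded at 0x400000 (lp_mask == ~0x1fffff), adding
 * another 2 MiB page at 0x600000 widens lp_mask to ~0x3fffff, so both
 * pages fall into one 4 MiB region; a later tlb_flush_page() anywhere in
 * that region forces a full flush of the mmu_idx (see tlb_flush_page_locked).
 */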

/* Add a new TLB entry. At most one entry for a given virtual address
 * is permitted. Only a single TARGET_PAGE_SIZE region is mapped; the
 * supplied size is only used by tlb_flush_page.
 *
 * Called from TCG-generated code, which is under an RCU read-side
 * critical section.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs, int prot,
                             int mmu_idx, target_ulong size)
{
    CPUArchState *env = cpu->env_ptr;
    CPUTLB *tlb = env_tlb(env);
    CPUTLBDesc *desc = &tlb->d[mmu_idx];
    MemoryRegionSection *section;
    unsigned int index;
    target_ulong address;
    target_ulong write_address;
    uintptr_t addend;
    CPUTLBEntry *te, tn;
    hwaddr iotlb, xlat, sz, paddr_page;
    target_ulong vaddr_page;
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    int wp_flags;
    bool is_ram, is_romd;

    assert_cpu_is_self(cpu);

    if (size <= TARGET_PAGE_SIZE) {
        sz = TARGET_PAGE_SIZE;
    } else {
        tlb_add_large_page(env, mmu_idx, vaddr, size);
        sz = size;
    }
    vaddr_page = vaddr & TARGET_PAGE_MASK;
    paddr_page = paddr & TARGET_PAGE_MASK;

    section = address_space_translate_for_iotlb(cpu, asidx, paddr_page,
                                                &xlat, &sz, attrs, &prot);
    assert(sz >= TARGET_PAGE_SIZE);

    tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
              " prot=%x idx=%d\n",
              vaddr, paddr, prot, mmu_idx);

    address = vaddr_page;
    if (size < TARGET_PAGE_SIZE) {
        /* Repeat the MMU check and TLB fill on every access.  */
        address |= TLB_INVALID_MASK;
    }
    if (attrs.byte_swap) {
        address |= TLB_BSWAP;
    }

    is_ram = memory_region_is_ram(section->mr);
    is_romd = memory_region_is_romd(section->mr);

    if (is_ram || is_romd) {
        /* RAM and ROMD both have associated host memory. */
        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
    } else {
        /* I/O does not; force the host address to NULL. */
        addend = 0;
    }
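
    /*
     * At this point the low bits of 'address' may already carry
     * TLB_INVALID_MASK and/or TLB_BSWAP; as the "Dirty write flag
     * handling" comment above explains for addr_write, any set low bit
     * diverts the access to the slow path.
     */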

    write_address = address;
    if (is_ram) {
        iotlb = memory_region_get_ram_addr(section->mr) + xlat;
        /*
         * Computing is_clean is expensive; avoid all that unless
         * the page is actually writable.
         */
        if (prot & PAGE_WRITE) {
            if (section->readonly) {
                write_address |= TLB_DISCARD_WRITE;
            } else if (cpu_physical_memory_is_clean(iotlb)) {
                write_address |= TLB_NOTDIRTY;
            }
        }
    } else {
        /* I/O or ROMD */
        iotlb = memory_region_section_get_iotlb(cpu, section) + xlat;
        /*
         * Writes to romd devices must go through MMIO to enable write.
         * Reads to romd devices go through the ram_ptr found above,
         * but of course reads to I/O must go through MMIO.
         */
        write_address |= TLB_MMIO;
        if (!is_romd) {
            address = write_address;
        }
    }

    wp_flags = cpu_watchpoint_address_matches(cpu, vaddr_page,
                                              TARGET_PAGE_SIZE);

    index = tlb_index(env, mmu_idx, vaddr_page);
    te = tlb_entry(env, mmu_idx, vaddr_page);

    /*
     * Hold the TLB lock for the rest of the function. We could acquire/release
     * the lock several times in the function, but it is faster to amortize the
     * acquisition cost by acquiring it just once. Note that this leads to
     * a longer critical section, but this is not a concern since the TLB lock
     * is unlikely to be contended.
     */
    qemu_spin_lock(&tlb->c.lock);

    /* Note that the tlb is no longer clean.  */
    tlb->c.dirty |= 1 << mmu_idx;

    /* Make sure there's no cached translation for the new page.  */
    tlb_flush_vtlb_page_locked(env, mmu_idx, vaddr_page);

    /*
     * Only evict the old entry to the victim tlb if it's for a
     * different page; otherwise just overwrite the stale data.
     */
    if (!tlb_hit_page_anyprot(te, vaddr_page) && !tlb_entry_is_empty(te)) {
        unsigned vidx = desc->vindex++ % CPU_VTLB_SIZE;
        CPUTLBEntry *tv = &desc->vtable[vidx];

        /* Evict the old entry into the victim tlb.  */
        copy_tlb_helper_locked(tv, te);
        desc->viotlb[vidx] = desc->iotlb[index];
        tlb_n_used_entries_dec(env, mmu_idx);
    }

    /* refill the tlb */
    /*
     * At this point iotlb contains a physical section number in the lower
     * TARGET_PAGE_BITS, and either
     *  + the ram_addr_t of the page base of the target RAM (RAM)
     *  + the offset within section->mr of the page base (I/O, ROMD)
     * We subtract the vaddr_page (which is page aligned and thus won't
     * disturb the low bits) to give an offset which can be added to the
     * (non-page-aligned) vaddr of the eventual memory access to get
     * the MemoryRegion offset for the access. Note that the vaddr we
     * subtract here is that of the page base, and not the same as the
     * vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
     */
    desc->iotlb[index].addr = iotlb - vaddr_page;
    desc->iotlb[index].attrs = attrs;

    /* Now calculate the new entry */
    tn.addend = addend - vaddr_page;
    if (prot & PAGE_READ) {
        tn.addr_read = address;
        if (wp_flags & BP_MEM_READ) {
            tn.addr_read |= TLB_WATCHPOINT;
        }
    } else {
        tn.addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        tn.addr_code = address;
    } else {
        tn.addr_code = -1;
    }

    tn.addr_write = -1;
    if (prot & PAGE_WRITE) {
        tn.addr_write = write_address;
        if (prot & PAGE_WRITE_INV) {
            tn.addr_write |= TLB_INVALID_MASK;
        }
        if (wp_flags & BP_MEM_WRITE) {
            tn.addr_write |= TLB_WATCHPOINT;
        }
    }

    copy_tlb_helper_locked(te, &tn);
    tlb_n_used_entries_inc(env, mmu_idx);
    qemu_spin_unlock(&tlb->c.lock);
}
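
/*
 * Sketch of a typical caller (the exact hook is target-specific): a
 * target's tlb_fill implementation translates vaddr to paddr and then
 * installs the mapping with something like
 *
 *     tlb_set_page_with_attrs(cs, vaddr & TARGET_PAGE_MASK, paddr, attrs,
 *                             prot, mmu_idx, TARGET_PAGE_SIZE);
 */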

/* Add a new TLB entry, but without specifying the memory
 * transaction attributes to be used.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
                            prot, mmu_idx, size);
}

static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    ram_addr = qemu_ram_addr_from_host(ptr);
    if (ram_addr == RAM_ADDR_INVALID) {
        error_report("Bad ram pointer %p", ptr);
        abort();
    }
    return ram_addr;
}

/*
 * Note: tlb_fill() can trigger a resize of the TLB. This means that all of the
 * caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must
 * be discarded and looked up again (e.g. via tlb_entry()).
 */
static void tlb_fill(CPUState *cpu, target_ulong addr, int size,
                     MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    bool ok;

    /*
     * This is not a probe, so only valid return is success; failure
     * should result in exception + longjmp to the cpu loop.
     */
    ok = cc->tlb_fill(cpu, addr, size, access_type, mmu_idx, false, retaddr);
    assert(ok);
}

static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                         int mmu_idx, target_ulong addr, uintptr_t retaddr,
                         MMUAccessType access_type, MemOp op)
{
    CPUState *cpu = env_cpu(env);
    hwaddr mr_offset;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    uint64_t val;
    bool locked = false;
    MemTxResult r;

    section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
    mr = section->mr;
    mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
    cpu->mem_io_pc = retaddr;
    if (!cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    if (mr->global_locking && !qemu_mutex_iothread_locked()) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    r = memory_region_dispatch_read(mr, mr_offset, &val, op, iotlbentry->attrs);
    if (r != MEMTX_OK) {
        hwaddr physaddr = mr_offset +
            section->offset_within_address_space -
            section->offset_within_region;

        cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), access_type,
                               mmu_idx, iotlbentry->attrs, r, retaddr);
    }
    if (locked) {
        qemu_mutex_unlock_iothread();
    }

    return val;
}
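
/*
 * Illustration of the physaddr computation above (numbers made up): for
 * a device section that starts at offset 0 within its MemoryRegion and
 * is mapped at system address 0x10000000, a failed access at mr_offset
 * 0x24 reports physaddr 0x10000024 to cpu_transaction_failed().
 */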
10802f3a57eeSAlex Bennée */ 10812f3a57eeSAlex Bennée static void save_iotlb_data(CPUState *cs, hwaddr addr, 10822f3a57eeSAlex Bennée MemoryRegionSection *section, hwaddr mr_offset) 10832f3a57eeSAlex Bennée { 10842f3a57eeSAlex Bennée #ifdef CONFIG_PLUGIN 10852f3a57eeSAlex Bennée SavedIOTLB *saved = &cs->saved_iotlb; 10862f3a57eeSAlex Bennée saved->addr = addr; 10872f3a57eeSAlex Bennée saved->section = section; 10882f3a57eeSAlex Bennée saved->mr_offset = mr_offset; 10892f3a57eeSAlex Bennée #endif 10902f3a57eeSAlex Bennée } 10912f3a57eeSAlex Bennée 1092d9bb58e5SYang Zhong static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry, 1093f1be3696SRichard Henderson int mmu_idx, uint64_t val, target_ulong addr, 1094be5c4787STony Nguyen uintptr_t retaddr, MemOp op) 1095d9bb58e5SYang Zhong { 109629a0af61SRichard Henderson CPUState *cpu = env_cpu(env); 10972d54f194SPeter Maydell hwaddr mr_offset; 10982d54f194SPeter Maydell MemoryRegionSection *section; 10992d54f194SPeter Maydell MemoryRegion *mr; 1100d9bb58e5SYang Zhong bool locked = false; 110104e3aabdSPeter Maydell MemTxResult r; 1102d9bb58e5SYang Zhong 11032d54f194SPeter Maydell section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs); 11042d54f194SPeter Maydell mr = section->mr; 11052d54f194SPeter Maydell mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr; 110608565552SRichard Henderson if (!cpu->can_do_io) { 1107d9bb58e5SYang Zhong cpu_io_recompile(cpu, retaddr); 1108d9bb58e5SYang Zhong } 1109d9bb58e5SYang Zhong cpu->mem_io_pc = retaddr; 1110d9bb58e5SYang Zhong 11112f3a57eeSAlex Bennée /* 11122f3a57eeSAlex Bennée * The memory_region_dispatch may trigger a flush/resize 11132f3a57eeSAlex Bennée * so for plugins we save the iotlb_data just in case. 11142f3a57eeSAlex Bennée */ 11152f3a57eeSAlex Bennée save_iotlb_data(cpu, iotlbentry->addr, section, mr_offset); 11162f3a57eeSAlex Bennée 11178b812533SAlex Bennée if (mr->global_locking && !qemu_mutex_iothread_locked()) { 1118d9bb58e5SYang Zhong qemu_mutex_lock_iothread(); 1119d9bb58e5SYang Zhong locked = true; 1120d9bb58e5SYang Zhong } 1121be5c4787STony Nguyen r = memory_region_dispatch_write(mr, mr_offset, val, op, iotlbentry->attrs); 112204e3aabdSPeter Maydell if (r != MEMTX_OK) { 11232d54f194SPeter Maydell hwaddr physaddr = mr_offset + 11242d54f194SPeter Maydell section->offset_within_address_space - 11252d54f194SPeter Maydell section->offset_within_region; 11262d54f194SPeter Maydell 1127be5c4787STony Nguyen cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), 1128be5c4787STony Nguyen MMU_DATA_STORE, mmu_idx, iotlbentry->attrs, r, 1129be5c4787STony Nguyen retaddr); 113004e3aabdSPeter Maydell } 1131d9bb58e5SYang Zhong if (locked) { 1132d9bb58e5SYang Zhong qemu_mutex_unlock_iothread(); 1133d9bb58e5SYang Zhong } 1134d9bb58e5SYang Zhong } 1135d9bb58e5SYang Zhong 11364811e909SRichard Henderson static inline target_ulong tlb_read_ofs(CPUTLBEntry *entry, size_t ofs) 11374811e909SRichard Henderson { 11384811e909SRichard Henderson #if TCG_OVERSIZED_GUEST 11394811e909SRichard Henderson return *(target_ulong *)((uintptr_t)entry + ofs); 11404811e909SRichard Henderson #else 11414811e909SRichard Henderson /* ofs might correspond to .addr_write, so use atomic_read */ 11424811e909SRichard Henderson return atomic_read((target_ulong *)((uintptr_t)entry + ofs)); 11434811e909SRichard Henderson #endif 11444811e909SRichard Henderson } 11454811e909SRichard Henderson 1146d9bb58e5SYang Zhong /* Return true if ADDR is present in the victim tlb, and has been copied 1147d9bb58e5SYang Zhong back 
to the main tlb. */ 1148d9bb58e5SYang Zhong static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index, 1149d9bb58e5SYang Zhong size_t elt_ofs, target_ulong page) 1150d9bb58e5SYang Zhong { 1151d9bb58e5SYang Zhong size_t vidx; 115271aec354SEmilio G. Cota 115329a0af61SRichard Henderson assert_cpu_is_self(env_cpu(env)); 1154d9bb58e5SYang Zhong for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) { 1155a40ec84eSRichard Henderson CPUTLBEntry *vtlb = &env_tlb(env)->d[mmu_idx].vtable[vidx]; 1156a40ec84eSRichard Henderson target_ulong cmp; 1157a40ec84eSRichard Henderson 1158a40ec84eSRichard Henderson /* elt_ofs might correspond to .addr_write, so use atomic_read */ 1159a40ec84eSRichard Henderson #if TCG_OVERSIZED_GUEST 1160a40ec84eSRichard Henderson cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs); 1161a40ec84eSRichard Henderson #else 1162a40ec84eSRichard Henderson cmp = atomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs)); 1163a40ec84eSRichard Henderson #endif 1164d9bb58e5SYang Zhong 1165d9bb58e5SYang Zhong if (cmp == page) { 1166d9bb58e5SYang Zhong /* Found entry in victim tlb, swap tlb and iotlb. */ 1167a40ec84eSRichard Henderson CPUTLBEntry tmptlb, *tlb = &env_tlb(env)->f[mmu_idx].table[index]; 1168d9bb58e5SYang Zhong 1169a40ec84eSRichard Henderson qemu_spin_lock(&env_tlb(env)->c.lock); 117071aec354SEmilio G. Cota copy_tlb_helper_locked(&tmptlb, tlb); 117171aec354SEmilio G. Cota copy_tlb_helper_locked(tlb, vtlb); 117271aec354SEmilio G. Cota copy_tlb_helper_locked(vtlb, &tmptlb); 1173a40ec84eSRichard Henderson qemu_spin_unlock(&env_tlb(env)->c.lock); 1174d9bb58e5SYang Zhong 1175a40ec84eSRichard Henderson CPUIOTLBEntry tmpio, *io = &env_tlb(env)->d[mmu_idx].iotlb[index]; 1176a40ec84eSRichard Henderson CPUIOTLBEntry *vio = &env_tlb(env)->d[mmu_idx].viotlb[vidx]; 1177d9bb58e5SYang Zhong tmpio = *io; *io = *vio; *vio = tmpio; 1178d9bb58e5SYang Zhong return true; 1179d9bb58e5SYang Zhong } 1180d9bb58e5SYang Zhong } 1181d9bb58e5SYang Zhong return false; 1182d9bb58e5SYang Zhong } 1183d9bb58e5SYang Zhong 1184d9bb58e5SYang Zhong /* Macro to call the above, with local variables from the use context. */ 1185d9bb58e5SYang Zhong #define VICTIM_TLB_HIT(TY, ADDR) \ 1186d9bb58e5SYang Zhong victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \ 1187d9bb58e5SYang Zhong (ADDR) & TARGET_PAGE_MASK) 1188d9bb58e5SYang Zhong 118930d7e098SRichard Henderson /* 119030d7e098SRichard Henderson * Return a ram_addr_t for the virtual address for execution. 119130d7e098SRichard Henderson * 119230d7e098SRichard Henderson * Return -1 if we can't translate and execute from an entire page 119330d7e098SRichard Henderson * of RAM. This will force us to execute by loading and translating 119430d7e098SRichard Henderson * one insn at a time, without caching. 119530d7e098SRichard Henderson * 119630d7e098SRichard Henderson * NOTE: This function will trigger an exception if the page is 119730d7e098SRichard Henderson * not executable. 1198f2553f04SKONRAD Frederic */ 11994b2190daSEmilio G. Cota tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr, 12004b2190daSEmilio G. 
Cota void **hostp) 1201f2553f04SKONRAD Frederic { 1202383beda9SRichard Henderson uintptr_t mmu_idx = cpu_mmu_index(env, true); 1203383beda9SRichard Henderson uintptr_t index = tlb_index(env, mmu_idx, addr); 1204383beda9SRichard Henderson CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); 1205f2553f04SKONRAD Frederic void *p; 1206f2553f04SKONRAD Frederic 1207383beda9SRichard Henderson if (unlikely(!tlb_hit(entry->addr_code, addr))) { 1208b493ccf1SPeter Maydell if (!VICTIM_TLB_HIT(addr_code, addr)) { 120929a0af61SRichard Henderson tlb_fill(env_cpu(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0); 12106d967cb8SEmilio G. Cota index = tlb_index(env, mmu_idx, addr); 12116d967cb8SEmilio G. Cota entry = tlb_entry(env, mmu_idx, addr); 121230d7e098SRichard Henderson 121330d7e098SRichard Henderson if (unlikely(entry->addr_code & TLB_INVALID_MASK)) { 121430d7e098SRichard Henderson /* 121530d7e098SRichard Henderson * The MMU protection covers a smaller range than a target 121630d7e098SRichard Henderson * page, so we must redo the MMU check for every insn. 121730d7e098SRichard Henderson */ 121830d7e098SRichard Henderson return -1; 121930d7e098SRichard Henderson } 122071b9a453SKONRAD Frederic } 1221383beda9SRichard Henderson assert(tlb_hit(entry->addr_code, addr)); 1222f2553f04SKONRAD Frederic } 122355df6fcfSPeter Maydell 122430d7e098SRichard Henderson if (unlikely(entry->addr_code & TLB_MMIO)) { 122530d7e098SRichard Henderson /* The region is not backed by RAM. */ 12264b2190daSEmilio G. Cota if (hostp) { 12274b2190daSEmilio G. Cota *hostp = NULL; 12284b2190daSEmilio G. Cota } 122920cb6ae4SPeter Maydell return -1; 123055df6fcfSPeter Maydell } 123155df6fcfSPeter Maydell 1232383beda9SRichard Henderson p = (void *)((uintptr_t)addr + entry->addend); 12334b2190daSEmilio G. Cota if (hostp) { 12344b2190daSEmilio G. Cota *hostp = p; 12354b2190daSEmilio G. Cota } 1236f2553f04SKONRAD Frederic return qemu_ram_addr_from_host_nofail(p); 1237f2553f04SKONRAD Frederic } 1238f2553f04SKONRAD Frederic 12394b2190daSEmilio G. Cota tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr) 12404b2190daSEmilio G. Cota { 12414b2190daSEmilio G. Cota return get_page_addr_code_hostp(env, addr, NULL); 12424b2190daSEmilio G. Cota } 12434b2190daSEmilio G. Cota 1244707526adSRichard Henderson static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size, 1245707526adSRichard Henderson CPUIOTLBEntry *iotlbentry, uintptr_t retaddr) 1246707526adSRichard Henderson { 1247707526adSRichard Henderson ram_addr_t ram_addr = mem_vaddr + iotlbentry->addr; 1248707526adSRichard Henderson 1249707526adSRichard Henderson trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size); 1250707526adSRichard Henderson 1251707526adSRichard Henderson if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) { 1252707526adSRichard Henderson struct page_collection *pages 1253707526adSRichard Henderson = page_collection_lock(ram_addr, ram_addr + size); 12545a7c27bbSRichard Henderson tb_invalidate_phys_page_fast(pages, ram_addr, size, retaddr); 1255707526adSRichard Henderson page_collection_unlock(pages); 1256707526adSRichard Henderson } 1257707526adSRichard Henderson 1258707526adSRichard Henderson /* 1259707526adSRichard Henderson * Set both VGA and migration bits for simplicity and to remove 1260707526adSRichard Henderson * the notdirty callback faster. 
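 *
 * DIRTY_MEMORY_CODE is deliberately excluded here: translated code
 * on the page was already dealt with by the invalidate call above,
 * so DIRTY_CLIENTS_NOCODE covers just the VGA and migration clients.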
1261707526adSRichard Henderson */ 1262707526adSRichard Henderson cpu_physical_memory_set_dirty_range(ram_addr, size, DIRTY_CLIENTS_NOCODE); 1263707526adSRichard Henderson 1264707526adSRichard Henderson /* We remove the notdirty callback only if the code has been flushed. */ 1265707526adSRichard Henderson if (!cpu_physical_memory_is_clean(ram_addr)) { 1266707526adSRichard Henderson trace_memory_notdirty_set_dirty(mem_vaddr); 1267707526adSRichard Henderson tlb_set_dirty(cpu, mem_vaddr); 1268707526adSRichard Henderson } 1269707526adSRichard Henderson } 1270707526adSRichard Henderson 1271069cfe77SRichard Henderson static int probe_access_internal(CPUArchState *env, target_ulong addr, 1272069cfe77SRichard Henderson int fault_size, MMUAccessType access_type, 1273069cfe77SRichard Henderson int mmu_idx, bool nonfault, 1274069cfe77SRichard Henderson void **phost, uintptr_t retaddr) 1275d9bb58e5SYang Zhong { 1276383beda9SRichard Henderson uintptr_t index = tlb_index(env, mmu_idx, addr); 1277383beda9SRichard Henderson CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); 1278069cfe77SRichard Henderson target_ulong tlb_addr, page_addr; 1279c25c283dSDavid Hildenbrand size_t elt_ofs; 1280069cfe77SRichard Henderson int flags; 1281ca86cf32SDavid Hildenbrand 1282c25c283dSDavid Hildenbrand switch (access_type) { 1283c25c283dSDavid Hildenbrand case MMU_DATA_LOAD: 1284c25c283dSDavid Hildenbrand elt_ofs = offsetof(CPUTLBEntry, addr_read); 1285c25c283dSDavid Hildenbrand break; 1286c25c283dSDavid Hildenbrand case MMU_DATA_STORE: 1287c25c283dSDavid Hildenbrand elt_ofs = offsetof(CPUTLBEntry, addr_write); 1288c25c283dSDavid Hildenbrand break; 1289c25c283dSDavid Hildenbrand case MMU_INST_FETCH: 1290c25c283dSDavid Hildenbrand elt_ofs = offsetof(CPUTLBEntry, addr_code); 1291c25c283dSDavid Hildenbrand break; 1292c25c283dSDavid Hildenbrand default: 1293c25c283dSDavid Hildenbrand g_assert_not_reached(); 1294c25c283dSDavid Hildenbrand } 1295c25c283dSDavid Hildenbrand tlb_addr = tlb_read_ofs(entry, elt_ofs); 1296c25c283dSDavid Hildenbrand 1297069cfe77SRichard Henderson page_addr = addr & TARGET_PAGE_MASK; 1298069cfe77SRichard Henderson if (!tlb_hit_page(tlb_addr, page_addr)) { 1299069cfe77SRichard Henderson if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, page_addr)) { 1300069cfe77SRichard Henderson CPUState *cs = env_cpu(env); 1301069cfe77SRichard Henderson CPUClass *cc = CPU_GET_CLASS(cs); 1302069cfe77SRichard Henderson 1303069cfe77SRichard Henderson if (!cc->tlb_fill(cs, addr, fault_size, access_type, 1304069cfe77SRichard Henderson mmu_idx, nonfault, retaddr)) { 1305069cfe77SRichard Henderson /* Non-faulting page table read failed. */ 1306069cfe77SRichard Henderson *phost = NULL; 1307069cfe77SRichard Henderson return TLB_INVALID_MASK; 1308069cfe77SRichard Henderson } 1309069cfe77SRichard Henderson 131003a98189SDavid Hildenbrand /* TLB resize via tlb_fill may have moved the entry. */ 131103a98189SDavid Hildenbrand entry = tlb_entry(env, mmu_idx, addr); 1312d9bb58e5SYang Zhong } 1313c25c283dSDavid Hildenbrand tlb_addr = tlb_read_ofs(entry, elt_ofs); 131403a98189SDavid Hildenbrand } 1315069cfe77SRichard Henderson flags = tlb_addr & TLB_FLAGS_MASK; 131603a98189SDavid Hildenbrand 1317069cfe77SRichard Henderson /* Fold all "mmio-like" bits into TLB_MMIO. This is not RAM. 
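 *
 * Callers only need to distinguish directly-addressable RAM from
 * everything that wants the slow path, so the remaining flag bits
 * (e.g. TLB_MMIO, TLB_BSWAP, TLB_DISCARD_WRITE) all collapse into
 * TLB_MMIO with a NULL host pointer.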
*/ 1318069cfe77SRichard Henderson if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY))) { 1319069cfe77SRichard Henderson *phost = NULL; 1320069cfe77SRichard Henderson return TLB_MMIO; 1321fef39ccdSDavid Hildenbrand } 1322fef39ccdSDavid Hildenbrand 1323069cfe77SRichard Henderson /* Everything else is RAM. */ 1324069cfe77SRichard Henderson *phost = (void *)((uintptr_t)addr + entry->addend); 1325069cfe77SRichard Henderson return flags; 1326069cfe77SRichard Henderson } 1327069cfe77SRichard Henderson 1328069cfe77SRichard Henderson int probe_access_flags(CPUArchState *env, target_ulong addr, 1329069cfe77SRichard Henderson MMUAccessType access_type, int mmu_idx, 1330069cfe77SRichard Henderson bool nonfault, void **phost, uintptr_t retaddr) 1331069cfe77SRichard Henderson { 1332069cfe77SRichard Henderson int flags; 1333069cfe77SRichard Henderson 1334069cfe77SRichard Henderson flags = probe_access_internal(env, addr, 0, access_type, mmu_idx, 1335069cfe77SRichard Henderson nonfault, phost, retaddr); 1336069cfe77SRichard Henderson 1337069cfe77SRichard Henderson /* Handle clean RAM pages. */ 1338069cfe77SRichard Henderson if (unlikely(flags & TLB_NOTDIRTY)) { 1339069cfe77SRichard Henderson uintptr_t index = tlb_index(env, mmu_idx, addr); 134073bc0bd4SRichard Henderson CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index]; 134173bc0bd4SRichard Henderson 1342069cfe77SRichard Henderson notdirty_write(env_cpu(env), addr, 1, iotlbentry, retaddr); 1343069cfe77SRichard Henderson flags &= ~TLB_NOTDIRTY; 1344069cfe77SRichard Henderson } 1345069cfe77SRichard Henderson 1346069cfe77SRichard Henderson return flags; 1347069cfe77SRichard Henderson } 1348069cfe77SRichard Henderson 1349069cfe77SRichard Henderson void *probe_access(CPUArchState *env, target_ulong addr, int size, 1350069cfe77SRichard Henderson MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) 1351069cfe77SRichard Henderson { 1352069cfe77SRichard Henderson void *host; 1353069cfe77SRichard Henderson int flags; 1354069cfe77SRichard Henderson 1355069cfe77SRichard Henderson g_assert(-(addr | TARGET_PAGE_MASK) >= size); 1356069cfe77SRichard Henderson 1357069cfe77SRichard Henderson flags = probe_access_internal(env, addr, size, access_type, mmu_idx, 1358069cfe77SRichard Henderson false, &host, retaddr); 1359069cfe77SRichard Henderson 1360069cfe77SRichard Henderson /* Per the interface, size == 0 merely faults the access. */ 1361069cfe77SRichard Henderson if (size == 0) { 136273bc0bd4SRichard Henderson return NULL; 136373bc0bd4SRichard Henderson } 136473bc0bd4SRichard Henderson 1365069cfe77SRichard Henderson if (unlikely(flags & (TLB_NOTDIRTY | TLB_WATCHPOINT))) { 1366069cfe77SRichard Henderson uintptr_t index = tlb_index(env, mmu_idx, addr); 1367069cfe77SRichard Henderson CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index]; 1368069cfe77SRichard Henderson 136903a98189SDavid Hildenbrand /* Handle watchpoints. */ 1370069cfe77SRichard Henderson if (flags & TLB_WATCHPOINT) { 1371069cfe77SRichard Henderson int wp_access = (access_type == MMU_DATA_STORE 1372069cfe77SRichard Henderson ? BP_MEM_WRITE : BP_MEM_READ); 137303a98189SDavid Hildenbrand cpu_check_watchpoint(env_cpu(env), addr, size, 137473bc0bd4SRichard Henderson iotlbentry->attrs, wp_access, retaddr); 1375d9bb58e5SYang Zhong } 1376fef39ccdSDavid Hildenbrand 137773bc0bd4SRichard Henderson /* Handle clean RAM pages. 
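 *
 * The page must be marked dirty now because the caller gets a raw
 * host pointer back, so no further per-access callbacks will run
 * for the stores it performs.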
*/ 1378069cfe77SRichard Henderson if (flags & TLB_NOTDIRTY) { 1379069cfe77SRichard Henderson notdirty_write(env_cpu(env), addr, 1, iotlbentry, retaddr); 138073bc0bd4SRichard Henderson } 1381fef39ccdSDavid Hildenbrand } 1382fef39ccdSDavid Hildenbrand 1383069cfe77SRichard Henderson return host; 1384d9bb58e5SYang Zhong } 1385d9bb58e5SYang Zhong 13864811e909SRichard Henderson void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr, 13874811e909SRichard Henderson MMUAccessType access_type, int mmu_idx) 13884811e909SRichard Henderson { 1389069cfe77SRichard Henderson void *host; 1390069cfe77SRichard Henderson int flags; 13914811e909SRichard Henderson 1392069cfe77SRichard Henderson flags = probe_access_internal(env, addr, 0, access_type, 1393069cfe77SRichard Henderson mmu_idx, true, &host, 0); 1394069cfe77SRichard Henderson 1395069cfe77SRichard Henderson /* No combination of flags is expected by the caller. */ 1396069cfe77SRichard Henderson return flags ? NULL : host; 13974811e909SRichard Henderson } 13984811e909SRichard Henderson 1399235537faSAlex Bennée #ifdef CONFIG_PLUGIN 1400235537faSAlex Bennée /* 1401235537faSAlex Bennée * Perform a TLB lookup and populate the qemu_plugin_hwaddr structure. 1402235537faSAlex Bennée * This should be a hot path as we will have just looked this path up 1403235537faSAlex Bennée * in the softmmu lookup code (or helper). We don't handle re-fills or 1404235537faSAlex Bennée * checking the victim table. This is purely informational. 1405235537faSAlex Bennée * 14062f3a57eeSAlex Bennée * This almost never fails as the memory access being instrumented 14072f3a57eeSAlex Bennée * should have just filled the TLB. The one corner case is io_writex, 14082f3a57eeSAlex Bennée * which can cause TLB flushes and potential resizing of the TLBs, 1409*570ef309SAlex Bennée * losing the information we need. In those cases we need to recover 1410*570ef309SAlex Bennée * data from a copy of the iotlbentry. As long as this always occurs 1411*570ef309SAlex Bennée * from the same thread (which a mem callback will be) this is safe. 1412235537faSAlex Bennée */ 1413235537faSAlex Bennée 1414235537faSAlex Bennée bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr, int mmu_idx, 1415235537faSAlex Bennée bool is_store, struct qemu_plugin_hwaddr *data) 1416235537faSAlex Bennée { 1417235537faSAlex Bennée CPUArchState *env = cpu->env_ptr; 1418235537faSAlex Bennée CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr); 1419235537faSAlex Bennée uintptr_t index = tlb_index(env, mmu_idx, addr); 1420235537faSAlex Bennée target_ulong tlb_addr = is_store ?
tlb_addr_write(tlbe) : tlbe->addr_read; 1421235537faSAlex Bennée 1422235537faSAlex Bennée if (likely(tlb_hit(tlb_addr, addr))) { 1423235537faSAlex Bennée /* We must have an iotlb entry for MMIO */ 1424235537faSAlex Bennée if (tlb_addr & TLB_MMIO) { 1425235537faSAlex Bennée CPUIOTLBEntry *iotlbentry; 1426235537faSAlex Bennée iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index]; 1427235537faSAlex Bennée data->is_io = true; 1428235537faSAlex Bennée data->v.io.section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs); 1429235537faSAlex Bennée data->v.io.offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr; 1430235537faSAlex Bennée } else { 1431235537faSAlex Bennée data->is_io = false; 1432235537faSAlex Bennée data->v.ram.hostaddr = addr + tlbe->addend; 1433235537faSAlex Bennée } 1434235537faSAlex Bennée return true; 14352f3a57eeSAlex Bennée } else { 14362f3a57eeSAlex Bennée SavedIOTLB *saved = &cpu->saved_iotlb; 14372f3a57eeSAlex Bennée data->is_io = true; 14382f3a57eeSAlex Bennée data->v.io.section = saved->section; 14392f3a57eeSAlex Bennée data->v.io.offset = saved->mr_offset; 14402f3a57eeSAlex Bennée return true; 1441235537faSAlex Bennée } 1442235537faSAlex Bennée } 1443235537faSAlex Bennée 1444235537faSAlex Bennée #endif 1445235537faSAlex Bennée 1446d9bb58e5SYang Zhong /* Probe for a read-modify-write atomic operation. Do not allow unaligned 1447d9bb58e5SYang Zhong * operations or I/O operations to proceed. Return the host address. */ 1448d9bb58e5SYang Zhong static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, 1449707526adSRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 1450d9bb58e5SYang Zhong { 1451d9bb58e5SYang Zhong size_t mmu_idx = get_mmuidx(oi); 1452383beda9SRichard Henderson uintptr_t index = tlb_index(env, mmu_idx, addr); 1453383beda9SRichard Henderson CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr); 1454403f290cSEmilio G. Cota target_ulong tlb_addr = tlb_addr_write(tlbe); 145514776ab5STony Nguyen MemOp mop = get_memop(oi); 1456d9bb58e5SYang Zhong int a_bits = get_alignment_bits(mop); 1457d9bb58e5SYang Zhong int s_bits = mop & MO_SIZE; 145834d49937SPeter Maydell void *hostaddr; 1459d9bb58e5SYang Zhong 1460d9bb58e5SYang Zhong /* Adjust the given return address. */ 1461d9bb58e5SYang Zhong retaddr -= GETPC_ADJ; 1462d9bb58e5SYang Zhong 1463d9bb58e5SYang Zhong /* Enforce guest-required alignment. */ 1464d9bb58e5SYang Zhong if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) { 1465d9bb58e5SYang Zhong /* ??? Maybe indicate atomic op to cpu_unaligned_access */ 146629a0af61SRichard Henderson cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE, 1467d9bb58e5SYang Zhong mmu_idx, retaddr); 1468d9bb58e5SYang Zhong } 1469d9bb58e5SYang Zhong 1470d9bb58e5SYang Zhong /* Enforce qemu-required alignment. */ 1471d9bb58e5SYang Zhong if (unlikely(addr & ((1 << s_bits) - 1))) { 1472d9bb58e5SYang Zhong /* We get here if guest alignment was not requested, 1473d9bb58e5SYang Zhong or was not enforced by cpu_unaligned_access above. 1474d9bb58e5SYang Zhong We might widen the access and emulate, but for now 1475d9bb58e5SYang Zhong mark an exception and exit the cpu loop. */ 1476d9bb58e5SYang Zhong goto stop_the_world; 1477d9bb58e5SYang Zhong } 1478d9bb58e5SYang Zhong 1479d9bb58e5SYang Zhong /* Check TLB entry and enforce page permissions.
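 *
 * As in the ordinary load/store helpers, a miss consults the victim
 * TLB first; only if that also misses is tlb_fill() called, after
 * which index and tlbe must be refreshed because the fill may have
 * resized the table.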
*/ 1480334692bcSPeter Maydell if (!tlb_hit(tlb_addr, addr)) { 1481d9bb58e5SYang Zhong if (!VICTIM_TLB_HIT(addr_write, addr)) { 148229a0af61SRichard Henderson tlb_fill(env_cpu(env), addr, 1 << s_bits, MMU_DATA_STORE, 148398670d47SLaurent Vivier mmu_idx, retaddr); 14846d967cb8SEmilio G. Cota index = tlb_index(env, mmu_idx, addr); 14856d967cb8SEmilio G. Cota tlbe = tlb_entry(env, mmu_idx, addr); 1486d9bb58e5SYang Zhong } 1487403f290cSEmilio G. Cota tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK; 1488d9bb58e5SYang Zhong } 1489d9bb58e5SYang Zhong 149055df6fcfSPeter Maydell /* Notice an IO access or a needs-MMU-lookup access */ 149130d7e098SRichard Henderson if (unlikely(tlb_addr & TLB_MMIO)) { 1492d9bb58e5SYang Zhong /* There's really nothing that can be done to 1493d9bb58e5SYang Zhong support this apart from stop-the-world. */ 1494d9bb58e5SYang Zhong goto stop_the_world; 1495d9bb58e5SYang Zhong } 1496d9bb58e5SYang Zhong 1497d9bb58e5SYang Zhong /* Let the guest notice RMW on a write-only page. */ 149834d49937SPeter Maydell if (unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) { 149929a0af61SRichard Henderson tlb_fill(env_cpu(env), addr, 1 << s_bits, MMU_DATA_LOAD, 150098670d47SLaurent Vivier mmu_idx, retaddr); 1501d9bb58e5SYang Zhong /* Since we don't support reads and writes to different addresses, 1502d9bb58e5SYang Zhong and we do have the proper page loaded for write, this shouldn't 1503d9bb58e5SYang Zhong ever return. But just in case, handle via stop-the-world. */ 1504d9bb58e5SYang Zhong goto stop_the_world; 1505d9bb58e5SYang Zhong } 1506d9bb58e5SYang Zhong 150734d49937SPeter Maydell hostaddr = (void *)((uintptr_t)addr + tlbe->addend); 150834d49937SPeter Maydell 150934d49937SPeter Maydell if (unlikely(tlb_addr & TLB_NOTDIRTY)) { 1510707526adSRichard Henderson notdirty_write(env_cpu(env), addr, 1 << s_bits, 1511707526adSRichard Henderson &env_tlb(env)->d[mmu_idx].iotlb[index], retaddr); 151234d49937SPeter Maydell } 151334d49937SPeter Maydell 151434d49937SPeter Maydell return hostaddr; 1515d9bb58e5SYang Zhong 1516d9bb58e5SYang Zhong stop_the_world: 151729a0af61SRichard Henderson cpu_loop_exit_atomic(env_cpu(env), retaddr); 1518d9bb58e5SYang Zhong } 1519d9bb58e5SYang Zhong 1520eed56642SAlex Bennée /* 1521eed56642SAlex Bennée * Load Helpers 1522eed56642SAlex Bennée * 1523eed56642SAlex Bennée * We support two different access types. SOFTMMU_CODE_ACCESS is 1524eed56642SAlex Bennée * specifically for reading instructions from system memory. It is 1525eed56642SAlex Bennée * called by the translation loop and in some helpers where the code 1526eed56642SAlex Bennée * is disassembled. It shouldn't be called directly by guest code. 
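 *
 * Each size- and endian-specific entry point funnels into the common
 * load_helper() below, passing itself as the full_load callback so
 * that an access spanning two pages can recurse with the original
 * memop intact, e.g.:
 *
 *   static uint64_t full_be_lduw_mmu(CPUArchState *env, target_ulong addr,
 *                                    TCGMemOpIdx oi, uintptr_t retaddr)
 *   {
 *       return load_helper(env, addr, oi, retaddr, MO_BEUW, false,
 *                          full_be_lduw_mmu);
 *   }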
1527eed56642SAlex Bennée */ 1528d9bb58e5SYang Zhong 15292dd92606SRichard Henderson typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr, 15302dd92606SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr); 15312dd92606SRichard Henderson 1532c6b716cdSRichard Henderson static inline uint64_t QEMU_ALWAYS_INLINE 153380d9d1c6SRichard Henderson load_memop(const void *haddr, MemOp op) 153480d9d1c6SRichard Henderson { 153580d9d1c6SRichard Henderson switch (op) { 153680d9d1c6SRichard Henderson case MO_UB: 153780d9d1c6SRichard Henderson return ldub_p(haddr); 153880d9d1c6SRichard Henderson case MO_BEUW: 153980d9d1c6SRichard Henderson return lduw_be_p(haddr); 154080d9d1c6SRichard Henderson case MO_LEUW: 154180d9d1c6SRichard Henderson return lduw_le_p(haddr); 154280d9d1c6SRichard Henderson case MO_BEUL: 154380d9d1c6SRichard Henderson return (uint32_t)ldl_be_p(haddr); 154480d9d1c6SRichard Henderson case MO_LEUL: 154580d9d1c6SRichard Henderson return (uint32_t)ldl_le_p(haddr); 154680d9d1c6SRichard Henderson case MO_BEQ: 154780d9d1c6SRichard Henderson return ldq_be_p(haddr); 154880d9d1c6SRichard Henderson case MO_LEQ: 154980d9d1c6SRichard Henderson return ldq_le_p(haddr); 155080d9d1c6SRichard Henderson default: 155180d9d1c6SRichard Henderson qemu_build_not_reached(); 155280d9d1c6SRichard Henderson } 155380d9d1c6SRichard Henderson } 155480d9d1c6SRichard Henderson 155580d9d1c6SRichard Henderson static inline uint64_t QEMU_ALWAYS_INLINE 15562dd92606SRichard Henderson load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, 1557be5c4787STony Nguyen uintptr_t retaddr, MemOp op, bool code_read, 15582dd92606SRichard Henderson FullLoadHelper *full_load) 1559eed56642SAlex Bennée { 1560eed56642SAlex Bennée uintptr_t mmu_idx = get_mmuidx(oi); 1561eed56642SAlex Bennée uintptr_t index = tlb_index(env, mmu_idx, addr); 1562eed56642SAlex Bennée CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); 1563eed56642SAlex Bennée target_ulong tlb_addr = code_read ? entry->addr_code : entry->addr_read; 1564eed56642SAlex Bennée const size_t tlb_off = code_read ? 1565eed56642SAlex Bennée offsetof(CPUTLBEntry, addr_code) : offsetof(CPUTLBEntry, addr_read); 1566f1be3696SRichard Henderson const MMUAccessType access_type = 1567f1be3696SRichard Henderson code_read ? MMU_INST_FETCH : MMU_DATA_LOAD; 1568eed56642SAlex Bennée unsigned a_bits = get_alignment_bits(get_memop(oi)); 1569eed56642SAlex Bennée void *haddr; 1570eed56642SAlex Bennée uint64_t res; 1571be5c4787STony Nguyen size_t size = memop_size(op); 1572d9bb58e5SYang Zhong 1573eed56642SAlex Bennée /* Handle CPU specific unaligned behaviour */ 1574eed56642SAlex Bennée if (addr & ((1 << a_bits) - 1)) { 157529a0af61SRichard Henderson cpu_unaligned_access(env_cpu(env), addr, access_type, 1576eed56642SAlex Bennée mmu_idx, retaddr); 1577eed56642SAlex Bennée } 1578eed56642SAlex Bennée 1579eed56642SAlex Bennée /* If the TLB entry is for a different page, reload and try again. */ 1580eed56642SAlex Bennée if (!tlb_hit(tlb_addr, addr)) { 1581eed56642SAlex Bennée if (!victim_tlb_hit(env, mmu_idx, index, tlb_off, 1582eed56642SAlex Bennée addr & TARGET_PAGE_MASK)) { 158329a0af61SRichard Henderson tlb_fill(env_cpu(env), addr, size, 1584f1be3696SRichard Henderson access_type, mmu_idx, retaddr); 1585eed56642SAlex Bennée index = tlb_index(env, mmu_idx, addr); 1586eed56642SAlex Bennée entry = tlb_entry(env, mmu_idx, addr); 1587eed56642SAlex Bennée } 1588eed56642SAlex Bennée tlb_addr = code_read ? 
entry->addr_code : entry->addr_read; 158930d7e098SRichard Henderson tlb_addr &= ~TLB_INVALID_MASK; 1590eed56642SAlex Bennée } 1591eed56642SAlex Bennée 159250b107c5SRichard Henderson /* Handle anything that isn't just a straight memory access. */ 1593eed56642SAlex Bennée if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) { 159450b107c5SRichard Henderson CPUIOTLBEntry *iotlbentry; 15955b87b3e6SRichard Henderson bool need_swap; 159650b107c5SRichard Henderson 159750b107c5SRichard Henderson /* For anything that is unaligned, recurse through full_load. */ 1598eed56642SAlex Bennée if ((addr & (size - 1)) != 0) { 1599eed56642SAlex Bennée goto do_unaligned_access; 1600eed56642SAlex Bennée } 160150b107c5SRichard Henderson 160250b107c5SRichard Henderson iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index]; 160350b107c5SRichard Henderson 160450b107c5SRichard Henderson /* Handle watchpoints. */ 160550b107c5SRichard Henderson if (unlikely(tlb_addr & TLB_WATCHPOINT)) { 160650b107c5SRichard Henderson /* On watchpoint hit, this will longjmp out. */ 160750b107c5SRichard Henderson cpu_check_watchpoint(env_cpu(env), addr, size, 160850b107c5SRichard Henderson iotlbentry->attrs, BP_MEM_READ, retaddr); 16095b87b3e6SRichard Henderson } 161050b107c5SRichard Henderson 16115b87b3e6SRichard Henderson need_swap = size > 1 && (tlb_addr & TLB_BSWAP); 161250b107c5SRichard Henderson 161350b107c5SRichard Henderson /* Handle I/O access. */ 16145b87b3e6SRichard Henderson if (likely(tlb_addr & TLB_MMIO)) { 16155b87b3e6SRichard Henderson return io_readx(env, iotlbentry, mmu_idx, addr, retaddr, 16165b87b3e6SRichard Henderson access_type, op ^ (need_swap * MO_BSWAP)); 16175b87b3e6SRichard Henderson } 16185b87b3e6SRichard Henderson 16195b87b3e6SRichard Henderson haddr = (void *)((uintptr_t)addr + entry->addend); 16205b87b3e6SRichard Henderson 16215b87b3e6SRichard Henderson /* 16225b87b3e6SRichard Henderson * Keep these two load_memop separate to ensure that the compiler 16235b87b3e6SRichard Henderson * is able to fold the entire function to a single instruction. 16245b87b3e6SRichard Henderson * There is a build-time assert inside to remind you of this. ;-) 16255b87b3e6SRichard Henderson */ 16265b87b3e6SRichard Henderson if (unlikely(need_swap)) { 16275b87b3e6SRichard Henderson return load_memop(haddr, op ^ MO_BSWAP); 16285b87b3e6SRichard Henderson } 16295b87b3e6SRichard Henderson return load_memop(haddr, op); 1630eed56642SAlex Bennée } 1631eed56642SAlex Bennée 1632eed56642SAlex Bennée /* Handle slow unaligned access (it spans two pages or IO). */ 1633eed56642SAlex Bennée if (size > 1 1634eed56642SAlex Bennée && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1 1635eed56642SAlex Bennée >= TARGET_PAGE_SIZE)) { 1636eed56642SAlex Bennée target_ulong addr1, addr2; 16378c79b288SAlex Bennée uint64_t r1, r2; 1638eed56642SAlex Bennée unsigned shift; 1639eed56642SAlex Bennée do_unaligned_access: 1640ab7a2009SAlex Bennée addr1 = addr & ~((target_ulong)size - 1); 1641eed56642SAlex Bennée addr2 = addr1 + size; 16422dd92606SRichard Henderson r1 = full_load(env, addr1, oi, retaddr); 16432dd92606SRichard Henderson r2 = full_load(env, addr2, oi, retaddr); 1644eed56642SAlex Bennée shift = (addr & (size - 1)) * 8; 1645eed56642SAlex Bennée 1646be5c4787STony Nguyen if (memop_big_endian(op)) { 1647eed56642SAlex Bennée /* Big-endian combine. */ 1648eed56642SAlex Bennée res = (r1 << shift) | (r2 >> ((size * 8) - shift)); 1649eed56642SAlex Bennée } else { 1650eed56642SAlex Bennée /* Little-endian combine. 
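 *
 * For example, for a 4-byte little-endian load whose address lies
 * two bytes into its naturally-aligned quantum, shift == 16 and
 *
 *   res = (r1 >> 16) | (r2 << 16);
 *
 * takes the top two bytes of r1 as the low half of the result and
 * the bottom two bytes of r2 as the high half, before the mask
 * below narrows res to 32 bits.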
*/ 1651eed56642SAlex Bennée res = (r1 >> shift) | (r2 << ((size * 8) - shift)); 1652eed56642SAlex Bennée } 1653eed56642SAlex Bennée return res & MAKE_64BIT_MASK(0, size * 8); 1654eed56642SAlex Bennée } 1655eed56642SAlex Bennée 1656eed56642SAlex Bennée haddr = (void *)((uintptr_t)addr + entry->addend); 165780d9d1c6SRichard Henderson return load_memop(haddr, op); 1658eed56642SAlex Bennée } 1659eed56642SAlex Bennée 1660eed56642SAlex Bennée /* 1661eed56642SAlex Bennée * For the benefit of TCG generated code, we want to avoid the 1662eed56642SAlex Bennée * complication of ABI-specific return type promotion and always 1663eed56642SAlex Bennée * return a value extended to the register size of the host. This is 1664eed56642SAlex Bennée * tcg_target_long, except in the case of a 32-bit host and 64-bit 1665eed56642SAlex Bennée * data, and for that we always have uint64_t. 1666eed56642SAlex Bennée * 1667eed56642SAlex Bennée * We don't bother with this widened value for SOFTMMU_CODE_ACCESS. 1668eed56642SAlex Bennée */ 1669eed56642SAlex Bennée 16702dd92606SRichard Henderson static uint64_t full_ldub_mmu(CPUArchState *env, target_ulong addr, 16712dd92606SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 16722dd92606SRichard Henderson { 1673be5c4787STony Nguyen return load_helper(env, addr, oi, retaddr, MO_UB, false, full_ldub_mmu); 16742dd92606SRichard Henderson } 16752dd92606SRichard Henderson 1676fc1bc777SRichard Henderson tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr, 1677fc1bc777SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 1678eed56642SAlex Bennée { 16792dd92606SRichard Henderson return full_ldub_mmu(env, addr, oi, retaddr); 16802dd92606SRichard Henderson } 16812dd92606SRichard Henderson 16822dd92606SRichard Henderson static uint64_t full_le_lduw_mmu(CPUArchState *env, target_ulong addr, 16832dd92606SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 16842dd92606SRichard Henderson { 1685be5c4787STony Nguyen return load_helper(env, addr, oi, retaddr, MO_LEUW, false, 16862dd92606SRichard Henderson full_le_lduw_mmu); 1687eed56642SAlex Bennée } 1688eed56642SAlex Bennée 1689fc1bc777SRichard Henderson tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr, 1690fc1bc777SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 1691eed56642SAlex Bennée { 16922dd92606SRichard Henderson return full_le_lduw_mmu(env, addr, oi, retaddr); 16932dd92606SRichard Henderson } 16942dd92606SRichard Henderson 16952dd92606SRichard Henderson static uint64_t full_be_lduw_mmu(CPUArchState *env, target_ulong addr, 16962dd92606SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 16972dd92606SRichard Henderson { 1698be5c4787STony Nguyen return load_helper(env, addr, oi, retaddr, MO_BEUW, false, 16992dd92606SRichard Henderson full_be_lduw_mmu); 1700eed56642SAlex Bennée } 1701eed56642SAlex Bennée 1702fc1bc777SRichard Henderson tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr, 1703fc1bc777SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 1704eed56642SAlex Bennée { 17052dd92606SRichard Henderson return full_be_lduw_mmu(env, addr, oi, retaddr); 17062dd92606SRichard Henderson } 17072dd92606SRichard Henderson 17082dd92606SRichard Henderson static uint64_t full_le_ldul_mmu(CPUArchState *env, target_ulong addr, 17092dd92606SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 17102dd92606SRichard Henderson { 1711be5c4787STony Nguyen return load_helper(env, addr, oi, retaddr, MO_LEUL, false, 17122dd92606SRichard Henderson full_le_ldul_mmu); 
1713eed56642SAlex Bennée } 1714eed56642SAlex Bennée 1715fc1bc777SRichard Henderson tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr, 1716fc1bc777SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 1717eed56642SAlex Bennée { 17182dd92606SRichard Henderson return full_le_ldul_mmu(env, addr, oi, retaddr); 17192dd92606SRichard Henderson } 17202dd92606SRichard Henderson 17212dd92606SRichard Henderson static uint64_t full_be_ldul_mmu(CPUArchState *env, target_ulong addr, 17222dd92606SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 17232dd92606SRichard Henderson { 1724be5c4787STony Nguyen return load_helper(env, addr, oi, retaddr, MO_BEUL, false, 17252dd92606SRichard Henderson full_be_ldul_mmu); 1726eed56642SAlex Bennée } 1727eed56642SAlex Bennée 1728fc1bc777SRichard Henderson tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr, 1729fc1bc777SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 1730eed56642SAlex Bennée { 17312dd92606SRichard Henderson return full_be_ldul_mmu(env, addr, oi, retaddr); 1732eed56642SAlex Bennée } 1733eed56642SAlex Bennée 1734fc1bc777SRichard Henderson uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr, 1735fc1bc777SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 1736eed56642SAlex Bennée { 1737be5c4787STony Nguyen return load_helper(env, addr, oi, retaddr, MO_LEQ, false, 17382dd92606SRichard Henderson helper_le_ldq_mmu); 1739eed56642SAlex Bennée } 1740eed56642SAlex Bennée 1741fc1bc777SRichard Henderson uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr, 1742fc1bc777SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 1743eed56642SAlex Bennée { 1744be5c4787STony Nguyen return load_helper(env, addr, oi, retaddr, MO_BEQ, false, 17452dd92606SRichard Henderson helper_be_ldq_mmu); 1746eed56642SAlex Bennée } 1747eed56642SAlex Bennée 1748eed56642SAlex Bennée /* 1749eed56642SAlex Bennée * Provide signed versions of the load routines as well. We can of course 1750eed56642SAlex Bennée * avoid this for 64-bit data, or for 32-bit data on 32-bit host. 
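 *
 * The cast alone does the work: a byte of 0xff read through
 * helper_ret_ldsb_mmu becomes (int8_t)0xff == -1, which then
 * sign-extends to the full tcg_target_ulong return width.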
1751eed56642SAlex Bennée */ 1752eed56642SAlex Bennée 1753eed56642SAlex Bennée 1754eed56642SAlex Bennée tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr, 1755eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 1756eed56642SAlex Bennée { 1757eed56642SAlex Bennée return (int8_t)helper_ret_ldub_mmu(env, addr, oi, retaddr); 1758eed56642SAlex Bennée } 1759eed56642SAlex Bennée 1760eed56642SAlex Bennée tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr, 1761eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 1762eed56642SAlex Bennée { 1763eed56642SAlex Bennée return (int16_t)helper_le_lduw_mmu(env, addr, oi, retaddr); 1764eed56642SAlex Bennée } 1765eed56642SAlex Bennée 1766eed56642SAlex Bennée tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr, 1767eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 1768eed56642SAlex Bennée { 1769eed56642SAlex Bennée return (int16_t)helper_be_lduw_mmu(env, addr, oi, retaddr); 1770eed56642SAlex Bennée } 1771eed56642SAlex Bennée 1772eed56642SAlex Bennée tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr, 1773eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 1774eed56642SAlex Bennée { 1775eed56642SAlex Bennée return (int32_t)helper_le_ldul_mmu(env, addr, oi, retaddr); 1776eed56642SAlex Bennée } 1777eed56642SAlex Bennée 1778eed56642SAlex Bennée tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr, 1779eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 1780eed56642SAlex Bennée { 1781eed56642SAlex Bennée return (int32_t)helper_be_ldul_mmu(env, addr, oi, retaddr); 1782eed56642SAlex Bennée } 1783eed56642SAlex Bennée 1784eed56642SAlex Bennée /* 1785d03f1408SRichard Henderson * Load helpers for cpu_ldst.h. 
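 *
 * MO_SIGN is stripped before dispatch because the full_load routines
 * always return zero-extended data; the signed cpu_lds*_mmuidx_ra
 * wrappers below re-apply the extension with a cast, mirroring the
 * helpers above.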
1786d03f1408SRichard Henderson */ 1787d03f1408SRichard Henderson 1788d03f1408SRichard Henderson static inline uint64_t cpu_load_helper(CPUArchState *env, abi_ptr addr, 1789d03f1408SRichard Henderson int mmu_idx, uintptr_t retaddr, 1790d03f1408SRichard Henderson MemOp op, FullLoadHelper *full_load) 1791d03f1408SRichard Henderson { 1792d03f1408SRichard Henderson uint16_t meminfo; 1793d03f1408SRichard Henderson TCGMemOpIdx oi; 1794d03f1408SRichard Henderson uint64_t ret; 1795d03f1408SRichard Henderson 1796d03f1408SRichard Henderson meminfo = trace_mem_get_info(op, mmu_idx, false); 1797d03f1408SRichard Henderson trace_guest_mem_before_exec(env_cpu(env), addr, meminfo); 1798d03f1408SRichard Henderson 1799d03f1408SRichard Henderson op &= ~MO_SIGN; 1800d03f1408SRichard Henderson oi = make_memop_idx(op, mmu_idx); 1801d03f1408SRichard Henderson ret = full_load(env, addr, oi, retaddr); 1802d03f1408SRichard Henderson 1803d03f1408SRichard Henderson qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, meminfo); 1804d03f1408SRichard Henderson 1805d03f1408SRichard Henderson return ret; 1806d03f1408SRichard Henderson } 1807d03f1408SRichard Henderson 1808d03f1408SRichard Henderson uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr, 1809d03f1408SRichard Henderson int mmu_idx, uintptr_t ra) 1810d03f1408SRichard Henderson { 1811d03f1408SRichard Henderson return cpu_load_helper(env, addr, mmu_idx, ra, MO_UB, full_ldub_mmu); 1812d03f1408SRichard Henderson } 1813d03f1408SRichard Henderson 1814d03f1408SRichard Henderson int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr, 1815d03f1408SRichard Henderson int mmu_idx, uintptr_t ra) 1816d03f1408SRichard Henderson { 1817d03f1408SRichard Henderson return (int8_t)cpu_load_helper(env, addr, mmu_idx, ra, MO_SB, 1818d03f1408SRichard Henderson full_ldub_mmu); 1819d03f1408SRichard Henderson } 1820d03f1408SRichard Henderson 1821b9e60257SRichard Henderson uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, 1822d03f1408SRichard Henderson int mmu_idx, uintptr_t ra) 1823d03f1408SRichard Henderson { 1824b9e60257SRichard Henderson return cpu_load_helper(env, addr, mmu_idx, ra, MO_BEUW, full_be_lduw_mmu); 1825d03f1408SRichard Henderson } 1826d03f1408SRichard Henderson 1827b9e60257SRichard Henderson int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, 1828d03f1408SRichard Henderson int mmu_idx, uintptr_t ra) 1829d03f1408SRichard Henderson { 1830b9e60257SRichard Henderson return (int16_t)cpu_load_helper(env, addr, mmu_idx, ra, MO_BESW, 1831b9e60257SRichard Henderson full_be_lduw_mmu); 1832d03f1408SRichard Henderson } 1833d03f1408SRichard Henderson 1834b9e60257SRichard Henderson uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, 1835d03f1408SRichard Henderson int mmu_idx, uintptr_t ra) 1836d03f1408SRichard Henderson { 1837b9e60257SRichard Henderson return cpu_load_helper(env, addr, mmu_idx, ra, MO_BEUL, full_be_ldul_mmu); 1838d03f1408SRichard Henderson } 1839d03f1408SRichard Henderson 1840b9e60257SRichard Henderson uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, 1841d03f1408SRichard Henderson int mmu_idx, uintptr_t ra) 1842d03f1408SRichard Henderson { 1843b9e60257SRichard Henderson return cpu_load_helper(env, addr, mmu_idx, ra, MO_BEQ, helper_be_ldq_mmu); 1844b9e60257SRichard Henderson } 1845b9e60257SRichard Henderson 1846b9e60257SRichard Henderson uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, 1847b9e60257SRichard Henderson int mmu_idx, uintptr_t ra) 1848b9e60257SRichard Henderson { 1849b9e60257SRichard Henderson return 
cpu_load_helper(env, addr, mmu_idx, ra, MO_LEUW, full_le_lduw_mmu); 1850b9e60257SRichard Henderson } 1851b9e60257SRichard Henderson 1852b9e60257SRichard Henderson int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, 1853b9e60257SRichard Henderson int mmu_idx, uintptr_t ra) 1854b9e60257SRichard Henderson { 1855b9e60257SRichard Henderson return (int16_t)cpu_load_helper(env, addr, mmu_idx, ra, MO_LESW, 1856b9e60257SRichard Henderson full_le_lduw_mmu); 1857b9e60257SRichard Henderson } 1858b9e60257SRichard Henderson 1859b9e60257SRichard Henderson uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, 1860b9e60257SRichard Henderson int mmu_idx, uintptr_t ra) 1861b9e60257SRichard Henderson { 1862b9e60257SRichard Henderson return cpu_load_helper(env, addr, mmu_idx, ra, MO_LEUL, full_le_ldul_mmu); 1863b9e60257SRichard Henderson } 1864b9e60257SRichard Henderson 1865b9e60257SRichard Henderson uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, 1866b9e60257SRichard Henderson int mmu_idx, uintptr_t ra) 1867b9e60257SRichard Henderson { 1868b9e60257SRichard Henderson return cpu_load_helper(env, addr, mmu_idx, ra, MO_LEQ, helper_le_ldq_mmu); 1869d03f1408SRichard Henderson } 1870d03f1408SRichard Henderson 1871cfe04a4bSRichard Henderson uint32_t cpu_ldub_data_ra(CPUArchState *env, target_ulong ptr, 1872cfe04a4bSRichard Henderson uintptr_t retaddr) 1873cfe04a4bSRichard Henderson { 1874cfe04a4bSRichard Henderson return cpu_ldub_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr); 1875cfe04a4bSRichard Henderson } 1876cfe04a4bSRichard Henderson 1877cfe04a4bSRichard Henderson int cpu_ldsb_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr) 1878cfe04a4bSRichard Henderson { 1879cfe04a4bSRichard Henderson return cpu_ldsb_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr); 1880cfe04a4bSRichard Henderson } 1881cfe04a4bSRichard Henderson 1882b9e60257SRichard Henderson uint32_t cpu_lduw_be_data_ra(CPUArchState *env, target_ulong ptr, 1883cfe04a4bSRichard Henderson uintptr_t retaddr) 1884cfe04a4bSRichard Henderson { 1885b9e60257SRichard Henderson return cpu_lduw_be_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr); 1886cfe04a4bSRichard Henderson } 1887cfe04a4bSRichard Henderson 1888b9e60257SRichard Henderson int cpu_ldsw_be_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr) 1889cfe04a4bSRichard Henderson { 1890b9e60257SRichard Henderson return cpu_ldsw_be_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr); 1891cfe04a4bSRichard Henderson } 1892cfe04a4bSRichard Henderson 1893b9e60257SRichard Henderson uint32_t cpu_ldl_be_data_ra(CPUArchState *env, target_ulong ptr, 1894b9e60257SRichard Henderson uintptr_t retaddr) 1895cfe04a4bSRichard Henderson { 1896b9e60257SRichard Henderson return cpu_ldl_be_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr); 1897cfe04a4bSRichard Henderson } 1898cfe04a4bSRichard Henderson 1899b9e60257SRichard Henderson uint64_t cpu_ldq_be_data_ra(CPUArchState *env, target_ulong ptr, 1900b9e60257SRichard Henderson uintptr_t retaddr) 1901cfe04a4bSRichard Henderson { 1902b9e60257SRichard Henderson return cpu_ldq_be_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr); 1903b9e60257SRichard Henderson } 1904b9e60257SRichard Henderson 1905b9e60257SRichard Henderson uint32_t cpu_lduw_le_data_ra(CPUArchState *env, target_ulong ptr, 1906b9e60257SRichard Henderson uintptr_t retaddr) 1907b9e60257SRichard Henderson { 1908b9e60257SRichard Henderson return cpu_lduw_le_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr); 
1909b9e60257SRichard Henderson } 1910b9e60257SRichard Henderson 1911b9e60257SRichard Henderson int cpu_ldsw_le_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr) 1912b9e60257SRichard Henderson { 1913b9e60257SRichard Henderson return cpu_ldsw_le_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr); 1914b9e60257SRichard Henderson } 1915b9e60257SRichard Henderson 1916b9e60257SRichard Henderson uint32_t cpu_ldl_le_data_ra(CPUArchState *env, target_ulong ptr, 1917b9e60257SRichard Henderson uintptr_t retaddr) 1918b9e60257SRichard Henderson { 1919b9e60257SRichard Henderson return cpu_ldl_le_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr); 1920b9e60257SRichard Henderson } 1921b9e60257SRichard Henderson 1922b9e60257SRichard Henderson uint64_t cpu_ldq_le_data_ra(CPUArchState *env, target_ulong ptr, 1923b9e60257SRichard Henderson uintptr_t retaddr) 1924b9e60257SRichard Henderson { 1925b9e60257SRichard Henderson return cpu_ldq_le_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr); 1926cfe04a4bSRichard Henderson } 1927cfe04a4bSRichard Henderson 1928cfe04a4bSRichard Henderson uint32_t cpu_ldub_data(CPUArchState *env, target_ulong ptr) 1929cfe04a4bSRichard Henderson { 1930cfe04a4bSRichard Henderson return cpu_ldub_data_ra(env, ptr, 0); 1931cfe04a4bSRichard Henderson } 1932cfe04a4bSRichard Henderson 1933cfe04a4bSRichard Henderson int cpu_ldsb_data(CPUArchState *env, target_ulong ptr) 1934cfe04a4bSRichard Henderson { 1935cfe04a4bSRichard Henderson return cpu_ldsb_data_ra(env, ptr, 0); 1936cfe04a4bSRichard Henderson } 1937cfe04a4bSRichard Henderson 1938b9e60257SRichard Henderson uint32_t cpu_lduw_be_data(CPUArchState *env, target_ulong ptr) 1939cfe04a4bSRichard Henderson { 1940b9e60257SRichard Henderson return cpu_lduw_be_data_ra(env, ptr, 0); 1941cfe04a4bSRichard Henderson } 1942cfe04a4bSRichard Henderson 1943b9e60257SRichard Henderson int cpu_ldsw_be_data(CPUArchState *env, target_ulong ptr) 1944cfe04a4bSRichard Henderson { 1945b9e60257SRichard Henderson return cpu_ldsw_be_data_ra(env, ptr, 0); 1946cfe04a4bSRichard Henderson } 1947cfe04a4bSRichard Henderson 1948b9e60257SRichard Henderson uint32_t cpu_ldl_be_data(CPUArchState *env, target_ulong ptr) 1949cfe04a4bSRichard Henderson { 1950b9e60257SRichard Henderson return cpu_ldl_be_data_ra(env, ptr, 0); 1951cfe04a4bSRichard Henderson } 1952cfe04a4bSRichard Henderson 1953b9e60257SRichard Henderson uint64_t cpu_ldq_be_data(CPUArchState *env, target_ulong ptr) 1954cfe04a4bSRichard Henderson { 1955b9e60257SRichard Henderson return cpu_ldq_be_data_ra(env, ptr, 0); 1956b9e60257SRichard Henderson } 1957b9e60257SRichard Henderson 1958b9e60257SRichard Henderson uint32_t cpu_lduw_le_data(CPUArchState *env, target_ulong ptr) 1959b9e60257SRichard Henderson { 1960b9e60257SRichard Henderson return cpu_lduw_le_data_ra(env, ptr, 0); 1961b9e60257SRichard Henderson } 1962b9e60257SRichard Henderson 1963b9e60257SRichard Henderson int cpu_ldsw_le_data(CPUArchState *env, target_ulong ptr) 1964b9e60257SRichard Henderson { 1965b9e60257SRichard Henderson return cpu_ldsw_le_data_ra(env, ptr, 0); 1966b9e60257SRichard Henderson } 1967b9e60257SRichard Henderson 1968b9e60257SRichard Henderson uint32_t cpu_ldl_le_data(CPUArchState *env, target_ulong ptr) 1969b9e60257SRichard Henderson { 1970b9e60257SRichard Henderson return cpu_ldl_le_data_ra(env, ptr, 0); 1971b9e60257SRichard Henderson } 1972b9e60257SRichard Henderson 1973b9e60257SRichard Henderson uint64_t cpu_ldq_le_data(CPUArchState *env, target_ulong ptr) 1974b9e60257SRichard Henderson { 
1975b9e60257SRichard Henderson return cpu_ldq_le_data_ra(env, ptr, 0); 1976cfe04a4bSRichard Henderson } 1977cfe04a4bSRichard Henderson 1978d03f1408SRichard Henderson /* 1979eed56642SAlex Bennée * Store Helpers 1980eed56642SAlex Bennée */ 1981eed56642SAlex Bennée 1982c6b716cdSRichard Henderson static inline void QEMU_ALWAYS_INLINE 198380d9d1c6SRichard Henderson store_memop(void *haddr, uint64_t val, MemOp op) 198480d9d1c6SRichard Henderson { 198580d9d1c6SRichard Henderson switch (op) { 198680d9d1c6SRichard Henderson case MO_UB: 198780d9d1c6SRichard Henderson stb_p(haddr, val); 198880d9d1c6SRichard Henderson break; 198980d9d1c6SRichard Henderson case MO_BEUW: 199080d9d1c6SRichard Henderson stw_be_p(haddr, val); 199180d9d1c6SRichard Henderson break; 199280d9d1c6SRichard Henderson case MO_LEUW: 199380d9d1c6SRichard Henderson stw_le_p(haddr, val); 199480d9d1c6SRichard Henderson break; 199580d9d1c6SRichard Henderson case MO_BEUL: 199680d9d1c6SRichard Henderson stl_be_p(haddr, val); 199780d9d1c6SRichard Henderson break; 199880d9d1c6SRichard Henderson case MO_LEUL: 199980d9d1c6SRichard Henderson stl_le_p(haddr, val); 200080d9d1c6SRichard Henderson break; 200180d9d1c6SRichard Henderson case MO_BEQ: 200280d9d1c6SRichard Henderson stq_be_p(haddr, val); 200380d9d1c6SRichard Henderson break; 200480d9d1c6SRichard Henderson case MO_LEQ: 200580d9d1c6SRichard Henderson stq_le_p(haddr, val); 200680d9d1c6SRichard Henderson break; 200780d9d1c6SRichard Henderson default: 200880d9d1c6SRichard Henderson qemu_build_not_reached(); 200980d9d1c6SRichard Henderson } 201080d9d1c6SRichard Henderson } 201180d9d1c6SRichard Henderson 201280d9d1c6SRichard Henderson static inline void QEMU_ALWAYS_INLINE 20134601f8d1SRichard Henderson store_helper(CPUArchState *env, target_ulong addr, uint64_t val, 2014be5c4787STony Nguyen TCGMemOpIdx oi, uintptr_t retaddr, MemOp op) 2015eed56642SAlex Bennée { 2016eed56642SAlex Bennée uintptr_t mmu_idx = get_mmuidx(oi); 2017eed56642SAlex Bennée uintptr_t index = tlb_index(env, mmu_idx, addr); 2018eed56642SAlex Bennée CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); 2019eed56642SAlex Bennée target_ulong tlb_addr = tlb_addr_write(entry); 2020eed56642SAlex Bennée const size_t tlb_off = offsetof(CPUTLBEntry, addr_write); 2021eed56642SAlex Bennée unsigned a_bits = get_alignment_bits(get_memop(oi)); 2022eed56642SAlex Bennée void *haddr; 2023be5c4787STony Nguyen size_t size = memop_size(op); 2024eed56642SAlex Bennée 2025eed56642SAlex Bennée /* Handle CPU specific unaligned behaviour */ 2026eed56642SAlex Bennée if (addr & ((1 << a_bits) - 1)) { 202729a0af61SRichard Henderson cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE, 2028eed56642SAlex Bennée mmu_idx, retaddr); 2029eed56642SAlex Bennée } 2030eed56642SAlex Bennée 2031eed56642SAlex Bennée /* If the TLB entry is for a different page, reload and try again. */ 2032eed56642SAlex Bennée if (!tlb_hit(tlb_addr, addr)) { 2033eed56642SAlex Bennée if (!victim_tlb_hit(env, mmu_idx, index, tlb_off, 2034eed56642SAlex Bennée addr & TARGET_PAGE_MASK)) { 203529a0af61SRichard Henderson tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE, 2036eed56642SAlex Bennée mmu_idx, retaddr); 2037eed56642SAlex Bennée index = tlb_index(env, mmu_idx, addr); 2038eed56642SAlex Bennée entry = tlb_entry(env, mmu_idx, addr); 2039eed56642SAlex Bennée } 2040eed56642SAlex Bennée tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK; 2041eed56642SAlex Bennée } 2042eed56642SAlex Bennée 204350b107c5SRichard Henderson /* Handle anything that isn't just a straight memory access. 
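 *
 * TLB_BSWAP marks pages that must be accessed with the opposite byte
 * order; since MO_LE and MO_BE differ only in the MO_BSWAP bit,
 * "op ^ (need_swap * MO_BSWAP)" turns e.g. MO_LEUL into MO_BEUL and
 * folds the swap into a single store_memop or io_writex call.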
*/ 2044eed56642SAlex Bennée if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) { 204550b107c5SRichard Henderson CPUIOTLBEntry *iotlbentry; 20465b87b3e6SRichard Henderson bool need_swap; 204750b107c5SRichard Henderson 204850b107c5SRichard Henderson /* For anything that is unaligned, recurse through byte stores. */ 2049eed56642SAlex Bennée if ((addr & (size - 1)) != 0) { 2050eed56642SAlex Bennée goto do_unaligned_access; 2051eed56642SAlex Bennée } 205250b107c5SRichard Henderson 205350b107c5SRichard Henderson iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index]; 205450b107c5SRichard Henderson 205550b107c5SRichard Henderson /* Handle watchpoints. */ 205650b107c5SRichard Henderson if (unlikely(tlb_addr & TLB_WATCHPOINT)) { 205750b107c5SRichard Henderson /* On watchpoint hit, this will longjmp out. */ 205850b107c5SRichard Henderson cpu_check_watchpoint(env_cpu(env), addr, size, 205950b107c5SRichard Henderson iotlbentry->attrs, BP_MEM_WRITE, retaddr); 20605b87b3e6SRichard Henderson } 206150b107c5SRichard Henderson 20625b87b3e6SRichard Henderson need_swap = size > 1 && (tlb_addr & TLB_BSWAP); 206350b107c5SRichard Henderson 206450b107c5SRichard Henderson /* Handle I/O access. */ 206508565552SRichard Henderson if (tlb_addr & TLB_MMIO) { 20665b87b3e6SRichard Henderson io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr, 20675b87b3e6SRichard Henderson op ^ (need_swap * MO_BSWAP)); 20685b87b3e6SRichard Henderson return; 20695b87b3e6SRichard Henderson } 20705b87b3e6SRichard Henderson 20717b0d792cSRichard Henderson /* Ignore writes to ROM. */ 20727b0d792cSRichard Henderson if (unlikely(tlb_addr & TLB_DISCARD_WRITE)) { 20737b0d792cSRichard Henderson return; 20747b0d792cSRichard Henderson } 20757b0d792cSRichard Henderson 207608565552SRichard Henderson /* Handle clean RAM pages. */ 207708565552SRichard Henderson if (tlb_addr & TLB_NOTDIRTY) { 2078707526adSRichard Henderson notdirty_write(env_cpu(env), addr, size, iotlbentry, retaddr); 207908565552SRichard Henderson } 208008565552SRichard Henderson 2081707526adSRichard Henderson haddr = (void *)((uintptr_t)addr + entry->addend); 208208565552SRichard Henderson 20835b87b3e6SRichard Henderson /* 20845b87b3e6SRichard Henderson * Keep these two store_memop separate to ensure that the compiler 20855b87b3e6SRichard Henderson * is able to fold the entire function to a single instruction. 20865b87b3e6SRichard Henderson * There is a build-time assert inside to remind you of this. ;-) 20875b87b3e6SRichard Henderson */ 20885b87b3e6SRichard Henderson if (unlikely(need_swap)) { 20895b87b3e6SRichard Henderson store_memop(haddr, val, op ^ MO_BSWAP); 20905b87b3e6SRichard Henderson } else { 20915b87b3e6SRichard Henderson store_memop(haddr, val, op); 20925b87b3e6SRichard Henderson } 2093eed56642SAlex Bennée return; 2094eed56642SAlex Bennée } 2095eed56642SAlex Bennée 2096eed56642SAlex Bennée /* Handle slow unaligned access (it spans two pages or IO). */ 2097eed56642SAlex Bennée if (size > 1 2098eed56642SAlex Bennée && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1 2099eed56642SAlex Bennée >= TARGET_PAGE_SIZE)) { 2100eed56642SAlex Bennée int i; 2101eed56642SAlex Bennée uintptr_t index2; 2102eed56642SAlex Bennée CPUTLBEntry *entry2; 2103eed56642SAlex Bennée target_ulong page2, tlb_addr2; 21048f7cd2adSRichard Henderson size_t size2; 21058f7cd2adSRichard Henderson 2106eed56642SAlex Bennée do_unaligned_access: 2107eed56642SAlex Bennée /* 2108eed56642SAlex Bennée * Ensure the second page is in the TLB. 
2107eed56642SAlex Bennée         /*
2108eed56642SAlex Bennée          * Ensure the second page is in the TLB.  Note that the first page
2109eed56642SAlex Bennée          * is already guaranteed to be filled, and that the second page
2110eed56642SAlex Bennée          * cannot evict the first.
2111eed56642SAlex Bennée          */
2112eed56642SAlex Bennée         page2 = (addr + size) & TARGET_PAGE_MASK;
21138f7cd2adSRichard Henderson         size2 = (addr + size) & ~TARGET_PAGE_MASK;
2114eed56642SAlex Bennée         index2 = tlb_index(env, mmu_idx, page2);
2115eed56642SAlex Bennée         entry2 = tlb_entry(env, mmu_idx, page2);
2116eed56642SAlex Bennée         tlb_addr2 = tlb_addr_write(entry2);
211750b107c5SRichard Henderson         if (!tlb_hit_page(tlb_addr2, page2)) {
211850b107c5SRichard Henderson             if (!victim_tlb_hit(env, mmu_idx, index2, tlb_off, page2)) {
21198f7cd2adSRichard Henderson                 tlb_fill(env_cpu(env), page2, size2, MMU_DATA_STORE,
2120eed56642SAlex Bennée                          mmu_idx, retaddr);
212150b107c5SRichard Henderson                 index2 = tlb_index(env, mmu_idx, page2);
212250b107c5SRichard Henderson                 entry2 = tlb_entry(env, mmu_idx, page2);
212350b107c5SRichard Henderson             }
212450b107c5SRichard Henderson             tlb_addr2 = tlb_addr_write(entry2);
212550b107c5SRichard Henderson         }
212650b107c5SRichard Henderson 
212750b107c5SRichard Henderson         /*
212850b107c5SRichard Henderson          * Handle watchpoints.  Since this may trap, all checks
212950b107c5SRichard Henderson          * must happen before any store.
213050b107c5SRichard Henderson          */
213150b107c5SRichard Henderson         if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
213250b107c5SRichard Henderson             cpu_check_watchpoint(env_cpu(env), addr, size - size2,
213350b107c5SRichard Henderson                                  env_tlb(env)->d[mmu_idx].iotlb[index].attrs,
213450b107c5SRichard Henderson                                  BP_MEM_WRITE, retaddr);
213550b107c5SRichard Henderson         }
213650b107c5SRichard Henderson         if (unlikely(tlb_addr2 & TLB_WATCHPOINT)) {
213750b107c5SRichard Henderson             cpu_check_watchpoint(env_cpu(env), page2, size2,
213850b107c5SRichard Henderson                                  env_tlb(env)->d[mmu_idx].iotlb[index2].attrs,
213950b107c5SRichard Henderson                                  BP_MEM_WRITE, retaddr);
2140eed56642SAlex Bennée         }
2141eed56642SAlex Bennée 
2142eed56642SAlex Bennée         /*
2143eed56642SAlex Bennée          * XXX: not efficient, but simple.
2144eed56642SAlex Bennée          * This loop must go in the forward direction to avoid issues
2145eed56642SAlex Bennée          * with self-modifying code in Windows 64-bit.
2146eed56642SAlex Bennée          */
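        /*
         * Editor's illustration: for a 4-byte big-endian store of
         * val = 0x11223344, the extract below produces 0x11 0x22 0x33 0x44
         * at addr + 0..3; the little-endian case produces 0x44 0x33 0x22 0x11.
         */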
2147eed56642SAlex Bennée         for (i = 0; i < size; ++i) {
2148eed56642SAlex Bennée             uint8_t val8;
2149be5c4787STony Nguyen             if (memop_big_endian(op)) {
2150eed56642SAlex Bennée                 /* Big-endian extract.  */
2151eed56642SAlex Bennée                 val8 = val >> (((size - 1) * 8) - (i * 8));
2152eed56642SAlex Bennée             } else {
2153eed56642SAlex Bennée                 /* Little-endian extract.  */
2154eed56642SAlex Bennée                 val8 = val >> (i * 8);
2155eed56642SAlex Bennée             }
21564601f8d1SRichard Henderson             helper_ret_stb_mmu(env, addr + i, val8, oi, retaddr);
2157eed56642SAlex Bennée         }
2158eed56642SAlex Bennée         return;
2159eed56642SAlex Bennée     }
2160eed56642SAlex Bennée 
2161eed56642SAlex Bennée     haddr = (void *)((uintptr_t)addr + entry->addend);
216280d9d1c6SRichard Henderson     store_memop(haddr, val, op);
2163eed56642SAlex Bennée }
2164eed56642SAlex Bennée 
2165fc1bc777SRichard Henderson void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
2166eed56642SAlex Bennée                         TCGMemOpIdx oi, uintptr_t retaddr)
2167eed56642SAlex Bennée {
2168be5c4787STony Nguyen     store_helper(env, addr, val, oi, retaddr, MO_UB);
2169eed56642SAlex Bennée }
2170eed56642SAlex Bennée 
2171fc1bc777SRichard Henderson void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
2172eed56642SAlex Bennée                        TCGMemOpIdx oi, uintptr_t retaddr)
2173eed56642SAlex Bennée {
2174be5c4787STony Nguyen     store_helper(env, addr, val, oi, retaddr, MO_LEUW);
2175eed56642SAlex Bennée }
2176eed56642SAlex Bennée 
2177fc1bc777SRichard Henderson void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
2178eed56642SAlex Bennée                        TCGMemOpIdx oi, uintptr_t retaddr)
2179eed56642SAlex Bennée {
2180be5c4787STony Nguyen     store_helper(env, addr, val, oi, retaddr, MO_BEUW);
2181eed56642SAlex Bennée }
2182eed56642SAlex Bennée 
2183fc1bc777SRichard Henderson void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
2184eed56642SAlex Bennée                        TCGMemOpIdx oi, uintptr_t retaddr)
2185eed56642SAlex Bennée {
2186be5c4787STony Nguyen     store_helper(env, addr, val, oi, retaddr, MO_LEUL);
2187eed56642SAlex Bennée }
2188eed56642SAlex Bennée 
2189fc1bc777SRichard Henderson void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
2190eed56642SAlex Bennée                        TCGMemOpIdx oi, uintptr_t retaddr)
2191eed56642SAlex Bennée {
2192be5c4787STony Nguyen     store_helper(env, addr, val, oi, retaddr, MO_BEUL);
2193eed56642SAlex Bennée }
2194eed56642SAlex Bennée 
2195fc1bc777SRichard Henderson void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
2196eed56642SAlex Bennée                        TCGMemOpIdx oi, uintptr_t retaddr)
2197eed56642SAlex Bennée {
2198be5c4787STony Nguyen     store_helper(env, addr, val, oi, retaddr, MO_LEQ);
2199eed56642SAlex Bennée }
2200eed56642SAlex Bennée 
2201fc1bc777SRichard Henderson void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
2202eed56642SAlex Bennée                        TCGMemOpIdx oi, uintptr_t retaddr)
2203eed56642SAlex Bennée {
2204be5c4787STony Nguyen     store_helper(env, addr, val, oi, retaddr, MO_BEQ);
2205eed56642SAlex Bennée }
2206d9bb58e5SYang Zhong 
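/*
 * Editor's sketch (illustration only, not part of the original file):
 * a direct caller of the helpers above builds the TCGMemOpIdx itself
 * with make_memop_idx().  The function name here is hypothetical.
 */
#if 0
static void example_stl_le(CPUArchState *env, target_ulong addr,
                           uint32_t val, int mmu_idx, uintptr_t retaddr)
{
    TCGMemOpIdx oi = make_memop_idx(MO_LEUL, mmu_idx);

    helper_le_stl_mmu(env, addr, val, oi, retaddr);
}
#endif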
2207d03f1408SRichard Henderson /*
2208d03f1408SRichard Henderson  * Store Helpers for cpu_ldst.h
2209d03f1408SRichard Henderson  */
2210d03f1408SRichard Henderson 
2211d03f1408SRichard Henderson static inline void QEMU_ALWAYS_INLINE
2212d03f1408SRichard Henderson cpu_store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
2213d03f1408SRichard Henderson                  int mmu_idx, uintptr_t retaddr, MemOp op)
2214d03f1408SRichard Henderson {
2215d03f1408SRichard Henderson     TCGMemOpIdx oi;
2216d03f1408SRichard Henderson     uint16_t meminfo;
2217d03f1408SRichard Henderson 
2218d03f1408SRichard Henderson     meminfo = trace_mem_get_info(op, mmu_idx, true);
2219d03f1408SRichard Henderson     trace_guest_mem_before_exec(env_cpu(env), addr, meminfo);
2220d03f1408SRichard Henderson 
2221d03f1408SRichard Henderson     oi = make_memop_idx(op, mmu_idx);
2222d03f1408SRichard Henderson     store_helper(env, addr, val, oi, retaddr, op);
2223d03f1408SRichard Henderson 
2224d03f1408SRichard Henderson     qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, meminfo);
2225d03f1408SRichard Henderson }
2226d03f1408SRichard Henderson 
2227d03f1408SRichard Henderson void cpu_stb_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
2228d03f1408SRichard Henderson                        int mmu_idx, uintptr_t retaddr)
2229d03f1408SRichard Henderson {
2230d03f1408SRichard Henderson     cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_UB);
2231d03f1408SRichard Henderson }
2232d03f1408SRichard Henderson 
2233b9e60257SRichard Henderson void cpu_stw_be_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
2234d03f1408SRichard Henderson                           int mmu_idx, uintptr_t retaddr)
2235d03f1408SRichard Henderson {
2236b9e60257SRichard Henderson     cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_BEUW);
2237d03f1408SRichard Henderson }
2238d03f1408SRichard Henderson 
2239b9e60257SRichard Henderson void cpu_stl_be_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
2240d03f1408SRichard Henderson                           int mmu_idx, uintptr_t retaddr)
2241d03f1408SRichard Henderson {
2242b9e60257SRichard Henderson     cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_BEUL);
2243d03f1408SRichard Henderson }
2244d03f1408SRichard Henderson 
2245b9e60257SRichard Henderson void cpu_stq_be_mmuidx_ra(CPUArchState *env, target_ulong addr, uint64_t val,
2246d03f1408SRichard Henderson                           int mmu_idx, uintptr_t retaddr)
2247d03f1408SRichard Henderson {
2248b9e60257SRichard Henderson     cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_BEQ);
2249b9e60257SRichard Henderson }
2250b9e60257SRichard Henderson 
2251b9e60257SRichard Henderson void cpu_stw_le_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
2252b9e60257SRichard Henderson                           int mmu_idx, uintptr_t retaddr)
2253b9e60257SRichard Henderson {
2254b9e60257SRichard Henderson     cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_LEUW);
2255b9e60257SRichard Henderson }
2256b9e60257SRichard Henderson 
2257b9e60257SRichard Henderson void cpu_stl_le_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
2258b9e60257SRichard Henderson                           int mmu_idx, uintptr_t retaddr)
2259b9e60257SRichard Henderson {
2260b9e60257SRichard Henderson     cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_LEUL);
2261b9e60257SRichard Henderson }
2262b9e60257SRichard Henderson 
2263b9e60257SRichard Henderson void cpu_stq_le_mmuidx_ra(CPUArchState *env, target_ulong addr, uint64_t val,
2264b9e60257SRichard Henderson                           int mmu_idx, uintptr_t retaddr)
2265b9e60257SRichard Henderson {
2266b9e60257SRichard Henderson     cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_LEQ);
2267d03f1408SRichard Henderson }
2268d03f1408SRichard Henderson 
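/*
 * Editor's sketch (hypothetical, not part of the original file): a target
 * helper would normally pass GETPC() as retaddr so that a TLB fault can
 * unwind to the guest instruction that caused it.
 */
#if 0
void helper_example_store(CPUArchState *env, target_ulong addr, uint32_t val)
{
    cpu_stl_le_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), GETPC());
}
#endif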
2269cfe04a4bSRichard Henderson void cpu_stb_data_ra(CPUArchState *env, target_ulong ptr,
2270cfe04a4bSRichard Henderson                      uint32_t val, uintptr_t retaddr)
2271cfe04a4bSRichard Henderson {
2272cfe04a4bSRichard Henderson     cpu_stb_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
2273cfe04a4bSRichard Henderson }
2274cfe04a4bSRichard Henderson 
2275b9e60257SRichard Henderson void cpu_stw_be_data_ra(CPUArchState *env, target_ulong ptr,
2276cfe04a4bSRichard Henderson                         uint32_t val, uintptr_t retaddr)
2277cfe04a4bSRichard Henderson {
2278b9e60257SRichard Henderson     cpu_stw_be_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
2279cfe04a4bSRichard Henderson }
2280cfe04a4bSRichard Henderson 
2281b9e60257SRichard Henderson void cpu_stl_be_data_ra(CPUArchState *env, target_ulong ptr,
2282cfe04a4bSRichard Henderson                         uint32_t val, uintptr_t retaddr)
2283cfe04a4bSRichard Henderson {
2284b9e60257SRichard Henderson     cpu_stl_be_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
2285cfe04a4bSRichard Henderson }
2286cfe04a4bSRichard Henderson 
2287b9e60257SRichard Henderson void cpu_stq_be_data_ra(CPUArchState *env, target_ulong ptr,
2288cfe04a4bSRichard Henderson                         uint64_t val, uintptr_t retaddr)
2289cfe04a4bSRichard Henderson {
2290b9e60257SRichard Henderson     cpu_stq_be_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
2291b9e60257SRichard Henderson }
2292b9e60257SRichard Henderson 
2293b9e60257SRichard Henderson void cpu_stw_le_data_ra(CPUArchState *env, target_ulong ptr,
2294b9e60257SRichard Henderson                         uint32_t val, uintptr_t retaddr)
2295b9e60257SRichard Henderson {
2296b9e60257SRichard Henderson     cpu_stw_le_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
2297b9e60257SRichard Henderson }
2298b9e60257SRichard Henderson 
2299b9e60257SRichard Henderson void cpu_stl_le_data_ra(CPUArchState *env, target_ulong ptr,
2300b9e60257SRichard Henderson                         uint32_t val, uintptr_t retaddr)
2301b9e60257SRichard Henderson {
2302b9e60257SRichard Henderson     cpu_stl_le_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
2303b9e60257SRichard Henderson }
2304b9e60257SRichard Henderson 
2305b9e60257SRichard Henderson void cpu_stq_le_data_ra(CPUArchState *env, target_ulong ptr,
2306b9e60257SRichard Henderson                         uint64_t val, uintptr_t retaddr)
2307b9e60257SRichard Henderson {
2308b9e60257SRichard Henderson     cpu_stq_le_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
2309cfe04a4bSRichard Henderson }
2310cfe04a4bSRichard Henderson 
2311cfe04a4bSRichard Henderson void cpu_stb_data(CPUArchState *env, target_ulong ptr, uint32_t val)
2312cfe04a4bSRichard Henderson {
2313cfe04a4bSRichard Henderson     cpu_stb_data_ra(env, ptr, val, 0);
2314cfe04a4bSRichard Henderson }
2315cfe04a4bSRichard Henderson 
2316b9e60257SRichard Henderson void cpu_stw_be_data(CPUArchState *env, target_ulong ptr, uint32_t val)
2317cfe04a4bSRichard Henderson {
2318b9e60257SRichard Henderson     cpu_stw_be_data_ra(env, ptr, val, 0);
2319cfe04a4bSRichard Henderson }
2320cfe04a4bSRichard Henderson 
2321b9e60257SRichard Henderson void cpu_stl_be_data(CPUArchState *env, target_ulong ptr, uint32_t val)
2322cfe04a4bSRichard Henderson {
2323b9e60257SRichard Henderson     cpu_stl_be_data_ra(env, ptr, val, 0);
2324cfe04a4bSRichard Henderson }
2325cfe04a4bSRichard Henderson 
2326b9e60257SRichard Henderson void cpu_stq_be_data(CPUArchState *env, target_ulong ptr, uint64_t val)
2327cfe04a4bSRichard Henderson {
2328b9e60257SRichard Henderson     cpu_stq_be_data_ra(env, ptr, val, 0);
2329b9e60257SRichard Henderson }
2330b9e60257SRichard Henderson 
2331b9e60257SRichard Henderson void cpu_stw_le_data(CPUArchState *env, target_ulong ptr, uint32_t val)
2332b9e60257SRichard Henderson {
2333b9e60257SRichard Henderson     cpu_stw_le_data_ra(env, ptr, val, 0);
2334b9e60257SRichard Henderson }
2335b9e60257SRichard Henderson 
2336b9e60257SRichard Henderson void cpu_stl_le_data(CPUArchState *env, target_ulong ptr, uint32_t val)
2337b9e60257SRichard Henderson {
2338b9e60257SRichard Henderson     cpu_stl_le_data_ra(env, ptr, val, 0);
2339b9e60257SRichard Henderson }
2340b9e60257SRichard Henderson 
2341b9e60257SRichard Henderson void cpu_stq_le_data(CPUArchState *env, target_ulong ptr, uint64_t val)
2342b9e60257SRichard Henderson {
2343b9e60257SRichard Henderson     cpu_stq_le_data_ra(env, ptr, val, 0);
2344cfe04a4bSRichard Henderson }
2345cfe04a4bSRichard Henderson 
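/*
 * Editor's note (not part of the original file): the *_data variants
 * above pass retaddr == 0, the conventional value when the access does
 * not originate from translated code and there is thus no host return
 * address to unwind from on a TLB fault.
 */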
2346d9bb58e5SYang Zhong /* First set of helpers allows passing in of OI and RETADDR.  This makes
2347d9bb58e5SYang Zhong    them callable from other helpers.  */
2348d9bb58e5SYang Zhong 
2349d9bb58e5SYang Zhong #define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr
2350d9bb58e5SYang Zhong #define ATOMIC_NAME(X) \
2351d9bb58e5SYang Zhong     HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
2352707526adSRichard Henderson #define ATOMIC_MMU_DECLS
2353707526adSRichard Henderson #define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, retaddr)
2354707526adSRichard Henderson #define ATOMIC_MMU_CLEANUP
2355504f73f7SAlex Bennée #define ATOMIC_MMU_IDX   get_mmuidx(oi)
2356d9bb58e5SYang Zhong 
2357cfec3885SEmilio G. Cota #include "atomic_common.inc.c"
2358d9bb58e5SYang Zhong 
2359d9bb58e5SYang Zhong #define DATA_SIZE 1
2360d9bb58e5SYang Zhong #include "atomic_template.h"
2361d9bb58e5SYang Zhong 
2362d9bb58e5SYang Zhong #define DATA_SIZE 2
2363d9bb58e5SYang Zhong #include "atomic_template.h"
2364d9bb58e5SYang Zhong 
2365d9bb58e5SYang Zhong #define DATA_SIZE 4
2366d9bb58e5SYang Zhong #include "atomic_template.h"
2367d9bb58e5SYang Zhong 
2368d9bb58e5SYang Zhong #ifdef CONFIG_ATOMIC64
2369d9bb58e5SYang Zhong #define DATA_SIZE 8
2370d9bb58e5SYang Zhong #include "atomic_template.h"
2371d9bb58e5SYang Zhong #endif
2372d9bb58e5SYang Zhong 
2373e6cd4bb5SRichard Henderson #if HAVE_CMPXCHG128 || HAVE_ATOMIC128
2374d9bb58e5SYang Zhong #define DATA_SIZE 16
2375d9bb58e5SYang Zhong #include "atomic_template.h"
2376d9bb58e5SYang Zhong #endif
2377d9bb58e5SYang Zhong 
2378d9bb58e5SYang Zhong /* Second set of helpers are directly callable from TCG as helpers.  */
2379d9bb58e5SYang Zhong 
2380d9bb58e5SYang Zhong #undef EXTRA_ARGS
2381d9bb58e5SYang Zhong #undef ATOMIC_NAME
2382d9bb58e5SYang Zhong #undef ATOMIC_MMU_LOOKUP
2383d9bb58e5SYang Zhong #define EXTRA_ARGS         , TCGMemOpIdx oi
2384d9bb58e5SYang Zhong #define ATOMIC_NAME(X)     HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
2385707526adSRichard Henderson #define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, GETPC())
2386d9bb58e5SYang Zhong 
2387d9bb58e5SYang Zhong #define DATA_SIZE 1
2388d9bb58e5SYang Zhong #include "atomic_template.h"
2389d9bb58e5SYang Zhong 
2390d9bb58e5SYang Zhong #define DATA_SIZE 2
2391d9bb58e5SYang Zhong #include "atomic_template.h"
2392d9bb58e5SYang Zhong 
2393d9bb58e5SYang Zhong #define DATA_SIZE 4
2394d9bb58e5SYang Zhong #include "atomic_template.h"
2395d9bb58e5SYang Zhong 
2396d9bb58e5SYang Zhong #ifdef CONFIG_ATOMIC64
2397d9bb58e5SYang Zhong #define DATA_SIZE 8
2398d9bb58e5SYang Zhong #include "atomic_template.h"
2399d9bb58e5SYang Zhong #endif
2400504f73f7SAlex Bennée #undef ATOMIC_MMU_IDX
2401d9bb58e5SYang Zhong 
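/*
 * Editor's sketch (illustration only): the DATA_SIZE/#include idiom above
 * is a preprocessor "template".  A header written for the pattern consumes
 * DATA_SIZE and undefines it so that it can be included repeatedly.  All
 * names below are hypothetical.
 */
#if 0
/* example_template.h */
#define EXAMPLE_NAME glue(example_op_, DATA_SIZE)
static inline void EXAMPLE_NAME(void *p)
{
    /* ... operate on DATA_SIZE bytes at p ... */
}
#undef EXAMPLE_NAME
#undef DATA_SIZE
#endif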
2402d9bb58e5SYang Zhong /* Code access functions.  */
2403d9bb58e5SYang Zhong 
2404fc4120a3SRichard Henderson static uint64_t full_ldub_code(CPUArchState *env, target_ulong addr,
24052dd92606SRichard Henderson                                TCGMemOpIdx oi, uintptr_t retaddr)
24062dd92606SRichard Henderson {
2407fc4120a3SRichard Henderson     return load_helper(env, addr, oi, retaddr, MO_8, true, full_ldub_code);
24082dd92606SRichard Henderson }
24092dd92606SRichard Henderson 
2410fc4120a3SRichard Henderson uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr)
2411eed56642SAlex Bennée {
2412fc4120a3SRichard Henderson     TCGMemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(env, true));
2413fc4120a3SRichard Henderson     return full_ldub_code(env, addr, oi, 0);
24142dd92606SRichard Henderson }
24152dd92606SRichard Henderson 
2416fc4120a3SRichard Henderson static uint64_t full_lduw_code(CPUArchState *env, target_ulong addr,
24174cef72d0SAlex Bennée                                TCGMemOpIdx oi, uintptr_t retaddr)
24184cef72d0SAlex Bennée {
2419fc4120a3SRichard Henderson     return load_helper(env, addr, oi, retaddr, MO_TEUW, true, full_lduw_code);
24204cef72d0SAlex Bennée }
24214cef72d0SAlex Bennée 
2422fc4120a3SRichard Henderson uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr)
24232dd92606SRichard Henderson {
2424fc4120a3SRichard Henderson     TCGMemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(env, true));
2425fc4120a3SRichard Henderson     return full_lduw_code(env, addr, oi, 0);
2426eed56642SAlex Bennée }
2427d9bb58e5SYang Zhong 
2428fc4120a3SRichard Henderson static uint64_t full_ldl_code(CPUArchState *env, target_ulong addr,
2429fc1bc777SRichard Henderson                               TCGMemOpIdx oi, uintptr_t retaddr)
2430eed56642SAlex Bennée {
2431fc4120a3SRichard Henderson     return load_helper(env, addr, oi, retaddr, MO_TEUL, true, full_ldl_code);
24322dd92606SRichard Henderson }
24332dd92606SRichard Henderson 
2434fc4120a3SRichard Henderson uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr)
24354cef72d0SAlex Bennée {
2436fc4120a3SRichard Henderson     TCGMemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, true));
2437fc4120a3SRichard Henderson     return full_ldl_code(env, addr, oi, 0);
24384cef72d0SAlex Bennée }
24394cef72d0SAlex Bennée 
2440fc4120a3SRichard Henderson static uint64_t full_ldq_code(CPUArchState *env, target_ulong addr,
24412dd92606SRichard Henderson                               TCGMemOpIdx oi, uintptr_t retaddr)
24422dd92606SRichard Henderson {
2443fc4120a3SRichard Henderson     return load_helper(env, addr, oi, retaddr, MO_TEQ, true, full_ldq_code);
2444eed56642SAlex Bennée }
2445d9bb58e5SYang Zhong 
2446fc4120a3SRichard Henderson uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr)
2447eed56642SAlex Bennée {
2448fc4120a3SRichard Henderson     TCGMemOpIdx oi = make_memop_idx(MO_TEQ, cpu_mmu_index(env, true));
2449fc4120a3SRichard Henderson     return full_ldq_code(env, addr, oi, 0);
2450eed56642SAlex Bennée }
2451
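/*
 * Editor's sketch (hypothetical): a target front end fetches opcodes
 * through the code access functions above, e.g. one 16-bit,
 * target-endian instruction unit at the current program counter:
 */
#if 0
static uint32_t example_fetch16(CPUArchState *env, target_ulong pc)
{
    return cpu_lduw_code(env, pc);
}
#endif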