/*
 * Common CPU TLB handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
#include "qemu/error-report.h"
#include "exec/log.h"
#include "exec/helper-proto.h"
#include "qemu/atomic.h"
#include "qemu/atomic128.h"
#include "translate-all.h"
#include "trace-root.h"
#include "trace/mem.h"
#ifdef CONFIG_PLUGIN
#include "qemu/plugin-memory.h"
#endif

/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif

#define tlb_debug(fmt, ...) do {                                  \
    if (DEBUG_TLB_LOG_GATE) {                                     \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__,          \
                      ## __VA_ARGS__);                            \
    } else if (DEBUG_TLB_GATE) {                                  \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__);    \
    }                                                             \
} while (0)
#define assert_cpu_is_self(cpu) do {                              \
        if (DEBUG_TLB_GATE) {                                     \
            g_assert(!(cpu)->created || qemu_cpu_is_self(cpu));   \
        }                                                         \
    } while (0)

/* run_on_cpu_data.target_ptr should always be big enough for a
 * target_ulong even on 32 bit builds */
QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));

/* We currently can't handle more than 16 bits in the MMUIDX bitmask.
 */
QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
#define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)

static inline size_t tlb_n_entries(CPUTLBDescFast *fast)
{
    return (fast->mask >> CPU_TLB_ENTRY_BITS) + 1;
}

static inline size_t sizeof_tlb(CPUTLBDescFast *fast)
{
    return fast->mask + (1 << CPU_TLB_ENTRY_BITS);
}

static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
                             size_t max_entries)
{
    desc->window_begin_ns = ns;
    desc->window_max_entries = max_entries;
}

static void tlb_dyn_init(CPUArchState *env)
{
    int i;

    for (i = 0; i < NB_MMU_MODES; i++) {
        CPUTLBDesc *desc = &env_tlb(env)->d[i];
        size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;

        tlb_window_reset(desc, get_clock_realtime(), 0);
        desc->n_used_entries = 0;
        env_tlb(env)->f[i].mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
        env_tlb(env)->f[i].table = g_new(CPUTLBEntry, n_entries);
        env_tlb(env)->d[i].iotlb = g_new(CPUIOTLBEntry, n_entries);
    }
}
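/*
 * Sizing sketch (illustrative numbers, not taken from any particular
 * build): with CPU_TLB_DYN_DEFAULT_BITS == 8 each mode starts with 256
 * entries, and with a 32-byte CPUTLBEntry (CPU_TLB_ENTRY_BITS == 5) the
 * stored mask is (256 - 1) << 5 == 0x1fe0, so tlb_index() can derive a
 * slot with just a shift and an AND:
 *
 *     index = (addr >> TARGET_PAGE_BITS) & (mask >> CPU_TLB_ENTRY_BITS);
 */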
/**
 * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
 * @env: CPU that owns the TLB
 * @mmu_idx: MMU index of the TLB
 *
 * Called with tlb_c.lock held.
 *
 * We have two main constraints when resizing a TLB: (1) we only resize it
 * on a TLB flush (otherwise we'd have to take a perf hit by either rehashing
 * the array or unnecessarily flushing it), which means we do not control how
 * frequently the resizing can occur; (2) we don't have access to the guest's
 * future scheduling decisions, and therefore have to decide the magnitude of
 * the resize based on past observations.
 *
 * In general, a memory-hungry process can benefit greatly from an
 * appropriately sized TLB, since a guest TLB miss is very expensive. This
 * doesn't mean that we just have to make the TLB as large as possible;
 * while an oversized TLB results in minimal TLB miss rates, it also takes
 * longer to be flushed (flushes can be _very_ frequent), and the reduced
 * locality can also hurt performance.
 *
 * To achieve near-optimal performance for all kinds of workloads, we:
 *
 * 1. Aggressively increase the size of the TLB when the use rate of the
 * TLB being flushed is high, since it is likely that in the near future this
 * memory-hungry process will execute again, and its memory hungriness will
 * probably be similar.
 *
 * 2. Slowly reduce the size of the TLB as the use rate declines over a
 * reasonably large time window. The rationale is that if in such a time window
 * we have not observed a high TLB use rate, it is likely that we won't observe
 * it in the near future. In that case, once a time window expires we downsize
 * the TLB to match the maximum use rate observed in the window.
 *
 * 3. Try to keep the maximum use rate in a time window in the 30-70% range,
 * since in that range performance is likely near-optimal. Recall that the TLB
 * is direct mapped, so we want the use rate to be low (or at least not too
 * high), since otherwise we are likely to have a significant amount of
 * conflict misses.
 */
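/*
 * Worked example of the policy above (numbers are illustrative): with
 * old_size == 1024, observing window_max_entries == 768 gives a use
 * rate of 75%, so the TLB doubles to 2048 entries on this flush.  If
 * instead the window expires with window_max_entries == 200, the rate
 * is 19% and pow2ceil(200) == 256; since 200/256 is ~78% > 70%, ceil
 * is doubled and the TLB shrinks to 512 entries rather than 256.
 */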
static void tlb_mmu_resize_locked(CPUArchState *env, int mmu_idx)
{
    CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
    size_t old_size = tlb_n_entries(&env_tlb(env)->f[mmu_idx]);
    size_t rate;
    size_t new_size = old_size;
    int64_t now = get_clock_realtime();
    int64_t window_len_ms = 100;
    int64_t window_len_ns = window_len_ms * 1000 * 1000;
    bool window_expired = now > desc->window_begin_ns + window_len_ns;

    if (desc->n_used_entries > desc->window_max_entries) {
        desc->window_max_entries = desc->n_used_entries;
    }
    rate = desc->window_max_entries * 100 / old_size;

    if (rate > 70) {
        new_size = MIN(old_size << 1, 1 << CPU_TLB_DYN_MAX_BITS);
    } else if (rate < 30 && window_expired) {
        size_t ceil = pow2ceil(desc->window_max_entries);
        size_t expected_rate = desc->window_max_entries * 100 / ceil;

        /*
         * Avoid undersizing when the max number of entries seen is just below
         * a pow2. For instance, if max_entries == 1025, the expected use rate
         * would be 1025/2048==50%. However, if max_entries == 1023, we'd get
         * 1023/1024==99.9% use rate, so we'd likely end up doubling the size
         * later. Thus, make sure that the expected use rate remains below 70%.
         * (and since we double the size, that means the lowest rate we'd
         * expect to get is 35%, which is still in the 30-70% range where
         * we consider that the size is appropriate.)
         */
        if (expected_rate > 70) {
            ceil *= 2;
        }
        new_size = MAX(ceil, 1 << CPU_TLB_DYN_MIN_BITS);
    }

    if (new_size == old_size) {
        if (window_expired) {
            tlb_window_reset(desc, now, desc->n_used_entries);
        }
        return;
    }

    g_free(env_tlb(env)->f[mmu_idx].table);
    g_free(env_tlb(env)->d[mmu_idx].iotlb);

    tlb_window_reset(desc, now, 0);
    /* desc->n_used_entries is cleared by the caller */
    env_tlb(env)->f[mmu_idx].mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
    env_tlb(env)->f[mmu_idx].table = g_try_new(CPUTLBEntry, new_size);
    env_tlb(env)->d[mmu_idx].iotlb = g_try_new(CPUIOTLBEntry, new_size);
    /*
     * If the allocations fail, try smaller sizes. We just freed some
     * memory, so going back to half of new_size has a good chance of working.
     * Increased memory pressure elsewhere in the system might cause the
     * allocations to fail though, so we progressively reduce the allocation
     * size, aborting if we cannot even allocate the smallest TLB we support.
     */
    while (env_tlb(env)->f[mmu_idx].table == NULL ||
           env_tlb(env)->d[mmu_idx].iotlb == NULL) {
        if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
            error_report("%s: %s", __func__, strerror(errno));
            abort();
        }
        new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS);
        env_tlb(env)->f[mmu_idx].mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;

        g_free(env_tlb(env)->f[mmu_idx].table);
        g_free(env_tlb(env)->d[mmu_idx].iotlb);
        env_tlb(env)->f[mmu_idx].table = g_try_new(CPUTLBEntry, new_size);
        env_tlb(env)->d[mmu_idx].iotlb = g_try_new(CPUIOTLBEntry, new_size);
    }
}
static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
{
    tlb_mmu_resize_locked(env, mmu_idx);
    env_tlb(env)->d[mmu_idx].n_used_entries = 0;
    env_tlb(env)->d[mmu_idx].large_page_addr = -1;
    env_tlb(env)->d[mmu_idx].large_page_mask = -1;
    env_tlb(env)->d[mmu_idx].vindex = 0;
    memset(env_tlb(env)->f[mmu_idx].table, -1,
           sizeof_tlb(&env_tlb(env)->f[mmu_idx]));
    memset(env_tlb(env)->d[mmu_idx].vtable, -1,
           sizeof(env_tlb(env)->d[0].vtable));
}

static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
{
    env_tlb(env)->d[mmu_idx].n_used_entries++;
}

static inline void tlb_n_used_entries_dec(CPUArchState *env, uintptr_t mmu_idx)
{
    env_tlb(env)->d[mmu_idx].n_used_entries--;
}

void tlb_init(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;

    qemu_spin_init(&env_tlb(env)->c.lock);

    /* Ensure that cpu_reset performs a full flush.  */
    env_tlb(env)->c.dirty = ALL_MMUIDX_BITS;

    tlb_dyn_init(env);
}

/* flush_all_helper: run fn asynchronously on all cpus other than src
 *
 * Callers that need a synchronisation point additionally queue their
 * own helper on src as "safe" work, so that all queued work is finished
 * before execution starts again.
 */
static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
                             run_on_cpu_data d)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, fn, d);
        }
    }
}

void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
{
    CPUState *cpu;
    size_t full = 0, part = 0, elide = 0;

    CPU_FOREACH(cpu) {
        CPUArchState *env = cpu->env_ptr;

        full += atomic_read(&env_tlb(env)->c.full_flush_count);
        part += atomic_read(&env_tlb(env)->c.part_flush_count);
        elide += atomic_read(&env_tlb(env)->c.elide_flush_count);
    }
    *pfull = full;
    *ppart = part;
    *pelide = elide;
}
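/*
 * Usage sketch: a consumer such as the "info jit" monitor dump can
 * aggregate the per-vCPU flush statistics in one call:
 *
 *     size_t full, part, elide;
 *     tlb_flush_counts(&full, &part, &elide);
 */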
static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    uint16_t asked = data.host_int;
    uint16_t all_dirty, work, to_clean;

    assert_cpu_is_self(cpu);

    tlb_debug("mmu_idx:0x%04" PRIx16 "\n", asked);

    qemu_spin_lock(&env_tlb(env)->c.lock);

    all_dirty = env_tlb(env)->c.dirty;
    to_clean = asked & all_dirty;
    all_dirty &= ~to_clean;
    env_tlb(env)->c.dirty = all_dirty;

    for (work = to_clean; work != 0; work &= work - 1) {
        int mmu_idx = ctz32(work);
        tlb_flush_one_mmuidx_locked(env, mmu_idx);
    }

    qemu_spin_unlock(&env_tlb(env)->c.lock);

    cpu_tb_jmp_cache_clear(cpu);

    if (to_clean == ALL_MMUIDX_BITS) {
        atomic_set(&env_tlb(env)->c.full_flush_count,
                   env_tlb(env)->c.full_flush_count + 1);
    } else {
        atomic_set(&env_tlb(env)->c.part_flush_count,
                   env_tlb(env)->c.part_flush_count + ctpop16(to_clean));
        if (to_clean != asked) {
            atomic_set(&env_tlb(env)->c.elide_flush_count,
                       env_tlb(env)->c.elide_flush_count +
                       ctpop16(asked & ~to_clean));
        }
    }
}

void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
    tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);

    if (cpu->created && !qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
                         RUN_ON_CPU_HOST_INT(idxmap));
    } else {
        tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap));
    }
}

void tlb_flush(CPUState *cpu)
{
    tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS);
}
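/*
 * Usage sketch (hypothetical target code): flush only MMU indexes 0
 * and 2, leaving entries cached for the other translation regimes:
 *
 *     tlb_flush_by_mmuidx(cpu, (1 << 0) | (1 << 2));
 */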
void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus(src_cpu, ALL_MMUIDX_BITS);
}

void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS);
}
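/*
 * The _synced variants above differ from the plain _all_cpus ones only
 * in how the source vCPU's own flush is run: it is queued with
 * async_safe_run_on_cpu(), so by the time the source executes guest
 * code again every vCPU, itself included, has completed the flush.
 * The plain variants run the source's flush inline and make no such
 * guarantee about the remote vCPUs.
 */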
static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry,
                                        target_ulong page)
{
    return tlb_hit_page(tlb_entry->addr_read, page) ||
           tlb_hit_page(tlb_addr_write(tlb_entry), page) ||
           tlb_hit_page(tlb_entry->addr_code, page);
}

/**
 * tlb_entry_is_empty - return true if the entry is not in use
 * @te: pointer to CPUTLBEntry
 */
static inline bool tlb_entry_is_empty(const CPUTLBEntry *te)
{
    return te->addr_read == -1 && te->addr_write == -1 && te->addr_code == -1;
}

/* Called with tlb_c.lock held */
static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry,
                                          target_ulong page)
{
    if (tlb_hit_page_anyprot(tlb_entry, page)) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
        return true;
    }
    return false;
}

/* Called with tlb_c.lock held */
static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx,
                                              target_ulong page)
{
    CPUTLBDesc *d = &env_tlb(env)->d[mmu_idx];
    int k;

    assert_cpu_is_self(env_cpu(env));
    for (k = 0; k < CPU_VTLB_SIZE; k++) {
        if (tlb_flush_entry_locked(&d->vtable[k], page)) {
            tlb_n_used_entries_dec(env, mmu_idx);
        }
    }
}

static void tlb_flush_page_locked(CPUArchState *env, int midx,
                                  target_ulong page)
{
    target_ulong lp_addr = env_tlb(env)->d[midx].large_page_addr;
    target_ulong lp_mask = env_tlb(env)->d[midx].large_page_mask;

    /* Check if we need to flush due to large pages.  */
    if ((page & lp_mask) == lp_addr) {
        tlb_debug("forcing full flush midx %d ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  midx, lp_addr, lp_mask);
        tlb_flush_one_mmuidx_locked(env, midx);
    } else {
        if (tlb_flush_entry_locked(tlb_entry(env, midx, page), page)) {
            tlb_n_used_entries_dec(env, midx);
        }
        tlb_flush_vtlb_page_locked(env, midx, page);
    }
}

/**
 * tlb_flush_page_by_mmuidx_async_0:
 * @cpu: cpu on which to flush
 * @addr: page of virtual address to flush
 * @idxmap: set of mmu_idx to flush
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, flush one page
 * at @addr from the tlbs indicated by @idxmap from @cpu.
 */
static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu,
                                             target_ulong addr,
                                             uint16_t idxmap)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("page addr:" TARGET_FMT_lx " mmu_map:0x%x\n", addr, idxmap);

    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if ((idxmap >> mmu_idx) & 1) {
            tlb_flush_page_locked(env, mmu_idx, addr);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);

    tb_flush_jmp_cache(cpu, addr);
}

/**
 * tlb_flush_page_by_mmuidx_async_1:
 * @cpu: cpu on which to flush
 * @data: encoded addr + idxmap
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, called through
 * async_run_on_cpu.  The idxmap parameter is encoded in the page
 * offset of the target_ptr field.  This limits the set of mmu_idx
 * that can be passed via this method.
 */
static void tlb_flush_page_by_mmuidx_async_1(CPUState *cpu,
                                             run_on_cpu_data data)
{
    target_ulong addr_and_idxmap = (target_ulong) data.target_ptr;
    target_ulong addr = addr_and_idxmap & TARGET_PAGE_MASK;
    uint16_t idxmap = addr_and_idxmap & ~TARGET_PAGE_MASK;

    tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
}
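/*
 * Encoding sketch for the scheme above, assuming TARGET_PAGE_BITS == 12:
 * addr is page aligned, so its low 12 bits are zero, and an idxmap of
 * e.g. 0x005 rides in them unmodified:
 *
 *     data.target_ptr == (addr & TARGET_PAGE_MASK) | 0x005
 *
 * which async_1 splits apart again with TARGET_PAGE_MASK.  An idxmap
 * >= TARGET_PAGE_SIZE does not fit and must take the allocated-struct
 * path below.
 */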
typedef struct {
    target_ulong addr;
    uint16_t idxmap;
} TLBFlushPageByMMUIdxData;

/**
 * tlb_flush_page_by_mmuidx_async_2:
 * @cpu: cpu on which to flush
 * @data: allocated addr + idxmap
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, called through
 * async_run_on_cpu.  The addr+idxmap parameters are stored in a
 * TLBFlushPageByMMUIdxData structure that has been allocated
 * specifically for this helper.  Free the structure when done.
 */
static void tlb_flush_page_by_mmuidx_async_2(CPUState *cpu,
                                             run_on_cpu_data data)
{
    TLBFlushPageByMMUIdxData *d = data.host_ptr;

    tlb_flush_page_by_mmuidx_async_0(cpu, d->addr, d->idxmap);
    g_free(d);
}

void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
{
    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    if (qemu_cpu_is_self(cpu)) {
        tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
    } else if (idxmap < TARGET_PAGE_SIZE) {
        /*
         * Most targets have only a few mmu_idx.  In the case where
         * we can stuff idxmap into the low TARGET_PAGE_BITS, avoid
         * allocating memory for this operation.
         */
        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        TLBFlushPageByMMUIdxData *d = g_new(TLBFlushPageByMMUIdxData, 1);

        /* Otherwise allocate a structure, freed by the worker.  */
        d->addr = addr;
        d->idxmap = idxmap;
        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_2,
                         RUN_ON_CPU_HOST_PTR(d));
    }
}
void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
    tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS);
}

void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr,
                                       uint16_t idxmap)
{
    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    /*
     * Allocate memory to hold addr+idxmap only when needed.
     * See tlb_flush_page_by_mmuidx for details.
     */
    if (idxmap < TARGET_PAGE_SIZE) {
        flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        CPUState *dst_cpu;

        /* Allocate a separate data block for each destination cpu.  */
        CPU_FOREACH(dst_cpu) {
            if (dst_cpu != src_cpu) {
                TLBFlushPageByMMUIdxData *d
                    = g_new(TLBFlushPageByMMUIdxData, 1);

                d->addr = addr;
                d->idxmap = idxmap;
                async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
                                 RUN_ON_CPU_HOST_PTR(d));
            }
        }
    }

    tlb_flush_page_by_mmuidx_async_0(src_cpu, addr, idxmap);
}

void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
{
    tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS);
}
void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                              target_ulong addr,
                                              uint16_t idxmap)
{
    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    /*
     * Allocate memory to hold addr+idxmap only when needed.
     * See tlb_flush_page_by_mmuidx for details.
     */
    if (idxmap < TARGET_PAGE_SIZE) {
        flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
        async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                              RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        CPUState *dst_cpu;
        TLBFlushPageByMMUIdxData *d;

        /* Allocate a separate data block for each destination cpu.  */
        CPU_FOREACH(dst_cpu) {
            if (dst_cpu != src_cpu) {
                d = g_new(TLBFlushPageByMMUIdxData, 1);
                d->addr = addr;
                d->idxmap = idxmap;
                async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
                                 RUN_ON_CPU_HOST_PTR(d));
            }
        }

        d = g_new(TLBFlushPageByMMUIdxData, 1);
        d->addr = addr;
        d->idxmap = idxmap;
        async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_2,
                              RUN_ON_CPU_HOST_PTR(d));
    }
}

void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr)
{
    tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
                                             DIRTY_MEMORY_CODE);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
void tlb_unprotect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}


/*
 * Dirty write flag handling
 *
 * When the TCG code writes to a location it looks up the address in
 * the TLB and uses that data to compute the final address. If any of
 * the lower bits of the address are set then the slow path is forced.
 * There are a number of reasons to do this but for normal RAM the
 * most usual is detecting writes to code regions which may invalidate
 * generated code.
 *
 * Other vCPUs might be reading their TLBs during guest execution, so we update
 * te->addr_write with atomic_set. We don't need to worry about this for
 * oversized guests as MTTCG is disabled for them.
 *
 * Called with tlb_c.lock held.
 */
static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
                                         uintptr_t start, uintptr_t length)
{
    uintptr_t addr = tlb_entry->addr_write;

    if ((addr & (TLB_INVALID_MASK | TLB_MMIO |
                 TLB_DISCARD_WRITE | TLB_NOTDIRTY)) == 0) {
        addr &= TARGET_PAGE_MASK;
        addr += tlb_entry->addend;
        if ((addr - start) < length) {
#if TCG_OVERSIZED_GUEST
            tlb_entry->addr_write |= TLB_NOTDIRTY;
#else
            atomic_set(&tlb_entry->addr_write,
                       tlb_entry->addr_write | TLB_NOTDIRTY);
#endif
        }
    }
}

/*
 * Called with tlb_c.lock held.
 * Called only from the vCPU context, i.e. the TLB's owner thread.
 */
static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s)
{
    *d = *s;
}

/* This is a cross vCPU call (i.e. another vCPU resetting the flags of
 * the target vCPU).
 * We must take tlb_c.lock to avoid racing with another vCPU update. The only
 * thing actually updated is the target TLB entry ->addr_write flags.
 */
void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
{
    CPUArchState *env;

    int mmu_idx;

    env = cpu->env_ptr;
    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        unsigned int i;
        unsigned int n = tlb_n_entries(&env_tlb(env)->f[mmu_idx]);

        for (i = 0; i < n; i++) {
            tlb_reset_dirty_range_locked(&env_tlb(env)->f[mmu_idx].table[i],
                                         start1, length);
        }

        for (i = 0; i < CPU_VTLB_SIZE; i++) {
            tlb_reset_dirty_range_locked(&env_tlb(env)->d[mmu_idx].vtable[i],
                                         start1, length);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);
}
/* Called with tlb_c.lock held */
static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry,
                                         target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    vaddr &= TARGET_PAGE_MASK;
    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, vaddr), vaddr);
    }

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_set_dirty1_locked(&env_tlb(env)->d[mmu_idx].vtable[k], vaddr);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);
}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUArchState *env, int mmu_idx,
                               target_ulong vaddr, target_ulong size)
{
    target_ulong lp_addr = env_tlb(env)->d[mmu_idx].large_page_addr;
    target_ulong lp_mask = ~(size - 1);

    if (lp_addr == (target_ulong)-1) {
        /* No previous large page.  */
        lp_addr = vaddr;
    } else {
        /* Extend the existing region to include the new page.
           This is a compromise between unnecessary flushes and
           the cost of maintaining a full variable size TLB. */
        lp_mask &= env_tlb(env)->d[mmu_idx].large_page_mask;
        while (((lp_addr ^ vaddr) & lp_mask) != 0) {
            lp_mask <<= 1;
        }
    }
    env_tlb(env)->d[mmu_idx].large_page_addr = lp_addr & lp_mask;
    env_tlb(env)->d[mmu_idx].large_page_mask = lp_mask;
}
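/*
 * Worked example (illustrative addresses): if a 2MB page was recorded
 * at 0x40000000 (lp_mask 0xffe00000) and another is added at
 * 0x40400000, the two addresses differ in bit 22, so the loop widens
 * lp_mask to 0xff800000 and the recorded region becomes
 * 0x40000000-0x407fffff; flushing any page inside it then forces a
 * full flush of that mmu_idx.
 */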
/* Add a new TLB entry. At most one entry for a given virtual address
 * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
 * supplied size is only used by tlb_flush_page.
 *
 * Called from TCG-generated code, which is under an RCU read-side
 * critical section.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs, int prot,
                             int mmu_idx, target_ulong size)
{
    CPUArchState *env = cpu->env_ptr;
    CPUTLB *tlb = env_tlb(env);
    CPUTLBDesc *desc = &tlb->d[mmu_idx];
    MemoryRegionSection *section;
    unsigned int index;
    target_ulong address;
    target_ulong write_address;
    uintptr_t addend;
    CPUTLBEntry *te, tn;
    hwaddr iotlb, xlat, sz, paddr_page;
    target_ulong vaddr_page;
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    int wp_flags;
    bool is_ram, is_romd;

    assert_cpu_is_self(cpu);

    if (size <= TARGET_PAGE_SIZE) {
        sz = TARGET_PAGE_SIZE;
    } else {
        tlb_add_large_page(env, mmu_idx, vaddr, size);
        sz = size;
    }
    vaddr_page = vaddr & TARGET_PAGE_MASK;
    paddr_page = paddr & TARGET_PAGE_MASK;

    section = address_space_translate_for_iotlb(cpu, asidx, paddr_page,
                                                &xlat, &sz, attrs, &prot);
    assert(sz >= TARGET_PAGE_SIZE);

    tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
              " prot=%x idx=%d\n",
              vaddr, paddr, prot, mmu_idx);

    address = vaddr_page;
    if (size < TARGET_PAGE_SIZE) {
        /* Repeat the MMU check and TLB fill on every access.  */
        address |= TLB_INVALID_MASK;
    }
    if (attrs.byte_swap) {
        address |= TLB_BSWAP;
    }
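    /*
     * These TLB_* flag bits, like the ones added to write_address below,
     * all live below TARGET_PAGE_BITS in the entry's address field, so
     * any one of them being set makes the page-aligned fast-path
     * comparison fail and diverts the access to the slow path (see the
     * "Dirty write flag handling" comment above).
     */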
    is_ram = memory_region_is_ram(section->mr);
    is_romd = memory_region_is_romd(section->mr);

    if (is_ram || is_romd) {
        /* RAM and ROMD both have associated host memory. */
        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
    } else {
        /* I/O does not; force the host address to NULL. */
        addend = 0;
    }

    write_address = address;
    if (is_ram) {
        iotlb = memory_region_get_ram_addr(section->mr) + xlat;
        /*
         * Computing is_clean is expensive; avoid all that unless
         * the page is actually writable.
         */
        if (prot & PAGE_WRITE) {
            if (section->readonly) {
                write_address |= TLB_DISCARD_WRITE;
            } else if (cpu_physical_memory_is_clean(iotlb)) {
                write_address |= TLB_NOTDIRTY;
            }
        }
    } else {
        /* I/O or ROMD */
        iotlb = memory_region_section_get_iotlb(cpu, section) + xlat;
        /*
         * Writes to romd devices must go through MMIO to enable write.
         * Reads to romd devices go through the ram_ptr found above,
         * but of course reads to I/O must go through MMIO.
         */
        write_address |= TLB_MMIO;
        if (!is_romd) {
            address = write_address;
        }
    }

    wp_flags = cpu_watchpoint_address_matches(cpu, vaddr_page,
                                              TARGET_PAGE_SIZE);

    index = tlb_index(env, mmu_idx, vaddr_page);
    te = tlb_entry(env, mmu_idx, vaddr_page);

    /*
     * Hold the TLB lock for the rest of the function. We could acquire/release
     * the lock several times in the function, but it is faster to amortize the
     * acquisition cost by acquiring it just once. Note that this leads to
     * a longer critical section, but this is not a concern since the TLB lock
     * is unlikely to be contended.
     */
    qemu_spin_lock(&tlb->c.lock);

    /* Note that the tlb is no longer clean.  */
    tlb->c.dirty |= 1 << mmu_idx;

    /* Make sure there's no cached translation for the new page.  */
    tlb_flush_vtlb_page_locked(env, mmu_idx, vaddr_page);

    /*
     * Only evict the old entry to the victim tlb if it's for a
     * different page; otherwise just overwrite the stale data.
     */
    if (!tlb_hit_page_anyprot(te, vaddr_page) && !tlb_entry_is_empty(te)) {
        unsigned vidx = desc->vindex++ % CPU_VTLB_SIZE;
        CPUTLBEntry *tv = &desc->vtable[vidx];

        /* Evict the old entry into the victim tlb.  */
        copy_tlb_helper_locked(tv, te);
        desc->viotlb[vidx] = desc->iotlb[index];
        tlb_n_used_entries_dec(env, mmu_idx);
    }

    /* refill the tlb */
    /*
     * At this point iotlb contains a physical section number in the lower
     * TARGET_PAGE_BITS, and either
     *  + the ram_addr_t of the page base of the target RAM (RAM)
     *  + the offset within section->mr of the page base (I/O, ROMD)
     * We subtract the vaddr_page (which is page aligned and thus won't
     * disturb the low bits) to give an offset which can be added to the
     * (non-page-aligned) vaddr of the eventual memory access to get
     * the MemoryRegion offset for the access. Note that the vaddr we
     * subtract here is that of the page base, and not the same as the
     * vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
     */
    desc->iotlb[index].addr = iotlb - vaddr_page;
    desc->iotlb[index].attrs = attrs;

    /* Now calculate the new entry */
    tn.addend = addend - vaddr_page;
    if (prot & PAGE_READ) {
        tn.addr_read = address;
        if (wp_flags & BP_MEM_READ) {
            tn.addr_read |= TLB_WATCHPOINT;
        }
    } else {
        tn.addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        tn.addr_code = address;
    } else {
        tn.addr_code = -1;
    }

    tn.addr_write = -1;
    if (prot & PAGE_WRITE) {
        tn.addr_write = write_address;
        if (prot & PAGE_WRITE_INV) {
            tn.addr_write |= TLB_INVALID_MASK;
        }
        if (wp_flags & BP_MEM_WRITE) {
            tn.addr_write |= TLB_WATCHPOINT;
        }
    }

    copy_tlb_helper_locked(te, &tn);
    tlb_n_used_entries_inc(env, mmu_idx);
    qemu_spin_unlock(&tlb->c.lock);
}
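/*
 * Usage sketch (hypothetical target code): a target's tlb_fill hook
 * typically walks its page tables and, on success, installs the
 * translation before returning true:
 *
 *     tlb_set_page_with_attrs(cs, vaddr & TARGET_PAGE_MASK,
 *                             paddr & TARGET_PAGE_MASK, attrs,
 *                             prot, mmu_idx, TARGET_PAGE_SIZE);
 */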
/* Add a new TLB entry, but without specifying the memory
 * transaction attributes to be used.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
                            prot, mmu_idx, size);
}

static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    ram_addr = qemu_ram_addr_from_host(ptr);
    if (ram_addr == RAM_ADDR_INVALID) {
        error_report("Bad ram pointer %p", ptr);
        abort();
    }
    return ram_addr;
}

/*
 * Note: tlb_fill() can trigger a resize of the TLB. This means that all of the
 * caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must
 * be discarded and looked up again (e.g. via tlb_entry()).
 */
static void tlb_fill(CPUState *cpu, target_ulong addr, int size,
                     MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    bool ok;

    /*
     * This is not a probe, so only valid return is success; failure
     * should result in exception + longjmp to the cpu loop.
     */
    ok = cc->tlb_fill(cpu, addr, size, access_type, mmu_idx, false, retaddr);
    assert(ok);
}
static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                         int mmu_idx, target_ulong addr, uintptr_t retaddr,
                         MMUAccessType access_type, MemOp op)
{
    CPUState *cpu = env_cpu(env);
    hwaddr mr_offset;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    uint64_t val;
    bool locked = false;
    MemTxResult r;

    section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
    mr = section->mr;
    mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
    cpu->mem_io_pc = retaddr;
    if (!cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    if (mr->global_locking && !qemu_mutex_iothread_locked()) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    r = memory_region_dispatch_read(mr, mr_offset, &val, op, iotlbentry->attrs);
    if (r != MEMTX_OK) {
        hwaddr physaddr = mr_offset +
            section->offset_within_address_space -
            section->offset_within_region;

        cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), access_type,
                               mmu_idx, iotlbentry->attrs, r, retaddr);
    }
    if (locked) {
        qemu_mutex_unlock_iothread();
    }

    return val;
}

static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                      int mmu_idx, uint64_t val, target_ulong addr,
                      uintptr_t retaddr, MemOp op)
{
    CPUState *cpu = env_cpu(env);
    hwaddr mr_offset;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    bool locked = false;
    MemTxResult r;

    section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
    mr = section->mr;
    mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
    if (!cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }
    cpu->mem_io_pc = retaddr;

    if (mr->global_locking && !qemu_mutex_iothread_locked()) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    r = memory_region_dispatch_write(mr, mr_offset, val, op, iotlbentry->attrs);
    if (r != MEMTX_OK) {
        hwaddr physaddr = mr_offset +
            section->offset_within_address_space -
            section->offset_within_region;

        cpu_transaction_failed(cpu, physaddr, addr, memop_size(op),
                               MMU_DATA_STORE, mmu_idx, iotlbentry->attrs, r,
                               retaddr);
    }
    if (locked) {
        qemu_mutex_unlock_iothread();
    }
}
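/*
 * Note that both io_readx() and io_writex() dispatch with the iothread
 * mutex held whenever the MemoryRegion has mr->global_locking set,
 * i.e. the device model does not do its own locking; regions that
 * cleared the flag are dispatched without the BQL.
 */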
mr_offset, val, op, iotlbentry->attrs); 107804e3aabdSPeter Maydell if (r != MEMTX_OK) { 10792d54f194SPeter Maydell hwaddr physaddr = mr_offset + 10802d54f194SPeter Maydell section->offset_within_address_space - 10812d54f194SPeter Maydell section->offset_within_region; 10822d54f194SPeter Maydell 1083be5c4787STony Nguyen cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), 1084be5c4787STony Nguyen MMU_DATA_STORE, mmu_idx, iotlbentry->attrs, r, 1085be5c4787STony Nguyen retaddr); 108604e3aabdSPeter Maydell } 1087d9bb58e5SYang Zhong if (locked) { 1088d9bb58e5SYang Zhong qemu_mutex_unlock_iothread(); 1089d9bb58e5SYang Zhong } 1090d9bb58e5SYang Zhong } 1091d9bb58e5SYang Zhong 10924811e909SRichard Henderson static inline target_ulong tlb_read_ofs(CPUTLBEntry *entry, size_t ofs) 10934811e909SRichard Henderson { 10944811e909SRichard Henderson #if TCG_OVERSIZED_GUEST 10954811e909SRichard Henderson return *(target_ulong *)((uintptr_t)entry + ofs); 10964811e909SRichard Henderson #else 10974811e909SRichard Henderson /* ofs might correspond to .addr_write, so use atomic_read */ 10984811e909SRichard Henderson return atomic_read((target_ulong *)((uintptr_t)entry + ofs)); 10994811e909SRichard Henderson #endif 11004811e909SRichard Henderson } 11014811e909SRichard Henderson 1102d9bb58e5SYang Zhong /* Return true if ADDR is present in the victim tlb, and has been copied 1103d9bb58e5SYang Zhong back to the main tlb. */ 1104d9bb58e5SYang Zhong static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index, 1105d9bb58e5SYang Zhong size_t elt_ofs, target_ulong page) 1106d9bb58e5SYang Zhong { 1107d9bb58e5SYang Zhong size_t vidx; 110871aec354SEmilio G. Cota 110929a0af61SRichard Henderson assert_cpu_is_self(env_cpu(env)); 1110d9bb58e5SYang Zhong for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) { 1111a40ec84eSRichard Henderson CPUTLBEntry *vtlb = &env_tlb(env)->d[mmu_idx].vtable[vidx]; 1112a40ec84eSRichard Henderson target_ulong cmp; 1113a40ec84eSRichard Henderson 1114a40ec84eSRichard Henderson /* elt_ofs might correspond to .addr_write, so use atomic_read */ 1115a40ec84eSRichard Henderson #if TCG_OVERSIZED_GUEST 1116a40ec84eSRichard Henderson cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs); 1117a40ec84eSRichard Henderson #else 1118a40ec84eSRichard Henderson cmp = atomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs)); 1119a40ec84eSRichard Henderson #endif 1120d9bb58e5SYang Zhong 1121d9bb58e5SYang Zhong if (cmp == page) { 1122d9bb58e5SYang Zhong /* Found entry in victim tlb, swap tlb and iotlb. */ 1123a40ec84eSRichard Henderson CPUTLBEntry tmptlb, *tlb = &env_tlb(env)->f[mmu_idx].table[index]; 1124d9bb58e5SYang Zhong 1125a40ec84eSRichard Henderson qemu_spin_lock(&env_tlb(env)->c.lock); 112671aec354SEmilio G. Cota copy_tlb_helper_locked(&tmptlb, tlb); 112771aec354SEmilio G. Cota copy_tlb_helper_locked(tlb, vtlb); 112871aec354SEmilio G. Cota copy_tlb_helper_locked(vtlb, &tmptlb); 1129a40ec84eSRichard Henderson qemu_spin_unlock(&env_tlb(env)->c.lock); 1130d9bb58e5SYang Zhong 1131a40ec84eSRichard Henderson CPUIOTLBEntry tmpio, *io = &env_tlb(env)->d[mmu_idx].iotlb[index]; 1132a40ec84eSRichard Henderson CPUIOTLBEntry *vio = &env_tlb(env)->d[mmu_idx].viotlb[vidx]; 1133d9bb58e5SYang Zhong tmpio = *io; *io = *vio; *vio = tmpio; 1134d9bb58e5SYang Zhong return true; 1135d9bb58e5SYang Zhong } 1136d9bb58e5SYang Zhong } 1137d9bb58e5SYang Zhong return false; 1138d9bb58e5SYang Zhong } 1139d9bb58e5SYang Zhong 1140d9bb58e5SYang Zhong /* Macro to call the above, with local variables from the use context. 
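   The TY argument names one of the CPUTLBEntry comparators (addr_read,
   addr_write or addr_code); offsetof() turns it into the elt_ofs byte
   offset that victim_tlb_hit() expects.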
*/ 1141d9bb58e5SYang Zhong #define VICTIM_TLB_HIT(TY, ADDR) \ 1142d9bb58e5SYang Zhong victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \ 1143d9bb58e5SYang Zhong (ADDR) & TARGET_PAGE_MASK) 1144d9bb58e5SYang Zhong 114530d7e098SRichard Henderson /* 114630d7e098SRichard Henderson * Return a ram_addr_t for the virtual address for execution. 114730d7e098SRichard Henderson * 114830d7e098SRichard Henderson * Return -1 if we can't translate and execute from an entire page 114930d7e098SRichard Henderson * of RAM. This will force us to execute by loading and translating 115030d7e098SRichard Henderson * one insn at a time, without caching. 115130d7e098SRichard Henderson * 115230d7e098SRichard Henderson * NOTE: This function will trigger an exception if the page is 115330d7e098SRichard Henderson * not executable. 1154f2553f04SKONRAD Frederic */ 11554b2190daSEmilio G. Cota tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr, 11564b2190daSEmilio G. Cota void **hostp) 1157f2553f04SKONRAD Frederic { 1158383beda9SRichard Henderson uintptr_t mmu_idx = cpu_mmu_index(env, true); 1159383beda9SRichard Henderson uintptr_t index = tlb_index(env, mmu_idx, addr); 1160383beda9SRichard Henderson CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); 1161f2553f04SKONRAD Frederic void *p; 1162f2553f04SKONRAD Frederic 1163383beda9SRichard Henderson if (unlikely(!tlb_hit(entry->addr_code, addr))) { 1164b493ccf1SPeter Maydell if (!VICTIM_TLB_HIT(addr_code, addr)) { 116529a0af61SRichard Henderson tlb_fill(env_cpu(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0); 11666d967cb8SEmilio G. Cota index = tlb_index(env, mmu_idx, addr); 11676d967cb8SEmilio G. Cota entry = tlb_entry(env, mmu_idx, addr); 116830d7e098SRichard Henderson 116930d7e098SRichard Henderson if (unlikely(entry->addr_code & TLB_INVALID_MASK)) { 117030d7e098SRichard Henderson /* 117130d7e098SRichard Henderson * The MMU protection covers a smaller range than a target 117230d7e098SRichard Henderson * page, so we must redo the MMU check for every insn. 117330d7e098SRichard Henderson */ 117430d7e098SRichard Henderson return -1; 117530d7e098SRichard Henderson } 117671b9a453SKONRAD Frederic } 1177383beda9SRichard Henderson assert(tlb_hit(entry->addr_code, addr)); 1178f2553f04SKONRAD Frederic } 117955df6fcfSPeter Maydell 118030d7e098SRichard Henderson if (unlikely(entry->addr_code & TLB_MMIO)) { 118130d7e098SRichard Henderson /* The region is not backed by RAM. */ 11824b2190daSEmilio G. Cota if (hostp) { 11834b2190daSEmilio G. Cota *hostp = NULL; 11844b2190daSEmilio G. Cota } 118520cb6ae4SPeter Maydell return -1; 118655df6fcfSPeter Maydell } 118755df6fcfSPeter Maydell 1188383beda9SRichard Henderson p = (void *)((uintptr_t)addr + entry->addend); 11894b2190daSEmilio G. Cota if (hostp) { 11904b2190daSEmilio G. Cota *hostp = p; 11914b2190daSEmilio G. Cota } 1192f2553f04SKONRAD Frederic return qemu_ram_addr_from_host_nofail(p); 1193f2553f04SKONRAD Frederic } 1194f2553f04SKONRAD Frederic 11954b2190daSEmilio G. Cota tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr) 11964b2190daSEmilio G. Cota { 11974b2190daSEmilio G. Cota return get_page_addr_code_hostp(env, addr, NULL); 11984b2190daSEmilio G. Cota } 11994b2190daSEmilio G. 
Cota 1200707526adSRichard Henderson static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size, 1201707526adSRichard Henderson CPUIOTLBEntry *iotlbentry, uintptr_t retaddr) 1202707526adSRichard Henderson { 1203707526adSRichard Henderson ram_addr_t ram_addr = mem_vaddr + iotlbentry->addr; 1204707526adSRichard Henderson 1205707526adSRichard Henderson trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size); 1206707526adSRichard Henderson 1207707526adSRichard Henderson if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) { 1208707526adSRichard Henderson struct page_collection *pages 1209707526adSRichard Henderson = page_collection_lock(ram_addr, ram_addr + size); 12105a7c27bbSRichard Henderson tb_invalidate_phys_page_fast(pages, ram_addr, size, retaddr); 1211707526adSRichard Henderson page_collection_unlock(pages); 1212707526adSRichard Henderson } 1213707526adSRichard Henderson 1214707526adSRichard Henderson /* 1215707526adSRichard Henderson * Set both VGA and migration bits for simplicity and to remove 1216707526adSRichard Henderson * the notdirty callback faster. 1217707526adSRichard Henderson */ 1218707526adSRichard Henderson cpu_physical_memory_set_dirty_range(ram_addr, size, DIRTY_CLIENTS_NOCODE); 1219707526adSRichard Henderson 1220707526adSRichard Henderson /* We remove the notdirty callback only if the code has been flushed. */ 1221707526adSRichard Henderson if (!cpu_physical_memory_is_clean(ram_addr)) { 1222707526adSRichard Henderson trace_memory_notdirty_set_dirty(mem_vaddr); 1223707526adSRichard Henderson tlb_set_dirty(cpu, mem_vaddr); 1224707526adSRichard Henderson } 1225707526adSRichard Henderson } 1226707526adSRichard Henderson 1227c25c283dSDavid Hildenbrand /* 1228c25c283dSDavid Hildenbrand * Probe for whether the specified guest access is permitted. If it is not 1229c25c283dSDavid Hildenbrand * permitted then an exception will be taken in the same way as if this 1230c25c283dSDavid Hildenbrand * were a real access (and we will not return). 1231fef39ccdSDavid Hildenbrand * If the size is 0 or the page requires I/O access, returns NULL; otherwise, 1232fef39ccdSDavid Hildenbrand * returns the address of the host page similar to tlb_vaddr_to_host(). 
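 *
 * A minimal usage sketch (editor's illustration; do_zero_block and its
 * arguments are hypothetical, not part of this file).  The caller is
 * responsible for keeping the access within one target page:
 *
 *     void do_zero_block(CPUArchState *env, target_ulong addr, uintptr_t ra)
 *     {
 *         void *host = probe_access(env, addr, 16, MMU_DATA_STORE,
 *                                   cpu_mmu_index(env, false), ra);
 *         if (host) {
 *             memset(host, 0, 16);
 *         } else {
 *             ... fall back to byte stores via cpu_stb_mmuidx_ra() ...
 *         }
 *     }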
1233d9bb58e5SYang Zhong */ 1234c25c283dSDavid Hildenbrand void *probe_access(CPUArchState *env, target_ulong addr, int size, 1235c25c283dSDavid Hildenbrand MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) 1236d9bb58e5SYang Zhong { 1237383beda9SRichard Henderson uintptr_t index = tlb_index(env, mmu_idx, addr); 1238383beda9SRichard Henderson CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); 1239c25c283dSDavid Hildenbrand target_ulong tlb_addr; 1240c25c283dSDavid Hildenbrand size_t elt_ofs; 1241c25c283dSDavid Hildenbrand int wp_access; 1242d9bb58e5SYang Zhong 1243ca86cf32SDavid Hildenbrand g_assert(-(addr | TARGET_PAGE_MASK) >= size); 1244ca86cf32SDavid Hildenbrand 1245c25c283dSDavid Hildenbrand switch (access_type) { 1246c25c283dSDavid Hildenbrand case MMU_DATA_LOAD: 1247c25c283dSDavid Hildenbrand elt_ofs = offsetof(CPUTLBEntry, addr_read); 1248c25c283dSDavid Hildenbrand wp_access = BP_MEM_READ; 1249c25c283dSDavid Hildenbrand break; 1250c25c283dSDavid Hildenbrand case MMU_DATA_STORE: 1251c25c283dSDavid Hildenbrand elt_ofs = offsetof(CPUTLBEntry, addr_write); 1252c25c283dSDavid Hildenbrand wp_access = BP_MEM_WRITE; 1253c25c283dSDavid Hildenbrand break; 1254c25c283dSDavid Hildenbrand case MMU_INST_FETCH: 1255c25c283dSDavid Hildenbrand elt_ofs = offsetof(CPUTLBEntry, addr_code); 1256c25c283dSDavid Hildenbrand wp_access = BP_MEM_READ; 1257c25c283dSDavid Hildenbrand break; 1258c25c283dSDavid Hildenbrand default: 1259c25c283dSDavid Hildenbrand g_assert_not_reached(); 1260c25c283dSDavid Hildenbrand } 1261c25c283dSDavid Hildenbrand tlb_addr = tlb_read_ofs(entry, elt_ofs); 1262c25c283dSDavid Hildenbrand 126303a98189SDavid Hildenbrand if (unlikely(!tlb_hit(tlb_addr, addr))) { 1264c25c283dSDavid Hildenbrand if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, 1265c25c283dSDavid Hildenbrand addr & TARGET_PAGE_MASK)) { 1266c25c283dSDavid Hildenbrand tlb_fill(env_cpu(env), addr, size, access_type, mmu_idx, retaddr); 126703a98189SDavid Hildenbrand /* TLB resize via tlb_fill may have moved the entry. */ 126803a98189SDavid Hildenbrand index = tlb_index(env, mmu_idx, addr); 126903a98189SDavid Hildenbrand entry = tlb_entry(env, mmu_idx, addr); 1270d9bb58e5SYang Zhong } 1271c25c283dSDavid Hildenbrand tlb_addr = tlb_read_ofs(entry, elt_ofs); 127203a98189SDavid Hildenbrand } 127303a98189SDavid Hildenbrand 1274fef39ccdSDavid Hildenbrand if (!size) { 1275fef39ccdSDavid Hildenbrand return NULL; 1276fef39ccdSDavid Hildenbrand } 1277fef39ccdSDavid Hildenbrand 127873bc0bd4SRichard Henderson if (unlikely(tlb_addr & TLB_FLAGS_MASK)) { 127973bc0bd4SRichard Henderson CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index]; 128073bc0bd4SRichard Henderson 128173bc0bd4SRichard Henderson /* Reject I/O access, or other required slow-path. */ 128273bc0bd4SRichard Henderson if (tlb_addr & (TLB_MMIO | TLB_BSWAP | TLB_DISCARD_WRITE)) { 128373bc0bd4SRichard Henderson return NULL; 128473bc0bd4SRichard Henderson } 128573bc0bd4SRichard Henderson 128603a98189SDavid Hildenbrand /* Handle watchpoints. */ 1287fef39ccdSDavid Hildenbrand if (tlb_addr & TLB_WATCHPOINT) { 128803a98189SDavid Hildenbrand cpu_check_watchpoint(env_cpu(env), addr, size, 128973bc0bd4SRichard Henderson iotlbentry->attrs, wp_access, retaddr); 1290d9bb58e5SYang Zhong } 1291fef39ccdSDavid Hildenbrand 129273bc0bd4SRichard Henderson /* Handle clean RAM pages. 
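   TLB_NOTDIRTY marks clean RAM: before the caller is handed a host
   pointer for a direct store, notdirty_write() runs so that cached
   translations on the page are invalidated and the dirty bitmaps
   updated.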
*/ 129373bc0bd4SRichard Henderson if (tlb_addr & TLB_NOTDIRTY) { 129473bc0bd4SRichard Henderson notdirty_write(env_cpu(env), addr, size, iotlbentry, retaddr); 129573bc0bd4SRichard Henderson } 1296fef39ccdSDavid Hildenbrand } 1297fef39ccdSDavid Hildenbrand 1298fef39ccdSDavid Hildenbrand return (void *)((uintptr_t)addr + entry->addend); 1299d9bb58e5SYang Zhong } 1300d9bb58e5SYang Zhong 13014811e909SRichard Henderson void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr, 13024811e909SRichard Henderson MMUAccessType access_type, int mmu_idx) 13034811e909SRichard Henderson { 13044811e909SRichard Henderson CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); 13057f445c8cSRichard Henderson target_ulong tlb_addr, page; 13064811e909SRichard Henderson size_t elt_ofs; 13074811e909SRichard Henderson 13084811e909SRichard Henderson switch (access_type) { 13094811e909SRichard Henderson case MMU_DATA_LOAD: 13104811e909SRichard Henderson elt_ofs = offsetof(CPUTLBEntry, addr_read); 13114811e909SRichard Henderson break; 13124811e909SRichard Henderson case MMU_DATA_STORE: 13134811e909SRichard Henderson elt_ofs = offsetof(CPUTLBEntry, addr_write); 13144811e909SRichard Henderson break; 13154811e909SRichard Henderson case MMU_INST_FETCH: 13164811e909SRichard Henderson elt_ofs = offsetof(CPUTLBEntry, addr_code); 13174811e909SRichard Henderson break; 13184811e909SRichard Henderson default: 13194811e909SRichard Henderson g_assert_not_reached(); 13204811e909SRichard Henderson } 13214811e909SRichard Henderson 13224811e909SRichard Henderson page = addr & TARGET_PAGE_MASK; 13234811e909SRichard Henderson tlb_addr = tlb_read_ofs(entry, elt_ofs); 13244811e909SRichard Henderson 13254811e909SRichard Henderson if (!tlb_hit_page(tlb_addr, page)) { 13264811e909SRichard Henderson uintptr_t index = tlb_index(env, mmu_idx, addr); 13274811e909SRichard Henderson 13284811e909SRichard Henderson if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, page)) { 132929a0af61SRichard Henderson CPUState *cs = env_cpu(env); 13304811e909SRichard Henderson CPUClass *cc = CPU_GET_CLASS(cs); 13314811e909SRichard Henderson 13324811e909SRichard Henderson if (!cc->tlb_fill(cs, addr, 0, access_type, mmu_idx, true, 0)) { 13334811e909SRichard Henderson /* Non-faulting page table read failed. */ 13344811e909SRichard Henderson return NULL; 13354811e909SRichard Henderson } 13364811e909SRichard Henderson 13374811e909SRichard Henderson /* TLB resize via tlb_fill may have moved the entry. */ 13384811e909SRichard Henderson entry = tlb_entry(env, mmu_idx, addr); 13394811e909SRichard Henderson } 13404811e909SRichard Henderson tlb_addr = tlb_read_ofs(entry, elt_ofs); 13414811e909SRichard Henderson } 13424811e909SRichard Henderson 13434811e909SRichard Henderson if (tlb_addr & ~TARGET_PAGE_MASK) { 13444811e909SRichard Henderson /* IO access */ 13454811e909SRichard Henderson return NULL; 13464811e909SRichard Henderson } 13474811e909SRichard Henderson 13484811e909SRichard Henderson return (void *)((uintptr_t)addr + entry->addend); 13494811e909SRichard Henderson } 13504811e909SRichard Henderson 1351235537faSAlex Bennée 1352235537faSAlex Bennée #ifdef CONFIG_PLUGIN 1353235537faSAlex Bennée /* 1354235537faSAlex Bennée * Perform a TLB lookup and populate the qemu_plugin_hwaddr structure. 1355235537faSAlex Bennée * This should be a hot path as we will have just looked this path up 1356235537faSAlex Bennée * in the softmmu lookup code (or helper). We don't handle re-fills or 1357235537faSAlex Bennée * checking the victim table. This is purely informational. 
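 *
 * In practice this lets a plugin's memory callback translate the vaddr
 * it was handed into either a host RAM address or an MMIO
 * section/offset pair after the access has executed.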
1358235537faSAlex Bennée * 1359235537faSAlex Bennée * This should never fail as the memory access being instrumented 1360235537faSAlex Bennée * should have just filled the TLB. 1361235537faSAlex Bennée */ 1362235537faSAlex Bennée 1363235537faSAlex Bennée bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr, int mmu_idx, 1364235537faSAlex Bennée bool is_store, struct qemu_plugin_hwaddr *data) 1365235537faSAlex Bennée { 1366235537faSAlex Bennée CPUArchState *env = cpu->env_ptr; 1367235537faSAlex Bennée CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr); 1368235537faSAlex Bennée uintptr_t index = tlb_index(env, mmu_idx, addr); 1369235537faSAlex Bennée target_ulong tlb_addr = is_store ? tlb_addr_write(tlbe) : tlbe->addr_read; 1370235537faSAlex Bennée 1371235537faSAlex Bennée if (likely(tlb_hit(tlb_addr, addr))) { 1372235537faSAlex Bennée /* We must have an iotlb entry for MMIO */ 1373235537faSAlex Bennée if (tlb_addr & TLB_MMIO) { 1374235537faSAlex Bennée CPUIOTLBEntry *iotlbentry; 1375235537faSAlex Bennée iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index]; 1376235537faSAlex Bennée data->is_io = true; 1377235537faSAlex Bennée data->v.io.section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs); 1378235537faSAlex Bennée data->v.io.offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr; 1379235537faSAlex Bennée } else { 1380235537faSAlex Bennée data->is_io = false; 1381235537faSAlex Bennée data->v.ram.hostaddr = addr + tlbe->addend; 1382235537faSAlex Bennée } 1383235537faSAlex Bennée return true; 1384235537faSAlex Bennée } 1385235537faSAlex Bennée return false; 1386235537faSAlex Bennée } 1387235537faSAlex Bennée 1388235537faSAlex Bennée #endif 1389235537faSAlex Bennée 1390d9bb58e5SYang Zhong /* Probe for a read-modify-write atomic operation. Do not allow unaligned 1391d9bb58e5SYang Zhong * operations, or io operations to proceed. Return the host address. */ 1392d9bb58e5SYang Zhong static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, 1393707526adSRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 1394d9bb58e5SYang Zhong { 1395d9bb58e5SYang Zhong size_t mmu_idx = get_mmuidx(oi); 1396383beda9SRichard Henderson uintptr_t index = tlb_index(env, mmu_idx, addr); 1397383beda9SRichard Henderson CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr); 1398403f290cSEmilio G. Cota target_ulong tlb_addr = tlb_addr_write(tlbe); 139914776ab5STony Nguyen MemOp mop = get_memop(oi); 1400d9bb58e5SYang Zhong int a_bits = get_alignment_bits(mop); 1401d9bb58e5SYang Zhong int s_bits = mop & MO_SIZE; 140234d49937SPeter Maydell void *hostaddr; 1403d9bb58e5SYang Zhong 1404d9bb58e5SYang Zhong /* Adjust the given return address. */ 1405d9bb58e5SYang Zhong retaddr -= GETPC_ADJ; 1406d9bb58e5SYang Zhong 1407d9bb58e5SYang Zhong /* Enforce guest required alignment. */ 1408d9bb58e5SYang Zhong if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) { 1409d9bb58e5SYang Zhong /* ??? Maybe indicate atomic op to cpu_unaligned_access */ 141029a0af61SRichard Henderson cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE, 1411d9bb58e5SYang Zhong mmu_idx, retaddr); 1412d9bb58e5SYang Zhong } 1413d9bb58e5SYang Zhong 1414d9bb58e5SYang Zhong /* Enforce qemu required alignment. */ 1415d9bb58e5SYang Zhong if (unlikely(addr & ((1 << s_bits) - 1))) { 1416d9bb58e5SYang Zhong /* We get here if guest alignment was not requested, 1417d9bb58e5SYang Zhong or was not enforced by cpu_unaligned_access above. 
1418d9bb58e5SYang Zhong We might widen the access and emulate, but for now 1419d9bb58e5SYang Zhong mark an exception and exit the cpu loop. */ 1420d9bb58e5SYang Zhong goto stop_the_world; 1421d9bb58e5SYang Zhong } 1422d9bb58e5SYang Zhong 1423d9bb58e5SYang Zhong /* Check TLB entry and enforce page permissions. */ 1424334692bcSPeter Maydell if (!tlb_hit(tlb_addr, addr)) { 1425d9bb58e5SYang Zhong if (!VICTIM_TLB_HIT(addr_write, addr)) { 142629a0af61SRichard Henderson tlb_fill(env_cpu(env), addr, 1 << s_bits, MMU_DATA_STORE, 142798670d47SLaurent Vivier mmu_idx, retaddr); 14286d967cb8SEmilio G. Cota index = tlb_index(env, mmu_idx, addr); 14296d967cb8SEmilio G. Cota tlbe = tlb_entry(env, mmu_idx, addr); 1430d9bb58e5SYang Zhong } 1431403f290cSEmilio G. Cota tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK; 1432d9bb58e5SYang Zhong } 1433d9bb58e5SYang Zhong 143455df6fcfSPeter Maydell /* Notice an IO access or a needs-MMU-lookup access */ 143530d7e098SRichard Henderson if (unlikely(tlb_addr & TLB_MMIO)) { 1436d9bb58e5SYang Zhong /* There's really nothing that can be done to 1437d9bb58e5SYang Zhong support this apart from stop-the-world. */ 1438d9bb58e5SYang Zhong goto stop_the_world; 1439d9bb58e5SYang Zhong } 1440d9bb58e5SYang Zhong 1441d9bb58e5SYang Zhong /* Let the guest notice RMW on a write-only page. */ 144234d49937SPeter Maydell if (unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) { 144329a0af61SRichard Henderson tlb_fill(env_cpu(env), addr, 1 << s_bits, MMU_DATA_LOAD, 144498670d47SLaurent Vivier mmu_idx, retaddr); 1445d9bb58e5SYang Zhong /* Since we don't support reads and writes to different addresses, 1446d9bb58e5SYang Zhong and we do have the proper page loaded for write, this shouldn't 1447d9bb58e5SYang Zhong ever return. But just in case, handle via stop-the-world. */ 1448d9bb58e5SYang Zhong goto stop_the_world; 1449d9bb58e5SYang Zhong } 1450d9bb58e5SYang Zhong 145134d49937SPeter Maydell hostaddr = (void *)((uintptr_t)addr + tlbe->addend); 145234d49937SPeter Maydell 145334d49937SPeter Maydell if (unlikely(tlb_addr & TLB_NOTDIRTY)) { 1454707526adSRichard Henderson notdirty_write(env_cpu(env), addr, 1 << s_bits, 1455707526adSRichard Henderson &env_tlb(env)->d[mmu_idx].iotlb[index], retaddr); 145634d49937SPeter Maydell } 145734d49937SPeter Maydell 145834d49937SPeter Maydell return hostaddr; 1459d9bb58e5SYang Zhong 1460d9bb58e5SYang Zhong stop_the_world: 146129a0af61SRichard Henderson cpu_loop_exit_atomic(env_cpu(env), retaddr); 1462d9bb58e5SYang Zhong } 1463d9bb58e5SYang Zhong 1464eed56642SAlex Bennée /* 1465eed56642SAlex Bennée * Load Helpers 1466eed56642SAlex Bennée * 1467eed56642SAlex Bennée * We support two different access types. SOFTMMU_CODE_ACCESS is 1468eed56642SAlex Bennée * specifically for reading instructions from system memory. It is 1469eed56642SAlex Bennée * called by the translation loop and in some helpers where the code 1470eed56642SAlex Bennée * is disassembled. It shouldn't be called directly by guest code. 
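 *
 * The other access type is the ordinary data load used by the
 * helper_{le,be}_ld*_mmu entry points that TCG-generated code calls
 * directly.  Both variants funnel into load_helper() below; its
 * code_read flag selects between the addr_code and addr_read
 * comparators in the TLB entry.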
1471eed56642SAlex Bennée */ 1472d9bb58e5SYang Zhong 14732dd92606SRichard Henderson typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr, 14742dd92606SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr); 14752dd92606SRichard Henderson 1476c6b716cdSRichard Henderson static inline uint64_t QEMU_ALWAYS_INLINE 147780d9d1c6SRichard Henderson load_memop(const void *haddr, MemOp op) 147880d9d1c6SRichard Henderson { 147980d9d1c6SRichard Henderson switch (op) { 148080d9d1c6SRichard Henderson case MO_UB: 148180d9d1c6SRichard Henderson return ldub_p(haddr); 148280d9d1c6SRichard Henderson case MO_BEUW: 148380d9d1c6SRichard Henderson return lduw_be_p(haddr); 148480d9d1c6SRichard Henderson case MO_LEUW: 148580d9d1c6SRichard Henderson return lduw_le_p(haddr); 148680d9d1c6SRichard Henderson case MO_BEUL: 148780d9d1c6SRichard Henderson return (uint32_t)ldl_be_p(haddr); 148880d9d1c6SRichard Henderson case MO_LEUL: 148980d9d1c6SRichard Henderson return (uint32_t)ldl_le_p(haddr); 149080d9d1c6SRichard Henderson case MO_BEQ: 149180d9d1c6SRichard Henderson return ldq_be_p(haddr); 149280d9d1c6SRichard Henderson case MO_LEQ: 149380d9d1c6SRichard Henderson return ldq_le_p(haddr); 149480d9d1c6SRichard Henderson default: 149580d9d1c6SRichard Henderson qemu_build_not_reached(); 149680d9d1c6SRichard Henderson } 149780d9d1c6SRichard Henderson } 149880d9d1c6SRichard Henderson 149980d9d1c6SRichard Henderson static inline uint64_t QEMU_ALWAYS_INLINE 15002dd92606SRichard Henderson load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, 1501be5c4787STony Nguyen uintptr_t retaddr, MemOp op, bool code_read, 15022dd92606SRichard Henderson FullLoadHelper *full_load) 1503eed56642SAlex Bennée { 1504eed56642SAlex Bennée uintptr_t mmu_idx = get_mmuidx(oi); 1505eed56642SAlex Bennée uintptr_t index = tlb_index(env, mmu_idx, addr); 1506eed56642SAlex Bennée CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); 1507eed56642SAlex Bennée target_ulong tlb_addr = code_read ? entry->addr_code : entry->addr_read; 1508eed56642SAlex Bennée const size_t tlb_off = code_read ? 1509eed56642SAlex Bennée offsetof(CPUTLBEntry, addr_code) : offsetof(CPUTLBEntry, addr_read); 1510f1be3696SRichard Henderson const MMUAccessType access_type = 1511f1be3696SRichard Henderson code_read ? MMU_INST_FETCH : MMU_DATA_LOAD; 1512eed56642SAlex Bennée unsigned a_bits = get_alignment_bits(get_memop(oi)); 1513eed56642SAlex Bennée void *haddr; 1514eed56642SAlex Bennée uint64_t res; 1515be5c4787STony Nguyen size_t size = memop_size(op); 1516d9bb58e5SYang Zhong 1517eed56642SAlex Bennée /* Handle CPU specific unaligned behaviour */ 1518eed56642SAlex Bennée if (addr & ((1 << a_bits) - 1)) { 151929a0af61SRichard Henderson cpu_unaligned_access(env_cpu(env), addr, access_type, 1520eed56642SAlex Bennée mmu_idx, retaddr); 1521eed56642SAlex Bennée } 1522eed56642SAlex Bennée 1523eed56642SAlex Bennée /* If the TLB entry is for a different page, reload and try again. */ 1524eed56642SAlex Bennée if (!tlb_hit(tlb_addr, addr)) { 1525eed56642SAlex Bennée if (!victim_tlb_hit(env, mmu_idx, index, tlb_off, 1526eed56642SAlex Bennée addr & TARGET_PAGE_MASK)) { 152729a0af61SRichard Henderson tlb_fill(env_cpu(env), addr, size, 1528f1be3696SRichard Henderson access_type, mmu_idx, retaddr); 1529eed56642SAlex Bennée index = tlb_index(env, mmu_idx, addr); 1530eed56642SAlex Bennée entry = tlb_entry(env, mmu_idx, addr); 1531eed56642SAlex Bennée } 1532eed56642SAlex Bennée tlb_addr = code_read ? 
entry->addr_code : entry->addr_read; 153330d7e098SRichard Henderson tlb_addr &= ~TLB_INVALID_MASK; 1534eed56642SAlex Bennée } 1535eed56642SAlex Bennée 153650b107c5SRichard Henderson /* Handle anything that isn't just a straight memory access. */ 1537eed56642SAlex Bennée if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) { 153850b107c5SRichard Henderson CPUIOTLBEntry *iotlbentry; 15395b87b3e6SRichard Henderson bool need_swap; 154050b107c5SRichard Henderson 154150b107c5SRichard Henderson /* For anything that is unaligned, recurse through full_load. */ 1542eed56642SAlex Bennée if ((addr & (size - 1)) != 0) { 1543eed56642SAlex Bennée goto do_unaligned_access; 1544eed56642SAlex Bennée } 154550b107c5SRichard Henderson 154650b107c5SRichard Henderson iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index]; 154750b107c5SRichard Henderson 154850b107c5SRichard Henderson /* Handle watchpoints. */ 154950b107c5SRichard Henderson if (unlikely(tlb_addr & TLB_WATCHPOINT)) { 155050b107c5SRichard Henderson /* On watchpoint hit, this will longjmp out. */ 155150b107c5SRichard Henderson cpu_check_watchpoint(env_cpu(env), addr, size, 155250b107c5SRichard Henderson iotlbentry->attrs, BP_MEM_READ, retaddr); 15535b87b3e6SRichard Henderson } 155450b107c5SRichard Henderson 15555b87b3e6SRichard Henderson need_swap = size > 1 && (tlb_addr & TLB_BSWAP); 155650b107c5SRichard Henderson 155750b107c5SRichard Henderson /* Handle I/O access. */ 15585b87b3e6SRichard Henderson if (likely(tlb_addr & TLB_MMIO)) { 15595b87b3e6SRichard Henderson return io_readx(env, iotlbentry, mmu_idx, addr, retaddr, 15605b87b3e6SRichard Henderson access_type, op ^ (need_swap * MO_BSWAP)); 15615b87b3e6SRichard Henderson } 15625b87b3e6SRichard Henderson 15635b87b3e6SRichard Henderson haddr = (void *)((uintptr_t)addr + entry->addend); 15645b87b3e6SRichard Henderson 15655b87b3e6SRichard Henderson /* 15665b87b3e6SRichard Henderson * Keep these two load_memop separate to ensure that the compiler 15675b87b3e6SRichard Henderson * is able to fold the entire function to a single instruction. 15685b87b3e6SRichard Henderson * There is a build-time assert inside to remind you of this. ;-) 15695b87b3e6SRichard Henderson */ 15705b87b3e6SRichard Henderson if (unlikely(need_swap)) { 15715b87b3e6SRichard Henderson return load_memop(haddr, op ^ MO_BSWAP); 15725b87b3e6SRichard Henderson } 15735b87b3e6SRichard Henderson return load_memop(haddr, op); 1574eed56642SAlex Bennée } 1575eed56642SAlex Bennée 1576eed56642SAlex Bennée /* Handle slow unaligned access (it spans two pages or IO). */ 1577eed56642SAlex Bennée if (size > 1 1578eed56642SAlex Bennée && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1 1579eed56642SAlex Bennée >= TARGET_PAGE_SIZE)) { 1580eed56642SAlex Bennée target_ulong addr1, addr2; 15818c79b288SAlex Bennée uint64_t r1, r2; 1582eed56642SAlex Bennée unsigned shift; 1583eed56642SAlex Bennée do_unaligned_access: 1584ab7a2009SAlex Bennée addr1 = addr & ~((target_ulong)size - 1); 1585eed56642SAlex Bennée addr2 = addr1 + size; 15862dd92606SRichard Henderson r1 = full_load(env, addr1, oi, retaddr); 15872dd92606SRichard Henderson r2 = full_load(env, addr2, oi, retaddr); 1588eed56642SAlex Bennée shift = (addr & (size - 1)) * 8; 1589eed56642SAlex Bennée 1590be5c4787STony Nguyen if (memop_big_endian(op)) { 1591eed56642SAlex Bennée /* Big-endian combine. */ 1592eed56642SAlex Bennée res = (r1 << shift) | (r2 >> ((size * 8) - shift)); 1593eed56642SAlex Bennée } else { 1594eed56642SAlex Bennée /* Little-endian combine. 
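   Editor's worked case: a 4-byte load at addr % 4 == 1 gives
   shift = 8, so bytes 1..3 of the first aligned word land in bits
   0..23 via r1 >> 8, and byte 0 of the second word lands in bits
   24..31 via r2 << 24.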
*/ 1595eed56642SAlex Bennée res = (r1 >> shift) | (r2 << ((size * 8) - shift)); 1596eed56642SAlex Bennée } 1597eed56642SAlex Bennée return res & MAKE_64BIT_MASK(0, size * 8); 1598eed56642SAlex Bennée } 1599eed56642SAlex Bennée 1600eed56642SAlex Bennée haddr = (void *)((uintptr_t)addr + entry->addend); 160180d9d1c6SRichard Henderson return load_memop(haddr, op); 1602eed56642SAlex Bennée } 1603eed56642SAlex Bennée 1604eed56642SAlex Bennée /* 1605eed56642SAlex Bennée * For the benefit of TCG generated code, we want to avoid the 1606eed56642SAlex Bennée * complication of ABI-specific return type promotion and always 1607eed56642SAlex Bennée * return a value extended to the register size of the host. This is 1608eed56642SAlex Bennée * tcg_target_long, except in the case of a 32-bit host and 64-bit 1609eed56642SAlex Bennée * data, and for that we always have uint64_t. 1610eed56642SAlex Bennée * 1611eed56642SAlex Bennée * We don't bother with this widened value for SOFTMMU_CODE_ACCESS. 1612eed56642SAlex Bennée */ 1613eed56642SAlex Bennée 16142dd92606SRichard Henderson static uint64_t full_ldub_mmu(CPUArchState *env, target_ulong addr, 16152dd92606SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 16162dd92606SRichard Henderson { 1617be5c4787STony Nguyen return load_helper(env, addr, oi, retaddr, MO_UB, false, full_ldub_mmu); 16182dd92606SRichard Henderson } 16192dd92606SRichard Henderson 1620fc1bc777SRichard Henderson tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr, 1621fc1bc777SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 1622eed56642SAlex Bennée { 16232dd92606SRichard Henderson return full_ldub_mmu(env, addr, oi, retaddr); 16242dd92606SRichard Henderson } 16252dd92606SRichard Henderson 16262dd92606SRichard Henderson static uint64_t full_le_lduw_mmu(CPUArchState *env, target_ulong addr, 16272dd92606SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 16282dd92606SRichard Henderson { 1629be5c4787STony Nguyen return load_helper(env, addr, oi, retaddr, MO_LEUW, false, 16302dd92606SRichard Henderson full_le_lduw_mmu); 1631eed56642SAlex Bennée } 1632eed56642SAlex Bennée 1633fc1bc777SRichard Henderson tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr, 1634fc1bc777SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 1635eed56642SAlex Bennée { 16362dd92606SRichard Henderson return full_le_lduw_mmu(env, addr, oi, retaddr); 16372dd92606SRichard Henderson } 16382dd92606SRichard Henderson 16392dd92606SRichard Henderson static uint64_t full_be_lduw_mmu(CPUArchState *env, target_ulong addr, 16402dd92606SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 16412dd92606SRichard Henderson { 1642be5c4787STony Nguyen return load_helper(env, addr, oi, retaddr, MO_BEUW, false, 16432dd92606SRichard Henderson full_be_lduw_mmu); 1644eed56642SAlex Bennée } 1645eed56642SAlex Bennée 1646fc1bc777SRichard Henderson tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr, 1647fc1bc777SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 1648eed56642SAlex Bennée { 16492dd92606SRichard Henderson return full_be_lduw_mmu(env, addr, oi, retaddr); 16502dd92606SRichard Henderson } 16512dd92606SRichard Henderson 16522dd92606SRichard Henderson static uint64_t full_le_ldul_mmu(CPUArchState *env, target_ulong addr, 16532dd92606SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 16542dd92606SRichard Henderson { 1655be5c4787STony Nguyen return load_helper(env, addr, oi, retaddr, MO_LEUL, false, 16562dd92606SRichard Henderson full_le_ldul_mmu); 
1657eed56642SAlex Bennée } 1658eed56642SAlex Bennée 1659fc1bc777SRichard Henderson tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr, 1660fc1bc777SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 1661eed56642SAlex Bennée { 16622dd92606SRichard Henderson return full_le_ldul_mmu(env, addr, oi, retaddr); 16632dd92606SRichard Henderson } 16642dd92606SRichard Henderson 16652dd92606SRichard Henderson static uint64_t full_be_ldul_mmu(CPUArchState *env, target_ulong addr, 16662dd92606SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 16672dd92606SRichard Henderson { 1668be5c4787STony Nguyen return load_helper(env, addr, oi, retaddr, MO_BEUL, false, 16692dd92606SRichard Henderson full_be_ldul_mmu); 1670eed56642SAlex Bennée } 1671eed56642SAlex Bennée 1672fc1bc777SRichard Henderson tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr, 1673fc1bc777SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 1674eed56642SAlex Bennée { 16752dd92606SRichard Henderson return full_be_ldul_mmu(env, addr, oi, retaddr); 1676eed56642SAlex Bennée } 1677eed56642SAlex Bennée 1678fc1bc777SRichard Henderson uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr, 1679fc1bc777SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 1680eed56642SAlex Bennée { 1681be5c4787STony Nguyen return load_helper(env, addr, oi, retaddr, MO_LEQ, false, 16822dd92606SRichard Henderson helper_le_ldq_mmu); 1683eed56642SAlex Bennée } 1684eed56642SAlex Bennée 1685fc1bc777SRichard Henderson uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr, 1686fc1bc777SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 1687eed56642SAlex Bennée { 1688be5c4787STony Nguyen return load_helper(env, addr, oi, retaddr, MO_BEQ, false, 16892dd92606SRichard Henderson helper_be_ldq_mmu); 1690eed56642SAlex Bennée } 1691eed56642SAlex Bennée 1692eed56642SAlex Bennée /* 1693eed56642SAlex Bennée * Provide signed versions of the load routines as well. We can of course 1694eed56642SAlex Bennée * avoid this for 64-bit data, or for 32-bit data on 32-bit host. 
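 * In those cases the loaded value already fills the returned register
 * exactly, so there is nothing left to sign-extend; the (intN_t) cast
 * in each wrapper below is what performs the widening.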
1695eed56642SAlex Bennée */ 1696eed56642SAlex Bennée 1697eed56642SAlex Bennée 1698eed56642SAlex Bennée tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr, 1699eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 1700eed56642SAlex Bennée { 1701eed56642SAlex Bennée return (int8_t)helper_ret_ldub_mmu(env, addr, oi, retaddr); 1702eed56642SAlex Bennée } 1703eed56642SAlex Bennée 1704eed56642SAlex Bennée tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr, 1705eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 1706eed56642SAlex Bennée { 1707eed56642SAlex Bennée return (int16_t)helper_le_lduw_mmu(env, addr, oi, retaddr); 1708eed56642SAlex Bennée } 1709eed56642SAlex Bennée 1710eed56642SAlex Bennée tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr, 1711eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 1712eed56642SAlex Bennée { 1713eed56642SAlex Bennée return (int16_t)helper_be_lduw_mmu(env, addr, oi, retaddr); 1714eed56642SAlex Bennée } 1715eed56642SAlex Bennée 1716eed56642SAlex Bennée tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr, 1717eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 1718eed56642SAlex Bennée { 1719eed56642SAlex Bennée return (int32_t)helper_le_ldul_mmu(env, addr, oi, retaddr); 1720eed56642SAlex Bennée } 1721eed56642SAlex Bennée 1722eed56642SAlex Bennée tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr, 1723eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 1724eed56642SAlex Bennée { 1725eed56642SAlex Bennée return (int32_t)helper_be_ldul_mmu(env, addr, oi, retaddr); 1726eed56642SAlex Bennée } 1727eed56642SAlex Bennée 1728eed56642SAlex Bennée /* 1729d03f1408SRichard Henderson * Load helpers for cpu_ldst.h. 
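 *
 * These wrap the load helpers above with guest-memory tracing and
 * plugin callbacks, so target helper code should prefer them over the
 * raw helper_*_mmu entry points.  A hedged sketch (desc_addr is a
 * hypothetical guest address):
 *
 *     uint32_t desc = cpu_ldl_mmuidx_ra(env, desc_addr, mmu_idx, GETPC());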
1730d03f1408SRichard Henderson */ 1731d03f1408SRichard Henderson 1732d03f1408SRichard Henderson static inline uint64_t cpu_load_helper(CPUArchState *env, abi_ptr addr, 1733d03f1408SRichard Henderson int mmu_idx, uintptr_t retaddr, 1734d03f1408SRichard Henderson MemOp op, FullLoadHelper *full_load) 1735d03f1408SRichard Henderson { 1736d03f1408SRichard Henderson uint16_t meminfo; 1737d03f1408SRichard Henderson TCGMemOpIdx oi; 1738d03f1408SRichard Henderson uint64_t ret; 1739d03f1408SRichard Henderson 1740d03f1408SRichard Henderson meminfo = trace_mem_get_info(op, mmu_idx, false); 1741d03f1408SRichard Henderson trace_guest_mem_before_exec(env_cpu(env), addr, meminfo); 1742d03f1408SRichard Henderson 1743d03f1408SRichard Henderson op &= ~MO_SIGN; 1744d03f1408SRichard Henderson oi = make_memop_idx(op, mmu_idx); 1745d03f1408SRichard Henderson ret = full_load(env, addr, oi, retaddr); 1746d03f1408SRichard Henderson 1747d03f1408SRichard Henderson qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, meminfo); 1748d03f1408SRichard Henderson 1749d03f1408SRichard Henderson return ret; 1750d03f1408SRichard Henderson } 1751d03f1408SRichard Henderson 1752d03f1408SRichard Henderson uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr, 1753d03f1408SRichard Henderson int mmu_idx, uintptr_t ra) 1754d03f1408SRichard Henderson { 1755d03f1408SRichard Henderson return cpu_load_helper(env, addr, mmu_idx, ra, MO_UB, full_ldub_mmu); 1756d03f1408SRichard Henderson } 1757d03f1408SRichard Henderson 1758d03f1408SRichard Henderson int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr, 1759d03f1408SRichard Henderson int mmu_idx, uintptr_t ra) 1760d03f1408SRichard Henderson { 1761d03f1408SRichard Henderson return (int8_t)cpu_load_helper(env, addr, mmu_idx, ra, MO_SB, 1762d03f1408SRichard Henderson full_ldub_mmu); 1763d03f1408SRichard Henderson } 1764d03f1408SRichard Henderson 1765d03f1408SRichard Henderson uint32_t cpu_lduw_mmuidx_ra(CPUArchState *env, abi_ptr addr, 1766d03f1408SRichard Henderson int mmu_idx, uintptr_t ra) 1767d03f1408SRichard Henderson { 1768d03f1408SRichard Henderson return cpu_load_helper(env, addr, mmu_idx, ra, MO_TEUW, 1769d03f1408SRichard Henderson MO_TE == MO_LE 1770d03f1408SRichard Henderson ? full_le_lduw_mmu : full_be_lduw_mmu); 1771d03f1408SRichard Henderson } 1772d03f1408SRichard Henderson 1773d03f1408SRichard Henderson int cpu_ldsw_mmuidx_ra(CPUArchState *env, abi_ptr addr, 1774d03f1408SRichard Henderson int mmu_idx, uintptr_t ra) 1775d03f1408SRichard Henderson { 1776d03f1408SRichard Henderson return (int16_t)cpu_load_helper(env, addr, mmu_idx, ra, MO_TESW, 1777d03f1408SRichard Henderson MO_TE == MO_LE 1778d03f1408SRichard Henderson ? full_le_lduw_mmu : full_be_lduw_mmu); 1779d03f1408SRichard Henderson } 1780d03f1408SRichard Henderson 1781d03f1408SRichard Henderson uint32_t cpu_ldl_mmuidx_ra(CPUArchState *env, abi_ptr addr, 1782d03f1408SRichard Henderson int mmu_idx, uintptr_t ra) 1783d03f1408SRichard Henderson { 1784d03f1408SRichard Henderson return cpu_load_helper(env, addr, mmu_idx, ra, MO_TEUL, 1785d03f1408SRichard Henderson MO_TE == MO_LE 1786d03f1408SRichard Henderson ? 
full_le_ldul_mmu : full_be_ldul_mmu); 1787d03f1408SRichard Henderson } 1788d03f1408SRichard Henderson 1789d03f1408SRichard Henderson uint64_t cpu_ldq_mmuidx_ra(CPUArchState *env, abi_ptr addr, 1790d03f1408SRichard Henderson int mmu_idx, uintptr_t ra) 1791d03f1408SRichard Henderson { 1792d03f1408SRichard Henderson return cpu_load_helper(env, addr, mmu_idx, ra, MO_TEQ, 1793d03f1408SRichard Henderson MO_TE == MO_LE 1794d03f1408SRichard Henderson ? helper_le_ldq_mmu : helper_be_ldq_mmu); 1795d03f1408SRichard Henderson } 1796d03f1408SRichard Henderson 1797cfe04a4bSRichard Henderson uint32_t cpu_ldub_data_ra(CPUArchState *env, target_ulong ptr, 1798cfe04a4bSRichard Henderson uintptr_t retaddr) 1799cfe04a4bSRichard Henderson { 1800cfe04a4bSRichard Henderson return cpu_ldub_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr); 1801cfe04a4bSRichard Henderson } 1802cfe04a4bSRichard Henderson 1803cfe04a4bSRichard Henderson int cpu_ldsb_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr) 1804cfe04a4bSRichard Henderson { 1805cfe04a4bSRichard Henderson return cpu_ldsb_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr); 1806cfe04a4bSRichard Henderson } 1807cfe04a4bSRichard Henderson 1808cfe04a4bSRichard Henderson uint32_t cpu_lduw_data_ra(CPUArchState *env, target_ulong ptr, 1809cfe04a4bSRichard Henderson uintptr_t retaddr) 1810cfe04a4bSRichard Henderson { 1811cfe04a4bSRichard Henderson return cpu_lduw_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr); 1812cfe04a4bSRichard Henderson } 1813cfe04a4bSRichard Henderson 1814cfe04a4bSRichard Henderson int cpu_ldsw_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr) 1815cfe04a4bSRichard Henderson { 1816cfe04a4bSRichard Henderson return cpu_ldsw_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr); 1817cfe04a4bSRichard Henderson } 1818cfe04a4bSRichard Henderson 1819cfe04a4bSRichard Henderson uint32_t cpu_ldl_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr) 1820cfe04a4bSRichard Henderson { 1821cfe04a4bSRichard Henderson return cpu_ldl_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr); 1822cfe04a4bSRichard Henderson } 1823cfe04a4bSRichard Henderson 1824cfe04a4bSRichard Henderson uint64_t cpu_ldq_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr) 1825cfe04a4bSRichard Henderson { 1826cfe04a4bSRichard Henderson return cpu_ldq_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr); 1827cfe04a4bSRichard Henderson } 1828cfe04a4bSRichard Henderson 1829cfe04a4bSRichard Henderson uint32_t cpu_ldub_data(CPUArchState *env, target_ulong ptr) 1830cfe04a4bSRichard Henderson { 1831cfe04a4bSRichard Henderson return cpu_ldub_data_ra(env, ptr, 0); 1832cfe04a4bSRichard Henderson } 1833cfe04a4bSRichard Henderson 1834cfe04a4bSRichard Henderson int cpu_ldsb_data(CPUArchState *env, target_ulong ptr) 1835cfe04a4bSRichard Henderson { 1836cfe04a4bSRichard Henderson return cpu_ldsb_data_ra(env, ptr, 0); 1837cfe04a4bSRichard Henderson } 1838cfe04a4bSRichard Henderson 1839cfe04a4bSRichard Henderson uint32_t cpu_lduw_data(CPUArchState *env, target_ulong ptr) 1840cfe04a4bSRichard Henderson { 1841cfe04a4bSRichard Henderson return cpu_lduw_data_ra(env, ptr, 0); 1842cfe04a4bSRichard Henderson } 1843cfe04a4bSRichard Henderson 1844cfe04a4bSRichard Henderson int cpu_ldsw_data(CPUArchState *env, target_ulong ptr) 1845cfe04a4bSRichard Henderson { 1846cfe04a4bSRichard Henderson return cpu_ldsw_data_ra(env, ptr, 0); 1847cfe04a4bSRichard Henderson } 1848cfe04a4bSRichard Henderson 1849cfe04a4bSRichard Henderson uint32_t 
cpu_ldl_data(CPUArchState *env, target_ulong ptr) 1850cfe04a4bSRichard Henderson { 1851cfe04a4bSRichard Henderson return cpu_ldl_data_ra(env, ptr, 0); 1852cfe04a4bSRichard Henderson } 1853cfe04a4bSRichard Henderson 1854cfe04a4bSRichard Henderson uint64_t cpu_ldq_data(CPUArchState *env, target_ulong ptr) 1855cfe04a4bSRichard Henderson { 1856cfe04a4bSRichard Henderson return cpu_ldq_data_ra(env, ptr, 0); 1857cfe04a4bSRichard Henderson } 1858cfe04a4bSRichard Henderson 1859d03f1408SRichard Henderson /* 1860eed56642SAlex Bennée * Store Helpers 1861eed56642SAlex Bennée */ 1862eed56642SAlex Bennée 1863c6b716cdSRichard Henderson static inline void QEMU_ALWAYS_INLINE 186480d9d1c6SRichard Henderson store_memop(void *haddr, uint64_t val, MemOp op) 186580d9d1c6SRichard Henderson { 186680d9d1c6SRichard Henderson switch (op) { 186780d9d1c6SRichard Henderson case MO_UB: 186880d9d1c6SRichard Henderson stb_p(haddr, val); 186980d9d1c6SRichard Henderson break; 187080d9d1c6SRichard Henderson case MO_BEUW: 187180d9d1c6SRichard Henderson stw_be_p(haddr, val); 187280d9d1c6SRichard Henderson break; 187380d9d1c6SRichard Henderson case MO_LEUW: 187480d9d1c6SRichard Henderson stw_le_p(haddr, val); 187580d9d1c6SRichard Henderson break; 187680d9d1c6SRichard Henderson case MO_BEUL: 187780d9d1c6SRichard Henderson stl_be_p(haddr, val); 187880d9d1c6SRichard Henderson break; 187980d9d1c6SRichard Henderson case MO_LEUL: 188080d9d1c6SRichard Henderson stl_le_p(haddr, val); 188180d9d1c6SRichard Henderson break; 188280d9d1c6SRichard Henderson case MO_BEQ: 188380d9d1c6SRichard Henderson stq_be_p(haddr, val); 188480d9d1c6SRichard Henderson break; 188580d9d1c6SRichard Henderson case MO_LEQ: 188680d9d1c6SRichard Henderson stq_le_p(haddr, val); 188780d9d1c6SRichard Henderson break; 188880d9d1c6SRichard Henderson default: 188980d9d1c6SRichard Henderson qemu_build_not_reached(); 189080d9d1c6SRichard Henderson } 189180d9d1c6SRichard Henderson } 189280d9d1c6SRichard Henderson 189380d9d1c6SRichard Henderson static inline void QEMU_ALWAYS_INLINE 18944601f8d1SRichard Henderson store_helper(CPUArchState *env, target_ulong addr, uint64_t val, 1895be5c4787STony Nguyen TCGMemOpIdx oi, uintptr_t retaddr, MemOp op) 1896eed56642SAlex Bennée { 1897eed56642SAlex Bennée uintptr_t mmu_idx = get_mmuidx(oi); 1898eed56642SAlex Bennée uintptr_t index = tlb_index(env, mmu_idx, addr); 1899eed56642SAlex Bennée CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); 1900eed56642SAlex Bennée target_ulong tlb_addr = tlb_addr_write(entry); 1901eed56642SAlex Bennée const size_t tlb_off = offsetof(CPUTLBEntry, addr_write); 1902eed56642SAlex Bennée unsigned a_bits = get_alignment_bits(get_memop(oi)); 1903eed56642SAlex Bennée void *haddr; 1904be5c4787STony Nguyen size_t size = memop_size(op); 1905eed56642SAlex Bennée 1906eed56642SAlex Bennée /* Handle CPU specific unaligned behaviour */ 1907eed56642SAlex Bennée if (addr & ((1 << a_bits) - 1)) { 190829a0af61SRichard Henderson cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE, 1909eed56642SAlex Bennée mmu_idx, retaddr); 1910eed56642SAlex Bennée } 1911eed56642SAlex Bennée 1912eed56642SAlex Bennée /* If the TLB entry is for a different page, reload and try again. 
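   The victim TLB is consulted first because promoting one of its
   entries is much cheaper than a full tlb_fill(); and since tlb_fill()
   may resize (and so reallocate) the dynamic TLB, index and entry are
   recomputed after it returns.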
*/ 1913eed56642SAlex Bennée if (!tlb_hit(tlb_addr, addr)) { 1914eed56642SAlex Bennée if (!victim_tlb_hit(env, mmu_idx, index, tlb_off, 1915eed56642SAlex Bennée addr & TARGET_PAGE_MASK)) { 191629a0af61SRichard Henderson tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE, 1917eed56642SAlex Bennée mmu_idx, retaddr); 1918eed56642SAlex Bennée index = tlb_index(env, mmu_idx, addr); 1919eed56642SAlex Bennée entry = tlb_entry(env, mmu_idx, addr); 1920eed56642SAlex Bennée } 1921eed56642SAlex Bennée tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK; 1922eed56642SAlex Bennée } 1923eed56642SAlex Bennée 192450b107c5SRichard Henderson /* Handle anything that isn't just a straight memory access. */ 1925eed56642SAlex Bennée if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) { 192650b107c5SRichard Henderson CPUIOTLBEntry *iotlbentry; 19275b87b3e6SRichard Henderson bool need_swap; 192850b107c5SRichard Henderson 192950b107c5SRichard Henderson /* For anything that is unaligned, recurse through byte stores. */ 1930eed56642SAlex Bennée if ((addr & (size - 1)) != 0) { 1931eed56642SAlex Bennée goto do_unaligned_access; 1932eed56642SAlex Bennée } 193350b107c5SRichard Henderson 193450b107c5SRichard Henderson iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index]; 193550b107c5SRichard Henderson 193650b107c5SRichard Henderson /* Handle watchpoints. */ 193750b107c5SRichard Henderson if (unlikely(tlb_addr & TLB_WATCHPOINT)) { 193850b107c5SRichard Henderson /* On watchpoint hit, this will longjmp out. */ 193950b107c5SRichard Henderson cpu_check_watchpoint(env_cpu(env), addr, size, 194050b107c5SRichard Henderson iotlbentry->attrs, BP_MEM_WRITE, retaddr); 19415b87b3e6SRichard Henderson } 194250b107c5SRichard Henderson 19435b87b3e6SRichard Henderson need_swap = size > 1 && (tlb_addr & TLB_BSWAP); 194450b107c5SRichard Henderson 194550b107c5SRichard Henderson /* Handle I/O access. */ 194608565552SRichard Henderson if (tlb_addr & TLB_MMIO) { 19475b87b3e6SRichard Henderson io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr, 19485b87b3e6SRichard Henderson op ^ (need_swap * MO_BSWAP)); 19495b87b3e6SRichard Henderson return; 19505b87b3e6SRichard Henderson } 19515b87b3e6SRichard Henderson 19527b0d792cSRichard Henderson /* Ignore writes to ROM. */ 19537b0d792cSRichard Henderson if (unlikely(tlb_addr & TLB_DISCARD_WRITE)) { 19547b0d792cSRichard Henderson return; 19557b0d792cSRichard Henderson } 19567b0d792cSRichard Henderson 195708565552SRichard Henderson /* Handle clean RAM pages. */ 195808565552SRichard Henderson if (tlb_addr & TLB_NOTDIRTY) { 1959707526adSRichard Henderson notdirty_write(env_cpu(env), addr, size, iotlbentry, retaddr); 196008565552SRichard Henderson } 196108565552SRichard Henderson 1962707526adSRichard Henderson haddr = (void *)((uintptr_t)addr + entry->addend); 196308565552SRichard Henderson 19645b87b3e6SRichard Henderson /* 19655b87b3e6SRichard Henderson * Keep these two store_memop separate to ensure that the compiler 19665b87b3e6SRichard Henderson * is able to fold the entire function to a single instruction. 19675b87b3e6SRichard Henderson * There is a build-time assert inside to remind you of this. 
;-) 19685b87b3e6SRichard Henderson */ 19695b87b3e6SRichard Henderson if (unlikely(need_swap)) { 19705b87b3e6SRichard Henderson store_memop(haddr, val, op ^ MO_BSWAP); 19715b87b3e6SRichard Henderson } else { 19725b87b3e6SRichard Henderson store_memop(haddr, val, op); 19735b87b3e6SRichard Henderson } 1974eed56642SAlex Bennée return; 1975eed56642SAlex Bennée } 1976eed56642SAlex Bennée 1977eed56642SAlex Bennée /* Handle slow unaligned access (it spans two pages or IO). */ 1978eed56642SAlex Bennée if (size > 1 1979eed56642SAlex Bennée && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1 1980eed56642SAlex Bennée >= TARGET_PAGE_SIZE)) { 1981eed56642SAlex Bennée int i; 1982eed56642SAlex Bennée uintptr_t index2; 1983eed56642SAlex Bennée CPUTLBEntry *entry2; 1984eed56642SAlex Bennée target_ulong page2, tlb_addr2; 19858f7cd2adSRichard Henderson size_t size2; 19868f7cd2adSRichard Henderson 1987eed56642SAlex Bennée do_unaligned_access: 1988eed56642SAlex Bennée /* 1989eed56642SAlex Bennée * Ensure the second page is in the TLB. Note that the first page 1990eed56642SAlex Bennée * is already guaranteed to be filled, and that the second page 1991eed56642SAlex Bennée * cannot evict the first. 1992eed56642SAlex Bennée */ 1993eed56642SAlex Bennée page2 = (addr + size) & TARGET_PAGE_MASK; 19948f7cd2adSRichard Henderson size2 = (addr + size) & ~TARGET_PAGE_MASK; 1995eed56642SAlex Bennée index2 = tlb_index(env, mmu_idx, page2); 1996eed56642SAlex Bennée entry2 = tlb_entry(env, mmu_idx, page2); 1997eed56642SAlex Bennée tlb_addr2 = tlb_addr_write(entry2); 199850b107c5SRichard Henderson if (!tlb_hit_page(tlb_addr2, page2)) { 199950b107c5SRichard Henderson if (!victim_tlb_hit(env, mmu_idx, index2, tlb_off, page2)) { 20008f7cd2adSRichard Henderson tlb_fill(env_cpu(env), page2, size2, MMU_DATA_STORE, 2001eed56642SAlex Bennée mmu_idx, retaddr); 200250b107c5SRichard Henderson index2 = tlb_index(env, mmu_idx, page2); 200350b107c5SRichard Henderson entry2 = tlb_entry(env, mmu_idx, page2); 200450b107c5SRichard Henderson } 200550b107c5SRichard Henderson tlb_addr2 = tlb_addr_write(entry2); 200650b107c5SRichard Henderson } 200750b107c5SRichard Henderson 200850b107c5SRichard Henderson /* 200950b107c5SRichard Henderson * Handle watchpoints. Since this may trap, all checks 201050b107c5SRichard Henderson * must happen before any store. 201150b107c5SRichard Henderson */ 201250b107c5SRichard Henderson if (unlikely(tlb_addr & TLB_WATCHPOINT)) { 201350b107c5SRichard Henderson cpu_check_watchpoint(env_cpu(env), addr, size - size2, 201450b107c5SRichard Henderson env_tlb(env)->d[mmu_idx].iotlb[index].attrs, 201550b107c5SRichard Henderson BP_MEM_WRITE, retaddr); 201650b107c5SRichard Henderson } 201750b107c5SRichard Henderson if (unlikely(tlb_addr2 & TLB_WATCHPOINT)) { 201850b107c5SRichard Henderson cpu_check_watchpoint(env_cpu(env), page2, size2, 201950b107c5SRichard Henderson env_tlb(env)->d[mmu_idx].iotlb[index2].attrs, 202050b107c5SRichard Henderson BP_MEM_WRITE, retaddr); 2021eed56642SAlex Bennée } 2022eed56642SAlex Bennée 2023eed56642SAlex Bennée /* 2024eed56642SAlex Bennée * XXX: not efficient, but simple. 2025eed56642SAlex Bennée * This loop must go in the forward direction to avoid issues 2026eed56642SAlex Bennée * with self-modifying code in Windows 64-bit. 2027eed56642SAlex Bennée */ 2028eed56642SAlex Bennée for (i = 0; i < size; ++i) { 2029eed56642SAlex Bennée uint8_t val8; 2030be5c4787STony Nguyen if (memop_big_endian(op)) { 2031eed56642SAlex Bennée /* Big-endian extract. 
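   Editor's worked case: for a 4-byte store, i == 0 yields val >> 24,
   putting the most-significant byte at the lowest address, down to
   i == 3 yielding val >> 0 for the least-significant byte.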
*/ 2032eed56642SAlex Bennée val8 = val >> (((size - 1) * 8) - (i * 8)); 2033eed56642SAlex Bennée } else { 2034eed56642SAlex Bennée /* Little-endian extract. */ 2035eed56642SAlex Bennée val8 = val >> (i * 8); 2036eed56642SAlex Bennée } 20374601f8d1SRichard Henderson helper_ret_stb_mmu(env, addr + i, val8, oi, retaddr); 2038eed56642SAlex Bennée } 2039eed56642SAlex Bennée return; 2040eed56642SAlex Bennée } 2041eed56642SAlex Bennée 2042eed56642SAlex Bennée haddr = (void *)((uintptr_t)addr + entry->addend); 204380d9d1c6SRichard Henderson store_memop(haddr, val, op); 2044eed56642SAlex Bennée } 2045eed56642SAlex Bennée 2046fc1bc777SRichard Henderson void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val, 2047eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 2048eed56642SAlex Bennée { 2049be5c4787STony Nguyen store_helper(env, addr, val, oi, retaddr, MO_UB); 2050eed56642SAlex Bennée } 2051eed56642SAlex Bennée 2052fc1bc777SRichard Henderson void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val, 2053eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 2054eed56642SAlex Bennée { 2055be5c4787STony Nguyen store_helper(env, addr, val, oi, retaddr, MO_LEUW); 2056eed56642SAlex Bennée } 2057eed56642SAlex Bennée 2058fc1bc777SRichard Henderson void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val, 2059eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 2060eed56642SAlex Bennée { 2061be5c4787STony Nguyen store_helper(env, addr, val, oi, retaddr, MO_BEUW); 2062eed56642SAlex Bennée } 2063eed56642SAlex Bennée 2064fc1bc777SRichard Henderson void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, 2065eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 2066eed56642SAlex Bennée { 2067be5c4787STony Nguyen store_helper(env, addr, val, oi, retaddr, MO_LEUL); 2068eed56642SAlex Bennée } 2069eed56642SAlex Bennée 2070fc1bc777SRichard Henderson void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, 2071eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 2072eed56642SAlex Bennée { 2073be5c4787STony Nguyen store_helper(env, addr, val, oi, retaddr, MO_BEUL); 2074eed56642SAlex Bennée } 2075eed56642SAlex Bennée 2076fc1bc777SRichard Henderson void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, 2077eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 2078eed56642SAlex Bennée { 2079be5c4787STony Nguyen store_helper(env, addr, val, oi, retaddr, MO_LEQ); 2080eed56642SAlex Bennée } 2081eed56642SAlex Bennée 2082fc1bc777SRichard Henderson void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, 2083eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 2084eed56642SAlex Bennée { 2085be5c4787STony Nguyen store_helper(env, addr, val, oi, retaddr, MO_BEQ); 2086eed56642SAlex Bennée } 2087d9bb58e5SYang Zhong 2088d03f1408SRichard Henderson /* 2089d03f1408SRichard Henderson * Store Helpers for cpu_ldst.h 2090d03f1408SRichard Henderson */ 2091d03f1408SRichard Henderson 2092d03f1408SRichard Henderson static inline void QEMU_ALWAYS_INLINE 2093d03f1408SRichard Henderson cpu_store_helper(CPUArchState *env, target_ulong addr, uint64_t val, 2094d03f1408SRichard Henderson int mmu_idx, uintptr_t retaddr, MemOp op) 2095d03f1408SRichard Henderson { 2096d03f1408SRichard Henderson TCGMemOpIdx oi; 2097d03f1408SRichard Henderson uint16_t meminfo; 2098d03f1408SRichard Henderson 2099d03f1408SRichard Henderson meminfo = trace_mem_get_info(op, mmu_idx, true); 2100d03f1408SRichard Henderson 
trace_guest_mem_before_exec(env_cpu(env), addr, meminfo); 2101d03f1408SRichard Henderson 2102d03f1408SRichard Henderson oi = make_memop_idx(op, mmu_idx); 2103d03f1408SRichard Henderson store_helper(env, addr, val, oi, retaddr, op); 2104d03f1408SRichard Henderson 2105d03f1408SRichard Henderson qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, meminfo); 2106d03f1408SRichard Henderson } 2107d03f1408SRichard Henderson 2108d03f1408SRichard Henderson void cpu_stb_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val, 2109d03f1408SRichard Henderson int mmu_idx, uintptr_t retaddr) 2110d03f1408SRichard Henderson { 2111d03f1408SRichard Henderson cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_UB); 2112d03f1408SRichard Henderson } 2113d03f1408SRichard Henderson 2114d03f1408SRichard Henderson void cpu_stw_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val, 2115d03f1408SRichard Henderson int mmu_idx, uintptr_t retaddr) 2116d03f1408SRichard Henderson { 2117d03f1408SRichard Henderson cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_TEUW); 2118d03f1408SRichard Henderson } 2119d03f1408SRichard Henderson 2120d03f1408SRichard Henderson void cpu_stl_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val, 2121d03f1408SRichard Henderson int mmu_idx, uintptr_t retaddr) 2122d03f1408SRichard Henderson { 2123d03f1408SRichard Henderson cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_TEUL); 2124d03f1408SRichard Henderson } 2125d03f1408SRichard Henderson 2126d03f1408SRichard Henderson void cpu_stq_mmuidx_ra(CPUArchState *env, target_ulong addr, uint64_t val, 2127d03f1408SRichard Henderson int mmu_idx, uintptr_t retaddr) 2128d03f1408SRichard Henderson { 2129d03f1408SRichard Henderson cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_TEQ); 2130d03f1408SRichard Henderson } 2131d03f1408SRichard Henderson 2132cfe04a4bSRichard Henderson void cpu_stb_data_ra(CPUArchState *env, target_ulong ptr, 2133cfe04a4bSRichard Henderson uint32_t val, uintptr_t retaddr) 2134cfe04a4bSRichard Henderson { 2135cfe04a4bSRichard Henderson cpu_stb_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr); 2136cfe04a4bSRichard Henderson } 2137cfe04a4bSRichard Henderson 2138cfe04a4bSRichard Henderson void cpu_stw_data_ra(CPUArchState *env, target_ulong ptr, 2139cfe04a4bSRichard Henderson uint32_t val, uintptr_t retaddr) 2140cfe04a4bSRichard Henderson { 2141cfe04a4bSRichard Henderson cpu_stw_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr); 2142cfe04a4bSRichard Henderson } 2143cfe04a4bSRichard Henderson 2144cfe04a4bSRichard Henderson void cpu_stl_data_ra(CPUArchState *env, target_ulong ptr, 2145cfe04a4bSRichard Henderson uint32_t val, uintptr_t retaddr) 2146cfe04a4bSRichard Henderson { 2147cfe04a4bSRichard Henderson cpu_stl_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr); 2148cfe04a4bSRichard Henderson } 2149cfe04a4bSRichard Henderson 2150cfe04a4bSRichard Henderson void cpu_stq_data_ra(CPUArchState *env, target_ulong ptr, 2151cfe04a4bSRichard Henderson uint64_t val, uintptr_t retaddr) 2152cfe04a4bSRichard Henderson { 2153cfe04a4bSRichard Henderson cpu_stq_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr); 2154cfe04a4bSRichard Henderson } 2155cfe04a4bSRichard Henderson 2156cfe04a4bSRichard Henderson void cpu_stb_data(CPUArchState *env, target_ulong ptr, uint32_t val) 2157cfe04a4bSRichard Henderson { 2158cfe04a4bSRichard Henderson cpu_stb_data_ra(env, ptr, val, 0); 2159cfe04a4bSRichard Henderson } 2160cfe04a4bSRichard Henderson 2161cfe04a4bSRichard Henderson void 
/* First set of helpers allows passing in of OI and RETADDR.  This makes
   them callable from other helpers.  */

#define EXTRA_ARGS     , TCGMemOpIdx oi, uintptr_t retaddr
#define ATOMIC_NAME(X) \
    HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
#define ATOMIC_MMU_DECLS
#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, retaddr)
#define ATOMIC_MMU_CLEANUP
#define ATOMIC_MMU_IDX   get_mmuidx(oi)

#include "atomic_common.inc.c"

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#if HAVE_CMPXCHG128 || HAVE_ATOMIC128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif

/* Second set of helpers are directly callable from TCG as helpers.  */

#undef EXTRA_ARGS
#undef ATOMIC_NAME
#undef ATOMIC_MMU_LOOKUP
#define EXTRA_ARGS         , TCGMemOpIdx oi
#define ATOMIC_NAME(X)     HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, GETPC())

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif
#undef ATOMIC_MMU_IDX
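
/*
 * For reference (names as generated by atomic_template.h; see that
 * header for the authoritative list): the DATA_SIZE 4 expansions
 * above produce, among others, a first-set helper
 *     helper_atomic_cmpxchgl_le_mmu(env, addr, cmpv, newv, oi, retaddr)
 * callable from other helpers with an explicit return address, and a
 * second-set helper
 *     helper_atomic_cmpxchgl_le(env, addr, cmpv, newv, oi)
 * callable directly from TCG, which obtains the return address itself
 * via the GETPC() in the second ATOMIC_MMU_LOOKUP definition.
 */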
/* Code access functions.  */

static uint64_t full_ldub_code(CPUArchState *env, target_ulong addr,
                               TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_8, true, full_ldub_code);
}

uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr)
{
    TCGMemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(env, true));
    return full_ldub_code(env, addr, oi, 0);
}

static uint64_t full_lduw_code(CPUArchState *env, target_ulong addr,
                               TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_TEUW, true, full_lduw_code);
}

uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr)
{
    TCGMemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(env, true));
    return full_lduw_code(env, addr, oi, 0);
}

static uint64_t full_ldl_code(CPUArchState *env, target_ulong addr,
                              TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_TEUL, true, full_ldl_code);
}

uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr)
{
    TCGMemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, true));
    return full_ldl_code(env, addr, oi, 0);
}

static uint64_t full_ldq_code(CPUArchState *env, target_ulong addr,
                              TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_TEQ, true, full_ldq_code);
}

uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr)
{
    TCGMemOpIdx oi = make_memop_idx(MO_TEQ, cpu_mmu_index(env, true));
    return full_ldq_code(env, addr, oi, 0);
}
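
/*
 * Usage sketch (hypothetical, not part of this file): a target's
 * translator typically fetches guest instructions through these
 * helpers, e.g.
 *
 *     uint32_t insn = cpu_ldl_code(env, dc->pc);
 *
 * Note these use the instruction-fetch MMU index, cpu_mmu_index(env,
 * true), rather than the data index used by the cpu_st*_data helpers
 * above.
 */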