/*
 * Common CPU TLB handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
#include "qemu/error-report.h"
#include "exec/log.h"
#include "exec/helper-proto.h"
#include "qemu/atomic.h"
#include "qemu/atomic128.h"
#include "translate-all.h"

/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif

#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)

#define assert_cpu_is_self(cpu) do {                              \
        if (DEBUG_TLB_GATE) {                                     \
            g_assert(!(cpu)->created || qemu_cpu_is_self(cpu));   \
        }                                                         \
    } while (0)
/* run_on_cpu_data.target_ptr should always be big enough for a
 * target_ulong even on 32 bit builds */
QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));

/* We currently can't handle more than 16 bits in the MMUIDX bitmask.
 */
QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
#define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)

static inline size_t sizeof_tlb(CPUArchState *env, uintptr_t mmu_idx)
{
    return env_tlb(env)->f[mmu_idx].mask + (1 << CPU_TLB_ENTRY_BITS);
}

static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
                             size_t max_entries)
{
    desc->window_begin_ns = ns;
    desc->window_max_entries = max_entries;
}

static void tlb_dyn_init(CPUArchState *env)
{
    int i;

    for (i = 0; i < NB_MMU_MODES; i++) {
        CPUTLBDesc *desc = &env_tlb(env)->d[i];
        size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;

        tlb_window_reset(desc, get_clock_realtime(), 0);
        desc->n_used_entries = 0;
        env_tlb(env)->f[i].mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
        env_tlb(env)->f[i].table = g_new(CPUTLBEntry, n_entries);
        env_tlb(env)->d[i].iotlb = g_new(CPUIOTLBEntry, n_entries);
    }
}
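/*
 * Layout note (a sketch, not the authoritative definition; see
 * tlb_index()/tlb_entry() in "exec/cpu_ldst.h"): f[mmu_idx].mask is
 * stored pre-shifted by CPU_TLB_ENTRY_BITS, i.e. as a byte mask over
 * the table, so the lookup fast path can compute, in effect,
 *
 *     ofs   = (addr >> (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS)) & mask;
 *     entry = (CPUTLBEntry *)((uintptr_t)f[mmu_idx].table + ofs);
 *
 * without a separate index-to-offset shift.  sizeof_tlb() above
 * recovers the table size in bytes from the same field.
 */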
/**
 * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
 * @env: CPU that owns the TLB
 * @mmu_idx: MMU index of the TLB
 *
 * Called with tlb_lock_held.
 *
 * We have two main constraints when resizing a TLB: (1) we only resize it
 * on a TLB flush (otherwise we'd have to take a perf hit by either rehashing
 * the array or unnecessarily flushing it), which means we do not control how
 * frequently the resizing can occur; (2) we don't have access to the guest's
 * future scheduling decisions, and therefore have to decide the magnitude of
 * the resize based on past observations.
 *
 * In general, a memory-hungry process can benefit greatly from an appropriately
 * sized TLB, since a guest TLB miss is very expensive. This doesn't mean that
 * we just have to make the TLB as large as possible; while an oversized TLB
 * results in minimal TLB miss rates, it also takes longer to be flushed
 * (flushes can be _very_ frequent), and the reduced locality can also hurt
 * performance.
 *
 * To achieve near-optimal performance for all kinds of workloads, we:
 *
 * 1. Aggressively increase the size of the TLB when the use rate of the
 * TLB being flushed is high, since it is likely that in the near future this
 * memory-hungry process will execute again, and its memory hungriness will
 * probably be similar.
 *
 * 2. Slowly reduce the size of the TLB as the use rate declines over a
 * reasonably large time window. The rationale is that if in such a time window
 * we have not observed a high TLB use rate, it is likely that we won't observe
 * it in the near future. In that case, once a time window expires we downsize
 * the TLB to match the maximum use rate observed in the window.
 *
 * 3. Try to keep the maximum use rate in a time window in the 30-70% range,
 * since in that range performance is likely near-optimal. Recall that the TLB
 * is direct mapped, so we want the use rate to be low (or at least not too
 * high), since otherwise we are likely to have a significant amount of
 * conflict misses.
 */
static void tlb_mmu_resize_locked(CPUArchState *env, int mmu_idx)
{
    CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
    size_t old_size = tlb_n_entries(env, mmu_idx);
    size_t rate;
    size_t new_size = old_size;
    int64_t now = get_clock_realtime();
    int64_t window_len_ms = 100;
    int64_t window_len_ns = window_len_ms * 1000 * 1000;
    bool window_expired = now > desc->window_begin_ns + window_len_ns;

    if (desc->n_used_entries > desc->window_max_entries) {
        desc->window_max_entries = desc->n_used_entries;
    }
    rate = desc->window_max_entries * 100 / old_size;

    if (rate > 70) {
        new_size = MIN(old_size << 1, 1 << CPU_TLB_DYN_MAX_BITS);
    } else if (rate < 30 && window_expired) {
        size_t ceil = pow2ceil(desc->window_max_entries);
        size_t expected_rate = desc->window_max_entries * 100 / ceil;

        /*
         * Avoid undersizing when the max number of entries seen is just below
         * a pow2. For instance, if max_entries == 1025, the expected use rate
         * would be 1025/2048==50%. However, if max_entries == 1023, we'd get
         * 1023/1024==99.9% use rate, so we'd likely end up doubling the size
         * later. Thus, make sure that the expected use rate remains below 70%.
         * (and since we double the size, that means the lowest rate we'd
         * expect to get is 35%, which is still in the 30-70% range where
         * we consider that the size is appropriate.)
         */
        if (expected_rate > 70) {
            ceil *= 2;
        }
        new_size = MAX(ceil, 1 << CPU_TLB_DYN_MIN_BITS);
    }

    if (new_size == old_size) {
        if (window_expired) {
            tlb_window_reset(desc, now, desc->n_used_entries);
        }
        return;
    }

    g_free(env_tlb(env)->f[mmu_idx].table);
    g_free(env_tlb(env)->d[mmu_idx].iotlb);

    tlb_window_reset(desc, now, 0);
    /* desc->n_used_entries is cleared by the caller */
    env_tlb(env)->f[mmu_idx].mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
    env_tlb(env)->f[mmu_idx].table = g_try_new(CPUTLBEntry, new_size);
    env_tlb(env)->d[mmu_idx].iotlb = g_try_new(CPUIOTLBEntry, new_size);
    /*
     * If the allocations fail, try smaller sizes. We just freed some
     * memory, so going back to half of new_size has a good chance of working.
     * Increased memory pressure elsewhere in the system might cause the
     * allocations to fail though, so we progressively reduce the allocation
     * size, aborting if we cannot even allocate the smallest TLB we support.
     */
    while (env_tlb(env)->f[mmu_idx].table == NULL ||
           env_tlb(env)->d[mmu_idx].iotlb == NULL) {
        if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
            error_report("%s: %s", __func__, strerror(errno));
            abort();
        }
        new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS);
        env_tlb(env)->f[mmu_idx].mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;

        g_free(env_tlb(env)->f[mmu_idx].table);
        g_free(env_tlb(env)->d[mmu_idx].iotlb);
        env_tlb(env)->f[mmu_idx].table = g_try_new(CPUTLBEntry, new_size);
        env_tlb(env)->d[mmu_idx].iotlb = g_try_new(CPUIOTLBEntry, new_size);
    }
}
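/*
 * Worked example of the heuristic above (numbers invented for
 * illustration): with a 1024-entry TLB, window_max_entries == 820 gives
 * rate == 80 (> 70), so the flush doubles the table to 2048 entries.
 * If instead a 100 ms window expires with window_max_entries == 200,
 * rate == 19 (< 30); pow2ceil(200) == 256, but 200 * 100 / 256 == 78
 * (> 70), so ceil is doubled and the table shrinks to 512 entries,
 * putting the expected use rate at 39%, inside the 30-70% target range.
 */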
static inline void tlb_table_flush_by_mmuidx(CPUArchState *env, int mmu_idx)
{
    tlb_mmu_resize_locked(env, mmu_idx);
    memset(env_tlb(env)->f[mmu_idx].table, -1, sizeof_tlb(env, mmu_idx));
    env_tlb(env)->d[mmu_idx].n_used_entries = 0;
}

static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
{
    env_tlb(env)->d[mmu_idx].n_used_entries++;
}

static inline void tlb_n_used_entries_dec(CPUArchState *env, uintptr_t mmu_idx)
{
    env_tlb(env)->d[mmu_idx].n_used_entries--;
}

void tlb_init(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;

    qemu_spin_init(&env_tlb(env)->c.lock);

    /* Ensure that cpu_reset performs a full flush.  */
    env_tlb(env)->c.dirty = ALL_MMUIDX_BITS;

    tlb_dyn_init(env);
}

/* flush_all_helper: run fn across all cpus
 *
 * If the wait flag is set then the src cpu's helper will be queued as
 * "safe" work and the loop exited creating a synchronisation point
 * where all queued work will be finished before execution starts
 * again.
 */
static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
                             run_on_cpu_data d)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, fn, d);
        }
    }
}

void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
{
    CPUState *cpu;
    size_t full = 0, part = 0, elide = 0;

    CPU_FOREACH(cpu) {
        CPUArchState *env = cpu->env_ptr;

        full += atomic_read(&env_tlb(env)->c.full_flush_count);
        part += atomic_read(&env_tlb(env)->c.part_flush_count);
        elide += atomic_read(&env_tlb(env)->c.elide_flush_count);
    }
    *pfull = full;
    *ppart = part;
    *pelide = elide;
}
static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
{
    tlb_table_flush_by_mmuidx(env, mmu_idx);
    env_tlb(env)->d[mmu_idx].large_page_addr = -1;
    env_tlb(env)->d[mmu_idx].large_page_mask = -1;
    env_tlb(env)->d[mmu_idx].vindex = 0;
    memset(env_tlb(env)->d[mmu_idx].vtable, -1,
           sizeof(env_tlb(env)->d[0].vtable));
}

static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    uint16_t asked = data.host_int;
    uint16_t all_dirty, work, to_clean;

    assert_cpu_is_self(cpu);

    tlb_debug("mmu_idx:0x%04" PRIx16 "\n", asked);

    qemu_spin_lock(&env_tlb(env)->c.lock);

    all_dirty = env_tlb(env)->c.dirty;
    to_clean = asked & all_dirty;
    all_dirty &= ~to_clean;
    env_tlb(env)->c.dirty = all_dirty;

    for (work = to_clean; work != 0; work &= work - 1) {
        int mmu_idx = ctz32(work);
        tlb_flush_one_mmuidx_locked(env, mmu_idx);
    }

    qemu_spin_unlock(&env_tlb(env)->c.lock);

    cpu_tb_jmp_cache_clear(cpu);

    if (to_clean == ALL_MMUIDX_BITS) {
        atomic_set(&env_tlb(env)->c.full_flush_count,
                   env_tlb(env)->c.full_flush_count + 1);
    } else {
        atomic_set(&env_tlb(env)->c.part_flush_count,
                   env_tlb(env)->c.part_flush_count + ctpop16(to_clean));
        if (to_clean != asked) {
            atomic_set(&env_tlb(env)->c.elide_flush_count,
                       env_tlb(env)->c.elide_flush_count +
                       ctpop16(asked & ~to_clean));
        }
    }
}

void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
    tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);

    if (cpu->created && !qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
                         RUN_ON_CPU_HOST_INT(idxmap));
    } else {
        tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap));
    }
}

void tlb_flush(CPUState *cpu)
{
    tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS);
}
void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus(src_cpu, ALL_MMUIDX_BITS);
}

void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS);
}
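/*
 * Usage sketch (hypothetical target code, not from this file): a target
 * whose kernel-mode translation regime just changed might issue
 *
 *     tlb_flush_by_mmuidx(cs, 1 << MMU_KERNEL_IDX);
 *
 * where MMU_KERNEL_IDX is a made-up, target-defined MMU index.  The
 * *_all_cpus variants broadcast the same flush asynchronously to every
 * other vCPU; the *_all_cpus_synced variants additionally queue the
 * source CPU's flush as "safe" work, creating a synchronisation point
 * where all queued work finishes before execution starts again.
 */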
static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry,
                                        target_ulong page)
{
    return tlb_hit_page(tlb_entry->addr_read, page) ||
           tlb_hit_page(tlb_addr_write(tlb_entry), page) ||
           tlb_hit_page(tlb_entry->addr_code, page);
}

/**
 * tlb_entry_is_empty - return true if the entry is not in use
 * @te: pointer to CPUTLBEntry
 */
static inline bool tlb_entry_is_empty(const CPUTLBEntry *te)
{
    return te->addr_read == -1 && te->addr_write == -1 && te->addr_code == -1;
}

/* Called with tlb_c.lock held */
static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry,
                                          target_ulong page)
{
    if (tlb_hit_page_anyprot(tlb_entry, page)) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
        return true;
    }
    return false;
}

/* Called with tlb_c.lock held */
static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx,
                                              target_ulong page)
{
    CPUTLBDesc *d = &env_tlb(env)->d[mmu_idx];
    int k;

    assert_cpu_is_self(env_cpu(env));
    for (k = 0; k < CPU_VTLB_SIZE; k++) {
        if (tlb_flush_entry_locked(&d->vtable[k], page)) {
            tlb_n_used_entries_dec(env, mmu_idx);
        }
    }
}

static void tlb_flush_page_locked(CPUArchState *env, int midx,
                                  target_ulong page)
{
    target_ulong lp_addr = env_tlb(env)->d[midx].large_page_addr;
    target_ulong lp_mask = env_tlb(env)->d[midx].large_page_mask;

    /* Check if we need to flush due to large pages.  */
    if ((page & lp_mask) == lp_addr) {
        tlb_debug("forcing full flush midx %d ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  midx, lp_addr, lp_mask);
        tlb_flush_one_mmuidx_locked(env, midx);
    } else {
        if (tlb_flush_entry_locked(tlb_entry(env, midx, page), page)) {
            tlb_n_used_entries_dec(env, midx);
        }
        tlb_flush_vtlb_page_locked(env, midx, page);
    }
}

/* As we are going to hijack the bottom bits of the page address for a
 * mmuidx bit mask we need to fail to build if we can't do that
 */
QEMU_BUILD_BUG_ON(NB_MMU_MODES > TARGET_PAGE_BITS_MIN);

static void tlb_flush_page_by_mmuidx_async_work(CPUState *cpu,
                                                run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    target_ulong addr_and_mmuidx = (target_ulong) data.target_ptr;
    target_ulong addr = addr_and_mmuidx & TARGET_PAGE_MASK;
    unsigned long mmu_idx_bitmap = addr_and_mmuidx & ALL_MMUIDX_BITS;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("page addr:" TARGET_FMT_lx " mmu_map:0x%lx\n",
              addr, mmu_idx_bitmap);

    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if (test_bit(mmu_idx, &mmu_idx_bitmap)) {
            tlb_flush_page_locked(env, mmu_idx, addr);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);

    tb_flush_jmp_cache(cpu, addr);
}

void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
{
    target_ulong addr_and_mmu_idx;

    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap);

    /* This should already be page aligned */
    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
    addr_and_mmu_idx |= idxmap;

    if (!qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_work,
                         RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    } else {
        tlb_flush_page_by_mmuidx_async_work(
            cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    }
}

void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
    tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS);
}

void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr,
                                       uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_page_by_mmuidx_async_work;
    target_ulong addr_and_mmu_idx;

    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
    addr_and_mmu_idx |= idxmap;

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    fn(src_cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
}

void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
{
    tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS);
}

void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                              target_ulong addr,
                                              uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_page_by_mmuidx_async_work;
    target_ulong addr_and_mmu_idx;

    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
    addr_and_mmu_idx |= idxmap;

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
}

void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr)
{
    tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
                                             DIRTY_MEMORY_CODE);
}
/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
void tlb_unprotect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}


/*
 * Dirty write flag handling
 *
 * When the TCG code writes to a location it looks up the address in
 * the TLB and uses that data to compute the final address. If any of
 * the lower bits of the address are set then the slow path is forced.
 * There are a number of reasons to do this but for normal RAM the
 * most usual is detecting writes to code regions which may invalidate
 * generated code.
 *
 * Other vCPUs might be reading their TLBs during guest execution, so we update
 * te->addr_write with atomic_set. We don't need to worry about this for
 * oversized guests as MTTCG is disabled for them.
 *
 * Called with tlb_c.lock held.
 */
static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
                                         uintptr_t start, uintptr_t length)
{
    uintptr_t addr = tlb_entry->addr_write;

    if ((addr & (TLB_INVALID_MASK | TLB_MMIO |
                 TLB_DISCARD_WRITE | TLB_NOTDIRTY)) == 0) {
        addr &= TARGET_PAGE_MASK;
        addr += tlb_entry->addend;
        if ((addr - start) < length) {
#if TCG_OVERSIZED_GUEST
            tlb_entry->addr_write |= TLB_NOTDIRTY;
#else
            atomic_set(&tlb_entry->addr_write,
                       tlb_entry->addr_write | TLB_NOTDIRTY);
#endif
        }
    }
}

/*
 * Called with tlb_c.lock held.
 * Called only from the vCPU context, i.e. the TLB's owner thread.
 */
static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s)
{
    *d = *s;
}
/* This is a cross vCPU call (i.e. another vCPU resetting the flags of
 * the target vCPU).
 * We must take tlb_c.lock to avoid racing with another vCPU update. The only
 * thing actually updated is the target TLB entry ->addr_write flags.
 */
void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
{
    CPUArchState *env;

    int mmu_idx;

    env = cpu->env_ptr;
    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        unsigned int i;
        unsigned int n = tlb_n_entries(env, mmu_idx);

        for (i = 0; i < n; i++) {
            tlb_reset_dirty_range_locked(&env_tlb(env)->f[mmu_idx].table[i],
                                         start1, length);
        }

        for (i = 0; i < CPU_VTLB_SIZE; i++) {
            tlb_reset_dirty_range_locked(&env_tlb(env)->d[mmu_idx].vtable[i],
                                         start1, length);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);
}

/* Called with tlb_c.lock held */
static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry,
                                         target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    vaddr &= TARGET_PAGE_MASK;
    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, vaddr), vaddr);
    }

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_set_dirty1_locked(&env_tlb(env)->d[mmu_idx].vtable[k], vaddr);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);
}
/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUArchState *env, int mmu_idx,
                               target_ulong vaddr, target_ulong size)
{
    target_ulong lp_addr = env_tlb(env)->d[mmu_idx].large_page_addr;
    target_ulong lp_mask = ~(size - 1);

    if (lp_addr == (target_ulong)-1) {
        /* No previous large page.  */
        lp_addr = vaddr;
    } else {
        /* Extend the existing region to include the new page.
           This is a compromise between unnecessary flushes and
           the cost of maintaining a full variable size TLB.  */
        lp_mask &= env_tlb(env)->d[mmu_idx].large_page_mask;
        while (((lp_addr ^ vaddr) & lp_mask) != 0) {
            lp_mask <<= 1;
        }
    }
    env_tlb(env)->d[mmu_idx].large_page_addr = lp_addr & lp_mask;
    env_tlb(env)->d[mmu_idx].large_page_mask = lp_mask;
}
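/*
 * Worked example (invented numbers): with 4 KiB base pages, recording a
 * 2 MiB page at 0x40200000 sets large_page_addr == 0x40200000 and
 * large_page_mask == ~0x1fffff.  Adding a second 2 MiB page at
 * 0x40600000 widens the mask until both addresses match: lp_mask
 * becomes ~0x7fffff, i.e. the 8 MiB region at 0x40000000.  From then
 * on, tlb_flush_page_locked() of any page in that region forces a full
 * flush of the mmu_idx.
 */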
/* Add a new TLB entry. At most one entry for a given virtual address
 * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
 * supplied size is only used by tlb_flush_page.
 *
 * Called from TCG-generated code, which is under an RCU read-side
 * critical section.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs, int prot,
                             int mmu_idx, target_ulong size)
{
    CPUArchState *env = cpu->env_ptr;
    CPUTLB *tlb = env_tlb(env);
    CPUTLBDesc *desc = &tlb->d[mmu_idx];
    MemoryRegionSection *section;
    unsigned int index;
    target_ulong address;
    target_ulong write_address;
    uintptr_t addend;
    CPUTLBEntry *te, tn;
    hwaddr iotlb, xlat, sz, paddr_page;
    target_ulong vaddr_page;
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    int wp_flags;
    bool is_ram, is_romd;

    assert_cpu_is_self(cpu);

    if (size <= TARGET_PAGE_SIZE) {
        sz = TARGET_PAGE_SIZE;
    } else {
        tlb_add_large_page(env, mmu_idx, vaddr, size);
        sz = size;
    }
    vaddr_page = vaddr & TARGET_PAGE_MASK;
    paddr_page = paddr & TARGET_PAGE_MASK;

    section = address_space_translate_for_iotlb(cpu, asidx, paddr_page,
                                                &xlat, &sz, attrs, &prot);
    assert(sz >= TARGET_PAGE_SIZE);

    tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
              " prot=%x idx=%d\n",
              vaddr, paddr, prot, mmu_idx);

    address = vaddr_page;
    if (size < TARGET_PAGE_SIZE) {
        /* Repeat the MMU check and TLB fill on every access.  */
        address |= TLB_INVALID_MASK;
    }
    if (attrs.byte_swap) {
        address |= TLB_BSWAP;
    }

    is_ram = memory_region_is_ram(section->mr);
    is_romd = memory_region_is_romd(section->mr);

    if (is_ram || is_romd) {
        /* RAM and ROMD both have associated host memory. */
        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
    } else {
        /* I/O does not; force the host address to NULL. */
        addend = 0;
    }

    write_address = address;
    if (is_ram) {
        iotlb = memory_region_get_ram_addr(section->mr) + xlat;
        /*
         * Computing is_clean is expensive; avoid all that unless
         * the page is actually writable.
         */
        if (prot & PAGE_WRITE) {
            if (section->readonly) {
                write_address |= TLB_DISCARD_WRITE;
            } else if (cpu_physical_memory_is_clean(iotlb)) {
                write_address |= TLB_NOTDIRTY;
            }
        }
    } else {
        /* I/O or ROMD */
        iotlb = memory_region_section_get_iotlb(cpu, section) + xlat;
        /*
         * Writes to romd devices must go through MMIO to enable write.
         * Reads to romd devices go through the ram_ptr found above,
         * but of course reads to I/O must go through MMIO.
         */
        write_address |= TLB_MMIO;
        if (!is_romd) {
            address = write_address;
        }
    }

    wp_flags = cpu_watchpoint_address_matches(cpu, vaddr_page,
                                              TARGET_PAGE_SIZE);

    index = tlb_index(env, mmu_idx, vaddr_page);
    te = tlb_entry(env, mmu_idx, vaddr_page);

    /*
     * Hold the TLB lock for the rest of the function. We could acquire/release
     * the lock several times in the function, but it is faster to amortize the
     * acquisition cost by acquiring it just once. Note that this leads to
     * a longer critical section, but this is not a concern since the TLB lock
     * is unlikely to be contended.
     */
    qemu_spin_lock(&tlb->c.lock);

    /* Note that the tlb is no longer clean.  */
    tlb->c.dirty |= 1 << mmu_idx;

    /* Make sure there's no cached translation for the new page.  */
    tlb_flush_vtlb_page_locked(env, mmu_idx, vaddr_page);

    /*
     * Only evict the old entry to the victim tlb if it's for a
     * different page; otherwise just overwrite the stale data.
     */
    if (!tlb_hit_page_anyprot(te, vaddr_page) && !tlb_entry_is_empty(te)) {
        unsigned vidx = desc->vindex++ % CPU_VTLB_SIZE;
        CPUTLBEntry *tv = &desc->vtable[vidx];

        /* Evict the old entry into the victim tlb.  */
        copy_tlb_helper_locked(tv, te);
        desc->viotlb[vidx] = desc->iotlb[index];
        tlb_n_used_entries_dec(env, mmu_idx);
    }

    /* refill the tlb */
    /*
     * At this point iotlb contains a physical section number in the lower
     * TARGET_PAGE_BITS, and either
     *  + the ram_addr_t of the page base of the target RAM (RAM)
     *  + the offset within section->mr of the page base (I/O, ROMD)
     * We subtract the vaddr_page (which is page aligned and thus won't
     * disturb the low bits) to give an offset which can be added to the
     * (non-page-aligned) vaddr of the eventual memory access to get
     * the MemoryRegion offset for the access. Note that the vaddr we
     * subtract here is that of the page base, and not the same as the
     * vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
     */
    desc->iotlb[index].addr = iotlb - vaddr_page;
    desc->iotlb[index].attrs = attrs;

    /* Now calculate the new entry */
    tn.addend = addend - vaddr_page;
    if (prot & PAGE_READ) {
        tn.addr_read = address;
        if (wp_flags & BP_MEM_READ) {
            tn.addr_read |= TLB_WATCHPOINT;
        }
    } else {
        tn.addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        tn.addr_code = address;
    } else {
        tn.addr_code = -1;
    }

    tn.addr_write = -1;
    if (prot & PAGE_WRITE) {
        tn.addr_write = write_address;
        if (prot & PAGE_WRITE_INV) {
            tn.addr_write |= TLB_INVALID_MASK;
        }
        if (wp_flags & BP_MEM_WRITE) {
            tn.addr_write |= TLB_WATCHPOINT;
        }
    }

    copy_tlb_helper_locked(te, &tn);
    tlb_n_used_entries_inc(env, mmu_idx);
    qemu_spin_unlock(&tlb->c.lock);
}

/* Add a new TLB entry, but without specifying the memory
 * transaction attributes to be used.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
                            prot, mmu_idx, size);
}
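/*
 * Usage sketch (hypothetical, simplified target code): a target's
 * tlb_fill hook (see tlb_fill() below) typically walks the guest page
 * table and, on success, installs the translation before returning:
 *
 *     tlb_set_page_with_attrs(cs, vaddr & TARGET_PAGE_MASK,
 *                             paddr & TARGET_PAGE_MASK,
 *                             MEMTXATTRS_UNSPECIFIED,
 *                             PAGE_READ | PAGE_WRITE | PAGE_EXEC,
 *                             mmu_idx, TARGET_PAGE_SIZE);
 *
 * Passing a size larger than TARGET_PAGE_SIZE only feeds the large-page
 * tracking in tlb_add_large_page(); exactly one TARGET_PAGE_SIZE entry
 * is installed either way.
 */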
static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    ram_addr = qemu_ram_addr_from_host(ptr);
    if (ram_addr == RAM_ADDR_INVALID) {
        error_report("Bad ram pointer %p", ptr);
        abort();
    }
    return ram_addr;
}

/*
 * Note: tlb_fill() can trigger a resize of the TLB. This means that all of the
 * caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must
 * be discarded and looked up again (e.g. via tlb_entry()).
 */
static void tlb_fill(CPUState *cpu, target_ulong addr, int size,
                     MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    bool ok;

    /*
     * This is not a probe, so only valid return is success; failure
     * should result in exception + longjmp to the cpu loop.
     */
    ok = cc->tlb_fill(cpu, addr, size, access_type, mmu_idx, false, retaddr);
    assert(ok);
}

static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                         int mmu_idx, target_ulong addr, uintptr_t retaddr,
                         MMUAccessType access_type, MemOp op)
{
    CPUState *cpu = env_cpu(env);
    hwaddr mr_offset;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    uint64_t val;
    bool locked = false;
    MemTxResult r;

    section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
    mr = section->mr;
    mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
    cpu->mem_io_pc = retaddr;
    if (!cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;
    cpu->mem_io_access_type = access_type;

    if (mr->global_locking && !qemu_mutex_iothread_locked()) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    r = memory_region_dispatch_read(mr, mr_offset, &val, op, iotlbentry->attrs);
    if (r != MEMTX_OK) {
        hwaddr physaddr = mr_offset +
            section->offset_within_address_space -
            section->offset_within_region;

        cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), access_type,
                               mmu_idx, iotlbentry->attrs, r, retaddr);
    }
    if (locked) {
        qemu_mutex_unlock_iothread();
    }

    return val;
}
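/*
 * Locking note (summary, not from the original comments): io_readx()
 * above and io_writex() below dispatch into device code via
 * memory_region_dispatch_{read,write}().  Devices whose MemoryRegion
 * has global_locking set (the default) may only be entered with the
 * iothread mutex held, hence the conditional lock/unlock around the
 * dispatch; regions that opted out of the BQL are entered lock-free.
 */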
static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                      int mmu_idx, uint64_t val, target_ulong addr,
                      uintptr_t retaddr, MemOp op)
{
    CPUState *cpu = env_cpu(env);
    hwaddr mr_offset;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    bool locked = false;
    MemTxResult r;

    section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
    mr = section->mr;
    mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
    if (!cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }
    cpu->mem_io_vaddr = addr;
    cpu->mem_io_pc = retaddr;

    if (mr->global_locking && !qemu_mutex_iothread_locked()) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    r = memory_region_dispatch_write(mr, mr_offset, val, op, iotlbentry->attrs);
    if (r != MEMTX_OK) {
        hwaddr physaddr = mr_offset +
            section->offset_within_address_space -
            section->offset_within_region;

        cpu_transaction_failed(cpu, physaddr, addr, memop_size(op),
                               MMU_DATA_STORE, mmu_idx, iotlbentry->attrs, r,
                               retaddr);
    }
    if (locked) {
        qemu_mutex_unlock_iothread();
    }
}

static inline target_ulong tlb_read_ofs(CPUTLBEntry *entry, size_t ofs)
{
#if TCG_OVERSIZED_GUEST
    return *(target_ulong *)((uintptr_t)entry + ofs);
#else
    /* ofs might correspond to .addr_write, so use atomic_read */
    return atomic_read((target_ulong *)((uintptr_t)entry + ofs));
#endif
}

/* Return true if ADDR is present in the victim tlb, and has been copied
   back to the main tlb.  */
static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
                           size_t elt_ofs, target_ulong page)
{
    size_t vidx;

    assert_cpu_is_self(env_cpu(env));
    for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
        CPUTLBEntry *vtlb = &env_tlb(env)->d[mmu_idx].vtable[vidx];
        target_ulong cmp;

        /* elt_ofs might correspond to .addr_write, so use atomic_read */
#if TCG_OVERSIZED_GUEST
        cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);
#else
        cmp = atomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs));
#endif

        if (cmp == page) {
            /* Found entry in victim tlb, swap tlb and iotlb.  */
            CPUTLBEntry tmptlb, *tlb = &env_tlb(env)->f[mmu_idx].table[index];

            qemu_spin_lock(&env_tlb(env)->c.lock);
            copy_tlb_helper_locked(&tmptlb, tlb);
            copy_tlb_helper_locked(tlb, vtlb);
            copy_tlb_helper_locked(vtlb, &tmptlb);
            qemu_spin_unlock(&env_tlb(env)->c.lock);

            CPUIOTLBEntry tmpio, *io = &env_tlb(env)->d[mmu_idx].iotlb[index];
            CPUIOTLBEntry *vio = &env_tlb(env)->d[mmu_idx].viotlb[vidx];
            tmpio = *io; *io = *vio; *vio = tmpio;
            return true;
        }
    }
    return false;
}

/* Macro to call the above, with local variables from the use context.  */
#define VICTIM_TLB_HIT(TY, ADDR) \
  victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
                 (ADDR) & TARGET_PAGE_MASK)
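/*
 * Typical lookup pattern (mirrors get_page_addr_code() and
 * probe_access() below): on a comparator miss, first try the victim
 * TLB, and only fall back to a full tlb_fill() if that also misses:
 *
 *     if (unlikely(!tlb_hit(tlb_addr_write(entry), addr))) {
 *         if (!VICTIM_TLB_HIT(addr_write, addr)) {
 *             tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE,
 *                      mmu_idx, retaddr);
 *             index = tlb_index(env, mmu_idx, addr);
 *             entry = tlb_entry(env, mmu_idx, addr);
 *         }
 *     }
 *
 * tlb_fill() can resize the TLB, which is why index and entry must be
 * reloaded afterwards.
 */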
static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
                           CPUIOTLBEntry *iotlbentry, uintptr_t retaddr)
{
    ram_addr_t ram_addr = mem_vaddr + iotlbentry->addr;

    trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size);

    if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
        struct page_collection *pages
            = page_collection_lock(ram_addr, ram_addr + size);

        /* We require mem_io_pc in tb_invalidate_phys_page_range.  */
        cpu->mem_io_pc = retaddr;

        tb_invalidate_phys_page_fast(pages, ram_addr, size);
        page_collection_unlock(pages);
    }

    /*
     * Set both VGA and migration bits for simplicity and to remove
     * the notdirty callback faster.
     */
    cpu_physical_memory_set_dirty_range(ram_addr, size, DIRTY_CLIENTS_NOCODE);

    /* We remove the notdirty callback only if the code has been flushed. */
    if (!cpu_physical_memory_is_clean(ram_addr)) {
        trace_memory_notdirty_set_dirty(mem_vaddr);
        tlb_set_dirty(cpu, mem_vaddr);
    }
}
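/*
 * Note on the address arithmetic above (an inference from how the iotlb
 * is filled elsewhere in this file, not stated locally): for RAM pages
 * iotlbentry->addr is stored as the page's ram_addr_t offset minus the
 * virtual page address, so adding the full virtual address of the access
 * recovers the ram_addr_t of the byte being written.
 */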
/*
 * Probe for whether the specified guest access is permitted. If it is not
 * permitted then an exception will be taken in the same way as if this
 * were a real access (and we will not return).
 * If the size is 0 or the page requires I/O access, returns NULL; otherwise,
 * returns the address of the host page similar to tlb_vaddr_to_host().
 */
void *probe_access(CPUArchState *env, target_ulong addr, int size,
                   MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
    target_ulong tlb_addr;
    size_t elt_ofs;
    int wp_access;

    g_assert(-(addr | TARGET_PAGE_MASK) >= size);

    switch (access_type) {
    case MMU_DATA_LOAD:
        elt_ofs = offsetof(CPUTLBEntry, addr_read);
        wp_access = BP_MEM_READ;
        break;
    case MMU_DATA_STORE:
        elt_ofs = offsetof(CPUTLBEntry, addr_write);
        wp_access = BP_MEM_WRITE;
        break;
    case MMU_INST_FETCH:
        elt_ofs = offsetof(CPUTLBEntry, addr_code);
        wp_access = BP_MEM_READ;
        break;
    default:
        g_assert_not_reached();
    }
    tlb_addr = tlb_read_ofs(entry, elt_ofs);

    if (unlikely(!tlb_hit(tlb_addr, addr))) {
        if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs,
                            addr & TARGET_PAGE_MASK)) {
            tlb_fill(env_cpu(env), addr, size, access_type, mmu_idx, retaddr);
            /* TLB resize via tlb_fill may have moved the entry.  */
            index = tlb_index(env, mmu_idx, addr);
            entry = tlb_entry(env, mmu_idx, addr);
        }
        tlb_addr = tlb_read_ofs(entry, elt_ofs);
    }

    if (!size) {
        return NULL;
    }

    /* Handle watchpoints.  */
    if (tlb_addr & TLB_WATCHPOINT) {
        cpu_check_watchpoint(env_cpu(env), addr, size,
                             env_tlb(env)->d[mmu_idx].iotlb[index].attrs,
                             wp_access, retaddr);
    }

    /* Reject I/O access, or other required slow-path.  */
    if (tlb_addr & (TLB_NOTDIRTY | TLB_MMIO | TLB_BSWAP | TLB_DISCARD_WRITE)) {
        return NULL;
    }

    return (void *)((uintptr_t)addr + entry->addend);
}
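/*
 * Illustrative sketch (not compiled; example_fill_bytes is a made-up
 * name): a target helper can use probe_access() to take any fault or
 * watchpoint up front and then operate on host memory directly.  Note
 * the contract asserted above: the access must not cross a page.
 */
#if 0
static void example_fill_bytes(CPUArchState *env, target_ulong addr,
                               uint8_t byte, int size, int mmu_idx,
                               uintptr_t retaddr)
{
    void *host = probe_access(env, addr, size, MMU_DATA_STORE,
                              mmu_idx, retaddr);

    if (host) {
        /* Fast path: plain, dirty RAM, already faulted in.  */
        memset(host, byte, size);
    } else {
        /* Slow path: size == 0, I/O, or a page needing per-byte handling. */
    }
}
#endif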
void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
                        MMUAccessType access_type, int mmu_idx)
{
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
    uintptr_t tlb_addr, page;
    size_t elt_ofs;

    switch (access_type) {
    case MMU_DATA_LOAD:
        elt_ofs = offsetof(CPUTLBEntry, addr_read);
        break;
    case MMU_DATA_STORE:
        elt_ofs = offsetof(CPUTLBEntry, addr_write);
        break;
    case MMU_INST_FETCH:
        elt_ofs = offsetof(CPUTLBEntry, addr_code);
        break;
    default:
        g_assert_not_reached();
    }

    page = addr & TARGET_PAGE_MASK;
    tlb_addr = tlb_read_ofs(entry, elt_ofs);

    if (!tlb_hit_page(tlb_addr, page)) {
        uintptr_t index = tlb_index(env, mmu_idx, addr);

        if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, page)) {
            CPUState *cs = env_cpu(env);
            CPUClass *cc = CPU_GET_CLASS(cs);

            if (!cc->tlb_fill(cs, addr, 0, access_type, mmu_idx, true, 0)) {
                /* Non-faulting page table read failed.  */
                return NULL;
            }

            /* TLB resize via tlb_fill may have moved the entry.  */
            entry = tlb_entry(env, mmu_idx, addr);
        }
        tlb_addr = tlb_read_ofs(entry, elt_ofs);
    }

    if (tlb_addr & ~TARGET_PAGE_MASK) {
        /* IO access */
        return NULL;
    }

    return (void *)((uintptr_t)addr + entry->addend);
}
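/*
 * Illustrative sketch (not compiled; example_peek_u32 is a made-up name):
 * unlike probe_access(), tlb_vaddr_to_host() never faults, so a NULL
 * result simply tells the caller to fall back to a normal, possibly
 * faulting, access.
 */
#if 0
static bool example_peek_u32(CPUArchState *env, abi_ptr addr,
                             int mmu_idx, uint32_t *val)
{
    void *host = tlb_vaddr_to_host(env, addr, MMU_DATA_LOAD, mmu_idx);

    if (host == NULL) {
        return false;
    }
    *val = ldl_p(host);    /* target-endian load from host memory */
    return true;
}
#endif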
/* Probe for a read-modify-write atomic operation.  Do not allow unaligned
 * operations, or io operations to proceed.  Return the host address.  */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
                               TCGMemOpIdx oi, uintptr_t retaddr)
{
    size_t mmu_idx = get_mmuidx(oi);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
    target_ulong tlb_addr = tlb_addr_write(tlbe);
    MemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    int s_bits = mop & MO_SIZE;
    void *hostaddr;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* Enforce guest required alignment.  */
    if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
        /* ??? Maybe indicate atomic op to cpu_unaligned_access */
        cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* Enforce qemu required alignment.  */
    if (unlikely(addr & ((1 << s_bits) - 1))) {
        /* We get here if guest alignment was not requested,
           or was not enforced by cpu_unaligned_access above.
           We might widen the access and emulate, but for now
           mark an exception and exit the cpu loop.  */
        goto stop_the_world;
    }

    /* Check TLB entry and enforce page permissions.  */
    if (!tlb_hit(tlb_addr, addr)) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(env_cpu(env), addr, 1 << s_bits, MMU_DATA_STORE,
                     mmu_idx, retaddr);
            index = tlb_index(env, mmu_idx, addr);
            tlbe = tlb_entry(env, mmu_idx, addr);
        }
        tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
    }

    /* Notice an IO access or a needs-MMU-lookup access */
    if (unlikely(tlb_addr & TLB_MMIO)) {
        /* There's really nothing that can be done to
           support this apart from stop-the-world.  */
        goto stop_the_world;
    }

    /* Let the guest notice RMW on a write-only page.  */
    if (unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) {
        tlb_fill(env_cpu(env), addr, 1 << s_bits, MMU_DATA_LOAD,
                 mmu_idx, retaddr);
        /* Since we don't support reads and writes to different addresses,
           and we do have the proper page loaded for write, this shouldn't
           ever return.  But just in case, handle via stop-the-world.  */
        goto stop_the_world;
    }

    hostaddr = (void *)((uintptr_t)addr + tlbe->addend);

    if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
        notdirty_write(env_cpu(env), addr, 1 << s_bits,
                       &env_tlb(env)->d[mmu_idx].iotlb[index], retaddr);
    }

    return hostaddr;

 stop_the_world:
    cpu_loop_exit_atomic(env_cpu(env), retaddr);
}
/*
 * Load Helpers
 *
 * We support two different access types. SOFTMMU_CODE_ACCESS is
 * specifically for reading instructions from system memory. It is
 * called by the translation loop and in some helpers where the code
 * is disassembled. It shouldn't be called directly by guest code.
 */

typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr,
                                TCGMemOpIdx oi, uintptr_t retaddr);

static inline uint64_t QEMU_ALWAYS_INLINE
load_memop(const void *haddr, MemOp op)
{
    switch (op) {
    case MO_UB:
        return ldub_p(haddr);
    case MO_BEUW:
        return lduw_be_p(haddr);
    case MO_LEUW:
        return lduw_le_p(haddr);
    case MO_BEUL:
        return (uint32_t)ldl_be_p(haddr);
    case MO_LEUL:
        return (uint32_t)ldl_le_p(haddr);
    case MO_BEQ:
        return ldq_be_p(haddr);
    case MO_LEQ:
        return ldq_le_p(haddr);
    default:
        qemu_build_not_reached();
    }
}
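/*
 * Note: every call to load_memop() (and store_memop() below) passes a
 * compile-time-constant MemOp, so each specialized helper folds to a
 * single host memory instruction; the qemu_build_not_reached() default
 * turns any non-constant use into a build-time error rather than a
 * runtime abort.
 */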
static inline uint64_t QEMU_ALWAYS_INLINE
load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
            uintptr_t retaddr, MemOp op, bool code_read,
            FullLoadHelper *full_load)
{
    uintptr_t mmu_idx = get_mmuidx(oi);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
    target_ulong tlb_addr = code_read ? entry->addr_code : entry->addr_read;
    const size_t tlb_off = code_read ?
        offsetof(CPUTLBEntry, addr_code) : offsetof(CPUTLBEntry, addr_read);
    const MMUAccessType access_type =
        code_read ? MMU_INST_FETCH : MMU_DATA_LOAD;
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    void *haddr;
    uint64_t res;
    size_t size = memop_size(op);

    /* Handle CPU specific unaligned behaviour */
    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(env_cpu(env), addr, access_type,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again.  */
    if (!tlb_hit(tlb_addr, addr)) {
        if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
                            addr & TARGET_PAGE_MASK)) {
            tlb_fill(env_cpu(env), addr, size,
                     access_type, mmu_idx, retaddr);
            index = tlb_index(env, mmu_idx, addr);
            entry = tlb_entry(env, mmu_idx, addr);
        }
        tlb_addr = code_read ? entry->addr_code : entry->addr_read;
        tlb_addr &= ~TLB_INVALID_MASK;
    }

    /* Handle anything that isn't just a straight memory access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUIOTLBEntry *iotlbentry;
        bool need_swap;

        /* For anything that is unaligned, recurse through full_load.  */
        if ((addr & (size - 1)) != 0) {
            goto do_unaligned_access;
        }

        iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];

        /* Handle watchpoints.  */
        if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
            /* On watchpoint hit, this will longjmp out.  */
            cpu_check_watchpoint(env_cpu(env), addr, size,
                                 iotlbentry->attrs, BP_MEM_READ, retaddr);
        }

        need_swap = size > 1 && (tlb_addr & TLB_BSWAP);

        /* Handle I/O access.  */
        if (likely(tlb_addr & TLB_MMIO)) {
            return io_readx(env, iotlbentry, mmu_idx, addr, retaddr,
                            access_type, op ^ (need_swap * MO_BSWAP));
        }

        haddr = (void *)((uintptr_t)addr + entry->addend);

        /*
         * Keep these two load_memop separate to ensure that the compiler
         * is able to fold the entire function to a single instruction.
         * There is a build-time assert inside to remind you of this.  ;-)
         */
        if (unlikely(need_swap)) {
            return load_memop(haddr, op ^ MO_BSWAP);
        }
        return load_memop(haddr, op);
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (size > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        uint64_t r1, r2;
        unsigned shift;
    do_unaligned_access:
        addr1 = addr & ~((target_ulong)size - 1);
        addr2 = addr1 + size;
        r1 = full_load(env, addr1, oi, retaddr);
        r2 = full_load(env, addr2, oi, retaddr);
        shift = (addr & (size - 1)) * 8;

        if (memop_big_endian(op)) {
            /* Big-endian combine.  */
            res = (r1 << shift) | (r2 >> ((size * 8) - shift));
        } else {
            /* Little-endian combine.  */
            res = (r1 >> shift) | (r2 << ((size * 8) - shift));
        }
        return res & MAKE_64BIT_MASK(0, size * 8);
    }

    haddr = (void *)((uintptr_t)addr + entry->addend);
    return load_memop(haddr, op);
}
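/*
 * Worked example of the combine above: a little-endian 4-byte load at
 * addr with (addr & 3) == 2 gives shift = 16, addr1 = addr - 2 and
 * addr2 = addr1 + 4.  The result takes the top two bytes of r1
 * (r1 >> 16) as its low half and the bottom two bytes of r2 (r2 << 16)
 * as its high half, then masks the value down to 32 bits.
 */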
/*
 * For the benefit of TCG generated code, we want to avoid the
 * complication of ABI-specific return type promotion and always
 * return a value extended to the register size of the host. This is
 * tcg_target_long, except in the case of a 32-bit host and 64-bit
 * data, and for that we always have uint64_t.
 *
 * We don't bother with this widened value for SOFTMMU_CODE_ACCESS.
 */

static uint64_t full_ldub_mmu(CPUArchState *env, target_ulong addr,
                              TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_UB, false, full_ldub_mmu);
}

tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
                                     TCGMemOpIdx oi, uintptr_t retaddr)
{
    return full_ldub_mmu(env, addr, oi, retaddr);
}

static uint64_t full_le_lduw_mmu(CPUArchState *env, target_ulong addr,
                                 TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_LEUW, false,
                       full_le_lduw_mmu);
}

tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr)
{
    return full_le_lduw_mmu(env, addr, oi, retaddr);
}

static uint64_t full_be_lduw_mmu(CPUArchState *env, target_ulong addr,
                                 TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_BEUW, false,
                       full_be_lduw_mmu);
}

tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr)
{
    return full_be_lduw_mmu(env, addr, oi, retaddr);
}
static uint64_t full_le_ldul_mmu(CPUArchState *env, target_ulong addr,
                                 TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_LEUL, false,
                       full_le_ldul_mmu);
}

tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr)
{
    return full_le_ldul_mmu(env, addr, oi, retaddr);
}

static uint64_t full_be_ldul_mmu(CPUArchState *env, target_ulong addr,
                                 TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_BEUL, false,
                       full_be_ldul_mmu);
}

tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr)
{
    return full_be_ldul_mmu(env, addr, oi, retaddr);
}
uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
                           TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_LEQ, false,
                       helper_le_ldq_mmu);
}

uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
                           TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_BEQ, false,
                       helper_be_ldq_mmu);
}

/*
 * Provide signed versions of the load routines as well.  We can of course
 * avoid this for 64-bit data, or for 32-bit data on 32-bit host.
 */


tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
                                     TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (int8_t)helper_ret_ldub_mmu(env, addr, oi, retaddr);
}

tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (int16_t)helper_le_lduw_mmu(env, addr, oi, retaddr);
}

tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (int16_t)helper_be_lduw_mmu(env, addr, oi, retaddr);
}

tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (int32_t)helper_le_ldul_mmu(env, addr, oi, retaddr);
}

tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (int32_t)helper_be_ldul_mmu(env, addr, oi, retaddr);
}
/*
 * Store Helpers
 */

static inline void QEMU_ALWAYS_INLINE
store_memop(void *haddr, uint64_t val, MemOp op)
{
    switch (op) {
    case MO_UB:
        stb_p(haddr, val);
        break;
    case MO_BEUW:
        stw_be_p(haddr, val);
        break;
    case MO_LEUW:
        stw_le_p(haddr, val);
        break;
    case MO_BEUL:
        stl_be_p(haddr, val);
        break;
    case MO_LEUL:
        stl_le_p(haddr, val);
        break;
    case MO_BEQ:
        stq_be_p(haddr, val);
        break;
    case MO_LEQ:
        stq_le_p(haddr, val);
        break;
    default:
        qemu_build_not_reached();
    }
}
static inline void QEMU_ALWAYS_INLINE
store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
             TCGMemOpIdx oi, uintptr_t retaddr, MemOp op)
{
    uintptr_t mmu_idx = get_mmuidx(oi);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
    target_ulong tlb_addr = tlb_addr_write(entry);
    const size_t tlb_off = offsetof(CPUTLBEntry, addr_write);
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    void *haddr;
    size_t size = memop_size(op);

    /* Handle CPU specific unaligned behaviour */
    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again.  */
    if (!tlb_hit(tlb_addr, addr)) {
        if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
                            addr & TARGET_PAGE_MASK)) {
            tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE,
                     mmu_idx, retaddr);
            index = tlb_index(env, mmu_idx, addr);
            entry = tlb_entry(env, mmu_idx, addr);
        }
        tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK;
    }

    /* Handle anything that isn't just a straight memory access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUIOTLBEntry *iotlbentry;
        bool need_swap;

        /* For anything that is unaligned, recurse through byte stores.  */
        if ((addr & (size - 1)) != 0) {
            goto do_unaligned_access;
        }

        iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];

        /* Handle watchpoints.  */
        if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
            /* On watchpoint hit, this will longjmp out.  */
            cpu_check_watchpoint(env_cpu(env), addr, size,
                                 iotlbentry->attrs, BP_MEM_WRITE, retaddr);
        }

        need_swap = size > 1 && (tlb_addr & TLB_BSWAP);

        /* Handle I/O access.  */
        if (tlb_addr & TLB_MMIO) {
            io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr,
                      op ^ (need_swap * MO_BSWAP));
            return;
        }

        /* Ignore writes to ROM.  */
        if (unlikely(tlb_addr & TLB_DISCARD_WRITE)) {
            return;
        }

        /* Handle clean RAM pages.  */
        if (tlb_addr & TLB_NOTDIRTY) {
            notdirty_write(env_cpu(env), addr, size, iotlbentry, retaddr);
        }

        haddr = (void *)((uintptr_t)addr + entry->addend);

        /*
         * Keep these two store_memop separate to ensure that the compiler
         * is able to fold the entire function to a single instruction.
         * There is a build-time assert inside to remind you of this.  ;-)
         */
        if (unlikely(need_swap)) {
            store_memop(haddr, val, op ^ MO_BSWAP);
        } else {
            store_memop(haddr, val, op);
        }
        return;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (size > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
                    >= TARGET_PAGE_SIZE)) {
        int i;
        uintptr_t index2;
        CPUTLBEntry *entry2;
        target_ulong page2, tlb_addr2;
        size_t size2;

    do_unaligned_access:
        /*
         * Ensure the second page is in the TLB.  Note that the first page
         * is already guaranteed to be filled, and that the second page
         * cannot evict the first.
         */
        page2 = (addr + size) & TARGET_PAGE_MASK;
        size2 = (addr + size) & ~TARGET_PAGE_MASK;
        index2 = tlb_index(env, mmu_idx, page2);
        entry2 = tlb_entry(env, mmu_idx, page2);
        tlb_addr2 = tlb_addr_write(entry2);
        if (!tlb_hit_page(tlb_addr2, page2)) {
            if (!victim_tlb_hit(env, mmu_idx, index2, tlb_off, page2)) {
                tlb_fill(env_cpu(env), page2, size2, MMU_DATA_STORE,
                         mmu_idx, retaddr);
                index2 = tlb_index(env, mmu_idx, page2);
                entry2 = tlb_entry(env, mmu_idx, page2);
            }
            tlb_addr2 = tlb_addr_write(entry2);
        }

        /*
         * Handle watchpoints.  Since this may trap, all checks
         * must happen before any store.
         */
        if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
            cpu_check_watchpoint(env_cpu(env), addr, size - size2,
                                 env_tlb(env)->d[mmu_idx].iotlb[index].attrs,
                                 BP_MEM_WRITE, retaddr);
        }
        if (unlikely(tlb_addr2 & TLB_WATCHPOINT)) {
            cpu_check_watchpoint(env_cpu(env), page2, size2,
                                 env_tlb(env)->d[mmu_idx].iotlb[index2].attrs,
                                 BP_MEM_WRITE, retaddr);
        }

        /*
         * XXX: not efficient, but simple.
         * This loop must go in the forward direction to avoid issues
         * with self-modifying code in Windows 64-bit.
         */
        for (i = 0; i < size; ++i) {
            uint8_t val8;
            if (memop_big_endian(op)) {
                /* Big-endian extract.  */
                val8 = val >> (((size - 1) * 8) - (i * 8));
            } else {
                /* Little-endian extract.  */
                val8 = val >> (i * 8);
            }
            helper_ret_stb_mmu(env, addr + i, val8, oi, retaddr);
        }
        return;
    }

    haddr = (void *)((uintptr_t)addr + entry->addend);
    store_memop(haddr, val, op);
}
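/*
 * Worked example of the byte loop above: a big-endian 4-byte store of
 * val = 0x11223344 issues val8 = 0x11 at addr (i = 0, shift 24), 0x22
 * at addr + 1, and so on, so the most significant byte lands at the
 * lowest address, exactly as a single big-endian store would place it.
 */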
void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
                        TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, MO_UB);
}

void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, MO_LEUW);
}

void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, MO_BEUW);
}

void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, MO_LEUL);
}

void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, MO_BEUL);
}

void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, MO_LEQ);
}

void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, MO_BEQ);
}
/* First set of helpers allows passing in of OI and RETADDR.  This makes
   them callable from other helpers.  */

#define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr
#define ATOMIC_NAME(X) \
    HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
#define ATOMIC_MMU_DECLS
#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, retaddr)
#define ATOMIC_MMU_CLEANUP

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#if HAVE_CMPXCHG128 || HAVE_ATOMIC128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif
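/*
 * For reference (based on the definitions in atomic_template.h): with
 * DATA_SIZE 4 the template sets SUFFIX to "l" and END to "_le" or "_be",
 * so ATOMIC_NAME(cmpxchg) above expands to helper_atomic_cmpxchgl_le_mmu()
 * and friends, each taking the oi and retaddr EXTRA_ARGS and resolving
 * the guest address through ATOMIC_MMU_LOOKUP.
 */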
/* Second set of helpers are directly callable from TCG as helpers.  */

#undef EXTRA_ARGS
#undef ATOMIC_NAME
#undef ATOMIC_MMU_LOOKUP
#define EXTRA_ARGS         , TCGMemOpIdx oi
#define ATOMIC_NAME(X)     HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, GETPC())

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif
/* Code access functions.  */

static uint64_t full_ldub_cmmu(CPUArchState *env, target_ulong addr,
                               TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_8, true, full_ldub_cmmu);
}

uint8_t helper_ret_ldb_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    return full_ldub_cmmu(env, addr, oi, retaddr);
}

static uint64_t full_le_lduw_cmmu(CPUArchState *env, target_ulong addr,
                                  TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_LEUW, true,
                       full_le_lduw_cmmu);
}

uint16_t helper_le_ldw_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    return full_le_lduw_cmmu(env, addr, oi, retaddr);
}

static uint64_t full_be_lduw_cmmu(CPUArchState *env, target_ulong addr,
                                  TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_BEUW, true,
                       full_be_lduw_cmmu);
}

uint16_t helper_be_ldw_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    return full_be_lduw_cmmu(env, addr, oi, retaddr);
}

static uint64_t full_le_ldul_cmmu(CPUArchState *env, target_ulong addr,
                                  TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_LEUL, true,
                       full_le_ldul_cmmu);
}

uint32_t helper_le_ldl_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    return full_le_ldul_cmmu(env, addr, oi, retaddr);
}

static uint64_t full_be_ldul_cmmu(CPUArchState *env, target_ulong addr,
                                  TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_BEUL, true,
                       full_be_ldul_cmmu);
}
uint32_t helper_be_ldl_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    return full_be_ldul_cmmu(env, addr, oi, retaddr);
}

uint64_t helper_le_ldq_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_LEQ, true,
                       helper_le_ldq_cmmu);
}

uint64_t helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_BEQ, true,
                       helper_be_ldq_cmmu);
}
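/*
 * These _cmmu variants mirror the data-side loaders above but pass
 * code_read = true, so load_helper() consults addr_code and reports
 * faults as MMU_INST_FETCH; translators normally reach them through
 * the cpu_ld*_code() wrappers rather than calling them directly.
 */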