/*
 * Common CPU TLB handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
#include "qemu/error-report.h"
#include "exec/log.h"
#include "exec/helper-proto.h"
#include "qemu/atomic.h"
#include "qemu/atomic128.h"

/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif

#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)

#define assert_cpu_is_self(cpu) do {                              \
        if (DEBUG_TLB_GATE) {                                     \
            g_assert(!(cpu)->created || qemu_cpu_is_self(cpu));   \
        }                                                         \
    } while (0)

/* run_on_cpu_data.target_ptr should always be big enough for a
 * target_ulong even on 32 bit builds */
QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));

/* We currently can't handle more than 16 bits in the MMUIDX bitmask.
 */
QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
#define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)

static inline size_t sizeof_tlb(CPUArchState *env, uintptr_t mmu_idx)
{
    return env->tlb_mask[mmu_idx] + (1 << CPU_TLB_ENTRY_BITS);
}

static void tlb_window_reset(CPUTLBWindow *window, int64_t ns,
                             size_t max_entries)
{
    window->begin_ns = ns;
    window->max_entries = max_entries;
}

static void tlb_dyn_init(CPUArchState *env)
{
    int i;

    for (i = 0; i < NB_MMU_MODES; i++) {
        CPUTLBDesc *desc = &env->tlb_d[i];
        size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;

        tlb_window_reset(&desc->window, get_clock_realtime(), 0);
        desc->n_used_entries = 0;
        env->tlb_mask[i] = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
        env->tlb_table[i] = g_new(CPUTLBEntry, n_entries);
        env->iotlb[i] = g_new(CPUIOTLBEntry, n_entries);
    }
}

/**
 * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
 * @env: CPU that owns the TLB
 * @mmu_idx: MMU index of the TLB
 *
 * Called with tlb_c.lock held.
 *
 * We have two main constraints when resizing a TLB: (1) we only resize it
 * on a TLB flush (otherwise we'd have to take a perf hit by either rehashing
 * the array or unnecessarily flushing it), which means we do not control how
 * frequently the resizing can occur; (2) we don't have access to the guest's
 * future scheduling decisions, and therefore have to decide the magnitude of
 * the resize based on past observations.
 *
 * In general, a memory-hungry process can benefit greatly from an appropriately
 * sized TLB, since a guest TLB miss is very expensive. This doesn't mean that
 * we just have to make the TLB as large as possible; while an oversized TLB
 * results in minimal TLB miss rates, it also takes longer to be flushed
 * (flushes can be _very_ frequent), and the reduced locality can also hurt
 * performance.
 *
 * To achieve near-optimal performance for all kinds of workloads, we:
 *
 * 1. Aggressively increase the size of the TLB when the use rate of the
 * TLB being flushed is high, since it is likely that in the near future this
 * memory-hungry process will execute again, and its memory hungriness will
 * probably be similar.
 *
 * 2. Slowly reduce the size of the TLB as the use rate declines over a
 * reasonably large time window. The rationale is that if in such a time window
 * we have not observed a high TLB use rate, it is likely that we won't observe
 * it in the near future. In that case, once a time window expires we downsize
 * the TLB to match the maximum use rate observed in the window.
 *
 * 3. Try to keep the maximum use rate in a time window in the 30-70% range,
 * since in that range performance is likely near-optimal. Recall that the TLB
 * is direct mapped, so we want the use rate to be low (or at least not too
 * high), since otherwise we are likely to have a significant amount of
 * conflict misses.
 */
static void tlb_mmu_resize_locked(CPUArchState *env, int mmu_idx)
{
    CPUTLBDesc *desc = &env->tlb_d[mmu_idx];
    size_t old_size = tlb_n_entries(env, mmu_idx);
    size_t rate;
    size_t new_size = old_size;
    int64_t now = get_clock_realtime();
    int64_t window_len_ms = 100;
    int64_t window_len_ns = window_len_ms * 1000 * 1000;
    bool window_expired = now > desc->window.begin_ns + window_len_ns;

    if (desc->n_used_entries > desc->window.max_entries) {
        desc->window.max_entries = desc->n_used_entries;
    }
    rate = desc->window.max_entries * 100 / old_size;

    if (rate > 70) {
        new_size = MIN(old_size << 1, 1 << CPU_TLB_DYN_MAX_BITS);
    } else if (rate < 30 && window_expired) {
        size_t ceil = pow2ceil(desc->window.max_entries);
        size_t expected_rate = desc->window.max_entries * 100 / ceil;

        /*
         * Avoid undersizing when the max number of entries seen is just below
         * a pow2. For instance, if max_entries == 1025, the expected use rate
         * would be 1025/2048==50%. However, if max_entries == 1023, we'd get
         * 1023/1024==99.9% use rate, so we'd likely end up doubling the size
         * later. Thus, make sure that the expected use rate remains below 70%.
         * (and since we double the size, that means the lowest rate we'd
         * expect to get is 35%, which is still in the 30-70% range where
         * we consider that the size is appropriate.)
         */
        if (expected_rate > 70) {
            ceil *= 2;
        }
        new_size = MAX(ceil, 1 << CPU_TLB_DYN_MIN_BITS);
    }

    if (new_size == old_size) {
        if (window_expired) {
            tlb_window_reset(&desc->window, now, desc->n_used_entries);
        }
        return;
    }

    g_free(env->tlb_table[mmu_idx]);
    g_free(env->iotlb[mmu_idx]);

    tlb_window_reset(&desc->window, now, 0);
    /* desc->n_used_entries is cleared by the caller */
    env->tlb_mask[mmu_idx] = (new_size - 1) << CPU_TLB_ENTRY_BITS;
    env->tlb_table[mmu_idx] = g_try_new(CPUTLBEntry, new_size);
    env->iotlb[mmu_idx] = g_try_new(CPUIOTLBEntry, new_size);
    /*
     * If the allocations fail, try smaller sizes. We just freed some
     * memory, so going back to half of new_size has a good chance of working.
     * Increased memory pressure elsewhere in the system might cause the
     * allocations to fail though, so we progressively reduce the allocation
     * size, aborting if we cannot even allocate the smallest TLB we support.
     */
    while (env->tlb_table[mmu_idx] == NULL || env->iotlb[mmu_idx] == NULL) {
        if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
            error_report("%s: %s", __func__, strerror(errno));
            abort();
        }
        new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS);
        env->tlb_mask[mmu_idx] = (new_size - 1) << CPU_TLB_ENTRY_BITS;

        g_free(env->tlb_table[mmu_idx]);
        g_free(env->iotlb[mmu_idx]);
        env->tlb_table[mmu_idx] = g_try_new(CPUTLBEntry, new_size);
        env->iotlb[mmu_idx] = g_try_new(CPUIOTLBEntry, new_size);
    }
}

static inline void tlb_table_flush_by_mmuidx(CPUArchState *env, int mmu_idx)
{
    tlb_mmu_resize_locked(env, mmu_idx);
    memset(env->tlb_table[mmu_idx], -1, sizeof_tlb(env, mmu_idx));
    env->tlb_d[mmu_idx].n_used_entries = 0;
}

static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
{
    env->tlb_d[mmu_idx].n_used_entries++;
}
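/*
 * Illustrative walk-through of tlb_mmu_resize_locked() above (numbers are
 * hypothetical): with old_size == 2048 and a window maximum of 1600 used
 * entries, rate == 78, so the table doubles to 4096.  Conversely, with a
 * window maximum of 200, rate == 9; once the window expires we compute
 * ceil = pow2ceil(200) == 256 and expected_rate == 78, which exceeds 70,
 * so ceil is doubled and the table shrinks to 512 entries (~39% expected
 * use, back inside the 30-70% band).
 */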
static inline void tlb_n_used_entries_dec(CPUArchState *env, uintptr_t mmu_idx)
{
    env->tlb_d[mmu_idx].n_used_entries--;
}

void tlb_init(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;

    qemu_spin_init(&env->tlb_c.lock);

    /* Ensure that cpu_reset performs a full flush.  */
    env->tlb_c.dirty = ALL_MMUIDX_BITS;

    tlb_dyn_init(env);
}

/* flush_all_helper: run fn across all cpus
 *
 * fn is queued as asynchronous work on every CPU other than src.
 * Callers that need a synchronisation point additionally queue fn on
 * src itself as "safe" work (see the *_synced variants below), so that
 * all queued work is finished before execution starts again.
 */
static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
                             run_on_cpu_data d)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, fn, d);
        }
    }
}

void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
{
    CPUState *cpu;
    size_t full = 0, part = 0, elide = 0;

    CPU_FOREACH(cpu) {
        CPUArchState *env = cpu->env_ptr;

        full += atomic_read(&env->tlb_c.full_flush_count);
        part += atomic_read(&env->tlb_c.part_flush_count);
        elide += atomic_read(&env->tlb_c.elide_flush_count);
    }
    *pfull = full;
    *ppart = part;
    *pelide = elide;
}

static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
{
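    /*
     * Reset this mmu_idx completely: wipe the main table (possibly
     * resizing it), wipe the fixed-size victim TLB, and forget the
     * large-page and victim-index bookkeeping.
     */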
    tlb_table_flush_by_mmuidx(env, mmu_idx);
    memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0]));
    env->tlb_d[mmu_idx].large_page_addr = -1;
    env->tlb_d[mmu_idx].large_page_mask = -1;
    env->tlb_d[mmu_idx].vindex = 0;
}

static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    uint16_t asked = data.host_int;
    uint16_t all_dirty, work, to_clean;

    assert_cpu_is_self(cpu);

    tlb_debug("mmu_idx:0x%04" PRIx16 "\n", asked);

    qemu_spin_lock(&env->tlb_c.lock);

    all_dirty = env->tlb_c.dirty;
    to_clean = asked & all_dirty;
    all_dirty &= ~to_clean;
    env->tlb_c.dirty = all_dirty;

    for (work = to_clean; work != 0; work &= work - 1) {
        int mmu_idx = ctz32(work);
        tlb_flush_one_mmuidx_locked(env, mmu_idx);
    }

    qemu_spin_unlock(&env->tlb_c.lock);

    cpu_tb_jmp_cache_clear(cpu);

    if (to_clean == ALL_MMUIDX_BITS) {
        atomic_set(&env->tlb_c.full_flush_count,
                   env->tlb_c.full_flush_count + 1);
    } else {
        atomic_set(&env->tlb_c.part_flush_count,
                   env->tlb_c.part_flush_count + ctpop16(to_clean));
        if (to_clean != asked) {
            atomic_set(&env->tlb_c.elide_flush_count,
                       env->tlb_c.elide_flush_count +
                       ctpop16(asked & ~to_clean));
        }
    }
}

void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
    tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);

    if (cpu->created && !qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
                         RUN_ON_CPU_HOST_INT(idxmap));
    } else {
        tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap));
    }
}

void tlb_flush(CPUState *cpu)
{
    tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS);
}

void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

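    /*
     * Fan-out pattern shared by all *_all_cpus entry points: queue the
     * flush asynchronously on every other vCPU, then run it directly on
     * the source vCPU.
     */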
    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus(src_cpu, ALL_MMUIDX_BITS);
}

void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS);
}

static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry,
                                        target_ulong page)
{
    return tlb_hit_page(tlb_entry->addr_read, page) ||
           tlb_hit_page(tlb_addr_write(tlb_entry), page) ||
           tlb_hit_page(tlb_entry->addr_code, page);
}

/**
 * tlb_entry_is_empty - return true if the entry is not in use
 * @te: pointer to CPUTLBEntry
 */
static inline bool tlb_entry_is_empty(const CPUTLBEntry *te)
{
    return te->addr_read == -1 && te->addr_write == -1 && te->addr_code == -1;
}

/* Called with tlb_c.lock held */
static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry,
                                          target_ulong page)
{
    if (tlb_hit_page_anyprot(tlb_entry, page)) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
        return true;
    }
    return false;
}

/* Called with tlb_c.lock held */
static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx,
                                              target_ulong page)
{
    int k;

    assert_cpu_is_self(ENV_GET_CPU(env));
    for (k = 0; k < CPU_VTLB_SIZE; k++) {
        if (tlb_flush_entry_locked(&env->tlb_v_table[mmu_idx][k], page)) {
            tlb_n_used_entries_dec(env, mmu_idx);
        }
    }
}

static void tlb_flush_page_locked(CPUArchState *env, int midx,
                                  target_ulong page)
{
    target_ulong lp_addr = env->tlb_d[midx].large_page_addr;
    target_ulong lp_mask = env->tlb_d[midx].large_page_mask;

    /* Check if we need to flush due to large pages.  */
    if ((page & lp_mask) == lp_addr) {
        tlb_debug("forcing full flush midx %d ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  midx, lp_addr, lp_mask);
        tlb_flush_one_mmuidx_locked(env, midx);
    } else {
        if (tlb_flush_entry_locked(tlb_entry(env, midx, page), page)) {
            tlb_n_used_entries_dec(env, midx);
        }
        tlb_flush_vtlb_page_locked(env, midx, page);
    }
}

/* As we are going to hijack the bottom bits of the page address for a
 * mmuidx bit mask we need to fail to build if we can't do that
 */
QEMU_BUILD_BUG_ON(NB_MMU_MODES > TARGET_PAGE_BITS_MIN);

static void tlb_flush_page_by_mmuidx_async_work(CPUState *cpu,
                                                run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    target_ulong addr_and_mmuidx = (target_ulong) data.target_ptr;
    target_ulong addr = addr_and_mmuidx & TARGET_PAGE_MASK;
    unsigned long mmu_idx_bitmap = addr_and_mmuidx & ALL_MMUIDX_BITS;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("page addr:" TARGET_FMT_lx " mmu_map:0x%lx\n",
              addr, mmu_idx_bitmap);

    qemu_spin_lock(&env->tlb_c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if (test_bit(mmu_idx, &mmu_idx_bitmap)) {
            tlb_flush_page_locked(env, mmu_idx, addr);
        }
    }
    qemu_spin_unlock(&env->tlb_c.lock);

    tb_flush_jmp_cache(cpu, addr);
}

void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
{
    target_ulong addr_and_mmu_idx;

    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap);

    /* This should already be page aligned */
    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
    addr_and_mmu_idx |= idxmap;

    if (!qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_work,
                         RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    } else {
        tlb_flush_page_by_mmuidx_async_work(
            cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    }
}

void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
    tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS);
}

void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr,
                                       uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_page_by_mmuidx_async_work;
    target_ulong addr_and_mmu_idx;

    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
    addr_and_mmu_idx |= idxmap;

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    fn(src_cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
}

void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
{
    tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS);
}

void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                              target_ulong addr,
                                              uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_page_by_mmuidx_async_work;
    target_ulong addr_and_mmu_idx;

    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
    addr_and_mmu_idx |= idxmap;

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
}

void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr)
{
    tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
                                             DIRTY_MEMORY_CODE);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
void tlb_unprotect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}


/*
 * Dirty write flag handling
 *
 * When the TCG code writes to a location it looks up the address in
 * the TLB and uses that data to compute the final address. If any of
 * the lower bits of the address are set then the slow path is forced.
 * There are a number of reasons to do this but for normal RAM the
 * most usual is detecting writes to code regions which may invalidate
 * generated code.
 *
 * Other vCPUs might be reading their TLBs during guest execution, so we update
 * te->addr_write with atomic_set. We don't need to worry about this for
 * oversized guests as MTTCG is disabled for them.
 *
 * Called with tlb_c.lock held.
 */
static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
                                         uintptr_t start, uintptr_t length)
{
    uintptr_t addr = tlb_entry->addr_write;

    if ((addr & (TLB_INVALID_MASK | TLB_MMIO | TLB_NOTDIRTY)) == 0) {
        addr &= TARGET_PAGE_MASK;
        addr += tlb_entry->addend;
        if ((addr - start) < length) {
#if TCG_OVERSIZED_GUEST
            tlb_entry->addr_write |= TLB_NOTDIRTY;
#else
            atomic_set(&tlb_entry->addr_write,
                       tlb_entry->addr_write | TLB_NOTDIRTY);
#endif
        }
    }
}

/*
 * Called with tlb_c.lock held.
 * Called only from the vCPU context, i.e. the TLB's owner thread.
 */
static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s)
{
    *d = *s;
}

/* This is a cross vCPU call (i.e. another vCPU resetting the flags of
 * the target vCPU).
 * We must take tlb_c.lock to avoid racing with another vCPU update. The only
 * thing actually updated is the target TLB entry ->addr_write flags.
 */
void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
{
    CPUArchState *env;

    int mmu_idx;

    env = cpu->env_ptr;
    qemu_spin_lock(&env->tlb_c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        unsigned int i;
        unsigned int n = tlb_n_entries(env, mmu_idx);

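        /* Scan the dynamically sized main table and the fixed-size
         * victim table alike; only ->addr_write flags are updated.  */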
        for (i = 0; i < n; i++) {
            tlb_reset_dirty_range_locked(&env->tlb_table[mmu_idx][i], start1,
                                         length);
        }

        for (i = 0; i < CPU_VTLB_SIZE; i++) {
            tlb_reset_dirty_range_locked(&env->tlb_v_table[mmu_idx][i], start1,
                                         length);
        }
    }
    qemu_spin_unlock(&env->tlb_c.lock);
}

/* Called with tlb_c.lock held */
static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry,
                                         target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    vaddr &= TARGET_PAGE_MASK;
    qemu_spin_lock(&env->tlb_c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, vaddr), vaddr);
    }

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_set_dirty1_locked(&env->tlb_v_table[mmu_idx][k], vaddr);
        }
    }
    qemu_spin_unlock(&env->tlb_c.lock);
}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated. */
static void tlb_add_large_page(CPUArchState *env, int mmu_idx,
                               target_ulong vaddr, target_ulong size)
{
    target_ulong lp_addr = env->tlb_d[mmu_idx].large_page_addr;
    target_ulong lp_mask = ~(size - 1);

    if (lp_addr == (target_ulong)-1) {
        /* No previous large page.  */
        lp_addr = vaddr;
    } else {
        /* Extend the existing region to include the new page.
           This is a compromise between unnecessary flushes and
           the cost of maintaining a full variable size TLB.
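           As a hypothetical example on a 32-bit target: merging a
           tracked page at 0x40000000 with a new page at 0x40200000
           widens lp_mask until the two addresses match under it,
           ending at 0xffc00000, i.e. the 4MB region at 0x40000000.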
        */
        lp_mask &= env->tlb_d[mmu_idx].large_page_mask;
        while (((lp_addr ^ vaddr) & lp_mask) != 0) {
            lp_mask <<= 1;
        }
    }
    env->tlb_d[mmu_idx].large_page_addr = lp_addr & lp_mask;
    env->tlb_d[mmu_idx].large_page_mask = lp_mask;
}

/* Add a new TLB entry. At most one entry for a given virtual address
 * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
 * supplied size is only used by tlb_flush_page.
 *
 * Called from TCG-generated code, which is under an RCU read-side
 * critical section.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs, int prot,
                             int mmu_idx, target_ulong size)
{
    CPUArchState *env = cpu->env_ptr;
    MemoryRegionSection *section;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    uintptr_t addend;
    CPUTLBEntry *te, tn;
    hwaddr iotlb, xlat, sz, paddr_page;
    target_ulong vaddr_page;
    int asidx = cpu_asidx_from_attrs(cpu, attrs);

    assert_cpu_is_self(cpu);

    if (size <= TARGET_PAGE_SIZE) {
        sz = TARGET_PAGE_SIZE;
    } else {
        tlb_add_large_page(env, mmu_idx, vaddr, size);
        sz = size;
    }
    vaddr_page = vaddr & TARGET_PAGE_MASK;
    paddr_page = paddr & TARGET_PAGE_MASK;

    section = address_space_translate_for_iotlb(cpu, asidx, paddr_page,
                                                &xlat, &sz, attrs, &prot);
    assert(sz >= TARGET_PAGE_SIZE);

    tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
              " prot=%x idx=%d\n",
              vaddr, paddr, prot, mmu_idx);

    address = vaddr_page;
    if (size < TARGET_PAGE_SIZE) {
        /*
         * Slow-path the TLB entries; we will repeat the MMU check and TLB
         * fill on every access.
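         * (Such entries are tagged TLB_RECHECK just below and are never
         * treated as full-page cached translations.)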
         */
        address |= TLB_RECHECK;
    }
    if (!memory_region_is_ram(section->mr) &&
        !memory_region_is_romd(section->mr)) {
        /* IO memory case */
        address |= TLB_MMIO;
        addend = 0;
    } else {
        /* TLB_MMIO for rom/romd handled below */
        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
    }

    code_address = address;
    iotlb = memory_region_section_get_iotlb(cpu, section, vaddr_page,
                                            paddr_page, xlat, prot, &address);

    index = tlb_index(env, mmu_idx, vaddr_page);
    te = tlb_entry(env, mmu_idx, vaddr_page);

    /*
     * Hold the TLB lock for the rest of the function. We could acquire/release
     * the lock several times in the function, but it is faster to amortize the
     * acquisition cost by acquiring it just once. Note that this leads to
     * a longer critical section, but this is not a concern since the TLB lock
     * is unlikely to be contended.
     */
    qemu_spin_lock(&env->tlb_c.lock);

    /* Note that the tlb is no longer clean.  */
    env->tlb_c.dirty |= 1 << mmu_idx;

    /* Make sure there's no cached translation for the new page.  */
    tlb_flush_vtlb_page_locked(env, mmu_idx, vaddr_page);

    /*
     * Only evict the old entry to the victim tlb if it's for a
     * different page; otherwise just overwrite the stale data.
     */
    if (!tlb_hit_page_anyprot(te, vaddr_page) && !tlb_entry_is_empty(te)) {
        unsigned vidx = env->tlb_d[mmu_idx].vindex++ % CPU_VTLB_SIZE;
        CPUTLBEntry *tv = &env->tlb_v_table[mmu_idx][vidx];

        /* Evict the old entry into the victim tlb.  */
        copy_tlb_helper_locked(tv, te);
        env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index];
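        /* The displaced entry now lives in the victim TLB and no
         * longer counts toward the main table's occupancy.  */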
        tlb_n_used_entries_dec(env, mmu_idx);
    }

    /* refill the tlb */
    /*
     * At this point iotlb contains a physical section number in the lower
     * TARGET_PAGE_BITS, and either
     *  + the ram_addr_t of the page base of the target RAM (if NOTDIRTY or ROM)
     *  + the offset within section->mr of the page base (otherwise)
     * We subtract the vaddr_page (which is page aligned and thus won't
     * disturb the low bits) to give an offset which can be added to the
     * (non-page-aligned) vaddr of the eventual memory access to get
     * the MemoryRegion offset for the access. Note that the vaddr we
     * subtract here is that of the page base, and not the same as the
     * vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
     */
    env->iotlb[mmu_idx][index].addr = iotlb - vaddr_page;
    env->iotlb[mmu_idx][index].attrs = attrs;

    /* Now calculate the new entry */
    tn.addend = addend - vaddr_page;
    if (prot & PAGE_READ) {
        tn.addr_read = address;
    } else {
        tn.addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        tn.addr_code = code_address;
    } else {
        tn.addr_code = -1;
    }

    tn.addr_write = -1;
    if (prot & PAGE_WRITE) {
        if ((memory_region_is_ram(section->mr) && section->readonly)
            || memory_region_is_romd(section->mr)) {
            /* Write access calls the I/O callback.  */
            tn.addr_write = address | TLB_MMIO;
        } else if (memory_region_is_ram(section->mr)
                   && cpu_physical_memory_is_clean(
                       memory_region_get_ram_addr(section->mr) + xlat)) {
            tn.addr_write = address | TLB_NOTDIRTY;
        } else {
            tn.addr_write = address;
        }
        if (prot & PAGE_WRITE_INV) {
            tn.addr_write |= TLB_INVALID_MASK;
        }
    }

    copy_tlb_helper_locked(te, &tn);
    tlb_n_used_entries_inc(env, mmu_idx);
    qemu_spin_unlock(&env->tlb_c.lock);
}

/* Add a new TLB entry, but without specifying the memory
 * transaction attributes to be used.
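 *
 * Illustrative usage (not taken from any specific target): a target's
 * tlb_fill implementation typically ends with a call such as
 *
 *     tlb_set_page(cs, vaddr & TARGET_PAGE_MASK, paddr & TARGET_PAGE_MASK,
 *                  prot, mmu_idx, TARGET_PAGE_SIZE);
 *
 * Targets that care about memory transaction attributes call
 * tlb_set_page_with_attrs() directly instead.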
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
                            prot, mmu_idx, size);
}

static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    ram_addr = qemu_ram_addr_from_host(ptr);
    if (ram_addr == RAM_ADDR_INVALID) {
        error_report("Bad ram pointer %p", ptr);
        abort();
    }
    return ram_addr;
}

static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                         int mmu_idx, target_ulong addr, uintptr_t retaddr,
                         MMUAccessType access_type, int size)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr mr_offset;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    uint64_t val;
    bool locked = false;
    MemTxResult r;

    section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
    mr = section->mr;
    mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
    cpu->mem_io_pc = retaddr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;
    cpu->mem_io_access_type = access_type;

    if (mr->global_locking && !qemu_mutex_iothread_locked()) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    r = memory_region_dispatch_read(mr, mr_offset,
                                    &val, size, iotlbentry->attrs);
    if (r != MEMTX_OK) {
        hwaddr physaddr = mr_offset +
            section->offset_within_address_space -
            section->offset_within_region;

        cpu_transaction_failed(cpu, physaddr, addr, size, access_type,
                               mmu_idx, iotlbentry->attrs, r, retaddr);
    }
    if (locked) {
        qemu_mutex_unlock_iothread();
    }

    return val;
}

static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                      int mmu_idx, uint64_t val, target_ulong addr,
                      uintptr_t retaddr, int size)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr mr_offset;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    bool locked = false;
    MemTxResult r;

    section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
    mr = section->mr;
    mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }
    cpu->mem_io_vaddr = addr;
    cpu->mem_io_pc = retaddr;

    if (mr->global_locking && !qemu_mutex_iothread_locked()) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    r = memory_region_dispatch_write(mr, mr_offset,
                                     val, size, iotlbentry->attrs);
    if (r != MEMTX_OK) {
        hwaddr physaddr = mr_offset +
            section->offset_within_address_space -
            section->offset_within_region;

        cpu_transaction_failed(cpu, physaddr, addr, size, MMU_DATA_STORE,
                               mmu_idx, iotlbentry->attrs, r, retaddr);
    }
    if (locked) {
        qemu_mutex_unlock_iothread();
    }
}

/* Return true if ADDR is present in the victim tlb, and has been copied
   back to the main tlb.  */
static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
                           size_t elt_ofs, target_ulong page)
{
    size_t vidx;

    assert_cpu_is_self(ENV_GET_CPU(env));
    for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
        CPUTLBEntry *vtlb = &env->tlb_v_table[mmu_idx][vidx];
        target_ulong cmp;

        /* elt_ofs might correspond to .addr_write, so use atomic_read */
#if TCG_OVERSIZED_GUEST
        cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);
#else
        cmp = atomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs));
#endif

        if (cmp == page) {
            /* Found entry in victim tlb, swap tlb and iotlb.  */
            CPUTLBEntry tmptlb, *tlb = &env->tlb_table[mmu_idx][index];

            qemu_spin_lock(&env->tlb_c.lock);
            copy_tlb_helper_locked(&tmptlb, tlb);
            copy_tlb_helper_locked(tlb, vtlb);
            copy_tlb_helper_locked(vtlb, &tmptlb);
            qemu_spin_unlock(&env->tlb_c.lock);

            CPUIOTLBEntry tmpio, *io = &env->iotlb[mmu_idx][index];
            CPUIOTLBEntry *vio = &env->iotlb_v[mmu_idx][vidx];
            tmpio = *io; *io = *vio; *vio = tmpio;
            return true;
        }
    }
    return false;
}

/* Macro to call the above, with local variables from the use context.  */
#define VICTIM_TLB_HIT(TY, ADDR) \
  victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
                 (ADDR) & TARGET_PAGE_MASK)

/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
 * is actually a ram_addr_t (in system mode; the user mode emulation
 * version of this function returns a guest virtual address).
 */
tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
{
    uintptr_t mmu_idx = cpu_mmu_index(env, true);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
    void *p;

    if (unlikely(!tlb_hit(entry->addr_code, addr))) {
        if (!VICTIM_TLB_HIT(addr_code, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0);
            index = tlb_index(env, mmu_idx, addr);
            entry = tlb_entry(env, mmu_idx, addr);
        }
        assert(tlb_hit(entry->addr_code, addr));
    }

    if (unlikely(entry->addr_code & (TLB_RECHECK | TLB_MMIO))) {
        /*
         * Return -1 if we can't translate and execute from an entire
         * page of RAM here, which will cause us to execute by loading
         * and translating one insn at a time, without caching:
         *  - TLB_RECHECK: means the MMU protection covers a smaller range
         *    than a target page, so we must redo the MMU check every insn
         *  - TLB_MMIO: region is not backed by RAM
         */
        return -1;
    }

    p = (void *)((uintptr_t)addr + entry->addend);
    return qemu_ram_addr_from_host_nofail(p);
}

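/*
 * Usage sketch for probe_write() below (illustrative only): a helper
 * that emulates a multi-word store can validate the whole access up
 * front, e.g.
 *
 *     probe_write(env, addr, size, cpu_mmu_index(env, false), GETPC());
 *
 * and only then perform the individual writes.
 */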
/* Probe for whether the specified guest write access is permitted.
 * If it is not permitted then an exception will be taken in the same
 * way as if this were a real write access (and we will not return).
 * Otherwise the function will return, and there will be a valid
 * entry in the TLB for this access.
 */
void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx,
                 uintptr_t retaddr)
{
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);

    if (!tlb_hit(tlb_addr_write(entry), addr)) {
        /* TLB entry is for a different page */
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, size, MMU_DATA_STORE,
                     mmu_idx, retaddr);
        }
    }
}

/* Probe for a read-modify-write atomic operation.  Do not allow unaligned
 * operations, or io operations to proceed.  Return the host address.  */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
                               TCGMemOpIdx oi, uintptr_t retaddr,
                               NotDirtyInfo *ndi)
{
    size_t mmu_idx = get_mmuidx(oi);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
    target_ulong tlb_addr = tlb_addr_write(tlbe);
    TCGMemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    int s_bits = mop & MO_SIZE;
    void *hostaddr;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* Enforce guest required alignment.  */
    if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
        /* ??? Maybe indicate atomic op to cpu_unaligned_access */
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* Enforce qemu required alignment.  */
    if (unlikely(addr & ((1 << s_bits) - 1))) {
        /* We get here if guest alignment was not requested,
           or was not enforced by cpu_unaligned_access above.
           We might widen the access and emulate, but for now
           mark an exception and exit the cpu loop.  */
        goto stop_the_world;
    }

    /* Check TLB entry and enforce page permissions.  */
    if (!tlb_hit(tlb_addr, addr)) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, 1 << s_bits, MMU_DATA_STORE,
                     mmu_idx, retaddr);
            index = tlb_index(env, mmu_idx, addr);
            tlbe = tlb_entry(env, mmu_idx, addr);
        }
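        /* Re-read addr_write after the possible refill; PAGE_WRITE_INV
         * mappings set TLB_INVALID_MASK, which we clear so this one
         * access can proceed.  */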
1085d9bb58e5SYang Zhong     }
1086d9bb58e5SYang Zhong 
108755df6fcfSPeter Maydell     /* Notice an IO access or a needs-MMU-lookup access */
108855df6fcfSPeter Maydell     if (unlikely(tlb_addr & (TLB_MMIO | TLB_RECHECK))) {
1089d9bb58e5SYang Zhong         /* There's really nothing that can be done to
1090d9bb58e5SYang Zhong            support this apart from stop-the-world.  */
1091d9bb58e5SYang Zhong         goto stop_the_world;
1092d9bb58e5SYang Zhong     }
1093d9bb58e5SYang Zhong 
1094d9bb58e5SYang Zhong     /* Let the guest notice RMW on a write-only page.  */
109534d49937SPeter Maydell     if (unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) {
109698670d47SLaurent Vivier         tlb_fill(ENV_GET_CPU(env), addr, 1 << s_bits, MMU_DATA_LOAD,
109798670d47SLaurent Vivier                  mmu_idx, retaddr);
1098d9bb58e5SYang Zhong         /* Since we don't support reads and writes to different addresses,
1099d9bb58e5SYang Zhong            and we do have the proper page loaded for write, this shouldn't
1100d9bb58e5SYang Zhong            ever return.  But just in case, handle via stop-the-world.  */
1101d9bb58e5SYang Zhong         goto stop_the_world;
1102d9bb58e5SYang Zhong     }
1103d9bb58e5SYang Zhong 
110434d49937SPeter Maydell     hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
110534d49937SPeter Maydell 
110634d49937SPeter Maydell     ndi->active = false;
110734d49937SPeter Maydell     if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
110834d49937SPeter Maydell         ndi->active = true;
110934d49937SPeter Maydell         memory_notdirty_write_prepare(ndi, ENV_GET_CPU(env), addr,
111034d49937SPeter Maydell                                       qemu_ram_addr_from_host_nofail(hostaddr),
111134d49937SPeter Maydell                                       1 << s_bits);
111234d49937SPeter Maydell     }
111334d49937SPeter Maydell 
111434d49937SPeter Maydell     return hostaddr;
1115d9bb58e5SYang Zhong 
1116d9bb58e5SYang Zhong  stop_the_world:
1117d9bb58e5SYang Zhong     cpu_loop_exit_atomic(ENV_GET_CPU(env), retaddr);
1118d9bb58e5SYang Zhong }
1119d9bb58e5SYang Zhong 
1120d9bb58e5SYang Zhong #ifdef TARGET_WORDS_BIGENDIAN
1121eed56642SAlex Bennée #define NEED_BE_BSWAP 0
1122eed56642SAlex Bennée #define NEED_LE_BSWAP 1
1123d9bb58e5SYang Zhong #else
1124eed56642SAlex Bennée #define NEED_BE_BSWAP 1
1125eed56642SAlex Bennée #define NEED_LE_BSWAP 0
1126d9bb58e5SYang Zhong #endif
1127d9bb58e5SYang Zhong 
1128eed56642SAlex Bennée /*
1129eed56642SAlex Bennée  * Byte Swap Helper
1130eed56642SAlex Bennée  *
1131eed56642SAlex Bennée  * This should all be optimized away as dead code, depending on the
1132eed56642SAlex Bennée  * build host and the access type.
1133eed56642SAlex Bennée  */
1134d9bb58e5SYang Zhong 
1135eed56642SAlex Bennée static inline uint64_t handle_bswap(uint64_t val, int size, bool big_endian)
1136eed56642SAlex Bennée {
1137eed56642SAlex Bennée     if ((big_endian && NEED_BE_BSWAP) || (!big_endian && NEED_LE_BSWAP)) {
1138eed56642SAlex Bennée         switch (size) {
1139eed56642SAlex Bennée         case 1: return val;
1140eed56642SAlex Bennée         case 2: return bswap16(val);
1141eed56642SAlex Bennée         case 4: return bswap32(val);
1142eed56642SAlex Bennée         case 8: return bswap64(val);
1143eed56642SAlex Bennée         default:
1144eed56642SAlex Bennée             g_assert_not_reached();
1145eed56642SAlex Bennée         }
1146eed56642SAlex Bennée     } else {
1147eed56642SAlex Bennée         return val;
1148eed56642SAlex Bennée     }
1149eed56642SAlex Bennée }
1150d9bb58e5SYang Zhong 
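/*
 * Illustrative example (editorial, not from the original source): when
 * TARGET_WORDS_BIGENDIAN is not defined, NEED_BE_BSWAP is 1 and
 * NEED_LE_BSWAP is 0, so handle_bswap(0x1234, 2, true) returns
 * bswap16(0x1234) == 0x3412, while handle_bswap(0x1234, 2, false)
 * returns 0x1234 unchanged.  Both operands of the condition are
 * compile-time constants, so the untaken branch is eliminated.
 */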
1151eed56642SAlex Bennée /*
1152eed56642SAlex Bennée  * Load Helpers
1153eed56642SAlex Bennée  *
1154eed56642SAlex Bennée  * We support two different access types.  SOFTMMU_CODE_ACCESS is
1155eed56642SAlex Bennée  * specifically for reading instructions from system memory.  It is
1156eed56642SAlex Bennée  * called by the translation loop and in some helpers where the code
1157eed56642SAlex Bennée  * is disassembled.  It shouldn't be called directly by guest code.
1158eed56642SAlex Bennée  */
1159d9bb58e5SYang Zhong 
1160*2dd92606SRichard Henderson typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr,
1161*2dd92606SRichard Henderson                                 TCGMemOpIdx oi, uintptr_t retaddr);
1162*2dd92606SRichard Henderson 
1163*2dd92606SRichard Henderson static inline uint64_t __attribute__((always_inline))
1164*2dd92606SRichard Henderson load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
1165*2dd92606SRichard Henderson             uintptr_t retaddr, size_t size, bool big_endian, bool code_read,
1166*2dd92606SRichard Henderson             FullLoadHelper *full_load)
1167eed56642SAlex Bennée {
1168eed56642SAlex Bennée     uintptr_t mmu_idx = get_mmuidx(oi);
1169eed56642SAlex Bennée     uintptr_t index = tlb_index(env, mmu_idx, addr);
1170eed56642SAlex Bennée     CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
1171eed56642SAlex Bennée     target_ulong tlb_addr = code_read ? entry->addr_code : entry->addr_read;
1172eed56642SAlex Bennée     const size_t tlb_off = code_read ?
1173eed56642SAlex Bennée         offsetof(CPUTLBEntry, addr_code) : offsetof(CPUTLBEntry, addr_read);
1174f1be3696SRichard Henderson     const MMUAccessType access_type =
1175f1be3696SRichard Henderson         code_read ? MMU_INST_FETCH : MMU_DATA_LOAD;
1176eed56642SAlex Bennée     unsigned a_bits = get_alignment_bits(get_memop(oi));
1177eed56642SAlex Bennée     void *haddr;
1178eed56642SAlex Bennée     uint64_t res;
1179d9bb58e5SYang Zhong 
1180eed56642SAlex Bennée     /* Handle CPU specific unaligned behaviour */
1181eed56642SAlex Bennée     if (addr & ((1 << a_bits) - 1)) {
1182f1be3696SRichard Henderson         cpu_unaligned_access(ENV_GET_CPU(env), addr, access_type,
1183eed56642SAlex Bennée                              mmu_idx, retaddr);
1184eed56642SAlex Bennée     }
1185eed56642SAlex Bennée 
1186eed56642SAlex Bennée     /* If the TLB entry is for a different page, reload and try again. */
1187eed56642SAlex Bennée     if (!tlb_hit(tlb_addr, addr)) {
1188eed56642SAlex Bennée         if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
1189eed56642SAlex Bennée                             addr & TARGET_PAGE_MASK)) {
1190eed56642SAlex Bennée             tlb_fill(ENV_GET_CPU(env), addr, size,
1191f1be3696SRichard Henderson                      access_type, mmu_idx, retaddr);
1192eed56642SAlex Bennée             index = tlb_index(env, mmu_idx, addr);
1193eed56642SAlex Bennée             entry = tlb_entry(env, mmu_idx, addr);
1194eed56642SAlex Bennée         }
1195eed56642SAlex Bennée         tlb_addr = code_read ? entry->addr_code : entry->addr_read;
1196eed56642SAlex Bennée     }
1197eed56642SAlex Bennée 
1198eed56642SAlex Bennée     /* Handle an IO access. */
1199eed56642SAlex Bennée     if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
1200eed56642SAlex Bennée         if ((addr & (size - 1)) != 0) {
1201eed56642SAlex Bennée             goto do_unaligned_access;
1202eed56642SAlex Bennée         }
1203eed56642SAlex Bennée 
1204f1be3696SRichard Henderson         if (tlb_addr & TLB_RECHECK) {
1205f1be3696SRichard Henderson             /*
1206f1be3696SRichard Henderson              * This is a TLB_RECHECK access, where the MMU protection
1207f1be3696SRichard Henderson              * covers a smaller range than a target page, and we must
1208f1be3696SRichard Henderson              * repeat the MMU check here.  This tlb_fill() call might
1209f1be3696SRichard Henderson              * longjump out if this access should cause a guest exception.
1210f1be3696SRichard Henderson              */
1211f1be3696SRichard Henderson             tlb_fill(ENV_GET_CPU(env), addr, size,
1212f1be3696SRichard Henderson                      access_type, mmu_idx, retaddr);
1213f1be3696SRichard Henderson             index = tlb_index(env, mmu_idx, addr);
1214f1be3696SRichard Henderson             entry = tlb_entry(env, mmu_idx, addr);
1215f1be3696SRichard Henderson 
1216f1be3696SRichard Henderson             tlb_addr = code_read ? entry->addr_code : entry->addr_read;
1217f1be3696SRichard Henderson             tlb_addr &= ~TLB_RECHECK;
1218f1be3696SRichard Henderson             if (!(tlb_addr & ~TARGET_PAGE_MASK)) {
1219f1be3696SRichard Henderson                 /* RAM access */
1220f1be3696SRichard Henderson                 goto do_aligned_access;
1221f1be3696SRichard Henderson             }
1222f1be3696SRichard Henderson         }
1223f1be3696SRichard Henderson 
1224f1be3696SRichard Henderson         res = io_readx(env, &env->iotlb[mmu_idx][index], mmu_idx, addr,
1225f1be3696SRichard Henderson                        retaddr, access_type, size);
1226f1be3696SRichard Henderson         return handle_bswap(res, size, big_endian);
1227eed56642SAlex Bennée     }
1228eed56642SAlex Bennée 
1229eed56642SAlex Bennée     /* Handle slow unaligned access (it spans two pages or IO). */
1230eed56642SAlex Bennée     if (size > 1
1231eed56642SAlex Bennée         && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
1232eed56642SAlex Bennée                     >= TARGET_PAGE_SIZE)) {
1233eed56642SAlex Bennée         target_ulong addr1, addr2;
1234eed56642SAlex Bennée         tcg_target_ulong r1, r2;
1235eed56642SAlex Bennée         unsigned shift;
1236eed56642SAlex Bennée     do_unaligned_access:
1237eed56642SAlex Bennée         addr1 = addr & ~(size - 1);
1238eed56642SAlex Bennée         addr2 = addr1 + size;
1239*2dd92606SRichard Henderson         r1 = full_load(env, addr1, oi, retaddr);
1240*2dd92606SRichard Henderson         r2 = full_load(env, addr2, oi, retaddr);
1241eed56642SAlex Bennée         shift = (addr & (size - 1)) * 8;
1242eed56642SAlex Bennée 
1243eed56642SAlex Bennée         if (big_endian) {
1244eed56642SAlex Bennée             /* Big-endian combine.  */
1245eed56642SAlex Bennée             res = (r1 << shift) | (r2 >> ((size * 8) - shift));
1246eed56642SAlex Bennée         } else {
1247eed56642SAlex Bennée             /* Little-endian combine.  */
1248eed56642SAlex Bennée             res = (r1 >> shift) | (r2 << ((size * 8) - shift));
1249eed56642SAlex Bennée         }
1250eed56642SAlex Bennée         return res & MAKE_64BIT_MASK(0, size * 8);
1251eed56642SAlex Bennée     }
1252eed56642SAlex Bennée 
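/*
 * Worked example (editorial, not from the original source): a
 * little-endian 4-byte load at an address with (addr & 3) == 2 reads
 * r1 from addr & ~3 and r2 from (addr & ~3) + 4; shift is 16, so the
 * result is (r1 >> 16) | (r2 << 16), masked to the low 32 bits.
 */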
1253f1be3696SRichard Henderson  do_aligned_access:
1254eed56642SAlex Bennée     haddr = (void *)((uintptr_t)addr + entry->addend);
1255eed56642SAlex Bennée     switch (size) {
1256eed56642SAlex Bennée     case 1:
1257eed56642SAlex Bennée         res = ldub_p(haddr);
1258eed56642SAlex Bennée         break;
1259eed56642SAlex Bennée     case 2:
1260eed56642SAlex Bennée         if (big_endian) {
1261eed56642SAlex Bennée             res = lduw_be_p(haddr);
1262eed56642SAlex Bennée         } else {
1263eed56642SAlex Bennée             res = lduw_le_p(haddr);
1264eed56642SAlex Bennée         }
1265eed56642SAlex Bennée         break;
1266eed56642SAlex Bennée     case 4:
1267eed56642SAlex Bennée         if (big_endian) {
1268eed56642SAlex Bennée             res = (uint32_t)ldl_be_p(haddr);
1269eed56642SAlex Bennée         } else {
1270eed56642SAlex Bennée             res = (uint32_t)ldl_le_p(haddr);
1271eed56642SAlex Bennée         }
1272eed56642SAlex Bennée         break;
1273eed56642SAlex Bennée     case 8:
1274eed56642SAlex Bennée         if (big_endian) {
1275eed56642SAlex Bennée             res = ldq_be_p(haddr);
1276eed56642SAlex Bennée         } else {
1277eed56642SAlex Bennée             res = ldq_le_p(haddr);
1278eed56642SAlex Bennée         }
1279eed56642SAlex Bennée         break;
1280eed56642SAlex Bennée     default:
1281eed56642SAlex Bennée         g_assert_not_reached();
1282eed56642SAlex Bennée     }
1283eed56642SAlex Bennée 
1284eed56642SAlex Bennée     return res;
1285eed56642SAlex Bennée }
1286eed56642SAlex Bennée 
1287eed56642SAlex Bennée /*
1288eed56642SAlex Bennée  * For the benefit of TCG generated code, we want to avoid the
1289eed56642SAlex Bennée  * complication of ABI-specific return type promotion and always
1290eed56642SAlex Bennée  * return a value extended to the register size of the host.  This is
1291eed56642SAlex Bennée  * tcg_target_long, except in the case of a 32-bit host and 64-bit
1292eed56642SAlex Bennée  * data, and for that we always have uint64_t.
1293eed56642SAlex Bennée  *
1294eed56642SAlex Bennée  * We don't bother with this widened value for SOFTMMU_CODE_ACCESS.
1295eed56642SAlex Bennée */ 1296eed56642SAlex Bennée 1297*2dd92606SRichard Henderson static uint64_t full_ldub_mmu(CPUArchState *env, target_ulong addr, 1298*2dd92606SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 1299*2dd92606SRichard Henderson { 1300*2dd92606SRichard Henderson return load_helper(env, addr, oi, retaddr, 1, false, false, 1301*2dd92606SRichard Henderson full_ldub_mmu); 1302*2dd92606SRichard Henderson } 1303*2dd92606SRichard Henderson 1304fc1bc777SRichard Henderson tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr, 1305fc1bc777SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 1306eed56642SAlex Bennée { 1307*2dd92606SRichard Henderson return full_ldub_mmu(env, addr, oi, retaddr); 1308*2dd92606SRichard Henderson } 1309*2dd92606SRichard Henderson 1310*2dd92606SRichard Henderson static uint64_t full_le_lduw_mmu(CPUArchState *env, target_ulong addr, 1311*2dd92606SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 1312*2dd92606SRichard Henderson { 1313*2dd92606SRichard Henderson return load_helper(env, addr, oi, retaddr, 2, false, false, 1314*2dd92606SRichard Henderson full_le_lduw_mmu); 1315eed56642SAlex Bennée } 1316eed56642SAlex Bennée 1317fc1bc777SRichard Henderson tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr, 1318fc1bc777SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 1319eed56642SAlex Bennée { 1320*2dd92606SRichard Henderson return full_le_lduw_mmu(env, addr, oi, retaddr); 1321*2dd92606SRichard Henderson } 1322*2dd92606SRichard Henderson 1323*2dd92606SRichard Henderson static uint64_t full_be_lduw_mmu(CPUArchState *env, target_ulong addr, 1324*2dd92606SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 1325*2dd92606SRichard Henderson { 1326*2dd92606SRichard Henderson return load_helper(env, addr, oi, retaddr, 2, true, false, 1327*2dd92606SRichard Henderson full_be_lduw_mmu); 1328eed56642SAlex Bennée } 1329eed56642SAlex Bennée 1330fc1bc777SRichard Henderson tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr, 1331fc1bc777SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 1332eed56642SAlex Bennée { 1333*2dd92606SRichard Henderson return full_be_lduw_mmu(env, addr, oi, retaddr); 1334*2dd92606SRichard Henderson } 1335*2dd92606SRichard Henderson 1336*2dd92606SRichard Henderson static uint64_t full_le_ldul_mmu(CPUArchState *env, target_ulong addr, 1337*2dd92606SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 1338*2dd92606SRichard Henderson { 1339*2dd92606SRichard Henderson return load_helper(env, addr, oi, retaddr, 4, false, false, 1340*2dd92606SRichard Henderson full_le_ldul_mmu); 1341eed56642SAlex Bennée } 1342eed56642SAlex Bennée 1343fc1bc777SRichard Henderson tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr, 1344fc1bc777SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 1345eed56642SAlex Bennée { 1346*2dd92606SRichard Henderson return full_le_ldul_mmu(env, addr, oi, retaddr); 1347*2dd92606SRichard Henderson } 1348*2dd92606SRichard Henderson 1349*2dd92606SRichard Henderson static uint64_t full_be_ldul_mmu(CPUArchState *env, target_ulong addr, 1350*2dd92606SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 1351*2dd92606SRichard Henderson { 1352*2dd92606SRichard Henderson return load_helper(env, addr, oi, retaddr, 4, true, false, 1353*2dd92606SRichard Henderson full_be_ldul_mmu); 1354eed56642SAlex Bennée } 1355eed56642SAlex Bennée 1356fc1bc777SRichard Henderson tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr, 
1357fc1bc777SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 1358eed56642SAlex Bennée { 1359*2dd92606SRichard Henderson return full_be_ldul_mmu(env, addr, oi, retaddr); 1360eed56642SAlex Bennée } 1361eed56642SAlex Bennée 1362fc1bc777SRichard Henderson uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr, 1363fc1bc777SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 1364eed56642SAlex Bennée { 1365*2dd92606SRichard Henderson return load_helper(env, addr, oi, retaddr, 8, false, false, 1366*2dd92606SRichard Henderson helper_le_ldq_mmu); 1367eed56642SAlex Bennée } 1368eed56642SAlex Bennée 1369fc1bc777SRichard Henderson uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr, 1370fc1bc777SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 1371eed56642SAlex Bennée { 1372*2dd92606SRichard Henderson return load_helper(env, addr, oi, retaddr, 8, true, false, 1373*2dd92606SRichard Henderson helper_be_ldq_mmu); 1374eed56642SAlex Bennée } 1375eed56642SAlex Bennée 1376eed56642SAlex Bennée /* 1377eed56642SAlex Bennée * Provide signed versions of the load routines as well. We can of course 1378eed56642SAlex Bennée * avoid this for 64-bit data, or for 32-bit data on 32-bit host. 1379eed56642SAlex Bennée */ 1380eed56642SAlex Bennée 1381eed56642SAlex Bennée 1382eed56642SAlex Bennée tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr, 1383eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 1384eed56642SAlex Bennée { 1385eed56642SAlex Bennée return (int8_t)helper_ret_ldub_mmu(env, addr, oi, retaddr); 1386eed56642SAlex Bennée } 1387eed56642SAlex Bennée 1388eed56642SAlex Bennée tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr, 1389eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 1390eed56642SAlex Bennée { 1391eed56642SAlex Bennée return (int16_t)helper_le_lduw_mmu(env, addr, oi, retaddr); 1392eed56642SAlex Bennée } 1393eed56642SAlex Bennée 1394eed56642SAlex Bennée tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr, 1395eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 1396eed56642SAlex Bennée { 1397eed56642SAlex Bennée return (int16_t)helper_be_lduw_mmu(env, addr, oi, retaddr); 1398eed56642SAlex Bennée } 1399eed56642SAlex Bennée 1400eed56642SAlex Bennée tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr, 1401eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 1402eed56642SAlex Bennée { 1403eed56642SAlex Bennée return (int32_t)helper_le_ldul_mmu(env, addr, oi, retaddr); 1404eed56642SAlex Bennée } 1405eed56642SAlex Bennée 1406eed56642SAlex Bennée tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr, 1407eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 1408eed56642SAlex Bennée { 1409eed56642SAlex Bennée return (int32_t)helper_be_ldul_mmu(env, addr, oi, retaddr); 1410eed56642SAlex Bennée } 1411eed56642SAlex Bennée 1412eed56642SAlex Bennée /* 1413eed56642SAlex Bennée * Store Helpers 1414eed56642SAlex Bennée */ 1415eed56642SAlex Bennée 1416eed56642SAlex Bennée static void store_helper(CPUArchState *env, target_ulong addr, uint64_t val, 1417eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr, size_t size, 1418eed56642SAlex Bennée bool big_endian) 1419eed56642SAlex Bennée { 1420eed56642SAlex Bennée uintptr_t mmu_idx = get_mmuidx(oi); 1421eed56642SAlex Bennée uintptr_t index = tlb_index(env, mmu_idx, addr); 1422eed56642SAlex Bennée CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); 1423eed56642SAlex Bennée target_ulong tlb_addr = tlb_addr_write(entry); 
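    /*
     * Editorial overview (not from the original source) of the paths
     * below: a guest-required alignment fault is raised first if needed;
     * a TLB miss is refilled, consulting the victim TLB; MMIO and
     * TLB_RECHECK accesses go through io_writex(); a store spanning two
     * pages is split into single-byte stores; everything else falls
     * through to a direct host store.
     */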
1424eed56642SAlex Bennée const size_t tlb_off = offsetof(CPUTLBEntry, addr_write); 1425eed56642SAlex Bennée unsigned a_bits = get_alignment_bits(get_memop(oi)); 1426eed56642SAlex Bennée void *haddr; 1427eed56642SAlex Bennée 1428eed56642SAlex Bennée /* Handle CPU specific unaligned behaviour */ 1429eed56642SAlex Bennée if (addr & ((1 << a_bits) - 1)) { 1430eed56642SAlex Bennée cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE, 1431eed56642SAlex Bennée mmu_idx, retaddr); 1432eed56642SAlex Bennée } 1433eed56642SAlex Bennée 1434eed56642SAlex Bennée /* If the TLB entry is for a different page, reload and try again. */ 1435eed56642SAlex Bennée if (!tlb_hit(tlb_addr, addr)) { 1436eed56642SAlex Bennée if (!victim_tlb_hit(env, mmu_idx, index, tlb_off, 1437eed56642SAlex Bennée addr & TARGET_PAGE_MASK)) { 1438eed56642SAlex Bennée tlb_fill(ENV_GET_CPU(env), addr, size, MMU_DATA_STORE, 1439eed56642SAlex Bennée mmu_idx, retaddr); 1440eed56642SAlex Bennée index = tlb_index(env, mmu_idx, addr); 1441eed56642SAlex Bennée entry = tlb_entry(env, mmu_idx, addr); 1442eed56642SAlex Bennée } 1443eed56642SAlex Bennée tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK; 1444eed56642SAlex Bennée } 1445eed56642SAlex Bennée 1446eed56642SAlex Bennée /* Handle an IO access. */ 1447eed56642SAlex Bennée if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) { 1448eed56642SAlex Bennée if ((addr & (size - 1)) != 0) { 1449eed56642SAlex Bennée goto do_unaligned_access; 1450eed56642SAlex Bennée } 1451eed56642SAlex Bennée 1452f1be3696SRichard Henderson if (tlb_addr & TLB_RECHECK) { 1453f1be3696SRichard Henderson /* 1454f1be3696SRichard Henderson * This is a TLB_RECHECK access, where the MMU protection 1455f1be3696SRichard Henderson * covers a smaller range than a target page, and we must 1456f1be3696SRichard Henderson * repeat the MMU check here. This tlb_fill() call might 1457f1be3696SRichard Henderson * longjump out if this access should cause a guest exception. 1458f1be3696SRichard Henderson */ 1459f1be3696SRichard Henderson tlb_fill(ENV_GET_CPU(env), addr, size, MMU_DATA_STORE, 1460f1be3696SRichard Henderson mmu_idx, retaddr); 1461f1be3696SRichard Henderson index = tlb_index(env, mmu_idx, addr); 1462f1be3696SRichard Henderson entry = tlb_entry(env, mmu_idx, addr); 1463f1be3696SRichard Henderson 1464f1be3696SRichard Henderson tlb_addr = tlb_addr_write(entry); 1465f1be3696SRichard Henderson tlb_addr &= ~TLB_RECHECK; 1466f1be3696SRichard Henderson if (!(tlb_addr & ~TARGET_PAGE_MASK)) { 1467f1be3696SRichard Henderson /* RAM access */ 1468f1be3696SRichard Henderson goto do_aligned_access; 1469f1be3696SRichard Henderson } 1470f1be3696SRichard Henderson } 1471f1be3696SRichard Henderson 1472f1be3696SRichard Henderson io_writex(env, &env->iotlb[mmu_idx][index], mmu_idx, 1473eed56642SAlex Bennée handle_bswap(val, size, big_endian), 1474f1be3696SRichard Henderson addr, retaddr, size); 1475eed56642SAlex Bennée return; 1476eed56642SAlex Bennée } 1477eed56642SAlex Bennée 1478eed56642SAlex Bennée /* Handle slow unaligned access (it spans two pages or IO). */ 1479eed56642SAlex Bennée if (size > 1 1480eed56642SAlex Bennée && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1 1481eed56642SAlex Bennée >= TARGET_PAGE_SIZE)) { 1482eed56642SAlex Bennée int i; 1483eed56642SAlex Bennée uintptr_t index2; 1484eed56642SAlex Bennée CPUTLBEntry *entry2; 1485eed56642SAlex Bennée target_ulong page2, tlb_addr2; 1486eed56642SAlex Bennée do_unaligned_access: 1487eed56642SAlex Bennée /* 1488eed56642SAlex Bennée * Ensure the second page is in the TLB. 
Note that the first page 1489eed56642SAlex Bennée * is already guaranteed to be filled, and that the second page 1490eed56642SAlex Bennée * cannot evict the first. 1491eed56642SAlex Bennée */ 1492eed56642SAlex Bennée page2 = (addr + size) & TARGET_PAGE_MASK; 1493eed56642SAlex Bennée index2 = tlb_index(env, mmu_idx, page2); 1494eed56642SAlex Bennée entry2 = tlb_entry(env, mmu_idx, page2); 1495eed56642SAlex Bennée tlb_addr2 = tlb_addr_write(entry2); 1496eed56642SAlex Bennée if (!tlb_hit_page(tlb_addr2, page2) 1497eed56642SAlex Bennée && !victim_tlb_hit(env, mmu_idx, index2, tlb_off, 1498eed56642SAlex Bennée page2 & TARGET_PAGE_MASK)) { 1499eed56642SAlex Bennée tlb_fill(ENV_GET_CPU(env), page2, size, MMU_DATA_STORE, 1500eed56642SAlex Bennée mmu_idx, retaddr); 1501eed56642SAlex Bennée } 1502eed56642SAlex Bennée 1503eed56642SAlex Bennée /* 1504eed56642SAlex Bennée * XXX: not efficient, but simple. 1505eed56642SAlex Bennée * This loop must go in the forward direction to avoid issues 1506eed56642SAlex Bennée * with self-modifying code in Windows 64-bit. 1507eed56642SAlex Bennée */ 1508eed56642SAlex Bennée for (i = 0; i < size; ++i) { 1509eed56642SAlex Bennée uint8_t val8; 1510eed56642SAlex Bennée if (big_endian) { 1511eed56642SAlex Bennée /* Big-endian extract. */ 1512eed56642SAlex Bennée val8 = val >> (((size - 1) * 8) - (i * 8)); 1513eed56642SAlex Bennée } else { 1514eed56642SAlex Bennée /* Little-endian extract. */ 1515eed56642SAlex Bennée val8 = val >> (i * 8); 1516eed56642SAlex Bennée } 1517eed56642SAlex Bennée store_helper(env, addr + i, val8, oi, retaddr, 1, big_endian); 1518eed56642SAlex Bennée } 1519eed56642SAlex Bennée return; 1520eed56642SAlex Bennée } 1521eed56642SAlex Bennée 1522f1be3696SRichard Henderson do_aligned_access: 1523eed56642SAlex Bennée haddr = (void *)((uintptr_t)addr + entry->addend); 1524eed56642SAlex Bennée switch (size) { 1525eed56642SAlex Bennée case 1: 1526eed56642SAlex Bennée stb_p(haddr, val); 1527eed56642SAlex Bennée break; 1528eed56642SAlex Bennée case 2: 1529eed56642SAlex Bennée if (big_endian) { 1530eed56642SAlex Bennée stw_be_p(haddr, val); 1531eed56642SAlex Bennée } else { 1532eed56642SAlex Bennée stw_le_p(haddr, val); 1533eed56642SAlex Bennée } 1534eed56642SAlex Bennée break; 1535eed56642SAlex Bennée case 4: 1536eed56642SAlex Bennée if (big_endian) { 1537eed56642SAlex Bennée stl_be_p(haddr, val); 1538eed56642SAlex Bennée } else { 1539eed56642SAlex Bennée stl_le_p(haddr, val); 1540eed56642SAlex Bennée } 1541eed56642SAlex Bennée break; 1542eed56642SAlex Bennée case 8: 1543eed56642SAlex Bennée if (big_endian) { 1544eed56642SAlex Bennée stq_be_p(haddr, val); 1545eed56642SAlex Bennée } else { 1546eed56642SAlex Bennée stq_le_p(haddr, val); 1547eed56642SAlex Bennée } 1548eed56642SAlex Bennée break; 1549eed56642SAlex Bennée default: 1550eed56642SAlex Bennée g_assert_not_reached(); 1551eed56642SAlex Bennée break; 1552eed56642SAlex Bennée } 1553eed56642SAlex Bennée } 1554eed56642SAlex Bennée 1555fc1bc777SRichard Henderson void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val, 1556eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 1557eed56642SAlex Bennée { 1558eed56642SAlex Bennée store_helper(env, addr, val, oi, retaddr, 1, false); 1559eed56642SAlex Bennée } 1560eed56642SAlex Bennée 1561fc1bc777SRichard Henderson void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val, 1562eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 1563eed56642SAlex Bennée { 1564eed56642SAlex Bennée store_helper(env, addr, val, oi, retaddr, 2, 
false);
1565eed56642SAlex Bennée }
1566eed56642SAlex Bennée 
1567fc1bc777SRichard Henderson void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
1568eed56642SAlex Bennée                        TCGMemOpIdx oi, uintptr_t retaddr)
1569eed56642SAlex Bennée {
1570eed56642SAlex Bennée     store_helper(env, addr, val, oi, retaddr, 2, true);
1571eed56642SAlex Bennée }
1572eed56642SAlex Bennée 
1573fc1bc777SRichard Henderson void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
1574eed56642SAlex Bennée                        TCGMemOpIdx oi, uintptr_t retaddr)
1575eed56642SAlex Bennée {
1576eed56642SAlex Bennée     store_helper(env, addr, val, oi, retaddr, 4, false);
1577eed56642SAlex Bennée }
1578eed56642SAlex Bennée 
1579fc1bc777SRichard Henderson void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
1580eed56642SAlex Bennée                        TCGMemOpIdx oi, uintptr_t retaddr)
1581eed56642SAlex Bennée {
1582eed56642SAlex Bennée     store_helper(env, addr, val, oi, retaddr, 4, true);
1583eed56642SAlex Bennée }
1584eed56642SAlex Bennée 
1585fc1bc777SRichard Henderson void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
1586eed56642SAlex Bennée                        TCGMemOpIdx oi, uintptr_t retaddr)
1587eed56642SAlex Bennée {
1588eed56642SAlex Bennée     store_helper(env, addr, val, oi, retaddr, 8, false);
1589eed56642SAlex Bennée }
1590eed56642SAlex Bennée 
1591fc1bc777SRichard Henderson void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
1592eed56642SAlex Bennée                        TCGMemOpIdx oi, uintptr_t retaddr)
1593eed56642SAlex Bennée {
1594eed56642SAlex Bennée     store_helper(env, addr, val, oi, retaddr, 8, true);
1595eed56642SAlex Bennée }
1596d9bb58e5SYang Zhong 
1597d9bb58e5SYang Zhong /* The first set of helpers allows passing in OI and RETADDR.  This makes
1598d9bb58e5SYang Zhong    them callable from other helpers.  */
1599d9bb58e5SYang Zhong 
1600d9bb58e5SYang Zhong #define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr
1601d9bb58e5SYang Zhong #define ATOMIC_NAME(X) \
1602d9bb58e5SYang Zhong     HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
160334d49937SPeter Maydell #define ATOMIC_MMU_DECLS NotDirtyInfo ndi
160434d49937SPeter Maydell #define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, retaddr, &ndi)
160534d49937SPeter Maydell #define ATOMIC_MMU_CLEANUP                              \
160634d49937SPeter Maydell     do {                                                \
160734d49937SPeter Maydell         if (unlikely(ndi.active)) {                     \
160834d49937SPeter Maydell             memory_notdirty_write_complete(&ndi);       \
160934d49937SPeter Maydell         }                                               \
161034d49937SPeter Maydell     } while (0)
1611d9bb58e5SYang Zhong 
1612d9bb58e5SYang Zhong #define DATA_SIZE 1
1613d9bb58e5SYang Zhong #include "atomic_template.h"
1614d9bb58e5SYang Zhong 
1615d9bb58e5SYang Zhong #define DATA_SIZE 2
1616d9bb58e5SYang Zhong #include "atomic_template.h"
1617d9bb58e5SYang Zhong 
1618d9bb58e5SYang Zhong #define DATA_SIZE 4
1619d9bb58e5SYang Zhong #include "atomic_template.h"
1620d9bb58e5SYang Zhong 
1621d9bb58e5SYang Zhong #ifdef CONFIG_ATOMIC64
1622d9bb58e5SYang Zhong #define DATA_SIZE 8
1623d9bb58e5SYang Zhong #include "atomic_template.h"
1624d9bb58e5SYang Zhong #endif
1625d9bb58e5SYang Zhong 
1626e6cd4bb5SRichard Henderson #if HAVE_CMPXCHG128 || HAVE_ATOMIC128
1627d9bb58e5SYang Zhong #define DATA_SIZE 16
1628d9bb58e5SYang Zhong #include "atomic_template.h"
1629d9bb58e5SYang Zhong #endif
1630d9bb58e5SYang Zhong 
1631d9bb58e5SYang Zhong /* The second set of helpers is directly callable from TCG as helpers.  */
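/*
 * Illustrative expansion (editorial; names follow the conventions of
 * atomic_template.h and may differ in detail): with DATA_SIZE 4, the
 * second set defines, among others,
 *
 *     uint32_t helper_atomic_cmpxchgl_le(CPUArchState *env,
 *                                        target_ulong addr,
 *                                        uint32_t cmpv, uint32_t newv,
 *                                        TCGMemOpIdx oi);
 *
 * which obtains the return address via GETPC() instead of taking an
 * explicit retaddr argument as the first set does.
 */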
1632d9bb58e5SYang Zhong 
1633d9bb58e5SYang Zhong #undef EXTRA_ARGS
1634d9bb58e5SYang Zhong #undef ATOMIC_NAME
1635d9bb58e5SYang Zhong #undef ATOMIC_MMU_LOOKUP
1636d9bb58e5SYang Zhong #define EXTRA_ARGS         , TCGMemOpIdx oi
1637d9bb58e5SYang Zhong #define ATOMIC_NAME(X)     HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
163834d49937SPeter Maydell #define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, GETPC(), &ndi)
1639d9bb58e5SYang Zhong 
1640d9bb58e5SYang Zhong #define DATA_SIZE 1
1641d9bb58e5SYang Zhong #include "atomic_template.h"
1642d9bb58e5SYang Zhong 
1643d9bb58e5SYang Zhong #define DATA_SIZE 2
1644d9bb58e5SYang Zhong #include "atomic_template.h"
1645d9bb58e5SYang Zhong 
1646d9bb58e5SYang Zhong #define DATA_SIZE 4
1647d9bb58e5SYang Zhong #include "atomic_template.h"
1648d9bb58e5SYang Zhong 
1649d9bb58e5SYang Zhong #ifdef CONFIG_ATOMIC64
1650d9bb58e5SYang Zhong #define DATA_SIZE 8
1651d9bb58e5SYang Zhong #include "atomic_template.h"
1652d9bb58e5SYang Zhong #endif
1653d9bb58e5SYang Zhong 
1654d9bb58e5SYang Zhong /* Code access functions.  */
1655d9bb58e5SYang Zhong 
1656*2dd92606SRichard Henderson static uint64_t full_ldub_cmmu(CPUArchState *env, target_ulong addr,
1657*2dd92606SRichard Henderson                                TCGMemOpIdx oi, uintptr_t retaddr)
1658*2dd92606SRichard Henderson {
1659*2dd92606SRichard Henderson     return load_helper(env, addr, oi, retaddr, 1, false, true,
1660*2dd92606SRichard Henderson                        full_ldub_cmmu);
1661*2dd92606SRichard Henderson }
1662*2dd92606SRichard Henderson 
1663fc1bc777SRichard Henderson uint8_t helper_ret_ldb_cmmu(CPUArchState *env, target_ulong addr,
1664fc1bc777SRichard Henderson                             TCGMemOpIdx oi, uintptr_t retaddr)
1665eed56642SAlex Bennée {
1666*2dd92606SRichard Henderson     return full_ldub_cmmu(env, addr, oi, retaddr);
1667*2dd92606SRichard Henderson }
1668*2dd92606SRichard Henderson 
1669*2dd92606SRichard Henderson static uint64_t full_le_lduw_cmmu(CPUArchState *env, target_ulong addr,
1670*2dd92606SRichard Henderson                                   TCGMemOpIdx oi, uintptr_t retaddr)
1671*2dd92606SRichard Henderson {
1672*2dd92606SRichard Henderson     return load_helper(env, addr, oi, retaddr, 2, false, true,
1673*2dd92606SRichard Henderson                        full_le_lduw_cmmu);
1674eed56642SAlex Bennée }
1675d9bb58e5SYang Zhong 
1676fc1bc777SRichard Henderson uint16_t helper_le_ldw_cmmu(CPUArchState *env, target_ulong addr,
1677fc1bc777SRichard Henderson                             TCGMemOpIdx oi, uintptr_t retaddr)
1678eed56642SAlex Bennée {
1679*2dd92606SRichard Henderson     return full_le_lduw_cmmu(env, addr, oi, retaddr);
1680*2dd92606SRichard Henderson }
1681*2dd92606SRichard Henderson 
1682*2dd92606SRichard Henderson static uint64_t full_be_lduw_cmmu(CPUArchState *env, target_ulong addr,
1683*2dd92606SRichard Henderson                                   TCGMemOpIdx oi, uintptr_t retaddr)
1684*2dd92606SRichard Henderson {
1685*2dd92606SRichard Henderson     return load_helper(env, addr, oi, retaddr, 2, true, true,
1686*2dd92606SRichard Henderson                        full_be_lduw_cmmu);
1687eed56642SAlex Bennée }
1688d9bb58e5SYang Zhong 
1689fc1bc777SRichard Henderson uint16_t helper_be_ldw_cmmu(CPUArchState *env, target_ulong addr,
1690fc1bc777SRichard Henderson                             TCGMemOpIdx oi, uintptr_t retaddr)
1691eed56642SAlex Bennée {
1692*2dd92606SRichard Henderson     return full_be_lduw_cmmu(env, addr, oi, retaddr);
1693*2dd92606SRichard Henderson }
1694*2dd92606SRichard Henderson 
1695*2dd92606SRichard Henderson static uint64_t full_le_ldul_cmmu(CPUArchState *env, target_ulong addr,
1696*2dd92606SRichard Henderson                                   TCGMemOpIdx oi, uintptr_t retaddr)
1697*2dd92606SRichard Henderson {
1698*2dd92606SRichard Henderson     return load_helper(env,
addr, oi, retaddr, 4, false, true, 1699*2dd92606SRichard Henderson full_le_ldul_cmmu); 1700eed56642SAlex Bennée } 1701d9bb58e5SYang Zhong 1702fc1bc777SRichard Henderson uint32_t helper_le_ldl_cmmu(CPUArchState *env, target_ulong addr, 1703fc1bc777SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 1704eed56642SAlex Bennée { 1705*2dd92606SRichard Henderson return full_le_ldul_cmmu(env, addr, oi, retaddr); 1706*2dd92606SRichard Henderson } 1707*2dd92606SRichard Henderson 1708*2dd92606SRichard Henderson static uint64_t full_be_ldul_cmmu(CPUArchState *env, target_ulong addr, 1709*2dd92606SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 1710*2dd92606SRichard Henderson { 1711*2dd92606SRichard Henderson return load_helper(env, addr, oi, retaddr, 4, true, true, 1712*2dd92606SRichard Henderson full_be_ldul_cmmu); 1713eed56642SAlex Bennée } 1714d9bb58e5SYang Zhong 1715fc1bc777SRichard Henderson uint32_t helper_be_ldl_cmmu(CPUArchState *env, target_ulong addr, 1716fc1bc777SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 1717eed56642SAlex Bennée { 1718*2dd92606SRichard Henderson return full_be_ldul_cmmu(env, addr, oi, retaddr); 1719eed56642SAlex Bennée } 1720eed56642SAlex Bennée 1721fc1bc777SRichard Henderson uint64_t helper_le_ldq_cmmu(CPUArchState *env, target_ulong addr, 1722fc1bc777SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 1723eed56642SAlex Bennée { 1724*2dd92606SRichard Henderson return load_helper(env, addr, oi, retaddr, 8, false, true, 1725*2dd92606SRichard Henderson helper_le_ldq_cmmu); 1726eed56642SAlex Bennée } 1727eed56642SAlex Bennée 1728fc1bc777SRichard Henderson uint64_t helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr, 1729fc1bc777SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 1730eed56642SAlex Bennée { 1731*2dd92606SRichard Henderson return load_helper(env, addr, oi, retaddr, 8, true, true, 1732*2dd92606SRichard Henderson helper_be_ldq_cmmu); 1733eed56642SAlex Bennée } 1734
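/*
 * Editorial note (not from the original source): each static
 * full_*_mmu/full_*_cmmu function passes itself to load_helper() as
 * the full_load callback, so the two-page unaligned path re-enters
 * the matching fixed-size variant; together with the always_inline
 * attribute, this lets the compiler specialize load_helper() for each
 * size/endianness/code_read combination.
 */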