/*
 * Common CPU TLB handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
#include "qemu/error-report.h"
#include "exec/log.h"
#include "exec/helper-proto.h"
#include "qemu/atomic.h"
#include "qemu/atomic128.h"

/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif

#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)

#define assert_cpu_is_self(cpu) do {                              \
        if (DEBUG_TLB_GATE) {                                     \
            g_assert(!(cpu)->created || qemu_cpu_is_self(cpu));   \
        }                                                         \
    } while (0)

/* run_on_cpu_data.target_ptr should always be big enough for a
 * target_ulong even on 32 bit builds */
QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));

/* We currently can't handle more than 16 bits in the MMUIDX bitmask.
 */
QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
#define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)

static inline size_t sizeof_tlb(CPUArchState *env, uintptr_t mmu_idx)
{
    return env_tlb(env)->f[mmu_idx].mask + (1 << CPU_TLB_ENTRY_BITS);
}

static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
                             size_t max_entries)
{
    desc->window_begin_ns = ns;
    desc->window_max_entries = max_entries;
}

static void tlb_dyn_init(CPUArchState *env)
{
    int i;

    for (i = 0; i < NB_MMU_MODES; i++) {
        CPUTLBDesc *desc = &env_tlb(env)->d[i];
        size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;

        tlb_window_reset(desc, get_clock_realtime(), 0);
        desc->n_used_entries = 0;
        env_tlb(env)->f[i].mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
        env_tlb(env)->f[i].table = g_new(CPUTLBEntry, n_entries);
        env_tlb(env)->d[i].iotlb = g_new(CPUIOTLBEntry, n_entries);
    }
}
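/*
 * Illustrative sketch (not used by the code above): the fast-path state
 * stores the table size as a byte mask rather than an entry count, so a
 * direct-mapped lookup can index the table with a single AND.  Under the
 * encoding used by tlb_dyn_init() above, mask and entry count convert as
 * shown below; this helper is a hypothetical example, not a QEMU API.
 */
static inline size_t example_entries_from_mask(uintptr_t mask)
{
    /* mask == (n_entries - 1) << CPU_TLB_ENTRY_BITS, so invert that. */
    return (mask >> CPU_TLB_ENTRY_BITS) + 1;
}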
/**
 * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
 * @env: CPU that owns the TLB
 * @mmu_idx: MMU index of the TLB
 *
 * Called with tlb_lock held.
 *
 * We have two main constraints when resizing a TLB: (1) we only resize it
 * on a TLB flush (otherwise we'd have to take a perf hit by either rehashing
 * the array or unnecessarily flushing it), which means we do not control how
 * frequently the resizing can occur; (2) we don't have access to the guest's
 * future scheduling decisions, and therefore have to decide the magnitude of
 * the resize based on past observations.
 *
 * In general, a memory-hungry process can benefit greatly from an appropriately
 * sized TLB, since a guest TLB miss is very expensive. This doesn't mean that
 * we just have to make the TLB as large as possible; while an oversized TLB
 * results in minimal TLB miss rates, it also takes longer to be flushed
 * (flushes can be _very_ frequent), and the reduced locality can also hurt
 * performance.
 *
 * To achieve near-optimal performance for all kinds of workloads, we:
 *
 * 1. Aggressively increase the size of the TLB when the use rate of the
 * TLB being flushed is high, since it is likely that in the near future this
 * memory-hungry process will execute again, and its memory hungriness will
 * probably be similar.
 *
 * 2. Slowly reduce the size of the TLB as the use rate declines over a
 * reasonably large time window. The rationale is that if in such a time window
 * we have not observed a high TLB use rate, it is likely that we won't observe
 * it in the near future. In that case, once a time window expires we downsize
 * the TLB to match the maximum use rate observed in the window.
 *
 * 3. Try to keep the maximum use rate in a time window in the 30-70% range,
 * since in that range performance is likely near-optimal. Recall that the TLB
 * is direct mapped, so we want the use rate to be low (or at least not too
 * high), since otherwise we are likely to have a significant amount of
 * conflict misses.
 */
static void tlb_mmu_resize_locked(CPUArchState *env, int mmu_idx)
{
    CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
    size_t old_size = tlb_n_entries(env, mmu_idx);
    size_t rate;
    size_t new_size = old_size;
    int64_t now = get_clock_realtime();
    int64_t window_len_ms = 100;
    int64_t window_len_ns = window_len_ms * 1000 * 1000;
    bool window_expired = now > desc->window_begin_ns + window_len_ns;

    if (desc->n_used_entries > desc->window_max_entries) {
        desc->window_max_entries = desc->n_used_entries;
    }
    rate = desc->window_max_entries * 100 / old_size;

    if (rate > 70) {
        new_size = MIN(old_size << 1, 1 << CPU_TLB_DYN_MAX_BITS);
    } else if (rate < 30 && window_expired) {
        size_t ceil = pow2ceil(desc->window_max_entries);
        size_t expected_rate = desc->window_max_entries * 100 / ceil;

        /*
         * Avoid undersizing when the max number of entries seen is just below
         * a pow2. For instance, if max_entries == 1025, the expected use rate
         * would be 1025/2048==50%. However, if max_entries == 1023, we'd get
         * 1023/1024==99.9% use rate, so we'd likely end up doubling the size
         * later. Thus, make sure that the expected use rate remains below 70%.
         * (and since we double the size, that means the lowest rate we'd
         * expect to get is 35%, which is still in the 30-70% range where
         * we consider that the size is appropriate.)
         */
        if (expected_rate > 70) {
            ceil *= 2;
        }
        new_size = MAX(ceil, 1 << CPU_TLB_DYN_MIN_BITS);
    }

    if (new_size == old_size) {
        if (window_expired) {
            tlb_window_reset(desc, now, desc->n_used_entries);
        }
        return;
    }

    g_free(env_tlb(env)->f[mmu_idx].table);
    g_free(env_tlb(env)->d[mmu_idx].iotlb);

    tlb_window_reset(desc, now, 0);
    /* desc->n_used_entries is cleared by the caller */
    env_tlb(env)->f[mmu_idx].mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
    env_tlb(env)->f[mmu_idx].table = g_try_new(CPUTLBEntry, new_size);
    env_tlb(env)->d[mmu_idx].iotlb = g_try_new(CPUIOTLBEntry, new_size);
    /*
     * If the allocations fail, try smaller sizes. We just freed some
     * memory, so going back to half of new_size has a good chance of working.
     * Increased memory pressure elsewhere in the system might cause the
     * allocations to fail though, so we progressively reduce the allocation
     * size, aborting if we cannot even allocate the smallest TLB we support.
     */
    while (env_tlb(env)->f[mmu_idx].table == NULL ||
           env_tlb(env)->d[mmu_idx].iotlb == NULL) {
        if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
            error_report("%s: %s", __func__, strerror(errno));
            abort();
        }
        new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS);
        env_tlb(env)->f[mmu_idx].mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;

        g_free(env_tlb(env)->f[mmu_idx].table);
        g_free(env_tlb(env)->d[mmu_idx].iotlb);
        env_tlb(env)->f[mmu_idx].table = g_try_new(CPUTLBEntry, new_size);
        env_tlb(env)->d[mmu_idx].iotlb = g_try_new(CPUIOTLBEntry, new_size);
    }
}
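/*
 * Worked example of the policy above (illustrative numbers, not measured
 * data): with old_size == 2048 and window_max_entries == 1536, rate ==
 * 1536 * 100 / 2048 == 75 > 70, so the table doubles to 4096 entries.
 * With window_max_entries == 300 and an expired window, rate == 14 < 30;
 * pow2ceil(300) == 512 and expected_rate == 300 * 100 / 512 == 58, which
 * is below 70, so the table shrinks directly to 512 entries.
 */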
static inline void tlb_table_flush_by_mmuidx(CPUArchState *env, int mmu_idx)
{
    tlb_mmu_resize_locked(env, mmu_idx);
    memset(env_tlb(env)->f[mmu_idx].table, -1, sizeof_tlb(env, mmu_idx));
    env_tlb(env)->d[mmu_idx].n_used_entries = 0;
}

static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
{
    env_tlb(env)->d[mmu_idx].n_used_entries++;
}

static inline void tlb_n_used_entries_dec(CPUArchState *env, uintptr_t mmu_idx)
{
    env_tlb(env)->d[mmu_idx].n_used_entries--;
}

void tlb_init(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;

    qemu_spin_init(&env_tlb(env)->c.lock);

    /* Ensure that cpu_reset performs a full flush.  */
    env_tlb(env)->c.dirty = ALL_MMUIDX_BITS;

    tlb_dyn_init(env);
}

/* flush_all_helper: run fn across all cpus
 *
 * If the wait flag is set then the src cpu's helper will be queued as
 * "safe" work and the loop exited creating a synchronisation point
 * where all queued work will be finished before execution starts
 * again.
 */
static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
                             run_on_cpu_data d)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, fn, d);
        }
    }
}

void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
{
    CPUState *cpu;
    size_t full = 0, part = 0, elide = 0;

    CPU_FOREACH(cpu) {
        CPUArchState *env = cpu->env_ptr;

        full += atomic_read(&env_tlb(env)->c.full_flush_count);
        part += atomic_read(&env_tlb(env)->c.part_flush_count);
        elide += atomic_read(&env_tlb(env)->c.elide_flush_count);
    }
    *pfull = full;
    *ppart = part;
    *pelide = elide;
}
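/*
 * Illustrative sketch (hypothetical caller, not code from this file): a
 * monitor or statistics command could aggregate the per-CPU flush counts
 * roughly like this.
 */
static inline void example_dump_flush_counts(void)
{
    size_t full, part, elide;

    tlb_flush_counts(&full, &part, &elide);
    /* full: whole-TLB flushes; part: per-mmuidx; elide: skipped (clean). */
    printf("flushes: full=%zu part=%zu elide=%zu\n", full, part, elide);
}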
static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
{
    tlb_table_flush_by_mmuidx(env, mmu_idx);
    env_tlb(env)->d[mmu_idx].large_page_addr = -1;
    env_tlb(env)->d[mmu_idx].large_page_mask = -1;
    env_tlb(env)->d[mmu_idx].vindex = 0;
    memset(env_tlb(env)->d[mmu_idx].vtable, -1,
           sizeof(env_tlb(env)->d[0].vtable));
}

static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    uint16_t asked = data.host_int;
    uint16_t all_dirty, work, to_clean;

    assert_cpu_is_self(cpu);

    tlb_debug("mmu_idx:0x%04" PRIx16 "\n", asked);

    qemu_spin_lock(&env_tlb(env)->c.lock);

    all_dirty = env_tlb(env)->c.dirty;
    to_clean = asked & all_dirty;
    all_dirty &= ~to_clean;
    env_tlb(env)->c.dirty = all_dirty;

    for (work = to_clean; work != 0; work &= work - 1) {
        int mmu_idx = ctz32(work);
        tlb_flush_one_mmuidx_locked(env, mmu_idx);
    }

    qemu_spin_unlock(&env_tlb(env)->c.lock);

    cpu_tb_jmp_cache_clear(cpu);

    if (to_clean == ALL_MMUIDX_BITS) {
        atomic_set(&env_tlb(env)->c.full_flush_count,
                   env_tlb(env)->c.full_flush_count + 1);
    } else {
        atomic_set(&env_tlb(env)->c.part_flush_count,
                   env_tlb(env)->c.part_flush_count + ctpop16(to_clean));
        if (to_clean != asked) {
            atomic_set(&env_tlb(env)->c.elide_flush_count,
                       env_tlb(env)->c.elide_flush_count +
                       ctpop16(asked & ~to_clean));
        }
    }
}

void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
    tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);

    if (cpu->created && !qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
                         RUN_ON_CPU_HOST_INT(idxmap));
    } else {
        tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap));
    }
}

void tlb_flush(CPUState *cpu)
{
    tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS);
}
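/*
 * Worked example of the bitmask walk in tlb_flush_by_mmuidx_async_work()
 * above (illustrative): "work &= work - 1" clears the lowest set bit on
 * each iteration and ctz32() names it, so with work == 0b1010 the loop
 * flushes mmu_idx 1, then mmu_idx 3, then terminates.  MMU indexes that
 * are already clean were removed from to_clean and cost nothing.
 */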
void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus(src_cpu, ALL_MMUIDX_BITS);
}

void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS);
}

static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry,
                                        target_ulong page)
{
    return tlb_hit_page(tlb_entry->addr_read, page) ||
           tlb_hit_page(tlb_addr_write(tlb_entry), page) ||
           tlb_hit_page(tlb_entry->addr_code, page);
}

/**
 * tlb_entry_is_empty - return true if the entry is not in use
 * @te: pointer to CPUTLBEntry
 */
static inline bool tlb_entry_is_empty(const CPUTLBEntry *te)
{
    return te->addr_read == -1 && te->addr_write == -1 && te->addr_code == -1;
}

/* Called with tlb_c.lock held */
static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry,
                                          target_ulong page)
{
    if (tlb_hit_page_anyprot(tlb_entry, page)) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
        return true;
    }
    return false;
}
/* Called with tlb_c.lock held */
static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx,
                                              target_ulong page)
{
    CPUTLBDesc *d = &env_tlb(env)->d[mmu_idx];
    int k;

    assert_cpu_is_self(env_cpu(env));
    for (k = 0; k < CPU_VTLB_SIZE; k++) {
        if (tlb_flush_entry_locked(&d->vtable[k], page)) {
            tlb_n_used_entries_dec(env, mmu_idx);
        }
    }
}

static void tlb_flush_page_locked(CPUArchState *env, int midx,
                                  target_ulong page)
{
    target_ulong lp_addr = env_tlb(env)->d[midx].large_page_addr;
    target_ulong lp_mask = env_tlb(env)->d[midx].large_page_mask;

    /* Check if we need to flush due to large pages.  */
    if ((page & lp_mask) == lp_addr) {
        tlb_debug("forcing full flush midx %d ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  midx, lp_addr, lp_mask);
        tlb_flush_one_mmuidx_locked(env, midx);
    } else {
        if (tlb_flush_entry_locked(tlb_entry(env, midx, page), page)) {
            tlb_n_used_entries_dec(env, midx);
        }
        tlb_flush_vtlb_page_locked(env, midx, page);
    }
}

/* As we are going to hijack the bottom bits of the page address for a
 * mmuidx bit mask we need to fail to build if we can't do that
 */
QEMU_BUILD_BUG_ON(NB_MMU_MODES > TARGET_PAGE_BITS_MIN);

static void tlb_flush_page_by_mmuidx_async_work(CPUState *cpu,
                                                run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    target_ulong addr_and_mmuidx = (target_ulong) data.target_ptr;
    target_ulong addr = addr_and_mmuidx & TARGET_PAGE_MASK;
    unsigned long mmu_idx_bitmap = addr_and_mmuidx & ALL_MMUIDX_BITS;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("page addr:" TARGET_FMT_lx " mmu_map:0x%lx\n",
              addr, mmu_idx_bitmap);

    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if (test_bit(mmu_idx, &mmu_idx_bitmap)) {
            tlb_flush_page_locked(env, mmu_idx, addr);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);

    tb_flush_jmp_cache(cpu, addr);
}
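/*
 * Illustrative sketch of the encoding used above (hypothetical helper,
 * not a QEMU API): because NB_MMU_MODES <= TARGET_PAGE_BITS_MIN, the
 * mmuidx bitmap fits in the bits that page alignment leaves free, so a
 * single target_ulong can carry both the page address and the set of
 * MMU indexes to flush.
 */
static inline target_ulong example_encode_page_idxmap(target_ulong addr,
                                                      uint16_t idxmap)
{
    return (addr & TARGET_PAGE_MASK) | idxmap;
}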
void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
{
    target_ulong addr_and_mmu_idx;

    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap);

    /* This should already be page aligned */
    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
    addr_and_mmu_idx |= idxmap;

    if (!qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_work,
                         RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    } else {
        tlb_flush_page_by_mmuidx_async_work(
            cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    }
}

void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
    tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS);
}

void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr,
                                       uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_page_by_mmuidx_async_work;
    target_ulong addr_and_mmu_idx;

    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
    addr_and_mmu_idx |= idxmap;

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    fn(src_cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
}

void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
{
    tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS);
}

void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                              target_ulong addr,
                                              uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_page_by_mmuidx_async_work;
    target_ulong addr_and_mmu_idx;

    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
    addr_and_mmu_idx |= idxmap;

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
}

void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr)
{
    tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
                                             DIRTY_MEMORY_CODE);
}
/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
void tlb_unprotect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}


/*
 * Dirty write flag handling
 *
 * When the TCG code writes to a location it looks up the address in
 * the TLB and uses that data to compute the final address. If any of
 * the lower bits of the address are set then the slow path is forced.
 * There are a number of reasons to do this but for normal RAM the
 * most usual is detecting writes to code regions which may invalidate
 * generated code.
 *
 * Other vCPUs might be reading their TLBs during guest execution, so we update
 * te->addr_write with atomic_set. We don't need to worry about this for
 * oversized guests as MTTCG is disabled for them.
 *
 * Called with tlb_c.lock held.
 */
static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
                                         uintptr_t start, uintptr_t length)
{
    uintptr_t addr = tlb_entry->addr_write;

    if ((addr & (TLB_INVALID_MASK | TLB_MMIO | TLB_NOTDIRTY)) == 0) {
        addr &= TARGET_PAGE_MASK;
        addr += tlb_entry->addend;
        if ((addr - start) < length) {
#if TCG_OVERSIZED_GUEST
            tlb_entry->addr_write |= TLB_NOTDIRTY;
#else
            atomic_set(&tlb_entry->addr_write,
                       tlb_entry->addr_write | TLB_NOTDIRTY);
#endif
        }
    }
}

/*
 * Called with tlb_c.lock held.
 * Called only from the vCPU context, i.e. the TLB's owner thread.
 */
static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s)
{
    *d = *s;
}
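/*
 * Worked example of the scheme described above (illustrative): with 4 KiB
 * pages, a clean RAM page has addr_write == vaddr_page with all low 12
 * bits clear, so the TCG fast path's comparison succeeds and the store
 * goes straight to host memory.  Setting TLB_NOTDIRTY makes the
 * comparison fail without invalidating the entry, diverting the next
 * store to the slow path, which can mark the page dirty and then clear
 * the flag again via tlb_set_dirty() below.
 */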
/* This is a cross vCPU call (i.e. another vCPU resetting the flags of
 * the target vCPU).
 * We must take tlb_c.lock to avoid racing with another vCPU update. The only
 * thing actually updated is the target TLB entry ->addr_write flags.
 */
void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
{
    CPUArchState *env;

    int mmu_idx;

    env = cpu->env_ptr;
    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        unsigned int i;
        unsigned int n = tlb_n_entries(env, mmu_idx);

        for (i = 0; i < n; i++) {
            tlb_reset_dirty_range_locked(&env_tlb(env)->f[mmu_idx].table[i],
                                         start1, length);
        }

        for (i = 0; i < CPU_VTLB_SIZE; i++) {
            tlb_reset_dirty_range_locked(&env_tlb(env)->d[mmu_idx].vtable[i],
                                         start1, length);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);
}

/* Called with tlb_c.lock held */
static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry,
                                         target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    vaddr &= TARGET_PAGE_MASK;
    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, vaddr), vaddr);
    }

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_set_dirty1_locked(&env_tlb(env)->d[mmu_idx].vtable[k], vaddr);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);
}
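/*
 * Worked example for tlb_add_large_page() below (illustrative numbers):
 * tracking a 2 MiB page at 0x40000000 gives lp_mask == ~0x1fffff.  If a
 * second 2 MiB page at 0x40400000 is added, the two addresses differ in
 * bit 22, so the loop widens lp_mask until ((lp_addr ^ vaddr) & lp_mask)
 * == 0, leaving a single 8 MiB region that covers both pages; flushing
 * any page inside that region then forces a full flush of the mmu_idx.
 */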
/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated. */
static void tlb_add_large_page(CPUArchState *env, int mmu_idx,
                               target_ulong vaddr, target_ulong size)
{
    target_ulong lp_addr = env_tlb(env)->d[mmu_idx].large_page_addr;
    target_ulong lp_mask = ~(size - 1);

    if (lp_addr == (target_ulong)-1) {
        /* No previous large page.  */
        lp_addr = vaddr;
    } else {
        /* Extend the existing region to include the new page.
           This is a compromise between unnecessary flushes and
           the cost of maintaining a full variable size TLB.  */
        lp_mask &= env_tlb(env)->d[mmu_idx].large_page_mask;
        while (((lp_addr ^ vaddr) & lp_mask) != 0) {
            lp_mask <<= 1;
        }
    }
    env_tlb(env)->d[mmu_idx].large_page_addr = lp_addr & lp_mask;
    env_tlb(env)->d[mmu_idx].large_page_mask = lp_mask;
}

/* Add a new TLB entry. At most one entry for a given virtual address
 * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
 * supplied size is only used by tlb_flush_page.
 *
 * Called from TCG-generated code, which is under an RCU read-side
 * critical section.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs, int prot,
                             int mmu_idx, target_ulong size)
{
    CPUArchState *env = cpu->env_ptr;
    CPUTLB *tlb = env_tlb(env);
    CPUTLBDesc *desc = &tlb->d[mmu_idx];
    MemoryRegionSection *section;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    uintptr_t addend;
    CPUTLBEntry *te, tn;
    hwaddr iotlb, xlat, sz, paddr_page;
    target_ulong vaddr_page;
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    int wp_flags;

    assert_cpu_is_self(cpu);

    if (size <= TARGET_PAGE_SIZE) {
        sz = TARGET_PAGE_SIZE;
    } else {
        tlb_add_large_page(env, mmu_idx, vaddr, size);
        sz = size;
    }
    vaddr_page = vaddr & TARGET_PAGE_MASK;
    paddr_page = paddr & TARGET_PAGE_MASK;

    section = address_space_translate_for_iotlb(cpu, asidx, paddr_page,
                                                &xlat, &sz, attrs, &prot);
    assert(sz >= TARGET_PAGE_SIZE);

    tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
              " prot=%x idx=%d\n",
              vaddr, paddr, prot, mmu_idx);

    address = vaddr_page;
    if (size < TARGET_PAGE_SIZE) {
        /* Repeat the MMU check and TLB fill on every access.  */
        address |= TLB_INVALID_MASK;
    }
    if (attrs.byte_swap) {
        address |= TLB_BSWAP;
    }
    if (!memory_region_is_ram(section->mr) &&
        !memory_region_is_romd(section->mr)) {
        /* IO memory case */
        address |= TLB_MMIO;
        addend = 0;
    } else {
        /* TLB_MMIO for rom/romd handled below */
        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
    }

    code_address = address;
    iotlb = memory_region_section_get_iotlb(cpu, section, vaddr_page,
                                            paddr_page, xlat, prot, &address);
    wp_flags = cpu_watchpoint_address_matches(cpu, vaddr_page,
                                              TARGET_PAGE_SIZE);

    index = tlb_index(env, mmu_idx, vaddr_page);
    te = tlb_entry(env, mmu_idx, vaddr_page);

    /*
     * Hold the TLB lock for the rest of the function. We could acquire/release
     * the lock several times in the function, but it is faster to amortize the
     * acquisition cost by acquiring it just once. Note that this leads to
     * a longer critical section, but this is not a concern since the TLB lock
     * is unlikely to be contended.
     */
    qemu_spin_lock(&tlb->c.lock);

    /* Note that the tlb is no longer clean.  */
    tlb->c.dirty |= 1 << mmu_idx;

    /* Make sure there's no cached translation for the new page.  */
    tlb_flush_vtlb_page_locked(env, mmu_idx, vaddr_page);

    /*
     * Only evict the old entry to the victim tlb if it's for a
     * different page; otherwise just overwrite the stale data.
     */
    if (!tlb_hit_page_anyprot(te, vaddr_page) && !tlb_entry_is_empty(te)) {
        unsigned vidx = desc->vindex++ % CPU_VTLB_SIZE;
        CPUTLBEntry *tv = &desc->vtable[vidx];

        /* Evict the old entry into the victim tlb.  */
        copy_tlb_helper_locked(tv, te);
        desc->viotlb[vidx] = desc->iotlb[index];
        tlb_n_used_entries_dec(env, mmu_idx);
    }

    /* refill the tlb */
    /*
     * At this point iotlb contains a physical section number in the lower
     * TARGET_PAGE_BITS, and either
     *  + the ram_addr_t of the page base of the target RAM (if NOTDIRTY or ROM)
     *  + the offset within section->mr of the page base (otherwise)
     * We subtract the vaddr_page (which is page aligned and thus won't
     * disturb the low bits) to give an offset which can be added to the
     * (non-page-aligned) vaddr of the eventual memory access to get
     * the MemoryRegion offset for the access. Note that the vaddr we
     * subtract here is that of the page base, and not the same as the
     * vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
     */
    desc->iotlb[index].addr = iotlb - vaddr_page;
    desc->iotlb[index].attrs = attrs;

    /* Now calculate the new entry */
    tn.addend = addend - vaddr_page;
    if (prot & PAGE_READ) {
        tn.addr_read = address;
        if (wp_flags & BP_MEM_READ) {
            tn.addr_read |= TLB_WATCHPOINT;
        }
    } else {
        tn.addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        tn.addr_code = code_address;
    } else {
        tn.addr_code = -1;
    }

    tn.addr_write = -1;
    if (prot & PAGE_WRITE) {
        if ((memory_region_is_ram(section->mr) && section->readonly)
            || memory_region_is_romd(section->mr)) {
            /* Write access calls the I/O callback.  */
            tn.addr_write = address | TLB_MMIO;
        } else if (memory_region_is_ram(section->mr)
                   && cpu_physical_memory_is_clean(
                       memory_region_get_ram_addr(section->mr) + xlat)) {
            tn.addr_write = address | TLB_NOTDIRTY;
        } else {
            tn.addr_write = address;
        }
        if (prot & PAGE_WRITE_INV) {
            tn.addr_write |= TLB_INVALID_MASK;
        }
        if (wp_flags & BP_MEM_WRITE) {
            tn.addr_write |= TLB_WATCHPOINT;
        }
    }

    copy_tlb_helper_locked(te, &tn);
    tlb_n_used_entries_inc(env, mmu_idx);
    qemu_spin_unlock(&tlb->c.lock);
}
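/*
 * Illustrative sketch (hypothetical target code, not from this file): a
 * target's tlb_fill hook typically walks its page tables and then
 * installs the result roughly like this before returning true.
 */
static inline void example_fill_one_page(CPUState *cpu, target_ulong vaddr,
                                         hwaddr paddr, int mmu_idx)
{
    int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; /* from the page walk */

    tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
                            prot, mmu_idx, TARGET_PAGE_SIZE);
}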
/* Add a new TLB entry, but without specifying the memory
 * transaction attributes to be used.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
                            prot, mmu_idx, size);
}

static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    ram_addr = qemu_ram_addr_from_host(ptr);
    if (ram_addr == RAM_ADDR_INVALID) {
        error_report("Bad ram pointer %p", ptr);
        abort();
    }
    return ram_addr;
}
/*
 * Note: tlb_fill() can trigger a resize of the TLB. This means that all of the
 * caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must
 * be discarded and looked up again (e.g. via tlb_entry()).
 */
static void tlb_fill(CPUState *cpu, target_ulong addr, int size,
                     MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    bool ok;

    /*
     * This is not a probe, so only valid return is success; failure
     * should result in exception + longjmp to the cpu loop.
     */
    ok = cc->tlb_fill(cpu, addr, size, access_type, mmu_idx, false, retaddr);
    assert(ok);
}

static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                         int mmu_idx, target_ulong addr, uintptr_t retaddr,
                         MMUAccessType access_type, MemOp op)
{
    CPUState *cpu = env_cpu(env);
    hwaddr mr_offset;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    uint64_t val;
    bool locked = false;
    MemTxResult r;

    section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
    mr = section->mr;
    mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
    cpu->mem_io_pc = retaddr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;
    cpu->mem_io_access_type = access_type;

    if (mr->global_locking && !qemu_mutex_iothread_locked()) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    r = memory_region_dispatch_read(mr, mr_offset, &val, op, iotlbentry->attrs);
    if (r != MEMTX_OK) {
        hwaddr physaddr = mr_offset +
            section->offset_within_address_space -
            section->offset_within_region;

        cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), access_type,
                               mmu_idx, iotlbentry->attrs, r, retaddr);
    }
    if (locked) {
        qemu_mutex_unlock_iothread();
    }

    return val;
}
static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                      int mmu_idx, uint64_t val, target_ulong addr,
                      uintptr_t retaddr, MemOp op)
{
    CPUState *cpu = env_cpu(env);
    hwaddr mr_offset;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    bool locked = false;
    MemTxResult r;

    section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
    mr = section->mr;
    mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }
    cpu->mem_io_vaddr = addr;
    cpu->mem_io_pc = retaddr;

    if (mr->global_locking && !qemu_mutex_iothread_locked()) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    r = memory_region_dispatch_write(mr, mr_offset, val, op, iotlbentry->attrs);
    if (r != MEMTX_OK) {
        hwaddr physaddr = mr_offset +
            section->offset_within_address_space -
            section->offset_within_region;

        cpu_transaction_failed(cpu, physaddr, addr, memop_size(op),
                               MMU_DATA_STORE, mmu_idx, iotlbentry->attrs, r,
                               retaddr);
    }
    if (locked) {
        qemu_mutex_unlock_iothread();
    }
}

static inline target_ulong tlb_read_ofs(CPUTLBEntry *entry, size_t ofs)
{
#if TCG_OVERSIZED_GUEST
    return *(target_ulong *)((uintptr_t)entry + ofs);
#else
    /* ofs might correspond to .addr_write, so use atomic_read */
    return atomic_read((target_ulong *)((uintptr_t)entry + ofs));
#endif
}
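/*
 * Illustrative sketch (hypothetical helper, mirroring how probe_access()
 * below uses tlb_read_ofs()): selecting the comparator field at run time
 * via offsetof avoids writing three nearly identical accessors for
 * addr_read, addr_write and addr_code.
 */
static inline target_ulong example_read_addr_write(CPUTLBEntry *entry)
{
    return tlb_read_ofs(entry, offsetof(CPUTLBEntry, addr_write));
}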
/* Return true if ADDR is present in the victim tlb, and has been copied
   back to the main tlb.  */
static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
                           size_t elt_ofs, target_ulong page)
{
    size_t vidx;

    assert_cpu_is_self(env_cpu(env));
    for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
        CPUTLBEntry *vtlb = &env_tlb(env)->d[mmu_idx].vtable[vidx];
        target_ulong cmp;

        /* elt_ofs might correspond to .addr_write, so use atomic_read */
#if TCG_OVERSIZED_GUEST
        cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);
#else
        cmp = atomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs));
#endif

        if (cmp == page) {
            /* Found entry in victim tlb, swap tlb and iotlb.  */
            CPUTLBEntry tmptlb, *tlb = &env_tlb(env)->f[mmu_idx].table[index];

            qemu_spin_lock(&env_tlb(env)->c.lock);
            copy_tlb_helper_locked(&tmptlb, tlb);
            copy_tlb_helper_locked(tlb, vtlb);
            copy_tlb_helper_locked(vtlb, &tmptlb);
            qemu_spin_unlock(&env_tlb(env)->c.lock);

            CPUIOTLBEntry tmpio, *io = &env_tlb(env)->d[mmu_idx].iotlb[index];
            CPUIOTLBEntry *vio = &env_tlb(env)->d[mmu_idx].viotlb[vidx];
            tmpio = *io; *io = *vio; *vio = tmpio;
            return true;
        }
    }
    return false;
}

/* Macro to call the above, with local variables from the use context.  */
#define VICTIM_TLB_HIT(TY, ADDR) \
  victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
                 (ADDR) & TARGET_PAGE_MASK)

/*
 * Return a ram_addr_t for the virtual address for execution.
 *
 * Return -1 if we can't translate and execute from an entire page
 * of RAM.  This will force us to execute by loading and translating
 * one insn at a time, without caching.
 *
 * NOTE: This function will trigger an exception if the page is
 * not executable.
 */
tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
{
    uintptr_t mmu_idx = cpu_mmu_index(env, true);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
    void *p;

    if (unlikely(!tlb_hit(entry->addr_code, addr))) {
        if (!VICTIM_TLB_HIT(addr_code, addr)) {
            tlb_fill(env_cpu(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0);
            index = tlb_index(env, mmu_idx, addr);
            entry = tlb_entry(env, mmu_idx, addr);

            if (unlikely(entry->addr_code & TLB_INVALID_MASK)) {
                /*
                 * The MMU protection covers a smaller range than a target
                 * page, so we must redo the MMU check for every insn.
                 */
                return -1;
            }
        }
        assert(tlb_hit(entry->addr_code, addr));
    }

    if (unlikely(entry->addr_code & TLB_MMIO)) {
        /* The region is not backed by RAM.  */
        return -1;
    }

    p = (void *)((uintptr_t)addr + entry->addend);
    return qemu_ram_addr_from_host_nofail(p);
}
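/*
 * Illustrative sketch (hypothetical caller, not from this file): per the
 * comment above, a translator is expected to treat a -1 return as "not
 * backed by a full page of RAM" and fall back to translating one
 * instruction at a time, without caching.
 */
static inline bool example_page_is_ram_backed(CPUArchState *env,
                                              target_ulong addr)
{
    return get_page_addr_code(env, addr) != -1;
}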
If it is not 1071c25c283dSDavid Hildenbrand * permitted then an exception will be taken in the same way as if this 1072c25c283dSDavid Hildenbrand * were a real access (and we will not return). 1073fef39ccdSDavid Hildenbrand * If the size is 0 or the page requires I/O access, returns NULL; otherwise, 1074fef39ccdSDavid Hildenbrand * returns the address of the host page similar to tlb_vaddr_to_host(). 1075d9bb58e5SYang Zhong */ 1076c25c283dSDavid Hildenbrand void *probe_access(CPUArchState *env, target_ulong addr, int size, 1077c25c283dSDavid Hildenbrand MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) 1078d9bb58e5SYang Zhong { 1079383beda9SRichard Henderson uintptr_t index = tlb_index(env, mmu_idx, addr); 1080383beda9SRichard Henderson CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); 1081c25c283dSDavid Hildenbrand target_ulong tlb_addr; 1082c25c283dSDavid Hildenbrand size_t elt_ofs; 1083c25c283dSDavid Hildenbrand int wp_access; 1084d9bb58e5SYang Zhong 1085ca86cf32SDavid Hildenbrand g_assert(-(addr | TARGET_PAGE_MASK) >= size); 1086ca86cf32SDavid Hildenbrand 1087c25c283dSDavid Hildenbrand switch (access_type) { 1088c25c283dSDavid Hildenbrand case MMU_DATA_LOAD: 1089c25c283dSDavid Hildenbrand elt_ofs = offsetof(CPUTLBEntry, addr_read); 1090c25c283dSDavid Hildenbrand wp_access = BP_MEM_READ; 1091c25c283dSDavid Hildenbrand break; 1092c25c283dSDavid Hildenbrand case MMU_DATA_STORE: 1093c25c283dSDavid Hildenbrand elt_ofs = offsetof(CPUTLBEntry, addr_write); 1094c25c283dSDavid Hildenbrand wp_access = BP_MEM_WRITE; 1095c25c283dSDavid Hildenbrand break; 1096c25c283dSDavid Hildenbrand case MMU_INST_FETCH: 1097c25c283dSDavid Hildenbrand elt_ofs = offsetof(CPUTLBEntry, addr_code); 1098c25c283dSDavid Hildenbrand wp_access = BP_MEM_READ; 1099c25c283dSDavid Hildenbrand break; 1100c25c283dSDavid Hildenbrand default: 1101c25c283dSDavid Hildenbrand g_assert_not_reached(); 1102c25c283dSDavid Hildenbrand } 1103c25c283dSDavid Hildenbrand tlb_addr = tlb_read_ofs(entry, elt_ofs); 1104c25c283dSDavid Hildenbrand 110503a98189SDavid Hildenbrand if (unlikely(!tlb_hit(tlb_addr, addr))) { 1106c25c283dSDavid Hildenbrand if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, 1107c25c283dSDavid Hildenbrand addr & TARGET_PAGE_MASK)) { 1108c25c283dSDavid Hildenbrand tlb_fill(env_cpu(env), addr, size, access_type, mmu_idx, retaddr); 110903a98189SDavid Hildenbrand /* TLB resize via tlb_fill may have moved the entry. */ 111003a98189SDavid Hildenbrand index = tlb_index(env, mmu_idx, addr); 111103a98189SDavid Hildenbrand entry = tlb_entry(env, mmu_idx, addr); 1112d9bb58e5SYang Zhong } 1113c25c283dSDavid Hildenbrand tlb_addr = tlb_read_ofs(entry, elt_ofs); 111403a98189SDavid Hildenbrand } 111503a98189SDavid Hildenbrand 1116fef39ccdSDavid Hildenbrand if (!size) { 1117fef39ccdSDavid Hildenbrand return NULL; 1118fef39ccdSDavid Hildenbrand } 1119fef39ccdSDavid Hildenbrand 112003a98189SDavid Hildenbrand /* Handle watchpoints. */ 1121fef39ccdSDavid Hildenbrand if (tlb_addr & TLB_WATCHPOINT) { 112203a98189SDavid Hildenbrand cpu_check_watchpoint(env_cpu(env), addr, size, 112303a98189SDavid Hildenbrand env_tlb(env)->d[mmu_idx].iotlb[index].attrs, 1124c25c283dSDavid Hildenbrand wp_access, retaddr); 1125d9bb58e5SYang Zhong } 1126fef39ccdSDavid Hildenbrand 1127*5b87b3e6SRichard Henderson /* Reject I/O access, or other required slow-path. 
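 * A NULL return tells the caller that the page cannot be accessed through
 * a host pointer and that it must fall back to per-access helpers, which
 * route through io_readx/io_writex and the notdirty machinery.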
*/ 1128*5b87b3e6SRichard Henderson if (tlb_addr & (TLB_NOTDIRTY | TLB_MMIO | TLB_BSWAP)) { 1129fef39ccdSDavid Hildenbrand return NULL; 1130fef39ccdSDavid Hildenbrand } 1131fef39ccdSDavid Hildenbrand 1132fef39ccdSDavid Hildenbrand return (void *)((uintptr_t)addr + entry->addend); 1133d9bb58e5SYang Zhong } 1134d9bb58e5SYang Zhong 11354811e909SRichard Henderson void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr, 11364811e909SRichard Henderson MMUAccessType access_type, int mmu_idx) 11374811e909SRichard Henderson { 11384811e909SRichard Henderson CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); 11394811e909SRichard Henderson uintptr_t tlb_addr, page; 11404811e909SRichard Henderson size_t elt_ofs; 11414811e909SRichard Henderson 11424811e909SRichard Henderson switch (access_type) { 11434811e909SRichard Henderson case MMU_DATA_LOAD: 11444811e909SRichard Henderson elt_ofs = offsetof(CPUTLBEntry, addr_read); 11454811e909SRichard Henderson break; 11464811e909SRichard Henderson case MMU_DATA_STORE: 11474811e909SRichard Henderson elt_ofs = offsetof(CPUTLBEntry, addr_write); 11484811e909SRichard Henderson break; 11494811e909SRichard Henderson case MMU_INST_FETCH: 11504811e909SRichard Henderson elt_ofs = offsetof(CPUTLBEntry, addr_code); 11514811e909SRichard Henderson break; 11524811e909SRichard Henderson default: 11534811e909SRichard Henderson g_assert_not_reached(); 11544811e909SRichard Henderson } 11554811e909SRichard Henderson 11564811e909SRichard Henderson page = addr & TARGET_PAGE_MASK; 11574811e909SRichard Henderson tlb_addr = tlb_read_ofs(entry, elt_ofs); 11584811e909SRichard Henderson 11594811e909SRichard Henderson if (!tlb_hit_page(tlb_addr, page)) { 11604811e909SRichard Henderson uintptr_t index = tlb_index(env, mmu_idx, addr); 11614811e909SRichard Henderson 11624811e909SRichard Henderson if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, page)) { 116329a0af61SRichard Henderson CPUState *cs = env_cpu(env); 11644811e909SRichard Henderson CPUClass *cc = CPU_GET_CLASS(cs); 11654811e909SRichard Henderson 11664811e909SRichard Henderson if (!cc->tlb_fill(cs, addr, 0, access_type, mmu_idx, true, 0)) { 11674811e909SRichard Henderson /* Non-faulting page table read failed. */ 11684811e909SRichard Henderson return NULL; 11694811e909SRichard Henderson } 11704811e909SRichard Henderson 11714811e909SRichard Henderson /* TLB resize via tlb_fill may have moved the entry. */ 11724811e909SRichard Henderson entry = tlb_entry(env, mmu_idx, addr); 11734811e909SRichard Henderson } 11744811e909SRichard Henderson tlb_addr = tlb_read_ofs(entry, elt_ofs); 11754811e909SRichard Henderson } 11764811e909SRichard Henderson 11774811e909SRichard Henderson if (tlb_addr & ~TARGET_PAGE_MASK) { 11784811e909SRichard Henderson /* IO access */ 11794811e909SRichard Henderson return NULL; 11804811e909SRichard Henderson } 11814811e909SRichard Henderson 11824811e909SRichard Henderson return (void *)((uintptr_t)addr + entry->addend); 11834811e909SRichard Henderson } 11844811e909SRichard Henderson 1185d9bb58e5SYang Zhong /* Probe for a read-modify-write atomic operation. Do not allow unaligned 1186d9bb58e5SYang Zhong * operations, or io operations to proceed. Return the host address. 
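 * A minimal usage sketch (the real callers are the atomic_template.h
 * expansions below):
 *
 *   NotDirtyInfo ndi;
 *   void *haddr = atomic_mmu_lookup(env, addr, oi, retaddr, &ndi);
 *   ... perform the host atomic operation on haddr ...
 *   if (unlikely(ndi.active)) {
 *       memory_notdirty_write_complete(&ndi);
 *   }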
*/ 1187d9bb58e5SYang Zhong static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, 118834d49937SPeter Maydell TCGMemOpIdx oi, uintptr_t retaddr, 118934d49937SPeter Maydell NotDirtyInfo *ndi) 1190d9bb58e5SYang Zhong { 1191d9bb58e5SYang Zhong size_t mmu_idx = get_mmuidx(oi); 1192383beda9SRichard Henderson uintptr_t index = tlb_index(env, mmu_idx, addr); 1193383beda9SRichard Henderson CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr); 1194403f290cSEmilio G. Cota target_ulong tlb_addr = tlb_addr_write(tlbe); 119514776ab5STony Nguyen MemOp mop = get_memop(oi); 1196d9bb58e5SYang Zhong int a_bits = get_alignment_bits(mop); 1197d9bb58e5SYang Zhong int s_bits = mop & MO_SIZE; 119834d49937SPeter Maydell void *hostaddr; 1199d9bb58e5SYang Zhong 1200d9bb58e5SYang Zhong /* Adjust the given return address. */ 1201d9bb58e5SYang Zhong retaddr -= GETPC_ADJ; 1202d9bb58e5SYang Zhong 1203d9bb58e5SYang Zhong /* Enforce guest required alignment. */ 1204d9bb58e5SYang Zhong if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) { 1205d9bb58e5SYang Zhong /* ??? Maybe indicate atomic op to cpu_unaligned_access */ 120629a0af61SRichard Henderson cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE, 1207d9bb58e5SYang Zhong mmu_idx, retaddr); 1208d9bb58e5SYang Zhong } 1209d9bb58e5SYang Zhong 1210d9bb58e5SYang Zhong /* Enforce qemu required alignment. */ 1211d9bb58e5SYang Zhong if (unlikely(addr & ((1 << s_bits) - 1))) { 1212d9bb58e5SYang Zhong /* We get here if guest alignment was not requested, 1213d9bb58e5SYang Zhong or was not enforced by cpu_unaligned_access above. 1214d9bb58e5SYang Zhong We might widen the access and emulate, but for now 1215d9bb58e5SYang Zhong mark an exception and exit the cpu loop. */ 1216d9bb58e5SYang Zhong goto stop_the_world; 1217d9bb58e5SYang Zhong } 1218d9bb58e5SYang Zhong 1219d9bb58e5SYang Zhong /* Check TLB entry and enforce page permissions. */ 1220334692bcSPeter Maydell if (!tlb_hit(tlb_addr, addr)) { 1221d9bb58e5SYang Zhong if (!VICTIM_TLB_HIT(addr_write, addr)) { 122229a0af61SRichard Henderson tlb_fill(env_cpu(env), addr, 1 << s_bits, MMU_DATA_STORE, 122398670d47SLaurent Vivier mmu_idx, retaddr); 12246d967cb8SEmilio G. Cota index = tlb_index(env, mmu_idx, addr); 12256d967cb8SEmilio G. Cota tlbe = tlb_entry(env, mmu_idx, addr); 1226d9bb58e5SYang Zhong } 1227403f290cSEmilio G. Cota tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK; 1228d9bb58e5SYang Zhong } 1229d9bb58e5SYang Zhong 123055df6fcfSPeter Maydell /* Notice an IO access or a needs-MMU-lookup access */ 123130d7e098SRichard Henderson if (unlikely(tlb_addr & TLB_MMIO)) { 1232d9bb58e5SYang Zhong /* There's really nothing that can be done to 1233d9bb58e5SYang Zhong support this apart from stop-the-world. */ 1234d9bb58e5SYang Zhong goto stop_the_world; 1235d9bb58e5SYang Zhong } 1236d9bb58e5SYang Zhong 1237d9bb58e5SYang Zhong /* Let the guest notice RMW on a write-only page. */ 123834d49937SPeter Maydell if (unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) { 123929a0af61SRichard Henderson tlb_fill(env_cpu(env), addr, 1 << s_bits, MMU_DATA_LOAD, 124098670d47SLaurent Vivier mmu_idx, retaddr); 1241d9bb58e5SYang Zhong /* Since we don't support reads and writes to different addresses, 1242d9bb58e5SYang Zhong and we do have the proper page loaded for write, this shouldn't 1243d9bb58e5SYang Zhong ever return. But just in case, handle via stop-the-world. 
*/ 1244d9bb58e5SYang Zhong goto stop_the_world; 1245d9bb58e5SYang Zhong } 1246d9bb58e5SYang Zhong 124734d49937SPeter Maydell hostaddr = (void *)((uintptr_t)addr + tlbe->addend); 124834d49937SPeter Maydell 124934d49937SPeter Maydell ndi->active = false; 125034d49937SPeter Maydell if (unlikely(tlb_addr & TLB_NOTDIRTY)) { 125134d49937SPeter Maydell ndi->active = true; 125229a0af61SRichard Henderson memory_notdirty_write_prepare(ndi, env_cpu(env), addr, 125334d49937SPeter Maydell qemu_ram_addr_from_host_nofail(hostaddr), 125434d49937SPeter Maydell 1 << s_bits); 125534d49937SPeter Maydell } 125634d49937SPeter Maydell 125734d49937SPeter Maydell return hostaddr; 1258d9bb58e5SYang Zhong 1259d9bb58e5SYang Zhong stop_the_world: 126029a0af61SRichard Henderson cpu_loop_exit_atomic(env_cpu(env), retaddr); 1261d9bb58e5SYang Zhong } 1262d9bb58e5SYang Zhong 1263eed56642SAlex Bennée /* 1264eed56642SAlex Bennée * Load Helpers 1265eed56642SAlex Bennée * 1266eed56642SAlex Bennée * We support two different access types. SOFTMMU_CODE_ACCESS is 1267eed56642SAlex Bennée * specifically for reading instructions from system memory. It is 1268eed56642SAlex Bennée * called by the translation loop and in some helpers where the code 1269eed56642SAlex Bennée * is disassembled. It shouldn't be called directly by guest code. 1270eed56642SAlex Bennée */ 1271d9bb58e5SYang Zhong 12722dd92606SRichard Henderson typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr, 12732dd92606SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr); 12742dd92606SRichard Henderson 1275c6b716cdSRichard Henderson static inline uint64_t QEMU_ALWAYS_INLINE 127680d9d1c6SRichard Henderson load_memop(const void *haddr, MemOp op) 127780d9d1c6SRichard Henderson { 127880d9d1c6SRichard Henderson switch (op) { 127980d9d1c6SRichard Henderson case MO_UB: 128080d9d1c6SRichard Henderson return ldub_p(haddr); 128180d9d1c6SRichard Henderson case MO_BEUW: 128280d9d1c6SRichard Henderson return lduw_be_p(haddr); 128380d9d1c6SRichard Henderson case MO_LEUW: 128480d9d1c6SRichard Henderson return lduw_le_p(haddr); 128580d9d1c6SRichard Henderson case MO_BEUL: 128680d9d1c6SRichard Henderson return (uint32_t)ldl_be_p(haddr); 128780d9d1c6SRichard Henderson case MO_LEUL: 128880d9d1c6SRichard Henderson return (uint32_t)ldl_le_p(haddr); 128980d9d1c6SRichard Henderson case MO_BEQ: 129080d9d1c6SRichard Henderson return ldq_be_p(haddr); 129180d9d1c6SRichard Henderson case MO_LEQ: 129280d9d1c6SRichard Henderson return ldq_le_p(haddr); 129380d9d1c6SRichard Henderson default: 129480d9d1c6SRichard Henderson qemu_build_not_reached(); 129580d9d1c6SRichard Henderson } 129680d9d1c6SRichard Henderson } 129780d9d1c6SRichard Henderson 129880d9d1c6SRichard Henderson static inline uint64_t QEMU_ALWAYS_INLINE 12992dd92606SRichard Henderson load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, 1300be5c4787STony Nguyen uintptr_t retaddr, MemOp op, bool code_read, 13012dd92606SRichard Henderson FullLoadHelper *full_load) 1302eed56642SAlex Bennée { 1303eed56642SAlex Bennée uintptr_t mmu_idx = get_mmuidx(oi); 1304eed56642SAlex Bennée uintptr_t index = tlb_index(env, mmu_idx, addr); 1305eed56642SAlex Bennée CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); 1306eed56642SAlex Bennée target_ulong tlb_addr = code_read ? entry->addr_code : entry->addr_read; 1307eed56642SAlex Bennée const size_t tlb_off = code_read ? 
1308eed56642SAlex Bennée offsetof(CPUTLBEntry, addr_code) : offsetof(CPUTLBEntry, addr_read); 1309f1be3696SRichard Henderson const MMUAccessType access_type = 1310f1be3696SRichard Henderson code_read ? MMU_INST_FETCH : MMU_DATA_LOAD; 1311eed56642SAlex Bennée unsigned a_bits = get_alignment_bits(get_memop(oi)); 1312eed56642SAlex Bennée void *haddr; 1313eed56642SAlex Bennée uint64_t res; 1314be5c4787STony Nguyen size_t size = memop_size(op); 1315d9bb58e5SYang Zhong 1316eed56642SAlex Bennée /* Handle CPU specific unaligned behaviour */ 1317eed56642SAlex Bennée if (addr & ((1 << a_bits) - 1)) { 131829a0af61SRichard Henderson cpu_unaligned_access(env_cpu(env), addr, access_type, 1319eed56642SAlex Bennée mmu_idx, retaddr); 1320eed56642SAlex Bennée } 1321eed56642SAlex Bennée 1322eed56642SAlex Bennée /* If the TLB entry is for a different page, reload and try again. */ 1323eed56642SAlex Bennée if (!tlb_hit(tlb_addr, addr)) { 1324eed56642SAlex Bennée if (!victim_tlb_hit(env, mmu_idx, index, tlb_off, 1325eed56642SAlex Bennée addr & TARGET_PAGE_MASK)) { 132629a0af61SRichard Henderson tlb_fill(env_cpu(env), addr, size, 1327f1be3696SRichard Henderson access_type, mmu_idx, retaddr); 1328eed56642SAlex Bennée index = tlb_index(env, mmu_idx, addr); 1329eed56642SAlex Bennée entry = tlb_entry(env, mmu_idx, addr); 1330eed56642SAlex Bennée } 1331eed56642SAlex Bennée tlb_addr = code_read ? entry->addr_code : entry->addr_read; 133230d7e098SRichard Henderson tlb_addr &= ~TLB_INVALID_MASK; 1333eed56642SAlex Bennée } 1334eed56642SAlex Bennée 133550b107c5SRichard Henderson /* Handle anything that isn't just a straight memory access. */ 1336eed56642SAlex Bennée if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) { 133750b107c5SRichard Henderson CPUIOTLBEntry *iotlbentry; 1338*5b87b3e6SRichard Henderson bool need_swap; 133950b107c5SRichard Henderson 134050b107c5SRichard Henderson /* For anything that is unaligned, recurse through full_load. */ 1341eed56642SAlex Bennée if ((addr & (size - 1)) != 0) { 1342eed56642SAlex Bennée goto do_unaligned_access; 1343eed56642SAlex Bennée } 134450b107c5SRichard Henderson 134550b107c5SRichard Henderson iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index]; 134650b107c5SRichard Henderson 134750b107c5SRichard Henderson /* Handle watchpoints. */ 134850b107c5SRichard Henderson if (unlikely(tlb_addr & TLB_WATCHPOINT)) { 134950b107c5SRichard Henderson /* On watchpoint hit, this will longjmp out. */ 135050b107c5SRichard Henderson cpu_check_watchpoint(env_cpu(env), addr, size, 135150b107c5SRichard Henderson iotlbentry->attrs, BP_MEM_READ, retaddr); 1352*5b87b3e6SRichard Henderson } 135350b107c5SRichard Henderson 1354*5b87b3e6SRichard Henderson need_swap = size > 1 && (tlb_addr & TLB_BSWAP); 135550b107c5SRichard Henderson 135650b107c5SRichard Henderson /* Handle I/O access. */ 1357*5b87b3e6SRichard Henderson if (likely(tlb_addr & TLB_MMIO)) { 1358*5b87b3e6SRichard Henderson return io_readx(env, iotlbentry, mmu_idx, addr, retaddr, 1359*5b87b3e6SRichard Henderson access_type, op ^ (need_swap * MO_BSWAP)); 1360*5b87b3e6SRichard Henderson } 1361*5b87b3e6SRichard Henderson 1362*5b87b3e6SRichard Henderson haddr = (void *)((uintptr_t)addr + entry->addend); 1363*5b87b3e6SRichard Henderson 1364*5b87b3e6SRichard Henderson /* 1365*5b87b3e6SRichard Henderson * Keep these two load_memop separate to ensure that the compiler 1366*5b87b3e6SRichard Henderson * is able to fold the entire function to a single instruction. 1367*5b87b3e6SRichard Henderson * There is a build-time assert inside to remind you of this. 
;-) 1368*5b87b3e6SRichard Henderson */ 1369*5b87b3e6SRichard Henderson if (unlikely(need_swap)) { 1370*5b87b3e6SRichard Henderson return load_memop(haddr, op ^ MO_BSWAP); 1371*5b87b3e6SRichard Henderson } 1372*5b87b3e6SRichard Henderson return load_memop(haddr, op); 1373eed56642SAlex Bennée } 1374eed56642SAlex Bennée 1375eed56642SAlex Bennée /* Handle slow unaligned access (it spans two pages or IO). */ 1376eed56642SAlex Bennée if (size > 1 1377eed56642SAlex Bennée && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1 1378eed56642SAlex Bennée >= TARGET_PAGE_SIZE)) { 1379eed56642SAlex Bennée target_ulong addr1, addr2; 13808c79b288SAlex Bennée uint64_t r1, r2; 1381eed56642SAlex Bennée unsigned shift; 1382eed56642SAlex Bennée do_unaligned_access: 1383ab7a2009SAlex Bennée addr1 = addr & ~((target_ulong)size - 1); 1384eed56642SAlex Bennée addr2 = addr1 + size; 13852dd92606SRichard Henderson r1 = full_load(env, addr1, oi, retaddr); 13862dd92606SRichard Henderson r2 = full_load(env, addr2, oi, retaddr); 1387eed56642SAlex Bennée shift = (addr & (size - 1)) * 8; 1388eed56642SAlex Bennée 1389be5c4787STony Nguyen if (memop_big_endian(op)) { 1390eed56642SAlex Bennée /* Big-endian combine. */ 1391eed56642SAlex Bennée res = (r1 << shift) | (r2 >> ((size * 8) - shift)); 1392eed56642SAlex Bennée } else { 1393eed56642SAlex Bennée /* Little-endian combine. */ 1394eed56642SAlex Bennée res = (r1 >> shift) | (r2 << ((size * 8) - shift)); 1395eed56642SAlex Bennée } 1396eed56642SAlex Bennée return res & MAKE_64BIT_MASK(0, size * 8); 1397eed56642SAlex Bennée } 1398eed56642SAlex Bennée 1399eed56642SAlex Bennée haddr = (void *)((uintptr_t)addr + entry->addend); 140080d9d1c6SRichard Henderson return load_memop(haddr, op); 1401eed56642SAlex Bennée } 1402eed56642SAlex Bennée 1403eed56642SAlex Bennée /* 1404eed56642SAlex Bennée * For the benefit of TCG generated code, we want to avoid the 1405eed56642SAlex Bennée * complication of ABI-specific return type promotion and always 1406eed56642SAlex Bennée * return a value extended to the register size of the host. This is 1407eed56642SAlex Bennée * tcg_target_long, except in the case of a 32-bit host and 64-bit 1408eed56642SAlex Bennée * data, and for that we always have uint64_t. 1409eed56642SAlex Bennée * 1410eed56642SAlex Bennée * We don't bother with this widened value for SOFTMMU_CODE_ACCESS. 
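 * For example, helper_le_lduw_mmu returns its 16-bit result zero-extended
 * in a tcg_target_ulong, and the signed variant helper_le_ldsw_mmu below
 * gets sign extension simply by casting that result through int16_t.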
1411eed56642SAlex Bennée */ 1412eed56642SAlex Bennée 14132dd92606SRichard Henderson static uint64_t full_ldub_mmu(CPUArchState *env, target_ulong addr, 14142dd92606SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 14152dd92606SRichard Henderson { 1416be5c4787STony Nguyen return load_helper(env, addr, oi, retaddr, MO_UB, false, full_ldub_mmu); 14172dd92606SRichard Henderson } 14182dd92606SRichard Henderson 1419fc1bc777SRichard Henderson tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr, 1420fc1bc777SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 1421eed56642SAlex Bennée { 14222dd92606SRichard Henderson return full_ldub_mmu(env, addr, oi, retaddr); 14232dd92606SRichard Henderson } 14242dd92606SRichard Henderson 14252dd92606SRichard Henderson static uint64_t full_le_lduw_mmu(CPUArchState *env, target_ulong addr, 14262dd92606SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 14272dd92606SRichard Henderson { 1428be5c4787STony Nguyen return load_helper(env, addr, oi, retaddr, MO_LEUW, false, 14292dd92606SRichard Henderson full_le_lduw_mmu); 1430eed56642SAlex Bennée } 1431eed56642SAlex Bennée 1432fc1bc777SRichard Henderson tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr, 1433fc1bc777SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 1434eed56642SAlex Bennée { 14352dd92606SRichard Henderson return full_le_lduw_mmu(env, addr, oi, retaddr); 14362dd92606SRichard Henderson } 14372dd92606SRichard Henderson 14382dd92606SRichard Henderson static uint64_t full_be_lduw_mmu(CPUArchState *env, target_ulong addr, 14392dd92606SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 14402dd92606SRichard Henderson { 1441be5c4787STony Nguyen return load_helper(env, addr, oi, retaddr, MO_BEUW, false, 14422dd92606SRichard Henderson full_be_lduw_mmu); 1443eed56642SAlex Bennée } 1444eed56642SAlex Bennée 1445fc1bc777SRichard Henderson tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr, 1446fc1bc777SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 1447eed56642SAlex Bennée { 14482dd92606SRichard Henderson return full_be_lduw_mmu(env, addr, oi, retaddr); 14492dd92606SRichard Henderson } 14502dd92606SRichard Henderson 14512dd92606SRichard Henderson static uint64_t full_le_ldul_mmu(CPUArchState *env, target_ulong addr, 14522dd92606SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 14532dd92606SRichard Henderson { 1454be5c4787STony Nguyen return load_helper(env, addr, oi, retaddr, MO_LEUL, false, 14552dd92606SRichard Henderson full_le_ldul_mmu); 1456eed56642SAlex Bennée } 1457eed56642SAlex Bennée 1458fc1bc777SRichard Henderson tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr, 1459fc1bc777SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 1460eed56642SAlex Bennée { 14612dd92606SRichard Henderson return full_le_ldul_mmu(env, addr, oi, retaddr); 14622dd92606SRichard Henderson } 14632dd92606SRichard Henderson 14642dd92606SRichard Henderson static uint64_t full_be_ldul_mmu(CPUArchState *env, target_ulong addr, 14652dd92606SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 14662dd92606SRichard Henderson { 1467be5c4787STony Nguyen return load_helper(env, addr, oi, retaddr, MO_BEUL, false, 14682dd92606SRichard Henderson full_be_ldul_mmu); 1469eed56642SAlex Bennée } 1470eed56642SAlex Bennée 1471fc1bc777SRichard Henderson tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr, 1472fc1bc777SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 1473eed56642SAlex Bennée { 14742dd92606SRichard 
Henderson return full_be_ldul_mmu(env, addr, oi, retaddr); 1475eed56642SAlex Bennée } 1476eed56642SAlex Bennée 1477fc1bc777SRichard Henderson uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr, 1478fc1bc777SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 1479eed56642SAlex Bennée { 1480be5c4787STony Nguyen return load_helper(env, addr, oi, retaddr, MO_LEQ, false, 14812dd92606SRichard Henderson helper_le_ldq_mmu); 1482eed56642SAlex Bennée } 1483eed56642SAlex Bennée 1484fc1bc777SRichard Henderson uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr, 1485fc1bc777SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 1486eed56642SAlex Bennée { 1487be5c4787STony Nguyen return load_helper(env, addr, oi, retaddr, MO_BEQ, false, 14882dd92606SRichard Henderson helper_be_ldq_mmu); 1489eed56642SAlex Bennée } 1490eed56642SAlex Bennée 1491eed56642SAlex Bennée /* 1492eed56642SAlex Bennée * Provide signed versions of the load routines as well. We can of course 1493eed56642SAlex Bennée * avoid this for 64-bit data, or for 32-bit data on 32-bit host. 1494eed56642SAlex Bennée */ 1495eed56642SAlex Bennée 1496eed56642SAlex Bennée 1497eed56642SAlex Bennée tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr, 1498eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 1499eed56642SAlex Bennée { 1500eed56642SAlex Bennée return (int8_t)helper_ret_ldub_mmu(env, addr, oi, retaddr); 1501eed56642SAlex Bennée } 1502eed56642SAlex Bennée 1503eed56642SAlex Bennée tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr, 1504eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 1505eed56642SAlex Bennée { 1506eed56642SAlex Bennée return (int16_t)helper_le_lduw_mmu(env, addr, oi, retaddr); 1507eed56642SAlex Bennée } 1508eed56642SAlex Bennée 1509eed56642SAlex Bennée tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr, 1510eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 1511eed56642SAlex Bennée { 1512eed56642SAlex Bennée return (int16_t)helper_be_lduw_mmu(env, addr, oi, retaddr); 1513eed56642SAlex Bennée } 1514eed56642SAlex Bennée 1515eed56642SAlex Bennée tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr, 1516eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 1517eed56642SAlex Bennée { 1518eed56642SAlex Bennée return (int32_t)helper_le_ldul_mmu(env, addr, oi, retaddr); 1519eed56642SAlex Bennée } 1520eed56642SAlex Bennée 1521eed56642SAlex Bennée tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr, 1522eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 1523eed56642SAlex Bennée { 1524eed56642SAlex Bennée return (int32_t)helper_be_ldul_mmu(env, addr, oi, retaddr); 1525eed56642SAlex Bennée } 1526eed56642SAlex Bennée 1527eed56642SAlex Bennée /* 1528eed56642SAlex Bennée * Store Helpers 1529eed56642SAlex Bennée */ 1530eed56642SAlex Bennée 1531c6b716cdSRichard Henderson static inline void QEMU_ALWAYS_INLINE 153280d9d1c6SRichard Henderson store_memop(void *haddr, uint64_t val, MemOp op) 153380d9d1c6SRichard Henderson { 153480d9d1c6SRichard Henderson switch (op) { 153580d9d1c6SRichard Henderson case MO_UB: 153680d9d1c6SRichard Henderson stb_p(haddr, val); 153780d9d1c6SRichard Henderson break; 153880d9d1c6SRichard Henderson case MO_BEUW: 153980d9d1c6SRichard Henderson stw_be_p(haddr, val); 154080d9d1c6SRichard Henderson break; 154180d9d1c6SRichard Henderson case MO_LEUW: 154280d9d1c6SRichard Henderson stw_le_p(haddr, val); 154380d9d1c6SRichard Henderson break; 154480d9d1c6SRichard Henderson 
case MO_BEUL: 154580d9d1c6SRichard Henderson stl_be_p(haddr, val); 154680d9d1c6SRichard Henderson break; 154780d9d1c6SRichard Henderson case MO_LEUL: 154880d9d1c6SRichard Henderson stl_le_p(haddr, val); 154980d9d1c6SRichard Henderson break; 155080d9d1c6SRichard Henderson case MO_BEQ: 155180d9d1c6SRichard Henderson stq_be_p(haddr, val); 155280d9d1c6SRichard Henderson break; 155380d9d1c6SRichard Henderson case MO_LEQ: 155480d9d1c6SRichard Henderson stq_le_p(haddr, val); 155580d9d1c6SRichard Henderson break; 155680d9d1c6SRichard Henderson default: 155780d9d1c6SRichard Henderson qemu_build_not_reached(); 155880d9d1c6SRichard Henderson } 155980d9d1c6SRichard Henderson } 156080d9d1c6SRichard Henderson 156180d9d1c6SRichard Henderson static inline void QEMU_ALWAYS_INLINE 15624601f8d1SRichard Henderson store_helper(CPUArchState *env, target_ulong addr, uint64_t val, 1563be5c4787STony Nguyen TCGMemOpIdx oi, uintptr_t retaddr, MemOp op) 1564eed56642SAlex Bennée { 1565eed56642SAlex Bennée uintptr_t mmu_idx = get_mmuidx(oi); 1566eed56642SAlex Bennée uintptr_t index = tlb_index(env, mmu_idx, addr); 1567eed56642SAlex Bennée CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); 1568eed56642SAlex Bennée target_ulong tlb_addr = tlb_addr_write(entry); 1569eed56642SAlex Bennée const size_t tlb_off = offsetof(CPUTLBEntry, addr_write); 1570eed56642SAlex Bennée unsigned a_bits = get_alignment_bits(get_memop(oi)); 1571eed56642SAlex Bennée void *haddr; 1572be5c4787STony Nguyen size_t size = memop_size(op); 1573eed56642SAlex Bennée 1574eed56642SAlex Bennée /* Handle CPU specific unaligned behaviour */ 1575eed56642SAlex Bennée if (addr & ((1 << a_bits) - 1)) { 157629a0af61SRichard Henderson cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE, 1577eed56642SAlex Bennée mmu_idx, retaddr); 1578eed56642SAlex Bennée } 1579eed56642SAlex Bennée 1580eed56642SAlex Bennée /* If the TLB entry is for a different page, reload and try again. */ 1581eed56642SAlex Bennée if (!tlb_hit(tlb_addr, addr)) { 1582eed56642SAlex Bennée if (!victim_tlb_hit(env, mmu_idx, index, tlb_off, 1583eed56642SAlex Bennée addr & TARGET_PAGE_MASK)) { 158429a0af61SRichard Henderson tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE, 1585eed56642SAlex Bennée mmu_idx, retaddr); 1586eed56642SAlex Bennée index = tlb_index(env, mmu_idx, addr); 1587eed56642SAlex Bennée entry = tlb_entry(env, mmu_idx, addr); 1588eed56642SAlex Bennée } 1589eed56642SAlex Bennée tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK; 1590eed56642SAlex Bennée } 1591eed56642SAlex Bennée 159250b107c5SRichard Henderson /* Handle anything that isn't just a straight memory access. */ 1593eed56642SAlex Bennée if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) { 159450b107c5SRichard Henderson CPUIOTLBEntry *iotlbentry; 1595*5b87b3e6SRichard Henderson bool need_swap; 159650b107c5SRichard Henderson 159750b107c5SRichard Henderson /* For anything that is unaligned, recurse through byte stores. */ 1598eed56642SAlex Bennée if ((addr & (size - 1)) != 0) { 1599eed56642SAlex Bennée goto do_unaligned_access; 1600eed56642SAlex Bennée } 160150b107c5SRichard Henderson 160250b107c5SRichard Henderson iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index]; 160350b107c5SRichard Henderson 160450b107c5SRichard Henderson /* Handle watchpoints. */ 160550b107c5SRichard Henderson if (unlikely(tlb_addr & TLB_WATCHPOINT)) { 160650b107c5SRichard Henderson /* On watchpoint hit, this will longjmp out. 
*/ 160750b107c5SRichard Henderson cpu_check_watchpoint(env_cpu(env), addr, size, 160850b107c5SRichard Henderson iotlbentry->attrs, BP_MEM_WRITE, retaddr); 1609*5b87b3e6SRichard Henderson } 161050b107c5SRichard Henderson 1611*5b87b3e6SRichard Henderson need_swap = size > 1 && (tlb_addr & TLB_BSWAP); 161250b107c5SRichard Henderson 161350b107c5SRichard Henderson /* Handle I/O access. */ 1614*5b87b3e6SRichard Henderson if (likely(tlb_addr & (TLB_MMIO | TLB_NOTDIRTY))) { 1615*5b87b3e6SRichard Henderson io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr, 1616*5b87b3e6SRichard Henderson op ^ (need_swap * MO_BSWAP)); 1617*5b87b3e6SRichard Henderson return; 1618*5b87b3e6SRichard Henderson } 1619*5b87b3e6SRichard Henderson 1620*5b87b3e6SRichard Henderson haddr = (void *)((uintptr_t)addr + entry->addend); 1621*5b87b3e6SRichard Henderson 1622*5b87b3e6SRichard Henderson /* 1623*5b87b3e6SRichard Henderson * Keep these two store_memop separate to ensure that the compiler 1624*5b87b3e6SRichard Henderson * is able to fold the entire function to a single instruction. 1625*5b87b3e6SRichard Henderson * There is a build-time assert inside to remind you of this. ;-) 1626*5b87b3e6SRichard Henderson */ 1627*5b87b3e6SRichard Henderson if (unlikely(need_swap)) { 1628*5b87b3e6SRichard Henderson store_memop(haddr, val, op ^ MO_BSWAP); 1629*5b87b3e6SRichard Henderson } else { 1630*5b87b3e6SRichard Henderson store_memop(haddr, val, op); 1631*5b87b3e6SRichard Henderson } 1632eed56642SAlex Bennée return; 1633eed56642SAlex Bennée } 1634eed56642SAlex Bennée 1635eed56642SAlex Bennée /* Handle slow unaligned access (it spans two pages or IO). */ 1636eed56642SAlex Bennée if (size > 1 1637eed56642SAlex Bennée && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1 1638eed56642SAlex Bennée >= TARGET_PAGE_SIZE)) { 1639eed56642SAlex Bennée int i; 1640eed56642SAlex Bennée uintptr_t index2; 1641eed56642SAlex Bennée CPUTLBEntry *entry2; 1642eed56642SAlex Bennée target_ulong page2, tlb_addr2; 16438f7cd2adSRichard Henderson size_t size2; 16448f7cd2adSRichard Henderson 1645eed56642SAlex Bennée do_unaligned_access: 1646eed56642SAlex Bennée /* 1647eed56642SAlex Bennée * Ensure the second page is in the TLB. Note that the first page 1648eed56642SAlex Bennée * is already guaranteed to be filled, and that the second page 1649eed56642SAlex Bennée * cannot evict the first. 1650eed56642SAlex Bennée */ 1651eed56642SAlex Bennée page2 = (addr + size) & TARGET_PAGE_MASK; 16528f7cd2adSRichard Henderson size2 = (addr + size) & ~TARGET_PAGE_MASK; 1653eed56642SAlex Bennée index2 = tlb_index(env, mmu_idx, page2); 1654eed56642SAlex Bennée entry2 = tlb_entry(env, mmu_idx, page2); 1655eed56642SAlex Bennée tlb_addr2 = tlb_addr_write(entry2); 165650b107c5SRichard Henderson if (!tlb_hit_page(tlb_addr2, page2)) { 165750b107c5SRichard Henderson if (!victim_tlb_hit(env, mmu_idx, index2, tlb_off, page2)) { 16588f7cd2adSRichard Henderson tlb_fill(env_cpu(env), page2, size2, MMU_DATA_STORE, 1659eed56642SAlex Bennée mmu_idx, retaddr); 166050b107c5SRichard Henderson index2 = tlb_index(env, mmu_idx, page2); 166150b107c5SRichard Henderson entry2 = tlb_entry(env, mmu_idx, page2); 166250b107c5SRichard Henderson } 166350b107c5SRichard Henderson tlb_addr2 = tlb_addr_write(entry2); 166450b107c5SRichard Henderson } 166550b107c5SRichard Henderson 166650b107c5SRichard Henderson /* 166750b107c5SRichard Henderson * Handle watchpoints. Since this may trap, all checks 166850b107c5SRichard Henderson * must happen before any store. 
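 * (Otherwise a watchpoint on the second page could fire after bytes had
 * already been committed to the first page, letting the guest observe a
 * partial store.)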
166950b107c5SRichard Henderson */ 167050b107c5SRichard Henderson if (unlikely(tlb_addr & TLB_WATCHPOINT)) { 167150b107c5SRichard Henderson cpu_check_watchpoint(env_cpu(env), addr, size - size2, 167250b107c5SRichard Henderson env_tlb(env)->d[mmu_idx].iotlb[index].attrs, 167350b107c5SRichard Henderson BP_MEM_WRITE, retaddr); 167450b107c5SRichard Henderson } 167550b107c5SRichard Henderson if (unlikely(tlb_addr2 & TLB_WATCHPOINT)) { 167650b107c5SRichard Henderson cpu_check_watchpoint(env_cpu(env), page2, size2, 167750b107c5SRichard Henderson env_tlb(env)->d[mmu_idx].iotlb[index2].attrs, 167850b107c5SRichard Henderson BP_MEM_WRITE, retaddr); 1679eed56642SAlex Bennée } 1680eed56642SAlex Bennée 1681eed56642SAlex Bennée /* 1682eed56642SAlex Bennée * XXX: not efficient, but simple. 1683eed56642SAlex Bennée * This loop must go in the forward direction to avoid issues 1684eed56642SAlex Bennée * with self-modifying code in Windows 64-bit. 1685eed56642SAlex Bennée */ 1686eed56642SAlex Bennée for (i = 0; i < size; ++i) { 1687eed56642SAlex Bennée uint8_t val8; 1688be5c4787STony Nguyen if (memop_big_endian(op)) { 1689eed56642SAlex Bennée /* Big-endian extract. */ 1690eed56642SAlex Bennée val8 = val >> (((size - 1) * 8) - (i * 8)); 1691eed56642SAlex Bennée } else { 1692eed56642SAlex Bennée /* Little-endian extract. */ 1693eed56642SAlex Bennée val8 = val >> (i * 8); 1694eed56642SAlex Bennée } 16954601f8d1SRichard Henderson helper_ret_stb_mmu(env, addr + i, val8, oi, retaddr); 1696eed56642SAlex Bennée } 1697eed56642SAlex Bennée return; 1698eed56642SAlex Bennée } 1699eed56642SAlex Bennée 1700eed56642SAlex Bennée haddr = (void *)((uintptr_t)addr + entry->addend); 170180d9d1c6SRichard Henderson store_memop(haddr, val, op); 1702eed56642SAlex Bennée } 1703eed56642SAlex Bennée 1704fc1bc777SRichard Henderson void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val, 1705eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 1706eed56642SAlex Bennée { 1707be5c4787STony Nguyen store_helper(env, addr, val, oi, retaddr, MO_UB); 1708eed56642SAlex Bennée } 1709eed56642SAlex Bennée 1710fc1bc777SRichard Henderson void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val, 1711eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 1712eed56642SAlex Bennée { 1713be5c4787STony Nguyen store_helper(env, addr, val, oi, retaddr, MO_LEUW); 1714eed56642SAlex Bennée } 1715eed56642SAlex Bennée 1716fc1bc777SRichard Henderson void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val, 1717eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 1718eed56642SAlex Bennée { 1719be5c4787STony Nguyen store_helper(env, addr, val, oi, retaddr, MO_BEUW); 1720eed56642SAlex Bennée } 1721eed56642SAlex Bennée 1722fc1bc777SRichard Henderson void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, 1723eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 1724eed56642SAlex Bennée { 1725be5c4787STony Nguyen store_helper(env, addr, val, oi, retaddr, MO_LEUL); 1726eed56642SAlex Bennée } 1727eed56642SAlex Bennée 1728fc1bc777SRichard Henderson void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, 1729eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 1730eed56642SAlex Bennée { 1731be5c4787STony Nguyen store_helper(env, addr, val, oi, retaddr, MO_BEUL); 1732eed56642SAlex Bennée } 1733eed56642SAlex Bennée 1734fc1bc777SRichard Henderson void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, 1735eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t 
retaddr) 1736eed56642SAlex Bennée { 1737be5c4787STony Nguyen store_helper(env, addr, val, oi, retaddr, MO_LEQ); 1738eed56642SAlex Bennée } 1739eed56642SAlex Bennée 1740fc1bc777SRichard Henderson void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, 1741eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 1742eed56642SAlex Bennée { 1743be5c4787STony Nguyen store_helper(env, addr, val, oi, retaddr, MO_BEQ); 1744eed56642SAlex Bennée } 1745d9bb58e5SYang Zhong 1746d9bb58e5SYang Zhong /* First set of helpers allows passing in of OI and RETADDR. This makes 1747d9bb58e5SYang Zhong them callable from other helpers. */ 1748d9bb58e5SYang Zhong 1749d9bb58e5SYang Zhong #define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr 1750d9bb58e5SYang Zhong #define ATOMIC_NAME(X) \ 1751d9bb58e5SYang Zhong HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu)) 175234d49937SPeter Maydell #define ATOMIC_MMU_DECLS NotDirtyInfo ndi 175334d49937SPeter Maydell #define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, retaddr, &ndi) 175434d49937SPeter Maydell #define ATOMIC_MMU_CLEANUP \ 175534d49937SPeter Maydell do { \ 175634d49937SPeter Maydell if (unlikely(ndi.active)) { \ 175734d49937SPeter Maydell memory_notdirty_write_complete(&ndi); \ 175834d49937SPeter Maydell } \ 175934d49937SPeter Maydell } while (0) 1760d9bb58e5SYang Zhong 1761d9bb58e5SYang Zhong #define DATA_SIZE 1 1762d9bb58e5SYang Zhong #include "atomic_template.h" 1763d9bb58e5SYang Zhong 1764d9bb58e5SYang Zhong #define DATA_SIZE 2 1765d9bb58e5SYang Zhong #include "atomic_template.h" 1766d9bb58e5SYang Zhong 1767d9bb58e5SYang Zhong #define DATA_SIZE 4 1768d9bb58e5SYang Zhong #include "atomic_template.h" 1769d9bb58e5SYang Zhong 1770d9bb58e5SYang Zhong #ifdef CONFIG_ATOMIC64 1771d9bb58e5SYang Zhong #define DATA_SIZE 8 1772d9bb58e5SYang Zhong #include "atomic_template.h" 1773d9bb58e5SYang Zhong #endif 1774d9bb58e5SYang Zhong 1775e6cd4bb5SRichard Henderson #if HAVE_CMPXCHG128 || HAVE_ATOMIC128 1776d9bb58e5SYang Zhong #define DATA_SIZE 16 1777d9bb58e5SYang Zhong #include "atomic_template.h" 1778d9bb58e5SYang Zhong #endif 1779d9bb58e5SYang Zhong 1780d9bb58e5SYang Zhong /* Second set of helpers are directly callable from TCG as helpers. */ 1781d9bb58e5SYang Zhong 1782d9bb58e5SYang Zhong #undef EXTRA_ARGS 1783d9bb58e5SYang Zhong #undef ATOMIC_NAME 1784d9bb58e5SYang Zhong #undef ATOMIC_MMU_LOOKUP 1785d9bb58e5SYang Zhong #define EXTRA_ARGS , TCGMemOpIdx oi 1786d9bb58e5SYang Zhong #define ATOMIC_NAME(X) HELPER(glue(glue(atomic_ ## X, SUFFIX), END)) 178734d49937SPeter Maydell #define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, GETPC(), &ndi) 1788d9bb58e5SYang Zhong 1789d9bb58e5SYang Zhong #define DATA_SIZE 1 1790d9bb58e5SYang Zhong #include "atomic_template.h" 1791d9bb58e5SYang Zhong 1792d9bb58e5SYang Zhong #define DATA_SIZE 2 1793d9bb58e5SYang Zhong #include "atomic_template.h" 1794d9bb58e5SYang Zhong 1795d9bb58e5SYang Zhong #define DATA_SIZE 4 1796d9bb58e5SYang Zhong #include "atomic_template.h" 1797d9bb58e5SYang Zhong 1798d9bb58e5SYang Zhong #ifdef CONFIG_ATOMIC64 1799d9bb58e5SYang Zhong #define DATA_SIZE 8 1800d9bb58e5SYang Zhong #include "atomic_template.h" 1801d9bb58e5SYang Zhong #endif 1802d9bb58e5SYang Zhong 1803d9bb58e5SYang Zhong /* Code access functions. 
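 * These are the code_read == true expansions of load_helper: the TLB
 * lookup is done on addr_code with MMU_INST_FETCH semantics. They back
 * the cpu_ld*_code accessors used when the translator reads guest
 * instructions.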
*/ 1804d9bb58e5SYang Zhong 18052dd92606SRichard Henderson static uint64_t full_ldub_cmmu(CPUArchState *env, target_ulong addr, 18062dd92606SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 18072dd92606SRichard Henderson { 1808be5c4787STony Nguyen return load_helper(env, addr, oi, retaddr, MO_8, true, full_ldub_cmmu); 18092dd92606SRichard Henderson } 18102dd92606SRichard Henderson 1811fc1bc777SRichard Henderson uint8_t helper_ret_ldb_cmmu(CPUArchState *env, target_ulong addr, 1812fc1bc777SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 1813eed56642SAlex Bennée { 18142dd92606SRichard Henderson return full_ldub_cmmu(env, addr, oi, retaddr); 18152dd92606SRichard Henderson } 18162dd92606SRichard Henderson 18172dd92606SRichard Henderson static uint64_t full_le_lduw_cmmu(CPUArchState *env, target_ulong addr, 18182dd92606SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 18192dd92606SRichard Henderson { 1820be5c4787STony Nguyen return load_helper(env, addr, oi, retaddr, MO_LEUW, true, 18212dd92606SRichard Henderson full_le_lduw_cmmu); 1822eed56642SAlex Bennée } 1823d9bb58e5SYang Zhong 1824fc1bc777SRichard Henderson uint16_t helper_le_ldw_cmmu(CPUArchState *env, target_ulong addr, 1825fc1bc777SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 1826eed56642SAlex Bennée { 18272dd92606SRichard Henderson return full_le_lduw_cmmu(env, addr, oi, retaddr); 18282dd92606SRichard Henderson } 18292dd92606SRichard Henderson 18302dd92606SRichard Henderson static uint64_t full_be_lduw_cmmu(CPUArchState *env, target_ulong addr, 18312dd92606SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 18322dd92606SRichard Henderson { 1833be5c4787STony Nguyen return load_helper(env, addr, oi, retaddr, MO_BEUW, true, 18342dd92606SRichard Henderson full_be_lduw_cmmu); 1835eed56642SAlex Bennée } 1836d9bb58e5SYang Zhong 1837fc1bc777SRichard Henderson uint16_t helper_be_ldw_cmmu(CPUArchState *env, target_ulong addr, 1838fc1bc777SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 1839eed56642SAlex Bennée { 18402dd92606SRichard Henderson return full_be_lduw_cmmu(env, addr, oi, retaddr); 18412dd92606SRichard Henderson } 18422dd92606SRichard Henderson 18432dd92606SRichard Henderson static uint64_t full_le_ldul_cmmu(CPUArchState *env, target_ulong addr, 18442dd92606SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 18452dd92606SRichard Henderson { 1846be5c4787STony Nguyen return load_helper(env, addr, oi, retaddr, MO_LEUL, true, 18472dd92606SRichard Henderson full_le_ldul_cmmu); 1848eed56642SAlex Bennée } 1849d9bb58e5SYang Zhong 1850fc1bc777SRichard Henderson uint32_t helper_le_ldl_cmmu(CPUArchState *env, target_ulong addr, 1851fc1bc777SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 1852eed56642SAlex Bennée { 18532dd92606SRichard Henderson return full_le_ldul_cmmu(env, addr, oi, retaddr); 18542dd92606SRichard Henderson } 18552dd92606SRichard Henderson 18562dd92606SRichard Henderson static uint64_t full_be_ldul_cmmu(CPUArchState *env, target_ulong addr, 18572dd92606SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 18582dd92606SRichard Henderson { 1859be5c4787STony Nguyen return load_helper(env, addr, oi, retaddr, MO_BEUL, true, 18602dd92606SRichard Henderson full_be_ldul_cmmu); 1861eed56642SAlex Bennée } 1862d9bb58e5SYang Zhong 1863fc1bc777SRichard Henderson uint32_t helper_be_ldl_cmmu(CPUArchState *env, target_ulong addr, 1864fc1bc777SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 1865eed56642SAlex Bennée { 18662dd92606SRichard Henderson return full_be_ldul_cmmu(env, addr, oi, retaddr); 
1867eed56642SAlex Bennée } 1868eed56642SAlex Bennée 1869fc1bc777SRichard Henderson uint64_t helper_le_ldq_cmmu(CPUArchState *env, target_ulong addr, 1870fc1bc777SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 1871eed56642SAlex Bennée { 1872be5c4787STony Nguyen return load_helper(env, addr, oi, retaddr, MO_LEQ, true, 18732dd92606SRichard Henderson helper_le_ldq_cmmu); 1874eed56642SAlex Bennée } 1875eed56642SAlex Bennée 1876fc1bc777SRichard Henderson uint64_t helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr, 1877fc1bc777SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 1878eed56642SAlex Bennée { 1879be5c4787STony Nguyen return load_helper(env, addr, oi, retaddr, MO_BEQ, true, 18802dd92606SRichard Henderson helper_be_ldq_cmmu); 1881eed56642SAlex Bennée } 1882
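/*
 * A minimal illustration of driving the helpers above from C, assuming a
 * TCGMemOpIdx built with make_memop_idx() from tcg/tcg.h. The function
 * name example_le_exchange_l is hypothetical and not part of this file;
 * note this is a plain load followed by a store, not an atomic exchange.
 */
static inline uint32_t example_le_exchange_l(CPUArchState *env,
                                             target_ulong addr,
                                             uint32_t newval,
                                             int mmu_idx, uintptr_t ra)
{
    TCGMemOpIdx oi = make_memop_idx(MO_LEUL, mmu_idx);
    /* Little-endian 32-bit load through the full softmmu slow path. */
    uint32_t oldval = helper_le_ldul_mmu(env, addr, oi, ra);
    /* Matching little-endian 32-bit store to the same guest address. */
    helper_le_stl_mmu(env, addr, newval, oi, ra);
    return oldval;
}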