/*
 *  Common CPU TLB handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "hw/core/tcg-cpu-ops.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/tb-hash.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
#include "qemu/error-report.h"
#include "exec/log.h"
#include "exec/helper-proto.h"
#include "qemu/atomic.h"
#include "qemu/atomic128.h"
#include "exec/translate-all.h"
#include "trace/trace-root.h"
#include "trace/mem.h"
#include "internal.h"
#ifdef CONFIG_PLUGIN
#include "qemu/plugin-memory.h"
#endif

/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif

#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)

#define assert_cpu_is_self(cpu) do {                              \
        if (DEBUG_TLB_GATE) {                                     \
            g_assert(!(cpu)->created || qemu_cpu_is_self(cpu));   \
        }                                                         \
    } while (0)

/* run_on_cpu_data.target_ptr should always be big enough for a
 * target_ulong even on 32 bit builds */
QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));

/* We currently can't handle more than 16 bits in the MMUIDX bitmask.
 */
QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
#define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)

static inline size_t tlb_n_entries(CPUTLBDescFast *fast)
{
    return (fast->mask >> CPU_TLB_ENTRY_BITS) + 1;
}

static inline size_t sizeof_tlb(CPUTLBDescFast *fast)
{
    return fast->mask + (1 << CPU_TLB_ENTRY_BITS);
}
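
/*
 * Worked example of the mask encoding (illustrative numbers, and
 * assuming 32-byte entries, i.e. CPU_TLB_ENTRY_BITS == 5): a TLB of
 * 256 entries stores
 *     fast->mask = (256 - 1) << 5 == 0x1fe0
 * so tlb_n_entries() recovers (0x1fe0 >> 5) + 1 == 256 and
 * sizeof_tlb() yields 0x1fe0 + 32 == 8192 bytes.
 */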

static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
                             size_t max_entries)
{
    desc->window_begin_ns = ns;
    desc->window_max_entries = max_entries;
}

static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
{
    unsigned int i, i0 = tb_jmp_cache_hash_page(page_addr);

    for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
        qatomic_set(&cpu->tb_jmp_cache[i0 + i], NULL);
    }
}

static void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
{
    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page. */
    tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
    tb_jmp_cache_clear_page(cpu, addr);
}

/**
 * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
 * @desc: The CPUTLBDesc portion of the TLB
 * @fast: The CPUTLBDescFast portion of the same TLB
 *
 * Called with tlb_lock held.
 *
 * We have two main constraints when resizing a TLB: (1) we only resize it
 * on a TLB flush (otherwise we'd have to take a perf hit by either rehashing
 * the array or unnecessarily flushing it), which means we do not control how
 * frequently the resizing can occur; (2) we don't have access to the guest's
 * future scheduling decisions, and therefore have to decide the magnitude of
 * the resize based on past observations.
 *
 * In general, a memory-hungry process can benefit greatly from an appropriately
 * sized TLB, since a guest TLB miss is very expensive. This doesn't mean that
 * we just have to make the TLB as large as possible; while an oversized TLB
 * results in minimal TLB miss rates, it also takes longer to be flushed
 * (flushes can be _very_ frequent), and the reduced locality can also hurt
 * performance.
 *
 * To achieve near-optimal performance for all kinds of workloads, we:
 *
 * 1. Aggressively increase the size of the TLB when the use rate of the
 * TLB being flushed is high, since it is likely that in the near future this
 * memory-hungry process will execute again, and its memory hungriness will
 * probably be similar.
 *
 * 2. Slowly reduce the size of the TLB as the use rate declines over a
 * reasonably large time window. The rationale is that if in such a time window
 * we have not observed a high TLB use rate, it is likely that we won't observe
 * it in the near future. In that case, once a time window expires we downsize
 * the TLB to match the maximum use rate observed in the window.
 *
 * 3. Try to keep the maximum use rate in a time window in the 30-70% range,
 * since in that range performance is likely near-optimal. Recall that the TLB
 * is direct mapped, so we want the use rate to be low (or at least not too
 * high), since otherwise we are likely to have a significant amount of
 * conflict misses.
 */
static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast,
                                  int64_t now)
{
    size_t old_size = tlb_n_entries(fast);
    size_t rate;
    size_t new_size = old_size;
    int64_t window_len_ms = 100;
    int64_t window_len_ns = window_len_ms * 1000 * 1000;
    bool window_expired = now > desc->window_begin_ns + window_len_ns;

    if (desc->n_used_entries > desc->window_max_entries) {
        desc->window_max_entries = desc->n_used_entries;
    }
    rate = desc->window_max_entries * 100 / old_size;

    if (rate > 70) {
        new_size = MIN(old_size << 1, 1 << CPU_TLB_DYN_MAX_BITS);
    } else if (rate < 30 && window_expired) {
        size_t ceil = pow2ceil(desc->window_max_entries);
        size_t expected_rate = desc->window_max_entries * 100 / ceil;

        /*
         * Avoid undersizing when the max number of entries seen is just below
         * a pow2. For instance, if max_entries == 1025, the expected use rate
         * would be 1025/2048==50%. However, if max_entries == 1023, we'd get
         * 1023/1024==99.9% use rate, so we'd likely end up doubling the size
         * later. Thus, make sure that the expected use rate remains below 70%.
         * (and since we double the size, that means the lowest rate we'd
         * expect to get is 35%, which is still in the 30-70% range where
         * we consider that the size is appropriate.)
         */
        if (expected_rate > 70) {
            ceil *= 2;
        }
        new_size = MAX(ceil, 1 << CPU_TLB_DYN_MIN_BITS);
    }

    if (new_size == old_size) {
        if (window_expired) {
            tlb_window_reset(desc, now, desc->n_used_entries);
        }
        return;
    }

    g_free(fast->table);
    g_free(desc->iotlb);

    tlb_window_reset(desc, now, 0);
    /* desc->n_used_entries is cleared by the caller */
    fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
    fast->table = g_try_new(CPUTLBEntry, new_size);
    desc->iotlb = g_try_new(CPUIOTLBEntry, new_size);

    /*
     * If the allocations fail, try smaller sizes. We just freed some
     * memory, so going back to half of new_size has a good chance of working.
     * Increased memory pressure elsewhere in the system might cause the
     * allocations to fail though, so we progressively reduce the allocation
     * size, aborting if we cannot even allocate the smallest TLB we support.
     */
    while (fast->table == NULL || desc->iotlb == NULL) {
        if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
            error_report("%s: %s", __func__, strerror(errno));
            abort();
        }
        new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS);
        fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;

        g_free(fast->table);
        g_free(desc->iotlb);
        fast->table = g_try_new(CPUTLBEntry, new_size);
        desc->iotlb = g_try_new(CPUIOTLBEntry, new_size);
    }
}
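
/*
 * Worked example of the heuristic above (illustrative figures, not taken
 * from a trace): with old_size == 1024 and window_max_entries == 800,
 * rate == 800 * 100 / 1024 == 78, so the table doubles to 2048 entries.
 * If instead window_max_entries == 100 once the window has expired,
 * rate == 9 and pow2ceil(100) == 128; the expected rate
 * 100 * 100 / 128 == 78 exceeds 70, so ceil doubles to 256 and the table
 * shrinks to MAX(256, 1 << CPU_TLB_DYN_MIN_BITS) entries.
 */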

static void tlb_mmu_flush_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast)
{
    desc->n_used_entries = 0;
    desc->large_page_addr = -1;
    desc->large_page_mask = -1;
    desc->vindex = 0;
    memset(fast->table, -1, sizeof_tlb(fast));
    memset(desc->vtable, -1, sizeof(desc->vtable));
}

static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx,
                                        int64_t now)
{
    CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
    CPUTLBDescFast *fast = &env_tlb(env)->f[mmu_idx];

    tlb_mmu_resize_locked(desc, fast, now);
    tlb_mmu_flush_locked(desc, fast);
}

static void tlb_mmu_init(CPUTLBDesc *desc, CPUTLBDescFast *fast, int64_t now)
{
    size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;

    tlb_window_reset(desc, now, 0);
    desc->n_used_entries = 0;
    fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
    fast->table = g_new(CPUTLBEntry, n_entries);
    desc->iotlb = g_new(CPUIOTLBEntry, n_entries);
    tlb_mmu_flush_locked(desc, fast);
}

static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
{
    env_tlb(env)->d[mmu_idx].n_used_entries++;
}

static inline void tlb_n_used_entries_dec(CPUArchState *env, uintptr_t mmu_idx)
{
    env_tlb(env)->d[mmu_idx].n_used_entries--;
}

void tlb_init(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;
    int64_t now = get_clock_realtime();
    int i;

    qemu_spin_init(&env_tlb(env)->c.lock);

    /* All tlbs are initialized flushed. */
    env_tlb(env)->c.dirty = 0;

    for (i = 0; i < NB_MMU_MODES; i++) {
        tlb_mmu_init(&env_tlb(env)->d[i], &env_tlb(env)->f[i], now);
    }
}

void tlb_destroy(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;
    int i;

    qemu_spin_destroy(&env_tlb(env)->c.lock);
    for (i = 0; i < NB_MMU_MODES; i++) {
        CPUTLBDesc *desc = &env_tlb(env)->d[i];
        CPUTLBDescFast *fast = &env_tlb(env)->f[i];

        g_free(fast->table);
        g_free(desc->iotlb);
    }
}

/* flush_all_helper: run fn across all cpus
 *
 * If the wait flag is set then the src cpu's helper will be queued as
 * "safe" work and the loop exited creating a synchronisation point
 * where all queued work will be finished before execution starts
 * again.
 */
static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
                             run_on_cpu_data d)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, fn, d);
        }
    }
}
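
/*
 * Note that flush_all_helper() only queues work for the *other* cpus;
 * the caller is expected to cover @src itself, either by invoking the
 * helper directly (as tlb_flush_by_mmuidx_all_cpus does below) or by
 * queueing it as "safe" work to create the synchronisation point
 * described above (as tlb_flush_by_mmuidx_all_cpus_synced does).
 */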

void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
{
    CPUState *cpu;
    size_t full = 0, part = 0, elide = 0;

    CPU_FOREACH(cpu) {
        CPUArchState *env = cpu->env_ptr;

        full += qatomic_read(&env_tlb(env)->c.full_flush_count);
        part += qatomic_read(&env_tlb(env)->c.part_flush_count);
        elide += qatomic_read(&env_tlb(env)->c.elide_flush_count);
    }
    *pfull = full;
    *ppart = part;
    *pelide = elide;
}

static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    uint16_t asked = data.host_int;
    uint16_t all_dirty, work, to_clean;
    int64_t now = get_clock_realtime();

    assert_cpu_is_self(cpu);

    tlb_debug("mmu_idx:0x%04" PRIx16 "\n", asked);

    qemu_spin_lock(&env_tlb(env)->c.lock);

    all_dirty = env_tlb(env)->c.dirty;
    to_clean = asked & all_dirty;
    all_dirty &= ~to_clean;
    env_tlb(env)->c.dirty = all_dirty;

    for (work = to_clean; work != 0; work &= work - 1) {
        int mmu_idx = ctz32(work);
        tlb_flush_one_mmuidx_locked(env, mmu_idx, now);
    }

    qemu_spin_unlock(&env_tlb(env)->c.lock);

    cpu_tb_jmp_cache_clear(cpu);

    if (to_clean == ALL_MMUIDX_BITS) {
        qatomic_set(&env_tlb(env)->c.full_flush_count,
                    env_tlb(env)->c.full_flush_count + 1);
    } else {
        qatomic_set(&env_tlb(env)->c.part_flush_count,
                    env_tlb(env)->c.part_flush_count + ctpop16(to_clean));
        if (to_clean != asked) {
            qatomic_set(&env_tlb(env)->c.elide_flush_count,
                        env_tlb(env)->c.elide_flush_count +
                        ctpop16(asked & ~to_clean));
        }
    }
}
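
/*
 * Worked example of the bookkeeping above (illustrative values): with
 * asked == 0x6 and c.dirty == 0x5, to_clean == 0x4, so only mmu_idx 2
 * is flushed; part_flush_count grows by ctpop16(0x4) == 1 and
 * elide_flush_count by ctpop16(0x6 & ~0x4) == 1 for the mmu_idx that
 * was asked for but already clean. The work &= work - 1 step clears
 * the lowest set bit on each iteration, visiting each dirty mmu_idx
 * exactly once.
 */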

void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
    tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);

    if (cpu->created && !qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
                         RUN_ON_CPU_HOST_INT(idxmap));
    } else {
        tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap));
    }
}

void tlb_flush(CPUState *cpu)
{
    tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS);
}

void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus(src_cpu, ALL_MMUIDX_BITS);
}

void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS);
}

static bool tlb_hit_page_mask_anyprot(CPUTLBEntry *tlb_entry,
                                      target_ulong page, target_ulong mask)
{
    page &= mask;
    mask &= TARGET_PAGE_MASK | TLB_INVALID_MASK;

    return (page == (tlb_entry->addr_read & mask) ||
            page == (tlb_addr_write(tlb_entry) & mask) ||
            page == (tlb_entry->addr_code & mask));
}

static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry,
                                        target_ulong page)
{
    return tlb_hit_page_mask_anyprot(tlb_entry, page, -1);
}

/**
 * tlb_entry_is_empty - return true if the entry is not in use
 * @te: pointer to CPUTLBEntry
 */
static inline bool tlb_entry_is_empty(const CPUTLBEntry *te)
{
    return te->addr_read == -1 && te->addr_write == -1 && te->addr_code == -1;
}

/* Called with tlb_c.lock held */
static bool tlb_flush_entry_mask_locked(CPUTLBEntry *tlb_entry,
                                        target_ulong page,
                                        target_ulong mask)
{
    if (tlb_hit_page_mask_anyprot(tlb_entry, page, mask)) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
        return true;
    }
    return false;
}

static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry,
                                          target_ulong page)
{
    return tlb_flush_entry_mask_locked(tlb_entry, page, -1);
}

/* Called with tlb_c.lock held */
static void tlb_flush_vtlb_page_mask_locked(CPUArchState *env, int mmu_idx,
                                            target_ulong page,
                                            target_ulong mask)
{
    CPUTLBDesc *d = &env_tlb(env)->d[mmu_idx];
    int k;

    assert_cpu_is_self(env_cpu(env));
    for (k = 0; k < CPU_VTLB_SIZE; k++) {
        if (tlb_flush_entry_mask_locked(&d->vtable[k], page, mask)) {
            tlb_n_used_entries_dec(env, mmu_idx);
        }
    }
}

static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx,
                                              target_ulong page)
{
    tlb_flush_vtlb_page_mask_locked(env, mmu_idx, page, -1);
}

static void tlb_flush_page_locked(CPUArchState *env, int midx,
                                  target_ulong page)
{
    target_ulong lp_addr = env_tlb(env)->d[midx].large_page_addr;
    target_ulong lp_mask = env_tlb(env)->d[midx].large_page_mask;

    /* Check if we need to flush due to large pages. */
    if ((page & lp_mask) == lp_addr) {
        tlb_debug("forcing full flush midx %d ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  midx, lp_addr, lp_mask);
        tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
    } else {
        if (tlb_flush_entry_locked(tlb_entry(env, midx, page), page)) {
            tlb_n_used_entries_dec(env, midx);
        }
        tlb_flush_vtlb_page_locked(env, midx, page);
    }
}
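
/*
 * Worked example (illustrative addresses): after a 2MB page is recorded
 * by tlb_add_large_page() below, large_page_addr == 0x40000000 and
 * large_page_mask == ~0x1fffff. A later flush of page 0x40123000
 * satisfies (0x40123000 & ~0x1fffff) == 0x40000000, so the whole
 * mmu_idx is flushed rather than a single entry.
 */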

/**
 * tlb_flush_page_by_mmuidx_async_0:
 * @cpu: cpu on which to flush
 * @addr: page of virtual address to flush
 * @idxmap: set of mmu_idx to flush
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, flush one page
 * at @addr from the tlbs indicated by @idxmap from @cpu.
 */
static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu,
                                             target_ulong addr,
                                             uint16_t idxmap)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("page addr:" TARGET_FMT_lx " mmu_map:0x%x\n", addr, idxmap);

    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if ((idxmap >> mmu_idx) & 1) {
            tlb_flush_page_locked(env, mmu_idx, addr);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);

    tb_flush_jmp_cache(cpu, addr);
}

/**
 * tlb_flush_page_by_mmuidx_async_1:
 * @cpu: cpu on which to flush
 * @data: encoded addr + idxmap
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, called through
 * async_run_on_cpu. The idxmap parameter is encoded in the page
 * offset of the target_ptr field. This limits the set of mmu_idx
 * that can be passed via this method.
 */
static void tlb_flush_page_by_mmuidx_async_1(CPUState *cpu,
                                             run_on_cpu_data data)
{
    target_ulong addr_and_idxmap = (target_ulong) data.target_ptr;
    target_ulong addr = addr_and_idxmap & TARGET_PAGE_MASK;
    uint16_t idxmap = addr_and_idxmap & ~TARGET_PAGE_MASK;

    tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
}
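
/*
 * Example of the encoding consumed above (illustrative values, 4k pages
 * assumed): RUN_ON_CPU_TARGET_PTR(0x12345000 | 0x5) packs page address
 * 0x12345000 together with idxmap 0x5; masking with TARGET_PAGE_MASK
 * recovers the address and with ~TARGET_PAGE_MASK the idxmap. This is
 * why the pointer-encoded path is only usable when
 * idxmap < TARGET_PAGE_SIZE.
 */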

typedef struct {
    target_ulong addr;
    uint16_t idxmap;
} TLBFlushPageByMMUIdxData;

/**
 * tlb_flush_page_by_mmuidx_async_2:
 * @cpu: cpu on which to flush
 * @data: allocated addr + idxmap
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, called through
 * async_run_on_cpu. The addr+idxmap parameters are stored in a
 * TLBFlushPageByMMUIdxData structure that has been allocated
 * specifically for this helper. Free the structure when done.
 */
static void tlb_flush_page_by_mmuidx_async_2(CPUState *cpu,
                                             run_on_cpu_data data)
{
    TLBFlushPageByMMUIdxData *d = data.host_ptr;

    tlb_flush_page_by_mmuidx_async_0(cpu, d->addr, d->idxmap);
    g_free(d);
}

void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
{
    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    if (qemu_cpu_is_self(cpu)) {
        tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
    } else if (idxmap < TARGET_PAGE_SIZE) {
        /*
         * Most targets have only a few mmu_idx. In the case where
         * we can stuff idxmap into the low TARGET_PAGE_BITS, avoid
         * allocating memory for this operation.
         */
        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        TLBFlushPageByMMUIdxData *d = g_new(TLBFlushPageByMMUIdxData, 1);

        /* Otherwise allocate a structure, freed by the worker. */
        d->addr = addr;
        d->idxmap = idxmap;
        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_2,
                         RUN_ON_CPU_HOST_PTR(d));
    }
}

void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
    tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS);
}

void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr,
                                       uint16_t idxmap)
{
    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    /*
     * Allocate memory to hold addr+idxmap only when needed.
     * See tlb_flush_page_by_mmuidx for details.
     */
    if (idxmap < TARGET_PAGE_SIZE) {
        flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        CPUState *dst_cpu;

        /* Allocate a separate data block for each destination cpu. */
        CPU_FOREACH(dst_cpu) {
            if (dst_cpu != src_cpu) {
                TLBFlushPageByMMUIdxData *d
                    = g_new(TLBFlushPageByMMUIdxData, 1);

                d->addr = addr;
                d->idxmap = idxmap;
                async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
                                 RUN_ON_CPU_HOST_PTR(d));
            }
        }
    }

    tlb_flush_page_by_mmuidx_async_0(src_cpu, addr, idxmap);
}

void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
{
    tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS);
}

void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                              target_ulong addr,
                                              uint16_t idxmap)
{
    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    /*
     * Allocate memory to hold addr+idxmap only when needed.
     * See tlb_flush_page_by_mmuidx for details.
     */
    if (idxmap < TARGET_PAGE_SIZE) {
        flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
        async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                              RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        CPUState *dst_cpu;
        TLBFlushPageByMMUIdxData *d;

        /* Allocate a separate data block for each destination cpu. */
        CPU_FOREACH(dst_cpu) {
            if (dst_cpu != src_cpu) {
                d = g_new(TLBFlushPageByMMUIdxData, 1);
                d->addr = addr;
                d->idxmap = idxmap;
                async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
                                 RUN_ON_CPU_HOST_PTR(d));
            }
        }

        d = g_new(TLBFlushPageByMMUIdxData, 1);
        d->addr = addr;
        d->idxmap = idxmap;
        async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_2,
                              RUN_ON_CPU_HOST_PTR(d));
    }
}

void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr)
{
    tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
}

static void tlb_flush_page_bits_locked(CPUArchState *env, int midx,
                                       target_ulong page, unsigned bits)
{
    CPUTLBDesc *d = &env_tlb(env)->d[midx];
    CPUTLBDescFast *f = &env_tlb(env)->f[midx];
    target_ulong mask = MAKE_64BIT_MASK(0, bits);

    /*
     * If @bits is smaller than the tlb size, there may be multiple entries
     * within the TLB; otherwise all addresses that match under @mask hit
     * the same TLB entry.
     *
     * TODO: Perhaps allow bits to be a few bits less than the size.
     * For now, just flush the entire TLB.
     */
    if (mask < f->mask) {
        tlb_debug("forcing full flush midx %d ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  midx, page, mask);
        tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
        return;
    }

    /* Check if we need to flush due to large pages. */
    if ((page & d->large_page_mask) == d->large_page_addr) {
        tlb_debug("forcing full flush midx %d ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  midx, d->large_page_addr, d->large_page_mask);
        tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
        return;
    }

    if (tlb_flush_entry_mask_locked(tlb_entry(env, midx, page), page, mask)) {
        tlb_n_used_entries_dec(env, midx);
    }
    tlb_flush_vtlb_page_mask_locked(env, midx, page, mask);
}
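
/*
 * As an example of the first check above (assuming 32-byte TLB entries,
 * i.e. CPU_TLB_ENTRY_BITS == 5): a 1024-entry TLB has
 * f->mask == 1023 << 5 == 0x7fe0. A flush with bits == 12 gives
 * mask == 0xfff < 0x7fe0, i.e. addresses that are equal under @mask can
 * still index different TLB entries, so the entire mmu_idx is flushed
 * instead of a single entry.
 */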

typedef struct {
    target_ulong addr;
    uint16_t idxmap;
    uint16_t bits;
} TLBFlushPageBitsByMMUIdxData;

static void
tlb_flush_page_bits_by_mmuidx_async_0(CPUState *cpu,
                                      TLBFlushPageBitsByMMUIdxData d)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("page addr:" TARGET_FMT_lx "/%u mmu_map:0x%x\n",
              d.addr, d.bits, d.idxmap);

    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if ((d.idxmap >> mmu_idx) & 1) {
            tlb_flush_page_bits_locked(env, mmu_idx, d.addr, d.bits);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);

    tb_flush_jmp_cache(cpu, d.addr);
}

static bool encode_pbm_to_runon(run_on_cpu_data *out,
                                TLBFlushPageBitsByMMUIdxData d)
{
    /* We need 6 bits to hold @bits up to 63. */
    if (d.idxmap <= MAKE_64BIT_MASK(0, TARGET_PAGE_BITS - 6)) {
        *out = RUN_ON_CPU_TARGET_PTR(d.addr | (d.idxmap << 6) | d.bits);
        return true;
    }
    return false;
}

static TLBFlushPageBitsByMMUIdxData
decode_runon_to_pbm(run_on_cpu_data data)
{
    target_ulong addr_map_bits = (target_ulong) data.target_ptr;
    return (TLBFlushPageBitsByMMUIdxData){
        .addr = addr_map_bits & TARGET_PAGE_MASK,
        .idxmap = (addr_map_bits & ~TARGET_PAGE_MASK) >> 6,
        .bits = addr_map_bits & 0x3f
    };
}
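
/*
 * Round-trip example (assuming TARGET_PAGE_BITS == 12): addr ==
 * 0x12345000, idxmap == 0x3 and bits == 48 encode to
 * 0x12345000 | (0x3 << 6) | 48 == 0x123450f0; decoding masks out the
 * page offset to recover addr, while bits 6..11 give back the idxmap
 * and bits 0..5 give back 48.
 */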

static void tlb_flush_page_bits_by_mmuidx_async_1(CPUState *cpu,
                                                  run_on_cpu_data runon)
{
    tlb_flush_page_bits_by_mmuidx_async_0(cpu, decode_runon_to_pbm(runon));
}

static void tlb_flush_page_bits_by_mmuidx_async_2(CPUState *cpu,
                                                  run_on_cpu_data data)
{
    TLBFlushPageBitsByMMUIdxData *d = data.host_ptr;
    tlb_flush_page_bits_by_mmuidx_async_0(cpu, *d);
    g_free(d);
}

void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr,
                                   uint16_t idxmap, unsigned bits)
{
    TLBFlushPageBitsByMMUIdxData d;
    run_on_cpu_data runon;

    /* If all bits are significant, this devolves to tlb_flush_page. */
    if (bits >= TARGET_LONG_BITS) {
        tlb_flush_page_by_mmuidx(cpu, addr, idxmap);
        return;
    }
    /* If no page bits are significant, this devolves to tlb_flush. */
    if (bits < TARGET_PAGE_BITS) {
        tlb_flush_by_mmuidx(cpu, idxmap);
        return;
    }

    /* This should already be page aligned */
    d.addr = addr & TARGET_PAGE_MASK;
    d.idxmap = idxmap;
    d.bits = bits;

    if (qemu_cpu_is_self(cpu)) {
        tlb_flush_page_bits_by_mmuidx_async_0(cpu, d);
    } else if (encode_pbm_to_runon(&runon, d)) {
        async_run_on_cpu(cpu, tlb_flush_page_bits_by_mmuidx_async_1, runon);
    } else {
        TLBFlushPageBitsByMMUIdxData *p
            = g_new(TLBFlushPageBitsByMMUIdxData, 1);

        /* Otherwise allocate a structure, freed by the worker. */
        *p = d;
        async_run_on_cpu(cpu, tlb_flush_page_bits_by_mmuidx_async_2,
                         RUN_ON_CPU_HOST_PTR(p));
    }
}

void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu,
                                            target_ulong addr,
                                            uint16_t idxmap,
                                            unsigned bits)
{
    TLBFlushPageBitsByMMUIdxData d;
    run_on_cpu_data runon;

    /* If all bits are significant, this devolves to tlb_flush_page. */
    if (bits >= TARGET_LONG_BITS) {
        tlb_flush_page_by_mmuidx_all_cpus(src_cpu, addr, idxmap);
        return;
    }
    /* If no page bits are significant, this devolves to tlb_flush. */
    if (bits < TARGET_PAGE_BITS) {
        tlb_flush_by_mmuidx_all_cpus(src_cpu, idxmap);
        return;
    }

    /* This should already be page aligned */
    d.addr = addr & TARGET_PAGE_MASK;
    d.idxmap = idxmap;
    d.bits = bits;

    if (encode_pbm_to_runon(&runon, d)) {
        flush_all_helper(src_cpu, tlb_flush_page_bits_by_mmuidx_async_1, runon);
    } else {
        CPUState *dst_cpu;
        TLBFlushPageBitsByMMUIdxData *p;

        /* Allocate a separate data block for each destination cpu. */
        CPU_FOREACH(dst_cpu) {
            if (dst_cpu != src_cpu) {
                p = g_new(TLBFlushPageBitsByMMUIdxData, 1);
                *p = d;
                async_run_on_cpu(dst_cpu,
                                 tlb_flush_page_bits_by_mmuidx_async_2,
                                 RUN_ON_CPU_HOST_PTR(p));
            }
        }
    }

    tlb_flush_page_bits_by_mmuidx_async_0(src_cpu, d);
}

void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                                   target_ulong addr,
                                                   uint16_t idxmap,
                                                   unsigned bits)
{
    TLBFlushPageBitsByMMUIdxData d;
    run_on_cpu_data runon;

    /* If all bits are significant, this devolves to tlb_flush_page. */
    if (bits >= TARGET_LONG_BITS) {
        tlb_flush_page_by_mmuidx_all_cpus_synced(src_cpu, addr, idxmap);
        return;
    }
    /* If no page bits are significant, this devolves to tlb_flush. */
    if (bits < TARGET_PAGE_BITS) {
        tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, idxmap);
        return;
    }

    /* This should already be page aligned */
    d.addr = addr & TARGET_PAGE_MASK;
    d.idxmap = idxmap;
    d.bits = bits;

    if (encode_pbm_to_runon(&runon, d)) {
        flush_all_helper(src_cpu, tlb_flush_page_bits_by_mmuidx_async_1, runon);
        async_safe_run_on_cpu(src_cpu, tlb_flush_page_bits_by_mmuidx_async_1,
                              runon);
    } else {
        CPUState *dst_cpu;
        TLBFlushPageBitsByMMUIdxData *p;

        /* Allocate a separate data block for each destination cpu. */
        CPU_FOREACH(dst_cpu) {
            if (dst_cpu != src_cpu) {
                p = g_new(TLBFlushPageBitsByMMUIdxData, 1);
                *p = d;
                async_run_on_cpu(dst_cpu, tlb_flush_page_bits_by_mmuidx_async_2,
                                 RUN_ON_CPU_HOST_PTR(p));
            }
        }

        p = g_new(TLBFlushPageBitsByMMUIdxData, 1);
        *p = d;
        async_safe_run_on_cpu(src_cpu, tlb_flush_page_bits_by_mmuidx_async_2,
                              RUN_ON_CPU_HOST_PTR(p));
    }
}
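
/*
 * A typical caller of this family (a sketch, not taken from any
 * particular target) is a target whose MMU compares only the low N
 * bits of a virtual address, e.g. one that ignores address tag bits:
 *
 *     tlb_flush_page_bits_by_mmuidx(cpu, addr, 1 << mmu_idx, 48);
 *
 * which drops any entry matching @addr in the low 48 bits, whatever
 * the top bits of its stored tag are.
 */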

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
                                             DIRTY_MEMORY_CODE);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
void tlb_unprotect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}


/*
 * Dirty write flag handling
 *
 * When the TCG code writes to a location it looks up the address in
 * the TLB and uses that data to compute the final address. If any of
 * the lower bits of the address are set then the slow path is forced.
 * There are a number of reasons to do this but for normal RAM the
 * most usual is detecting writes to code regions which may invalidate
 * generated code.
 *
 * Other vCPUs might be reading their TLBs during guest execution, so we update
 * te->addr_write with qatomic_set. We don't need to worry about this for
 * oversized guests as MTTCG is disabled for them.
 *
 * Called with tlb_c.lock held.
 */
static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
                                         uintptr_t start, uintptr_t length)
{
    uintptr_t addr = tlb_entry->addr_write;

    if ((addr & (TLB_INVALID_MASK | TLB_MMIO |
                 TLB_DISCARD_WRITE | TLB_NOTDIRTY)) == 0) {
        addr &= TARGET_PAGE_MASK;
        addr += tlb_entry->addend;
        if ((addr - start) < length) {
#if TCG_OVERSIZED_GUEST
            tlb_entry->addr_write |= TLB_NOTDIRTY;
#else
            qatomic_set(&tlb_entry->addr_write,
                        tlb_entry->addr_write | TLB_NOTDIRTY);
#endif
        }
    }
}
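
/*
 * The effect of setting TLB_NOTDIRTY above: the TCG fast path compares
 * the guest address against addr_write with the low flag bits included,
 * so an entry carrying TLB_NOTDIRTY never matches and the store takes
 * the slow path, where the dirty bitmap can be updated before the flag
 * is cleared again via tlb_set_dirty() below.
 */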
Cota target_ulong vaddr) 1040d9bb58e5SYang Zhong { 1041d9bb58e5SYang Zhong if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) { 1042d9bb58e5SYang Zhong tlb_entry->addr_write = vaddr; 1043d9bb58e5SYang Zhong } 1044d9bb58e5SYang Zhong } 1045d9bb58e5SYang Zhong 1046d9bb58e5SYang Zhong /* update the TLB corresponding to virtual page vaddr 1047d9bb58e5SYang Zhong so that it is no longer dirty */ 1048d9bb58e5SYang Zhong void tlb_set_dirty(CPUState *cpu, target_ulong vaddr) 1049d9bb58e5SYang Zhong { 1050d9bb58e5SYang Zhong CPUArchState *env = cpu->env_ptr; 1051d9bb58e5SYang Zhong int mmu_idx; 1052d9bb58e5SYang Zhong 1053d9bb58e5SYang Zhong assert_cpu_is_self(cpu); 1054d9bb58e5SYang Zhong 1055d9bb58e5SYang Zhong vaddr &= TARGET_PAGE_MASK; 1056a40ec84eSRichard Henderson qemu_spin_lock(&env_tlb(env)->c.lock); 1057d9bb58e5SYang Zhong for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { 1058383beda9SRichard Henderson tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, vaddr), vaddr); 1059d9bb58e5SYang Zhong } 1060d9bb58e5SYang Zhong 1061d9bb58e5SYang Zhong for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { 1062d9bb58e5SYang Zhong int k; 1063d9bb58e5SYang Zhong for (k = 0; k < CPU_VTLB_SIZE; k++) { 1064a40ec84eSRichard Henderson tlb_set_dirty1_locked(&env_tlb(env)->d[mmu_idx].vtable[k], vaddr); 1065d9bb58e5SYang Zhong } 1066d9bb58e5SYang Zhong } 1067a40ec84eSRichard Henderson qemu_spin_unlock(&env_tlb(env)->c.lock); 1068d9bb58e5SYang Zhong } 1069d9bb58e5SYang Zhong 1070d9bb58e5SYang Zhong /* Our TLB does not support large pages, so remember the area covered by 1071d9bb58e5SYang Zhong large pages and trigger a full TLB flush if these are invalidated. */ 10721308e026SRichard Henderson static void tlb_add_large_page(CPUArchState *env, int mmu_idx, 10731308e026SRichard Henderson target_ulong vaddr, target_ulong size) 1074d9bb58e5SYang Zhong { 1075a40ec84eSRichard Henderson target_ulong lp_addr = env_tlb(env)->d[mmu_idx].large_page_addr; 10761308e026SRichard Henderson target_ulong lp_mask = ~(size - 1); 1077d9bb58e5SYang Zhong 10781308e026SRichard Henderson if (lp_addr == (target_ulong)-1) { 10791308e026SRichard Henderson /* No previous large page. */ 10801308e026SRichard Henderson lp_addr = vaddr; 10811308e026SRichard Henderson } else { 1082d9bb58e5SYang Zhong /* Extend the existing region to include the new page. 10831308e026SRichard Henderson This is a compromise between unnecessary flushes and 10841308e026SRichard Henderson the cost of maintaining a full variable size TLB. */ 1085a40ec84eSRichard Henderson lp_mask &= env_tlb(env)->d[mmu_idx].large_page_mask; 10861308e026SRichard Henderson while (((lp_addr ^ vaddr) & lp_mask) != 0) { 10871308e026SRichard Henderson lp_mask <<= 1; 1088d9bb58e5SYang Zhong } 10891308e026SRichard Henderson } 1090a40ec84eSRichard Henderson env_tlb(env)->d[mmu_idx].large_page_addr = lp_addr & lp_mask; 1091a40ec84eSRichard Henderson env_tlb(env)->d[mmu_idx].large_page_mask = lp_mask; 1092d9bb58e5SYang Zhong } 1093d9bb58e5SYang Zhong 1094d9bb58e5SYang Zhong /* Add a new TLB entry. At most one entry for a given virtual address 1095d9bb58e5SYang Zhong * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the 1096d9bb58e5SYang Zhong * supplied size is only used by tlb_flush_page. 1097d9bb58e5SYang Zhong * 1098d9bb58e5SYang Zhong * Called from TCG-generated code, which is under an RCU read-side 1099d9bb58e5SYang Zhong * critical section. 
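 *
 * A minimal usage sketch, as a target's tlb_fill hook might issue it
 * (hypothetical identity-mapped RAM; the prot and mmu_idx values are
 * purely illustrative):
 *
 *     tlb_set_page_with_attrs(cs, vaddr & TARGET_PAGE_MASK,
 *                             (hwaddr)(vaddr & TARGET_PAGE_MASK),
 *                             MEMTXATTRS_UNSPECIFIED,
 *                             PAGE_READ | PAGE_WRITE | PAGE_EXEC,
 *                             cpu_mmu_index(env, false),
 *                             TARGET_PAGE_SIZE);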
1100d9bb58e5SYang Zhong */ 1101d9bb58e5SYang Zhong void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr, 1102d9bb58e5SYang Zhong hwaddr paddr, MemTxAttrs attrs, int prot, 1103d9bb58e5SYang Zhong int mmu_idx, target_ulong size) 1104d9bb58e5SYang Zhong { 1105d9bb58e5SYang Zhong CPUArchState *env = cpu->env_ptr; 1106a40ec84eSRichard Henderson CPUTLB *tlb = env_tlb(env); 1107a40ec84eSRichard Henderson CPUTLBDesc *desc = &tlb->d[mmu_idx]; 1108d9bb58e5SYang Zhong MemoryRegionSection *section; 1109d9bb58e5SYang Zhong unsigned int index; 1110d9bb58e5SYang Zhong target_ulong address; 11118f5db641SRichard Henderson target_ulong write_address; 1112d9bb58e5SYang Zhong uintptr_t addend; 111368fea038SRichard Henderson CPUTLBEntry *te, tn; 111455df6fcfSPeter Maydell hwaddr iotlb, xlat, sz, paddr_page; 111555df6fcfSPeter Maydell target_ulong vaddr_page; 1116d9bb58e5SYang Zhong int asidx = cpu_asidx_from_attrs(cpu, attrs); 111750b107c5SRichard Henderson int wp_flags; 11188f5db641SRichard Henderson bool is_ram, is_romd; 1119d9bb58e5SYang Zhong 1120d9bb58e5SYang Zhong assert_cpu_is_self(cpu); 112155df6fcfSPeter Maydell 11221308e026SRichard Henderson if (size <= TARGET_PAGE_SIZE) { 112355df6fcfSPeter Maydell sz = TARGET_PAGE_SIZE; 112455df6fcfSPeter Maydell } else { 11251308e026SRichard Henderson tlb_add_large_page(env, mmu_idx, vaddr, size); 1126d9bb58e5SYang Zhong sz = size; 112755df6fcfSPeter Maydell } 112855df6fcfSPeter Maydell vaddr_page = vaddr & TARGET_PAGE_MASK; 112955df6fcfSPeter Maydell paddr_page = paddr & TARGET_PAGE_MASK; 113055df6fcfSPeter Maydell 113155df6fcfSPeter Maydell section = address_space_translate_for_iotlb(cpu, asidx, paddr_page, 113255df6fcfSPeter Maydell &xlat, &sz, attrs, &prot); 1133d9bb58e5SYang Zhong assert(sz >= TARGET_PAGE_SIZE); 1134d9bb58e5SYang Zhong 1135d9bb58e5SYang Zhong tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx 1136d9bb58e5SYang Zhong " prot=%x idx=%d\n", 1137d9bb58e5SYang Zhong vaddr, paddr, prot, mmu_idx); 1138d9bb58e5SYang Zhong 113955df6fcfSPeter Maydell address = vaddr_page; 114055df6fcfSPeter Maydell if (size < TARGET_PAGE_SIZE) { 114130d7e098SRichard Henderson /* Repeat the MMU check and TLB fill on every access. */ 114230d7e098SRichard Henderson address |= TLB_INVALID_MASK; 114355df6fcfSPeter Maydell } 1144a26fc6f5STony Nguyen if (attrs.byte_swap) { 11455b87b3e6SRichard Henderson address |= TLB_BSWAP; 1146a26fc6f5STony Nguyen } 11478f5db641SRichard Henderson 11488f5db641SRichard Henderson is_ram = memory_region_is_ram(section->mr); 11498f5db641SRichard Henderson is_romd = memory_region_is_romd(section->mr); 11508f5db641SRichard Henderson 11518f5db641SRichard Henderson if (is_ram || is_romd) { 11528f5db641SRichard Henderson /* RAM and ROMD both have associated host memory. */ 1153d9bb58e5SYang Zhong addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat; 11548f5db641SRichard Henderson } else { 11558f5db641SRichard Henderson /* I/O does not; force the host address to NULL. */ 11568f5db641SRichard Henderson addend = 0; 1157d9bb58e5SYang Zhong } 1158d9bb58e5SYang Zhong 11598f5db641SRichard Henderson write_address = address; 11608f5db641SRichard Henderson if (is_ram) { 11618f5db641SRichard Henderson iotlb = memory_region_get_ram_addr(section->mr) + xlat; 11628f5db641SRichard Henderson /* 11638f5db641SRichard Henderson * Computing is_clean is expensive; avoid all that unless 11648f5db641SRichard Henderson * the page is actually writable. 
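 * The result is folded into write_address below as either
 * TLB_DISCARD_WRITE (ROM-like RAM) or TLB_NOTDIRTY (clean pages);
 * either low bit diverts stores to the slow path.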
11658f5db641SRichard Henderson */ 11668f5db641SRichard Henderson if (prot & PAGE_WRITE) { 11678f5db641SRichard Henderson if (section->readonly) { 11688f5db641SRichard Henderson write_address |= TLB_DISCARD_WRITE; 11698f5db641SRichard Henderson } else if (cpu_physical_memory_is_clean(iotlb)) { 11708f5db641SRichard Henderson write_address |= TLB_NOTDIRTY; 11718f5db641SRichard Henderson } 11728f5db641SRichard Henderson } 11738f5db641SRichard Henderson } else { 11748f5db641SRichard Henderson /* I/O or ROMD */ 11758f5db641SRichard Henderson iotlb = memory_region_section_get_iotlb(cpu, section) + xlat; 11768f5db641SRichard Henderson /* 11778f5db641SRichard Henderson * Writes to romd devices must go through MMIO to enable write. 11788f5db641SRichard Henderson * Reads to romd devices go through the ram_ptr found above, 11798f5db641SRichard Henderson * but of course reads to I/O must go through MMIO. 11808f5db641SRichard Henderson */ 11818f5db641SRichard Henderson write_address |= TLB_MMIO; 11828f5db641SRichard Henderson if (!is_romd) { 11838f5db641SRichard Henderson address = write_address; 11848f5db641SRichard Henderson } 11858f5db641SRichard Henderson } 11868f5db641SRichard Henderson 118750b107c5SRichard Henderson wp_flags = cpu_watchpoint_address_matches(cpu, vaddr_page, 118850b107c5SRichard Henderson TARGET_PAGE_SIZE); 1189d9bb58e5SYang Zhong 1190383beda9SRichard Henderson index = tlb_index(env, mmu_idx, vaddr_page); 1191383beda9SRichard Henderson te = tlb_entry(env, mmu_idx, vaddr_page); 1192d9bb58e5SYang Zhong 119368fea038SRichard Henderson /* 119471aec354SEmilio G. Cota * Hold the TLB lock for the rest of the function. We could acquire/release 119571aec354SEmilio G. Cota * the lock several times in the function, but it is faster to amortize the 119671aec354SEmilio G. Cota * acquisition cost by acquiring it just once. Note that this leads to 119771aec354SEmilio G. Cota * a longer critical section, but this is not a concern since the TLB lock 119871aec354SEmilio G. Cota * is unlikely to be contended. 119971aec354SEmilio G. Cota */ 1200a40ec84eSRichard Henderson qemu_spin_lock(&tlb->c.lock); 120171aec354SEmilio G. Cota 12023d1523ceSRichard Henderson /* Note that the tlb is no longer clean. */ 1203a40ec84eSRichard Henderson tlb->c.dirty |= 1 << mmu_idx; 12043d1523ceSRichard Henderson 120571aec354SEmilio G. Cota /* Make sure there's no cached translation for the new page. */ 120671aec354SEmilio G. Cota tlb_flush_vtlb_page_locked(env, mmu_idx, vaddr_page); 120771aec354SEmilio G. Cota 120871aec354SEmilio G. Cota /* 120968fea038SRichard Henderson * Only evict the old entry to the victim tlb if it's for a 121068fea038SRichard Henderson * different page; otherwise just overwrite the stale data. 121168fea038SRichard Henderson */ 12123cea94bbSEmilio G. Cota if (!tlb_hit_page_anyprot(te, vaddr_page) && !tlb_entry_is_empty(te)) { 1213a40ec84eSRichard Henderson unsigned vidx = desc->vindex++ % CPU_VTLB_SIZE; 1214a40ec84eSRichard Henderson CPUTLBEntry *tv = &desc->vtable[vidx]; 121568fea038SRichard Henderson 121668fea038SRichard Henderson /* Evict the old entry into the victim tlb. */ 121771aec354SEmilio G. Cota copy_tlb_helper_locked(tv, te); 1218a40ec84eSRichard Henderson desc->viotlb[vidx] = desc->iotlb[index]; 121986e1eff8SEmilio G. 
Cota tlb_n_used_entries_dec(env, mmu_idx); 122068fea038SRichard Henderson } 1221d9bb58e5SYang Zhong 1222d9bb58e5SYang Zhong /* refill the tlb */ 1223ace41090SPeter Maydell /* 1224ace41090SPeter Maydell * At this point iotlb contains a physical section number in the lower 1225ace41090SPeter Maydell * TARGET_PAGE_BITS, and either 12268f5db641SRichard Henderson * + the ram_addr_t of the page base of the target RAM (RAM) 12278f5db641SRichard Henderson * + the offset within section->mr of the page base (I/O, ROMD) 122855df6fcfSPeter Maydell * We subtract the vaddr_page (which is page aligned and thus won't 1229ace41090SPeter Maydell * disturb the low bits) to give an offset which can be added to the 1230ace41090SPeter Maydell * (non-page-aligned) vaddr of the eventual memory access to get 1231ace41090SPeter Maydell * the MemoryRegion offset for the access. Note that the vaddr we 1232ace41090SPeter Maydell * subtract here is that of the page base, and not the same as the 1233ace41090SPeter Maydell * vaddr we add back in io_readx()/io_writex()/get_page_addr_code(). 1234ace41090SPeter Maydell */ 1235a40ec84eSRichard Henderson desc->iotlb[index].addr = iotlb - vaddr_page; 1236a40ec84eSRichard Henderson desc->iotlb[index].attrs = attrs; 1237d9bb58e5SYang Zhong 1238d9bb58e5SYang Zhong /* Now calculate the new entry */ 123955df6fcfSPeter Maydell tn.addend = addend - vaddr_page; 1240d9bb58e5SYang Zhong if (prot & PAGE_READ) { 1241d9bb58e5SYang Zhong tn.addr_read = address; 124250b107c5SRichard Henderson if (wp_flags & BP_MEM_READ) { 124350b107c5SRichard Henderson tn.addr_read |= TLB_WATCHPOINT; 124450b107c5SRichard Henderson } 1245d9bb58e5SYang Zhong } else { 1246d9bb58e5SYang Zhong tn.addr_read = -1; 1247d9bb58e5SYang Zhong } 1248d9bb58e5SYang Zhong 1249d9bb58e5SYang Zhong if (prot & PAGE_EXEC) { 12508f5db641SRichard Henderson tn.addr_code = address; 1251d9bb58e5SYang Zhong } else { 1252d9bb58e5SYang Zhong tn.addr_code = -1; 1253d9bb58e5SYang Zhong } 1254d9bb58e5SYang Zhong 1255d9bb58e5SYang Zhong tn.addr_write = -1; 1256d9bb58e5SYang Zhong if (prot & PAGE_WRITE) { 12578f5db641SRichard Henderson tn.addr_write = write_address; 1258f52bfb12SDavid Hildenbrand if (prot & PAGE_WRITE_INV) { 1259f52bfb12SDavid Hildenbrand tn.addr_write |= TLB_INVALID_MASK; 1260f52bfb12SDavid Hildenbrand } 126150b107c5SRichard Henderson if (wp_flags & BP_MEM_WRITE) { 126250b107c5SRichard Henderson tn.addr_write |= TLB_WATCHPOINT; 126350b107c5SRichard Henderson } 1264d9bb58e5SYang Zhong } 1265d9bb58e5SYang Zhong 126671aec354SEmilio G. Cota copy_tlb_helper_locked(te, &tn); 126786e1eff8SEmilio G. Cota tlb_n_used_entries_inc(env, mmu_idx); 1268a40ec84eSRichard Henderson qemu_spin_unlock(&tlb->c.lock); 1269d9bb58e5SYang Zhong } 1270d9bb58e5SYang Zhong 1271d9bb58e5SYang Zhong /* Add a new TLB entry, but without specifying the memory 1272d9bb58e5SYang Zhong * transaction attributes to be used. 
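 * Equivalent to calling tlb_set_page_with_attrs() with
 * MEMTXATTRS_UNSPECIFIED, as the one-line body below shows.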
1273d9bb58e5SYang Zhong */ 1274d9bb58e5SYang Zhong void tlb_set_page(CPUState *cpu, target_ulong vaddr, 1275d9bb58e5SYang Zhong hwaddr paddr, int prot, 1276d9bb58e5SYang Zhong int mmu_idx, target_ulong size) 1277d9bb58e5SYang Zhong { 1278d9bb58e5SYang Zhong tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED, 1279d9bb58e5SYang Zhong prot, mmu_idx, size); 1280d9bb58e5SYang Zhong } 1281d9bb58e5SYang Zhong 1282d9bb58e5SYang Zhong static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr) 1283d9bb58e5SYang Zhong { 1284d9bb58e5SYang Zhong ram_addr_t ram_addr; 1285d9bb58e5SYang Zhong 1286d9bb58e5SYang Zhong ram_addr = qemu_ram_addr_from_host(ptr); 1287d9bb58e5SYang Zhong if (ram_addr == RAM_ADDR_INVALID) { 1288d9bb58e5SYang Zhong error_report("Bad ram pointer %p", ptr); 1289d9bb58e5SYang Zhong abort(); 1290d9bb58e5SYang Zhong } 1291d9bb58e5SYang Zhong return ram_addr; 1292d9bb58e5SYang Zhong } 1293d9bb58e5SYang Zhong 1294c319dc13SRichard Henderson /* 1295c319dc13SRichard Henderson * Note: tlb_fill() can trigger a resize of the TLB. This means that all of the 1296c319dc13SRichard Henderson * caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must 1297c319dc13SRichard Henderson * be discarded and looked up again (e.g. via tlb_entry()). 1298c319dc13SRichard Henderson */ 1299c319dc13SRichard Henderson static void tlb_fill(CPUState *cpu, target_ulong addr, int size, 1300c319dc13SRichard Henderson MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) 1301c319dc13SRichard Henderson { 1302c319dc13SRichard Henderson CPUClass *cc = CPU_GET_CLASS(cpu); 1303c319dc13SRichard Henderson bool ok; 1304c319dc13SRichard Henderson 1305c319dc13SRichard Henderson /* 1306c319dc13SRichard Henderson * This is not a probe, so the only valid return is success; failure 1307c319dc13SRichard Henderson * should result in exception + longjmp to the cpu loop.
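 * (Contrast probe_access_internal(), which may pass nonfault=true to
 * the hook and must then cope with a false return.)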
1308c319dc13SRichard Henderson */ 1309*78271684SClaudio Fontana ok = cc->tcg_ops->tlb_fill(cpu, addr, size, 1310e124536fSEduardo Habkost access_type, mmu_idx, false, retaddr); 1311c319dc13SRichard Henderson assert(ok); 1312c319dc13SRichard Henderson } 1313c319dc13SRichard Henderson 1314*78271684SClaudio Fontana static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr, 1315*78271684SClaudio Fontana MMUAccessType access_type, 1316*78271684SClaudio Fontana int mmu_idx, uintptr_t retaddr) 1317*78271684SClaudio Fontana { 1318*78271684SClaudio Fontana CPUClass *cc = CPU_GET_CLASS(cpu); 1319*78271684SClaudio Fontana 1320*78271684SClaudio Fontana cc->tcg_ops->do_unaligned_access(cpu, addr, access_type, mmu_idx, retaddr); 1321*78271684SClaudio Fontana } 1322*78271684SClaudio Fontana 1323*78271684SClaudio Fontana static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr, 1324*78271684SClaudio Fontana vaddr addr, unsigned size, 1325*78271684SClaudio Fontana MMUAccessType access_type, 1326*78271684SClaudio Fontana int mmu_idx, MemTxAttrs attrs, 1327*78271684SClaudio Fontana MemTxResult response, 1328*78271684SClaudio Fontana uintptr_t retaddr) 1329*78271684SClaudio Fontana { 1330*78271684SClaudio Fontana CPUClass *cc = CPU_GET_CLASS(cpu); 1331*78271684SClaudio Fontana 1332*78271684SClaudio Fontana if (!cpu->ignore_memory_transaction_failures && 1333*78271684SClaudio Fontana cc->tcg_ops->do_transaction_failed) { 1334*78271684SClaudio Fontana cc->tcg_ops->do_transaction_failed(cpu, physaddr, addr, size, 1335*78271684SClaudio Fontana access_type, mmu_idx, attrs, 1336*78271684SClaudio Fontana response, retaddr); 1337*78271684SClaudio Fontana } 1338*78271684SClaudio Fontana } 1339*78271684SClaudio Fontana 1340d9bb58e5SYang Zhong static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry, 1341f1be3696SRichard Henderson int mmu_idx, target_ulong addr, uintptr_t retaddr, 1342be5c4787STony Nguyen MMUAccessType access_type, MemOp op) 1343d9bb58e5SYang Zhong { 134429a0af61SRichard Henderson CPUState *cpu = env_cpu(env); 13452d54f194SPeter Maydell hwaddr mr_offset; 13462d54f194SPeter Maydell MemoryRegionSection *section; 13472d54f194SPeter Maydell MemoryRegion *mr; 1348d9bb58e5SYang Zhong uint64_t val; 1349d9bb58e5SYang Zhong bool locked = false; 135004e3aabdSPeter Maydell MemTxResult r; 1351d9bb58e5SYang Zhong 13522d54f194SPeter Maydell section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs); 13532d54f194SPeter Maydell mr = section->mr; 13542d54f194SPeter Maydell mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr; 1355d9bb58e5SYang Zhong cpu->mem_io_pc = retaddr; 135608565552SRichard Henderson if (!cpu->can_do_io) { 1357d9bb58e5SYang Zhong cpu_io_recompile(cpu, retaddr); 1358d9bb58e5SYang Zhong } 1359d9bb58e5SYang Zhong 136041744954SPhilippe Mathieu-Daudé if (!qemu_mutex_iothread_locked()) { 1361d9bb58e5SYang Zhong qemu_mutex_lock_iothread(); 1362d9bb58e5SYang Zhong locked = true; 1363d9bb58e5SYang Zhong } 1364be5c4787STony Nguyen r = memory_region_dispatch_read(mr, mr_offset, &val, op, iotlbentry->attrs); 136504e3aabdSPeter Maydell if (r != MEMTX_OK) { 13662d54f194SPeter Maydell hwaddr physaddr = mr_offset + 13672d54f194SPeter Maydell section->offset_within_address_space - 13682d54f194SPeter Maydell section->offset_within_region; 13692d54f194SPeter Maydell 1370be5c4787STony Nguyen cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), access_type, 137104e3aabdSPeter Maydell mmu_idx, iotlbentry->attrs, r, retaddr); 137204e3aabdSPeter Maydell } 
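    /*
     * If the transaction failed but the target's do_transaction_failed
     * hook was absent (or returned), we fall through and hand back
     * whatever value the dispatch left in 'val'.
     */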
1373d9bb58e5SYang Zhong if (locked) { 1374d9bb58e5SYang Zhong qemu_mutex_unlock_iothread(); 1375d9bb58e5SYang Zhong } 1376d9bb58e5SYang Zhong 1377d9bb58e5SYang Zhong return val; 1378d9bb58e5SYang Zhong } 1379d9bb58e5SYang Zhong 13802f3a57eeSAlex Bennée /* 13812f3a57eeSAlex Bennée * Save a potentially trashed IOTLB entry for later lookup by plugin. 1382570ef309SAlex Bennée * This is read by tlb_plugin_lookup if the iotlb entry doesn't match 1383570ef309SAlex Bennée * because of the side effect of io_writex changing memory layout. 13842f3a57eeSAlex Bennée */ 13852f3a57eeSAlex Bennée static void save_iotlb_data(CPUState *cs, hwaddr addr, 13862f3a57eeSAlex Bennée MemoryRegionSection *section, hwaddr mr_offset) 13872f3a57eeSAlex Bennée { 13882f3a57eeSAlex Bennée #ifdef CONFIG_PLUGIN 13892f3a57eeSAlex Bennée SavedIOTLB *saved = &cs->saved_iotlb; 13902f3a57eeSAlex Bennée saved->addr = addr; 13912f3a57eeSAlex Bennée saved->section = section; 13922f3a57eeSAlex Bennée saved->mr_offset = mr_offset; 13932f3a57eeSAlex Bennée #endif 13942f3a57eeSAlex Bennée } 13952f3a57eeSAlex Bennée 1396d9bb58e5SYang Zhong static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry, 1397f1be3696SRichard Henderson int mmu_idx, uint64_t val, target_ulong addr, 1398be5c4787STony Nguyen uintptr_t retaddr, MemOp op) 1399d9bb58e5SYang Zhong { 140029a0af61SRichard Henderson CPUState *cpu = env_cpu(env); 14012d54f194SPeter Maydell hwaddr mr_offset; 14022d54f194SPeter Maydell MemoryRegionSection *section; 14032d54f194SPeter Maydell MemoryRegion *mr; 1404d9bb58e5SYang Zhong bool locked = false; 140504e3aabdSPeter Maydell MemTxResult r; 1406d9bb58e5SYang Zhong 14072d54f194SPeter Maydell section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs); 14082d54f194SPeter Maydell mr = section->mr; 14092d54f194SPeter Maydell mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr; 141008565552SRichard Henderson if (!cpu->can_do_io) { 1411d9bb58e5SYang Zhong cpu_io_recompile(cpu, retaddr); 1412d9bb58e5SYang Zhong } 1413d9bb58e5SYang Zhong cpu->mem_io_pc = retaddr; 1414d9bb58e5SYang Zhong 14152f3a57eeSAlex Bennée /* 14162f3a57eeSAlex Bennée * The memory_region_dispatch may trigger a flush/resize 14172f3a57eeSAlex Bennée * so for plugins we save the iotlb_data just in case. 
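 * The saved copy is consumed by tlb_plugin_lookup() when the TLB
 * entry no longer matches; see the CONFIG_PLUGIN block further down.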
14182f3a57eeSAlex Bennée */ 14192f3a57eeSAlex Bennée save_iotlb_data(cpu, iotlbentry->addr, section, mr_offset); 14202f3a57eeSAlex Bennée 142141744954SPhilippe Mathieu-Daudé if (!qemu_mutex_iothread_locked()) { 1422d9bb58e5SYang Zhong qemu_mutex_lock_iothread(); 1423d9bb58e5SYang Zhong locked = true; 1424d9bb58e5SYang Zhong } 1425be5c4787STony Nguyen r = memory_region_dispatch_write(mr, mr_offset, val, op, iotlbentry->attrs); 142604e3aabdSPeter Maydell if (r != MEMTX_OK) { 14272d54f194SPeter Maydell hwaddr physaddr = mr_offset + 14282d54f194SPeter Maydell section->offset_within_address_space - 14292d54f194SPeter Maydell section->offset_within_region; 14302d54f194SPeter Maydell 1431be5c4787STony Nguyen cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), 1432be5c4787STony Nguyen MMU_DATA_STORE, mmu_idx, iotlbentry->attrs, r, 1433be5c4787STony Nguyen retaddr); 143404e3aabdSPeter Maydell } 1435d9bb58e5SYang Zhong if (locked) { 1436d9bb58e5SYang Zhong qemu_mutex_unlock_iothread(); 1437d9bb58e5SYang Zhong } 1438d9bb58e5SYang Zhong } 1439d9bb58e5SYang Zhong 14404811e909SRichard Henderson static inline target_ulong tlb_read_ofs(CPUTLBEntry *entry, size_t ofs) 14414811e909SRichard Henderson { 14424811e909SRichard Henderson #if TCG_OVERSIZED_GUEST 14434811e909SRichard Henderson return *(target_ulong *)((uintptr_t)entry + ofs); 14444811e909SRichard Henderson #else 1445d73415a3SStefan Hajnoczi /* ofs might correspond to .addr_write, so use qatomic_read */ 1446d73415a3SStefan Hajnoczi return qatomic_read((target_ulong *)((uintptr_t)entry + ofs)); 14474811e909SRichard Henderson #endif 14484811e909SRichard Henderson } 14494811e909SRichard Henderson 1450d9bb58e5SYang Zhong /* Return true if ADDR is present in the victim tlb, and has been copied 1451d9bb58e5SYang Zhong back to the main tlb. */ 1452d9bb58e5SYang Zhong static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index, 1453d9bb58e5SYang Zhong size_t elt_ofs, target_ulong page) 1454d9bb58e5SYang Zhong { 1455d9bb58e5SYang Zhong size_t vidx; 145671aec354SEmilio G. Cota 145729a0af61SRichard Henderson assert_cpu_is_self(env_cpu(env)); 1458d9bb58e5SYang Zhong for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) { 1459a40ec84eSRichard Henderson CPUTLBEntry *vtlb = &env_tlb(env)->d[mmu_idx].vtable[vidx]; 1460a40ec84eSRichard Henderson target_ulong cmp; 1461a40ec84eSRichard Henderson 1462d73415a3SStefan Hajnoczi /* elt_ofs might correspond to .addr_write, so use qatomic_read */ 1463a40ec84eSRichard Henderson #if TCG_OVERSIZED_GUEST 1464a40ec84eSRichard Henderson cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs); 1465a40ec84eSRichard Henderson #else 1466d73415a3SStefan Hajnoczi cmp = qatomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs)); 1467a40ec84eSRichard Henderson #endif 1468d9bb58e5SYang Zhong 1469d9bb58e5SYang Zhong if (cmp == page) { 1470d9bb58e5SYang Zhong /* Found entry in victim tlb, swap tlb and iotlb. */ 1471a40ec84eSRichard Henderson CPUTLBEntry tmptlb, *tlb = &env_tlb(env)->f[mmu_idx].table[index]; 1472d9bb58e5SYang Zhong 1473a40ec84eSRichard Henderson qemu_spin_lock(&env_tlb(env)->c.lock); 147471aec354SEmilio G. Cota copy_tlb_helper_locked(&tmptlb, tlb); 147571aec354SEmilio G. Cota copy_tlb_helper_locked(tlb, vtlb); 147671aec354SEmilio G. 
Cota copy_tlb_helper_locked(vtlb, &tmptlb); 1477a40ec84eSRichard Henderson qemu_spin_unlock(&env_tlb(env)->c.lock); 1478d9bb58e5SYang Zhong 1479a40ec84eSRichard Henderson CPUIOTLBEntry tmpio, *io = &env_tlb(env)->d[mmu_idx].iotlb[index]; 1480a40ec84eSRichard Henderson CPUIOTLBEntry *vio = &env_tlb(env)->d[mmu_idx].viotlb[vidx]; 1481d9bb58e5SYang Zhong tmpio = *io; *io = *vio; *vio = tmpio; 1482d9bb58e5SYang Zhong return true; 1483d9bb58e5SYang Zhong } 1484d9bb58e5SYang Zhong } 1485d9bb58e5SYang Zhong return false; 1486d9bb58e5SYang Zhong } 1487d9bb58e5SYang Zhong 1488d9bb58e5SYang Zhong /* Macro to call the above, with local variables from the use context. */ 1489d9bb58e5SYang Zhong #define VICTIM_TLB_HIT(TY, ADDR) \ 1490d9bb58e5SYang Zhong victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \ 1491d9bb58e5SYang Zhong (ADDR) & TARGET_PAGE_MASK) 1492d9bb58e5SYang Zhong 149330d7e098SRichard Henderson /* 149430d7e098SRichard Henderson * Return a ram_addr_t for the virtual address for execution. 149530d7e098SRichard Henderson * 149630d7e098SRichard Henderson * Return -1 if we can't translate and execute from an entire page 149730d7e098SRichard Henderson * of RAM. This will force us to execute by loading and translating 149830d7e098SRichard Henderson * one insn at a time, without caching. 149930d7e098SRichard Henderson * 150030d7e098SRichard Henderson * NOTE: This function will trigger an exception if the page is 150130d7e098SRichard Henderson * not executable. 1502f2553f04SKONRAD Frederic */ 15034b2190daSEmilio G. Cota tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr, 15044b2190daSEmilio G. Cota void **hostp) 1505f2553f04SKONRAD Frederic { 1506383beda9SRichard Henderson uintptr_t mmu_idx = cpu_mmu_index(env, true); 1507383beda9SRichard Henderson uintptr_t index = tlb_index(env, mmu_idx, addr); 1508383beda9SRichard Henderson CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); 1509f2553f04SKONRAD Frederic void *p; 1510f2553f04SKONRAD Frederic 1511383beda9SRichard Henderson if (unlikely(!tlb_hit(entry->addr_code, addr))) { 1512b493ccf1SPeter Maydell if (!VICTIM_TLB_HIT(addr_code, addr)) { 151329a0af61SRichard Henderson tlb_fill(env_cpu(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0); 15146d967cb8SEmilio G. Cota index = tlb_index(env, mmu_idx, addr); 15156d967cb8SEmilio G. Cota entry = tlb_entry(env, mmu_idx, addr); 151630d7e098SRichard Henderson 151730d7e098SRichard Henderson if (unlikely(entry->addr_code & TLB_INVALID_MASK)) { 151830d7e098SRichard Henderson /* 151930d7e098SRichard Henderson * The MMU protection covers a smaller range than a target 152030d7e098SRichard Henderson * page, so we must redo the MMU check for every insn. 152130d7e098SRichard Henderson */ 152230d7e098SRichard Henderson return -1; 152330d7e098SRichard Henderson } 152471b9a453SKONRAD Frederic } 1525383beda9SRichard Henderson assert(tlb_hit(entry->addr_code, addr)); 1526f2553f04SKONRAD Frederic } 152755df6fcfSPeter Maydell 152830d7e098SRichard Henderson if (unlikely(entry->addr_code & TLB_MMIO)) { 152930d7e098SRichard Henderson /* The region is not backed by RAM. */ 15304b2190daSEmilio G. Cota if (hostp) { 15314b2190daSEmilio G. Cota *hostp = NULL; 15324b2190daSEmilio G. Cota } 153320cb6ae4SPeter Maydell return -1; 153455df6fcfSPeter Maydell } 153555df6fcfSPeter Maydell 1536383beda9SRichard Henderson p = (void *)((uintptr_t)addr + entry->addend); 15374b2190daSEmilio G. Cota if (hostp) { 15384b2190daSEmilio G. Cota *hostp = p; 15394b2190daSEmilio G. 
Cota } 1540f2553f04SKONRAD Frederic return qemu_ram_addr_from_host_nofail(p); 1541f2553f04SKONRAD Frederic } 1542f2553f04SKONRAD Frederic 15434b2190daSEmilio G. Cota tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr) 15444b2190daSEmilio G. Cota { 15454b2190daSEmilio G. Cota return get_page_addr_code_hostp(env, addr, NULL); 15464b2190daSEmilio G. Cota } 15474b2190daSEmilio G. Cota 1548707526adSRichard Henderson static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size, 1549707526adSRichard Henderson CPUIOTLBEntry *iotlbentry, uintptr_t retaddr) 1550707526adSRichard Henderson { 1551707526adSRichard Henderson ram_addr_t ram_addr = mem_vaddr + iotlbentry->addr; 1552707526adSRichard Henderson 1553707526adSRichard Henderson trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size); 1554707526adSRichard Henderson 1555707526adSRichard Henderson if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) { 1556707526adSRichard Henderson struct page_collection *pages 1557707526adSRichard Henderson = page_collection_lock(ram_addr, ram_addr + size); 15585a7c27bbSRichard Henderson tb_invalidate_phys_page_fast(pages, ram_addr, size, retaddr); 1559707526adSRichard Henderson page_collection_unlock(pages); 1560707526adSRichard Henderson } 1561707526adSRichard Henderson 1562707526adSRichard Henderson /* 1563707526adSRichard Henderson * Set both VGA and migration bits for simplicity and to remove 1564707526adSRichard Henderson * the notdirty callback faster. 1565707526adSRichard Henderson */ 1566707526adSRichard Henderson cpu_physical_memory_set_dirty_range(ram_addr, size, DIRTY_CLIENTS_NOCODE); 1567707526adSRichard Henderson 1568707526adSRichard Henderson /* We remove the notdirty callback only if the code has been flushed. 
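 * If the page still holds translated blocks, its DIRTY_MEMORY_CODE
 * bit stays clean, cpu_physical_memory_is_clean() remains true, and
 * TLB_NOTDIRTY is kept so later writes still trap into this path.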
*/ 1569707526adSRichard Henderson if (!cpu_physical_memory_is_clean(ram_addr)) { 1570707526adSRichard Henderson trace_memory_notdirty_set_dirty(mem_vaddr); 1571707526adSRichard Henderson tlb_set_dirty(cpu, mem_vaddr); 1572707526adSRichard Henderson } 1573707526adSRichard Henderson } 1574707526adSRichard Henderson 1575069cfe77SRichard Henderson static int probe_access_internal(CPUArchState *env, target_ulong addr, 1576069cfe77SRichard Henderson int fault_size, MMUAccessType access_type, 1577069cfe77SRichard Henderson int mmu_idx, bool nonfault, 1578069cfe77SRichard Henderson void **phost, uintptr_t retaddr) 1579d9bb58e5SYang Zhong { 1580383beda9SRichard Henderson uintptr_t index = tlb_index(env, mmu_idx, addr); 1581383beda9SRichard Henderson CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); 1582069cfe77SRichard Henderson target_ulong tlb_addr, page_addr; 1583c25c283dSDavid Hildenbrand size_t elt_ofs; 1584069cfe77SRichard Henderson int flags; 1585ca86cf32SDavid Hildenbrand 1586c25c283dSDavid Hildenbrand switch (access_type) { 1587c25c283dSDavid Hildenbrand case MMU_DATA_LOAD: 1588c25c283dSDavid Hildenbrand elt_ofs = offsetof(CPUTLBEntry, addr_read); 1589c25c283dSDavid Hildenbrand break; 1590c25c283dSDavid Hildenbrand case MMU_DATA_STORE: 1591c25c283dSDavid Hildenbrand elt_ofs = offsetof(CPUTLBEntry, addr_write); 1592c25c283dSDavid Hildenbrand break; 1593c25c283dSDavid Hildenbrand case MMU_INST_FETCH: 1594c25c283dSDavid Hildenbrand elt_ofs = offsetof(CPUTLBEntry, addr_code); 1595c25c283dSDavid Hildenbrand break; 1596c25c283dSDavid Hildenbrand default: 1597c25c283dSDavid Hildenbrand g_assert_not_reached(); 1598c25c283dSDavid Hildenbrand } 1599c25c283dSDavid Hildenbrand tlb_addr = tlb_read_ofs(entry, elt_ofs); 1600c25c283dSDavid Hildenbrand 1601069cfe77SRichard Henderson page_addr = addr & TARGET_PAGE_MASK; 1602069cfe77SRichard Henderson if (!tlb_hit_page(tlb_addr, page_addr)) { 1603069cfe77SRichard Henderson if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, page_addr)) { 1604069cfe77SRichard Henderson CPUState *cs = env_cpu(env); 1605069cfe77SRichard Henderson CPUClass *cc = CPU_GET_CLASS(cs); 1606069cfe77SRichard Henderson 1607*78271684SClaudio Fontana if (!cc->tcg_ops->tlb_fill(cs, addr, fault_size, access_type, 1608069cfe77SRichard Henderson mmu_idx, nonfault, retaddr)) { 1609069cfe77SRichard Henderson /* Non-faulting page table read failed. */ 1610069cfe77SRichard Henderson *phost = NULL; 1611069cfe77SRichard Henderson return TLB_INVALID_MASK; 1612069cfe77SRichard Henderson } 1613069cfe77SRichard Henderson 161403a98189SDavid Hildenbrand /* TLB resize via tlb_fill may have moved the entry. */ 161503a98189SDavid Hildenbrand entry = tlb_entry(env, mmu_idx, addr); 1616d9bb58e5SYang Zhong } 1617c25c283dSDavid Hildenbrand tlb_addr = tlb_read_ofs(entry, elt_ofs); 161803a98189SDavid Hildenbrand } 1619069cfe77SRichard Henderson flags = tlb_addr & TLB_FLAGS_MASK; 162003a98189SDavid Hildenbrand 1621069cfe77SRichard Henderson /* Fold all "mmio-like" bits into TLB_MMIO. This is not RAM. */ 1622069cfe77SRichard Henderson if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY))) { 1623069cfe77SRichard Henderson *phost = NULL; 1624069cfe77SRichard Henderson return TLB_MMIO; 1625fef39ccdSDavid Hildenbrand } 1626fef39ccdSDavid Hildenbrand 1627069cfe77SRichard Henderson /* Everything else is RAM. 
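 * The returned flags may still include TLB_WATCHPOINT or TLB_NOTDIRTY;
 * callers such as probe_access() and probe_access_flags() below deal
 * with those before touching the host pointer.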
*/ 1628069cfe77SRichard Henderson *phost = (void *)((uintptr_t)addr + entry->addend); 1629069cfe77SRichard Henderson return flags; 1630069cfe77SRichard Henderson } 1631069cfe77SRichard Henderson 1632069cfe77SRichard Henderson int probe_access_flags(CPUArchState *env, target_ulong addr, 1633069cfe77SRichard Henderson MMUAccessType access_type, int mmu_idx, 1634069cfe77SRichard Henderson bool nonfault, void **phost, uintptr_t retaddr) 1635069cfe77SRichard Henderson { 1636069cfe77SRichard Henderson int flags; 1637069cfe77SRichard Henderson 1638069cfe77SRichard Henderson flags = probe_access_internal(env, addr, 0, access_type, mmu_idx, 1639069cfe77SRichard Henderson nonfault, phost, retaddr); 1640069cfe77SRichard Henderson 1641069cfe77SRichard Henderson /* Handle clean RAM pages. */ 1642069cfe77SRichard Henderson if (unlikely(flags & TLB_NOTDIRTY)) { 1643069cfe77SRichard Henderson uintptr_t index = tlb_index(env, mmu_idx, addr); 164473bc0bd4SRichard Henderson CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index]; 164573bc0bd4SRichard Henderson 1646069cfe77SRichard Henderson notdirty_write(env_cpu(env), addr, 1, iotlbentry, retaddr); 1647069cfe77SRichard Henderson flags &= ~TLB_NOTDIRTY; 1648069cfe77SRichard Henderson } 1649069cfe77SRichard Henderson 1650069cfe77SRichard Henderson return flags; 1651069cfe77SRichard Henderson } 1652069cfe77SRichard Henderson 1653069cfe77SRichard Henderson void *probe_access(CPUArchState *env, target_ulong addr, int size, 1654069cfe77SRichard Henderson MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) 1655069cfe77SRichard Henderson { 1656069cfe77SRichard Henderson void *host; 1657069cfe77SRichard Henderson int flags; 1658069cfe77SRichard Henderson 1659069cfe77SRichard Henderson g_assert(-(addr | TARGET_PAGE_MASK) >= size); 1660069cfe77SRichard Henderson 1661069cfe77SRichard Henderson flags = probe_access_internal(env, addr, size, access_type, mmu_idx, 1662069cfe77SRichard Henderson false, &host, retaddr); 1663069cfe77SRichard Henderson 1664069cfe77SRichard Henderson /* Per the interface, size == 0 merely faults the access. */ 1665069cfe77SRichard Henderson if (size == 0) { 166673bc0bd4SRichard Henderson return NULL; 166773bc0bd4SRichard Henderson } 166873bc0bd4SRichard Henderson 1669069cfe77SRichard Henderson if (unlikely(flags & (TLB_NOTDIRTY | TLB_WATCHPOINT))) { 1670069cfe77SRichard Henderson uintptr_t index = tlb_index(env, mmu_idx, addr); 1671069cfe77SRichard Henderson CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index]; 1672069cfe77SRichard Henderson 167303a98189SDavid Hildenbrand /* Handle watchpoints. */ 1674069cfe77SRichard Henderson if (flags & TLB_WATCHPOINT) { 1675069cfe77SRichard Henderson int wp_access = (access_type == MMU_DATA_STORE 1676069cfe77SRichard Henderson ? BP_MEM_WRITE : BP_MEM_READ); 167703a98189SDavid Hildenbrand cpu_check_watchpoint(env_cpu(env), addr, size, 167873bc0bd4SRichard Henderson iotlbentry->attrs, wp_access, retaddr); 1679d9bb58e5SYang Zhong } 1680fef39ccdSDavid Hildenbrand 168173bc0bd4SRichard Henderson /* Handle clean RAM pages. 
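 * A one-byte notdirty_write() suffices here, since dirty tracking is
 * per-page and the assert above guarantees the access does not cross
 * a page boundary.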
*/ 1682069cfe77SRichard Henderson if (flags & TLB_NOTDIRTY) { 1683069cfe77SRichard Henderson notdirty_write(env_cpu(env), addr, 1, iotlbentry, retaddr); 168473bc0bd4SRichard Henderson } 1685fef39ccdSDavid Hildenbrand } 1686fef39ccdSDavid Hildenbrand 1687069cfe77SRichard Henderson return host; 1688d9bb58e5SYang Zhong } 1689d9bb58e5SYang Zhong 16904811e909SRichard Henderson void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr, 16914811e909SRichard Henderson MMUAccessType access_type, int mmu_idx) 16924811e909SRichard Henderson { 1693069cfe77SRichard Henderson void *host; 1694069cfe77SRichard Henderson int flags; 16954811e909SRichard Henderson 1696069cfe77SRichard Henderson flags = probe_access_internal(env, addr, 0, access_type, 1697069cfe77SRichard Henderson mmu_idx, true, &host, 0); 1698069cfe77SRichard Henderson 1699069cfe77SRichard Henderson /* No combination of flags are expected by the caller. */ 1700069cfe77SRichard Henderson return flags ? NULL : host; 17014811e909SRichard Henderson } 17024811e909SRichard Henderson 1703235537faSAlex Bennée #ifdef CONFIG_PLUGIN 1704235537faSAlex Bennée /* 1705235537faSAlex Bennée * Perform a TLB lookup and populate the qemu_plugin_hwaddr structure. 1706235537faSAlex Bennée * This should be a hot path as we will have just looked this path up 1707235537faSAlex Bennée * in the softmmu lookup code (or helper). We don't handle re-fills or 1708235537faSAlex Bennée * checking the victim table. This is purely informational. 1709235537faSAlex Bennée * 17102f3a57eeSAlex Bennée * This almost never fails as the memory access being instrumented 17112f3a57eeSAlex Bennée * should have just filled the TLB. The one corner case is io_writex 17122f3a57eeSAlex Bennée * which can cause TLB flushes and potential resizing of the TLBs 1713570ef309SAlex Bennée * losing the information we need. In those cases we need to recover 1714570ef309SAlex Bennée * data from a copy of the iotlbentry. As long as this always occurs 1715570ef309SAlex Bennée * from the same thread (which a mem callback will be) this is safe. 1716235537faSAlex Bennée */ 1717235537faSAlex Bennée 1718235537faSAlex Bennée bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr, int mmu_idx, 1719235537faSAlex Bennée bool is_store, struct qemu_plugin_hwaddr *data) 1720235537faSAlex Bennée { 1721235537faSAlex Bennée CPUArchState *env = cpu->env_ptr; 1722235537faSAlex Bennée CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr); 1723235537faSAlex Bennée uintptr_t index = tlb_index(env, mmu_idx, addr); 1724235537faSAlex Bennée target_ulong tlb_addr = is_store ? 
tlb_addr_write(tlbe) : tlbe->addr_read; 1725235537faSAlex Bennée 1726235537faSAlex Bennée if (likely(tlb_hit(tlb_addr, addr))) { 1727235537faSAlex Bennée /* We must have an iotlb entry for MMIO */ 1728235537faSAlex Bennée if (tlb_addr & TLB_MMIO) { 1729235537faSAlex Bennée CPUIOTLBEntry *iotlbentry; 1730235537faSAlex Bennée iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index]; 1731235537faSAlex Bennée data->is_io = true; 1732235537faSAlex Bennée data->v.io.section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs); 1733235537faSAlex Bennée data->v.io.offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr; 1734235537faSAlex Bennée } else { 1735235537faSAlex Bennée data->is_io = false; 1736235537faSAlex Bennée data->v.ram.hostaddr = addr + tlbe->addend; 1737235537faSAlex Bennée } 1738235537faSAlex Bennée return true; 17392f3a57eeSAlex Bennée } else { 17402f3a57eeSAlex Bennée SavedIOTLB *saved = &cpu->saved_iotlb; 17412f3a57eeSAlex Bennée data->is_io = true; 17422f3a57eeSAlex Bennée data->v.io.section = saved->section; 17432f3a57eeSAlex Bennée data->v.io.offset = saved->mr_offset; 17442f3a57eeSAlex Bennée return true; 1745235537faSAlex Bennée } 1746235537faSAlex Bennée } 1747235537faSAlex Bennée 1748235537faSAlex Bennée #endif 1749235537faSAlex Bennée 1750d9bb58e5SYang Zhong /* Probe for a read-modify-write atomic operation. Do not allow unaligned 1751d9bb58e5SYang Zhong * operations, or io operations to proceed. Return the host address. */ 1752d9bb58e5SYang Zhong static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, 1753707526adSRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 1754d9bb58e5SYang Zhong { 1755d9bb58e5SYang Zhong size_t mmu_idx = get_mmuidx(oi); 1756383beda9SRichard Henderson uintptr_t index = tlb_index(env, mmu_idx, addr); 1757383beda9SRichard Henderson CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr); 1758403f290cSEmilio G. Cota target_ulong tlb_addr = tlb_addr_write(tlbe); 175914776ab5STony Nguyen MemOp mop = get_memop(oi); 1760d9bb58e5SYang Zhong int a_bits = get_alignment_bits(mop); 1761d9bb58e5SYang Zhong int s_bits = mop & MO_SIZE; 176234d49937SPeter Maydell void *hostaddr; 1763d9bb58e5SYang Zhong 1764d9bb58e5SYang Zhong /* Adjust the given return address. */ 1765d9bb58e5SYang Zhong retaddr -= GETPC_ADJ; 1766d9bb58e5SYang Zhong 1767d9bb58e5SYang Zhong /* Enforce guest required alignment. */ 1768d9bb58e5SYang Zhong if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) { 1769d9bb58e5SYang Zhong /* ??? Maybe indicate atomic op to cpu_unaligned_access */ 177029a0af61SRichard Henderson cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE, 1771d9bb58e5SYang Zhong mmu_idx, retaddr); 1772d9bb58e5SYang Zhong } 1773d9bb58e5SYang Zhong 1774d9bb58e5SYang Zhong /* Enforce qemu required alignment. */ 1775d9bb58e5SYang Zhong if (unlikely(addr & ((1 << s_bits) - 1))) { 1776d9bb58e5SYang Zhong /* We get here if guest alignment was not requested, 1777d9bb58e5SYang Zhong or was not enforced by cpu_unaligned_access above. 1778d9bb58e5SYang Zhong We might widen the access and emulate, but for now 1779d9bb58e5SYang Zhong mark an exception and exit the cpu loop. */ 1780d9bb58e5SYang Zhong goto stop_the_world; 1781d9bb58e5SYang Zhong } 1782d9bb58e5SYang Zhong 1783d9bb58e5SYang Zhong /* Check TLB entry and enforce page permissions. 
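 * A victim-TLB hit refills the main table in place; a true miss goes
 * through tlb_fill(), which may resize the TLB, so index and tlbe are
 * recomputed before use.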
*/ 1784334692bcSPeter Maydell if (!tlb_hit(tlb_addr, addr)) { 1785d9bb58e5SYang Zhong if (!VICTIM_TLB_HIT(addr_write, addr)) { 178629a0af61SRichard Henderson tlb_fill(env_cpu(env), addr, 1 << s_bits, MMU_DATA_STORE, 178798670d47SLaurent Vivier mmu_idx, retaddr); 17886d967cb8SEmilio G. Cota index = tlb_index(env, mmu_idx, addr); 17896d967cb8SEmilio G. Cota tlbe = tlb_entry(env, mmu_idx, addr); 1790d9bb58e5SYang Zhong } 1791403f290cSEmilio G. Cota tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK; 1792d9bb58e5SYang Zhong } 1793d9bb58e5SYang Zhong 179455df6fcfSPeter Maydell /* Notice an IO access or a needs-MMU-lookup access */ 179530d7e098SRichard Henderson if (unlikely(tlb_addr & TLB_MMIO)) { 1796d9bb58e5SYang Zhong /* There's really nothing that can be done to 1797d9bb58e5SYang Zhong support this apart from stop-the-world. */ 1798d9bb58e5SYang Zhong goto stop_the_world; 1799d9bb58e5SYang Zhong } 1800d9bb58e5SYang Zhong 1801d9bb58e5SYang Zhong /* Let the guest notice RMW on a write-only page. */ 180234d49937SPeter Maydell if (unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) { 180329a0af61SRichard Henderson tlb_fill(env_cpu(env), addr, 1 << s_bits, MMU_DATA_LOAD, 180498670d47SLaurent Vivier mmu_idx, retaddr); 1805d9bb58e5SYang Zhong /* Since we don't support reads and writes to different addresses, 1806d9bb58e5SYang Zhong and we do have the proper page loaded for write, this shouldn't 1807d9bb58e5SYang Zhong ever return. But just in case, handle via stop-the-world. */ 1808d9bb58e5SYang Zhong goto stop_the_world; 1809d9bb58e5SYang Zhong } 1810d9bb58e5SYang Zhong 181134d49937SPeter Maydell hostaddr = (void *)((uintptr_t)addr + tlbe->addend); 181234d49937SPeter Maydell 181334d49937SPeter Maydell if (unlikely(tlb_addr & TLB_NOTDIRTY)) { 1814707526adSRichard Henderson notdirty_write(env_cpu(env), addr, 1 << s_bits, 1815707526adSRichard Henderson &env_tlb(env)->d[mmu_idx].iotlb[index], retaddr); 181634d49937SPeter Maydell } 181734d49937SPeter Maydell 181834d49937SPeter Maydell return hostaddr; 1819d9bb58e5SYang Zhong 1820d9bb58e5SYang Zhong stop_the_world: 182129a0af61SRichard Henderson cpu_loop_exit_atomic(env_cpu(env), retaddr); 1822d9bb58e5SYang Zhong } 1823d9bb58e5SYang Zhong 1824eed56642SAlex Bennée /* 1825eed56642SAlex Bennée * Load Helpers 1826eed56642SAlex Bennée * 1827eed56642SAlex Bennée * We support two different access types. SOFTMMU_CODE_ACCESS is 1828eed56642SAlex Bennée * specifically for reading instructions from system memory. It is 1829eed56642SAlex Bennée * called by the translation loop and in some helpers where the code 1830eed56642SAlex Bennée * is disassembled. It shouldn't be called directly by guest code. 
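 *
 * Each exported helper below is a thin wrapper that pins the MemOp
 * down to a compile-time constant, e.g. (the exact pattern used below):
 *
 *     static uint64_t full_le_ldul_mmu(CPUArchState *env, target_ulong addr,
 *                                      TCGMemOpIdx oi, uintptr_t retaddr)
 *     {
 *         return load_helper(env, addr, oi, retaddr, MO_LEUL, false,
 *                            full_le_ldul_mmu);
 *     }
 *
 * Passing the wrapper itself as the full_load callback lets the slow
 * unaligned path recurse with the same constant-folded MemOp.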
1831eed56642SAlex Bennée */ 1832d9bb58e5SYang Zhong 18332dd92606SRichard Henderson typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr, 18342dd92606SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr); 18352dd92606SRichard Henderson 1836c6b716cdSRichard Henderson static inline uint64_t QEMU_ALWAYS_INLINE 183780d9d1c6SRichard Henderson load_memop(const void *haddr, MemOp op) 183880d9d1c6SRichard Henderson { 183980d9d1c6SRichard Henderson switch (op) { 184080d9d1c6SRichard Henderson case MO_UB: 184180d9d1c6SRichard Henderson return ldub_p(haddr); 184280d9d1c6SRichard Henderson case MO_BEUW: 184380d9d1c6SRichard Henderson return lduw_be_p(haddr); 184480d9d1c6SRichard Henderson case MO_LEUW: 184580d9d1c6SRichard Henderson return lduw_le_p(haddr); 184680d9d1c6SRichard Henderson case MO_BEUL: 184780d9d1c6SRichard Henderson return (uint32_t)ldl_be_p(haddr); 184880d9d1c6SRichard Henderson case MO_LEUL: 184980d9d1c6SRichard Henderson return (uint32_t)ldl_le_p(haddr); 185080d9d1c6SRichard Henderson case MO_BEQ: 185180d9d1c6SRichard Henderson return ldq_be_p(haddr); 185280d9d1c6SRichard Henderson case MO_LEQ: 185380d9d1c6SRichard Henderson return ldq_le_p(haddr); 185480d9d1c6SRichard Henderson default: 185580d9d1c6SRichard Henderson qemu_build_not_reached(); 185680d9d1c6SRichard Henderson } 185780d9d1c6SRichard Henderson } 185880d9d1c6SRichard Henderson 185980d9d1c6SRichard Henderson static inline uint64_t QEMU_ALWAYS_INLINE 18602dd92606SRichard Henderson load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, 1861be5c4787STony Nguyen uintptr_t retaddr, MemOp op, bool code_read, 18622dd92606SRichard Henderson FullLoadHelper *full_load) 1863eed56642SAlex Bennée { 1864eed56642SAlex Bennée uintptr_t mmu_idx = get_mmuidx(oi); 1865eed56642SAlex Bennée uintptr_t index = tlb_index(env, mmu_idx, addr); 1866eed56642SAlex Bennée CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); 1867eed56642SAlex Bennée target_ulong tlb_addr = code_read ? entry->addr_code : entry->addr_read; 1868eed56642SAlex Bennée const size_t tlb_off = code_read ? 1869eed56642SAlex Bennée offsetof(CPUTLBEntry, addr_code) : offsetof(CPUTLBEntry, addr_read); 1870f1be3696SRichard Henderson const MMUAccessType access_type = 1871f1be3696SRichard Henderson code_read ? MMU_INST_FETCH : MMU_DATA_LOAD; 1872eed56642SAlex Bennée unsigned a_bits = get_alignment_bits(get_memop(oi)); 1873eed56642SAlex Bennée void *haddr; 1874eed56642SAlex Bennée uint64_t res; 1875be5c4787STony Nguyen size_t size = memop_size(op); 1876d9bb58e5SYang Zhong 1877eed56642SAlex Bennée /* Handle CPU specific unaligned behaviour */ 1878eed56642SAlex Bennée if (addr & ((1 << a_bits) - 1)) { 187929a0af61SRichard Henderson cpu_unaligned_access(env_cpu(env), addr, access_type, 1880eed56642SAlex Bennée mmu_idx, retaddr); 1881eed56642SAlex Bennée } 1882eed56642SAlex Bennée 1883eed56642SAlex Bennée /* If the TLB entry is for a different page, reload and try again. */ 1884eed56642SAlex Bennée if (!tlb_hit(tlb_addr, addr)) { 1885eed56642SAlex Bennée if (!victim_tlb_hit(env, mmu_idx, index, tlb_off, 1886eed56642SAlex Bennée addr & TARGET_PAGE_MASK)) { 188729a0af61SRichard Henderson tlb_fill(env_cpu(env), addr, size, 1888f1be3696SRichard Henderson access_type, mmu_idx, retaddr); 1889eed56642SAlex Bennée index = tlb_index(env, mmu_idx, addr); 1890eed56642SAlex Bennée entry = tlb_entry(env, mmu_idx, addr); 1891eed56642SAlex Bennée } 1892eed56642SAlex Bennée tlb_addr = code_read ? 
entry->addr_code : entry->addr_read; 189330d7e098SRichard Henderson tlb_addr &= ~TLB_INVALID_MASK; 1894eed56642SAlex Bennée } 1895eed56642SAlex Bennée 189650b107c5SRichard Henderson /* Handle anything that isn't just a straight memory access. */ 1897eed56642SAlex Bennée if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) { 189850b107c5SRichard Henderson CPUIOTLBEntry *iotlbentry; 18995b87b3e6SRichard Henderson bool need_swap; 190050b107c5SRichard Henderson 190150b107c5SRichard Henderson /* For anything that is unaligned, recurse through full_load. */ 1902eed56642SAlex Bennée if ((addr & (size - 1)) != 0) { 1903eed56642SAlex Bennée goto do_unaligned_access; 1904eed56642SAlex Bennée } 190550b107c5SRichard Henderson 190650b107c5SRichard Henderson iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index]; 190750b107c5SRichard Henderson 190850b107c5SRichard Henderson /* Handle watchpoints. */ 190950b107c5SRichard Henderson if (unlikely(tlb_addr & TLB_WATCHPOINT)) { 191050b107c5SRichard Henderson /* On watchpoint hit, this will longjmp out. */ 191150b107c5SRichard Henderson cpu_check_watchpoint(env_cpu(env), addr, size, 191250b107c5SRichard Henderson iotlbentry->attrs, BP_MEM_READ, retaddr); 19135b87b3e6SRichard Henderson } 191450b107c5SRichard Henderson 19155b87b3e6SRichard Henderson need_swap = size > 1 && (tlb_addr & TLB_BSWAP); 191650b107c5SRichard Henderson 191750b107c5SRichard Henderson /* Handle I/O access. */ 19185b87b3e6SRichard Henderson if (likely(tlb_addr & TLB_MMIO)) { 19195b87b3e6SRichard Henderson return io_readx(env, iotlbentry, mmu_idx, addr, retaddr, 19205b87b3e6SRichard Henderson access_type, op ^ (need_swap * MO_BSWAP)); 19215b87b3e6SRichard Henderson } 19225b87b3e6SRichard Henderson 19235b87b3e6SRichard Henderson haddr = (void *)((uintptr_t)addr + entry->addend); 19245b87b3e6SRichard Henderson 19255b87b3e6SRichard Henderson /* 19265b87b3e6SRichard Henderson * Keep these two load_memop separate to ensure that the compiler 19275b87b3e6SRichard Henderson * is able to fold the entire function to a single instruction. 19285b87b3e6SRichard Henderson * There is a build-time assert inside to remind you of this. ;-) 19295b87b3e6SRichard Henderson */ 19305b87b3e6SRichard Henderson if (unlikely(need_swap)) { 19315b87b3e6SRichard Henderson return load_memop(haddr, op ^ MO_BSWAP); 19325b87b3e6SRichard Henderson } 19335b87b3e6SRichard Henderson return load_memop(haddr, op); 1934eed56642SAlex Bennée } 1935eed56642SAlex Bennée 1936eed56642SAlex Bennée /* Handle slow unaligned access (it spans two pages or IO). */ 1937eed56642SAlex Bennée if (size > 1 1938eed56642SAlex Bennée && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1 1939eed56642SAlex Bennée >= TARGET_PAGE_SIZE)) { 1940eed56642SAlex Bennée target_ulong addr1, addr2; 19418c79b288SAlex Bennée uint64_t r1, r2; 1942eed56642SAlex Bennée unsigned shift; 1943eed56642SAlex Bennée do_unaligned_access: 1944ab7a2009SAlex Bennée addr1 = addr & ~((target_ulong)size - 1); 1945eed56642SAlex Bennée addr2 = addr1 + size; 19462dd92606SRichard Henderson r1 = full_load(env, addr1, oi, retaddr); 19472dd92606SRichard Henderson r2 = full_load(env, addr2, oi, retaddr); 1948eed56642SAlex Bennée shift = (addr & (size - 1)) * 8; 1949eed56642SAlex Bennée 1950be5c4787STony Nguyen if (memop_big_endian(op)) { 1951eed56642SAlex Bennée /* Big-endian combine. */ 1952eed56642SAlex Bennée res = (r1 << shift) | (r2 >> ((size * 8) - shift)); 1953eed56642SAlex Bennée } else { 1954eed56642SAlex Bennée /* Little-endian combine. 
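 * e.g. for a 4-byte load at addr1 + 2, shift = 16: the two high
 * bytes of r1 become the low half of res and the two low bytes of
 * r2 become the high half, before masking to size * 8 bits.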
*/ 1955eed56642SAlex Bennée res = (r1 >> shift) | (r2 << ((size * 8) - shift)); 1956eed56642SAlex Bennée } 1957eed56642SAlex Bennée return res & MAKE_64BIT_MASK(0, size * 8); 1958eed56642SAlex Bennée } 1959eed56642SAlex Bennée 1960eed56642SAlex Bennée haddr = (void *)((uintptr_t)addr + entry->addend); 196180d9d1c6SRichard Henderson return load_memop(haddr, op); 1962eed56642SAlex Bennée } 1963eed56642SAlex Bennée 1964eed56642SAlex Bennée /* 1965eed56642SAlex Bennée * For the benefit of TCG generated code, we want to avoid the 1966eed56642SAlex Bennée * complication of ABI-specific return type promotion and always 1967eed56642SAlex Bennée * return a value extended to the register size of the host. This is 1968eed56642SAlex Bennée * tcg_target_long, except in the case of a 32-bit host and 64-bit 1969eed56642SAlex Bennée * data, and for that we always have uint64_t. 1970eed56642SAlex Bennée * 1971eed56642SAlex Bennée * We don't bother with this widened value for SOFTMMU_CODE_ACCESS. 1972eed56642SAlex Bennée */ 1973eed56642SAlex Bennée 19742dd92606SRichard Henderson static uint64_t full_ldub_mmu(CPUArchState *env, target_ulong addr, 19752dd92606SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 19762dd92606SRichard Henderson { 1977be5c4787STony Nguyen return load_helper(env, addr, oi, retaddr, MO_UB, false, full_ldub_mmu); 19782dd92606SRichard Henderson } 19792dd92606SRichard Henderson 1980fc1bc777SRichard Henderson tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr, 1981fc1bc777SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 1982eed56642SAlex Bennée { 19832dd92606SRichard Henderson return full_ldub_mmu(env, addr, oi, retaddr); 19842dd92606SRichard Henderson } 19852dd92606SRichard Henderson 19862dd92606SRichard Henderson static uint64_t full_le_lduw_mmu(CPUArchState *env, target_ulong addr, 19872dd92606SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 19882dd92606SRichard Henderson { 1989be5c4787STony Nguyen return load_helper(env, addr, oi, retaddr, MO_LEUW, false, 19902dd92606SRichard Henderson full_le_lduw_mmu); 1991eed56642SAlex Bennée } 1992eed56642SAlex Bennée 1993fc1bc777SRichard Henderson tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr, 1994fc1bc777SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 1995eed56642SAlex Bennée { 19962dd92606SRichard Henderson return full_le_lduw_mmu(env, addr, oi, retaddr); 19972dd92606SRichard Henderson } 19982dd92606SRichard Henderson 19992dd92606SRichard Henderson static uint64_t full_be_lduw_mmu(CPUArchState *env, target_ulong addr, 20002dd92606SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 20012dd92606SRichard Henderson { 2002be5c4787STony Nguyen return load_helper(env, addr, oi, retaddr, MO_BEUW, false, 20032dd92606SRichard Henderson full_be_lduw_mmu); 2004eed56642SAlex Bennée } 2005eed56642SAlex Bennée 2006fc1bc777SRichard Henderson tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr, 2007fc1bc777SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 2008eed56642SAlex Bennée { 20092dd92606SRichard Henderson return full_be_lduw_mmu(env, addr, oi, retaddr); 20102dd92606SRichard Henderson } 20112dd92606SRichard Henderson 20122dd92606SRichard Henderson static uint64_t full_le_ldul_mmu(CPUArchState *env, target_ulong addr, 20132dd92606SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 20142dd92606SRichard Henderson { 2015be5c4787STony Nguyen return load_helper(env, addr, oi, retaddr, MO_LEUL, false, 20162dd92606SRichard Henderson full_le_ldul_mmu); 
2017eed56642SAlex Bennée } 2018eed56642SAlex Bennée 2019fc1bc777SRichard Henderson tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr, 2020fc1bc777SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 2021eed56642SAlex Bennée { 20222dd92606SRichard Henderson return full_le_ldul_mmu(env, addr, oi, retaddr); 20232dd92606SRichard Henderson } 20242dd92606SRichard Henderson 20252dd92606SRichard Henderson static uint64_t full_be_ldul_mmu(CPUArchState *env, target_ulong addr, 20262dd92606SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 20272dd92606SRichard Henderson { 2028be5c4787STony Nguyen return load_helper(env, addr, oi, retaddr, MO_BEUL, false, 20292dd92606SRichard Henderson full_be_ldul_mmu); 2030eed56642SAlex Bennée } 2031eed56642SAlex Bennée 2032fc1bc777SRichard Henderson tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr, 2033fc1bc777SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 2034eed56642SAlex Bennée { 20352dd92606SRichard Henderson return full_be_ldul_mmu(env, addr, oi, retaddr); 2036eed56642SAlex Bennée } 2037eed56642SAlex Bennée 2038fc1bc777SRichard Henderson uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr, 2039fc1bc777SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 2040eed56642SAlex Bennée { 2041be5c4787STony Nguyen return load_helper(env, addr, oi, retaddr, MO_LEQ, false, 20422dd92606SRichard Henderson helper_le_ldq_mmu); 2043eed56642SAlex Bennée } 2044eed56642SAlex Bennée 2045fc1bc777SRichard Henderson uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr, 2046fc1bc777SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 2047eed56642SAlex Bennée { 2048be5c4787STony Nguyen return load_helper(env, addr, oi, retaddr, MO_BEQ, false, 20492dd92606SRichard Henderson helper_be_ldq_mmu); 2050eed56642SAlex Bennée } 2051eed56642SAlex Bennée 2052eed56642SAlex Bennée /* 2053eed56642SAlex Bennée * Provide signed versions of the load routines as well. We can of course 2054eed56642SAlex Bennée * avoid this for 64-bit data, or for 32-bit data on 32-bit host. 
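 * Each signed variant simply narrows the unsigned result with a signed
 * cast and lets the tcg_target_ulong return type re-widen it with sign
 * extension.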
2055eed56642SAlex Bennée */ 2056eed56642SAlex Bennée 2057eed56642SAlex Bennée 2058eed56642SAlex Bennée tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr, 2059eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 2060eed56642SAlex Bennée { 2061eed56642SAlex Bennée return (int8_t)helper_ret_ldub_mmu(env, addr, oi, retaddr); 2062eed56642SAlex Bennée } 2063eed56642SAlex Bennée 2064eed56642SAlex Bennée tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr, 2065eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 2066eed56642SAlex Bennée { 2067eed56642SAlex Bennée return (int16_t)helper_le_lduw_mmu(env, addr, oi, retaddr); 2068eed56642SAlex Bennée } 2069eed56642SAlex Bennée 2070eed56642SAlex Bennée tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr, 2071eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 2072eed56642SAlex Bennée { 2073eed56642SAlex Bennée return (int16_t)helper_be_lduw_mmu(env, addr, oi, retaddr); 2074eed56642SAlex Bennée } 2075eed56642SAlex Bennée 2076eed56642SAlex Bennée tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr, 2077eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 2078eed56642SAlex Bennée { 2079eed56642SAlex Bennée return (int32_t)helper_le_ldul_mmu(env, addr, oi, retaddr); 2080eed56642SAlex Bennée } 2081eed56642SAlex Bennée 2082eed56642SAlex Bennée tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr, 2083eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 2084eed56642SAlex Bennée { 2085eed56642SAlex Bennée return (int32_t)helper_be_ldul_mmu(env, addr, oi, retaddr); 2086eed56642SAlex Bennée } 2087eed56642SAlex Bennée 2088eed56642SAlex Bennée /* 2089d03f1408SRichard Henderson * Load helpers for cpu_ldst.h. 
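 *
 * These wrap the full_*_mmu helpers above with memory-access tracing
 * and plugin callbacks.  An illustrative call from a target helper
 * (using names defined in this file) would be:
 *
 *     uint32_t val = cpu_ldl_be_mmuidx_ra(env, addr, mmu_idx, GETPC());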
2090d03f1408SRichard Henderson */ 2091d03f1408SRichard Henderson 2092d03f1408SRichard Henderson static inline uint64_t cpu_load_helper(CPUArchState *env, abi_ptr addr, 2093d03f1408SRichard Henderson int mmu_idx, uintptr_t retaddr, 2094d03f1408SRichard Henderson MemOp op, FullLoadHelper *full_load) 2095d03f1408SRichard Henderson { 2096d03f1408SRichard Henderson uint16_t meminfo; 2097d03f1408SRichard Henderson TCGMemOpIdx oi; 2098d03f1408SRichard Henderson uint64_t ret; 2099d03f1408SRichard Henderson 2100d03f1408SRichard Henderson meminfo = trace_mem_get_info(op, mmu_idx, false); 2101d03f1408SRichard Henderson trace_guest_mem_before_exec(env_cpu(env), addr, meminfo); 2102d03f1408SRichard Henderson 2103d03f1408SRichard Henderson op &= ~MO_SIGN; 2104d03f1408SRichard Henderson oi = make_memop_idx(op, mmu_idx); 2105d03f1408SRichard Henderson ret = full_load(env, addr, oi, retaddr); 2106d03f1408SRichard Henderson 2107d03f1408SRichard Henderson qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, meminfo); 2108d03f1408SRichard Henderson 2109d03f1408SRichard Henderson return ret; 2110d03f1408SRichard Henderson } 2111d03f1408SRichard Henderson 2112d03f1408SRichard Henderson uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr, 2113d03f1408SRichard Henderson int mmu_idx, uintptr_t ra) 2114d03f1408SRichard Henderson { 2115d03f1408SRichard Henderson return cpu_load_helper(env, addr, mmu_idx, ra, MO_UB, full_ldub_mmu); 2116d03f1408SRichard Henderson } 2117d03f1408SRichard Henderson 2118d03f1408SRichard Henderson int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr, 2119d03f1408SRichard Henderson int mmu_idx, uintptr_t ra) 2120d03f1408SRichard Henderson { 2121d03f1408SRichard Henderson return (int8_t)cpu_load_helper(env, addr, mmu_idx, ra, MO_SB, 2122d03f1408SRichard Henderson full_ldub_mmu); 2123d03f1408SRichard Henderson } 2124d03f1408SRichard Henderson 2125b9e60257SRichard Henderson uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, 2126d03f1408SRichard Henderson int mmu_idx, uintptr_t ra) 2127d03f1408SRichard Henderson { 2128b9e60257SRichard Henderson return cpu_load_helper(env, addr, mmu_idx, ra, MO_BEUW, full_be_lduw_mmu); 2129d03f1408SRichard Henderson } 2130d03f1408SRichard Henderson 2131b9e60257SRichard Henderson int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, 2132d03f1408SRichard Henderson int mmu_idx, uintptr_t ra) 2133d03f1408SRichard Henderson { 2134b9e60257SRichard Henderson return (int16_t)cpu_load_helper(env, addr, mmu_idx, ra, MO_BESW, 2135b9e60257SRichard Henderson full_be_lduw_mmu); 2136d03f1408SRichard Henderson } 2137d03f1408SRichard Henderson 2138b9e60257SRichard Henderson uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, 2139d03f1408SRichard Henderson int mmu_idx, uintptr_t ra) 2140d03f1408SRichard Henderson { 2141b9e60257SRichard Henderson return cpu_load_helper(env, addr, mmu_idx, ra, MO_BEUL, full_be_ldul_mmu); 2142d03f1408SRichard Henderson } 2143d03f1408SRichard Henderson 2144b9e60257SRichard Henderson uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, 2145d03f1408SRichard Henderson int mmu_idx, uintptr_t ra) 2146d03f1408SRichard Henderson { 2147b9e60257SRichard Henderson return cpu_load_helper(env, addr, mmu_idx, ra, MO_BEQ, helper_be_ldq_mmu); 2148b9e60257SRichard Henderson } 2149b9e60257SRichard Henderson 2150b9e60257SRichard Henderson uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, 2151b9e60257SRichard Henderson int mmu_idx, uintptr_t ra) 2152b9e60257SRichard Henderson { 2153b9e60257SRichard Henderson return 
cpu_load_helper(env, addr, mmu_idx, ra, MO_LEUW, full_le_lduw_mmu); 2154b9e60257SRichard Henderson } 2155b9e60257SRichard Henderson 2156b9e60257SRichard Henderson int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, 2157b9e60257SRichard Henderson int mmu_idx, uintptr_t ra) 2158b9e60257SRichard Henderson { 2159b9e60257SRichard Henderson return (int16_t)cpu_load_helper(env, addr, mmu_idx, ra, MO_LESW, 2160b9e60257SRichard Henderson full_le_lduw_mmu); 2161b9e60257SRichard Henderson } 2162b9e60257SRichard Henderson 2163b9e60257SRichard Henderson uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, 2164b9e60257SRichard Henderson int mmu_idx, uintptr_t ra) 2165b9e60257SRichard Henderson { 2166b9e60257SRichard Henderson return cpu_load_helper(env, addr, mmu_idx, ra, MO_LEUL, full_le_ldul_mmu); 2167b9e60257SRichard Henderson } 2168b9e60257SRichard Henderson 2169b9e60257SRichard Henderson uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, 2170b9e60257SRichard Henderson int mmu_idx, uintptr_t ra) 2171b9e60257SRichard Henderson { 2172b9e60257SRichard Henderson return cpu_load_helper(env, addr, mmu_idx, ra, MO_LEQ, helper_le_ldq_mmu); 2173d03f1408SRichard Henderson } 2174d03f1408SRichard Henderson 2175cfe04a4bSRichard Henderson uint32_t cpu_ldub_data_ra(CPUArchState *env, target_ulong ptr, 2176cfe04a4bSRichard Henderson uintptr_t retaddr) 2177cfe04a4bSRichard Henderson { 2178cfe04a4bSRichard Henderson return cpu_ldub_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr); 2179cfe04a4bSRichard Henderson } 2180cfe04a4bSRichard Henderson 2181cfe04a4bSRichard Henderson int cpu_ldsb_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr) 2182cfe04a4bSRichard Henderson { 2183cfe04a4bSRichard Henderson return cpu_ldsb_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr); 2184cfe04a4bSRichard Henderson } 2185cfe04a4bSRichard Henderson 2186b9e60257SRichard Henderson uint32_t cpu_lduw_be_data_ra(CPUArchState *env, target_ulong ptr, 2187cfe04a4bSRichard Henderson uintptr_t retaddr) 2188cfe04a4bSRichard Henderson { 2189b9e60257SRichard Henderson return cpu_lduw_be_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr); 2190cfe04a4bSRichard Henderson } 2191cfe04a4bSRichard Henderson 2192b9e60257SRichard Henderson int cpu_ldsw_be_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr) 2193cfe04a4bSRichard Henderson { 2194b9e60257SRichard Henderson return cpu_ldsw_be_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr); 2195cfe04a4bSRichard Henderson } 2196cfe04a4bSRichard Henderson 2197b9e60257SRichard Henderson uint32_t cpu_ldl_be_data_ra(CPUArchState *env, target_ulong ptr, 2198b9e60257SRichard Henderson uintptr_t retaddr) 2199cfe04a4bSRichard Henderson { 2200b9e60257SRichard Henderson return cpu_ldl_be_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr); 2201cfe04a4bSRichard Henderson } 2202cfe04a4bSRichard Henderson 2203b9e60257SRichard Henderson uint64_t cpu_ldq_be_data_ra(CPUArchState *env, target_ulong ptr, 2204b9e60257SRichard Henderson uintptr_t retaddr) 2205cfe04a4bSRichard Henderson { 2206b9e60257SRichard Henderson return cpu_ldq_be_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr); 2207b9e60257SRichard Henderson } 2208b9e60257SRichard Henderson 2209b9e60257SRichard Henderson uint32_t cpu_lduw_le_data_ra(CPUArchState *env, target_ulong ptr, 2210b9e60257SRichard Henderson uintptr_t retaddr) 2211b9e60257SRichard Henderson { 2212b9e60257SRichard Henderson return cpu_lduw_le_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr); 
2213b9e60257SRichard Henderson } 2214b9e60257SRichard Henderson 2215b9e60257SRichard Henderson int cpu_ldsw_le_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr) 2216b9e60257SRichard Henderson { 2217b9e60257SRichard Henderson return cpu_ldsw_le_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr); 2218b9e60257SRichard Henderson } 2219b9e60257SRichard Henderson 2220b9e60257SRichard Henderson uint32_t cpu_ldl_le_data_ra(CPUArchState *env, target_ulong ptr, 2221b9e60257SRichard Henderson uintptr_t retaddr) 2222b9e60257SRichard Henderson { 2223b9e60257SRichard Henderson return cpu_ldl_le_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr); 2224b9e60257SRichard Henderson } 2225b9e60257SRichard Henderson 2226b9e60257SRichard Henderson uint64_t cpu_ldq_le_data_ra(CPUArchState *env, target_ulong ptr, 2227b9e60257SRichard Henderson uintptr_t retaddr) 2228b9e60257SRichard Henderson { 2229b9e60257SRichard Henderson return cpu_ldq_le_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr); 2230cfe04a4bSRichard Henderson } 2231cfe04a4bSRichard Henderson 2232cfe04a4bSRichard Henderson uint32_t cpu_ldub_data(CPUArchState *env, target_ulong ptr) 2233cfe04a4bSRichard Henderson { 2234cfe04a4bSRichard Henderson return cpu_ldub_data_ra(env, ptr, 0); 2235cfe04a4bSRichard Henderson } 2236cfe04a4bSRichard Henderson 2237cfe04a4bSRichard Henderson int cpu_ldsb_data(CPUArchState *env, target_ulong ptr) 2238cfe04a4bSRichard Henderson { 2239cfe04a4bSRichard Henderson return cpu_ldsb_data_ra(env, ptr, 0); 2240cfe04a4bSRichard Henderson } 2241cfe04a4bSRichard Henderson 2242b9e60257SRichard Henderson uint32_t cpu_lduw_be_data(CPUArchState *env, target_ulong ptr) 2243cfe04a4bSRichard Henderson { 2244b9e60257SRichard Henderson return cpu_lduw_be_data_ra(env, ptr, 0); 2245cfe04a4bSRichard Henderson } 2246cfe04a4bSRichard Henderson 2247b9e60257SRichard Henderson int cpu_ldsw_be_data(CPUArchState *env, target_ulong ptr) 2248cfe04a4bSRichard Henderson { 2249b9e60257SRichard Henderson return cpu_ldsw_be_data_ra(env, ptr, 0); 2250cfe04a4bSRichard Henderson } 2251cfe04a4bSRichard Henderson 2252b9e60257SRichard Henderson uint32_t cpu_ldl_be_data(CPUArchState *env, target_ulong ptr) 2253cfe04a4bSRichard Henderson { 2254b9e60257SRichard Henderson return cpu_ldl_be_data_ra(env, ptr, 0); 2255cfe04a4bSRichard Henderson } 2256cfe04a4bSRichard Henderson 2257b9e60257SRichard Henderson uint64_t cpu_ldq_be_data(CPUArchState *env, target_ulong ptr) 2258cfe04a4bSRichard Henderson { 2259b9e60257SRichard Henderson return cpu_ldq_be_data_ra(env, ptr, 0); 2260b9e60257SRichard Henderson } 2261b9e60257SRichard Henderson 2262b9e60257SRichard Henderson uint32_t cpu_lduw_le_data(CPUArchState *env, target_ulong ptr) 2263b9e60257SRichard Henderson { 2264b9e60257SRichard Henderson return cpu_lduw_le_data_ra(env, ptr, 0); 2265b9e60257SRichard Henderson } 2266b9e60257SRichard Henderson 2267b9e60257SRichard Henderson int cpu_ldsw_le_data(CPUArchState *env, target_ulong ptr) 2268b9e60257SRichard Henderson { 2269b9e60257SRichard Henderson return cpu_ldsw_le_data_ra(env, ptr, 0); 2270b9e60257SRichard Henderson } 2271b9e60257SRichard Henderson 2272b9e60257SRichard Henderson uint32_t cpu_ldl_le_data(CPUArchState *env, target_ulong ptr) 2273b9e60257SRichard Henderson { 2274b9e60257SRichard Henderson return cpu_ldl_le_data_ra(env, ptr, 0); 2275b9e60257SRichard Henderson } 2276b9e60257SRichard Henderson 2277b9e60257SRichard Henderson uint64_t cpu_ldq_le_data(CPUArchState *env, target_ulong ptr) 2278b9e60257SRichard Henderson { 
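    /* A zero retaddr marks the call as coming from outside generated
       code, so no unwinding of the guest state is attempted. */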
2279b9e60257SRichard Henderson return cpu_ldq_le_data_ra(env, ptr, 0); 2280cfe04a4bSRichard Henderson } 2281cfe04a4bSRichard Henderson 2282d03f1408SRichard Henderson /* 2283eed56642SAlex Bennée * Store Helpers 2284eed56642SAlex Bennée */ 2285eed56642SAlex Bennée 2286c6b716cdSRichard Henderson static inline void QEMU_ALWAYS_INLINE 228780d9d1c6SRichard Henderson store_memop(void *haddr, uint64_t val, MemOp op) 228880d9d1c6SRichard Henderson { 228980d9d1c6SRichard Henderson switch (op) { 229080d9d1c6SRichard Henderson case MO_UB: 229180d9d1c6SRichard Henderson stb_p(haddr, val); 229280d9d1c6SRichard Henderson break; 229380d9d1c6SRichard Henderson case MO_BEUW: 229480d9d1c6SRichard Henderson stw_be_p(haddr, val); 229580d9d1c6SRichard Henderson break; 229680d9d1c6SRichard Henderson case MO_LEUW: 229780d9d1c6SRichard Henderson stw_le_p(haddr, val); 229880d9d1c6SRichard Henderson break; 229980d9d1c6SRichard Henderson case MO_BEUL: 230080d9d1c6SRichard Henderson stl_be_p(haddr, val); 230180d9d1c6SRichard Henderson break; 230280d9d1c6SRichard Henderson case MO_LEUL: 230380d9d1c6SRichard Henderson stl_le_p(haddr, val); 230480d9d1c6SRichard Henderson break; 230580d9d1c6SRichard Henderson case MO_BEQ: 230680d9d1c6SRichard Henderson stq_be_p(haddr, val); 230780d9d1c6SRichard Henderson break; 230880d9d1c6SRichard Henderson case MO_LEQ: 230980d9d1c6SRichard Henderson stq_le_p(haddr, val); 231080d9d1c6SRichard Henderson break; 231180d9d1c6SRichard Henderson default: 231280d9d1c6SRichard Henderson qemu_build_not_reached(); 231380d9d1c6SRichard Henderson } 231480d9d1c6SRichard Henderson } 231580d9d1c6SRichard Henderson 23166b8b622eSRichard Henderson static void __attribute__((noinline)) 23176b8b622eSRichard Henderson store_helper_unaligned(CPUArchState *env, target_ulong addr, uint64_t val, 23186b8b622eSRichard Henderson uintptr_t retaddr, size_t size, uintptr_t mmu_idx, 23196b8b622eSRichard Henderson bool big_endian) 23206b8b622eSRichard Henderson { 23216b8b622eSRichard Henderson const size_t tlb_off = offsetof(CPUTLBEntry, addr_write); 23226b8b622eSRichard Henderson uintptr_t index, index2; 23236b8b622eSRichard Henderson CPUTLBEntry *entry, *entry2; 23246b8b622eSRichard Henderson target_ulong page2, tlb_addr, tlb_addr2; 23256b8b622eSRichard Henderson TCGMemOpIdx oi; 23266b8b622eSRichard Henderson size_t size2; 23276b8b622eSRichard Henderson int i; 23286b8b622eSRichard Henderson 23296b8b622eSRichard Henderson /* 23306b8b622eSRichard Henderson * Ensure the second page is in the TLB. Note that the first page 23316b8b622eSRichard Henderson * is already guaranteed to be filled, and that the second page 23326b8b622eSRichard Henderson * cannot evict the first. 
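 *
 * For example, a 4-byte store whose last three bytes spill onto the
 * next page has addr + size == page2 + 3, so size2 below is 3: one
 * byte is written via the first page and three via page2.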
23336b8b622eSRichard Henderson */ 23346b8b622eSRichard Henderson page2 = (addr + size) & TARGET_PAGE_MASK; 23356b8b622eSRichard Henderson size2 = (addr + size) & ~TARGET_PAGE_MASK; 23366b8b622eSRichard Henderson index2 = tlb_index(env, mmu_idx, page2); 23376b8b622eSRichard Henderson entry2 = tlb_entry(env, mmu_idx, page2); 23386b8b622eSRichard Henderson 23396b8b622eSRichard Henderson tlb_addr2 = tlb_addr_write(entry2); 23406b8b622eSRichard Henderson if (!tlb_hit_page(tlb_addr2, page2)) { 23416b8b622eSRichard Henderson if (!victim_tlb_hit(env, mmu_idx, index2, tlb_off, page2)) { 23426b8b622eSRichard Henderson tlb_fill(env_cpu(env), page2, size2, MMU_DATA_STORE, 23436b8b622eSRichard Henderson mmu_idx, retaddr); 23446b8b622eSRichard Henderson index2 = tlb_index(env, mmu_idx, page2); 23456b8b622eSRichard Henderson entry2 = tlb_entry(env, mmu_idx, page2); 23466b8b622eSRichard Henderson } 23476b8b622eSRichard Henderson tlb_addr2 = tlb_addr_write(entry2); 23486b8b622eSRichard Henderson } 23496b8b622eSRichard Henderson 23506b8b622eSRichard Henderson index = tlb_index(env, mmu_idx, addr); 23516b8b622eSRichard Henderson entry = tlb_entry(env, mmu_idx, addr); 23526b8b622eSRichard Henderson tlb_addr = tlb_addr_write(entry); 23536b8b622eSRichard Henderson 23546b8b622eSRichard Henderson /* 23556b8b622eSRichard Henderson * Handle watchpoints. Since this may trap, all checks 23566b8b622eSRichard Henderson * must happen before any store. 23576b8b622eSRichard Henderson */ 23586b8b622eSRichard Henderson if (unlikely(tlb_addr & TLB_WATCHPOINT)) { 23596b8b622eSRichard Henderson cpu_check_watchpoint(env_cpu(env), addr, size - size2, 23606b8b622eSRichard Henderson env_tlb(env)->d[mmu_idx].iotlb[index].attrs, 23616b8b622eSRichard Henderson BP_MEM_WRITE, retaddr); 23626b8b622eSRichard Henderson } 23636b8b622eSRichard Henderson if (unlikely(tlb_addr2 & TLB_WATCHPOINT)) { 23646b8b622eSRichard Henderson cpu_check_watchpoint(env_cpu(env), page2, size2, 23656b8b622eSRichard Henderson env_tlb(env)->d[mmu_idx].iotlb[index2].attrs, 23666b8b622eSRichard Henderson BP_MEM_WRITE, retaddr); 23676b8b622eSRichard Henderson } 23686b8b622eSRichard Henderson 23696b8b622eSRichard Henderson /* 23706b8b622eSRichard Henderson * XXX: not efficient, but simple. 23716b8b622eSRichard Henderson * This loop must go in the forward direction to avoid issues 23726b8b622eSRichard Henderson * with self-modifying code in Windows 64-bit. 23736b8b622eSRichard Henderson */ 23746b8b622eSRichard Henderson oi = make_memop_idx(MO_UB, mmu_idx); 23756b8b622eSRichard Henderson if (big_endian) { 23766b8b622eSRichard Henderson for (i = 0; i < size; ++i) { 23776b8b622eSRichard Henderson /* Big-endian extract. */ 23786b8b622eSRichard Henderson uint8_t val8 = val >> (((size - 1) * 8) - (i * 8)); 23796b8b622eSRichard Henderson helper_ret_stb_mmu(env, addr + i, val8, oi, retaddr); 23806b8b622eSRichard Henderson } 23816b8b622eSRichard Henderson } else { 23826b8b622eSRichard Henderson for (i = 0; i < size; ++i) { 23836b8b622eSRichard Henderson /* Little-endian extract. 
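               For a 4-byte val of 0x11223344 this stores 0x44 at
               addr, 0x33 at addr + 1, and so on.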
*/ 23846b8b622eSRichard Henderson uint8_t val8 = val >> (i * 8); 23856b8b622eSRichard Henderson helper_ret_stb_mmu(env, addr + i, val8, oi, retaddr); 23866b8b622eSRichard Henderson } 23876b8b622eSRichard Henderson } 23886b8b622eSRichard Henderson } 23896b8b622eSRichard Henderson 239080d9d1c6SRichard Henderson static inline void QEMU_ALWAYS_INLINE 23914601f8d1SRichard Henderson store_helper(CPUArchState *env, target_ulong addr, uint64_t val, 2392be5c4787STony Nguyen TCGMemOpIdx oi, uintptr_t retaddr, MemOp op) 2393eed56642SAlex Bennée { 2394eed56642SAlex Bennée uintptr_t mmu_idx = get_mmuidx(oi); 2395eed56642SAlex Bennée uintptr_t index = tlb_index(env, mmu_idx, addr); 2396eed56642SAlex Bennée CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); 2397eed56642SAlex Bennée target_ulong tlb_addr = tlb_addr_write(entry); 2398eed56642SAlex Bennée const size_t tlb_off = offsetof(CPUTLBEntry, addr_write); 2399eed56642SAlex Bennée unsigned a_bits = get_alignment_bits(get_memop(oi)); 2400eed56642SAlex Bennée void *haddr; 2401be5c4787STony Nguyen size_t size = memop_size(op); 2402eed56642SAlex Bennée 2403eed56642SAlex Bennée /* Handle CPU specific unaligned behaviour */ 2404eed56642SAlex Bennée if (addr & ((1 << a_bits) - 1)) { 240529a0af61SRichard Henderson cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE, 2406eed56642SAlex Bennée mmu_idx, retaddr); 2407eed56642SAlex Bennée } 2408eed56642SAlex Bennée 2409eed56642SAlex Bennée /* If the TLB entry is for a different page, reload and try again. */ 2410eed56642SAlex Bennée if (!tlb_hit(tlb_addr, addr)) { 2411eed56642SAlex Bennée if (!victim_tlb_hit(env, mmu_idx, index, tlb_off, 2412eed56642SAlex Bennée addr & TARGET_PAGE_MASK)) { 241329a0af61SRichard Henderson tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE, 2414eed56642SAlex Bennée mmu_idx, retaddr); 2415eed56642SAlex Bennée index = tlb_index(env, mmu_idx, addr); 2416eed56642SAlex Bennée entry = tlb_entry(env, mmu_idx, addr); 2417eed56642SAlex Bennée } 2418eed56642SAlex Bennée tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK; 2419eed56642SAlex Bennée } 2420eed56642SAlex Bennée 242150b107c5SRichard Henderson /* Handle anything that isn't just a straight memory access. */ 2422eed56642SAlex Bennée if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) { 242350b107c5SRichard Henderson CPUIOTLBEntry *iotlbentry; 24245b87b3e6SRichard Henderson bool need_swap; 242550b107c5SRichard Henderson 242650b107c5SRichard Henderson /* For anything that is unaligned, recurse through byte stores. */ 2427eed56642SAlex Bennée if ((addr & (size - 1)) != 0) { 2428eed56642SAlex Bennée goto do_unaligned_access; 2429eed56642SAlex Bennée } 243050b107c5SRichard Henderson 243150b107c5SRichard Henderson iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index]; 243250b107c5SRichard Henderson 243350b107c5SRichard Henderson /* Handle watchpoints. */ 243450b107c5SRichard Henderson if (unlikely(tlb_addr & TLB_WATCHPOINT)) { 243550b107c5SRichard Henderson /* On watchpoint hit, this will longjmp out. */ 243650b107c5SRichard Henderson cpu_check_watchpoint(env_cpu(env), addr, size, 243750b107c5SRichard Henderson iotlbentry->attrs, BP_MEM_WRITE, retaddr); 24385b87b3e6SRichard Henderson } 243950b107c5SRichard Henderson 24405b87b3e6SRichard Henderson need_swap = size > 1 && (tlb_addr & TLB_BSWAP); 244150b107c5SRichard Henderson 244250b107c5SRichard Henderson /* Handle I/O access. 
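       If TLB_BSWAP was set above, need_swap flips the MO_BSWAP bit
       in op so the I/O access is performed with swapped endianness.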
*/ 244308565552SRichard Henderson if (tlb_addr & TLB_MMIO) { 24445b87b3e6SRichard Henderson io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr, 24455b87b3e6SRichard Henderson op ^ (need_swap * MO_BSWAP)); 24465b87b3e6SRichard Henderson return; 24475b87b3e6SRichard Henderson } 24485b87b3e6SRichard Henderson 24497b0d792cSRichard Henderson /* Ignore writes to ROM. */ 24507b0d792cSRichard Henderson if (unlikely(tlb_addr & TLB_DISCARD_WRITE)) { 24517b0d792cSRichard Henderson return; 24527b0d792cSRichard Henderson } 24537b0d792cSRichard Henderson 245408565552SRichard Henderson /* Handle clean RAM pages. */ 245508565552SRichard Henderson if (tlb_addr & TLB_NOTDIRTY) { 2456707526adSRichard Henderson notdirty_write(env_cpu(env), addr, size, iotlbentry, retaddr); 245708565552SRichard Henderson } 245808565552SRichard Henderson 2459707526adSRichard Henderson haddr = (void *)((uintptr_t)addr + entry->addend); 246008565552SRichard Henderson 24615b87b3e6SRichard Henderson /* 24625b87b3e6SRichard Henderson * Keep these two store_memop separate to ensure that the compiler 24635b87b3e6SRichard Henderson * is able to fold the entire function to a single instruction. 24645b87b3e6SRichard Henderson * There is a build-time assert inside to remind you of this. ;-) 24655b87b3e6SRichard Henderson */ 24665b87b3e6SRichard Henderson if (unlikely(need_swap)) { 24675b87b3e6SRichard Henderson store_memop(haddr, val, op ^ MO_BSWAP); 24685b87b3e6SRichard Henderson } else { 24695b87b3e6SRichard Henderson store_memop(haddr, val, op); 24705b87b3e6SRichard Henderson } 2471eed56642SAlex Bennée return; 2472eed56642SAlex Bennée } 2473eed56642SAlex Bennée 2474eed56642SAlex Bennée /* Handle slow unaligned access (it spans two pages or IO). */ 2475eed56642SAlex Bennée if (size > 1 2476eed56642SAlex Bennée && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1 2477eed56642SAlex Bennée >= TARGET_PAGE_SIZE)) { 2478eed56642SAlex Bennée do_unaligned_access: 24796b8b622eSRichard Henderson store_helper_unaligned(env, addr, val, retaddr, size, 24806b8b622eSRichard Henderson mmu_idx, memop_big_endian(op)); 2481eed56642SAlex Bennée return; 2482eed56642SAlex Bennée } 2483eed56642SAlex Bennée 2484eed56642SAlex Bennée haddr = (void *)((uintptr_t)addr + entry->addend); 248580d9d1c6SRichard Henderson store_memop(haddr, val, op); 2486eed56642SAlex Bennée } 2487eed56642SAlex Bennée 24886b8b622eSRichard Henderson void __attribute__((noinline)) 24896b8b622eSRichard Henderson helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val, 2490eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 2491eed56642SAlex Bennée { 2492be5c4787STony Nguyen store_helper(env, addr, val, oi, retaddr, MO_UB); 2493eed56642SAlex Bennée } 2494eed56642SAlex Bennée 2495fc1bc777SRichard Henderson void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val, 2496eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 2497eed56642SAlex Bennée { 2498be5c4787STony Nguyen store_helper(env, addr, val, oi, retaddr, MO_LEUW); 2499eed56642SAlex Bennée } 2500eed56642SAlex Bennée 2501fc1bc777SRichard Henderson void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val, 2502eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 2503eed56642SAlex Bennée { 2504be5c4787STony Nguyen store_helper(env, addr, val, oi, retaddr, MO_BEUW); 2505eed56642SAlex Bennée } 2506eed56642SAlex Bennée 2507fc1bc777SRichard Henderson void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, 2508eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 
2509eed56642SAlex Bennée { 2510be5c4787STony Nguyen store_helper(env, addr, val, oi, retaddr, MO_LEUL); 2511eed56642SAlex Bennée } 2512eed56642SAlex Bennée 2513fc1bc777SRichard Henderson void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, 2514eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 2515eed56642SAlex Bennée { 2516be5c4787STony Nguyen store_helper(env, addr, val, oi, retaddr, MO_BEUL); 2517eed56642SAlex Bennée } 2518eed56642SAlex Bennée 2519fc1bc777SRichard Henderson void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, 2520eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 2521eed56642SAlex Bennée { 2522be5c4787STony Nguyen store_helper(env, addr, val, oi, retaddr, MO_LEQ); 2523eed56642SAlex Bennée } 2524eed56642SAlex Bennée 2525fc1bc777SRichard Henderson void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, 2526eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 2527eed56642SAlex Bennée { 2528be5c4787STony Nguyen store_helper(env, addr, val, oi, retaddr, MO_BEQ); 2529eed56642SAlex Bennée } 2530d9bb58e5SYang Zhong 2531d03f1408SRichard Henderson /* 2532d03f1408SRichard Henderson * Store Helpers for cpu_ldst.h 2533d03f1408SRichard Henderson */ 2534d03f1408SRichard Henderson 2535d03f1408SRichard Henderson static inline void QEMU_ALWAYS_INLINE 2536d03f1408SRichard Henderson cpu_store_helper(CPUArchState *env, target_ulong addr, uint64_t val, 2537d03f1408SRichard Henderson int mmu_idx, uintptr_t retaddr, MemOp op) 2538d03f1408SRichard Henderson { 2539d03f1408SRichard Henderson TCGMemOpIdx oi; 2540d03f1408SRichard Henderson uint16_t meminfo; 2541d03f1408SRichard Henderson 2542d03f1408SRichard Henderson meminfo = trace_mem_get_info(op, mmu_idx, true); 2543d03f1408SRichard Henderson trace_guest_mem_before_exec(env_cpu(env), addr, meminfo); 2544d03f1408SRichard Henderson 2545d03f1408SRichard Henderson oi = make_memop_idx(op, mmu_idx); 2546d03f1408SRichard Henderson store_helper(env, addr, val, oi, retaddr, op); 2547d03f1408SRichard Henderson 2548d03f1408SRichard Henderson qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, meminfo); 2549d03f1408SRichard Henderson } 2550d03f1408SRichard Henderson 2551d03f1408SRichard Henderson void cpu_stb_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val, 2552d03f1408SRichard Henderson int mmu_idx, uintptr_t retaddr) 2553d03f1408SRichard Henderson { 2554d03f1408SRichard Henderson cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_UB); 2555d03f1408SRichard Henderson } 2556d03f1408SRichard Henderson 2557b9e60257SRichard Henderson void cpu_stw_be_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val, 2558d03f1408SRichard Henderson int mmu_idx, uintptr_t retaddr) 2559d03f1408SRichard Henderson { 2560b9e60257SRichard Henderson cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_BEUW); 2561d03f1408SRichard Henderson } 2562d03f1408SRichard Henderson 2563b9e60257SRichard Henderson void cpu_stl_be_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val, 2564d03f1408SRichard Henderson int mmu_idx, uintptr_t retaddr) 2565d03f1408SRichard Henderson { 2566b9e60257SRichard Henderson cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_BEUL); 2567d03f1408SRichard Henderson } 2568d03f1408SRichard Henderson 2569b9e60257SRichard Henderson void cpu_stq_be_mmuidx_ra(CPUArchState *env, target_ulong addr, uint64_t val, 2570d03f1408SRichard Henderson int mmu_idx, uintptr_t retaddr) 2571d03f1408SRichard Henderson { 2572b9e60257SRichard Henderson cpu_store_helper(env, addr, val, 
mmu_idx, retaddr, MO_BEQ); 2573b9e60257SRichard Henderson } 2574b9e60257SRichard Henderson 2575b9e60257SRichard Henderson void cpu_stw_le_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val, 2576b9e60257SRichard Henderson int mmu_idx, uintptr_t retaddr) 2577b9e60257SRichard Henderson { 2578b9e60257SRichard Henderson cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_LEUW); 2579b9e60257SRichard Henderson } 2580b9e60257SRichard Henderson 2581b9e60257SRichard Henderson void cpu_stl_le_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val, 2582b9e60257SRichard Henderson int mmu_idx, uintptr_t retaddr) 2583b9e60257SRichard Henderson { 2584b9e60257SRichard Henderson cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_LEUL); 2585b9e60257SRichard Henderson } 2586b9e60257SRichard Henderson 2587b9e60257SRichard Henderson void cpu_stq_le_mmuidx_ra(CPUArchState *env, target_ulong addr, uint64_t val, 2588b9e60257SRichard Henderson int mmu_idx, uintptr_t retaddr) 2589b9e60257SRichard Henderson { 2590b9e60257SRichard Henderson cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_LEQ); 2591d03f1408SRichard Henderson } 2592d03f1408SRichard Henderson 2593cfe04a4bSRichard Henderson void cpu_stb_data_ra(CPUArchState *env, target_ulong ptr, 2594cfe04a4bSRichard Henderson uint32_t val, uintptr_t retaddr) 2595cfe04a4bSRichard Henderson { 2596cfe04a4bSRichard Henderson cpu_stb_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr); 2597cfe04a4bSRichard Henderson } 2598cfe04a4bSRichard Henderson 2599b9e60257SRichard Henderson void cpu_stw_be_data_ra(CPUArchState *env, target_ulong ptr, 2600cfe04a4bSRichard Henderson uint32_t val, uintptr_t retaddr) 2601cfe04a4bSRichard Henderson { 2602b9e60257SRichard Henderson cpu_stw_be_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr); 2603cfe04a4bSRichard Henderson } 2604cfe04a4bSRichard Henderson 2605b9e60257SRichard Henderson void cpu_stl_be_data_ra(CPUArchState *env, target_ulong ptr, 2606cfe04a4bSRichard Henderson uint32_t val, uintptr_t retaddr) 2607cfe04a4bSRichard Henderson { 2608b9e60257SRichard Henderson cpu_stl_be_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr); 2609cfe04a4bSRichard Henderson } 2610cfe04a4bSRichard Henderson 2611b9e60257SRichard Henderson void cpu_stq_be_data_ra(CPUArchState *env, target_ulong ptr, 2612cfe04a4bSRichard Henderson uint64_t val, uintptr_t retaddr) 2613cfe04a4bSRichard Henderson { 2614b9e60257SRichard Henderson cpu_stq_be_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr); 2615b9e60257SRichard Henderson } 2616b9e60257SRichard Henderson 2617b9e60257SRichard Henderson void cpu_stw_le_data_ra(CPUArchState *env, target_ulong ptr, 2618b9e60257SRichard Henderson uint32_t val, uintptr_t retaddr) 2619b9e60257SRichard Henderson { 2620b9e60257SRichard Henderson cpu_stw_le_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr); 2621b9e60257SRichard Henderson } 2622b9e60257SRichard Henderson 2623b9e60257SRichard Henderson void cpu_stl_le_data_ra(CPUArchState *env, target_ulong ptr, 2624b9e60257SRichard Henderson uint32_t val, uintptr_t retaddr) 2625b9e60257SRichard Henderson { 2626b9e60257SRichard Henderson cpu_stl_le_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr); 2627b9e60257SRichard Henderson } 2628b9e60257SRichard Henderson 2629b9e60257SRichard Henderson void cpu_stq_le_data_ra(CPUArchState *env, target_ulong ptr, 2630b9e60257SRichard Henderson uint64_t val, uintptr_t retaddr) 2631b9e60257SRichard Henderson { 2632b9e60257SRichard Henderson cpu_stq_le_mmuidx_ra(env, 
ptr, val, cpu_mmu_index(env, false), retaddr); 2633cfe04a4bSRichard Henderson } 2634cfe04a4bSRichard Henderson 2635cfe04a4bSRichard Henderson void cpu_stb_data(CPUArchState *env, target_ulong ptr, uint32_t val) 2636cfe04a4bSRichard Henderson { 2637cfe04a4bSRichard Henderson cpu_stb_data_ra(env, ptr, val, 0); 2638cfe04a4bSRichard Henderson } 2639cfe04a4bSRichard Henderson 2640b9e60257SRichard Henderson void cpu_stw_be_data(CPUArchState *env, target_ulong ptr, uint32_t val) 2641cfe04a4bSRichard Henderson { 2642b9e60257SRichard Henderson cpu_stw_be_data_ra(env, ptr, val, 0); 2643cfe04a4bSRichard Henderson } 2644cfe04a4bSRichard Henderson 2645b9e60257SRichard Henderson void cpu_stl_be_data(CPUArchState *env, target_ulong ptr, uint32_t val) 2646cfe04a4bSRichard Henderson { 2647b9e60257SRichard Henderson cpu_stl_be_data_ra(env, ptr, val, 0); 2648cfe04a4bSRichard Henderson } 2649cfe04a4bSRichard Henderson 2650b9e60257SRichard Henderson void cpu_stq_be_data(CPUArchState *env, target_ulong ptr, uint64_t val) 2651cfe04a4bSRichard Henderson { 2652b9e60257SRichard Henderson cpu_stq_be_data_ra(env, ptr, val, 0); 2653b9e60257SRichard Henderson } 2654b9e60257SRichard Henderson 2655b9e60257SRichard Henderson void cpu_stw_le_data(CPUArchState *env, target_ulong ptr, uint32_t val) 2656b9e60257SRichard Henderson { 2657b9e60257SRichard Henderson cpu_stw_le_data_ra(env, ptr, val, 0); 2658b9e60257SRichard Henderson } 2659b9e60257SRichard Henderson 2660b9e60257SRichard Henderson void cpu_stl_le_data(CPUArchState *env, target_ulong ptr, uint32_t val) 2661b9e60257SRichard Henderson { 2662b9e60257SRichard Henderson cpu_stl_le_data_ra(env, ptr, val, 0); 2663b9e60257SRichard Henderson } 2664b9e60257SRichard Henderson 2665b9e60257SRichard Henderson void cpu_stq_le_data(CPUArchState *env, target_ulong ptr, uint64_t val) 2666b9e60257SRichard Henderson { 2667b9e60257SRichard Henderson cpu_stq_le_data_ra(env, ptr, val, 0); 2668cfe04a4bSRichard Henderson } 2669cfe04a4bSRichard Henderson 2670d9bb58e5SYang Zhong /* First set of helpers allows passing in of OI and RETADDR. This makes 2671d9bb58e5SYang Zhong them callable from other helpers. 
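   ATOMIC_NAME pastes the operation name, size SUFFIX and endian END
   together; illustratively, expanding the DATA_SIZE 4 template below
   yields helpers such as helper_atomic_cmpxchgl_le_mmu.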
*/ 2672d9bb58e5SYang Zhong 2673d9bb58e5SYang Zhong #define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr 2674d9bb58e5SYang Zhong #define ATOMIC_NAME(X) \ 2675d9bb58e5SYang Zhong HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu)) 2676707526adSRichard Henderson #define ATOMIC_MMU_DECLS 2677707526adSRichard Henderson #define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, retaddr) 2678707526adSRichard Henderson #define ATOMIC_MMU_CLEANUP 2679504f73f7SAlex Bennée #define ATOMIC_MMU_IDX get_mmuidx(oi) 2680d9bb58e5SYang Zhong 2681139c1837SPaolo Bonzini #include "atomic_common.c.inc" 2682d9bb58e5SYang Zhong 2683d9bb58e5SYang Zhong #define DATA_SIZE 1 2684d9bb58e5SYang Zhong #include "atomic_template.h" 2685d9bb58e5SYang Zhong 2686d9bb58e5SYang Zhong #define DATA_SIZE 2 2687d9bb58e5SYang Zhong #include "atomic_template.h" 2688d9bb58e5SYang Zhong 2689d9bb58e5SYang Zhong #define DATA_SIZE 4 2690d9bb58e5SYang Zhong #include "atomic_template.h" 2691d9bb58e5SYang Zhong 2692d9bb58e5SYang Zhong #ifdef CONFIG_ATOMIC64 2693d9bb58e5SYang Zhong #define DATA_SIZE 8 2694d9bb58e5SYang Zhong #include "atomic_template.h" 2695d9bb58e5SYang Zhong #endif 2696d9bb58e5SYang Zhong 2697e6cd4bb5SRichard Henderson #if HAVE_CMPXCHG128 || HAVE_ATOMIC128 2698d9bb58e5SYang Zhong #define DATA_SIZE 16 2699d9bb58e5SYang Zhong #include "atomic_template.h" 2700d9bb58e5SYang Zhong #endif 2701d9bb58e5SYang Zhong 2702d9bb58e5SYang Zhong /* Second set of helpers are directly callable from TCG as helpers. */ 2703d9bb58e5SYang Zhong 2704d9bb58e5SYang Zhong #undef EXTRA_ARGS 2705d9bb58e5SYang Zhong #undef ATOMIC_NAME 2706d9bb58e5SYang Zhong #undef ATOMIC_MMU_LOOKUP 2707d9bb58e5SYang Zhong #define EXTRA_ARGS , TCGMemOpIdx oi 2708d9bb58e5SYang Zhong #define ATOMIC_NAME(X) HELPER(glue(glue(atomic_ ## X, SUFFIX), END)) 2709707526adSRichard Henderson #define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, GETPC()) 2710d9bb58e5SYang Zhong 2711d9bb58e5SYang Zhong #define DATA_SIZE 1 2712d9bb58e5SYang Zhong #include "atomic_template.h" 2713d9bb58e5SYang Zhong 2714d9bb58e5SYang Zhong #define DATA_SIZE 2 2715d9bb58e5SYang Zhong #include "atomic_template.h" 2716d9bb58e5SYang Zhong 2717d9bb58e5SYang Zhong #define DATA_SIZE 4 2718d9bb58e5SYang Zhong #include "atomic_template.h" 2719d9bb58e5SYang Zhong 2720d9bb58e5SYang Zhong #ifdef CONFIG_ATOMIC64 2721d9bb58e5SYang Zhong #define DATA_SIZE 8 2722d9bb58e5SYang Zhong #include "atomic_template.h" 2723d9bb58e5SYang Zhong #endif 2724504f73f7SAlex Bennée #undef ATOMIC_MMU_IDX 2725d9bb58e5SYang Zhong 2726d9bb58e5SYang Zhong /* Code access functions. 
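   These fetch from the instruction stream, so they use the code MMU
   index (cpu_mmu_index(env, true)) and target-endian MO_TE* memops;
   a translator typically fetches an opcode with cpu_ldl_code(env, pc).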
*/ 2727d9bb58e5SYang Zhong 2728fc4120a3SRichard Henderson static uint64_t full_ldub_code(CPUArchState *env, target_ulong addr, 27292dd92606SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 27302dd92606SRichard Henderson { 2731fc4120a3SRichard Henderson return load_helper(env, addr, oi, retaddr, MO_8, true, full_ldub_code); 27322dd92606SRichard Henderson } 27332dd92606SRichard Henderson 2734fc4120a3SRichard Henderson uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr) 2735eed56642SAlex Bennée { 2736fc4120a3SRichard Henderson TCGMemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(env, true)); 2737fc4120a3SRichard Henderson return full_ldub_code(env, addr, oi, 0); 27382dd92606SRichard Henderson } 27392dd92606SRichard Henderson 2740fc4120a3SRichard Henderson static uint64_t full_lduw_code(CPUArchState *env, target_ulong addr, 27414cef72d0SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 27424cef72d0SAlex Bennée { 2743fc4120a3SRichard Henderson return load_helper(env, addr, oi, retaddr, MO_TEUW, true, full_lduw_code); 27444cef72d0SAlex Bennée } 27454cef72d0SAlex Bennée 2746fc4120a3SRichard Henderson uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr) 27472dd92606SRichard Henderson { 2748fc4120a3SRichard Henderson TCGMemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(env, true)); 2749fc4120a3SRichard Henderson return full_lduw_code(env, addr, oi, 0); 2750eed56642SAlex Bennée } 2751d9bb58e5SYang Zhong 2752fc4120a3SRichard Henderson static uint64_t full_ldl_code(CPUArchState *env, target_ulong addr, 2753fc1bc777SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 2754eed56642SAlex Bennée { 2755fc4120a3SRichard Henderson return load_helper(env, addr, oi, retaddr, MO_TEUL, true, full_ldl_code); 27562dd92606SRichard Henderson } 27572dd92606SRichard Henderson 2758fc4120a3SRichard Henderson uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr) 27594cef72d0SAlex Bennée { 2760fc4120a3SRichard Henderson TCGMemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, true)); 2761fc4120a3SRichard Henderson return full_ldl_code(env, addr, oi, 0); 27624cef72d0SAlex Bennée } 27634cef72d0SAlex Bennée 2764fc4120a3SRichard Henderson static uint64_t full_ldq_code(CPUArchState *env, target_ulong addr, 27652dd92606SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 27662dd92606SRichard Henderson { 2767fc4120a3SRichard Henderson return load_helper(env, addr, oi, retaddr, MO_TEQ, true, full_ldq_code); 2768eed56642SAlex Bennée } 2769d9bb58e5SYang Zhong 2770fc4120a3SRichard Henderson uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr) 2771eed56642SAlex Bennée { 2772fc4120a3SRichard Henderson TCGMemOpIdx oi = make_memop_idx(MO_TEQ, cpu_mmu_index(env, true)); 2773fc4120a3SRichard Henderson return full_ldq_code(env, addr, oi, 0); 2774eed56642SAlex Bennée } 2775