/*
 * Common CPU TLB handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/tb-hash.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
#include "qemu/error-report.h"
#include "exec/log.h"
#include "exec/helper-proto.h"
#include "qemu/atomic.h"
#include "qemu/atomic128.h"
#include "exec/translate-all.h"
#include "trace/trace-root.h"
#include "trace/mem.h"
#include "internal.h"
#ifdef CONFIG_PLUGIN
#include "qemu/plugin-memory.h"
#endif

/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif

#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)
#define assert_cpu_is_self(cpu) do {                              \
        if (DEBUG_TLB_GATE) {                                     \
            g_assert(!(cpu)->created || qemu_cpu_is_self(cpu));   \
        }                                                         \
    } while (0)

/* run_on_cpu_data.target_ptr should always be big enough for a
 * target_ulong even on 32 bit builds */
QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));

/* We currently can't handle more than 16 bits in the MMUIDX bitmask.
 */
QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
#define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)

static inline size_t tlb_n_entries(CPUTLBDescFast *fast)
{
    return (fast->mask >> CPU_TLB_ENTRY_BITS) + 1;
}

static inline size_t sizeof_tlb(CPUTLBDescFast *fast)
{
    return fast->mask + (1 << CPU_TLB_ENTRY_BITS);
}
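/*
 * Illustrative arithmetic, not part of the build: the fast path stores
 * the table size as a byte mask over the table rather than as an entry
 * count.  Assuming CPU_TLB_ENTRY_BITS == 5 (a 32-byte CPUTLBEntry; the
 * actual value is set in cpu-defs.h) and a 256-entry table:
 *
 *     fast->mask     == (256 - 1) << 5        == 0x1fe0
 *     tlb_n_entries  == (0x1fe0 >> 5) + 1     == 256
 *     sizeof_tlb     == 0x1fe0 + (1 << 5)     == 8192 bytes
 */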
static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
                             size_t max_entries)
{
    desc->window_begin_ns = ns;
    desc->window_max_entries = max_entries;
}

static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
{
    unsigned int i, i0 = tb_jmp_cache_hash_page(page_addr);

    for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
        qatomic_set(&cpu->tb_jmp_cache[i0 + i], NULL);
    }
}

static void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
{
    /* Discard jump cache entries for any tb which might overlap the
       flushed page. */
    tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
    tb_jmp_cache_clear_page(cpu, addr);
}
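/*
 * Worked example for tlb_mmu_resize_locked() below, with made-up numbers:
 * suppose old_size == 1024 entries and the guest touched 800 of them in
 * the current window.  Then rate == 800 * 100 / 1024 == 78 > 70, so the
 * next table doubles to 2048 entries (capped at CPU_TLB_DYN_MAX_BITS).
 * If instead only 160 entries were used and the 100 ms window has
 * expired, rate == 15 < 30; pow2ceil(160) == 256 and the expected use
 * rate 160 * 100 / 256 == 62 stays below 70, so the table shrinks to
 * 256 entries (floored at CPU_TLB_DYN_MIN_BITS).
 */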
/**
 * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
 * @desc: The CPUTLBDesc portion of the TLB
 * @fast: The CPUTLBDescFast portion of the same TLB
 *
 * Called with tlb_lock_held.
 *
 * We have two main constraints when resizing a TLB: (1) we only resize it
 * on a TLB flush (otherwise we'd have to take a perf hit by either rehashing
 * the array or unnecessarily flushing it), which means we do not control how
 * frequently the resizing can occur; (2) we don't have access to the guest's
 * future scheduling decisions, and therefore have to decide the magnitude of
 * the resize based on past observations.
 *
 * In general, a memory-hungry process can benefit greatly from an appropriately
 * sized TLB, since a guest TLB miss is very expensive. This doesn't mean that
 * we just have to make the TLB as large as possible; while an oversized TLB
 * results in minimal TLB miss rates, it also takes longer to be flushed
 * (flushes can be _very_ frequent), and the reduced locality can also hurt
 * performance.
 *
 * To achieve near-optimal performance for all kinds of workloads, we:
 *
 * 1. Aggressively increase the size of the TLB when the use rate of the
 * TLB being flushed is high, since it is likely that in the near future this
 * memory-hungry process will execute again, and its memory hungriness will
 * probably be similar.
 *
 * 2. Slowly reduce the size of the TLB as the use rate declines over a
 * reasonably large time window. The rationale is that if in such a time window
 * we have not observed a high TLB use rate, it is likely that we won't observe
 * it in the near future. In that case, once a time window expires we downsize
 * the TLB to match the maximum use rate observed in the window.
 *
 * 3. Try to keep the maximum use rate in a time window in the 30-70% range,
 * since in that range performance is likely near-optimal. Recall that the TLB
 * is direct mapped, so we want the use rate to be low (or at least not too
 * high), since otherwise we are likely to have a significant amount of
 * conflict misses.
 */
static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast,
                                  int64_t now)
{
    size_t old_size = tlb_n_entries(fast);
    size_t rate;
    size_t new_size = old_size;
    int64_t window_len_ms = 100;
    int64_t window_len_ns = window_len_ms * 1000 * 1000;
    bool window_expired = now > desc->window_begin_ns + window_len_ns;

    if (desc->n_used_entries > desc->window_max_entries) {
        desc->window_max_entries = desc->n_used_entries;
    }
    rate = desc->window_max_entries * 100 / old_size;

    if (rate > 70) {
        new_size = MIN(old_size << 1, 1 << CPU_TLB_DYN_MAX_BITS);
    } else if (rate < 30 && window_expired) {
        size_t ceil = pow2ceil(desc->window_max_entries);
        size_t expected_rate = desc->window_max_entries * 100 / ceil;

        /*
         * Avoid undersizing when the max number of entries seen is just below
         * a pow2. For instance, if max_entries == 1025, the expected use rate
         * would be 1025/2048==50%. However, if max_entries == 1023, we'd get
         * 1023/1024==99.9% use rate, so we'd likely end up doubling the size
         * later. Thus, make sure that the expected use rate remains below 70%.
         * (and since we double the size, that means the lowest rate we'd
         * expect to get is 35%, which is still in the 30-70% range where
         * we consider that the size is appropriate.)
         */
        if (expected_rate > 70) {
            ceil *= 2;
        }
        new_size = MAX(ceil, 1 << CPU_TLB_DYN_MIN_BITS);
    }

    if (new_size == old_size) {
        if (window_expired) {
            tlb_window_reset(desc, now, desc->n_used_entries);
        }
        return;
    }

    g_free(fast->table);
    g_free(desc->iotlb);

    tlb_window_reset(desc, now, 0);
    /* desc->n_used_entries is cleared by the caller */
    fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
    fast->table = g_try_new(CPUTLBEntry, new_size);
    desc->iotlb = g_try_new(CPUIOTLBEntry, new_size);

    /*
     * If the allocations fail, try smaller sizes. We just freed some
     * memory, so going back to half of new_size has a good chance of working.
     * Increased memory pressure elsewhere in the system might cause the
     * allocations to fail though, so we progressively reduce the allocation
     * size, aborting if we cannot even allocate the smallest TLB we support.
     */
    while (fast->table == NULL || desc->iotlb == NULL) {
        if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
            error_report("%s: %s", __func__, strerror(errno));
            abort();
        }
        new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS);
        fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;

        g_free(fast->table);
        g_free(desc->iotlb);
        fast->table = g_try_new(CPUTLBEntry, new_size);
        desc->iotlb = g_try_new(CPUIOTLBEntry, new_size);
    }
}

static void tlb_mmu_flush_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast)
{
    desc->n_used_entries = 0;
    desc->large_page_addr = -1;
    desc->large_page_mask = -1;
    desc->vindex = 0;
    memset(fast->table, -1, sizeof_tlb(fast));
    memset(desc->vtable, -1, sizeof(desc->vtable));
}

static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx,
                                        int64_t now)
{
    CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
    CPUTLBDescFast *fast = &env_tlb(env)->f[mmu_idx];

    tlb_mmu_resize_locked(desc, fast, now);
    tlb_mmu_flush_locked(desc, fast);
}

static void tlb_mmu_init(CPUTLBDesc *desc, CPUTLBDescFast *fast, int64_t now)
{
    size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;

    tlb_window_reset(desc, now, 0);
    desc->n_used_entries = 0;
    fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
    fast->table = g_new(CPUTLBEntry, n_entries);
    desc->iotlb = g_new(CPUIOTLBEntry, n_entries);
    tlb_mmu_flush_locked(desc, fast);
}
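/*
 * Sketch of the lifecycle, for orientation (assumes
 * CPU_TLB_DYN_DEFAULT_BITS == 8, i.e. a 256-entry initial table; the
 * actual value is set in cpu-defs.h and may differ): tlb_mmu_init()
 * allocates the default-sized table once, after which every flush of an
 * mmu_idx funnels through tlb_flush_one_mmuidx_locked(), which first
 * lets tlb_mmu_resize_locked() reallocate the table if the observed use
 * rate warrants it and then wipes it with tlb_mmu_flush_locked().
 */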
static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
{
    env_tlb(env)->d[mmu_idx].n_used_entries++;
}

static inline void tlb_n_used_entries_dec(CPUArchState *env, uintptr_t mmu_idx)
{
    env_tlb(env)->d[mmu_idx].n_used_entries--;
}

void tlb_init(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;
    int64_t now = get_clock_realtime();
    int i;

    qemu_spin_init(&env_tlb(env)->c.lock);

    /* All tlbs are initialized flushed. */
    env_tlb(env)->c.dirty = 0;

    for (i = 0; i < NB_MMU_MODES; i++) {
        tlb_mmu_init(&env_tlb(env)->d[i], &env_tlb(env)->f[i], now);
    }
}

void tlb_destroy(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;
    int i;

    qemu_spin_destroy(&env_tlb(env)->c.lock);
    for (i = 0; i < NB_MMU_MODES; i++) {
        CPUTLBDesc *desc = &env_tlb(env)->d[i];
        CPUTLBDescFast *fast = &env_tlb(env)->f[i];

        g_free(fast->table);
        g_free(desc->iotlb);
    }
}

/* flush_all_helper: run fn across all cpus
 *
 * If the wait flag is set then the src cpu's helper will be queued as
 * "safe" work and the loop exited creating a synchronisation point
 * where all queued work will be finished before execution starts
 * again.
 */
static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
                             run_on_cpu_data d)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, fn, d);
        }
    }
}

void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
{
    CPUState *cpu;
    size_t full = 0, part = 0, elide = 0;

    CPU_FOREACH(cpu) {
        CPUArchState *env = cpu->env_ptr;

        full += qatomic_read(&env_tlb(env)->c.full_flush_count);
        part += qatomic_read(&env_tlb(env)->c.part_flush_count);
        elide += qatomic_read(&env_tlb(env)->c.elide_flush_count);
    }
    *pfull = full;
    *ppart = part;
    *pelide = elide;
}
static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    uint16_t asked = data.host_int;
    uint16_t all_dirty, work, to_clean;
    int64_t now = get_clock_realtime();

    assert_cpu_is_self(cpu);

    tlb_debug("mmu_idx:0x%04" PRIx16 "\n", asked);

    qemu_spin_lock(&env_tlb(env)->c.lock);

    all_dirty = env_tlb(env)->c.dirty;
    to_clean = asked & all_dirty;
    all_dirty &= ~to_clean;
    env_tlb(env)->c.dirty = all_dirty;

    for (work = to_clean; work != 0; work &= work - 1) {
        int mmu_idx = ctz32(work);
        tlb_flush_one_mmuidx_locked(env, mmu_idx, now);
    }

    qemu_spin_unlock(&env_tlb(env)->c.lock);

    cpu_tb_jmp_cache_clear(cpu);

    if (to_clean == ALL_MMUIDX_BITS) {
        qatomic_set(&env_tlb(env)->c.full_flush_count,
                    env_tlb(env)->c.full_flush_count + 1);
    } else {
        qatomic_set(&env_tlb(env)->c.part_flush_count,
                    env_tlb(env)->c.part_flush_count + ctpop16(to_clean));
        if (to_clean != asked) {
            qatomic_set(&env_tlb(env)->c.elide_flush_count,
                        env_tlb(env)->c.elide_flush_count +
                        ctpop16(asked & ~to_clean));
        }
    }
}

void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
    tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);

    if (cpu->created && !qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
                         RUN_ON_CPU_HOST_INT(idxmap));
    } else {
        tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap));
    }
}

void tlb_flush(CPUState *cpu)
{
    tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS);
}
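/*
 * Hypothetical caller's view, not part of the build: @idxmap is a bitmap
 * of mmu_idx values, so a target wanting to flush only, say, indexes 0
 * and 2 would call
 *
 *     tlb_flush_by_mmuidx(cpu, (1 << 0) | (1 << 2));
 *
 * The work loop in the async worker above then visits exactly the dirty
 * indexes among those: with work == 0b0101, ctz32() yields mmu_idx 0,
 * work &= work - 1 leaves 0b0100, and the next pass yields mmu_idx 2.
 */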
void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus(src_cpu, ALL_MMUIDX_BITS);
}

void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS);
}
static bool tlb_hit_page_mask_anyprot(CPUTLBEntry *tlb_entry,
                                      target_ulong page, target_ulong mask)
{
    page &= mask;
    mask &= TARGET_PAGE_MASK | TLB_INVALID_MASK;

    return (page == (tlb_entry->addr_read & mask) ||
            page == (tlb_addr_write(tlb_entry) & mask) ||
            page == (tlb_entry->addr_code & mask));
}

static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry,
                                        target_ulong page)
{
    return tlb_hit_page_mask_anyprot(tlb_entry, page, -1);
}

/**
 * tlb_entry_is_empty - return true if the entry is not in use
 * @te: pointer to CPUTLBEntry
 */
static inline bool tlb_entry_is_empty(const CPUTLBEntry *te)
{
    return te->addr_read == -1 && te->addr_write == -1 && te->addr_code == -1;
}

/* Called with tlb_c.lock held */
static bool tlb_flush_entry_mask_locked(CPUTLBEntry *tlb_entry,
                                        target_ulong page,
                                        target_ulong mask)
{
    if (tlb_hit_page_mask_anyprot(tlb_entry, page, mask)) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
        return true;
    }
    return false;
}

static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry,
                                          target_ulong page)
{
    return tlb_flush_entry_mask_locked(tlb_entry, page, -1);
}

/* Called with tlb_c.lock held */
static void tlb_flush_vtlb_page_mask_locked(CPUArchState *env, int mmu_idx,
                                            target_ulong page,
                                            target_ulong mask)
{
    CPUTLBDesc *d = &env_tlb(env)->d[mmu_idx];
    int k;

    assert_cpu_is_self(env_cpu(env));
    for (k = 0; k < CPU_VTLB_SIZE; k++) {
        if (tlb_flush_entry_mask_locked(&d->vtable[k], page, mask)) {
            tlb_n_used_entries_dec(env, mmu_idx);
        }
    }
}

static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx,
                                              target_ulong page)
{
    tlb_flush_vtlb_page_mask_locked(env, mmu_idx, page, -1);
}
static void tlb_flush_page_locked(CPUArchState *env, int midx,
                                  target_ulong page)
{
    target_ulong lp_addr = env_tlb(env)->d[midx].large_page_addr;
    target_ulong lp_mask = env_tlb(env)->d[midx].large_page_mask;

    /* Check if we need to flush due to large pages. */
    if ((page & lp_mask) == lp_addr) {
        tlb_debug("forcing full flush midx %d ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  midx, lp_addr, lp_mask);
        tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
    } else {
        if (tlb_flush_entry_locked(tlb_entry(env, midx, page), page)) {
            tlb_n_used_entries_dec(env, midx);
        }
        tlb_flush_vtlb_page_locked(env, midx, page);
    }
}

/**
 * tlb_flush_page_by_mmuidx_async_0:
 * @cpu: cpu on which to flush
 * @addr: page of virtual address to flush
 * @idxmap: set of mmu_idx to flush
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, flush one page
 * at @addr from the tlbs indicated by @idxmap from @cpu.
 */
static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu,
                                             target_ulong addr,
                                             uint16_t idxmap)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("page addr:" TARGET_FMT_lx " mmu_map:0x%x\n", addr, idxmap);

    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if ((idxmap >> mmu_idx) & 1) {
            tlb_flush_page_locked(env, mmu_idx, addr);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);

    tb_flush_jmp_cache(cpu, addr);
}

/**
 * tlb_flush_page_by_mmuidx_async_1:
 * @cpu: cpu on which to flush
 * @data: encoded addr + idxmap
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, called through
 * async_run_on_cpu. The idxmap parameter is encoded in the page
 * offset of the target_ptr field. This limits the set of mmu_idx
 * that can be passed via this method.
 */
static void tlb_flush_page_by_mmuidx_async_1(CPUState *cpu,
                                             run_on_cpu_data data)
{
    target_ulong addr_and_idxmap = (target_ulong) data.target_ptr;
    target_ulong addr = addr_and_idxmap & TARGET_PAGE_MASK;
    uint16_t idxmap = addr_and_idxmap & ~TARGET_PAGE_MASK;

    tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
}

typedef struct {
    target_ulong addr;
    uint16_t idxmap;
} TLBFlushPageByMMUIdxData;

/**
 * tlb_flush_page_by_mmuidx_async_2:
 * @cpu: cpu on which to flush
 * @data: allocated addr + idxmap
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, called through
 * async_run_on_cpu. The addr+idxmap parameters are stored in a
 * TLBFlushPageByMMUIdxData structure that has been allocated
 * specifically for this helper. Free the structure when done.
 */
static void tlb_flush_page_by_mmuidx_async_2(CPUState *cpu,
                                             run_on_cpu_data data)
{
    TLBFlushPageByMMUIdxData *d = data.host_ptr;

    tlb_flush_page_by_mmuidx_async_0(cpu, d->addr, d->idxmap);
    g_free(d);
}

void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
{
    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    if (qemu_cpu_is_self(cpu)) {
        tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
    } else if (idxmap < TARGET_PAGE_SIZE) {
        /*
         * Most targets have only a few mmu_idx. In the case where
         * we can stuff idxmap into the low TARGET_PAGE_BITS, avoid
         * allocating memory for this operation.
         */
        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        TLBFlushPageByMMUIdxData *d = g_new(TLBFlushPageByMMUIdxData, 1);

        /* Otherwise allocate a structure, freed by the worker. */
        d->addr = addr;
        d->idxmap = idxmap;
        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_2,
                         RUN_ON_CPU_HOST_PTR(d));
    }
}

void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
    tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS);
}
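/*
 * Worked example of the page-offset encoding, with made-up values: on a
 * target with 4 KiB pages (TARGET_PAGE_BITS == 12), flushing vaddr
 * 0x12345000 for mmu_idx 0 and 2 packs to
 *
 *     RUN_ON_CPU_TARGET_PTR(0x12345000 | 0x5)  ==  0x12345005
 *
 * and tlb_flush_page_by_mmuidx_async_1 recovers addr with
 * TARGET_PAGE_MASK and idxmap from the low bits.  Only when
 * idxmap >= TARGET_PAGE_SIZE is the heap-allocated
 * TLBFlushPageByMMUIdxData path needed.
 */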
void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr,
                                       uint16_t idxmap)
{
    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    /*
     * Allocate memory to hold addr+idxmap only when needed.
     * See tlb_flush_page_by_mmuidx for details.
     */
    if (idxmap < TARGET_PAGE_SIZE) {
        flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        CPUState *dst_cpu;

        /* Allocate a separate data block for each destination cpu.  */
        CPU_FOREACH(dst_cpu) {
            if (dst_cpu != src_cpu) {
                TLBFlushPageByMMUIdxData *d
                    = g_new(TLBFlushPageByMMUIdxData, 1);

                d->addr = addr;
                d->idxmap = idxmap;
                async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
                                 RUN_ON_CPU_HOST_PTR(d));
            }
        }
    }

    tlb_flush_page_by_mmuidx_async_0(src_cpu, addr, idxmap);
}

void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
{
    tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS);
}

void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                              target_ulong addr,
                                              uint16_t idxmap)
{
    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    /*
     * Allocate memory to hold addr+idxmap only when needed.
     * See tlb_flush_page_by_mmuidx for details.
     */
    if (idxmap < TARGET_PAGE_SIZE) {
        flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
        async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                              RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        CPUState *dst_cpu;
        TLBFlushPageByMMUIdxData *d;

        /* Allocate a separate data block for each destination cpu.  */
        CPU_FOREACH(dst_cpu) {
            if (dst_cpu != src_cpu) {
                d = g_new(TLBFlushPageByMMUIdxData, 1);
                d->addr = addr;
                d->idxmap = idxmap;
                async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
                                 RUN_ON_CPU_HOST_PTR(d));
            }
        }

        d = g_new(TLBFlushPageByMMUIdxData, 1);
        d->addr = addr;
        d->idxmap = idxmap;
        async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_2,
                              RUN_ON_CPU_HOST_PTR(d));
    }
}

void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr)
{
    tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
}

static void tlb_flush_page_bits_locked(CPUArchState *env, int midx,
                                       target_ulong page, unsigned bits)
{
    CPUTLBDesc *d = &env_tlb(env)->d[midx];
    CPUTLBDescFast *f = &env_tlb(env)->f[midx];
    target_ulong mask = MAKE_64BIT_MASK(0, bits);

    /*
     * If @bits is smaller than the tlb size, there may be multiple entries
     * within the TLB; otherwise all addresses that match under @mask hit
     * the same TLB entry.
     *
     * TODO: Perhaps allow bits to be a few bits less than the size.
     * For now, just flush the entire TLB.
     */
    if (mask < f->mask) {
        tlb_debug("forcing full flush midx %d ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  midx, page, mask);
        tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
        return;
    }

    /* Check if we need to flush due to large pages.  */
    if ((page & d->large_page_mask) == d->large_page_addr) {
        tlb_debug("forcing full flush midx %d ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  midx, d->large_page_addr, d->large_page_mask);
        tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
        return;
    }

    if (tlb_flush_entry_mask_locked(tlb_entry(env, midx, page), page, mask)) {
        tlb_n_used_entries_dec(env, midx);
    }
    tlb_flush_vtlb_page_mask_locked(env, midx, page, mask);
}

typedef struct {
    target_ulong addr;
    uint16_t idxmap;
    uint16_t bits;
} TLBFlushPageBitsByMMUIdxData;

static void
tlb_flush_page_bits_by_mmuidx_async_0(CPUState *cpu,
                                      TLBFlushPageBitsByMMUIdxData d)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("page addr:" TARGET_FMT_lx "/%u mmu_map:0x%x\n",
              d.addr, d.bits, d.idxmap);

    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if ((d.idxmap >> mmu_idx) & 1) {
            tlb_flush_page_bits_locked(env, mmu_idx, d.addr, d.bits);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);

    tb_flush_jmp_cache(cpu, d.addr);
}

static bool encode_pbm_to_runon(run_on_cpu_data *out,
                                TLBFlushPageBitsByMMUIdxData d)
{
    /* We need 6 bits to hold @bits up to 63. */
    if (d.idxmap <= MAKE_64BIT_MASK(0, TARGET_PAGE_BITS - 6)) {
        *out = RUN_ON_CPU_TARGET_PTR(d.addr | (d.idxmap << 6) | d.bits);
        return true;
    }
    return false;
}

static TLBFlushPageBitsByMMUIdxData
decode_runon_to_pbm(run_on_cpu_data data)
{
    target_ulong addr_map_bits = (target_ulong) data.target_ptr;
    return (TLBFlushPageBitsByMMUIdxData){
        .addr = addr_map_bits & TARGET_PAGE_MASK,
        .idxmap = (addr_map_bits & ~TARGET_PAGE_MASK) >> 6,
        .bits = addr_map_bits & 0x3f
    };
}
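/*
 * Worked example of this packing, with made-up values: with 4 KiB pages
 * there are 12 offset bits; the low 6 carry @bits and the rest carry
 * @idxmap, which is why idxmap must fit in TARGET_PAGE_BITS - 6 bits to
 * take the fast path.  Flushing addr 0x12345000 with idxmap 0x3 and
 * bits 48 encodes as
 *
 *     0x12345000 | (0x3 << 6) | 48  ==  0x123450f0
 *
 * and decode_runon_to_pbm() splits it back apart with TARGET_PAGE_MASK,
 * >> 6, and & 0x3f.
 */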
static void tlb_flush_page_bits_by_mmuidx_async_1(CPUState *cpu,
                                                  run_on_cpu_data runon)
{
    tlb_flush_page_bits_by_mmuidx_async_0(cpu, decode_runon_to_pbm(runon));
}

static void tlb_flush_page_bits_by_mmuidx_async_2(CPUState *cpu,
                                                  run_on_cpu_data data)
{
    TLBFlushPageBitsByMMUIdxData *d = data.host_ptr;

    tlb_flush_page_bits_by_mmuidx_async_0(cpu, *d);
    g_free(d);
}

void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr,
                                   uint16_t idxmap, unsigned bits)
{
    TLBFlushPageBitsByMMUIdxData d;
    run_on_cpu_data runon;

    /* If all bits are significant, this devolves to tlb_flush_page. */
    if (bits >= TARGET_LONG_BITS) {
        tlb_flush_page_by_mmuidx(cpu, addr, idxmap);
        return;
    }
    /* If no page bits are significant, this devolves to tlb_flush. */
    if (bits < TARGET_PAGE_BITS) {
        tlb_flush_by_mmuidx(cpu, idxmap);
        return;
    }

    /* This should already be page aligned */
    d.addr = addr & TARGET_PAGE_MASK;
    d.idxmap = idxmap;
    d.bits = bits;

    if (qemu_cpu_is_self(cpu)) {
        tlb_flush_page_bits_by_mmuidx_async_0(cpu, d);
    } else if (encode_pbm_to_runon(&runon, d)) {
        async_run_on_cpu(cpu, tlb_flush_page_bits_by_mmuidx_async_1, runon);
    } else {
        TLBFlushPageBitsByMMUIdxData *p
            = g_new(TLBFlushPageBitsByMMUIdxData, 1);

        /* Otherwise allocate a structure, freed by the worker. */
        *p = d;
        async_run_on_cpu(cpu, tlb_flush_page_bits_by_mmuidx_async_2,
                         RUN_ON_CPU_HOST_PTR(p));
    }
}

void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu,
                                            target_ulong addr,
                                            uint16_t idxmap,
                                            unsigned bits)
{
    TLBFlushPageBitsByMMUIdxData d;
    run_on_cpu_data runon;

    /* If all bits are significant, this devolves to tlb_flush_page. */
    if (bits >= TARGET_LONG_BITS) {
        tlb_flush_page_by_mmuidx_all_cpus(src_cpu, addr, idxmap);
        return;
    }
    /* If no page bits are significant, this devolves to tlb_flush. */
    if (bits < TARGET_PAGE_BITS) {
        tlb_flush_by_mmuidx_all_cpus(src_cpu, idxmap);
        return;
    }

    /* This should already be page aligned */
    d.addr = addr & TARGET_PAGE_MASK;
    d.idxmap = idxmap;
    d.bits = bits;

    if (encode_pbm_to_runon(&runon, d)) {
        flush_all_helper(src_cpu, tlb_flush_page_bits_by_mmuidx_async_1, runon);
    } else {
        CPUState *dst_cpu;
        TLBFlushPageBitsByMMUIdxData *p;

        /* Allocate a separate data block for each destination cpu.  */
        CPU_FOREACH(dst_cpu) {
            if (dst_cpu != src_cpu) {
                p = g_new(TLBFlushPageBitsByMMUIdxData, 1);
                *p = d;
                async_run_on_cpu(dst_cpu,
                                 tlb_flush_page_bits_by_mmuidx_async_2,
                                 RUN_ON_CPU_HOST_PTR(p));
            }
        }
    }

    tlb_flush_page_bits_by_mmuidx_async_0(src_cpu, d);
}

void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                                   target_ulong addr,
                                                   uint16_t idxmap,
                                                   unsigned bits)
{
    TLBFlushPageBitsByMMUIdxData d;
    run_on_cpu_data runon;

    /* If all bits are significant, this devolves to tlb_flush_page. */
    if (bits >= TARGET_LONG_BITS) {
        tlb_flush_page_by_mmuidx_all_cpus_synced(src_cpu, addr, idxmap);
        return;
    }
    /* If no page bits are significant, this devolves to tlb_flush. */
    if (bits < TARGET_PAGE_BITS) {
        tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, idxmap);
        return;
    }

    /* This should already be page aligned */
    d.addr = addr & TARGET_PAGE_MASK;
    d.idxmap = idxmap;
    d.bits = bits;

    if (encode_pbm_to_runon(&runon, d)) {
        flush_all_helper(src_cpu, tlb_flush_page_bits_by_mmuidx_async_1, runon);
        async_safe_run_on_cpu(src_cpu, tlb_flush_page_bits_by_mmuidx_async_1,
                              runon);
    } else {
        CPUState *dst_cpu;
        TLBFlushPageBitsByMMUIdxData *p;

        /* Allocate a separate data block for each destination cpu.  */
        CPU_FOREACH(dst_cpu) {
            if (dst_cpu != src_cpu) {
                p = g_new(TLBFlushPageBitsByMMUIdxData, 1);
                *p = d;
                async_run_on_cpu(dst_cpu, tlb_flush_page_bits_by_mmuidx_async_2,
                                 RUN_ON_CPU_HOST_PTR(p));
            }
        }

        p = g_new(TLBFlushPageBitsByMMUIdxData, 1);
        *p = d;
        async_safe_run_on_cpu(src_cpu, tlb_flush_page_bits_by_mmuidx_async_2,
                              RUN_ON_CPU_HOST_PTR(p));
    }
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
                                             DIRTY_MEMORY_CODE);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
void tlb_unprotect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}


/*
 * Dirty write flag handling
 *
 * When the TCG code writes to a location it looks up the address in
 * the TLB and uses that data to compute the final address. If any of
 * the lower bits of the address are set then the slow path is forced.
 * There are a number of reasons to do this but for normal RAM the
 * most usual is detecting writes to code regions which may invalidate
 * generated code.
 *
 * Other vCPUs might be reading their TLBs during guest execution, so we update
 * te->addr_write with qatomic_set. We don't need to worry about this for
 * oversized guests as MTTCG is disabled for them.
 *
 * Called with tlb_c.lock held.
 */
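/*
 * Illustrative view of that encoding (addresses are hypothetical):
 * because entries are page aligned, the low TARGET_PAGE_BITS of
 * addr_write are free to carry flags.  A clean, directly writable page
 * might sit in the TLB as addr_write == 0x12345000; after the range
 * function below marks it not-dirty it becomes
 * 0x12345000 | TLB_NOTDIRTY, so the next store's fast-path comparison
 * fails and the access is diverted to the slow path, where the dirty
 * bookkeeping can be redone.
 */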
static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
                                         uintptr_t start, uintptr_t length)
{
    uintptr_t addr = tlb_entry->addr_write;

    if ((addr & (TLB_INVALID_MASK | TLB_MMIO |
                 TLB_DISCARD_WRITE | TLB_NOTDIRTY)) == 0) {
        addr &= TARGET_PAGE_MASK;
        addr += tlb_entry->addend;
        if ((addr - start) < length) {
#if TCG_OVERSIZED_GUEST
            tlb_entry->addr_write |= TLB_NOTDIRTY;
#else
            qatomic_set(&tlb_entry->addr_write,
                        tlb_entry->addr_write | TLB_NOTDIRTY);
#endif
        }
    }
}

/*
 * Called with tlb_c.lock held.
 * Called only from the vCPU context, i.e. the TLB's owner thread.
 */
static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s)
{
    *d = *s;
}

/* This is a cross vCPU call (i.e. another vCPU resetting the flags of
 * the target vCPU).
 * We must take tlb_c.lock to avoid racing with another vCPU update. The only
 * thing actually updated is the target TLB entry ->addr_write flags.
 */
void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
{
    CPUArchState *env;

    int mmu_idx;

    env = cpu->env_ptr;
    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        unsigned int i;
        unsigned int n = tlb_n_entries(&env_tlb(env)->f[mmu_idx]);

        for (i = 0; i < n; i++) {
            tlb_reset_dirty_range_locked(&env_tlb(env)->f[mmu_idx].table[i],
                                         start1, length);
        }

        for (i = 0; i < CPU_VTLB_SIZE; i++) {
            tlb_reset_dirty_range_locked(&env_tlb(env)->d[mmu_idx].vtable[i],
                                         start1, length);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);
}

/* Called with tlb_c.lock held */
static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry,
Cota target_ulong vaddr) 1039d9bb58e5SYang Zhong { 1040d9bb58e5SYang Zhong if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) { 1041d9bb58e5SYang Zhong tlb_entry->addr_write = vaddr; 1042d9bb58e5SYang Zhong } 1043d9bb58e5SYang Zhong } 1044d9bb58e5SYang Zhong 1045d9bb58e5SYang Zhong /* update the TLB corresponding to virtual page vaddr 1046d9bb58e5SYang Zhong so that it is no longer dirty */ 1047d9bb58e5SYang Zhong void tlb_set_dirty(CPUState *cpu, target_ulong vaddr) 1048d9bb58e5SYang Zhong { 1049d9bb58e5SYang Zhong CPUArchState *env = cpu->env_ptr; 1050d9bb58e5SYang Zhong int mmu_idx; 1051d9bb58e5SYang Zhong 1052d9bb58e5SYang Zhong assert_cpu_is_self(cpu); 1053d9bb58e5SYang Zhong 1054d9bb58e5SYang Zhong vaddr &= TARGET_PAGE_MASK; 1055a40ec84eSRichard Henderson qemu_spin_lock(&env_tlb(env)->c.lock); 1056d9bb58e5SYang Zhong for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { 1057383beda9SRichard Henderson tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, vaddr), vaddr); 1058d9bb58e5SYang Zhong } 1059d9bb58e5SYang Zhong 1060d9bb58e5SYang Zhong for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { 1061d9bb58e5SYang Zhong int k; 1062d9bb58e5SYang Zhong for (k = 0; k < CPU_VTLB_SIZE; k++) { 1063a40ec84eSRichard Henderson tlb_set_dirty1_locked(&env_tlb(env)->d[mmu_idx].vtable[k], vaddr); 1064d9bb58e5SYang Zhong } 1065d9bb58e5SYang Zhong } 1066a40ec84eSRichard Henderson qemu_spin_unlock(&env_tlb(env)->c.lock); 1067d9bb58e5SYang Zhong } 1068d9bb58e5SYang Zhong 1069d9bb58e5SYang Zhong /* Our TLB does not support large pages, so remember the area covered by 1070d9bb58e5SYang Zhong large pages and trigger a full TLB flush if these are invalidated. */ 10711308e026SRichard Henderson static void tlb_add_large_page(CPUArchState *env, int mmu_idx, 10721308e026SRichard Henderson target_ulong vaddr, target_ulong size) 1073d9bb58e5SYang Zhong { 1074a40ec84eSRichard Henderson target_ulong lp_addr = env_tlb(env)->d[mmu_idx].large_page_addr; 10751308e026SRichard Henderson target_ulong lp_mask = ~(size - 1); 1076d9bb58e5SYang Zhong 10771308e026SRichard Henderson if (lp_addr == (target_ulong)-1) { 10781308e026SRichard Henderson /* No previous large page. */ 10791308e026SRichard Henderson lp_addr = vaddr; 10801308e026SRichard Henderson } else { 1081d9bb58e5SYang Zhong /* Extend the existing region to include the new page. 10821308e026SRichard Henderson This is a compromise between unnecessary flushes and 10831308e026SRichard Henderson the cost of maintaining a full variable size TLB. */ 1084a40ec84eSRichard Henderson lp_mask &= env_tlb(env)->d[mmu_idx].large_page_mask; 10851308e026SRichard Henderson while (((lp_addr ^ vaddr) & lp_mask) != 0) { 10861308e026SRichard Henderson lp_mask <<= 1; 1087d9bb58e5SYang Zhong } 10881308e026SRichard Henderson } 1089a40ec84eSRichard Henderson env_tlb(env)->d[mmu_idx].large_page_addr = lp_addr & lp_mask; 1090a40ec84eSRichard Henderson env_tlb(env)->d[mmu_idx].large_page_mask = lp_mask; 1091d9bb58e5SYang Zhong } 1092d9bb58e5SYang Zhong 1093d9bb58e5SYang Zhong /* Add a new TLB entry. At most one entry for a given virtual address 1094d9bb58e5SYang Zhong * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the 1095d9bb58e5SYang Zhong * supplied size is only used by tlb_flush_page. 1096d9bb58e5SYang Zhong * 1097d9bb58e5SYang Zhong * Called from TCG-generated code, which is under an RCU read-side 1098d9bb58e5SYang Zhong * critical section. 
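 *
 * As a usage sketch (cs, vaddr, paddr, attrs, prot and mmu_idx stand
 * in for a target's own values), a target's tlb_fill hook that has
 * just translated a page typically finishes with:
 *
 *     tlb_set_page_with_attrs(cs, vaddr, paddr, attrs,
 *                             prot, mmu_idx, TARGET_PAGE_SIZE);
 *
 * Neither vaddr nor paddr need be page aligned; both are masked to
 * vaddr_page/paddr_page below.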
1099d9bb58e5SYang Zhong */ 1100d9bb58e5SYang Zhong void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr, 1101d9bb58e5SYang Zhong hwaddr paddr, MemTxAttrs attrs, int prot, 1102d9bb58e5SYang Zhong int mmu_idx, target_ulong size) 1103d9bb58e5SYang Zhong { 1104d9bb58e5SYang Zhong CPUArchState *env = cpu->env_ptr; 1105a40ec84eSRichard Henderson CPUTLB *tlb = env_tlb(env); 1106a40ec84eSRichard Henderson CPUTLBDesc *desc = &tlb->d[mmu_idx]; 1107d9bb58e5SYang Zhong MemoryRegionSection *section; 1108d9bb58e5SYang Zhong unsigned int index; 1109d9bb58e5SYang Zhong target_ulong address; 11108f5db641SRichard Henderson target_ulong write_address; 1111d9bb58e5SYang Zhong uintptr_t addend; 111268fea038SRichard Henderson CPUTLBEntry *te, tn; 111355df6fcfSPeter Maydell hwaddr iotlb, xlat, sz, paddr_page; 111455df6fcfSPeter Maydell target_ulong vaddr_page; 1115d9bb58e5SYang Zhong int asidx = cpu_asidx_from_attrs(cpu, attrs); 111650b107c5SRichard Henderson int wp_flags; 11178f5db641SRichard Henderson bool is_ram, is_romd; 1118d9bb58e5SYang Zhong 1119d9bb58e5SYang Zhong assert_cpu_is_self(cpu); 112055df6fcfSPeter Maydell 11211308e026SRichard Henderson if (size <= TARGET_PAGE_SIZE) { 112255df6fcfSPeter Maydell sz = TARGET_PAGE_SIZE; 112355df6fcfSPeter Maydell } else { 11241308e026SRichard Henderson tlb_add_large_page(env, mmu_idx, vaddr, size); 1125d9bb58e5SYang Zhong sz = size; 112655df6fcfSPeter Maydell } 112755df6fcfSPeter Maydell vaddr_page = vaddr & TARGET_PAGE_MASK; 112855df6fcfSPeter Maydell paddr_page = paddr & TARGET_PAGE_MASK; 112955df6fcfSPeter Maydell 113055df6fcfSPeter Maydell section = address_space_translate_for_iotlb(cpu, asidx, paddr_page, 113155df6fcfSPeter Maydell &xlat, &sz, attrs, &prot); 1132d9bb58e5SYang Zhong assert(sz >= TARGET_PAGE_SIZE); 1133d9bb58e5SYang Zhong 1134d9bb58e5SYang Zhong tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx 1135d9bb58e5SYang Zhong " prot=%x idx=%d\n", 1136d9bb58e5SYang Zhong vaddr, paddr, prot, mmu_idx); 1137d9bb58e5SYang Zhong 113855df6fcfSPeter Maydell address = vaddr_page; 113955df6fcfSPeter Maydell if (size < TARGET_PAGE_SIZE) { 114030d7e098SRichard Henderson /* Repeat the MMU check and TLB fill on every access. */ 114130d7e098SRichard Henderson address |= TLB_INVALID_MASK; 114255df6fcfSPeter Maydell } 1143a26fc6f5STony Nguyen if (attrs.byte_swap) { 11445b87b3e6SRichard Henderson address |= TLB_BSWAP; 1145a26fc6f5STony Nguyen } 11468f5db641SRichard Henderson 11478f5db641SRichard Henderson is_ram = memory_region_is_ram(section->mr); 11488f5db641SRichard Henderson is_romd = memory_region_is_romd(section->mr); 11498f5db641SRichard Henderson 11508f5db641SRichard Henderson if (is_ram || is_romd) { 11518f5db641SRichard Henderson /* RAM and ROMD both have associated host memory. */ 1152d9bb58e5SYang Zhong addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat; 11538f5db641SRichard Henderson } else { 11548f5db641SRichard Henderson /* I/O does not; force the host address to NULL. */ 11558f5db641SRichard Henderson addend = 0; 1156d9bb58e5SYang Zhong } 1157d9bb58e5SYang Zhong 11588f5db641SRichard Henderson write_address = address; 11598f5db641SRichard Henderson if (is_ram) { 11608f5db641SRichard Henderson iotlb = memory_region_get_ram_addr(section->mr) + xlat; 11618f5db641SRichard Henderson /* 11628f5db641SRichard Henderson * Computing is_clean is expensive; avoid all that unless 11638f5db641SRichard Henderson * the page is actually writable. 
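 * When it is writable, a read-only section gets TLB_DISCARD_WRITE
 * (stores are silently ignored), while a clean page gets TLB_NOTDIRTY
 * so that the first store goes through the dirty-tracking slow path.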
11648f5db641SRichard Henderson */ 11658f5db641SRichard Henderson if (prot & PAGE_WRITE) { 11668f5db641SRichard Henderson if (section->readonly) { 11678f5db641SRichard Henderson write_address |= TLB_DISCARD_WRITE; 11688f5db641SRichard Henderson } else if (cpu_physical_memory_is_clean(iotlb)) { 11698f5db641SRichard Henderson write_address |= TLB_NOTDIRTY; 11708f5db641SRichard Henderson } 11718f5db641SRichard Henderson } 11728f5db641SRichard Henderson } else { 11738f5db641SRichard Henderson /* I/O or ROMD */ 11748f5db641SRichard Henderson iotlb = memory_region_section_get_iotlb(cpu, section) + xlat; 11758f5db641SRichard Henderson /* 11768f5db641SRichard Henderson * Writes to romd devices must go through MMIO to enable write. 11778f5db641SRichard Henderson * Reads to romd devices go through the ram_ptr found above, 11788f5db641SRichard Henderson * but of course reads to I/O must go through MMIO. 11798f5db641SRichard Henderson */ 11808f5db641SRichard Henderson write_address |= TLB_MMIO; 11818f5db641SRichard Henderson if (!is_romd) { 11828f5db641SRichard Henderson address = write_address; 11838f5db641SRichard Henderson } 11848f5db641SRichard Henderson } 11858f5db641SRichard Henderson 118650b107c5SRichard Henderson wp_flags = cpu_watchpoint_address_matches(cpu, vaddr_page, 118750b107c5SRichard Henderson TARGET_PAGE_SIZE); 1188d9bb58e5SYang Zhong 1189383beda9SRichard Henderson index = tlb_index(env, mmu_idx, vaddr_page); 1190383beda9SRichard Henderson te = tlb_entry(env, mmu_idx, vaddr_page); 1191d9bb58e5SYang Zhong 119268fea038SRichard Henderson /* 119371aec354SEmilio G. Cota * Hold the TLB lock for the rest of the function. We could acquire/release 119471aec354SEmilio G. Cota * the lock several times in the function, but it is faster to amortize the 119571aec354SEmilio G. Cota * acquisition cost by acquiring it just once. Note that this leads to 119671aec354SEmilio G. Cota * a longer critical section, but this is not a concern since the TLB lock 119771aec354SEmilio G. Cota * is unlikely to be contended. 119871aec354SEmilio G. Cota */ 1199a40ec84eSRichard Henderson qemu_spin_lock(&tlb->c.lock); 120071aec354SEmilio G. Cota 12013d1523ceSRichard Henderson /* Note that the tlb is no longer clean. */ 1202a40ec84eSRichard Henderson tlb->c.dirty |= 1 << mmu_idx; 12033d1523ceSRichard Henderson 120471aec354SEmilio G. Cota /* Make sure there's no cached translation for the new page. */ 120571aec354SEmilio G. Cota tlb_flush_vtlb_page_locked(env, mmu_idx, vaddr_page); 120671aec354SEmilio G. Cota 120771aec354SEmilio G. Cota /* 120868fea038SRichard Henderson * Only evict the old entry to the victim tlb if it's for a 120968fea038SRichard Henderson * different page; otherwise just overwrite the stale data. 121068fea038SRichard Henderson */ 12113cea94bbSEmilio G. Cota if (!tlb_hit_page_anyprot(te, vaddr_page) && !tlb_entry_is_empty(te)) { 1212a40ec84eSRichard Henderson unsigned vidx = desc->vindex++ % CPU_VTLB_SIZE; 1213a40ec84eSRichard Henderson CPUTLBEntry *tv = &desc->vtable[vidx]; 121468fea038SRichard Henderson 121568fea038SRichard Henderson /* Evict the old entry into the victim tlb. */ 121671aec354SEmilio G. Cota copy_tlb_helper_locked(tv, te); 1217a40ec84eSRichard Henderson desc->viotlb[vidx] = desc->iotlb[index]; 121886e1eff8SEmilio G. 
Cota tlb_n_used_entries_dec(env, mmu_idx); 121968fea038SRichard Henderson } 1220d9bb58e5SYang Zhong 1221d9bb58e5SYang Zhong /* refill the tlb */ 1222ace41090SPeter Maydell /* 1223ace41090SPeter Maydell * At this point iotlb contains a physical section number in the lower 1224ace41090SPeter Maydell * TARGET_PAGE_BITS, and either 12258f5db641SRichard Henderson * + the ram_addr_t of the page base of the target RAM (RAM) 12268f5db641SRichard Henderson * + the offset within section->mr of the page base (I/O, ROMD) 122755df6fcfSPeter Maydell * We subtract the vaddr_page (which is page aligned and thus won't 1228ace41090SPeter Maydell * disturb the low bits) to give an offset which can be added to the 1229ace41090SPeter Maydell * (non-page-aligned) vaddr of the eventual memory access to get 1230ace41090SPeter Maydell * the MemoryRegion offset for the access. Note that the vaddr we 1231ace41090SPeter Maydell * subtract here is that of the page base, and not the same as the 1232ace41090SPeter Maydell * vaddr we add back in io_readx()/io_writex()/get_page_addr_code(). 1233ace41090SPeter Maydell */ 1234a40ec84eSRichard Henderson desc->iotlb[index].addr = iotlb - vaddr_page; 1235a40ec84eSRichard Henderson desc->iotlb[index].attrs = attrs; 1236d9bb58e5SYang Zhong 1237d9bb58e5SYang Zhong /* Now calculate the new entry */ 123855df6fcfSPeter Maydell tn.addend = addend - vaddr_page; 1239d9bb58e5SYang Zhong if (prot & PAGE_READ) { 1240d9bb58e5SYang Zhong tn.addr_read = address; 124150b107c5SRichard Henderson if (wp_flags & BP_MEM_READ) { 124250b107c5SRichard Henderson tn.addr_read |= TLB_WATCHPOINT; 124350b107c5SRichard Henderson } 1244d9bb58e5SYang Zhong } else { 1245d9bb58e5SYang Zhong tn.addr_read = -1; 1246d9bb58e5SYang Zhong } 1247d9bb58e5SYang Zhong 1248d9bb58e5SYang Zhong if (prot & PAGE_EXEC) { 12498f5db641SRichard Henderson tn.addr_code = address; 1250d9bb58e5SYang Zhong } else { 1251d9bb58e5SYang Zhong tn.addr_code = -1; 1252d9bb58e5SYang Zhong } 1253d9bb58e5SYang Zhong 1254d9bb58e5SYang Zhong tn.addr_write = -1; 1255d9bb58e5SYang Zhong if (prot & PAGE_WRITE) { 12568f5db641SRichard Henderson tn.addr_write = write_address; 1257f52bfb12SDavid Hildenbrand if (prot & PAGE_WRITE_INV) { 1258f52bfb12SDavid Hildenbrand tn.addr_write |= TLB_INVALID_MASK; 1259f52bfb12SDavid Hildenbrand } 126050b107c5SRichard Henderson if (wp_flags & BP_MEM_WRITE) { 126150b107c5SRichard Henderson tn.addr_write |= TLB_WATCHPOINT; 126250b107c5SRichard Henderson } 1263d9bb58e5SYang Zhong } 1264d9bb58e5SYang Zhong 126571aec354SEmilio G. Cota copy_tlb_helper_locked(te, &tn); 126686e1eff8SEmilio G. Cota tlb_n_used_entries_inc(env, mmu_idx); 1267a40ec84eSRichard Henderson qemu_spin_unlock(&tlb->c.lock); 1268d9bb58e5SYang Zhong } 1269d9bb58e5SYang Zhong 1270d9bb58e5SYang Zhong /* Add a new TLB entry, but without specifying the memory 1271d9bb58e5SYang Zhong * transaction attributes to be used. 
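 * This is a convenience wrapper, equivalent to calling
 * tlb_set_page_with_attrs() with MEMTXATTRS_UNSPECIFIED.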
1272d9bb58e5SYang Zhong */ 1273d9bb58e5SYang Zhong void tlb_set_page(CPUState *cpu, target_ulong vaddr, 1274d9bb58e5SYang Zhong hwaddr paddr, int prot, 1275d9bb58e5SYang Zhong int mmu_idx, target_ulong size) 1276d9bb58e5SYang Zhong { 1277d9bb58e5SYang Zhong tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED, 1278d9bb58e5SYang Zhong prot, mmu_idx, size); 1279d9bb58e5SYang Zhong } 1280d9bb58e5SYang Zhong 1281d9bb58e5SYang Zhong static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr) 1282d9bb58e5SYang Zhong { 1283d9bb58e5SYang Zhong ram_addr_t ram_addr; 1284d9bb58e5SYang Zhong 1285d9bb58e5SYang Zhong ram_addr = qemu_ram_addr_from_host(ptr); 1286d9bb58e5SYang Zhong if (ram_addr == RAM_ADDR_INVALID) { 1287d9bb58e5SYang Zhong error_report("Bad ram pointer %p", ptr); 1288d9bb58e5SYang Zhong abort(); 1289d9bb58e5SYang Zhong } 1290d9bb58e5SYang Zhong return ram_addr; 1291d9bb58e5SYang Zhong } 1292d9bb58e5SYang Zhong 1293c319dc13SRichard Henderson /* 1294c319dc13SRichard Henderson * Note: tlb_fill() can trigger a resize of the TLB. This means that all of the 1295c319dc13SRichard Henderson * caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must 1296c319dc13SRichard Henderson * be discarded and looked up again (e.g. via tlb_entry()). 1297c319dc13SRichard Henderson */ 1298c319dc13SRichard Henderson static void tlb_fill(CPUState *cpu, target_ulong addr, int size, 1299c319dc13SRichard Henderson MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) 1300c319dc13SRichard Henderson { 1301c319dc13SRichard Henderson CPUClass *cc = CPU_GET_CLASS(cpu); 1302c319dc13SRichard Henderson bool ok; 1303c319dc13SRichard Henderson 1304c319dc13SRichard Henderson /* 1305c319dc13SRichard Henderson * This is not a probe, so only valid return is success; failure 1306c319dc13SRichard Henderson * should result in exception + longjmp to the cpu loop. 
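 * Contrast probe_access_internal() below, which forwards its nonfault
 * flag to the same hook and turns a failed non-faulting fill into a
 * TLB_INVALID_MASK return rather than an exception.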
1307c319dc13SRichard Henderson */ 1308*e124536fSEduardo Habkost ok = cc->tcg_ops.tlb_fill(cpu, addr, size, 1309*e124536fSEduardo Habkost access_type, mmu_idx, false, retaddr); 1310c319dc13SRichard Henderson assert(ok); 1311c319dc13SRichard Henderson } 1312c319dc13SRichard Henderson 1313d9bb58e5SYang Zhong static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry, 1314f1be3696SRichard Henderson int mmu_idx, target_ulong addr, uintptr_t retaddr, 1315be5c4787STony Nguyen MMUAccessType access_type, MemOp op) 1316d9bb58e5SYang Zhong { 131729a0af61SRichard Henderson CPUState *cpu = env_cpu(env); 13182d54f194SPeter Maydell hwaddr mr_offset; 13192d54f194SPeter Maydell MemoryRegionSection *section; 13202d54f194SPeter Maydell MemoryRegion *mr; 1321d9bb58e5SYang Zhong uint64_t val; 1322d9bb58e5SYang Zhong bool locked = false; 132304e3aabdSPeter Maydell MemTxResult r; 1324d9bb58e5SYang Zhong 13252d54f194SPeter Maydell section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs); 13262d54f194SPeter Maydell mr = section->mr; 13272d54f194SPeter Maydell mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr; 1328d9bb58e5SYang Zhong cpu->mem_io_pc = retaddr; 132908565552SRichard Henderson if (!cpu->can_do_io) { 1330d9bb58e5SYang Zhong cpu_io_recompile(cpu, retaddr); 1331d9bb58e5SYang Zhong } 1332d9bb58e5SYang Zhong 133341744954SPhilippe Mathieu-Daudé if (!qemu_mutex_iothread_locked()) { 1334d9bb58e5SYang Zhong qemu_mutex_lock_iothread(); 1335d9bb58e5SYang Zhong locked = true; 1336d9bb58e5SYang Zhong } 1337be5c4787STony Nguyen r = memory_region_dispatch_read(mr, mr_offset, &val, op, iotlbentry->attrs); 133804e3aabdSPeter Maydell if (r != MEMTX_OK) { 13392d54f194SPeter Maydell hwaddr physaddr = mr_offset + 13402d54f194SPeter Maydell section->offset_within_address_space - 13412d54f194SPeter Maydell section->offset_within_region; 13422d54f194SPeter Maydell 1343be5c4787STony Nguyen cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), access_type, 134404e3aabdSPeter Maydell mmu_idx, iotlbentry->attrs, r, retaddr); 134504e3aabdSPeter Maydell } 1346d9bb58e5SYang Zhong if (locked) { 1347d9bb58e5SYang Zhong qemu_mutex_unlock_iothread(); 1348d9bb58e5SYang Zhong } 1349d9bb58e5SYang Zhong 1350d9bb58e5SYang Zhong return val; 1351d9bb58e5SYang Zhong } 1352d9bb58e5SYang Zhong 13532f3a57eeSAlex Bennée /* 13542f3a57eeSAlex Bennée * Save a potentially trashed IOTLB entry for later lookup by plugin. 1355570ef309SAlex Bennée * This is read by tlb_plugin_lookup if the iotlb entry doesn't match 1356570ef309SAlex Bennée * because of the side effect of io_writex changing memory layout. 
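 * Only a single entry per vCPU is kept; that is sufficient because
 * the consumer is a mem callback, which runs on the same thread as
 * the access that saved the entry.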
13572f3a57eeSAlex Bennée */ 13582f3a57eeSAlex Bennée static void save_iotlb_data(CPUState *cs, hwaddr addr, 13592f3a57eeSAlex Bennée MemoryRegionSection *section, hwaddr mr_offset) 13602f3a57eeSAlex Bennée { 13612f3a57eeSAlex Bennée #ifdef CONFIG_PLUGIN 13622f3a57eeSAlex Bennée SavedIOTLB *saved = &cs->saved_iotlb; 13632f3a57eeSAlex Bennée saved->addr = addr; 13642f3a57eeSAlex Bennée saved->section = section; 13652f3a57eeSAlex Bennée saved->mr_offset = mr_offset; 13662f3a57eeSAlex Bennée #endif 13672f3a57eeSAlex Bennée } 13682f3a57eeSAlex Bennée 1369d9bb58e5SYang Zhong static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry, 1370f1be3696SRichard Henderson int mmu_idx, uint64_t val, target_ulong addr, 1371be5c4787STony Nguyen uintptr_t retaddr, MemOp op) 1372d9bb58e5SYang Zhong { 137329a0af61SRichard Henderson CPUState *cpu = env_cpu(env); 13742d54f194SPeter Maydell hwaddr mr_offset; 13752d54f194SPeter Maydell MemoryRegionSection *section; 13762d54f194SPeter Maydell MemoryRegion *mr; 1377d9bb58e5SYang Zhong bool locked = false; 137804e3aabdSPeter Maydell MemTxResult r; 1379d9bb58e5SYang Zhong 13802d54f194SPeter Maydell section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs); 13812d54f194SPeter Maydell mr = section->mr; 13822d54f194SPeter Maydell mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr; 138308565552SRichard Henderson if (!cpu->can_do_io) { 1384d9bb58e5SYang Zhong cpu_io_recompile(cpu, retaddr); 1385d9bb58e5SYang Zhong } 1386d9bb58e5SYang Zhong cpu->mem_io_pc = retaddr; 1387d9bb58e5SYang Zhong 13882f3a57eeSAlex Bennée /* 13892f3a57eeSAlex Bennée * The memory_region_dispatch may trigger a flush/resize 13902f3a57eeSAlex Bennée * so for plugins we save the iotlb_data just in case. 13912f3a57eeSAlex Bennée */ 13922f3a57eeSAlex Bennée save_iotlb_data(cpu, iotlbentry->addr, section, mr_offset); 13932f3a57eeSAlex Bennée 139441744954SPhilippe Mathieu-Daudé if (!qemu_mutex_iothread_locked()) { 1395d9bb58e5SYang Zhong qemu_mutex_lock_iothread(); 1396d9bb58e5SYang Zhong locked = true; 1397d9bb58e5SYang Zhong } 1398be5c4787STony Nguyen r = memory_region_dispatch_write(mr, mr_offset, val, op, iotlbentry->attrs); 139904e3aabdSPeter Maydell if (r != MEMTX_OK) { 14002d54f194SPeter Maydell hwaddr physaddr = mr_offset + 14012d54f194SPeter Maydell section->offset_within_address_space - 14022d54f194SPeter Maydell section->offset_within_region; 14032d54f194SPeter Maydell 1404be5c4787STony Nguyen cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), 1405be5c4787STony Nguyen MMU_DATA_STORE, mmu_idx, iotlbentry->attrs, r, 1406be5c4787STony Nguyen retaddr); 140704e3aabdSPeter Maydell } 1408d9bb58e5SYang Zhong if (locked) { 1409d9bb58e5SYang Zhong qemu_mutex_unlock_iothread(); 1410d9bb58e5SYang Zhong } 1411d9bb58e5SYang Zhong } 1412d9bb58e5SYang Zhong 14134811e909SRichard Henderson static inline target_ulong tlb_read_ofs(CPUTLBEntry *entry, size_t ofs) 14144811e909SRichard Henderson { 14154811e909SRichard Henderson #if TCG_OVERSIZED_GUEST 14164811e909SRichard Henderson return *(target_ulong *)((uintptr_t)entry + ofs); 14174811e909SRichard Henderson #else 1418d73415a3SStefan Hajnoczi /* ofs might correspond to .addr_write, so use qatomic_read */ 1419d73415a3SStefan Hajnoczi return qatomic_read((target_ulong *)((uintptr_t)entry + ofs)); 14204811e909SRichard Henderson #endif 14214811e909SRichard Henderson } 14224811e909SRichard Henderson 1423d9bb58e5SYang Zhong /* Return true if ADDR is present in the victim tlb, and has been copied 1424d9bb58e5SYang Zhong back to the main 
tlb. */ 1425d9bb58e5SYang Zhong static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index, 1426d9bb58e5SYang Zhong size_t elt_ofs, target_ulong page) 1427d9bb58e5SYang Zhong { 1428d9bb58e5SYang Zhong size_t vidx; 142971aec354SEmilio G. Cota 143029a0af61SRichard Henderson assert_cpu_is_self(env_cpu(env)); 1431d9bb58e5SYang Zhong for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) { 1432a40ec84eSRichard Henderson CPUTLBEntry *vtlb = &env_tlb(env)->d[mmu_idx].vtable[vidx]; 1433a40ec84eSRichard Henderson target_ulong cmp; 1434a40ec84eSRichard Henderson 1435d73415a3SStefan Hajnoczi /* elt_ofs might correspond to .addr_write, so use qatomic_read */ 1436a40ec84eSRichard Henderson #if TCG_OVERSIZED_GUEST 1437a40ec84eSRichard Henderson cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs); 1438a40ec84eSRichard Henderson #else 1439d73415a3SStefan Hajnoczi cmp = qatomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs)); 1440a40ec84eSRichard Henderson #endif 1441d9bb58e5SYang Zhong 1442d9bb58e5SYang Zhong if (cmp == page) { 1443d9bb58e5SYang Zhong /* Found entry in victim tlb, swap tlb and iotlb. */ 1444a40ec84eSRichard Henderson CPUTLBEntry tmptlb, *tlb = &env_tlb(env)->f[mmu_idx].table[index]; 1445d9bb58e5SYang Zhong 1446a40ec84eSRichard Henderson qemu_spin_lock(&env_tlb(env)->c.lock); 144771aec354SEmilio G. Cota copy_tlb_helper_locked(&tmptlb, tlb); 144871aec354SEmilio G. Cota copy_tlb_helper_locked(tlb, vtlb); 144971aec354SEmilio G. Cota copy_tlb_helper_locked(vtlb, &tmptlb); 1450a40ec84eSRichard Henderson qemu_spin_unlock(&env_tlb(env)->c.lock); 1451d9bb58e5SYang Zhong 1452a40ec84eSRichard Henderson CPUIOTLBEntry tmpio, *io = &env_tlb(env)->d[mmu_idx].iotlb[index]; 1453a40ec84eSRichard Henderson CPUIOTLBEntry *vio = &env_tlb(env)->d[mmu_idx].viotlb[vidx]; 1454d9bb58e5SYang Zhong tmpio = *io; *io = *vio; *vio = tmpio; 1455d9bb58e5SYang Zhong return true; 1456d9bb58e5SYang Zhong } 1457d9bb58e5SYang Zhong } 1458d9bb58e5SYang Zhong return false; 1459d9bb58e5SYang Zhong } 1460d9bb58e5SYang Zhong 1461d9bb58e5SYang Zhong /* Macro to call the above, with local variables from the use context. */ 1462d9bb58e5SYang Zhong #define VICTIM_TLB_HIT(TY, ADDR) \ 1463d9bb58e5SYang Zhong victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \ 1464d9bb58e5SYang Zhong (ADDR) & TARGET_PAGE_MASK) 1465d9bb58e5SYang Zhong 146630d7e098SRichard Henderson /* 146730d7e098SRichard Henderson * Return a ram_addr_t for the virtual address for execution. 146830d7e098SRichard Henderson * 146930d7e098SRichard Henderson * Return -1 if we can't translate and execute from an entire page 147030d7e098SRichard Henderson * of RAM. This will force us to execute by loading and translating 147130d7e098SRichard Henderson * one insn at a time, without caching. 147230d7e098SRichard Henderson * 147330d7e098SRichard Henderson * NOTE: This function will trigger an exception if the page is 147430d7e098SRichard Henderson * not executable. 1475f2553f04SKONRAD Frederic */ 14764b2190daSEmilio G. Cota tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr, 14774b2190daSEmilio G. 
Cota void **hostp) 1478f2553f04SKONRAD Frederic { 1479383beda9SRichard Henderson uintptr_t mmu_idx = cpu_mmu_index(env, true); 1480383beda9SRichard Henderson uintptr_t index = tlb_index(env, mmu_idx, addr); 1481383beda9SRichard Henderson CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); 1482f2553f04SKONRAD Frederic void *p; 1483f2553f04SKONRAD Frederic 1484383beda9SRichard Henderson if (unlikely(!tlb_hit(entry->addr_code, addr))) { 1485b493ccf1SPeter Maydell if (!VICTIM_TLB_HIT(addr_code, addr)) { 148629a0af61SRichard Henderson tlb_fill(env_cpu(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0); 14876d967cb8SEmilio G. Cota index = tlb_index(env, mmu_idx, addr); 14886d967cb8SEmilio G. Cota entry = tlb_entry(env, mmu_idx, addr); 148930d7e098SRichard Henderson 149030d7e098SRichard Henderson if (unlikely(entry->addr_code & TLB_INVALID_MASK)) { 149130d7e098SRichard Henderson /* 149230d7e098SRichard Henderson * The MMU protection covers a smaller range than a target 149330d7e098SRichard Henderson * page, so we must redo the MMU check for every insn. 149430d7e098SRichard Henderson */ 149530d7e098SRichard Henderson return -1; 149630d7e098SRichard Henderson } 149771b9a453SKONRAD Frederic } 1498383beda9SRichard Henderson assert(tlb_hit(entry->addr_code, addr)); 1499f2553f04SKONRAD Frederic } 150055df6fcfSPeter Maydell 150130d7e098SRichard Henderson if (unlikely(entry->addr_code & TLB_MMIO)) { 150230d7e098SRichard Henderson /* The region is not backed by RAM. */ 15034b2190daSEmilio G. Cota if (hostp) { 15044b2190daSEmilio G. Cota *hostp = NULL; 15054b2190daSEmilio G. Cota } 150620cb6ae4SPeter Maydell return -1; 150755df6fcfSPeter Maydell } 150855df6fcfSPeter Maydell 1509383beda9SRichard Henderson p = (void *)((uintptr_t)addr + entry->addend); 15104b2190daSEmilio G. Cota if (hostp) { 15114b2190daSEmilio G. Cota *hostp = p; 15124b2190daSEmilio G. Cota } 1513f2553f04SKONRAD Frederic return qemu_ram_addr_from_host_nofail(p); 1514f2553f04SKONRAD Frederic } 1515f2553f04SKONRAD Frederic 15164b2190daSEmilio G. Cota tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr) 15174b2190daSEmilio G. Cota { 15184b2190daSEmilio G. Cota return get_page_addr_code_hostp(env, addr, NULL); 15194b2190daSEmilio G. Cota } 15204b2190daSEmilio G. Cota 1521707526adSRichard Henderson static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size, 1522707526adSRichard Henderson CPUIOTLBEntry *iotlbentry, uintptr_t retaddr) 1523707526adSRichard Henderson { 1524707526adSRichard Henderson ram_addr_t ram_addr = mem_vaddr + iotlbentry->addr; 1525707526adSRichard Henderson 1526707526adSRichard Henderson trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size); 1527707526adSRichard Henderson 1528707526adSRichard Henderson if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) { 1529707526adSRichard Henderson struct page_collection *pages 1530707526adSRichard Henderson = page_collection_lock(ram_addr, ram_addr + size); 15315a7c27bbSRichard Henderson tb_invalidate_phys_page_fast(pages, ram_addr, size, retaddr); 1532707526adSRichard Henderson page_collection_unlock(pages); 1533707526adSRichard Henderson } 1534707526adSRichard Henderson 1535707526adSRichard Henderson /* 1536707526adSRichard Henderson * Set both VGA and migration bits for simplicity and to remove 1537707526adSRichard Henderson * the notdirty callback faster. 
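 * DIRTY_CLIENTS_NOCODE is exactly those two clients, i.e. all of
 * them except DIRTY_MEMORY_CODE, which was handled above.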
1538707526adSRichard Henderson */ 1539707526adSRichard Henderson cpu_physical_memory_set_dirty_range(ram_addr, size, DIRTY_CLIENTS_NOCODE); 1540707526adSRichard Henderson 1541707526adSRichard Henderson /* We remove the notdirty callback only if the code has been flushed. */ 1542707526adSRichard Henderson if (!cpu_physical_memory_is_clean(ram_addr)) { 1543707526adSRichard Henderson trace_memory_notdirty_set_dirty(mem_vaddr); 1544707526adSRichard Henderson tlb_set_dirty(cpu, mem_vaddr); 1545707526adSRichard Henderson } 1546707526adSRichard Henderson } 1547707526adSRichard Henderson 1548069cfe77SRichard Henderson static int probe_access_internal(CPUArchState *env, target_ulong addr, 1549069cfe77SRichard Henderson int fault_size, MMUAccessType access_type, 1550069cfe77SRichard Henderson int mmu_idx, bool nonfault, 1551069cfe77SRichard Henderson void **phost, uintptr_t retaddr) 1552d9bb58e5SYang Zhong { 1553383beda9SRichard Henderson uintptr_t index = tlb_index(env, mmu_idx, addr); 1554383beda9SRichard Henderson CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); 1555069cfe77SRichard Henderson target_ulong tlb_addr, page_addr; 1556c25c283dSDavid Hildenbrand size_t elt_ofs; 1557069cfe77SRichard Henderson int flags; 1558ca86cf32SDavid Hildenbrand 1559c25c283dSDavid Hildenbrand switch (access_type) { 1560c25c283dSDavid Hildenbrand case MMU_DATA_LOAD: 1561c25c283dSDavid Hildenbrand elt_ofs = offsetof(CPUTLBEntry, addr_read); 1562c25c283dSDavid Hildenbrand break; 1563c25c283dSDavid Hildenbrand case MMU_DATA_STORE: 1564c25c283dSDavid Hildenbrand elt_ofs = offsetof(CPUTLBEntry, addr_write); 1565c25c283dSDavid Hildenbrand break; 1566c25c283dSDavid Hildenbrand case MMU_INST_FETCH: 1567c25c283dSDavid Hildenbrand elt_ofs = offsetof(CPUTLBEntry, addr_code); 1568c25c283dSDavid Hildenbrand break; 1569c25c283dSDavid Hildenbrand default: 1570c25c283dSDavid Hildenbrand g_assert_not_reached(); 1571c25c283dSDavid Hildenbrand } 1572c25c283dSDavid Hildenbrand tlb_addr = tlb_read_ofs(entry, elt_ofs); 1573c25c283dSDavid Hildenbrand 1574069cfe77SRichard Henderson page_addr = addr & TARGET_PAGE_MASK; 1575069cfe77SRichard Henderson if (!tlb_hit_page(tlb_addr, page_addr)) { 1576069cfe77SRichard Henderson if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, page_addr)) { 1577069cfe77SRichard Henderson CPUState *cs = env_cpu(env); 1578069cfe77SRichard Henderson CPUClass *cc = CPU_GET_CLASS(cs); 1579069cfe77SRichard Henderson 1580*e124536fSEduardo Habkost if (!cc->tcg_ops.tlb_fill(cs, addr, fault_size, access_type, 1581069cfe77SRichard Henderson mmu_idx, nonfault, retaddr)) { 1582069cfe77SRichard Henderson /* Non-faulting page table read failed. */ 1583069cfe77SRichard Henderson *phost = NULL; 1584069cfe77SRichard Henderson return TLB_INVALID_MASK; 1585069cfe77SRichard Henderson } 1586069cfe77SRichard Henderson 158703a98189SDavid Hildenbrand /* TLB resize via tlb_fill may have moved the entry. */ 158803a98189SDavid Hildenbrand entry = tlb_entry(env, mmu_idx, addr); 1589d9bb58e5SYang Zhong } 1590c25c283dSDavid Hildenbrand tlb_addr = tlb_read_ofs(entry, elt_ofs); 159103a98189SDavid Hildenbrand } 1592069cfe77SRichard Henderson flags = tlb_addr & TLB_FLAGS_MASK; 159303a98189SDavid Hildenbrand 1594069cfe77SRichard Henderson /* Fold all "mmio-like" bits into TLB_MMIO. This is not RAM. 
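       Any flag other than TLB_WATCHPOINT and TLB_NOTDIRTY (e.g.
       TLB_MMIO, TLB_BSWAP or TLB_DISCARD_WRITE) means there is no
       usable host address to hand back.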
*/ 1595069cfe77SRichard Henderson if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY))) { 1596069cfe77SRichard Henderson *phost = NULL; 1597069cfe77SRichard Henderson return TLB_MMIO; 1598fef39ccdSDavid Hildenbrand } 1599fef39ccdSDavid Hildenbrand 1600069cfe77SRichard Henderson /* Everything else is RAM. */ 1601069cfe77SRichard Henderson *phost = (void *)((uintptr_t)addr + entry->addend); 1602069cfe77SRichard Henderson return flags; 1603069cfe77SRichard Henderson } 1604069cfe77SRichard Henderson 1605069cfe77SRichard Henderson int probe_access_flags(CPUArchState *env, target_ulong addr, 1606069cfe77SRichard Henderson MMUAccessType access_type, int mmu_idx, 1607069cfe77SRichard Henderson bool nonfault, void **phost, uintptr_t retaddr) 1608069cfe77SRichard Henderson { 1609069cfe77SRichard Henderson int flags; 1610069cfe77SRichard Henderson 1611069cfe77SRichard Henderson flags = probe_access_internal(env, addr, 0, access_type, mmu_idx, 1612069cfe77SRichard Henderson nonfault, phost, retaddr); 1613069cfe77SRichard Henderson 1614069cfe77SRichard Henderson /* Handle clean RAM pages. */ 1615069cfe77SRichard Henderson if (unlikely(flags & TLB_NOTDIRTY)) { 1616069cfe77SRichard Henderson uintptr_t index = tlb_index(env, mmu_idx, addr); 161773bc0bd4SRichard Henderson CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index]; 161873bc0bd4SRichard Henderson 1619069cfe77SRichard Henderson notdirty_write(env_cpu(env), addr, 1, iotlbentry, retaddr); 1620069cfe77SRichard Henderson flags &= ~TLB_NOTDIRTY; 1621069cfe77SRichard Henderson } 1622069cfe77SRichard Henderson 1623069cfe77SRichard Henderson return flags; 1624069cfe77SRichard Henderson } 1625069cfe77SRichard Henderson 1626069cfe77SRichard Henderson void *probe_access(CPUArchState *env, target_ulong addr, int size, 1627069cfe77SRichard Henderson MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) 1628069cfe77SRichard Henderson { 1629069cfe77SRichard Henderson void *host; 1630069cfe77SRichard Henderson int flags; 1631069cfe77SRichard Henderson 1632069cfe77SRichard Henderson g_assert(-(addr | TARGET_PAGE_MASK) >= size); 1633069cfe77SRichard Henderson 1634069cfe77SRichard Henderson flags = probe_access_internal(env, addr, size, access_type, mmu_idx, 1635069cfe77SRichard Henderson false, &host, retaddr); 1636069cfe77SRichard Henderson 1637069cfe77SRichard Henderson /* Per the interface, size == 0 merely faults the access. */ 1638069cfe77SRichard Henderson if (size == 0) { 163973bc0bd4SRichard Henderson return NULL; 164073bc0bd4SRichard Henderson } 164173bc0bd4SRichard Henderson 1642069cfe77SRichard Henderson if (unlikely(flags & (TLB_NOTDIRTY | TLB_WATCHPOINT))) { 1643069cfe77SRichard Henderson uintptr_t index = tlb_index(env, mmu_idx, addr); 1644069cfe77SRichard Henderson CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index]; 1645069cfe77SRichard Henderson 164603a98189SDavid Hildenbrand /* Handle watchpoints. */ 1647069cfe77SRichard Henderson if (flags & TLB_WATCHPOINT) { 1648069cfe77SRichard Henderson int wp_access = (access_type == MMU_DATA_STORE 1649069cfe77SRichard Henderson ? BP_MEM_WRITE : BP_MEM_READ); 165003a98189SDavid Hildenbrand cpu_check_watchpoint(env_cpu(env), addr, size, 165173bc0bd4SRichard Henderson iotlbentry->attrs, wp_access, retaddr); 1652d9bb58e5SYang Zhong } 1653fef39ccdSDavid Hildenbrand 165473bc0bd4SRichard Henderson /* Handle clean RAM pages. 
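       Run the notdirty bookkeeping now, since a store through the
       returned host pointer will bypass any further checks.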
*/ 1655069cfe77SRichard Henderson     if (flags & TLB_NOTDIRTY) { 1656069cfe77SRichard Henderson         notdirty_write(env_cpu(env), addr, 1, iotlbentry, retaddr); 165773bc0bd4SRichard Henderson     } 1658fef39ccdSDavid Hildenbrand } 1659fef39ccdSDavid Hildenbrand  1660069cfe77SRichard Henderson     return host; 1661d9bb58e5SYang Zhong } 1662d9bb58e5SYang Zhong  16634811e909SRichard Henderson void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr, 16644811e909SRichard Henderson                         MMUAccessType access_type, int mmu_idx) 16654811e909SRichard Henderson { 1666069cfe77SRichard Henderson     void *host; 1667069cfe77SRichard Henderson     int flags; 16684811e909SRichard Henderson  1669069cfe77SRichard Henderson     flags = probe_access_internal(env, addr, 0, access_type, 1670069cfe77SRichard Henderson                                   mmu_idx, true, &host, 0); 1671069cfe77SRichard Henderson  1672069cfe77SRichard Henderson     /* No combination of flags is expected by the caller. */ 1673069cfe77SRichard Henderson     return flags ? NULL : host; 16744811e909SRichard Henderson } 16754811e909SRichard Henderson  1676235537faSAlex Bennée #ifdef CONFIG_PLUGIN 1677235537faSAlex Bennée /* 1678235537faSAlex Bennée  * Perform a TLB lookup and populate the qemu_plugin_hwaddr structure. 1679235537faSAlex Bennée  * This should be a hot path, as we will have just looked this address up 1680235537faSAlex Bennée  * in the softmmu lookup code (or helper). We don't handle re-fills or 1681235537faSAlex Bennée  * checking the victim table. This is purely informational. 1682235537faSAlex Bennée  * 16832f3a57eeSAlex Bennée  * This almost never fails, as the memory access being instrumented 16842f3a57eeSAlex Bennée  * should have just filled the TLB. The one corner case is io_writex, 16852f3a57eeSAlex Bennée  * which can cause TLB flushes and potential resizing of the TLBs, 1686570ef309SAlex Bennée  * losing the information we need. In those cases we need to recover 1687570ef309SAlex Bennée  * data from a copy of the iotlbentry. As long as this always occurs 1688570ef309SAlex Bennée  * from the same thread (which a mem callback will be), this is safe. 1689235537faSAlex Bennée  */ 1690235537faSAlex Bennée  1691235537faSAlex Bennée bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr, int mmu_idx, 1692235537faSAlex Bennée                        bool is_store, struct qemu_plugin_hwaddr *data) 1693235537faSAlex Bennée { 1694235537faSAlex Bennée     CPUArchState *env = cpu->env_ptr; 1695235537faSAlex Bennée     CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr); 1696235537faSAlex Bennée     uintptr_t index = tlb_index(env, mmu_idx, addr); 1697235537faSAlex Bennée     target_ulong tlb_addr = is_store ?
tlb_addr_write(tlbe) : tlbe->addr_read; 1698235537faSAlex Bennée 1699235537faSAlex Bennée if (likely(tlb_hit(tlb_addr, addr))) { 1700235537faSAlex Bennée /* We must have an iotlb entry for MMIO */ 1701235537faSAlex Bennée if (tlb_addr & TLB_MMIO) { 1702235537faSAlex Bennée CPUIOTLBEntry *iotlbentry; 1703235537faSAlex Bennée iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index]; 1704235537faSAlex Bennée data->is_io = true; 1705235537faSAlex Bennée data->v.io.section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs); 1706235537faSAlex Bennée data->v.io.offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr; 1707235537faSAlex Bennée } else { 1708235537faSAlex Bennée data->is_io = false; 1709235537faSAlex Bennée data->v.ram.hostaddr = addr + tlbe->addend; 1710235537faSAlex Bennée } 1711235537faSAlex Bennée return true; 17122f3a57eeSAlex Bennée } else { 17132f3a57eeSAlex Bennée SavedIOTLB *saved = &cpu->saved_iotlb; 17142f3a57eeSAlex Bennée data->is_io = true; 17152f3a57eeSAlex Bennée data->v.io.section = saved->section; 17162f3a57eeSAlex Bennée data->v.io.offset = saved->mr_offset; 17172f3a57eeSAlex Bennée return true; 1718235537faSAlex Bennée } 1719235537faSAlex Bennée } 1720235537faSAlex Bennée 1721235537faSAlex Bennée #endif 1722235537faSAlex Bennée 1723d9bb58e5SYang Zhong /* Probe for a read-modify-write atomic operation. Do not allow unaligned 1724d9bb58e5SYang Zhong * operations, or io operations to proceed. Return the host address. */ 1725d9bb58e5SYang Zhong static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, 1726707526adSRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 1727d9bb58e5SYang Zhong { 1728d9bb58e5SYang Zhong size_t mmu_idx = get_mmuidx(oi); 1729383beda9SRichard Henderson uintptr_t index = tlb_index(env, mmu_idx, addr); 1730383beda9SRichard Henderson CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr); 1731403f290cSEmilio G. Cota target_ulong tlb_addr = tlb_addr_write(tlbe); 173214776ab5STony Nguyen MemOp mop = get_memop(oi); 1733d9bb58e5SYang Zhong int a_bits = get_alignment_bits(mop); 1734d9bb58e5SYang Zhong int s_bits = mop & MO_SIZE; 173534d49937SPeter Maydell void *hostaddr; 1736d9bb58e5SYang Zhong 1737d9bb58e5SYang Zhong /* Adjust the given return address. */ 1738d9bb58e5SYang Zhong retaddr -= GETPC_ADJ; 1739d9bb58e5SYang Zhong 1740d9bb58e5SYang Zhong /* Enforce guest required alignment. */ 1741d9bb58e5SYang Zhong if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) { 1742d9bb58e5SYang Zhong /* ??? Maybe indicate atomic op to cpu_unaligned_access */ 174329a0af61SRichard Henderson cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE, 1744d9bb58e5SYang Zhong mmu_idx, retaddr); 1745d9bb58e5SYang Zhong } 1746d9bb58e5SYang Zhong 1747d9bb58e5SYang Zhong /* Enforce qemu required alignment. */ 1748d9bb58e5SYang Zhong if (unlikely(addr & ((1 << s_bits) - 1))) { 1749d9bb58e5SYang Zhong /* We get here if guest alignment was not requested, 1750d9bb58e5SYang Zhong or was not enforced by cpu_unaligned_access above. 1751d9bb58e5SYang Zhong We might widen the access and emulate, but for now 1752d9bb58e5SYang Zhong mark an exception and exit the cpu loop. */ 1753d9bb58e5SYang Zhong goto stop_the_world; 1754d9bb58e5SYang Zhong } 1755d9bb58e5SYang Zhong 1756d9bb58e5SYang Zhong /* Check TLB entry and enforce page permissions. 
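       As with tlb_fill elsewhere, the fill may resize the TLB, which
       is why index and tlbe are recomputed after a miss.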
*/ 1757334692bcSPeter Maydell if (!tlb_hit(tlb_addr, addr)) { 1758d9bb58e5SYang Zhong if (!VICTIM_TLB_HIT(addr_write, addr)) { 175929a0af61SRichard Henderson tlb_fill(env_cpu(env), addr, 1 << s_bits, MMU_DATA_STORE, 176098670d47SLaurent Vivier mmu_idx, retaddr); 17616d967cb8SEmilio G. Cota index = tlb_index(env, mmu_idx, addr); 17626d967cb8SEmilio G. Cota tlbe = tlb_entry(env, mmu_idx, addr); 1763d9bb58e5SYang Zhong } 1764403f290cSEmilio G. Cota tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK; 1765d9bb58e5SYang Zhong } 1766d9bb58e5SYang Zhong 176755df6fcfSPeter Maydell /* Notice an IO access or a needs-MMU-lookup access */ 176830d7e098SRichard Henderson if (unlikely(tlb_addr & TLB_MMIO)) { 1769d9bb58e5SYang Zhong /* There's really nothing that can be done to 1770d9bb58e5SYang Zhong support this apart from stop-the-world. */ 1771d9bb58e5SYang Zhong goto stop_the_world; 1772d9bb58e5SYang Zhong } 1773d9bb58e5SYang Zhong 1774d9bb58e5SYang Zhong /* Let the guest notice RMW on a write-only page. */ 177534d49937SPeter Maydell if (unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) { 177629a0af61SRichard Henderson tlb_fill(env_cpu(env), addr, 1 << s_bits, MMU_DATA_LOAD, 177798670d47SLaurent Vivier mmu_idx, retaddr); 1778d9bb58e5SYang Zhong /* Since we don't support reads and writes to different addresses, 1779d9bb58e5SYang Zhong and we do have the proper page loaded for write, this shouldn't 1780d9bb58e5SYang Zhong ever return. But just in case, handle via stop-the-world. */ 1781d9bb58e5SYang Zhong goto stop_the_world; 1782d9bb58e5SYang Zhong } 1783d9bb58e5SYang Zhong 178434d49937SPeter Maydell hostaddr = (void *)((uintptr_t)addr + tlbe->addend); 178534d49937SPeter Maydell 178634d49937SPeter Maydell if (unlikely(tlb_addr & TLB_NOTDIRTY)) { 1787707526adSRichard Henderson notdirty_write(env_cpu(env), addr, 1 << s_bits, 1788707526adSRichard Henderson &env_tlb(env)->d[mmu_idx].iotlb[index], retaddr); 178934d49937SPeter Maydell } 179034d49937SPeter Maydell 179134d49937SPeter Maydell return hostaddr; 1792d9bb58e5SYang Zhong 1793d9bb58e5SYang Zhong stop_the_world: 179429a0af61SRichard Henderson cpu_loop_exit_atomic(env_cpu(env), retaddr); 1795d9bb58e5SYang Zhong } 1796d9bb58e5SYang Zhong 1797eed56642SAlex Bennée /* 1798eed56642SAlex Bennée * Load Helpers 1799eed56642SAlex Bennée * 1800eed56642SAlex Bennée * We support two different access types. SOFTMMU_CODE_ACCESS is 1801eed56642SAlex Bennée * specifically for reading instructions from system memory. It is 1802eed56642SAlex Bennée * called by the translation loop and in some helpers where the code 1803eed56642SAlex Bennée * is disassembled. It shouldn't be called directly by guest code. 
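 *
 * Every helper takes a TCGMemOpIdx immediate, which packs the MemOp
 * and the mmu_idx together. For instance (a sketch using only names
 * defined in this file), a little-endian 32-bit load would be:
 *
 *     TCGMemOpIdx oi = make_memop_idx(MO_LEUL, mmu_idx);
 *     tcg_target_ulong val = helper_le_ldul_mmu(env, addr, oi, retaddr);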
1804eed56642SAlex Bennée */ 1805d9bb58e5SYang Zhong 18062dd92606SRichard Henderson typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr, 18072dd92606SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr); 18082dd92606SRichard Henderson 1809c6b716cdSRichard Henderson static inline uint64_t QEMU_ALWAYS_INLINE 181080d9d1c6SRichard Henderson load_memop(const void *haddr, MemOp op) 181180d9d1c6SRichard Henderson { 181280d9d1c6SRichard Henderson switch (op) { 181380d9d1c6SRichard Henderson case MO_UB: 181480d9d1c6SRichard Henderson return ldub_p(haddr); 181580d9d1c6SRichard Henderson case MO_BEUW: 181680d9d1c6SRichard Henderson return lduw_be_p(haddr); 181780d9d1c6SRichard Henderson case MO_LEUW: 181880d9d1c6SRichard Henderson return lduw_le_p(haddr); 181980d9d1c6SRichard Henderson case MO_BEUL: 182080d9d1c6SRichard Henderson return (uint32_t)ldl_be_p(haddr); 182180d9d1c6SRichard Henderson case MO_LEUL: 182280d9d1c6SRichard Henderson return (uint32_t)ldl_le_p(haddr); 182380d9d1c6SRichard Henderson case MO_BEQ: 182480d9d1c6SRichard Henderson return ldq_be_p(haddr); 182580d9d1c6SRichard Henderson case MO_LEQ: 182680d9d1c6SRichard Henderson return ldq_le_p(haddr); 182780d9d1c6SRichard Henderson default: 182880d9d1c6SRichard Henderson qemu_build_not_reached(); 182980d9d1c6SRichard Henderson } 183080d9d1c6SRichard Henderson } 183180d9d1c6SRichard Henderson 183280d9d1c6SRichard Henderson static inline uint64_t QEMU_ALWAYS_INLINE 18332dd92606SRichard Henderson load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, 1834be5c4787STony Nguyen uintptr_t retaddr, MemOp op, bool code_read, 18352dd92606SRichard Henderson FullLoadHelper *full_load) 1836eed56642SAlex Bennée { 1837eed56642SAlex Bennée uintptr_t mmu_idx = get_mmuidx(oi); 1838eed56642SAlex Bennée uintptr_t index = tlb_index(env, mmu_idx, addr); 1839eed56642SAlex Bennée CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); 1840eed56642SAlex Bennée target_ulong tlb_addr = code_read ? entry->addr_code : entry->addr_read; 1841eed56642SAlex Bennée const size_t tlb_off = code_read ? 1842eed56642SAlex Bennée offsetof(CPUTLBEntry, addr_code) : offsetof(CPUTLBEntry, addr_read); 1843f1be3696SRichard Henderson const MMUAccessType access_type = 1844f1be3696SRichard Henderson code_read ? MMU_INST_FETCH : MMU_DATA_LOAD; 1845eed56642SAlex Bennée unsigned a_bits = get_alignment_bits(get_memop(oi)); 1846eed56642SAlex Bennée void *haddr; 1847eed56642SAlex Bennée uint64_t res; 1848be5c4787STony Nguyen size_t size = memop_size(op); 1849d9bb58e5SYang Zhong 1850eed56642SAlex Bennée /* Handle CPU specific unaligned behaviour */ 1851eed56642SAlex Bennée if (addr & ((1 << a_bits) - 1)) { 185229a0af61SRichard Henderson cpu_unaligned_access(env_cpu(env), addr, access_type, 1853eed56642SAlex Bennée mmu_idx, retaddr); 1854eed56642SAlex Bennée } 1855eed56642SAlex Bennée 1856eed56642SAlex Bennée /* If the TLB entry is for a different page, reload and try again. */ 1857eed56642SAlex Bennée if (!tlb_hit(tlb_addr, addr)) { 1858eed56642SAlex Bennée if (!victim_tlb_hit(env, mmu_idx, index, tlb_off, 1859eed56642SAlex Bennée addr & TARGET_PAGE_MASK)) { 186029a0af61SRichard Henderson tlb_fill(env_cpu(env), addr, size, 1861f1be3696SRichard Henderson access_type, mmu_idx, retaddr); 1862eed56642SAlex Bennée index = tlb_index(env, mmu_idx, addr); 1863eed56642SAlex Bennée entry = tlb_entry(env, mmu_idx, addr); 1864eed56642SAlex Bennée } 1865eed56642SAlex Bennée tlb_addr = code_read ? 
entry->addr_code : entry->addr_read; 186630d7e098SRichard Henderson tlb_addr &= ~TLB_INVALID_MASK; 1867eed56642SAlex Bennée } 1868eed56642SAlex Bennée 186950b107c5SRichard Henderson /* Handle anything that isn't just a straight memory access. */ 1870eed56642SAlex Bennée if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) { 187150b107c5SRichard Henderson CPUIOTLBEntry *iotlbentry; 18725b87b3e6SRichard Henderson bool need_swap; 187350b107c5SRichard Henderson 187450b107c5SRichard Henderson /* For anything that is unaligned, recurse through full_load. */ 1875eed56642SAlex Bennée if ((addr & (size - 1)) != 0) { 1876eed56642SAlex Bennée goto do_unaligned_access; 1877eed56642SAlex Bennée } 187850b107c5SRichard Henderson 187950b107c5SRichard Henderson iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index]; 188050b107c5SRichard Henderson 188150b107c5SRichard Henderson /* Handle watchpoints. */ 188250b107c5SRichard Henderson if (unlikely(tlb_addr & TLB_WATCHPOINT)) { 188350b107c5SRichard Henderson /* On watchpoint hit, this will longjmp out. */ 188450b107c5SRichard Henderson cpu_check_watchpoint(env_cpu(env), addr, size, 188550b107c5SRichard Henderson iotlbentry->attrs, BP_MEM_READ, retaddr); 18865b87b3e6SRichard Henderson } 188750b107c5SRichard Henderson 18885b87b3e6SRichard Henderson need_swap = size > 1 && (tlb_addr & TLB_BSWAP); 188950b107c5SRichard Henderson 189050b107c5SRichard Henderson /* Handle I/O access. */ 18915b87b3e6SRichard Henderson if (likely(tlb_addr & TLB_MMIO)) { 18925b87b3e6SRichard Henderson return io_readx(env, iotlbentry, mmu_idx, addr, retaddr, 18935b87b3e6SRichard Henderson access_type, op ^ (need_swap * MO_BSWAP)); 18945b87b3e6SRichard Henderson } 18955b87b3e6SRichard Henderson 18965b87b3e6SRichard Henderson haddr = (void *)((uintptr_t)addr + entry->addend); 18975b87b3e6SRichard Henderson 18985b87b3e6SRichard Henderson /* 18995b87b3e6SRichard Henderson * Keep these two load_memop separate to ensure that the compiler 19005b87b3e6SRichard Henderson * is able to fold the entire function to a single instruction. 19015b87b3e6SRichard Henderson * There is a build-time assert inside to remind you of this. ;-) 19025b87b3e6SRichard Henderson */ 19035b87b3e6SRichard Henderson if (unlikely(need_swap)) { 19045b87b3e6SRichard Henderson return load_memop(haddr, op ^ MO_BSWAP); 19055b87b3e6SRichard Henderson } 19065b87b3e6SRichard Henderson return load_memop(haddr, op); 1907eed56642SAlex Bennée } 1908eed56642SAlex Bennée 1909eed56642SAlex Bennée /* Handle slow unaligned access (it spans two pages or IO). */ 1910eed56642SAlex Bennée if (size > 1 1911eed56642SAlex Bennée && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1 1912eed56642SAlex Bennée >= TARGET_PAGE_SIZE)) { 1913eed56642SAlex Bennée target_ulong addr1, addr2; 19148c79b288SAlex Bennée uint64_t r1, r2; 1915eed56642SAlex Bennée unsigned shift; 1916eed56642SAlex Bennée do_unaligned_access: 1917ab7a2009SAlex Bennée addr1 = addr & ~((target_ulong)size - 1); 1918eed56642SAlex Bennée addr2 = addr1 + size; 19192dd92606SRichard Henderson r1 = full_load(env, addr1, oi, retaddr); 19202dd92606SRichard Henderson r2 = full_load(env, addr2, oi, retaddr); 1921eed56642SAlex Bennée shift = (addr & (size - 1)) * 8; 1922eed56642SAlex Bennée 1923be5c4787STony Nguyen if (memop_big_endian(op)) { 1924eed56642SAlex Bennée /* Big-endian combine. */ 1925eed56642SAlex Bennée res = (r1 << shift) | (r2 >> ((size * 8) - shift)); 1926eed56642SAlex Bennée } else { 1927eed56642SAlex Bennée /* Little-endian combine. 
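       For example, size == 4 and (addr & 3) == 1 give shift == 8:
       bytes 1..3 of r1 land in bits 0..23 and byte 0 of r2 lands in
       bits 24..31 before the final mask.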
*/ 1928eed56642SAlex Bennée res = (r1 >> shift) | (r2 << ((size * 8) - shift)); 1929eed56642SAlex Bennée } 1930eed56642SAlex Bennée return res & MAKE_64BIT_MASK(0, size * 8); 1931eed56642SAlex Bennée } 1932eed56642SAlex Bennée 1933eed56642SAlex Bennée haddr = (void *)((uintptr_t)addr + entry->addend); 193480d9d1c6SRichard Henderson return load_memop(haddr, op); 1935eed56642SAlex Bennée } 1936eed56642SAlex Bennée 1937eed56642SAlex Bennée /* 1938eed56642SAlex Bennée * For the benefit of TCG generated code, we want to avoid the 1939eed56642SAlex Bennée * complication of ABI-specific return type promotion and always 1940eed56642SAlex Bennée * return a value extended to the register size of the host. This is 1941eed56642SAlex Bennée * tcg_target_long, except in the case of a 32-bit host and 64-bit 1942eed56642SAlex Bennée * data, and for that we always have uint64_t. 1943eed56642SAlex Bennée * 1944eed56642SAlex Bennée * We don't bother with this widened value for SOFTMMU_CODE_ACCESS. 1945eed56642SAlex Bennée */ 1946eed56642SAlex Bennée 19472dd92606SRichard Henderson static uint64_t full_ldub_mmu(CPUArchState *env, target_ulong addr, 19482dd92606SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 19492dd92606SRichard Henderson { 1950be5c4787STony Nguyen return load_helper(env, addr, oi, retaddr, MO_UB, false, full_ldub_mmu); 19512dd92606SRichard Henderson } 19522dd92606SRichard Henderson 1953fc1bc777SRichard Henderson tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr, 1954fc1bc777SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 1955eed56642SAlex Bennée { 19562dd92606SRichard Henderson return full_ldub_mmu(env, addr, oi, retaddr); 19572dd92606SRichard Henderson } 19582dd92606SRichard Henderson 19592dd92606SRichard Henderson static uint64_t full_le_lduw_mmu(CPUArchState *env, target_ulong addr, 19602dd92606SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 19612dd92606SRichard Henderson { 1962be5c4787STony Nguyen return load_helper(env, addr, oi, retaddr, MO_LEUW, false, 19632dd92606SRichard Henderson full_le_lduw_mmu); 1964eed56642SAlex Bennée } 1965eed56642SAlex Bennée 1966fc1bc777SRichard Henderson tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr, 1967fc1bc777SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 1968eed56642SAlex Bennée { 19692dd92606SRichard Henderson return full_le_lduw_mmu(env, addr, oi, retaddr); 19702dd92606SRichard Henderson } 19712dd92606SRichard Henderson 19722dd92606SRichard Henderson static uint64_t full_be_lduw_mmu(CPUArchState *env, target_ulong addr, 19732dd92606SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 19742dd92606SRichard Henderson { 1975be5c4787STony Nguyen return load_helper(env, addr, oi, retaddr, MO_BEUW, false, 19762dd92606SRichard Henderson full_be_lduw_mmu); 1977eed56642SAlex Bennée } 1978eed56642SAlex Bennée 1979fc1bc777SRichard Henderson tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr, 1980fc1bc777SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 1981eed56642SAlex Bennée { 19822dd92606SRichard Henderson return full_be_lduw_mmu(env, addr, oi, retaddr); 19832dd92606SRichard Henderson } 19842dd92606SRichard Henderson 19852dd92606SRichard Henderson static uint64_t full_le_ldul_mmu(CPUArchState *env, target_ulong addr, 19862dd92606SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 19872dd92606SRichard Henderson { 1988be5c4787STony Nguyen return load_helper(env, addr, oi, retaddr, MO_LEUL, false, 19892dd92606SRichard Henderson full_le_ldul_mmu); 
1990eed56642SAlex Bennée } 1991eed56642SAlex Bennée 1992fc1bc777SRichard Henderson tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr, 1993fc1bc777SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 1994eed56642SAlex Bennée { 19952dd92606SRichard Henderson return full_le_ldul_mmu(env, addr, oi, retaddr); 19962dd92606SRichard Henderson } 19972dd92606SRichard Henderson 19982dd92606SRichard Henderson static uint64_t full_be_ldul_mmu(CPUArchState *env, target_ulong addr, 19992dd92606SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 20002dd92606SRichard Henderson { 2001be5c4787STony Nguyen return load_helper(env, addr, oi, retaddr, MO_BEUL, false, 20022dd92606SRichard Henderson full_be_ldul_mmu); 2003eed56642SAlex Bennée } 2004eed56642SAlex Bennée 2005fc1bc777SRichard Henderson tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr, 2006fc1bc777SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 2007eed56642SAlex Bennée { 20082dd92606SRichard Henderson return full_be_ldul_mmu(env, addr, oi, retaddr); 2009eed56642SAlex Bennée } 2010eed56642SAlex Bennée 2011fc1bc777SRichard Henderson uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr, 2012fc1bc777SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 2013eed56642SAlex Bennée { 2014be5c4787STony Nguyen return load_helper(env, addr, oi, retaddr, MO_LEQ, false, 20152dd92606SRichard Henderson helper_le_ldq_mmu); 2016eed56642SAlex Bennée } 2017eed56642SAlex Bennée 2018fc1bc777SRichard Henderson uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr, 2019fc1bc777SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 2020eed56642SAlex Bennée { 2021be5c4787STony Nguyen return load_helper(env, addr, oi, retaddr, MO_BEQ, false, 20222dd92606SRichard Henderson helper_be_ldq_mmu); 2023eed56642SAlex Bennée } 2024eed56642SAlex Bennée 2025eed56642SAlex Bennée /* 2026eed56642SAlex Bennée * Provide signed versions of the load routines as well. We can of course 2027eed56642SAlex Bennée * avoid this for 64-bit data, or for 32-bit data on 32-bit host. 
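 * In those cases the loaded value already fills the host return
 * register, leaving no high bits to sign-extend.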
2028eed56642SAlex Bennée */ 2029eed56642SAlex Bennée 2030eed56642SAlex Bennée 2031eed56642SAlex Bennée tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr, 2032eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 2033eed56642SAlex Bennée { 2034eed56642SAlex Bennée return (int8_t)helper_ret_ldub_mmu(env, addr, oi, retaddr); 2035eed56642SAlex Bennée } 2036eed56642SAlex Bennée 2037eed56642SAlex Bennée tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr, 2038eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 2039eed56642SAlex Bennée { 2040eed56642SAlex Bennée return (int16_t)helper_le_lduw_mmu(env, addr, oi, retaddr); 2041eed56642SAlex Bennée } 2042eed56642SAlex Bennée 2043eed56642SAlex Bennée tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr, 2044eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 2045eed56642SAlex Bennée { 2046eed56642SAlex Bennée return (int16_t)helper_be_lduw_mmu(env, addr, oi, retaddr); 2047eed56642SAlex Bennée } 2048eed56642SAlex Bennée 2049eed56642SAlex Bennée tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr, 2050eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 2051eed56642SAlex Bennée { 2052eed56642SAlex Bennée return (int32_t)helper_le_ldul_mmu(env, addr, oi, retaddr); 2053eed56642SAlex Bennée } 2054eed56642SAlex Bennée 2055eed56642SAlex Bennée tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr, 2056eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 2057eed56642SAlex Bennée { 2058eed56642SAlex Bennée return (int32_t)helper_be_ldul_mmu(env, addr, oi, retaddr); 2059eed56642SAlex Bennée } 2060eed56642SAlex Bennée 2061eed56642SAlex Bennée /* 2062d03f1408SRichard Henderson * Load helpers for cpu_ldst.h. 
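 *
 * These wrap the full_*_mmu loads above with the tracing and plugin
 * callbacks that instrumented guest accesses require; cpu_load_helper
 * below is the common path.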
2063d03f1408SRichard Henderson */ 2064d03f1408SRichard Henderson 2065d03f1408SRichard Henderson static inline uint64_t cpu_load_helper(CPUArchState *env, abi_ptr addr, 2066d03f1408SRichard Henderson int mmu_idx, uintptr_t retaddr, 2067d03f1408SRichard Henderson MemOp op, FullLoadHelper *full_load) 2068d03f1408SRichard Henderson { 2069d03f1408SRichard Henderson uint16_t meminfo; 2070d03f1408SRichard Henderson TCGMemOpIdx oi; 2071d03f1408SRichard Henderson uint64_t ret; 2072d03f1408SRichard Henderson 2073d03f1408SRichard Henderson meminfo = trace_mem_get_info(op, mmu_idx, false); 2074d03f1408SRichard Henderson trace_guest_mem_before_exec(env_cpu(env), addr, meminfo); 2075d03f1408SRichard Henderson 2076d03f1408SRichard Henderson op &= ~MO_SIGN; 2077d03f1408SRichard Henderson oi = make_memop_idx(op, mmu_idx); 2078d03f1408SRichard Henderson ret = full_load(env, addr, oi, retaddr); 2079d03f1408SRichard Henderson 2080d03f1408SRichard Henderson qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, meminfo); 2081d03f1408SRichard Henderson 2082d03f1408SRichard Henderson return ret; 2083d03f1408SRichard Henderson } 2084d03f1408SRichard Henderson 2085d03f1408SRichard Henderson uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr, 2086d03f1408SRichard Henderson int mmu_idx, uintptr_t ra) 2087d03f1408SRichard Henderson { 2088d03f1408SRichard Henderson return cpu_load_helper(env, addr, mmu_idx, ra, MO_UB, full_ldub_mmu); 2089d03f1408SRichard Henderson } 2090d03f1408SRichard Henderson 2091d03f1408SRichard Henderson int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr, 2092d03f1408SRichard Henderson int mmu_idx, uintptr_t ra) 2093d03f1408SRichard Henderson { 2094d03f1408SRichard Henderson return (int8_t)cpu_load_helper(env, addr, mmu_idx, ra, MO_SB, 2095d03f1408SRichard Henderson full_ldub_mmu); 2096d03f1408SRichard Henderson } 2097d03f1408SRichard Henderson 2098b9e60257SRichard Henderson uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, 2099d03f1408SRichard Henderson int mmu_idx, uintptr_t ra) 2100d03f1408SRichard Henderson { 2101b9e60257SRichard Henderson return cpu_load_helper(env, addr, mmu_idx, ra, MO_BEUW, full_be_lduw_mmu); 2102d03f1408SRichard Henderson } 2103d03f1408SRichard Henderson 2104b9e60257SRichard Henderson int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, 2105d03f1408SRichard Henderson int mmu_idx, uintptr_t ra) 2106d03f1408SRichard Henderson { 2107b9e60257SRichard Henderson return (int16_t)cpu_load_helper(env, addr, mmu_idx, ra, MO_BESW, 2108b9e60257SRichard Henderson full_be_lduw_mmu); 2109d03f1408SRichard Henderson } 2110d03f1408SRichard Henderson 2111b9e60257SRichard Henderson uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, 2112d03f1408SRichard Henderson int mmu_idx, uintptr_t ra) 2113d03f1408SRichard Henderson { 2114b9e60257SRichard Henderson return cpu_load_helper(env, addr, mmu_idx, ra, MO_BEUL, full_be_ldul_mmu); 2115d03f1408SRichard Henderson } 2116d03f1408SRichard Henderson 2117b9e60257SRichard Henderson uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, 2118d03f1408SRichard Henderson int mmu_idx, uintptr_t ra) 2119d03f1408SRichard Henderson { 2120b9e60257SRichard Henderson return cpu_load_helper(env, addr, mmu_idx, ra, MO_BEQ, helper_be_ldq_mmu); 2121b9e60257SRichard Henderson } 2122b9e60257SRichard Henderson 2123b9e60257SRichard Henderson uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, 2124b9e60257SRichard Henderson int mmu_idx, uintptr_t ra) 2125b9e60257SRichard Henderson { 2126b9e60257SRichard Henderson return 
cpu_load_helper(env, addr, mmu_idx, ra, MO_LEUW, full_le_lduw_mmu); 2127b9e60257SRichard Henderson } 2128b9e60257SRichard Henderson 2129b9e60257SRichard Henderson int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, 2130b9e60257SRichard Henderson int mmu_idx, uintptr_t ra) 2131b9e60257SRichard Henderson { 2132b9e60257SRichard Henderson return (int16_t)cpu_load_helper(env, addr, mmu_idx, ra, MO_LESW, 2133b9e60257SRichard Henderson full_le_lduw_mmu); 2134b9e60257SRichard Henderson } 2135b9e60257SRichard Henderson 2136b9e60257SRichard Henderson uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, 2137b9e60257SRichard Henderson int mmu_idx, uintptr_t ra) 2138b9e60257SRichard Henderson { 2139b9e60257SRichard Henderson return cpu_load_helper(env, addr, mmu_idx, ra, MO_LEUL, full_le_ldul_mmu); 2140b9e60257SRichard Henderson } 2141b9e60257SRichard Henderson 2142b9e60257SRichard Henderson uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, 2143b9e60257SRichard Henderson int mmu_idx, uintptr_t ra) 2144b9e60257SRichard Henderson { 2145b9e60257SRichard Henderson return cpu_load_helper(env, addr, mmu_idx, ra, MO_LEQ, helper_le_ldq_mmu); 2146d03f1408SRichard Henderson } 2147d03f1408SRichard Henderson 2148cfe04a4bSRichard Henderson uint32_t cpu_ldub_data_ra(CPUArchState *env, target_ulong ptr, 2149cfe04a4bSRichard Henderson uintptr_t retaddr) 2150cfe04a4bSRichard Henderson { 2151cfe04a4bSRichard Henderson return cpu_ldub_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr); 2152cfe04a4bSRichard Henderson } 2153cfe04a4bSRichard Henderson 2154cfe04a4bSRichard Henderson int cpu_ldsb_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr) 2155cfe04a4bSRichard Henderson { 2156cfe04a4bSRichard Henderson return cpu_ldsb_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr); 2157cfe04a4bSRichard Henderson } 2158cfe04a4bSRichard Henderson 2159b9e60257SRichard Henderson uint32_t cpu_lduw_be_data_ra(CPUArchState *env, target_ulong ptr, 2160cfe04a4bSRichard Henderson uintptr_t retaddr) 2161cfe04a4bSRichard Henderson { 2162b9e60257SRichard Henderson return cpu_lduw_be_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr); 2163cfe04a4bSRichard Henderson } 2164cfe04a4bSRichard Henderson 2165b9e60257SRichard Henderson int cpu_ldsw_be_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr) 2166cfe04a4bSRichard Henderson { 2167b9e60257SRichard Henderson return cpu_ldsw_be_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr); 2168cfe04a4bSRichard Henderson } 2169cfe04a4bSRichard Henderson 2170b9e60257SRichard Henderson uint32_t cpu_ldl_be_data_ra(CPUArchState *env, target_ulong ptr, 2171b9e60257SRichard Henderson uintptr_t retaddr) 2172cfe04a4bSRichard Henderson { 2173b9e60257SRichard Henderson return cpu_ldl_be_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr); 2174cfe04a4bSRichard Henderson } 2175cfe04a4bSRichard Henderson 2176b9e60257SRichard Henderson uint64_t cpu_ldq_be_data_ra(CPUArchState *env, target_ulong ptr, 2177b9e60257SRichard Henderson uintptr_t retaddr) 2178cfe04a4bSRichard Henderson { 2179b9e60257SRichard Henderson return cpu_ldq_be_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr); 2180b9e60257SRichard Henderson } 2181b9e60257SRichard Henderson 2182b9e60257SRichard Henderson uint32_t cpu_lduw_le_data_ra(CPUArchState *env, target_ulong ptr, 2183b9e60257SRichard Henderson uintptr_t retaddr) 2184b9e60257SRichard Henderson { 2185b9e60257SRichard Henderson return cpu_lduw_le_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr); 
2186b9e60257SRichard Henderson } 2187b9e60257SRichard Henderson 2188b9e60257SRichard Henderson int cpu_ldsw_le_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr) 2189b9e60257SRichard Henderson { 2190b9e60257SRichard Henderson return cpu_ldsw_le_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr); 2191b9e60257SRichard Henderson } 2192b9e60257SRichard Henderson 2193b9e60257SRichard Henderson uint32_t cpu_ldl_le_data_ra(CPUArchState *env, target_ulong ptr, 2194b9e60257SRichard Henderson uintptr_t retaddr) 2195b9e60257SRichard Henderson { 2196b9e60257SRichard Henderson return cpu_ldl_le_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr); 2197b9e60257SRichard Henderson } 2198b9e60257SRichard Henderson 2199b9e60257SRichard Henderson uint64_t cpu_ldq_le_data_ra(CPUArchState *env, target_ulong ptr, 2200b9e60257SRichard Henderson uintptr_t retaddr) 2201b9e60257SRichard Henderson { 2202b9e60257SRichard Henderson return cpu_ldq_le_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr); 2203cfe04a4bSRichard Henderson } 2204cfe04a4bSRichard Henderson 2205cfe04a4bSRichard Henderson uint32_t cpu_ldub_data(CPUArchState *env, target_ulong ptr) 2206cfe04a4bSRichard Henderson { 2207cfe04a4bSRichard Henderson return cpu_ldub_data_ra(env, ptr, 0); 2208cfe04a4bSRichard Henderson } 2209cfe04a4bSRichard Henderson 2210cfe04a4bSRichard Henderson int cpu_ldsb_data(CPUArchState *env, target_ulong ptr) 2211cfe04a4bSRichard Henderson { 2212cfe04a4bSRichard Henderson return cpu_ldsb_data_ra(env, ptr, 0); 2213cfe04a4bSRichard Henderson } 2214cfe04a4bSRichard Henderson 2215b9e60257SRichard Henderson uint32_t cpu_lduw_be_data(CPUArchState *env, target_ulong ptr) 2216cfe04a4bSRichard Henderson { 2217b9e60257SRichard Henderson return cpu_lduw_be_data_ra(env, ptr, 0); 2218cfe04a4bSRichard Henderson } 2219cfe04a4bSRichard Henderson 2220b9e60257SRichard Henderson int cpu_ldsw_be_data(CPUArchState *env, target_ulong ptr) 2221cfe04a4bSRichard Henderson { 2222b9e60257SRichard Henderson return cpu_ldsw_be_data_ra(env, ptr, 0); 2223cfe04a4bSRichard Henderson } 2224cfe04a4bSRichard Henderson 2225b9e60257SRichard Henderson uint32_t cpu_ldl_be_data(CPUArchState *env, target_ulong ptr) 2226cfe04a4bSRichard Henderson { 2227b9e60257SRichard Henderson return cpu_ldl_be_data_ra(env, ptr, 0); 2228cfe04a4bSRichard Henderson } 2229cfe04a4bSRichard Henderson 2230b9e60257SRichard Henderson uint64_t cpu_ldq_be_data(CPUArchState *env, target_ulong ptr) 2231cfe04a4bSRichard Henderson { 2232b9e60257SRichard Henderson return cpu_ldq_be_data_ra(env, ptr, 0); 2233b9e60257SRichard Henderson } 2234b9e60257SRichard Henderson 2235b9e60257SRichard Henderson uint32_t cpu_lduw_le_data(CPUArchState *env, target_ulong ptr) 2236b9e60257SRichard Henderson { 2237b9e60257SRichard Henderson return cpu_lduw_le_data_ra(env, ptr, 0); 2238b9e60257SRichard Henderson } 2239b9e60257SRichard Henderson 2240b9e60257SRichard Henderson int cpu_ldsw_le_data(CPUArchState *env, target_ulong ptr) 2241b9e60257SRichard Henderson { 2242b9e60257SRichard Henderson return cpu_ldsw_le_data_ra(env, ptr, 0); 2243b9e60257SRichard Henderson } 2244b9e60257SRichard Henderson 2245b9e60257SRichard Henderson uint32_t cpu_ldl_le_data(CPUArchState *env, target_ulong ptr) 2246b9e60257SRichard Henderson { 2247b9e60257SRichard Henderson return cpu_ldl_le_data_ra(env, ptr, 0); 2248b9e60257SRichard Henderson } 2249b9e60257SRichard Henderson 2250b9e60257SRichard Henderson uint64_t cpu_ldq_le_data(CPUArchState *env, target_ulong ptr) 2251b9e60257SRichard Henderson { 
2252b9e60257SRichard Henderson return cpu_ldq_le_data_ra(env, ptr, 0); 2253cfe04a4bSRichard Henderson } 2254cfe04a4bSRichard Henderson 2255d03f1408SRichard Henderson /* 2256eed56642SAlex Bennée * Store Helpers 2257eed56642SAlex Bennée */ 2258eed56642SAlex Bennée 2259c6b716cdSRichard Henderson static inline void QEMU_ALWAYS_INLINE 226080d9d1c6SRichard Henderson store_memop(void *haddr, uint64_t val, MemOp op) 226180d9d1c6SRichard Henderson { 226280d9d1c6SRichard Henderson switch (op) { 226380d9d1c6SRichard Henderson case MO_UB: 226480d9d1c6SRichard Henderson stb_p(haddr, val); 226580d9d1c6SRichard Henderson break; 226680d9d1c6SRichard Henderson case MO_BEUW: 226780d9d1c6SRichard Henderson stw_be_p(haddr, val); 226880d9d1c6SRichard Henderson break; 226980d9d1c6SRichard Henderson case MO_LEUW: 227080d9d1c6SRichard Henderson stw_le_p(haddr, val); 227180d9d1c6SRichard Henderson break; 227280d9d1c6SRichard Henderson case MO_BEUL: 227380d9d1c6SRichard Henderson stl_be_p(haddr, val); 227480d9d1c6SRichard Henderson break; 227580d9d1c6SRichard Henderson case MO_LEUL: 227680d9d1c6SRichard Henderson stl_le_p(haddr, val); 227780d9d1c6SRichard Henderson break; 227880d9d1c6SRichard Henderson case MO_BEQ: 227980d9d1c6SRichard Henderson stq_be_p(haddr, val); 228080d9d1c6SRichard Henderson break; 228180d9d1c6SRichard Henderson case MO_LEQ: 228280d9d1c6SRichard Henderson stq_le_p(haddr, val); 228380d9d1c6SRichard Henderson break; 228480d9d1c6SRichard Henderson default: 228580d9d1c6SRichard Henderson qemu_build_not_reached(); 228680d9d1c6SRichard Henderson } 228780d9d1c6SRichard Henderson } 228880d9d1c6SRichard Henderson 22896b8b622eSRichard Henderson static void __attribute__((noinline)) 22906b8b622eSRichard Henderson store_helper_unaligned(CPUArchState *env, target_ulong addr, uint64_t val, 22916b8b622eSRichard Henderson uintptr_t retaddr, size_t size, uintptr_t mmu_idx, 22926b8b622eSRichard Henderson bool big_endian) 22936b8b622eSRichard Henderson { 22946b8b622eSRichard Henderson const size_t tlb_off = offsetof(CPUTLBEntry, addr_write); 22956b8b622eSRichard Henderson uintptr_t index, index2; 22966b8b622eSRichard Henderson CPUTLBEntry *entry, *entry2; 22976b8b622eSRichard Henderson target_ulong page2, tlb_addr, tlb_addr2; 22986b8b622eSRichard Henderson TCGMemOpIdx oi; 22996b8b622eSRichard Henderson size_t size2; 23006b8b622eSRichard Henderson int i; 23016b8b622eSRichard Henderson 23026b8b622eSRichard Henderson /* 23036b8b622eSRichard Henderson * Ensure the second page is in the TLB. Note that the first page 23046b8b622eSRichard Henderson * is already guaranteed to be filled, and that the second page 23056b8b622eSRichard Henderson * cannot evict the first. 
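 *
 * Worked example (assuming 4KiB pages): for a 4-byte store at an
 * address ending in 0xffe, page2 = (addr + 4) & TARGET_PAGE_MASK is
 * the start of the following page, and
 * size2 = (addr + 4) & ~TARGET_PAGE_MASK = 2, so two bytes fall on
 * each page.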
23066b8b622eSRichard Henderson */ 23076b8b622eSRichard Henderson page2 = (addr + size) & TARGET_PAGE_MASK; 23086b8b622eSRichard Henderson size2 = (addr + size) & ~TARGET_PAGE_MASK; 23096b8b622eSRichard Henderson index2 = tlb_index(env, mmu_idx, page2); 23106b8b622eSRichard Henderson entry2 = tlb_entry(env, mmu_idx, page2); 23116b8b622eSRichard Henderson 23126b8b622eSRichard Henderson tlb_addr2 = tlb_addr_write(entry2); 23136b8b622eSRichard Henderson if (!tlb_hit_page(tlb_addr2, page2)) { 23146b8b622eSRichard Henderson if (!victim_tlb_hit(env, mmu_idx, index2, tlb_off, page2)) { 23156b8b622eSRichard Henderson tlb_fill(env_cpu(env), page2, size2, MMU_DATA_STORE, 23166b8b622eSRichard Henderson mmu_idx, retaddr); 23176b8b622eSRichard Henderson index2 = tlb_index(env, mmu_idx, page2); 23186b8b622eSRichard Henderson entry2 = tlb_entry(env, mmu_idx, page2); 23196b8b622eSRichard Henderson } 23206b8b622eSRichard Henderson tlb_addr2 = tlb_addr_write(entry2); 23216b8b622eSRichard Henderson } 23226b8b622eSRichard Henderson 23236b8b622eSRichard Henderson index = tlb_index(env, mmu_idx, addr); 23246b8b622eSRichard Henderson entry = tlb_entry(env, mmu_idx, addr); 23256b8b622eSRichard Henderson tlb_addr = tlb_addr_write(entry); 23266b8b622eSRichard Henderson 23276b8b622eSRichard Henderson /* 23286b8b622eSRichard Henderson * Handle watchpoints. Since this may trap, all checks 23296b8b622eSRichard Henderson * must happen before any store. 23306b8b622eSRichard Henderson */ 23316b8b622eSRichard Henderson if (unlikely(tlb_addr & TLB_WATCHPOINT)) { 23326b8b622eSRichard Henderson cpu_check_watchpoint(env_cpu(env), addr, size - size2, 23336b8b622eSRichard Henderson env_tlb(env)->d[mmu_idx].iotlb[index].attrs, 23346b8b622eSRichard Henderson BP_MEM_WRITE, retaddr); 23356b8b622eSRichard Henderson } 23366b8b622eSRichard Henderson if (unlikely(tlb_addr2 & TLB_WATCHPOINT)) { 23376b8b622eSRichard Henderson cpu_check_watchpoint(env_cpu(env), page2, size2, 23386b8b622eSRichard Henderson env_tlb(env)->d[mmu_idx].iotlb[index2].attrs, 23396b8b622eSRichard Henderson BP_MEM_WRITE, retaddr); 23406b8b622eSRichard Henderson } 23416b8b622eSRichard Henderson 23426b8b622eSRichard Henderson /* 23436b8b622eSRichard Henderson * XXX: not efficient, but simple. 23446b8b622eSRichard Henderson * This loop must go in the forward direction to avoid issues 23456b8b622eSRichard Henderson * with self-modifying code in Windows 64-bit. 23466b8b622eSRichard Henderson */ 23476b8b622eSRichard Henderson oi = make_memop_idx(MO_UB, mmu_idx); 23486b8b622eSRichard Henderson if (big_endian) { 23496b8b622eSRichard Henderson for (i = 0; i < size; ++i) { 23506b8b622eSRichard Henderson /* Big-endian extract. */ 23516b8b622eSRichard Henderson uint8_t val8 = val >> (((size - 1) * 8) - (i * 8)); 23526b8b622eSRichard Henderson helper_ret_stb_mmu(env, addr + i, val8, oi, retaddr); 23536b8b622eSRichard Henderson } 23546b8b622eSRichard Henderson } else { 23556b8b622eSRichard Henderson for (i = 0; i < size; ++i) { 23566b8b622eSRichard Henderson /* Little-endian extract. 
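 * e.g. storing 0x11223344: i == 0 writes 0x44 at addr, i == 1
 * writes 0x33 at addr + 1, and so on, in ascending address order.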
*/ 23576b8b622eSRichard Henderson uint8_t val8 = val >> (i * 8); 23586b8b622eSRichard Henderson helper_ret_stb_mmu(env, addr + i, val8, oi, retaddr); 23596b8b622eSRichard Henderson } 23606b8b622eSRichard Henderson } 23616b8b622eSRichard Henderson } 23626b8b622eSRichard Henderson 236380d9d1c6SRichard Henderson static inline void QEMU_ALWAYS_INLINE 23644601f8d1SRichard Henderson store_helper(CPUArchState *env, target_ulong addr, uint64_t val, 2365be5c4787STony Nguyen TCGMemOpIdx oi, uintptr_t retaddr, MemOp op) 2366eed56642SAlex Bennée { 2367eed56642SAlex Bennée uintptr_t mmu_idx = get_mmuidx(oi); 2368eed56642SAlex Bennée uintptr_t index = tlb_index(env, mmu_idx, addr); 2369eed56642SAlex Bennée CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); 2370eed56642SAlex Bennée target_ulong tlb_addr = tlb_addr_write(entry); 2371eed56642SAlex Bennée const size_t tlb_off = offsetof(CPUTLBEntry, addr_write); 2372eed56642SAlex Bennée unsigned a_bits = get_alignment_bits(get_memop(oi)); 2373eed56642SAlex Bennée void *haddr; 2374be5c4787STony Nguyen size_t size = memop_size(op); 2375eed56642SAlex Bennée 2376eed56642SAlex Bennée /* Handle CPU specific unaligned behaviour */ 2377eed56642SAlex Bennée if (addr & ((1 << a_bits) - 1)) { 237829a0af61SRichard Henderson cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE, 2379eed56642SAlex Bennée mmu_idx, retaddr); 2380eed56642SAlex Bennée } 2381eed56642SAlex Bennée 2382eed56642SAlex Bennée /* If the TLB entry is for a different page, reload and try again. */ 2383eed56642SAlex Bennée if (!tlb_hit(tlb_addr, addr)) { 2384eed56642SAlex Bennée if (!victim_tlb_hit(env, mmu_idx, index, tlb_off, 2385eed56642SAlex Bennée addr & TARGET_PAGE_MASK)) { 238629a0af61SRichard Henderson tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE, 2387eed56642SAlex Bennée mmu_idx, retaddr); 2388eed56642SAlex Bennée index = tlb_index(env, mmu_idx, addr); 2389eed56642SAlex Bennée entry = tlb_entry(env, mmu_idx, addr); 2390eed56642SAlex Bennée } 2391eed56642SAlex Bennée tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK; 2392eed56642SAlex Bennée } 2393eed56642SAlex Bennée 239450b107c5SRichard Henderson /* Handle anything that isn't just a straight memory access. */ 2395eed56642SAlex Bennée if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) { 239650b107c5SRichard Henderson CPUIOTLBEntry *iotlbentry; 23975b87b3e6SRichard Henderson bool need_swap; 239850b107c5SRichard Henderson 239950b107c5SRichard Henderson /* For anything that is unaligned, recurse through byte stores. */ 2400eed56642SAlex Bennée if ((addr & (size - 1)) != 0) { 2401eed56642SAlex Bennée goto do_unaligned_access; 2402eed56642SAlex Bennée } 240350b107c5SRichard Henderson 240450b107c5SRichard Henderson iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index]; 240550b107c5SRichard Henderson 240650b107c5SRichard Henderson /* Handle watchpoints. */ 240750b107c5SRichard Henderson if (unlikely(tlb_addr & TLB_WATCHPOINT)) { 240850b107c5SRichard Henderson /* On watchpoint hit, this will longjmp out. */ 240950b107c5SRichard Henderson cpu_check_watchpoint(env_cpu(env), addr, size, 241050b107c5SRichard Henderson iotlbentry->attrs, BP_MEM_WRITE, retaddr); 24115b87b3e6SRichard Henderson } 241250b107c5SRichard Henderson 24135b87b3e6SRichard Henderson need_swap = size > 1 && (tlb_addr & TLB_BSWAP); 241450b107c5SRichard Henderson 241550b107c5SRichard Henderson /* Handle I/O access. 
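 *
 * If TLB_BSWAP was set, need_swap is true and XOR-ing MO_BSWAP below
 * toggles the byte order handed to io_writex(); e.g.
 * MO_BEUL ^ MO_BSWAP == MO_LEUL.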
*/ 241608565552SRichard Henderson if (tlb_addr & TLB_MMIO) { 24175b87b3e6SRichard Henderson io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr, 24185b87b3e6SRichard Henderson op ^ (need_swap * MO_BSWAP)); 24195b87b3e6SRichard Henderson return; 24205b87b3e6SRichard Henderson } 24215b87b3e6SRichard Henderson 24227b0d792cSRichard Henderson /* Ignore writes to ROM. */ 24237b0d792cSRichard Henderson if (unlikely(tlb_addr & TLB_DISCARD_WRITE)) { 24247b0d792cSRichard Henderson return; 24257b0d792cSRichard Henderson } 24267b0d792cSRichard Henderson 242708565552SRichard Henderson /* Handle clean RAM pages. */ 242808565552SRichard Henderson if (tlb_addr & TLB_NOTDIRTY) { 2429707526adSRichard Henderson notdirty_write(env_cpu(env), addr, size, iotlbentry, retaddr); 243008565552SRichard Henderson } 243108565552SRichard Henderson 2432707526adSRichard Henderson haddr = (void *)((uintptr_t)addr + entry->addend); 243308565552SRichard Henderson 24345b87b3e6SRichard Henderson /* 24355b87b3e6SRichard Henderson * Keep these two store_memop separate to ensure that the compiler 24365b87b3e6SRichard Henderson * is able to fold the entire function to a single instruction. 24375b87b3e6SRichard Henderson * There is a build-time assert inside to remind you of this. ;-) 24385b87b3e6SRichard Henderson */ 24395b87b3e6SRichard Henderson if (unlikely(need_swap)) { 24405b87b3e6SRichard Henderson store_memop(haddr, val, op ^ MO_BSWAP); 24415b87b3e6SRichard Henderson } else { 24425b87b3e6SRichard Henderson store_memop(haddr, val, op); 24435b87b3e6SRichard Henderson } 2444eed56642SAlex Bennée return; 2445eed56642SAlex Bennée } 2446eed56642SAlex Bennée 2447eed56642SAlex Bennée /* Handle slow unaligned access (it spans two pages or IO). */ 2448eed56642SAlex Bennée if (size > 1 2449eed56642SAlex Bennée && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1 2450eed56642SAlex Bennée >= TARGET_PAGE_SIZE)) { 2451eed56642SAlex Bennée do_unaligned_access: 24526b8b622eSRichard Henderson store_helper_unaligned(env, addr, val, retaddr, size, 24536b8b622eSRichard Henderson mmu_idx, memop_big_endian(op)); 2454eed56642SAlex Bennée return; 2455eed56642SAlex Bennée } 2456eed56642SAlex Bennée 2457eed56642SAlex Bennée haddr = (void *)((uintptr_t)addr + entry->addend); 245880d9d1c6SRichard Henderson store_memop(haddr, val, op); 2459eed56642SAlex Bennée } 2460eed56642SAlex Bennée 24616b8b622eSRichard Henderson void __attribute__((noinline)) 24626b8b622eSRichard Henderson helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val, 2463eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 2464eed56642SAlex Bennée { 2465be5c4787STony Nguyen store_helper(env, addr, val, oi, retaddr, MO_UB); 2466eed56642SAlex Bennée } 2467eed56642SAlex Bennée 2468fc1bc777SRichard Henderson void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val, 2469eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 2470eed56642SAlex Bennée { 2471be5c4787STony Nguyen store_helper(env, addr, val, oi, retaddr, MO_LEUW); 2472eed56642SAlex Bennée } 2473eed56642SAlex Bennée 2474fc1bc777SRichard Henderson void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val, 2475eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 2476eed56642SAlex Bennée { 2477be5c4787STony Nguyen store_helper(env, addr, val, oi, retaddr, MO_BEUW); 2478eed56642SAlex Bennée } 2479eed56642SAlex Bennée 2480fc1bc777SRichard Henderson void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, 2481eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 
2482eed56642SAlex Bennée { 2483be5c4787STony Nguyen store_helper(env, addr, val, oi, retaddr, MO_LEUL); 2484eed56642SAlex Bennée } 2485eed56642SAlex Bennée 2486fc1bc777SRichard Henderson void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, 2487eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 2488eed56642SAlex Bennée { 2489be5c4787STony Nguyen store_helper(env, addr, val, oi, retaddr, MO_BEUL); 2490eed56642SAlex Bennée } 2491eed56642SAlex Bennée 2492fc1bc777SRichard Henderson void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, 2493eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 2494eed56642SAlex Bennée { 2495be5c4787STony Nguyen store_helper(env, addr, val, oi, retaddr, MO_LEQ); 2496eed56642SAlex Bennée } 2497eed56642SAlex Bennée 2498fc1bc777SRichard Henderson void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, 2499eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 2500eed56642SAlex Bennée { 2501be5c4787STony Nguyen store_helper(env, addr, val, oi, retaddr, MO_BEQ); 2502eed56642SAlex Bennée } 2503d9bb58e5SYang Zhong 2504d03f1408SRichard Henderson /* 2505d03f1408SRichard Henderson * Store Helpers for cpu_ldst.h 2506d03f1408SRichard Henderson */ 2507d03f1408SRichard Henderson 2508d03f1408SRichard Henderson static inline void QEMU_ALWAYS_INLINE 2509d03f1408SRichard Henderson cpu_store_helper(CPUArchState *env, target_ulong addr, uint64_t val, 2510d03f1408SRichard Henderson int mmu_idx, uintptr_t retaddr, MemOp op) 2511d03f1408SRichard Henderson { 2512d03f1408SRichard Henderson TCGMemOpIdx oi; 2513d03f1408SRichard Henderson uint16_t meminfo; 2514d03f1408SRichard Henderson 2515d03f1408SRichard Henderson meminfo = trace_mem_get_info(op, mmu_idx, true); 2516d03f1408SRichard Henderson trace_guest_mem_before_exec(env_cpu(env), addr, meminfo); 2517d03f1408SRichard Henderson 2518d03f1408SRichard Henderson oi = make_memop_idx(op, mmu_idx); 2519d03f1408SRichard Henderson store_helper(env, addr, val, oi, retaddr, op); 2520d03f1408SRichard Henderson 2521d03f1408SRichard Henderson qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, meminfo); 2522d03f1408SRichard Henderson } 2523d03f1408SRichard Henderson 2524d03f1408SRichard Henderson void cpu_stb_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val, 2525d03f1408SRichard Henderson int mmu_idx, uintptr_t retaddr) 2526d03f1408SRichard Henderson { 2527d03f1408SRichard Henderson cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_UB); 2528d03f1408SRichard Henderson } 2529d03f1408SRichard Henderson 2530b9e60257SRichard Henderson void cpu_stw_be_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val, 2531d03f1408SRichard Henderson int mmu_idx, uintptr_t retaddr) 2532d03f1408SRichard Henderson { 2533b9e60257SRichard Henderson cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_BEUW); 2534d03f1408SRichard Henderson } 2535d03f1408SRichard Henderson 2536b9e60257SRichard Henderson void cpu_stl_be_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val, 2537d03f1408SRichard Henderson int mmu_idx, uintptr_t retaddr) 2538d03f1408SRichard Henderson { 2539b9e60257SRichard Henderson cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_BEUL); 2540d03f1408SRichard Henderson } 2541d03f1408SRichard Henderson 2542b9e60257SRichard Henderson void cpu_stq_be_mmuidx_ra(CPUArchState *env, target_ulong addr, uint64_t val, 2543d03f1408SRichard Henderson int mmu_idx, uintptr_t retaddr) 2544d03f1408SRichard Henderson { 2545b9e60257SRichard Henderson cpu_store_helper(env, addr, val, 
mmu_idx, retaddr, MO_BEQ); 2546b9e60257SRichard Henderson } 2547b9e60257SRichard Henderson 2548b9e60257SRichard Henderson void cpu_stw_le_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val, 2549b9e60257SRichard Henderson int mmu_idx, uintptr_t retaddr) 2550b9e60257SRichard Henderson { 2551b9e60257SRichard Henderson cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_LEUW); 2552b9e60257SRichard Henderson } 2553b9e60257SRichard Henderson 2554b9e60257SRichard Henderson void cpu_stl_le_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val, 2555b9e60257SRichard Henderson int mmu_idx, uintptr_t retaddr) 2556b9e60257SRichard Henderson { 2557b9e60257SRichard Henderson cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_LEUL); 2558b9e60257SRichard Henderson } 2559b9e60257SRichard Henderson 2560b9e60257SRichard Henderson void cpu_stq_le_mmuidx_ra(CPUArchState *env, target_ulong addr, uint64_t val, 2561b9e60257SRichard Henderson int mmu_idx, uintptr_t retaddr) 2562b9e60257SRichard Henderson { 2563b9e60257SRichard Henderson cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_LEQ); 2564d03f1408SRichard Henderson } 2565d03f1408SRichard Henderson 2566cfe04a4bSRichard Henderson void cpu_stb_data_ra(CPUArchState *env, target_ulong ptr, 2567cfe04a4bSRichard Henderson uint32_t val, uintptr_t retaddr) 2568cfe04a4bSRichard Henderson { 2569cfe04a4bSRichard Henderson cpu_stb_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr); 2570cfe04a4bSRichard Henderson } 2571cfe04a4bSRichard Henderson 2572b9e60257SRichard Henderson void cpu_stw_be_data_ra(CPUArchState *env, target_ulong ptr, 2573cfe04a4bSRichard Henderson uint32_t val, uintptr_t retaddr) 2574cfe04a4bSRichard Henderson { 2575b9e60257SRichard Henderson cpu_stw_be_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr); 2576cfe04a4bSRichard Henderson } 2577cfe04a4bSRichard Henderson 2578b9e60257SRichard Henderson void cpu_stl_be_data_ra(CPUArchState *env, target_ulong ptr, 2579cfe04a4bSRichard Henderson uint32_t val, uintptr_t retaddr) 2580cfe04a4bSRichard Henderson { 2581b9e60257SRichard Henderson cpu_stl_be_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr); 2582cfe04a4bSRichard Henderson } 2583cfe04a4bSRichard Henderson 2584b9e60257SRichard Henderson void cpu_stq_be_data_ra(CPUArchState *env, target_ulong ptr, 2585cfe04a4bSRichard Henderson uint64_t val, uintptr_t retaddr) 2586cfe04a4bSRichard Henderson { 2587b9e60257SRichard Henderson cpu_stq_be_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr); 2588b9e60257SRichard Henderson } 2589b9e60257SRichard Henderson 2590b9e60257SRichard Henderson void cpu_stw_le_data_ra(CPUArchState *env, target_ulong ptr, 2591b9e60257SRichard Henderson uint32_t val, uintptr_t retaddr) 2592b9e60257SRichard Henderson { 2593b9e60257SRichard Henderson cpu_stw_le_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr); 2594b9e60257SRichard Henderson } 2595b9e60257SRichard Henderson 2596b9e60257SRichard Henderson void cpu_stl_le_data_ra(CPUArchState *env, target_ulong ptr, 2597b9e60257SRichard Henderson uint32_t val, uintptr_t retaddr) 2598b9e60257SRichard Henderson { 2599b9e60257SRichard Henderson cpu_stl_le_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr); 2600b9e60257SRichard Henderson } 2601b9e60257SRichard Henderson 2602b9e60257SRichard Henderson void cpu_stq_le_data_ra(CPUArchState *env, target_ulong ptr, 2603b9e60257SRichard Henderson uint64_t val, uintptr_t retaddr) 2604b9e60257SRichard Henderson { 2605b9e60257SRichard Henderson cpu_stq_le_mmuidx_ra(env, 
ptr, val, cpu_mmu_index(env, false), retaddr); 2606cfe04a4bSRichard Henderson } 2607cfe04a4bSRichard Henderson 2608cfe04a4bSRichard Henderson void cpu_stb_data(CPUArchState *env, target_ulong ptr, uint32_t val) 2609cfe04a4bSRichard Henderson { 2610cfe04a4bSRichard Henderson cpu_stb_data_ra(env, ptr, val, 0); 2611cfe04a4bSRichard Henderson } 2612cfe04a4bSRichard Henderson 2613b9e60257SRichard Henderson void cpu_stw_be_data(CPUArchState *env, target_ulong ptr, uint32_t val) 2614cfe04a4bSRichard Henderson { 2615b9e60257SRichard Henderson cpu_stw_be_data_ra(env, ptr, val, 0); 2616cfe04a4bSRichard Henderson } 2617cfe04a4bSRichard Henderson 2618b9e60257SRichard Henderson void cpu_stl_be_data(CPUArchState *env, target_ulong ptr, uint32_t val) 2619cfe04a4bSRichard Henderson { 2620b9e60257SRichard Henderson cpu_stl_be_data_ra(env, ptr, val, 0); 2621cfe04a4bSRichard Henderson } 2622cfe04a4bSRichard Henderson 2623b9e60257SRichard Henderson void cpu_stq_be_data(CPUArchState *env, target_ulong ptr, uint64_t val) 2624cfe04a4bSRichard Henderson { 2625b9e60257SRichard Henderson cpu_stq_be_data_ra(env, ptr, val, 0); 2626b9e60257SRichard Henderson } 2627b9e60257SRichard Henderson 2628b9e60257SRichard Henderson void cpu_stw_le_data(CPUArchState *env, target_ulong ptr, uint32_t val) 2629b9e60257SRichard Henderson { 2630b9e60257SRichard Henderson cpu_stw_le_data_ra(env, ptr, val, 0); 2631b9e60257SRichard Henderson } 2632b9e60257SRichard Henderson 2633b9e60257SRichard Henderson void cpu_stl_le_data(CPUArchState *env, target_ulong ptr, uint32_t val) 2634b9e60257SRichard Henderson { 2635b9e60257SRichard Henderson cpu_stl_le_data_ra(env, ptr, val, 0); 2636b9e60257SRichard Henderson } 2637b9e60257SRichard Henderson 2638b9e60257SRichard Henderson void cpu_stq_le_data(CPUArchState *env, target_ulong ptr, uint64_t val) 2639b9e60257SRichard Henderson { 2640b9e60257SRichard Henderson cpu_stq_le_data_ra(env, ptr, val, 0); 2641cfe04a4bSRichard Henderson } 2642cfe04a4bSRichard Henderson 2643d9bb58e5SYang Zhong /* First set of helpers allows passing in of OI and RETADDR. This makes 2644d9bb58e5SYang Zhong them callable from other helpers. 
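 
   For instance, expanding atomic_template.h with DATA_SIZE 4 yields
   helpers along the lines of helper_atomic_cmpxchgl_le_mmu and
   helper_atomic_cmpxchgl_be_mmu (names shown for illustration), each
   taking the extra oi/retaddr arguments declared by EXTRA_ARGS.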
*/ 2645d9bb58e5SYang Zhong 2646d9bb58e5SYang Zhong #define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr 2647d9bb58e5SYang Zhong #define ATOMIC_NAME(X) \ 2648d9bb58e5SYang Zhong HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu)) 2649707526adSRichard Henderson #define ATOMIC_MMU_DECLS 2650707526adSRichard Henderson #define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, retaddr) 2651707526adSRichard Henderson #define ATOMIC_MMU_CLEANUP 2652504f73f7SAlex Bennée #define ATOMIC_MMU_IDX get_mmuidx(oi) 2653d9bb58e5SYang Zhong 2654139c1837SPaolo Bonzini #include "atomic_common.c.inc" 2655d9bb58e5SYang Zhong 2656d9bb58e5SYang Zhong #define DATA_SIZE 1 2657d9bb58e5SYang Zhong #include "atomic_template.h" 2658d9bb58e5SYang Zhong 2659d9bb58e5SYang Zhong #define DATA_SIZE 2 2660d9bb58e5SYang Zhong #include "atomic_template.h" 2661d9bb58e5SYang Zhong 2662d9bb58e5SYang Zhong #define DATA_SIZE 4 2663d9bb58e5SYang Zhong #include "atomic_template.h" 2664d9bb58e5SYang Zhong 2665d9bb58e5SYang Zhong #ifdef CONFIG_ATOMIC64 2666d9bb58e5SYang Zhong #define DATA_SIZE 8 2667d9bb58e5SYang Zhong #include "atomic_template.h" 2668d9bb58e5SYang Zhong #endif 2669d9bb58e5SYang Zhong 2670e6cd4bb5SRichard Henderson #if HAVE_CMPXCHG128 || HAVE_ATOMIC128 2671d9bb58e5SYang Zhong #define DATA_SIZE 16 2672d9bb58e5SYang Zhong #include "atomic_template.h" 2673d9bb58e5SYang Zhong #endif 2674d9bb58e5SYang Zhong 2675d9bb58e5SYang Zhong /* Second set of helpers are directly callable from TCG as helpers. */ 2676d9bb58e5SYang Zhong 2677d9bb58e5SYang Zhong #undef EXTRA_ARGS 2678d9bb58e5SYang Zhong #undef ATOMIC_NAME 2679d9bb58e5SYang Zhong #undef ATOMIC_MMU_LOOKUP 2680d9bb58e5SYang Zhong #define EXTRA_ARGS , TCGMemOpIdx oi 2681d9bb58e5SYang Zhong #define ATOMIC_NAME(X) HELPER(glue(glue(atomic_ ## X, SUFFIX), END)) 2682707526adSRichard Henderson #define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, GETPC()) 2683d9bb58e5SYang Zhong 2684d9bb58e5SYang Zhong #define DATA_SIZE 1 2685d9bb58e5SYang Zhong #include "atomic_template.h" 2686d9bb58e5SYang Zhong 2687d9bb58e5SYang Zhong #define DATA_SIZE 2 2688d9bb58e5SYang Zhong #include "atomic_template.h" 2689d9bb58e5SYang Zhong 2690d9bb58e5SYang Zhong #define DATA_SIZE 4 2691d9bb58e5SYang Zhong #include "atomic_template.h" 2692d9bb58e5SYang Zhong 2693d9bb58e5SYang Zhong #ifdef CONFIG_ATOMIC64 2694d9bb58e5SYang Zhong #define DATA_SIZE 8 2695d9bb58e5SYang Zhong #include "atomic_template.h" 2696d9bb58e5SYang Zhong #endif 2697504f73f7SAlex Bennée #undef ATOMIC_MMU_IDX 2698d9bb58e5SYang Zhong 2699d9bb58e5SYang Zhong /* Code access functions. 
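 
   These fetch from the guest code stream, e.g. a target decoder
   reading a 32-bit insn via cpu_ldl_code(env, pc) (illustrative).
   They pass retaddr = 0, since translation is not running from within
   a TB that could be unwound, and they select the MMU index with
   cpu_mmu_index(env, true).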
*/ 2700d9bb58e5SYang Zhong 2701fc4120a3SRichard Henderson static uint64_t full_ldub_code(CPUArchState *env, target_ulong addr, 27022dd92606SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 27032dd92606SRichard Henderson { 2704fc4120a3SRichard Henderson return load_helper(env, addr, oi, retaddr, MO_8, true, full_ldub_code); 27052dd92606SRichard Henderson } 27062dd92606SRichard Henderson 2707fc4120a3SRichard Henderson uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr) 2708eed56642SAlex Bennée { 2709fc4120a3SRichard Henderson TCGMemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(env, true)); 2710fc4120a3SRichard Henderson return full_ldub_code(env, addr, oi, 0); 27112dd92606SRichard Henderson } 27122dd92606SRichard Henderson 2713fc4120a3SRichard Henderson static uint64_t full_lduw_code(CPUArchState *env, target_ulong addr, 27144cef72d0SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 27154cef72d0SAlex Bennée { 2716fc4120a3SRichard Henderson return load_helper(env, addr, oi, retaddr, MO_TEUW, true, full_lduw_code); 27174cef72d0SAlex Bennée } 27184cef72d0SAlex Bennée 2719fc4120a3SRichard Henderson uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr) 27202dd92606SRichard Henderson { 2721fc4120a3SRichard Henderson TCGMemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(env, true)); 2722fc4120a3SRichard Henderson return full_lduw_code(env, addr, oi, 0); 2723eed56642SAlex Bennée } 2724d9bb58e5SYang Zhong 2725fc4120a3SRichard Henderson static uint64_t full_ldl_code(CPUArchState *env, target_ulong addr, 2726fc1bc777SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 2727eed56642SAlex Bennée { 2728fc4120a3SRichard Henderson return load_helper(env, addr, oi, retaddr, MO_TEUL, true, full_ldl_code); 27292dd92606SRichard Henderson } 27302dd92606SRichard Henderson 2731fc4120a3SRichard Henderson uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr) 27324cef72d0SAlex Bennée { 2733fc4120a3SRichard Henderson TCGMemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, true)); 2734fc4120a3SRichard Henderson return full_ldl_code(env, addr, oi, 0); 27354cef72d0SAlex Bennée } 27364cef72d0SAlex Bennée 2737fc4120a3SRichard Henderson static uint64_t full_ldq_code(CPUArchState *env, target_ulong addr, 27382dd92606SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 27392dd92606SRichard Henderson { 2740fc4120a3SRichard Henderson return load_helper(env, addr, oi, retaddr, MO_TEQ, true, full_ldq_code); 2741eed56642SAlex Bennée } 2742d9bb58e5SYang Zhong 2743fc4120a3SRichard Henderson uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr) 2744eed56642SAlex Bennée { 2745fc4120a3SRichard Henderson TCGMemOpIdx oi = make_memop_idx(MO_TEQ, cpu_mmu_index(env, true)); 2746fc4120a3SRichard Henderson return full_ldq_code(env, addr, oi, 0); 2747eed56642SAlex Bennée } 2748