/*
 * Common CPU TLB handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "hw/core/tcg-cpu-ops.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/tb-hash.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
#include "qemu/error-report.h"
#include "exec/log.h"
#include "exec/helper-proto.h"
#include "qemu/atomic.h"
#include "qemu/atomic128.h"
#include "exec/translate-all.h"
#include "trace/trace-root.h"
#include "trace/mem.h"
#include "internal.h"
#ifdef CONFIG_PLUGIN
#include "qemu/plugin-memory.h"
#endif

/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif

#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)
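
/*
 * Because both gates are compile-time constants, the body of tlb_debug()
 * is always parsed and type-checked, then discarded by the compiler when
 * debugging is disabled.  A representative call from later in this file:
 *
 *     tlb_debug("page addr:" TARGET_FMT_lx " mmu_map:0x%x\n", addr, idxmap);
 */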
#define assert_cpu_is_self(cpu) do {                              \
        if (DEBUG_TLB_GATE) {                                     \
            g_assert(!(cpu)->created || qemu_cpu_is_self(cpu));   \
        }                                                         \
    } while (0)

/* run_on_cpu_data.target_ptr should always be big enough for a
 * target_ulong even on 32 bit builds */
QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));

/* We currently can't handle more than 16 bits in the MMUIDX bitmask.
 */
QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
#define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)

static inline size_t tlb_n_entries(CPUTLBDescFast *fast)
{
    return (fast->mask >> CPU_TLB_ENTRY_BITS) + 1;
}

static inline size_t sizeof_tlb(CPUTLBDescFast *fast)
{
    return fast->mask + (1 << CPU_TLB_ENTRY_BITS);
}
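
/*
 * Worked example (illustrative): with CPU_TLB_ENTRY_BITS == 5, i.e.
 * 32-byte entries, a 256-entry TLB has
 * fast->mask == (256 - 1) << 5 == 0x1fe0, so tlb_n_entries() returns
 * (0x1fe0 >> 5) + 1 == 256 and sizeof_tlb() returns 0x1fe0 + 32 == 8192
 * bytes.  Keeping the size pre-shifted in fast->mask lets the TCG fast
 * path mask a shifted address straight into a table offset.
 */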
static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
                             size_t max_entries)
{
    desc->window_begin_ns = ns;
    desc->window_max_entries = max_entries;
}

static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
{
    unsigned int i, i0 = tb_jmp_cache_hash_page(page_addr);

    for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
        qatomic_set(&cpu->tb_jmp_cache[i0 + i], NULL);
    }
}

static void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
{
    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page. */
    tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
    tb_jmp_cache_clear_page(cpu, addr);
}

/**
 * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
 * @desc: The CPUTLBDesc portion of the TLB
 * @fast: The CPUTLBDescFast portion of the same TLB
 *
 * Called with tlb_c.lock held.
 *
 * We have two main constraints when resizing a TLB: (1) we only resize it
 * on a TLB flush (otherwise we'd have to take a perf hit by either rehashing
 * the array or unnecessarily flushing it), which means we do not control how
 * frequently the resizing can occur; (2) we don't have access to the guest's
 * future scheduling decisions, and therefore have to decide the magnitude of
 * the resize based on past observations.
 *
 * In general, a memory-hungry process can benefit greatly from an appropriately
 * sized TLB, since a guest TLB miss is very expensive. This doesn't mean that
 * we just have to make the TLB as large as possible; while an oversized TLB
 * results in minimal TLB miss rates, it also takes longer to be flushed
 * (flushes can be _very_ frequent), and the reduced locality can also hurt
 * performance.
 *
 * To achieve near-optimal performance for all kinds of workloads, we:
 *
 * 1. Aggressively increase the size of the TLB when the use rate of the
 * TLB being flushed is high, since it is likely that in the near future this
 * memory-hungry process will execute again, and its memory hungriness will
 * probably be similar.
 *
 * 2. Slowly reduce the size of the TLB as the use rate declines over a
 * reasonably large time window. The rationale is that if in such a time window
 * we have not observed a high TLB use rate, it is likely that we won't observe
 * it in the near future. In that case, once a time window expires we downsize
 * the TLB to match the maximum use rate observed in the window.
 *
 * 3. Try to keep the maximum use rate in a time window in the 30-70% range,
 * since in that range performance is likely near-optimal. Recall that the TLB
 * is direct mapped, so we want the use rate to be low (or at least not too
 * high), since otherwise we are likely to have a significant amount of
 * conflict misses.
 */
static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast,
                                  int64_t now)
{
    size_t old_size = tlb_n_entries(fast);
    size_t rate;
    size_t new_size = old_size;
    int64_t window_len_ms = 100;
    int64_t window_len_ns = window_len_ms * 1000 * 1000;
    bool window_expired = now > desc->window_begin_ns + window_len_ns;

    if (desc->n_used_entries > desc->window_max_entries) {
        desc->window_max_entries = desc->n_used_entries;
    }
    rate = desc->window_max_entries * 100 / old_size;
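
    /*
     * Illustrative numbers: with old_size == 1024, a window max of 800
     * used entries yields rate == 78 and the TLB doubles below; a window
     * max of 150 with an expired window yields rate == 14, shrinking to
     * pow2ceil(150) == 256, whose expected rate 150/256 (~58%) stays
     * under the 70% guard that follows.
     */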
    if (rate > 70) {
        new_size = MIN(old_size << 1, 1 << CPU_TLB_DYN_MAX_BITS);
    } else if (rate < 30 && window_expired) {
        size_t ceil = pow2ceil(desc->window_max_entries);
        size_t expected_rate = desc->window_max_entries * 100 / ceil;

        /*
         * Avoid undersizing when the max number of entries seen is just below
         * a pow2. For instance, if max_entries == 1025, the expected use rate
         * would be 1025/2048==50%. However, if max_entries == 1023, we'd get
         * 1023/1024==99.9% use rate, so we'd likely end up doubling the size
         * later. Thus, make sure that the expected use rate remains below 70%.
         * (and since we double the size, that means the lowest rate we'd
         * expect to get is 35%, which is still in the 30-70% range where
         * we consider that the size is appropriate.)
         */
        if (expected_rate > 70) {
            ceil *= 2;
        }
        new_size = MAX(ceil, 1 << CPU_TLB_DYN_MIN_BITS);
    }

    if (new_size == old_size) {
        if (window_expired) {
            tlb_window_reset(desc, now, desc->n_used_entries);
        }
        return;
    }

    g_free(fast->table);
    g_free(desc->iotlb);

    tlb_window_reset(desc, now, 0);
    /* desc->n_used_entries is cleared by the caller */
    fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
    fast->table = g_try_new(CPUTLBEntry, new_size);
    desc->iotlb = g_try_new(CPUIOTLBEntry, new_size);

    /*
     * If the allocations fail, try smaller sizes. We just freed some
     * memory, so going back to half of new_size has a good chance of working.
     * Increased memory pressure elsewhere in the system might cause the
     * allocations to fail though, so we progressively reduce the allocation
     * size, aborting if we cannot even allocate the smallest TLB we support.
     */
    while (fast->table == NULL || desc->iotlb == NULL) {
        if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
            error_report("%s: %s", __func__, strerror(errno));
            abort();
        }
        new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS);
        fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;

        g_free(fast->table);
        g_free(desc->iotlb);
        fast->table = g_try_new(CPUTLBEntry, new_size);
        desc->iotlb = g_try_new(CPUIOTLBEntry, new_size);
    }
}
static void tlb_mmu_flush_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast)
{
    desc->n_used_entries = 0;
    desc->large_page_addr = -1;
    desc->large_page_mask = -1;
    desc->vindex = 0;
    memset(fast->table, -1, sizeof_tlb(fast));
    memset(desc->vtable, -1, sizeof(desc->vtable));
}

static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx,
                                        int64_t now)
{
    CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
    CPUTLBDescFast *fast = &env_tlb(env)->f[mmu_idx];

    tlb_mmu_resize_locked(desc, fast, now);
    tlb_mmu_flush_locked(desc, fast);
}

static void tlb_mmu_init(CPUTLBDesc *desc, CPUTLBDescFast *fast, int64_t now)
{
    size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;

    tlb_window_reset(desc, now, 0);
    desc->n_used_entries = 0;
    fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
    fast->table = g_new(CPUTLBEntry, n_entries);
    desc->iotlb = g_new(CPUIOTLBEntry, n_entries);
    tlb_mmu_flush_locked(desc, fast);
}

static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
{
    env_tlb(env)->d[mmu_idx].n_used_entries++;
}

static inline void tlb_n_used_entries_dec(CPUArchState *env, uintptr_t mmu_idx)
{
    env_tlb(env)->d[mmu_idx].n_used_entries--;
}

void tlb_init(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;
    int64_t now = get_clock_realtime();
    int i;

    qemu_spin_init(&env_tlb(env)->c.lock);

    /* All tlbs are initialized flushed. */
    env_tlb(env)->c.dirty = 0;

    for (i = 0; i < NB_MMU_MODES; i++) {
        tlb_mmu_init(&env_tlb(env)->d[i], &env_tlb(env)->f[i], now);
    }
}
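
/*
 * For instance, if CPU_TLB_DYN_DEFAULT_BITS is 8, every mmu_idx starts
 * out with a 256-entry table, and tlb_mmu_resize_locked() later grows or
 * shrinks it between 1 << CPU_TLB_DYN_MIN_BITS and
 * 1 << CPU_TLB_DYN_MAX_BITS entries based on the observed use rate.
 */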
void tlb_destroy(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;
    int i;

    qemu_spin_destroy(&env_tlb(env)->c.lock);
    for (i = 0; i < NB_MMU_MODES; i++) {
        CPUTLBDesc *desc = &env_tlb(env)->d[i];
        CPUTLBDescFast *fast = &env_tlb(env)->f[i];

        g_free(fast->table);
        g_free(desc->iotlb);
    }
}

/* flush_all_helper: run fn across all cpus
 *
 * If the caller needs a synchronisation point, it additionally queues
 * the src cpu's helper as "safe" work (see the *_synced variants below),
 * creating a point where all queued work will be finished before
 * execution starts again.
 */
static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
                             run_on_cpu_data d)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, fn, d);
        }
    }
}

void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
{
    CPUState *cpu;
    size_t full = 0, part = 0, elide = 0;

    CPU_FOREACH(cpu) {
        CPUArchState *env = cpu->env_ptr;

        full += qatomic_read(&env_tlb(env)->c.full_flush_count);
        part += qatomic_read(&env_tlb(env)->c.part_flush_count);
        elide += qatomic_read(&env_tlb(env)->c.elide_flush_count);
    }
    *pfull = full;
    *ppart = part;
    *pelide = elide;
}

static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    uint16_t asked = data.host_int;
    uint16_t all_dirty, work, to_clean;
    int64_t now = get_clock_realtime();

    assert_cpu_is_self(cpu);

    tlb_debug("mmu_idx:0x%04" PRIx16 "\n", asked);

    qemu_spin_lock(&env_tlb(env)->c.lock);

    all_dirty = env_tlb(env)->c.dirty;
    to_clean = asked & all_dirty;
    all_dirty &= ~to_clean;
    env_tlb(env)->c.dirty = all_dirty;
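
    /*
     * "work &= work - 1" clears the lowest set bit, so this walks the
     * to_clean bitmask one mmu_idx at a time.  E.g. to_clean == 0b0101
     * flushes mmu_idx 0 (ctz32 == 0), then mmu_idx 2, then stops.
     */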
    for (work = to_clean; work != 0; work &= work - 1) {
        int mmu_idx = ctz32(work);
        tlb_flush_one_mmuidx_locked(env, mmu_idx, now);
    }

    qemu_spin_unlock(&env_tlb(env)->c.lock);

    cpu_tb_jmp_cache_clear(cpu);

    if (to_clean == ALL_MMUIDX_BITS) {
        qatomic_set(&env_tlb(env)->c.full_flush_count,
                    env_tlb(env)->c.full_flush_count + 1);
    } else {
        qatomic_set(&env_tlb(env)->c.part_flush_count,
                    env_tlb(env)->c.part_flush_count + ctpop16(to_clean));
        if (to_clean != asked) {
            qatomic_set(&env_tlb(env)->c.elide_flush_count,
                        env_tlb(env)->c.elide_flush_count +
                        ctpop16(asked & ~to_clean));
        }
    }
}

void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
    tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);

    if (cpu->created && !qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
                         RUN_ON_CPU_HOST_INT(idxmap));
    } else {
        tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap));
    }
}

void tlb_flush(CPUState *cpu)
{
    tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS);
}

void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus(src_cpu, ALL_MMUIDX_BITS);
}

void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS);
}
static bool tlb_hit_page_mask_anyprot(CPUTLBEntry *tlb_entry,
                                      target_ulong page, target_ulong mask)
{
    page &= mask;
    mask &= TARGET_PAGE_MASK | TLB_INVALID_MASK;

    return (page == (tlb_entry->addr_read & mask) ||
            page == (tlb_addr_write(tlb_entry) & mask) ||
            page == (tlb_entry->addr_code & mask));
}

static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry,
                                        target_ulong page)
{
    return tlb_hit_page_mask_anyprot(tlb_entry, page, -1);
}

/**
 * tlb_entry_is_empty - return true if the entry is not in use
 * @te: pointer to CPUTLBEntry
 */
static inline bool tlb_entry_is_empty(const CPUTLBEntry *te)
{
    return te->addr_read == -1 && te->addr_write == -1 && te->addr_code == -1;
}
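
/*
 * This dovetails with tlb_flush_entry_mask_locked() below: flushing an
 * entry is a memset to -1, which leaves addr_read, addr_write and
 * addr_code all equal to -1, exactly the state tlb_entry_is_empty()
 * recognizes.
 */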
/* Called with tlb_c.lock held */
static bool tlb_flush_entry_mask_locked(CPUTLBEntry *tlb_entry,
                                        target_ulong page,
                                        target_ulong mask)
{
    if (tlb_hit_page_mask_anyprot(tlb_entry, page, mask)) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
        return true;
    }
    return false;
}

static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry,
                                          target_ulong page)
{
    return tlb_flush_entry_mask_locked(tlb_entry, page, -1);
}

/* Called with tlb_c.lock held */
static void tlb_flush_vtlb_page_mask_locked(CPUArchState *env, int mmu_idx,
                                            target_ulong page,
                                            target_ulong mask)
{
    CPUTLBDesc *d = &env_tlb(env)->d[mmu_idx];
    int k;

    assert_cpu_is_self(env_cpu(env));
    for (k = 0; k < CPU_VTLB_SIZE; k++) {
        if (tlb_flush_entry_mask_locked(&d->vtable[k], page, mask)) {
            tlb_n_used_entries_dec(env, mmu_idx);
        }
    }
}

static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx,
                                              target_ulong page)
{
    tlb_flush_vtlb_page_mask_locked(env, mmu_idx, page, -1);
}

static void tlb_flush_page_locked(CPUArchState *env, int midx,
                                  target_ulong page)
{
    target_ulong lp_addr = env_tlb(env)->d[midx].large_page_addr;
    target_ulong lp_mask = env_tlb(env)->d[midx].large_page_mask;

    /* Check if we need to flush due to large pages.  */
    if ((page & lp_mask) == lp_addr) {
        tlb_debug("forcing full flush midx %d ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  midx, lp_addr, lp_mask);
        tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
    } else {
        if (tlb_flush_entry_locked(tlb_entry(env, midx, page), page)) {
            tlb_n_used_entries_dec(env, midx);
        }
        tlb_flush_vtlb_page_locked(env, midx, page);
    }
}
/**
 * tlb_flush_page_by_mmuidx_async_0:
 * @cpu: cpu on which to flush
 * @addr: page of virtual address to flush
 * @idxmap: set of mmu_idx to flush
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, flush one page
 * at @addr from the tlbs indicated by @idxmap from @cpu.
 */
static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu,
                                             target_ulong addr,
                                             uint16_t idxmap)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("page addr:" TARGET_FMT_lx " mmu_map:0x%x\n", addr, idxmap);

    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if ((idxmap >> mmu_idx) & 1) {
            tlb_flush_page_locked(env, mmu_idx, addr);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);

    tb_flush_jmp_cache(cpu, addr);
}

/**
 * tlb_flush_page_by_mmuidx_async_1:
 * @cpu: cpu on which to flush
 * @data: encoded addr + idxmap
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, called through
 * async_run_on_cpu.  The idxmap parameter is encoded in the page
 * offset of the target_ptr field.  This limits the set of mmu_idx
 * that can be passed via this method.
 */
static void tlb_flush_page_by_mmuidx_async_1(CPUState *cpu,
                                             run_on_cpu_data data)
{
    target_ulong addr_and_idxmap = (target_ulong) data.target_ptr;
    target_ulong addr = addr_and_idxmap & TARGET_PAGE_MASK;
    uint16_t idxmap = addr_and_idxmap & ~TARGET_PAGE_MASK;

    tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
}
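
/*
 * Worked example of the encoding (illustrative): with 4 KiB pages,
 * TARGET_PAGE_MASK clears the low 12 bits, so a page-aligned addr such
 * as 0x7f234000 and an idxmap of 0x0003 pack into 0x7f234003 and decode
 * back without loss.  Any idxmap >= TARGET_PAGE_SIZE must instead go
 * through the heap-allocated TLBFlushPageByMMUIdxData path below.
 */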
typedef struct {
    target_ulong addr;
    uint16_t idxmap;
} TLBFlushPageByMMUIdxData;

/**
 * tlb_flush_page_by_mmuidx_async_2:
 * @cpu: cpu on which to flush
 * @data: allocated addr + idxmap
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, called through
 * async_run_on_cpu.  The addr+idxmap parameters are stored in a
 * TLBFlushPageByMMUIdxData structure that has been allocated
 * specifically for this helper.  Free the structure when done.
 */
static void tlb_flush_page_by_mmuidx_async_2(CPUState *cpu,
                                             run_on_cpu_data data)
{
    TLBFlushPageByMMUIdxData *d = data.host_ptr;

    tlb_flush_page_by_mmuidx_async_0(cpu, d->addr, d->idxmap);
    g_free(d);
}

void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
{
    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    if (qemu_cpu_is_self(cpu)) {
        tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
    } else if (idxmap < TARGET_PAGE_SIZE) {
        /*
         * Most targets have only a few mmu_idx.  In the case where
         * we can stuff idxmap into the low TARGET_PAGE_BITS, avoid
         * allocating memory for this operation.
         */
        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        TLBFlushPageByMMUIdxData *d = g_new(TLBFlushPageByMMUIdxData, 1);

        /* Otherwise allocate a structure, freed by the worker.  */
        d->addr = addr;
        d->idxmap = idxmap;
        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_2,
                         RUN_ON_CPU_HOST_PTR(d));
    }
}

void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
    tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS);
}
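
/*
 * An illustrative call: a target invalidating one page in mmu_idx 0 and
 * mmu_idx 2 only would use
 *
 *     tlb_flush_page_by_mmuidx(cpu, addr, (1 << 0) | (1 << 2));
 *
 * while tlb_flush_page() above is the shorthand for all mmu_idx.
 */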
void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr,
                                       uint16_t idxmap)
{
    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    /*
     * Allocate memory to hold addr+idxmap only when needed.
     * See tlb_flush_page_by_mmuidx for details.
     */
    if (idxmap < TARGET_PAGE_SIZE) {
        flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        CPUState *dst_cpu;

        /* Allocate a separate data block for each destination cpu.  */
        CPU_FOREACH(dst_cpu) {
            if (dst_cpu != src_cpu) {
                TLBFlushPageByMMUIdxData *d
                    = g_new(TLBFlushPageByMMUIdxData, 1);

                d->addr = addr;
                d->idxmap = idxmap;
                async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
                                 RUN_ON_CPU_HOST_PTR(d));
            }
        }
    }

    tlb_flush_page_by_mmuidx_async_0(src_cpu, addr, idxmap);
}

void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
{
    tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS);
}
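
/*
 * The _synced variant below differs in how the source cpu is handled:
 * its own flush is queued as "safe" work via async_safe_run_on_cpu()
 * rather than run immediately, so when execution resumes, every cpu is
 * guaranteed to have completed the flush.
 */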
void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                              target_ulong addr,
                                              uint16_t idxmap)
{
    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    /*
     * Allocate memory to hold addr+idxmap only when needed.
     * See tlb_flush_page_by_mmuidx for details.
     */
    if (idxmap < TARGET_PAGE_SIZE) {
        flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
        async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                              RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        CPUState *dst_cpu;
        TLBFlushPageByMMUIdxData *d;

        /* Allocate a separate data block for each destination cpu.  */
        CPU_FOREACH(dst_cpu) {
            if (dst_cpu != src_cpu) {
                d = g_new(TLBFlushPageByMMUIdxData, 1);
                d->addr = addr;
                d->idxmap = idxmap;
                async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
                                 RUN_ON_CPU_HOST_PTR(d));
            }
        }

        d = g_new(TLBFlushPageByMMUIdxData, 1);
        d->addr = addr;
        d->idxmap = idxmap;
        async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_2,
                              RUN_ON_CPU_HOST_PTR(d));
    }
}

void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr)
{
    tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
}

static void tlb_flush_range_locked(CPUArchState *env, int midx,
                                   target_ulong addr, target_ulong len,
                                   unsigned bits)
{
    CPUTLBDesc *d = &env_tlb(env)->d[midx];
    CPUTLBDescFast *f = &env_tlb(env)->f[midx];
    target_ulong mask = MAKE_64BIT_MASK(0, bits);

    /*
     * If @bits is smaller than the tlb size, there may be multiple entries
     * within the TLB; otherwise all addresses that match under @mask hit
     * the same TLB entry.
     * TODO: Perhaps allow bits to be a few bits less than the size.
     * For now, just flush the entire TLB.
     *
     * If @len is larger than the tlb size, then it will take longer to
     * test all of the entries in the TLB than it will to flush it all.
     */
    if (mask < f->mask || len > f->mask) {
        tlb_debug("forcing full flush midx %d ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx "+" TARGET_FMT_lx ")\n",
                  midx, addr, mask, len);
        tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
        return;
    }

    /*
     * Check if we need to flush due to large pages.
     * Because large_page_mask contains all 1's from the msb,
     * we only need to test the end of the range.
     */
    if (((addr + len - 1) & d->large_page_mask) == d->large_page_addr) {
        tlb_debug("forcing full flush midx %d ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  midx, d->large_page_addr, d->large_page_mask);
        tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
        return;
    }
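
    /*
     * Illustrative example of the end-of-range test above: with a tracked
     * 1 MiB large page at 0x00300000 (large_page_mask == 0xfff00000 on a
     * 32-bit target), flushing addr == 0x00380000 with len == 0x10000
     * ends at 0x0038ffff, and 0x0038ffff & 0xfff00000 == 0x00300000 ==
     * large_page_addr, so the range hits the large page and the check
     * above forces a full flush.
     */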
    for (target_ulong i = 0; i < len; i += TARGET_PAGE_SIZE) {
        target_ulong page = addr + i;
        CPUTLBEntry *entry = tlb_entry(env, midx, page);

        if (tlb_flush_entry_mask_locked(entry, page, mask)) {
            tlb_n_used_entries_dec(env, midx);
        }
        tlb_flush_vtlb_page_mask_locked(env, midx, page, mask);
    }
}

typedef struct {
    target_ulong addr;
    target_ulong len;
    uint16_t idxmap;
    uint16_t bits;
} TLBFlushRangeData;

static void
tlb_flush_page_bits_by_mmuidx_async_0(CPUState *cpu,
                                      TLBFlushRangeData d)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("range:" TARGET_FMT_lx "/%u+" TARGET_FMT_lx " mmu_map:0x%x\n",
              d.addr, d.bits, d.len, d.idxmap);

    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if ((d.idxmap >> mmu_idx) & 1) {
            tlb_flush_range_locked(env, mmu_idx, d.addr, d.len, d.bits);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);

    for (target_ulong i = 0; i < d.len; i += TARGET_PAGE_SIZE) {
        tb_flush_jmp_cache(cpu, d.addr + i);
    }
}

static void tlb_flush_page_bits_by_mmuidx_async_2(CPUState *cpu,
                                                  run_on_cpu_data data)
{
    TLBFlushRangeData *d = data.host_ptr;
    tlb_flush_page_bits_by_mmuidx_async_0(cpu, *d);
    g_free(d);
}
void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr,
                               target_ulong len, uint16_t idxmap,
                               unsigned bits)
{
    TLBFlushRangeData d;

    /*
     * If all bits are significant, and len is small,
     * this devolves to tlb_flush_page.
     */
    if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
        tlb_flush_page_by_mmuidx(cpu, addr, idxmap);
        return;
    }
    /* If no page bits are significant, this devolves to tlb_flush. */
    if (bits < TARGET_PAGE_BITS) {
        tlb_flush_by_mmuidx(cpu, idxmap);
        return;
    }

    /* This should already be page aligned */
    d.addr = addr & TARGET_PAGE_MASK;
    d.len = len;
    d.idxmap = idxmap;
    d.bits = bits;

    if (qemu_cpu_is_self(cpu)) {
        tlb_flush_page_bits_by_mmuidx_async_0(cpu, d);
    } else {
        /* Otherwise allocate a structure, freed by the worker.  */
        TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
        async_run_on_cpu(cpu, tlb_flush_page_bits_by_mmuidx_async_2,
                         RUN_ON_CPU_HOST_PTR(p));
    }
}

void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr,
                                   uint16_t idxmap, unsigned bits)
{
    tlb_flush_range_by_mmuidx(cpu, addr, TARGET_PAGE_SIZE, idxmap, bits);
}
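
/*
 * For example (illustrative), invalidating a 64 KiB region of 4 KiB
 * pages with all address bits significant:
 *
 *     tlb_flush_range_by_mmuidx(cpu, addr, 16 * TARGET_PAGE_SIZE,
 *                               idxmap, TARGET_LONG_BITS);
 *
 * probes up to 16 pages per mmu_idx, unless tlb_flush_range_locked()
 * decides that a full flush of that mmu_idx is cheaper.
 */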
void tlb_flush_range_by_mmuidx_all_cpus(CPUState *src_cpu,
                                        target_ulong addr, target_ulong len,
                                        uint16_t idxmap, unsigned bits)
{
    TLBFlushRangeData d;
    CPUState *dst_cpu;

    /*
     * If all bits are significant, and len is small,
     * this devolves to tlb_flush_page.
     */
    if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
        tlb_flush_page_by_mmuidx_all_cpus(src_cpu, addr, idxmap);
        return;
    }
    /* If no page bits are significant, this devolves to tlb_flush. */
    if (bits < TARGET_PAGE_BITS) {
        tlb_flush_by_mmuidx_all_cpus(src_cpu, idxmap);
        return;
    }

    /* This should already be page aligned */
    d.addr = addr & TARGET_PAGE_MASK;
    d.len = len;
    d.idxmap = idxmap;
    d.bits = bits;

    /* Allocate a separate data block for each destination cpu.  */
    CPU_FOREACH(dst_cpu) {
        if (dst_cpu != src_cpu) {
            TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
            async_run_on_cpu(dst_cpu,
                             tlb_flush_page_bits_by_mmuidx_async_2,
                             RUN_ON_CPU_HOST_PTR(p));
        }
    }

    tlb_flush_page_bits_by_mmuidx_async_0(src_cpu, d);
}

void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu,
                                            target_ulong addr,
                                            uint16_t idxmap, unsigned bits)
{
    tlb_flush_range_by_mmuidx_all_cpus(src_cpu, addr, TARGET_PAGE_SIZE,
                                       idxmap, bits);
}
void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                                   target_ulong addr,
                                                   uint16_t idxmap,
                                                   unsigned bits)
{
    TLBFlushRangeData d, *p;
    CPUState *dst_cpu;

    /* If all bits are significant, this devolves to tlb_flush_page. */
    if (bits >= TARGET_LONG_BITS) {
        tlb_flush_page_by_mmuidx_all_cpus_synced(src_cpu, addr, idxmap);
        return;
    }
    /* If no page bits are significant, this devolves to tlb_flush. */
    if (bits < TARGET_PAGE_BITS) {
        tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, idxmap);
        return;
    }

    /* This should already be page aligned */
    d.addr = addr & TARGET_PAGE_MASK;
    d.len = TARGET_PAGE_SIZE;
    d.idxmap = idxmap;
    d.bits = bits;

    /* Allocate a separate data block for each destination cpu.  */
    CPU_FOREACH(dst_cpu) {
        if (dst_cpu != src_cpu) {
            p = g_memdup(&d, sizeof(d));
            async_run_on_cpu(dst_cpu, tlb_flush_page_bits_by_mmuidx_async_2,
                             RUN_ON_CPU_HOST_PTR(p));
        }
    }

    p = g_memdup(&d, sizeof(d));
    async_safe_run_on_cpu(src_cpu, tlb_flush_page_bits_by_mmuidx_async_2,
                          RUN_ON_CPU_HOST_PTR(p));
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
                                             DIRTY_MEMORY_CODE);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
void tlb_unprotect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}


/*
 * Dirty write flag handling
 *
 * When the TCG code writes to a location it looks up the address in
 * the TLB and uses that data to compute the final address. If any of
 * the lower bits of the address are set then the slow path is forced.
 * There are a number of reasons to do this but for normal RAM the
 * most usual is detecting writes to code regions which may invalidate
 * generated code.
 *
 * Other vCPUs might be reading their TLBs during guest execution, so we update
 * te->addr_write with qatomic_set. We don't need to worry about this for
 * oversized guests as MTTCG is disabled for them.
 *
 * Called with tlb_c.lock held.
 */
static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
                                         uintptr_t start, uintptr_t length)
{
    uintptr_t addr = tlb_entry->addr_write;

    if ((addr & (TLB_INVALID_MASK | TLB_MMIO |
                 TLB_DISCARD_WRITE | TLB_NOTDIRTY)) == 0) {
        addr &= TARGET_PAGE_MASK;
        addr += tlb_entry->addend;
        if ((addr - start) < length) {
#if TCG_OVERSIZED_GUEST
            tlb_entry->addr_write |= TLB_NOTDIRTY;
#else
            qatomic_set(&tlb_entry->addr_write,
                        tlb_entry->addr_write | TLB_NOTDIRTY);
#endif
        }
    }
}
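
/*
 * Illustrative numbers: addend rebases a page-aligned guest virtual
 * address into the host address space, so for an entry whose page maps
 * host address 0x7f00000a3000, a reset over start == 0x7f0000000000
 * with length == 0x200000 computes addr - start == 0xa3000 < length
 * and ORs in TLB_NOTDIRTY, diverting subsequent writes to the slow path.
 */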
/*
 * Called with tlb_c.lock held.
 * Called only from the vCPU context, i.e. the TLB's owner thread.
 */
static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s)
{
    *d = *s;
}

/* This is a cross vCPU call (i.e. another vCPU resetting the flags of
 * the target vCPU).
 * We must take tlb_c.lock to avoid racing with another vCPU update. The only
 * thing actually updated is the target TLB entry ->addr_write flags.
 */
void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
{
    CPUArchState *env;

    int mmu_idx;

    env = cpu->env_ptr;
    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        unsigned int i;
        unsigned int n = tlb_n_entries(&env_tlb(env)->f[mmu_idx]);

        for (i = 0; i < n; i++) {
            tlb_reset_dirty_range_locked(&env_tlb(env)->f[mmu_idx].table[i],
                                         start1, length);
        }

        for (i = 0; i < CPU_VTLB_SIZE; i++) {
            tlb_reset_dirty_range_locked(&env_tlb(env)->d[mmu_idx].vtable[i],
                                         start1, length);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);
}
/* Called with tlb_c.lock held */
static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry,
                                         target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    vaddr &= TARGET_PAGE_MASK;
    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, vaddr), vaddr);
    }

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_set_dirty1_locked(&env_tlb(env)->d[mmu_idx].vtable[k], vaddr);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);
}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUArchState *env, int mmu_idx,
                               target_ulong vaddr, target_ulong size)
{
    target_ulong lp_addr = env_tlb(env)->d[mmu_idx].large_page_addr;
    target_ulong lp_mask = ~(size - 1);

    if (lp_addr == (target_ulong)-1) {
        /* No previous large page.  */
        lp_addr = vaddr;
    } else {
        /* Extend the existing region to include the new page.
           This is a compromise between unnecessary flushes and
           the cost of maintaining a full variable size TLB.  */
        lp_mask &= env_tlb(env)->d[mmu_idx].large_page_mask;
        while (((lp_addr ^ vaddr) & lp_mask) != 0) {
            lp_mask <<= 1;
        }
    }
    env_tlb(env)->d[mmu_idx].large_page_addr = lp_addr & lp_mask;
    env_tlb(env)->d[mmu_idx].large_page_mask = lp_mask;
}
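
/*
 * Mask-widening example (illustrative, 32-bit target_ulong): with a
 * tracked 2 MiB page at lp_addr == 0x40000000 (lp_mask == 0xffe00000),
 * a new large page at vaddr == 0x40300000 gives
 * (lp_addr ^ vaddr) & lp_mask == 0x00200000, so lp_mask widens to
 * 0xffc00000; the xor then masks to 0 and the tracked region becomes
 * 4 MiB, covering both pages.
 */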
1083d9bb58e5SYang Zhong */ 1084d9bb58e5SYang Zhong void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr, 1085d9bb58e5SYang Zhong hwaddr paddr, MemTxAttrs attrs, int prot, 1086d9bb58e5SYang Zhong int mmu_idx, target_ulong size) 1087d9bb58e5SYang Zhong { 1088d9bb58e5SYang Zhong CPUArchState *env = cpu->env_ptr; 1089a40ec84eSRichard Henderson CPUTLB *tlb = env_tlb(env); 1090a40ec84eSRichard Henderson CPUTLBDesc *desc = &tlb->d[mmu_idx]; 1091d9bb58e5SYang Zhong MemoryRegionSection *section; 1092d9bb58e5SYang Zhong unsigned int index; 1093d9bb58e5SYang Zhong target_ulong address; 10948f5db641SRichard Henderson target_ulong write_address; 1095d9bb58e5SYang Zhong uintptr_t addend; 109668fea038SRichard Henderson CPUTLBEntry *te, tn; 109755df6fcfSPeter Maydell hwaddr iotlb, xlat, sz, paddr_page; 109855df6fcfSPeter Maydell target_ulong vaddr_page; 1099d9bb58e5SYang Zhong int asidx = cpu_asidx_from_attrs(cpu, attrs); 110050b107c5SRichard Henderson int wp_flags; 11018f5db641SRichard Henderson bool is_ram, is_romd; 1102d9bb58e5SYang Zhong 1103d9bb58e5SYang Zhong assert_cpu_is_self(cpu); 110455df6fcfSPeter Maydell 11051308e026SRichard Henderson if (size <= TARGET_PAGE_SIZE) { 110655df6fcfSPeter Maydell sz = TARGET_PAGE_SIZE; 110755df6fcfSPeter Maydell } else { 11081308e026SRichard Henderson tlb_add_large_page(env, mmu_idx, vaddr, size); 1109d9bb58e5SYang Zhong sz = size; 111055df6fcfSPeter Maydell } 111155df6fcfSPeter Maydell vaddr_page = vaddr & TARGET_PAGE_MASK; 111255df6fcfSPeter Maydell paddr_page = paddr & TARGET_PAGE_MASK; 111355df6fcfSPeter Maydell 111455df6fcfSPeter Maydell section = address_space_translate_for_iotlb(cpu, asidx, paddr_page, 111555df6fcfSPeter Maydell &xlat, &sz, attrs, &prot); 1116d9bb58e5SYang Zhong assert(sz >= TARGET_PAGE_SIZE); 1117d9bb58e5SYang Zhong 1118d9bb58e5SYang Zhong tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx 1119d9bb58e5SYang Zhong " prot=%x idx=%d\n", 1120d9bb58e5SYang Zhong vaddr, paddr, prot, mmu_idx); 1121d9bb58e5SYang Zhong 112255df6fcfSPeter Maydell address = vaddr_page; 112355df6fcfSPeter Maydell if (size < TARGET_PAGE_SIZE) { 112430d7e098SRichard Henderson /* Repeat the MMU check and TLB fill on every access. */ 112530d7e098SRichard Henderson address |= TLB_INVALID_MASK; 112655df6fcfSPeter Maydell } 1127a26fc6f5STony Nguyen if (attrs.byte_swap) { 11285b87b3e6SRichard Henderson address |= TLB_BSWAP; 1129a26fc6f5STony Nguyen } 11308f5db641SRichard Henderson 11318f5db641SRichard Henderson is_ram = memory_region_is_ram(section->mr); 11328f5db641SRichard Henderson is_romd = memory_region_is_romd(section->mr); 11338f5db641SRichard Henderson 11348f5db641SRichard Henderson if (is_ram || is_romd) { 11358f5db641SRichard Henderson /* RAM and ROMD both have associated host memory. */ 1136d9bb58e5SYang Zhong addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat; 11378f5db641SRichard Henderson } else { 11388f5db641SRichard Henderson /* I/O does not; force the host address to NULL. */ 11398f5db641SRichard Henderson addend = 0; 1140d9bb58e5SYang Zhong } 1141d9bb58e5SYang Zhong 11428f5db641SRichard Henderson write_address = address; 11438f5db641SRichard Henderson if (is_ram) { 11448f5db641SRichard Henderson iotlb = memory_region_get_ram_addr(section->mr) + xlat; 11458f5db641SRichard Henderson /* 11468f5db641SRichard Henderson * Computing is_clean is expensive; avoid all that unless 11478f5db641SRichard Henderson * the page is actually writable. 
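 * (Editor's note: the two write flags set below differ in intent.
 * TLB_DISCARD_WRITE marks a read-only section whose writes are
 * silently discarded, while TLB_NOTDIRTY routes writes through the
 * slow path so that notdirty_write() can update the dirty bitmap.)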
11488f5db641SRichard Henderson */ 11498f5db641SRichard Henderson if (prot & PAGE_WRITE) { 11508f5db641SRichard Henderson if (section->readonly) { 11518f5db641SRichard Henderson write_address |= TLB_DISCARD_WRITE; 11528f5db641SRichard Henderson } else if (cpu_physical_memory_is_clean(iotlb)) { 11538f5db641SRichard Henderson write_address |= TLB_NOTDIRTY; 11548f5db641SRichard Henderson } 11558f5db641SRichard Henderson } 11568f5db641SRichard Henderson } else { 11578f5db641SRichard Henderson /* I/O or ROMD */ 11588f5db641SRichard Henderson iotlb = memory_region_section_get_iotlb(cpu, section) + xlat; 11598f5db641SRichard Henderson /* 11608f5db641SRichard Henderson * Writes to romd devices must go through MMIO to enable write. 11618f5db641SRichard Henderson * Reads to romd devices go through the ram_ptr found above, 11628f5db641SRichard Henderson * but of course reads to I/O must go through MMIO. 11638f5db641SRichard Henderson */ 11648f5db641SRichard Henderson write_address |= TLB_MMIO; 11658f5db641SRichard Henderson if (!is_romd) { 11668f5db641SRichard Henderson address = write_address; 11678f5db641SRichard Henderson } 11688f5db641SRichard Henderson } 11698f5db641SRichard Henderson 117050b107c5SRichard Henderson wp_flags = cpu_watchpoint_address_matches(cpu, vaddr_page, 117150b107c5SRichard Henderson TARGET_PAGE_SIZE); 1172d9bb58e5SYang Zhong 1173383beda9SRichard Henderson index = tlb_index(env, mmu_idx, vaddr_page); 1174383beda9SRichard Henderson te = tlb_entry(env, mmu_idx, vaddr_page); 1175d9bb58e5SYang Zhong 117668fea038SRichard Henderson /* 117771aec354SEmilio G. Cota * Hold the TLB lock for the rest of the function. We could acquire/release 117871aec354SEmilio G. Cota * the lock several times in the function, but it is faster to amortize the 117971aec354SEmilio G. Cota * acquisition cost by acquiring it just once. Note that this leads to 118071aec354SEmilio G. Cota * a longer critical section, but this is not a concern since the TLB lock 118171aec354SEmilio G. Cota * is unlikely to be contended. 118271aec354SEmilio G. Cota */ 1183a40ec84eSRichard Henderson qemu_spin_lock(&tlb->c.lock); 118471aec354SEmilio G. Cota 11853d1523ceSRichard Henderson /* Note that the tlb is no longer clean. */ 1186a40ec84eSRichard Henderson tlb->c.dirty |= 1 << mmu_idx; 11873d1523ceSRichard Henderson 118871aec354SEmilio G. Cota /* Make sure there's no cached translation for the new page. */ 118971aec354SEmilio G. Cota tlb_flush_vtlb_page_locked(env, mmu_idx, vaddr_page); 119071aec354SEmilio G. Cota 119171aec354SEmilio G. Cota /* 119268fea038SRichard Henderson * Only evict the old entry to the victim tlb if it's for a 119368fea038SRichard Henderson * different page; otherwise just overwrite the stale data. 119468fea038SRichard Henderson */ 11953cea94bbSEmilio G. Cota if (!tlb_hit_page_anyprot(te, vaddr_page) && !tlb_entry_is_empty(te)) { 1196a40ec84eSRichard Henderson unsigned vidx = desc->vindex++ % CPU_VTLB_SIZE; 1197a40ec84eSRichard Henderson CPUTLBEntry *tv = &desc->vtable[vidx]; 119868fea038SRichard Henderson 119968fea038SRichard Henderson /* Evict the old entry into the victim tlb. */ 120071aec354SEmilio G. Cota copy_tlb_helper_locked(tv, te); 1201a40ec84eSRichard Henderson desc->viotlb[vidx] = desc->iotlb[index]; 120286e1eff8SEmilio G. 
Cota tlb_n_used_entries_dec(env, mmu_idx); 120368fea038SRichard Henderson } 1204d9bb58e5SYang Zhong 1205d9bb58e5SYang Zhong /* refill the tlb */ 1206ace41090SPeter Maydell /* 1207ace41090SPeter Maydell * At this point iotlb contains a physical section number in the lower 1208ace41090SPeter Maydell * TARGET_PAGE_BITS, and either 12098f5db641SRichard Henderson * + the ram_addr_t of the page base of the target RAM (RAM) 12108f5db641SRichard Henderson * + the offset within section->mr of the page base (I/O, ROMD) 121155df6fcfSPeter Maydell * We subtract the vaddr_page (which is page aligned and thus won't 1212ace41090SPeter Maydell * disturb the low bits) to give an offset which can be added to the 1213ace41090SPeter Maydell * (non-page-aligned) vaddr of the eventual memory access to get 1214ace41090SPeter Maydell * the MemoryRegion offset for the access. Note that the vaddr we 1215ace41090SPeter Maydell * subtract here is that of the page base, and not the same as the 1216ace41090SPeter Maydell * vaddr we add back in io_readx()/io_writex()/get_page_addr_code(). 1217ace41090SPeter Maydell */ 1218a40ec84eSRichard Henderson desc->iotlb[index].addr = iotlb - vaddr_page; 1219a40ec84eSRichard Henderson desc->iotlb[index].attrs = attrs; 1220d9bb58e5SYang Zhong 1221d9bb58e5SYang Zhong /* Now calculate the new entry */ 122255df6fcfSPeter Maydell tn.addend = addend - vaddr_page; 1223d9bb58e5SYang Zhong if (prot & PAGE_READ) { 1224d9bb58e5SYang Zhong tn.addr_read = address; 122550b107c5SRichard Henderson if (wp_flags & BP_MEM_READ) { 122650b107c5SRichard Henderson tn.addr_read |= TLB_WATCHPOINT; 122750b107c5SRichard Henderson } 1228d9bb58e5SYang Zhong } else { 1229d9bb58e5SYang Zhong tn.addr_read = -1; 1230d9bb58e5SYang Zhong } 1231d9bb58e5SYang Zhong 1232d9bb58e5SYang Zhong if (prot & PAGE_EXEC) { 12338f5db641SRichard Henderson tn.addr_code = address; 1234d9bb58e5SYang Zhong } else { 1235d9bb58e5SYang Zhong tn.addr_code = -1; 1236d9bb58e5SYang Zhong } 1237d9bb58e5SYang Zhong 1238d9bb58e5SYang Zhong tn.addr_write = -1; 1239d9bb58e5SYang Zhong if (prot & PAGE_WRITE) { 12408f5db641SRichard Henderson tn.addr_write = write_address; 1241f52bfb12SDavid Hildenbrand if (prot & PAGE_WRITE_INV) { 1242f52bfb12SDavid Hildenbrand tn.addr_write |= TLB_INVALID_MASK; 1243f52bfb12SDavid Hildenbrand } 124450b107c5SRichard Henderson if (wp_flags & BP_MEM_WRITE) { 124550b107c5SRichard Henderson tn.addr_write |= TLB_WATCHPOINT; 124650b107c5SRichard Henderson } 1247d9bb58e5SYang Zhong } 1248d9bb58e5SYang Zhong 124971aec354SEmilio G. Cota copy_tlb_helper_locked(te, &tn); 125086e1eff8SEmilio G. Cota tlb_n_used_entries_inc(env, mmu_idx); 1251a40ec84eSRichard Henderson qemu_spin_unlock(&tlb->c.lock); 1252d9bb58e5SYang Zhong } 1253d9bb58e5SYang Zhong 1254d9bb58e5SYang Zhong /* Add a new TLB entry, but without specifying the memory 1255d9bb58e5SYang Zhong * transaction attributes to be used. 
1256d9bb58e5SYang Zhong */ 1257d9bb58e5SYang Zhong void tlb_set_page(CPUState *cpu, target_ulong vaddr, 1258d9bb58e5SYang Zhong hwaddr paddr, int prot, 1259d9bb58e5SYang Zhong int mmu_idx, target_ulong size) 1260d9bb58e5SYang Zhong { 1261d9bb58e5SYang Zhong tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED, 1262d9bb58e5SYang Zhong prot, mmu_idx, size); 1263d9bb58e5SYang Zhong } 1264d9bb58e5SYang Zhong 1265d9bb58e5SYang Zhong static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr) 1266d9bb58e5SYang Zhong { 1267d9bb58e5SYang Zhong ram_addr_t ram_addr; 1268d9bb58e5SYang Zhong 1269d9bb58e5SYang Zhong ram_addr = qemu_ram_addr_from_host(ptr); 1270d9bb58e5SYang Zhong if (ram_addr == RAM_ADDR_INVALID) { 1271d9bb58e5SYang Zhong error_report("Bad ram pointer %p", ptr); 1272d9bb58e5SYang Zhong abort(); 1273d9bb58e5SYang Zhong } 1274d9bb58e5SYang Zhong return ram_addr; 1275d9bb58e5SYang Zhong } 1276d9bb58e5SYang Zhong 1277c319dc13SRichard Henderson /* 1278c319dc13SRichard Henderson * Note: tlb_fill() can trigger a resize of the TLB. This means that all of the 1279c319dc13SRichard Henderson * caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must 1280c319dc13SRichard Henderson * be discarded and looked up again (e.g. via tlb_entry()). 1281c319dc13SRichard Henderson */ 1282c319dc13SRichard Henderson static void tlb_fill(CPUState *cpu, target_ulong addr, int size, 1283c319dc13SRichard Henderson MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) 1284c319dc13SRichard Henderson { 1285c319dc13SRichard Henderson CPUClass *cc = CPU_GET_CLASS(cpu); 1286c319dc13SRichard Henderson bool ok; 1287c319dc13SRichard Henderson 1288c319dc13SRichard Henderson /* 1289c319dc13SRichard Henderson * This is not a probe, so only valid return is success; failure 1290c319dc13SRichard Henderson * should result in exception + longjmp to the cpu loop. 
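 * Contrast probe_access_internal() below, which may pass
 * nonfault=true and therefore must handle a false return.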
1291c319dc13SRichard Henderson */ 129278271684SClaudio Fontana ok = cc->tcg_ops->tlb_fill(cpu, addr, size, 1293e124536fSEduardo Habkost access_type, mmu_idx, false, retaddr); 1294c319dc13SRichard Henderson assert(ok); 1295c319dc13SRichard Henderson } 1296c319dc13SRichard Henderson 129778271684SClaudio Fontana static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr, 129878271684SClaudio Fontana MMUAccessType access_type, 129978271684SClaudio Fontana int mmu_idx, uintptr_t retaddr) 130078271684SClaudio Fontana { 130178271684SClaudio Fontana CPUClass *cc = CPU_GET_CLASS(cpu); 130278271684SClaudio Fontana 130378271684SClaudio Fontana cc->tcg_ops->do_unaligned_access(cpu, addr, access_type, mmu_idx, retaddr); 130478271684SClaudio Fontana } 130578271684SClaudio Fontana 130678271684SClaudio Fontana static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr, 130778271684SClaudio Fontana vaddr addr, unsigned size, 130878271684SClaudio Fontana MMUAccessType access_type, 130978271684SClaudio Fontana int mmu_idx, MemTxAttrs attrs, 131078271684SClaudio Fontana MemTxResult response, 131178271684SClaudio Fontana uintptr_t retaddr) 131278271684SClaudio Fontana { 131378271684SClaudio Fontana CPUClass *cc = CPU_GET_CLASS(cpu); 131478271684SClaudio Fontana 131578271684SClaudio Fontana if (!cpu->ignore_memory_transaction_failures && 131678271684SClaudio Fontana cc->tcg_ops->do_transaction_failed) { 131778271684SClaudio Fontana cc->tcg_ops->do_transaction_failed(cpu, physaddr, addr, size, 131878271684SClaudio Fontana access_type, mmu_idx, attrs, 131978271684SClaudio Fontana response, retaddr); 132078271684SClaudio Fontana } 132178271684SClaudio Fontana } 132278271684SClaudio Fontana 1323d9bb58e5SYang Zhong static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry, 1324f1be3696SRichard Henderson int mmu_idx, target_ulong addr, uintptr_t retaddr, 1325be5c4787STony Nguyen MMUAccessType access_type, MemOp op) 1326d9bb58e5SYang Zhong { 132729a0af61SRichard Henderson CPUState *cpu = env_cpu(env); 13282d54f194SPeter Maydell hwaddr mr_offset; 13292d54f194SPeter Maydell MemoryRegionSection *section; 13302d54f194SPeter Maydell MemoryRegion *mr; 1331d9bb58e5SYang Zhong uint64_t val; 1332d9bb58e5SYang Zhong bool locked = false; 133304e3aabdSPeter Maydell MemTxResult r; 1334d9bb58e5SYang Zhong 13352d54f194SPeter Maydell section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs); 13362d54f194SPeter Maydell mr = section->mr; 13372d54f194SPeter Maydell mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr; 1338d9bb58e5SYang Zhong cpu->mem_io_pc = retaddr; 133908565552SRichard Henderson if (!cpu->can_do_io) { 1340d9bb58e5SYang Zhong cpu_io_recompile(cpu, retaddr); 1341d9bb58e5SYang Zhong } 1342d9bb58e5SYang Zhong 134341744954SPhilippe Mathieu-Daudé if (!qemu_mutex_iothread_locked()) { 1344d9bb58e5SYang Zhong qemu_mutex_lock_iothread(); 1345d9bb58e5SYang Zhong locked = true; 1346d9bb58e5SYang Zhong } 1347be5c4787STony Nguyen r = memory_region_dispatch_read(mr, mr_offset, &val, op, iotlbentry->attrs); 134804e3aabdSPeter Maydell if (r != MEMTX_OK) { 13492d54f194SPeter Maydell hwaddr physaddr = mr_offset + 13502d54f194SPeter Maydell section->offset_within_address_space - 13512d54f194SPeter Maydell section->offset_within_region; 13522d54f194SPeter Maydell 1353be5c4787STony Nguyen cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), access_type, 135404e3aabdSPeter Maydell mmu_idx, iotlbentry->attrs, r, retaddr); 135504e3aabdSPeter Maydell } 1356d9bb58e5SYang Zhong if (locked) { 
1357d9bb58e5SYang Zhong qemu_mutex_unlock_iothread(); 1358d9bb58e5SYang Zhong } 1359d9bb58e5SYang Zhong 1360d9bb58e5SYang Zhong return val; 1361d9bb58e5SYang Zhong } 1362d9bb58e5SYang Zhong 13632f3a57eeSAlex Bennée /* 13642f3a57eeSAlex Bennée * Save a potentially trashed IOTLB entry for later lookup by plugin. 1365570ef309SAlex Bennée * This is read by tlb_plugin_lookup if the iotlb entry doesn't match 1366570ef309SAlex Bennée * because of the side effect of io_writex changing memory layout. 13672f3a57eeSAlex Bennée */ 13682f3a57eeSAlex Bennée static void save_iotlb_data(CPUState *cs, hwaddr addr, 13692f3a57eeSAlex Bennée MemoryRegionSection *section, hwaddr mr_offset) 13702f3a57eeSAlex Bennée { 13712f3a57eeSAlex Bennée #ifdef CONFIG_PLUGIN 13722f3a57eeSAlex Bennée SavedIOTLB *saved = &cs->saved_iotlb; 13732f3a57eeSAlex Bennée saved->addr = addr; 13742f3a57eeSAlex Bennée saved->section = section; 13752f3a57eeSAlex Bennée saved->mr_offset = mr_offset; 13762f3a57eeSAlex Bennée #endif 13772f3a57eeSAlex Bennée } 13782f3a57eeSAlex Bennée 1379d9bb58e5SYang Zhong static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry, 1380f1be3696SRichard Henderson int mmu_idx, uint64_t val, target_ulong addr, 1381be5c4787STony Nguyen uintptr_t retaddr, MemOp op) 1382d9bb58e5SYang Zhong { 138329a0af61SRichard Henderson CPUState *cpu = env_cpu(env); 13842d54f194SPeter Maydell hwaddr mr_offset; 13852d54f194SPeter Maydell MemoryRegionSection *section; 13862d54f194SPeter Maydell MemoryRegion *mr; 1387d9bb58e5SYang Zhong bool locked = false; 138804e3aabdSPeter Maydell MemTxResult r; 1389d9bb58e5SYang Zhong 13902d54f194SPeter Maydell section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs); 13912d54f194SPeter Maydell mr = section->mr; 13922d54f194SPeter Maydell mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr; 139308565552SRichard Henderson if (!cpu->can_do_io) { 1394d9bb58e5SYang Zhong cpu_io_recompile(cpu, retaddr); 1395d9bb58e5SYang Zhong } 1396d9bb58e5SYang Zhong cpu->mem_io_pc = retaddr; 1397d9bb58e5SYang Zhong 13982f3a57eeSAlex Bennée /* 13992f3a57eeSAlex Bennée * The memory_region_dispatch may trigger a flush/resize 14002f3a57eeSAlex Bennée * so for plugins we save the iotlb_data just in case. 
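 * The saved copy is consumed by tlb_plugin_lookup() further down.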
14012f3a57eeSAlex Bennée */ 14022f3a57eeSAlex Bennée save_iotlb_data(cpu, iotlbentry->addr, section, mr_offset); 14032f3a57eeSAlex Bennée 140441744954SPhilippe Mathieu-Daudé if (!qemu_mutex_iothread_locked()) { 1405d9bb58e5SYang Zhong qemu_mutex_lock_iothread(); 1406d9bb58e5SYang Zhong locked = true; 1407d9bb58e5SYang Zhong } 1408be5c4787STony Nguyen r = memory_region_dispatch_write(mr, mr_offset, val, op, iotlbentry->attrs); 140904e3aabdSPeter Maydell if (r != MEMTX_OK) { 14102d54f194SPeter Maydell hwaddr physaddr = mr_offset + 14112d54f194SPeter Maydell section->offset_within_address_space - 14122d54f194SPeter Maydell section->offset_within_region; 14132d54f194SPeter Maydell 1414be5c4787STony Nguyen cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), 1415be5c4787STony Nguyen MMU_DATA_STORE, mmu_idx, iotlbentry->attrs, r, 1416be5c4787STony Nguyen retaddr); 141704e3aabdSPeter Maydell } 1418d9bb58e5SYang Zhong if (locked) { 1419d9bb58e5SYang Zhong qemu_mutex_unlock_iothread(); 1420d9bb58e5SYang Zhong } 1421d9bb58e5SYang Zhong } 1422d9bb58e5SYang Zhong 14234811e909SRichard Henderson static inline target_ulong tlb_read_ofs(CPUTLBEntry *entry, size_t ofs) 14244811e909SRichard Henderson { 14254811e909SRichard Henderson #if TCG_OVERSIZED_GUEST 14264811e909SRichard Henderson return *(target_ulong *)((uintptr_t)entry + ofs); 14274811e909SRichard Henderson #else 1428d73415a3SStefan Hajnoczi /* ofs might correspond to .addr_write, so use qatomic_read */ 1429d73415a3SStefan Hajnoczi return qatomic_read((target_ulong *)((uintptr_t)entry + ofs)); 14304811e909SRichard Henderson #endif 14314811e909SRichard Henderson } 14324811e909SRichard Henderson 1433d9bb58e5SYang Zhong /* Return true if ADDR is present in the victim tlb, and has been copied 1434d9bb58e5SYang Zhong back to the main tlb. */ 1435d9bb58e5SYang Zhong static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index, 1436d9bb58e5SYang Zhong size_t elt_ofs, target_ulong page) 1437d9bb58e5SYang Zhong { 1438d9bb58e5SYang Zhong size_t vidx; 143971aec354SEmilio G. Cota 144029a0af61SRichard Henderson assert_cpu_is_self(env_cpu(env)); 1441d9bb58e5SYang Zhong for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) { 1442a40ec84eSRichard Henderson CPUTLBEntry *vtlb = &env_tlb(env)->d[mmu_idx].vtable[vidx]; 1443a40ec84eSRichard Henderson target_ulong cmp; 1444a40ec84eSRichard Henderson 1445d73415a3SStefan Hajnoczi /* elt_ofs might correspond to .addr_write, so use qatomic_read */ 1446a40ec84eSRichard Henderson #if TCG_OVERSIZED_GUEST 1447a40ec84eSRichard Henderson cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs); 1448a40ec84eSRichard Henderson #else 1449d73415a3SStefan Hajnoczi cmp = qatomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs)); 1450a40ec84eSRichard Henderson #endif 1451d9bb58e5SYang Zhong 1452d9bb58e5SYang Zhong if (cmp == page) { 1453d9bb58e5SYang Zhong /* Found entry in victim tlb, swap tlb and iotlb. */ 1454a40ec84eSRichard Henderson CPUTLBEntry tmptlb, *tlb = &env_tlb(env)->f[mmu_idx].table[index]; 1455d9bb58e5SYang Zhong 1456a40ec84eSRichard Henderson qemu_spin_lock(&env_tlb(env)->c.lock); 145771aec354SEmilio G. Cota copy_tlb_helper_locked(&tmptlb, tlb); 145871aec354SEmilio G. Cota copy_tlb_helper_locked(tlb, vtlb); 145971aec354SEmilio G. 
Cota copy_tlb_helper_locked(vtlb, &tmptlb); 1460a40ec84eSRichard Henderson qemu_spin_unlock(&env_tlb(env)->c.lock); 1461d9bb58e5SYang Zhong 1462a40ec84eSRichard Henderson CPUIOTLBEntry tmpio, *io = &env_tlb(env)->d[mmu_idx].iotlb[index]; 1463a40ec84eSRichard Henderson CPUIOTLBEntry *vio = &env_tlb(env)->d[mmu_idx].viotlb[vidx]; 1464d9bb58e5SYang Zhong tmpio = *io; *io = *vio; *vio = tmpio; 1465d9bb58e5SYang Zhong return true; 1466d9bb58e5SYang Zhong } 1467d9bb58e5SYang Zhong } 1468d9bb58e5SYang Zhong return false; 1469d9bb58e5SYang Zhong } 1470d9bb58e5SYang Zhong 1471d9bb58e5SYang Zhong /* Macro to call the above, with local variables from the use context. */ 1472d9bb58e5SYang Zhong #define VICTIM_TLB_HIT(TY, ADDR) \ 1473d9bb58e5SYang Zhong victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \ 1474d9bb58e5SYang Zhong (ADDR) & TARGET_PAGE_MASK) 1475d9bb58e5SYang Zhong 147630d7e098SRichard Henderson /* 147730d7e098SRichard Henderson * Return a ram_addr_t for the virtual address for execution. 147830d7e098SRichard Henderson * 147930d7e098SRichard Henderson * Return -1 if we can't translate and execute from an entire page 148030d7e098SRichard Henderson * of RAM. This will force us to execute by loading and translating 148130d7e098SRichard Henderson * one insn at a time, without caching. 148230d7e098SRichard Henderson * 148330d7e098SRichard Henderson * NOTE: This function will trigger an exception if the page is 148430d7e098SRichard Henderson * not executable. 1485f2553f04SKONRAD Frederic */ 14864b2190daSEmilio G. Cota tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr, 14874b2190daSEmilio G. Cota void **hostp) 1488f2553f04SKONRAD Frederic { 1489383beda9SRichard Henderson uintptr_t mmu_idx = cpu_mmu_index(env, true); 1490383beda9SRichard Henderson uintptr_t index = tlb_index(env, mmu_idx, addr); 1491383beda9SRichard Henderson CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); 1492f2553f04SKONRAD Frederic void *p; 1493f2553f04SKONRAD Frederic 1494383beda9SRichard Henderson if (unlikely(!tlb_hit(entry->addr_code, addr))) { 1495b493ccf1SPeter Maydell if (!VICTIM_TLB_HIT(addr_code, addr)) { 149629a0af61SRichard Henderson tlb_fill(env_cpu(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0); 14976d967cb8SEmilio G. Cota index = tlb_index(env, mmu_idx, addr); 14986d967cb8SEmilio G. Cota entry = tlb_entry(env, mmu_idx, addr); 149930d7e098SRichard Henderson 150030d7e098SRichard Henderson if (unlikely(entry->addr_code & TLB_INVALID_MASK)) { 150130d7e098SRichard Henderson /* 150230d7e098SRichard Henderson * The MMU protection covers a smaller range than a target 150330d7e098SRichard Henderson * page, so we must redo the MMU check for every insn. 150430d7e098SRichard Henderson */ 150530d7e098SRichard Henderson return -1; 150630d7e098SRichard Henderson } 150771b9a453SKONRAD Frederic } 1508383beda9SRichard Henderson assert(tlb_hit(entry->addr_code, addr)); 1509f2553f04SKONRAD Frederic } 151055df6fcfSPeter Maydell 151130d7e098SRichard Henderson if (unlikely(entry->addr_code & TLB_MMIO)) { 151230d7e098SRichard Henderson /* The region is not backed by RAM. */ 15134b2190daSEmilio G. Cota if (hostp) { 15144b2190daSEmilio G. Cota *hostp = NULL; 15154b2190daSEmilio G. Cota } 151620cb6ae4SPeter Maydell return -1; 151755df6fcfSPeter Maydell } 151855df6fcfSPeter Maydell 1519383beda9SRichard Henderson p = (void *)((uintptr_t)addr + entry->addend); 15204b2190daSEmilio G. Cota if (hostp) { 15214b2190daSEmilio G. Cota *hostp = p; 15224b2190daSEmilio G. 
Cota } 1523f2553f04SKONRAD Frederic return qemu_ram_addr_from_host_nofail(p); 1524f2553f04SKONRAD Frederic } 1525f2553f04SKONRAD Frederic 15264b2190daSEmilio G. Cota tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr) 15274b2190daSEmilio G. Cota { 15284b2190daSEmilio G. Cota return get_page_addr_code_hostp(env, addr, NULL); 15294b2190daSEmilio G. Cota } 15304b2190daSEmilio G. Cota 1531707526adSRichard Henderson static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size, 1532707526adSRichard Henderson CPUIOTLBEntry *iotlbentry, uintptr_t retaddr) 1533707526adSRichard Henderson { 1534707526adSRichard Henderson ram_addr_t ram_addr = mem_vaddr + iotlbentry->addr; 1535707526adSRichard Henderson 1536707526adSRichard Henderson trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size); 1537707526adSRichard Henderson 1538707526adSRichard Henderson if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) { 1539707526adSRichard Henderson struct page_collection *pages 1540707526adSRichard Henderson = page_collection_lock(ram_addr, ram_addr + size); 15415a7c27bbSRichard Henderson tb_invalidate_phys_page_fast(pages, ram_addr, size, retaddr); 1542707526adSRichard Henderson page_collection_unlock(pages); 1543707526adSRichard Henderson } 1544707526adSRichard Henderson 1545707526adSRichard Henderson /* 1546707526adSRichard Henderson * Set both VGA and migration bits for simplicity and to remove 1547707526adSRichard Henderson * the notdirty callback faster. 1548707526adSRichard Henderson */ 1549707526adSRichard Henderson cpu_physical_memory_set_dirty_range(ram_addr, size, DIRTY_CLIENTS_NOCODE); 1550707526adSRichard Henderson 1551707526adSRichard Henderson /* We remove the notdirty callback only if the code has been flushed. 
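 * (Editor's gloss, assuming the usual semantics of
 * cpu_physical_memory_is_clean(): only once the page is dirty for
 * every client, including DIRTY_MEMORY_CODE, does that predicate
 * return false, and only then is it safe to drop TLB_NOTDIRTY and
 * let later writes take the fast path.)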
*/ 1552707526adSRichard Henderson if (!cpu_physical_memory_is_clean(ram_addr)) { 1553707526adSRichard Henderson trace_memory_notdirty_set_dirty(mem_vaddr); 1554707526adSRichard Henderson tlb_set_dirty(cpu, mem_vaddr); 1555707526adSRichard Henderson } 1556707526adSRichard Henderson } 1557707526adSRichard Henderson 1558069cfe77SRichard Henderson static int probe_access_internal(CPUArchState *env, target_ulong addr, 1559069cfe77SRichard Henderson int fault_size, MMUAccessType access_type, 1560069cfe77SRichard Henderson int mmu_idx, bool nonfault, 1561069cfe77SRichard Henderson void **phost, uintptr_t retaddr) 1562d9bb58e5SYang Zhong { 1563383beda9SRichard Henderson uintptr_t index = tlb_index(env, mmu_idx, addr); 1564383beda9SRichard Henderson CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); 1565069cfe77SRichard Henderson target_ulong tlb_addr, page_addr; 1566c25c283dSDavid Hildenbrand size_t elt_ofs; 1567069cfe77SRichard Henderson int flags; 1568ca86cf32SDavid Hildenbrand 1569c25c283dSDavid Hildenbrand switch (access_type) { 1570c25c283dSDavid Hildenbrand case MMU_DATA_LOAD: 1571c25c283dSDavid Hildenbrand elt_ofs = offsetof(CPUTLBEntry, addr_read); 1572c25c283dSDavid Hildenbrand break; 1573c25c283dSDavid Hildenbrand case MMU_DATA_STORE: 1574c25c283dSDavid Hildenbrand elt_ofs = offsetof(CPUTLBEntry, addr_write); 1575c25c283dSDavid Hildenbrand break; 1576c25c283dSDavid Hildenbrand case MMU_INST_FETCH: 1577c25c283dSDavid Hildenbrand elt_ofs = offsetof(CPUTLBEntry, addr_code); 1578c25c283dSDavid Hildenbrand break; 1579c25c283dSDavid Hildenbrand default: 1580c25c283dSDavid Hildenbrand g_assert_not_reached(); 1581c25c283dSDavid Hildenbrand } 1582c25c283dSDavid Hildenbrand tlb_addr = tlb_read_ofs(entry, elt_ofs); 1583c25c283dSDavid Hildenbrand 1584069cfe77SRichard Henderson page_addr = addr & TARGET_PAGE_MASK; 1585069cfe77SRichard Henderson if (!tlb_hit_page(tlb_addr, page_addr)) { 1586069cfe77SRichard Henderson if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, page_addr)) { 1587069cfe77SRichard Henderson CPUState *cs = env_cpu(env); 1588069cfe77SRichard Henderson CPUClass *cc = CPU_GET_CLASS(cs); 1589069cfe77SRichard Henderson 159078271684SClaudio Fontana if (!cc->tcg_ops->tlb_fill(cs, addr, fault_size, access_type, 1591069cfe77SRichard Henderson mmu_idx, nonfault, retaddr)) { 1592069cfe77SRichard Henderson /* Non-faulting page table read failed. */ 1593069cfe77SRichard Henderson *phost = NULL; 1594069cfe77SRichard Henderson return TLB_INVALID_MASK; 1595069cfe77SRichard Henderson } 1596069cfe77SRichard Henderson 159703a98189SDavid Hildenbrand /* TLB resize via tlb_fill may have moved the entry. */ 159803a98189SDavid Hildenbrand entry = tlb_entry(env, mmu_idx, addr); 1599d9bb58e5SYang Zhong } 1600c25c283dSDavid Hildenbrand tlb_addr = tlb_read_ofs(entry, elt_ofs); 160103a98189SDavid Hildenbrand } 1602069cfe77SRichard Henderson flags = tlb_addr & TLB_FLAGS_MASK; 160303a98189SDavid Hildenbrand 1604069cfe77SRichard Henderson /* Fold all "mmio-like" bits into TLB_MMIO. This is not RAM. */ 1605069cfe77SRichard Henderson if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY))) { 1606069cfe77SRichard Henderson *phost = NULL; 1607069cfe77SRichard Henderson return TLB_MMIO; 1608fef39ccdSDavid Hildenbrand } 1609fef39ccdSDavid Hildenbrand 1610069cfe77SRichard Henderson /* Everything else is RAM. 
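 * The returned flags may still include TLB_WATCHPOINT and/or
 * TLB_NOTDIRTY; the callers below decide how to handle those.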
*/ 1611069cfe77SRichard Henderson *phost = (void *)((uintptr_t)addr + entry->addend); 1612069cfe77SRichard Henderson return flags; 1613069cfe77SRichard Henderson } 1614069cfe77SRichard Henderson 1615069cfe77SRichard Henderson int probe_access_flags(CPUArchState *env, target_ulong addr, 1616069cfe77SRichard Henderson MMUAccessType access_type, int mmu_idx, 1617069cfe77SRichard Henderson bool nonfault, void **phost, uintptr_t retaddr) 1618069cfe77SRichard Henderson { 1619069cfe77SRichard Henderson int flags; 1620069cfe77SRichard Henderson 1621069cfe77SRichard Henderson flags = probe_access_internal(env, addr, 0, access_type, mmu_idx, 1622069cfe77SRichard Henderson nonfault, phost, retaddr); 1623069cfe77SRichard Henderson 1624069cfe77SRichard Henderson /* Handle clean RAM pages. */ 1625069cfe77SRichard Henderson if (unlikely(flags & TLB_NOTDIRTY)) { 1626069cfe77SRichard Henderson uintptr_t index = tlb_index(env, mmu_idx, addr); 162773bc0bd4SRichard Henderson CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index]; 162873bc0bd4SRichard Henderson 1629069cfe77SRichard Henderson notdirty_write(env_cpu(env), addr, 1, iotlbentry, retaddr); 1630069cfe77SRichard Henderson flags &= ~TLB_NOTDIRTY; 1631069cfe77SRichard Henderson } 1632069cfe77SRichard Henderson 1633069cfe77SRichard Henderson return flags; 1634069cfe77SRichard Henderson } 1635069cfe77SRichard Henderson 1636069cfe77SRichard Henderson void *probe_access(CPUArchState *env, target_ulong addr, int size, 1637069cfe77SRichard Henderson MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) 1638069cfe77SRichard Henderson { 1639069cfe77SRichard Henderson void *host; 1640069cfe77SRichard Henderson int flags; 1641069cfe77SRichard Henderson 1642069cfe77SRichard Henderson g_assert(-(addr | TARGET_PAGE_MASK) >= size); 1643069cfe77SRichard Henderson 1644069cfe77SRichard Henderson flags = probe_access_internal(env, addr, size, access_type, mmu_idx, 1645069cfe77SRichard Henderson false, &host, retaddr); 1646069cfe77SRichard Henderson 1647069cfe77SRichard Henderson /* Per the interface, size == 0 merely faults the access. */ 1648069cfe77SRichard Henderson if (size == 0) { 164973bc0bd4SRichard Henderson return NULL; 165073bc0bd4SRichard Henderson } 165173bc0bd4SRichard Henderson 1652069cfe77SRichard Henderson if (unlikely(flags & (TLB_NOTDIRTY | TLB_WATCHPOINT))) { 1653069cfe77SRichard Henderson uintptr_t index = tlb_index(env, mmu_idx, addr); 1654069cfe77SRichard Henderson CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index]; 1655069cfe77SRichard Henderson 165603a98189SDavid Hildenbrand /* Handle watchpoints. */ 1657069cfe77SRichard Henderson if (flags & TLB_WATCHPOINT) { 1658069cfe77SRichard Henderson int wp_access = (access_type == MMU_DATA_STORE 1659069cfe77SRichard Henderson ? BP_MEM_WRITE : BP_MEM_READ); 166003a98189SDavid Hildenbrand cpu_check_watchpoint(env_cpu(env), addr, size, 166173bc0bd4SRichard Henderson iotlbentry->attrs, wp_access, retaddr); 1662d9bb58e5SYang Zhong } 1663fef39ccdSDavid Hildenbrand 166473bc0bd4SRichard Henderson /* Handle clean RAM pages. 
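 * (As in probe_access_flags() above: perform the dirty-tracking
 * bookkeeping up front, so the caller may then write through the
 * returned host pointer without further checks.)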
*/ 1665069cfe77SRichard Henderson if (flags & TLB_NOTDIRTY) { 1666069cfe77SRichard Henderson notdirty_write(env_cpu(env), addr, 1, iotlbentry, retaddr); 166773bc0bd4SRichard Henderson } 1668fef39ccdSDavid Hildenbrand } 1669fef39ccdSDavid Hildenbrand 1670069cfe77SRichard Henderson return host; 1671d9bb58e5SYang Zhong } 1672d9bb58e5SYang Zhong 16734811e909SRichard Henderson void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr, 16744811e909SRichard Henderson MMUAccessType access_type, int mmu_idx) 16754811e909SRichard Henderson { 1676069cfe77SRichard Henderson void *host; 1677069cfe77SRichard Henderson int flags; 16784811e909SRichard Henderson 1679069cfe77SRichard Henderson flags = probe_access_internal(env, addr, 0, access_type, 1680069cfe77SRichard Henderson mmu_idx, true, &host, 0); 1681069cfe77SRichard Henderson 1682069cfe77SRichard Henderson /* No combination of flags are expected by the caller. */ 1683069cfe77SRichard Henderson return flags ? NULL : host; 16844811e909SRichard Henderson } 16854811e909SRichard Henderson 1686235537faSAlex Bennée #ifdef CONFIG_PLUGIN 1687235537faSAlex Bennée /* 1688235537faSAlex Bennée * Perform a TLB lookup and populate the qemu_plugin_hwaddr structure. 1689235537faSAlex Bennée * This should be a hot path as we will have just looked this path up 1690235537faSAlex Bennée * in the softmmu lookup code (or helper). We don't handle re-fills or 1691235537faSAlex Bennée * checking the victim table. This is purely informational. 1692235537faSAlex Bennée * 16932f3a57eeSAlex Bennée * This almost never fails as the memory access being instrumented 16942f3a57eeSAlex Bennée * should have just filled the TLB. The one corner case is io_writex 16952f3a57eeSAlex Bennée * which can cause TLB flushes and potential resizing of the TLBs 1696570ef309SAlex Bennée * losing the information we need. In those cases we need to recover 1697570ef309SAlex Bennée * data from a copy of the iotlbentry. As long as this always occurs 1698570ef309SAlex Bennée * from the same thread (which a mem callback will be) this is safe. 1699235537faSAlex Bennée */ 1700235537faSAlex Bennée 1701235537faSAlex Bennée bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr, int mmu_idx, 1702235537faSAlex Bennée bool is_store, struct qemu_plugin_hwaddr *data) 1703235537faSAlex Bennée { 1704235537faSAlex Bennée CPUArchState *env = cpu->env_ptr; 1705235537faSAlex Bennée CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr); 1706235537faSAlex Bennée uintptr_t index = tlb_index(env, mmu_idx, addr); 1707235537faSAlex Bennée target_ulong tlb_addr = is_store ? 
tlb_addr_write(tlbe) : tlbe->addr_read; 1708235537faSAlex Bennée 1709235537faSAlex Bennée if (likely(tlb_hit(tlb_addr, addr))) { 1710235537faSAlex Bennée /* We must have an iotlb entry for MMIO */ 1711235537faSAlex Bennée if (tlb_addr & TLB_MMIO) { 1712235537faSAlex Bennée CPUIOTLBEntry *iotlbentry; 1713235537faSAlex Bennée iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index]; 1714235537faSAlex Bennée data->is_io = true; 1715235537faSAlex Bennée data->v.io.section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs); 1716235537faSAlex Bennée data->v.io.offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr; 1717235537faSAlex Bennée } else { 1718235537faSAlex Bennée data->is_io = false; 1719235537faSAlex Bennée data->v.ram.hostaddr = addr + tlbe->addend; 1720235537faSAlex Bennée } 1721235537faSAlex Bennée return true; 17222f3a57eeSAlex Bennée } else { 17232f3a57eeSAlex Bennée SavedIOTLB *saved = &cpu->saved_iotlb; 17242f3a57eeSAlex Bennée data->is_io = true; 17252f3a57eeSAlex Bennée data->v.io.section = saved->section; 17262f3a57eeSAlex Bennée data->v.io.offset = saved->mr_offset; 17272f3a57eeSAlex Bennée return true; 1728235537faSAlex Bennée } 1729235537faSAlex Bennée } 1730235537faSAlex Bennée 1731235537faSAlex Bennée #endif 1732235537faSAlex Bennée 1733d9bb58e5SYang Zhong /* Probe for a read-modify-write atomic operation. Do not allow unaligned 1734d9bb58e5SYang Zhong * operations, or io operations to proceed. Return the host address. */ 1735d9bb58e5SYang Zhong static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, 1736707526adSRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 1737d9bb58e5SYang Zhong { 1738d9bb58e5SYang Zhong size_t mmu_idx = get_mmuidx(oi); 1739383beda9SRichard Henderson uintptr_t index = tlb_index(env, mmu_idx, addr); 1740383beda9SRichard Henderson CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr); 1741403f290cSEmilio G. Cota target_ulong tlb_addr = tlb_addr_write(tlbe); 174214776ab5STony Nguyen MemOp mop = get_memop(oi); 1743d9bb58e5SYang Zhong int a_bits = get_alignment_bits(mop); 1744d9bb58e5SYang Zhong int s_bits = mop & MO_SIZE; 174534d49937SPeter Maydell void *hostaddr; 1746d9bb58e5SYang Zhong 1747d9bb58e5SYang Zhong /* Adjust the given return address. */ 1748d9bb58e5SYang Zhong retaddr -= GETPC_ADJ; 1749d9bb58e5SYang Zhong 1750d9bb58e5SYang Zhong /* Enforce guest required alignment. */ 1751d9bb58e5SYang Zhong if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) { 1752d9bb58e5SYang Zhong /* ??? Maybe indicate atomic op to cpu_unaligned_access */ 175329a0af61SRichard Henderson cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE, 1754d9bb58e5SYang Zhong mmu_idx, retaddr); 1755d9bb58e5SYang Zhong } 1756d9bb58e5SYang Zhong 1757d9bb58e5SYang Zhong /* Enforce qemu required alignment. */ 1758d9bb58e5SYang Zhong if (unlikely(addr & ((1 << s_bits) - 1))) { 1759d9bb58e5SYang Zhong /* We get here if guest alignment was not requested, 1760d9bb58e5SYang Zhong or was not enforced by cpu_unaligned_access above. 1761d9bb58e5SYang Zhong We might widen the access and emulate, but for now 1762d9bb58e5SYang Zhong mark an exception and exit the cpu loop. */ 1763d9bb58e5SYang Zhong goto stop_the_world; 1764d9bb58e5SYang Zhong } 1765d9bb58e5SYang Zhong 1766d9bb58e5SYang Zhong /* Check TLB entry and enforce page permissions. 
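 * Note the pattern used throughout this file: after any tlb_fill()
 * the index and entry must be re-fetched, since a TLB resize may
 * have moved the table (see the comment above tlb_fill()).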
*/ 1767334692bcSPeter Maydell if (!tlb_hit(tlb_addr, addr)) { 1768d9bb58e5SYang Zhong if (!VICTIM_TLB_HIT(addr_write, addr)) { 176929a0af61SRichard Henderson tlb_fill(env_cpu(env), addr, 1 << s_bits, MMU_DATA_STORE, 177098670d47SLaurent Vivier mmu_idx, retaddr); 17716d967cb8SEmilio G. Cota index = tlb_index(env, mmu_idx, addr); 17726d967cb8SEmilio G. Cota tlbe = tlb_entry(env, mmu_idx, addr); 1773d9bb58e5SYang Zhong } 1774403f290cSEmilio G. Cota tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK; 1775d9bb58e5SYang Zhong } 1776d9bb58e5SYang Zhong 177755df6fcfSPeter Maydell /* Notice an IO access or a needs-MMU-lookup access */ 177830d7e098SRichard Henderson if (unlikely(tlb_addr & TLB_MMIO)) { 1779d9bb58e5SYang Zhong /* There's really nothing that can be done to 1780d9bb58e5SYang Zhong support this apart from stop-the-world. */ 1781d9bb58e5SYang Zhong goto stop_the_world; 1782d9bb58e5SYang Zhong } 1783d9bb58e5SYang Zhong 1784d9bb58e5SYang Zhong /* Let the guest notice RMW on a write-only page. */ 178534d49937SPeter Maydell if (unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) { 178629a0af61SRichard Henderson tlb_fill(env_cpu(env), addr, 1 << s_bits, MMU_DATA_LOAD, 178798670d47SLaurent Vivier mmu_idx, retaddr); 1788d9bb58e5SYang Zhong /* Since we don't support reads and writes to different addresses, 1789d9bb58e5SYang Zhong and we do have the proper page loaded for write, this shouldn't 1790d9bb58e5SYang Zhong ever return. But just in case, handle via stop-the-world. */ 1791d9bb58e5SYang Zhong goto stop_the_world; 1792d9bb58e5SYang Zhong } 1793d9bb58e5SYang Zhong 179434d49937SPeter Maydell hostaddr = (void *)((uintptr_t)addr + tlbe->addend); 179534d49937SPeter Maydell 179634d49937SPeter Maydell if (unlikely(tlb_addr & TLB_NOTDIRTY)) { 1797707526adSRichard Henderson notdirty_write(env_cpu(env), addr, 1 << s_bits, 1798707526adSRichard Henderson &env_tlb(env)->d[mmu_idx].iotlb[index], retaddr); 179934d49937SPeter Maydell } 180034d49937SPeter Maydell 180134d49937SPeter Maydell return hostaddr; 1802d9bb58e5SYang Zhong 1803d9bb58e5SYang Zhong stop_the_world: 180429a0af61SRichard Henderson cpu_loop_exit_atomic(env_cpu(env), retaddr); 1805d9bb58e5SYang Zhong } 1806d9bb58e5SYang Zhong 1807eed56642SAlex Bennée /* 1808eed56642SAlex Bennée * Load Helpers 1809eed56642SAlex Bennée * 1810eed56642SAlex Bennée * We support two different access types. SOFTMMU_CODE_ACCESS is 1811eed56642SAlex Bennée * specifically for reading instructions from system memory. It is 1812eed56642SAlex Bennée * called by the translation loop and in some helpers where the code 1813eed56642SAlex Bennée * is disassembled. It shouldn't be called directly by guest code. 
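 *
 * Editor's summary of the common shape below: look up the TLB entry;
 * on a miss, try the victim TLB and then tlb_fill(); any entry with
 * flag bits set (watchpoint, I/O, byte swap, or an access spanning
 * two pages) is diverted to a slow path, and only a plain RAM hit
 * falls through to the direct host access in load_memop().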
1814eed56642SAlex Bennée */ 1815d9bb58e5SYang Zhong 18162dd92606SRichard Henderson typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr, 18172dd92606SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr); 18182dd92606SRichard Henderson 1819c6b716cdSRichard Henderson static inline uint64_t QEMU_ALWAYS_INLINE 182080d9d1c6SRichard Henderson load_memop(const void *haddr, MemOp op) 182180d9d1c6SRichard Henderson { 182280d9d1c6SRichard Henderson switch (op) { 182380d9d1c6SRichard Henderson case MO_UB: 182480d9d1c6SRichard Henderson return ldub_p(haddr); 182580d9d1c6SRichard Henderson case MO_BEUW: 182680d9d1c6SRichard Henderson return lduw_be_p(haddr); 182780d9d1c6SRichard Henderson case MO_LEUW: 182880d9d1c6SRichard Henderson return lduw_le_p(haddr); 182980d9d1c6SRichard Henderson case MO_BEUL: 183080d9d1c6SRichard Henderson return (uint32_t)ldl_be_p(haddr); 183180d9d1c6SRichard Henderson case MO_LEUL: 183280d9d1c6SRichard Henderson return (uint32_t)ldl_le_p(haddr); 183380d9d1c6SRichard Henderson case MO_BEQ: 183480d9d1c6SRichard Henderson return ldq_be_p(haddr); 183580d9d1c6SRichard Henderson case MO_LEQ: 183680d9d1c6SRichard Henderson return ldq_le_p(haddr); 183780d9d1c6SRichard Henderson default: 183880d9d1c6SRichard Henderson qemu_build_not_reached(); 183980d9d1c6SRichard Henderson } 184080d9d1c6SRichard Henderson } 184180d9d1c6SRichard Henderson 184280d9d1c6SRichard Henderson static inline uint64_t QEMU_ALWAYS_INLINE 18432dd92606SRichard Henderson load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, 1844be5c4787STony Nguyen uintptr_t retaddr, MemOp op, bool code_read, 18452dd92606SRichard Henderson FullLoadHelper *full_load) 1846eed56642SAlex Bennée { 1847eed56642SAlex Bennée uintptr_t mmu_idx = get_mmuidx(oi); 1848eed56642SAlex Bennée uintptr_t index = tlb_index(env, mmu_idx, addr); 1849eed56642SAlex Bennée CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); 1850eed56642SAlex Bennée target_ulong tlb_addr = code_read ? entry->addr_code : entry->addr_read; 1851eed56642SAlex Bennée const size_t tlb_off = code_read ? 1852eed56642SAlex Bennée offsetof(CPUTLBEntry, addr_code) : offsetof(CPUTLBEntry, addr_read); 1853f1be3696SRichard Henderson const MMUAccessType access_type = 1854f1be3696SRichard Henderson code_read ? MMU_INST_FETCH : MMU_DATA_LOAD; 1855eed56642SAlex Bennée unsigned a_bits = get_alignment_bits(get_memop(oi)); 1856eed56642SAlex Bennée void *haddr; 1857eed56642SAlex Bennée uint64_t res; 1858be5c4787STony Nguyen size_t size = memop_size(op); 1859d9bb58e5SYang Zhong 1860eed56642SAlex Bennée /* Handle CPU specific unaligned behaviour */ 1861eed56642SAlex Bennée if (addr & ((1 << a_bits) - 1)) { 186229a0af61SRichard Henderson cpu_unaligned_access(env_cpu(env), addr, access_type, 1863eed56642SAlex Bennée mmu_idx, retaddr); 1864eed56642SAlex Bennée } 1865eed56642SAlex Bennée 1866eed56642SAlex Bennée /* If the TLB entry is for a different page, reload and try again. */ 1867eed56642SAlex Bennée if (!tlb_hit(tlb_addr, addr)) { 1868eed56642SAlex Bennée if (!victim_tlb_hit(env, mmu_idx, index, tlb_off, 1869eed56642SAlex Bennée addr & TARGET_PAGE_MASK)) { 187029a0af61SRichard Henderson tlb_fill(env_cpu(env), addr, size, 1871f1be3696SRichard Henderson access_type, mmu_idx, retaddr); 1872eed56642SAlex Bennée index = tlb_index(env, mmu_idx, addr); 1873eed56642SAlex Bennée entry = tlb_entry(env, mmu_idx, addr); 1874eed56642SAlex Bennée } 1875eed56642SAlex Bennée tlb_addr = code_read ? 
entry->addr_code : entry->addr_read; 187630d7e098SRichard Henderson tlb_addr &= ~TLB_INVALID_MASK; 1877eed56642SAlex Bennée } 1878eed56642SAlex Bennée 187950b107c5SRichard Henderson /* Handle anything that isn't just a straight memory access. */ 1880eed56642SAlex Bennée if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) { 188150b107c5SRichard Henderson CPUIOTLBEntry *iotlbentry; 18825b87b3e6SRichard Henderson bool need_swap; 188350b107c5SRichard Henderson 188450b107c5SRichard Henderson /* For anything that is unaligned, recurse through full_load. */ 1885eed56642SAlex Bennée if ((addr & (size - 1)) != 0) { 1886eed56642SAlex Bennée goto do_unaligned_access; 1887eed56642SAlex Bennée } 188850b107c5SRichard Henderson 188950b107c5SRichard Henderson iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index]; 189050b107c5SRichard Henderson 189150b107c5SRichard Henderson /* Handle watchpoints. */ 189250b107c5SRichard Henderson if (unlikely(tlb_addr & TLB_WATCHPOINT)) { 189350b107c5SRichard Henderson /* On watchpoint hit, this will longjmp out. */ 189450b107c5SRichard Henderson cpu_check_watchpoint(env_cpu(env), addr, size, 189550b107c5SRichard Henderson iotlbentry->attrs, BP_MEM_READ, retaddr); 18965b87b3e6SRichard Henderson } 189750b107c5SRichard Henderson 18985b87b3e6SRichard Henderson need_swap = size > 1 && (tlb_addr & TLB_BSWAP); 189950b107c5SRichard Henderson 190050b107c5SRichard Henderson /* Handle I/O access. */ 19015b87b3e6SRichard Henderson if (likely(tlb_addr & TLB_MMIO)) { 19025b87b3e6SRichard Henderson return io_readx(env, iotlbentry, mmu_idx, addr, retaddr, 19035b87b3e6SRichard Henderson access_type, op ^ (need_swap * MO_BSWAP)); 19045b87b3e6SRichard Henderson } 19055b87b3e6SRichard Henderson 19065b87b3e6SRichard Henderson haddr = (void *)((uintptr_t)addr + entry->addend); 19075b87b3e6SRichard Henderson 19085b87b3e6SRichard Henderson /* 19095b87b3e6SRichard Henderson * Keep these two load_memop separate to ensure that the compiler 19105b87b3e6SRichard Henderson * is able to fold the entire function to a single instruction. 19115b87b3e6SRichard Henderson * There is a build-time assert inside to remind you of this. ;-) 19125b87b3e6SRichard Henderson */ 19135b87b3e6SRichard Henderson if (unlikely(need_swap)) { 19145b87b3e6SRichard Henderson return load_memop(haddr, op ^ MO_BSWAP); 19155b87b3e6SRichard Henderson } 19165b87b3e6SRichard Henderson return load_memop(haddr, op); 1917eed56642SAlex Bennée } 1918eed56642SAlex Bennée 1919eed56642SAlex Bennée /* Handle slow unaligned access (it spans two pages or IO). */ 1920eed56642SAlex Bennée if (size > 1 1921eed56642SAlex Bennée && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1 1922eed56642SAlex Bennée >= TARGET_PAGE_SIZE)) { 1923eed56642SAlex Bennée target_ulong addr1, addr2; 19248c79b288SAlex Bennée uint64_t r1, r2; 1925eed56642SAlex Bennée unsigned shift; 1926eed56642SAlex Bennée do_unaligned_access: 1927ab7a2009SAlex Bennée addr1 = addr & ~((target_ulong)size - 1); 1928eed56642SAlex Bennée addr2 = addr1 + size; 19292dd92606SRichard Henderson r1 = full_load(env, addr1, oi, retaddr); 19302dd92606SRichard Henderson r2 = full_load(env, addr2, oi, retaddr); 1931eed56642SAlex Bennée shift = (addr & (size - 1)) * 8; 1932eed56642SAlex Bennée 1933be5c4787STony Nguyen if (memop_big_endian(op)) { 1934eed56642SAlex Bennée /* Big-endian combine. */ 1935eed56642SAlex Bennée res = (r1 << shift) | (r2 >> ((size * 8) - shift)); 1936eed56642SAlex Bennée } else { 1937eed56642SAlex Bennée /* Little-endian combine. 
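 * Worked example (editor's addition): for a 4-byte load at offset 2
 * into the aligned quantity, shift == 16; r1 >> 16 moves the two
 * bytes above the split into the low half of res, and r2 << 16
 * supplies the high half from the low two bytes of the following
 * word, before MAKE_64BIT_MASK trims the result to 32 bits.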
*/ 1938eed56642SAlex Bennée res = (r1 >> shift) | (r2 << ((size * 8) - shift)); 1939eed56642SAlex Bennée } 1940eed56642SAlex Bennée return res & MAKE_64BIT_MASK(0, size * 8); 1941eed56642SAlex Bennée } 1942eed56642SAlex Bennée 1943eed56642SAlex Bennée haddr = (void *)((uintptr_t)addr + entry->addend); 194480d9d1c6SRichard Henderson return load_memop(haddr, op); 1945eed56642SAlex Bennée } 1946eed56642SAlex Bennée 1947eed56642SAlex Bennée /* 1948eed56642SAlex Bennée * For the benefit of TCG generated code, we want to avoid the 1949eed56642SAlex Bennée * complication of ABI-specific return type promotion and always 1950eed56642SAlex Bennée * return a value extended to the register size of the host. This is 1951eed56642SAlex Bennée * tcg_target_long, except in the case of a 32-bit host and 64-bit 1952eed56642SAlex Bennée * data, and for that we always have uint64_t. 1953eed56642SAlex Bennée * 1954eed56642SAlex Bennée * We don't bother with this widened value for SOFTMMU_CODE_ACCESS. 1955eed56642SAlex Bennée */ 1956eed56642SAlex Bennée 19572dd92606SRichard Henderson static uint64_t full_ldub_mmu(CPUArchState *env, target_ulong addr, 19582dd92606SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 19592dd92606SRichard Henderson { 1960be5c4787STony Nguyen return load_helper(env, addr, oi, retaddr, MO_UB, false, full_ldub_mmu); 19612dd92606SRichard Henderson } 19622dd92606SRichard Henderson 1963fc1bc777SRichard Henderson tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr, 1964fc1bc777SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 1965eed56642SAlex Bennée { 19662dd92606SRichard Henderson return full_ldub_mmu(env, addr, oi, retaddr); 19672dd92606SRichard Henderson } 19682dd92606SRichard Henderson 19692dd92606SRichard Henderson static uint64_t full_le_lduw_mmu(CPUArchState *env, target_ulong addr, 19702dd92606SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 19712dd92606SRichard Henderson { 1972be5c4787STony Nguyen return load_helper(env, addr, oi, retaddr, MO_LEUW, false, 19732dd92606SRichard Henderson full_le_lduw_mmu); 1974eed56642SAlex Bennée } 1975eed56642SAlex Bennée 1976fc1bc777SRichard Henderson tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr, 1977fc1bc777SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 1978eed56642SAlex Bennée { 19792dd92606SRichard Henderson return full_le_lduw_mmu(env, addr, oi, retaddr); 19802dd92606SRichard Henderson } 19812dd92606SRichard Henderson 19822dd92606SRichard Henderson static uint64_t full_be_lduw_mmu(CPUArchState *env, target_ulong addr, 19832dd92606SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 19842dd92606SRichard Henderson { 1985be5c4787STony Nguyen return load_helper(env, addr, oi, retaddr, MO_BEUW, false, 19862dd92606SRichard Henderson full_be_lduw_mmu); 1987eed56642SAlex Bennée } 1988eed56642SAlex Bennée 1989fc1bc777SRichard Henderson tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr, 1990fc1bc777SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 1991eed56642SAlex Bennée { 19922dd92606SRichard Henderson return full_be_lduw_mmu(env, addr, oi, retaddr); 19932dd92606SRichard Henderson } 19942dd92606SRichard Henderson 19952dd92606SRichard Henderson static uint64_t full_le_ldul_mmu(CPUArchState *env, target_ulong addr, 19962dd92606SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 19972dd92606SRichard Henderson { 1998be5c4787STony Nguyen return load_helper(env, addr, oi, retaddr, MO_LEUL, false, 19992dd92606SRichard Henderson full_le_ldul_mmu); 
2000eed56642SAlex Bennée } 2001eed56642SAlex Bennée 2002fc1bc777SRichard Henderson tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr, 2003fc1bc777SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 2004eed56642SAlex Bennée { 20052dd92606SRichard Henderson return full_le_ldul_mmu(env, addr, oi, retaddr); 20062dd92606SRichard Henderson } 20072dd92606SRichard Henderson 20082dd92606SRichard Henderson static uint64_t full_be_ldul_mmu(CPUArchState *env, target_ulong addr, 20092dd92606SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 20102dd92606SRichard Henderson { 2011be5c4787STony Nguyen return load_helper(env, addr, oi, retaddr, MO_BEUL, false, 20122dd92606SRichard Henderson full_be_ldul_mmu); 2013eed56642SAlex Bennée } 2014eed56642SAlex Bennée 2015fc1bc777SRichard Henderson tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr, 2016fc1bc777SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 2017eed56642SAlex Bennée { 20182dd92606SRichard Henderson return full_be_ldul_mmu(env, addr, oi, retaddr); 2019eed56642SAlex Bennée } 2020eed56642SAlex Bennée 2021fc1bc777SRichard Henderson uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr, 2022fc1bc777SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 2023eed56642SAlex Bennée { 2024be5c4787STony Nguyen return load_helper(env, addr, oi, retaddr, MO_LEQ, false, 20252dd92606SRichard Henderson helper_le_ldq_mmu); 2026eed56642SAlex Bennée } 2027eed56642SAlex Bennée 2028fc1bc777SRichard Henderson uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr, 2029fc1bc777SRichard Henderson TCGMemOpIdx oi, uintptr_t retaddr) 2030eed56642SAlex Bennée { 2031be5c4787STony Nguyen return load_helper(env, addr, oi, retaddr, MO_BEQ, false, 20322dd92606SRichard Henderson helper_be_ldq_mmu); 2033eed56642SAlex Bennée } 2034eed56642SAlex Bennée 2035eed56642SAlex Bennée /* 2036eed56642SAlex Bennée * Provide signed versions of the load routines as well. We can of course 2037eed56642SAlex Bennée * avoid this for 64-bit data, or for 32-bit data on 32-bit host. 
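 * Each is simply the corresponding unsigned loader narrowed by a
 * signed cast; the implicit widening back to tcg_target_ulong on
 * return performs the sign extension.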
2038eed56642SAlex Bennée */ 2039eed56642SAlex Bennée 2040eed56642SAlex Bennée 2041eed56642SAlex Bennée tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr, 2042eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 2043eed56642SAlex Bennée { 2044eed56642SAlex Bennée return (int8_t)helper_ret_ldub_mmu(env, addr, oi, retaddr); 2045eed56642SAlex Bennée } 2046eed56642SAlex Bennée 2047eed56642SAlex Bennée tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr, 2048eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 2049eed56642SAlex Bennée { 2050eed56642SAlex Bennée return (int16_t)helper_le_lduw_mmu(env, addr, oi, retaddr); 2051eed56642SAlex Bennée } 2052eed56642SAlex Bennée 2053eed56642SAlex Bennée tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr, 2054eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 2055eed56642SAlex Bennée { 2056eed56642SAlex Bennée return (int16_t)helper_be_lduw_mmu(env, addr, oi, retaddr); 2057eed56642SAlex Bennée } 2058eed56642SAlex Bennée 2059eed56642SAlex Bennée tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr, 2060eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 2061eed56642SAlex Bennée { 2062eed56642SAlex Bennée return (int32_t)helper_le_ldul_mmu(env, addr, oi, retaddr); 2063eed56642SAlex Bennée } 2064eed56642SAlex Bennée 2065eed56642SAlex Bennée tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr, 2066eed56642SAlex Bennée TCGMemOpIdx oi, uintptr_t retaddr) 2067eed56642SAlex Bennée { 2068eed56642SAlex Bennée return (int32_t)helper_be_ldul_mmu(env, addr, oi, retaddr); 2069eed56642SAlex Bennée } 2070eed56642SAlex Bennée 2071eed56642SAlex Bennée /* 2072d03f1408SRichard Henderson * Load helpers for cpu_ldst.h. 
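 *
 * Typical use from a target helper (illustrative sketch, editor's
 * addition; the surrounding context is assumed):
 *
 *     uint32_t val = cpu_ldl_le_mmuidx_ra(env, addr, mmu_idx, GETPC());
 *
 * where GETPC() supplies the host return address used for unwinding
 * if the access faults.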
2073d03f1408SRichard Henderson */ 2074d03f1408SRichard Henderson 2075d03f1408SRichard Henderson static inline uint64_t cpu_load_helper(CPUArchState *env, abi_ptr addr, 2076d03f1408SRichard Henderson int mmu_idx, uintptr_t retaddr, 2077d03f1408SRichard Henderson MemOp op, FullLoadHelper *full_load) 2078d03f1408SRichard Henderson { 2079d03f1408SRichard Henderson uint16_t meminfo; 2080d03f1408SRichard Henderson TCGMemOpIdx oi; 2081d03f1408SRichard Henderson uint64_t ret; 2082d03f1408SRichard Henderson 2083d03f1408SRichard Henderson meminfo = trace_mem_get_info(op, mmu_idx, false); 2084d03f1408SRichard Henderson trace_guest_mem_before_exec(env_cpu(env), addr, meminfo); 2085d03f1408SRichard Henderson 2086d03f1408SRichard Henderson op &= ~MO_SIGN; 2087d03f1408SRichard Henderson oi = make_memop_idx(op, mmu_idx); 2088d03f1408SRichard Henderson ret = full_load(env, addr, oi, retaddr); 2089d03f1408SRichard Henderson 2090d03f1408SRichard Henderson qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, meminfo); 2091d03f1408SRichard Henderson 2092d03f1408SRichard Henderson return ret; 2093d03f1408SRichard Henderson } 2094d03f1408SRichard Henderson 2095d03f1408SRichard Henderson uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr, 2096d03f1408SRichard Henderson int mmu_idx, uintptr_t ra) 2097d03f1408SRichard Henderson { 2098d03f1408SRichard Henderson return cpu_load_helper(env, addr, mmu_idx, ra, MO_UB, full_ldub_mmu); 2099d03f1408SRichard Henderson } 2100d03f1408SRichard Henderson 2101d03f1408SRichard Henderson int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr, 2102d03f1408SRichard Henderson int mmu_idx, uintptr_t ra) 2103d03f1408SRichard Henderson { 2104d03f1408SRichard Henderson return (int8_t)cpu_load_helper(env, addr, mmu_idx, ra, MO_SB, 2105d03f1408SRichard Henderson full_ldub_mmu); 2106d03f1408SRichard Henderson } 2107d03f1408SRichard Henderson 2108b9e60257SRichard Henderson uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, 2109d03f1408SRichard Henderson int mmu_idx, uintptr_t ra) 2110d03f1408SRichard Henderson { 2111b9e60257SRichard Henderson return cpu_load_helper(env, addr, mmu_idx, ra, MO_BEUW, full_be_lduw_mmu); 2112d03f1408SRichard Henderson } 2113d03f1408SRichard Henderson 2114b9e60257SRichard Henderson int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, 2115d03f1408SRichard Henderson int mmu_idx, uintptr_t ra) 2116d03f1408SRichard Henderson { 2117b9e60257SRichard Henderson return (int16_t)cpu_load_helper(env, addr, mmu_idx, ra, MO_BESW, 2118b9e60257SRichard Henderson full_be_lduw_mmu); 2119d03f1408SRichard Henderson } 2120d03f1408SRichard Henderson 2121b9e60257SRichard Henderson uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, 2122d03f1408SRichard Henderson int mmu_idx, uintptr_t ra) 2123d03f1408SRichard Henderson { 2124b9e60257SRichard Henderson return cpu_load_helper(env, addr, mmu_idx, ra, MO_BEUL, full_be_ldul_mmu); 2125d03f1408SRichard Henderson } 2126d03f1408SRichard Henderson 2127b9e60257SRichard Henderson uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, 2128d03f1408SRichard Henderson int mmu_idx, uintptr_t ra) 2129d03f1408SRichard Henderson { 2130b9e60257SRichard Henderson return cpu_load_helper(env, addr, mmu_idx, ra, MO_BEQ, helper_be_ldq_mmu); 2131b9e60257SRichard Henderson } 2132b9e60257SRichard Henderson 2133b9e60257SRichard Henderson uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, 2134b9e60257SRichard Henderson int mmu_idx, uintptr_t ra) 2135b9e60257SRichard Henderson { 2136b9e60257SRichard Henderson return 

uint32_t cpu_ldub_data_ra(CPUArchState *env, target_ulong ptr,
                          uintptr_t retaddr)
{
    return cpu_ldub_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
}

int cpu_ldsb_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr)
{
    return cpu_ldsb_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
}

uint32_t cpu_lduw_be_data_ra(CPUArchState *env, target_ulong ptr,
                             uintptr_t retaddr)
{
    return cpu_lduw_be_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
}

int cpu_ldsw_be_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr)
{
    return cpu_ldsw_be_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
}

uint32_t cpu_ldl_be_data_ra(CPUArchState *env, target_ulong ptr,
                            uintptr_t retaddr)
{
    return cpu_ldl_be_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
}

uint64_t cpu_ldq_be_data_ra(CPUArchState *env, target_ulong ptr,
                            uintptr_t retaddr)
{
    return cpu_ldq_be_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
}

uint32_t cpu_lduw_le_data_ra(CPUArchState *env, target_ulong ptr,
                             uintptr_t retaddr)
{
    return cpu_lduw_le_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
}

int cpu_ldsw_le_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr)
{
    return cpu_ldsw_le_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
}

uint32_t cpu_ldl_le_data_ra(CPUArchState *env, target_ulong ptr,
                            uintptr_t retaddr)
{
    return cpu_ldl_le_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
}

uint64_t cpu_ldq_le_data_ra(CPUArchState *env, target_ulong ptr,
                            uintptr_t retaddr)
{
    return cpu_ldq_le_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
}

uint32_t cpu_ldub_data(CPUArchState *env, target_ulong ptr)
{
    return cpu_ldub_data_ra(env, ptr, 0);
}

int cpu_ldsb_data(CPUArchState *env, target_ulong ptr)
{
    return cpu_ldsb_data_ra(env, ptr, 0);
}

uint32_t cpu_lduw_be_data(CPUArchState *env, target_ulong ptr)
{
    return cpu_lduw_be_data_ra(env, ptr, 0);
}

int cpu_ldsw_be_data(CPUArchState *env, target_ulong ptr)
{
    return cpu_ldsw_be_data_ra(env, ptr, 0);
}

uint32_t cpu_ldl_be_data(CPUArchState *env, target_ulong ptr)
{
    return cpu_ldl_be_data_ra(env, ptr, 0);
}

uint64_t cpu_ldq_be_data(CPUArchState *env, target_ulong ptr)
{
    return cpu_ldq_be_data_ra(env, ptr, 0);
}

uint32_t cpu_lduw_le_data(CPUArchState *env, target_ulong ptr)
{
    return cpu_lduw_le_data_ra(env, ptr, 0);
}

int cpu_ldsw_le_data(CPUArchState *env, target_ulong ptr)
{
    return cpu_ldsw_le_data_ra(env, ptr, 0);
}

uint32_t cpu_ldl_le_data(CPUArchState *env, target_ulong ptr)
{
    return cpu_ldl_le_data_ra(env, ptr, 0);
}

uint64_t cpu_ldq_le_data(CPUArchState *env, target_ulong ptr)
{
    return cpu_ldq_le_data_ra(env, ptr, 0);
}
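
/*
 * The three load tiers above differ only in how much the caller spells
 * out.  For a little-endian long, for example, these are equivalent:
 *
 *     cpu_ldl_le_data(env, ptr);
 *     cpu_ldl_le_data_ra(env, ptr, 0);
 *     cpu_ldl_le_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), 0);
 *
 * A zero retaddr means there is no translated block to unwind, so any
 * fault is raised against the current CPU state.
 */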

/*
 * Store Helpers
 */

static inline void QEMU_ALWAYS_INLINE
store_memop(void *haddr, uint64_t val, MemOp op)
{
    switch (op) {
    case MO_UB:
        stb_p(haddr, val);
        break;
    case MO_BEUW:
        stw_be_p(haddr, val);
        break;
    case MO_LEUW:
        stw_le_p(haddr, val);
        break;
    case MO_BEUL:
        stl_be_p(haddr, val);
        break;
    case MO_LEUL:
        stl_le_p(haddr, val);
        break;
    case MO_BEQ:
        stq_be_p(haddr, val);
        break;
    case MO_LEQ:
        stq_le_p(haddr, val);
        break;
    default:
        qemu_build_not_reached();
    }
}
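
/*
 * Note that every caller of store_memop() passes a compile-time
 * constant op, so the switch above folds away entirely: e.g. the
 * fast path of helper_le_stl_mmu() below reduces to a single
 * stl_le_p(haddr, val).  If op were not constant, the default case
 * would remain reachable and qemu_build_not_reached() would fail
 * the build.
 */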

static void __attribute__((noinline))
store_helper_unaligned(CPUArchState *env, target_ulong addr, uint64_t val,
                       uintptr_t retaddr, size_t size, uintptr_t mmu_idx,
                       bool big_endian)
{
    const size_t tlb_off = offsetof(CPUTLBEntry, addr_write);
    uintptr_t index, index2;
    CPUTLBEntry *entry, *entry2;
    target_ulong page2, tlb_addr, tlb_addr2;
    TCGMemOpIdx oi;
    size_t size2;
    int i;

    /*
     * Ensure the second page is in the TLB.  Note that the first page
     * is already guaranteed to be filled, and that the second page
     * cannot evict the first.
     */
    page2 = (addr + size) & TARGET_PAGE_MASK;
    size2 = (addr + size) & ~TARGET_PAGE_MASK;
    index2 = tlb_index(env, mmu_idx, page2);
    entry2 = tlb_entry(env, mmu_idx, page2);

    tlb_addr2 = tlb_addr_write(entry2);
    if (!tlb_hit_page(tlb_addr2, page2)) {
        if (!victim_tlb_hit(env, mmu_idx, index2, tlb_off, page2)) {
            tlb_fill(env_cpu(env), page2, size2, MMU_DATA_STORE,
                     mmu_idx, retaddr);
            index2 = tlb_index(env, mmu_idx, page2);
            entry2 = tlb_entry(env, mmu_idx, page2);
        }
        tlb_addr2 = tlb_addr_write(entry2);
    }

    index = tlb_index(env, mmu_idx, addr);
    entry = tlb_entry(env, mmu_idx, addr);
    tlb_addr = tlb_addr_write(entry);

    /*
     * Handle watchpoints.  Since this may trap, all checks
     * must happen before any store.
     */
    if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
        cpu_check_watchpoint(env_cpu(env), addr, size - size2,
                             env_tlb(env)->d[mmu_idx].iotlb[index].attrs,
                             BP_MEM_WRITE, retaddr);
    }
    if (unlikely(tlb_addr2 & TLB_WATCHPOINT)) {
        cpu_check_watchpoint(env_cpu(env), page2, size2,
                             env_tlb(env)->d[mmu_idx].iotlb[index2].attrs,
                             BP_MEM_WRITE, retaddr);
    }

    /*
     * XXX: not efficient, but simple.
     * This loop must go in the forward direction to avoid issues
     * with self-modifying code in Windows 64-bit.
     */
    oi = make_memop_idx(MO_UB, mmu_idx);
    if (big_endian) {
        for (i = 0; i < size; ++i) {
            /* Big-endian extract.  */
            uint8_t val8 = val >> (((size - 1) * 8) - (i * 8));
            helper_ret_stb_mmu(env, addr + i, val8, oi, retaddr);
        }
    } else {
        for (i = 0; i < size; ++i) {
            /* Little-endian extract.  */
            uint8_t val8 = val >> (i * 8);
            helper_ret_stb_mmu(env, addr + i, val8, oi, retaddr);
        }
    }
}
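
/*
 * Worked example for the extraction above: storing val = 0x11223344
 * with size = 4 across a page boundary issues byte stores, at
 * addr + 0..3, of:
 *
 *     big_endian:     0x11 0x22 0x33 0x44   (val >> 24, 16, 8, 0)
 *     little_endian:  0x44 0x33 0x22 0x11   (val >> 0, 8, 16, 24)
 *
 * Going through helper_ret_stb_mmu() for each byte keeps the per-page
 * TLB flags (MMIO, watchpoints, dirty tracking) honoured on both
 * sides of the boundary.
 */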

static inline void QEMU_ALWAYS_INLINE
store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
             TCGMemOpIdx oi, uintptr_t retaddr, MemOp op)
{
    uintptr_t mmu_idx = get_mmuidx(oi);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
    target_ulong tlb_addr = tlb_addr_write(entry);
    const size_t tlb_off = offsetof(CPUTLBEntry, addr_write);
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    void *haddr;
    size_t size = memop_size(op);

    /* Handle CPU specific unaligned behaviour */
    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again.  */
    if (!tlb_hit(tlb_addr, addr)) {
        if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
                            addr & TARGET_PAGE_MASK)) {
            tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE,
                     mmu_idx, retaddr);
            index = tlb_index(env, mmu_idx, addr);
            entry = tlb_entry(env, mmu_idx, addr);
        }
        tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK;
    }

    /* Handle anything that isn't just a straight memory access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUIOTLBEntry *iotlbentry;
        bool need_swap;

        /* For anything that is unaligned, recurse through byte stores.  */
        if ((addr & (size - 1)) != 0) {
            goto do_unaligned_access;
        }

        iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];

        /* Handle watchpoints.  */
        if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
            /* On watchpoint hit, this will longjmp out.  */
            cpu_check_watchpoint(env_cpu(env), addr, size,
                                 iotlbentry->attrs, BP_MEM_WRITE, retaddr);
        }

        need_swap = size > 1 && (tlb_addr & TLB_BSWAP);

        /* Handle I/O access.  */
        if (tlb_addr & TLB_MMIO) {
            io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr,
                      op ^ (need_swap * MO_BSWAP));
            return;
        }

        /* Ignore writes to ROM.  */
        if (unlikely(tlb_addr & TLB_DISCARD_WRITE)) {
            return;
        }

        /* Handle clean RAM pages.  */
        if (tlb_addr & TLB_NOTDIRTY) {
            notdirty_write(env_cpu(env), addr, size, iotlbentry, retaddr);
        }

        haddr = (void *)((uintptr_t)addr + entry->addend);

        /*
         * Keep these two store_memop separate to ensure that the compiler
         * is able to fold the entire function to a single instruction.
         * There is a build-time assert inside to remind you of this.  ;-)
         */
        if (unlikely(need_swap)) {
            store_memop(haddr, val, op ^ MO_BSWAP);
        } else {
            store_memop(haddr, val, op);
        }
        return;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (size > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
                    >= TARGET_PAGE_SIZE)) {
    do_unaligned_access:
        store_helper_unaligned(env, addr, val, retaddr, size,
                               mmu_idx, memop_big_endian(op));
        return;
    }

    haddr = (void *)((uintptr_t)addr + entry->addend);
    store_memop(haddr, val, op);
}
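
/*
 * The tlb_addr & ~TARGET_PAGE_MASK test above works because the slow
 * path flags (TLB_MMIO, TLB_WATCHPOINT, TLB_NOTDIRTY, ...) live in the
 * address bits below the page.  With 4 KiB target pages, for instance,
 * a flag-free entry has zero in the low 12 bits of tlb_addr, so any
 * nonzero bit there diverts the store off the fast path.
 */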

void __attribute__((noinline))
helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
                   TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, MO_UB);
}

void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, MO_LEUW);
}

void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, MO_BEUW);
}

void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, MO_LEUL);
}

void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, MO_BEUL);
}

void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, MO_LEQ);
}

void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, MO_BEQ);
}
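
/*
 * These are the entry points used by TCG-generated code for guest
 * stores.  A C caller would first pack the MemOp and MMU index into
 * an oi; a minimal sketch, assuming a valid mmu_idx is in hand:
 *
 *     TCGMemOpIdx oi = make_memop_idx(MO_LEUL, mmu_idx);
 *     helper_le_stl_mmu(env, addr, val, oi, GETPC());
 */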

/*
 * Store Helpers for cpu_ldst.h
 */

static inline void QEMU_ALWAYS_INLINE
cpu_store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
                 int mmu_idx, uintptr_t retaddr, MemOp op)
{
    TCGMemOpIdx oi;
    uint16_t meminfo;

    meminfo = trace_mem_get_info(op, mmu_idx, true);
    trace_guest_mem_before_exec(env_cpu(env), addr, meminfo);

    oi = make_memop_idx(op, mmu_idx);
    store_helper(env, addr, val, oi, retaddr, op);

    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, meminfo);
}

void cpu_stb_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
                       int mmu_idx, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_UB);
}

void cpu_stw_be_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
                          int mmu_idx, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_BEUW);
}

void cpu_stl_be_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
                          int mmu_idx, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_BEUL);
}

void cpu_stq_be_mmuidx_ra(CPUArchState *env, target_ulong addr, uint64_t val,
                          int mmu_idx, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_BEQ);
}

void cpu_stw_le_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
                          int mmu_idx, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_LEUW);
}

void cpu_stl_le_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
                          int mmu_idx, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_LEUL);
}

void cpu_stq_le_mmuidx_ra(CPUArchState *env, target_ulong addr, uint64_t val,
                          int mmu_idx, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_LEQ);
}
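
/*
 * Example (hypothetical): a target that must store through a specific
 * translation regime, rather than the current one, names the MMU index
 * explicitly.  MY_PRIV_MMU_IDX below stands in for a target-defined
 * index:
 *
 *     cpu_stl_be_mmuidx_ra(env, addr, val, MY_PRIV_MMU_IDX, GETPC());
 */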

void cpu_stb_data_ra(CPUArchState *env, target_ulong ptr,
                     uint32_t val, uintptr_t retaddr)
{
    cpu_stb_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
}

void cpu_stw_be_data_ra(CPUArchState *env, target_ulong ptr,
                        uint32_t val, uintptr_t retaddr)
{
    cpu_stw_be_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
}

void cpu_stl_be_data_ra(CPUArchState *env, target_ulong ptr,
                        uint32_t val, uintptr_t retaddr)
{
    cpu_stl_be_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
}

void cpu_stq_be_data_ra(CPUArchState *env, target_ulong ptr,
                        uint64_t val, uintptr_t retaddr)
{
    cpu_stq_be_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
}

void cpu_stw_le_data_ra(CPUArchState *env, target_ulong ptr,
                        uint32_t val, uintptr_t retaddr)
{
    cpu_stw_le_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
}

void cpu_stl_le_data_ra(CPUArchState *env, target_ulong ptr,
                        uint32_t val, uintptr_t retaddr)
{
    cpu_stl_le_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
}

void cpu_stq_le_data_ra(CPUArchState *env, target_ulong ptr,
                        uint64_t val, uintptr_t retaddr)
{
    cpu_stq_le_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
}

void cpu_stb_data(CPUArchState *env, target_ulong ptr, uint32_t val)
{
    cpu_stb_data_ra(env, ptr, val, 0);
}

void cpu_stw_be_data(CPUArchState *env, target_ulong ptr, uint32_t val)
{
    cpu_stw_be_data_ra(env, ptr, val, 0);
}

void cpu_stl_be_data(CPUArchState *env, target_ulong ptr, uint32_t val)
{
    cpu_stl_be_data_ra(env, ptr, val, 0);
}

void cpu_stq_be_data(CPUArchState *env, target_ulong ptr, uint64_t val)
{
    cpu_stq_be_data_ra(env, ptr, val, 0);
}

void cpu_stw_le_data(CPUArchState *env, target_ulong ptr, uint32_t val)
{
    cpu_stw_le_data_ra(env, ptr, val, 0);
}

void cpu_stl_le_data(CPUArchState *env, target_ulong ptr, uint32_t val)
{
    cpu_stl_le_data_ra(env, ptr, val, 0);
}

void cpu_stq_le_data(CPUArchState *env, target_ulong ptr, uint64_t val)
{
    cpu_stq_le_data_ra(env, ptr, val, 0);
}
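
/*
 * As with the loads, each cpu_st*_data() wrapper is shorthand for the
 * _ra variant with a zero retaddr, which in turn defers the MMU index
 * to cpu_mmu_index(env, false).  For example, these are equivalent:
 *
 *     cpu_stl_le_data(env, ptr, val);
 *     cpu_stl_le_data_ra(env, ptr, val, 0);
 */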

/*
 * The first set of atomic helpers allows passing in OI and RETADDR,
 * which makes them callable from other helpers.
 */

#define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr
#define ATOMIC_NAME(X) \
    HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
#define ATOMIC_MMU_DECLS
#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, retaddr)
#define ATOMIC_MMU_CLEANUP
#define ATOMIC_MMU_IDX get_mmuidx(oi)

#include "atomic_common.c.inc"

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#if HAVE_CMPXCHG128 || HAVE_ATOMIC128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif

/* The second set of helpers is directly callable from TCG as helpers.  */

#undef EXTRA_ARGS
#undef ATOMIC_NAME
#undef ATOMIC_MMU_LOOKUP
#define EXTRA_ARGS , TCGMemOpIdx oi
#define ATOMIC_NAME(X) HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, GETPC())

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif
#undef ATOMIC_MMU_IDX
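
/*
 * Each DATA_SIZE inclusion of "atomic_template.h" stamps out one
 * family of atomic helpers, named via ATOMIC_NAME() above.  Tracing
 * the glue by hand for DATA_SIZE 4 (suffix "l", little-endian end
 * "_le"), the first set yields names such as:
 *
 *     helper_atomic_cmpxchgl_le_mmu(env, addr, cmpv, newv, oi, retaddr)
 *
 * while the second set drops the explicit retaddr and recovers it
 * with GETPC() inside ATOMIC_MMU_LOOKUP.
 */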

/* Code access functions.  */

static uint64_t full_ldub_code(CPUArchState *env, target_ulong addr,
                               TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_8, true, full_ldub_code);
}

uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr)
{
    TCGMemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(env, true));
    return full_ldub_code(env, addr, oi, 0);
}

static uint64_t full_lduw_code(CPUArchState *env, target_ulong addr,
                               TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_TEUW, true, full_lduw_code);
}

uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr)
{
    TCGMemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(env, true));
    return full_lduw_code(env, addr, oi, 0);
}

static uint64_t full_ldl_code(CPUArchState *env, target_ulong addr,
                              TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_TEUL, true, full_ldl_code);
}

uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr)
{
    TCGMemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, true));
    return full_ldl_code(env, addr, oi, 0);
}

static uint64_t full_ldq_code(CPUArchState *env, target_ulong addr,
                              TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_TEQ, true, full_ldq_code);
}

uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr)
{
    TCGMemOpIdx oi = make_memop_idx(MO_TEQ, cpu_mmu_index(env, true));
    return full_ldq_code(env, addr, oi, 0);
}
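
/*
 * Example (hypothetical target): an instruction decoder fetches
 * opcodes through the _code variants so that the code MMU index,
 * cpu_mmu_index(env, true), and execute permissions apply rather
 * than the data ones:
 *
 *     uint32_t insn = cpu_ldl_code(env, pc);
 *
 * where pc is the guest address of the instruction being translated.
 */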