/*
 * Common CPU TLB handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "hw/core/tcg-cpu-ops.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
#include "qemu/error-report.h"
#include "exec/log.h"
#include "exec/helper-proto.h"
#include "qemu/atomic.h"
#include "qemu/atomic128.h"
#include "exec/translate-all.h"
#include "trace/trace-root.h"
#include "tb-hash.h"
#include "internal.h"
#ifdef CONFIG_PLUGIN
#include "qemu/plugin-memory.h"
#endif
#include "tcg/tcg-ldst.h"

/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif

#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)

#define assert_cpu_is_self(cpu) do {                              \
        if (DEBUG_TLB_GATE) {                                     \
            g_assert(!(cpu)->created || qemu_cpu_is_self(cpu));   \
        }                                                         \
    } while (0)

/* run_on_cpu_data.target_ptr should always be big enough for a
 * target_ulong even on 32-bit builds */
QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));

/* We currently can't handle more than 16 bits in the MMUIDX bitmask. */
QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
#define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)

static inline size_t tlb_n_entries(CPUTLBDescFast *fast)
{
    return (fast->mask >> CPU_TLB_ENTRY_BITS) + 1;
}

static inline size_t sizeof_tlb(CPUTLBDescFast *fast)
{
    return fast->mask + (1 << CPU_TLB_ENTRY_BITS);
}

static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
                             size_t max_entries)
{
    desc->window_begin_ns = ns;
    desc->window_max_entries = max_entries;
}

static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
{
    unsigned int i, i0 = tb_jmp_cache_hash_page(page_addr);

    for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
        qatomic_set(&cpu->tb_jmp_cache[i0 + i], NULL);
    }
}

static void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
{
    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page. */
    tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
    tb_jmp_cache_clear_page(cpu, addr);
}

/**
 * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
 * @desc: The CPUTLBDesc portion of the TLB
 * @fast: The CPUTLBDescFast portion of the same TLB
 *
 * Called with tlb_lock held.
 *
 * We have two main constraints when resizing a TLB: (1) we only resize it
 * on a TLB flush (otherwise we'd have to take a perf hit by either rehashing
 * the array or unnecessarily flushing it), which means we do not control how
 * frequently the resizing can occur; (2) we don't have access to the guest's
 * future scheduling decisions, and therefore have to decide the magnitude of
 * the resize based on past observations.
 *
 * In general, a memory-hungry process can benefit greatly from an appropriately
 * sized TLB, since a guest TLB miss is very expensive. This doesn't mean that
 * we just have to make the TLB as large as possible; while an oversized TLB
 * results in minimal TLB miss rates, it also takes longer to be flushed
 * (flushes can be _very_ frequent), and the reduced locality can also hurt
 * performance.
 *
 * To achieve near-optimal performance for all kinds of workloads, we:
 *
 * 1. Aggressively increase the size of the TLB when the use rate of the
 * TLB being flushed is high, since it is likely that in the near future this
 * memory-hungry process will execute again, and its memory hungriness will
 * probably be similar.
 *
 * 2. Slowly reduce the size of the TLB as the use rate declines over a
 * reasonably large time window. The rationale is that if in such a time window
 * we have not observed a high TLB use rate, it is likely that we won't observe
 * it in the near future. In that case, once a time window expires we downsize
 * the TLB to match the maximum use rate observed in the window.
 *
 * 3. Try to keep the maximum use rate in a time window in the 30-70% range,
 * since in that range performance is likely near-optimal. Recall that the TLB
 * is direct mapped, so we want the use rate to be low (or at least not too
 * high), since otherwise we are likely to have a significant amount of
 * conflict misses.
 */
static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast,
                                  int64_t now)
{
    size_t old_size = tlb_n_entries(fast);
    size_t rate;
    size_t new_size = old_size;
    int64_t window_len_ms = 100;
    int64_t window_len_ns = window_len_ms * 1000 * 1000;
    bool window_expired = now > desc->window_begin_ns + window_len_ns;

    if (desc->n_used_entries > desc->window_max_entries) {
        desc->window_max_entries = desc->n_used_entries;
    }
    rate = desc->window_max_entries * 100 / old_size;

    if (rate > 70) {
        new_size = MIN(old_size << 1, 1 << CPU_TLB_DYN_MAX_BITS);
    } else if (rate < 30 && window_expired) {
        size_t ceil = pow2ceil(desc->window_max_entries);
        size_t expected_rate = desc->window_max_entries * 100 / ceil;

        /*
         * Avoid undersizing when the max number of entries seen is just below
         * a pow2. For instance, if max_entries == 1025, the expected use rate
         * would be 1025/2048==50%. However, if max_entries == 1023, we'd get
         * 1023/1024==99.9% use rate, so we'd likely end up doubling the size
         * later. Thus, make sure that the expected use rate remains below 70%.
         * (and since we double the size, that means the lowest rate we'd
         * expect to get is 35%, which is still in the 30-70% range where
         * we consider that the size is appropriate.)
         */
        if (expected_rate > 70) {
            ceil *= 2;
        }
        new_size = MAX(ceil, 1 << CPU_TLB_DYN_MIN_BITS);
    }

    if (new_size == old_size) {
        if (window_expired) {
            tlb_window_reset(desc, now, desc->n_used_entries);
        }
        return;
    }

    g_free(fast->table);
    g_free(desc->fulltlb);

    tlb_window_reset(desc, now, 0);
    /* desc->n_used_entries is cleared by the caller */
    fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
    fast->table = g_try_new(CPUTLBEntry, new_size);
    desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size);

    /*
     * If the allocations fail, try smaller sizes. We just freed some
     * memory, so going back to half of new_size has a good chance of working.
     * Increased memory pressure elsewhere in the system might cause the
     * allocations to fail though, so we progressively reduce the allocation
     * size, aborting if we cannot even allocate the smallest TLB we support.
     */
    while (fast->table == NULL || desc->fulltlb == NULL) {
        if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
            error_report("%s: %s", __func__, strerror(errno));
            abort();
        }
        new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS);
        fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;

        g_free(fast->table);
        g_free(desc->fulltlb);
        fast->table = g_try_new(CPUTLBEntry, new_size);
        desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size);
    }
}

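/*
 * An illustrative walk-through of the policy above, with hypothetical
 * numbers rather than values from a real trace: suppose old_size == 1024
 * and desc->window_max_entries == 768.  Then rate == 768 * 100 / 1024
 * == 75 > 70, so the table doubles to 2048 entries (capped at
 * 1 << CPU_TLB_DYN_MAX_BITS).  Conversely, with window_max_entries == 256
 * and an expired window, rate == 25 < 30; pow2ceil(256) == 256 would
 * give expected_rate == 100 > 70, so ceil is doubled to 512 and the
 * table shrinks to MAX(512, 1 << CPU_TLB_DYN_MIN_BITS) entries.
 */
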
static void tlb_mmu_flush_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast)
{
    desc->n_used_entries = 0;
    desc->large_page_addr = -1;
    desc->large_page_mask = -1;
    desc->vindex = 0;
    memset(fast->table, -1, sizeof_tlb(fast));
    memset(desc->vtable, -1, sizeof(desc->vtable));
}

static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx,
                                        int64_t now)
{
    CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
    CPUTLBDescFast *fast = &env_tlb(env)->f[mmu_idx];

    tlb_mmu_resize_locked(desc, fast, now);
    tlb_mmu_flush_locked(desc, fast);
}

static void tlb_mmu_init(CPUTLBDesc *desc, CPUTLBDescFast *fast, int64_t now)
{
    size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;

    tlb_window_reset(desc, now, 0);
    desc->n_used_entries = 0;
    fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
    fast->table = g_new(CPUTLBEntry, n_entries);
    desc->fulltlb = g_new(CPUTLBEntryFull, n_entries);
    tlb_mmu_flush_locked(desc, fast);
}

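/*
 * A note on the fast->mask encoding initialized above and relied on by
 * tlb_n_entries() and sizeof_tlb(): the mask stores
 * (n_entries - 1) << CPU_TLB_ENTRY_BITS, i.e. it is pre-scaled by the
 * entry size so that the fast path can mask a shifted guest address
 * directly into a byte offset into the table.  Illustrative numbers
 * only: with n_entries == 256 and a 32-byte entry
 * (CPU_TLB_ENTRY_BITS == 5), mask == 255 << 5 == 0x1fe0 and
 * sizeof_tlb() == 0x1fe0 + 32 == 8192 bytes.
 */
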
static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
{
    env_tlb(env)->d[mmu_idx].n_used_entries++;
}

static inline void tlb_n_used_entries_dec(CPUArchState *env, uintptr_t mmu_idx)
{
    env_tlb(env)->d[mmu_idx].n_used_entries--;
}

void tlb_init(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;
    int64_t now = get_clock_realtime();
    int i;

    qemu_spin_init(&env_tlb(env)->c.lock);

    /* All tlbs are initialized flushed. */
    env_tlb(env)->c.dirty = 0;

    for (i = 0; i < NB_MMU_MODES; i++) {
        tlb_mmu_init(&env_tlb(env)->d[i], &env_tlb(env)->f[i], now);
    }
}

void tlb_destroy(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;
    int i;

    qemu_spin_destroy(&env_tlb(env)->c.lock);
    for (i = 0; i < NB_MMU_MODES; i++) {
        CPUTLBDesc *desc = &env_tlb(env)->d[i];
        CPUTLBDescFast *fast = &env_tlb(env)->f[i];

        g_free(fast->table);
        g_free(desc->fulltlb);
    }
}

/* flush_all_helper: run fn across all cpus
 *
 * If the caller then queues its own helper as "safe" work (as the
 * _synced flush variants below do), a synchronisation point is created
 * where all queued work is finished before execution starts again.
 */
static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
                             run_on_cpu_data d)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, fn, d);
        }
    }
}

void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
{
    CPUState *cpu;
    size_t full = 0, part = 0, elide = 0;

    CPU_FOREACH(cpu) {
        CPUArchState *env = cpu->env_ptr;

        full += qatomic_read(&env_tlb(env)->c.full_flush_count);
        part += qatomic_read(&env_tlb(env)->c.part_flush_count);
        elide += qatomic_read(&env_tlb(env)->c.elide_flush_count);
    }
    *pfull = full;
    *ppart = part;
    *pelide = elide;
}

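/*
 * A sketch of the dirty-bitmask bookkeeping done by
 * tlb_flush_by_mmuidx_async_work() below, with hypothetical values:
 * given c.dirty == 0b0110 (mmu_idx 1 and 2 have been touched since
 * their last flush) and a request for idxmap == 0b0011, we get
 * to_clean == 0b0010: only mmu_idx 1 is actually flushed, the flush
 * of the still-clean mmu_idx 0 is elided, and c.dirty is left as
 * 0b0100.  The full/part/elide counters reported by tlb_flush_counts()
 * are updated to match.
 */
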
static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    uint16_t asked = data.host_int;
    uint16_t all_dirty, work, to_clean;
    int64_t now = get_clock_realtime();

    assert_cpu_is_self(cpu);

    tlb_debug("mmu_idx:0x%04" PRIx16 "\n", asked);

    qemu_spin_lock(&env_tlb(env)->c.lock);

    all_dirty = env_tlb(env)->c.dirty;
    to_clean = asked & all_dirty;
    all_dirty &= ~to_clean;
    env_tlb(env)->c.dirty = all_dirty;

    for (work = to_clean; work != 0; work &= work - 1) {
        int mmu_idx = ctz32(work);
        tlb_flush_one_mmuidx_locked(env, mmu_idx, now);
    }

    qemu_spin_unlock(&env_tlb(env)->c.lock);

    cpu_tb_jmp_cache_clear(cpu);

    if (to_clean == ALL_MMUIDX_BITS) {
        qatomic_set(&env_tlb(env)->c.full_flush_count,
                    env_tlb(env)->c.full_flush_count + 1);
    } else {
        qatomic_set(&env_tlb(env)->c.part_flush_count,
                    env_tlb(env)->c.part_flush_count + ctpop16(to_clean));
        if (to_clean != asked) {
            qatomic_set(&env_tlb(env)->c.elide_flush_count,
                        env_tlb(env)->c.elide_flush_count +
                        ctpop16(asked & ~to_clean));
        }
    }
}

void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
    tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);

    if (cpu->created && !qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
                         RUN_ON_CPU_HOST_INT(idxmap));
    } else {
        tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap));
    }
}

void tlb_flush(CPUState *cpu)
{
    tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS);
}

void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus(src_cpu, ALL_MMUIDX_BITS);
}

void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS);
}

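/*
 * Usage sketch for the variants above (illustrative, not from any
 * particular target): the _all_cpus variants fire the flush on every
 * other vCPU with async_run_on_cpu() and then run it locally, while
 * the _synced variants instead queue the source vCPU's own flush as
 * "safe" work.  A guest barrier that must observe completion of the
 * remote invalidations could thus be modelled as:
 *
 *     tlb_flush_by_mmuidx_all_cpus_synced(cs, 1 << mmu_idx);
 *
 * which creates a synchronisation point where all queued flush work
 * finishes before the source vCPU resumes execution.
 */
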
static bool tlb_hit_page_mask_anyprot(CPUTLBEntry *tlb_entry,
                                      target_ulong page, target_ulong mask)
{
    page &= mask;
    mask &= TARGET_PAGE_MASK | TLB_INVALID_MASK;

    return (page == (tlb_entry->addr_read & mask) ||
            page == (tlb_addr_write(tlb_entry) & mask) ||
            page == (tlb_entry->addr_code & mask));
}

static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry,
                                        target_ulong page)
{
    return tlb_hit_page_mask_anyprot(tlb_entry, page, -1);
}

/**
 * tlb_entry_is_empty - return true if the entry is not in use
 * @te: pointer to CPUTLBEntry
 */
static inline bool tlb_entry_is_empty(const CPUTLBEntry *te)
{
    return te->addr_read == -1 && te->addr_write == -1 && te->addr_code == -1;
}

/* Called with tlb_c.lock held */
static bool tlb_flush_entry_mask_locked(CPUTLBEntry *tlb_entry,
                                        target_ulong page,
                                        target_ulong mask)
{
    if (tlb_hit_page_mask_anyprot(tlb_entry, page, mask)) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
        return true;
    }
    return false;
}

static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry,
                                          target_ulong page)
{
    return tlb_flush_entry_mask_locked(tlb_entry, page, -1);
}

/* Called with tlb_c.lock held */
static void tlb_flush_vtlb_page_mask_locked(CPUArchState *env, int mmu_idx,
                                            target_ulong page,
                                            target_ulong mask)
{
    CPUTLBDesc *d = &env_tlb(env)->d[mmu_idx];
    int k;

    assert_cpu_is_self(env_cpu(env));
    for (k = 0; k < CPU_VTLB_SIZE; k++) {
        if (tlb_flush_entry_mask_locked(&d->vtable[k], page, mask)) {
            tlb_n_used_entries_dec(env, mmu_idx);
        }
    }
}

static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx,
                                              target_ulong page)
{
    tlb_flush_vtlb_page_mask_locked(env, mmu_idx, page, -1);
}

static void tlb_flush_page_locked(CPUArchState *env, int midx,
                                  target_ulong page)
{
    target_ulong lp_addr = env_tlb(env)->d[midx].large_page_addr;
    target_ulong lp_mask = env_tlb(env)->d[midx].large_page_mask;

    /* Check if we need to flush due to large pages.  */
    if ((page & lp_mask) == lp_addr) {
        tlb_debug("forcing full flush midx %d ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  midx, lp_addr, lp_mask);
        tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
    } else {
        if (tlb_flush_entry_locked(tlb_entry(env, midx, page), page)) {
            tlb_n_used_entries_dec(env, midx);
        }
        tlb_flush_vtlb_page_locked(env, midx, page);
    }
}

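/*
 * Example of the large-page check above, with hypothetical values: if
 * a 2MB page was recorded at 0x40200000, then large_page_addr ==
 * 0x40200000 and large_page_mask == 0xffe00000.  Flushing any page in
 * [0x40200000, 0x403fffff] satisfies (page & lp_mask) == lp_addr and
 * forces a full flush of this mmu_idx, since any number of
 * TARGET_PAGE_SIZE entries may have been installed within the large
 * page and they are not tracked individually.
 */
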
/**
 * tlb_flush_page_by_mmuidx_async_0:
 * @cpu: cpu on which to flush
 * @addr: page of virtual address to flush
 * @idxmap: set of mmu_idx to flush
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, flush one page
 * at @addr from the tlbs indicated by @idxmap from @cpu.
 */
static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu,
                                             target_ulong addr,
                                             uint16_t idxmap)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("page addr:" TARGET_FMT_lx " mmu_map:0x%x\n", addr, idxmap);

    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if ((idxmap >> mmu_idx) & 1) {
            tlb_flush_page_locked(env, mmu_idx, addr);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);

    tb_flush_jmp_cache(cpu, addr);
}

/**
 * tlb_flush_page_by_mmuidx_async_1:
 * @cpu: cpu on which to flush
 * @data: encoded addr + idxmap
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, called through
 * async_run_on_cpu.  The idxmap parameter is encoded in the page
 * offset of the target_ptr field.  This limits the set of mmu_idx
 * that can be passed via this method.
 */
static void tlb_flush_page_by_mmuidx_async_1(CPUState *cpu,
                                             run_on_cpu_data data)
{
    target_ulong addr_and_idxmap = (target_ulong) data.target_ptr;
    target_ulong addr = addr_and_idxmap & TARGET_PAGE_MASK;
    uint16_t idxmap = addr_and_idxmap & ~TARGET_PAGE_MASK;

    tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
}

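/*
 * A worked example of the target_ptr encoding decoded by
 * tlb_flush_page_by_mmuidx_async_1() above (illustrative values,
 * assuming TARGET_PAGE_BITS == 12): flushing page 0x7f0000 for
 * idxmap 0x3 is encoded as 0x7f0000 | 0x3 == 0x7f0003; the worker
 * recovers addr == 0x7f0000 and idxmap == 0x3.  This only works while
 * idxmap < TARGET_PAGE_SIZE, which the callers check before choosing
 * this path.
 */
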
typedef struct {
    target_ulong addr;
    uint16_t idxmap;
} TLBFlushPageByMMUIdxData;

/**
 * tlb_flush_page_by_mmuidx_async_2:
 * @cpu: cpu on which to flush
 * @data: allocated addr + idxmap
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, called through
 * async_run_on_cpu.  The addr+idxmap parameters are stored in a
 * TLBFlushPageByMMUIdxData structure that has been allocated
 * specifically for this helper.  Free the structure when done.
 */
static void tlb_flush_page_by_mmuidx_async_2(CPUState *cpu,
                                             run_on_cpu_data data)
{
    TLBFlushPageByMMUIdxData *d = data.host_ptr;

    tlb_flush_page_by_mmuidx_async_0(cpu, d->addr, d->idxmap);
    g_free(d);
}

void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
{
    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    if (qemu_cpu_is_self(cpu)) {
        tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
    } else if (idxmap < TARGET_PAGE_SIZE) {
        /*
         * Most targets have only a few mmu_idx.  In the case where
         * we can stuff idxmap into the low TARGET_PAGE_BITS, avoid
         * allocating memory for this operation.
         */
        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        TLBFlushPageByMMUIdxData *d = g_new(TLBFlushPageByMMUIdxData, 1);

        /* Otherwise allocate a structure, freed by the worker.  */
        d->addr = addr;
        d->idxmap = idxmap;
        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_2,
                         RUN_ON_CPU_HOST_PTR(d));
    }
}

void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
    tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS);
}

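/*
 * A typical call from a target's invalidate-by-VA handler might look
 * like this (a sketch; mmu_idx_a and mmu_idx_b are hypothetical):
 *
 *     tlb_flush_page_by_mmuidx(env_cpu(env), vaddr,
 *                              (1 << mmu_idx_a) | (1 << mmu_idx_b));
 *
 * Note the three dispatch cases above: run inline when already on the
 * vCPU's own thread, pack addr+idxmap into a single target_ptr when
 * idxmap fits in the page offset, and otherwise heap-allocate a
 * TLBFlushPageByMMUIdxData that the worker frees.
 */
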
void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr,
                                       uint16_t idxmap)
{
    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    /*
     * Allocate memory to hold addr+idxmap only when needed.
     * See tlb_flush_page_by_mmuidx for details.
     */
    if (idxmap < TARGET_PAGE_SIZE) {
        flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        CPUState *dst_cpu;

        /* Allocate a separate data block for each destination cpu.  */
        CPU_FOREACH(dst_cpu) {
            if (dst_cpu != src_cpu) {
                TLBFlushPageByMMUIdxData *d
                    = g_new(TLBFlushPageByMMUIdxData, 1);

                d->addr = addr;
                d->idxmap = idxmap;
                async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
                                 RUN_ON_CPU_HOST_PTR(d));
            }
        }
    }

    tlb_flush_page_by_mmuidx_async_0(src_cpu, addr, idxmap);
}

void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
{
    tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS);
}

void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                              target_ulong addr,
                                              uint16_t idxmap)
{
    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    /*
     * Allocate memory to hold addr+idxmap only when needed.
     * See tlb_flush_page_by_mmuidx for details.
     */
    if (idxmap < TARGET_PAGE_SIZE) {
        flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
        async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                              RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        CPUState *dst_cpu;
        TLBFlushPageByMMUIdxData *d;

        /* Allocate a separate data block for each destination cpu.  */
        CPU_FOREACH(dst_cpu) {
            if (dst_cpu != src_cpu) {
                d = g_new(TLBFlushPageByMMUIdxData, 1);
                d->addr = addr;
                d->idxmap = idxmap;
                async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
                                 RUN_ON_CPU_HOST_PTR(d));
            }
        }

        d = g_new(TLBFlushPageByMMUIdxData, 1);
        d->addr = addr;
        d->idxmap = idxmap;
        async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_2,
                              RUN_ON_CPU_HOST_PTR(d));
    }
}

void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr)
{
    tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
}

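/*
 * Note for tlb_flush_range_locked() below: large_page_mask is always a
 * run of 1s from the msb (e.g. 0xffe00000 for a 2MB page), which is
 * why a single comparison of the last byte of the range against
 * large_page_addr serves as the large-page overlap test.  Illustrative
 * values: with lp_addr == 0x40200000 and lp_mask == 0xffe00000, a
 * flush of addr == 0x401ff000, len == 0x2000 ends at 0x40200fff,
 * which masks to lp_addr, so the whole mmu_idx is flushed.
 */
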
static void tlb_flush_range_locked(CPUArchState *env, int midx,
                                   target_ulong addr, target_ulong len,
                                   unsigned bits)
{
    CPUTLBDesc *d = &env_tlb(env)->d[midx];
    CPUTLBDescFast *f = &env_tlb(env)->f[midx];
    target_ulong mask = MAKE_64BIT_MASK(0, bits);

    /*
     * If @bits is smaller than the tlb size, there may be multiple entries
     * within the TLB; otherwise all addresses that match under @mask hit
     * the same TLB entry.
     * TODO: Perhaps allow bits to be a few bits less than the size.
     * For now, just flush the entire TLB.
     *
     * If @len is larger than the tlb size, then it will take longer to
     * test all of the entries in the TLB than it will to flush it all.
     */
    if (mask < f->mask || len > f->mask) {
        tlb_debug("forcing full flush midx %d ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx "+" TARGET_FMT_lx ")\n",
                  midx, addr, mask, len);
        tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
        return;
    }

    /*
     * Check if we need to flush due to large pages.
     * Because large_page_mask contains all 1's from the msb,
     * we only need to test the end of the range.
     */
    if (((addr + len - 1) & d->large_page_mask) == d->large_page_addr) {
        tlb_debug("forcing full flush midx %d ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  midx, d->large_page_addr, d->large_page_mask);
        tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
        return;
    }

    for (target_ulong i = 0; i < len; i += TARGET_PAGE_SIZE) {
        target_ulong page = addr + i;
        CPUTLBEntry *entry = tlb_entry(env, midx, page);

        if (tlb_flush_entry_mask_locked(entry, page, mask)) {
            tlb_n_used_entries_dec(env, midx);
        }
        tlb_flush_vtlb_page_mask_locked(env, midx, page, mask);
    }
}

typedef struct {
    target_ulong addr;
    target_ulong len;
    uint16_t idxmap;
    uint16_t bits;
} TLBFlushRangeData;

static void tlb_flush_range_by_mmuidx_async_0(CPUState *cpu,
                                              TLBFlushRangeData d)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("range:" TARGET_FMT_lx "/%u+" TARGET_FMT_lx " mmu_map:0x%x\n",
              d.addr, d.bits, d.len, d.idxmap);

    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if ((d.idxmap >> mmu_idx) & 1) {
            tlb_flush_range_locked(env, mmu_idx, d.addr, d.len, d.bits);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);

    /*
     * If the length is larger than the jump cache size, then it will take
     * longer to clear each entry individually than it will to clear it all.
     */
    if (d.len >= (TARGET_PAGE_SIZE * TB_JMP_CACHE_SIZE)) {
        cpu_tb_jmp_cache_clear(cpu);
        return;
    }

    for (target_ulong i = 0; i < d.len; i += TARGET_PAGE_SIZE) {
        tb_flush_jmp_cache(cpu, d.addr + i);
    }
}

static void tlb_flush_range_by_mmuidx_async_1(CPUState *cpu,
                                              run_on_cpu_data data)
{
    TLBFlushRangeData *d = data.host_ptr;
    tlb_flush_range_by_mmuidx_async_0(cpu, *d);
    g_free(d);
}

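/*
 * The degenerate cases handled at the top of tlb_flush_range_by_mmuidx()
 * below, with illustrative numbers for a 64-bit target with 4K pages:
 * flushing len == 0x1000 with bits == 64 is exactly
 * tlb_flush_page_by_mmuidx(); flushing with bits == 10 when
 * TARGET_PAGE_BITS == 12 leaves no page-number bits significant, so
 * every page matches and the call devolves to tlb_flush_by_mmuidx().
 */
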
void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr,
                               target_ulong len, uint16_t idxmap,
                               unsigned bits)
{
    TLBFlushRangeData d;

    /*
     * If all bits are significant, and len is small,
     * this devolves to tlb_flush_page.
     */
    if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
        tlb_flush_page_by_mmuidx(cpu, addr, idxmap);
        return;
    }
    /* If no page bits are significant, this devolves to tlb_flush. */
    if (bits < TARGET_PAGE_BITS) {
        tlb_flush_by_mmuidx(cpu, idxmap);
        return;
    }

    /* This should already be page aligned */
    d.addr = addr & TARGET_PAGE_MASK;
    d.len = len;
    d.idxmap = idxmap;
    d.bits = bits;

    if (qemu_cpu_is_self(cpu)) {
        tlb_flush_range_by_mmuidx_async_0(cpu, d);
    } else {
        /* Otherwise allocate a structure, freed by the worker.  */
        TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
        async_run_on_cpu(cpu, tlb_flush_range_by_mmuidx_async_1,
                         RUN_ON_CPU_HOST_PTR(p));
    }
}

void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr,
                                   uint16_t idxmap, unsigned bits)
{
    tlb_flush_range_by_mmuidx(cpu, addr, TARGET_PAGE_SIZE, idxmap, bits);
}

void tlb_flush_range_by_mmuidx_all_cpus(CPUState *src_cpu,
                                        target_ulong addr, target_ulong len,
                                        uint16_t idxmap, unsigned bits)
{
    TLBFlushRangeData d;
    CPUState *dst_cpu;

    /*
     * If all bits are significant, and len is small,
     * this devolves to tlb_flush_page.
     */
    if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
        tlb_flush_page_by_mmuidx_all_cpus(src_cpu, addr, idxmap);
        return;
    }
    /* If no page bits are significant, this devolves to tlb_flush. */
    if (bits < TARGET_PAGE_BITS) {
        tlb_flush_by_mmuidx_all_cpus(src_cpu, idxmap);
        return;
    }

    /* This should already be page aligned */
    d.addr = addr & TARGET_PAGE_MASK;
    d.len = len;
    d.idxmap = idxmap;
    d.bits = bits;

    /* Allocate a separate data block for each destination cpu.  */
    CPU_FOREACH(dst_cpu) {
        if (dst_cpu != src_cpu) {
            TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
            async_run_on_cpu(dst_cpu,
                             tlb_flush_range_by_mmuidx_async_1,
                             RUN_ON_CPU_HOST_PTR(p));
        }
    }

    tlb_flush_range_by_mmuidx_async_0(src_cpu, d);
}

void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu,
                                            target_ulong addr,
                                            uint16_t idxmap, unsigned bits)
{
    tlb_flush_range_by_mmuidx_all_cpus(src_cpu, addr, TARGET_PAGE_SIZE,
                                       idxmap, bits);
}

void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                               target_ulong addr,
                                               target_ulong len,
                                               uint16_t idxmap,
                                               unsigned bits)
{
    TLBFlushRangeData d, *p;
    CPUState *dst_cpu;

    /*
     * If all bits are significant, and len is small,
     * this devolves to tlb_flush_page.
     */
    if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
        tlb_flush_page_by_mmuidx_all_cpus_synced(src_cpu, addr, idxmap);
        return;
    }
    /* If no page bits are significant, this devolves to tlb_flush. */
    if (bits < TARGET_PAGE_BITS) {
        tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, idxmap);
        return;
    }

    /* This should already be page aligned */
    d.addr = addr & TARGET_PAGE_MASK;
    d.len = len;
    d.idxmap = idxmap;
    d.bits = bits;

    /* Allocate a separate data block for each destination cpu.  */
    CPU_FOREACH(dst_cpu) {
        if (dst_cpu != src_cpu) {
            p = g_memdup(&d, sizeof(d));
            async_run_on_cpu(dst_cpu, tlb_flush_range_by_mmuidx_async_1,
                             RUN_ON_CPU_HOST_PTR(p));
        }
    }

    p = g_memdup(&d, sizeof(d));
    async_safe_run_on_cpu(src_cpu, tlb_flush_range_by_mmuidx_async_1,
                          RUN_ON_CPU_HOST_PTR(p));
}

void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                                   target_ulong addr,
                                                   uint16_t idxmap,
                                                   unsigned bits)
{
    tlb_flush_range_by_mmuidx_all_cpus_synced(src_cpu, addr, TARGET_PAGE_SIZE,
                                              idxmap, bits);
}

/* update the TLBs so that writes to code in the physical page 'ram_addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
                                             DIRTY_MEMORY_CODE);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
void tlb_unprotect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}


/*
 * Dirty write flag handling
 *
 * When the TCG code writes to a location it looks up the address in
 * the TLB and uses that data to compute the final address. If any of
 * the lower bits of the address are set then the slow path is forced.
 * There are a number of reasons to do this but for normal RAM the
 * most usual is detecting writes to code regions which may invalidate
 * generated code.
 *
 * Other vCPUs might be reading their TLBs during guest execution, so we update
 * te->addr_write with qatomic_set. We don't need to worry about this for
 * oversized guests as MTTCG is disabled for them.
 *
 * Called with tlb_c.lock held.
 */

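/*
 * The slow-path flags mentioned above (TLB_INVALID_MASK, TLB_MMIO,
 * TLB_NOTDIRTY, TLB_DISCARD_WRITE; see exec/cpu-all.h) live in the low
 * bits of addr_write, below TARGET_PAGE_BITS, so the fast path can
 * test them with the same masked compare it uses to match the page.
 * Illustrative example: an entry holding 0x7f0000 | TLB_NOTDIRTY no
 * longer compares equal to the page-aligned address 0x7f0000, forcing
 * stores through the slow path that performs dirty tracking.
 */
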
static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
                                         uintptr_t start, uintptr_t length)
{
    uintptr_t addr = tlb_entry->addr_write;

    if ((addr & (TLB_INVALID_MASK | TLB_MMIO |
                 TLB_DISCARD_WRITE | TLB_NOTDIRTY)) == 0) {
        addr &= TARGET_PAGE_MASK;
        addr += tlb_entry->addend;
        if ((addr - start) < length) {
#if TCG_OVERSIZED_GUEST
            tlb_entry->addr_write |= TLB_NOTDIRTY;
#else
            qatomic_set(&tlb_entry->addr_write,
                        tlb_entry->addr_write | TLB_NOTDIRTY);
#endif
        }
    }
}

/*
 * Called with tlb_c.lock held.
 * Called only from the vCPU context, i.e. the TLB's owner thread.
 */
static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s)
{
    *d = *s;
}

/* This is a cross vCPU call (i.e. another vCPU resetting the flags of
 * the target vCPU).
 * We must take tlb_c.lock to avoid racing with another vCPU update. The only
 * thing actually updated is the target TLB entry ->addr_write flags.
 */
void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
{
    CPUArchState *env;

    int mmu_idx;

    env = cpu->env_ptr;
    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        unsigned int i;
        unsigned int n = tlb_n_entries(&env_tlb(env)->f[mmu_idx]);

        for (i = 0; i < n; i++) {
            tlb_reset_dirty_range_locked(&env_tlb(env)->f[mmu_idx].table[i],
                                         start1, length);
        }

        for (i = 0; i < CPU_VTLB_SIZE; i++) {
            tlb_reset_dirty_range_locked(&env_tlb(env)->d[mmu_idx].vtable[i],
                                         start1, length);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);
}

Cota target_ulong vaddr) 1044d9bb58e5SYang Zhong { 1045d9bb58e5SYang Zhong if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) { 1046d9bb58e5SYang Zhong tlb_entry->addr_write = vaddr; 1047d9bb58e5SYang Zhong } 1048d9bb58e5SYang Zhong } 1049d9bb58e5SYang Zhong 1050d9bb58e5SYang Zhong /* update the TLB corresponding to virtual page vaddr 1051d9bb58e5SYang Zhong so that it is no longer dirty */ 1052d9bb58e5SYang Zhong void tlb_set_dirty(CPUState *cpu, target_ulong vaddr) 1053d9bb58e5SYang Zhong { 1054d9bb58e5SYang Zhong CPUArchState *env = cpu->env_ptr; 1055d9bb58e5SYang Zhong int mmu_idx; 1056d9bb58e5SYang Zhong 1057d9bb58e5SYang Zhong assert_cpu_is_self(cpu); 1058d9bb58e5SYang Zhong 1059d9bb58e5SYang Zhong vaddr &= TARGET_PAGE_MASK; 1060a40ec84eSRichard Henderson qemu_spin_lock(&env_tlb(env)->c.lock); 1061d9bb58e5SYang Zhong for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { 1062383beda9SRichard Henderson tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, vaddr), vaddr); 1063d9bb58e5SYang Zhong } 1064d9bb58e5SYang Zhong 1065d9bb58e5SYang Zhong for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { 1066d9bb58e5SYang Zhong int k; 1067d9bb58e5SYang Zhong for (k = 0; k < CPU_VTLB_SIZE; k++) { 1068a40ec84eSRichard Henderson tlb_set_dirty1_locked(&env_tlb(env)->d[mmu_idx].vtable[k], vaddr); 1069d9bb58e5SYang Zhong } 1070d9bb58e5SYang Zhong } 1071a40ec84eSRichard Henderson qemu_spin_unlock(&env_tlb(env)->c.lock); 1072d9bb58e5SYang Zhong } 1073d9bb58e5SYang Zhong 1074d9bb58e5SYang Zhong /* Our TLB does not support large pages, so remember the area covered by 1075d9bb58e5SYang Zhong large pages and trigger a full TLB flush if these are invalidated. */ 10761308e026SRichard Henderson static void tlb_add_large_page(CPUArchState *env, int mmu_idx, 10771308e026SRichard Henderson target_ulong vaddr, target_ulong size) 1078d9bb58e5SYang Zhong { 1079a40ec84eSRichard Henderson target_ulong lp_addr = env_tlb(env)->d[mmu_idx].large_page_addr; 10801308e026SRichard Henderson target_ulong lp_mask = ~(size - 1); 1081d9bb58e5SYang Zhong 10821308e026SRichard Henderson if (lp_addr == (target_ulong)-1) { 10831308e026SRichard Henderson /* No previous large page. */ 10841308e026SRichard Henderson lp_addr = vaddr; 10851308e026SRichard Henderson } else { 1086d9bb58e5SYang Zhong /* Extend the existing region to include the new page. 10871308e026SRichard Henderson This is a compromise between unnecessary flushes and 10881308e026SRichard Henderson the cost of maintaining a full variable size TLB. */ 1089a40ec84eSRichard Henderson lp_mask &= env_tlb(env)->d[mmu_idx].large_page_mask; 10901308e026SRichard Henderson while (((lp_addr ^ vaddr) & lp_mask) != 0) { 10911308e026SRichard Henderson lp_mask <<= 1; 1092d9bb58e5SYang Zhong } 10931308e026SRichard Henderson } 1094a40ec84eSRichard Henderson env_tlb(env)->d[mmu_idx].large_page_addr = lp_addr & lp_mask; 1095a40ec84eSRichard Henderson env_tlb(env)->d[mmu_idx].large_page_mask = lp_mask; 1096d9bb58e5SYang Zhong } 1097d9bb58e5SYang Zhong 1098d9bb58e5SYang Zhong /* Add a new TLB entry. At most one entry for a given virtual address 1099d9bb58e5SYang Zhong * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the 1100d9bb58e5SYang Zhong * supplied size is only used by tlb_flush_page. 1101d9bb58e5SYang Zhong * 1102d9bb58e5SYang Zhong * Called from TCG-generated code, which is under an RCU read-side 1103d9bb58e5SYang Zhong * critical section. 
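 *
 * A hedged illustration (not a quote of any particular target; names such
 * as cs and mmu_idx stand for the caller's locals): a target's tlb_fill
 * hook, once its page table walk has produced a physical address and
 * protection bits, typically finishes with a call like
 *
 *     tlb_set_page_with_attrs(cs, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
 *                             PAGE_READ | PAGE_WRITE | PAGE_EXEC,
 *                             mmu_idx, TARGET_PAGE_SIZE);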
1104d9bb58e5SYang Zhong */ 1105d9bb58e5SYang Zhong void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr, 1106d9bb58e5SYang Zhong hwaddr paddr, MemTxAttrs attrs, int prot, 1107d9bb58e5SYang Zhong int mmu_idx, target_ulong size) 1108d9bb58e5SYang Zhong { 1109d9bb58e5SYang Zhong CPUArchState *env = cpu->env_ptr; 1110a40ec84eSRichard Henderson CPUTLB *tlb = env_tlb(env); 1111a40ec84eSRichard Henderson CPUTLBDesc *desc = &tlb->d[mmu_idx]; 1112d9bb58e5SYang Zhong MemoryRegionSection *section; 1113d9bb58e5SYang Zhong unsigned int index; 1114d9bb58e5SYang Zhong target_ulong address; 11158f5db641SRichard Henderson target_ulong write_address; 1116d9bb58e5SYang Zhong uintptr_t addend; 111768fea038SRichard Henderson CPUTLBEntry *te, tn; 111855df6fcfSPeter Maydell hwaddr iotlb, xlat, sz, paddr_page; 111955df6fcfSPeter Maydell target_ulong vaddr_page; 1120d9bb58e5SYang Zhong int asidx = cpu_asidx_from_attrs(cpu, attrs); 112150b107c5SRichard Henderson int wp_flags; 11228f5db641SRichard Henderson bool is_ram, is_romd; 1123d9bb58e5SYang Zhong 1124d9bb58e5SYang Zhong assert_cpu_is_self(cpu); 112555df6fcfSPeter Maydell 11261308e026SRichard Henderson if (size <= TARGET_PAGE_SIZE) { 112755df6fcfSPeter Maydell sz = TARGET_PAGE_SIZE; 112855df6fcfSPeter Maydell } else { 11291308e026SRichard Henderson tlb_add_large_page(env, mmu_idx, vaddr, size); 1130d9bb58e5SYang Zhong sz = size; 113155df6fcfSPeter Maydell } 113255df6fcfSPeter Maydell vaddr_page = vaddr & TARGET_PAGE_MASK; 113355df6fcfSPeter Maydell paddr_page = paddr & TARGET_PAGE_MASK; 113455df6fcfSPeter Maydell 113555df6fcfSPeter Maydell section = address_space_translate_for_iotlb(cpu, asidx, paddr_page, 113655df6fcfSPeter Maydell &xlat, &sz, attrs, &prot); 1137d9bb58e5SYang Zhong assert(sz >= TARGET_PAGE_SIZE); 1138d9bb58e5SYang Zhong 1139d9bb58e5SYang Zhong tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx 1140d9bb58e5SYang Zhong " prot=%x idx=%d\n", 1141d9bb58e5SYang Zhong vaddr, paddr, prot, mmu_idx); 1142d9bb58e5SYang Zhong 114355df6fcfSPeter Maydell address = vaddr_page; 114455df6fcfSPeter Maydell if (size < TARGET_PAGE_SIZE) { 114530d7e098SRichard Henderson /* Repeat the MMU check and TLB fill on every access. */ 114630d7e098SRichard Henderson address |= TLB_INVALID_MASK; 114755df6fcfSPeter Maydell } 1148a26fc6f5STony Nguyen if (attrs.byte_swap) { 11495b87b3e6SRichard Henderson address |= TLB_BSWAP; 1150a26fc6f5STony Nguyen } 11518f5db641SRichard Henderson 11528f5db641SRichard Henderson is_ram = memory_region_is_ram(section->mr); 11538f5db641SRichard Henderson is_romd = memory_region_is_romd(section->mr); 11548f5db641SRichard Henderson 11558f5db641SRichard Henderson if (is_ram || is_romd) { 11568f5db641SRichard Henderson /* RAM and ROMD both have associated host memory. */ 1157d9bb58e5SYang Zhong addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat; 11588f5db641SRichard Henderson } else { 11598f5db641SRichard Henderson /* I/O does not; force the host address to NULL. */ 11608f5db641SRichard Henderson addend = 0; 1161d9bb58e5SYang Zhong } 1162d9bb58e5SYang Zhong 11638f5db641SRichard Henderson write_address = address; 11648f5db641SRichard Henderson if (is_ram) { 11658f5db641SRichard Henderson iotlb = memory_region_get_ram_addr(section->mr) + xlat; 11668f5db641SRichard Henderson /* 11678f5db641SRichard Henderson * Computing is_clean is expensive; avoid all that unless 11688f5db641SRichard Henderson * the page is actually writable. 
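 * (TLB_NOTDIRTY sends the first store through the slow path so that
 * notdirty_write() can do its bookkeeping; TLB_DISCARD_WRITE makes
 * stores to a read-only section be silently discarded.)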
11698f5db641SRichard Henderson */ 11708f5db641SRichard Henderson if (prot & PAGE_WRITE) { 11718f5db641SRichard Henderson if (section->readonly) { 11728f5db641SRichard Henderson write_address |= TLB_DISCARD_WRITE; 11738f5db641SRichard Henderson } else if (cpu_physical_memory_is_clean(iotlb)) { 11748f5db641SRichard Henderson write_address |= TLB_NOTDIRTY; 11758f5db641SRichard Henderson } 11768f5db641SRichard Henderson } 11778f5db641SRichard Henderson } else { 11788f5db641SRichard Henderson /* I/O or ROMD */ 11798f5db641SRichard Henderson iotlb = memory_region_section_get_iotlb(cpu, section) + xlat; 11808f5db641SRichard Henderson /* 11818f5db641SRichard Henderson * Writes to romd devices must go through MMIO to enable write. 11828f5db641SRichard Henderson * Reads to romd devices go through the ram_ptr found above, 11838f5db641SRichard Henderson * but of course reads to I/O must go through MMIO. 11848f5db641SRichard Henderson */ 11858f5db641SRichard Henderson write_address |= TLB_MMIO; 11868f5db641SRichard Henderson if (!is_romd) { 11878f5db641SRichard Henderson address = write_address; 11888f5db641SRichard Henderson } 11898f5db641SRichard Henderson } 11908f5db641SRichard Henderson 119150b107c5SRichard Henderson wp_flags = cpu_watchpoint_address_matches(cpu, vaddr_page, 119250b107c5SRichard Henderson TARGET_PAGE_SIZE); 1193d9bb58e5SYang Zhong 1194383beda9SRichard Henderson index = tlb_index(env, mmu_idx, vaddr_page); 1195383beda9SRichard Henderson te = tlb_entry(env, mmu_idx, vaddr_page); 1196d9bb58e5SYang Zhong 119768fea038SRichard Henderson /* 119871aec354SEmilio G. Cota * Hold the TLB lock for the rest of the function. We could acquire/release 119971aec354SEmilio G. Cota * the lock several times in the function, but it is faster to amortize the 120071aec354SEmilio G. Cota * acquisition cost by acquiring it just once. Note that this leads to 120171aec354SEmilio G. Cota * a longer critical section, but this is not a concern since the TLB lock 120271aec354SEmilio G. Cota * is unlikely to be contended. 120371aec354SEmilio G. Cota */ 1204a40ec84eSRichard Henderson qemu_spin_lock(&tlb->c.lock); 120571aec354SEmilio G. Cota 12063d1523ceSRichard Henderson /* Note that the tlb is no longer clean. */ 1207a40ec84eSRichard Henderson tlb->c.dirty |= 1 << mmu_idx; 12083d1523ceSRichard Henderson 120971aec354SEmilio G. Cota /* Make sure there's no cached translation for the new page. */ 121071aec354SEmilio G. Cota tlb_flush_vtlb_page_locked(env, mmu_idx, vaddr_page); 121171aec354SEmilio G. Cota 121271aec354SEmilio G. Cota /* 121368fea038SRichard Henderson * Only evict the old entry to the victim tlb if it's for a 121468fea038SRichard Henderson * different page; otherwise just overwrite the stale data. 121568fea038SRichard Henderson */ 12163cea94bbSEmilio G. Cota if (!tlb_hit_page_anyprot(te, vaddr_page) && !tlb_entry_is_empty(te)) { 1217a40ec84eSRichard Henderson unsigned vidx = desc->vindex++ % CPU_VTLB_SIZE; 1218a40ec84eSRichard Henderson CPUTLBEntry *tv = &desc->vtable[vidx]; 121968fea038SRichard Henderson 122068fea038SRichard Henderson /* Evict the old entry into the victim tlb. */ 122171aec354SEmilio G. Cota copy_tlb_helper_locked(tv, te); 1222*25d3ec58SRichard Henderson desc->vfulltlb[vidx] = desc->fulltlb[index]; 122386e1eff8SEmilio G. 
Cota tlb_n_used_entries_dec(env, mmu_idx); 122468fea038SRichard Henderson } 1225d9bb58e5SYang Zhong 1226d9bb58e5SYang Zhong /* refill the tlb */ 1227ace41090SPeter Maydell /* 1228ace41090SPeter Maydell * At this point iotlb contains a physical section number in the lower 1229ace41090SPeter Maydell * TARGET_PAGE_BITS, and either 12308f5db641SRichard Henderson * + the ram_addr_t of the page base of the target RAM (RAM) 12318f5db641SRichard Henderson * + the offset within section->mr of the page base (I/O, ROMD) 123255df6fcfSPeter Maydell * We subtract the vaddr_page (which is page aligned and thus won't 1233ace41090SPeter Maydell * disturb the low bits) to give an offset which can be added to the 1234ace41090SPeter Maydell * (non-page-aligned) vaddr of the eventual memory access to get 1235ace41090SPeter Maydell * the MemoryRegion offset for the access. Note that the vaddr we 1236ace41090SPeter Maydell * subtract here is that of the page base, and not the same as the 1237ace41090SPeter Maydell * vaddr we add back in io_readx()/io_writex()/get_page_addr_code(). 1238ace41090SPeter Maydell */ 1239*25d3ec58SRichard Henderson desc->fulltlb[index].xlat_section = iotlb - vaddr_page; 1240*25d3ec58SRichard Henderson desc->fulltlb[index].attrs = attrs; 1241d9bb58e5SYang Zhong 1242d9bb58e5SYang Zhong /* Now calculate the new entry */ 124355df6fcfSPeter Maydell tn.addend = addend - vaddr_page; 1244d9bb58e5SYang Zhong if (prot & PAGE_READ) { 1245d9bb58e5SYang Zhong tn.addr_read = address; 124650b107c5SRichard Henderson if (wp_flags & BP_MEM_READ) { 124750b107c5SRichard Henderson tn.addr_read |= TLB_WATCHPOINT; 124850b107c5SRichard Henderson } 1249d9bb58e5SYang Zhong } else { 1250d9bb58e5SYang Zhong tn.addr_read = -1; 1251d9bb58e5SYang Zhong } 1252d9bb58e5SYang Zhong 1253d9bb58e5SYang Zhong if (prot & PAGE_EXEC) { 12548f5db641SRichard Henderson tn.addr_code = address; 1255d9bb58e5SYang Zhong } else { 1256d9bb58e5SYang Zhong tn.addr_code = -1; 1257d9bb58e5SYang Zhong } 1258d9bb58e5SYang Zhong 1259d9bb58e5SYang Zhong tn.addr_write = -1; 1260d9bb58e5SYang Zhong if (prot & PAGE_WRITE) { 12618f5db641SRichard Henderson tn.addr_write = write_address; 1262f52bfb12SDavid Hildenbrand if (prot & PAGE_WRITE_INV) { 1263f52bfb12SDavid Hildenbrand tn.addr_write |= TLB_INVALID_MASK; 1264f52bfb12SDavid Hildenbrand } 126550b107c5SRichard Henderson if (wp_flags & BP_MEM_WRITE) { 126650b107c5SRichard Henderson tn.addr_write |= TLB_WATCHPOINT; 126750b107c5SRichard Henderson } 1268d9bb58e5SYang Zhong } 1269d9bb58e5SYang Zhong 127071aec354SEmilio G. Cota copy_tlb_helper_locked(te, &tn); 127186e1eff8SEmilio G. Cota tlb_n_used_entries_inc(env, mmu_idx); 1272a40ec84eSRichard Henderson qemu_spin_unlock(&tlb->c.lock); 1273d9bb58e5SYang Zhong } 1274d9bb58e5SYang Zhong 1275d9bb58e5SYang Zhong /* Add a new TLB entry, but without specifying the memory 1276d9bb58e5SYang Zhong * transaction attributes to be used. 1277d9bb58e5SYang Zhong */ 1278d9bb58e5SYang Zhong void tlb_set_page(CPUState *cpu, target_ulong vaddr, 1279d9bb58e5SYang Zhong hwaddr paddr, int prot, 1280d9bb58e5SYang Zhong int mmu_idx, target_ulong size) 1281d9bb58e5SYang Zhong { 1282d9bb58e5SYang Zhong tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED, 1283d9bb58e5SYang Zhong prot, mmu_idx, size); 1284d9bb58e5SYang Zhong } 1285d9bb58e5SYang Zhong 1286c319dc13SRichard Henderson /* 1287c319dc13SRichard Henderson * Note: tlb_fill() can trigger a resize of the TLB. 
This means that all of the 1288c319dc13SRichard Henderson * caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must 1289c319dc13SRichard Henderson * be discarded and looked up again (e.g. via tlb_entry()). 1290c319dc13SRichard Henderson */ 1291c319dc13SRichard Henderson static void tlb_fill(CPUState *cpu, target_ulong addr, int size, 1292c319dc13SRichard Henderson MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) 1293c319dc13SRichard Henderson { 1294c319dc13SRichard Henderson bool ok; 1295c319dc13SRichard Henderson 1296c319dc13SRichard Henderson /* 1297c319dc13SRichard Henderson * This is not a probe, so only valid return is success; failure 1298c319dc13SRichard Henderson * should result in exception + longjmp to the cpu loop. 1299c319dc13SRichard Henderson */ 13008810ee2aSAlex Bennée ok = cpu->cc->tcg_ops->tlb_fill(cpu, addr, size, 1301e124536fSEduardo Habkost access_type, mmu_idx, false, retaddr); 1302c319dc13SRichard Henderson assert(ok); 1303c319dc13SRichard Henderson } 1304c319dc13SRichard Henderson 130578271684SClaudio Fontana static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr, 130678271684SClaudio Fontana MMUAccessType access_type, 130778271684SClaudio Fontana int mmu_idx, uintptr_t retaddr) 130878271684SClaudio Fontana { 13098810ee2aSAlex Bennée cpu->cc->tcg_ops->do_unaligned_access(cpu, addr, access_type, 13108810ee2aSAlex Bennée mmu_idx, retaddr); 131178271684SClaudio Fontana } 131278271684SClaudio Fontana 131378271684SClaudio Fontana static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr, 131478271684SClaudio Fontana vaddr addr, unsigned size, 131578271684SClaudio Fontana MMUAccessType access_type, 131678271684SClaudio Fontana int mmu_idx, MemTxAttrs attrs, 131778271684SClaudio Fontana MemTxResult response, 131878271684SClaudio Fontana uintptr_t retaddr) 131978271684SClaudio Fontana { 132078271684SClaudio Fontana CPUClass *cc = CPU_GET_CLASS(cpu); 132178271684SClaudio Fontana 132278271684SClaudio Fontana if (!cpu->ignore_memory_transaction_failures && 132378271684SClaudio Fontana cc->tcg_ops->do_transaction_failed) { 132478271684SClaudio Fontana cc->tcg_ops->do_transaction_failed(cpu, physaddr, addr, size, 132578271684SClaudio Fontana access_type, mmu_idx, attrs, 132678271684SClaudio Fontana response, retaddr); 132778271684SClaudio Fontana } 132878271684SClaudio Fontana } 132978271684SClaudio Fontana 1330*25d3ec58SRichard Henderson static uint64_t io_readx(CPUArchState *env, CPUTLBEntryFull *full, 1331f1be3696SRichard Henderson int mmu_idx, target_ulong addr, uintptr_t retaddr, 1332be5c4787STony Nguyen MMUAccessType access_type, MemOp op) 1333d9bb58e5SYang Zhong { 133429a0af61SRichard Henderson CPUState *cpu = env_cpu(env); 13352d54f194SPeter Maydell hwaddr mr_offset; 13362d54f194SPeter Maydell MemoryRegionSection *section; 13372d54f194SPeter Maydell MemoryRegion *mr; 1338d9bb58e5SYang Zhong uint64_t val; 1339d9bb58e5SYang Zhong bool locked = false; 134004e3aabdSPeter Maydell MemTxResult r; 1341d9bb58e5SYang Zhong 1342*25d3ec58SRichard Henderson section = iotlb_to_section(cpu, full->xlat_section, full->attrs); 13432d54f194SPeter Maydell mr = section->mr; 1344*25d3ec58SRichard Henderson mr_offset = (full->xlat_section & TARGET_PAGE_MASK) + addr; 1345d9bb58e5SYang Zhong cpu->mem_io_pc = retaddr; 134608565552SRichard Henderson if (!cpu->can_do_io) { 1347d9bb58e5SYang Zhong cpu_io_recompile(cpu, retaddr); 1348d9bb58e5SYang Zhong } 1349d9bb58e5SYang Zhong 135041744954SPhilippe Mathieu-Daudé if 
(!qemu_mutex_iothread_locked()) { 1351d9bb58e5SYang Zhong qemu_mutex_lock_iothread(); 1352d9bb58e5SYang Zhong locked = true; 1353d9bb58e5SYang Zhong } 1354*25d3ec58SRichard Henderson r = memory_region_dispatch_read(mr, mr_offset, &val, op, full->attrs); 135504e3aabdSPeter Maydell if (r != MEMTX_OK) { 13562d54f194SPeter Maydell hwaddr physaddr = mr_offset + 13572d54f194SPeter Maydell section->offset_within_address_space - 13582d54f194SPeter Maydell section->offset_within_region; 13592d54f194SPeter Maydell 1360be5c4787STony Nguyen cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), access_type, 1361*25d3ec58SRichard Henderson mmu_idx, full->attrs, r, retaddr); 136204e3aabdSPeter Maydell } 1363d9bb58e5SYang Zhong if (locked) { 1364d9bb58e5SYang Zhong qemu_mutex_unlock_iothread(); 1365d9bb58e5SYang Zhong } 1366d9bb58e5SYang Zhong 1367d9bb58e5SYang Zhong return val; 1368d9bb58e5SYang Zhong } 1369d9bb58e5SYang Zhong 13702f3a57eeSAlex Bennée /* 1371*25d3ec58SRichard Henderson * Save a potentially trashed CPUTLBEntryFull for later lookup by plugin. 1372*25d3ec58SRichard Henderson * This is read by tlb_plugin_lookup if the fulltlb entry doesn't match 1373570ef309SAlex Bennée * because of the side effect of io_writex changing memory layout. 13742f3a57eeSAlex Bennée */ 13752f3a57eeSAlex Bennée static void save_iotlb_data(CPUState *cs, hwaddr addr, 13762f3a57eeSAlex Bennée MemoryRegionSection *section, hwaddr mr_offset) 13772f3a57eeSAlex Bennée { 13782f3a57eeSAlex Bennée #ifdef CONFIG_PLUGIN 13792f3a57eeSAlex Bennée SavedIOTLB *saved = &cs->saved_iotlb; 13802f3a57eeSAlex Bennée saved->addr = addr; 13812f3a57eeSAlex Bennée saved->section = section; 13822f3a57eeSAlex Bennée saved->mr_offset = mr_offset; 13832f3a57eeSAlex Bennée #endif 13842f3a57eeSAlex Bennée } 13852f3a57eeSAlex Bennée 1386*25d3ec58SRichard Henderson static void io_writex(CPUArchState *env, CPUTLBEntryFull *full, 1387f1be3696SRichard Henderson int mmu_idx, uint64_t val, target_ulong addr, 1388be5c4787STony Nguyen uintptr_t retaddr, MemOp op) 1389d9bb58e5SYang Zhong { 139029a0af61SRichard Henderson CPUState *cpu = env_cpu(env); 13912d54f194SPeter Maydell hwaddr mr_offset; 13922d54f194SPeter Maydell MemoryRegionSection *section; 13932d54f194SPeter Maydell MemoryRegion *mr; 1394d9bb58e5SYang Zhong bool locked = false; 139504e3aabdSPeter Maydell MemTxResult r; 1396d9bb58e5SYang Zhong 1397*25d3ec58SRichard Henderson section = iotlb_to_section(cpu, full->xlat_section, full->attrs); 13982d54f194SPeter Maydell mr = section->mr; 1399*25d3ec58SRichard Henderson mr_offset = (full->xlat_section & TARGET_PAGE_MASK) + addr; 140008565552SRichard Henderson if (!cpu->can_do_io) { 1401d9bb58e5SYang Zhong cpu_io_recompile(cpu, retaddr); 1402d9bb58e5SYang Zhong } 1403d9bb58e5SYang Zhong cpu->mem_io_pc = retaddr; 1404d9bb58e5SYang Zhong 14052f3a57eeSAlex Bennée /* 14062f3a57eeSAlex Bennée * The memory_region_dispatch may trigger a flush/resize 14072f3a57eeSAlex Bennée * so for plugins we save the iotlb_data just in case. 
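 * When that happens, the fallback branch of tlb_plugin_lookup() below
 * recovers the information from this saved copy.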
14082f3a57eeSAlex Bennée */ 1409*25d3ec58SRichard Henderson save_iotlb_data(cpu, full->xlat_section, section, mr_offset); 14102f3a57eeSAlex Bennée 141141744954SPhilippe Mathieu-Daudé if (!qemu_mutex_iothread_locked()) { 1412d9bb58e5SYang Zhong qemu_mutex_lock_iothread(); 1413d9bb58e5SYang Zhong locked = true; 1414d9bb58e5SYang Zhong } 1415*25d3ec58SRichard Henderson r = memory_region_dispatch_write(mr, mr_offset, val, op, full->attrs); 141604e3aabdSPeter Maydell if (r != MEMTX_OK) { 14172d54f194SPeter Maydell hwaddr physaddr = mr_offset + 14182d54f194SPeter Maydell section->offset_within_address_space - 14192d54f194SPeter Maydell section->offset_within_region; 14202d54f194SPeter Maydell 1421be5c4787STony Nguyen cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), 1422*25d3ec58SRichard Henderson MMU_DATA_STORE, mmu_idx, full->attrs, r, 1423be5c4787STony Nguyen retaddr); 142404e3aabdSPeter Maydell } 1425d9bb58e5SYang Zhong if (locked) { 1426d9bb58e5SYang Zhong qemu_mutex_unlock_iothread(); 1427d9bb58e5SYang Zhong } 1428d9bb58e5SYang Zhong } 1429d9bb58e5SYang Zhong 14304811e909SRichard Henderson static inline target_ulong tlb_read_ofs(CPUTLBEntry *entry, size_t ofs) 14314811e909SRichard Henderson { 14324811e909SRichard Henderson #if TCG_OVERSIZED_GUEST 14334811e909SRichard Henderson return *(target_ulong *)((uintptr_t)entry + ofs); 14344811e909SRichard Henderson #else 1435d73415a3SStefan Hajnoczi /* ofs might correspond to .addr_write, so use qatomic_read */ 1436d73415a3SStefan Hajnoczi return qatomic_read((target_ulong *)((uintptr_t)entry + ofs)); 14374811e909SRichard Henderson #endif 14384811e909SRichard Henderson } 14394811e909SRichard Henderson 1440d9bb58e5SYang Zhong /* Return true if ADDR is present in the victim tlb, and has been copied 1441d9bb58e5SYang Zhong back to the main tlb. */ 1442d9bb58e5SYang Zhong static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index, 1443d9bb58e5SYang Zhong size_t elt_ofs, target_ulong page) 1444d9bb58e5SYang Zhong { 1445d9bb58e5SYang Zhong size_t vidx; 144671aec354SEmilio G. Cota 144729a0af61SRichard Henderson assert_cpu_is_self(env_cpu(env)); 1448d9bb58e5SYang Zhong for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) { 1449a40ec84eSRichard Henderson CPUTLBEntry *vtlb = &env_tlb(env)->d[mmu_idx].vtable[vidx]; 1450a40ec84eSRichard Henderson target_ulong cmp; 1451a40ec84eSRichard Henderson 1452d73415a3SStefan Hajnoczi /* elt_ofs might correspond to .addr_write, so use qatomic_read */ 1453a40ec84eSRichard Henderson #if TCG_OVERSIZED_GUEST 1454a40ec84eSRichard Henderson cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs); 1455a40ec84eSRichard Henderson #else 1456d73415a3SStefan Hajnoczi cmp = qatomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs)); 1457a40ec84eSRichard Henderson #endif 1458d9bb58e5SYang Zhong 1459d9bb58e5SYang Zhong if (cmp == page) { 1460d9bb58e5SYang Zhong /* Found entry in victim tlb, swap tlb and iotlb. */ 1461a40ec84eSRichard Henderson CPUTLBEntry tmptlb, *tlb = &env_tlb(env)->f[mmu_idx].table[index]; 1462d9bb58e5SYang Zhong 1463a40ec84eSRichard Henderson qemu_spin_lock(&env_tlb(env)->c.lock); 146471aec354SEmilio G. Cota copy_tlb_helper_locked(&tmptlb, tlb); 146571aec354SEmilio G. Cota copy_tlb_helper_locked(tlb, vtlb); 146671aec354SEmilio G. 
Cota copy_tlb_helper_locked(vtlb, &tmptlb); 1467a40ec84eSRichard Henderson qemu_spin_unlock(&env_tlb(env)->c.lock); 1468d9bb58e5SYang Zhong 1469*25d3ec58SRichard Henderson CPUTLBEntryFull *f1 = &env_tlb(env)->d[mmu_idx].fulltlb[index]; 1470*25d3ec58SRichard Henderson CPUTLBEntryFull *f2 = &env_tlb(env)->d[mmu_idx].vfulltlb[vidx]; 1471*25d3ec58SRichard Henderson CPUTLBEntryFull tmpf; 1472*25d3ec58SRichard Henderson tmpf = *f1; *f1 = *f2; *f2 = tmpf; 1473d9bb58e5SYang Zhong return true; 1474d9bb58e5SYang Zhong } 1475d9bb58e5SYang Zhong } 1476d9bb58e5SYang Zhong return false; 1477d9bb58e5SYang Zhong } 1478d9bb58e5SYang Zhong 1479d9bb58e5SYang Zhong /* Macro to call the above, with local variables from the use context. */ 1480d9bb58e5SYang Zhong #define VICTIM_TLB_HIT(TY, ADDR) \ 1481d9bb58e5SYang Zhong victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \ 1482d9bb58e5SYang Zhong (ADDR) & TARGET_PAGE_MASK) 1483d9bb58e5SYang Zhong 1484707526adSRichard Henderson static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size, 1485*25d3ec58SRichard Henderson CPUTLBEntryFull *full, uintptr_t retaddr) 1486707526adSRichard Henderson { 1487*25d3ec58SRichard Henderson ram_addr_t ram_addr = mem_vaddr + full->xlat_section; 1488707526adSRichard Henderson 1489707526adSRichard Henderson trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size); 1490707526adSRichard Henderson 1491707526adSRichard Henderson if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) { 1492707526adSRichard Henderson struct page_collection *pages 1493707526adSRichard Henderson = page_collection_lock(ram_addr, ram_addr + size); 14945a7c27bbSRichard Henderson tb_invalidate_phys_page_fast(pages, ram_addr, size, retaddr); 1495707526adSRichard Henderson page_collection_unlock(pages); 1496707526adSRichard Henderson } 1497707526adSRichard Henderson 1498707526adSRichard Henderson /* 1499707526adSRichard Henderson * Set both VGA and migration bits for simplicity and to remove 1500707526adSRichard Henderson * the notdirty callback faster. 1501707526adSRichard Henderson */ 1502707526adSRichard Henderson cpu_physical_memory_set_dirty_range(ram_addr, size, DIRTY_CLIENTS_NOCODE); 1503707526adSRichard Henderson 1504707526adSRichard Henderson /* We remove the notdirty callback only if the code has been flushed. 
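   While translated code remains on the page it stays "clean" for
   DIRTY_MEMORY_CODE, so the TLB entry keeps TLB_NOTDIRTY and further
   writes continue to trap into this function.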
*/ 1505707526adSRichard Henderson if (!cpu_physical_memory_is_clean(ram_addr)) { 1506707526adSRichard Henderson trace_memory_notdirty_set_dirty(mem_vaddr); 1507707526adSRichard Henderson tlb_set_dirty(cpu, mem_vaddr); 1508707526adSRichard Henderson } 1509707526adSRichard Henderson } 1510707526adSRichard Henderson 1511069cfe77SRichard Henderson static int probe_access_internal(CPUArchState *env, target_ulong addr, 1512069cfe77SRichard Henderson int fault_size, MMUAccessType access_type, 1513069cfe77SRichard Henderson int mmu_idx, bool nonfault, 1514069cfe77SRichard Henderson void **phost, uintptr_t retaddr) 1515d9bb58e5SYang Zhong { 1516383beda9SRichard Henderson uintptr_t index = tlb_index(env, mmu_idx, addr); 1517383beda9SRichard Henderson CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); 1518069cfe77SRichard Henderson target_ulong tlb_addr, page_addr; 1519c25c283dSDavid Hildenbrand size_t elt_ofs; 1520069cfe77SRichard Henderson int flags; 1521ca86cf32SDavid Hildenbrand 1522c25c283dSDavid Hildenbrand switch (access_type) { 1523c25c283dSDavid Hildenbrand case MMU_DATA_LOAD: 1524c25c283dSDavid Hildenbrand elt_ofs = offsetof(CPUTLBEntry, addr_read); 1525c25c283dSDavid Hildenbrand break; 1526c25c283dSDavid Hildenbrand case MMU_DATA_STORE: 1527c25c283dSDavid Hildenbrand elt_ofs = offsetof(CPUTLBEntry, addr_write); 1528c25c283dSDavid Hildenbrand break; 1529c25c283dSDavid Hildenbrand case MMU_INST_FETCH: 1530c25c283dSDavid Hildenbrand elt_ofs = offsetof(CPUTLBEntry, addr_code); 1531c25c283dSDavid Hildenbrand break; 1532c25c283dSDavid Hildenbrand default: 1533c25c283dSDavid Hildenbrand g_assert_not_reached(); 1534c25c283dSDavid Hildenbrand } 1535c25c283dSDavid Hildenbrand tlb_addr = tlb_read_ofs(entry, elt_ofs); 1536c25c283dSDavid Hildenbrand 1537069cfe77SRichard Henderson page_addr = addr & TARGET_PAGE_MASK; 1538069cfe77SRichard Henderson if (!tlb_hit_page(tlb_addr, page_addr)) { 1539069cfe77SRichard Henderson if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, page_addr)) { 1540069cfe77SRichard Henderson CPUState *cs = env_cpu(env); 1541069cfe77SRichard Henderson 15428810ee2aSAlex Bennée if (!cs->cc->tcg_ops->tlb_fill(cs, addr, fault_size, access_type, 1543069cfe77SRichard Henderson mmu_idx, nonfault, retaddr)) { 1544069cfe77SRichard Henderson /* Non-faulting page table read failed. */ 1545069cfe77SRichard Henderson *phost = NULL; 1546069cfe77SRichard Henderson return TLB_INVALID_MASK; 1547069cfe77SRichard Henderson } 1548069cfe77SRichard Henderson 154903a98189SDavid Hildenbrand /* TLB resize via tlb_fill may have moved the entry. */ 155003a98189SDavid Hildenbrand entry = tlb_entry(env, mmu_idx, addr); 1551d9bb58e5SYang Zhong } 1552c25c283dSDavid Hildenbrand tlb_addr = tlb_read_ofs(entry, elt_ofs); 155303a98189SDavid Hildenbrand } 1554069cfe77SRichard Henderson flags = tlb_addr & TLB_FLAGS_MASK; 155503a98189SDavid Hildenbrand 1556069cfe77SRichard Henderson /* Fold all "mmio-like" bits into TLB_MMIO. This is not RAM. */ 1557069cfe77SRichard Henderson if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY))) { 1558069cfe77SRichard Henderson *phost = NULL; 1559069cfe77SRichard Henderson return TLB_MMIO; 1560fef39ccdSDavid Hildenbrand } 1561fef39ccdSDavid Hildenbrand 1562069cfe77SRichard Henderson /* Everything else is RAM. 
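   The remaining flags are at most TLB_NOTDIRTY and/or TLB_WATCHPOINT,
   returned together with a host pointer the caller can dereference.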
*/ 1563069cfe77SRichard Henderson *phost = (void *)((uintptr_t)addr + entry->addend); 1564069cfe77SRichard Henderson return flags; 1565069cfe77SRichard Henderson } 1566069cfe77SRichard Henderson 1567069cfe77SRichard Henderson int probe_access_flags(CPUArchState *env, target_ulong addr, 1568069cfe77SRichard Henderson MMUAccessType access_type, int mmu_idx, 1569069cfe77SRichard Henderson bool nonfault, void **phost, uintptr_t retaddr) 1570069cfe77SRichard Henderson { 1571069cfe77SRichard Henderson int flags; 1572069cfe77SRichard Henderson 1573069cfe77SRichard Henderson flags = probe_access_internal(env, addr, 0, access_type, mmu_idx, 1574069cfe77SRichard Henderson nonfault, phost, retaddr); 1575069cfe77SRichard Henderson 1576069cfe77SRichard Henderson /* Handle clean RAM pages. */ 1577069cfe77SRichard Henderson if (unlikely(flags & TLB_NOTDIRTY)) { 1578069cfe77SRichard Henderson uintptr_t index = tlb_index(env, mmu_idx, addr); 1579*25d3ec58SRichard Henderson CPUTLBEntryFull *full = &env_tlb(env)->d[mmu_idx].fulltlb[index]; 158073bc0bd4SRichard Henderson 1581*25d3ec58SRichard Henderson notdirty_write(env_cpu(env), addr, 1, full, retaddr); 1582069cfe77SRichard Henderson flags &= ~TLB_NOTDIRTY; 1583069cfe77SRichard Henderson } 1584069cfe77SRichard Henderson 1585069cfe77SRichard Henderson return flags; 1586069cfe77SRichard Henderson } 1587069cfe77SRichard Henderson 1588069cfe77SRichard Henderson void *probe_access(CPUArchState *env, target_ulong addr, int size, 1589069cfe77SRichard Henderson MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) 1590069cfe77SRichard Henderson { 1591069cfe77SRichard Henderson void *host; 1592069cfe77SRichard Henderson int flags; 1593069cfe77SRichard Henderson 1594069cfe77SRichard Henderson g_assert(-(addr | TARGET_PAGE_MASK) >= size); 1595069cfe77SRichard Henderson 1596069cfe77SRichard Henderson flags = probe_access_internal(env, addr, size, access_type, mmu_idx, 1597069cfe77SRichard Henderson false, &host, retaddr); 1598069cfe77SRichard Henderson 1599069cfe77SRichard Henderson /* Per the interface, size == 0 merely faults the access. */ 1600069cfe77SRichard Henderson if (size == 0) { 160173bc0bd4SRichard Henderson return NULL; 160273bc0bd4SRichard Henderson } 160373bc0bd4SRichard Henderson 1604069cfe77SRichard Henderson if (unlikely(flags & (TLB_NOTDIRTY | TLB_WATCHPOINT))) { 1605069cfe77SRichard Henderson uintptr_t index = tlb_index(env, mmu_idx, addr); 1606*25d3ec58SRichard Henderson CPUTLBEntryFull *full = &env_tlb(env)->d[mmu_idx].fulltlb[index]; 1607069cfe77SRichard Henderson 160803a98189SDavid Hildenbrand /* Handle watchpoints. */ 1609069cfe77SRichard Henderson if (flags & TLB_WATCHPOINT) { 1610069cfe77SRichard Henderson int wp_access = (access_type == MMU_DATA_STORE 1611069cfe77SRichard Henderson ? BP_MEM_WRITE : BP_MEM_READ); 161203a98189SDavid Hildenbrand cpu_check_watchpoint(env_cpu(env), addr, size, 1613*25d3ec58SRichard Henderson full->attrs, wp_access, retaddr); 1614d9bb58e5SYang Zhong } 1615fef39ccdSDavid Hildenbrand 161673bc0bd4SRichard Henderson /* Handle clean RAM pages. 
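   i.e. do the notdirty_write() bookkeeping once up front, so the page
   is marked dirty and the flag need not be reported to the caller.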
*/ 1617069cfe77SRichard Henderson if (flags & TLB_NOTDIRTY) { 1618*25d3ec58SRichard Henderson notdirty_write(env_cpu(env), addr, 1, full, retaddr); 161973bc0bd4SRichard Henderson } 1620fef39ccdSDavid Hildenbrand } 1621fef39ccdSDavid Hildenbrand 1622069cfe77SRichard Henderson return host; 1623d9bb58e5SYang Zhong } 1624d9bb58e5SYang Zhong 16254811e909SRichard Henderson void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr, 16264811e909SRichard Henderson MMUAccessType access_type, int mmu_idx) 16274811e909SRichard Henderson { 1628069cfe77SRichard Henderson void *host; 1629069cfe77SRichard Henderson int flags; 16304811e909SRichard Henderson 1631069cfe77SRichard Henderson flags = probe_access_internal(env, addr, 0, access_type, 1632069cfe77SRichard Henderson mmu_idx, true, &host, 0); 1633069cfe77SRichard Henderson 1634069cfe77SRichard Henderson /* No combination of flags are expected by the caller. */ 1635069cfe77SRichard Henderson return flags ? NULL : host; 16364811e909SRichard Henderson } 16374811e909SRichard Henderson 16387e0d9973SRichard Henderson /* 16397e0d9973SRichard Henderson * Return a ram_addr_t for the virtual address for execution. 16407e0d9973SRichard Henderson * 16417e0d9973SRichard Henderson * Return -1 if we can't translate and execute from an entire page 16427e0d9973SRichard Henderson * of RAM. This will force us to execute by loading and translating 16437e0d9973SRichard Henderson * one insn at a time, without caching. 16447e0d9973SRichard Henderson * 16457e0d9973SRichard Henderson * NOTE: This function will trigger an exception if the page is 16467e0d9973SRichard Henderson * not executable. 16477e0d9973SRichard Henderson */ 16487e0d9973SRichard Henderson tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr, 16497e0d9973SRichard Henderson void **hostp) 16507e0d9973SRichard Henderson { 16517e0d9973SRichard Henderson void *p; 16527e0d9973SRichard Henderson 16537e0d9973SRichard Henderson (void)probe_access_internal(env, addr, 1, MMU_INST_FETCH, 16547e0d9973SRichard Henderson cpu_mmu_index(env, true), false, &p, 0); 16557e0d9973SRichard Henderson if (p == NULL) { 16567e0d9973SRichard Henderson return -1; 16577e0d9973SRichard Henderson } 16587e0d9973SRichard Henderson if (hostp) { 16597e0d9973SRichard Henderson *hostp = p; 16607e0d9973SRichard Henderson } 16617e0d9973SRichard Henderson return qemu_ram_addr_from_host_nofail(p); 16627e0d9973SRichard Henderson } 16637e0d9973SRichard Henderson 1664235537faSAlex Bennée #ifdef CONFIG_PLUGIN 1665235537faSAlex Bennée /* 1666235537faSAlex Bennée * Perform a TLB lookup and populate the qemu_plugin_hwaddr structure. 1667235537faSAlex Bennée * This should be a hot path as we will have just looked this path up 1668235537faSAlex Bennée * in the softmmu lookup code (or helper). We don't handle re-fills or 1669235537faSAlex Bennée * checking the victim table. This is purely informational. 1670235537faSAlex Bennée * 16712f3a57eeSAlex Bennée * This almost never fails as the memory access being instrumented 16722f3a57eeSAlex Bennée * should have just filled the TLB. The one corner case is io_writex 16732f3a57eeSAlex Bennée * which can cause TLB flushes and potential resizing of the TLBs 1674570ef309SAlex Bennée * losing the information we need. In those cases we need to recover 1675*25d3ec58SRichard Henderson * data from a copy of the CPUTLBEntryFull. As long as this always occurs 1676570ef309SAlex Bennée * from the same thread (which a mem callback will be) this is safe. 
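 * save_iotlb_data() above performs that save on the MMIO store path;
 * the fallback branch below consumes it.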
1677235537faSAlex Bennée */ 1678235537faSAlex Bennée 1679235537faSAlex Bennée bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr, int mmu_idx, 1680235537faSAlex Bennée bool is_store, struct qemu_plugin_hwaddr *data) 1681235537faSAlex Bennée { 1682235537faSAlex Bennée CPUArchState *env = cpu->env_ptr; 1683235537faSAlex Bennée CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr); 1684235537faSAlex Bennée uintptr_t index = tlb_index(env, mmu_idx, addr); 1685235537faSAlex Bennée target_ulong tlb_addr = is_store ? tlb_addr_write(tlbe) : tlbe->addr_read; 1686235537faSAlex Bennée 1687235537faSAlex Bennée if (likely(tlb_hit(tlb_addr, addr))) { 1688235537faSAlex Bennée /* We must have an iotlb entry for MMIO */ 1689235537faSAlex Bennée if (tlb_addr & TLB_MMIO) { 1690*25d3ec58SRichard Henderson CPUTLBEntryFull *full; 1691*25d3ec58SRichard Henderson full = &env_tlb(env)->d[mmu_idx].fulltlb[index]; 1692235537faSAlex Bennée data->is_io = true; 1693*25d3ec58SRichard Henderson data->v.io.section = 1694*25d3ec58SRichard Henderson iotlb_to_section(cpu, full->xlat_section, full->attrs); 1695*25d3ec58SRichard Henderson data->v.io.offset = (full->xlat_section & TARGET_PAGE_MASK) + addr; 1696235537faSAlex Bennée } else { 1697235537faSAlex Bennée data->is_io = false; 16982d932039SAlex Bennée data->v.ram.hostaddr = (void *)((uintptr_t)addr + tlbe->addend); 1699235537faSAlex Bennée } 1700235537faSAlex Bennée return true; 17012f3a57eeSAlex Bennée } else { 17022f3a57eeSAlex Bennée SavedIOTLB *saved = &cpu->saved_iotlb; 17032f3a57eeSAlex Bennée data->is_io = true; 17042f3a57eeSAlex Bennée data->v.io.section = saved->section; 17052f3a57eeSAlex Bennée data->v.io.offset = saved->mr_offset; 17062f3a57eeSAlex Bennée return true; 1707235537faSAlex Bennée } 1708235537faSAlex Bennée } 1709235537faSAlex Bennée 1710235537faSAlex Bennée #endif 1711235537faSAlex Bennée 171208dff435SRichard Henderson /* 171308dff435SRichard Henderson * Probe for an atomic operation. Do not allow unaligned operations, 171408dff435SRichard Henderson * or io operations to proceed. Return the host address. 171508dff435SRichard Henderson * 171608dff435SRichard Henderson * @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE. 171708dff435SRichard Henderson */ 1718d9bb58e5SYang Zhong static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, 17199002ffcbSRichard Henderson MemOpIdx oi, int size, int prot, 172008dff435SRichard Henderson uintptr_t retaddr) 1721d9bb58e5SYang Zhong { 1722b826044fSRichard Henderson uintptr_t mmu_idx = get_mmuidx(oi); 172314776ab5STony Nguyen MemOp mop = get_memop(oi); 1724d9bb58e5SYang Zhong int a_bits = get_alignment_bits(mop); 172508dff435SRichard Henderson uintptr_t index; 172608dff435SRichard Henderson CPUTLBEntry *tlbe; 172708dff435SRichard Henderson target_ulong tlb_addr; 172834d49937SPeter Maydell void *hostaddr; 1729d9bb58e5SYang Zhong 1730b826044fSRichard Henderson tcg_debug_assert(mmu_idx < NB_MMU_MODES); 1731b826044fSRichard Henderson 1732d9bb58e5SYang Zhong /* Adjust the given return address. */ 1733d9bb58e5SYang Zhong retaddr -= GETPC_ADJ; 1734d9bb58e5SYang Zhong 1735d9bb58e5SYang Zhong /* Enforce guest required alignment. */ 1736d9bb58e5SYang Zhong if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) { 1737d9bb58e5SYang Zhong /* ??? 
Maybe indicate atomic op to cpu_unaligned_access */ 173829a0af61SRichard Henderson cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE, 1739d9bb58e5SYang Zhong mmu_idx, retaddr); 1740d9bb58e5SYang Zhong } 1741d9bb58e5SYang Zhong 1742d9bb58e5SYang Zhong /* Enforce qemu required alignment. */ 174308dff435SRichard Henderson if (unlikely(addr & (size - 1))) { 1744d9bb58e5SYang Zhong /* We get here if guest alignment was not requested, 1745d9bb58e5SYang Zhong or was not enforced by cpu_unaligned_access above. 1746d9bb58e5SYang Zhong We might widen the access and emulate, but for now 1747d9bb58e5SYang Zhong mark an exception and exit the cpu loop. */ 1748d9bb58e5SYang Zhong goto stop_the_world; 1749d9bb58e5SYang Zhong } 1750d9bb58e5SYang Zhong 175108dff435SRichard Henderson index = tlb_index(env, mmu_idx, addr); 175208dff435SRichard Henderson tlbe = tlb_entry(env, mmu_idx, addr); 175308dff435SRichard Henderson 1754d9bb58e5SYang Zhong /* Check TLB entry and enforce page permissions. */ 175508dff435SRichard Henderson if (prot & PAGE_WRITE) { 175608dff435SRichard Henderson tlb_addr = tlb_addr_write(tlbe); 1757334692bcSPeter Maydell if (!tlb_hit(tlb_addr, addr)) { 1758d9bb58e5SYang Zhong if (!VICTIM_TLB_HIT(addr_write, addr)) { 175908dff435SRichard Henderson tlb_fill(env_cpu(env), addr, size, 176008dff435SRichard Henderson MMU_DATA_STORE, mmu_idx, retaddr); 17616d967cb8SEmilio G. Cota index = tlb_index(env, mmu_idx, addr); 17626d967cb8SEmilio G. Cota tlbe = tlb_entry(env, mmu_idx, addr); 1763d9bb58e5SYang Zhong } 1764403f290cSEmilio G. Cota tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK; 1765d9bb58e5SYang Zhong } 1766d9bb58e5SYang Zhong 176708dff435SRichard Henderson /* Let the guest notice RMW on a write-only page. */ 176808dff435SRichard Henderson if ((prot & PAGE_READ) && 176908dff435SRichard Henderson unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) { 177008dff435SRichard Henderson tlb_fill(env_cpu(env), addr, size, 177108dff435SRichard Henderson MMU_DATA_LOAD, mmu_idx, retaddr); 177208dff435SRichard Henderson /* 177308dff435SRichard Henderson * Since we don't support reads and writes to different addresses, 177408dff435SRichard Henderson * and we do have the proper page loaded for write, this shouldn't 177508dff435SRichard Henderson * ever return. But just in case, handle via stop-the-world. 177608dff435SRichard Henderson */ 177708dff435SRichard Henderson goto stop_the_world; 177808dff435SRichard Henderson } 177908dff435SRichard Henderson } else /* if (prot & PAGE_READ) */ { 178008dff435SRichard Henderson tlb_addr = tlbe->addr_read; 178108dff435SRichard Henderson if (!tlb_hit(tlb_addr, addr)) { 178208dff435SRichard Henderson if (!VICTIM_TLB_HIT(addr_write, addr)) { 178308dff435SRichard Henderson tlb_fill(env_cpu(env), addr, size, 178408dff435SRichard Henderson MMU_DATA_LOAD, mmu_idx, retaddr); 178508dff435SRichard Henderson index = tlb_index(env, mmu_idx, addr); 178608dff435SRichard Henderson tlbe = tlb_entry(env, mmu_idx, addr); 178708dff435SRichard Henderson } 178808dff435SRichard Henderson tlb_addr = tlbe->addr_read & ~TLB_INVALID_MASK; 178908dff435SRichard Henderson } 179008dff435SRichard Henderson } 179108dff435SRichard Henderson 179255df6fcfSPeter Maydell /* Notice an IO access or a needs-MMU-lookup access */ 179330d7e098SRichard Henderson if (unlikely(tlb_addr & TLB_MMIO)) { 1794d9bb58e5SYang Zhong /* There's really nothing that can be done to 1795d9bb58e5SYang Zhong support this apart from stop-the-world. 
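   (A hedged note on behaviour outside this file: cpu_loop_exit_atomic()
   raises EXCP_ATOMIC and the access is then replayed in an exclusive
   section.)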
*/ 1796d9bb58e5SYang Zhong goto stop_the_world; 1797d9bb58e5SYang Zhong } 1798d9bb58e5SYang Zhong 179934d49937SPeter Maydell hostaddr = (void *)((uintptr_t)addr + tlbe->addend); 180034d49937SPeter Maydell 180134d49937SPeter Maydell if (unlikely(tlb_addr & TLB_NOTDIRTY)) { 180208dff435SRichard Henderson notdirty_write(env_cpu(env), addr, size, 1803*25d3ec58SRichard Henderson &env_tlb(env)->d[mmu_idx].fulltlb[index], retaddr); 180434d49937SPeter Maydell } 180534d49937SPeter Maydell 180634d49937SPeter Maydell return hostaddr; 1807d9bb58e5SYang Zhong 1808d9bb58e5SYang Zhong stop_the_world: 180929a0af61SRichard Henderson cpu_loop_exit_atomic(env_cpu(env), retaddr); 1810d9bb58e5SYang Zhong } 1811d9bb58e5SYang Zhong 1812eed56642SAlex Bennée /* 1813f83bcecbSRichard Henderson * Verify that we have passed the correct MemOp to the correct function. 1814f83bcecbSRichard Henderson * 1815f83bcecbSRichard Henderson * In the case of the helper_*_mmu functions, we will have done this by 1816f83bcecbSRichard Henderson * using the MemOp to look up the helper during code generation. 1817f83bcecbSRichard Henderson * 1818f83bcecbSRichard Henderson * In the case of the cpu_*_mmu functions, this is up to the caller. 1819f83bcecbSRichard Henderson * We could present one function to target code, and dispatch based on 1820f83bcecbSRichard Henderson * the MemOp, but so far we have worked hard to avoid an indirect function 1821f83bcecbSRichard Henderson * call along the memory path. 1822f83bcecbSRichard Henderson */ 1823f83bcecbSRichard Henderson static void validate_memop(MemOpIdx oi, MemOp expected) 1824f83bcecbSRichard Henderson { 1825f83bcecbSRichard Henderson #ifdef CONFIG_DEBUG_TCG 1826f83bcecbSRichard Henderson MemOp have = get_memop(oi) & (MO_SIZE | MO_BSWAP); 1827f83bcecbSRichard Henderson assert(have == expected); 1828f83bcecbSRichard Henderson #endif 1829f83bcecbSRichard Henderson } 1830f83bcecbSRichard Henderson 1831f83bcecbSRichard Henderson /* 1832eed56642SAlex Bennée * Load Helpers 1833eed56642SAlex Bennée * 1834eed56642SAlex Bennée * We support two different access types. SOFTMMU_CODE_ACCESS is 1835eed56642SAlex Bennée * specifically for reading instructions from system memory. It is 1836eed56642SAlex Bennée * called by the translation loop and in some helpers where the code 1837eed56642SAlex Bennée * is disassembled. It shouldn't be called directly by guest code. 
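 *
 * A hedged sketch of the calling convention (the locals shown are
 * hypothetical): a MemOpIdx packs the MemOp and the mmu_idx together,
 *
 *     MemOpIdx oi = make_memop_idx(MO_LEUL, mmu_idx);
 *     uint32_t val = helper_le_ldul_mmu(env, addr, oi, GETPC());
 *
 * and get_memop()/get_mmuidx() unpack it again in load_helper().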
1838eed56642SAlex Bennée */ 1839d9bb58e5SYang Zhong 18402dd92606SRichard Henderson typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr, 18419002ffcbSRichard Henderson MemOpIdx oi, uintptr_t retaddr); 18422dd92606SRichard Henderson 1843c6b716cdSRichard Henderson static inline uint64_t QEMU_ALWAYS_INLINE 184480d9d1c6SRichard Henderson load_memop(const void *haddr, MemOp op) 184580d9d1c6SRichard Henderson { 184680d9d1c6SRichard Henderson switch (op) { 184780d9d1c6SRichard Henderson case MO_UB: 184880d9d1c6SRichard Henderson return ldub_p(haddr); 184980d9d1c6SRichard Henderson case MO_BEUW: 185080d9d1c6SRichard Henderson return lduw_be_p(haddr); 185180d9d1c6SRichard Henderson case MO_LEUW: 185280d9d1c6SRichard Henderson return lduw_le_p(haddr); 185380d9d1c6SRichard Henderson case MO_BEUL: 185480d9d1c6SRichard Henderson return (uint32_t)ldl_be_p(haddr); 185580d9d1c6SRichard Henderson case MO_LEUL: 185680d9d1c6SRichard Henderson return (uint32_t)ldl_le_p(haddr); 1857fc313c64SFrédéric Pétrot case MO_BEUQ: 185880d9d1c6SRichard Henderson return ldq_be_p(haddr); 1859fc313c64SFrédéric Pétrot case MO_LEUQ: 186080d9d1c6SRichard Henderson return ldq_le_p(haddr); 186180d9d1c6SRichard Henderson default: 186280d9d1c6SRichard Henderson qemu_build_not_reached(); 186380d9d1c6SRichard Henderson } 186480d9d1c6SRichard Henderson } 186580d9d1c6SRichard Henderson 186680d9d1c6SRichard Henderson static inline uint64_t QEMU_ALWAYS_INLINE 18679002ffcbSRichard Henderson load_helper(CPUArchState *env, target_ulong addr, MemOpIdx oi, 1868be5c4787STony Nguyen uintptr_t retaddr, MemOp op, bool code_read, 18692dd92606SRichard Henderson FullLoadHelper *full_load) 1870eed56642SAlex Bennée { 1871eed56642SAlex Bennée const size_t tlb_off = code_read ? 1872eed56642SAlex Bennée offsetof(CPUTLBEntry, addr_code) : offsetof(CPUTLBEntry, addr_read); 1873f1be3696SRichard Henderson const MMUAccessType access_type = 1874f1be3696SRichard Henderson code_read ? MMU_INST_FETCH : MMU_DATA_LOAD; 1875b826044fSRichard Henderson const unsigned a_bits = get_alignment_bits(get_memop(oi)); 1876b826044fSRichard Henderson const size_t size = memop_size(op); 1877b826044fSRichard Henderson uintptr_t mmu_idx = get_mmuidx(oi); 1878b826044fSRichard Henderson uintptr_t index; 1879b826044fSRichard Henderson CPUTLBEntry *entry; 1880b826044fSRichard Henderson target_ulong tlb_addr; 1881eed56642SAlex Bennée void *haddr; 1882eed56642SAlex Bennée uint64_t res; 1883b826044fSRichard Henderson 1884b826044fSRichard Henderson tcg_debug_assert(mmu_idx < NB_MMU_MODES); 1885d9bb58e5SYang Zhong 1886eed56642SAlex Bennée /* Handle CPU specific unaligned behaviour */ 1887eed56642SAlex Bennée if (addr & ((1 << a_bits) - 1)) { 188829a0af61SRichard Henderson cpu_unaligned_access(env_cpu(env), addr, access_type, 1889eed56642SAlex Bennée mmu_idx, retaddr); 1890eed56642SAlex Bennée } 1891eed56642SAlex Bennée 1892b826044fSRichard Henderson index = tlb_index(env, mmu_idx, addr); 1893b826044fSRichard Henderson entry = tlb_entry(env, mmu_idx, addr); 1894b826044fSRichard Henderson tlb_addr = code_read ? entry->addr_code : entry->addr_read; 1895b826044fSRichard Henderson 1896eed56642SAlex Bennée /* If the TLB entry is for a different page, reload and try again. 
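   The victim TLB is checked first; only on a miss there do we pay for
   tlb_fill(), which may resize the table and move the entry (hence the
   reload of index and entry below).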
*/ 1897eed56642SAlex Bennée if (!tlb_hit(tlb_addr, addr)) { 1898eed56642SAlex Bennée if (!victim_tlb_hit(env, mmu_idx, index, tlb_off, 1899eed56642SAlex Bennée addr & TARGET_PAGE_MASK)) { 190029a0af61SRichard Henderson tlb_fill(env_cpu(env), addr, size, 1901f1be3696SRichard Henderson access_type, mmu_idx, retaddr); 1902eed56642SAlex Bennée index = tlb_index(env, mmu_idx, addr); 1903eed56642SAlex Bennée entry = tlb_entry(env, mmu_idx, addr); 1904eed56642SAlex Bennée } 1905eed56642SAlex Bennée tlb_addr = code_read ? entry->addr_code : entry->addr_read; 190630d7e098SRichard Henderson tlb_addr &= ~TLB_INVALID_MASK; 1907eed56642SAlex Bennée } 1908eed56642SAlex Bennée 190950b107c5SRichard Henderson /* Handle anything that isn't just a straight memory access. */ 1910eed56642SAlex Bennée if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) { 1911*25d3ec58SRichard Henderson CPUTLBEntryFull *full; 19125b87b3e6SRichard Henderson bool need_swap; 191350b107c5SRichard Henderson 191450b107c5SRichard Henderson /* For anything that is unaligned, recurse through full_load. */ 1915eed56642SAlex Bennée if ((addr & (size - 1)) != 0) { 1916eed56642SAlex Bennée goto do_unaligned_access; 1917eed56642SAlex Bennée } 191850b107c5SRichard Henderson 1919*25d3ec58SRichard Henderson full = &env_tlb(env)->d[mmu_idx].fulltlb[index]; 192050b107c5SRichard Henderson 192150b107c5SRichard Henderson /* Handle watchpoints. */ 192250b107c5SRichard Henderson if (unlikely(tlb_addr & TLB_WATCHPOINT)) { 192350b107c5SRichard Henderson /* On watchpoint hit, this will longjmp out. */ 192450b107c5SRichard Henderson cpu_check_watchpoint(env_cpu(env), addr, size, 1925*25d3ec58SRichard Henderson full->attrs, BP_MEM_READ, retaddr); 19265b87b3e6SRichard Henderson } 192750b107c5SRichard Henderson 19285b87b3e6SRichard Henderson need_swap = size > 1 && (tlb_addr & TLB_BSWAP); 192950b107c5SRichard Henderson 193050b107c5SRichard Henderson /* Handle I/O access. */ 19315b87b3e6SRichard Henderson if (likely(tlb_addr & TLB_MMIO)) { 1932*25d3ec58SRichard Henderson return io_readx(env, full, mmu_idx, addr, retaddr, 19335b87b3e6SRichard Henderson access_type, op ^ (need_swap * MO_BSWAP)); 19345b87b3e6SRichard Henderson } 19355b87b3e6SRichard Henderson 19365b87b3e6SRichard Henderson haddr = (void *)((uintptr_t)addr + entry->addend); 19375b87b3e6SRichard Henderson 19385b87b3e6SRichard Henderson /* 19395b87b3e6SRichard Henderson * Keep these two load_memop separate to ensure that the compiler 19405b87b3e6SRichard Henderson * is able to fold the entire function to a single instruction. 19415b87b3e6SRichard Henderson * There is a build-time assert inside to remind you of this. ;-) 19425b87b3e6SRichard Henderson */ 19435b87b3e6SRichard Henderson if (unlikely(need_swap)) { 19445b87b3e6SRichard Henderson return load_memop(haddr, op ^ MO_BSWAP); 19455b87b3e6SRichard Henderson } 19465b87b3e6SRichard Henderson return load_memop(haddr, op); 1947eed56642SAlex Bennée } 1948eed56642SAlex Bennée 1949eed56642SAlex Bennée /* Handle slow unaligned access (it spans two pages or IO). 
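   Worked example (little-endian, size 4, addr == addr1 + 2): shift is 16,
   so res = (r1 >> 16) | (r2 << 16); the upper two bytes of the first
   aligned word form the low half of the result and the lower two bytes
   of the second word form the high half.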
*/ 1950eed56642SAlex Bennée if (size > 1 1951eed56642SAlex Bennée && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1 1952eed56642SAlex Bennée >= TARGET_PAGE_SIZE)) { 1953eed56642SAlex Bennée target_ulong addr1, addr2; 19548c79b288SAlex Bennée uint64_t r1, r2; 1955eed56642SAlex Bennée unsigned shift; 1956eed56642SAlex Bennée do_unaligned_access: 1957ab7a2009SAlex Bennée addr1 = addr & ~((target_ulong)size - 1); 1958eed56642SAlex Bennée addr2 = addr1 + size; 19592dd92606SRichard Henderson r1 = full_load(env, addr1, oi, retaddr); 19602dd92606SRichard Henderson r2 = full_load(env, addr2, oi, retaddr); 1961eed56642SAlex Bennée shift = (addr & (size - 1)) * 8; 1962eed56642SAlex Bennée 1963be5c4787STony Nguyen if (memop_big_endian(op)) { 1964eed56642SAlex Bennée /* Big-endian combine. */ 1965eed56642SAlex Bennée res = (r1 << shift) | (r2 >> ((size * 8) - shift)); 1966eed56642SAlex Bennée } else { 1967eed56642SAlex Bennée /* Little-endian combine. */ 1968eed56642SAlex Bennée res = (r1 >> shift) | (r2 << ((size * 8) - shift)); 1969eed56642SAlex Bennée } 1970eed56642SAlex Bennée return res & MAKE_64BIT_MASK(0, size * 8); 1971eed56642SAlex Bennée } 1972eed56642SAlex Bennée 1973eed56642SAlex Bennée haddr = (void *)((uintptr_t)addr + entry->addend); 197480d9d1c6SRichard Henderson return load_memop(haddr, op); 1975eed56642SAlex Bennée } 1976eed56642SAlex Bennée 1977eed56642SAlex Bennée /* 1978eed56642SAlex Bennée * For the benefit of TCG generated code, we want to avoid the 1979eed56642SAlex Bennée * complication of ABI-specific return type promotion and always 1980eed56642SAlex Bennée * return a value extended to the register size of the host. This is 1981eed56642SAlex Bennée * tcg_target_long, except in the case of a 32-bit host and 64-bit 1982eed56642SAlex Bennée * data, and for that we always have uint64_t. 1983eed56642SAlex Bennée * 1984eed56642SAlex Bennée * We don't bother with this widened value for SOFTMMU_CODE_ACCESS. 
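 * For example, helper_ret_ldub_mmu() returns the loaded byte
 * zero-extended to tcg_target_ulong, while the ldsb/ldsw/ldsl variants
 * below sign-extend via plain C casts instead.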
1985eed56642SAlex Bennée */ 1986eed56642SAlex Bennée 19872dd92606SRichard Henderson static uint64_t full_ldub_mmu(CPUArchState *env, target_ulong addr, 19889002ffcbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 19892dd92606SRichard Henderson { 1990f83bcecbSRichard Henderson validate_memop(oi, MO_UB); 1991be5c4787STony Nguyen return load_helper(env, addr, oi, retaddr, MO_UB, false, full_ldub_mmu); 19922dd92606SRichard Henderson } 19932dd92606SRichard Henderson 1994fc1bc777SRichard Henderson tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr, 19959002ffcbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 1996eed56642SAlex Bennée { 19972dd92606SRichard Henderson return full_ldub_mmu(env, addr, oi, retaddr); 19982dd92606SRichard Henderson } 19992dd92606SRichard Henderson 20002dd92606SRichard Henderson static uint64_t full_le_lduw_mmu(CPUArchState *env, target_ulong addr, 20019002ffcbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 20022dd92606SRichard Henderson { 2003f83bcecbSRichard Henderson validate_memop(oi, MO_LEUW); 2004be5c4787STony Nguyen return load_helper(env, addr, oi, retaddr, MO_LEUW, false, 20052dd92606SRichard Henderson full_le_lduw_mmu); 2006eed56642SAlex Bennée } 2007eed56642SAlex Bennée 2008fc1bc777SRichard Henderson tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr, 20099002ffcbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 2010eed56642SAlex Bennée { 20112dd92606SRichard Henderson return full_le_lduw_mmu(env, addr, oi, retaddr); 20122dd92606SRichard Henderson } 20132dd92606SRichard Henderson 20142dd92606SRichard Henderson static uint64_t full_be_lduw_mmu(CPUArchState *env, target_ulong addr, 20159002ffcbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 20162dd92606SRichard Henderson { 2017f83bcecbSRichard Henderson validate_memop(oi, MO_BEUW); 2018be5c4787STony Nguyen return load_helper(env, addr, oi, retaddr, MO_BEUW, false, 20192dd92606SRichard Henderson full_be_lduw_mmu); 2020eed56642SAlex Bennée } 2021eed56642SAlex Bennée 2022fc1bc777SRichard Henderson tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr, 20239002ffcbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 2024eed56642SAlex Bennée { 20252dd92606SRichard Henderson return full_be_lduw_mmu(env, addr, oi, retaddr); 20262dd92606SRichard Henderson } 20272dd92606SRichard Henderson 20282dd92606SRichard Henderson static uint64_t full_le_ldul_mmu(CPUArchState *env, target_ulong addr, 20299002ffcbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 20302dd92606SRichard Henderson { 2031f83bcecbSRichard Henderson validate_memop(oi, MO_LEUL); 2032be5c4787STony Nguyen return load_helper(env, addr, oi, retaddr, MO_LEUL, false, 20332dd92606SRichard Henderson full_le_ldul_mmu); 2034eed56642SAlex Bennée } 2035eed56642SAlex Bennée 2036fc1bc777SRichard Henderson tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr, 20379002ffcbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 2038eed56642SAlex Bennée { 20392dd92606SRichard Henderson return full_le_ldul_mmu(env, addr, oi, retaddr); 20402dd92606SRichard Henderson } 20412dd92606SRichard Henderson 20422dd92606SRichard Henderson static uint64_t full_be_ldul_mmu(CPUArchState *env, target_ulong addr, 20439002ffcbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 20442dd92606SRichard Henderson { 2045f83bcecbSRichard Henderson validate_memop(oi, MO_BEUL); 2046be5c4787STony Nguyen return load_helper(env, addr, oi, retaddr, MO_BEUL, false, 20472dd92606SRichard Henderson full_be_ldul_mmu); 
2048eed56642SAlex Bennée } 2049eed56642SAlex Bennée 2050fc1bc777SRichard Henderson tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr, 20519002ffcbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 2052eed56642SAlex Bennée { 20532dd92606SRichard Henderson return full_be_ldul_mmu(env, addr, oi, retaddr); 2054eed56642SAlex Bennée } 2055eed56642SAlex Bennée 2056fc1bc777SRichard Henderson uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr, 20579002ffcbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 2058eed56642SAlex Bennée { 2059fc313c64SFrédéric Pétrot validate_memop(oi, MO_LEUQ); 2060fc313c64SFrédéric Pétrot return load_helper(env, addr, oi, retaddr, MO_LEUQ, false, 20612dd92606SRichard Henderson helper_le_ldq_mmu); 2062eed56642SAlex Bennée } 2063eed56642SAlex Bennée 2064fc1bc777SRichard Henderson uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr, 20659002ffcbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 2066eed56642SAlex Bennée { 2067fc313c64SFrédéric Pétrot validate_memop(oi, MO_BEUQ); 2068fc313c64SFrédéric Pétrot return load_helper(env, addr, oi, retaddr, MO_BEUQ, false, 20692dd92606SRichard Henderson helper_be_ldq_mmu); 2070eed56642SAlex Bennée } 2071eed56642SAlex Bennée 2072eed56642SAlex Bennée /* 2073eed56642SAlex Bennée * Provide signed versions of the load routines as well. We can of course 2074eed56642SAlex Bennée * avoid this for 64-bit data, or for 32-bit data on 32-bit host. 2075eed56642SAlex Bennée */ 2076eed56642SAlex Bennée 2077eed56642SAlex Bennée 2078eed56642SAlex Bennée tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr, 20799002ffcbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 2080eed56642SAlex Bennée { 2081eed56642SAlex Bennée return (int8_t)helper_ret_ldub_mmu(env, addr, oi, retaddr); 2082eed56642SAlex Bennée } 2083eed56642SAlex Bennée 2084eed56642SAlex Bennée tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr, 20859002ffcbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 2086eed56642SAlex Bennée { 2087eed56642SAlex Bennée return (int16_t)helper_le_lduw_mmu(env, addr, oi, retaddr); 2088eed56642SAlex Bennée } 2089eed56642SAlex Bennée 2090eed56642SAlex Bennée tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr, 20919002ffcbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 2092eed56642SAlex Bennée { 2093eed56642SAlex Bennée return (int16_t)helper_be_lduw_mmu(env, addr, oi, retaddr); 2094eed56642SAlex Bennée } 2095eed56642SAlex Bennée 2096eed56642SAlex Bennée tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr, 20979002ffcbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 2098eed56642SAlex Bennée { 2099eed56642SAlex Bennée return (int32_t)helper_le_ldul_mmu(env, addr, oi, retaddr); 2100eed56642SAlex Bennée } 2101eed56642SAlex Bennée 2102eed56642SAlex Bennée tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr, 21039002ffcbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 2104eed56642SAlex Bennée { 2105eed56642SAlex Bennée return (int32_t)helper_be_ldul_mmu(env, addr, oi, retaddr); 2106eed56642SAlex Bennée } 2107eed56642SAlex Bennée 2108eed56642SAlex Bennée /* 2109d03f1408SRichard Henderson * Load helpers for cpu_ldst.h. 
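 *
 * A hedged usage sketch (the surrounding target helper is hypothetical):
 *
 *     MemOpIdx oi = make_memop_idx(MO_LEUL, cpu_mmu_index(env, false));
 *     uint32_t v = cpu_ldl_le_mmu(env, addr, oi, GETPC());
 *
 * Unlike the helper_*_mmu entry points above, these wrappers also fire
 * the plugin memory callback once the access completes.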
/*
 * Load helpers for cpu_ldst.h.
 */

static inline uint64_t cpu_load_helper(CPUArchState *env, abi_ptr addr,
                                       MemOpIdx oi, uintptr_t retaddr,
                                       FullLoadHelper *full_load)
{
    uint64_t ret;

    ret = full_load(env, addr, oi, retaddr);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr, MemOpIdx oi, uintptr_t ra)
{
    return cpu_load_helper(env, addr, oi, ra, full_ldub_mmu);
}

uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    return cpu_load_helper(env, addr, oi, ra, full_be_lduw_mmu);
}

uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    return cpu_load_helper(env, addr, oi, ra, full_be_ldul_mmu);
}

uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    return cpu_load_helper(env, addr, oi, ra, helper_be_ldq_mmu);
}

uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    return cpu_load_helper(env, addr, oi, ra, full_le_lduw_mmu);
}

uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    return cpu_load_helper(env, addr, oi, ra, full_le_ldul_mmu);
}

uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    return cpu_load_helper(env, addr, oi, ra, helper_le_ldq_mmu);
}

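/*
 * Illustrative sketch (not part of the original source): callers build
 * the MemOpIdx themselves, so a target helper doing a little-endian
 * 32-bit read with alignment checking might look like this (the function
 * name is hypothetical):
 */
#if 0   /* example only, not built */
static uint32_t example_ldl_le(CPUArchState *env, abi_ptr addr,
                               int mmu_idx, uintptr_t ra)
{
    MemOpIdx oi = make_memop_idx(MO_LEUL | MO_ALIGN, mmu_idx);
    return cpu_ldl_le_mmu(env, addr, oi, ra);
}
#endif
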
/*
 * Store Helpers
 */

static inline void QEMU_ALWAYS_INLINE
store_memop(void *haddr, uint64_t val, MemOp op)
{
    switch (op) {
    case MO_UB:
        stb_p(haddr, val);
        break;
    case MO_BEUW:
        stw_be_p(haddr, val);
        break;
    case MO_LEUW:
        stw_le_p(haddr, val);
        break;
    case MO_BEUL:
        stl_be_p(haddr, val);
        break;
    case MO_LEUL:
        stl_le_p(haddr, val);
        break;
    case MO_BEUQ:
        stq_be_p(haddr, val);
        break;
    case MO_LEUQ:
        stq_le_p(haddr, val);
        break;
    default:
        qemu_build_not_reached();
    }
}

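/*
 * Illustrative sketch (not part of the original source): store_memop is
 * QEMU_ALWAYS_INLINE and its call sites pass a compile-time-constant
 * MemOp, so the switch folds to a single host store, while
 * qemu_build_not_reached() turns an invalid 'op' into a build error:
 */
#if 0   /* example only, not built */
static void example_store_u16_be(void *haddr, uint64_t val)
{
    store_memop(haddr, val, MO_BEUW);   /* folds to just stw_be_p() */
}
#endif
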
static void full_stb_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                         MemOpIdx oi, uintptr_t retaddr);

static void __attribute__((noinline))
store_helper_unaligned(CPUArchState *env, target_ulong addr, uint64_t val,
                       uintptr_t retaddr, size_t size, uintptr_t mmu_idx,
                       bool big_endian)
{
    const size_t tlb_off = offsetof(CPUTLBEntry, addr_write);
    uintptr_t index, index2;
    CPUTLBEntry *entry, *entry2;
    target_ulong page1, page2, tlb_addr, tlb_addr2;
    MemOpIdx oi;
    size_t size2;
    int i;

    /*
     * Ensure the second page is in the TLB.  Note that the first page
     * is already guaranteed to be filled, and that the second page
     * cannot evict the first.  An exception to this rule is PAGE_WRITE_INV
     * handling: the first page could have evicted itself.
     */
    page1 = addr & TARGET_PAGE_MASK;
    page2 = (addr + size) & TARGET_PAGE_MASK;
    size2 = (addr + size) & ~TARGET_PAGE_MASK;
    index2 = tlb_index(env, mmu_idx, page2);
    entry2 = tlb_entry(env, mmu_idx, page2);

    tlb_addr2 = tlb_addr_write(entry2);
    if (page1 != page2 && !tlb_hit_page(tlb_addr2, page2)) {
        if (!victim_tlb_hit(env, mmu_idx, index2, tlb_off, page2)) {
            tlb_fill(env_cpu(env), page2, size2, MMU_DATA_STORE,
                     mmu_idx, retaddr);
            index2 = tlb_index(env, mmu_idx, page2);
            entry2 = tlb_entry(env, mmu_idx, page2);
        }
        tlb_addr2 = tlb_addr_write(entry2);
    }

    index = tlb_index(env, mmu_idx, addr);
    entry = tlb_entry(env, mmu_idx, addr);
    tlb_addr = tlb_addr_write(entry);

    /*
     * Handle watchpoints.  Since this may trap, all checks
     * must happen before any store.
     */
    if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
        cpu_check_watchpoint(env_cpu(env), addr, size - size2,
                             env_tlb(env)->d[mmu_idx].fulltlb[index].attrs,
                             BP_MEM_WRITE, retaddr);
    }
    if (unlikely(tlb_addr2 & TLB_WATCHPOINT)) {
        cpu_check_watchpoint(env_cpu(env), page2, size2,
                             env_tlb(env)->d[mmu_idx].fulltlb[index2].attrs,
                             BP_MEM_WRITE, retaddr);
    }

    /*
     * XXX: not efficient, but simple.
     * This loop must go in the forward direction to avoid issues
     * with self-modifying code in Windows 64-bit.
     */
    oi = make_memop_idx(MO_UB, mmu_idx);
    if (big_endian) {
        for (i = 0; i < size; ++i) {
            /* Big-endian extract.  */
            uint8_t val8 = val >> (((size - 1) * 8) - (i * 8));
            full_stb_mmu(env, addr + i, val8, oi, retaddr);
        }
    } else {
        for (i = 0; i < size; ++i) {
            /* Little-endian extract.  */
            uint8_t val8 = val >> (i * 8);
            full_stb_mmu(env, addr + i, val8, oi, retaddr);
        }
    }
}

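/*
 * Worked example (not part of the original source): for size == 4 and
 * val == 0x11223344, the big-endian loop shifts by 24, 16, 8, 0 and
 * stores 0x11 0x22 0x33 0x44 at addr..addr+3, while the little-endian
 * loop shifts by 0, 8, 16, 24 and stores 0x44 0x33 0x22 0x11.
 */
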
static inline void QEMU_ALWAYS_INLINE
store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
             MemOpIdx oi, uintptr_t retaddr, MemOp op)
{
    const size_t tlb_off = offsetof(CPUTLBEntry, addr_write);
    const unsigned a_bits = get_alignment_bits(get_memop(oi));
    const size_t size = memop_size(op);
    uintptr_t mmu_idx = get_mmuidx(oi);
    uintptr_t index;
    CPUTLBEntry *entry;
    target_ulong tlb_addr;
    void *haddr;

    tcg_debug_assert(mmu_idx < NB_MMU_MODES);

    /* Handle CPU specific unaligned behaviour */
    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    index = tlb_index(env, mmu_idx, addr);
    entry = tlb_entry(env, mmu_idx, addr);
    tlb_addr = tlb_addr_write(entry);

    /* If the TLB entry is for a different page, reload and try again. */
    if (!tlb_hit(tlb_addr, addr)) {
        if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
                            addr & TARGET_PAGE_MASK)) {
            tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE,
                     mmu_idx, retaddr);
            index = tlb_index(env, mmu_idx, addr);
            entry = tlb_entry(env, mmu_idx, addr);
        }
        tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK;
    }

    /* Handle anything that isn't just a straight memory access. */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUTLBEntryFull *full;
        bool need_swap;

        /* For anything that is unaligned, recurse through byte stores.  */
        if ((addr & (size - 1)) != 0) {
            goto do_unaligned_access;
        }

        full = &env_tlb(env)->d[mmu_idx].fulltlb[index];

        /* Handle watchpoints.  */
        if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
            /* On watchpoint hit, this will longjmp out.  */
            cpu_check_watchpoint(env_cpu(env), addr, size,
                                 full->attrs, BP_MEM_WRITE, retaddr);
        }

        need_swap = size > 1 && (tlb_addr & TLB_BSWAP);

        /* Handle I/O access.  */
        if (tlb_addr & TLB_MMIO) {
            io_writex(env, full, mmu_idx, val, addr, retaddr,
                      op ^ (need_swap * MO_BSWAP));
            return;
        }

        /* Ignore writes to ROM.  */
        if (unlikely(tlb_addr & TLB_DISCARD_WRITE)) {
            return;
        }

        /* Handle clean RAM pages.  */
        if (tlb_addr & TLB_NOTDIRTY) {
            notdirty_write(env_cpu(env), addr, size, full, retaddr);
        }

        haddr = (void *)((uintptr_t)addr + entry->addend);

        /*
         * Keep these two store_memop separate to ensure that the compiler
         * is able to fold the entire function to a single instruction.
         * There is a build-time assert inside to remind you of this.  ;-)
         */
        if (unlikely(need_swap)) {
            store_memop(haddr, val, op ^ MO_BSWAP);
        } else {
            store_memop(haddr, val, op);
        }
        return;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (size > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
                    >= TARGET_PAGE_SIZE)) {
    do_unaligned_access:
        store_helper_unaligned(env, addr, val, retaddr, size,
                               mmu_idx, memop_big_endian(op));
        return;
    }

    haddr = (void *)((uintptr_t)addr + entry->addend);
    store_memop(haddr, val, op);
}

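/*
 * Illustrative sketch (not part of the original source): TLB_BSWAP marks
 * a page whose device endianness is opposite to the access, so the slow
 * path flips the byte-swap bit of the MemOp instead of swapping the data:
 */
#if 0   /* example only, not built */
static MemOp example_effective_op(void)
{
    MemOp op = MO_BEUL;                  /* guest access is big-endian */
    bool need_swap = true;               /* page is marked TLB_BSWAP */
    return op ^ (need_swap * MO_BSWAP);  /* == MO_LEUL */
}
#endif
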
static void __attribute__((noinline))
full_stb_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
             MemOpIdx oi, uintptr_t retaddr)
{
    validate_memop(oi, MO_UB);
    store_helper(env, addr, val, oi, retaddr, MO_UB);
}

void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
                        MemOpIdx oi, uintptr_t retaddr)
{
    full_stb_mmu(env, addr, val, oi, retaddr);
}

static void full_le_stw_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                            MemOpIdx oi, uintptr_t retaddr)
{
    validate_memop(oi, MO_LEUW);
    store_helper(env, addr, val, oi, retaddr, MO_LEUW);
}

void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                       MemOpIdx oi, uintptr_t retaddr)
{
    full_le_stw_mmu(env, addr, val, oi, retaddr);
}

static void full_be_stw_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                            MemOpIdx oi, uintptr_t retaddr)
{
    validate_memop(oi, MO_BEUW);
    store_helper(env, addr, val, oi, retaddr, MO_BEUW);
}

void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                       MemOpIdx oi, uintptr_t retaddr)
{
    full_be_stw_mmu(env, addr, val, oi, retaddr);
}

static void full_le_stl_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                            MemOpIdx oi, uintptr_t retaddr)
{
    validate_memop(oi, MO_LEUL);
    store_helper(env, addr, val, oi, retaddr, MO_LEUL);
}

void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                       MemOpIdx oi, uintptr_t retaddr)
{
    full_le_stl_mmu(env, addr, val, oi, retaddr);
}

static void full_be_stl_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                            MemOpIdx oi, uintptr_t retaddr)
{
    validate_memop(oi, MO_BEUL);
    store_helper(env, addr, val, oi, retaddr, MO_BEUL);
}

void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                       MemOpIdx oi, uintptr_t retaddr)
{
    full_be_stl_mmu(env, addr, val, oi, retaddr);
}

void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                       MemOpIdx oi, uintptr_t retaddr)
{
    validate_memop(oi, MO_LEUQ);
    store_helper(env, addr, val, oi, retaddr, MO_LEUQ);
}

void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                       MemOpIdx oi, uintptr_t retaddr)
{
    validate_memop(oi, MO_BEUQ);
    store_helper(env, addr, val, oi, retaddr, MO_BEUQ);
}

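/*
 * Note (not from the original file): each width pairs a static
 * full_*_mmu implementation with a thin public helper_*_mmu wrapper.
 * The static functions are what this file calls internally (e.g.
 * store_helper_unaligned() issues its byte stores via full_stb_mmu),
 * so validate_memop() runs on every path.
 */
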
/*
 * Store Helpers for cpu_ldst.h
 */

typedef void FullStoreHelper(CPUArchState *env, target_ulong addr,
                             uint64_t val, MemOpIdx oi, uintptr_t retaddr);

static inline void cpu_store_helper(CPUArchState *env, target_ulong addr,
                                    uint64_t val, MemOpIdx oi, uintptr_t ra,
                                    FullStoreHelper *full_store)
{
    full_store(env, addr, val, oi, ra);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
                 MemOpIdx oi, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, oi, retaddr, full_stb_mmu);
}

void cpu_stw_be_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                    MemOpIdx oi, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, oi, retaddr, full_be_stw_mmu);
}

void cpu_stl_be_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                    MemOpIdx oi, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, oi, retaddr, full_be_stl_mmu);
}

void cpu_stq_be_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                    MemOpIdx oi, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, oi, retaddr, helper_be_stq_mmu);
}

void cpu_stw_le_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                    MemOpIdx oi, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, oi, retaddr, full_le_stw_mmu);
}

void cpu_stl_le_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                    MemOpIdx oi, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, oi, retaddr, full_le_stl_mmu);
}

void cpu_stq_le_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                    MemOpIdx oi, uintptr_t retaddr)
{
    cpu_store_helper(env, addr, val, oi, retaddr, helper_le_stq_mmu);
}

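/*
 * Illustrative sketch (not part of the original source): the cpu_st*_mmu
 * functions mirror the loads, adding the plugin write callback; a
 * hypothetical caller:
 */
#if 0   /* example only, not built */
static void example_stw_be(CPUArchState *env, target_ulong addr,
                           uint16_t val, int mmu_idx, uintptr_t ra)
{
    MemOpIdx oi = make_memop_idx(MO_BEUW, mmu_idx);
    cpu_stw_be_mmu(env, addr, val, oi, ra);
}
#endif
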
#include "ldst_common.c.inc"

/*
 * First set of functions passes in OI and RETADDR.
 * This makes them callable from other helpers.
 */

#define ATOMIC_NAME(X) \
    glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)

#define ATOMIC_MMU_CLEANUP

#include "atomic_common.c.inc"

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#if HAVE_CMPXCHG128 || HAVE_ATOMIC128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif

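/*
 * Note (not from the original file): each inclusion of atomic_template.h
 * expands the atomic helpers for one width under the ATOMIC_NAME scheme
 * above; with DATA_SIZE 4, for instance, the generated names take the
 * form cpu_atomic_<op>l_<end>_mmu (e.g. cpu_atomic_cmpxchgl_le_mmu).
 */
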
/* Code access functions.  */

static uint64_t full_ldub_code(CPUArchState *env, target_ulong addr,
                               MemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_8, true, full_ldub_code);
}

uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr)
{
    MemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(env, true));
    return full_ldub_code(env, addr, oi, 0);
}

static uint64_t full_lduw_code(CPUArchState *env, target_ulong addr,
                               MemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_TEUW, true, full_lduw_code);
}

uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr)
{
    MemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(env, true));
    return full_lduw_code(env, addr, oi, 0);
}

static uint64_t full_ldl_code(CPUArchState *env, target_ulong addr,
                              MemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_TEUL, true, full_ldl_code);
}

uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr)
{
    MemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, true));
    return full_ldl_code(env, addr, oi, 0);
}

static uint64_t full_ldq_code(CPUArchState *env, target_ulong addr,
                              MemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_TEUQ, true, full_ldq_code);
}

uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr)
{
    MemOpIdx oi = make_memop_idx(MO_TEUQ, cpu_mmu_index(env, true));
    return full_ldq_code(env, addr, oi, 0);
}
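
/*
 * Illustrative sketch (not part of the original source): target
 * translators typically fetch guest instructions through these, e.g. a
 * fixed-width 32-bit decoder ('pc' is a hypothetical guest address):
 */
#if 0   /* example only, not built */
static uint32_t example_fetch_insn(CPUArchState *env, target_ulong pc)
{
    return cpu_ldl_code(env, pc);   /* MO_TEUL follows target endianness */
}
#endif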