1d9bb58e5SYang Zhong /* 2d9bb58e5SYang Zhong * Common CPU TLB handling 3d9bb58e5SYang Zhong * 4d9bb58e5SYang Zhong * Copyright (c) 2003 Fabrice Bellard 5d9bb58e5SYang Zhong * 6d9bb58e5SYang Zhong * This library is free software; you can redistribute it and/or 7d9bb58e5SYang Zhong * modify it under the terms of the GNU Lesser General Public 8d9bb58e5SYang Zhong * License as published by the Free Software Foundation; either 9fb0343d5SThomas Huth * version 2.1 of the License, or (at your option) any later version. 10d9bb58e5SYang Zhong * 11d9bb58e5SYang Zhong * This library is distributed in the hope that it will be useful, 12d9bb58e5SYang Zhong * but WITHOUT ANY WARRANTY; without even the implied warranty of 13d9bb58e5SYang Zhong * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14d9bb58e5SYang Zhong * Lesser General Public License for more details. 15d9bb58e5SYang Zhong * 16d9bb58e5SYang Zhong * You should have received a copy of the GNU Lesser General Public 17d9bb58e5SYang Zhong * License along with this library; if not, see <http://www.gnu.org/licenses/>. 18d9bb58e5SYang Zhong */ 19d9bb58e5SYang Zhong 20d9bb58e5SYang Zhong #include "qemu/osdep.h" 21d9bb58e5SYang Zhong #include "qemu/main-loop.h" 2278271684SClaudio Fontana #include "hw/core/tcg-cpu-ops.h" 23d9bb58e5SYang Zhong #include "exec/exec-all.h" 24d9bb58e5SYang Zhong #include "exec/memory.h" 25d9bb58e5SYang Zhong #include "exec/cpu_ldst.h" 26d9bb58e5SYang Zhong #include "exec/cputlb.h" 27d9bb58e5SYang Zhong #include "exec/memory-internal.h" 28d9bb58e5SYang Zhong #include "exec/ram_addr.h" 29d9bb58e5SYang Zhong #include "tcg/tcg.h" 30d9bb58e5SYang Zhong #include "qemu/error-report.h" 31d9bb58e5SYang Zhong #include "exec/log.h" 32c213ee2dSRichard Henderson #include "exec/helper-proto-common.h" 33d9bb58e5SYang Zhong #include "qemu/atomic.h" 34e6cd4bb5SRichard Henderson #include "qemu/atomic128.h" 353b9bd3f4SPaolo Bonzini #include "exec/translate-all.h" 3651807763SPhilippe Mathieu-Daudé #include "trace.h" 37e5ceadffSPhilippe Mathieu-Daudé #include "tb-hash.h" 3865269192SPhilippe Mathieu-Daudé #include "internal.h" 39235537faSAlex Bennée #ifdef CONFIG_PLUGIN 40235537faSAlex Bennée #include "qemu/plugin-memory.h" 41235537faSAlex Bennée #endif 42d2ba8026SRichard Henderson #include "tcg/tcg-ldst.h" 4370f168f8SRichard Henderson #include "tcg/oversized-guest.h" 44d9bb58e5SYang Zhong 45d9bb58e5SYang Zhong /* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */ 46d9bb58e5SYang Zhong /* #define DEBUG_TLB */ 47d9bb58e5SYang Zhong /* #define DEBUG_TLB_LOG */ 48d9bb58e5SYang Zhong 49d9bb58e5SYang Zhong #ifdef DEBUG_TLB 50d9bb58e5SYang Zhong # define DEBUG_TLB_GATE 1 51d9bb58e5SYang Zhong # ifdef DEBUG_TLB_LOG 52d9bb58e5SYang Zhong # define DEBUG_TLB_LOG_GATE 1 53d9bb58e5SYang Zhong # else 54d9bb58e5SYang Zhong # define DEBUG_TLB_LOG_GATE 0 55d9bb58e5SYang Zhong # endif 56d9bb58e5SYang Zhong #else 57d9bb58e5SYang Zhong # define DEBUG_TLB_GATE 0 58d9bb58e5SYang Zhong # define DEBUG_TLB_LOG_GATE 0 59d9bb58e5SYang Zhong #endif 60d9bb58e5SYang Zhong 61d9bb58e5SYang Zhong #define tlb_debug(fmt, ...) do { \ 62d9bb58e5SYang Zhong if (DEBUG_TLB_LOG_GATE) { \ 63d9bb58e5SYang Zhong qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \ 64d9bb58e5SYang Zhong ## __VA_ARGS__); \ 65d9bb58e5SYang Zhong } else if (DEBUG_TLB_GATE) { \ 66d9bb58e5SYang Zhong fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \ 67d9bb58e5SYang Zhong } \ 68d9bb58e5SYang Zhong } while (0) 69d9bb58e5SYang Zhong 70ea9025cbSEmilio G. 
Cota #define assert_cpu_is_self(cpu) do { \ 71d9bb58e5SYang Zhong if (DEBUG_TLB_GATE) { \ 72ea9025cbSEmilio G. Cota g_assert(!(cpu)->created || qemu_cpu_is_self(cpu)); \ 73d9bb58e5SYang Zhong } \ 74d9bb58e5SYang Zhong } while (0) 75d9bb58e5SYang Zhong 76d9bb58e5SYang Zhong /* run_on_cpu_data.target_ptr should always be big enough for a 77e79f8142SAnton Johansson * vaddr even on 32 bit builds 78e79f8142SAnton Johansson */ 79e79f8142SAnton Johansson QEMU_BUILD_BUG_ON(sizeof(vaddr) > sizeof(run_on_cpu_data)); 80d9bb58e5SYang Zhong 81d9bb58e5SYang Zhong /* We currently can't handle more than 16 bits in the MMUIDX bitmask. 82d9bb58e5SYang Zhong */ 83d9bb58e5SYang Zhong QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16); 84d9bb58e5SYang Zhong #define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1) 85d9bb58e5SYang Zhong 86722a1c1eSRichard Henderson static inline size_t tlb_n_entries(CPUTLBDescFast *fast) 877a1efe1bSRichard Henderson { 88722a1c1eSRichard Henderson return (fast->mask >> CPU_TLB_ENTRY_BITS) + 1; 897a1efe1bSRichard Henderson } 907a1efe1bSRichard Henderson 91722a1c1eSRichard Henderson static inline size_t sizeof_tlb(CPUTLBDescFast *fast) 9286e1eff8SEmilio G. Cota { 93722a1c1eSRichard Henderson return fast->mask + (1 << CPU_TLB_ENTRY_BITS); 9486e1eff8SEmilio G. Cota } 9586e1eff8SEmilio G. Cota 9679e42085SRichard Henderson static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns, 9786e1eff8SEmilio G. Cota size_t max_entries) 9886e1eff8SEmilio G. Cota { 9979e42085SRichard Henderson desc->window_begin_ns = ns; 10079e42085SRichard Henderson desc->window_max_entries = max_entries; 10186e1eff8SEmilio G. Cota } 10286e1eff8SEmilio G. Cota 10306f3831cSAnton Johansson static void tb_jmp_cache_clear_page(CPUState *cpu, vaddr page_addr) 1040f4abea8SRichard Henderson { 105a976a99aSRichard Henderson CPUJumpCache *jc = cpu->tb_jmp_cache; 10699ab4d50SEric Auger int i, i0; 1070f4abea8SRichard Henderson 10899ab4d50SEric Auger if (unlikely(!jc)) { 10999ab4d50SEric Auger return; 11099ab4d50SEric Auger } 11199ab4d50SEric Auger 11299ab4d50SEric Auger i0 = tb_jmp_cache_hash_page(page_addr); 1130f4abea8SRichard Henderson for (i = 0; i < TB_JMP_PAGE_SIZE; i++) { 114a976a99aSRichard Henderson qatomic_set(&jc->array[i0 + i].tb, NULL); 1150f4abea8SRichard Henderson } 1160f4abea8SRichard Henderson } 1170f4abea8SRichard Henderson 11886e1eff8SEmilio G. Cota /** 11986e1eff8SEmilio G. Cota * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary 12071ccd47bSRichard Henderson * @desc: The CPUTLBDesc portion of the TLB 12171ccd47bSRichard Henderson * @fast: The CPUTLBDescFast portion of the same TLB 12286e1eff8SEmilio G. Cota * 12386e1eff8SEmilio G. Cota * Called with tlb_lock_held. 12486e1eff8SEmilio G. Cota * 12586e1eff8SEmilio G. Cota * We have two main constraints when resizing a TLB: (1) we only resize it 12686e1eff8SEmilio G. Cota * on a TLB flush (otherwise we'd have to take a perf hit by either rehashing 12786e1eff8SEmilio G. Cota * the array or unnecessarily flushing it), which means we do not control how 12886e1eff8SEmilio G. Cota * frequently the resizing can occur; (2) we don't have access to the guest's 12986e1eff8SEmilio G. Cota * future scheduling decisions, and therefore have to decide the magnitude of 13086e1eff8SEmilio G. Cota * the resize based on past observations. 13186e1eff8SEmilio G. Cota * 13286e1eff8SEmilio G. Cota * In general, a memory-hungry process can benefit greatly from an appropriately 13386e1eff8SEmilio G. Cota * sized TLB, since a guest TLB miss is very expensive. 
This doesn't mean that 13486e1eff8SEmilio G. Cota * we just have to make the TLB as large as possible; while an oversized TLB 13586e1eff8SEmilio G. Cota * results in minimal TLB miss rates, it also takes longer to be flushed 13686e1eff8SEmilio G. Cota * (flushes can be _very_ frequent), and the reduced locality can also hurt 13786e1eff8SEmilio G. Cota * performance. 13886e1eff8SEmilio G. Cota * 13986e1eff8SEmilio G. Cota * To achieve near-optimal performance for all kinds of workloads, we: 14086e1eff8SEmilio G. Cota * 14186e1eff8SEmilio G. Cota * 1. Aggressively increase the size of the TLB when the use rate of the 14286e1eff8SEmilio G. Cota * TLB being flushed is high, since it is likely that in the near future this 14386e1eff8SEmilio G. Cota * memory-hungry process will execute again, and its memory hungriness will 14486e1eff8SEmilio G. Cota * probably be similar. 14586e1eff8SEmilio G. Cota * 14686e1eff8SEmilio G. Cota * 2. Slowly reduce the size of the TLB as the use rate declines over a 14786e1eff8SEmilio G. Cota * reasonably large time window. The rationale is that if in such a time window 14886e1eff8SEmilio G. Cota * we have not observed a high TLB use rate, it is likely that we won't observe 14986e1eff8SEmilio G. Cota * it in the near future. In that case, once a time window expires we downsize 15086e1eff8SEmilio G. Cota * the TLB to match the maximum use rate observed in the window. 15186e1eff8SEmilio G. Cota * 15286e1eff8SEmilio G. Cota * 3. Try to keep the maximum use rate in a time window in the 30-70% range, 15386e1eff8SEmilio G. Cota * since in that range performance is likely near-optimal. Recall that the TLB 15486e1eff8SEmilio G. Cota * is direct mapped, so we want the use rate to be low (or at least not too 15586e1eff8SEmilio G. Cota * high), since otherwise we are likely to have a significant amount of 15686e1eff8SEmilio G. Cota * conflict misses. 15786e1eff8SEmilio G. Cota */ 1583c3959f2SRichard Henderson static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast, 1593c3959f2SRichard Henderson int64_t now) 16086e1eff8SEmilio G. Cota { 16171ccd47bSRichard Henderson size_t old_size = tlb_n_entries(fast); 16286e1eff8SEmilio G. Cota size_t rate; 16386e1eff8SEmilio G. Cota size_t new_size = old_size; 16486e1eff8SEmilio G. Cota int64_t window_len_ms = 100; 16586e1eff8SEmilio G. Cota int64_t window_len_ns = window_len_ms * 1000 * 1000; 16679e42085SRichard Henderson bool window_expired = now > desc->window_begin_ns + window_len_ns; 16786e1eff8SEmilio G. Cota 16879e42085SRichard Henderson if (desc->n_used_entries > desc->window_max_entries) { 16979e42085SRichard Henderson desc->window_max_entries = desc->n_used_entries; 17086e1eff8SEmilio G. Cota } 17179e42085SRichard Henderson rate = desc->window_max_entries * 100 / old_size; 17286e1eff8SEmilio G. Cota 17386e1eff8SEmilio G. Cota if (rate > 70) { 17486e1eff8SEmilio G. Cota new_size = MIN(old_size << 1, 1 << CPU_TLB_DYN_MAX_BITS); 17586e1eff8SEmilio G. Cota } else if (rate < 30 && window_expired) { 17679e42085SRichard Henderson size_t ceil = pow2ceil(desc->window_max_entries); 17779e42085SRichard Henderson size_t expected_rate = desc->window_max_entries * 100 / ceil; 17886e1eff8SEmilio G. Cota 17986e1eff8SEmilio G. Cota /* 18086e1eff8SEmilio G. Cota * Avoid undersizing when the max number of entries seen is just below 18186e1eff8SEmilio G. Cota * a pow2. For instance, if max_entries == 1025, the expected use rate 18286e1eff8SEmilio G. Cota * would be 1025/2048==50%. 
However, if max_entries == 1023, we'd get 18386e1eff8SEmilio G. Cota * 1023/1024==99.9% use rate, so we'd likely end up doubling the size 18486e1eff8SEmilio G. Cota * later. Thus, make sure that the expected use rate remains below 70%. 18586e1eff8SEmilio G. Cota * (and since we double the size, that means the lowest rate we'd 18686e1eff8SEmilio G. Cota * expect to get is 35%, which is still in the 30-70% range where 18786e1eff8SEmilio G. Cota * we consider that the size is appropriate.) 18886e1eff8SEmilio G. Cota */ 18986e1eff8SEmilio G. Cota if (expected_rate > 70) { 19086e1eff8SEmilio G. Cota ceil *= 2; 19186e1eff8SEmilio G. Cota } 19286e1eff8SEmilio G. Cota new_size = MAX(ceil, 1 << CPU_TLB_DYN_MIN_BITS); 19386e1eff8SEmilio G. Cota } 19486e1eff8SEmilio G. Cota 19586e1eff8SEmilio G. Cota if (new_size == old_size) { 19686e1eff8SEmilio G. Cota if (window_expired) { 19779e42085SRichard Henderson tlb_window_reset(desc, now, desc->n_used_entries); 19886e1eff8SEmilio G. Cota } 19986e1eff8SEmilio G. Cota return; 20086e1eff8SEmilio G. Cota } 20186e1eff8SEmilio G. Cota 20271ccd47bSRichard Henderson g_free(fast->table); 20325d3ec58SRichard Henderson g_free(desc->fulltlb); 20486e1eff8SEmilio G. Cota 20579e42085SRichard Henderson tlb_window_reset(desc, now, 0); 20686e1eff8SEmilio G. Cota /* desc->n_used_entries is cleared by the caller */ 20771ccd47bSRichard Henderson fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS; 20871ccd47bSRichard Henderson fast->table = g_try_new(CPUTLBEntry, new_size); 20925d3ec58SRichard Henderson desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size); 21071ccd47bSRichard Henderson 21186e1eff8SEmilio G. Cota /* 21286e1eff8SEmilio G. Cota * If the allocations fail, try smaller sizes. We just freed some 21386e1eff8SEmilio G. Cota * memory, so going back to half of new_size has a good chance of working. 21486e1eff8SEmilio G. Cota * Increased memory pressure elsewhere in the system might cause the 21586e1eff8SEmilio G. Cota * allocations to fail though, so we progressively reduce the allocation 21686e1eff8SEmilio G. Cota * size, aborting if we cannot even allocate the smallest TLB we support. 21786e1eff8SEmilio G. Cota */ 21825d3ec58SRichard Henderson while (fast->table == NULL || desc->fulltlb == NULL) { 21986e1eff8SEmilio G. Cota if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) { 22086e1eff8SEmilio G. Cota error_report("%s: %s", __func__, strerror(errno)); 22186e1eff8SEmilio G. Cota abort(); 22286e1eff8SEmilio G. Cota } 22386e1eff8SEmilio G. Cota new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS); 22471ccd47bSRichard Henderson fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS; 22586e1eff8SEmilio G. Cota 22671ccd47bSRichard Henderson g_free(fast->table); 22725d3ec58SRichard Henderson g_free(desc->fulltlb); 22871ccd47bSRichard Henderson fast->table = g_try_new(CPUTLBEntry, new_size); 22925d3ec58SRichard Henderson desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size); 23086e1eff8SEmilio G. Cota } 23186e1eff8SEmilio G. Cota } 23286e1eff8SEmilio G. Cota 233bbf021b0SRichard Henderson static void tlb_mmu_flush_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast) 23486e1eff8SEmilio G. Cota { 2355c948e31SRichard Henderson desc->n_used_entries = 0; 2365c948e31SRichard Henderson desc->large_page_addr = -1; 2375c948e31SRichard Henderson desc->large_page_mask = -1; 2385c948e31SRichard Henderson desc->vindex = 0; 2395c948e31SRichard Henderson memset(fast->table, -1, sizeof_tlb(fast)); 2405c948e31SRichard Henderson memset(desc->vtable, -1, sizeof(desc->vtable)); 24186e1eff8SEmilio G. 
Cota } 24286e1eff8SEmilio G. Cota 2433c3959f2SRichard Henderson static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx, 2443c3959f2SRichard Henderson int64_t now) 245bbf021b0SRichard Henderson { 246bbf021b0SRichard Henderson CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx]; 247bbf021b0SRichard Henderson CPUTLBDescFast *fast = &env_tlb(env)->f[mmu_idx]; 248bbf021b0SRichard Henderson 2493c3959f2SRichard Henderson tlb_mmu_resize_locked(desc, fast, now); 250bbf021b0SRichard Henderson tlb_mmu_flush_locked(desc, fast); 251bbf021b0SRichard Henderson } 252bbf021b0SRichard Henderson 25356e89f76SRichard Henderson static void tlb_mmu_init(CPUTLBDesc *desc, CPUTLBDescFast *fast, int64_t now) 25456e89f76SRichard Henderson { 25556e89f76SRichard Henderson size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS; 25656e89f76SRichard Henderson 25756e89f76SRichard Henderson tlb_window_reset(desc, now, 0); 25856e89f76SRichard Henderson desc->n_used_entries = 0; 25956e89f76SRichard Henderson fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS; 26056e89f76SRichard Henderson fast->table = g_new(CPUTLBEntry, n_entries); 26125d3ec58SRichard Henderson desc->fulltlb = g_new(CPUTLBEntryFull, n_entries); 2623c16304aSRichard Henderson tlb_mmu_flush_locked(desc, fast); 26356e89f76SRichard Henderson } 26456e89f76SRichard Henderson 26586e1eff8SEmilio G. Cota static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx) 26686e1eff8SEmilio G. Cota { 267a40ec84eSRichard Henderson env_tlb(env)->d[mmu_idx].n_used_entries++; 26886e1eff8SEmilio G. Cota } 26986e1eff8SEmilio G. Cota 27086e1eff8SEmilio G. Cota static inline void tlb_n_used_entries_dec(CPUArchState *env, uintptr_t mmu_idx) 27186e1eff8SEmilio G. Cota { 272a40ec84eSRichard Henderson env_tlb(env)->d[mmu_idx].n_used_entries--; 27386e1eff8SEmilio G. Cota } 27486e1eff8SEmilio G. Cota 2755005e253SEmilio G. Cota void tlb_init(CPUState *cpu) 2765005e253SEmilio G. Cota { 27771aec354SEmilio G. Cota CPUArchState *env = cpu->env_ptr; 27856e89f76SRichard Henderson int64_t now = get_clock_realtime(); 27956e89f76SRichard Henderson int i; 28071aec354SEmilio G. Cota 281a40ec84eSRichard Henderson qemu_spin_init(&env_tlb(env)->c.lock); 2823d1523ceSRichard Henderson 2833c16304aSRichard Henderson /* All tlbs are initialized flushed. */ 2843c16304aSRichard Henderson env_tlb(env)->c.dirty = 0; 28586e1eff8SEmilio G. Cota 28656e89f76SRichard Henderson for (i = 0; i < NB_MMU_MODES; i++) { 28756e89f76SRichard Henderson tlb_mmu_init(&env_tlb(env)->d[i], &env_tlb(env)->f[i], now); 28856e89f76SRichard Henderson } 2895005e253SEmilio G. Cota } 2905005e253SEmilio G. Cota 291816d9be5SEmilio G. Cota void tlb_destroy(CPUState *cpu) 292816d9be5SEmilio G. Cota { 293816d9be5SEmilio G. Cota CPUArchState *env = cpu->env_ptr; 294816d9be5SEmilio G. Cota int i; 295816d9be5SEmilio G. Cota 296816d9be5SEmilio G. Cota qemu_spin_destroy(&env_tlb(env)->c.lock); 297816d9be5SEmilio G. Cota for (i = 0; i < NB_MMU_MODES; i++) { 298816d9be5SEmilio G. Cota CPUTLBDesc *desc = &env_tlb(env)->d[i]; 299816d9be5SEmilio G. Cota CPUTLBDescFast *fast = &env_tlb(env)->f[i]; 300816d9be5SEmilio G. Cota 301816d9be5SEmilio G. Cota g_free(fast->table); 30225d3ec58SRichard Henderson g_free(desc->fulltlb); 303816d9be5SEmilio G. Cota } 304816d9be5SEmilio G. Cota } 305816d9be5SEmilio G. 
Cota 306d9bb58e5SYang Zhong /* flush_all_helper: run fn across all cpus 307d9bb58e5SYang Zhong * 308d9bb58e5SYang Zhong * If the wait flag is set then the src cpu's helper will be queued as 309d9bb58e5SYang Zhong * "safe" work and the loop exited creating a synchronisation point 310d9bb58e5SYang Zhong * where all queued work will be finished before execution starts 311d9bb58e5SYang Zhong * again. 312d9bb58e5SYang Zhong */ 313d9bb58e5SYang Zhong static void flush_all_helper(CPUState *src, run_on_cpu_func fn, 314d9bb58e5SYang Zhong run_on_cpu_data d) 315d9bb58e5SYang Zhong { 316d9bb58e5SYang Zhong CPUState *cpu; 317d9bb58e5SYang Zhong 318d9bb58e5SYang Zhong CPU_FOREACH(cpu) { 319d9bb58e5SYang Zhong if (cpu != src) { 320d9bb58e5SYang Zhong async_run_on_cpu(cpu, fn, d); 321d9bb58e5SYang Zhong } 322d9bb58e5SYang Zhong } 323d9bb58e5SYang Zhong } 324d9bb58e5SYang Zhong 325e09de0a2SRichard Henderson void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide) 32683974cf4SEmilio G. Cota { 32783974cf4SEmilio G. Cota CPUState *cpu; 328e09de0a2SRichard Henderson size_t full = 0, part = 0, elide = 0; 32983974cf4SEmilio G. Cota 33083974cf4SEmilio G. Cota CPU_FOREACH(cpu) { 33183974cf4SEmilio G. Cota CPUArchState *env = cpu->env_ptr; 33283974cf4SEmilio G. Cota 333d73415a3SStefan Hajnoczi full += qatomic_read(&env_tlb(env)->c.full_flush_count); 334d73415a3SStefan Hajnoczi part += qatomic_read(&env_tlb(env)->c.part_flush_count); 335d73415a3SStefan Hajnoczi elide += qatomic_read(&env_tlb(env)->c.elide_flush_count); 33683974cf4SEmilio G. Cota } 337e09de0a2SRichard Henderson *pfull = full; 338e09de0a2SRichard Henderson *ppart = part; 339e09de0a2SRichard Henderson *pelide = elide; 34083974cf4SEmilio G. Cota } 341d9bb58e5SYang Zhong 342d9bb58e5SYang Zhong static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data) 343d9bb58e5SYang Zhong { 344d9bb58e5SYang Zhong CPUArchState *env = cpu->env_ptr; 3453d1523ceSRichard Henderson uint16_t asked = data.host_int; 3463d1523ceSRichard Henderson uint16_t all_dirty, work, to_clean; 3473c3959f2SRichard Henderson int64_t now = get_clock_realtime(); 348d9bb58e5SYang Zhong 349d9bb58e5SYang Zhong assert_cpu_is_self(cpu); 350d9bb58e5SYang Zhong 3513d1523ceSRichard Henderson tlb_debug("mmu_idx:0x%04" PRIx16 "\n", asked); 352d9bb58e5SYang Zhong 353a40ec84eSRichard Henderson qemu_spin_lock(&env_tlb(env)->c.lock); 35460a2ad7dSRichard Henderson 355a40ec84eSRichard Henderson all_dirty = env_tlb(env)->c.dirty; 3563d1523ceSRichard Henderson to_clean = asked & all_dirty; 3573d1523ceSRichard Henderson all_dirty &= ~to_clean; 358a40ec84eSRichard Henderson env_tlb(env)->c.dirty = all_dirty; 3593d1523ceSRichard Henderson 3603d1523ceSRichard Henderson for (work = to_clean; work != 0; work &= work - 1) { 3613d1523ceSRichard Henderson int mmu_idx = ctz32(work); 3623c3959f2SRichard Henderson tlb_flush_one_mmuidx_locked(env, mmu_idx, now); 363d9bb58e5SYang Zhong } 3643d1523ceSRichard Henderson 365a40ec84eSRichard Henderson qemu_spin_unlock(&env_tlb(env)->c.lock); 366d9bb58e5SYang Zhong 367a976a99aSRichard Henderson tcg_flush_jmp_cache(cpu); 36864f2674bSRichard Henderson 3693d1523ceSRichard Henderson if (to_clean == ALL_MMUIDX_BITS) { 370d73415a3SStefan Hajnoczi qatomic_set(&env_tlb(env)->c.full_flush_count, 371a40ec84eSRichard Henderson env_tlb(env)->c.full_flush_count + 1); 372e09de0a2SRichard Henderson } else { 373d73415a3SStefan Hajnoczi qatomic_set(&env_tlb(env)->c.part_flush_count, 374a40ec84eSRichard Henderson env_tlb(env)->c.part_flush_count + ctpop16(to_clean)); 
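        /* mmu_idx bits that were requested in @asked but already clean
           are counted as elided flushes rather than partial flushes. */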
3753d1523ceSRichard Henderson if (to_clean != asked) { 376d73415a3SStefan Hajnoczi qatomic_set(&env_tlb(env)->c.elide_flush_count, 377a40ec84eSRichard Henderson env_tlb(env)->c.elide_flush_count + 3783d1523ceSRichard Henderson ctpop16(asked & ~to_clean)); 3793d1523ceSRichard Henderson } 38064f2674bSRichard Henderson } 381d9bb58e5SYang Zhong } 382d9bb58e5SYang Zhong 383d9bb58e5SYang Zhong void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap) 384d9bb58e5SYang Zhong { 385d9bb58e5SYang Zhong tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap); 386d9bb58e5SYang Zhong 38764f2674bSRichard Henderson if (cpu->created && !qemu_cpu_is_self(cpu)) { 388d9bb58e5SYang Zhong async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work, 389ab651105SRichard Henderson RUN_ON_CPU_HOST_INT(idxmap)); 390d9bb58e5SYang Zhong } else { 39160a2ad7dSRichard Henderson tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap)); 392d9bb58e5SYang Zhong } 393d9bb58e5SYang Zhong } 394d9bb58e5SYang Zhong 39564f2674bSRichard Henderson void tlb_flush(CPUState *cpu) 39664f2674bSRichard Henderson { 39764f2674bSRichard Henderson tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS); 39864f2674bSRichard Henderson } 39964f2674bSRichard Henderson 400d9bb58e5SYang Zhong void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap) 401d9bb58e5SYang Zhong { 402d9bb58e5SYang Zhong const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work; 403d9bb58e5SYang Zhong 404d9bb58e5SYang Zhong tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap); 405d9bb58e5SYang Zhong 406d9bb58e5SYang Zhong flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap)); 407d9bb58e5SYang Zhong fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap)); 408d9bb58e5SYang Zhong } 409d9bb58e5SYang Zhong 41064f2674bSRichard Henderson void tlb_flush_all_cpus(CPUState *src_cpu) 41164f2674bSRichard Henderson { 41264f2674bSRichard Henderson tlb_flush_by_mmuidx_all_cpus(src_cpu, ALL_MMUIDX_BITS); 41364f2674bSRichard Henderson } 41464f2674bSRichard Henderson 41564f2674bSRichard Henderson void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap) 416d9bb58e5SYang Zhong { 417d9bb58e5SYang Zhong const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work; 418d9bb58e5SYang Zhong 419d9bb58e5SYang Zhong tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap); 420d9bb58e5SYang Zhong 421d9bb58e5SYang Zhong flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap)); 422d9bb58e5SYang Zhong async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap)); 423d9bb58e5SYang Zhong } 424d9bb58e5SYang Zhong 42564f2674bSRichard Henderson void tlb_flush_all_cpus_synced(CPUState *src_cpu) 42664f2674bSRichard Henderson { 42764f2674bSRichard Henderson tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS); 42864f2674bSRichard Henderson } 42964f2674bSRichard Henderson 4303ab6e68cSRichard Henderson static bool tlb_hit_page_mask_anyprot(CPUTLBEntry *tlb_entry, 431732d5487SAnton Johansson vaddr page, vaddr mask) 4323ab6e68cSRichard Henderson { 4333ab6e68cSRichard Henderson page &= mask; 4343ab6e68cSRichard Henderson mask &= TARGET_PAGE_MASK | TLB_INVALID_MASK; 4353ab6e68cSRichard Henderson 4363ab6e68cSRichard Henderson return (page == (tlb_entry->addr_read & mask) || 4373ab6e68cSRichard Henderson page == (tlb_addr_write(tlb_entry) & mask) || 4383ab6e68cSRichard Henderson page == (tlb_entry->addr_code & mask)); 4393ab6e68cSRichard Henderson } 4403ab6e68cSRichard Henderson 441732d5487SAnton Johansson static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry, vaddr page) 442d9bb58e5SYang Zhong { 4433ab6e68cSRichard Henderson 
return tlb_hit_page_mask_anyprot(tlb_entry, page, -1); 44468fea038SRichard Henderson } 44568fea038SRichard Henderson 4463cea94bbSEmilio G. Cota /** 4473cea94bbSEmilio G. Cota * tlb_entry_is_empty - return true if the entry is not in use 4483cea94bbSEmilio G. Cota * @te: pointer to CPUTLBEntry 4493cea94bbSEmilio G. Cota */ 4503cea94bbSEmilio G. Cota static inline bool tlb_entry_is_empty(const CPUTLBEntry *te) 4513cea94bbSEmilio G. Cota { 4523cea94bbSEmilio G. Cota return te->addr_read == -1 && te->addr_write == -1 && te->addr_code == -1; 4533cea94bbSEmilio G. Cota } 4543cea94bbSEmilio G. Cota 45553d28455SRichard Henderson /* Called with tlb_c.lock held */ 4563ab6e68cSRichard Henderson static bool tlb_flush_entry_mask_locked(CPUTLBEntry *tlb_entry, 457732d5487SAnton Johansson vaddr page, 458732d5487SAnton Johansson vaddr mask) 45968fea038SRichard Henderson { 4603ab6e68cSRichard Henderson if (tlb_hit_page_mask_anyprot(tlb_entry, page, mask)) { 461d9bb58e5SYang Zhong memset(tlb_entry, -1, sizeof(*tlb_entry)); 46286e1eff8SEmilio G. Cota return true; 463d9bb58e5SYang Zhong } 46486e1eff8SEmilio G. Cota return false; 465d9bb58e5SYang Zhong } 466d9bb58e5SYang Zhong 467732d5487SAnton Johansson static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry, vaddr page) 46868fea038SRichard Henderson { 4693ab6e68cSRichard Henderson return tlb_flush_entry_mask_locked(tlb_entry, page, -1); 4703ab6e68cSRichard Henderson } 4713ab6e68cSRichard Henderson 4723ab6e68cSRichard Henderson /* Called with tlb_c.lock held */ 4733ab6e68cSRichard Henderson static void tlb_flush_vtlb_page_mask_locked(CPUArchState *env, int mmu_idx, 474732d5487SAnton Johansson vaddr page, 475732d5487SAnton Johansson vaddr mask) 4763ab6e68cSRichard Henderson { 477a40ec84eSRichard Henderson CPUTLBDesc *d = &env_tlb(env)->d[mmu_idx]; 47868fea038SRichard Henderson int k; 47971aec354SEmilio G. Cota 48029a0af61SRichard Henderson assert_cpu_is_self(env_cpu(env)); 48168fea038SRichard Henderson for (k = 0; k < CPU_VTLB_SIZE; k++) { 4823ab6e68cSRichard Henderson if (tlb_flush_entry_mask_locked(&d->vtable[k], page, mask)) { 48386e1eff8SEmilio G. Cota tlb_n_used_entries_dec(env, mmu_idx); 48486e1eff8SEmilio G. Cota } 48568fea038SRichard Henderson } 48668fea038SRichard Henderson } 48768fea038SRichard Henderson 4883ab6e68cSRichard Henderson static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx, 489732d5487SAnton Johansson vaddr page) 4903ab6e68cSRichard Henderson { 4913ab6e68cSRichard Henderson tlb_flush_vtlb_page_mask_locked(env, mmu_idx, page, -1); 4923ab6e68cSRichard Henderson } 4933ab6e68cSRichard Henderson 494732d5487SAnton Johansson static void tlb_flush_page_locked(CPUArchState *env, int midx, vaddr page) 4951308e026SRichard Henderson { 496732d5487SAnton Johansson vaddr lp_addr = env_tlb(env)->d[midx].large_page_addr; 497732d5487SAnton Johansson vaddr lp_mask = env_tlb(env)->d[midx].large_page_mask; 4981308e026SRichard Henderson 4991308e026SRichard Henderson /* Check if we need to flush due to large pages. */ 5001308e026SRichard Henderson if ((page & lp_mask) == lp_addr) { 5018c605cf1SAnton Johansson tlb_debug("forcing full flush midx %d (%016" 5028c605cf1SAnton Johansson VADDR_PRIx "/%016" VADDR_PRIx ")\n", 5031308e026SRichard Henderson midx, lp_addr, lp_mask); 5043c3959f2SRichard Henderson tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime()); 5051308e026SRichard Henderson } else { 50686e1eff8SEmilio G. Cota if (tlb_flush_entry_locked(tlb_entry(env, midx, page), page)) { 50786e1eff8SEmilio G. 
Cota tlb_n_used_entries_dec(env, midx); 50886e1eff8SEmilio G. Cota } 5091308e026SRichard Henderson tlb_flush_vtlb_page_locked(env, midx, page); 5101308e026SRichard Henderson } 5111308e026SRichard Henderson } 5121308e026SRichard Henderson 5137b7d00e0SRichard Henderson /** 5147b7d00e0SRichard Henderson * tlb_flush_page_by_mmuidx_async_0: 5157b7d00e0SRichard Henderson * @cpu: cpu on which to flush 5167b7d00e0SRichard Henderson * @addr: page of virtual address to flush 5177b7d00e0SRichard Henderson * @idxmap: set of mmu_idx to flush 5187b7d00e0SRichard Henderson * 5197b7d00e0SRichard Henderson * Helper for tlb_flush_page_by_mmuidx and friends, flush one page 5207b7d00e0SRichard Henderson * at @addr from the tlbs indicated by @idxmap from @cpu. 521d9bb58e5SYang Zhong */ 5227b7d00e0SRichard Henderson static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu, 523732d5487SAnton Johansson vaddr addr, 5247b7d00e0SRichard Henderson uint16_t idxmap) 525d9bb58e5SYang Zhong { 526d9bb58e5SYang Zhong CPUArchState *env = cpu->env_ptr; 527d9bb58e5SYang Zhong int mmu_idx; 528d9bb58e5SYang Zhong 529d9bb58e5SYang Zhong assert_cpu_is_self(cpu); 530d9bb58e5SYang Zhong 5318c605cf1SAnton Johansson tlb_debug("page addr: %016" VADDR_PRIx " mmu_map:0x%x\n", addr, idxmap); 532d9bb58e5SYang Zhong 533a40ec84eSRichard Henderson qemu_spin_lock(&env_tlb(env)->c.lock); 534d9bb58e5SYang Zhong for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { 5357b7d00e0SRichard Henderson if ((idxmap >> mmu_idx) & 1) { 5361308e026SRichard Henderson tlb_flush_page_locked(env, mmu_idx, addr); 537d9bb58e5SYang Zhong } 538d9bb58e5SYang Zhong } 539a40ec84eSRichard Henderson qemu_spin_unlock(&env_tlb(env)->c.lock); 540d9bb58e5SYang Zhong 5411d41a79bSRichard Henderson /* 5421d41a79bSRichard Henderson * Discard jump cache entries for any tb which might potentially 5431d41a79bSRichard Henderson * overlap the flushed page, which includes the previous. 5441d41a79bSRichard Henderson */ 5451d41a79bSRichard Henderson tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE); 5461d41a79bSRichard Henderson tb_jmp_cache_clear_page(cpu, addr); 547d9bb58e5SYang Zhong } 548d9bb58e5SYang Zhong 5497b7d00e0SRichard Henderson /** 5507b7d00e0SRichard Henderson * tlb_flush_page_by_mmuidx_async_1: 5517b7d00e0SRichard Henderson * @cpu: cpu on which to flush 5527b7d00e0SRichard Henderson * @data: encoded addr + idxmap 5537b7d00e0SRichard Henderson * 5547b7d00e0SRichard Henderson * Helper for tlb_flush_page_by_mmuidx and friends, called through 5557b7d00e0SRichard Henderson * async_run_on_cpu. The idxmap parameter is encoded in the page 5567b7d00e0SRichard Henderson * offset of the target_ptr field. This limits the set of mmu_idx 5577b7d00e0SRichard Henderson * that can be passed via this method. 
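 *
 * As a worked example, assuming TARGET_PAGE_BITS == 12 (4 KiB pages),
 * an encoded target_ptr of 0x402005 decodes below to addr == 0x402000
 * and idxmap == 0x5 (mmu_idx 0 and 2).  The encoder only uses this
 * path when idxmap < TARGET_PAGE_SIZE, so the two fields cannot overlap.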
5587b7d00e0SRichard Henderson */ 5597b7d00e0SRichard Henderson static void tlb_flush_page_by_mmuidx_async_1(CPUState *cpu, 5607b7d00e0SRichard Henderson run_on_cpu_data data) 5617b7d00e0SRichard Henderson { 562732d5487SAnton Johansson vaddr addr_and_idxmap = data.target_ptr; 563732d5487SAnton Johansson vaddr addr = addr_and_idxmap & TARGET_PAGE_MASK; 5647b7d00e0SRichard Henderson uint16_t idxmap = addr_and_idxmap & ~TARGET_PAGE_MASK; 5657b7d00e0SRichard Henderson 5667b7d00e0SRichard Henderson tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap); 5677b7d00e0SRichard Henderson } 5687b7d00e0SRichard Henderson 5697b7d00e0SRichard Henderson typedef struct { 570732d5487SAnton Johansson vaddr addr; 5717b7d00e0SRichard Henderson uint16_t idxmap; 5727b7d00e0SRichard Henderson } TLBFlushPageByMMUIdxData; 5737b7d00e0SRichard Henderson 5747b7d00e0SRichard Henderson /** 5757b7d00e0SRichard Henderson * tlb_flush_page_by_mmuidx_async_2: 5767b7d00e0SRichard Henderson * @cpu: cpu on which to flush 5777b7d00e0SRichard Henderson * @data: allocated addr + idxmap 5787b7d00e0SRichard Henderson * 5797b7d00e0SRichard Henderson * Helper for tlb_flush_page_by_mmuidx and friends, called through 5807b7d00e0SRichard Henderson * async_run_on_cpu. The addr+idxmap parameters are stored in a 5817b7d00e0SRichard Henderson * TLBFlushPageByMMUIdxData structure that has been allocated 5827b7d00e0SRichard Henderson * specifically for this helper. Free the structure when done. 5837b7d00e0SRichard Henderson */ 5847b7d00e0SRichard Henderson static void tlb_flush_page_by_mmuidx_async_2(CPUState *cpu, 5857b7d00e0SRichard Henderson run_on_cpu_data data) 5867b7d00e0SRichard Henderson { 5877b7d00e0SRichard Henderson TLBFlushPageByMMUIdxData *d = data.host_ptr; 5887b7d00e0SRichard Henderson 5897b7d00e0SRichard Henderson tlb_flush_page_by_mmuidx_async_0(cpu, d->addr, d->idxmap); 5907b7d00e0SRichard Henderson g_free(d); 5917b7d00e0SRichard Henderson } 5927b7d00e0SRichard Henderson 593732d5487SAnton Johansson void tlb_flush_page_by_mmuidx(CPUState *cpu, vaddr addr, uint16_t idxmap) 594d9bb58e5SYang Zhong { 5958c605cf1SAnton Johansson tlb_debug("addr: %016" VADDR_PRIx " mmu_idx:%" PRIx16 "\n", addr, idxmap); 596d9bb58e5SYang Zhong 597d9bb58e5SYang Zhong /* This should already be page aligned */ 5987b7d00e0SRichard Henderson addr &= TARGET_PAGE_MASK; 599d9bb58e5SYang Zhong 6007b7d00e0SRichard Henderson if (qemu_cpu_is_self(cpu)) { 6017b7d00e0SRichard Henderson tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap); 6027b7d00e0SRichard Henderson } else if (idxmap < TARGET_PAGE_SIZE) { 6037b7d00e0SRichard Henderson /* 6047b7d00e0SRichard Henderson * Most targets have only a few mmu_idx. In the case where 6057b7d00e0SRichard Henderson * we can stuff idxmap into the low TARGET_PAGE_BITS, avoid 6067b7d00e0SRichard Henderson * allocating memory for this operation. 6077b7d00e0SRichard Henderson */ 6087b7d00e0SRichard Henderson async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_1, 6097b7d00e0SRichard Henderson RUN_ON_CPU_TARGET_PTR(addr | idxmap)); 610d9bb58e5SYang Zhong } else { 6117b7d00e0SRichard Henderson TLBFlushPageByMMUIdxData *d = g_new(TLBFlushPageByMMUIdxData, 1); 6127b7d00e0SRichard Henderson 6137b7d00e0SRichard Henderson /* Otherwise allocate a structure, freed by the worker. 
*/ 6147b7d00e0SRichard Henderson d->addr = addr; 6157b7d00e0SRichard Henderson d->idxmap = idxmap; 6167b7d00e0SRichard Henderson async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_2, 6177b7d00e0SRichard Henderson RUN_ON_CPU_HOST_PTR(d)); 618d9bb58e5SYang Zhong } 619d9bb58e5SYang Zhong } 620d9bb58e5SYang Zhong 621732d5487SAnton Johansson void tlb_flush_page(CPUState *cpu, vaddr addr) 622f8144c6cSRichard Henderson { 623f8144c6cSRichard Henderson tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS); 624f8144c6cSRichard Henderson } 625f8144c6cSRichard Henderson 626732d5487SAnton Johansson void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, vaddr addr, 627d9bb58e5SYang Zhong uint16_t idxmap) 628d9bb58e5SYang Zhong { 6298c605cf1SAnton Johansson tlb_debug("addr: %016" VADDR_PRIx " mmu_idx:%"PRIx16"\n", addr, idxmap); 630d9bb58e5SYang Zhong 631d9bb58e5SYang Zhong /* This should already be page aligned */ 6327b7d00e0SRichard Henderson addr &= TARGET_PAGE_MASK; 633d9bb58e5SYang Zhong 6347b7d00e0SRichard Henderson /* 6357b7d00e0SRichard Henderson * Allocate memory to hold addr+idxmap only when needed. 6367b7d00e0SRichard Henderson * See tlb_flush_page_by_mmuidx for details. 6377b7d00e0SRichard Henderson */ 6387b7d00e0SRichard Henderson if (idxmap < TARGET_PAGE_SIZE) { 6397b7d00e0SRichard Henderson flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1, 6407b7d00e0SRichard Henderson RUN_ON_CPU_TARGET_PTR(addr | idxmap)); 6417b7d00e0SRichard Henderson } else { 6427b7d00e0SRichard Henderson CPUState *dst_cpu; 6437b7d00e0SRichard Henderson 6447b7d00e0SRichard Henderson /* Allocate a separate data block for each destination cpu. */ 6457b7d00e0SRichard Henderson CPU_FOREACH(dst_cpu) { 6467b7d00e0SRichard Henderson if (dst_cpu != src_cpu) { 6477b7d00e0SRichard Henderson TLBFlushPageByMMUIdxData *d 6487b7d00e0SRichard Henderson = g_new(TLBFlushPageByMMUIdxData, 1); 6497b7d00e0SRichard Henderson 6507b7d00e0SRichard Henderson d->addr = addr; 6517b7d00e0SRichard Henderson d->idxmap = idxmap; 6527b7d00e0SRichard Henderson async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2, 6537b7d00e0SRichard Henderson RUN_ON_CPU_HOST_PTR(d)); 6547b7d00e0SRichard Henderson } 6557b7d00e0SRichard Henderson } 6567b7d00e0SRichard Henderson } 6577b7d00e0SRichard Henderson 6587b7d00e0SRichard Henderson tlb_flush_page_by_mmuidx_async_0(src_cpu, addr, idxmap); 659d9bb58e5SYang Zhong } 660d9bb58e5SYang Zhong 661732d5487SAnton Johansson void tlb_flush_page_all_cpus(CPUState *src, vaddr addr) 662f8144c6cSRichard Henderson { 663f8144c6cSRichard Henderson tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS); 664f8144c6cSRichard Henderson } 665f8144c6cSRichard Henderson 666d9bb58e5SYang Zhong void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu, 667732d5487SAnton Johansson vaddr addr, 668d9bb58e5SYang Zhong uint16_t idxmap) 669d9bb58e5SYang Zhong { 6708c605cf1SAnton Johansson tlb_debug("addr: %016" VADDR_PRIx " mmu_idx:%"PRIx16"\n", addr, idxmap); 671d9bb58e5SYang Zhong 672d9bb58e5SYang Zhong /* This should already be page aligned */ 6737b7d00e0SRichard Henderson addr &= TARGET_PAGE_MASK; 674d9bb58e5SYang Zhong 6757b7d00e0SRichard Henderson /* 6767b7d00e0SRichard Henderson * Allocate memory to hold addr+idxmap only when needed. 6777b7d00e0SRichard Henderson * See tlb_flush_page_by_mmuidx for details. 
6787b7d00e0SRichard Henderson */ 6797b7d00e0SRichard Henderson if (idxmap < TARGET_PAGE_SIZE) { 6807b7d00e0SRichard Henderson flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1, 6817b7d00e0SRichard Henderson RUN_ON_CPU_TARGET_PTR(addr | idxmap)); 6827b7d00e0SRichard Henderson async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_1, 6837b7d00e0SRichard Henderson RUN_ON_CPU_TARGET_PTR(addr | idxmap)); 6847b7d00e0SRichard Henderson } else { 6857b7d00e0SRichard Henderson CPUState *dst_cpu; 6867b7d00e0SRichard Henderson TLBFlushPageByMMUIdxData *d; 6877b7d00e0SRichard Henderson 6887b7d00e0SRichard Henderson /* Allocate a separate data block for each destination cpu. */ 6897b7d00e0SRichard Henderson CPU_FOREACH(dst_cpu) { 6907b7d00e0SRichard Henderson if (dst_cpu != src_cpu) { 6917b7d00e0SRichard Henderson d = g_new(TLBFlushPageByMMUIdxData, 1); 6927b7d00e0SRichard Henderson d->addr = addr; 6937b7d00e0SRichard Henderson d->idxmap = idxmap; 6947b7d00e0SRichard Henderson async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2, 6957b7d00e0SRichard Henderson RUN_ON_CPU_HOST_PTR(d)); 6967b7d00e0SRichard Henderson } 6977b7d00e0SRichard Henderson } 6987b7d00e0SRichard Henderson 6997b7d00e0SRichard Henderson d = g_new(TLBFlushPageByMMUIdxData, 1); 7007b7d00e0SRichard Henderson d->addr = addr; 7017b7d00e0SRichard Henderson d->idxmap = idxmap; 7027b7d00e0SRichard Henderson async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_2, 7037b7d00e0SRichard Henderson RUN_ON_CPU_HOST_PTR(d)); 7047b7d00e0SRichard Henderson } 705d9bb58e5SYang Zhong } 706d9bb58e5SYang Zhong 707732d5487SAnton Johansson void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr) 708d9bb58e5SYang Zhong { 709f8144c6cSRichard Henderson tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS); 710d9bb58e5SYang Zhong } 711d9bb58e5SYang Zhong 7123c4ddec1SRichard Henderson static void tlb_flush_range_locked(CPUArchState *env, int midx, 713732d5487SAnton Johansson vaddr addr, vaddr len, 7143c4ddec1SRichard Henderson unsigned bits) 7153ab6e68cSRichard Henderson { 7163ab6e68cSRichard Henderson CPUTLBDesc *d = &env_tlb(env)->d[midx]; 7173ab6e68cSRichard Henderson CPUTLBDescFast *f = &env_tlb(env)->f[midx]; 718732d5487SAnton Johansson vaddr mask = MAKE_64BIT_MASK(0, bits); 7193ab6e68cSRichard Henderson 7203ab6e68cSRichard Henderson /* 7213ab6e68cSRichard Henderson * If @bits is smaller than the tlb size, there may be multiple entries 7223ab6e68cSRichard Henderson * within the TLB; otherwise all addresses that match under @mask hit 7233ab6e68cSRichard Henderson * the same TLB entry. 7243ab6e68cSRichard Henderson * TODO: Perhaps allow bits to be a few bits less than the size. 7253ab6e68cSRichard Henderson * For now, just flush the entire TLB. 7263c4ddec1SRichard Henderson * 7273c4ddec1SRichard Henderson * If @len is larger than the tlb size, then it will take longer to 7283c4ddec1SRichard Henderson * test all of the entries in the TLB than it will to flush it all. 
7293ab6e68cSRichard Henderson */ 7303c4ddec1SRichard Henderson if (mask < f->mask || len > f->mask) { 7313ab6e68cSRichard Henderson tlb_debug("forcing full flush midx %d (" 7328c605cf1SAnton Johansson "%016" VADDR_PRIx "/%016" VADDR_PRIx "+%016" VADDR_PRIx ")\n", 7333c4ddec1SRichard Henderson midx, addr, mask, len); 7343ab6e68cSRichard Henderson tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime()); 7353ab6e68cSRichard Henderson return; 7363ab6e68cSRichard Henderson } 7373ab6e68cSRichard Henderson 7383c4ddec1SRichard Henderson /* 7393c4ddec1SRichard Henderson * Check if we need to flush due to large pages. 7403c4ddec1SRichard Henderson * Because large_page_mask contains all 1's from the msb, 7413c4ddec1SRichard Henderson * we only need to test the end of the range. 7423c4ddec1SRichard Henderson */ 7433c4ddec1SRichard Henderson if (((addr + len - 1) & d->large_page_mask) == d->large_page_addr) { 7443ab6e68cSRichard Henderson tlb_debug("forcing full flush midx %d (" 7458c605cf1SAnton Johansson "%016" VADDR_PRIx "/%016" VADDR_PRIx ")\n", 7463ab6e68cSRichard Henderson midx, d->large_page_addr, d->large_page_mask); 7473ab6e68cSRichard Henderson tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime()); 7483ab6e68cSRichard Henderson return; 7493ab6e68cSRichard Henderson } 7503ab6e68cSRichard Henderson 751732d5487SAnton Johansson for (vaddr i = 0; i < len; i += TARGET_PAGE_SIZE) { 752732d5487SAnton Johansson vaddr page = addr + i; 7533c4ddec1SRichard Henderson CPUTLBEntry *entry = tlb_entry(env, midx, page); 7543c4ddec1SRichard Henderson 7553c4ddec1SRichard Henderson if (tlb_flush_entry_mask_locked(entry, page, mask)) { 7563ab6e68cSRichard Henderson tlb_n_used_entries_dec(env, midx); 7573ab6e68cSRichard Henderson } 7583ab6e68cSRichard Henderson tlb_flush_vtlb_page_mask_locked(env, midx, page, mask); 7593ab6e68cSRichard Henderson } 7603c4ddec1SRichard Henderson } 7613ab6e68cSRichard Henderson 7623ab6e68cSRichard Henderson typedef struct { 763732d5487SAnton Johansson vaddr addr; 764732d5487SAnton Johansson vaddr len; 7653ab6e68cSRichard Henderson uint16_t idxmap; 7663ab6e68cSRichard Henderson uint16_t bits; 7673960a59fSRichard Henderson } TLBFlushRangeData; 7683ab6e68cSRichard Henderson 7696be48e45SRichard Henderson static void tlb_flush_range_by_mmuidx_async_0(CPUState *cpu, 7703960a59fSRichard Henderson TLBFlushRangeData d) 7713ab6e68cSRichard Henderson { 7723ab6e68cSRichard Henderson CPUArchState *env = cpu->env_ptr; 7733ab6e68cSRichard Henderson int mmu_idx; 7743ab6e68cSRichard Henderson 7753ab6e68cSRichard Henderson assert_cpu_is_self(cpu); 7763ab6e68cSRichard Henderson 7778c605cf1SAnton Johansson tlb_debug("range: %016" VADDR_PRIx "/%u+%016" VADDR_PRIx " mmu_map:0x%x\n", 7783c4ddec1SRichard Henderson d.addr, d.bits, d.len, d.idxmap); 7793ab6e68cSRichard Henderson 7803ab6e68cSRichard Henderson qemu_spin_lock(&env_tlb(env)->c.lock); 7813ab6e68cSRichard Henderson for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { 7823ab6e68cSRichard Henderson if ((d.idxmap >> mmu_idx) & 1) { 7833c4ddec1SRichard Henderson tlb_flush_range_locked(env, mmu_idx, d.addr, d.len, d.bits); 7843ab6e68cSRichard Henderson } 7853ab6e68cSRichard Henderson } 7863ab6e68cSRichard Henderson qemu_spin_unlock(&env_tlb(env)->c.lock); 7873ab6e68cSRichard Henderson 788cfc2a2d6SIdan Horowitz /* 789cfc2a2d6SIdan Horowitz * If the length is larger than the jump cache size, then it will take 790cfc2a2d6SIdan Horowitz * longer to clear each entry individually than it will to clear it all. 
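 *
 * For instance, assuming 4 KiB pages and a 4096-entry jump cache,
 * any flushed range of 16 MiB or more takes the whole-cache flush
 * below instead of clearing the cache page by page.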
791cfc2a2d6SIdan Horowitz */ 792cfc2a2d6SIdan Horowitz if (d.len >= (TARGET_PAGE_SIZE * TB_JMP_CACHE_SIZE)) { 793a976a99aSRichard Henderson tcg_flush_jmp_cache(cpu); 794cfc2a2d6SIdan Horowitz return; 795cfc2a2d6SIdan Horowitz } 796cfc2a2d6SIdan Horowitz 7971d41a79bSRichard Henderson /* 7981d41a79bSRichard Henderson * Discard jump cache entries for any tb which might potentially 7991d41a79bSRichard Henderson * overlap the flushed pages, which includes the previous. 8001d41a79bSRichard Henderson */ 8011d41a79bSRichard Henderson d.addr -= TARGET_PAGE_SIZE; 802732d5487SAnton Johansson for (vaddr i = 0, n = d.len / TARGET_PAGE_SIZE + 1; i < n; i++) { 8031d41a79bSRichard Henderson tb_jmp_cache_clear_page(cpu, d.addr); 8041d41a79bSRichard Henderson d.addr += TARGET_PAGE_SIZE; 8053c4ddec1SRichard Henderson } 8063ab6e68cSRichard Henderson } 8073ab6e68cSRichard Henderson 808206a583dSRichard Henderson static void tlb_flush_range_by_mmuidx_async_1(CPUState *cpu, 8093ab6e68cSRichard Henderson run_on_cpu_data data) 8103ab6e68cSRichard Henderson { 8113960a59fSRichard Henderson TLBFlushRangeData *d = data.host_ptr; 8126be48e45SRichard Henderson tlb_flush_range_by_mmuidx_async_0(cpu, *d); 8133ab6e68cSRichard Henderson g_free(d); 8143ab6e68cSRichard Henderson } 8153ab6e68cSRichard Henderson 816732d5487SAnton Johansson void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr, 817732d5487SAnton Johansson vaddr len, uint16_t idxmap, 818e5b1921bSRichard Henderson unsigned bits) 8193ab6e68cSRichard Henderson { 8203960a59fSRichard Henderson TLBFlushRangeData d; 8213ab6e68cSRichard Henderson 822e5b1921bSRichard Henderson /* 823e5b1921bSRichard Henderson * If all bits are significant, and len is small, 824e5b1921bSRichard Henderson * this devolves to tlb_flush_page. 825e5b1921bSRichard Henderson */ 826e5b1921bSRichard Henderson if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) { 8273ab6e68cSRichard Henderson tlb_flush_page_by_mmuidx(cpu, addr, idxmap); 8283ab6e68cSRichard Henderson return; 8293ab6e68cSRichard Henderson } 8303ab6e68cSRichard Henderson /* If no page bits are significant, this devolves to tlb_flush. */ 8313ab6e68cSRichard Henderson if (bits < TARGET_PAGE_BITS) { 8323ab6e68cSRichard Henderson tlb_flush_by_mmuidx(cpu, idxmap); 8333ab6e68cSRichard Henderson return; 8343ab6e68cSRichard Henderson } 8353ab6e68cSRichard Henderson 8363ab6e68cSRichard Henderson /* This should already be page aligned */ 8373ab6e68cSRichard Henderson d.addr = addr & TARGET_PAGE_MASK; 838e5b1921bSRichard Henderson d.len = len; 8393ab6e68cSRichard Henderson d.idxmap = idxmap; 8403ab6e68cSRichard Henderson d.bits = bits; 8413ab6e68cSRichard Henderson 8423ab6e68cSRichard Henderson if (qemu_cpu_is_self(cpu)) { 8436be48e45SRichard Henderson tlb_flush_range_by_mmuidx_async_0(cpu, d); 8443ab6e68cSRichard Henderson } else { 8453ab6e68cSRichard Henderson /* Otherwise allocate a structure, freed by the worker. 
*/ 8463960a59fSRichard Henderson TLBFlushRangeData *p = g_memdup(&d, sizeof(d)); 847206a583dSRichard Henderson async_run_on_cpu(cpu, tlb_flush_range_by_mmuidx_async_1, 8483ab6e68cSRichard Henderson RUN_ON_CPU_HOST_PTR(p)); 8493ab6e68cSRichard Henderson } 8503ab6e68cSRichard Henderson } 8513ab6e68cSRichard Henderson 852732d5487SAnton Johansson void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, vaddr addr, 853e5b1921bSRichard Henderson uint16_t idxmap, unsigned bits) 854e5b1921bSRichard Henderson { 855e5b1921bSRichard Henderson tlb_flush_range_by_mmuidx(cpu, addr, TARGET_PAGE_SIZE, idxmap, bits); 856e5b1921bSRichard Henderson } 857e5b1921bSRichard Henderson 858600b819fSRichard Henderson void tlb_flush_range_by_mmuidx_all_cpus(CPUState *src_cpu, 859732d5487SAnton Johansson vaddr addr, vaddr len, 860600b819fSRichard Henderson uint16_t idxmap, unsigned bits) 8613ab6e68cSRichard Henderson { 8623960a59fSRichard Henderson TLBFlushRangeData d; 863d34e4d1aSRichard Henderson CPUState *dst_cpu; 8643ab6e68cSRichard Henderson 865600b819fSRichard Henderson /* 866600b819fSRichard Henderson * If all bits are significant, and len is small, 867600b819fSRichard Henderson * this devolves to tlb_flush_page. 868600b819fSRichard Henderson */ 869600b819fSRichard Henderson if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) { 8703ab6e68cSRichard Henderson tlb_flush_page_by_mmuidx_all_cpus(src_cpu, addr, idxmap); 8713ab6e68cSRichard Henderson return; 8723ab6e68cSRichard Henderson } 8733ab6e68cSRichard Henderson /* If no page bits are significant, this devolves to tlb_flush. */ 8743ab6e68cSRichard Henderson if (bits < TARGET_PAGE_BITS) { 8753ab6e68cSRichard Henderson tlb_flush_by_mmuidx_all_cpus(src_cpu, idxmap); 8763ab6e68cSRichard Henderson return; 8773ab6e68cSRichard Henderson } 8783ab6e68cSRichard Henderson 8793ab6e68cSRichard Henderson /* This should already be page aligned */ 8803ab6e68cSRichard Henderson d.addr = addr & TARGET_PAGE_MASK; 881600b819fSRichard Henderson d.len = len; 8823ab6e68cSRichard Henderson d.idxmap = idxmap; 8833ab6e68cSRichard Henderson d.bits = bits; 8843ab6e68cSRichard Henderson 8853ab6e68cSRichard Henderson /* Allocate a separate data block for each destination cpu. 
*/ 8863ab6e68cSRichard Henderson CPU_FOREACH(dst_cpu) { 8873ab6e68cSRichard Henderson if (dst_cpu != src_cpu) { 8883960a59fSRichard Henderson TLBFlushRangeData *p = g_memdup(&d, sizeof(d)); 8893ab6e68cSRichard Henderson async_run_on_cpu(dst_cpu, 890206a583dSRichard Henderson tlb_flush_range_by_mmuidx_async_1, 8913ab6e68cSRichard Henderson RUN_ON_CPU_HOST_PTR(p)); 8923ab6e68cSRichard Henderson } 8933ab6e68cSRichard Henderson } 8943ab6e68cSRichard Henderson 8956be48e45SRichard Henderson tlb_flush_range_by_mmuidx_async_0(src_cpu, d); 8963ab6e68cSRichard Henderson } 8973ab6e68cSRichard Henderson 898600b819fSRichard Henderson void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu, 899732d5487SAnton Johansson vaddr addr, uint16_t idxmap, 900732d5487SAnton Johansson unsigned bits) 901600b819fSRichard Henderson { 902600b819fSRichard Henderson tlb_flush_range_by_mmuidx_all_cpus(src_cpu, addr, TARGET_PAGE_SIZE, 903600b819fSRichard Henderson idxmap, bits); 904600b819fSRichard Henderson } 905600b819fSRichard Henderson 906c13b27d8SRichard Henderson void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu, 907732d5487SAnton Johansson vaddr addr, 908732d5487SAnton Johansson vaddr len, 9093ab6e68cSRichard Henderson uint16_t idxmap, 9103ab6e68cSRichard Henderson unsigned bits) 9113ab6e68cSRichard Henderson { 912d34e4d1aSRichard Henderson TLBFlushRangeData d, *p; 913d34e4d1aSRichard Henderson CPUState *dst_cpu; 9143ab6e68cSRichard Henderson 915c13b27d8SRichard Henderson /* 916c13b27d8SRichard Henderson * If all bits are significant, and len is small, 917c13b27d8SRichard Henderson * this devolves to tlb_flush_page. 918c13b27d8SRichard Henderson */ 919c13b27d8SRichard Henderson if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) { 9203ab6e68cSRichard Henderson tlb_flush_page_by_mmuidx_all_cpus_synced(src_cpu, addr, idxmap); 9213ab6e68cSRichard Henderson return; 9223ab6e68cSRichard Henderson } 9233ab6e68cSRichard Henderson /* If no page bits are significant, this devolves to tlb_flush. */ 9243ab6e68cSRichard Henderson if (bits < TARGET_PAGE_BITS) { 9253ab6e68cSRichard Henderson tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, idxmap); 9263ab6e68cSRichard Henderson return; 9273ab6e68cSRichard Henderson } 9283ab6e68cSRichard Henderson 9293ab6e68cSRichard Henderson /* This should already be page aligned */ 9303ab6e68cSRichard Henderson d.addr = addr & TARGET_PAGE_MASK; 931c13b27d8SRichard Henderson d.len = len; 9323ab6e68cSRichard Henderson d.idxmap = idxmap; 9333ab6e68cSRichard Henderson d.bits = bits; 9343ab6e68cSRichard Henderson 9353ab6e68cSRichard Henderson /* Allocate a separate data block for each destination cpu. 
*/ 9363ab6e68cSRichard Henderson CPU_FOREACH(dst_cpu) { 9373ab6e68cSRichard Henderson if (dst_cpu != src_cpu) { 9386d244788SRichard Henderson p = g_memdup(&d, sizeof(d)); 939206a583dSRichard Henderson async_run_on_cpu(dst_cpu, tlb_flush_range_by_mmuidx_async_1, 9403ab6e68cSRichard Henderson RUN_ON_CPU_HOST_PTR(p)); 9413ab6e68cSRichard Henderson } 9423ab6e68cSRichard Henderson } 9433ab6e68cSRichard Henderson 9446d244788SRichard Henderson p = g_memdup(&d, sizeof(d)); 945206a583dSRichard Henderson async_safe_run_on_cpu(src_cpu, tlb_flush_range_by_mmuidx_async_1, 9463ab6e68cSRichard Henderson RUN_ON_CPU_HOST_PTR(p)); 9473ab6e68cSRichard Henderson } 9483ab6e68cSRichard Henderson 949c13b27d8SRichard Henderson void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu, 950732d5487SAnton Johansson vaddr addr, 951c13b27d8SRichard Henderson uint16_t idxmap, 952c13b27d8SRichard Henderson unsigned bits) 953c13b27d8SRichard Henderson { 954c13b27d8SRichard Henderson tlb_flush_range_by_mmuidx_all_cpus_synced(src_cpu, addr, TARGET_PAGE_SIZE, 955c13b27d8SRichard Henderson idxmap, bits); 956c13b27d8SRichard Henderson } 957c13b27d8SRichard Henderson 958d9bb58e5SYang Zhong /* update the TLBs so that writes to code in the virtual page 'addr' 959d9bb58e5SYang Zhong can be detected */ 960d9bb58e5SYang Zhong void tlb_protect_code(ram_addr_t ram_addr) 961d9bb58e5SYang Zhong { 96293b99616SRichard Henderson cpu_physical_memory_test_and_clear_dirty(ram_addr & TARGET_PAGE_MASK, 96393b99616SRichard Henderson TARGET_PAGE_SIZE, 964d9bb58e5SYang Zhong DIRTY_MEMORY_CODE); 965d9bb58e5SYang Zhong } 966d9bb58e5SYang Zhong 967d9bb58e5SYang Zhong /* update the TLB so that writes in physical page 'phys_addr' are no longer 968d9bb58e5SYang Zhong tested for self modifying code */ 969d9bb58e5SYang Zhong void tlb_unprotect_code(ram_addr_t ram_addr) 970d9bb58e5SYang Zhong { 971d9bb58e5SYang Zhong cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE); 972d9bb58e5SYang Zhong } 973d9bb58e5SYang Zhong 974d9bb58e5SYang Zhong 975d9bb58e5SYang Zhong /* 976d9bb58e5SYang Zhong * Dirty write flag handling 977d9bb58e5SYang Zhong * 978d9bb58e5SYang Zhong * When the TCG code writes to a location it looks up the address in 979d9bb58e5SYang Zhong * the TLB and uses that data to compute the final address. If any of 980d9bb58e5SYang Zhong * the lower bits of the address are set then the slow path is forced. 981d9bb58e5SYang Zhong * There are a number of reasons to do this but for normal RAM the 982d9bb58e5SYang Zhong * most usual is detecting writes to code regions which may invalidate 983d9bb58e5SYang Zhong * generated code. 984d9bb58e5SYang Zhong * 98571aec354SEmilio G. Cota * Other vCPUs might be reading their TLBs during guest execution, so we update 986d73415a3SStefan Hajnoczi * te->addr_write with qatomic_set. We don't need to worry about this for 98771aec354SEmilio G. Cota * oversized guests as MTTCG is disabled for them. 988d9bb58e5SYang Zhong * 98953d28455SRichard Henderson * Called with tlb_c.lock held. 990d9bb58e5SYang Zhong */ 99171aec354SEmilio G. Cota static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry, 99271aec354SEmilio G. 
Cota uintptr_t start, uintptr_t length) 993d9bb58e5SYang Zhong { 994d9bb58e5SYang Zhong uintptr_t addr = tlb_entry->addr_write; 995d9bb58e5SYang Zhong 9967b0d792cSRichard Henderson if ((addr & (TLB_INVALID_MASK | TLB_MMIO | 9977b0d792cSRichard Henderson TLB_DISCARD_WRITE | TLB_NOTDIRTY)) == 0) { 998d9bb58e5SYang Zhong addr &= TARGET_PAGE_MASK; 999d9bb58e5SYang Zhong addr += tlb_entry->addend; 1000d9bb58e5SYang Zhong if ((addr - start) < length) { 1001238f4380SRichard Henderson #if TARGET_LONG_BITS == 32 1002238f4380SRichard Henderson uint32_t *ptr_write = (uint32_t *)&tlb_entry->addr_write; 1003238f4380SRichard Henderson ptr_write += HOST_BIG_ENDIAN; 1004238f4380SRichard Henderson qatomic_set(ptr_write, *ptr_write | TLB_NOTDIRTY); 1005238f4380SRichard Henderson #elif TCG_OVERSIZED_GUEST 100671aec354SEmilio G. Cota tlb_entry->addr_write |= TLB_NOTDIRTY; 1007d9bb58e5SYang Zhong #else 1008d73415a3SStefan Hajnoczi qatomic_set(&tlb_entry->addr_write, 100971aec354SEmilio G. Cota tlb_entry->addr_write | TLB_NOTDIRTY); 1010d9bb58e5SYang Zhong #endif 1011d9bb58e5SYang Zhong } 101271aec354SEmilio G. Cota } 101371aec354SEmilio G. Cota } 101471aec354SEmilio G. Cota 101571aec354SEmilio G. Cota /* 101653d28455SRichard Henderson * Called with tlb_c.lock held. 101771aec354SEmilio G. Cota * Called only from the vCPU context, i.e. the TLB's owner thread. 101871aec354SEmilio G. Cota */ 101971aec354SEmilio G. Cota static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s) 102071aec354SEmilio G. Cota { 102171aec354SEmilio G. Cota *d = *s; 102271aec354SEmilio G. Cota } 1023d9bb58e5SYang Zhong 1024d9bb58e5SYang Zhong /* This is a cross vCPU call (i.e. another vCPU resetting the flags of 102571aec354SEmilio G. Cota * the target vCPU). 102653d28455SRichard Henderson * We must take tlb_c.lock to avoid racing with another vCPU update. The only 102771aec354SEmilio G. Cota * thing actually updated is the target TLB entry ->addr_write flags. 1028d9bb58e5SYang Zhong */ 1029d9bb58e5SYang Zhong void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length) 1030d9bb58e5SYang Zhong { 1031d9bb58e5SYang Zhong CPUArchState *env; 1032d9bb58e5SYang Zhong 1033d9bb58e5SYang Zhong int mmu_idx; 1034d9bb58e5SYang Zhong 1035d9bb58e5SYang Zhong env = cpu->env_ptr; 1036a40ec84eSRichard Henderson qemu_spin_lock(&env_tlb(env)->c.lock); 1037d9bb58e5SYang Zhong for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { 1038d9bb58e5SYang Zhong unsigned int i; 1039722a1c1eSRichard Henderson unsigned int n = tlb_n_entries(&env_tlb(env)->f[mmu_idx]); 1040d9bb58e5SYang Zhong 104186e1eff8SEmilio G. Cota for (i = 0; i < n; i++) { 1042a40ec84eSRichard Henderson tlb_reset_dirty_range_locked(&env_tlb(env)->f[mmu_idx].table[i], 1043a40ec84eSRichard Henderson start1, length); 1044d9bb58e5SYang Zhong } 1045d9bb58e5SYang Zhong 1046d9bb58e5SYang Zhong for (i = 0; i < CPU_VTLB_SIZE; i++) { 1047a40ec84eSRichard Henderson tlb_reset_dirty_range_locked(&env_tlb(env)->d[mmu_idx].vtable[i], 1048a40ec84eSRichard Henderson start1, length); 1049d9bb58e5SYang Zhong } 1050d9bb58e5SYang Zhong } 1051a40ec84eSRichard Henderson qemu_spin_unlock(&env_tlb(env)->c.lock); 1052d9bb58e5SYang Zhong } 1053d9bb58e5SYang Zhong 105453d28455SRichard Henderson /* Called with tlb_c.lock held */ 105571aec354SEmilio G. 
Cota static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry, 1056732d5487SAnton Johansson vaddr addr) 1057d9bb58e5SYang Zhong { 1058732d5487SAnton Johansson if (tlb_entry->addr_write == (addr | TLB_NOTDIRTY)) { 1059732d5487SAnton Johansson tlb_entry->addr_write = addr; 1060d9bb58e5SYang Zhong } 1061d9bb58e5SYang Zhong } 1062d9bb58e5SYang Zhong 1063d9bb58e5SYang Zhong /* update the TLB corresponding to virtual page vaddr 1064d9bb58e5SYang Zhong so that it is no longer dirty */ 1065732d5487SAnton Johansson void tlb_set_dirty(CPUState *cpu, vaddr addr) 1066d9bb58e5SYang Zhong { 1067d9bb58e5SYang Zhong CPUArchState *env = cpu->env_ptr; 1068d9bb58e5SYang Zhong int mmu_idx; 1069d9bb58e5SYang Zhong 1070d9bb58e5SYang Zhong assert_cpu_is_self(cpu); 1071d9bb58e5SYang Zhong 1072732d5487SAnton Johansson addr &= TARGET_PAGE_MASK; 1073a40ec84eSRichard Henderson qemu_spin_lock(&env_tlb(env)->c.lock); 1074d9bb58e5SYang Zhong for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { 1075732d5487SAnton Johansson tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, addr), addr); 1076d9bb58e5SYang Zhong } 1077d9bb58e5SYang Zhong 1078d9bb58e5SYang Zhong for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { 1079d9bb58e5SYang Zhong int k; 1080d9bb58e5SYang Zhong for (k = 0; k < CPU_VTLB_SIZE; k++) { 1081732d5487SAnton Johansson tlb_set_dirty1_locked(&env_tlb(env)->d[mmu_idx].vtable[k], addr); 1082d9bb58e5SYang Zhong } 1083d9bb58e5SYang Zhong } 1084a40ec84eSRichard Henderson qemu_spin_unlock(&env_tlb(env)->c.lock); 1085d9bb58e5SYang Zhong } 1086d9bb58e5SYang Zhong 1087d9bb58e5SYang Zhong /* Our TLB does not support large pages, so remember the area covered by 1088d9bb58e5SYang Zhong large pages and trigger a full TLB flush if these are invalidated. */ 10891308e026SRichard Henderson static void tlb_add_large_page(CPUArchState *env, int mmu_idx, 1090732d5487SAnton Johansson vaddr addr, uint64_t size) 1091d9bb58e5SYang Zhong { 1092732d5487SAnton Johansson vaddr lp_addr = env_tlb(env)->d[mmu_idx].large_page_addr; 1093732d5487SAnton Johansson vaddr lp_mask = ~(size - 1); 1094d9bb58e5SYang Zhong 1095732d5487SAnton Johansson if (lp_addr == (vaddr)-1) { 10961308e026SRichard Henderson /* No previous large page. */ 1097732d5487SAnton Johansson lp_addr = addr; 10981308e026SRichard Henderson } else { 1099d9bb58e5SYang Zhong /* Extend the existing region to include the new page. 11001308e026SRichard Henderson This is a compromise between unnecessary flushes and 11011308e026SRichard Henderson the cost of maintaining a full variable size TLB. 
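The loop below widens lp_mask one bit at a time until the old and the new page fall within a single aligned region.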
*/ 1102a40ec84eSRichard Henderson lp_mask &= env_tlb(env)->d[mmu_idx].large_page_mask; 1103732d5487SAnton Johansson while (((lp_addr ^ addr) & lp_mask) != 0) { 11041308e026SRichard Henderson lp_mask <<= 1; 1105d9bb58e5SYang Zhong } 11061308e026SRichard Henderson } 1107a40ec84eSRichard Henderson env_tlb(env)->d[mmu_idx].large_page_addr = lp_addr & lp_mask; 1108a40ec84eSRichard Henderson env_tlb(env)->d[mmu_idx].large_page_mask = lp_mask; 1109d9bb58e5SYang Zhong } 1110d9bb58e5SYang Zhong 111158e8f1f6SRichard Henderson static inline void tlb_set_compare(CPUTLBEntryFull *full, CPUTLBEntry *ent, 1112d712b116SAnton Johansson vaddr address, int flags, 111358e8f1f6SRichard Henderson MMUAccessType access_type, bool enable) 111458e8f1f6SRichard Henderson { 111558e8f1f6SRichard Henderson if (enable) { 111658e8f1f6SRichard Henderson address |= flags & TLB_FLAGS_MASK; 111758e8f1f6SRichard Henderson flags &= TLB_SLOW_FLAGS_MASK; 111858e8f1f6SRichard Henderson if (flags) { 111958e8f1f6SRichard Henderson address |= TLB_FORCE_SLOW; 112058e8f1f6SRichard Henderson } 112158e8f1f6SRichard Henderson } else { 112258e8f1f6SRichard Henderson address = -1; 112358e8f1f6SRichard Henderson flags = 0; 112458e8f1f6SRichard Henderson } 112558e8f1f6SRichard Henderson ent->addr_idx[access_type] = address; 112658e8f1f6SRichard Henderson full->slow_flags[access_type] = flags; 112758e8f1f6SRichard Henderson } 112858e8f1f6SRichard Henderson 112940473689SRichard Henderson /* 113040473689SRichard Henderson * Add a new TLB entry. At most one entry for a given virtual address 1131d9bb58e5SYang Zhong * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the 1132d9bb58e5SYang Zhong * supplied size is only used by tlb_flush_page. 1133d9bb58e5SYang Zhong * 1134d9bb58e5SYang Zhong * Called from TCG-generated code, which is under an RCU read-side 1135d9bb58e5SYang Zhong * critical section. 
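 * The tlb->c.lock is taken internally, so the caller must not already hold it.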
1136d9bb58e5SYang Zhong */ 113740473689SRichard Henderson void tlb_set_page_full(CPUState *cpu, int mmu_idx, 1138732d5487SAnton Johansson vaddr addr, CPUTLBEntryFull *full) 1139d9bb58e5SYang Zhong { 1140d9bb58e5SYang Zhong CPUArchState *env = cpu->env_ptr; 1141a40ec84eSRichard Henderson CPUTLB *tlb = env_tlb(env); 1142a40ec84eSRichard Henderson CPUTLBDesc *desc = &tlb->d[mmu_idx]; 1143d9bb58e5SYang Zhong MemoryRegionSection *section; 114458e8f1f6SRichard Henderson unsigned int index, read_flags, write_flags; 1145d9bb58e5SYang Zhong uintptr_t addend; 114668fea038SRichard Henderson CPUTLBEntry *te, tn; 114755df6fcfSPeter Maydell hwaddr iotlb, xlat, sz, paddr_page; 1148732d5487SAnton Johansson vaddr addr_page; 114940473689SRichard Henderson int asidx, wp_flags, prot; 11508f5db641SRichard Henderson bool is_ram, is_romd; 1151d9bb58e5SYang Zhong 1152d9bb58e5SYang Zhong assert_cpu_is_self(cpu); 115355df6fcfSPeter Maydell 115440473689SRichard Henderson if (full->lg_page_size <= TARGET_PAGE_BITS) { 115555df6fcfSPeter Maydell sz = TARGET_PAGE_SIZE; 115655df6fcfSPeter Maydell } else { 115740473689SRichard Henderson sz = (hwaddr)1 << full->lg_page_size; 1158732d5487SAnton Johansson tlb_add_large_page(env, mmu_idx, addr, sz); 115955df6fcfSPeter Maydell } 1160732d5487SAnton Johansson addr_page = addr & TARGET_PAGE_MASK; 116140473689SRichard Henderson paddr_page = full->phys_addr & TARGET_PAGE_MASK; 116255df6fcfSPeter Maydell 116340473689SRichard Henderson prot = full->prot; 116440473689SRichard Henderson asidx = cpu_asidx_from_attrs(cpu, full->attrs); 116555df6fcfSPeter Maydell section = address_space_translate_for_iotlb(cpu, asidx, paddr_page, 116640473689SRichard Henderson &xlat, &sz, full->attrs, &prot); 1167d9bb58e5SYang Zhong assert(sz >= TARGET_PAGE_SIZE); 1168d9bb58e5SYang Zhong 11698c605cf1SAnton Johansson tlb_debug("vaddr=%016" VADDR_PRIx " paddr=0x" HWADDR_FMT_plx 1170d9bb58e5SYang Zhong " prot=%x idx=%d\n", 1171732d5487SAnton Johansson addr, full->phys_addr, prot, mmu_idx); 1172d9bb58e5SYang Zhong 117358e8f1f6SRichard Henderson read_flags = 0; 117440473689SRichard Henderson if (full->lg_page_size < TARGET_PAGE_BITS) { 117530d7e098SRichard Henderson /* Repeat the MMU check and TLB fill on every access. */ 117658e8f1f6SRichard Henderson read_flags |= TLB_INVALID_MASK; 117755df6fcfSPeter Maydell } 117840473689SRichard Henderson if (full->attrs.byte_swap) { 117958e8f1f6SRichard Henderson read_flags |= TLB_BSWAP; 1180a26fc6f5STony Nguyen } 11818f5db641SRichard Henderson 11828f5db641SRichard Henderson is_ram = memory_region_is_ram(section->mr); 11838f5db641SRichard Henderson is_romd = memory_region_is_romd(section->mr); 11848f5db641SRichard Henderson 11858f5db641SRichard Henderson if (is_ram || is_romd) { 11868f5db641SRichard Henderson /* RAM and ROMD both have associated host memory. */ 1187d9bb58e5SYang Zhong addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat; 11888f5db641SRichard Henderson } else { 11898f5db641SRichard Henderson /* I/O does not; force the host address to NULL. 
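With a zero addend no usable host pointer can be formed for this page; accesses are handled by io_readx/io_writex via the TLB_MMIO slow path.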
*/ 11908f5db641SRichard Henderson addend = 0; 1191d9bb58e5SYang Zhong } 1192d9bb58e5SYang Zhong 119358e8f1f6SRichard Henderson write_flags = read_flags; 11948f5db641SRichard Henderson if (is_ram) { 11958f5db641SRichard Henderson iotlb = memory_region_get_ram_addr(section->mr) + xlat; 1196*dff1ab68SLIU Zhiwei assert(!(iotlb & ~TARGET_PAGE_MASK)); 11978f5db641SRichard Henderson /* 11988f5db641SRichard Henderson * Computing is_clean is expensive; avoid all that unless 11998f5db641SRichard Henderson * the page is actually writable. 12008f5db641SRichard Henderson */ 12018f5db641SRichard Henderson if (prot & PAGE_WRITE) { 12028f5db641SRichard Henderson if (section->readonly) { 120358e8f1f6SRichard Henderson write_flags |= TLB_DISCARD_WRITE; 12048f5db641SRichard Henderson } else if (cpu_physical_memory_is_clean(iotlb)) { 120558e8f1f6SRichard Henderson write_flags |= TLB_NOTDIRTY; 12068f5db641SRichard Henderson } 12078f5db641SRichard Henderson } 12088f5db641SRichard Henderson } else { 12098f5db641SRichard Henderson /* I/O or ROMD */ 12108f5db641SRichard Henderson iotlb = memory_region_section_get_iotlb(cpu, section) + xlat; 12118f5db641SRichard Henderson /* 12128f5db641SRichard Henderson * Writes to romd devices must go through MMIO to enable write. 12138f5db641SRichard Henderson * Reads to romd devices go through the ram_ptr found above, 12148f5db641SRichard Henderson * but of course reads to I/O must go through MMIO. 12158f5db641SRichard Henderson */ 121658e8f1f6SRichard Henderson write_flags |= TLB_MMIO; 12178f5db641SRichard Henderson if (!is_romd) { 121858e8f1f6SRichard Henderson read_flags = write_flags; 12198f5db641SRichard Henderson } 12208f5db641SRichard Henderson } 12218f5db641SRichard Henderson 1222732d5487SAnton Johansson wp_flags = cpu_watchpoint_address_matches(cpu, addr_page, 122350b107c5SRichard Henderson TARGET_PAGE_SIZE); 1224d9bb58e5SYang Zhong 1225732d5487SAnton Johansson index = tlb_index(env, mmu_idx, addr_page); 1226732d5487SAnton Johansson te = tlb_entry(env, mmu_idx, addr_page); 1227d9bb58e5SYang Zhong 122868fea038SRichard Henderson /* 122971aec354SEmilio G. Cota * Hold the TLB lock for the rest of the function. We could acquire/release 123071aec354SEmilio G. Cota * the lock several times in the function, but it is faster to amortize the 123171aec354SEmilio G. Cota * acquisition cost by acquiring it just once. Note that this leads to 123271aec354SEmilio G. Cota * a longer critical section, but this is not a concern since the TLB lock 123371aec354SEmilio G. Cota * is unlikely to be contended. 123471aec354SEmilio G. Cota */ 1235a40ec84eSRichard Henderson qemu_spin_lock(&tlb->c.lock); 123671aec354SEmilio G. Cota 12373d1523ceSRichard Henderson /* Note that the tlb is no longer clean. */ 1238a40ec84eSRichard Henderson tlb->c.dirty |= 1 << mmu_idx; 12393d1523ceSRichard Henderson 124071aec354SEmilio G. Cota /* Make sure there's no cached translation for the new page. */ 1241732d5487SAnton Johansson tlb_flush_vtlb_page_locked(env, mmu_idx, addr_page); 124271aec354SEmilio G. Cota 124371aec354SEmilio G. Cota /* 124468fea038SRichard Henderson * Only evict the old entry to the victim tlb if it's for a 124568fea038SRichard Henderson * different page; otherwise just overwrite the stale data. 
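 * The victim TLB is small and fully associative; entries are replaced round-robin via desc->vindex.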
124668fea038SRichard Henderson */ 1247732d5487SAnton Johansson if (!tlb_hit_page_anyprot(te, addr_page) && !tlb_entry_is_empty(te)) { 1248a40ec84eSRichard Henderson unsigned vidx = desc->vindex++ % CPU_VTLB_SIZE; 1249a40ec84eSRichard Henderson CPUTLBEntry *tv = &desc->vtable[vidx]; 125068fea038SRichard Henderson 125168fea038SRichard Henderson /* Evict the old entry into the victim tlb. */ 125271aec354SEmilio G. Cota copy_tlb_helper_locked(tv, te); 125325d3ec58SRichard Henderson desc->vfulltlb[vidx] = desc->fulltlb[index]; 125486e1eff8SEmilio G. Cota tlb_n_used_entries_dec(env, mmu_idx); 125568fea038SRichard Henderson } 1256d9bb58e5SYang Zhong 1257d9bb58e5SYang Zhong /* refill the tlb */ 1258ace41090SPeter Maydell /* 1259*dff1ab68SLIU Zhiwei * When memory region is ram, iotlb contains a TARGET_PAGE_BITS 1260*dff1ab68SLIU Zhiwei * aligned ram_addr_t of the page base of the target RAM. 1261*dff1ab68SLIU Zhiwei * Otherwise, iotlb contains 1262*dff1ab68SLIU Zhiwei * - a physical section number in the lower TARGET_PAGE_BITS 1263*dff1ab68SLIU Zhiwei * - the offset within section->mr of the page base (I/O, ROMD) with the 1264*dff1ab68SLIU Zhiwei * TARGET_PAGE_BITS masked off. 126558e8f1f6SRichard Henderson * We subtract addr_page (which is page aligned and thus won't 1266ace41090SPeter Maydell * disturb the low bits) to give an offset which can be added to the 1267ace41090SPeter Maydell * (non-page-aligned) vaddr of the eventual memory access to get 1268ace41090SPeter Maydell * the MemoryRegion offset for the access. Note that the vaddr we 1269ace41090SPeter Maydell * subtract here is that of the page base, and not the same as the 1270ace41090SPeter Maydell * vaddr we add back in io_readx()/io_writex()/get_page_addr_code(). 1271ace41090SPeter Maydell */ 127240473689SRichard Henderson desc->fulltlb[index] = *full; 127358e8f1f6SRichard Henderson full = &desc->fulltlb[index]; 127458e8f1f6SRichard Henderson full->xlat_section = iotlb - addr_page; 127558e8f1f6SRichard Henderson full->phys_addr = paddr_page; 1276d9bb58e5SYang Zhong 1277d9bb58e5SYang Zhong /* Now calculate the new entry */ 1278732d5487SAnton Johansson tn.addend = addend - addr_page; 127958e8f1f6SRichard Henderson 128058e8f1f6SRichard Henderson tlb_set_compare(full, &tn, addr_page, read_flags, 128158e8f1f6SRichard Henderson MMU_INST_FETCH, prot & PAGE_EXEC); 128258e8f1f6SRichard Henderson 128350b107c5SRichard Henderson if (wp_flags & BP_MEM_READ) { 128458e8f1f6SRichard Henderson read_flags |= TLB_WATCHPOINT; 128550b107c5SRichard Henderson } 128658e8f1f6SRichard Henderson tlb_set_compare(full, &tn, addr_page, read_flags, 128758e8f1f6SRichard Henderson MMU_DATA_LOAD, prot & PAGE_READ); 1288d9bb58e5SYang Zhong 1289f52bfb12SDavid Hildenbrand if (prot & PAGE_WRITE_INV) { 129058e8f1f6SRichard Henderson write_flags |= TLB_INVALID_MASK; 1291f52bfb12SDavid Hildenbrand } 129250b107c5SRichard Henderson if (wp_flags & BP_MEM_WRITE) { 129358e8f1f6SRichard Henderson write_flags |= TLB_WATCHPOINT; 129450b107c5SRichard Henderson } 129558e8f1f6SRichard Henderson tlb_set_compare(full, &tn, addr_page, write_flags, 129658e8f1f6SRichard Henderson MMU_DATA_STORE, prot & PAGE_WRITE); 1297d9bb58e5SYang Zhong 129871aec354SEmilio G. Cota copy_tlb_helper_locked(te, &tn); 129986e1eff8SEmilio G. 
Cota tlb_n_used_entries_inc(env, mmu_idx); 1300a40ec84eSRichard Henderson qemu_spin_unlock(&tlb->c.lock); 1301d9bb58e5SYang Zhong } 1302d9bb58e5SYang Zhong 1303732d5487SAnton Johansson void tlb_set_page_with_attrs(CPUState *cpu, vaddr addr, 130440473689SRichard Henderson hwaddr paddr, MemTxAttrs attrs, int prot, 1305732d5487SAnton Johansson int mmu_idx, uint64_t size) 130640473689SRichard Henderson { 130740473689SRichard Henderson CPUTLBEntryFull full = { 130840473689SRichard Henderson .phys_addr = paddr, 130940473689SRichard Henderson .attrs = attrs, 131040473689SRichard Henderson .prot = prot, 131140473689SRichard Henderson .lg_page_size = ctz64(size) 131240473689SRichard Henderson }; 131340473689SRichard Henderson 131440473689SRichard Henderson assert(is_power_of_2(size)); 1315732d5487SAnton Johansson tlb_set_page_full(cpu, mmu_idx, addr, &full); 131640473689SRichard Henderson } 131740473689SRichard Henderson 1318732d5487SAnton Johansson void tlb_set_page(CPUState *cpu, vaddr addr, 1319d9bb58e5SYang Zhong hwaddr paddr, int prot, 1320732d5487SAnton Johansson int mmu_idx, uint64_t size) 1321d9bb58e5SYang Zhong { 1322732d5487SAnton Johansson tlb_set_page_with_attrs(cpu, addr, paddr, MEMTXATTRS_UNSPECIFIED, 1323d9bb58e5SYang Zhong prot, mmu_idx, size); 1324d9bb58e5SYang Zhong } 1325d9bb58e5SYang Zhong 1326c319dc13SRichard Henderson /* 1327c319dc13SRichard Henderson * Note: tlb_fill() can trigger a resize of the TLB. This means that all of the 1328c319dc13SRichard Henderson * caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must 1329c319dc13SRichard Henderson * be discarded and looked up again (e.g. via tlb_entry()). 1330c319dc13SRichard Henderson */ 1331732d5487SAnton Johansson static void tlb_fill(CPUState *cpu, vaddr addr, int size, 1332c319dc13SRichard Henderson MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) 1333c319dc13SRichard Henderson { 1334c319dc13SRichard Henderson bool ok; 1335c319dc13SRichard Henderson 1336c319dc13SRichard Henderson /* 1337c319dc13SRichard Henderson * This is not a probe, so only valid return is success; failure 1338c319dc13SRichard Henderson * should result in exception + longjmp to the cpu loop. 
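 * A failed fill never returns here, so the assertion below can only observe success.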
1339c319dc13SRichard Henderson */ 13408810ee2aSAlex Bennée ok = cpu->cc->tcg_ops->tlb_fill(cpu, addr, size, 1341e124536fSEduardo Habkost access_type, mmu_idx, false, retaddr); 1342c319dc13SRichard Henderson assert(ok); 1343c319dc13SRichard Henderson } 1344c319dc13SRichard Henderson 134578271684SClaudio Fontana static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr, 134678271684SClaudio Fontana MMUAccessType access_type, 134778271684SClaudio Fontana int mmu_idx, uintptr_t retaddr) 134878271684SClaudio Fontana { 13498810ee2aSAlex Bennée cpu->cc->tcg_ops->do_unaligned_access(cpu, addr, access_type, 13508810ee2aSAlex Bennée mmu_idx, retaddr); 135178271684SClaudio Fontana } 135278271684SClaudio Fontana 135378271684SClaudio Fontana static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr, 135478271684SClaudio Fontana vaddr addr, unsigned size, 135578271684SClaudio Fontana MMUAccessType access_type, 135678271684SClaudio Fontana int mmu_idx, MemTxAttrs attrs, 135778271684SClaudio Fontana MemTxResult response, 135878271684SClaudio Fontana uintptr_t retaddr) 135978271684SClaudio Fontana { 136078271684SClaudio Fontana CPUClass *cc = CPU_GET_CLASS(cpu); 136178271684SClaudio Fontana 136278271684SClaudio Fontana if (!cpu->ignore_memory_transaction_failures && 136378271684SClaudio Fontana cc->tcg_ops->do_transaction_failed) { 136478271684SClaudio Fontana cc->tcg_ops->do_transaction_failed(cpu, physaddr, addr, size, 136578271684SClaudio Fontana access_type, mmu_idx, attrs, 136678271684SClaudio Fontana response, retaddr); 136778271684SClaudio Fontana } 136878271684SClaudio Fontana } 136978271684SClaudio Fontana 1370c30d0b86SMikhail Tyutin /* 1371c30d0b86SMikhail Tyutin * Save a potentially trashed CPUTLBEntryFull for later lookup by plugin. 1372c30d0b86SMikhail Tyutin * This is read by tlb_plugin_lookup if the fulltlb entry doesn't match 1373c30d0b86SMikhail Tyutin * because of the side effect of io_writex changing memory layout. 
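 * Without CONFIG_PLUGIN this helper compiles to an empty function.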
1374c30d0b86SMikhail Tyutin */ 1375c30d0b86SMikhail Tyutin static void save_iotlb_data(CPUState *cs, MemoryRegionSection *section, 1376c30d0b86SMikhail Tyutin hwaddr mr_offset) 1377c30d0b86SMikhail Tyutin { 1378c30d0b86SMikhail Tyutin #ifdef CONFIG_PLUGIN 1379c30d0b86SMikhail Tyutin SavedIOTLB *saved = &cs->saved_iotlb; 1380c30d0b86SMikhail Tyutin saved->section = section; 1381c30d0b86SMikhail Tyutin saved->mr_offset = mr_offset; 1382c30d0b86SMikhail Tyutin #endif 1383c30d0b86SMikhail Tyutin } 1384c30d0b86SMikhail Tyutin 138525d3ec58SRichard Henderson static uint64_t io_readx(CPUArchState *env, CPUTLBEntryFull *full, 1386732d5487SAnton Johansson int mmu_idx, vaddr addr, uintptr_t retaddr, 1387be5c4787STony Nguyen MMUAccessType access_type, MemOp op) 1388d9bb58e5SYang Zhong { 138929a0af61SRichard Henderson CPUState *cpu = env_cpu(env); 13902d54f194SPeter Maydell hwaddr mr_offset; 13912d54f194SPeter Maydell MemoryRegionSection *section; 13922d54f194SPeter Maydell MemoryRegion *mr; 1393d9bb58e5SYang Zhong uint64_t val; 139404e3aabdSPeter Maydell MemTxResult r; 1395d9bb58e5SYang Zhong 139625d3ec58SRichard Henderson section = iotlb_to_section(cpu, full->xlat_section, full->attrs); 13972d54f194SPeter Maydell mr = section->mr; 139825d3ec58SRichard Henderson mr_offset = (full->xlat_section & TARGET_PAGE_MASK) + addr; 1399d9bb58e5SYang Zhong cpu->mem_io_pc = retaddr; 140008565552SRichard Henderson if (!cpu->can_do_io) { 1401d9bb58e5SYang Zhong cpu_io_recompile(cpu, retaddr); 1402d9bb58e5SYang Zhong } 1403d9bb58e5SYang Zhong 1404c30d0b86SMikhail Tyutin /* 1405c30d0b86SMikhail Tyutin * The memory_region_dispatch may trigger a flush/resize 1406c30d0b86SMikhail Tyutin * so for plugins we save the iotlb_data just in case. 1407c30d0b86SMikhail Tyutin */ 1408c30d0b86SMikhail Tyutin save_iotlb_data(cpu, section, mr_offset); 1409c30d0b86SMikhail Tyutin 141061b59fb2SRichard Henderson { 141161b59fb2SRichard Henderson QEMU_IOTHREAD_LOCK_GUARD(); 141225d3ec58SRichard Henderson r = memory_region_dispatch_read(mr, mr_offset, &val, op, full->attrs); 141361b59fb2SRichard Henderson } 141461b59fb2SRichard Henderson 141504e3aabdSPeter Maydell if (r != MEMTX_OK) { 14162d54f194SPeter Maydell hwaddr physaddr = mr_offset + 14172d54f194SPeter Maydell section->offset_within_address_space - 14182d54f194SPeter Maydell section->offset_within_region; 14192d54f194SPeter Maydell 1420be5c4787STony Nguyen cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), access_type, 142125d3ec58SRichard Henderson mmu_idx, full->attrs, r, retaddr); 142204e3aabdSPeter Maydell } 1423d9bb58e5SYang Zhong return val; 1424d9bb58e5SYang Zhong } 1425d9bb58e5SYang Zhong 142625d3ec58SRichard Henderson static void io_writex(CPUArchState *env, CPUTLBEntryFull *full, 1427732d5487SAnton Johansson int mmu_idx, uint64_t val, vaddr addr, 1428be5c4787STony Nguyen uintptr_t retaddr, MemOp op) 1429d9bb58e5SYang Zhong { 143029a0af61SRichard Henderson CPUState *cpu = env_cpu(env); 14312d54f194SPeter Maydell hwaddr mr_offset; 14322d54f194SPeter Maydell MemoryRegionSection *section; 14332d54f194SPeter Maydell MemoryRegion *mr; 143404e3aabdSPeter Maydell MemTxResult r; 1435d9bb58e5SYang Zhong 143625d3ec58SRichard Henderson section = iotlb_to_section(cpu, full->xlat_section, full->attrs); 14372d54f194SPeter Maydell mr = section->mr; 143825d3ec58SRichard Henderson mr_offset = (full->xlat_section & TARGET_PAGE_MASK) + addr; 143908565552SRichard Henderson if (!cpu->can_do_io) { 1440d9bb58e5SYang Zhong cpu_io_recompile(cpu, retaddr); 1441d9bb58e5SYang Zhong } 
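/* Record the host return address of this access for later state reconstruction. */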
1442d9bb58e5SYang Zhong cpu->mem_io_pc = retaddr; 1443d9bb58e5SYang Zhong 14442f3a57eeSAlex Bennée /* 14452f3a57eeSAlex Bennée * The memory_region_dispatch may trigger a flush/resize 14462f3a57eeSAlex Bennée * so for plugins we save the iotlb_data just in case. 14472f3a57eeSAlex Bennée */ 144837523ff7SRichard Henderson save_iotlb_data(cpu, section, mr_offset); 14492f3a57eeSAlex Bennée 145061b59fb2SRichard Henderson { 145161b59fb2SRichard Henderson QEMU_IOTHREAD_LOCK_GUARD(); 145225d3ec58SRichard Henderson r = memory_region_dispatch_write(mr, mr_offset, val, op, full->attrs); 145361b59fb2SRichard Henderson } 145461b59fb2SRichard Henderson 145504e3aabdSPeter Maydell if (r != MEMTX_OK) { 14562d54f194SPeter Maydell hwaddr physaddr = mr_offset + 14572d54f194SPeter Maydell section->offset_within_address_space - 14582d54f194SPeter Maydell section->offset_within_region; 14592d54f194SPeter Maydell 1460be5c4787STony Nguyen cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), 146125d3ec58SRichard Henderson MMU_DATA_STORE, mmu_idx, full->attrs, r, 1462be5c4787STony Nguyen retaddr); 146304e3aabdSPeter Maydell } 1464d9bb58e5SYang Zhong } 1465d9bb58e5SYang Zhong 1466d9bb58e5SYang Zhong /* Return true if ADDR is present in the victim tlb, and has been copied 1467d9bb58e5SYang Zhong back to the main tlb. */ 1468d9bb58e5SYang Zhong static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index, 1469732d5487SAnton Johansson MMUAccessType access_type, vaddr page) 1470d9bb58e5SYang Zhong { 1471d9bb58e5SYang Zhong size_t vidx; 147271aec354SEmilio G. Cota 147329a0af61SRichard Henderson assert_cpu_is_self(env_cpu(env)); 1474d9bb58e5SYang Zhong for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) { 1475a40ec84eSRichard Henderson CPUTLBEntry *vtlb = &env_tlb(env)->d[mmu_idx].vtable[vidx]; 14769e39de98SAnton Johansson uint64_t cmp = tlb_read_idx(vtlb, access_type); 1477d9bb58e5SYang Zhong 1478d9bb58e5SYang Zhong if (cmp == page) { 1479d9bb58e5SYang Zhong /* Found entry in victim tlb, swap tlb and iotlb. */ 1480a40ec84eSRichard Henderson CPUTLBEntry tmptlb, *tlb = &env_tlb(env)->f[mmu_idx].table[index]; 1481d9bb58e5SYang Zhong 1482a40ec84eSRichard Henderson qemu_spin_lock(&env_tlb(env)->c.lock); 148371aec354SEmilio G. Cota copy_tlb_helper_locked(&tmptlb, tlb); 148471aec354SEmilio G. Cota copy_tlb_helper_locked(tlb, vtlb); 148571aec354SEmilio G. 
Cota copy_tlb_helper_locked(vtlb, &tmptlb); 1486a40ec84eSRichard Henderson qemu_spin_unlock(&env_tlb(env)->c.lock); 1487d9bb58e5SYang Zhong 148825d3ec58SRichard Henderson CPUTLBEntryFull *f1 = &env_tlb(env)->d[mmu_idx].fulltlb[index]; 148925d3ec58SRichard Henderson CPUTLBEntryFull *f2 = &env_tlb(env)->d[mmu_idx].vfulltlb[vidx]; 149025d3ec58SRichard Henderson CPUTLBEntryFull tmpf; 149125d3ec58SRichard Henderson tmpf = *f1; *f1 = *f2; *f2 = tmpf; 1492d9bb58e5SYang Zhong return true; 1493d9bb58e5SYang Zhong } 1494d9bb58e5SYang Zhong } 1495d9bb58e5SYang Zhong return false; 1496d9bb58e5SYang Zhong } 1497d9bb58e5SYang Zhong 1498707526adSRichard Henderson static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size, 149925d3ec58SRichard Henderson CPUTLBEntryFull *full, uintptr_t retaddr) 1500707526adSRichard Henderson { 150125d3ec58SRichard Henderson ram_addr_t ram_addr = mem_vaddr + full->xlat_section; 1502707526adSRichard Henderson 1503707526adSRichard Henderson trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size); 1504707526adSRichard Henderson 1505707526adSRichard Henderson if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) { 1506f349e92eSPhilippe Mathieu-Daudé tb_invalidate_phys_range_fast(ram_addr, size, retaddr); 1507707526adSRichard Henderson } 1508707526adSRichard Henderson 1509707526adSRichard Henderson /* 1510707526adSRichard Henderson * Set both VGA and migration bits for simplicity and to remove 1511707526adSRichard Henderson * the notdirty callback faster. 1512707526adSRichard Henderson */ 1513707526adSRichard Henderson cpu_physical_memory_set_dirty_range(ram_addr, size, DIRTY_CLIENTS_NOCODE); 1514707526adSRichard Henderson 1515707526adSRichard Henderson /* We remove the notdirty callback only if the code has been flushed. 
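If any dirty-memory client still considers the page clean, TLB_NOTDIRTY stays set and subsequent writes keep trapping here.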
*/ 1516707526adSRichard Henderson if (!cpu_physical_memory_is_clean(ram_addr)) { 1517707526adSRichard Henderson trace_memory_notdirty_set_dirty(mem_vaddr); 1518707526adSRichard Henderson tlb_set_dirty(cpu, mem_vaddr); 1519707526adSRichard Henderson } 1520707526adSRichard Henderson } 1521707526adSRichard Henderson 15224f8f4127SAnton Johansson static int probe_access_internal(CPUArchState *env, vaddr addr, 1523069cfe77SRichard Henderson int fault_size, MMUAccessType access_type, 1524069cfe77SRichard Henderson int mmu_idx, bool nonfault, 1525af803a4fSRichard Henderson void **phost, CPUTLBEntryFull **pfull, 15266d03226bSAlex Bennée uintptr_t retaddr, bool check_mem_cbs) 1527d9bb58e5SYang Zhong { 1528383beda9SRichard Henderson uintptr_t index = tlb_index(env, mmu_idx, addr); 1529383beda9SRichard Henderson CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); 15309e39de98SAnton Johansson uint64_t tlb_addr = tlb_read_idx(entry, access_type); 15314f8f4127SAnton Johansson vaddr page_addr = addr & TARGET_PAGE_MASK; 153258e8f1f6SRichard Henderson int flags = TLB_FLAGS_MASK & ~TLB_FORCE_SLOW; 15336d03226bSAlex Bennée bool force_mmio = check_mem_cbs && cpu_plugin_mem_cbs_enabled(env_cpu(env)); 153458e8f1f6SRichard Henderson CPUTLBEntryFull *full; 1535ca86cf32SDavid Hildenbrand 1536069cfe77SRichard Henderson if (!tlb_hit_page(tlb_addr, page_addr)) { 15370b3c75adSRichard Henderson if (!victim_tlb_hit(env, mmu_idx, index, access_type, page_addr)) { 1538069cfe77SRichard Henderson CPUState *cs = env_cpu(env); 1539069cfe77SRichard Henderson 15408810ee2aSAlex Bennée if (!cs->cc->tcg_ops->tlb_fill(cs, addr, fault_size, access_type, 1541069cfe77SRichard Henderson mmu_idx, nonfault, retaddr)) { 1542069cfe77SRichard Henderson /* Non-faulting page table read failed. */ 1543069cfe77SRichard Henderson *phost = NULL; 1544af803a4fSRichard Henderson *pfull = NULL; 1545069cfe77SRichard Henderson return TLB_INVALID_MASK; 1546069cfe77SRichard Henderson } 1547069cfe77SRichard Henderson 154803a98189SDavid Hildenbrand /* TLB resize via tlb_fill may have moved the entry. */ 1549af803a4fSRichard Henderson index = tlb_index(env, mmu_idx, addr); 155003a98189SDavid Hildenbrand entry = tlb_entry(env, mmu_idx, addr); 1551c3c8bf57SRichard Henderson 1552c3c8bf57SRichard Henderson /* 1553c3c8bf57SRichard Henderson * With PAGE_WRITE_INV, we set TLB_INVALID_MASK immediately, 1554c3c8bf57SRichard Henderson * to force the next access through tlb_fill. We've just 1555c3c8bf57SRichard Henderson * called tlb_fill, so we know that this entry *is* valid. 1556c3c8bf57SRichard Henderson */ 1557c3c8bf57SRichard Henderson flags &= ~TLB_INVALID_MASK; 1558d9bb58e5SYang Zhong } 15590b3c75adSRichard Henderson tlb_addr = tlb_read_idx(entry, access_type); 156003a98189SDavid Hildenbrand } 1561c3c8bf57SRichard Henderson flags &= tlb_addr; 156203a98189SDavid Hildenbrand 156358e8f1f6SRichard Henderson *pfull = full = &env_tlb(env)->d[mmu_idx].fulltlb[index]; 156458e8f1f6SRichard Henderson flags |= full->slow_flags[access_type]; 1565af803a4fSRichard Henderson 1566069cfe77SRichard Henderson /* Fold all "mmio-like" bits into TLB_MMIO. This is not RAM. */ 15676d03226bSAlex Bennée if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY)) 15686d03226bSAlex Bennée || 15696d03226bSAlex Bennée (access_type != MMU_INST_FETCH && force_mmio)) { 1570069cfe77SRichard Henderson *phost = NULL; 1571069cfe77SRichard Henderson return TLB_MMIO; 1572fef39ccdSDavid Hildenbrand } 1573fef39ccdSDavid Hildenbrand 1574069cfe77SRichard Henderson /* Everything else is RAM. 
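Only TLB_WATCHPOINT and TLB_NOTDIRTY can remain in the returned flags at this point.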
*/ 1575069cfe77SRichard Henderson *phost = (void *)((uintptr_t)addr + entry->addend); 1576069cfe77SRichard Henderson return flags; 1577069cfe77SRichard Henderson } 1578069cfe77SRichard Henderson 15794f8f4127SAnton Johansson int probe_access_full(CPUArchState *env, vaddr addr, int size, 1580069cfe77SRichard Henderson MMUAccessType access_type, int mmu_idx, 1581af803a4fSRichard Henderson bool nonfault, void **phost, CPUTLBEntryFull **pfull, 1582af803a4fSRichard Henderson uintptr_t retaddr) 1583069cfe77SRichard Henderson { 1584d507e6c5SRichard Henderson int flags = probe_access_internal(env, addr, size, access_type, mmu_idx, 15856d03226bSAlex Bennée nonfault, phost, pfull, retaddr, true); 1586069cfe77SRichard Henderson 1587069cfe77SRichard Henderson /* Handle clean RAM pages. */ 1588069cfe77SRichard Henderson if (unlikely(flags & TLB_NOTDIRTY)) { 1589af803a4fSRichard Henderson notdirty_write(env_cpu(env), addr, 1, *pfull, retaddr); 1590069cfe77SRichard Henderson flags &= ~TLB_NOTDIRTY; 1591069cfe77SRichard Henderson } 1592069cfe77SRichard Henderson 1593069cfe77SRichard Henderson return flags; 1594069cfe77SRichard Henderson } 1595069cfe77SRichard Henderson 15966d03226bSAlex Bennée int probe_access_full_mmu(CPUArchState *env, vaddr addr, int size, 15976d03226bSAlex Bennée MMUAccessType access_type, int mmu_idx, 15986d03226bSAlex Bennée void **phost, CPUTLBEntryFull **pfull) 15996d03226bSAlex Bennée { 16006d03226bSAlex Bennée void *discard_phost; 16016d03226bSAlex Bennée CPUTLBEntryFull *discard_tlb; 16026d03226bSAlex Bennée 16036d03226bSAlex Bennée /* privately handle users that don't need full results */ 16046d03226bSAlex Bennée phost = phost ? phost : &discard_phost; 16056d03226bSAlex Bennée pfull = pfull ? pfull : &discard_tlb; 16066d03226bSAlex Bennée 16076d03226bSAlex Bennée int flags = probe_access_internal(env, addr, size, access_type, mmu_idx, 16086d03226bSAlex Bennée true, phost, pfull, 0, false); 16096d03226bSAlex Bennée 16106d03226bSAlex Bennée /* Handle clean RAM pages. */ 16116d03226bSAlex Bennée if (unlikely(flags & TLB_NOTDIRTY)) { 16126d03226bSAlex Bennée notdirty_write(env_cpu(env), addr, 1, *pfull, 0); 16136d03226bSAlex Bennée flags &= ~TLB_NOTDIRTY; 16146d03226bSAlex Bennée } 16156d03226bSAlex Bennée 16166d03226bSAlex Bennée return flags; 16176d03226bSAlex Bennée } 16186d03226bSAlex Bennée 16194f8f4127SAnton Johansson int probe_access_flags(CPUArchState *env, vaddr addr, int size, 1620af803a4fSRichard Henderson MMUAccessType access_type, int mmu_idx, 1621af803a4fSRichard Henderson bool nonfault, void **phost, uintptr_t retaddr) 1622af803a4fSRichard Henderson { 1623af803a4fSRichard Henderson CPUTLBEntryFull *full; 16241770b2f2SDaniel Henrique Barboza int flags; 1625af803a4fSRichard Henderson 16261770b2f2SDaniel Henrique Barboza g_assert(-(addr | TARGET_PAGE_MASK) >= size); 16271770b2f2SDaniel Henrique Barboza 16281770b2f2SDaniel Henrique Barboza flags = probe_access_internal(env, addr, size, access_type, mmu_idx, 16296d03226bSAlex Bennée nonfault, phost, &full, retaddr, true); 16301770b2f2SDaniel Henrique Barboza 16311770b2f2SDaniel Henrique Barboza /* Handle clean RAM pages. 
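notdirty_write does the dirty-bit bookkeeping here, so TLB_NOTDIRTY is cleared from the returned flags and the caller may treat the page as ordinary RAM.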
*/ 16321770b2f2SDaniel Henrique Barboza if (unlikely(flags & TLB_NOTDIRTY)) { 16331770b2f2SDaniel Henrique Barboza notdirty_write(env_cpu(env), addr, 1, full, retaddr); 16341770b2f2SDaniel Henrique Barboza flags &= ~TLB_NOTDIRTY; 16351770b2f2SDaniel Henrique Barboza } 16361770b2f2SDaniel Henrique Barboza 16371770b2f2SDaniel Henrique Barboza return flags; 1638af803a4fSRichard Henderson } 1639af803a4fSRichard Henderson 16404f8f4127SAnton Johansson void *probe_access(CPUArchState *env, vaddr addr, int size, 1641069cfe77SRichard Henderson MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) 1642069cfe77SRichard Henderson { 1643af803a4fSRichard Henderson CPUTLBEntryFull *full; 1644069cfe77SRichard Henderson void *host; 1645069cfe77SRichard Henderson int flags; 1646069cfe77SRichard Henderson 1647069cfe77SRichard Henderson g_assert(-(addr | TARGET_PAGE_MASK) >= size); 1648069cfe77SRichard Henderson 1649069cfe77SRichard Henderson flags = probe_access_internal(env, addr, size, access_type, mmu_idx, 16506d03226bSAlex Bennée false, &host, &full, retaddr, true); 1651069cfe77SRichard Henderson 1652069cfe77SRichard Henderson /* Per the interface, size == 0 merely faults the access. */ 1653069cfe77SRichard Henderson if (size == 0) { 165473bc0bd4SRichard Henderson return NULL; 165573bc0bd4SRichard Henderson } 165673bc0bd4SRichard Henderson 1657069cfe77SRichard Henderson if (unlikely(flags & (TLB_NOTDIRTY | TLB_WATCHPOINT))) { 165803a98189SDavid Hildenbrand /* Handle watchpoints. */ 1659069cfe77SRichard Henderson if (flags & TLB_WATCHPOINT) { 1660069cfe77SRichard Henderson int wp_access = (access_type == MMU_DATA_STORE 1661069cfe77SRichard Henderson ? BP_MEM_WRITE : BP_MEM_READ); 166203a98189SDavid Hildenbrand cpu_check_watchpoint(env_cpu(env), addr, size, 166325d3ec58SRichard Henderson full->attrs, wp_access, retaddr); 1664d9bb58e5SYang Zhong } 1665fef39ccdSDavid Hildenbrand 166673bc0bd4SRichard Henderson /* Handle clean RAM pages. */ 1667069cfe77SRichard Henderson if (flags & TLB_NOTDIRTY) { 166825d3ec58SRichard Henderson notdirty_write(env_cpu(env), addr, 1, full, retaddr); 166973bc0bd4SRichard Henderson } 1670fef39ccdSDavid Hildenbrand } 1671fef39ccdSDavid Hildenbrand 1672069cfe77SRichard Henderson return host; 1673d9bb58e5SYang Zhong } 1674d9bb58e5SYang Zhong 16754811e909SRichard Henderson void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr, 16764811e909SRichard Henderson MMUAccessType access_type, int mmu_idx) 16774811e909SRichard Henderson { 1678af803a4fSRichard Henderson CPUTLBEntryFull *full; 1679069cfe77SRichard Henderson void *host; 1680069cfe77SRichard Henderson int flags; 16814811e909SRichard Henderson 1682069cfe77SRichard Henderson flags = probe_access_internal(env, addr, 0, access_type, 16836d03226bSAlex Bennée mmu_idx, true, &host, &full, 0, false); 1684069cfe77SRichard Henderson 1685069cfe77SRichard Henderson /* No combination of flags are expected by the caller. */ 1686069cfe77SRichard Henderson return flags ? NULL : host; 16874811e909SRichard Henderson } 16884811e909SRichard Henderson 16897e0d9973SRichard Henderson /* 16907e0d9973SRichard Henderson * Return a ram_addr_t for the virtual address for execution. 16917e0d9973SRichard Henderson * 16927e0d9973SRichard Henderson * Return -1 if we can't translate and execute from an entire page 16937e0d9973SRichard Henderson * of RAM. This will force us to execute by loading and translating 16947e0d9973SRichard Henderson * one insn at a time, without caching. 
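 * -1 is also returned when the translation covers less than a full target page.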
16957e0d9973SRichard Henderson * 16967e0d9973SRichard Henderson * NOTE: This function will trigger an exception if the page is 16977e0d9973SRichard Henderson * not executable. 16987e0d9973SRichard Henderson */ 16994f8f4127SAnton Johansson tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr, 17007e0d9973SRichard Henderson void **hostp) 17017e0d9973SRichard Henderson { 1702af803a4fSRichard Henderson CPUTLBEntryFull *full; 17037e0d9973SRichard Henderson void *p; 17047e0d9973SRichard Henderson 17057e0d9973SRichard Henderson (void)probe_access_internal(env, addr, 1, MMU_INST_FETCH, 17066d03226bSAlex Bennée cpu_mmu_index(env, true), false, 17076d03226bSAlex Bennée &p, &full, 0, false); 17087e0d9973SRichard Henderson if (p == NULL) { 17097e0d9973SRichard Henderson return -1; 17107e0d9973SRichard Henderson } 1711ac01ec6fSWeiwei Li 1712ac01ec6fSWeiwei Li if (full->lg_page_size < TARGET_PAGE_BITS) { 1713ac01ec6fSWeiwei Li return -1; 1714ac01ec6fSWeiwei Li } 1715ac01ec6fSWeiwei Li 17167e0d9973SRichard Henderson if (hostp) { 17177e0d9973SRichard Henderson *hostp = p; 17187e0d9973SRichard Henderson } 17197e0d9973SRichard Henderson return qemu_ram_addr_from_host_nofail(p); 17207e0d9973SRichard Henderson } 17217e0d9973SRichard Henderson 1722cdfac37bSRichard Henderson /* Load/store with atomicity primitives. */ 1723cdfac37bSRichard Henderson #include "ldst_atomicity.c.inc" 1724cdfac37bSRichard Henderson 1725235537faSAlex Bennée #ifdef CONFIG_PLUGIN 1726235537faSAlex Bennée /* 1727235537faSAlex Bennée * Perform a TLB lookup and populate the qemu_plugin_hwaddr structure. 1728235537faSAlex Bennée * This should be a hot path as we will have just looked this path up 1729235537faSAlex Bennée * in the softmmu lookup code (or helper). We don't handle re-fills or 1730235537faSAlex Bennée * checking the victim table. This is purely informational. 1731235537faSAlex Bennée * 17322f3a57eeSAlex Bennée * This almost never fails as the memory access being instrumented 17332f3a57eeSAlex Bennée * should have just filled the TLB. The one corner case is io_writex 17342f3a57eeSAlex Bennée * which can cause TLB flushes and potential resizing of the TLBs 1735570ef309SAlex Bennée * losing the information we need. In those cases we need to recover 173625d3ec58SRichard Henderson * data from a copy of the CPUTLBEntryFull. As long as this always occurs 1737570ef309SAlex Bennée * from the same thread (which a mem callback will be) this is safe. 1738235537faSAlex Bennée */ 1739235537faSAlex Bennée 1740732d5487SAnton Johansson bool tlb_plugin_lookup(CPUState *cpu, vaddr addr, int mmu_idx, 1741235537faSAlex Bennée bool is_store, struct qemu_plugin_hwaddr *data) 1742235537faSAlex Bennée { 1743235537faSAlex Bennée CPUArchState *env = cpu->env_ptr; 1744235537faSAlex Bennée CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr); 1745235537faSAlex Bennée uintptr_t index = tlb_index(env, mmu_idx, addr); 17469e39de98SAnton Johansson uint64_t tlb_addr = is_store ? 
tlb_addr_write(tlbe) : tlbe->addr_read; 1747235537faSAlex Bennée 1748235537faSAlex Bennée if (likely(tlb_hit(tlb_addr, addr))) { 1749235537faSAlex Bennée /* We must have an iotlb entry for MMIO */ 1750235537faSAlex Bennée if (tlb_addr & TLB_MMIO) { 175125d3ec58SRichard Henderson CPUTLBEntryFull *full; 175225d3ec58SRichard Henderson full = &env_tlb(env)->d[mmu_idx].fulltlb[index]; 1753235537faSAlex Bennée data->is_io = true; 175425d3ec58SRichard Henderson data->v.io.section = 175525d3ec58SRichard Henderson iotlb_to_section(cpu, full->xlat_section, full->attrs); 175625d3ec58SRichard Henderson data->v.io.offset = (full->xlat_section & TARGET_PAGE_MASK) + addr; 1757235537faSAlex Bennée } else { 1758235537faSAlex Bennée data->is_io = false; 17592d932039SAlex Bennée data->v.ram.hostaddr = (void *)((uintptr_t)addr + tlbe->addend); 1760235537faSAlex Bennée } 1761235537faSAlex Bennée return true; 17622f3a57eeSAlex Bennée } else { 17632f3a57eeSAlex Bennée SavedIOTLB *saved = &cpu->saved_iotlb; 17642f3a57eeSAlex Bennée data->is_io = true; 17652f3a57eeSAlex Bennée data->v.io.section = saved->section; 17662f3a57eeSAlex Bennée data->v.io.offset = saved->mr_offset; 17672f3a57eeSAlex Bennée return true; 1768235537faSAlex Bennée } 1769235537faSAlex Bennée } 1770235537faSAlex Bennée 1771235537faSAlex Bennée #endif 1772235537faSAlex Bennée 177308dff435SRichard Henderson /* 17748cfdacaaSRichard Henderson * Probe for a load/store operation. 17758cfdacaaSRichard Henderson * Return the host address and into @flags. 17768cfdacaaSRichard Henderson */ 17778cfdacaaSRichard Henderson 17788cfdacaaSRichard Henderson typedef struct MMULookupPageData { 17798cfdacaaSRichard Henderson CPUTLBEntryFull *full; 17808cfdacaaSRichard Henderson void *haddr; 1781fb2c53cbSAnton Johansson vaddr addr; 17828cfdacaaSRichard Henderson int flags; 17838cfdacaaSRichard Henderson int size; 17848cfdacaaSRichard Henderson } MMULookupPageData; 17858cfdacaaSRichard Henderson 17868cfdacaaSRichard Henderson typedef struct MMULookupLocals { 17878cfdacaaSRichard Henderson MMULookupPageData page[2]; 17888cfdacaaSRichard Henderson MemOp memop; 17898cfdacaaSRichard Henderson int mmu_idx; 17908cfdacaaSRichard Henderson } MMULookupLocals; 17918cfdacaaSRichard Henderson 17928cfdacaaSRichard Henderson /** 17938cfdacaaSRichard Henderson * mmu_lookup1: translate one page 17948cfdacaaSRichard Henderson * @env: cpu context 17958cfdacaaSRichard Henderson * @data: lookup parameters 17968cfdacaaSRichard Henderson * @mmu_idx: virtual address context 17978cfdacaaSRichard Henderson * @access_type: load/store/code 17988cfdacaaSRichard Henderson * @ra: return address into tcg generated code, or 0 17998cfdacaaSRichard Henderson * 18008cfdacaaSRichard Henderson * Resolve the translation for the one page at @data.addr, filling in 18018cfdacaaSRichard Henderson * the rest of @data with the results. If the translation fails, 18028cfdacaaSRichard Henderson * tlb_fill will longjmp out. Return true if the softmmu tlb for 18038cfdacaaSRichard Henderson * @mmu_idx may have resized. 
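 * In that case any cached CPUTLBEntry or CPUTLBEntryFull pointers must be refetched.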
18048cfdacaaSRichard Henderson */ 18058cfdacaaSRichard Henderson static bool mmu_lookup1(CPUArchState *env, MMULookupPageData *data, 18068cfdacaaSRichard Henderson int mmu_idx, MMUAccessType access_type, uintptr_t ra) 18078cfdacaaSRichard Henderson { 1808fb2c53cbSAnton Johansson vaddr addr = data->addr; 18098cfdacaaSRichard Henderson uintptr_t index = tlb_index(env, mmu_idx, addr); 18108cfdacaaSRichard Henderson CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); 18119e39de98SAnton Johansson uint64_t tlb_addr = tlb_read_idx(entry, access_type); 18128cfdacaaSRichard Henderson bool maybe_resized = false; 181358e8f1f6SRichard Henderson CPUTLBEntryFull *full; 181458e8f1f6SRichard Henderson int flags; 18158cfdacaaSRichard Henderson 18168cfdacaaSRichard Henderson /* If the TLB entry is for a different page, reload and try again. */ 18178cfdacaaSRichard Henderson if (!tlb_hit(tlb_addr, addr)) { 18188cfdacaaSRichard Henderson if (!victim_tlb_hit(env, mmu_idx, index, access_type, 18198cfdacaaSRichard Henderson addr & TARGET_PAGE_MASK)) { 18208cfdacaaSRichard Henderson tlb_fill(env_cpu(env), addr, data->size, access_type, mmu_idx, ra); 18218cfdacaaSRichard Henderson maybe_resized = true; 18228cfdacaaSRichard Henderson index = tlb_index(env, mmu_idx, addr); 18238cfdacaaSRichard Henderson entry = tlb_entry(env, mmu_idx, addr); 18248cfdacaaSRichard Henderson } 18258cfdacaaSRichard Henderson tlb_addr = tlb_read_idx(entry, access_type) & ~TLB_INVALID_MASK; 18268cfdacaaSRichard Henderson } 18278cfdacaaSRichard Henderson 182858e8f1f6SRichard Henderson full = &env_tlb(env)->d[mmu_idx].fulltlb[index]; 182958e8f1f6SRichard Henderson flags = tlb_addr & (TLB_FLAGS_MASK & ~TLB_FORCE_SLOW); 183058e8f1f6SRichard Henderson flags |= full->slow_flags[access_type]; 183158e8f1f6SRichard Henderson 183258e8f1f6SRichard Henderson data->full = full; 183358e8f1f6SRichard Henderson data->flags = flags; 18348cfdacaaSRichard Henderson /* Compute haddr speculatively; depending on flags it might be invalid. */ 18358cfdacaaSRichard Henderson data->haddr = (void *)((uintptr_t)addr + entry->addend); 18368cfdacaaSRichard Henderson 18378cfdacaaSRichard Henderson return maybe_resized; 18388cfdacaaSRichard Henderson } 18398cfdacaaSRichard Henderson 18408cfdacaaSRichard Henderson /** 18418cfdacaaSRichard Henderson * mmu_watch_or_dirty 18428cfdacaaSRichard Henderson * @env: cpu context 18438cfdacaaSRichard Henderson * @data: lookup parameters 18448cfdacaaSRichard Henderson * @access_type: load/store/code 18458cfdacaaSRichard Henderson * @ra: return address into tcg generated code, or 0 18468cfdacaaSRichard Henderson * 18478cfdacaaSRichard Henderson * Trigger watchpoints for @data.addr:@data.size; 18488cfdacaaSRichard Henderson * record writes to protected clean pages. 18498cfdacaaSRichard Henderson */ 18508cfdacaaSRichard Henderson static void mmu_watch_or_dirty(CPUArchState *env, MMULookupPageData *data, 18518cfdacaaSRichard Henderson MMUAccessType access_type, uintptr_t ra) 18528cfdacaaSRichard Henderson { 18538cfdacaaSRichard Henderson CPUTLBEntryFull *full = data->full; 1854fb2c53cbSAnton Johansson vaddr addr = data->addr; 18558cfdacaaSRichard Henderson int flags = data->flags; 18568cfdacaaSRichard Henderson int size = data->size; 18578cfdacaaSRichard Henderson 18588cfdacaaSRichard Henderson /* On watchpoint hit, this will longjmp out. */ 18598cfdacaaSRichard Henderson if (flags & TLB_WATCHPOINT) { 18608cfdacaaSRichard Henderson int wp = access_type == MMU_DATA_STORE ? 
BP_MEM_WRITE : BP_MEM_READ; 18618cfdacaaSRichard Henderson cpu_check_watchpoint(env_cpu(env), addr, size, full->attrs, wp, ra); 18628cfdacaaSRichard Henderson flags &= ~TLB_WATCHPOINT; 18638cfdacaaSRichard Henderson } 18648cfdacaaSRichard Henderson 18658cfdacaaSRichard Henderson /* Note that notdirty is only set for writes. */ 18668cfdacaaSRichard Henderson if (flags & TLB_NOTDIRTY) { 18678cfdacaaSRichard Henderson notdirty_write(env_cpu(env), addr, size, full, ra); 18688cfdacaaSRichard Henderson flags &= ~TLB_NOTDIRTY; 18698cfdacaaSRichard Henderson } 18708cfdacaaSRichard Henderson data->flags = flags; 18718cfdacaaSRichard Henderson } 18728cfdacaaSRichard Henderson 18738cfdacaaSRichard Henderson /** 18748cfdacaaSRichard Henderson * mmu_lookup: translate page(s) 18758cfdacaaSRichard Henderson * @env: cpu context 18768cfdacaaSRichard Henderson * @addr: virtual address 18778cfdacaaSRichard Henderson * @oi: combined mmu_idx and MemOp 18788cfdacaaSRichard Henderson * @ra: return address into tcg generated code, or 0 18798cfdacaaSRichard Henderson * @access_type: load/store/code 18808cfdacaaSRichard Henderson * @l: output result 18818cfdacaaSRichard Henderson * 18828cfdacaaSRichard Henderson * Resolve the translation for the page(s) beginning at @addr, for MemOp.size 18838cfdacaaSRichard Henderson * bytes. Return true if the lookup crosses a page boundary. 18848cfdacaaSRichard Henderson */ 1885fb2c53cbSAnton Johansson static bool mmu_lookup(CPUArchState *env, vaddr addr, MemOpIdx oi, 18868cfdacaaSRichard Henderson uintptr_t ra, MMUAccessType type, MMULookupLocals *l) 18878cfdacaaSRichard Henderson { 18888cfdacaaSRichard Henderson unsigned a_bits; 18898cfdacaaSRichard Henderson bool crosspage; 18908cfdacaaSRichard Henderson int flags; 18918cfdacaaSRichard Henderson 18928cfdacaaSRichard Henderson l->memop = get_memop(oi); 18938cfdacaaSRichard Henderson l->mmu_idx = get_mmuidx(oi); 18948cfdacaaSRichard Henderson 18958cfdacaaSRichard Henderson tcg_debug_assert(l->mmu_idx < NB_MMU_MODES); 18968cfdacaaSRichard Henderson 18978cfdacaaSRichard Henderson /* Handle CPU specific unaligned behaviour */ 18988cfdacaaSRichard Henderson a_bits = get_alignment_bits(l->memop); 18998cfdacaaSRichard Henderson if (addr & ((1 << a_bits) - 1)) { 19008cfdacaaSRichard Henderson cpu_unaligned_access(env_cpu(env), addr, type, l->mmu_idx, ra); 19018cfdacaaSRichard Henderson } 19028cfdacaaSRichard Henderson 19038cfdacaaSRichard Henderson l->page[0].addr = addr; 19048cfdacaaSRichard Henderson l->page[0].size = memop_size(l->memop); 19058cfdacaaSRichard Henderson l->page[1].addr = (addr + l->page[0].size - 1) & TARGET_PAGE_MASK; 19068cfdacaaSRichard Henderson l->page[1].size = 0; 19078cfdacaaSRichard Henderson crosspage = (addr ^ l->page[1].addr) & TARGET_PAGE_MASK; 19088cfdacaaSRichard Henderson 19098cfdacaaSRichard Henderson if (likely(!crosspage)) { 19108cfdacaaSRichard Henderson mmu_lookup1(env, &l->page[0], l->mmu_idx, type, ra); 19118cfdacaaSRichard Henderson 19128cfdacaaSRichard Henderson flags = l->page[0].flags; 19138cfdacaaSRichard Henderson if (unlikely(flags & (TLB_WATCHPOINT | TLB_NOTDIRTY))) { 19148cfdacaaSRichard Henderson mmu_watch_or_dirty(env, &l->page[0], type, ra); 19158cfdacaaSRichard Henderson } 19168cfdacaaSRichard Henderson if (unlikely(flags & TLB_BSWAP)) { 19178cfdacaaSRichard Henderson l->memop ^= MO_BSWAP; 19188cfdacaaSRichard Henderson } 19198cfdacaaSRichard Henderson } else { 19208cfdacaaSRichard Henderson /* Finish compute of page crossing. 
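size0 is the number of bytes that fall on the first page; e.g. an 8-byte access starting 3 bytes before a page boundary gives size0 = 3 and leaves 5 bytes for the second page.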
*/ 19218cfdacaaSRichard Henderson int size0 = l->page[1].addr - addr; 19228cfdacaaSRichard Henderson l->page[1].size = l->page[0].size - size0; 19238cfdacaaSRichard Henderson l->page[0].size = size0; 19248cfdacaaSRichard Henderson 19258cfdacaaSRichard Henderson /* 19268cfdacaaSRichard Henderson * Lookup both pages, recognizing exceptions from either. If the 19278cfdacaaSRichard Henderson * second lookup potentially resized, refresh first CPUTLBEntryFull. 19288cfdacaaSRichard Henderson */ 19298cfdacaaSRichard Henderson mmu_lookup1(env, &l->page[0], l->mmu_idx, type, ra); 19308cfdacaaSRichard Henderson if (mmu_lookup1(env, &l->page[1], l->mmu_idx, type, ra)) { 19318cfdacaaSRichard Henderson uintptr_t index = tlb_index(env, l->mmu_idx, addr); 19328cfdacaaSRichard Henderson l->page[0].full = &env_tlb(env)->d[l->mmu_idx].fulltlb[index]; 19338cfdacaaSRichard Henderson } 19348cfdacaaSRichard Henderson 19358cfdacaaSRichard Henderson flags = l->page[0].flags | l->page[1].flags; 19368cfdacaaSRichard Henderson if (unlikely(flags & (TLB_WATCHPOINT | TLB_NOTDIRTY))) { 19378cfdacaaSRichard Henderson mmu_watch_or_dirty(env, &l->page[0], type, ra); 19388cfdacaaSRichard Henderson mmu_watch_or_dirty(env, &l->page[1], type, ra); 19398cfdacaaSRichard Henderson } 19408cfdacaaSRichard Henderson 19418cfdacaaSRichard Henderson /* 19428cfdacaaSRichard Henderson * Since target/sparc is the only user of TLB_BSWAP, and all 19438cfdacaaSRichard Henderson * Sparc accesses are aligned, any treatment across two pages 19448cfdacaaSRichard Henderson * would be arbitrary. Refuse it until there's a use. 19458cfdacaaSRichard Henderson */ 19468cfdacaaSRichard Henderson tcg_debug_assert((flags & TLB_BSWAP) == 0); 19478cfdacaaSRichard Henderson } 19488cfdacaaSRichard Henderson 19498cfdacaaSRichard Henderson return crosspage; 19508cfdacaaSRichard Henderson } 19518cfdacaaSRichard Henderson 19528cfdacaaSRichard Henderson /* 195308dff435SRichard Henderson * Probe for an atomic operation. Do not allow unaligned operations, 195408dff435SRichard Henderson * or io operations to proceed. Return the host address. 195508dff435SRichard Henderson */ 1956b0326eb9SAnton Johansson static void *atomic_mmu_lookup(CPUArchState *env, vaddr addr, MemOpIdx oi, 1957b0326eb9SAnton Johansson int size, uintptr_t retaddr) 1958d9bb58e5SYang Zhong { 1959b826044fSRichard Henderson uintptr_t mmu_idx = get_mmuidx(oi); 196014776ab5STony Nguyen MemOp mop = get_memop(oi); 1961d9bb58e5SYang Zhong int a_bits = get_alignment_bits(mop); 196208dff435SRichard Henderson uintptr_t index; 196308dff435SRichard Henderson CPUTLBEntry *tlbe; 1964b0326eb9SAnton Johansson vaddr tlb_addr; 196534d49937SPeter Maydell void *hostaddr; 1966417aeaffSRichard Henderson CPUTLBEntryFull *full; 1967d9bb58e5SYang Zhong 1968b826044fSRichard Henderson tcg_debug_assert(mmu_idx < NB_MMU_MODES); 1969b826044fSRichard Henderson 1970d9bb58e5SYang Zhong /* Adjust the given return address. */ 1971d9bb58e5SYang Zhong retaddr -= GETPC_ADJ; 1972d9bb58e5SYang Zhong 1973d9bb58e5SYang Zhong /* Enforce guest required alignment. */ 1974d9bb58e5SYang Zhong if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) { 1975d9bb58e5SYang Zhong /* ??? Maybe indicate atomic op to cpu_unaligned_access */ 197629a0af61SRichard Henderson cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE, 1977d9bb58e5SYang Zhong mmu_idx, retaddr); 1978d9bb58e5SYang Zhong } 1979d9bb58e5SYang Zhong 1980d9bb58e5SYang Zhong /* Enforce qemu required alignment. 
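An atomic operation must be naturally aligned for its full size; anything less falls back to the stop-the-world path below.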
*/ 198108dff435SRichard Henderson if (unlikely(addr & (size - 1))) { 1982d9bb58e5SYang Zhong /* We get here if guest alignment was not requested, 1983d9bb58e5SYang Zhong or was not enforced by cpu_unaligned_access above. 1984d9bb58e5SYang Zhong We might widen the access and emulate, but for now 1985d9bb58e5SYang Zhong mark an exception and exit the cpu loop. */ 1986d9bb58e5SYang Zhong goto stop_the_world; 1987d9bb58e5SYang Zhong } 1988d9bb58e5SYang Zhong 198908dff435SRichard Henderson index = tlb_index(env, mmu_idx, addr); 199008dff435SRichard Henderson tlbe = tlb_entry(env, mmu_idx, addr); 199108dff435SRichard Henderson 1992d9bb58e5SYang Zhong /* Check TLB entry and enforce page permissions. */ 199308dff435SRichard Henderson tlb_addr = tlb_addr_write(tlbe); 1994334692bcSPeter Maydell if (!tlb_hit(tlb_addr, addr)) { 19950b3c75adSRichard Henderson if (!victim_tlb_hit(env, mmu_idx, index, MMU_DATA_STORE, 19960b3c75adSRichard Henderson addr & TARGET_PAGE_MASK)) { 199708dff435SRichard Henderson tlb_fill(env_cpu(env), addr, size, 199808dff435SRichard Henderson MMU_DATA_STORE, mmu_idx, retaddr); 19996d967cb8SEmilio G. Cota index = tlb_index(env, mmu_idx, addr); 20006d967cb8SEmilio G. Cota tlbe = tlb_entry(env, mmu_idx, addr); 2001d9bb58e5SYang Zhong } 2002403f290cSEmilio G. Cota tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK; 2003d9bb58e5SYang Zhong } 2004d9bb58e5SYang Zhong 2005417aeaffSRichard Henderson /* 2006417aeaffSRichard Henderson * Let the guest notice RMW on a write-only page. 2007417aeaffSRichard Henderson * We have just verified that the page is writable. 2008417aeaffSRichard Henderson * Subpage lookups may have left TLB_INVALID_MASK set, 2009417aeaffSRichard Henderson * but addr_read will only be -1 if PAGE_READ was unset. 2010417aeaffSRichard Henderson */ 2011417aeaffSRichard Henderson if (unlikely(tlbe->addr_read == -1)) { 20127bedee32SRichard Henderson tlb_fill(env_cpu(env), addr, size, MMU_DATA_LOAD, mmu_idx, retaddr); 201308dff435SRichard Henderson /* 2014417aeaffSRichard Henderson * Since we don't support reads and writes to different 2015417aeaffSRichard Henderson * addresses, and we do have the proper page loaded for 2016417aeaffSRichard Henderson * write, this shouldn't ever return. But just in case, 2017417aeaffSRichard Henderson * handle via stop-the-world. 201808dff435SRichard Henderson */ 201908dff435SRichard Henderson goto stop_the_world; 202008dff435SRichard Henderson } 2021187ba694SRichard Henderson /* Collect tlb flags for read. */ 2022417aeaffSRichard Henderson tlb_addr |= tlbe->addr_read; 202308dff435SRichard Henderson 202455df6fcfSPeter Maydell /* Notice an IO access or a needs-MMU-lookup access */ 20250953674eSRichard Henderson if (unlikely(tlb_addr & (TLB_MMIO | TLB_DISCARD_WRITE))) { 2026d9bb58e5SYang Zhong /* There's really nothing that can be done to 2027d9bb58e5SYang Zhong support this apart from stop-the-world. 
*/ 2028d9bb58e5SYang Zhong goto stop_the_world; 2029d9bb58e5SYang Zhong } 2030d9bb58e5SYang Zhong 203134d49937SPeter Maydell hostaddr = (void *)((uintptr_t)addr + tlbe->addend); 2032417aeaffSRichard Henderson full = &env_tlb(env)->d[mmu_idx].fulltlb[index]; 203334d49937SPeter Maydell 203434d49937SPeter Maydell if (unlikely(tlb_addr & TLB_NOTDIRTY)) { 2035417aeaffSRichard Henderson notdirty_write(env_cpu(env), addr, size, full, retaddr); 2036417aeaffSRichard Henderson } 2037417aeaffSRichard Henderson 2038187ba694SRichard Henderson if (unlikely(tlb_addr & TLB_FORCE_SLOW)) { 2039187ba694SRichard Henderson int wp_flags = 0; 2040187ba694SRichard Henderson 2041187ba694SRichard Henderson if (full->slow_flags[MMU_DATA_STORE] & TLB_WATCHPOINT) { 2042187ba694SRichard Henderson wp_flags |= BP_MEM_WRITE; 2043187ba694SRichard Henderson } 2044187ba694SRichard Henderson if (full->slow_flags[MMU_DATA_LOAD] & TLB_WATCHPOINT) { 2045187ba694SRichard Henderson wp_flags |= BP_MEM_READ; 2046187ba694SRichard Henderson } 2047187ba694SRichard Henderson if (wp_flags) { 2048187ba694SRichard Henderson cpu_check_watchpoint(env_cpu(env), addr, size, 2049187ba694SRichard Henderson full->attrs, wp_flags, retaddr); 2050187ba694SRichard Henderson } 205134d49937SPeter Maydell } 205234d49937SPeter Maydell 205334d49937SPeter Maydell return hostaddr; 2054d9bb58e5SYang Zhong 2055d9bb58e5SYang Zhong stop_the_world: 205629a0af61SRichard Henderson cpu_loop_exit_atomic(env_cpu(env), retaddr); 2057d9bb58e5SYang Zhong } 2058d9bb58e5SYang Zhong 2059eed56642SAlex Bennée /* 2060eed56642SAlex Bennée * Load Helpers 2061eed56642SAlex Bennée * 2062eed56642SAlex Bennée * We support two different access types. SOFTMMU_CODE_ACCESS is 2063eed56642SAlex Bennée * specifically for reading instructions from system memory. It is 2064eed56642SAlex Bennée * called by the translation loop and in some helpers where the code 2065eed56642SAlex Bennée * is disassembled. It shouldn't be called directly by guest code. 2066cdfac37bSRichard Henderson * 2067eed56642SAlex Bennée * For the benefit of TCG generated code, we want to avoid the 2068eed56642SAlex Bennée * complication of ABI-specific return type promotion and always 2069eed56642SAlex Bennée * return a value extended to the register size of the host. This is 2070eed56642SAlex Bennée * tcg_target_long, except in the case of a 32-bit host and 64-bit 2071eed56642SAlex Bennée * data, and for that we always have uint64_t. 2072eed56642SAlex Bennée * 2073eed56642SAlex Bennée * We don't bother with this widened value for SOFTMMU_CODE_ACCESS. 2074eed56642SAlex Bennée */ 2075eed56642SAlex Bennée 20768cfdacaaSRichard Henderson /** 20778cfdacaaSRichard Henderson * do_ld_mmio_beN: 20788cfdacaaSRichard Henderson * @env: cpu context 20791966855eSRichard Henderson * @full: page parameters 20808cfdacaaSRichard Henderson * @ret_be: accumulated data 20811966855eSRichard Henderson * @addr: virtual address 20821966855eSRichard Henderson * @size: number of bytes 20838cfdacaaSRichard Henderson * @mmu_idx: virtual address context 20848cfdacaaSRichard Henderson * @ra: return address into tcg generated code, or 0 20851966855eSRichard Henderson * Context: iothread lock held 20868cfdacaaSRichard Henderson * 20871966855eSRichard Henderson * Load @size bytes from @addr, which is memory-mapped i/o. 20888cfdacaaSRichard Henderson * The bytes are concatenated in big-endian order with @ret_be. 
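 * For example, a 5-byte load from an address ending in ...3 is issued as a 1-byte read followed by a 4-byte read.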
20898cfdacaaSRichard Henderson */
20901966855eSRichard Henderson static uint64_t do_ld_mmio_beN(CPUArchState *env, CPUTLBEntryFull *full,
20911966855eSRichard Henderson uint64_t ret_be, vaddr addr, int size,
20921966855eSRichard Henderson int mmu_idx, MMUAccessType type, uintptr_t ra)
20932dd92606SRichard Henderson {
2094190aba80SRichard Henderson uint64_t t;
2095190aba80SRichard Henderson 
2096190aba80SRichard Henderson tcg_debug_assert(size > 0 && size <= 8);
2097190aba80SRichard Henderson do {
2098190aba80SRichard Henderson /* Read aligned pieces up to 8 bytes. */
2099190aba80SRichard Henderson switch ((size | (int)addr) & 7) {
2100190aba80SRichard Henderson case 1:
2101190aba80SRichard Henderson case 3:
2102190aba80SRichard Henderson case 5:
2103190aba80SRichard Henderson case 7:
2104190aba80SRichard Henderson t = io_readx(env, full, mmu_idx, addr, ra, type, MO_UB);
2105190aba80SRichard Henderson ret_be = (ret_be << 8) | t;
2106190aba80SRichard Henderson size -= 1;
2107190aba80SRichard Henderson addr += 1;
2108190aba80SRichard Henderson break;
2109190aba80SRichard Henderson case 2:
2110190aba80SRichard Henderson case 6:
2111190aba80SRichard Henderson t = io_readx(env, full, mmu_idx, addr, ra, type, MO_BEUW);
2112190aba80SRichard Henderson ret_be = (ret_be << 16) | t;
2113190aba80SRichard Henderson size -= 2;
2114190aba80SRichard Henderson addr += 2;
2115190aba80SRichard Henderson break;
2116190aba80SRichard Henderson case 4:
2117190aba80SRichard Henderson t = io_readx(env, full, mmu_idx, addr, ra, type, MO_BEUL);
2118190aba80SRichard Henderson ret_be = (ret_be << 32) | t;
2119190aba80SRichard Henderson size -= 4;
2120190aba80SRichard Henderson addr += 4;
2121190aba80SRichard Henderson break;
2122190aba80SRichard Henderson case 0:
2123190aba80SRichard Henderson return io_readx(env, full, mmu_idx, addr, ra, type, MO_BEUQ);
2124190aba80SRichard Henderson default:
2125190aba80SRichard Henderson qemu_build_not_reached();
21268cfdacaaSRichard Henderson }
2127190aba80SRichard Henderson } while (size);
21288cfdacaaSRichard Henderson return ret_be;
21298cfdacaaSRichard Henderson }
21308cfdacaaSRichard Henderson 
21318cfdacaaSRichard Henderson /**
21328cfdacaaSRichard Henderson * do_ld_bytes_beN
21338cfdacaaSRichard Henderson * @p: translation parameters
21348cfdacaaSRichard Henderson * @ret_be: accumulated data
21358cfdacaaSRichard Henderson *
21368cfdacaaSRichard Henderson * Load @p->size bytes from @p->haddr, which is RAM.
21378cfdacaaSRichard Henderson * The bytes are concatenated in big-endian order with @ret_be.
21388cfdacaaSRichard Henderson */
21398cfdacaaSRichard Henderson static uint64_t do_ld_bytes_beN(MMULookupPageData *p, uint64_t ret_be)
21408cfdacaaSRichard Henderson {
21418cfdacaaSRichard Henderson uint8_t *haddr = p->haddr;
21428cfdacaaSRichard Henderson int i, size = p->size;
21438cfdacaaSRichard Henderson 
21448cfdacaaSRichard Henderson for (i = 0; i < size; i++) {
21458cfdacaaSRichard Henderson ret_be = (ret_be << 8) | haddr[i];
21468cfdacaaSRichard Henderson }
21478cfdacaaSRichard Henderson return ret_be;
21488cfdacaaSRichard Henderson }
21498cfdacaaSRichard Henderson 
2150cdfac37bSRichard Henderson /**
2151cdfac37bSRichard Henderson * do_ld_parts_beN
2152cdfac37bSRichard Henderson * @p: translation parameters
2153cdfac37bSRichard Henderson * @ret_be: accumulated data
2154cdfac37bSRichard Henderson *
2155cdfac37bSRichard Henderson * As do_ld_bytes_beN, but atomically on each aligned part.
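 * For example (illustrative): 7 bytes starting at an address with
 * (haddr & 7) == 1 are loaded as a 1-byte, then a 2-byte, then a 4-byte
 * atomic piece, so every naturally aligned subobject remains atomic.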
2156cdfac37bSRichard Henderson */
2157cdfac37bSRichard Henderson static uint64_t do_ld_parts_beN(MMULookupPageData *p, uint64_t ret_be)
2158cdfac37bSRichard Henderson {
2159cdfac37bSRichard Henderson void *haddr = p->haddr;
2160cdfac37bSRichard Henderson int size = p->size;
2161cdfac37bSRichard Henderson 
2162cdfac37bSRichard Henderson do {
2163cdfac37bSRichard Henderson uint64_t x;
2164cdfac37bSRichard Henderson int n;
2165cdfac37bSRichard Henderson 
2166cdfac37bSRichard Henderson /*
2167cdfac37bSRichard Henderson * Find minimum of alignment and size.
2168cdfac37bSRichard Henderson * This is slightly stronger than required by MO_ATOM_SUBALIGN, which
2169cdfac37bSRichard Henderson * would have only checked the low bits of addr|size once at the start,
2170cdfac37bSRichard Henderson * but is just as easy.
2171cdfac37bSRichard Henderson */
2172cdfac37bSRichard Henderson switch (((uintptr_t)haddr | size) & 7) {
2173cdfac37bSRichard Henderson case 4:
2174cdfac37bSRichard Henderson x = cpu_to_be32(load_atomic4(haddr));
2175cdfac37bSRichard Henderson ret_be = (ret_be << 32) | x;
2176cdfac37bSRichard Henderson n = 4;
2177cdfac37bSRichard Henderson break;
2178cdfac37bSRichard Henderson case 2:
2179cdfac37bSRichard Henderson case 6:
2180cdfac37bSRichard Henderson x = cpu_to_be16(load_atomic2(haddr));
2181cdfac37bSRichard Henderson ret_be = (ret_be << 16) | x;
2182cdfac37bSRichard Henderson n = 2;
2183cdfac37bSRichard Henderson break;
2184cdfac37bSRichard Henderson default:
2185cdfac37bSRichard Henderson x = *(uint8_t *)haddr;
2186cdfac37bSRichard Henderson ret_be = (ret_be << 8) | x;
2187cdfac37bSRichard Henderson n = 1;
2188cdfac37bSRichard Henderson break;
2189cdfac37bSRichard Henderson case 0:
2190cdfac37bSRichard Henderson g_assert_not_reached();
2191cdfac37bSRichard Henderson }
2192cdfac37bSRichard Henderson haddr += n;
2193cdfac37bSRichard Henderson size -= n;
2194cdfac37bSRichard Henderson } while (size != 0);
2195cdfac37bSRichard Henderson return ret_be;
2196cdfac37bSRichard Henderson }
2197cdfac37bSRichard Henderson 
2198cdfac37bSRichard Henderson /**
2199cdfac37bSRichard Henderson * do_ld_whole_be4
2200cdfac37bSRichard Henderson * @p: translation parameters
2201cdfac37bSRichard Henderson * @ret_be: accumulated data
2202cdfac37bSRichard Henderson *
2203cdfac37bSRichard Henderson * As do_ld_bytes_beN, but with one atomic load.
2204cdfac37bSRichard Henderson * Four aligned bytes are guaranteed to cover the load.
2205cdfac37bSRichard Henderson */
2206cdfac37bSRichard Henderson static uint64_t do_ld_whole_be4(MMULookupPageData *p, uint64_t ret_be)
2207cdfac37bSRichard Henderson {
2208cdfac37bSRichard Henderson int o = p->addr & 3;
2209cdfac37bSRichard Henderson uint32_t x = load_atomic4(p->haddr - o);
2210cdfac37bSRichard Henderson 
2211cdfac37bSRichard Henderson x = cpu_to_be32(x);
2212cdfac37bSRichard Henderson x <<= o * 8;
2213cdfac37bSRichard Henderson x >>= (4 - p->size) * 8;
2214cdfac37bSRichard Henderson return (ret_be << (p->size * 8)) | x;
2215cdfac37bSRichard Henderson }
2216cdfac37bSRichard Henderson 
2217cdfac37bSRichard Henderson /**
2218cdfac37bSRichard Henderson * do_ld_whole_be8
2219cdfac37bSRichard Henderson * @p: translation parameters
2220cdfac37bSRichard Henderson * @ret_be: accumulated data
2221cdfac37bSRichard Henderson *
2222cdfac37bSRichard Henderson * As do_ld_bytes_beN, but with one atomic load.
2223cdfac37bSRichard Henderson * Eight aligned bytes are guaranteed to cover the load.
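 * Worked example (illustrative): for p->size == 3 at offset o == 2 within
 * the aligned quad, the left shift by o * 8 discards the two leading bytes
 * and the right shift by (8 - size) * 8 leaves the three wanted bytes in
 * the low bits, ready to be merged below ret_be.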
2224cdfac37bSRichard Henderson */
2225cdfac37bSRichard Henderson static uint64_t do_ld_whole_be8(CPUArchState *env, uintptr_t ra,
2226cdfac37bSRichard Henderson MMULookupPageData *p, uint64_t ret_be)
2227cdfac37bSRichard Henderson {
2228cdfac37bSRichard Henderson int o = p->addr & 7;
2229cdfac37bSRichard Henderson uint64_t x = load_atomic8_or_exit(env, ra, p->haddr - o);
2230cdfac37bSRichard Henderson 
2231cdfac37bSRichard Henderson x = cpu_to_be64(x);
2232cdfac37bSRichard Henderson x <<= o * 8;
2233cdfac37bSRichard Henderson x >>= (8 - p->size) * 8;
2234cdfac37bSRichard Henderson return (ret_be << (p->size * 8)) | x;
2235cdfac37bSRichard Henderson }
2236cdfac37bSRichard Henderson 
223735c653c4SRichard Henderson /**
223835c653c4SRichard Henderson * do_ld_whole_be16
223935c653c4SRichard Henderson * @p: translation parameters
224035c653c4SRichard Henderson * @ret_be: accumulated data
224135c653c4SRichard Henderson *
224235c653c4SRichard Henderson * As do_ld_bytes_beN, but with one atomic load.
224335c653c4SRichard Henderson * 16 aligned bytes are guaranteed to cover the load.
224435c653c4SRichard Henderson */
224535c653c4SRichard Henderson static Int128 do_ld_whole_be16(CPUArchState *env, uintptr_t ra,
224635c653c4SRichard Henderson MMULookupPageData *p, uint64_t ret_be)
224735c653c4SRichard Henderson {
224835c653c4SRichard Henderson int o = p->addr & 15;
224935c653c4SRichard Henderson Int128 x, y = load_atomic16_or_exit(env, ra, p->haddr - o);
225035c653c4SRichard Henderson int size = p->size;
225135c653c4SRichard Henderson 
225235c653c4SRichard Henderson if (!HOST_BIG_ENDIAN) {
225335c653c4SRichard Henderson y = bswap128(y);
225435c653c4SRichard Henderson }
225535c653c4SRichard Henderson y = int128_lshift(y, o * 8);
225635c653c4SRichard Henderson y = int128_urshift(y, (16 - size) * 8);
225735c653c4SRichard Henderson x = int128_make64(ret_be);
225835c653c4SRichard Henderson x = int128_lshift(x, size * 8);
225935c653c4SRichard Henderson return int128_or(x, y);
226035c653c4SRichard Henderson }
226135c653c4SRichard Henderson 
22628cfdacaaSRichard Henderson /*
22638cfdacaaSRichard Henderson * Wrapper for the above.
22648cfdacaaSRichard Henderson */
22658cfdacaaSRichard Henderson static uint64_t do_ld_beN(CPUArchState *env, MMULookupPageData *p,
2266cdfac37bSRichard Henderson uint64_t ret_be, int mmu_idx, MMUAccessType type,
2267cdfac37bSRichard Henderson MemOp mop, uintptr_t ra)
22688cfdacaaSRichard Henderson {
2269cdfac37bSRichard Henderson MemOp atom;
2270cdfac37bSRichard Henderson unsigned tmp, half_size;
2271cdfac37bSRichard Henderson 
22728cfdacaaSRichard Henderson if (unlikely(p->flags & TLB_MMIO)) {
22731966855eSRichard Henderson QEMU_IOTHREAD_LOCK_GUARD();
22741966855eSRichard Henderson return do_ld_mmio_beN(env, p->full, ret_be, p->addr, p->size,
22751966855eSRichard Henderson mmu_idx, type, ra);
2276cdfac37bSRichard Henderson }
2277cdfac37bSRichard Henderson 
2278cdfac37bSRichard Henderson /*
2279cdfac37bSRichard Henderson * It is a given that we cross a page and therefore there is no
2280cdfac37bSRichard Henderson * atomicity for the load as a whole, but subobjects may need attention.
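 * For MO_ATOM_IFALIGN_PAIR, only a fragment of exactly half the access
 * (e.g. a 4/4 split of an 8-byte load) is loaded atomically; for
 * MO_ATOM_WITHIN16_PAIR any fragment of at least half the access is.
 * Smaller fragments fall through to the plain byte loop.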
2281cdfac37bSRichard Henderson */ 2282cdfac37bSRichard Henderson atom = mop & MO_ATOM_MASK; 2283cdfac37bSRichard Henderson switch (atom) { 2284cdfac37bSRichard Henderson case MO_ATOM_SUBALIGN: 2285cdfac37bSRichard Henderson return do_ld_parts_beN(p, ret_be); 2286cdfac37bSRichard Henderson 2287cdfac37bSRichard Henderson case MO_ATOM_IFALIGN_PAIR: 2288cdfac37bSRichard Henderson case MO_ATOM_WITHIN16_PAIR: 2289cdfac37bSRichard Henderson tmp = mop & MO_SIZE; 2290cdfac37bSRichard Henderson tmp = tmp ? tmp - 1 : 0; 2291cdfac37bSRichard Henderson half_size = 1 << tmp; 2292cdfac37bSRichard Henderson if (atom == MO_ATOM_IFALIGN_PAIR 2293cdfac37bSRichard Henderson ? p->size == half_size 2294cdfac37bSRichard Henderson : p->size >= half_size) { 2295cdfac37bSRichard Henderson if (!HAVE_al8_fast && p->size < 4) { 2296cdfac37bSRichard Henderson return do_ld_whole_be4(p, ret_be); 22978cfdacaaSRichard Henderson } else { 2298cdfac37bSRichard Henderson return do_ld_whole_be8(env, ra, p, ret_be); 2299cdfac37bSRichard Henderson } 2300cdfac37bSRichard Henderson } 2301cdfac37bSRichard Henderson /* fall through */ 2302cdfac37bSRichard Henderson 2303cdfac37bSRichard Henderson case MO_ATOM_IFALIGN: 2304cdfac37bSRichard Henderson case MO_ATOM_WITHIN16: 2305cdfac37bSRichard Henderson case MO_ATOM_NONE: 23068cfdacaaSRichard Henderson return do_ld_bytes_beN(p, ret_be); 2307cdfac37bSRichard Henderson 2308cdfac37bSRichard Henderson default: 2309cdfac37bSRichard Henderson g_assert_not_reached(); 23108cfdacaaSRichard Henderson } 23118cfdacaaSRichard Henderson } 23128cfdacaaSRichard Henderson 231335c653c4SRichard Henderson /* 231435c653c4SRichard Henderson * Wrapper for the above, for 8 < size < 16. 231535c653c4SRichard Henderson */ 231635c653c4SRichard Henderson static Int128 do_ld16_beN(CPUArchState *env, MMULookupPageData *p, 231735c653c4SRichard Henderson uint64_t a, int mmu_idx, MemOp mop, uintptr_t ra) 231835c653c4SRichard Henderson { 231935c653c4SRichard Henderson int size = p->size; 232035c653c4SRichard Henderson uint64_t b; 232135c653c4SRichard Henderson MemOp atom; 232235c653c4SRichard Henderson 232335c653c4SRichard Henderson if (unlikely(p->flags & TLB_MMIO)) { 23241966855eSRichard Henderson QEMU_IOTHREAD_LOCK_GUARD(); 23251966855eSRichard Henderson a = do_ld_mmio_beN(env, p->full, a, p->addr, size - 8, 23261966855eSRichard Henderson mmu_idx, MMU_DATA_LOAD, ra); 23271966855eSRichard Henderson b = do_ld_mmio_beN(env, p->full, 0, p->addr + 8, 8, 23281966855eSRichard Henderson mmu_idx, MMU_DATA_LOAD, ra); 232935c653c4SRichard Henderson return int128_make128(b, a); 233035c653c4SRichard Henderson } 233135c653c4SRichard Henderson 233235c653c4SRichard Henderson /* 233335c653c4SRichard Henderson * It is a given that we cross a page and therefore there is no 233435c653c4SRichard Henderson * atomicity for the load as a whole, but subobjects may need attention. 233535c653c4SRichard Henderson */ 233635c653c4SRichard Henderson atom = mop & MO_ATOM_MASK; 233735c653c4SRichard Henderson switch (atom) { 233835c653c4SRichard Henderson case MO_ATOM_SUBALIGN: 233935c653c4SRichard Henderson p->size = size - 8; 234035c653c4SRichard Henderson a = do_ld_parts_beN(p, a); 234135c653c4SRichard Henderson p->haddr += size - 8; 234235c653c4SRichard Henderson p->size = 8; 234335c653c4SRichard Henderson b = do_ld_parts_beN(p, 0); 234435c653c4SRichard Henderson break; 234535c653c4SRichard Henderson 234635c653c4SRichard Henderson case MO_ATOM_WITHIN16_PAIR: 234735c653c4SRichard Henderson /* Since size > 8, this is the half that must be atomic. 
*/ 234835c653c4SRichard Henderson return do_ld_whole_be16(env, ra, p, a); 234935c653c4SRichard Henderson 235035c653c4SRichard Henderson case MO_ATOM_IFALIGN_PAIR: 235135c653c4SRichard Henderson /* 235235c653c4SRichard Henderson * Since size > 8, both halves are misaligned, 235335c653c4SRichard Henderson * and so neither is atomic. 235435c653c4SRichard Henderson */ 235535c653c4SRichard Henderson case MO_ATOM_IFALIGN: 235635c653c4SRichard Henderson case MO_ATOM_WITHIN16: 235735c653c4SRichard Henderson case MO_ATOM_NONE: 235835c653c4SRichard Henderson p->size = size - 8; 235935c653c4SRichard Henderson a = do_ld_bytes_beN(p, a); 236035c653c4SRichard Henderson b = ldq_be_p(p->haddr + size - 8); 236135c653c4SRichard Henderson break; 236235c653c4SRichard Henderson 236335c653c4SRichard Henderson default: 236435c653c4SRichard Henderson g_assert_not_reached(); 236535c653c4SRichard Henderson } 236635c653c4SRichard Henderson 236735c653c4SRichard Henderson return int128_make128(b, a); 236835c653c4SRichard Henderson } 236935c653c4SRichard Henderson 23708cfdacaaSRichard Henderson static uint8_t do_ld_1(CPUArchState *env, MMULookupPageData *p, int mmu_idx, 23718cfdacaaSRichard Henderson MMUAccessType type, uintptr_t ra) 23728cfdacaaSRichard Henderson { 23738cfdacaaSRichard Henderson if (unlikely(p->flags & TLB_MMIO)) { 23748cfdacaaSRichard Henderson return io_readx(env, p->full, mmu_idx, p->addr, ra, type, MO_UB); 23758cfdacaaSRichard Henderson } else { 23768cfdacaaSRichard Henderson return *(uint8_t *)p->haddr; 23778cfdacaaSRichard Henderson } 23788cfdacaaSRichard Henderson } 23798cfdacaaSRichard Henderson 23808cfdacaaSRichard Henderson static uint16_t do_ld_2(CPUArchState *env, MMULookupPageData *p, int mmu_idx, 23818cfdacaaSRichard Henderson MMUAccessType type, MemOp memop, uintptr_t ra) 23828cfdacaaSRichard Henderson { 2383f7eaf9d7SRichard Henderson uint16_t ret; 23848cfdacaaSRichard Henderson 23858cfdacaaSRichard Henderson if (unlikely(p->flags & TLB_MMIO)) { 2386f7eaf9d7SRichard Henderson QEMU_IOTHREAD_LOCK_GUARD(); 2387f7eaf9d7SRichard Henderson ret = do_ld_mmio_beN(env, p->full, 0, p->addr, 2, mmu_idx, type, ra); 2388f7eaf9d7SRichard Henderson if ((memop & MO_BSWAP) == MO_LE) { 2389f7eaf9d7SRichard Henderson ret = bswap16(ret); 23908cfdacaaSRichard Henderson } 2391f7eaf9d7SRichard Henderson } else { 23928cfdacaaSRichard Henderson /* Perform the load host endian, then swap if necessary. 
*/ 2393cdfac37bSRichard Henderson ret = load_atom_2(env, ra, p->haddr, memop); 23948cfdacaaSRichard Henderson if (memop & MO_BSWAP) { 23958cfdacaaSRichard Henderson ret = bswap16(ret); 23968cfdacaaSRichard Henderson } 2397f7eaf9d7SRichard Henderson } 23988cfdacaaSRichard Henderson return ret; 23998cfdacaaSRichard Henderson } 24008cfdacaaSRichard Henderson 24018cfdacaaSRichard Henderson static uint32_t do_ld_4(CPUArchState *env, MMULookupPageData *p, int mmu_idx, 24028cfdacaaSRichard Henderson MMUAccessType type, MemOp memop, uintptr_t ra) 24038cfdacaaSRichard Henderson { 24048cfdacaaSRichard Henderson uint32_t ret; 24058cfdacaaSRichard Henderson 24068cfdacaaSRichard Henderson if (unlikely(p->flags & TLB_MMIO)) { 2407f7eaf9d7SRichard Henderson QEMU_IOTHREAD_LOCK_GUARD(); 2408f7eaf9d7SRichard Henderson ret = do_ld_mmio_beN(env, p->full, 0, p->addr, 4, mmu_idx, type, ra); 2409f7eaf9d7SRichard Henderson if ((memop & MO_BSWAP) == MO_LE) { 2410f7eaf9d7SRichard Henderson ret = bswap32(ret); 24118cfdacaaSRichard Henderson } 2412f7eaf9d7SRichard Henderson } else { 24138cfdacaaSRichard Henderson /* Perform the load host endian. */ 2414cdfac37bSRichard Henderson ret = load_atom_4(env, ra, p->haddr, memop); 24158cfdacaaSRichard Henderson if (memop & MO_BSWAP) { 24168cfdacaaSRichard Henderson ret = bswap32(ret); 24178cfdacaaSRichard Henderson } 2418f7eaf9d7SRichard Henderson } 24198cfdacaaSRichard Henderson return ret; 24208cfdacaaSRichard Henderson } 24218cfdacaaSRichard Henderson 24228cfdacaaSRichard Henderson static uint64_t do_ld_8(CPUArchState *env, MMULookupPageData *p, int mmu_idx, 24238cfdacaaSRichard Henderson MMUAccessType type, MemOp memop, uintptr_t ra) 24248cfdacaaSRichard Henderson { 24258cfdacaaSRichard Henderson uint64_t ret; 24268cfdacaaSRichard Henderson 24278cfdacaaSRichard Henderson if (unlikely(p->flags & TLB_MMIO)) { 2428f7eaf9d7SRichard Henderson QEMU_IOTHREAD_LOCK_GUARD(); 2429f7eaf9d7SRichard Henderson ret = do_ld_mmio_beN(env, p->full, 0, p->addr, 8, mmu_idx, type, ra); 2430f7eaf9d7SRichard Henderson if ((memop & MO_BSWAP) == MO_LE) { 2431f7eaf9d7SRichard Henderson ret = bswap64(ret); 24328cfdacaaSRichard Henderson } 2433f7eaf9d7SRichard Henderson } else { 24348cfdacaaSRichard Henderson /* Perform the load host endian. 
*/ 2435cdfac37bSRichard Henderson ret = load_atom_8(env, ra, p->haddr, memop); 24368cfdacaaSRichard Henderson if (memop & MO_BSWAP) { 24378cfdacaaSRichard Henderson ret = bswap64(ret); 24388cfdacaaSRichard Henderson } 2439f7eaf9d7SRichard Henderson } 24408cfdacaaSRichard Henderson return ret; 24418cfdacaaSRichard Henderson } 24428cfdacaaSRichard Henderson 2443fb2c53cbSAnton Johansson static uint8_t do_ld1_mmu(CPUArchState *env, vaddr addr, MemOpIdx oi, 24448cfdacaaSRichard Henderson uintptr_t ra, MMUAccessType access_type) 24458cfdacaaSRichard Henderson { 24468cfdacaaSRichard Henderson MMULookupLocals l; 24478cfdacaaSRichard Henderson bool crosspage; 24488cfdacaaSRichard Henderson 2449f86e8f3dSRichard Henderson cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); 24508cfdacaaSRichard Henderson crosspage = mmu_lookup(env, addr, oi, ra, access_type, &l); 24518cfdacaaSRichard Henderson tcg_debug_assert(!crosspage); 24528cfdacaaSRichard Henderson 24538cfdacaaSRichard Henderson return do_ld_1(env, &l.page[0], l.mmu_idx, access_type, ra); 24542dd92606SRichard Henderson } 24552dd92606SRichard Henderson 245624e46e6cSRichard Henderson tcg_target_ulong helper_ldub_mmu(CPUArchState *env, uint64_t addr, 24579002ffcbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 2458eed56642SAlex Bennée { 24590cadc1edSRichard Henderson tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_8); 24608cfdacaaSRichard Henderson return do_ld1_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD); 24612dd92606SRichard Henderson } 24622dd92606SRichard Henderson 2463fb2c53cbSAnton Johansson static uint16_t do_ld2_mmu(CPUArchState *env, vaddr addr, MemOpIdx oi, 24648cfdacaaSRichard Henderson uintptr_t ra, MMUAccessType access_type) 24652dd92606SRichard Henderson { 24668cfdacaaSRichard Henderson MMULookupLocals l; 24678cfdacaaSRichard Henderson bool crosspage; 24688cfdacaaSRichard Henderson uint16_t ret; 24698cfdacaaSRichard Henderson uint8_t a, b; 24708cfdacaaSRichard Henderson 2471f86e8f3dSRichard Henderson cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); 24728cfdacaaSRichard Henderson crosspage = mmu_lookup(env, addr, oi, ra, access_type, &l); 24738cfdacaaSRichard Henderson if (likely(!crosspage)) { 24748cfdacaaSRichard Henderson return do_ld_2(env, &l.page[0], l.mmu_idx, access_type, l.memop, ra); 24758cfdacaaSRichard Henderson } 24768cfdacaaSRichard Henderson 24778cfdacaaSRichard Henderson a = do_ld_1(env, &l.page[0], l.mmu_idx, access_type, ra); 24788cfdacaaSRichard Henderson b = do_ld_1(env, &l.page[1], l.mmu_idx, access_type, ra); 24798cfdacaaSRichard Henderson 24808cfdacaaSRichard Henderson if ((l.memop & MO_BSWAP) == MO_LE) { 24818cfdacaaSRichard Henderson ret = a | (b << 8); 24828cfdacaaSRichard Henderson } else { 24838cfdacaaSRichard Henderson ret = b | (a << 8); 24848cfdacaaSRichard Henderson } 24858cfdacaaSRichard Henderson return ret; 2486eed56642SAlex Bennée } 2487eed56642SAlex Bennée 248824e46e6cSRichard Henderson tcg_target_ulong helper_lduw_mmu(CPUArchState *env, uint64_t addr, 24899002ffcbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 2490eed56642SAlex Bennée { 24910cadc1edSRichard Henderson tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16); 24928cfdacaaSRichard Henderson return do_ld2_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD); 24932dd92606SRichard Henderson } 24942dd92606SRichard Henderson 2495fb2c53cbSAnton Johansson static uint32_t do_ld4_mmu(CPUArchState *env, vaddr addr, MemOpIdx oi, 24968cfdacaaSRichard Henderson uintptr_t ra, MMUAccessType access_type) 24972dd92606SRichard Henderson { 24988cfdacaaSRichard Henderson 
MMULookupLocals l; 24998cfdacaaSRichard Henderson bool crosspage; 25008cfdacaaSRichard Henderson uint32_t ret; 25018cfdacaaSRichard Henderson 2502f86e8f3dSRichard Henderson cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); 25038cfdacaaSRichard Henderson crosspage = mmu_lookup(env, addr, oi, ra, access_type, &l); 25048cfdacaaSRichard Henderson if (likely(!crosspage)) { 25058cfdacaaSRichard Henderson return do_ld_4(env, &l.page[0], l.mmu_idx, access_type, l.memop, ra); 25068cfdacaaSRichard Henderson } 25078cfdacaaSRichard Henderson 2508cdfac37bSRichard Henderson ret = do_ld_beN(env, &l.page[0], 0, l.mmu_idx, access_type, l.memop, ra); 2509cdfac37bSRichard Henderson ret = do_ld_beN(env, &l.page[1], ret, l.mmu_idx, access_type, l.memop, ra); 25108cfdacaaSRichard Henderson if ((l.memop & MO_BSWAP) == MO_LE) { 25118cfdacaaSRichard Henderson ret = bswap32(ret); 25128cfdacaaSRichard Henderson } 25138cfdacaaSRichard Henderson return ret; 2514eed56642SAlex Bennée } 2515eed56642SAlex Bennée 251624e46e6cSRichard Henderson tcg_target_ulong helper_ldul_mmu(CPUArchState *env, uint64_t addr, 25179002ffcbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 2518eed56642SAlex Bennée { 25190cadc1edSRichard Henderson tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32); 25208cfdacaaSRichard Henderson return do_ld4_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD); 25218cfdacaaSRichard Henderson } 25228cfdacaaSRichard Henderson 2523fb2c53cbSAnton Johansson static uint64_t do_ld8_mmu(CPUArchState *env, vaddr addr, MemOpIdx oi, 25248cfdacaaSRichard Henderson uintptr_t ra, MMUAccessType access_type) 25258cfdacaaSRichard Henderson { 25268cfdacaaSRichard Henderson MMULookupLocals l; 25278cfdacaaSRichard Henderson bool crosspage; 25288cfdacaaSRichard Henderson uint64_t ret; 25298cfdacaaSRichard Henderson 2530f86e8f3dSRichard Henderson cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); 25318cfdacaaSRichard Henderson crosspage = mmu_lookup(env, addr, oi, ra, access_type, &l); 25328cfdacaaSRichard Henderson if (likely(!crosspage)) { 25338cfdacaaSRichard Henderson return do_ld_8(env, &l.page[0], l.mmu_idx, access_type, l.memop, ra); 25348cfdacaaSRichard Henderson } 25358cfdacaaSRichard Henderson 2536cdfac37bSRichard Henderson ret = do_ld_beN(env, &l.page[0], 0, l.mmu_idx, access_type, l.memop, ra); 2537cdfac37bSRichard Henderson ret = do_ld_beN(env, &l.page[1], ret, l.mmu_idx, access_type, l.memop, ra); 25388cfdacaaSRichard Henderson if ((l.memop & MO_BSWAP) == MO_LE) { 25398cfdacaaSRichard Henderson ret = bswap64(ret); 25408cfdacaaSRichard Henderson } 25418cfdacaaSRichard Henderson return ret; 2542eed56642SAlex Bennée } 2543eed56642SAlex Bennée 254424e46e6cSRichard Henderson uint64_t helper_ldq_mmu(CPUArchState *env, uint64_t addr, 25459002ffcbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 2546eed56642SAlex Bennée { 25470cadc1edSRichard Henderson tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64); 25488cfdacaaSRichard Henderson return do_ld8_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD); 2549eed56642SAlex Bennée } 2550eed56642SAlex Bennée 2551eed56642SAlex Bennée /* 2552eed56642SAlex Bennée * Provide signed versions of the load routines as well. We can of course 2553eed56642SAlex Bennée * avoid this for 64-bit data, or for 32-bit data on 32-bit host. 
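 * For example (illustrative): a byte of 0x80 read through helper_ldsb_mmu
 * below is cast via int8_t and so is returned sign-extended as -128 in
 * the full host register.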
2554eed56642SAlex Bennée */ 2555eed56642SAlex Bennée 255624e46e6cSRichard Henderson tcg_target_ulong helper_ldsb_mmu(CPUArchState *env, uint64_t addr, 25579002ffcbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 2558eed56642SAlex Bennée { 25590cadc1edSRichard Henderson return (int8_t)helper_ldub_mmu(env, addr, oi, retaddr); 2560eed56642SAlex Bennée } 2561eed56642SAlex Bennée 256224e46e6cSRichard Henderson tcg_target_ulong helper_ldsw_mmu(CPUArchState *env, uint64_t addr, 25639002ffcbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 2564eed56642SAlex Bennée { 25650cadc1edSRichard Henderson return (int16_t)helper_lduw_mmu(env, addr, oi, retaddr); 2566eed56642SAlex Bennée } 2567eed56642SAlex Bennée 256824e46e6cSRichard Henderson tcg_target_ulong helper_ldsl_mmu(CPUArchState *env, uint64_t addr, 25699002ffcbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 2570eed56642SAlex Bennée { 25710cadc1edSRichard Henderson return (int32_t)helper_ldul_mmu(env, addr, oi, retaddr); 2572eed56642SAlex Bennée } 2573eed56642SAlex Bennée 2574fb2c53cbSAnton Johansson static Int128 do_ld16_mmu(CPUArchState *env, vaddr addr, 257535c653c4SRichard Henderson MemOpIdx oi, uintptr_t ra) 257635c653c4SRichard Henderson { 257735c653c4SRichard Henderson MMULookupLocals l; 257835c653c4SRichard Henderson bool crosspage; 257935c653c4SRichard Henderson uint64_t a, b; 258035c653c4SRichard Henderson Int128 ret; 258135c653c4SRichard Henderson int first; 258235c653c4SRichard Henderson 2583f86e8f3dSRichard Henderson cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); 258435c653c4SRichard Henderson crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD, &l); 258535c653c4SRichard Henderson if (likely(!crosspage)) { 258635c653c4SRichard Henderson if (unlikely(l.page[0].flags & TLB_MMIO)) { 258735c653c4SRichard Henderson QEMU_IOTHREAD_LOCK_GUARD(); 2588f7eaf9d7SRichard Henderson a = do_ld_mmio_beN(env, l.page[0].full, 0, addr, 8, 2589f7eaf9d7SRichard Henderson l.mmu_idx, MMU_DATA_LOAD, ra); 2590f7eaf9d7SRichard Henderson b = do_ld_mmio_beN(env, l.page[0].full, 0, addr + 8, 8, 2591f7eaf9d7SRichard Henderson l.mmu_idx, MMU_DATA_LOAD, ra); 2592f7eaf9d7SRichard Henderson ret = int128_make128(b, a); 2593f7eaf9d7SRichard Henderson if ((l.memop & MO_BSWAP) == MO_LE) { 2594f7eaf9d7SRichard Henderson ret = bswap128(ret); 259535c653c4SRichard Henderson } 2596f7eaf9d7SRichard Henderson } else { 2597f7eaf9d7SRichard Henderson /* Perform the load host endian. 
*/ 2598f7eaf9d7SRichard Henderson ret = load_atom_16(env, ra, l.page[0].haddr, l.memop); 259935c653c4SRichard Henderson if (l.memop & MO_BSWAP) { 260035c653c4SRichard Henderson ret = bswap128(ret); 260135c653c4SRichard Henderson } 2602f7eaf9d7SRichard Henderson } 260335c653c4SRichard Henderson return ret; 260435c653c4SRichard Henderson } 260535c653c4SRichard Henderson 260635c653c4SRichard Henderson first = l.page[0].size; 260735c653c4SRichard Henderson if (first == 8) { 260835c653c4SRichard Henderson MemOp mop8 = (l.memop & ~MO_SIZE) | MO_64; 260935c653c4SRichard Henderson 261035c653c4SRichard Henderson a = do_ld_8(env, &l.page[0], l.mmu_idx, MMU_DATA_LOAD, mop8, ra); 261135c653c4SRichard Henderson b = do_ld_8(env, &l.page[1], l.mmu_idx, MMU_DATA_LOAD, mop8, ra); 261235c653c4SRichard Henderson if ((mop8 & MO_BSWAP) == MO_LE) { 261335c653c4SRichard Henderson ret = int128_make128(a, b); 261435c653c4SRichard Henderson } else { 261535c653c4SRichard Henderson ret = int128_make128(b, a); 261635c653c4SRichard Henderson } 261735c653c4SRichard Henderson return ret; 261835c653c4SRichard Henderson } 261935c653c4SRichard Henderson 262035c653c4SRichard Henderson if (first < 8) { 262135c653c4SRichard Henderson a = do_ld_beN(env, &l.page[0], 0, l.mmu_idx, 262235c653c4SRichard Henderson MMU_DATA_LOAD, l.memop, ra); 262335c653c4SRichard Henderson ret = do_ld16_beN(env, &l.page[1], a, l.mmu_idx, l.memop, ra); 262435c653c4SRichard Henderson } else { 262535c653c4SRichard Henderson ret = do_ld16_beN(env, &l.page[0], 0, l.mmu_idx, l.memop, ra); 262635c653c4SRichard Henderson b = int128_getlo(ret); 262735c653c4SRichard Henderson ret = int128_lshift(ret, l.page[1].size * 8); 262835c653c4SRichard Henderson a = int128_gethi(ret); 262935c653c4SRichard Henderson b = do_ld_beN(env, &l.page[1], b, l.mmu_idx, 263035c653c4SRichard Henderson MMU_DATA_LOAD, l.memop, ra); 263135c653c4SRichard Henderson ret = int128_make128(b, a); 263235c653c4SRichard Henderson } 263335c653c4SRichard Henderson if ((l.memop & MO_BSWAP) == MO_LE) { 263435c653c4SRichard Henderson ret = bswap128(ret); 263535c653c4SRichard Henderson } 263635c653c4SRichard Henderson return ret; 263735c653c4SRichard Henderson } 263835c653c4SRichard Henderson 263924e46e6cSRichard Henderson Int128 helper_ld16_mmu(CPUArchState *env, uint64_t addr, 264035c653c4SRichard Henderson uint32_t oi, uintptr_t retaddr) 264135c653c4SRichard Henderson { 264235c653c4SRichard Henderson tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128); 264335c653c4SRichard Henderson return do_ld16_mmu(env, addr, oi, retaddr); 264435c653c4SRichard Henderson } 264535c653c4SRichard Henderson 2646e570597aSRichard Henderson Int128 helper_ld_i128(CPUArchState *env, uint64_t addr, uint32_t oi) 264735c653c4SRichard Henderson { 264835c653c4SRichard Henderson return helper_ld16_mmu(env, addr, oi, GETPC()); 264935c653c4SRichard Henderson } 265035c653c4SRichard Henderson 2651eed56642SAlex Bennée /* 2652d03f1408SRichard Henderson * Load helpers for cpu_ldst.h. 
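 * These wrap the do_ld*_mmu routines above and additionally raise the
 * plugin memory callback.  Illustrative use from a hypothetical target
 * helper (the mmu_idx value here is an assumption, not from this file):
 *     MemOpIdx oi = make_memop_idx(MO_TEUL, mmu_idx);
 *     uint32_t val = cpu_ldl_mmu(env, addr, oi, GETPC());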
2653d03f1408SRichard Henderson */ 2654d03f1408SRichard Henderson 26558cfdacaaSRichard Henderson static void plugin_load_cb(CPUArchState *env, abi_ptr addr, MemOpIdx oi) 2656d03f1408SRichard Henderson { 265737aff087SRichard Henderson qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); 2658d03f1408SRichard Henderson } 2659d03f1408SRichard Henderson 2660f83bcecbSRichard Henderson uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr, MemOpIdx oi, uintptr_t ra) 2661d03f1408SRichard Henderson { 26628cfdacaaSRichard Henderson uint8_t ret; 26638cfdacaaSRichard Henderson 26640cadc1edSRichard Henderson tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_UB); 26658cfdacaaSRichard Henderson ret = do_ld1_mmu(env, addr, oi, ra, MMU_DATA_LOAD); 26668cfdacaaSRichard Henderson plugin_load_cb(env, addr, oi); 26678cfdacaaSRichard Henderson return ret; 2668d03f1408SRichard Henderson } 2669d03f1408SRichard Henderson 2670fbea7a40SRichard Henderson uint16_t cpu_ldw_mmu(CPUArchState *env, abi_ptr addr, 2671f83bcecbSRichard Henderson MemOpIdx oi, uintptr_t ra) 2672d03f1408SRichard Henderson { 26738cfdacaaSRichard Henderson uint16_t ret; 26748cfdacaaSRichard Henderson 2675fbea7a40SRichard Henderson tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16); 26768cfdacaaSRichard Henderson ret = do_ld2_mmu(env, addr, oi, ra, MMU_DATA_LOAD); 26778cfdacaaSRichard Henderson plugin_load_cb(env, addr, oi); 26788cfdacaaSRichard Henderson return ret; 2679d03f1408SRichard Henderson } 2680d03f1408SRichard Henderson 2681fbea7a40SRichard Henderson uint32_t cpu_ldl_mmu(CPUArchState *env, abi_ptr addr, 2682f83bcecbSRichard Henderson MemOpIdx oi, uintptr_t ra) 2683d03f1408SRichard Henderson { 26848cfdacaaSRichard Henderson uint32_t ret; 26858cfdacaaSRichard Henderson 2686fbea7a40SRichard Henderson tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32); 26878cfdacaaSRichard Henderson ret = do_ld4_mmu(env, addr, oi, ra, MMU_DATA_LOAD); 26888cfdacaaSRichard Henderson plugin_load_cb(env, addr, oi); 26898cfdacaaSRichard Henderson return ret; 2690d03f1408SRichard Henderson } 2691d03f1408SRichard Henderson 2692fbea7a40SRichard Henderson uint64_t cpu_ldq_mmu(CPUArchState *env, abi_ptr addr, 2693f83bcecbSRichard Henderson MemOpIdx oi, uintptr_t ra) 2694d03f1408SRichard Henderson { 26958cfdacaaSRichard Henderson uint64_t ret; 26968cfdacaaSRichard Henderson 2697fbea7a40SRichard Henderson tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64); 26988cfdacaaSRichard Henderson ret = do_ld8_mmu(env, addr, oi, ra, MMU_DATA_LOAD); 26998cfdacaaSRichard Henderson plugin_load_cb(env, addr, oi); 27008cfdacaaSRichard Henderson return ret; 2701d03f1408SRichard Henderson } 2702d03f1408SRichard Henderson 2703fbea7a40SRichard Henderson Int128 cpu_ld16_mmu(CPUArchState *env, abi_ptr addr, 2704cb48f365SRichard Henderson MemOpIdx oi, uintptr_t ra) 2705cb48f365SRichard Henderson { 270635c653c4SRichard Henderson Int128 ret; 2707cb48f365SRichard Henderson 2708fbea7a40SRichard Henderson tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128); 270935c653c4SRichard Henderson ret = do_ld16_mmu(env, addr, oi, ra); 271035c653c4SRichard Henderson plugin_load_cb(env, addr, oi); 271135c653c4SRichard Henderson return ret; 2712cb48f365SRichard Henderson } 2713cb48f365SRichard Henderson 2714d03f1408SRichard Henderson /* 2715eed56642SAlex Bennée * Store Helpers 2716eed56642SAlex Bennée */ 2717eed56642SAlex Bennée 271859213461SRichard Henderson /** 271959213461SRichard Henderson * do_st_mmio_leN: 272059213461SRichard Henderson * @env: cpu context 27211966855eSRichard Henderson 
* @full: page parameters 272259213461SRichard Henderson * @val_le: data to store 27231966855eSRichard Henderson * @addr: virtual address 27241966855eSRichard Henderson * @size: number of bytes 272559213461SRichard Henderson * @mmu_idx: virtual address context 272659213461SRichard Henderson * @ra: return address into tcg generated code, or 0 27271966855eSRichard Henderson * Context: iothread lock held 272859213461SRichard Henderson * 27291966855eSRichard Henderson * Store @size bytes at @addr, which is memory-mapped i/o. 273059213461SRichard Henderson * The bytes to store are extracted in little-endian order from @val_le; 273159213461SRichard Henderson * return the bytes of @val_le beyond @p->size that have not been stored. 273259213461SRichard Henderson */ 27331966855eSRichard Henderson static uint64_t do_st_mmio_leN(CPUArchState *env, CPUTLBEntryFull *full, 27341966855eSRichard Henderson uint64_t val_le, vaddr addr, int size, 27351966855eSRichard Henderson int mmu_idx, uintptr_t ra) 27366b8b622eSRichard Henderson { 2737190aba80SRichard Henderson tcg_debug_assert(size > 0 && size <= 8); 2738190aba80SRichard Henderson 2739190aba80SRichard Henderson do { 2740190aba80SRichard Henderson /* Store aligned pieces up to 8 bytes. */ 2741190aba80SRichard Henderson switch ((size | (int)addr) & 7) { 2742190aba80SRichard Henderson case 1: 2743190aba80SRichard Henderson case 3: 2744190aba80SRichard Henderson case 5: 2745190aba80SRichard Henderson case 7: 2746190aba80SRichard Henderson io_writex(env, full, mmu_idx, val_le, addr, ra, MO_UB); 2747190aba80SRichard Henderson val_le >>= 8; 2748190aba80SRichard Henderson size -= 1; 2749190aba80SRichard Henderson addr += 1; 2750190aba80SRichard Henderson break; 2751190aba80SRichard Henderson case 2: 2752190aba80SRichard Henderson case 6: 2753190aba80SRichard Henderson io_writex(env, full, mmu_idx, val_le, addr, ra, MO_LEUW); 2754190aba80SRichard Henderson val_le >>= 16; 2755190aba80SRichard Henderson size -= 2; 2756190aba80SRichard Henderson addr += 2; 2757190aba80SRichard Henderson break; 2758190aba80SRichard Henderson case 4: 2759190aba80SRichard Henderson io_writex(env, full, mmu_idx, val_le, addr, ra, MO_LEUL); 2760190aba80SRichard Henderson val_le >>= 32; 2761190aba80SRichard Henderson size -= 4; 2762190aba80SRichard Henderson addr += 4; 2763190aba80SRichard Henderson break; 2764190aba80SRichard Henderson case 0: 2765190aba80SRichard Henderson io_writex(env, full, mmu_idx, val_le, addr, ra, MO_LEUQ); 2766190aba80SRichard Henderson return 0; 2767190aba80SRichard Henderson default: 2768190aba80SRichard Henderson qemu_build_not_reached(); 276959213461SRichard Henderson } 2770190aba80SRichard Henderson } while (size); 2771190aba80SRichard Henderson 277259213461SRichard Henderson return val_le; 277359213461SRichard Henderson } 277459213461SRichard Henderson 27756b8b622eSRichard Henderson /* 277659213461SRichard Henderson * Wrapper for the above. 
27776b8b622eSRichard Henderson */ 277859213461SRichard Henderson static uint64_t do_st_leN(CPUArchState *env, MMULookupPageData *p, 27795b36f268SRichard Henderson uint64_t val_le, int mmu_idx, 27805b36f268SRichard Henderson MemOp mop, uintptr_t ra) 278159213461SRichard Henderson { 27825b36f268SRichard Henderson MemOp atom; 27835b36f268SRichard Henderson unsigned tmp, half_size; 27845b36f268SRichard Henderson 278559213461SRichard Henderson if (unlikely(p->flags & TLB_MMIO)) { 27861966855eSRichard Henderson QEMU_IOTHREAD_LOCK_GUARD(); 27871966855eSRichard Henderson return do_st_mmio_leN(env, p->full, val_le, p->addr, 27881966855eSRichard Henderson p->size, mmu_idx, ra); 278959213461SRichard Henderson } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) { 279059213461SRichard Henderson return val_le >> (p->size * 8); 27915b36f268SRichard Henderson } 27925b36f268SRichard Henderson 27935b36f268SRichard Henderson /* 27945b36f268SRichard Henderson * It is a given that we cross a page and therefore there is no atomicity 27955b36f268SRichard Henderson * for the store as a whole, but subobjects may need attention. 27965b36f268SRichard Henderson */ 27975b36f268SRichard Henderson atom = mop & MO_ATOM_MASK; 27985b36f268SRichard Henderson switch (atom) { 27995b36f268SRichard Henderson case MO_ATOM_SUBALIGN: 28005b36f268SRichard Henderson return store_parts_leN(p->haddr, p->size, val_le); 28015b36f268SRichard Henderson 28025b36f268SRichard Henderson case MO_ATOM_IFALIGN_PAIR: 28035b36f268SRichard Henderson case MO_ATOM_WITHIN16_PAIR: 28045b36f268SRichard Henderson tmp = mop & MO_SIZE; 28055b36f268SRichard Henderson tmp = tmp ? tmp - 1 : 0; 28065b36f268SRichard Henderson half_size = 1 << tmp; 28075b36f268SRichard Henderson if (atom == MO_ATOM_IFALIGN_PAIR 28085b36f268SRichard Henderson ? p->size == half_size 28095b36f268SRichard Henderson : p->size >= half_size) { 28105b36f268SRichard Henderson if (!HAVE_al8_fast && p->size <= 4) { 28115b36f268SRichard Henderson return store_whole_le4(p->haddr, p->size, val_le); 28125b36f268SRichard Henderson } else if (HAVE_al8) { 28135b36f268SRichard Henderson return store_whole_le8(p->haddr, p->size, val_le); 28146b8b622eSRichard Henderson } else { 28155b36f268SRichard Henderson cpu_loop_exit_atomic(env_cpu(env), ra); 28165b36f268SRichard Henderson } 28175b36f268SRichard Henderson } 28185b36f268SRichard Henderson /* fall through */ 28195b36f268SRichard Henderson 28205b36f268SRichard Henderson case MO_ATOM_IFALIGN: 28215b36f268SRichard Henderson case MO_ATOM_WITHIN16: 28225b36f268SRichard Henderson case MO_ATOM_NONE: 28235b36f268SRichard Henderson return store_bytes_leN(p->haddr, p->size, val_le); 28245b36f268SRichard Henderson 28255b36f268SRichard Henderson default: 28265b36f268SRichard Henderson g_assert_not_reached(); 28276b8b622eSRichard Henderson } 28286b8b622eSRichard Henderson } 28296b8b622eSRichard Henderson 283035c653c4SRichard Henderson /* 283135c653c4SRichard Henderson * Wrapper for the above, for 8 < size < 16. 
283235c653c4SRichard Henderson */ 283335c653c4SRichard Henderson static uint64_t do_st16_leN(CPUArchState *env, MMULookupPageData *p, 283435c653c4SRichard Henderson Int128 val_le, int mmu_idx, 283535c653c4SRichard Henderson MemOp mop, uintptr_t ra) 283635c653c4SRichard Henderson { 283735c653c4SRichard Henderson int size = p->size; 283835c653c4SRichard Henderson MemOp atom; 283935c653c4SRichard Henderson 284035c653c4SRichard Henderson if (unlikely(p->flags & TLB_MMIO)) { 28411966855eSRichard Henderson QEMU_IOTHREAD_LOCK_GUARD(); 28421966855eSRichard Henderson do_st_mmio_leN(env, p->full, int128_getlo(val_le), 28431966855eSRichard Henderson p->addr, 8, mmu_idx, ra); 28441966855eSRichard Henderson return do_st_mmio_leN(env, p->full, int128_gethi(val_le), 28451966855eSRichard Henderson p->addr + 8, size - 8, mmu_idx, ra); 284635c653c4SRichard Henderson } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) { 284735c653c4SRichard Henderson return int128_gethi(val_le) >> ((size - 8) * 8); 284835c653c4SRichard Henderson } 284935c653c4SRichard Henderson 285035c653c4SRichard Henderson /* 285135c653c4SRichard Henderson * It is a given that we cross a page and therefore there is no atomicity 285235c653c4SRichard Henderson * for the store as a whole, but subobjects may need attention. 285335c653c4SRichard Henderson */ 285435c653c4SRichard Henderson atom = mop & MO_ATOM_MASK; 285535c653c4SRichard Henderson switch (atom) { 285635c653c4SRichard Henderson case MO_ATOM_SUBALIGN: 285735c653c4SRichard Henderson store_parts_leN(p->haddr, 8, int128_getlo(val_le)); 285835c653c4SRichard Henderson return store_parts_leN(p->haddr + 8, p->size - 8, 285935c653c4SRichard Henderson int128_gethi(val_le)); 286035c653c4SRichard Henderson 286135c653c4SRichard Henderson case MO_ATOM_WITHIN16_PAIR: 286235c653c4SRichard Henderson /* Since size > 8, this is the half that must be atomic. */ 28638dc24ff4SRichard Henderson if (!HAVE_ATOMIC128_RW) { 286435c653c4SRichard Henderson cpu_loop_exit_atomic(env_cpu(env), ra); 286535c653c4SRichard Henderson } 286635c653c4SRichard Henderson return store_whole_le16(p->haddr, p->size, val_le); 286735c653c4SRichard Henderson 286835c653c4SRichard Henderson case MO_ATOM_IFALIGN_PAIR: 286935c653c4SRichard Henderson /* 287035c653c4SRichard Henderson * Since size > 8, both halves are misaligned, 287135c653c4SRichard Henderson * and so neither is atomic. 
287235c653c4SRichard Henderson */ 287335c653c4SRichard Henderson case MO_ATOM_IFALIGN: 28742be6a486SRichard Henderson case MO_ATOM_WITHIN16: 287535c653c4SRichard Henderson case MO_ATOM_NONE: 287635c653c4SRichard Henderson stq_le_p(p->haddr, int128_getlo(val_le)); 287735c653c4SRichard Henderson return store_bytes_leN(p->haddr + 8, p->size - 8, 287835c653c4SRichard Henderson int128_gethi(val_le)); 287935c653c4SRichard Henderson 288035c653c4SRichard Henderson default: 288135c653c4SRichard Henderson g_assert_not_reached(); 288235c653c4SRichard Henderson } 288335c653c4SRichard Henderson } 288435c653c4SRichard Henderson 288559213461SRichard Henderson static void do_st_1(CPUArchState *env, MMULookupPageData *p, uint8_t val, 288659213461SRichard Henderson int mmu_idx, uintptr_t ra) 2887eed56642SAlex Bennée { 288859213461SRichard Henderson if (unlikely(p->flags & TLB_MMIO)) { 288959213461SRichard Henderson io_writex(env, p->full, mmu_idx, val, p->addr, ra, MO_UB); 289059213461SRichard Henderson } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) { 289159213461SRichard Henderson /* nothing */ 28925b87b3e6SRichard Henderson } else { 289359213461SRichard Henderson *(uint8_t *)p->haddr = val; 28945b87b3e6SRichard Henderson } 2895eed56642SAlex Bennée } 2896eed56642SAlex Bennée 289759213461SRichard Henderson static void do_st_2(CPUArchState *env, MMULookupPageData *p, uint16_t val, 289859213461SRichard Henderson int mmu_idx, MemOp memop, uintptr_t ra) 2899eed56642SAlex Bennée { 290059213461SRichard Henderson if (unlikely(p->flags & TLB_MMIO)) { 2901f7eaf9d7SRichard Henderson if ((memop & MO_BSWAP) != MO_LE) { 2902f7eaf9d7SRichard Henderson val = bswap16(val); 2903f7eaf9d7SRichard Henderson } 2904f7eaf9d7SRichard Henderson QEMU_IOTHREAD_LOCK_GUARD(); 2905f7eaf9d7SRichard Henderson do_st_mmio_leN(env, p->full, val, p->addr, 2, mmu_idx, ra); 290659213461SRichard Henderson } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) { 290759213461SRichard Henderson /* nothing */ 290859213461SRichard Henderson } else { 290959213461SRichard Henderson /* Swap to host endian if necessary, then store. */ 291059213461SRichard Henderson if (memop & MO_BSWAP) { 291159213461SRichard Henderson val = bswap16(val); 291259213461SRichard Henderson } 29135b36f268SRichard Henderson store_atom_2(env, ra, p->haddr, memop, val); 291459213461SRichard Henderson } 291559213461SRichard Henderson } 291659213461SRichard Henderson 291759213461SRichard Henderson static void do_st_4(CPUArchState *env, MMULookupPageData *p, uint32_t val, 291859213461SRichard Henderson int mmu_idx, MemOp memop, uintptr_t ra) 291959213461SRichard Henderson { 292059213461SRichard Henderson if (unlikely(p->flags & TLB_MMIO)) { 2921f7eaf9d7SRichard Henderson if ((memop & MO_BSWAP) != MO_LE) { 2922f7eaf9d7SRichard Henderson val = bswap32(val); 2923f7eaf9d7SRichard Henderson } 2924f7eaf9d7SRichard Henderson QEMU_IOTHREAD_LOCK_GUARD(); 2925f7eaf9d7SRichard Henderson do_st_mmio_leN(env, p->full, val, p->addr, 4, mmu_idx, ra); 292659213461SRichard Henderson } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) { 292759213461SRichard Henderson /* nothing */ 292859213461SRichard Henderson } else { 292959213461SRichard Henderson /* Swap to host endian if necessary, then store. 
*/ 293059213461SRichard Henderson if (memop & MO_BSWAP) { 293159213461SRichard Henderson val = bswap32(val); 293259213461SRichard Henderson } 29335b36f268SRichard Henderson store_atom_4(env, ra, p->haddr, memop, val); 293459213461SRichard Henderson } 293559213461SRichard Henderson } 293659213461SRichard Henderson 293759213461SRichard Henderson static void do_st_8(CPUArchState *env, MMULookupPageData *p, uint64_t val, 293859213461SRichard Henderson int mmu_idx, MemOp memop, uintptr_t ra) 293959213461SRichard Henderson { 294059213461SRichard Henderson if (unlikely(p->flags & TLB_MMIO)) { 2941f7eaf9d7SRichard Henderson if ((memop & MO_BSWAP) != MO_LE) { 2942f7eaf9d7SRichard Henderson val = bswap64(val); 2943f7eaf9d7SRichard Henderson } 2944f7eaf9d7SRichard Henderson QEMU_IOTHREAD_LOCK_GUARD(); 2945f7eaf9d7SRichard Henderson do_st_mmio_leN(env, p->full, val, p->addr, 8, mmu_idx, ra); 294659213461SRichard Henderson } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) { 294759213461SRichard Henderson /* nothing */ 294859213461SRichard Henderson } else { 294959213461SRichard Henderson /* Swap to host endian if necessary, then store. */ 295059213461SRichard Henderson if (memop & MO_BSWAP) { 295159213461SRichard Henderson val = bswap64(val); 295259213461SRichard Henderson } 29535b36f268SRichard Henderson store_atom_8(env, ra, p->haddr, memop, val); 295459213461SRichard Henderson } 2955eed56642SAlex Bennée } 2956eed56642SAlex Bennée 295724e46e6cSRichard Henderson void helper_stb_mmu(CPUArchState *env, uint64_t addr, uint32_t val, 295859213461SRichard Henderson MemOpIdx oi, uintptr_t ra) 2959f83bcecbSRichard Henderson { 296059213461SRichard Henderson MMULookupLocals l; 296159213461SRichard Henderson bool crosspage; 296259213461SRichard Henderson 29630cadc1edSRichard Henderson tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_8); 2964f86e8f3dSRichard Henderson cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST); 296559213461SRichard Henderson crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l); 296659213461SRichard Henderson tcg_debug_assert(!crosspage); 296759213461SRichard Henderson 296859213461SRichard Henderson do_st_1(env, &l.page[0], val, l.mmu_idx, ra); 2969f83bcecbSRichard Henderson } 2970f83bcecbSRichard Henderson 2971fb2c53cbSAnton Johansson static void do_st2_mmu(CPUArchState *env, vaddr addr, uint16_t val, 297259213461SRichard Henderson MemOpIdx oi, uintptr_t ra) 2973f83bcecbSRichard Henderson { 297459213461SRichard Henderson MMULookupLocals l; 297559213461SRichard Henderson bool crosspage; 297659213461SRichard Henderson uint8_t a, b; 297759213461SRichard Henderson 2978f86e8f3dSRichard Henderson cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST); 297959213461SRichard Henderson crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l); 298059213461SRichard Henderson if (likely(!crosspage)) { 298159213461SRichard Henderson do_st_2(env, &l.page[0], val, l.mmu_idx, l.memop, ra); 298259213461SRichard Henderson return; 298359213461SRichard Henderson } 298459213461SRichard Henderson 298559213461SRichard Henderson if ((l.memop & MO_BSWAP) == MO_LE) { 298659213461SRichard Henderson a = val, b = val >> 8; 298759213461SRichard Henderson } else { 298859213461SRichard Henderson b = val, a = val >> 8; 298959213461SRichard Henderson } 299059213461SRichard Henderson do_st_1(env, &l.page[0], a, l.mmu_idx, ra); 299159213461SRichard Henderson do_st_1(env, &l.page[1], b, l.mmu_idx, ra); 2992f83bcecbSRichard Henderson } 2993f83bcecbSRichard Henderson 299424e46e6cSRichard Henderson void helper_stw_mmu(CPUArchState *env, 
uint64_t addr, uint32_t val, 29959002ffcbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 2996eed56642SAlex Bennée { 29970cadc1edSRichard Henderson tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16); 299859213461SRichard Henderson do_st2_mmu(env, addr, val, oi, retaddr); 2999f83bcecbSRichard Henderson } 3000f83bcecbSRichard Henderson 3001fb2c53cbSAnton Johansson static void do_st4_mmu(CPUArchState *env, vaddr addr, uint32_t val, 300259213461SRichard Henderson MemOpIdx oi, uintptr_t ra) 3003f83bcecbSRichard Henderson { 300459213461SRichard Henderson MMULookupLocals l; 300559213461SRichard Henderson bool crosspage; 300659213461SRichard Henderson 3007f86e8f3dSRichard Henderson cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST); 300859213461SRichard Henderson crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l); 300959213461SRichard Henderson if (likely(!crosspage)) { 301059213461SRichard Henderson do_st_4(env, &l.page[0], val, l.mmu_idx, l.memop, ra); 301159213461SRichard Henderson return; 301259213461SRichard Henderson } 301359213461SRichard Henderson 301459213461SRichard Henderson /* Swap to little endian for simplicity, then store by bytes. */ 301559213461SRichard Henderson if ((l.memop & MO_BSWAP) != MO_LE) { 301659213461SRichard Henderson val = bswap32(val); 301759213461SRichard Henderson } 30185b36f268SRichard Henderson val = do_st_leN(env, &l.page[0], val, l.mmu_idx, l.memop, ra); 30195b36f268SRichard Henderson (void) do_st_leN(env, &l.page[1], val, l.mmu_idx, l.memop, ra); 3020eed56642SAlex Bennée } 3021eed56642SAlex Bennée 302224e46e6cSRichard Henderson void helper_stl_mmu(CPUArchState *env, uint64_t addr, uint32_t val, 30239002ffcbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 3024eed56642SAlex Bennée { 30250cadc1edSRichard Henderson tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32); 302659213461SRichard Henderson do_st4_mmu(env, addr, val, oi, retaddr); 302759213461SRichard Henderson } 302859213461SRichard Henderson 3029fb2c53cbSAnton Johansson static void do_st8_mmu(CPUArchState *env, vaddr addr, uint64_t val, 303059213461SRichard Henderson MemOpIdx oi, uintptr_t ra) 303159213461SRichard Henderson { 303259213461SRichard Henderson MMULookupLocals l; 303359213461SRichard Henderson bool crosspage; 303459213461SRichard Henderson 3035f86e8f3dSRichard Henderson cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST); 303659213461SRichard Henderson crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l); 303759213461SRichard Henderson if (likely(!crosspage)) { 303859213461SRichard Henderson do_st_8(env, &l.page[0], val, l.mmu_idx, l.memop, ra); 303959213461SRichard Henderson return; 304059213461SRichard Henderson } 304159213461SRichard Henderson 304259213461SRichard Henderson /* Swap to little endian for simplicity, then store by bytes. 
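       For example (illustrative): if only 3 of the 8 bytes fit on the
       first page, the first do_st_leN call writes those 3 low bytes and
       returns the value shifted down by 24 bits, which the second call
       then stores on the following page.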
*/ 304359213461SRichard Henderson if ((l.memop & MO_BSWAP) != MO_LE) { 304459213461SRichard Henderson val = bswap64(val); 304559213461SRichard Henderson } 30465b36f268SRichard Henderson val = do_st_leN(env, &l.page[0], val, l.mmu_idx, l.memop, ra); 30475b36f268SRichard Henderson (void) do_st_leN(env, &l.page[1], val, l.mmu_idx, l.memop, ra); 3048eed56642SAlex Bennée } 3049eed56642SAlex Bennée 305024e46e6cSRichard Henderson void helper_stq_mmu(CPUArchState *env, uint64_t addr, uint64_t val, 30519002ffcbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 3052eed56642SAlex Bennée { 30530cadc1edSRichard Henderson tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64); 305459213461SRichard Henderson do_st8_mmu(env, addr, val, oi, retaddr); 3055eed56642SAlex Bennée } 3056d9bb58e5SYang Zhong 3057fb2c53cbSAnton Johansson static void do_st16_mmu(CPUArchState *env, vaddr addr, Int128 val, 305835c653c4SRichard Henderson MemOpIdx oi, uintptr_t ra) 305935c653c4SRichard Henderson { 306035c653c4SRichard Henderson MMULookupLocals l; 306135c653c4SRichard Henderson bool crosspage; 306235c653c4SRichard Henderson uint64_t a, b; 306335c653c4SRichard Henderson int first; 306435c653c4SRichard Henderson 3065f86e8f3dSRichard Henderson cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST); 306635c653c4SRichard Henderson crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l); 306735c653c4SRichard Henderson if (likely(!crosspage)) { 3068f7eaf9d7SRichard Henderson if (unlikely(l.page[0].flags & TLB_MMIO)) { 3069f7eaf9d7SRichard Henderson if ((l.memop & MO_BSWAP) != MO_LE) { 3070f7eaf9d7SRichard Henderson val = bswap128(val); 3071f7eaf9d7SRichard Henderson } 3072f7eaf9d7SRichard Henderson a = int128_getlo(val); 3073f7eaf9d7SRichard Henderson b = int128_gethi(val); 3074f7eaf9d7SRichard Henderson QEMU_IOTHREAD_LOCK_GUARD(); 3075f7eaf9d7SRichard Henderson do_st_mmio_leN(env, l.page[0].full, a, addr, 8, l.mmu_idx, ra); 3076f7eaf9d7SRichard Henderson do_st_mmio_leN(env, l.page[0].full, b, addr + 8, 8, l.mmu_idx, ra); 3077f7eaf9d7SRichard Henderson } else if (unlikely(l.page[0].flags & TLB_DISCARD_WRITE)) { 3078f7eaf9d7SRichard Henderson /* nothing */ 3079f7eaf9d7SRichard Henderson } else { 308035c653c4SRichard Henderson /* Swap to host endian if necessary, then store. 
*/ 308135c653c4SRichard Henderson if (l.memop & MO_BSWAP) { 308235c653c4SRichard Henderson val = bswap128(val); 308335c653c4SRichard Henderson } 308435c653c4SRichard Henderson store_atom_16(env, ra, l.page[0].haddr, l.memop, val); 308535c653c4SRichard Henderson } 308635c653c4SRichard Henderson return; 308735c653c4SRichard Henderson } 308835c653c4SRichard Henderson 308935c653c4SRichard Henderson first = l.page[0].size; 309035c653c4SRichard Henderson if (first == 8) { 309135c653c4SRichard Henderson MemOp mop8 = (l.memop & ~(MO_SIZE | MO_BSWAP)) | MO_64; 309235c653c4SRichard Henderson 309335c653c4SRichard Henderson if (l.memop & MO_BSWAP) { 309435c653c4SRichard Henderson val = bswap128(val); 309535c653c4SRichard Henderson } 309635c653c4SRichard Henderson if (HOST_BIG_ENDIAN) { 309735c653c4SRichard Henderson b = int128_getlo(val), a = int128_gethi(val); 309835c653c4SRichard Henderson } else { 309935c653c4SRichard Henderson a = int128_getlo(val), b = int128_gethi(val); 310035c653c4SRichard Henderson } 310135c653c4SRichard Henderson do_st_8(env, &l.page[0], a, l.mmu_idx, mop8, ra); 310235c653c4SRichard Henderson do_st_8(env, &l.page[1], b, l.mmu_idx, mop8, ra); 310335c653c4SRichard Henderson return; 310435c653c4SRichard Henderson } 310535c653c4SRichard Henderson 310635c653c4SRichard Henderson if ((l.memop & MO_BSWAP) != MO_LE) { 310735c653c4SRichard Henderson val = bswap128(val); 310835c653c4SRichard Henderson } 310935c653c4SRichard Henderson if (first < 8) { 311035c653c4SRichard Henderson do_st_leN(env, &l.page[0], int128_getlo(val), l.mmu_idx, l.memop, ra); 311135c653c4SRichard Henderson val = int128_urshift(val, first * 8); 311235c653c4SRichard Henderson do_st16_leN(env, &l.page[1], val, l.mmu_idx, l.memop, ra); 311335c653c4SRichard Henderson } else { 311435c653c4SRichard Henderson b = do_st16_leN(env, &l.page[0], val, l.mmu_idx, l.memop, ra); 311535c653c4SRichard Henderson do_st_leN(env, &l.page[1], b, l.mmu_idx, l.memop, ra); 311635c653c4SRichard Henderson } 311735c653c4SRichard Henderson } 311835c653c4SRichard Henderson 311924e46e6cSRichard Henderson void helper_st16_mmu(CPUArchState *env, uint64_t addr, Int128 val, 312035c653c4SRichard Henderson MemOpIdx oi, uintptr_t retaddr) 312135c653c4SRichard Henderson { 312235c653c4SRichard Henderson tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128); 312335c653c4SRichard Henderson do_st16_mmu(env, addr, val, oi, retaddr); 312435c653c4SRichard Henderson } 312535c653c4SRichard Henderson 3126e570597aSRichard Henderson void helper_st_i128(CPUArchState *env, uint64_t addr, Int128 val, MemOpIdx oi) 312735c653c4SRichard Henderson { 312835c653c4SRichard Henderson helper_st16_mmu(env, addr, val, oi, GETPC()); 312935c653c4SRichard Henderson } 313035c653c4SRichard Henderson 3131d03f1408SRichard Henderson /* 3132d03f1408SRichard Henderson * Store Helpers for cpu_ldst.h 3133d03f1408SRichard Henderson */ 3134d03f1408SRichard Henderson 313559213461SRichard Henderson static void plugin_store_cb(CPUArchState *env, abi_ptr addr, MemOpIdx oi) 3136d03f1408SRichard Henderson { 313737aff087SRichard Henderson qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); 3138d03f1408SRichard Henderson } 3139d03f1408SRichard Henderson 3140022b9bceSAnton Johansson void cpu_stb_mmu(CPUArchState *env, abi_ptr addr, uint8_t val, 3141f83bcecbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 3142d03f1408SRichard Henderson { 31430cadc1edSRichard Henderson helper_stb_mmu(env, addr, val, oi, retaddr); 314459213461SRichard Henderson plugin_store_cb(env, addr, oi); 
3131d03f1408SRichard Henderson /*
3132d03f1408SRichard Henderson  * Store Helpers for cpu_ldst.h
3133d03f1408SRichard Henderson  */
3134d03f1408SRichard Henderson 
313559213461SRichard Henderson static void plugin_store_cb(CPUArchState *env, abi_ptr addr, MemOpIdx oi)
3136d03f1408SRichard Henderson {
313737aff087SRichard Henderson     qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
3138d03f1408SRichard Henderson }
3139d03f1408SRichard Henderson 
3140022b9bceSAnton Johansson void cpu_stb_mmu(CPUArchState *env, abi_ptr addr, uint8_t val,
3141f83bcecbSRichard Henderson                  MemOpIdx oi, uintptr_t retaddr)
3142d03f1408SRichard Henderson {
31430cadc1edSRichard Henderson     helper_stb_mmu(env, addr, val, oi, retaddr);
314459213461SRichard Henderson     plugin_store_cb(env, addr, oi);
3145d03f1408SRichard Henderson }
3146d03f1408SRichard Henderson 
3147022b9bceSAnton Johansson void cpu_stw_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
3148f83bcecbSRichard Henderson                  MemOpIdx oi, uintptr_t retaddr)
3149d03f1408SRichard Henderson {
3150fbea7a40SRichard Henderson     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
31510cadc1edSRichard Henderson     do_st2_mmu(env, addr, val, oi, retaddr);
315259213461SRichard Henderson     plugin_store_cb(env, addr, oi);
3153d03f1408SRichard Henderson }
3154d03f1408SRichard Henderson 
3155022b9bceSAnton Johansson void cpu_stl_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
3156f83bcecbSRichard Henderson                  MemOpIdx oi, uintptr_t retaddr)
3157d03f1408SRichard Henderson {
3158fbea7a40SRichard Henderson     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
31590cadc1edSRichard Henderson     do_st4_mmu(env, addr, val, oi, retaddr);
316059213461SRichard Henderson     plugin_store_cb(env, addr, oi);
3161d03f1408SRichard Henderson }
3162d03f1408SRichard Henderson 
3163022b9bceSAnton Johansson void cpu_stq_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
3164f83bcecbSRichard Henderson                  MemOpIdx oi, uintptr_t retaddr)
3165d03f1408SRichard Henderson {
3166fbea7a40SRichard Henderson     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
31670cadc1edSRichard Henderson     do_st8_mmu(env, addr, val, oi, retaddr);
316859213461SRichard Henderson     plugin_store_cb(env, addr, oi);
3169b9e60257SRichard Henderson }
3170b9e60257SRichard Henderson 
3171022b9bceSAnton Johansson void cpu_st16_mmu(CPUArchState *env, abi_ptr addr, Int128 val,
3172f83bcecbSRichard Henderson                   MemOpIdx oi, uintptr_t retaddr)
3173b9e60257SRichard Henderson {
3174fbea7a40SRichard Henderson     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
317535c653c4SRichard Henderson     do_st16_mmu(env, addr, val, oi, retaddr);
317635c653c4SRichard Henderson     plugin_store_cb(env, addr, oi);
3177cb48f365SRichard Henderson }
3178cb48f365SRichard Henderson 
3179f83bcecbSRichard Henderson #include "ldst_common.c.inc"
3180cfe04a4bSRichard Henderson 
3181be9568b4SRichard Henderson /*
3182be9568b4SRichard Henderson  * First set of functions passes in OI and RETADDR.
3183be9568b4SRichard Henderson  * This makes them callable from other helpers.
3184be9568b4SRichard Henderson  */
3185d9bb58e5SYang Zhong 
3186d9bb58e5SYang Zhong #define ATOMIC_NAME(X) \
3187be9568b4SRichard Henderson     glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)
3188a754f7f3SRichard Henderson 
3189707526adSRichard Henderson #define ATOMIC_MMU_CLEANUP
3190d9bb58e5SYang Zhong 
3191139c1837SPaolo Bonzini #include "atomic_common.c.inc"
3192d9bb58e5SYang Zhong 
3193d9bb58e5SYang Zhong #define DATA_SIZE 1
3194d9bb58e5SYang Zhong #include "atomic_template.h"
3195d9bb58e5SYang Zhong 
3196d9bb58e5SYang Zhong #define DATA_SIZE 2
3197d9bb58e5SYang Zhong #include "atomic_template.h"
3198d9bb58e5SYang Zhong 
3199d9bb58e5SYang Zhong #define DATA_SIZE 4
3200d9bb58e5SYang Zhong #include "atomic_template.h"
3201d9bb58e5SYang Zhong 
3202d9bb58e5SYang Zhong #ifdef CONFIG_ATOMIC64
3203d9bb58e5SYang Zhong #define DATA_SIZE 8
3204d9bb58e5SYang Zhong #include "atomic_template.h"
3205d9bb58e5SYang Zhong #endif
3206d9bb58e5SYang Zhong 
320776f9d6adSRichard Henderson #if defined(CONFIG_ATOMIC128) || HAVE_CMPXCHG128
3208d9bb58e5SYang Zhong #define DATA_SIZE 16
3209d9bb58e5SYang Zhong #include "atomic_template.h"
3210d9bb58e5SYang Zhong #endif
3211d9bb58e5SYang Zhong 
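/*
 * Illustrative sketch only: each inclusion of "atomic_template.h" above
 * expands ATOMIC_NAME() into entry points such as
 * cpu_atomic_cmpxchgl_le_mmu().  The wrapper name and "mmu_idx" parameter
 * below are hypothetical, not part of this file.
 */
static inline uint32_t example_cmpxchg_le32(CPUArchState *env, abi_ptr addr,
                                            uint32_t cmpv, uint32_t newv,
                                            int mmu_idx, uintptr_t ra)
{
    /* Little-endian, aligned 32-bit compare-and-swap at 'addr'. */
    MemOpIdx oi = make_memop_idx(MO_LEUL | MO_ALIGN, mmu_idx);

    return cpu_atomic_cmpxchgl_le_mmu(env, addr, cmpv, newv, oi, ra);
}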
3212d9bb58e5SYang Zhong /* Code access functions. */
3213d9bb58e5SYang Zhong 
3214fc4120a3SRichard Henderson uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr)
3215eed56642SAlex Bennée {
32169002ffcbSRichard Henderson     MemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(env, true));
32178cfdacaaSRichard Henderson     return do_ld1_mmu(env, addr, oi, 0, MMU_INST_FETCH);
32184cef72d0SAlex Bennée }
32194cef72d0SAlex Bennée 
3220fc4120a3SRichard Henderson uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr)
32212dd92606SRichard Henderson {
32229002ffcbSRichard Henderson     MemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(env, true));
32238cfdacaaSRichard Henderson     return do_ld2_mmu(env, addr, oi, 0, MMU_INST_FETCH);
32242dd92606SRichard Henderson }
32252dd92606SRichard Henderson 
3226fc4120a3SRichard Henderson uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr)
32274cef72d0SAlex Bennée {
32289002ffcbSRichard Henderson     MemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, true));
32298cfdacaaSRichard Henderson     return do_ld4_mmu(env, addr, oi, 0, MMU_INST_FETCH);
3230eed56642SAlex Bennée }
3231d9bb58e5SYang Zhong 
3232fc4120a3SRichard Henderson uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr)
3233eed56642SAlex Bennée {
3234fc313c64SFrédéric Pétrot     MemOpIdx oi = make_memop_idx(MO_TEUQ, cpu_mmu_index(env, true));
32358cfdacaaSRichard Henderson     return do_ld8_mmu(env, addr, oi, 0, MMU_INST_FETCH);
3236eed56642SAlex Bennée }
323728990626SRichard Henderson 
323828990626SRichard Henderson uint8_t cpu_ldb_code_mmu(CPUArchState *env, abi_ptr addr,
323928990626SRichard Henderson                          MemOpIdx oi, uintptr_t retaddr)
324028990626SRichard Henderson {
32418cfdacaaSRichard Henderson     return do_ld1_mmu(env, addr, oi, retaddr, MMU_INST_FETCH);
324228990626SRichard Henderson }
324328990626SRichard Henderson 
324428990626SRichard Henderson uint16_t cpu_ldw_code_mmu(CPUArchState *env, abi_ptr addr,
324528990626SRichard Henderson                           MemOpIdx oi, uintptr_t retaddr)
324628990626SRichard Henderson {
32478cfdacaaSRichard Henderson     return do_ld2_mmu(env, addr, oi, retaddr, MMU_INST_FETCH);
324828990626SRichard Henderson }
324928990626SRichard Henderson 
325028990626SRichard Henderson uint32_t cpu_ldl_code_mmu(CPUArchState *env, abi_ptr addr,
325128990626SRichard Henderson                           MemOpIdx oi, uintptr_t retaddr)
325228990626SRichard Henderson {
32538cfdacaaSRichard Henderson     return do_ld4_mmu(env, addr, oi, retaddr, MMU_INST_FETCH);
325428990626SRichard Henderson }
325528990626SRichard Henderson 
325628990626SRichard Henderson uint64_t cpu_ldq_code_mmu(CPUArchState *env, abi_ptr addr,
325728990626SRichard Henderson                           MemOpIdx oi, uintptr_t retaddr)
325828990626SRichard Henderson {
32598cfdacaaSRichard Henderson     return do_ld8_mmu(env, addr, oi, retaddr, MMU_INST_FETCH);
326028990626SRichard Henderson }
3261
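/*
 * Illustrative sketch only: a target front end would typically fetch an
 * opcode at translation time through one of the functions above.  The
 * wrapper name below is hypothetical, not part of this file.
 */
static inline uint32_t example_fetch_opcode(CPUArchState *env, abi_ptr pc)
{
    /* Fetch a target-endian 32-bit instruction word from 'pc'. */
    return cpu_ldl_code(env, pc);
}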