1d9bb58e5SYang Zhong /* 2d9bb58e5SYang Zhong * Common CPU TLB handling 3d9bb58e5SYang Zhong * 4d9bb58e5SYang Zhong * Copyright (c) 2003 Fabrice Bellard 5d9bb58e5SYang Zhong * 6d9bb58e5SYang Zhong * This library is free software; you can redistribute it and/or 7d9bb58e5SYang Zhong * modify it under the terms of the GNU Lesser General Public 8d9bb58e5SYang Zhong * License as published by the Free Software Foundation; either 9d9bb58e5SYang Zhong * version 2 of the License, or (at your option) any later version. 10d9bb58e5SYang Zhong * 11d9bb58e5SYang Zhong * This library is distributed in the hope that it will be useful, 12d9bb58e5SYang Zhong * but WITHOUT ANY WARRANTY; without even the implied warranty of 13d9bb58e5SYang Zhong * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14d9bb58e5SYang Zhong * Lesser General Public License for more details. 15d9bb58e5SYang Zhong * 16d9bb58e5SYang Zhong * You should have received a copy of the GNU Lesser General Public 17d9bb58e5SYang Zhong * License along with this library; if not, see <http://www.gnu.org/licenses/>. 18d9bb58e5SYang Zhong */ 19d9bb58e5SYang Zhong 20d9bb58e5SYang Zhong #include "qemu/osdep.h" 21d9bb58e5SYang Zhong #include "qemu/main-loop.h" 22d9bb58e5SYang Zhong #include "cpu.h" 23d9bb58e5SYang Zhong #include "exec/exec-all.h" 24d9bb58e5SYang Zhong #include "exec/memory.h" 25d9bb58e5SYang Zhong #include "exec/address-spaces.h" 26d9bb58e5SYang Zhong #include "exec/cpu_ldst.h" 27d9bb58e5SYang Zhong #include "exec/cputlb.h" 28d9bb58e5SYang Zhong #include "exec/memory-internal.h" 29d9bb58e5SYang Zhong #include "exec/ram_addr.h" 30d9bb58e5SYang Zhong #include "tcg/tcg.h" 31d9bb58e5SYang Zhong #include "qemu/error-report.h" 32d9bb58e5SYang Zhong #include "exec/log.h" 33d9bb58e5SYang Zhong #include "exec/helper-proto.h" 34d9bb58e5SYang Zhong #include "qemu/atomic.h" 35d9bb58e5SYang Zhong 36d9bb58e5SYang Zhong /* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */ 37d9bb58e5SYang Zhong /* #define DEBUG_TLB */ 38d9bb58e5SYang Zhong /* #define DEBUG_TLB_LOG */ 39d9bb58e5SYang Zhong 40d9bb58e5SYang Zhong #ifdef DEBUG_TLB 41d9bb58e5SYang Zhong # define DEBUG_TLB_GATE 1 42d9bb58e5SYang Zhong # ifdef DEBUG_TLB_LOG 43d9bb58e5SYang Zhong # define DEBUG_TLB_LOG_GATE 1 44d9bb58e5SYang Zhong # else 45d9bb58e5SYang Zhong # define DEBUG_TLB_LOG_GATE 0 46d9bb58e5SYang Zhong # endif 47d9bb58e5SYang Zhong #else 48d9bb58e5SYang Zhong # define DEBUG_TLB_GATE 0 49d9bb58e5SYang Zhong # define DEBUG_TLB_LOG_GATE 0 50d9bb58e5SYang Zhong #endif 51d9bb58e5SYang Zhong 52d9bb58e5SYang Zhong #define tlb_debug(fmt, ...) 
do { \ 53d9bb58e5SYang Zhong if (DEBUG_TLB_LOG_GATE) { \ 54d9bb58e5SYang Zhong qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \ 55d9bb58e5SYang Zhong ## __VA_ARGS__); \ 56d9bb58e5SYang Zhong } else if (DEBUG_TLB_GATE) { \ 57d9bb58e5SYang Zhong fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \ 58d9bb58e5SYang Zhong } \ 59d9bb58e5SYang Zhong } while (0) 60d9bb58e5SYang Zhong 61d9bb58e5SYang Zhong #define assert_cpu_is_self(this_cpu) do { \ 62d9bb58e5SYang Zhong if (DEBUG_TLB_GATE) { \ 63d9bb58e5SYang Zhong g_assert(!cpu->created || qemu_cpu_is_self(cpu)); \ 64d9bb58e5SYang Zhong } \ 65d9bb58e5SYang Zhong } while (0) 66d9bb58e5SYang Zhong 67d9bb58e5SYang Zhong /* run_on_cpu_data.target_ptr should always be big enough for a 68d9bb58e5SYang Zhong * target_ulong even on 32 bit builds */ 69d9bb58e5SYang Zhong QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data)); 70d9bb58e5SYang Zhong 71d9bb58e5SYang Zhong /* We currently can't handle more than 16 bits in the MMUIDX bitmask. 72d9bb58e5SYang Zhong */ 73d9bb58e5SYang Zhong QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16); 74d9bb58e5SYang Zhong #define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1) 75d9bb58e5SYang Zhong 76d9bb58e5SYang Zhong /* flush_all_helper: run fn across all cpus 77d9bb58e5SYang Zhong * 78d9bb58e5SYang Zhong * If the wait flag is set then the src cpu's helper will be queued as 79d9bb58e5SYang Zhong * "safe" work and the loop exited creating a synchronisation point 80d9bb58e5SYang Zhong * where all queued work will be finished before execution starts 81d9bb58e5SYang Zhong * again. 82d9bb58e5SYang Zhong */ 83d9bb58e5SYang Zhong static void flush_all_helper(CPUState *src, run_on_cpu_func fn, 84d9bb58e5SYang Zhong run_on_cpu_data d) 85d9bb58e5SYang Zhong { 86d9bb58e5SYang Zhong CPUState *cpu; 87d9bb58e5SYang Zhong 88d9bb58e5SYang Zhong CPU_FOREACH(cpu) { 89d9bb58e5SYang Zhong if (cpu != src) { 90d9bb58e5SYang Zhong async_run_on_cpu(cpu, fn, d); 91d9bb58e5SYang Zhong } 92d9bb58e5SYang Zhong } 93d9bb58e5SYang Zhong } 94d9bb58e5SYang Zhong 9583974cf4SEmilio G. Cota size_t tlb_flush_count(void) 9683974cf4SEmilio G. Cota { 9783974cf4SEmilio G. Cota CPUState *cpu; 9883974cf4SEmilio G. Cota size_t count = 0; 9983974cf4SEmilio G. Cota 10083974cf4SEmilio G. Cota CPU_FOREACH(cpu) { 10183974cf4SEmilio G. Cota CPUArchState *env = cpu->env_ptr; 10283974cf4SEmilio G. Cota 10383974cf4SEmilio G. Cota count += atomic_read(&env->tlb_flush_count); 10483974cf4SEmilio G. Cota } 10583974cf4SEmilio G. Cota return count; 10683974cf4SEmilio G. Cota } 107d9bb58e5SYang Zhong 108d9bb58e5SYang Zhong /* This is OK because CPU architectures generally permit an 109d9bb58e5SYang Zhong * implementation to drop entries from the TLB at any time, so 110d9bb58e5SYang Zhong * flushing more entries than required is only an efficiency issue, 111d9bb58e5SYang Zhong * not a correctness issue. 112d9bb58e5SYang Zhong */ 113d9bb58e5SYang Zhong static void tlb_flush_nocheck(CPUState *cpu) 114d9bb58e5SYang Zhong { 115d9bb58e5SYang Zhong CPUArchState *env = cpu->env_ptr; 116d9bb58e5SYang Zhong 117d9bb58e5SYang Zhong /* The QOM tests will trigger tlb_flushes without setting up TCG 118d9bb58e5SYang Zhong * so we bug out here in that case. 119d9bb58e5SYang Zhong */ 120d9bb58e5SYang Zhong if (!tcg_enabled()) { 121d9bb58e5SYang Zhong return; 122d9bb58e5SYang Zhong } 123d9bb58e5SYang Zhong 124d9bb58e5SYang Zhong assert_cpu_is_self(cpu); 12583974cf4SEmilio G. Cota atomic_set(&env->tlb_flush_count, env->tlb_flush_count + 1); 12683974cf4SEmilio G. 
Cota tlb_debug("(count: %zu)\n", tlb_flush_count()); 127d9bb58e5SYang Zhong 128d9bb58e5SYang Zhong memset(env->tlb_table, -1, sizeof(env->tlb_table)); 129d9bb58e5SYang Zhong memset(env->tlb_v_table, -1, sizeof(env->tlb_v_table)); 130f3ced3c5SEmilio G. Cota cpu_tb_jmp_cache_clear(cpu); 131d9bb58e5SYang Zhong 132d9bb58e5SYang Zhong env->vtlb_index = 0; 133d9bb58e5SYang Zhong env->tlb_flush_addr = -1; 134d9bb58e5SYang Zhong env->tlb_flush_mask = 0; 135d9bb58e5SYang Zhong 136d9bb58e5SYang Zhong atomic_mb_set(&cpu->pending_tlb_flush, 0); 137d9bb58e5SYang Zhong } 138d9bb58e5SYang Zhong 139d9bb58e5SYang Zhong static void tlb_flush_global_async_work(CPUState *cpu, run_on_cpu_data data) 140d9bb58e5SYang Zhong { 141d9bb58e5SYang Zhong tlb_flush_nocheck(cpu); 142d9bb58e5SYang Zhong } 143d9bb58e5SYang Zhong 144d9bb58e5SYang Zhong void tlb_flush(CPUState *cpu) 145d9bb58e5SYang Zhong { 146d9bb58e5SYang Zhong if (cpu->created && !qemu_cpu_is_self(cpu)) { 147d9bb58e5SYang Zhong if (atomic_mb_read(&cpu->pending_tlb_flush) != ALL_MMUIDX_BITS) { 148d9bb58e5SYang Zhong atomic_mb_set(&cpu->pending_tlb_flush, ALL_MMUIDX_BITS); 149d9bb58e5SYang Zhong async_run_on_cpu(cpu, tlb_flush_global_async_work, 150d9bb58e5SYang Zhong RUN_ON_CPU_NULL); 151d9bb58e5SYang Zhong } 152d9bb58e5SYang Zhong } else { 153d9bb58e5SYang Zhong tlb_flush_nocheck(cpu); 154d9bb58e5SYang Zhong } 155d9bb58e5SYang Zhong } 156d9bb58e5SYang Zhong 157d9bb58e5SYang Zhong void tlb_flush_all_cpus(CPUState *src_cpu) 158d9bb58e5SYang Zhong { 159d9bb58e5SYang Zhong const run_on_cpu_func fn = tlb_flush_global_async_work; 160d9bb58e5SYang Zhong flush_all_helper(src_cpu, fn, RUN_ON_CPU_NULL); 161d9bb58e5SYang Zhong fn(src_cpu, RUN_ON_CPU_NULL); 162d9bb58e5SYang Zhong } 163d9bb58e5SYang Zhong 164d9bb58e5SYang Zhong void tlb_flush_all_cpus_synced(CPUState *src_cpu) 165d9bb58e5SYang Zhong { 166d9bb58e5SYang Zhong const run_on_cpu_func fn = tlb_flush_global_async_work; 167d9bb58e5SYang Zhong flush_all_helper(src_cpu, fn, RUN_ON_CPU_NULL); 168d9bb58e5SYang Zhong async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_NULL); 169d9bb58e5SYang Zhong } 170d9bb58e5SYang Zhong 171d9bb58e5SYang Zhong static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data) 172d9bb58e5SYang Zhong { 173d9bb58e5SYang Zhong CPUArchState *env = cpu->env_ptr; 174d9bb58e5SYang Zhong unsigned long mmu_idx_bitmask = data.host_int; 175d9bb58e5SYang Zhong int mmu_idx; 176d9bb58e5SYang Zhong 177d9bb58e5SYang Zhong assert_cpu_is_self(cpu); 178d9bb58e5SYang Zhong 179d9bb58e5SYang Zhong tlb_debug("start: mmu_idx:0x%04lx\n", mmu_idx_bitmask); 180d9bb58e5SYang Zhong 181d9bb58e5SYang Zhong for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { 182d9bb58e5SYang Zhong 183d9bb58e5SYang Zhong if (test_bit(mmu_idx, &mmu_idx_bitmask)) { 184d9bb58e5SYang Zhong tlb_debug("%d\n", mmu_idx); 185d9bb58e5SYang Zhong 186d9bb58e5SYang Zhong memset(env->tlb_table[mmu_idx], -1, sizeof(env->tlb_table[0])); 187d9bb58e5SYang Zhong memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0])); 188d9bb58e5SYang Zhong } 189d9bb58e5SYang Zhong } 190d9bb58e5SYang Zhong 191f3ced3c5SEmilio G. 
Cota cpu_tb_jmp_cache_clear(cpu); 192d9bb58e5SYang Zhong 193d9bb58e5SYang Zhong tlb_debug("done\n"); 194d9bb58e5SYang Zhong } 195d9bb58e5SYang Zhong 196d9bb58e5SYang Zhong void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap) 197d9bb58e5SYang Zhong { 198d9bb58e5SYang Zhong tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap); 199d9bb58e5SYang Zhong 200d9bb58e5SYang Zhong if (!qemu_cpu_is_self(cpu)) { 201d9bb58e5SYang Zhong uint16_t pending_flushes = idxmap; 202d9bb58e5SYang Zhong pending_flushes &= ~atomic_mb_read(&cpu->pending_tlb_flush); 203d9bb58e5SYang Zhong 204d9bb58e5SYang Zhong if (pending_flushes) { 205d9bb58e5SYang Zhong tlb_debug("reduced mmu_idx: 0x%" PRIx16 "\n", pending_flushes); 206d9bb58e5SYang Zhong 207d9bb58e5SYang Zhong atomic_or(&cpu->pending_tlb_flush, pending_flushes); 208d9bb58e5SYang Zhong async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work, 209d9bb58e5SYang Zhong RUN_ON_CPU_HOST_INT(pending_flushes)); 210d9bb58e5SYang Zhong } 211d9bb58e5SYang Zhong } else { 212d9bb58e5SYang Zhong tlb_flush_by_mmuidx_async_work(cpu, 213d9bb58e5SYang Zhong RUN_ON_CPU_HOST_INT(idxmap)); 214d9bb58e5SYang Zhong } 215d9bb58e5SYang Zhong } 216d9bb58e5SYang Zhong 217d9bb58e5SYang Zhong void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap) 218d9bb58e5SYang Zhong { 219d9bb58e5SYang Zhong const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work; 220d9bb58e5SYang Zhong 221d9bb58e5SYang Zhong tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap); 222d9bb58e5SYang Zhong 223d9bb58e5SYang Zhong flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap)); 224d9bb58e5SYang Zhong fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap)); 225d9bb58e5SYang Zhong } 226d9bb58e5SYang Zhong 227d9bb58e5SYang Zhong void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, 228d9bb58e5SYang Zhong uint16_t idxmap) 229d9bb58e5SYang Zhong { 230d9bb58e5SYang Zhong const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work; 231d9bb58e5SYang Zhong 232d9bb58e5SYang Zhong tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap); 233d9bb58e5SYang Zhong 234d9bb58e5SYang Zhong flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap)); 235d9bb58e5SYang Zhong async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap)); 236d9bb58e5SYang Zhong } 237d9bb58e5SYang Zhong 23868fea038SRichard Henderson static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry, 23968fea038SRichard Henderson target_ulong page) 240d9bb58e5SYang Zhong { 24168fea038SRichard Henderson return tlb_hit_page(tlb_entry->addr_read, page) || 24268fea038SRichard Henderson tlb_hit_page(tlb_entry->addr_write, page) || 24368fea038SRichard Henderson tlb_hit_page(tlb_entry->addr_code, page); 24468fea038SRichard Henderson } 24568fea038SRichard Henderson 24668fea038SRichard Henderson static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong page) 24768fea038SRichard Henderson { 24868fea038SRichard Henderson if (tlb_hit_page_anyprot(tlb_entry, page)) { 249d9bb58e5SYang Zhong memset(tlb_entry, -1, sizeof(*tlb_entry)); 250d9bb58e5SYang Zhong } 251d9bb58e5SYang Zhong } 252d9bb58e5SYang Zhong 25368fea038SRichard Henderson static inline void tlb_flush_vtlb_page(CPUArchState *env, int mmu_idx, 25468fea038SRichard Henderson target_ulong page) 25568fea038SRichard Henderson { 25668fea038SRichard Henderson int k; 25768fea038SRichard Henderson for (k = 0; k < CPU_VTLB_SIZE; k++) { 25868fea038SRichard Henderson tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], page); 25968fea038SRichard Henderson } 26068fea038SRichard Henderson } 26168fea038SRichard Henderson 262d9bb58e5SYang Zhong 
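/*
 * Reader's note on the single-page flush path below: each MMU mode has a
 * direct-mapped tlb_table indexed by virtual page number, plus a small
 * victim TLB, so flushing one page only needs to probe one slot per mode
 * and sweep the victim entries.  With purely illustrative values of
 * TARGET_PAGE_BITS == 12 and CPU_TLB_SIZE == 256 (both are configuration
 * dependent), an address such as 0x12345678 selects
 *
 *     index = (0x12345678 >> 12) & (256 - 1) = 0x45
 *
 * which is the computation used by tlb_flush_page_async_work().  If the
 * page lies inside the recorded large-page range we cannot tell which
 * slots a huge mapping may have populated, so the whole TLB is flushed
 * instead.
 */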
static void tlb_flush_page_async_work(CPUState *cpu, run_on_cpu_data data) 263d9bb58e5SYang Zhong { 264d9bb58e5SYang Zhong CPUArchState *env = cpu->env_ptr; 265d9bb58e5SYang Zhong target_ulong addr = (target_ulong) data.target_ptr; 266d9bb58e5SYang Zhong int i; 267d9bb58e5SYang Zhong int mmu_idx; 268d9bb58e5SYang Zhong 269d9bb58e5SYang Zhong assert_cpu_is_self(cpu); 270d9bb58e5SYang Zhong 271d9bb58e5SYang Zhong tlb_debug("page :" TARGET_FMT_lx "\n", addr); 272d9bb58e5SYang Zhong 273d9bb58e5SYang Zhong /* Check if we need to flush due to large pages. */ 274d9bb58e5SYang Zhong if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) { 275d9bb58e5SYang Zhong tlb_debug("forcing full flush (" 276d9bb58e5SYang Zhong TARGET_FMT_lx "/" TARGET_FMT_lx ")\n", 277d9bb58e5SYang Zhong env->tlb_flush_addr, env->tlb_flush_mask); 278d9bb58e5SYang Zhong 279d9bb58e5SYang Zhong tlb_flush(cpu); 280d9bb58e5SYang Zhong return; 281d9bb58e5SYang Zhong } 282d9bb58e5SYang Zhong 283d9bb58e5SYang Zhong addr &= TARGET_PAGE_MASK; 284d9bb58e5SYang Zhong i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); 285d9bb58e5SYang Zhong for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { 286d9bb58e5SYang Zhong tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr); 28768fea038SRichard Henderson tlb_flush_vtlb_page(env, mmu_idx, addr); 288d9bb58e5SYang Zhong } 289d9bb58e5SYang Zhong 290d9bb58e5SYang Zhong tb_flush_jmp_cache(cpu, addr); 291d9bb58e5SYang Zhong } 292d9bb58e5SYang Zhong 293d9bb58e5SYang Zhong void tlb_flush_page(CPUState *cpu, target_ulong addr) 294d9bb58e5SYang Zhong { 295d9bb58e5SYang Zhong tlb_debug("page :" TARGET_FMT_lx "\n", addr); 296d9bb58e5SYang Zhong 297d9bb58e5SYang Zhong if (!qemu_cpu_is_self(cpu)) { 298d9bb58e5SYang Zhong async_run_on_cpu(cpu, tlb_flush_page_async_work, 299d9bb58e5SYang Zhong RUN_ON_CPU_TARGET_PTR(addr)); 300d9bb58e5SYang Zhong } else { 301d9bb58e5SYang Zhong tlb_flush_page_async_work(cpu, RUN_ON_CPU_TARGET_PTR(addr)); 302d9bb58e5SYang Zhong } 303d9bb58e5SYang Zhong } 304d9bb58e5SYang Zhong 305d9bb58e5SYang Zhong /* As we are going to hijack the bottom bits of the page address for a 306d9bb58e5SYang Zhong * mmuidx bit mask we need to fail to build if we can't do that 307d9bb58e5SYang Zhong */ 308d9bb58e5SYang Zhong QEMU_BUILD_BUG_ON(NB_MMU_MODES > TARGET_PAGE_BITS_MIN); 309d9bb58e5SYang Zhong 310d9bb58e5SYang Zhong static void tlb_flush_page_by_mmuidx_async_work(CPUState *cpu, 311d9bb58e5SYang Zhong run_on_cpu_data data) 312d9bb58e5SYang Zhong { 313d9bb58e5SYang Zhong CPUArchState *env = cpu->env_ptr; 314d9bb58e5SYang Zhong target_ulong addr_and_mmuidx = (target_ulong) data.target_ptr; 315d9bb58e5SYang Zhong target_ulong addr = addr_and_mmuidx & TARGET_PAGE_MASK; 316d9bb58e5SYang Zhong unsigned long mmu_idx_bitmap = addr_and_mmuidx & ALL_MMUIDX_BITS; 317d9bb58e5SYang Zhong int page = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); 318d9bb58e5SYang Zhong int mmu_idx; 319d9bb58e5SYang Zhong 320d9bb58e5SYang Zhong assert_cpu_is_self(cpu); 321d9bb58e5SYang Zhong 322d9bb58e5SYang Zhong tlb_debug("page:%d addr:"TARGET_FMT_lx" mmu_idx:0x%lx\n", 323d9bb58e5SYang Zhong page, addr, mmu_idx_bitmap); 324d9bb58e5SYang Zhong 325d9bb58e5SYang Zhong for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { 326d9bb58e5SYang Zhong if (test_bit(mmu_idx, &mmu_idx_bitmap)) { 327d9bb58e5SYang Zhong tlb_flush_entry(&env->tlb_table[mmu_idx][page], addr); 32868fea038SRichard Henderson tlb_flush_vtlb_page(env, mmu_idx, addr); 329d9bb58e5SYang Zhong } 330d9bb58e5SYang Zhong } 331d9bb58e5SYang Zhong 332d9bb58e5SYang 
Zhong tb_flush_jmp_cache(cpu, addr); 333d9bb58e5SYang Zhong } 334d9bb58e5SYang Zhong 335d9bb58e5SYang Zhong static void tlb_check_page_and_flush_by_mmuidx_async_work(CPUState *cpu, 336d9bb58e5SYang Zhong run_on_cpu_data data) 337d9bb58e5SYang Zhong { 338d9bb58e5SYang Zhong CPUArchState *env = cpu->env_ptr; 339d9bb58e5SYang Zhong target_ulong addr_and_mmuidx = (target_ulong) data.target_ptr; 340d9bb58e5SYang Zhong target_ulong addr = addr_and_mmuidx & TARGET_PAGE_MASK; 341d9bb58e5SYang Zhong unsigned long mmu_idx_bitmap = addr_and_mmuidx & ALL_MMUIDX_BITS; 342d9bb58e5SYang Zhong 343d9bb58e5SYang Zhong tlb_debug("addr:"TARGET_FMT_lx" mmu_idx: %04lx\n", addr, mmu_idx_bitmap); 344d9bb58e5SYang Zhong 345d9bb58e5SYang Zhong /* Check if we need to flush due to large pages. */ 346d9bb58e5SYang Zhong if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) { 347d9bb58e5SYang Zhong tlb_debug("forced full flush (" 348d9bb58e5SYang Zhong TARGET_FMT_lx "/" TARGET_FMT_lx ")\n", 349d9bb58e5SYang Zhong env->tlb_flush_addr, env->tlb_flush_mask); 350d9bb58e5SYang Zhong 351d9bb58e5SYang Zhong tlb_flush_by_mmuidx_async_work(cpu, 352d9bb58e5SYang Zhong RUN_ON_CPU_HOST_INT(mmu_idx_bitmap)); 353d9bb58e5SYang Zhong } else { 354d9bb58e5SYang Zhong tlb_flush_page_by_mmuidx_async_work(cpu, data); 355d9bb58e5SYang Zhong } 356d9bb58e5SYang Zhong } 357d9bb58e5SYang Zhong 358d9bb58e5SYang Zhong void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap) 359d9bb58e5SYang Zhong { 360d9bb58e5SYang Zhong target_ulong addr_and_mmu_idx; 361d9bb58e5SYang Zhong 362d9bb58e5SYang Zhong tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap); 363d9bb58e5SYang Zhong 364d9bb58e5SYang Zhong /* This should already be page aligned */ 365d9bb58e5SYang Zhong addr_and_mmu_idx = addr & TARGET_PAGE_MASK; 366d9bb58e5SYang Zhong addr_and_mmu_idx |= idxmap; 367d9bb58e5SYang Zhong 368d9bb58e5SYang Zhong if (!qemu_cpu_is_self(cpu)) { 369d9bb58e5SYang Zhong async_run_on_cpu(cpu, tlb_check_page_and_flush_by_mmuidx_async_work, 370d9bb58e5SYang Zhong RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx)); 371d9bb58e5SYang Zhong } else { 372d9bb58e5SYang Zhong tlb_check_page_and_flush_by_mmuidx_async_work( 373d9bb58e5SYang Zhong cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx)); 374d9bb58e5SYang Zhong } 375d9bb58e5SYang Zhong } 376d9bb58e5SYang Zhong 377d9bb58e5SYang Zhong void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr, 378d9bb58e5SYang Zhong uint16_t idxmap) 379d9bb58e5SYang Zhong { 380d9bb58e5SYang Zhong const run_on_cpu_func fn = tlb_check_page_and_flush_by_mmuidx_async_work; 381d9bb58e5SYang Zhong target_ulong addr_and_mmu_idx; 382d9bb58e5SYang Zhong 383d9bb58e5SYang Zhong tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap); 384d9bb58e5SYang Zhong 385d9bb58e5SYang Zhong /* This should already be page aligned */ 386d9bb58e5SYang Zhong addr_and_mmu_idx = addr & TARGET_PAGE_MASK; 387d9bb58e5SYang Zhong addr_and_mmu_idx |= idxmap; 388d9bb58e5SYang Zhong 389d9bb58e5SYang Zhong flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx)); 390d9bb58e5SYang Zhong fn(src_cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx)); 391d9bb58e5SYang Zhong } 392d9bb58e5SYang Zhong 393d9bb58e5SYang Zhong void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu, 394d9bb58e5SYang Zhong target_ulong addr, 395d9bb58e5SYang Zhong uint16_t idxmap) 396d9bb58e5SYang Zhong { 397d9bb58e5SYang Zhong const run_on_cpu_func fn = tlb_check_page_and_flush_by_mmuidx_async_work; 398d9bb58e5SYang Zhong 
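    /*
     * As in tlb_flush_page_by_mmuidx() above, the page-aligned address and
     * the mmuidx bitmap are packed into one target_ulong so that they fit
     * in a single run_on_cpu_data; the QEMU_BUILD_BUG_ON(NB_MMU_MODES >
     * TARGET_PAGE_BITS_MIN) earlier guarantees the low bits are free.
     * Purely illustrative example (12-bit pages, idxmap 0x3):
     *
     *     pack:    (0x7f001234 & TARGET_PAGE_MASK) | 0x3  ->  0x7f001003
     *     unpack:   0x7f001003 & TARGET_PAGE_MASK         ->  0x7f001000
     *               0x7f001003 & ALL_MMUIDX_BITS          ->  0x3
     *
     * The unpacking is what tlb_flush_page_by_mmuidx_async_work() does.
     */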
target_ulong addr_and_mmu_idx; 399d9bb58e5SYang Zhong 400d9bb58e5SYang Zhong tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap); 401d9bb58e5SYang Zhong 402d9bb58e5SYang Zhong /* This should already be page aligned */ 403d9bb58e5SYang Zhong addr_and_mmu_idx = addr & TARGET_PAGE_MASK; 404d9bb58e5SYang Zhong addr_and_mmu_idx |= idxmap; 405d9bb58e5SYang Zhong 406d9bb58e5SYang Zhong flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx)); 407d9bb58e5SYang Zhong async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx)); 408d9bb58e5SYang Zhong } 409d9bb58e5SYang Zhong 410d9bb58e5SYang Zhong void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr) 411d9bb58e5SYang Zhong { 412d9bb58e5SYang Zhong const run_on_cpu_func fn = tlb_flush_page_async_work; 413d9bb58e5SYang Zhong 414d9bb58e5SYang Zhong flush_all_helper(src, fn, RUN_ON_CPU_TARGET_PTR(addr)); 415d9bb58e5SYang Zhong fn(src, RUN_ON_CPU_TARGET_PTR(addr)); 416d9bb58e5SYang Zhong } 417d9bb58e5SYang Zhong 418d9bb58e5SYang Zhong void tlb_flush_page_all_cpus_synced(CPUState *src, 419d9bb58e5SYang Zhong target_ulong addr) 420d9bb58e5SYang Zhong { 421d9bb58e5SYang Zhong const run_on_cpu_func fn = tlb_flush_page_async_work; 422d9bb58e5SYang Zhong 423d9bb58e5SYang Zhong flush_all_helper(src, fn, RUN_ON_CPU_TARGET_PTR(addr)); 424d9bb58e5SYang Zhong async_safe_run_on_cpu(src, fn, RUN_ON_CPU_TARGET_PTR(addr)); 425d9bb58e5SYang Zhong } 426d9bb58e5SYang Zhong 427d9bb58e5SYang Zhong /* update the TLBs so that writes to code in the virtual page 'addr' 428d9bb58e5SYang Zhong can be detected */ 429d9bb58e5SYang Zhong void tlb_protect_code(ram_addr_t ram_addr) 430d9bb58e5SYang Zhong { 431d9bb58e5SYang Zhong cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE, 432d9bb58e5SYang Zhong DIRTY_MEMORY_CODE); 433d9bb58e5SYang Zhong } 434d9bb58e5SYang Zhong 435d9bb58e5SYang Zhong /* update the TLB so that writes in physical page 'phys_addr' are no longer 436d9bb58e5SYang Zhong tested for self modifying code */ 437d9bb58e5SYang Zhong void tlb_unprotect_code(ram_addr_t ram_addr) 438d9bb58e5SYang Zhong { 439d9bb58e5SYang Zhong cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE); 440d9bb58e5SYang Zhong } 441d9bb58e5SYang Zhong 442d9bb58e5SYang Zhong 443d9bb58e5SYang Zhong /* 444d9bb58e5SYang Zhong * Dirty write flag handling 445d9bb58e5SYang Zhong * 446d9bb58e5SYang Zhong * When the TCG code writes to a location it looks up the address in 447d9bb58e5SYang Zhong * the TLB and uses that data to compute the final address. If any of 448d9bb58e5SYang Zhong * the lower bits of the address are set then the slow path is forced. 449d9bb58e5SYang Zhong * There are a number of reasons to do this but for normal RAM the 450d9bb58e5SYang Zhong * most usual is detecting writes to code regions which may invalidate 451d9bb58e5SYang Zhong * generated code. 452d9bb58e5SYang Zhong * 453d9bb58e5SYang Zhong * Because we want other vCPUs to respond to changes straight away we 454d9bb58e5SYang Zhong * update the te->addr_write field atomically. If the TLB entry has 455d9bb58e5SYang Zhong * been changed by the vCPU in the mean time we skip the update. 456d9bb58e5SYang Zhong * 457d9bb58e5SYang Zhong * As this function uses atomic accesses we also need to ensure 458d9bb58e5SYang Zhong * updates to tlb_entries follow the same access rules. We don't need 459d9bb58e5SYang Zhong * to worry about this for oversized guests as MTTCG is disabled for 460d9bb58e5SYang Zhong * them. 
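 *
 * Purely illustrative sketch of the fast path (the real thing is emitted
 * by the TCG backend, this is not the generated code):
 *
 *   e = &env->tlb_table[mmu_idx][(addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1)];
 *   if (page bits of addr match e->addr_write and no flag bits are set) {
 *       direct host store at addr + e->addend;
 *   } else {
 *       out-of-line helper (refill, notdirty or I/O handling);
 *   }
 *
 * so OR-ing TLB_NOTDIRTY into addr_write is enough to send that vCPU's
 * next write to the page through the slow path.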
461d9bb58e5SYang Zhong */ 462d9bb58e5SYang Zhong 463d9bb58e5SYang Zhong static void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, uintptr_t start, 464d9bb58e5SYang Zhong uintptr_t length) 465d9bb58e5SYang Zhong { 466d9bb58e5SYang Zhong #if TCG_OVERSIZED_GUEST 467d9bb58e5SYang Zhong uintptr_t addr = tlb_entry->addr_write; 468d9bb58e5SYang Zhong 469d9bb58e5SYang Zhong if ((addr & (TLB_INVALID_MASK | TLB_MMIO | TLB_NOTDIRTY)) == 0) { 470d9bb58e5SYang Zhong addr &= TARGET_PAGE_MASK; 471d9bb58e5SYang Zhong addr += tlb_entry->addend; 472d9bb58e5SYang Zhong if ((addr - start) < length) { 473d9bb58e5SYang Zhong tlb_entry->addr_write |= TLB_NOTDIRTY; 474d9bb58e5SYang Zhong } 475d9bb58e5SYang Zhong } 476d9bb58e5SYang Zhong #else 477d9bb58e5SYang Zhong /* paired with atomic_mb_set in tlb_set_page_with_attrs */ 478d9bb58e5SYang Zhong uintptr_t orig_addr = atomic_mb_read(&tlb_entry->addr_write); 479d9bb58e5SYang Zhong uintptr_t addr = orig_addr; 480d9bb58e5SYang Zhong 481d9bb58e5SYang Zhong if ((addr & (TLB_INVALID_MASK | TLB_MMIO | TLB_NOTDIRTY)) == 0) { 482d9bb58e5SYang Zhong addr &= TARGET_PAGE_MASK; 483d9bb58e5SYang Zhong addr += atomic_read(&tlb_entry->addend); 484d9bb58e5SYang Zhong if ((addr - start) < length) { 485d9bb58e5SYang Zhong uintptr_t notdirty_addr = orig_addr | TLB_NOTDIRTY; 486d9bb58e5SYang Zhong atomic_cmpxchg(&tlb_entry->addr_write, orig_addr, notdirty_addr); 487d9bb58e5SYang Zhong } 488d9bb58e5SYang Zhong } 489d9bb58e5SYang Zhong #endif 490d9bb58e5SYang Zhong } 491d9bb58e5SYang Zhong 492d9bb58e5SYang Zhong /* For atomic correctness when running MTTCG we need to use the right 493d9bb58e5SYang Zhong * primitives when copying entries */ 494d9bb58e5SYang Zhong static inline void copy_tlb_helper(CPUTLBEntry *d, CPUTLBEntry *s, 495d9bb58e5SYang Zhong bool atomic_set) 496d9bb58e5SYang Zhong { 497d9bb58e5SYang Zhong #if TCG_OVERSIZED_GUEST 498d9bb58e5SYang Zhong *d = *s; 499d9bb58e5SYang Zhong #else 500d9bb58e5SYang Zhong if (atomic_set) { 501d9bb58e5SYang Zhong d->addr_read = s->addr_read; 502d9bb58e5SYang Zhong d->addr_code = s->addr_code; 503d9bb58e5SYang Zhong atomic_set(&d->addend, atomic_read(&s->addend)); 504d9bb58e5SYang Zhong /* Pairs with flag setting in tlb_reset_dirty_range */ 505d9bb58e5SYang Zhong atomic_mb_set(&d->addr_write, atomic_read(&s->addr_write)); 506d9bb58e5SYang Zhong } else { 507d9bb58e5SYang Zhong d->addr_read = s->addr_read; 508d9bb58e5SYang Zhong d->addr_write = atomic_read(&s->addr_write); 509d9bb58e5SYang Zhong d->addr_code = s->addr_code; 510d9bb58e5SYang Zhong d->addend = atomic_read(&s->addend); 511d9bb58e5SYang Zhong } 512d9bb58e5SYang Zhong #endif 513d9bb58e5SYang Zhong } 514d9bb58e5SYang Zhong 515d9bb58e5SYang Zhong /* This is a cross vCPU call (i.e. another vCPU resetting the flags of 516d9bb58e5SYang Zhong * the target vCPU). As such care needs to be taken that we don't 517d9bb58e5SYang Zhong * dangerously race with another vCPU update. The only thing actually 518d9bb58e5SYang Zhong * updated is the target TLB entry ->addr_write flags. 
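 *
 * Concretely (see tlb_reset_dirty_range below) the flag is OR-ed in with
 * an atomic_cmpxchg against the previously read value, so if the owning
 * vCPU changes the entry in the meantime the compare-exchange simply
 * fails instead of clobbering the new contents.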
519d9bb58e5SYang Zhong */ 520d9bb58e5SYang Zhong void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length) 521d9bb58e5SYang Zhong { 522d9bb58e5SYang Zhong CPUArchState *env; 523d9bb58e5SYang Zhong 524d9bb58e5SYang Zhong int mmu_idx; 525d9bb58e5SYang Zhong 526d9bb58e5SYang Zhong env = cpu->env_ptr; 527d9bb58e5SYang Zhong for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { 528d9bb58e5SYang Zhong unsigned int i; 529d9bb58e5SYang Zhong 530d9bb58e5SYang Zhong for (i = 0; i < CPU_TLB_SIZE; i++) { 531d9bb58e5SYang Zhong tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i], 532d9bb58e5SYang Zhong start1, length); 533d9bb58e5SYang Zhong } 534d9bb58e5SYang Zhong 535d9bb58e5SYang Zhong for (i = 0; i < CPU_VTLB_SIZE; i++) { 536d9bb58e5SYang Zhong tlb_reset_dirty_range(&env->tlb_v_table[mmu_idx][i], 537d9bb58e5SYang Zhong start1, length); 538d9bb58e5SYang Zhong } 539d9bb58e5SYang Zhong } 540d9bb58e5SYang Zhong } 541d9bb58e5SYang Zhong 542d9bb58e5SYang Zhong static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr) 543d9bb58e5SYang Zhong { 544d9bb58e5SYang Zhong if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) { 545d9bb58e5SYang Zhong tlb_entry->addr_write = vaddr; 546d9bb58e5SYang Zhong } 547d9bb58e5SYang Zhong } 548d9bb58e5SYang Zhong 549d9bb58e5SYang Zhong /* update the TLB corresponding to virtual page vaddr 550d9bb58e5SYang Zhong so that it is no longer dirty */ 551d9bb58e5SYang Zhong void tlb_set_dirty(CPUState *cpu, target_ulong vaddr) 552d9bb58e5SYang Zhong { 553d9bb58e5SYang Zhong CPUArchState *env = cpu->env_ptr; 554d9bb58e5SYang Zhong int i; 555d9bb58e5SYang Zhong int mmu_idx; 556d9bb58e5SYang Zhong 557d9bb58e5SYang Zhong assert_cpu_is_self(cpu); 558d9bb58e5SYang Zhong 559d9bb58e5SYang Zhong vaddr &= TARGET_PAGE_MASK; 560d9bb58e5SYang Zhong i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); 561d9bb58e5SYang Zhong for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { 562d9bb58e5SYang Zhong tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr); 563d9bb58e5SYang Zhong } 564d9bb58e5SYang Zhong 565d9bb58e5SYang Zhong for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { 566d9bb58e5SYang Zhong int k; 567d9bb58e5SYang Zhong for (k = 0; k < CPU_VTLB_SIZE; k++) { 568d9bb58e5SYang Zhong tlb_set_dirty1(&env->tlb_v_table[mmu_idx][k], vaddr); 569d9bb58e5SYang Zhong } 570d9bb58e5SYang Zhong } 571d9bb58e5SYang Zhong } 572d9bb58e5SYang Zhong 573d9bb58e5SYang Zhong /* Our TLB does not support large pages, so remember the area covered by 574d9bb58e5SYang Zhong large pages and trigger a full TLB flush if these are invalidated. */ 575d9bb58e5SYang Zhong static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr, 576d9bb58e5SYang Zhong target_ulong size) 577d9bb58e5SYang Zhong { 578d9bb58e5SYang Zhong target_ulong mask = ~(size - 1); 579d9bb58e5SYang Zhong 580d9bb58e5SYang Zhong if (env->tlb_flush_addr == (target_ulong)-1) { 581d9bb58e5SYang Zhong env->tlb_flush_addr = vaddr & mask; 582d9bb58e5SYang Zhong env->tlb_flush_mask = mask; 583d9bb58e5SYang Zhong return; 584d9bb58e5SYang Zhong } 585d9bb58e5SYang Zhong /* Extend the existing region to include the new page. 586d9bb58e5SYang Zhong This is a compromise between unnecessary flushes and the cost 587d9bb58e5SYang Zhong of maintaining a full variable size TLB. 
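   For example (numbers purely illustrative): with a 2MB page already
   recorded at 0x40000000 (mask 0x...ffe00000) and another 2MB page added
   at 0x40400000, the loop below widens the mask
       0x...ffe00000 -> 0x...ffc00000 -> 0x...ff800000
   leaving tlb_flush_addr = 0x40000000, i.e. an 8MB region covering both
   pages.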
*/ 588d9bb58e5SYang Zhong mask &= env->tlb_flush_mask; 589d9bb58e5SYang Zhong while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) { 590d9bb58e5SYang Zhong mask <<= 1; 591d9bb58e5SYang Zhong } 592d9bb58e5SYang Zhong env->tlb_flush_addr &= mask; 593d9bb58e5SYang Zhong env->tlb_flush_mask = mask; 594d9bb58e5SYang Zhong } 595d9bb58e5SYang Zhong 596d9bb58e5SYang Zhong /* Add a new TLB entry. At most one entry for a given virtual address 597d9bb58e5SYang Zhong * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the 598d9bb58e5SYang Zhong * supplied size is only used by tlb_flush_page. 599d9bb58e5SYang Zhong * 600d9bb58e5SYang Zhong * Called from TCG-generated code, which is under an RCU read-side 601d9bb58e5SYang Zhong * critical section. 602d9bb58e5SYang Zhong */ 603d9bb58e5SYang Zhong void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr, 604d9bb58e5SYang Zhong hwaddr paddr, MemTxAttrs attrs, int prot, 605d9bb58e5SYang Zhong int mmu_idx, target_ulong size) 606d9bb58e5SYang Zhong { 607d9bb58e5SYang Zhong CPUArchState *env = cpu->env_ptr; 608d9bb58e5SYang Zhong MemoryRegionSection *section; 609d9bb58e5SYang Zhong unsigned int index; 610d9bb58e5SYang Zhong target_ulong address; 611d9bb58e5SYang Zhong target_ulong code_address; 612d9bb58e5SYang Zhong uintptr_t addend; 61368fea038SRichard Henderson CPUTLBEntry *te, tn; 61455df6fcfSPeter Maydell hwaddr iotlb, xlat, sz, paddr_page; 61555df6fcfSPeter Maydell target_ulong vaddr_page; 616d9bb58e5SYang Zhong int asidx = cpu_asidx_from_attrs(cpu, attrs); 617d9bb58e5SYang Zhong 618d9bb58e5SYang Zhong assert_cpu_is_self(cpu); 61955df6fcfSPeter Maydell 62055df6fcfSPeter Maydell if (size < TARGET_PAGE_SIZE) { 62155df6fcfSPeter Maydell sz = TARGET_PAGE_SIZE; 62255df6fcfSPeter Maydell } else { 62355df6fcfSPeter Maydell if (size > TARGET_PAGE_SIZE) { 624d9bb58e5SYang Zhong tlb_add_large_page(env, vaddr, size); 625d9bb58e5SYang Zhong } 626d9bb58e5SYang Zhong sz = size; 62755df6fcfSPeter Maydell } 62855df6fcfSPeter Maydell vaddr_page = vaddr & TARGET_PAGE_MASK; 62955df6fcfSPeter Maydell paddr_page = paddr & TARGET_PAGE_MASK; 63055df6fcfSPeter Maydell 63155df6fcfSPeter Maydell section = address_space_translate_for_iotlb(cpu, asidx, paddr_page, 63255df6fcfSPeter Maydell &xlat, &sz, attrs, &prot); 633d9bb58e5SYang Zhong assert(sz >= TARGET_PAGE_SIZE); 634d9bb58e5SYang Zhong 635d9bb58e5SYang Zhong tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx 636d9bb58e5SYang Zhong " prot=%x idx=%d\n", 637d9bb58e5SYang Zhong vaddr, paddr, prot, mmu_idx); 638d9bb58e5SYang Zhong 63955df6fcfSPeter Maydell address = vaddr_page; 64055df6fcfSPeter Maydell if (size < TARGET_PAGE_SIZE) { 64155df6fcfSPeter Maydell /* 64255df6fcfSPeter Maydell * Slow-path the TLB entries; we will repeat the MMU check and TLB 64355df6fcfSPeter Maydell * fill on every access. 64455df6fcfSPeter Maydell */ 64555df6fcfSPeter Maydell address |= TLB_RECHECK; 64655df6fcfSPeter Maydell } 64755df6fcfSPeter Maydell if (!memory_region_is_ram(section->mr) && 64855df6fcfSPeter Maydell !memory_region_is_romd(section->mr)) { 649d9bb58e5SYang Zhong /* IO memory case */ 650d9bb58e5SYang Zhong address |= TLB_MMIO; 651d9bb58e5SYang Zhong addend = 0; 652d9bb58e5SYang Zhong } else { 653d9bb58e5SYang Zhong /* TLB_MMIO for rom/romd handled below */ 654d9bb58e5SYang Zhong addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat; 655d9bb58e5SYang Zhong } 656d9bb58e5SYang Zhong 65768fea038SRichard Henderson /* Make sure there's no cached translation for the new page. 
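       If an entry for vaddr_page was evicted into the victim TLB earlier,
       it could later be swapped back in and shadow the translation being
       installed here, so the victim slots for this page are purged first.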
*/ 65868fea038SRichard Henderson tlb_flush_vtlb_page(env, mmu_idx, vaddr_page); 65968fea038SRichard Henderson 660d9bb58e5SYang Zhong code_address = address; 66155df6fcfSPeter Maydell iotlb = memory_region_section_get_iotlb(cpu, section, vaddr_page, 66255df6fcfSPeter Maydell paddr_page, xlat, prot, &address); 663d9bb58e5SYang Zhong 66455df6fcfSPeter Maydell index = (vaddr_page >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); 665d9bb58e5SYang Zhong te = &env->tlb_table[mmu_idx][index]; 666d9bb58e5SYang Zhong 66768fea038SRichard Henderson /* 66868fea038SRichard Henderson * Only evict the old entry to the victim tlb if it's for a 66968fea038SRichard Henderson * different page; otherwise just overwrite the stale data. 67068fea038SRichard Henderson */ 67168fea038SRichard Henderson if (!tlb_hit_page_anyprot(te, vaddr_page)) { 67268fea038SRichard Henderson unsigned vidx = env->vtlb_index++ % CPU_VTLB_SIZE; 67368fea038SRichard Henderson CPUTLBEntry *tv = &env->tlb_v_table[mmu_idx][vidx]; 67468fea038SRichard Henderson 67568fea038SRichard Henderson /* Evict the old entry into the victim tlb. */ 676d9bb58e5SYang Zhong copy_tlb_helper(tv, te, true); 677d9bb58e5SYang Zhong env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index]; 67868fea038SRichard Henderson } 679d9bb58e5SYang Zhong 680d9bb58e5SYang Zhong /* refill the tlb */ 681ace41090SPeter Maydell /* 682ace41090SPeter Maydell * At this point iotlb contains a physical section number in the lower 683ace41090SPeter Maydell * TARGET_PAGE_BITS, and either 684ace41090SPeter Maydell * + the ram_addr_t of the page base of the target RAM (if NOTDIRTY or ROM) 685ace41090SPeter Maydell * + the offset within section->mr of the page base (otherwise) 68655df6fcfSPeter Maydell * We subtract the vaddr_page (which is page aligned and thus won't 687ace41090SPeter Maydell * disturb the low bits) to give an offset which can be added to the 688ace41090SPeter Maydell * (non-page-aligned) vaddr of the eventual memory access to get 689ace41090SPeter Maydell * the MemoryRegion offset for the access. Note that the vaddr we 690ace41090SPeter Maydell * subtract here is that of the page base, and not the same as the 691ace41090SPeter Maydell * vaddr we add back in io_readx()/io_writex()/get_page_addr_code(). 692ace41090SPeter Maydell */ 69355df6fcfSPeter Maydell env->iotlb[mmu_idx][index].addr = iotlb - vaddr_page; 694d9bb58e5SYang Zhong env->iotlb[mmu_idx][index].attrs = attrs; 695d9bb58e5SYang Zhong 696d9bb58e5SYang Zhong /* Now calculate the new entry */ 69755df6fcfSPeter Maydell tn.addend = addend - vaddr_page; 698d9bb58e5SYang Zhong if (prot & PAGE_READ) { 699d9bb58e5SYang Zhong tn.addr_read = address; 700d9bb58e5SYang Zhong } else { 701d9bb58e5SYang Zhong tn.addr_read = -1; 702d9bb58e5SYang Zhong } 703d9bb58e5SYang Zhong 704d9bb58e5SYang Zhong if (prot & PAGE_EXEC) { 705d9bb58e5SYang Zhong tn.addr_code = code_address; 706d9bb58e5SYang Zhong } else { 707d9bb58e5SYang Zhong tn.addr_code = -1; 708d9bb58e5SYang Zhong } 709d9bb58e5SYang Zhong 710d9bb58e5SYang Zhong tn.addr_write = -1; 711d9bb58e5SYang Zhong if (prot & PAGE_WRITE) { 712d9bb58e5SYang Zhong if ((memory_region_is_ram(section->mr) && section->readonly) 713d9bb58e5SYang Zhong || memory_region_is_romd(section->mr)) { 714d9bb58e5SYang Zhong /* Write access calls the I/O callback. 
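           How addr_write ends up encoded by the branches below:
             - ROM/romd or read-only RAM:  address | TLB_MMIO (always slow path)
             - clean RAM:                  address | TLB_NOTDIRTY (first write
                                           is trapped, e.g. to invalidate TBs)
             - dirty RAM:                  plain address (fast path)
           PAGE_WRITE_INV additionally ORs in TLB_INVALID_MASK, marking the
           entry invalid for subsequent lookups.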
*/ 715d9bb58e5SYang Zhong tn.addr_write = address | TLB_MMIO; 716d9bb58e5SYang Zhong } else if (memory_region_is_ram(section->mr) 717d9bb58e5SYang Zhong && cpu_physical_memory_is_clean( 718d9bb58e5SYang Zhong memory_region_get_ram_addr(section->mr) + xlat)) { 719d9bb58e5SYang Zhong tn.addr_write = address | TLB_NOTDIRTY; 720d9bb58e5SYang Zhong } else { 721d9bb58e5SYang Zhong tn.addr_write = address; 722d9bb58e5SYang Zhong } 723f52bfb12SDavid Hildenbrand if (prot & PAGE_WRITE_INV) { 724f52bfb12SDavid Hildenbrand tn.addr_write |= TLB_INVALID_MASK; 725f52bfb12SDavid Hildenbrand } 726d9bb58e5SYang Zhong } 727d9bb58e5SYang Zhong 728d9bb58e5SYang Zhong /* Pairs with flag setting in tlb_reset_dirty_range */ 729d9bb58e5SYang Zhong copy_tlb_helper(te, &tn, true); 730d9bb58e5SYang Zhong /* atomic_mb_set(&te->addr_write, write_address); */ 731d9bb58e5SYang Zhong } 732d9bb58e5SYang Zhong 733d9bb58e5SYang Zhong /* Add a new TLB entry, but without specifying the memory 734d9bb58e5SYang Zhong * transaction attributes to be used. 735d9bb58e5SYang Zhong */ 736d9bb58e5SYang Zhong void tlb_set_page(CPUState *cpu, target_ulong vaddr, 737d9bb58e5SYang Zhong hwaddr paddr, int prot, 738d9bb58e5SYang Zhong int mmu_idx, target_ulong size) 739d9bb58e5SYang Zhong { 740d9bb58e5SYang Zhong tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED, 741d9bb58e5SYang Zhong prot, mmu_idx, size); 742d9bb58e5SYang Zhong } 743d9bb58e5SYang Zhong 744d9bb58e5SYang Zhong static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr) 745d9bb58e5SYang Zhong { 746d9bb58e5SYang Zhong ram_addr_t ram_addr; 747d9bb58e5SYang Zhong 748d9bb58e5SYang Zhong ram_addr = qemu_ram_addr_from_host(ptr); 749d9bb58e5SYang Zhong if (ram_addr == RAM_ADDR_INVALID) { 750d9bb58e5SYang Zhong error_report("Bad ram pointer %p", ptr); 751d9bb58e5SYang Zhong abort(); 752d9bb58e5SYang Zhong } 753d9bb58e5SYang Zhong return ram_addr; 754d9bb58e5SYang Zhong } 755d9bb58e5SYang Zhong 756d9bb58e5SYang Zhong static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry, 75704e3aabdSPeter Maydell int mmu_idx, 75855df6fcfSPeter Maydell target_ulong addr, uintptr_t retaddr, 759dbea78a4SPeter Maydell bool recheck, MMUAccessType access_type, int size) 760d9bb58e5SYang Zhong { 761d9bb58e5SYang Zhong CPUState *cpu = ENV_GET_CPU(env); 7622d54f194SPeter Maydell hwaddr mr_offset; 7632d54f194SPeter Maydell MemoryRegionSection *section; 7642d54f194SPeter Maydell MemoryRegion *mr; 765d9bb58e5SYang Zhong uint64_t val; 766d9bb58e5SYang Zhong bool locked = false; 76704e3aabdSPeter Maydell MemTxResult r; 768d9bb58e5SYang Zhong 76955df6fcfSPeter Maydell if (recheck) { 77055df6fcfSPeter Maydell /* 77155df6fcfSPeter Maydell * This is a TLB_RECHECK access, where the MMU protection 77255df6fcfSPeter Maydell * covers a smaller range than a target page, and we must 77355df6fcfSPeter Maydell * repeat the MMU check here. This tlb_fill() call might 77455df6fcfSPeter Maydell * longjump out if this access should cause a guest exception. 
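         *
         * The code below refills the entry and, if the refreshed
         * addr_read carries no flag bits other than TLB_RECHECK, the
         * access turned out to be plain RAM and is satisfied directly
         * with ldn_p() on the host address; anything else falls through
         * to the memory_region_dispatch_read() path further down.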
77555df6fcfSPeter Maydell */ 77655df6fcfSPeter Maydell int index; 77755df6fcfSPeter Maydell target_ulong tlb_addr; 77855df6fcfSPeter Maydell 77955df6fcfSPeter Maydell tlb_fill(cpu, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr); 78055df6fcfSPeter Maydell 78155df6fcfSPeter Maydell index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); 78255df6fcfSPeter Maydell tlb_addr = env->tlb_table[mmu_idx][index].addr_read; 78355df6fcfSPeter Maydell if (!(tlb_addr & ~(TARGET_PAGE_MASK | TLB_RECHECK))) { 78455df6fcfSPeter Maydell /* RAM access */ 78555df6fcfSPeter Maydell uintptr_t haddr = addr + env->tlb_table[mmu_idx][index].addend; 78655df6fcfSPeter Maydell 78755df6fcfSPeter Maydell return ldn_p((void *)haddr, size); 78855df6fcfSPeter Maydell } 78955df6fcfSPeter Maydell /* Fall through for handling IO accesses */ 79055df6fcfSPeter Maydell } 79155df6fcfSPeter Maydell 7922d54f194SPeter Maydell section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs); 7932d54f194SPeter Maydell mr = section->mr; 7942d54f194SPeter Maydell mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr; 795d9bb58e5SYang Zhong cpu->mem_io_pc = retaddr; 796d9bb58e5SYang Zhong if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) { 797d9bb58e5SYang Zhong cpu_io_recompile(cpu, retaddr); 798d9bb58e5SYang Zhong } 799d9bb58e5SYang Zhong 800d9bb58e5SYang Zhong cpu->mem_io_vaddr = addr; 801dbea78a4SPeter Maydell cpu->mem_io_access_type = access_type; 802d9bb58e5SYang Zhong 8038b812533SAlex Bennée if (mr->global_locking && !qemu_mutex_iothread_locked()) { 804d9bb58e5SYang Zhong qemu_mutex_lock_iothread(); 805d9bb58e5SYang Zhong locked = true; 806d9bb58e5SYang Zhong } 8072d54f194SPeter Maydell r = memory_region_dispatch_read(mr, mr_offset, 80804e3aabdSPeter Maydell &val, size, iotlbentry->attrs); 80904e3aabdSPeter Maydell if (r != MEMTX_OK) { 8102d54f194SPeter Maydell hwaddr physaddr = mr_offset + 8112d54f194SPeter Maydell section->offset_within_address_space - 8122d54f194SPeter Maydell section->offset_within_region; 8132d54f194SPeter Maydell 814dbea78a4SPeter Maydell cpu_transaction_failed(cpu, physaddr, addr, size, access_type, 81504e3aabdSPeter Maydell mmu_idx, iotlbentry->attrs, r, retaddr); 81604e3aabdSPeter Maydell } 817d9bb58e5SYang Zhong if (locked) { 818d9bb58e5SYang Zhong qemu_mutex_unlock_iothread(); 819d9bb58e5SYang Zhong } 820d9bb58e5SYang Zhong 821d9bb58e5SYang Zhong return val; 822d9bb58e5SYang Zhong } 823d9bb58e5SYang Zhong 824d9bb58e5SYang Zhong static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry, 82504e3aabdSPeter Maydell int mmu_idx, 826d9bb58e5SYang Zhong uint64_t val, target_ulong addr, 82755df6fcfSPeter Maydell uintptr_t retaddr, bool recheck, int size) 828d9bb58e5SYang Zhong { 829d9bb58e5SYang Zhong CPUState *cpu = ENV_GET_CPU(env); 8302d54f194SPeter Maydell hwaddr mr_offset; 8312d54f194SPeter Maydell MemoryRegionSection *section; 8322d54f194SPeter Maydell MemoryRegion *mr; 833d9bb58e5SYang Zhong bool locked = false; 83404e3aabdSPeter Maydell MemTxResult r; 835d9bb58e5SYang Zhong 83655df6fcfSPeter Maydell if (recheck) { 83755df6fcfSPeter Maydell /* 83855df6fcfSPeter Maydell * This is a TLB_RECHECK access, where the MMU protection 83955df6fcfSPeter Maydell * covers a smaller range than a target page, and we must 84055df6fcfSPeter Maydell * repeat the MMU check here. This tlb_fill() call might 84155df6fcfSPeter Maydell * longjump out if this access should cause a guest exception. 
84255df6fcfSPeter Maydell */ 84355df6fcfSPeter Maydell int index; 84455df6fcfSPeter Maydell target_ulong tlb_addr; 84555df6fcfSPeter Maydell 84655df6fcfSPeter Maydell tlb_fill(cpu, addr, size, MMU_DATA_STORE, mmu_idx, retaddr); 84755df6fcfSPeter Maydell 84855df6fcfSPeter Maydell index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); 84955df6fcfSPeter Maydell tlb_addr = env->tlb_table[mmu_idx][index].addr_write; 85055df6fcfSPeter Maydell if (!(tlb_addr & ~(TARGET_PAGE_MASK | TLB_RECHECK))) { 85155df6fcfSPeter Maydell /* RAM access */ 85255df6fcfSPeter Maydell uintptr_t haddr = addr + env->tlb_table[mmu_idx][index].addend; 85355df6fcfSPeter Maydell 85455df6fcfSPeter Maydell stn_p((void *)haddr, size, val); 85555df6fcfSPeter Maydell return; 85655df6fcfSPeter Maydell } 85755df6fcfSPeter Maydell /* Fall through for handling IO accesses */ 85855df6fcfSPeter Maydell } 85955df6fcfSPeter Maydell 8602d54f194SPeter Maydell section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs); 8612d54f194SPeter Maydell mr = section->mr; 8622d54f194SPeter Maydell mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr; 863d9bb58e5SYang Zhong if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) { 864d9bb58e5SYang Zhong cpu_io_recompile(cpu, retaddr); 865d9bb58e5SYang Zhong } 866d9bb58e5SYang Zhong cpu->mem_io_vaddr = addr; 867d9bb58e5SYang Zhong cpu->mem_io_pc = retaddr; 868d9bb58e5SYang Zhong 8698b812533SAlex Bennée if (mr->global_locking && !qemu_mutex_iothread_locked()) { 870d9bb58e5SYang Zhong qemu_mutex_lock_iothread(); 871d9bb58e5SYang Zhong locked = true; 872d9bb58e5SYang Zhong } 8732d54f194SPeter Maydell r = memory_region_dispatch_write(mr, mr_offset, 87404e3aabdSPeter Maydell val, size, iotlbentry->attrs); 87504e3aabdSPeter Maydell if (r != MEMTX_OK) { 8762d54f194SPeter Maydell hwaddr physaddr = mr_offset + 8772d54f194SPeter Maydell section->offset_within_address_space - 8782d54f194SPeter Maydell section->offset_within_region; 8792d54f194SPeter Maydell 88004e3aabdSPeter Maydell cpu_transaction_failed(cpu, physaddr, addr, size, MMU_DATA_STORE, 88104e3aabdSPeter Maydell mmu_idx, iotlbentry->attrs, r, retaddr); 88204e3aabdSPeter Maydell } 883d9bb58e5SYang Zhong if (locked) { 884d9bb58e5SYang Zhong qemu_mutex_unlock_iothread(); 885d9bb58e5SYang Zhong } 886d9bb58e5SYang Zhong } 887d9bb58e5SYang Zhong 888d9bb58e5SYang Zhong /* Return true if ADDR is present in the victim tlb, and has been copied 889d9bb58e5SYang Zhong back to the main tlb. */ 890d9bb58e5SYang Zhong static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index, 891d9bb58e5SYang Zhong size_t elt_ofs, target_ulong page) 892d9bb58e5SYang Zhong { 893d9bb58e5SYang Zhong size_t vidx; 894d9bb58e5SYang Zhong for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) { 895d9bb58e5SYang Zhong CPUTLBEntry *vtlb = &env->tlb_v_table[mmu_idx][vidx]; 896d9bb58e5SYang Zhong target_ulong cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs); 897d9bb58e5SYang Zhong 898d9bb58e5SYang Zhong if (cmp == page) { 899d9bb58e5SYang Zhong /* Found entry in victim tlb, swap tlb and iotlb. 
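               The three copy_tlb_helper() calls below rotate the entries
               through a stack temporary; the copies into the live table
               slots use the atomic variant so the cross-vCPU flag update
               in tlb_reset_dirty_range() always sees a consistent
               addr_write.  The matching CPUIOTLBEntry pair is swapped as
               well so iotlb data stays attached to its TLB entry.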
*/ 900d9bb58e5SYang Zhong CPUTLBEntry tmptlb, *tlb = &env->tlb_table[mmu_idx][index]; 901d9bb58e5SYang Zhong 902d9bb58e5SYang Zhong copy_tlb_helper(&tmptlb, tlb, false); 903d9bb58e5SYang Zhong copy_tlb_helper(tlb, vtlb, true); 904d9bb58e5SYang Zhong copy_tlb_helper(vtlb, &tmptlb, true); 905d9bb58e5SYang Zhong 906d9bb58e5SYang Zhong CPUIOTLBEntry tmpio, *io = &env->iotlb[mmu_idx][index]; 907d9bb58e5SYang Zhong CPUIOTLBEntry *vio = &env->iotlb_v[mmu_idx][vidx]; 908d9bb58e5SYang Zhong tmpio = *io; *io = *vio; *vio = tmpio; 909d9bb58e5SYang Zhong return true; 910d9bb58e5SYang Zhong } 911d9bb58e5SYang Zhong } 912d9bb58e5SYang Zhong return false; 913d9bb58e5SYang Zhong } 914d9bb58e5SYang Zhong 915d9bb58e5SYang Zhong /* Macro to call the above, with local variables from the use context. */ 916d9bb58e5SYang Zhong #define VICTIM_TLB_HIT(TY, ADDR) \ 917d9bb58e5SYang Zhong victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \ 918d9bb58e5SYang Zhong (ADDR) & TARGET_PAGE_MASK) 919d9bb58e5SYang Zhong 920f2553f04SKONRAD Frederic /* NOTE: this function can trigger an exception */ 921f2553f04SKONRAD Frederic /* NOTE2: the returned address is not exactly the physical address: it 922f2553f04SKONRAD Frederic * is actually a ram_addr_t (in system mode; the user mode emulation 923f2553f04SKONRAD Frederic * version of this function returns a guest virtual address). 924f2553f04SKONRAD Frederic */ 925f2553f04SKONRAD Frederic tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr) 926f2553f04SKONRAD Frederic { 9272d54f194SPeter Maydell int mmu_idx, index; 928f2553f04SKONRAD Frederic void *p; 929f2553f04SKONRAD Frederic 930f2553f04SKONRAD Frederic index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); 931f2553f04SKONRAD Frederic mmu_idx = cpu_mmu_index(env, true); 932e4c967a7SPeter Maydell if (unlikely(!tlb_hit(env->tlb_table[mmu_idx][index].addr_code, addr))) { 933b493ccf1SPeter Maydell if (!VICTIM_TLB_HIT(addr_code, addr)) { 93498670d47SLaurent Vivier tlb_fill(ENV_GET_CPU(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0); 93571b9a453SKONRAD Frederic } 9363474c98aSPeter Maydell assert(tlb_hit(env->tlb_table[mmu_idx][index].addr_code, addr)); 937f2553f04SKONRAD Frederic } 93855df6fcfSPeter Maydell 939*55a7cb14SPeter Maydell if (unlikely(env->tlb_table[mmu_idx][index].addr_code & 940*55a7cb14SPeter Maydell (TLB_RECHECK | TLB_MMIO))) { 94155df6fcfSPeter Maydell /* 942*55a7cb14SPeter Maydell * Return -1 if we can't translate and execute from an entire 943*55a7cb14SPeter Maydell * page of RAM here, which will cause us to execute by loading 944*55a7cb14SPeter Maydell * and translating one insn at a time, without caching: 945*55a7cb14SPeter Maydell * - TLB_RECHECK: means the MMU protection covers a smaller range 946*55a7cb14SPeter Maydell * than a target page, so we must redo the MMU check every insn 947*55a7cb14SPeter Maydell * - TLB_MMIO: region is not backed by RAM 94855df6fcfSPeter Maydell */ 94920cb6ae4SPeter Maydell return -1; 95055df6fcfSPeter Maydell } 95155df6fcfSPeter Maydell 952f2553f04SKONRAD Frederic p = (void *)((uintptr_t)addr + env->tlb_table[mmu_idx][index].addend); 953f2553f04SKONRAD Frederic return qemu_ram_addr_from_host_nofail(p); 954f2553f04SKONRAD Frederic } 955f2553f04SKONRAD Frederic 956d9bb58e5SYang Zhong /* Probe for whether the specified guest write access is permitted. 957d9bb58e5SYang Zhong * If it is not permitted then an exception will be taken in the same 958d9bb58e5SYang Zhong * way as if this were a real write access (and we will not return). 
959d9bb58e5SYang Zhong * Otherwise the function will return, and there will be a valid 960d9bb58e5SYang Zhong * entry in the TLB for this access. 961d9bb58e5SYang Zhong */ 96298670d47SLaurent Vivier void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx, 963d9bb58e5SYang Zhong uintptr_t retaddr) 964d9bb58e5SYang Zhong { 965d9bb58e5SYang Zhong int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); 966d9bb58e5SYang Zhong target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write; 967d9bb58e5SYang Zhong 968334692bcSPeter Maydell if (!tlb_hit(tlb_addr, addr)) { 969d9bb58e5SYang Zhong /* TLB entry is for a different page */ 970d9bb58e5SYang Zhong if (!VICTIM_TLB_HIT(addr_write, addr)) { 97198670d47SLaurent Vivier tlb_fill(ENV_GET_CPU(env), addr, size, MMU_DATA_STORE, 97298670d47SLaurent Vivier mmu_idx, retaddr); 973d9bb58e5SYang Zhong } 974d9bb58e5SYang Zhong } 975d9bb58e5SYang Zhong } 976d9bb58e5SYang Zhong 977d9bb58e5SYang Zhong /* Probe for a read-modify-write atomic operation. Do not allow unaligned 978d9bb58e5SYang Zhong * operations, or io operations to proceed. Return the host address. */ 979d9bb58e5SYang Zhong static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, 98034d49937SPeter Maydell TCGMemOpIdx oi, uintptr_t retaddr, 98134d49937SPeter Maydell NotDirtyInfo *ndi) 982d9bb58e5SYang Zhong { 983d9bb58e5SYang Zhong size_t mmu_idx = get_mmuidx(oi); 984d9bb58e5SYang Zhong size_t index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); 985d9bb58e5SYang Zhong CPUTLBEntry *tlbe = &env->tlb_table[mmu_idx][index]; 986d9bb58e5SYang Zhong target_ulong tlb_addr = tlbe->addr_write; 987d9bb58e5SYang Zhong TCGMemOp mop = get_memop(oi); 988d9bb58e5SYang Zhong int a_bits = get_alignment_bits(mop); 989d9bb58e5SYang Zhong int s_bits = mop & MO_SIZE; 99034d49937SPeter Maydell void *hostaddr; 991d9bb58e5SYang Zhong 992d9bb58e5SYang Zhong /* Adjust the given return address. */ 993d9bb58e5SYang Zhong retaddr -= GETPC_ADJ; 994d9bb58e5SYang Zhong 995d9bb58e5SYang Zhong /* Enforce guest required alignment. */ 996d9bb58e5SYang Zhong if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) { 997d9bb58e5SYang Zhong /* ??? Maybe indicate atomic op to cpu_unaligned_access */ 998d9bb58e5SYang Zhong cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE, 999d9bb58e5SYang Zhong mmu_idx, retaddr); 1000d9bb58e5SYang Zhong } 1001d9bb58e5SYang Zhong 1002d9bb58e5SYang Zhong /* Enforce qemu required alignment. */ 1003d9bb58e5SYang Zhong if (unlikely(addr & ((1 << s_bits) - 1))) { 1004d9bb58e5SYang Zhong /* We get here if guest alignment was not requested, 1005d9bb58e5SYang Zhong or was not enforced by cpu_unaligned_access above. 1006d9bb58e5SYang Zhong We might widen the access and emulate, but for now 1007d9bb58e5SYang Zhong mark an exception and exit the cpu loop. */ 1008d9bb58e5SYang Zhong goto stop_the_world; 1009d9bb58e5SYang Zhong } 1010d9bb58e5SYang Zhong 1011d9bb58e5SYang Zhong /* Check TLB entry and enforce page permissions. 
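       On a miss the entry is refilled for MMU_DATA_STORE; TLB_MMIO and
       TLB_RECHECK pages are rejected below via stop_the_world, the read
       permission is probed too so an atomic RMW on a write-only page
       faults like a load, and for TLB_NOTDIRTY pages the notdirty
       bookkeeping is prepared before the host pointer is returned.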
*/ 1012334692bcSPeter Maydell if (!tlb_hit(tlb_addr, addr)) { 1013d9bb58e5SYang Zhong if (!VICTIM_TLB_HIT(addr_write, addr)) { 101498670d47SLaurent Vivier tlb_fill(ENV_GET_CPU(env), addr, 1 << s_bits, MMU_DATA_STORE, 101598670d47SLaurent Vivier mmu_idx, retaddr); 1016d9bb58e5SYang Zhong } 1017f52bfb12SDavid Hildenbrand tlb_addr = tlbe->addr_write & ~TLB_INVALID_MASK; 1018d9bb58e5SYang Zhong } 1019d9bb58e5SYang Zhong 102055df6fcfSPeter Maydell /* Notice an IO access or a needs-MMU-lookup access */ 102155df6fcfSPeter Maydell if (unlikely(tlb_addr & (TLB_MMIO | TLB_RECHECK))) { 1022d9bb58e5SYang Zhong /* There's really nothing that can be done to 1023d9bb58e5SYang Zhong support this apart from stop-the-world. */ 1024d9bb58e5SYang Zhong goto stop_the_world; 1025d9bb58e5SYang Zhong } 1026d9bb58e5SYang Zhong 1027d9bb58e5SYang Zhong /* Let the guest notice RMW on a write-only page. */ 102834d49937SPeter Maydell if (unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) { 102998670d47SLaurent Vivier tlb_fill(ENV_GET_CPU(env), addr, 1 << s_bits, MMU_DATA_LOAD, 103098670d47SLaurent Vivier mmu_idx, retaddr); 1031d9bb58e5SYang Zhong /* Since we don't support reads and writes to different addresses, 1032d9bb58e5SYang Zhong and we do have the proper page loaded for write, this shouldn't 1033d9bb58e5SYang Zhong ever return. But just in case, handle via stop-the-world. */ 1034d9bb58e5SYang Zhong goto stop_the_world; 1035d9bb58e5SYang Zhong } 1036d9bb58e5SYang Zhong 103734d49937SPeter Maydell hostaddr = (void *)((uintptr_t)addr + tlbe->addend); 103834d49937SPeter Maydell 103934d49937SPeter Maydell ndi->active = false; 104034d49937SPeter Maydell if (unlikely(tlb_addr & TLB_NOTDIRTY)) { 104134d49937SPeter Maydell ndi->active = true; 104234d49937SPeter Maydell memory_notdirty_write_prepare(ndi, ENV_GET_CPU(env), addr, 104334d49937SPeter Maydell qemu_ram_addr_from_host_nofail(hostaddr), 104434d49937SPeter Maydell 1 << s_bits); 104534d49937SPeter Maydell } 104634d49937SPeter Maydell 104734d49937SPeter Maydell return hostaddr; 1048d9bb58e5SYang Zhong 1049d9bb58e5SYang Zhong stop_the_world: 1050d9bb58e5SYang Zhong cpu_loop_exit_atomic(ENV_GET_CPU(env), retaddr); 1051d9bb58e5SYang Zhong } 1052d9bb58e5SYang Zhong 1053d9bb58e5SYang Zhong #ifdef TARGET_WORDS_BIGENDIAN 1054d9bb58e5SYang Zhong # define TGT_BE(X) (X) 1055d9bb58e5SYang Zhong # define TGT_LE(X) BSWAP(X) 1056d9bb58e5SYang Zhong #else 1057d9bb58e5SYang Zhong # define TGT_BE(X) BSWAP(X) 1058d9bb58e5SYang Zhong # define TGT_LE(X) (X) 1059d9bb58e5SYang Zhong #endif 1060d9bb58e5SYang Zhong 1061d9bb58e5SYang Zhong #define MMUSUFFIX _mmu 1062d9bb58e5SYang Zhong 1063d9bb58e5SYang Zhong #define DATA_SIZE 1 1064d9bb58e5SYang Zhong #include "softmmu_template.h" 1065d9bb58e5SYang Zhong 1066d9bb58e5SYang Zhong #define DATA_SIZE 2 1067d9bb58e5SYang Zhong #include "softmmu_template.h" 1068d9bb58e5SYang Zhong 1069d9bb58e5SYang Zhong #define DATA_SIZE 4 1070d9bb58e5SYang Zhong #include "softmmu_template.h" 1071d9bb58e5SYang Zhong 1072d9bb58e5SYang Zhong #define DATA_SIZE 8 1073d9bb58e5SYang Zhong #include "softmmu_template.h" 1074d9bb58e5SYang Zhong 1075d9bb58e5SYang Zhong /* First set of helpers allows passing in of OI and RETADDR. This makes 1076d9bb58e5SYang Zhong them callable from other helpers. 
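   Illustrative note: each inclusion of "atomic_template.h" below stamps
   out helpers whose names are roughly of the form
       helper_atomic_<op><size-suffix>_<le|be>_mmu(env, addr, ..., oi, retaddr)
   via the ATOMIC_NAME macro; ATOMIC_MMU_LOOKUP resolves the guest address
   to a host pointer and ATOMIC_MMU_CLEANUP completes any pending notdirty
   write tracking around the operation.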
*/ 1077d9bb58e5SYang Zhong 1078d9bb58e5SYang Zhong #define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr 1079d9bb58e5SYang Zhong #define ATOMIC_NAME(X) \ 1080d9bb58e5SYang Zhong HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu)) 108134d49937SPeter Maydell #define ATOMIC_MMU_DECLS NotDirtyInfo ndi 108234d49937SPeter Maydell #define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, retaddr, &ndi) 108334d49937SPeter Maydell #define ATOMIC_MMU_CLEANUP \ 108434d49937SPeter Maydell do { \ 108534d49937SPeter Maydell if (unlikely(ndi.active)) { \ 108634d49937SPeter Maydell memory_notdirty_write_complete(&ndi); \ 108734d49937SPeter Maydell } \ 108834d49937SPeter Maydell } while (0) 1089d9bb58e5SYang Zhong 1090d9bb58e5SYang Zhong #define DATA_SIZE 1 1091d9bb58e5SYang Zhong #include "atomic_template.h" 1092d9bb58e5SYang Zhong 1093d9bb58e5SYang Zhong #define DATA_SIZE 2 1094d9bb58e5SYang Zhong #include "atomic_template.h" 1095d9bb58e5SYang Zhong 1096d9bb58e5SYang Zhong #define DATA_SIZE 4 1097d9bb58e5SYang Zhong #include "atomic_template.h" 1098d9bb58e5SYang Zhong 1099d9bb58e5SYang Zhong #ifdef CONFIG_ATOMIC64 1100d9bb58e5SYang Zhong #define DATA_SIZE 8 1101d9bb58e5SYang Zhong #include "atomic_template.h" 1102d9bb58e5SYang Zhong #endif 1103d9bb58e5SYang Zhong 1104d9bb58e5SYang Zhong #ifdef CONFIG_ATOMIC128 1105d9bb58e5SYang Zhong #define DATA_SIZE 16 1106d9bb58e5SYang Zhong #include "atomic_template.h" 1107d9bb58e5SYang Zhong #endif 1108d9bb58e5SYang Zhong 1109d9bb58e5SYang Zhong /* Second set of helpers are directly callable from TCG as helpers. */ 1110d9bb58e5SYang Zhong 1111d9bb58e5SYang Zhong #undef EXTRA_ARGS 1112d9bb58e5SYang Zhong #undef ATOMIC_NAME 1113d9bb58e5SYang Zhong #undef ATOMIC_MMU_LOOKUP 1114d9bb58e5SYang Zhong #define EXTRA_ARGS , TCGMemOpIdx oi 1115d9bb58e5SYang Zhong #define ATOMIC_NAME(X) HELPER(glue(glue(atomic_ ## X, SUFFIX), END)) 111634d49937SPeter Maydell #define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, GETPC(), &ndi) 1117d9bb58e5SYang Zhong 1118d9bb58e5SYang Zhong #define DATA_SIZE 1 1119d9bb58e5SYang Zhong #include "atomic_template.h" 1120d9bb58e5SYang Zhong 1121d9bb58e5SYang Zhong #define DATA_SIZE 2 1122d9bb58e5SYang Zhong #include "atomic_template.h" 1123d9bb58e5SYang Zhong 1124d9bb58e5SYang Zhong #define DATA_SIZE 4 1125d9bb58e5SYang Zhong #include "atomic_template.h" 1126d9bb58e5SYang Zhong 1127d9bb58e5SYang Zhong #ifdef CONFIG_ATOMIC64 1128d9bb58e5SYang Zhong #define DATA_SIZE 8 1129d9bb58e5SYang Zhong #include "atomic_template.h" 1130d9bb58e5SYang Zhong #endif 1131d9bb58e5SYang Zhong 1132d9bb58e5SYang Zhong /* Code access functions. */ 1133d9bb58e5SYang Zhong 1134d9bb58e5SYang Zhong #undef MMUSUFFIX 1135d9bb58e5SYang Zhong #define MMUSUFFIX _cmmu 1136d9bb58e5SYang Zhong #undef GETPC 1137d9bb58e5SYang Zhong #define GETPC() ((uintptr_t)0) 1138d9bb58e5SYang Zhong #define SOFTMMU_CODE_ACCESS 1139d9bb58e5SYang Zhong 1140d9bb58e5SYang Zhong #define DATA_SIZE 1 1141d9bb58e5SYang Zhong #include "softmmu_template.h" 1142d9bb58e5SYang Zhong 1143d9bb58e5SYang Zhong #define DATA_SIZE 2 1144d9bb58e5SYang Zhong #include "softmmu_template.h" 1145d9bb58e5SYang Zhong 1146d9bb58e5SYang Zhong #define DATA_SIZE 4 1147d9bb58e5SYang Zhong #include "softmmu_template.h" 1148d9bb58e5SYang Zhong 1149d9bb58e5SYang Zhong #define DATA_SIZE 8 1150d9bb58e5SYang Zhong #include "softmmu_template.h" 1151
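/*
 * The final includes above instantiate the code-access variants
 * (MMUSUFFIX _cmmu, SOFTMMU_CODE_ACCESS): the same softmmu template, but
 * reading through addr_code and with GETPC() forced to 0, since these
 * loads are issued by the translator rather than from generated code
 * with a meaningful return address.
 */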