1d9bb58e5SYang Zhong /*
2d9bb58e5SYang Zhong * Common CPU TLB handling
3d9bb58e5SYang Zhong *
4d9bb58e5SYang Zhong * Copyright (c) 2003 Fabrice Bellard
5d9bb58e5SYang Zhong *
6d9bb58e5SYang Zhong * This library is free software; you can redistribute it and/or
7d9bb58e5SYang Zhong * modify it under the terms of the GNU Lesser General Public
8d9bb58e5SYang Zhong * License as published by the Free Software Foundation; either
9d9bb58e5SYang Zhong * version 2 of the License, or (at your option) any later version.
10d9bb58e5SYang Zhong *
11d9bb58e5SYang Zhong * This library is distributed in the hope that it will be useful,
12d9bb58e5SYang Zhong * but WITHOUT ANY WARRANTY; without even the implied warranty of
13d9bb58e5SYang Zhong * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14d9bb58e5SYang Zhong * Lesser General Public License for more details.
15d9bb58e5SYang Zhong *
16d9bb58e5SYang Zhong * You should have received a copy of the GNU Lesser General Public
17d9bb58e5SYang Zhong * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18d9bb58e5SYang Zhong */
19d9bb58e5SYang Zhong
20d9bb58e5SYang Zhong #include "qemu/osdep.h"
21d9bb58e5SYang Zhong #include "qemu/main-loop.h"
22d9bb58e5SYang Zhong #include "cpu.h"
23d9bb58e5SYang Zhong #include "exec/exec-all.h"
24d9bb58e5SYang Zhong #include "exec/memory.h"
25d9bb58e5SYang Zhong #include "exec/address-spaces.h"
26d9bb58e5SYang Zhong #include "exec/cpu_ldst.h"
27d9bb58e5SYang Zhong #include "exec/cputlb.h"
28d9bb58e5SYang Zhong #include "exec/memory-internal.h"
29d9bb58e5SYang Zhong #include "exec/ram_addr.h"
30d9bb58e5SYang Zhong #include "tcg/tcg.h"
31d9bb58e5SYang Zhong #include "qemu/error-report.h"
32d9bb58e5SYang Zhong #include "exec/log.h"
33d9bb58e5SYang Zhong #include "exec/helper-proto.h"
34d9bb58e5SYang Zhong #include "qemu/atomic.h"
35d9bb58e5SYang Zhong
36d9bb58e5SYang Zhong /* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
37d9bb58e5SYang Zhong /* #define DEBUG_TLB */
38d9bb58e5SYang Zhong /* #define DEBUG_TLB_LOG */
39d9bb58e5SYang Zhong
40d9bb58e5SYang Zhong #ifdef DEBUG_TLB
41d9bb58e5SYang Zhong # define DEBUG_TLB_GATE 1
42d9bb58e5SYang Zhong # ifdef DEBUG_TLB_LOG
43d9bb58e5SYang Zhong # define DEBUG_TLB_LOG_GATE 1
44d9bb58e5SYang Zhong # else
45d9bb58e5SYang Zhong # define DEBUG_TLB_LOG_GATE 0
46d9bb58e5SYang Zhong # endif
47d9bb58e5SYang Zhong #else
48d9bb58e5SYang Zhong # define DEBUG_TLB_GATE 0
49d9bb58e5SYang Zhong # define DEBUG_TLB_LOG_GATE 0
50d9bb58e5SYang Zhong #endif
51d9bb58e5SYang Zhong
52d9bb58e5SYang Zhong #define tlb_debug(fmt, ...) \
do { \ 53d9bb58e5SYang Zhong if (DEBUG_TLB_LOG_GATE) { \ 54d9bb58e5SYang Zhong qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \ 55d9bb58e5SYang Zhong ## __VA_ARGS__); \ 56d9bb58e5SYang Zhong } else if (DEBUG_TLB_GATE) { \ 57d9bb58e5SYang Zhong fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \ 58d9bb58e5SYang Zhong } \ 59d9bb58e5SYang Zhong } while (0) 60d9bb58e5SYang Zhong 61d9bb58e5SYang Zhong #define assert_cpu_is_self(this_cpu) do { \ 62d9bb58e5SYang Zhong if (DEBUG_TLB_GATE) { \ 63d9bb58e5SYang Zhong g_assert(!cpu->created || qemu_cpu_is_self(cpu)); \ 64d9bb58e5SYang Zhong } \ 65d9bb58e5SYang Zhong } while (0) 66d9bb58e5SYang Zhong 67d9bb58e5SYang Zhong /* run_on_cpu_data.target_ptr should always be big enough for a 68d9bb58e5SYang Zhong * target_ulong even on 32 bit builds */ 69d9bb58e5SYang Zhong QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data)); 70d9bb58e5SYang Zhong 71d9bb58e5SYang Zhong /* We currently can't handle more than 16 bits in the MMUIDX bitmask. 72d9bb58e5SYang Zhong */ 73d9bb58e5SYang Zhong QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16); 74d9bb58e5SYang Zhong #define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1) 75d9bb58e5SYang Zhong 76d9bb58e5SYang Zhong /* flush_all_helper: run fn across all cpus 77d9bb58e5SYang Zhong * 78d9bb58e5SYang Zhong * If the wait flag is set then the src cpu's helper will be queued as 79d9bb58e5SYang Zhong * "safe" work and the loop exited creating a synchronisation point 80d9bb58e5SYang Zhong * where all queued work will be finished before execution starts 81d9bb58e5SYang Zhong * again. 82d9bb58e5SYang Zhong */ 83d9bb58e5SYang Zhong static void flush_all_helper(CPUState *src, run_on_cpu_func fn, 84d9bb58e5SYang Zhong run_on_cpu_data d) 85d9bb58e5SYang Zhong { 86d9bb58e5SYang Zhong CPUState *cpu; 87d9bb58e5SYang Zhong 88d9bb58e5SYang Zhong CPU_FOREACH(cpu) { 89d9bb58e5SYang Zhong if (cpu != src) { 90d9bb58e5SYang Zhong async_run_on_cpu(cpu, fn, d); 91d9bb58e5SYang Zhong } 92d9bb58e5SYang Zhong } 93d9bb58e5SYang Zhong } 94d9bb58e5SYang Zhong 9583974cf4SEmilio G. Cota size_t tlb_flush_count(void) 9683974cf4SEmilio G. Cota { 9783974cf4SEmilio G. Cota CPUState *cpu; 9883974cf4SEmilio G. Cota size_t count = 0; 9983974cf4SEmilio G. Cota 10083974cf4SEmilio G. Cota CPU_FOREACH(cpu) { 10183974cf4SEmilio G. Cota CPUArchState *env = cpu->env_ptr; 10283974cf4SEmilio G. Cota 10383974cf4SEmilio G. Cota count += atomic_read(&env->tlb_flush_count); 10483974cf4SEmilio G. Cota } 10583974cf4SEmilio G. Cota return count; 10683974cf4SEmilio G. Cota } 107d9bb58e5SYang Zhong 108d9bb58e5SYang Zhong /* This is OK because CPU architectures generally permit an 109d9bb58e5SYang Zhong * implementation to drop entries from the TLB at any time, so 110d9bb58e5SYang Zhong * flushing more entries than required is only an efficiency issue, 111d9bb58e5SYang Zhong * not a correctness issue. 112d9bb58e5SYang Zhong */ 113d9bb58e5SYang Zhong static void tlb_flush_nocheck(CPUState *cpu) 114d9bb58e5SYang Zhong { 115d9bb58e5SYang Zhong CPUArchState *env = cpu->env_ptr; 116d9bb58e5SYang Zhong 117d9bb58e5SYang Zhong /* The QOM tests will trigger tlb_flushes without setting up TCG 118d9bb58e5SYang Zhong * so we bug out here in that case. 119d9bb58e5SYang Zhong */ 120d9bb58e5SYang Zhong if (!tcg_enabled()) { 121d9bb58e5SYang Zhong return; 122d9bb58e5SYang Zhong } 123d9bb58e5SYang Zhong 124d9bb58e5SYang Zhong assert_cpu_is_self(cpu); 12583974cf4SEmilio G. Cota atomic_set(&env->tlb_flush_count, env->tlb_flush_count + 1); 12683974cf4SEmilio G. 
Cota tlb_debug("(count: %zu)\n", tlb_flush_count()); 127d9bb58e5SYang Zhong 128d9bb58e5SYang Zhong memset(env->tlb_table, -1, sizeof(env->tlb_table)); 129d9bb58e5SYang Zhong memset(env->tlb_v_table, -1, sizeof(env->tlb_v_table)); 130f3ced3c5SEmilio G. Cota cpu_tb_jmp_cache_clear(cpu); 131d9bb58e5SYang Zhong 132d9bb58e5SYang Zhong env->vtlb_index = 0; 133d9bb58e5SYang Zhong env->tlb_flush_addr = -1; 134d9bb58e5SYang Zhong env->tlb_flush_mask = 0; 135d9bb58e5SYang Zhong 136d9bb58e5SYang Zhong atomic_mb_set(&cpu->pending_tlb_flush, 0); 137d9bb58e5SYang Zhong } 138d9bb58e5SYang Zhong 139d9bb58e5SYang Zhong static void tlb_flush_global_async_work(CPUState *cpu, run_on_cpu_data data) 140d9bb58e5SYang Zhong { 141d9bb58e5SYang Zhong tlb_flush_nocheck(cpu); 142d9bb58e5SYang Zhong } 143d9bb58e5SYang Zhong 144d9bb58e5SYang Zhong void tlb_flush(CPUState *cpu) 145d9bb58e5SYang Zhong { 146d9bb58e5SYang Zhong if (cpu->created && !qemu_cpu_is_self(cpu)) { 147d9bb58e5SYang Zhong if (atomic_mb_read(&cpu->pending_tlb_flush) != ALL_MMUIDX_BITS) { 148d9bb58e5SYang Zhong atomic_mb_set(&cpu->pending_tlb_flush, ALL_MMUIDX_BITS); 149d9bb58e5SYang Zhong async_run_on_cpu(cpu, tlb_flush_global_async_work, 150d9bb58e5SYang Zhong RUN_ON_CPU_NULL); 151d9bb58e5SYang Zhong } 152d9bb58e5SYang Zhong } else { 153d9bb58e5SYang Zhong tlb_flush_nocheck(cpu); 154d9bb58e5SYang Zhong } 155d9bb58e5SYang Zhong } 156d9bb58e5SYang Zhong 157d9bb58e5SYang Zhong void tlb_flush_all_cpus(CPUState *src_cpu) 158d9bb58e5SYang Zhong { 159d9bb58e5SYang Zhong const run_on_cpu_func fn = tlb_flush_global_async_work; 160d9bb58e5SYang Zhong flush_all_helper(src_cpu, fn, RUN_ON_CPU_NULL); 161d9bb58e5SYang Zhong fn(src_cpu, RUN_ON_CPU_NULL); 162d9bb58e5SYang Zhong } 163d9bb58e5SYang Zhong 164d9bb58e5SYang Zhong void tlb_flush_all_cpus_synced(CPUState *src_cpu) 165d9bb58e5SYang Zhong { 166d9bb58e5SYang Zhong const run_on_cpu_func fn = tlb_flush_global_async_work; 167d9bb58e5SYang Zhong flush_all_helper(src_cpu, fn, RUN_ON_CPU_NULL); 168d9bb58e5SYang Zhong async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_NULL); 169d9bb58e5SYang Zhong } 170d9bb58e5SYang Zhong 171d9bb58e5SYang Zhong static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data) 172d9bb58e5SYang Zhong { 173d9bb58e5SYang Zhong CPUArchState *env = cpu->env_ptr; 174d9bb58e5SYang Zhong unsigned long mmu_idx_bitmask = data.host_int; 175d9bb58e5SYang Zhong int mmu_idx; 176d9bb58e5SYang Zhong 177d9bb58e5SYang Zhong assert_cpu_is_self(cpu); 178d9bb58e5SYang Zhong 179d9bb58e5SYang Zhong tlb_debug("start: mmu_idx:0x%04lx\n", mmu_idx_bitmask); 180d9bb58e5SYang Zhong 181d9bb58e5SYang Zhong for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { 182d9bb58e5SYang Zhong 183d9bb58e5SYang Zhong if (test_bit(mmu_idx, &mmu_idx_bitmask)) { 184d9bb58e5SYang Zhong tlb_debug("%d\n", mmu_idx); 185d9bb58e5SYang Zhong 186d9bb58e5SYang Zhong memset(env->tlb_table[mmu_idx], -1, sizeof(env->tlb_table[0])); 187d9bb58e5SYang Zhong memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0])); 188d9bb58e5SYang Zhong } 189d9bb58e5SYang Zhong } 190d9bb58e5SYang Zhong 191f3ced3c5SEmilio G. 
Cota cpu_tb_jmp_cache_clear(cpu); 192d9bb58e5SYang Zhong 193d9bb58e5SYang Zhong tlb_debug("done\n"); 194d9bb58e5SYang Zhong } 195d9bb58e5SYang Zhong 196d9bb58e5SYang Zhong void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap) 197d9bb58e5SYang Zhong { 198d9bb58e5SYang Zhong tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap); 199d9bb58e5SYang Zhong 200d9bb58e5SYang Zhong if (!qemu_cpu_is_self(cpu)) { 201d9bb58e5SYang Zhong uint16_t pending_flushes = idxmap; 202d9bb58e5SYang Zhong pending_flushes &= ~atomic_mb_read(&cpu->pending_tlb_flush); 203d9bb58e5SYang Zhong 204d9bb58e5SYang Zhong if (pending_flushes) { 205d9bb58e5SYang Zhong tlb_debug("reduced mmu_idx: 0x%" PRIx16 "\n", pending_flushes); 206d9bb58e5SYang Zhong 207d9bb58e5SYang Zhong atomic_or(&cpu->pending_tlb_flush, pending_flushes); 208d9bb58e5SYang Zhong async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work, 209d9bb58e5SYang Zhong RUN_ON_CPU_HOST_INT(pending_flushes)); 210d9bb58e5SYang Zhong } 211d9bb58e5SYang Zhong } else { 212d9bb58e5SYang Zhong tlb_flush_by_mmuidx_async_work(cpu, 213d9bb58e5SYang Zhong RUN_ON_CPU_HOST_INT(idxmap)); 214d9bb58e5SYang Zhong } 215d9bb58e5SYang Zhong } 216d9bb58e5SYang Zhong 217d9bb58e5SYang Zhong void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap) 218d9bb58e5SYang Zhong { 219d9bb58e5SYang Zhong const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work; 220d9bb58e5SYang Zhong 221d9bb58e5SYang Zhong tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap); 222d9bb58e5SYang Zhong 223d9bb58e5SYang Zhong flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap)); 224d9bb58e5SYang Zhong fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap)); 225d9bb58e5SYang Zhong } 226d9bb58e5SYang Zhong 227d9bb58e5SYang Zhong void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, 228d9bb58e5SYang Zhong uint16_t idxmap) 229d9bb58e5SYang Zhong { 230d9bb58e5SYang Zhong const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work; 231d9bb58e5SYang Zhong 232d9bb58e5SYang Zhong tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap); 233d9bb58e5SYang Zhong 234d9bb58e5SYang Zhong flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap)); 235d9bb58e5SYang Zhong async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap)); 236d9bb58e5SYang Zhong } 237d9bb58e5SYang Zhong 238d9bb58e5SYang Zhong 239d9bb58e5SYang Zhong 240d9bb58e5SYang Zhong static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr) 241d9bb58e5SYang Zhong { 242*334692bcSPeter Maydell if (tlb_hit_page(tlb_entry->addr_read, addr) || 243*334692bcSPeter Maydell tlb_hit_page(tlb_entry->addr_write, addr) || 244*334692bcSPeter Maydell tlb_hit_page(tlb_entry->addr_code, addr)) { 245d9bb58e5SYang Zhong memset(tlb_entry, -1, sizeof(*tlb_entry)); 246d9bb58e5SYang Zhong } 247d9bb58e5SYang Zhong } 248d9bb58e5SYang Zhong 249d9bb58e5SYang Zhong static void tlb_flush_page_async_work(CPUState *cpu, run_on_cpu_data data) 250d9bb58e5SYang Zhong { 251d9bb58e5SYang Zhong CPUArchState *env = cpu->env_ptr; 252d9bb58e5SYang Zhong target_ulong addr = (target_ulong) data.target_ptr; 253d9bb58e5SYang Zhong int i; 254d9bb58e5SYang Zhong int mmu_idx; 255d9bb58e5SYang Zhong 256d9bb58e5SYang Zhong assert_cpu_is_self(cpu); 257d9bb58e5SYang Zhong 258d9bb58e5SYang Zhong tlb_debug("page :" TARGET_FMT_lx "\n", addr); 259d9bb58e5SYang Zhong 260d9bb58e5SYang Zhong /* Check if we need to flush due to large pages. 
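   (tlb_add_large_page() records the address and mask of any mapping wider
    than TARGET_PAGE_SIZE in env->tlb_flush_addr/env->tlb_flush_mask.  If the
    page being flushed falls inside that range we cannot tell which TLB
    entries it occupies, so we fall back to a full flush below.)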
*/ 261d9bb58e5SYang Zhong if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) { 262d9bb58e5SYang Zhong tlb_debug("forcing full flush (" 263d9bb58e5SYang Zhong TARGET_FMT_lx "/" TARGET_FMT_lx ")\n", 264d9bb58e5SYang Zhong env->tlb_flush_addr, env->tlb_flush_mask); 265d9bb58e5SYang Zhong 266d9bb58e5SYang Zhong tlb_flush(cpu); 267d9bb58e5SYang Zhong return; 268d9bb58e5SYang Zhong } 269d9bb58e5SYang Zhong 270d9bb58e5SYang Zhong addr &= TARGET_PAGE_MASK; 271d9bb58e5SYang Zhong i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); 272d9bb58e5SYang Zhong for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { 273d9bb58e5SYang Zhong tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr); 274d9bb58e5SYang Zhong } 275d9bb58e5SYang Zhong 276d9bb58e5SYang Zhong /* check whether there are entries that need to be flushed in the vtlb */ 277d9bb58e5SYang Zhong for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { 278d9bb58e5SYang Zhong int k; 279d9bb58e5SYang Zhong for (k = 0; k < CPU_VTLB_SIZE; k++) { 280d9bb58e5SYang Zhong tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], addr); 281d9bb58e5SYang Zhong } 282d9bb58e5SYang Zhong } 283d9bb58e5SYang Zhong 284d9bb58e5SYang Zhong tb_flush_jmp_cache(cpu, addr); 285d9bb58e5SYang Zhong } 286d9bb58e5SYang Zhong 287d9bb58e5SYang Zhong void tlb_flush_page(CPUState *cpu, target_ulong addr) 288d9bb58e5SYang Zhong { 289d9bb58e5SYang Zhong tlb_debug("page :" TARGET_FMT_lx "\n", addr); 290d9bb58e5SYang Zhong 291d9bb58e5SYang Zhong if (!qemu_cpu_is_self(cpu)) { 292d9bb58e5SYang Zhong async_run_on_cpu(cpu, tlb_flush_page_async_work, 293d9bb58e5SYang Zhong RUN_ON_CPU_TARGET_PTR(addr)); 294d9bb58e5SYang Zhong } else { 295d9bb58e5SYang Zhong tlb_flush_page_async_work(cpu, RUN_ON_CPU_TARGET_PTR(addr)); 296d9bb58e5SYang Zhong } 297d9bb58e5SYang Zhong } 298d9bb58e5SYang Zhong 299d9bb58e5SYang Zhong /* As we are going to hijack the bottom bits of the page address for a 300d9bb58e5SYang Zhong * mmuidx bit mask we need to fail to build if we can't do that 301d9bb58e5SYang Zhong */ 302d9bb58e5SYang Zhong QEMU_BUILD_BUG_ON(NB_MMU_MODES > TARGET_PAGE_BITS_MIN); 303d9bb58e5SYang Zhong 304d9bb58e5SYang Zhong static void tlb_flush_page_by_mmuidx_async_work(CPUState *cpu, 305d9bb58e5SYang Zhong run_on_cpu_data data) 306d9bb58e5SYang Zhong { 307d9bb58e5SYang Zhong CPUArchState *env = cpu->env_ptr; 308d9bb58e5SYang Zhong target_ulong addr_and_mmuidx = (target_ulong) data.target_ptr; 309d9bb58e5SYang Zhong target_ulong addr = addr_and_mmuidx & TARGET_PAGE_MASK; 310d9bb58e5SYang Zhong unsigned long mmu_idx_bitmap = addr_and_mmuidx & ALL_MMUIDX_BITS; 311d9bb58e5SYang Zhong int page = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); 312d9bb58e5SYang Zhong int mmu_idx; 313d9bb58e5SYang Zhong int i; 314d9bb58e5SYang Zhong 315d9bb58e5SYang Zhong assert_cpu_is_self(cpu); 316d9bb58e5SYang Zhong 317d9bb58e5SYang Zhong tlb_debug("page:%d addr:"TARGET_FMT_lx" mmu_idx:0x%lx\n", 318d9bb58e5SYang Zhong page, addr, mmu_idx_bitmap); 319d9bb58e5SYang Zhong 320d9bb58e5SYang Zhong for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { 321d9bb58e5SYang Zhong if (test_bit(mmu_idx, &mmu_idx_bitmap)) { 322d9bb58e5SYang Zhong tlb_flush_entry(&env->tlb_table[mmu_idx][page], addr); 323d9bb58e5SYang Zhong 324d9bb58e5SYang Zhong /* check whether there are vltb entries that need to be flushed */ 325d9bb58e5SYang Zhong for (i = 0; i < CPU_VTLB_SIZE; i++) { 326d9bb58e5SYang Zhong tlb_flush_entry(&env->tlb_v_table[mmu_idx][i], addr); 327d9bb58e5SYang Zhong } 328d9bb58e5SYang Zhong } 329d9bb58e5SYang Zhong } 330d9bb58e5SYang 
Zhong 331d9bb58e5SYang Zhong tb_flush_jmp_cache(cpu, addr); 332d9bb58e5SYang Zhong } 333d9bb58e5SYang Zhong 334d9bb58e5SYang Zhong static void tlb_check_page_and_flush_by_mmuidx_async_work(CPUState *cpu, 335d9bb58e5SYang Zhong run_on_cpu_data data) 336d9bb58e5SYang Zhong { 337d9bb58e5SYang Zhong CPUArchState *env = cpu->env_ptr; 338d9bb58e5SYang Zhong target_ulong addr_and_mmuidx = (target_ulong) data.target_ptr; 339d9bb58e5SYang Zhong target_ulong addr = addr_and_mmuidx & TARGET_PAGE_MASK; 340d9bb58e5SYang Zhong unsigned long mmu_idx_bitmap = addr_and_mmuidx & ALL_MMUIDX_BITS; 341d9bb58e5SYang Zhong 342d9bb58e5SYang Zhong tlb_debug("addr:"TARGET_FMT_lx" mmu_idx: %04lx\n", addr, mmu_idx_bitmap); 343d9bb58e5SYang Zhong 344d9bb58e5SYang Zhong /* Check if we need to flush due to large pages. */ 345d9bb58e5SYang Zhong if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) { 346d9bb58e5SYang Zhong tlb_debug("forced full flush (" 347d9bb58e5SYang Zhong TARGET_FMT_lx "/" TARGET_FMT_lx ")\n", 348d9bb58e5SYang Zhong env->tlb_flush_addr, env->tlb_flush_mask); 349d9bb58e5SYang Zhong 350d9bb58e5SYang Zhong tlb_flush_by_mmuidx_async_work(cpu, 351d9bb58e5SYang Zhong RUN_ON_CPU_HOST_INT(mmu_idx_bitmap)); 352d9bb58e5SYang Zhong } else { 353d9bb58e5SYang Zhong tlb_flush_page_by_mmuidx_async_work(cpu, data); 354d9bb58e5SYang Zhong } 355d9bb58e5SYang Zhong } 356d9bb58e5SYang Zhong 357d9bb58e5SYang Zhong void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap) 358d9bb58e5SYang Zhong { 359d9bb58e5SYang Zhong target_ulong addr_and_mmu_idx; 360d9bb58e5SYang Zhong 361d9bb58e5SYang Zhong tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap); 362d9bb58e5SYang Zhong 363d9bb58e5SYang Zhong /* This should already be page aligned */ 364d9bb58e5SYang Zhong addr_and_mmu_idx = addr & TARGET_PAGE_MASK; 365d9bb58e5SYang Zhong addr_and_mmu_idx |= idxmap; 366d9bb58e5SYang Zhong 367d9bb58e5SYang Zhong if (!qemu_cpu_is_self(cpu)) { 368d9bb58e5SYang Zhong async_run_on_cpu(cpu, tlb_check_page_and_flush_by_mmuidx_async_work, 369d9bb58e5SYang Zhong RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx)); 370d9bb58e5SYang Zhong } else { 371d9bb58e5SYang Zhong tlb_check_page_and_flush_by_mmuidx_async_work( 372d9bb58e5SYang Zhong cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx)); 373d9bb58e5SYang Zhong } 374d9bb58e5SYang Zhong } 375d9bb58e5SYang Zhong 376d9bb58e5SYang Zhong void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr, 377d9bb58e5SYang Zhong uint16_t idxmap) 378d9bb58e5SYang Zhong { 379d9bb58e5SYang Zhong const run_on_cpu_func fn = tlb_check_page_and_flush_by_mmuidx_async_work; 380d9bb58e5SYang Zhong target_ulong addr_and_mmu_idx; 381d9bb58e5SYang Zhong 382d9bb58e5SYang Zhong tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap); 383d9bb58e5SYang Zhong 384d9bb58e5SYang Zhong /* This should already be page aligned */ 385d9bb58e5SYang Zhong addr_and_mmu_idx = addr & TARGET_PAGE_MASK; 386d9bb58e5SYang Zhong addr_and_mmu_idx |= idxmap; 387d9bb58e5SYang Zhong 388d9bb58e5SYang Zhong flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx)); 389d9bb58e5SYang Zhong fn(src_cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx)); 390d9bb58e5SYang Zhong } 391d9bb58e5SYang Zhong 392d9bb58e5SYang Zhong void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu, 393d9bb58e5SYang Zhong target_ulong addr, 394d9bb58e5SYang Zhong uint16_t idxmap) 395d9bb58e5SYang Zhong { 396d9bb58e5SYang Zhong const run_on_cpu_func fn = tlb_check_page_and_flush_by_mmuidx_async_work; 
397d9bb58e5SYang Zhong target_ulong addr_and_mmu_idx; 398d9bb58e5SYang Zhong 399d9bb58e5SYang Zhong tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap); 400d9bb58e5SYang Zhong 401d9bb58e5SYang Zhong /* This should already be page aligned */ 402d9bb58e5SYang Zhong addr_and_mmu_idx = addr & TARGET_PAGE_MASK; 403d9bb58e5SYang Zhong addr_and_mmu_idx |= idxmap; 404d9bb58e5SYang Zhong 405d9bb58e5SYang Zhong flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx)); 406d9bb58e5SYang Zhong async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx)); 407d9bb58e5SYang Zhong } 408d9bb58e5SYang Zhong 409d9bb58e5SYang Zhong void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr) 410d9bb58e5SYang Zhong { 411d9bb58e5SYang Zhong const run_on_cpu_func fn = tlb_flush_page_async_work; 412d9bb58e5SYang Zhong 413d9bb58e5SYang Zhong flush_all_helper(src, fn, RUN_ON_CPU_TARGET_PTR(addr)); 414d9bb58e5SYang Zhong fn(src, RUN_ON_CPU_TARGET_PTR(addr)); 415d9bb58e5SYang Zhong } 416d9bb58e5SYang Zhong 417d9bb58e5SYang Zhong void tlb_flush_page_all_cpus_synced(CPUState *src, 418d9bb58e5SYang Zhong target_ulong addr) 419d9bb58e5SYang Zhong { 420d9bb58e5SYang Zhong const run_on_cpu_func fn = tlb_flush_page_async_work; 421d9bb58e5SYang Zhong 422d9bb58e5SYang Zhong flush_all_helper(src, fn, RUN_ON_CPU_TARGET_PTR(addr)); 423d9bb58e5SYang Zhong async_safe_run_on_cpu(src, fn, RUN_ON_CPU_TARGET_PTR(addr)); 424d9bb58e5SYang Zhong } 425d9bb58e5SYang Zhong 426d9bb58e5SYang Zhong /* update the TLBs so that writes to code in the virtual page 'addr' 427d9bb58e5SYang Zhong can be detected */ 428d9bb58e5SYang Zhong void tlb_protect_code(ram_addr_t ram_addr) 429d9bb58e5SYang Zhong { 430d9bb58e5SYang Zhong cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE, 431d9bb58e5SYang Zhong DIRTY_MEMORY_CODE); 432d9bb58e5SYang Zhong } 433d9bb58e5SYang Zhong 434d9bb58e5SYang Zhong /* update the TLB so that writes in physical page 'phys_addr' are no longer 435d9bb58e5SYang Zhong tested for self modifying code */ 436d9bb58e5SYang Zhong void tlb_unprotect_code(ram_addr_t ram_addr) 437d9bb58e5SYang Zhong { 438d9bb58e5SYang Zhong cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE); 439d9bb58e5SYang Zhong } 440d9bb58e5SYang Zhong 441d9bb58e5SYang Zhong 442d9bb58e5SYang Zhong /* 443d9bb58e5SYang Zhong * Dirty write flag handling 444d9bb58e5SYang Zhong * 445d9bb58e5SYang Zhong * When the TCG code writes to a location it looks up the address in 446d9bb58e5SYang Zhong * the TLB and uses that data to compute the final address. If any of 447d9bb58e5SYang Zhong * the lower bits of the address are set then the slow path is forced. 448d9bb58e5SYang Zhong * There are a number of reasons to do this but for normal RAM the 449d9bb58e5SYang Zhong * most usual is detecting writes to code regions which may invalidate 450d9bb58e5SYang Zhong * generated code. 451d9bb58e5SYang Zhong * 452d9bb58e5SYang Zhong * Because we want other vCPUs to respond to changes straight away we 453d9bb58e5SYang Zhong * update the te->addr_write field atomically. If the TLB entry has 454d9bb58e5SYang Zhong * been changed by the vCPU in the mean time we skip the update. 455d9bb58e5SYang Zhong * 456d9bb58e5SYang Zhong * As this function uses atomic accesses we also need to ensure 457d9bb58e5SYang Zhong * updates to tlb_entries follow the same access rules. We don't need 458d9bb58e5SYang Zhong * to worry about this for oversized guests as MTTCG is disabled for 459d9bb58e5SYang Zhong * them. 
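 *
 * For illustration only: the store helpers generated from
 * softmmu_template.h (included at the bottom of this file) only take the
 * direct-RAM fast path when the low bits of addr_write are clear, so once
 * TLB_NOTDIRTY is set the store is routed through io_writex(), where the
 * dirty-tracking write handling runs.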
460d9bb58e5SYang Zhong */ 461d9bb58e5SYang Zhong 462d9bb58e5SYang Zhong static void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, uintptr_t start, 463d9bb58e5SYang Zhong uintptr_t length) 464d9bb58e5SYang Zhong { 465d9bb58e5SYang Zhong #if TCG_OVERSIZED_GUEST 466d9bb58e5SYang Zhong uintptr_t addr = tlb_entry->addr_write; 467d9bb58e5SYang Zhong 468d9bb58e5SYang Zhong if ((addr & (TLB_INVALID_MASK | TLB_MMIO | TLB_NOTDIRTY)) == 0) { 469d9bb58e5SYang Zhong addr &= TARGET_PAGE_MASK; 470d9bb58e5SYang Zhong addr += tlb_entry->addend; 471d9bb58e5SYang Zhong if ((addr - start) < length) { 472d9bb58e5SYang Zhong tlb_entry->addr_write |= TLB_NOTDIRTY; 473d9bb58e5SYang Zhong } 474d9bb58e5SYang Zhong } 475d9bb58e5SYang Zhong #else 476d9bb58e5SYang Zhong /* paired with atomic_mb_set in tlb_set_page_with_attrs */ 477d9bb58e5SYang Zhong uintptr_t orig_addr = atomic_mb_read(&tlb_entry->addr_write); 478d9bb58e5SYang Zhong uintptr_t addr = orig_addr; 479d9bb58e5SYang Zhong 480d9bb58e5SYang Zhong if ((addr & (TLB_INVALID_MASK | TLB_MMIO | TLB_NOTDIRTY)) == 0) { 481d9bb58e5SYang Zhong addr &= TARGET_PAGE_MASK; 482d9bb58e5SYang Zhong addr += atomic_read(&tlb_entry->addend); 483d9bb58e5SYang Zhong if ((addr - start) < length) { 484d9bb58e5SYang Zhong uintptr_t notdirty_addr = orig_addr | TLB_NOTDIRTY; 485d9bb58e5SYang Zhong atomic_cmpxchg(&tlb_entry->addr_write, orig_addr, notdirty_addr); 486d9bb58e5SYang Zhong } 487d9bb58e5SYang Zhong } 488d9bb58e5SYang Zhong #endif 489d9bb58e5SYang Zhong } 490d9bb58e5SYang Zhong 491d9bb58e5SYang Zhong /* For atomic correctness when running MTTCG we need to use the right 492d9bb58e5SYang Zhong * primitives when copying entries */ 493d9bb58e5SYang Zhong static inline void copy_tlb_helper(CPUTLBEntry *d, CPUTLBEntry *s, 494d9bb58e5SYang Zhong bool atomic_set) 495d9bb58e5SYang Zhong { 496d9bb58e5SYang Zhong #if TCG_OVERSIZED_GUEST 497d9bb58e5SYang Zhong *d = *s; 498d9bb58e5SYang Zhong #else 499d9bb58e5SYang Zhong if (atomic_set) { 500d9bb58e5SYang Zhong d->addr_read = s->addr_read; 501d9bb58e5SYang Zhong d->addr_code = s->addr_code; 502d9bb58e5SYang Zhong atomic_set(&d->addend, atomic_read(&s->addend)); 503d9bb58e5SYang Zhong /* Pairs with flag setting in tlb_reset_dirty_range */ 504d9bb58e5SYang Zhong atomic_mb_set(&d->addr_write, atomic_read(&s->addr_write)); 505d9bb58e5SYang Zhong } else { 506d9bb58e5SYang Zhong d->addr_read = s->addr_read; 507d9bb58e5SYang Zhong d->addr_write = atomic_read(&s->addr_write); 508d9bb58e5SYang Zhong d->addr_code = s->addr_code; 509d9bb58e5SYang Zhong d->addend = atomic_read(&s->addend); 510d9bb58e5SYang Zhong } 511d9bb58e5SYang Zhong #endif 512d9bb58e5SYang Zhong } 513d9bb58e5SYang Zhong 514d9bb58e5SYang Zhong /* This is a cross vCPU call (i.e. another vCPU resetting the flags of 515d9bb58e5SYang Zhong * the target vCPU). As such care needs to be taken that we don't 516d9bb58e5SYang Zhong * dangerously race with another vCPU update. The only thing actually 517d9bb58e5SYang Zhong * updated is the target TLB entry ->addr_write flags. 
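 * (tlb_reset_dirty_range() above does that update with atomic_cmpxchg(),
 *  or with a plain read-modify-write on TCG_OVERSIZED_GUEST builds, where
 *  MTTCG is not available anyway.)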
518d9bb58e5SYang Zhong */ 519d9bb58e5SYang Zhong void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length) 520d9bb58e5SYang Zhong { 521d9bb58e5SYang Zhong CPUArchState *env; 522d9bb58e5SYang Zhong 523d9bb58e5SYang Zhong int mmu_idx; 524d9bb58e5SYang Zhong 525d9bb58e5SYang Zhong env = cpu->env_ptr; 526d9bb58e5SYang Zhong for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { 527d9bb58e5SYang Zhong unsigned int i; 528d9bb58e5SYang Zhong 529d9bb58e5SYang Zhong for (i = 0; i < CPU_TLB_SIZE; i++) { 530d9bb58e5SYang Zhong tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i], 531d9bb58e5SYang Zhong start1, length); 532d9bb58e5SYang Zhong } 533d9bb58e5SYang Zhong 534d9bb58e5SYang Zhong for (i = 0; i < CPU_VTLB_SIZE; i++) { 535d9bb58e5SYang Zhong tlb_reset_dirty_range(&env->tlb_v_table[mmu_idx][i], 536d9bb58e5SYang Zhong start1, length); 537d9bb58e5SYang Zhong } 538d9bb58e5SYang Zhong } 539d9bb58e5SYang Zhong } 540d9bb58e5SYang Zhong 541d9bb58e5SYang Zhong static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr) 542d9bb58e5SYang Zhong { 543d9bb58e5SYang Zhong if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) { 544d9bb58e5SYang Zhong tlb_entry->addr_write = vaddr; 545d9bb58e5SYang Zhong } 546d9bb58e5SYang Zhong } 547d9bb58e5SYang Zhong 548d9bb58e5SYang Zhong /* update the TLB corresponding to virtual page vaddr 549d9bb58e5SYang Zhong so that it is no longer dirty */ 550d9bb58e5SYang Zhong void tlb_set_dirty(CPUState *cpu, target_ulong vaddr) 551d9bb58e5SYang Zhong { 552d9bb58e5SYang Zhong CPUArchState *env = cpu->env_ptr; 553d9bb58e5SYang Zhong int i; 554d9bb58e5SYang Zhong int mmu_idx; 555d9bb58e5SYang Zhong 556d9bb58e5SYang Zhong assert_cpu_is_self(cpu); 557d9bb58e5SYang Zhong 558d9bb58e5SYang Zhong vaddr &= TARGET_PAGE_MASK; 559d9bb58e5SYang Zhong i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); 560d9bb58e5SYang Zhong for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { 561d9bb58e5SYang Zhong tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr); 562d9bb58e5SYang Zhong } 563d9bb58e5SYang Zhong 564d9bb58e5SYang Zhong for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { 565d9bb58e5SYang Zhong int k; 566d9bb58e5SYang Zhong for (k = 0; k < CPU_VTLB_SIZE; k++) { 567d9bb58e5SYang Zhong tlb_set_dirty1(&env->tlb_v_table[mmu_idx][k], vaddr); 568d9bb58e5SYang Zhong } 569d9bb58e5SYang Zhong } 570d9bb58e5SYang Zhong } 571d9bb58e5SYang Zhong 572d9bb58e5SYang Zhong /* Our TLB does not support large pages, so remember the area covered by 573d9bb58e5SYang Zhong large pages and trigger a full TLB flush if these are invalidated. */ 574d9bb58e5SYang Zhong static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr, 575d9bb58e5SYang Zhong target_ulong size) 576d9bb58e5SYang Zhong { 577d9bb58e5SYang Zhong target_ulong mask = ~(size - 1); 578d9bb58e5SYang Zhong 579d9bb58e5SYang Zhong if (env->tlb_flush_addr == (target_ulong)-1) { 580d9bb58e5SYang Zhong env->tlb_flush_addr = vaddr & mask; 581d9bb58e5SYang Zhong env->tlb_flush_mask = mask; 582d9bb58e5SYang Zhong return; 583d9bb58e5SYang Zhong } 584d9bb58e5SYang Zhong /* Extend the existing region to include the new page. 585d9bb58e5SYang Zhong This is a compromise between unnecessary flushes and the cost 586d9bb58e5SYang Zhong of maintaining a full variable size TLB. 
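   For example (illustrative numbers only): with a 2 MB page recorded at
   0x40000000 and a new 2 MB page at 0x40200000, the loop below widens the
   mask one bit at a time until both addresses fall in the same aligned
   block, leaving a 4 MB region based at 0x40000000 that covers both pages.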
*/ 587d9bb58e5SYang Zhong mask &= env->tlb_flush_mask; 588d9bb58e5SYang Zhong while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) { 589d9bb58e5SYang Zhong mask <<= 1; 590d9bb58e5SYang Zhong } 591d9bb58e5SYang Zhong env->tlb_flush_addr &= mask; 592d9bb58e5SYang Zhong env->tlb_flush_mask = mask; 593d9bb58e5SYang Zhong } 594d9bb58e5SYang Zhong 595d9bb58e5SYang Zhong /* Add a new TLB entry. At most one entry for a given virtual address 596d9bb58e5SYang Zhong * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the 597d9bb58e5SYang Zhong * supplied size is only used by tlb_flush_page. 598d9bb58e5SYang Zhong * 599d9bb58e5SYang Zhong * Called from TCG-generated code, which is under an RCU read-side 600d9bb58e5SYang Zhong * critical section. 601d9bb58e5SYang Zhong */ 602d9bb58e5SYang Zhong void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr, 603d9bb58e5SYang Zhong hwaddr paddr, MemTxAttrs attrs, int prot, 604d9bb58e5SYang Zhong int mmu_idx, target_ulong size) 605d9bb58e5SYang Zhong { 606d9bb58e5SYang Zhong CPUArchState *env = cpu->env_ptr; 607d9bb58e5SYang Zhong MemoryRegionSection *section; 608d9bb58e5SYang Zhong unsigned int index; 609d9bb58e5SYang Zhong target_ulong address; 610d9bb58e5SYang Zhong target_ulong code_address; 611d9bb58e5SYang Zhong uintptr_t addend; 612d9bb58e5SYang Zhong CPUTLBEntry *te, *tv, tn; 61355df6fcfSPeter Maydell hwaddr iotlb, xlat, sz, paddr_page; 61455df6fcfSPeter Maydell target_ulong vaddr_page; 615d9bb58e5SYang Zhong unsigned vidx = env->vtlb_index++ % CPU_VTLB_SIZE; 616d9bb58e5SYang Zhong int asidx = cpu_asidx_from_attrs(cpu, attrs); 617d9bb58e5SYang Zhong 618d9bb58e5SYang Zhong assert_cpu_is_self(cpu); 61955df6fcfSPeter Maydell 62055df6fcfSPeter Maydell if (size < TARGET_PAGE_SIZE) { 62155df6fcfSPeter Maydell sz = TARGET_PAGE_SIZE; 62255df6fcfSPeter Maydell } else { 62355df6fcfSPeter Maydell if (size > TARGET_PAGE_SIZE) { 624d9bb58e5SYang Zhong tlb_add_large_page(env, vaddr, size); 625d9bb58e5SYang Zhong } 626d9bb58e5SYang Zhong sz = size; 62755df6fcfSPeter Maydell } 62855df6fcfSPeter Maydell vaddr_page = vaddr & TARGET_PAGE_MASK; 62955df6fcfSPeter Maydell paddr_page = paddr & TARGET_PAGE_MASK; 63055df6fcfSPeter Maydell 63155df6fcfSPeter Maydell section = address_space_translate_for_iotlb(cpu, asidx, paddr_page, 63255df6fcfSPeter Maydell &xlat, &sz, attrs, &prot); 633d9bb58e5SYang Zhong assert(sz >= TARGET_PAGE_SIZE); 634d9bb58e5SYang Zhong 635d9bb58e5SYang Zhong tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx 636d9bb58e5SYang Zhong " prot=%x idx=%d\n", 637d9bb58e5SYang Zhong vaddr, paddr, prot, mmu_idx); 638d9bb58e5SYang Zhong 63955df6fcfSPeter Maydell address = vaddr_page; 64055df6fcfSPeter Maydell if (size < TARGET_PAGE_SIZE) { 64155df6fcfSPeter Maydell /* 64255df6fcfSPeter Maydell * Slow-path the TLB entries; we will repeat the MMU check and TLB 64355df6fcfSPeter Maydell * fill on every access. 
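 * (TLB_RECHECK lives in the low bits of the entry, so the fast-path page
 *  comparison never matches and every access drops to the slow path;
 *  io_readx(), io_writex() and get_page_addr_code() then redo the lookup
 *  via tlb_fill().)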
64455df6fcfSPeter Maydell */ 64555df6fcfSPeter Maydell address |= TLB_RECHECK; 64655df6fcfSPeter Maydell } 64755df6fcfSPeter Maydell if (!memory_region_is_ram(section->mr) && 64855df6fcfSPeter Maydell !memory_region_is_romd(section->mr)) { 649d9bb58e5SYang Zhong /* IO memory case */ 650d9bb58e5SYang Zhong address |= TLB_MMIO; 651d9bb58e5SYang Zhong addend = 0; 652d9bb58e5SYang Zhong } else { 653d9bb58e5SYang Zhong /* TLB_MMIO for rom/romd handled below */ 654d9bb58e5SYang Zhong addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat; 655d9bb58e5SYang Zhong } 656d9bb58e5SYang Zhong 657d9bb58e5SYang Zhong code_address = address; 65855df6fcfSPeter Maydell iotlb = memory_region_section_get_iotlb(cpu, section, vaddr_page, 65955df6fcfSPeter Maydell paddr_page, xlat, prot, &address); 660d9bb58e5SYang Zhong 66155df6fcfSPeter Maydell index = (vaddr_page >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); 662d9bb58e5SYang Zhong te = &env->tlb_table[mmu_idx][index]; 663d9bb58e5SYang Zhong /* do not discard the translation in te, evict it into a victim tlb */ 664d9bb58e5SYang Zhong tv = &env->tlb_v_table[mmu_idx][vidx]; 665d9bb58e5SYang Zhong 666d9bb58e5SYang Zhong /* addr_write can race with tlb_reset_dirty_range */ 667d9bb58e5SYang Zhong copy_tlb_helper(tv, te, true); 668d9bb58e5SYang Zhong 669d9bb58e5SYang Zhong env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index]; 670d9bb58e5SYang Zhong 671d9bb58e5SYang Zhong /* refill the tlb */ 672ace41090SPeter Maydell /* 673ace41090SPeter Maydell * At this point iotlb contains a physical section number in the lower 674ace41090SPeter Maydell * TARGET_PAGE_BITS, and either 675ace41090SPeter Maydell * + the ram_addr_t of the page base of the target RAM (if NOTDIRTY or ROM) 676ace41090SPeter Maydell * + the offset within section->mr of the page base (otherwise) 67755df6fcfSPeter Maydell * We subtract the vaddr_page (which is page aligned and thus won't 678ace41090SPeter Maydell * disturb the low bits) to give an offset which can be added to the 679ace41090SPeter Maydell * (non-page-aligned) vaddr of the eventual memory access to get 680ace41090SPeter Maydell * the MemoryRegion offset for the access. Note that the vaddr we 681ace41090SPeter Maydell * subtract here is that of the page base, and not the same as the 682ace41090SPeter Maydell * vaddr we add back in io_readx()/io_writex()/get_page_addr_code(). 683ace41090SPeter Maydell */ 68455df6fcfSPeter Maydell env->iotlb[mmu_idx][index].addr = iotlb - vaddr_page; 685d9bb58e5SYang Zhong env->iotlb[mmu_idx][index].attrs = attrs; 686d9bb58e5SYang Zhong 687d9bb58e5SYang Zhong /* Now calculate the new entry */ 68855df6fcfSPeter Maydell tn.addend = addend - vaddr_page; 689d9bb58e5SYang Zhong if (prot & PAGE_READ) { 690d9bb58e5SYang Zhong tn.addr_read = address; 691d9bb58e5SYang Zhong } else { 692d9bb58e5SYang Zhong tn.addr_read = -1; 693d9bb58e5SYang Zhong } 694d9bb58e5SYang Zhong 695d9bb58e5SYang Zhong if (prot & PAGE_EXEC) { 696d9bb58e5SYang Zhong tn.addr_code = code_address; 697d9bb58e5SYang Zhong } else { 698d9bb58e5SYang Zhong tn.addr_code = -1; 699d9bb58e5SYang Zhong } 700d9bb58e5SYang Zhong 701d9bb58e5SYang Zhong tn.addr_write = -1; 702d9bb58e5SYang Zhong if (prot & PAGE_WRITE) { 703d9bb58e5SYang Zhong if ((memory_region_is_ram(section->mr) && section->readonly) 704d9bb58e5SYang Zhong || memory_region_is_romd(section->mr)) { 705d9bb58e5SYang Zhong /* Write access calls the I/O callback. 
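   (Marking the entry TLB_MMIO sends stores through io_writex(), so the
    region's write handling is applied instead of the fast path writing
    straight into the RAM backing.)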
*/ 706d9bb58e5SYang Zhong tn.addr_write = address | TLB_MMIO; 707d9bb58e5SYang Zhong } else if (memory_region_is_ram(section->mr) 708d9bb58e5SYang Zhong && cpu_physical_memory_is_clean( 709d9bb58e5SYang Zhong memory_region_get_ram_addr(section->mr) + xlat)) { 710d9bb58e5SYang Zhong tn.addr_write = address | TLB_NOTDIRTY; 711d9bb58e5SYang Zhong } else { 712d9bb58e5SYang Zhong tn.addr_write = address; 713d9bb58e5SYang Zhong } 714f52bfb12SDavid Hildenbrand if (prot & PAGE_WRITE_INV) { 715f52bfb12SDavid Hildenbrand tn.addr_write |= TLB_INVALID_MASK; 716f52bfb12SDavid Hildenbrand } 717d9bb58e5SYang Zhong } 718d9bb58e5SYang Zhong 719d9bb58e5SYang Zhong /* Pairs with flag setting in tlb_reset_dirty_range */ 720d9bb58e5SYang Zhong copy_tlb_helper(te, &tn, true); 721d9bb58e5SYang Zhong /* atomic_mb_set(&te->addr_write, write_address); */ 722d9bb58e5SYang Zhong } 723d9bb58e5SYang Zhong 724d9bb58e5SYang Zhong /* Add a new TLB entry, but without specifying the memory 725d9bb58e5SYang Zhong * transaction attributes to be used. 726d9bb58e5SYang Zhong */ 727d9bb58e5SYang Zhong void tlb_set_page(CPUState *cpu, target_ulong vaddr, 728d9bb58e5SYang Zhong hwaddr paddr, int prot, 729d9bb58e5SYang Zhong int mmu_idx, target_ulong size) 730d9bb58e5SYang Zhong { 731d9bb58e5SYang Zhong tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED, 732d9bb58e5SYang Zhong prot, mmu_idx, size); 733d9bb58e5SYang Zhong } 734d9bb58e5SYang Zhong 735d9bb58e5SYang Zhong static void report_bad_exec(CPUState *cpu, target_ulong addr) 736d9bb58e5SYang Zhong { 737d9bb58e5SYang Zhong /* Accidentally executing outside RAM or ROM is quite common for 738d9bb58e5SYang Zhong * several user-error situations, so report it in a way that 739d9bb58e5SYang Zhong * makes it clear that this isn't a QEMU bug and provide suggestions 740d9bb58e5SYang Zhong * about what a user could do to fix things. 
741d9bb58e5SYang Zhong */ 742d9bb58e5SYang Zhong error_report("Trying to execute code outside RAM or ROM at 0x" 743d9bb58e5SYang Zhong TARGET_FMT_lx, addr); 744d9bb58e5SYang Zhong error_printf("This usually means one of the following happened:\n\n" 745d9bb58e5SYang Zhong "(1) You told QEMU to execute a kernel for the wrong machine " 746d9bb58e5SYang Zhong "type, and it crashed on startup (eg trying to run a " 747d9bb58e5SYang Zhong "raspberry pi kernel on a versatilepb QEMU machine)\n" 748d9bb58e5SYang Zhong "(2) You didn't give QEMU a kernel or BIOS filename at all, " 749d9bb58e5SYang Zhong "and QEMU executed a ROM full of no-op instructions until " 750d9bb58e5SYang Zhong "it fell off the end\n" 751d9bb58e5SYang Zhong "(3) Your guest kernel has a bug and crashed by jumping " 752d9bb58e5SYang Zhong "off into nowhere\n\n" 753d9bb58e5SYang Zhong "This is almost always one of the first two, so check your " 754d9bb58e5SYang Zhong "command line and that you are using the right type of kernel " 755d9bb58e5SYang Zhong "for this machine.\n" 756d9bb58e5SYang Zhong "If you think option (3) is likely then you can try debugging " 757d9bb58e5SYang Zhong "your guest with the -d debug options; in particular " 758d9bb58e5SYang Zhong "-d guest_errors will cause the log to include a dump of the " 759d9bb58e5SYang Zhong "guest register state at this point.\n\n" 760d9bb58e5SYang Zhong "Execution cannot continue; stopping here.\n\n"); 761d9bb58e5SYang Zhong 762d9bb58e5SYang Zhong /* Report also to the logs, with more detail including register dump */ 763d9bb58e5SYang Zhong qemu_log_mask(LOG_GUEST_ERROR, "qemu: fatal: Trying to execute code " 764d9bb58e5SYang Zhong "outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr); 765d9bb58e5SYang Zhong log_cpu_state_mask(LOG_GUEST_ERROR, cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP); 766d9bb58e5SYang Zhong } 767d9bb58e5SYang Zhong 768d9bb58e5SYang Zhong static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr) 769d9bb58e5SYang Zhong { 770d9bb58e5SYang Zhong ram_addr_t ram_addr; 771d9bb58e5SYang Zhong 772d9bb58e5SYang Zhong ram_addr = qemu_ram_addr_from_host(ptr); 773d9bb58e5SYang Zhong if (ram_addr == RAM_ADDR_INVALID) { 774d9bb58e5SYang Zhong error_report("Bad ram pointer %p", ptr); 775d9bb58e5SYang Zhong abort(); 776d9bb58e5SYang Zhong } 777d9bb58e5SYang Zhong return ram_addr; 778d9bb58e5SYang Zhong } 779d9bb58e5SYang Zhong 780d9bb58e5SYang Zhong static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry, 78104e3aabdSPeter Maydell int mmu_idx, 78255df6fcfSPeter Maydell target_ulong addr, uintptr_t retaddr, 78355df6fcfSPeter Maydell bool recheck, int size) 784d9bb58e5SYang Zhong { 785d9bb58e5SYang Zhong CPUState *cpu = ENV_GET_CPU(env); 7862d54f194SPeter Maydell hwaddr mr_offset; 7872d54f194SPeter Maydell MemoryRegionSection *section; 7882d54f194SPeter Maydell MemoryRegion *mr; 789d9bb58e5SYang Zhong uint64_t val; 790d9bb58e5SYang Zhong bool locked = false; 79104e3aabdSPeter Maydell MemTxResult r; 792d9bb58e5SYang Zhong 79355df6fcfSPeter Maydell if (recheck) { 79455df6fcfSPeter Maydell /* 79555df6fcfSPeter Maydell * This is a TLB_RECHECK access, where the MMU protection 79655df6fcfSPeter Maydell * covers a smaller range than a target page, and we must 79755df6fcfSPeter Maydell * repeat the MMU check here. This tlb_fill() call might 79855df6fcfSPeter Maydell * longjump out if this access should cause a guest exception. 
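 * (If the refilled entry turns out to be plain RAM we read the data
 *  directly with ldn_p() below; otherwise we fall through to the normal
 *  MMIO dispatch.)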
79955df6fcfSPeter Maydell */ 80055df6fcfSPeter Maydell int index; 80155df6fcfSPeter Maydell target_ulong tlb_addr; 80255df6fcfSPeter Maydell 80355df6fcfSPeter Maydell tlb_fill(cpu, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr); 80455df6fcfSPeter Maydell 80555df6fcfSPeter Maydell index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); 80655df6fcfSPeter Maydell tlb_addr = env->tlb_table[mmu_idx][index].addr_read; 80755df6fcfSPeter Maydell if (!(tlb_addr & ~(TARGET_PAGE_MASK | TLB_RECHECK))) { 80855df6fcfSPeter Maydell /* RAM access */ 80955df6fcfSPeter Maydell uintptr_t haddr = addr + env->tlb_table[mmu_idx][index].addend; 81055df6fcfSPeter Maydell 81155df6fcfSPeter Maydell return ldn_p((void *)haddr, size); 81255df6fcfSPeter Maydell } 81355df6fcfSPeter Maydell /* Fall through for handling IO accesses */ 81455df6fcfSPeter Maydell } 81555df6fcfSPeter Maydell 8162d54f194SPeter Maydell section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs); 8172d54f194SPeter Maydell mr = section->mr; 8182d54f194SPeter Maydell mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr; 819d9bb58e5SYang Zhong cpu->mem_io_pc = retaddr; 820d9bb58e5SYang Zhong if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) { 821d9bb58e5SYang Zhong cpu_io_recompile(cpu, retaddr); 822d9bb58e5SYang Zhong } 823d9bb58e5SYang Zhong 824d9bb58e5SYang Zhong cpu->mem_io_vaddr = addr; 825d9bb58e5SYang Zhong 8268b812533SAlex Bennée if (mr->global_locking && !qemu_mutex_iothread_locked()) { 827d9bb58e5SYang Zhong qemu_mutex_lock_iothread(); 828d9bb58e5SYang Zhong locked = true; 829d9bb58e5SYang Zhong } 8302d54f194SPeter Maydell r = memory_region_dispatch_read(mr, mr_offset, 83104e3aabdSPeter Maydell &val, size, iotlbentry->attrs); 83204e3aabdSPeter Maydell if (r != MEMTX_OK) { 8332d54f194SPeter Maydell hwaddr physaddr = mr_offset + 8342d54f194SPeter Maydell section->offset_within_address_space - 8352d54f194SPeter Maydell section->offset_within_region; 8362d54f194SPeter Maydell 83704e3aabdSPeter Maydell cpu_transaction_failed(cpu, physaddr, addr, size, MMU_DATA_LOAD, 83804e3aabdSPeter Maydell mmu_idx, iotlbentry->attrs, r, retaddr); 83904e3aabdSPeter Maydell } 840d9bb58e5SYang Zhong if (locked) { 841d9bb58e5SYang Zhong qemu_mutex_unlock_iothread(); 842d9bb58e5SYang Zhong } 843d9bb58e5SYang Zhong 844d9bb58e5SYang Zhong return val; 845d9bb58e5SYang Zhong } 846d9bb58e5SYang Zhong 847d9bb58e5SYang Zhong static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry, 84804e3aabdSPeter Maydell int mmu_idx, 849d9bb58e5SYang Zhong uint64_t val, target_ulong addr, 85055df6fcfSPeter Maydell uintptr_t retaddr, bool recheck, int size) 851d9bb58e5SYang Zhong { 852d9bb58e5SYang Zhong CPUState *cpu = ENV_GET_CPU(env); 8532d54f194SPeter Maydell hwaddr mr_offset; 8542d54f194SPeter Maydell MemoryRegionSection *section; 8552d54f194SPeter Maydell MemoryRegion *mr; 856d9bb58e5SYang Zhong bool locked = false; 85704e3aabdSPeter Maydell MemTxResult r; 858d9bb58e5SYang Zhong 85955df6fcfSPeter Maydell if (recheck) { 86055df6fcfSPeter Maydell /* 86155df6fcfSPeter Maydell * This is a TLB_RECHECK access, where the MMU protection 86255df6fcfSPeter Maydell * covers a smaller range than a target page, and we must 86355df6fcfSPeter Maydell * repeat the MMU check here. This tlb_fill() call might 86455df6fcfSPeter Maydell * longjump out if this access should cause a guest exception. 
86555df6fcfSPeter Maydell */ 86655df6fcfSPeter Maydell int index; 86755df6fcfSPeter Maydell target_ulong tlb_addr; 86855df6fcfSPeter Maydell 86955df6fcfSPeter Maydell tlb_fill(cpu, addr, size, MMU_DATA_STORE, mmu_idx, retaddr); 87055df6fcfSPeter Maydell 87155df6fcfSPeter Maydell index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); 87255df6fcfSPeter Maydell tlb_addr = env->tlb_table[mmu_idx][index].addr_write; 87355df6fcfSPeter Maydell if (!(tlb_addr & ~(TARGET_PAGE_MASK | TLB_RECHECK))) { 87455df6fcfSPeter Maydell /* RAM access */ 87555df6fcfSPeter Maydell uintptr_t haddr = addr + env->tlb_table[mmu_idx][index].addend; 87655df6fcfSPeter Maydell 87755df6fcfSPeter Maydell stn_p((void *)haddr, size, val); 87855df6fcfSPeter Maydell return; 87955df6fcfSPeter Maydell } 88055df6fcfSPeter Maydell /* Fall through for handling IO accesses */ 88155df6fcfSPeter Maydell } 88255df6fcfSPeter Maydell 8832d54f194SPeter Maydell section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs); 8842d54f194SPeter Maydell mr = section->mr; 8852d54f194SPeter Maydell mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr; 886d9bb58e5SYang Zhong if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) { 887d9bb58e5SYang Zhong cpu_io_recompile(cpu, retaddr); 888d9bb58e5SYang Zhong } 889d9bb58e5SYang Zhong cpu->mem_io_vaddr = addr; 890d9bb58e5SYang Zhong cpu->mem_io_pc = retaddr; 891d9bb58e5SYang Zhong 8928b812533SAlex Bennée if (mr->global_locking && !qemu_mutex_iothread_locked()) { 893d9bb58e5SYang Zhong qemu_mutex_lock_iothread(); 894d9bb58e5SYang Zhong locked = true; 895d9bb58e5SYang Zhong } 8962d54f194SPeter Maydell r = memory_region_dispatch_write(mr, mr_offset, 89704e3aabdSPeter Maydell val, size, iotlbentry->attrs); 89804e3aabdSPeter Maydell if (r != MEMTX_OK) { 8992d54f194SPeter Maydell hwaddr physaddr = mr_offset + 9002d54f194SPeter Maydell section->offset_within_address_space - 9012d54f194SPeter Maydell section->offset_within_region; 9022d54f194SPeter Maydell 90304e3aabdSPeter Maydell cpu_transaction_failed(cpu, physaddr, addr, size, MMU_DATA_STORE, 90404e3aabdSPeter Maydell mmu_idx, iotlbentry->attrs, r, retaddr); 90504e3aabdSPeter Maydell } 906d9bb58e5SYang Zhong if (locked) { 907d9bb58e5SYang Zhong qemu_mutex_unlock_iothread(); 908d9bb58e5SYang Zhong } 909d9bb58e5SYang Zhong } 910d9bb58e5SYang Zhong 911d9bb58e5SYang Zhong /* Return true if ADDR is present in the victim tlb, and has been copied 912d9bb58e5SYang Zhong back to the main tlb. */ 913d9bb58e5SYang Zhong static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index, 914d9bb58e5SYang Zhong size_t elt_ofs, target_ulong page) 915d9bb58e5SYang Zhong { 916d9bb58e5SYang Zhong size_t vidx; 917d9bb58e5SYang Zhong for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) { 918d9bb58e5SYang Zhong CPUTLBEntry *vtlb = &env->tlb_v_table[mmu_idx][vidx]; 919d9bb58e5SYang Zhong target_ulong cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs); 920d9bb58e5SYang Zhong 921d9bb58e5SYang Zhong if (cmp == page) { 922d9bb58e5SYang Zhong /* Found entry in victim tlb, swap tlb and iotlb. 
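   (The displaced main-TLB entry is not lost: the three-way copy moves it
    into the victim slot, so a later access to that page can still hit here
    instead of needing another tlb_fill().)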
*/ 923d9bb58e5SYang Zhong CPUTLBEntry tmptlb, *tlb = &env->tlb_table[mmu_idx][index]; 924d9bb58e5SYang Zhong 925d9bb58e5SYang Zhong copy_tlb_helper(&tmptlb, tlb, false); 926d9bb58e5SYang Zhong copy_tlb_helper(tlb, vtlb, true); 927d9bb58e5SYang Zhong copy_tlb_helper(vtlb, &tmptlb, true); 928d9bb58e5SYang Zhong 929d9bb58e5SYang Zhong CPUIOTLBEntry tmpio, *io = &env->iotlb[mmu_idx][index]; 930d9bb58e5SYang Zhong CPUIOTLBEntry *vio = &env->iotlb_v[mmu_idx][vidx]; 931d9bb58e5SYang Zhong tmpio = *io; *io = *vio; *vio = tmpio; 932d9bb58e5SYang Zhong return true; 933d9bb58e5SYang Zhong } 934d9bb58e5SYang Zhong } 935d9bb58e5SYang Zhong return false; 936d9bb58e5SYang Zhong } 937d9bb58e5SYang Zhong 938d9bb58e5SYang Zhong /* Macro to call the above, with local variables from the use context. */ 939d9bb58e5SYang Zhong #define VICTIM_TLB_HIT(TY, ADDR) \ 940d9bb58e5SYang Zhong victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \ 941d9bb58e5SYang Zhong (ADDR) & TARGET_PAGE_MASK) 942d9bb58e5SYang Zhong 943f2553f04SKONRAD Frederic /* NOTE: this function can trigger an exception */ 944f2553f04SKONRAD Frederic /* NOTE2: the returned address is not exactly the physical address: it 945f2553f04SKONRAD Frederic * is actually a ram_addr_t (in system mode; the user mode emulation 946f2553f04SKONRAD Frederic * version of this function returns a guest virtual address). 947f2553f04SKONRAD Frederic */ 948f2553f04SKONRAD Frederic tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr) 949f2553f04SKONRAD Frederic { 9502d54f194SPeter Maydell int mmu_idx, index; 951f2553f04SKONRAD Frederic void *p; 952f2553f04SKONRAD Frederic MemoryRegion *mr; 9532d54f194SPeter Maydell MemoryRegionSection *section; 954f2553f04SKONRAD Frederic CPUState *cpu = ENV_GET_CPU(env); 955f2553f04SKONRAD Frederic CPUIOTLBEntry *iotlbentry; 9562d54f194SPeter Maydell hwaddr physaddr, mr_offset; 957f2553f04SKONRAD Frederic 958f2553f04SKONRAD Frederic index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); 959f2553f04SKONRAD Frederic mmu_idx = cpu_mmu_index(env, true); 960f2553f04SKONRAD Frederic if (unlikely(env->tlb_table[mmu_idx][index].addr_code != 96171b9a453SKONRAD Frederic (addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK)))) { 96271b9a453SKONRAD Frederic if (!VICTIM_TLB_HIT(addr_read, addr)) { 96398670d47SLaurent Vivier tlb_fill(ENV_GET_CPU(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0); 96471b9a453SKONRAD Frederic } 965f2553f04SKONRAD Frederic } 96655df6fcfSPeter Maydell 96755df6fcfSPeter Maydell if (unlikely(env->tlb_table[mmu_idx][index].addr_code & TLB_RECHECK)) { 96855df6fcfSPeter Maydell /* 96955df6fcfSPeter Maydell * This is a TLB_RECHECK access, where the MMU protection 97055df6fcfSPeter Maydell * covers a smaller range than a target page, and we must 97155df6fcfSPeter Maydell * repeat the MMU check here. This tlb_fill() call might 97255df6fcfSPeter Maydell * longjump out if this access should cause a guest exception. 97355df6fcfSPeter Maydell */ 97455df6fcfSPeter Maydell int index; 97555df6fcfSPeter Maydell target_ulong tlb_addr; 97655df6fcfSPeter Maydell 97755df6fcfSPeter Maydell tlb_fill(cpu, addr, 0, MMU_INST_FETCH, mmu_idx, 0); 97855df6fcfSPeter Maydell 97955df6fcfSPeter Maydell index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); 98055df6fcfSPeter Maydell tlb_addr = env->tlb_table[mmu_idx][index].addr_code; 98155df6fcfSPeter Maydell if (!(tlb_addr & ~(TARGET_PAGE_MASK | TLB_RECHECK))) { 98255df6fcfSPeter Maydell /* RAM access. 
We can't handle this, so for now just stop */ 98355df6fcfSPeter Maydell cpu_abort(cpu, "Unable to handle guest executing from RAM within " 98455df6fcfSPeter Maydell "a small MPU region at 0x" TARGET_FMT_lx, addr); 98555df6fcfSPeter Maydell } 98655df6fcfSPeter Maydell /* 98755df6fcfSPeter Maydell * Fall through to handle IO accesses (which will almost certainly 98855df6fcfSPeter Maydell * also result in failure) 98955df6fcfSPeter Maydell */ 99055df6fcfSPeter Maydell } 99155df6fcfSPeter Maydell 992f2553f04SKONRAD Frederic iotlbentry = &env->iotlb[mmu_idx][index]; 9932d54f194SPeter Maydell section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs); 9942d54f194SPeter Maydell mr = section->mr; 995f2553f04SKONRAD Frederic if (memory_region_is_unassigned(mr)) { 996c9356746SKONRAD Frederic qemu_mutex_lock_iothread(); 997c9356746SKONRAD Frederic if (memory_region_request_mmio_ptr(mr, addr)) { 998c9356746SKONRAD Frederic qemu_mutex_unlock_iothread(); 999c9356746SKONRAD Frederic /* A MemoryRegion is potentially added so re-run the 1000c9356746SKONRAD Frederic * get_page_addr_code. 1001c9356746SKONRAD Frederic */ 1002c9356746SKONRAD Frederic return get_page_addr_code(env, addr); 1003c9356746SKONRAD Frederic } 1004c9356746SKONRAD Frederic qemu_mutex_unlock_iothread(); 1005c9356746SKONRAD Frederic 100604e3aabdSPeter Maydell /* Give the new-style cpu_transaction_failed() hook first chance 100704e3aabdSPeter Maydell * to handle this. 100804e3aabdSPeter Maydell * This is not the ideal place to detect and generate CPU 100904e3aabdSPeter Maydell * exceptions for instruction fetch failure (for instance 101004e3aabdSPeter Maydell * we don't know the length of the access that the CPU would 101104e3aabdSPeter Maydell * use, and it would be better to go ahead and try the access 101204e3aabdSPeter Maydell * and use the MemTXResult it produced). However it is the 101304e3aabdSPeter Maydell * simplest place we have currently available for the check. 101404e3aabdSPeter Maydell */ 10152d54f194SPeter Maydell mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr; 10162d54f194SPeter Maydell physaddr = mr_offset + 10172d54f194SPeter Maydell section->offset_within_address_space - 10182d54f194SPeter Maydell section->offset_within_region; 101904e3aabdSPeter Maydell cpu_transaction_failed(cpu, physaddr, addr, 0, MMU_INST_FETCH, mmu_idx, 102004e3aabdSPeter Maydell iotlbentry->attrs, MEMTX_DECODE_ERROR, 0); 102104e3aabdSPeter Maydell 1022f2553f04SKONRAD Frederic cpu_unassigned_access(cpu, addr, false, true, 0, 4); 1023f2553f04SKONRAD Frederic /* The CPU's unassigned access hook might have longjumped out 1024f2553f04SKONRAD Frederic * with an exception. If it didn't (or there was no hook) then 1025f2553f04SKONRAD Frederic * we can't proceed further. 1026f2553f04SKONRAD Frederic */ 1027f2553f04SKONRAD Frederic report_bad_exec(cpu, addr); 1028f2553f04SKONRAD Frederic exit(1); 1029f2553f04SKONRAD Frederic } 1030f2553f04SKONRAD Frederic p = (void *)((uintptr_t)addr + env->tlb_table[mmu_idx][index].addend); 1031f2553f04SKONRAD Frederic return qemu_ram_addr_from_host_nofail(p); 1032f2553f04SKONRAD Frederic } 1033f2553f04SKONRAD Frederic 1034d9bb58e5SYang Zhong /* Probe for whether the specified guest write access is permitted. 1035d9bb58e5SYang Zhong * If it is not permitted then an exception will be taken in the same 1036d9bb58e5SYang Zhong * way as if this were a real write access (and we will not return). 
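 * (Any such exception is raised from the tlb_fill() call below, which
 *  uses the passed retaddr to unwind to the guest instruction.)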
1037d9bb58e5SYang Zhong * Otherwise the function will return, and there will be a valid 1038d9bb58e5SYang Zhong * entry in the TLB for this access. 1039d9bb58e5SYang Zhong */ 104098670d47SLaurent Vivier void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx, 1041d9bb58e5SYang Zhong uintptr_t retaddr) 1042d9bb58e5SYang Zhong { 1043d9bb58e5SYang Zhong int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); 1044d9bb58e5SYang Zhong target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write; 1045d9bb58e5SYang Zhong 1046*334692bcSPeter Maydell if (!tlb_hit(tlb_addr, addr)) { 1047d9bb58e5SYang Zhong /* TLB entry is for a different page */ 1048d9bb58e5SYang Zhong if (!VICTIM_TLB_HIT(addr_write, addr)) { 104998670d47SLaurent Vivier tlb_fill(ENV_GET_CPU(env), addr, size, MMU_DATA_STORE, 105098670d47SLaurent Vivier mmu_idx, retaddr); 1051d9bb58e5SYang Zhong } 1052d9bb58e5SYang Zhong } 1053d9bb58e5SYang Zhong } 1054d9bb58e5SYang Zhong 1055d9bb58e5SYang Zhong /* Probe for a read-modify-write atomic operation. Do not allow unaligned 1056d9bb58e5SYang Zhong * operations, or io operations to proceed. Return the host address. */ 1057d9bb58e5SYang Zhong static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, 105834d49937SPeter Maydell TCGMemOpIdx oi, uintptr_t retaddr, 105934d49937SPeter Maydell NotDirtyInfo *ndi) 1060d9bb58e5SYang Zhong { 1061d9bb58e5SYang Zhong size_t mmu_idx = get_mmuidx(oi); 1062d9bb58e5SYang Zhong size_t index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); 1063d9bb58e5SYang Zhong CPUTLBEntry *tlbe = &env->tlb_table[mmu_idx][index]; 1064d9bb58e5SYang Zhong target_ulong tlb_addr = tlbe->addr_write; 1065d9bb58e5SYang Zhong TCGMemOp mop = get_memop(oi); 1066d9bb58e5SYang Zhong int a_bits = get_alignment_bits(mop); 1067d9bb58e5SYang Zhong int s_bits = mop & MO_SIZE; 106834d49937SPeter Maydell void *hostaddr; 1069d9bb58e5SYang Zhong 1070d9bb58e5SYang Zhong /* Adjust the given return address. */ 1071d9bb58e5SYang Zhong retaddr -= GETPC_ADJ; 1072d9bb58e5SYang Zhong 1073d9bb58e5SYang Zhong /* Enforce guest required alignment. */ 1074d9bb58e5SYang Zhong if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) { 1075d9bb58e5SYang Zhong /* ??? Maybe indicate atomic op to cpu_unaligned_access */ 1076d9bb58e5SYang Zhong cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE, 1077d9bb58e5SYang Zhong mmu_idx, retaddr); 1078d9bb58e5SYang Zhong } 1079d9bb58e5SYang Zhong 1080d9bb58e5SYang Zhong /* Enforce qemu required alignment. */ 1081d9bb58e5SYang Zhong if (unlikely(addr & ((1 << s_bits) - 1))) { 1082d9bb58e5SYang Zhong /* We get here if guest alignment was not requested, 1083d9bb58e5SYang Zhong or was not enforced by cpu_unaligned_access above. 1084d9bb58e5SYang Zhong We might widen the access and emulate, but for now 1085d9bb58e5SYang Zhong mark an exception and exit the cpu loop. */ 1086d9bb58e5SYang Zhong goto stop_the_world; 1087d9bb58e5SYang Zhong } 1088d9bb58e5SYang Zhong 1089d9bb58e5SYang Zhong /* Check TLB entry and enforce page permissions. 
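   (As for an ordinary store miss: try the victim TLB first and only then
    call tlb_fill(), which may raise the guest fault and not return.)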

    /* Check TLB entry and enforce page permissions.  */
    if (!tlb_hit(tlb_addr, addr)) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, 1 << s_bits, MMU_DATA_STORE,
                     mmu_idx, retaddr);
        }
        tlb_addr = tlbe->addr_write & ~TLB_INVALID_MASK;
    }

    /* Notice an IO access or a needs-MMU-lookup access */
    if (unlikely(tlb_addr & (TLB_MMIO | TLB_RECHECK))) {
        /* There's really nothing that can be done to
           support this apart from stop-the-world.  */
        goto stop_the_world;
    }

    /* Let the guest notice RMW on a write-only page.  */
    if (unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) {
        tlb_fill(ENV_GET_CPU(env), addr, 1 << s_bits, MMU_DATA_LOAD,
                 mmu_idx, retaddr);
        /* Since we don't support reads and writes to different addresses,
           and we do have the proper page loaded for write, this shouldn't
           ever return.  But just in case, handle via stop-the-world.  */
        goto stop_the_world;
    }

    hostaddr = (void *)((uintptr_t)addr + tlbe->addend);

    ndi->active = false;
    if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
        ndi->active = true;
        memory_notdirty_write_prepare(ndi, ENV_GET_CPU(env), addr,
                                      qemu_ram_addr_from_host_nofail(hostaddr),
                                      1 << s_bits);
    }

    return hostaddr;

 stop_the_world:
    cpu_loop_exit_atomic(ENV_GET_CPU(env), retaddr);
}

#ifdef TARGET_WORDS_BIGENDIAN
# define TGT_BE(X)  (X)
# define TGT_LE(X)  BSWAP(X)
#else
# define TGT_BE(X)  BSWAP(X)
# define TGT_LE(X)  (X)
#endif

#define MMUSUFFIX _mmu

#define DATA_SIZE 1
#include "softmmu_template.h"

#define DATA_SIZE 2
#include "softmmu_template.h"

#define DATA_SIZE 4
#include "softmmu_template.h"

#define DATA_SIZE 8
#include "softmmu_template.h"
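
/* Note on the includes above (illustrative, helper names assumed from
 * softmmu_template.h): each inclusion expands the template for one
 * DATA_SIZE, so the four includes provide the byte/word/long/quad load
 * and store helpers such as helper_ret_ldub_mmu() and
 * helper_le_ldul_mmu()/helper_be_ldul_mmu(), with TGT_LE()/TGT_BE()
 * used by the template to byte-swap where guest and host order differ.
 */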

/* First set of helpers allows passing in of OI and RETADDR.  This makes
   them callable from other helpers.  */

#define EXTRA_ARGS     , TCGMemOpIdx oi, uintptr_t retaddr
#define ATOMIC_NAME(X) \
    HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
#define ATOMIC_MMU_DECLS NotDirtyInfo ndi
#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, retaddr, &ndi)
#define ATOMIC_MMU_CLEANUP                              \
    do {                                                \
        if (unlikely(ndi.active)) {                     \
            memory_notdirty_write_complete(&ndi);       \
        }                                               \
    } while (0)

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#ifdef CONFIG_ATOMIC128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif

/* Second set of helpers are directly callable from TCG as helpers.  */

#undef EXTRA_ARGS
#undef ATOMIC_NAME
#undef ATOMIC_MMU_LOOKUP
#define EXTRA_ARGS         , TCGMemOpIdx oi
#define ATOMIC_NAME(X)     HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, GETPC(), &ndi)

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

/* Code access functions.  */

#undef MMUSUFFIX
#define MMUSUFFIX _cmmu
#undef GETPC
#define GETPC() ((uintptr_t)0)
#define SOFTMMU_CODE_ACCESS

#define DATA_SIZE 1
#include "softmmu_template.h"

#define DATA_SIZE 2
#include "softmmu_template.h"

#define DATA_SIZE 4
#include "softmmu_template.h"

#define DATA_SIZE 8
#include "softmmu_template.h"
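
/* Closing note (illustrative, names assumed from the templates rather
 * than defined here): redefining MMUSUFFIX to _cmmu and GETPC() to 0
 * before the last four includes re-expands softmmu_template.h for
 * instruction fetches, so a 4-byte code fetch is expected to go through
 * helper_le_ldl_cmmu() or helper_be_ldl_cmmu() depending on guest
 * endianness.  Similarly, ATOMIC_NAME() above expands e.g. cmpxchg at
 * DATA_SIZE 4 into helper_atomic_cmpxchgl_le_mmu() for a little-endian
 * guest.
 */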