1 /* 2 * Common CPU TLB handling 3 * 4 * Copyright (c) 2003 Fabrice Bellard 5 * 6 * This library is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU Lesser General Public 8 * License as published by the Free Software Foundation; either 9 * version 2.1 of the License, or (at your option) any later version. 10 * 11 * This library is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 * Lesser General Public License for more details. 15 * 16 * You should have received a copy of the GNU Lesser General Public 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 20 #include "qemu/osdep.h" 21 #include "qemu/main-loop.h" 22 #include "hw/core/tcg-cpu-ops.h" 23 #include "exec/exec-all.h" 24 #include "exec/memory.h" 25 #include "exec/cpu_ldst.h" 26 #include "exec/cputlb.h" 27 #include "exec/memory-internal.h" 28 #include "exec/ram_addr.h" 29 #include "tcg/tcg.h" 30 #include "qemu/error-report.h" 31 #include "exec/log.h" 32 #include "exec/helper-proto.h" 33 #include "qemu/atomic.h" 34 #include "qemu/atomic128.h" 35 #include "exec/translate-all.h" 36 #include "trace.h" 37 #include "tb-hash.h" 38 #include "internal.h" 39 #ifdef CONFIG_PLUGIN 40 #include "qemu/plugin-memory.h" 41 #endif 42 #include "tcg/tcg-ldst.h" 43 44 /* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */ 45 /* #define DEBUG_TLB */ 46 /* #define DEBUG_TLB_LOG */ 47 48 #ifdef DEBUG_TLB 49 # define DEBUG_TLB_GATE 1 50 # ifdef DEBUG_TLB_LOG 51 # define DEBUG_TLB_LOG_GATE 1 52 # else 53 # define DEBUG_TLB_LOG_GATE 0 54 # endif 55 #else 56 # define DEBUG_TLB_GATE 0 57 # define DEBUG_TLB_LOG_GATE 0 58 #endif 59 60 #define tlb_debug(fmt, ...) do { \ 61 if (DEBUG_TLB_LOG_GATE) { \ 62 qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \ 63 ## __VA_ARGS__); \ 64 } else if (DEBUG_TLB_GATE) { \ 65 fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \ 66 } \ 67 } while (0) 68 69 #define assert_cpu_is_self(cpu) do { \ 70 if (DEBUG_TLB_GATE) { \ 71 g_assert(!(cpu)->created || qemu_cpu_is_self(cpu)); \ 72 } \ 73 } while (0) 74 75 /* run_on_cpu_data.target_ptr should always be big enough for a 76 * target_ulong even on 32 bit builds */ 77 QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data)); 78 79 /* We currently can't handle more than 16 bits in the MMUIDX bitmask. 80 */ 81 QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16); 82 #define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1) 83 84 static inline size_t tlb_n_entries(CPUTLBDescFast *fast) 85 { 86 return (fast->mask >> CPU_TLB_ENTRY_BITS) + 1; 87 } 88 89 static inline size_t sizeof_tlb(CPUTLBDescFast *fast) 90 { 91 return fast->mask + (1 << CPU_TLB_ENTRY_BITS); 92 } 93 94 static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns, 95 size_t max_entries) 96 { 97 desc->window_begin_ns = ns; 98 desc->window_max_entries = max_entries; 99 } 100 101 static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr) 102 { 103 CPUJumpCache *jc = cpu->tb_jmp_cache; 104 int i, i0; 105 106 if (unlikely(!jc)) { 107 return; 108 } 109 110 i0 = tb_jmp_cache_hash_page(page_addr); 111 for (i = 0; i < TB_JMP_PAGE_SIZE; i++) { 112 qatomic_set(&jc->array[i0 + i].tb, NULL); 113 } 114 } 115 116 /** 117 * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary 118 * @desc: The CPUTLBDesc portion of the TLB 119 * @fast: The CPUTLBDescFast portion of the same TLB 120 * 121 * Called with tlb_lock_held. 122 * 123 * We have two main constraints when resizing a TLB: (1) we only resize it 124 * on a TLB flush (otherwise we'd have to take a perf hit by either rehashing 125 * the array or unnecessarily flushing it), which means we do not control how 126 * frequently the resizing can occur; (2) we don't have access to the guest's 127 * future scheduling decisions, and therefore have to decide the magnitude of 128 * the resize based on past observations. 129 * 130 * In general, a memory-hungry process can benefit greatly from an appropriately 131 * sized TLB, since a guest TLB miss is very expensive. This doesn't mean that 132 * we just have to make the TLB as large as possible; while an oversized TLB 133 * results in minimal TLB miss rates, it also takes longer to be flushed 134 * (flushes can be _very_ frequent), and the reduced locality can also hurt 135 * performance. 136 * 137 * To achieve near-optimal performance for all kinds of workloads, we: 138 * 139 * 1. Aggressively increase the size of the TLB when the use rate of the 140 * TLB being flushed is high, since it is likely that in the near future this 141 * memory-hungry process will execute again, and its memory hungriness will 142 * probably be similar. 143 * 144 * 2. Slowly reduce the size of the TLB as the use rate declines over a 145 * reasonably large time window. The rationale is that if in such a time window 146 * we have not observed a high TLB use rate, it is likely that we won't observe 147 * it in the near future. In that case, once a time window expires we downsize 148 * the TLB to match the maximum use rate observed in the window. 149 * 150 * 3. Try to keep the maximum use rate in a time window in the 30-70% range, 151 * since in that range performance is likely near-optimal. Recall that the TLB 152 * is direct mapped, so we want the use rate to be low (or at least not too 153 * high), since otherwise we are likely to have a significant amount of 154 * conflict misses. 155 */ 156 static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast, 157 int64_t now) 158 { 159 size_t old_size = tlb_n_entries(fast); 160 size_t rate; 161 size_t new_size = old_size; 162 int64_t window_len_ms = 100; 163 int64_t window_len_ns = window_len_ms * 1000 * 1000; 164 bool window_expired = now > desc->window_begin_ns + window_len_ns; 165 166 if (desc->n_used_entries > desc->window_max_entries) { 167 desc->window_max_entries = desc->n_used_entries; 168 } 169 rate = desc->window_max_entries * 100 / old_size; 170 171 if (rate > 70) { 172 new_size = MIN(old_size << 1, 1 << CPU_TLB_DYN_MAX_BITS); 173 } else if (rate < 30 && window_expired) { 174 size_t ceil = pow2ceil(desc->window_max_entries); 175 size_t expected_rate = desc->window_max_entries * 100 / ceil; 176 177 /* 178 * Avoid undersizing when the max number of entries seen is just below 179 * a pow2. For instance, if max_entries == 1025, the expected use rate 180 * would be 1025/2048==50%. However, if max_entries == 1023, we'd get 181 * 1023/1024==99.9% use rate, so we'd likely end up doubling the size 182 * later. Thus, make sure that the expected use rate remains below 70%. 183 * (and since we double the size, that means the lowest rate we'd 184 * expect to get is 35%, which is still in the 30-70% range where 185 * we consider that the size is appropriate.) 186 */ 187 if (expected_rate > 70) { 188 ceil *= 2; 189 } 190 new_size = MAX(ceil, 1 << CPU_TLB_DYN_MIN_BITS); 191 } 192 193 if (new_size == old_size) { 194 if (window_expired) { 195 tlb_window_reset(desc, now, desc->n_used_entries); 196 } 197 return; 198 } 199 200 g_free(fast->table); 201 g_free(desc->fulltlb); 202 203 tlb_window_reset(desc, now, 0); 204 /* desc->n_used_entries is cleared by the caller */ 205 fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS; 206 fast->table = g_try_new(CPUTLBEntry, new_size); 207 desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size); 208 209 /* 210 * If the allocations fail, try smaller sizes. We just freed some 211 * memory, so going back to half of new_size has a good chance of working. 212 * Increased memory pressure elsewhere in the system might cause the 213 * allocations to fail though, so we progressively reduce the allocation 214 * size, aborting if we cannot even allocate the smallest TLB we support. 215 */ 216 while (fast->table == NULL || desc->fulltlb == NULL) { 217 if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) { 218 error_report("%s: %s", __func__, strerror(errno)); 219 abort(); 220 } 221 new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS); 222 fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS; 223 224 g_free(fast->table); 225 g_free(desc->fulltlb); 226 fast->table = g_try_new(CPUTLBEntry, new_size); 227 desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size); 228 } 229 } 230 231 static void tlb_mmu_flush_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast) 232 { 233 desc->n_used_entries = 0; 234 desc->large_page_addr = -1; 235 desc->large_page_mask = -1; 236 desc->vindex = 0; 237 memset(fast->table, -1, sizeof_tlb(fast)); 238 memset(desc->vtable, -1, sizeof(desc->vtable)); 239 } 240 241 static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx, 242 int64_t now) 243 { 244 CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx]; 245 CPUTLBDescFast *fast = &env_tlb(env)->f[mmu_idx]; 246 247 tlb_mmu_resize_locked(desc, fast, now); 248 tlb_mmu_flush_locked(desc, fast); 249 } 250 251 static void tlb_mmu_init(CPUTLBDesc *desc, CPUTLBDescFast *fast, int64_t now) 252 { 253 size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS; 254 255 tlb_window_reset(desc, now, 0); 256 desc->n_used_entries = 0; 257 fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS; 258 fast->table = g_new(CPUTLBEntry, n_entries); 259 desc->fulltlb = g_new(CPUTLBEntryFull, n_entries); 260 tlb_mmu_flush_locked(desc, fast); 261 } 262 263 static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx) 264 { 265 env_tlb(env)->d[mmu_idx].n_used_entries++; 266 } 267 268 static inline void tlb_n_used_entries_dec(CPUArchState *env, uintptr_t mmu_idx) 269 { 270 env_tlb(env)->d[mmu_idx].n_used_entries--; 271 } 272 273 void tlb_init(CPUState *cpu) 274 { 275 CPUArchState *env = cpu->env_ptr; 276 int64_t now = get_clock_realtime(); 277 int i; 278 279 qemu_spin_init(&env_tlb(env)->c.lock); 280 281 /* All tlbs are initialized flushed. */ 282 env_tlb(env)->c.dirty = 0; 283 284 for (i = 0; i < NB_MMU_MODES; i++) { 285 tlb_mmu_init(&env_tlb(env)->d[i], &env_tlb(env)->f[i], now); 286 } 287 } 288 289 void tlb_destroy(CPUState *cpu) 290 { 291 CPUArchState *env = cpu->env_ptr; 292 int i; 293 294 qemu_spin_destroy(&env_tlb(env)->c.lock); 295 for (i = 0; i < NB_MMU_MODES; i++) { 296 CPUTLBDesc *desc = &env_tlb(env)->d[i]; 297 CPUTLBDescFast *fast = &env_tlb(env)->f[i]; 298 299 g_free(fast->table); 300 g_free(desc->fulltlb); 301 } 302 } 303 304 /* flush_all_helper: run fn across all cpus 305 * 306 * If the wait flag is set then the src cpu's helper will be queued as 307 * "safe" work and the loop exited creating a synchronisation point 308 * where all queued work will be finished before execution starts 309 * again. 310 */ 311 static void flush_all_helper(CPUState *src, run_on_cpu_func fn, 312 run_on_cpu_data d) 313 { 314 CPUState *cpu; 315 316 CPU_FOREACH(cpu) { 317 if (cpu != src) { 318 async_run_on_cpu(cpu, fn, d); 319 } 320 } 321 } 322 323 void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide) 324 { 325 CPUState *cpu; 326 size_t full = 0, part = 0, elide = 0; 327 328 CPU_FOREACH(cpu) { 329 CPUArchState *env = cpu->env_ptr; 330 331 full += qatomic_read(&env_tlb(env)->c.full_flush_count); 332 part += qatomic_read(&env_tlb(env)->c.part_flush_count); 333 elide += qatomic_read(&env_tlb(env)->c.elide_flush_count); 334 } 335 *pfull = full; 336 *ppart = part; 337 *pelide = elide; 338 } 339 340 static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data) 341 { 342 CPUArchState *env = cpu->env_ptr; 343 uint16_t asked = data.host_int; 344 uint16_t all_dirty, work, to_clean; 345 int64_t now = get_clock_realtime(); 346 347 assert_cpu_is_self(cpu); 348 349 tlb_debug("mmu_idx:0x%04" PRIx16 "\n", asked); 350 351 qemu_spin_lock(&env_tlb(env)->c.lock); 352 353 all_dirty = env_tlb(env)->c.dirty; 354 to_clean = asked & all_dirty; 355 all_dirty &= ~to_clean; 356 env_tlb(env)->c.dirty = all_dirty; 357 358 for (work = to_clean; work != 0; work &= work - 1) { 359 int mmu_idx = ctz32(work); 360 tlb_flush_one_mmuidx_locked(env, mmu_idx, now); 361 } 362 363 qemu_spin_unlock(&env_tlb(env)->c.lock); 364 365 tcg_flush_jmp_cache(cpu); 366 367 if (to_clean == ALL_MMUIDX_BITS) { 368 qatomic_set(&env_tlb(env)->c.full_flush_count, 369 env_tlb(env)->c.full_flush_count + 1); 370 } else { 371 qatomic_set(&env_tlb(env)->c.part_flush_count, 372 env_tlb(env)->c.part_flush_count + ctpop16(to_clean)); 373 if (to_clean != asked) { 374 qatomic_set(&env_tlb(env)->c.elide_flush_count, 375 env_tlb(env)->c.elide_flush_count + 376 ctpop16(asked & ~to_clean)); 377 } 378 } 379 } 380 381 void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap) 382 { 383 tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap); 384 385 if (cpu->created && !qemu_cpu_is_self(cpu)) { 386 async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work, 387 RUN_ON_CPU_HOST_INT(idxmap)); 388 } else { 389 tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap)); 390 } 391 } 392 393 void tlb_flush(CPUState *cpu) 394 { 395 tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS); 396 } 397 398 void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap) 399 { 400 const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work; 401 402 tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap); 403 404 flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap)); 405 fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap)); 406 } 407 408 void tlb_flush_all_cpus(CPUState *src_cpu) 409 { 410 tlb_flush_by_mmuidx_all_cpus(src_cpu, ALL_MMUIDX_BITS); 411 } 412 413 void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap) 414 { 415 const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work; 416 417 tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap); 418 419 flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap)); 420 async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap)); 421 } 422 423 void tlb_flush_all_cpus_synced(CPUState *src_cpu) 424 { 425 tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS); 426 } 427 428 static bool tlb_hit_page_mask_anyprot(CPUTLBEntry *tlb_entry, 429 target_ulong page, target_ulong mask) 430 { 431 page &= mask; 432 mask &= TARGET_PAGE_MASK | TLB_INVALID_MASK; 433 434 return (page == (tlb_entry->addr_read & mask) || 435 page == (tlb_addr_write(tlb_entry) & mask) || 436 page == (tlb_entry->addr_code & mask)); 437 } 438 439 static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry, 440 target_ulong page) 441 { 442 return tlb_hit_page_mask_anyprot(tlb_entry, page, -1); 443 } 444 445 /** 446 * tlb_entry_is_empty - return true if the entry is not in use 447 * @te: pointer to CPUTLBEntry 448 */ 449 static inline bool tlb_entry_is_empty(const CPUTLBEntry *te) 450 { 451 return te->addr_read == -1 && te->addr_write == -1 && te->addr_code == -1; 452 } 453 454 /* Called with tlb_c.lock held */ 455 static bool tlb_flush_entry_mask_locked(CPUTLBEntry *tlb_entry, 456 target_ulong page, 457 target_ulong mask) 458 { 459 if (tlb_hit_page_mask_anyprot(tlb_entry, page, mask)) { 460 memset(tlb_entry, -1, sizeof(*tlb_entry)); 461 return true; 462 } 463 return false; 464 } 465 466 static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry, 467 target_ulong page) 468 { 469 return tlb_flush_entry_mask_locked(tlb_entry, page, -1); 470 } 471 472 /* Called with tlb_c.lock held */ 473 static void tlb_flush_vtlb_page_mask_locked(CPUArchState *env, int mmu_idx, 474 target_ulong page, 475 target_ulong mask) 476 { 477 CPUTLBDesc *d = &env_tlb(env)->d[mmu_idx]; 478 int k; 479 480 assert_cpu_is_self(env_cpu(env)); 481 for (k = 0; k < CPU_VTLB_SIZE; k++) { 482 if (tlb_flush_entry_mask_locked(&d->vtable[k], page, mask)) { 483 tlb_n_used_entries_dec(env, mmu_idx); 484 } 485 } 486 } 487 488 static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx, 489 target_ulong page) 490 { 491 tlb_flush_vtlb_page_mask_locked(env, mmu_idx, page, -1); 492 } 493 494 static void tlb_flush_page_locked(CPUArchState *env, int midx, 495 target_ulong page) 496 { 497 target_ulong lp_addr = env_tlb(env)->d[midx].large_page_addr; 498 target_ulong lp_mask = env_tlb(env)->d[midx].large_page_mask; 499 500 /* Check if we need to flush due to large pages. */ 501 if ((page & lp_mask) == lp_addr) { 502 tlb_debug("forcing full flush midx %d (" 503 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n", 504 midx, lp_addr, lp_mask); 505 tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime()); 506 } else { 507 if (tlb_flush_entry_locked(tlb_entry(env, midx, page), page)) { 508 tlb_n_used_entries_dec(env, midx); 509 } 510 tlb_flush_vtlb_page_locked(env, midx, page); 511 } 512 } 513 514 /** 515 * tlb_flush_page_by_mmuidx_async_0: 516 * @cpu: cpu on which to flush 517 * @addr: page of virtual address to flush 518 * @idxmap: set of mmu_idx to flush 519 * 520 * Helper for tlb_flush_page_by_mmuidx and friends, flush one page 521 * at @addr from the tlbs indicated by @idxmap from @cpu. 522 */ 523 static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu, 524 target_ulong addr, 525 uint16_t idxmap) 526 { 527 CPUArchState *env = cpu->env_ptr; 528 int mmu_idx; 529 530 assert_cpu_is_self(cpu); 531 532 tlb_debug("page addr:" TARGET_FMT_lx " mmu_map:0x%x\n", addr, idxmap); 533 534 qemu_spin_lock(&env_tlb(env)->c.lock); 535 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { 536 if ((idxmap >> mmu_idx) & 1) { 537 tlb_flush_page_locked(env, mmu_idx, addr); 538 } 539 } 540 qemu_spin_unlock(&env_tlb(env)->c.lock); 541 542 /* 543 * Discard jump cache entries for any tb which might potentially 544 * overlap the flushed page, which includes the previous. 545 */ 546 tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE); 547 tb_jmp_cache_clear_page(cpu, addr); 548 } 549 550 /** 551 * tlb_flush_page_by_mmuidx_async_1: 552 * @cpu: cpu on which to flush 553 * @data: encoded addr + idxmap 554 * 555 * Helper for tlb_flush_page_by_mmuidx and friends, called through 556 * async_run_on_cpu. The idxmap parameter is encoded in the page 557 * offset of the target_ptr field. This limits the set of mmu_idx 558 * that can be passed via this method. 559 */ 560 static void tlb_flush_page_by_mmuidx_async_1(CPUState *cpu, 561 run_on_cpu_data data) 562 { 563 target_ulong addr_and_idxmap = (target_ulong) data.target_ptr; 564 target_ulong addr = addr_and_idxmap & TARGET_PAGE_MASK; 565 uint16_t idxmap = addr_and_idxmap & ~TARGET_PAGE_MASK; 566 567 tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap); 568 } 569 570 typedef struct { 571 target_ulong addr; 572 uint16_t idxmap; 573 } TLBFlushPageByMMUIdxData; 574 575 /** 576 * tlb_flush_page_by_mmuidx_async_2: 577 * @cpu: cpu on which to flush 578 * @data: allocated addr + idxmap 579 * 580 * Helper for tlb_flush_page_by_mmuidx and friends, called through 581 * async_run_on_cpu. The addr+idxmap parameters are stored in a 582 * TLBFlushPageByMMUIdxData structure that has been allocated 583 * specifically for this helper. Free the structure when done. 584 */ 585 static void tlb_flush_page_by_mmuidx_async_2(CPUState *cpu, 586 run_on_cpu_data data) 587 { 588 TLBFlushPageByMMUIdxData *d = data.host_ptr; 589 590 tlb_flush_page_by_mmuidx_async_0(cpu, d->addr, d->idxmap); 591 g_free(d); 592 } 593 594 void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap) 595 { 596 tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap); 597 598 /* This should already be page aligned */ 599 addr &= TARGET_PAGE_MASK; 600 601 if (qemu_cpu_is_self(cpu)) { 602 tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap); 603 } else if (idxmap < TARGET_PAGE_SIZE) { 604 /* 605 * Most targets have only a few mmu_idx. In the case where 606 * we can stuff idxmap into the low TARGET_PAGE_BITS, avoid 607 * allocating memory for this operation. 608 */ 609 async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_1, 610 RUN_ON_CPU_TARGET_PTR(addr | idxmap)); 611 } else { 612 TLBFlushPageByMMUIdxData *d = g_new(TLBFlushPageByMMUIdxData, 1); 613 614 /* Otherwise allocate a structure, freed by the worker. */ 615 d->addr = addr; 616 d->idxmap = idxmap; 617 async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_2, 618 RUN_ON_CPU_HOST_PTR(d)); 619 } 620 } 621 622 void tlb_flush_page(CPUState *cpu, target_ulong addr) 623 { 624 tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS); 625 } 626 627 void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr, 628 uint16_t idxmap) 629 { 630 tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap); 631 632 /* This should already be page aligned */ 633 addr &= TARGET_PAGE_MASK; 634 635 /* 636 * Allocate memory to hold addr+idxmap only when needed. 637 * See tlb_flush_page_by_mmuidx for details. 638 */ 639 if (idxmap < TARGET_PAGE_SIZE) { 640 flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1, 641 RUN_ON_CPU_TARGET_PTR(addr | idxmap)); 642 } else { 643 CPUState *dst_cpu; 644 645 /* Allocate a separate data block for each destination cpu. */ 646 CPU_FOREACH(dst_cpu) { 647 if (dst_cpu != src_cpu) { 648 TLBFlushPageByMMUIdxData *d 649 = g_new(TLBFlushPageByMMUIdxData, 1); 650 651 d->addr = addr; 652 d->idxmap = idxmap; 653 async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2, 654 RUN_ON_CPU_HOST_PTR(d)); 655 } 656 } 657 } 658 659 tlb_flush_page_by_mmuidx_async_0(src_cpu, addr, idxmap); 660 } 661 662 void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr) 663 { 664 tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS); 665 } 666 667 void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu, 668 target_ulong addr, 669 uint16_t idxmap) 670 { 671 tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap); 672 673 /* This should already be page aligned */ 674 addr &= TARGET_PAGE_MASK; 675 676 /* 677 * Allocate memory to hold addr+idxmap only when needed. 678 * See tlb_flush_page_by_mmuidx for details. 679 */ 680 if (idxmap < TARGET_PAGE_SIZE) { 681 flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1, 682 RUN_ON_CPU_TARGET_PTR(addr | idxmap)); 683 async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_1, 684 RUN_ON_CPU_TARGET_PTR(addr | idxmap)); 685 } else { 686 CPUState *dst_cpu; 687 TLBFlushPageByMMUIdxData *d; 688 689 /* Allocate a separate data block for each destination cpu. */ 690 CPU_FOREACH(dst_cpu) { 691 if (dst_cpu != src_cpu) { 692 d = g_new(TLBFlushPageByMMUIdxData, 1); 693 d->addr = addr; 694 d->idxmap = idxmap; 695 async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2, 696 RUN_ON_CPU_HOST_PTR(d)); 697 } 698 } 699 700 d = g_new(TLBFlushPageByMMUIdxData, 1); 701 d->addr = addr; 702 d->idxmap = idxmap; 703 async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_2, 704 RUN_ON_CPU_HOST_PTR(d)); 705 } 706 } 707 708 void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr) 709 { 710 tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS); 711 } 712 713 static void tlb_flush_range_locked(CPUArchState *env, int midx, 714 target_ulong addr, target_ulong len, 715 unsigned bits) 716 { 717 CPUTLBDesc *d = &env_tlb(env)->d[midx]; 718 CPUTLBDescFast *f = &env_tlb(env)->f[midx]; 719 target_ulong mask = MAKE_64BIT_MASK(0, bits); 720 721 /* 722 * If @bits is smaller than the tlb size, there may be multiple entries 723 * within the TLB; otherwise all addresses that match under @mask hit 724 * the same TLB entry. 725 * TODO: Perhaps allow bits to be a few bits less than the size. 726 * For now, just flush the entire TLB. 727 * 728 * If @len is larger than the tlb size, then it will take longer to 729 * test all of the entries in the TLB than it will to flush it all. 730 */ 731 if (mask < f->mask || len > f->mask) { 732 tlb_debug("forcing full flush midx %d (" 733 TARGET_FMT_lx "/" TARGET_FMT_lx "+" TARGET_FMT_lx ")\n", 734 midx, addr, mask, len); 735 tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime()); 736 return; 737 } 738 739 /* 740 * Check if we need to flush due to large pages. 741 * Because large_page_mask contains all 1's from the msb, 742 * we only need to test the end of the range. 743 */ 744 if (((addr + len - 1) & d->large_page_mask) == d->large_page_addr) { 745 tlb_debug("forcing full flush midx %d (" 746 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n", 747 midx, d->large_page_addr, d->large_page_mask); 748 tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime()); 749 return; 750 } 751 752 for (target_ulong i = 0; i < len; i += TARGET_PAGE_SIZE) { 753 target_ulong page = addr + i; 754 CPUTLBEntry *entry = tlb_entry(env, midx, page); 755 756 if (tlb_flush_entry_mask_locked(entry, page, mask)) { 757 tlb_n_used_entries_dec(env, midx); 758 } 759 tlb_flush_vtlb_page_mask_locked(env, midx, page, mask); 760 } 761 } 762 763 typedef struct { 764 target_ulong addr; 765 target_ulong len; 766 uint16_t idxmap; 767 uint16_t bits; 768 } TLBFlushRangeData; 769 770 static void tlb_flush_range_by_mmuidx_async_0(CPUState *cpu, 771 TLBFlushRangeData d) 772 { 773 CPUArchState *env = cpu->env_ptr; 774 int mmu_idx; 775 776 assert_cpu_is_self(cpu); 777 778 tlb_debug("range:" TARGET_FMT_lx "/%u+" TARGET_FMT_lx " mmu_map:0x%x\n", 779 d.addr, d.bits, d.len, d.idxmap); 780 781 qemu_spin_lock(&env_tlb(env)->c.lock); 782 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { 783 if ((d.idxmap >> mmu_idx) & 1) { 784 tlb_flush_range_locked(env, mmu_idx, d.addr, d.len, d.bits); 785 } 786 } 787 qemu_spin_unlock(&env_tlb(env)->c.lock); 788 789 /* 790 * If the length is larger than the jump cache size, then it will take 791 * longer to clear each entry individually than it will to clear it all. 792 */ 793 if (d.len >= (TARGET_PAGE_SIZE * TB_JMP_CACHE_SIZE)) { 794 tcg_flush_jmp_cache(cpu); 795 return; 796 } 797 798 /* 799 * Discard jump cache entries for any tb which might potentially 800 * overlap the flushed pages, which includes the previous. 801 */ 802 d.addr -= TARGET_PAGE_SIZE; 803 for (target_ulong i = 0, n = d.len / TARGET_PAGE_SIZE + 1; i < n; i++) { 804 tb_jmp_cache_clear_page(cpu, d.addr); 805 d.addr += TARGET_PAGE_SIZE; 806 } 807 } 808 809 static void tlb_flush_range_by_mmuidx_async_1(CPUState *cpu, 810 run_on_cpu_data data) 811 { 812 TLBFlushRangeData *d = data.host_ptr; 813 tlb_flush_range_by_mmuidx_async_0(cpu, *d); 814 g_free(d); 815 } 816 817 void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr, 818 target_ulong len, uint16_t idxmap, 819 unsigned bits) 820 { 821 TLBFlushRangeData d; 822 823 /* 824 * If all bits are significant, and len is small, 825 * this devolves to tlb_flush_page. 826 */ 827 if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) { 828 tlb_flush_page_by_mmuidx(cpu, addr, idxmap); 829 return; 830 } 831 /* If no page bits are significant, this devolves to tlb_flush. */ 832 if (bits < TARGET_PAGE_BITS) { 833 tlb_flush_by_mmuidx(cpu, idxmap); 834 return; 835 } 836 837 /* This should already be page aligned */ 838 d.addr = addr & TARGET_PAGE_MASK; 839 d.len = len; 840 d.idxmap = idxmap; 841 d.bits = bits; 842 843 if (qemu_cpu_is_self(cpu)) { 844 tlb_flush_range_by_mmuidx_async_0(cpu, d); 845 } else { 846 /* Otherwise allocate a structure, freed by the worker. */ 847 TLBFlushRangeData *p = g_memdup(&d, sizeof(d)); 848 async_run_on_cpu(cpu, tlb_flush_range_by_mmuidx_async_1, 849 RUN_ON_CPU_HOST_PTR(p)); 850 } 851 } 852 853 void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr, 854 uint16_t idxmap, unsigned bits) 855 { 856 tlb_flush_range_by_mmuidx(cpu, addr, TARGET_PAGE_SIZE, idxmap, bits); 857 } 858 859 void tlb_flush_range_by_mmuidx_all_cpus(CPUState *src_cpu, 860 target_ulong addr, target_ulong len, 861 uint16_t idxmap, unsigned bits) 862 { 863 TLBFlushRangeData d; 864 CPUState *dst_cpu; 865 866 /* 867 * If all bits are significant, and len is small, 868 * this devolves to tlb_flush_page. 869 */ 870 if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) { 871 tlb_flush_page_by_mmuidx_all_cpus(src_cpu, addr, idxmap); 872 return; 873 } 874 /* If no page bits are significant, this devolves to tlb_flush. */ 875 if (bits < TARGET_PAGE_BITS) { 876 tlb_flush_by_mmuidx_all_cpus(src_cpu, idxmap); 877 return; 878 } 879 880 /* This should already be page aligned */ 881 d.addr = addr & TARGET_PAGE_MASK; 882 d.len = len; 883 d.idxmap = idxmap; 884 d.bits = bits; 885 886 /* Allocate a separate data block for each destination cpu. */ 887 CPU_FOREACH(dst_cpu) { 888 if (dst_cpu != src_cpu) { 889 TLBFlushRangeData *p = g_memdup(&d, sizeof(d)); 890 async_run_on_cpu(dst_cpu, 891 tlb_flush_range_by_mmuidx_async_1, 892 RUN_ON_CPU_HOST_PTR(p)); 893 } 894 } 895 896 tlb_flush_range_by_mmuidx_async_0(src_cpu, d); 897 } 898 899 void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu, 900 target_ulong addr, 901 uint16_t idxmap, unsigned bits) 902 { 903 tlb_flush_range_by_mmuidx_all_cpus(src_cpu, addr, TARGET_PAGE_SIZE, 904 idxmap, bits); 905 } 906 907 void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu, 908 target_ulong addr, 909 target_ulong len, 910 uint16_t idxmap, 911 unsigned bits) 912 { 913 TLBFlushRangeData d, *p; 914 CPUState *dst_cpu; 915 916 /* 917 * If all bits are significant, and len is small, 918 * this devolves to tlb_flush_page. 919 */ 920 if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) { 921 tlb_flush_page_by_mmuidx_all_cpus_synced(src_cpu, addr, idxmap); 922 return; 923 } 924 /* If no page bits are significant, this devolves to tlb_flush. */ 925 if (bits < TARGET_PAGE_BITS) { 926 tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, idxmap); 927 return; 928 } 929 930 /* This should already be page aligned */ 931 d.addr = addr & TARGET_PAGE_MASK; 932 d.len = len; 933 d.idxmap = idxmap; 934 d.bits = bits; 935 936 /* Allocate a separate data block for each destination cpu. */ 937 CPU_FOREACH(dst_cpu) { 938 if (dst_cpu != src_cpu) { 939 p = g_memdup(&d, sizeof(d)); 940 async_run_on_cpu(dst_cpu, tlb_flush_range_by_mmuidx_async_1, 941 RUN_ON_CPU_HOST_PTR(p)); 942 } 943 } 944 945 p = g_memdup(&d, sizeof(d)); 946 async_safe_run_on_cpu(src_cpu, tlb_flush_range_by_mmuidx_async_1, 947 RUN_ON_CPU_HOST_PTR(p)); 948 } 949 950 void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu, 951 target_ulong addr, 952 uint16_t idxmap, 953 unsigned bits) 954 { 955 tlb_flush_range_by_mmuidx_all_cpus_synced(src_cpu, addr, TARGET_PAGE_SIZE, 956 idxmap, bits); 957 } 958 959 /* update the TLBs so that writes to code in the virtual page 'addr' 960 can be detected */ 961 void tlb_protect_code(ram_addr_t ram_addr) 962 { 963 cpu_physical_memory_test_and_clear_dirty(ram_addr & TARGET_PAGE_MASK, 964 TARGET_PAGE_SIZE, 965 DIRTY_MEMORY_CODE); 966 } 967 968 /* update the TLB so that writes in physical page 'phys_addr' are no longer 969 tested for self modifying code */ 970 void tlb_unprotect_code(ram_addr_t ram_addr) 971 { 972 cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE); 973 } 974 975 976 /* 977 * Dirty write flag handling 978 * 979 * When the TCG code writes to a location it looks up the address in 980 * the TLB and uses that data to compute the final address. If any of 981 * the lower bits of the address are set then the slow path is forced. 982 * There are a number of reasons to do this but for normal RAM the 983 * most usual is detecting writes to code regions which may invalidate 984 * generated code. 985 * 986 * Other vCPUs might be reading their TLBs during guest execution, so we update 987 * te->addr_write with qatomic_set. We don't need to worry about this for 988 * oversized guests as MTTCG is disabled for them. 989 * 990 * Called with tlb_c.lock held. 991 */ 992 static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry, 993 uintptr_t start, uintptr_t length) 994 { 995 uintptr_t addr = tlb_entry->addr_write; 996 997 if ((addr & (TLB_INVALID_MASK | TLB_MMIO | 998 TLB_DISCARD_WRITE | TLB_NOTDIRTY)) == 0) { 999 addr &= TARGET_PAGE_MASK; 1000 addr += tlb_entry->addend; 1001 if ((addr - start) < length) { 1002 #if TCG_OVERSIZED_GUEST 1003 tlb_entry->addr_write |= TLB_NOTDIRTY; 1004 #else 1005 qatomic_set(&tlb_entry->addr_write, 1006 tlb_entry->addr_write | TLB_NOTDIRTY); 1007 #endif 1008 } 1009 } 1010 } 1011 1012 /* 1013 * Called with tlb_c.lock held. 1014 * Called only from the vCPU context, i.e. the TLB's owner thread. 1015 */ 1016 static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s) 1017 { 1018 *d = *s; 1019 } 1020 1021 /* This is a cross vCPU call (i.e. another vCPU resetting the flags of 1022 * the target vCPU). 1023 * We must take tlb_c.lock to avoid racing with another vCPU update. The only 1024 * thing actually updated is the target TLB entry ->addr_write flags. 1025 */ 1026 void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length) 1027 { 1028 CPUArchState *env; 1029 1030 int mmu_idx; 1031 1032 env = cpu->env_ptr; 1033 qemu_spin_lock(&env_tlb(env)->c.lock); 1034 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { 1035 unsigned int i; 1036 unsigned int n = tlb_n_entries(&env_tlb(env)->f[mmu_idx]); 1037 1038 for (i = 0; i < n; i++) { 1039 tlb_reset_dirty_range_locked(&env_tlb(env)->f[mmu_idx].table[i], 1040 start1, length); 1041 } 1042 1043 for (i = 0; i < CPU_VTLB_SIZE; i++) { 1044 tlb_reset_dirty_range_locked(&env_tlb(env)->d[mmu_idx].vtable[i], 1045 start1, length); 1046 } 1047 } 1048 qemu_spin_unlock(&env_tlb(env)->c.lock); 1049 } 1050 1051 /* Called with tlb_c.lock held */ 1052 static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry, 1053 target_ulong vaddr) 1054 { 1055 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) { 1056 tlb_entry->addr_write = vaddr; 1057 } 1058 } 1059 1060 /* update the TLB corresponding to virtual page vaddr 1061 so that it is no longer dirty */ 1062 void tlb_set_dirty(CPUState *cpu, target_ulong vaddr) 1063 { 1064 CPUArchState *env = cpu->env_ptr; 1065 int mmu_idx; 1066 1067 assert_cpu_is_self(cpu); 1068 1069 vaddr &= TARGET_PAGE_MASK; 1070 qemu_spin_lock(&env_tlb(env)->c.lock); 1071 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { 1072 tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, vaddr), vaddr); 1073 } 1074 1075 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { 1076 int k; 1077 for (k = 0; k < CPU_VTLB_SIZE; k++) { 1078 tlb_set_dirty1_locked(&env_tlb(env)->d[mmu_idx].vtable[k], vaddr); 1079 } 1080 } 1081 qemu_spin_unlock(&env_tlb(env)->c.lock); 1082 } 1083 1084 /* Our TLB does not support large pages, so remember the area covered by 1085 large pages and trigger a full TLB flush if these are invalidated. */ 1086 static void tlb_add_large_page(CPUArchState *env, int mmu_idx, 1087 target_ulong vaddr, target_ulong size) 1088 { 1089 target_ulong lp_addr = env_tlb(env)->d[mmu_idx].large_page_addr; 1090 target_ulong lp_mask = ~(size - 1); 1091 1092 if (lp_addr == (target_ulong)-1) { 1093 /* No previous large page. */ 1094 lp_addr = vaddr; 1095 } else { 1096 /* Extend the existing region to include the new page. 1097 This is a compromise between unnecessary flushes and 1098 the cost of maintaining a full variable size TLB. */ 1099 lp_mask &= env_tlb(env)->d[mmu_idx].large_page_mask; 1100 while (((lp_addr ^ vaddr) & lp_mask) != 0) { 1101 lp_mask <<= 1; 1102 } 1103 } 1104 env_tlb(env)->d[mmu_idx].large_page_addr = lp_addr & lp_mask; 1105 env_tlb(env)->d[mmu_idx].large_page_mask = lp_mask; 1106 } 1107 1108 /* 1109 * Add a new TLB entry. At most one entry for a given virtual address 1110 * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the 1111 * supplied size is only used by tlb_flush_page. 1112 * 1113 * Called from TCG-generated code, which is under an RCU read-side 1114 * critical section. 1115 */ 1116 void tlb_set_page_full(CPUState *cpu, int mmu_idx, 1117 target_ulong vaddr, CPUTLBEntryFull *full) 1118 { 1119 CPUArchState *env = cpu->env_ptr; 1120 CPUTLB *tlb = env_tlb(env); 1121 CPUTLBDesc *desc = &tlb->d[mmu_idx]; 1122 MemoryRegionSection *section; 1123 unsigned int index; 1124 target_ulong address; 1125 target_ulong write_address; 1126 uintptr_t addend; 1127 CPUTLBEntry *te, tn; 1128 hwaddr iotlb, xlat, sz, paddr_page; 1129 target_ulong vaddr_page; 1130 int asidx, wp_flags, prot; 1131 bool is_ram, is_romd; 1132 1133 assert_cpu_is_self(cpu); 1134 1135 if (full->lg_page_size <= TARGET_PAGE_BITS) { 1136 sz = TARGET_PAGE_SIZE; 1137 } else { 1138 sz = (hwaddr)1 << full->lg_page_size; 1139 tlb_add_large_page(env, mmu_idx, vaddr, sz); 1140 } 1141 vaddr_page = vaddr & TARGET_PAGE_MASK; 1142 paddr_page = full->phys_addr & TARGET_PAGE_MASK; 1143 1144 prot = full->prot; 1145 asidx = cpu_asidx_from_attrs(cpu, full->attrs); 1146 section = address_space_translate_for_iotlb(cpu, asidx, paddr_page, 1147 &xlat, &sz, full->attrs, &prot); 1148 assert(sz >= TARGET_PAGE_SIZE); 1149 1150 tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" HWADDR_FMT_plx 1151 " prot=%x idx=%d\n", 1152 vaddr, full->phys_addr, prot, mmu_idx); 1153 1154 address = vaddr_page; 1155 if (full->lg_page_size < TARGET_PAGE_BITS) { 1156 /* Repeat the MMU check and TLB fill on every access. */ 1157 address |= TLB_INVALID_MASK; 1158 } 1159 if (full->attrs.byte_swap) { 1160 address |= TLB_BSWAP; 1161 } 1162 1163 is_ram = memory_region_is_ram(section->mr); 1164 is_romd = memory_region_is_romd(section->mr); 1165 1166 if (is_ram || is_romd) { 1167 /* RAM and ROMD both have associated host memory. */ 1168 addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat; 1169 } else { 1170 /* I/O does not; force the host address to NULL. */ 1171 addend = 0; 1172 } 1173 1174 write_address = address; 1175 if (is_ram) { 1176 iotlb = memory_region_get_ram_addr(section->mr) + xlat; 1177 /* 1178 * Computing is_clean is expensive; avoid all that unless 1179 * the page is actually writable. 1180 */ 1181 if (prot & PAGE_WRITE) { 1182 if (section->readonly) { 1183 write_address |= TLB_DISCARD_WRITE; 1184 } else if (cpu_physical_memory_is_clean(iotlb)) { 1185 write_address |= TLB_NOTDIRTY; 1186 } 1187 } 1188 } else { 1189 /* I/O or ROMD */ 1190 iotlb = memory_region_section_get_iotlb(cpu, section) + xlat; 1191 /* 1192 * Writes to romd devices must go through MMIO to enable write. 1193 * Reads to romd devices go through the ram_ptr found above, 1194 * but of course reads to I/O must go through MMIO. 1195 */ 1196 write_address |= TLB_MMIO; 1197 if (!is_romd) { 1198 address = write_address; 1199 } 1200 } 1201 1202 wp_flags = cpu_watchpoint_address_matches(cpu, vaddr_page, 1203 TARGET_PAGE_SIZE); 1204 1205 index = tlb_index(env, mmu_idx, vaddr_page); 1206 te = tlb_entry(env, mmu_idx, vaddr_page); 1207 1208 /* 1209 * Hold the TLB lock for the rest of the function. We could acquire/release 1210 * the lock several times in the function, but it is faster to amortize the 1211 * acquisition cost by acquiring it just once. Note that this leads to 1212 * a longer critical section, but this is not a concern since the TLB lock 1213 * is unlikely to be contended. 1214 */ 1215 qemu_spin_lock(&tlb->c.lock); 1216 1217 /* Note that the tlb is no longer clean. */ 1218 tlb->c.dirty |= 1 << mmu_idx; 1219 1220 /* Make sure there's no cached translation for the new page. */ 1221 tlb_flush_vtlb_page_locked(env, mmu_idx, vaddr_page); 1222 1223 /* 1224 * Only evict the old entry to the victim tlb if it's for a 1225 * different page; otherwise just overwrite the stale data. 1226 */ 1227 if (!tlb_hit_page_anyprot(te, vaddr_page) && !tlb_entry_is_empty(te)) { 1228 unsigned vidx = desc->vindex++ % CPU_VTLB_SIZE; 1229 CPUTLBEntry *tv = &desc->vtable[vidx]; 1230 1231 /* Evict the old entry into the victim tlb. */ 1232 copy_tlb_helper_locked(tv, te); 1233 desc->vfulltlb[vidx] = desc->fulltlb[index]; 1234 tlb_n_used_entries_dec(env, mmu_idx); 1235 } 1236 1237 /* refill the tlb */ 1238 /* 1239 * At this point iotlb contains a physical section number in the lower 1240 * TARGET_PAGE_BITS, and either 1241 * + the ram_addr_t of the page base of the target RAM (RAM) 1242 * + the offset within section->mr of the page base (I/O, ROMD) 1243 * We subtract the vaddr_page (which is page aligned and thus won't 1244 * disturb the low bits) to give an offset which can be added to the 1245 * (non-page-aligned) vaddr of the eventual memory access to get 1246 * the MemoryRegion offset for the access. Note that the vaddr we 1247 * subtract here is that of the page base, and not the same as the 1248 * vaddr we add back in io_readx()/io_writex()/get_page_addr_code(). 1249 */ 1250 desc->fulltlb[index] = *full; 1251 desc->fulltlb[index].xlat_section = iotlb - vaddr_page; 1252 desc->fulltlb[index].phys_addr = paddr_page; 1253 desc->fulltlb[index].prot = prot; 1254 1255 /* Now calculate the new entry */ 1256 tn.addend = addend - vaddr_page; 1257 if (prot & PAGE_READ) { 1258 tn.addr_read = address; 1259 if (wp_flags & BP_MEM_READ) { 1260 tn.addr_read |= TLB_WATCHPOINT; 1261 } 1262 } else { 1263 tn.addr_read = -1; 1264 } 1265 1266 if (prot & PAGE_EXEC) { 1267 tn.addr_code = address; 1268 } else { 1269 tn.addr_code = -1; 1270 } 1271 1272 tn.addr_write = -1; 1273 if (prot & PAGE_WRITE) { 1274 tn.addr_write = write_address; 1275 if (prot & PAGE_WRITE_INV) { 1276 tn.addr_write |= TLB_INVALID_MASK; 1277 } 1278 if (wp_flags & BP_MEM_WRITE) { 1279 tn.addr_write |= TLB_WATCHPOINT; 1280 } 1281 } 1282 1283 copy_tlb_helper_locked(te, &tn); 1284 tlb_n_used_entries_inc(env, mmu_idx); 1285 qemu_spin_unlock(&tlb->c.lock); 1286 } 1287 1288 void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr, 1289 hwaddr paddr, MemTxAttrs attrs, int prot, 1290 int mmu_idx, target_ulong size) 1291 { 1292 CPUTLBEntryFull full = { 1293 .phys_addr = paddr, 1294 .attrs = attrs, 1295 .prot = prot, 1296 .lg_page_size = ctz64(size) 1297 }; 1298 1299 assert(is_power_of_2(size)); 1300 tlb_set_page_full(cpu, mmu_idx, vaddr, &full); 1301 } 1302 1303 void tlb_set_page(CPUState *cpu, target_ulong vaddr, 1304 hwaddr paddr, int prot, 1305 int mmu_idx, target_ulong size) 1306 { 1307 tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED, 1308 prot, mmu_idx, size); 1309 } 1310 1311 /* 1312 * Note: tlb_fill() can trigger a resize of the TLB. This means that all of the 1313 * caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must 1314 * be discarded and looked up again (e.g. via tlb_entry()). 1315 */ 1316 static void tlb_fill(CPUState *cpu, target_ulong addr, int size, 1317 MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) 1318 { 1319 bool ok; 1320 1321 /* 1322 * This is not a probe, so only valid return is success; failure 1323 * should result in exception + longjmp to the cpu loop. 1324 */ 1325 ok = cpu->cc->tcg_ops->tlb_fill(cpu, addr, size, 1326 access_type, mmu_idx, false, retaddr); 1327 assert(ok); 1328 } 1329 1330 static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr, 1331 MMUAccessType access_type, 1332 int mmu_idx, uintptr_t retaddr) 1333 { 1334 cpu->cc->tcg_ops->do_unaligned_access(cpu, addr, access_type, 1335 mmu_idx, retaddr); 1336 } 1337 1338 static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr, 1339 vaddr addr, unsigned size, 1340 MMUAccessType access_type, 1341 int mmu_idx, MemTxAttrs attrs, 1342 MemTxResult response, 1343 uintptr_t retaddr) 1344 { 1345 CPUClass *cc = CPU_GET_CLASS(cpu); 1346 1347 if (!cpu->ignore_memory_transaction_failures && 1348 cc->tcg_ops->do_transaction_failed) { 1349 cc->tcg_ops->do_transaction_failed(cpu, physaddr, addr, size, 1350 access_type, mmu_idx, attrs, 1351 response, retaddr); 1352 } 1353 } 1354 1355 static uint64_t io_readx(CPUArchState *env, CPUTLBEntryFull *full, 1356 int mmu_idx, target_ulong addr, uintptr_t retaddr, 1357 MMUAccessType access_type, MemOp op) 1358 { 1359 CPUState *cpu = env_cpu(env); 1360 hwaddr mr_offset; 1361 MemoryRegionSection *section; 1362 MemoryRegion *mr; 1363 uint64_t val; 1364 MemTxResult r; 1365 1366 section = iotlb_to_section(cpu, full->xlat_section, full->attrs); 1367 mr = section->mr; 1368 mr_offset = (full->xlat_section & TARGET_PAGE_MASK) + addr; 1369 cpu->mem_io_pc = retaddr; 1370 if (!cpu->can_do_io) { 1371 cpu_io_recompile(cpu, retaddr); 1372 } 1373 1374 { 1375 QEMU_IOTHREAD_LOCK_GUARD(); 1376 r = memory_region_dispatch_read(mr, mr_offset, &val, op, full->attrs); 1377 } 1378 1379 if (r != MEMTX_OK) { 1380 hwaddr physaddr = mr_offset + 1381 section->offset_within_address_space - 1382 section->offset_within_region; 1383 1384 cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), access_type, 1385 mmu_idx, full->attrs, r, retaddr); 1386 } 1387 return val; 1388 } 1389 1390 /* 1391 * Save a potentially trashed CPUTLBEntryFull for later lookup by plugin. 1392 * This is read by tlb_plugin_lookup if the fulltlb entry doesn't match 1393 * because of the side effect of io_writex changing memory layout. 1394 */ 1395 static void save_iotlb_data(CPUState *cs, MemoryRegionSection *section, 1396 hwaddr mr_offset) 1397 { 1398 #ifdef CONFIG_PLUGIN 1399 SavedIOTLB *saved = &cs->saved_iotlb; 1400 saved->section = section; 1401 saved->mr_offset = mr_offset; 1402 #endif 1403 } 1404 1405 static void io_writex(CPUArchState *env, CPUTLBEntryFull *full, 1406 int mmu_idx, uint64_t val, target_ulong addr, 1407 uintptr_t retaddr, MemOp op) 1408 { 1409 CPUState *cpu = env_cpu(env); 1410 hwaddr mr_offset; 1411 MemoryRegionSection *section; 1412 MemoryRegion *mr; 1413 MemTxResult r; 1414 1415 section = iotlb_to_section(cpu, full->xlat_section, full->attrs); 1416 mr = section->mr; 1417 mr_offset = (full->xlat_section & TARGET_PAGE_MASK) + addr; 1418 if (!cpu->can_do_io) { 1419 cpu_io_recompile(cpu, retaddr); 1420 } 1421 cpu->mem_io_pc = retaddr; 1422 1423 /* 1424 * The memory_region_dispatch may trigger a flush/resize 1425 * so for plugins we save the iotlb_data just in case. 1426 */ 1427 save_iotlb_data(cpu, section, mr_offset); 1428 1429 { 1430 QEMU_IOTHREAD_LOCK_GUARD(); 1431 r = memory_region_dispatch_write(mr, mr_offset, val, op, full->attrs); 1432 } 1433 1434 if (r != MEMTX_OK) { 1435 hwaddr physaddr = mr_offset + 1436 section->offset_within_address_space - 1437 section->offset_within_region; 1438 1439 cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), 1440 MMU_DATA_STORE, mmu_idx, full->attrs, r, 1441 retaddr); 1442 } 1443 } 1444 1445 static inline target_ulong tlb_read_ofs(CPUTLBEntry *entry, size_t ofs) 1446 { 1447 #if TCG_OVERSIZED_GUEST 1448 return *(target_ulong *)((uintptr_t)entry + ofs); 1449 #else 1450 /* ofs might correspond to .addr_write, so use qatomic_read */ 1451 return qatomic_read((target_ulong *)((uintptr_t)entry + ofs)); 1452 #endif 1453 } 1454 1455 /* Return true if ADDR is present in the victim tlb, and has been copied 1456 back to the main tlb. */ 1457 static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index, 1458 size_t elt_ofs, target_ulong page) 1459 { 1460 size_t vidx; 1461 1462 assert_cpu_is_self(env_cpu(env)); 1463 for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) { 1464 CPUTLBEntry *vtlb = &env_tlb(env)->d[mmu_idx].vtable[vidx]; 1465 target_ulong cmp; 1466 1467 /* elt_ofs might correspond to .addr_write, so use qatomic_read */ 1468 #if TCG_OVERSIZED_GUEST 1469 cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs); 1470 #else 1471 cmp = qatomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs)); 1472 #endif 1473 1474 if (cmp == page) { 1475 /* Found entry in victim tlb, swap tlb and iotlb. */ 1476 CPUTLBEntry tmptlb, *tlb = &env_tlb(env)->f[mmu_idx].table[index]; 1477 1478 qemu_spin_lock(&env_tlb(env)->c.lock); 1479 copy_tlb_helper_locked(&tmptlb, tlb); 1480 copy_tlb_helper_locked(tlb, vtlb); 1481 copy_tlb_helper_locked(vtlb, &tmptlb); 1482 qemu_spin_unlock(&env_tlb(env)->c.lock); 1483 1484 CPUTLBEntryFull *f1 = &env_tlb(env)->d[mmu_idx].fulltlb[index]; 1485 CPUTLBEntryFull *f2 = &env_tlb(env)->d[mmu_idx].vfulltlb[vidx]; 1486 CPUTLBEntryFull tmpf; 1487 tmpf = *f1; *f1 = *f2; *f2 = tmpf; 1488 return true; 1489 } 1490 } 1491 return false; 1492 } 1493 1494 /* Macro to call the above, with local variables from the use context. */ 1495 #define VICTIM_TLB_HIT(TY, ADDR) \ 1496 victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \ 1497 (ADDR) & TARGET_PAGE_MASK) 1498 1499 static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size, 1500 CPUTLBEntryFull *full, uintptr_t retaddr) 1501 { 1502 ram_addr_t ram_addr = mem_vaddr + full->xlat_section; 1503 1504 trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size); 1505 1506 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) { 1507 tb_invalidate_phys_range_fast(ram_addr, size, retaddr); 1508 } 1509 1510 /* 1511 * Set both VGA and migration bits for simplicity and to remove 1512 * the notdirty callback faster. 1513 */ 1514 cpu_physical_memory_set_dirty_range(ram_addr, size, DIRTY_CLIENTS_NOCODE); 1515 1516 /* We remove the notdirty callback only if the code has been flushed. */ 1517 if (!cpu_physical_memory_is_clean(ram_addr)) { 1518 trace_memory_notdirty_set_dirty(mem_vaddr); 1519 tlb_set_dirty(cpu, mem_vaddr); 1520 } 1521 } 1522 1523 static int probe_access_internal(CPUArchState *env, target_ulong addr, 1524 int fault_size, MMUAccessType access_type, 1525 int mmu_idx, bool nonfault, 1526 void **phost, CPUTLBEntryFull **pfull, 1527 uintptr_t retaddr) 1528 { 1529 uintptr_t index = tlb_index(env, mmu_idx, addr); 1530 CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); 1531 target_ulong tlb_addr, page_addr; 1532 size_t elt_ofs; 1533 int flags; 1534 1535 switch (access_type) { 1536 case MMU_DATA_LOAD: 1537 elt_ofs = offsetof(CPUTLBEntry, addr_read); 1538 break; 1539 case MMU_DATA_STORE: 1540 elt_ofs = offsetof(CPUTLBEntry, addr_write); 1541 break; 1542 case MMU_INST_FETCH: 1543 elt_ofs = offsetof(CPUTLBEntry, addr_code); 1544 break; 1545 default: 1546 g_assert_not_reached(); 1547 } 1548 tlb_addr = tlb_read_ofs(entry, elt_ofs); 1549 1550 flags = TLB_FLAGS_MASK; 1551 page_addr = addr & TARGET_PAGE_MASK; 1552 if (!tlb_hit_page(tlb_addr, page_addr)) { 1553 if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, page_addr)) { 1554 CPUState *cs = env_cpu(env); 1555 1556 if (!cs->cc->tcg_ops->tlb_fill(cs, addr, fault_size, access_type, 1557 mmu_idx, nonfault, retaddr)) { 1558 /* Non-faulting page table read failed. */ 1559 *phost = NULL; 1560 *pfull = NULL; 1561 return TLB_INVALID_MASK; 1562 } 1563 1564 /* TLB resize via tlb_fill may have moved the entry. */ 1565 index = tlb_index(env, mmu_idx, addr); 1566 entry = tlb_entry(env, mmu_idx, addr); 1567 1568 /* 1569 * With PAGE_WRITE_INV, we set TLB_INVALID_MASK immediately, 1570 * to force the next access through tlb_fill. We've just 1571 * called tlb_fill, so we know that this entry *is* valid. 1572 */ 1573 flags &= ~TLB_INVALID_MASK; 1574 } 1575 tlb_addr = tlb_read_ofs(entry, elt_ofs); 1576 } 1577 flags &= tlb_addr; 1578 1579 *pfull = &env_tlb(env)->d[mmu_idx].fulltlb[index]; 1580 1581 /* Fold all "mmio-like" bits into TLB_MMIO. This is not RAM. */ 1582 if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY))) { 1583 *phost = NULL; 1584 return TLB_MMIO; 1585 } 1586 1587 /* Everything else is RAM. */ 1588 *phost = (void *)((uintptr_t)addr + entry->addend); 1589 return flags; 1590 } 1591 1592 int probe_access_full(CPUArchState *env, target_ulong addr, 1593 MMUAccessType access_type, int mmu_idx, 1594 bool nonfault, void **phost, CPUTLBEntryFull **pfull, 1595 uintptr_t retaddr) 1596 { 1597 int flags = probe_access_internal(env, addr, 0, access_type, mmu_idx, 1598 nonfault, phost, pfull, retaddr); 1599 1600 /* Handle clean RAM pages. */ 1601 if (unlikely(flags & TLB_NOTDIRTY)) { 1602 notdirty_write(env_cpu(env), addr, 1, *pfull, retaddr); 1603 flags &= ~TLB_NOTDIRTY; 1604 } 1605 1606 return flags; 1607 } 1608 1609 int probe_access_flags(CPUArchState *env, target_ulong addr, 1610 MMUAccessType access_type, int mmu_idx, 1611 bool nonfault, void **phost, uintptr_t retaddr) 1612 { 1613 CPUTLBEntryFull *full; 1614 1615 return probe_access_full(env, addr, access_type, mmu_idx, 1616 nonfault, phost, &full, retaddr); 1617 } 1618 1619 void *probe_access(CPUArchState *env, target_ulong addr, int size, 1620 MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) 1621 { 1622 CPUTLBEntryFull *full; 1623 void *host; 1624 int flags; 1625 1626 g_assert(-(addr | TARGET_PAGE_MASK) >= size); 1627 1628 flags = probe_access_internal(env, addr, size, access_type, mmu_idx, 1629 false, &host, &full, retaddr); 1630 1631 /* Per the interface, size == 0 merely faults the access. */ 1632 if (size == 0) { 1633 return NULL; 1634 } 1635 1636 if (unlikely(flags & (TLB_NOTDIRTY | TLB_WATCHPOINT))) { 1637 /* Handle watchpoints. */ 1638 if (flags & TLB_WATCHPOINT) { 1639 int wp_access = (access_type == MMU_DATA_STORE 1640 ? BP_MEM_WRITE : BP_MEM_READ); 1641 cpu_check_watchpoint(env_cpu(env), addr, size, 1642 full->attrs, wp_access, retaddr); 1643 } 1644 1645 /* Handle clean RAM pages. */ 1646 if (flags & TLB_NOTDIRTY) { 1647 notdirty_write(env_cpu(env), addr, 1, full, retaddr); 1648 } 1649 } 1650 1651 return host; 1652 } 1653 1654 void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr, 1655 MMUAccessType access_type, int mmu_idx) 1656 { 1657 CPUTLBEntryFull *full; 1658 void *host; 1659 int flags; 1660 1661 flags = probe_access_internal(env, addr, 0, access_type, 1662 mmu_idx, true, &host, &full, 0); 1663 1664 /* No combination of flags are expected by the caller. */ 1665 return flags ? NULL : host; 1666 } 1667 1668 /* 1669 * Return a ram_addr_t for the virtual address for execution. 1670 * 1671 * Return -1 if we can't translate and execute from an entire page 1672 * of RAM. This will force us to execute by loading and translating 1673 * one insn at a time, without caching. 1674 * 1675 * NOTE: This function will trigger an exception if the page is 1676 * not executable. 1677 */ 1678 tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr, 1679 void **hostp) 1680 { 1681 CPUTLBEntryFull *full; 1682 void *p; 1683 1684 (void)probe_access_internal(env, addr, 1, MMU_INST_FETCH, 1685 cpu_mmu_index(env, true), false, &p, &full, 0); 1686 if (p == NULL) { 1687 return -1; 1688 } 1689 if (hostp) { 1690 *hostp = p; 1691 } 1692 return qemu_ram_addr_from_host_nofail(p); 1693 } 1694 1695 #ifdef CONFIG_PLUGIN 1696 /* 1697 * Perform a TLB lookup and populate the qemu_plugin_hwaddr structure. 1698 * This should be a hot path as we will have just looked this path up 1699 * in the softmmu lookup code (or helper). We don't handle re-fills or 1700 * checking the victim table. This is purely informational. 1701 * 1702 * This almost never fails as the memory access being instrumented 1703 * should have just filled the TLB. The one corner case is io_writex 1704 * which can cause TLB flushes and potential resizing of the TLBs 1705 * losing the information we need. In those cases we need to recover 1706 * data from a copy of the CPUTLBEntryFull. As long as this always occurs 1707 * from the same thread (which a mem callback will be) this is safe. 1708 */ 1709 1710 bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr, int mmu_idx, 1711 bool is_store, struct qemu_plugin_hwaddr *data) 1712 { 1713 CPUArchState *env = cpu->env_ptr; 1714 CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr); 1715 uintptr_t index = tlb_index(env, mmu_idx, addr); 1716 target_ulong tlb_addr = is_store ? tlb_addr_write(tlbe) : tlbe->addr_read; 1717 1718 if (likely(tlb_hit(tlb_addr, addr))) { 1719 /* We must have an iotlb entry for MMIO */ 1720 if (tlb_addr & TLB_MMIO) { 1721 CPUTLBEntryFull *full; 1722 full = &env_tlb(env)->d[mmu_idx].fulltlb[index]; 1723 data->is_io = true; 1724 data->v.io.section = 1725 iotlb_to_section(cpu, full->xlat_section, full->attrs); 1726 data->v.io.offset = (full->xlat_section & TARGET_PAGE_MASK) + addr; 1727 } else { 1728 data->is_io = false; 1729 data->v.ram.hostaddr = (void *)((uintptr_t)addr + tlbe->addend); 1730 } 1731 return true; 1732 } else { 1733 SavedIOTLB *saved = &cpu->saved_iotlb; 1734 data->is_io = true; 1735 data->v.io.section = saved->section; 1736 data->v.io.offset = saved->mr_offset; 1737 return true; 1738 } 1739 } 1740 1741 #endif 1742 1743 /* 1744 * Probe for an atomic operation. Do not allow unaligned operations, 1745 * or io operations to proceed. Return the host address. 1746 * 1747 * @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE. 1748 */ 1749 static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, 1750 MemOpIdx oi, int size, int prot, 1751 uintptr_t retaddr) 1752 { 1753 uintptr_t mmu_idx = get_mmuidx(oi); 1754 MemOp mop = get_memop(oi); 1755 int a_bits = get_alignment_bits(mop); 1756 uintptr_t index; 1757 CPUTLBEntry *tlbe; 1758 target_ulong tlb_addr; 1759 void *hostaddr; 1760 1761 tcg_debug_assert(mmu_idx < NB_MMU_MODES); 1762 1763 /* Adjust the given return address. */ 1764 retaddr -= GETPC_ADJ; 1765 1766 /* Enforce guest required alignment. */ 1767 if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) { 1768 /* ??? Maybe indicate atomic op to cpu_unaligned_access */ 1769 cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE, 1770 mmu_idx, retaddr); 1771 } 1772 1773 /* Enforce qemu required alignment. */ 1774 if (unlikely(addr & (size - 1))) { 1775 /* We get here if guest alignment was not requested, 1776 or was not enforced by cpu_unaligned_access above. 1777 We might widen the access and emulate, but for now 1778 mark an exception and exit the cpu loop. */ 1779 goto stop_the_world; 1780 } 1781 1782 index = tlb_index(env, mmu_idx, addr); 1783 tlbe = tlb_entry(env, mmu_idx, addr); 1784 1785 /* Check TLB entry and enforce page permissions. */ 1786 if (prot & PAGE_WRITE) { 1787 tlb_addr = tlb_addr_write(tlbe); 1788 if (!tlb_hit(tlb_addr, addr)) { 1789 if (!VICTIM_TLB_HIT(addr_write, addr)) { 1790 tlb_fill(env_cpu(env), addr, size, 1791 MMU_DATA_STORE, mmu_idx, retaddr); 1792 index = tlb_index(env, mmu_idx, addr); 1793 tlbe = tlb_entry(env, mmu_idx, addr); 1794 } 1795 tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK; 1796 } 1797 1798 /* Let the guest notice RMW on a write-only page. */ 1799 if ((prot & PAGE_READ) && 1800 unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) { 1801 tlb_fill(env_cpu(env), addr, size, 1802 MMU_DATA_LOAD, mmu_idx, retaddr); 1803 /* 1804 * Since we don't support reads and writes to different addresses, 1805 * and we do have the proper page loaded for write, this shouldn't 1806 * ever return. But just in case, handle via stop-the-world. 1807 */ 1808 goto stop_the_world; 1809 } 1810 } else /* if (prot & PAGE_READ) */ { 1811 tlb_addr = tlbe->addr_read; 1812 if (!tlb_hit(tlb_addr, addr)) { 1813 if (!VICTIM_TLB_HIT(addr_write, addr)) { 1814 tlb_fill(env_cpu(env), addr, size, 1815 MMU_DATA_LOAD, mmu_idx, retaddr); 1816 index = tlb_index(env, mmu_idx, addr); 1817 tlbe = tlb_entry(env, mmu_idx, addr); 1818 } 1819 tlb_addr = tlbe->addr_read & ~TLB_INVALID_MASK; 1820 } 1821 } 1822 1823 /* Notice an IO access or a needs-MMU-lookup access */ 1824 if (unlikely(tlb_addr & TLB_MMIO)) { 1825 /* There's really nothing that can be done to 1826 support this apart from stop-the-world. */ 1827 goto stop_the_world; 1828 } 1829 1830 hostaddr = (void *)((uintptr_t)addr + tlbe->addend); 1831 1832 if (unlikely(tlb_addr & TLB_NOTDIRTY)) { 1833 notdirty_write(env_cpu(env), addr, size, 1834 &env_tlb(env)->d[mmu_idx].fulltlb[index], retaddr); 1835 } 1836 1837 return hostaddr; 1838 1839 stop_the_world: 1840 cpu_loop_exit_atomic(env_cpu(env), retaddr); 1841 } 1842 1843 /* 1844 * Verify that we have passed the correct MemOp to the correct function. 1845 * 1846 * In the case of the helper_*_mmu functions, we will have done this by 1847 * using the MemOp to look up the helper during code generation. 1848 * 1849 * In the case of the cpu_*_mmu functions, this is up to the caller. 1850 * We could present one function to target code, and dispatch based on 1851 * the MemOp, but so far we have worked hard to avoid an indirect function 1852 * call along the memory path. 1853 */ 1854 static void validate_memop(MemOpIdx oi, MemOp expected) 1855 { 1856 #ifdef CONFIG_DEBUG_TCG 1857 MemOp have = get_memop(oi) & (MO_SIZE | MO_BSWAP); 1858 assert(have == expected); 1859 #endif 1860 } 1861 1862 /* 1863 * Load Helpers 1864 * 1865 * We support two different access types. SOFTMMU_CODE_ACCESS is 1866 * specifically for reading instructions from system memory. It is 1867 * called by the translation loop and in some helpers where the code 1868 * is disassembled. It shouldn't be called directly by guest code. 1869 */ 1870 1871 typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr, 1872 MemOpIdx oi, uintptr_t retaddr); 1873 1874 static inline uint64_t QEMU_ALWAYS_INLINE 1875 load_memop(const void *haddr, MemOp op) 1876 { 1877 switch (op) { 1878 case MO_UB: 1879 return ldub_p(haddr); 1880 case MO_BEUW: 1881 return lduw_be_p(haddr); 1882 case MO_LEUW: 1883 return lduw_le_p(haddr); 1884 case MO_BEUL: 1885 return (uint32_t)ldl_be_p(haddr); 1886 case MO_LEUL: 1887 return (uint32_t)ldl_le_p(haddr); 1888 case MO_BEUQ: 1889 return ldq_be_p(haddr); 1890 case MO_LEUQ: 1891 return ldq_le_p(haddr); 1892 default: 1893 qemu_build_not_reached(); 1894 } 1895 } 1896 1897 static inline uint64_t QEMU_ALWAYS_INLINE 1898 load_helper(CPUArchState *env, target_ulong addr, MemOpIdx oi, 1899 uintptr_t retaddr, MemOp op, bool code_read, 1900 FullLoadHelper *full_load) 1901 { 1902 const size_t tlb_off = code_read ? 1903 offsetof(CPUTLBEntry, addr_code) : offsetof(CPUTLBEntry, addr_read); 1904 const MMUAccessType access_type = 1905 code_read ? MMU_INST_FETCH : MMU_DATA_LOAD; 1906 const unsigned a_bits = get_alignment_bits(get_memop(oi)); 1907 const size_t size = memop_size(op); 1908 uintptr_t mmu_idx = get_mmuidx(oi); 1909 uintptr_t index; 1910 CPUTLBEntry *entry; 1911 target_ulong tlb_addr; 1912 void *haddr; 1913 uint64_t res; 1914 1915 tcg_debug_assert(mmu_idx < NB_MMU_MODES); 1916 1917 /* Handle CPU specific unaligned behaviour */ 1918 if (addr & ((1 << a_bits) - 1)) { 1919 cpu_unaligned_access(env_cpu(env), addr, access_type, 1920 mmu_idx, retaddr); 1921 } 1922 1923 index = tlb_index(env, mmu_idx, addr); 1924 entry = tlb_entry(env, mmu_idx, addr); 1925 tlb_addr = code_read ? entry->addr_code : entry->addr_read; 1926 1927 /* If the TLB entry is for a different page, reload and try again. */ 1928 if (!tlb_hit(tlb_addr, addr)) { 1929 if (!victim_tlb_hit(env, mmu_idx, index, tlb_off, 1930 addr & TARGET_PAGE_MASK)) { 1931 tlb_fill(env_cpu(env), addr, size, 1932 access_type, mmu_idx, retaddr); 1933 index = tlb_index(env, mmu_idx, addr); 1934 entry = tlb_entry(env, mmu_idx, addr); 1935 } 1936 tlb_addr = code_read ? entry->addr_code : entry->addr_read; 1937 tlb_addr &= ~TLB_INVALID_MASK; 1938 } 1939 1940 /* Handle anything that isn't just a straight memory access. */ 1941 if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) { 1942 CPUTLBEntryFull *full; 1943 bool need_swap; 1944 1945 /* For anything that is unaligned, recurse through full_load. */ 1946 if ((addr & (size - 1)) != 0) { 1947 goto do_unaligned_access; 1948 } 1949 1950 full = &env_tlb(env)->d[mmu_idx].fulltlb[index]; 1951 1952 /* Handle watchpoints. */ 1953 if (unlikely(tlb_addr & TLB_WATCHPOINT)) { 1954 /* On watchpoint hit, this will longjmp out. */ 1955 cpu_check_watchpoint(env_cpu(env), addr, size, 1956 full->attrs, BP_MEM_READ, retaddr); 1957 } 1958 1959 need_swap = size > 1 && (tlb_addr & TLB_BSWAP); 1960 1961 /* Handle I/O access. */ 1962 if (likely(tlb_addr & TLB_MMIO)) { 1963 return io_readx(env, full, mmu_idx, addr, retaddr, 1964 access_type, op ^ (need_swap * MO_BSWAP)); 1965 } 1966 1967 haddr = (void *)((uintptr_t)addr + entry->addend); 1968 1969 /* 1970 * Keep these two load_memop separate to ensure that the compiler 1971 * is able to fold the entire function to a single instruction. 1972 * There is a build-time assert inside to remind you of this. ;-) 1973 */ 1974 if (unlikely(need_swap)) { 1975 return load_memop(haddr, op ^ MO_BSWAP); 1976 } 1977 return load_memop(haddr, op); 1978 } 1979 1980 /* Handle slow unaligned access (it spans two pages or IO). */ 1981 if (size > 1 1982 && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1 1983 >= TARGET_PAGE_SIZE)) { 1984 target_ulong addr1, addr2; 1985 uint64_t r1, r2; 1986 unsigned shift; 1987 do_unaligned_access: 1988 addr1 = addr & ~((target_ulong)size - 1); 1989 addr2 = addr1 + size; 1990 r1 = full_load(env, addr1, oi, retaddr); 1991 r2 = full_load(env, addr2, oi, retaddr); 1992 shift = (addr & (size - 1)) * 8; 1993 1994 if (memop_big_endian(op)) { 1995 /* Big-endian combine. */ 1996 res = (r1 << shift) | (r2 >> ((size * 8) - shift)); 1997 } else { 1998 /* Little-endian combine. */ 1999 res = (r1 >> shift) | (r2 << ((size * 8) - shift)); 2000 } 2001 return res & MAKE_64BIT_MASK(0, size * 8); 2002 } 2003 2004 haddr = (void *)((uintptr_t)addr + entry->addend); 2005 return load_memop(haddr, op); 2006 } 2007 2008 /* 2009 * For the benefit of TCG generated code, we want to avoid the 2010 * complication of ABI-specific return type promotion and always 2011 * return a value extended to the register size of the host. This is 2012 * tcg_target_long, except in the case of a 32-bit host and 64-bit 2013 * data, and for that we always have uint64_t. 2014 * 2015 * We don't bother with this widened value for SOFTMMU_CODE_ACCESS. 2016 */ 2017 2018 static uint64_t full_ldub_mmu(CPUArchState *env, target_ulong addr, 2019 MemOpIdx oi, uintptr_t retaddr) 2020 { 2021 validate_memop(oi, MO_UB); 2022 return load_helper(env, addr, oi, retaddr, MO_UB, false, full_ldub_mmu); 2023 } 2024 2025 tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr, 2026 MemOpIdx oi, uintptr_t retaddr) 2027 { 2028 return full_ldub_mmu(env, addr, oi, retaddr); 2029 } 2030 2031 static uint64_t full_le_lduw_mmu(CPUArchState *env, target_ulong addr, 2032 MemOpIdx oi, uintptr_t retaddr) 2033 { 2034 validate_memop(oi, MO_LEUW); 2035 return load_helper(env, addr, oi, retaddr, MO_LEUW, false, 2036 full_le_lduw_mmu); 2037 } 2038 2039 tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr, 2040 MemOpIdx oi, uintptr_t retaddr) 2041 { 2042 return full_le_lduw_mmu(env, addr, oi, retaddr); 2043 } 2044 2045 static uint64_t full_be_lduw_mmu(CPUArchState *env, target_ulong addr, 2046 MemOpIdx oi, uintptr_t retaddr) 2047 { 2048 validate_memop(oi, MO_BEUW); 2049 return load_helper(env, addr, oi, retaddr, MO_BEUW, false, 2050 full_be_lduw_mmu); 2051 } 2052 2053 tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr, 2054 MemOpIdx oi, uintptr_t retaddr) 2055 { 2056 return full_be_lduw_mmu(env, addr, oi, retaddr); 2057 } 2058 2059 static uint64_t full_le_ldul_mmu(CPUArchState *env, target_ulong addr, 2060 MemOpIdx oi, uintptr_t retaddr) 2061 { 2062 validate_memop(oi, MO_LEUL); 2063 return load_helper(env, addr, oi, retaddr, MO_LEUL, false, 2064 full_le_ldul_mmu); 2065 } 2066 2067 tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr, 2068 MemOpIdx oi, uintptr_t retaddr) 2069 { 2070 return full_le_ldul_mmu(env, addr, oi, retaddr); 2071 } 2072 2073 static uint64_t full_be_ldul_mmu(CPUArchState *env, target_ulong addr, 2074 MemOpIdx oi, uintptr_t retaddr) 2075 { 2076 validate_memop(oi, MO_BEUL); 2077 return load_helper(env, addr, oi, retaddr, MO_BEUL, false, 2078 full_be_ldul_mmu); 2079 } 2080 2081 tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr, 2082 MemOpIdx oi, uintptr_t retaddr) 2083 { 2084 return full_be_ldul_mmu(env, addr, oi, retaddr); 2085 } 2086 2087 uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr, 2088 MemOpIdx oi, uintptr_t retaddr) 2089 { 2090 validate_memop(oi, MO_LEUQ); 2091 return load_helper(env, addr, oi, retaddr, MO_LEUQ, false, 2092 helper_le_ldq_mmu); 2093 } 2094 2095 uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr, 2096 MemOpIdx oi, uintptr_t retaddr) 2097 { 2098 validate_memop(oi, MO_BEUQ); 2099 return load_helper(env, addr, oi, retaddr, MO_BEUQ, false, 2100 helper_be_ldq_mmu); 2101 } 2102 2103 /* 2104 * Provide signed versions of the load routines as well. We can of course 2105 * avoid this for 64-bit data, or for 32-bit data on 32-bit host. 2106 */ 2107 2108 2109 tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr, 2110 MemOpIdx oi, uintptr_t retaddr) 2111 { 2112 return (int8_t)helper_ret_ldub_mmu(env, addr, oi, retaddr); 2113 } 2114 2115 tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr, 2116 MemOpIdx oi, uintptr_t retaddr) 2117 { 2118 return (int16_t)helper_le_lduw_mmu(env, addr, oi, retaddr); 2119 } 2120 2121 tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr, 2122 MemOpIdx oi, uintptr_t retaddr) 2123 { 2124 return (int16_t)helper_be_lduw_mmu(env, addr, oi, retaddr); 2125 } 2126 2127 tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr, 2128 MemOpIdx oi, uintptr_t retaddr) 2129 { 2130 return (int32_t)helper_le_ldul_mmu(env, addr, oi, retaddr); 2131 } 2132 2133 tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr, 2134 MemOpIdx oi, uintptr_t retaddr) 2135 { 2136 return (int32_t)helper_be_ldul_mmu(env, addr, oi, retaddr); 2137 } 2138 2139 /* 2140 * Load helpers for cpu_ldst.h. 2141 */ 2142 2143 static inline uint64_t cpu_load_helper(CPUArchState *env, abi_ptr addr, 2144 MemOpIdx oi, uintptr_t retaddr, 2145 FullLoadHelper *full_load) 2146 { 2147 uint64_t ret; 2148 2149 ret = full_load(env, addr, oi, retaddr); 2150 qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); 2151 return ret; 2152 } 2153 2154 uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr, MemOpIdx oi, uintptr_t ra) 2155 { 2156 return cpu_load_helper(env, addr, oi, ra, full_ldub_mmu); 2157 } 2158 2159 uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr addr, 2160 MemOpIdx oi, uintptr_t ra) 2161 { 2162 return cpu_load_helper(env, addr, oi, ra, full_be_lduw_mmu); 2163 } 2164 2165 uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr addr, 2166 MemOpIdx oi, uintptr_t ra) 2167 { 2168 return cpu_load_helper(env, addr, oi, ra, full_be_ldul_mmu); 2169 } 2170 2171 uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr, 2172 MemOpIdx oi, uintptr_t ra) 2173 { 2174 return cpu_load_helper(env, addr, oi, ra, helper_be_ldq_mmu); 2175 } 2176 2177 uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr addr, 2178 MemOpIdx oi, uintptr_t ra) 2179 { 2180 return cpu_load_helper(env, addr, oi, ra, full_le_lduw_mmu); 2181 } 2182 2183 uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr addr, 2184 MemOpIdx oi, uintptr_t ra) 2185 { 2186 return cpu_load_helper(env, addr, oi, ra, full_le_ldul_mmu); 2187 } 2188 2189 uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr, 2190 MemOpIdx oi, uintptr_t ra) 2191 { 2192 return cpu_load_helper(env, addr, oi, ra, helper_le_ldq_mmu); 2193 } 2194 2195 Int128 cpu_ld16_be_mmu(CPUArchState *env, abi_ptr addr, 2196 MemOpIdx oi, uintptr_t ra) 2197 { 2198 MemOp mop = get_memop(oi); 2199 int mmu_idx = get_mmuidx(oi); 2200 MemOpIdx new_oi; 2201 unsigned a_bits; 2202 uint64_t h, l; 2203 2204 tcg_debug_assert((mop & (MO_BSWAP|MO_SSIZE)) == (MO_BE|MO_128)); 2205 a_bits = get_alignment_bits(mop); 2206 2207 /* Handle CPU specific unaligned behaviour */ 2208 if (addr & ((1 << a_bits) - 1)) { 2209 cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_LOAD, 2210 mmu_idx, ra); 2211 } 2212 2213 /* Construct an unaligned 64-bit replacement MemOpIdx. */ 2214 mop = (mop & ~(MO_SIZE | MO_AMASK)) | MO_64 | MO_UNALN; 2215 new_oi = make_memop_idx(mop, mmu_idx); 2216 2217 h = helper_be_ldq_mmu(env, addr, new_oi, ra); 2218 l = helper_be_ldq_mmu(env, addr + 8, new_oi, ra); 2219 2220 qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); 2221 return int128_make128(l, h); 2222 } 2223 2224 Int128 cpu_ld16_le_mmu(CPUArchState *env, abi_ptr addr, 2225 MemOpIdx oi, uintptr_t ra) 2226 { 2227 MemOp mop = get_memop(oi); 2228 int mmu_idx = get_mmuidx(oi); 2229 MemOpIdx new_oi; 2230 unsigned a_bits; 2231 uint64_t h, l; 2232 2233 tcg_debug_assert((mop & (MO_BSWAP|MO_SSIZE)) == (MO_LE|MO_128)); 2234 a_bits = get_alignment_bits(mop); 2235 2236 /* Handle CPU specific unaligned behaviour */ 2237 if (addr & ((1 << a_bits) - 1)) { 2238 cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_LOAD, 2239 mmu_idx, ra); 2240 } 2241 2242 /* Construct an unaligned 64-bit replacement MemOpIdx. */ 2243 mop = (mop & ~(MO_SIZE | MO_AMASK)) | MO_64 | MO_UNALN; 2244 new_oi = make_memop_idx(mop, mmu_idx); 2245 2246 l = helper_le_ldq_mmu(env, addr, new_oi, ra); 2247 h = helper_le_ldq_mmu(env, addr + 8, new_oi, ra); 2248 2249 qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); 2250 return int128_make128(l, h); 2251 } 2252 2253 /* 2254 * Store Helpers 2255 */ 2256 2257 static inline void QEMU_ALWAYS_INLINE 2258 store_memop(void *haddr, uint64_t val, MemOp op) 2259 { 2260 switch (op) { 2261 case MO_UB: 2262 stb_p(haddr, val); 2263 break; 2264 case MO_BEUW: 2265 stw_be_p(haddr, val); 2266 break; 2267 case MO_LEUW: 2268 stw_le_p(haddr, val); 2269 break; 2270 case MO_BEUL: 2271 stl_be_p(haddr, val); 2272 break; 2273 case MO_LEUL: 2274 stl_le_p(haddr, val); 2275 break; 2276 case MO_BEUQ: 2277 stq_be_p(haddr, val); 2278 break; 2279 case MO_LEUQ: 2280 stq_le_p(haddr, val); 2281 break; 2282 default: 2283 qemu_build_not_reached(); 2284 } 2285 } 2286 2287 static void full_stb_mmu(CPUArchState *env, target_ulong addr, uint64_t val, 2288 MemOpIdx oi, uintptr_t retaddr); 2289 2290 static void __attribute__((noinline)) 2291 store_helper_unaligned(CPUArchState *env, target_ulong addr, uint64_t val, 2292 uintptr_t retaddr, size_t size, uintptr_t mmu_idx, 2293 bool big_endian) 2294 { 2295 const size_t tlb_off = offsetof(CPUTLBEntry, addr_write); 2296 uintptr_t index, index2; 2297 CPUTLBEntry *entry, *entry2; 2298 target_ulong page1, page2, tlb_addr, tlb_addr2; 2299 MemOpIdx oi; 2300 size_t size2; 2301 int i; 2302 2303 /* 2304 * Ensure the second page is in the TLB. Note that the first page 2305 * is already guaranteed to be filled, and that the second page 2306 * cannot evict the first. An exception to this rule is PAGE_WRITE_INV 2307 * handling: the first page could have evicted itself. 2308 */ 2309 page1 = addr & TARGET_PAGE_MASK; 2310 page2 = (addr + size) & TARGET_PAGE_MASK; 2311 size2 = (addr + size) & ~TARGET_PAGE_MASK; 2312 index2 = tlb_index(env, mmu_idx, page2); 2313 entry2 = tlb_entry(env, mmu_idx, page2); 2314 2315 tlb_addr2 = tlb_addr_write(entry2); 2316 if (page1 != page2 && !tlb_hit_page(tlb_addr2, page2)) { 2317 if (!victim_tlb_hit(env, mmu_idx, index2, tlb_off, page2)) { 2318 tlb_fill(env_cpu(env), page2, size2, MMU_DATA_STORE, 2319 mmu_idx, retaddr); 2320 index2 = tlb_index(env, mmu_idx, page2); 2321 entry2 = tlb_entry(env, mmu_idx, page2); 2322 } 2323 tlb_addr2 = tlb_addr_write(entry2); 2324 } 2325 2326 index = tlb_index(env, mmu_idx, addr); 2327 entry = tlb_entry(env, mmu_idx, addr); 2328 tlb_addr = tlb_addr_write(entry); 2329 2330 /* 2331 * Handle watchpoints. Since this may trap, all checks 2332 * must happen before any store. 2333 */ 2334 if (unlikely(tlb_addr & TLB_WATCHPOINT)) { 2335 cpu_check_watchpoint(env_cpu(env), addr, size - size2, 2336 env_tlb(env)->d[mmu_idx].fulltlb[index].attrs, 2337 BP_MEM_WRITE, retaddr); 2338 } 2339 if (unlikely(tlb_addr2 & TLB_WATCHPOINT)) { 2340 cpu_check_watchpoint(env_cpu(env), page2, size2, 2341 env_tlb(env)->d[mmu_idx].fulltlb[index2].attrs, 2342 BP_MEM_WRITE, retaddr); 2343 } 2344 2345 /* 2346 * XXX: not efficient, but simple. 2347 * This loop must go in the forward direction to avoid issues 2348 * with self-modifying code in Windows 64-bit. 2349 */ 2350 oi = make_memop_idx(MO_UB, mmu_idx); 2351 if (big_endian) { 2352 for (i = 0; i < size; ++i) { 2353 /* Big-endian extract. */ 2354 uint8_t val8 = val >> (((size - 1) * 8) - (i * 8)); 2355 full_stb_mmu(env, addr + i, val8, oi, retaddr); 2356 } 2357 } else { 2358 for (i = 0; i < size; ++i) { 2359 /* Little-endian extract. */ 2360 uint8_t val8 = val >> (i * 8); 2361 full_stb_mmu(env, addr + i, val8, oi, retaddr); 2362 } 2363 } 2364 } 2365 2366 static inline void QEMU_ALWAYS_INLINE 2367 store_helper(CPUArchState *env, target_ulong addr, uint64_t val, 2368 MemOpIdx oi, uintptr_t retaddr, MemOp op) 2369 { 2370 const size_t tlb_off = offsetof(CPUTLBEntry, addr_write); 2371 const unsigned a_bits = get_alignment_bits(get_memop(oi)); 2372 const size_t size = memop_size(op); 2373 uintptr_t mmu_idx = get_mmuidx(oi); 2374 uintptr_t index; 2375 CPUTLBEntry *entry; 2376 target_ulong tlb_addr; 2377 void *haddr; 2378 2379 tcg_debug_assert(mmu_idx < NB_MMU_MODES); 2380 2381 /* Handle CPU specific unaligned behaviour */ 2382 if (addr & ((1 << a_bits) - 1)) { 2383 cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE, 2384 mmu_idx, retaddr); 2385 } 2386 2387 index = tlb_index(env, mmu_idx, addr); 2388 entry = tlb_entry(env, mmu_idx, addr); 2389 tlb_addr = tlb_addr_write(entry); 2390 2391 /* If the TLB entry is for a different page, reload and try again. */ 2392 if (!tlb_hit(tlb_addr, addr)) { 2393 if (!victim_tlb_hit(env, mmu_idx, index, tlb_off, 2394 addr & TARGET_PAGE_MASK)) { 2395 tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE, 2396 mmu_idx, retaddr); 2397 index = tlb_index(env, mmu_idx, addr); 2398 entry = tlb_entry(env, mmu_idx, addr); 2399 } 2400 tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK; 2401 } 2402 2403 /* Handle anything that isn't just a straight memory access. */ 2404 if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) { 2405 CPUTLBEntryFull *full; 2406 bool need_swap; 2407 2408 /* For anything that is unaligned, recurse through byte stores. */ 2409 if ((addr & (size - 1)) != 0) { 2410 goto do_unaligned_access; 2411 } 2412 2413 full = &env_tlb(env)->d[mmu_idx].fulltlb[index]; 2414 2415 /* Handle watchpoints. */ 2416 if (unlikely(tlb_addr & TLB_WATCHPOINT)) { 2417 /* On watchpoint hit, this will longjmp out. */ 2418 cpu_check_watchpoint(env_cpu(env), addr, size, 2419 full->attrs, BP_MEM_WRITE, retaddr); 2420 } 2421 2422 need_swap = size > 1 && (tlb_addr & TLB_BSWAP); 2423 2424 /* Handle I/O access. */ 2425 if (tlb_addr & TLB_MMIO) { 2426 io_writex(env, full, mmu_idx, val, addr, retaddr, 2427 op ^ (need_swap * MO_BSWAP)); 2428 return; 2429 } 2430 2431 /* Ignore writes to ROM. */ 2432 if (unlikely(tlb_addr & TLB_DISCARD_WRITE)) { 2433 return; 2434 } 2435 2436 /* Handle clean RAM pages. */ 2437 if (tlb_addr & TLB_NOTDIRTY) { 2438 notdirty_write(env_cpu(env), addr, size, full, retaddr); 2439 } 2440 2441 haddr = (void *)((uintptr_t)addr + entry->addend); 2442 2443 /* 2444 * Keep these two store_memop separate to ensure that the compiler 2445 * is able to fold the entire function to a single instruction. 2446 * There is a build-time assert inside to remind you of this. ;-) 2447 */ 2448 if (unlikely(need_swap)) { 2449 store_memop(haddr, val, op ^ MO_BSWAP); 2450 } else { 2451 store_memop(haddr, val, op); 2452 } 2453 return; 2454 } 2455 2456 /* Handle slow unaligned access (it spans two pages or IO). */ 2457 if (size > 1 2458 && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1 2459 >= TARGET_PAGE_SIZE)) { 2460 do_unaligned_access: 2461 store_helper_unaligned(env, addr, val, retaddr, size, 2462 mmu_idx, memop_big_endian(op)); 2463 return; 2464 } 2465 2466 haddr = (void *)((uintptr_t)addr + entry->addend); 2467 store_memop(haddr, val, op); 2468 } 2469 2470 static void __attribute__((noinline)) 2471 full_stb_mmu(CPUArchState *env, target_ulong addr, uint64_t val, 2472 MemOpIdx oi, uintptr_t retaddr) 2473 { 2474 validate_memop(oi, MO_UB); 2475 store_helper(env, addr, val, oi, retaddr, MO_UB); 2476 } 2477 2478 void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val, 2479 MemOpIdx oi, uintptr_t retaddr) 2480 { 2481 full_stb_mmu(env, addr, val, oi, retaddr); 2482 } 2483 2484 static void full_le_stw_mmu(CPUArchState *env, target_ulong addr, uint64_t val, 2485 MemOpIdx oi, uintptr_t retaddr) 2486 { 2487 validate_memop(oi, MO_LEUW); 2488 store_helper(env, addr, val, oi, retaddr, MO_LEUW); 2489 } 2490 2491 void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val, 2492 MemOpIdx oi, uintptr_t retaddr) 2493 { 2494 full_le_stw_mmu(env, addr, val, oi, retaddr); 2495 } 2496 2497 static void full_be_stw_mmu(CPUArchState *env, target_ulong addr, uint64_t val, 2498 MemOpIdx oi, uintptr_t retaddr) 2499 { 2500 validate_memop(oi, MO_BEUW); 2501 store_helper(env, addr, val, oi, retaddr, MO_BEUW); 2502 } 2503 2504 void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val, 2505 MemOpIdx oi, uintptr_t retaddr) 2506 { 2507 full_be_stw_mmu(env, addr, val, oi, retaddr); 2508 } 2509 2510 static void full_le_stl_mmu(CPUArchState *env, target_ulong addr, uint64_t val, 2511 MemOpIdx oi, uintptr_t retaddr) 2512 { 2513 validate_memop(oi, MO_LEUL); 2514 store_helper(env, addr, val, oi, retaddr, MO_LEUL); 2515 } 2516 2517 void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, 2518 MemOpIdx oi, uintptr_t retaddr) 2519 { 2520 full_le_stl_mmu(env, addr, val, oi, retaddr); 2521 } 2522 2523 static void full_be_stl_mmu(CPUArchState *env, target_ulong addr, uint64_t val, 2524 MemOpIdx oi, uintptr_t retaddr) 2525 { 2526 validate_memop(oi, MO_BEUL); 2527 store_helper(env, addr, val, oi, retaddr, MO_BEUL); 2528 } 2529 2530 void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, 2531 MemOpIdx oi, uintptr_t retaddr) 2532 { 2533 full_be_stl_mmu(env, addr, val, oi, retaddr); 2534 } 2535 2536 void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, 2537 MemOpIdx oi, uintptr_t retaddr) 2538 { 2539 validate_memop(oi, MO_LEUQ); 2540 store_helper(env, addr, val, oi, retaddr, MO_LEUQ); 2541 } 2542 2543 void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, 2544 MemOpIdx oi, uintptr_t retaddr) 2545 { 2546 validate_memop(oi, MO_BEUQ); 2547 store_helper(env, addr, val, oi, retaddr, MO_BEUQ); 2548 } 2549 2550 /* 2551 * Store Helpers for cpu_ldst.h 2552 */ 2553 2554 typedef void FullStoreHelper(CPUArchState *env, target_ulong addr, 2555 uint64_t val, MemOpIdx oi, uintptr_t retaddr); 2556 2557 static inline void cpu_store_helper(CPUArchState *env, target_ulong addr, 2558 uint64_t val, MemOpIdx oi, uintptr_t ra, 2559 FullStoreHelper *full_store) 2560 { 2561 full_store(env, addr, val, oi, ra); 2562 qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); 2563 } 2564 2565 void cpu_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val, 2566 MemOpIdx oi, uintptr_t retaddr) 2567 { 2568 cpu_store_helper(env, addr, val, oi, retaddr, full_stb_mmu); 2569 } 2570 2571 void cpu_stw_be_mmu(CPUArchState *env, target_ulong addr, uint16_t val, 2572 MemOpIdx oi, uintptr_t retaddr) 2573 { 2574 cpu_store_helper(env, addr, val, oi, retaddr, full_be_stw_mmu); 2575 } 2576 2577 void cpu_stl_be_mmu(CPUArchState *env, target_ulong addr, uint32_t val, 2578 MemOpIdx oi, uintptr_t retaddr) 2579 { 2580 cpu_store_helper(env, addr, val, oi, retaddr, full_be_stl_mmu); 2581 } 2582 2583 void cpu_stq_be_mmu(CPUArchState *env, target_ulong addr, uint64_t val, 2584 MemOpIdx oi, uintptr_t retaddr) 2585 { 2586 cpu_store_helper(env, addr, val, oi, retaddr, helper_be_stq_mmu); 2587 } 2588 2589 void cpu_stw_le_mmu(CPUArchState *env, target_ulong addr, uint16_t val, 2590 MemOpIdx oi, uintptr_t retaddr) 2591 { 2592 cpu_store_helper(env, addr, val, oi, retaddr, full_le_stw_mmu); 2593 } 2594 2595 void cpu_stl_le_mmu(CPUArchState *env, target_ulong addr, uint32_t val, 2596 MemOpIdx oi, uintptr_t retaddr) 2597 { 2598 cpu_store_helper(env, addr, val, oi, retaddr, full_le_stl_mmu); 2599 } 2600 2601 void cpu_stq_le_mmu(CPUArchState *env, target_ulong addr, uint64_t val, 2602 MemOpIdx oi, uintptr_t retaddr) 2603 { 2604 cpu_store_helper(env, addr, val, oi, retaddr, helper_le_stq_mmu); 2605 } 2606 2607 void cpu_st16_be_mmu(CPUArchState *env, abi_ptr addr, Int128 val, 2608 MemOpIdx oi, uintptr_t ra) 2609 { 2610 MemOp mop = get_memop(oi); 2611 int mmu_idx = get_mmuidx(oi); 2612 MemOpIdx new_oi; 2613 unsigned a_bits; 2614 2615 tcg_debug_assert((mop & (MO_BSWAP|MO_SSIZE)) == (MO_BE|MO_128)); 2616 a_bits = get_alignment_bits(mop); 2617 2618 /* Handle CPU specific unaligned behaviour */ 2619 if (addr & ((1 << a_bits) - 1)) { 2620 cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE, 2621 mmu_idx, ra); 2622 } 2623 2624 /* Construct an unaligned 64-bit replacement MemOpIdx. */ 2625 mop = (mop & ~(MO_SIZE | MO_AMASK)) | MO_64 | MO_UNALN; 2626 new_oi = make_memop_idx(mop, mmu_idx); 2627 2628 helper_be_stq_mmu(env, addr, int128_gethi(val), new_oi, ra); 2629 helper_be_stq_mmu(env, addr + 8, int128_getlo(val), new_oi, ra); 2630 2631 qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); 2632 } 2633 2634 void cpu_st16_le_mmu(CPUArchState *env, abi_ptr addr, Int128 val, 2635 MemOpIdx oi, uintptr_t ra) 2636 { 2637 MemOp mop = get_memop(oi); 2638 int mmu_idx = get_mmuidx(oi); 2639 MemOpIdx new_oi; 2640 unsigned a_bits; 2641 2642 tcg_debug_assert((mop & (MO_BSWAP|MO_SSIZE)) == (MO_LE|MO_128)); 2643 a_bits = get_alignment_bits(mop); 2644 2645 /* Handle CPU specific unaligned behaviour */ 2646 if (addr & ((1 << a_bits) - 1)) { 2647 cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE, 2648 mmu_idx, ra); 2649 } 2650 2651 /* Construct an unaligned 64-bit replacement MemOpIdx. */ 2652 mop = (mop & ~(MO_SIZE | MO_AMASK)) | MO_64 | MO_UNALN; 2653 new_oi = make_memop_idx(mop, mmu_idx); 2654 2655 helper_le_stq_mmu(env, addr, int128_getlo(val), new_oi, ra); 2656 helper_le_stq_mmu(env, addr + 8, int128_gethi(val), new_oi, ra); 2657 2658 qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); 2659 } 2660 2661 #include "ldst_common.c.inc" 2662 2663 /* 2664 * First set of functions passes in OI and RETADDR. 2665 * This makes them callable from other helpers. 2666 */ 2667 2668 #define ATOMIC_NAME(X) \ 2669 glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu) 2670 2671 #define ATOMIC_MMU_CLEANUP 2672 2673 #include "atomic_common.c.inc" 2674 2675 #define DATA_SIZE 1 2676 #include "atomic_template.h" 2677 2678 #define DATA_SIZE 2 2679 #include "atomic_template.h" 2680 2681 #define DATA_SIZE 4 2682 #include "atomic_template.h" 2683 2684 #ifdef CONFIG_ATOMIC64 2685 #define DATA_SIZE 8 2686 #include "atomic_template.h" 2687 #endif 2688 2689 #if HAVE_CMPXCHG128 || HAVE_ATOMIC128 2690 #define DATA_SIZE 16 2691 #include "atomic_template.h" 2692 #endif 2693 2694 /* Code access functions. */ 2695 2696 static uint64_t full_ldub_code(CPUArchState *env, target_ulong addr, 2697 MemOpIdx oi, uintptr_t retaddr) 2698 { 2699 return load_helper(env, addr, oi, retaddr, MO_8, true, full_ldub_code); 2700 } 2701 2702 uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr) 2703 { 2704 MemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(env, true)); 2705 return full_ldub_code(env, addr, oi, 0); 2706 } 2707 2708 static uint64_t full_lduw_code(CPUArchState *env, target_ulong addr, 2709 MemOpIdx oi, uintptr_t retaddr) 2710 { 2711 return load_helper(env, addr, oi, retaddr, MO_TEUW, true, full_lduw_code); 2712 } 2713 2714 uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr) 2715 { 2716 MemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(env, true)); 2717 return full_lduw_code(env, addr, oi, 0); 2718 } 2719 2720 static uint64_t full_ldl_code(CPUArchState *env, target_ulong addr, 2721 MemOpIdx oi, uintptr_t retaddr) 2722 { 2723 return load_helper(env, addr, oi, retaddr, MO_TEUL, true, full_ldl_code); 2724 } 2725 2726 uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr) 2727 { 2728 MemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, true)); 2729 return full_ldl_code(env, addr, oi, 0); 2730 } 2731 2732 static uint64_t full_ldq_code(CPUArchState *env, target_ulong addr, 2733 MemOpIdx oi, uintptr_t retaddr) 2734 { 2735 return load_helper(env, addr, oi, retaddr, MO_TEUQ, true, full_ldq_code); 2736 } 2737 2738 uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr) 2739 { 2740 MemOpIdx oi = make_memop_idx(MO_TEUQ, cpu_mmu_index(env, true)); 2741 return full_ldq_code(env, addr, oi, 0); 2742 } 2743