1 /* 2 * Common CPU TLB handling 3 * 4 * Copyright (c) 2003 Fabrice Bellard 5 * 6 * This library is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU Lesser General Public 8 * License as published by the Free Software Foundation; either 9 * version 2.1 of the License, or (at your option) any later version. 10 * 11 * This library is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 * Lesser General Public License for more details. 15 * 16 * You should have received a copy of the GNU Lesser General Public 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 20 #include "qemu/osdep.h" 21 #include "qemu/main-loop.h" 22 #include "hw/core/tcg-cpu-ops.h" 23 #include "exec/exec-all.h" 24 #include "exec/memory.h" 25 #include "exec/cpu_ldst.h" 26 #include "exec/cputlb.h" 27 #include "exec/tb-flush.h" 28 #include "exec/memory-internal.h" 29 #include "exec/ram_addr.h" 30 #include "tcg/tcg.h" 31 #include "qemu/error-report.h" 32 #include "exec/log.h" 33 #include "exec/helper-proto-common.h" 34 #include "qemu/atomic.h" 35 #include "qemu/atomic128.h" 36 #include "exec/translate-all.h" 37 #include "trace.h" 38 #include "tb-hash.h" 39 #include "internal-common.h" 40 #include "internal-target.h" 41 #ifdef CONFIG_PLUGIN 42 #include "qemu/plugin-memory.h" 43 #endif 44 #include "tcg/tcg-ldst.h" 45 #include "tcg/oversized-guest.h" 46 47 /* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */ 48 /* #define DEBUG_TLB */ 49 /* #define DEBUG_TLB_LOG */ 50 51 #ifdef DEBUG_TLB 52 # define DEBUG_TLB_GATE 1 53 # ifdef DEBUG_TLB_LOG 54 # define DEBUG_TLB_LOG_GATE 1 55 # else 56 # define DEBUG_TLB_LOG_GATE 0 57 # endif 58 #else 59 # define DEBUG_TLB_GATE 0 60 # define DEBUG_TLB_LOG_GATE 0 61 #endif 62 63 #define tlb_debug(fmt, ...) do { \ 64 if (DEBUG_TLB_LOG_GATE) { \ 65 qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \ 66 ## __VA_ARGS__); \ 67 } else if (DEBUG_TLB_GATE) { \ 68 fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \ 69 } \ 70 } while (0) 71 72 #define assert_cpu_is_self(cpu) do { \ 73 if (DEBUG_TLB_GATE) { \ 74 g_assert(!(cpu)->created || qemu_cpu_is_self(cpu)); \ 75 } \ 76 } while (0) 77 78 /* run_on_cpu_data.target_ptr should always be big enough for a 79 * vaddr even on 32 bit builds 80 */ 81 QEMU_BUILD_BUG_ON(sizeof(vaddr) > sizeof(run_on_cpu_data)); 82 83 /* We currently can't handle more than 16 bits in the MMUIDX bitmask. 
84 */ 85 QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16); 86 #define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1) 87 88 static inline size_t tlb_n_entries(CPUTLBDescFast *fast) 89 { 90 return (fast->mask >> CPU_TLB_ENTRY_BITS) + 1; 91 } 92 93 static inline size_t sizeof_tlb(CPUTLBDescFast *fast) 94 { 95 return fast->mask + (1 << CPU_TLB_ENTRY_BITS); 96 } 97 98 static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns, 99 size_t max_entries) 100 { 101 desc->window_begin_ns = ns; 102 desc->window_max_entries = max_entries; 103 } 104 105 static void tb_jmp_cache_clear_page(CPUState *cpu, vaddr page_addr) 106 { 107 CPUJumpCache *jc = cpu->tb_jmp_cache; 108 int i, i0; 109 110 if (unlikely(!jc)) { 111 return; 112 } 113 114 i0 = tb_jmp_cache_hash_page(page_addr); 115 for (i = 0; i < TB_JMP_PAGE_SIZE; i++) { 116 qatomic_set(&jc->array[i0 + i].tb, NULL); 117 } 118 } 119 120 /** 121 * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary 122 * @desc: The CPUTLBDesc portion of the TLB 123 * @fast: The CPUTLBDescFast portion of the same TLB 124 * 125 * Called with tlb_lock_held. 126 * 127 * We have two main constraints when resizing a TLB: (1) we only resize it 128 * on a TLB flush (otherwise we'd have to take a perf hit by either rehashing 129 * the array or unnecessarily flushing it), which means we do not control how 130 * frequently the resizing can occur; (2) we don't have access to the guest's 131 * future scheduling decisions, and therefore have to decide the magnitude of 132 * the resize based on past observations. 133 * 134 * In general, a memory-hungry process can benefit greatly from an appropriately 135 * sized TLB, since a guest TLB miss is very expensive. This doesn't mean that 136 * we just have to make the TLB as large as possible; while an oversized TLB 137 * results in minimal TLB miss rates, it also takes longer to be flushed 138 * (flushes can be _very_ frequent), and the reduced locality can also hurt 139 * performance. 140 * 141 * To achieve near-optimal performance for all kinds of workloads, we: 142 * 143 * 1. Aggressively increase the size of the TLB when the use rate of the 144 * TLB being flushed is high, since it is likely that in the near future this 145 * memory-hungry process will execute again, and its memory hungriness will 146 * probably be similar. 147 * 148 * 2. Slowly reduce the size of the TLB as the use rate declines over a 149 * reasonably large time window. The rationale is that if in such a time window 150 * we have not observed a high TLB use rate, it is likely that we won't observe 151 * it in the near future. In that case, once a time window expires we downsize 152 * the TLB to match the maximum use rate observed in the window. 153 * 154 * 3. Try to keep the maximum use rate in a time window in the 30-70% range, 155 * since in that range performance is likely near-optimal. Recall that the TLB 156 * is direct mapped, so we want the use rate to be low (or at least not too 157 * high), since otherwise we are likely to have a significant amount of 158 * conflict misses. 
159 */ 160 static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast, 161 int64_t now) 162 { 163 size_t old_size = tlb_n_entries(fast); 164 size_t rate; 165 size_t new_size = old_size; 166 int64_t window_len_ms = 100; 167 int64_t window_len_ns = window_len_ms * 1000 * 1000; 168 bool window_expired = now > desc->window_begin_ns + window_len_ns; 169 170 if (desc->n_used_entries > desc->window_max_entries) { 171 desc->window_max_entries = desc->n_used_entries; 172 } 173 rate = desc->window_max_entries * 100 / old_size; 174 175 if (rate > 70) { 176 new_size = MIN(old_size << 1, 1 << CPU_TLB_DYN_MAX_BITS); 177 } else if (rate < 30 && window_expired) { 178 size_t ceil = pow2ceil(desc->window_max_entries); 179 size_t expected_rate = desc->window_max_entries * 100 / ceil; 180 181 /* 182 * Avoid undersizing when the max number of entries seen is just below 183 * a pow2. For instance, if max_entries == 1025, the expected use rate 184 * would be 1025/2048==50%. However, if max_entries == 1023, we'd get 185 * 1023/1024==99.9% use rate, so we'd likely end up doubling the size 186 * later. Thus, make sure that the expected use rate remains below 70%. 187 * (and since we double the size, that means the lowest rate we'd 188 * expect to get is 35%, which is still in the 30-70% range where 189 * we consider that the size is appropriate.) 190 */ 191 if (expected_rate > 70) { 192 ceil *= 2; 193 } 194 new_size = MAX(ceil, 1 << CPU_TLB_DYN_MIN_BITS); 195 } 196 197 if (new_size == old_size) { 198 if (window_expired) { 199 tlb_window_reset(desc, now, desc->n_used_entries); 200 } 201 return; 202 } 203 204 g_free(fast->table); 205 g_free(desc->fulltlb); 206 207 tlb_window_reset(desc, now, 0); 208 /* desc->n_used_entries is cleared by the caller */ 209 fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS; 210 fast->table = g_try_new(CPUTLBEntry, new_size); 211 desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size); 212 213 /* 214 * If the allocations fail, try smaller sizes. We just freed some 215 * memory, so going back to half of new_size has a good chance of working. 216 * Increased memory pressure elsewhere in the system might cause the 217 * allocations to fail though, so we progressively reduce the allocation 218 * size, aborting if we cannot even allocate the smallest TLB we support. 
219 */ 220 while (fast->table == NULL || desc->fulltlb == NULL) { 221 if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) { 222 error_report("%s: %s", __func__, strerror(errno)); 223 abort(); 224 } 225 new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS); 226 fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS; 227 228 g_free(fast->table); 229 g_free(desc->fulltlb); 230 fast->table = g_try_new(CPUTLBEntry, new_size); 231 desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size); 232 } 233 } 234 235 static void tlb_mmu_flush_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast) 236 { 237 desc->n_used_entries = 0; 238 desc->large_page_addr = -1; 239 desc->large_page_mask = -1; 240 desc->vindex = 0; 241 memset(fast->table, -1, sizeof_tlb(fast)); 242 memset(desc->vtable, -1, sizeof(desc->vtable)); 243 } 244 245 static void tlb_flush_one_mmuidx_locked(CPUState *cpu, int mmu_idx, 246 int64_t now) 247 { 248 CPUTLBDesc *desc = &cpu->neg.tlb.d[mmu_idx]; 249 CPUTLBDescFast *fast = &cpu->neg.tlb.f[mmu_idx]; 250 251 tlb_mmu_resize_locked(desc, fast, now); 252 tlb_mmu_flush_locked(desc, fast); 253 } 254 255 static void tlb_mmu_init(CPUTLBDesc *desc, CPUTLBDescFast *fast, int64_t now) 256 { 257 size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS; 258 259 tlb_window_reset(desc, now, 0); 260 desc->n_used_entries = 0; 261 fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS; 262 fast->table = g_new(CPUTLBEntry, n_entries); 263 desc->fulltlb = g_new(CPUTLBEntryFull, n_entries); 264 tlb_mmu_flush_locked(desc, fast); 265 } 266 267 static inline void tlb_n_used_entries_inc(CPUState *cpu, uintptr_t mmu_idx) 268 { 269 cpu->neg.tlb.d[mmu_idx].n_used_entries++; 270 } 271 272 static inline void tlb_n_used_entries_dec(CPUState *cpu, uintptr_t mmu_idx) 273 { 274 cpu->neg.tlb.d[mmu_idx].n_used_entries--; 275 } 276 277 void tlb_init(CPUState *cpu) 278 { 279 int64_t now = get_clock_realtime(); 280 int i; 281 282 qemu_spin_init(&cpu->neg.tlb.c.lock); 283 284 /* All tlbs are initialized flushed. */ 285 cpu->neg.tlb.c.dirty = 0; 286 287 for (i = 0; i < NB_MMU_MODES; i++) { 288 tlb_mmu_init(&cpu->neg.tlb.d[i], &cpu->neg.tlb.f[i], now); 289 } 290 } 291 292 void tlb_destroy(CPUState *cpu) 293 { 294 int i; 295 296 qemu_spin_destroy(&cpu->neg.tlb.c.lock); 297 for (i = 0; i < NB_MMU_MODES; i++) { 298 CPUTLBDesc *desc = &cpu->neg.tlb.d[i]; 299 CPUTLBDescFast *fast = &cpu->neg.tlb.f[i]; 300 301 g_free(fast->table); 302 g_free(desc->fulltlb); 303 } 304 } 305 306 /* flush_all_helper: run fn across all cpus 307 * 308 * If the wait flag is set then the src cpu's helper will be queued as 309 * "safe" work and the loop exited creating a synchronisation point 310 * where all queued work will be finished before execution starts 311 * again. 
312 */ 313 static void flush_all_helper(CPUState *src, run_on_cpu_func fn, 314 run_on_cpu_data d) 315 { 316 CPUState *cpu; 317 318 CPU_FOREACH(cpu) { 319 if (cpu != src) { 320 async_run_on_cpu(cpu, fn, d); 321 } 322 } 323 } 324 325 static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data) 326 { 327 uint16_t asked = data.host_int; 328 uint16_t all_dirty, work, to_clean; 329 int64_t now = get_clock_realtime(); 330 331 assert_cpu_is_self(cpu); 332 333 tlb_debug("mmu_idx:0x%04" PRIx16 "\n", asked); 334 335 qemu_spin_lock(&cpu->neg.tlb.c.lock); 336 337 all_dirty = cpu->neg.tlb.c.dirty; 338 to_clean = asked & all_dirty; 339 all_dirty &= ~to_clean; 340 cpu->neg.tlb.c.dirty = all_dirty; 341 342 for (work = to_clean; work != 0; work &= work - 1) { 343 int mmu_idx = ctz32(work); 344 tlb_flush_one_mmuidx_locked(cpu, mmu_idx, now); 345 } 346 347 qemu_spin_unlock(&cpu->neg.tlb.c.lock); 348 349 tcg_flush_jmp_cache(cpu); 350 351 if (to_clean == ALL_MMUIDX_BITS) { 352 qatomic_set(&cpu->neg.tlb.c.full_flush_count, 353 cpu->neg.tlb.c.full_flush_count + 1); 354 } else { 355 qatomic_set(&cpu->neg.tlb.c.part_flush_count, 356 cpu->neg.tlb.c.part_flush_count + ctpop16(to_clean)); 357 if (to_clean != asked) { 358 qatomic_set(&cpu->neg.tlb.c.elide_flush_count, 359 cpu->neg.tlb.c.elide_flush_count + 360 ctpop16(asked & ~to_clean)); 361 } 362 } 363 } 364 365 void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap) 366 { 367 tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap); 368 369 if (cpu->created && !qemu_cpu_is_self(cpu)) { 370 async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work, 371 RUN_ON_CPU_HOST_INT(idxmap)); 372 } else { 373 tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap)); 374 } 375 } 376 377 void tlb_flush(CPUState *cpu) 378 { 379 tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS); 380 } 381 382 void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap) 383 { 384 const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work; 385 386 tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap); 387 388 flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap)); 389 fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap)); 390 } 391 392 void tlb_flush_all_cpus(CPUState *src_cpu) 393 { 394 tlb_flush_by_mmuidx_all_cpus(src_cpu, ALL_MMUIDX_BITS); 395 } 396 397 void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap) 398 { 399 const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work; 400 401 tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap); 402 403 flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap)); 404 async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap)); 405 } 406 407 void tlb_flush_all_cpus_synced(CPUState *src_cpu) 408 { 409 tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS); 410 } 411 412 static bool tlb_hit_page_mask_anyprot(CPUTLBEntry *tlb_entry, 413 vaddr page, vaddr mask) 414 { 415 page &= mask; 416 mask &= TARGET_PAGE_MASK | TLB_INVALID_MASK; 417 418 return (page == (tlb_entry->addr_read & mask) || 419 page == (tlb_addr_write(tlb_entry) & mask) || 420 page == (tlb_entry->addr_code & mask)); 421 } 422 423 static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry, vaddr page) 424 { 425 return tlb_hit_page_mask_anyprot(tlb_entry, page, -1); 426 } 427 428 /** 429 * tlb_entry_is_empty - return true if the entry is not in use 430 * @te: pointer to CPUTLBEntry 431 */ 432 static inline bool tlb_entry_is_empty(const CPUTLBEntry *te) 433 { 434 return te->addr_read == -1 && te->addr_write == -1 && te->addr_code == -1; 435 } 436 437 /* Called with tlb_c.lock held 
*/ 438 static bool tlb_flush_entry_mask_locked(CPUTLBEntry *tlb_entry, 439 vaddr page, 440 vaddr mask) 441 { 442 if (tlb_hit_page_mask_anyprot(tlb_entry, page, mask)) { 443 memset(tlb_entry, -1, sizeof(*tlb_entry)); 444 return true; 445 } 446 return false; 447 } 448 449 static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry, vaddr page) 450 { 451 return tlb_flush_entry_mask_locked(tlb_entry, page, -1); 452 } 453 454 /* Called with tlb_c.lock held */ 455 static void tlb_flush_vtlb_page_mask_locked(CPUState *cpu, int mmu_idx, 456 vaddr page, 457 vaddr mask) 458 { 459 CPUTLBDesc *d = &cpu->neg.tlb.d[mmu_idx]; 460 int k; 461 462 assert_cpu_is_self(cpu); 463 for (k = 0; k < CPU_VTLB_SIZE; k++) { 464 if (tlb_flush_entry_mask_locked(&d->vtable[k], page, mask)) { 465 tlb_n_used_entries_dec(cpu, mmu_idx); 466 } 467 } 468 } 469 470 static inline void tlb_flush_vtlb_page_locked(CPUState *cpu, int mmu_idx, 471 vaddr page) 472 { 473 tlb_flush_vtlb_page_mask_locked(cpu, mmu_idx, page, -1); 474 } 475 476 static void tlb_flush_page_locked(CPUState *cpu, int midx, vaddr page) 477 { 478 vaddr lp_addr = cpu->neg.tlb.d[midx].large_page_addr; 479 vaddr lp_mask = cpu->neg.tlb.d[midx].large_page_mask; 480 481 /* Check if we need to flush due to large pages. */ 482 if ((page & lp_mask) == lp_addr) { 483 tlb_debug("forcing full flush midx %d (%016" 484 VADDR_PRIx "/%016" VADDR_PRIx ")\n", 485 midx, lp_addr, lp_mask); 486 tlb_flush_one_mmuidx_locked(cpu, midx, get_clock_realtime()); 487 } else { 488 if (tlb_flush_entry_locked(tlb_entry(cpu, midx, page), page)) { 489 tlb_n_used_entries_dec(cpu, midx); 490 } 491 tlb_flush_vtlb_page_locked(cpu, midx, page); 492 } 493 } 494 495 /** 496 * tlb_flush_page_by_mmuidx_async_0: 497 * @cpu: cpu on which to flush 498 * @addr: page of virtual address to flush 499 * @idxmap: set of mmu_idx to flush 500 * 501 * Helper for tlb_flush_page_by_mmuidx and friends, flush one page 502 * at @addr from the tlbs indicated by @idxmap from @cpu. 503 */ 504 static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu, 505 vaddr addr, 506 uint16_t idxmap) 507 { 508 int mmu_idx; 509 510 assert_cpu_is_self(cpu); 511 512 tlb_debug("page addr: %016" VADDR_PRIx " mmu_map:0x%x\n", addr, idxmap); 513 514 qemu_spin_lock(&cpu->neg.tlb.c.lock); 515 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { 516 if ((idxmap >> mmu_idx) & 1) { 517 tlb_flush_page_locked(cpu, mmu_idx, addr); 518 } 519 } 520 qemu_spin_unlock(&cpu->neg.tlb.c.lock); 521 522 /* 523 * Discard jump cache entries for any tb which might potentially 524 * overlap the flushed page, which includes the previous. 525 */ 526 tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE); 527 tb_jmp_cache_clear_page(cpu, addr); 528 } 529 530 /** 531 * tlb_flush_page_by_mmuidx_async_1: 532 * @cpu: cpu on which to flush 533 * @data: encoded addr + idxmap 534 * 535 * Helper for tlb_flush_page_by_mmuidx and friends, called through 536 * async_run_on_cpu. The idxmap parameter is encoded in the page 537 * offset of the target_ptr field. This limits the set of mmu_idx 538 * that can be passed via this method. 
539 */ 540 static void tlb_flush_page_by_mmuidx_async_1(CPUState *cpu, 541 run_on_cpu_data data) 542 { 543 vaddr addr_and_idxmap = data.target_ptr; 544 vaddr addr = addr_and_idxmap & TARGET_PAGE_MASK; 545 uint16_t idxmap = addr_and_idxmap & ~TARGET_PAGE_MASK; 546 547 tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap); 548 } 549 550 typedef struct { 551 vaddr addr; 552 uint16_t idxmap; 553 } TLBFlushPageByMMUIdxData; 554 555 /** 556 * tlb_flush_page_by_mmuidx_async_2: 557 * @cpu: cpu on which to flush 558 * @data: allocated addr + idxmap 559 * 560 * Helper for tlb_flush_page_by_mmuidx and friends, called through 561 * async_run_on_cpu. The addr+idxmap parameters are stored in a 562 * TLBFlushPageByMMUIdxData structure that has been allocated 563 * specifically for this helper. Free the structure when done. 564 */ 565 static void tlb_flush_page_by_mmuidx_async_2(CPUState *cpu, 566 run_on_cpu_data data) 567 { 568 TLBFlushPageByMMUIdxData *d = data.host_ptr; 569 570 tlb_flush_page_by_mmuidx_async_0(cpu, d->addr, d->idxmap); 571 g_free(d); 572 } 573 574 void tlb_flush_page_by_mmuidx(CPUState *cpu, vaddr addr, uint16_t idxmap) 575 { 576 tlb_debug("addr: %016" VADDR_PRIx " mmu_idx:%" PRIx16 "\n", addr, idxmap); 577 578 /* This should already be page aligned */ 579 addr &= TARGET_PAGE_MASK; 580 581 if (qemu_cpu_is_self(cpu)) { 582 tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap); 583 } else if (idxmap < TARGET_PAGE_SIZE) { 584 /* 585 * Most targets have only a few mmu_idx. In the case where 586 * we can stuff idxmap into the low TARGET_PAGE_BITS, avoid 587 * allocating memory for this operation. 588 */ 589 async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_1, 590 RUN_ON_CPU_TARGET_PTR(addr | idxmap)); 591 } else { 592 TLBFlushPageByMMUIdxData *d = g_new(TLBFlushPageByMMUIdxData, 1); 593 594 /* Otherwise allocate a structure, freed by the worker. */ 595 d->addr = addr; 596 d->idxmap = idxmap; 597 async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_2, 598 RUN_ON_CPU_HOST_PTR(d)); 599 } 600 } 601 602 void tlb_flush_page(CPUState *cpu, vaddr addr) 603 { 604 tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS); 605 } 606 607 void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, vaddr addr, 608 uint16_t idxmap) 609 { 610 tlb_debug("addr: %016" VADDR_PRIx " mmu_idx:%"PRIx16"\n", addr, idxmap); 611 612 /* This should already be page aligned */ 613 addr &= TARGET_PAGE_MASK; 614 615 /* 616 * Allocate memory to hold addr+idxmap only when needed. 617 * See tlb_flush_page_by_mmuidx for details. 618 */ 619 if (idxmap < TARGET_PAGE_SIZE) { 620 flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1, 621 RUN_ON_CPU_TARGET_PTR(addr | idxmap)); 622 } else { 623 CPUState *dst_cpu; 624 625 /* Allocate a separate data block for each destination cpu. 
*/ 626 CPU_FOREACH(dst_cpu) { 627 if (dst_cpu != src_cpu) { 628 TLBFlushPageByMMUIdxData *d 629 = g_new(TLBFlushPageByMMUIdxData, 1); 630 631 d->addr = addr; 632 d->idxmap = idxmap; 633 async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2, 634 RUN_ON_CPU_HOST_PTR(d)); 635 } 636 } 637 } 638 639 tlb_flush_page_by_mmuidx_async_0(src_cpu, addr, idxmap); 640 } 641 642 void tlb_flush_page_all_cpus(CPUState *src, vaddr addr) 643 { 644 tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS); 645 } 646 647 void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu, 648 vaddr addr, 649 uint16_t idxmap) 650 { 651 tlb_debug("addr: %016" VADDR_PRIx " mmu_idx:%"PRIx16"\n", addr, idxmap); 652 653 /* This should already be page aligned */ 654 addr &= TARGET_PAGE_MASK; 655 656 /* 657 * Allocate memory to hold addr+idxmap only when needed. 658 * See tlb_flush_page_by_mmuidx for details. 659 */ 660 if (idxmap < TARGET_PAGE_SIZE) { 661 flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1, 662 RUN_ON_CPU_TARGET_PTR(addr | idxmap)); 663 async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_1, 664 RUN_ON_CPU_TARGET_PTR(addr | idxmap)); 665 } else { 666 CPUState *dst_cpu; 667 TLBFlushPageByMMUIdxData *d; 668 669 /* Allocate a separate data block for each destination cpu. */ 670 CPU_FOREACH(dst_cpu) { 671 if (dst_cpu != src_cpu) { 672 d = g_new(TLBFlushPageByMMUIdxData, 1); 673 d->addr = addr; 674 d->idxmap = idxmap; 675 async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2, 676 RUN_ON_CPU_HOST_PTR(d)); 677 } 678 } 679 680 d = g_new(TLBFlushPageByMMUIdxData, 1); 681 d->addr = addr; 682 d->idxmap = idxmap; 683 async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_2, 684 RUN_ON_CPU_HOST_PTR(d)); 685 } 686 } 687 688 void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr) 689 { 690 tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS); 691 } 692 693 static void tlb_flush_range_locked(CPUState *cpu, int midx, 694 vaddr addr, vaddr len, 695 unsigned bits) 696 { 697 CPUTLBDesc *d = &cpu->neg.tlb.d[midx]; 698 CPUTLBDescFast *f = &cpu->neg.tlb.f[midx]; 699 vaddr mask = MAKE_64BIT_MASK(0, bits); 700 701 /* 702 * If @bits is smaller than the tlb size, there may be multiple entries 703 * within the TLB; otherwise all addresses that match under @mask hit 704 * the same TLB entry. 705 * TODO: Perhaps allow bits to be a few bits less than the size. 706 * For now, just flush the entire TLB. 707 * 708 * If @len is larger than the tlb size, then it will take longer to 709 * test all of the entries in the TLB than it will to flush it all. 710 */ 711 if (mask < f->mask || len > f->mask) { 712 tlb_debug("forcing full flush midx %d (" 713 "%016" VADDR_PRIx "/%016" VADDR_PRIx "+%016" VADDR_PRIx ")\n", 714 midx, addr, mask, len); 715 tlb_flush_one_mmuidx_locked(cpu, midx, get_clock_realtime()); 716 return; 717 } 718 719 /* 720 * Check if we need to flush due to large pages. 721 * Because large_page_mask contains all 1's from the msb, 722 * we only need to test the end of the range. 
723 */ 724 if (((addr + len - 1) & d->large_page_mask) == d->large_page_addr) { 725 tlb_debug("forcing full flush midx %d (" 726 "%016" VADDR_PRIx "/%016" VADDR_PRIx ")\n", 727 midx, d->large_page_addr, d->large_page_mask); 728 tlb_flush_one_mmuidx_locked(cpu, midx, get_clock_realtime()); 729 return; 730 } 731 732 for (vaddr i = 0; i < len; i += TARGET_PAGE_SIZE) { 733 vaddr page = addr + i; 734 CPUTLBEntry *entry = tlb_entry(cpu, midx, page); 735 736 if (tlb_flush_entry_mask_locked(entry, page, mask)) { 737 tlb_n_used_entries_dec(cpu, midx); 738 } 739 tlb_flush_vtlb_page_mask_locked(cpu, midx, page, mask); 740 } 741 } 742 743 typedef struct { 744 vaddr addr; 745 vaddr len; 746 uint16_t idxmap; 747 uint16_t bits; 748 } TLBFlushRangeData; 749 750 static void tlb_flush_range_by_mmuidx_async_0(CPUState *cpu, 751 TLBFlushRangeData d) 752 { 753 int mmu_idx; 754 755 assert_cpu_is_self(cpu); 756 757 tlb_debug("range: %016" VADDR_PRIx "/%u+%016" VADDR_PRIx " mmu_map:0x%x\n", 758 d.addr, d.bits, d.len, d.idxmap); 759 760 qemu_spin_lock(&cpu->neg.tlb.c.lock); 761 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { 762 if ((d.idxmap >> mmu_idx) & 1) { 763 tlb_flush_range_locked(cpu, mmu_idx, d.addr, d.len, d.bits); 764 } 765 } 766 qemu_spin_unlock(&cpu->neg.tlb.c.lock); 767 768 /* 769 * If the length is larger than the jump cache size, then it will take 770 * longer to clear each entry individually than it will to clear it all. 771 */ 772 if (d.len >= (TARGET_PAGE_SIZE * TB_JMP_CACHE_SIZE)) { 773 tcg_flush_jmp_cache(cpu); 774 return; 775 } 776 777 /* 778 * Discard jump cache entries for any tb which might potentially 779 * overlap the flushed pages, which includes the previous. 780 */ 781 d.addr -= TARGET_PAGE_SIZE; 782 for (vaddr i = 0, n = d.len / TARGET_PAGE_SIZE + 1; i < n; i++) { 783 tb_jmp_cache_clear_page(cpu, d.addr); 784 d.addr += TARGET_PAGE_SIZE; 785 } 786 } 787 788 static void tlb_flush_range_by_mmuidx_async_1(CPUState *cpu, 789 run_on_cpu_data data) 790 { 791 TLBFlushRangeData *d = data.host_ptr; 792 tlb_flush_range_by_mmuidx_async_0(cpu, *d); 793 g_free(d); 794 } 795 796 void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr, 797 vaddr len, uint16_t idxmap, 798 unsigned bits) 799 { 800 TLBFlushRangeData d; 801 802 /* 803 * If all bits are significant, and len is small, 804 * this devolves to tlb_flush_page. 805 */ 806 if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) { 807 tlb_flush_page_by_mmuidx(cpu, addr, idxmap); 808 return; 809 } 810 /* If no page bits are significant, this devolves to tlb_flush. */ 811 if (bits < TARGET_PAGE_BITS) { 812 tlb_flush_by_mmuidx(cpu, idxmap); 813 return; 814 } 815 816 /* This should already be page aligned */ 817 d.addr = addr & TARGET_PAGE_MASK; 818 d.len = len; 819 d.idxmap = idxmap; 820 d.bits = bits; 821 822 if (qemu_cpu_is_self(cpu)) { 823 tlb_flush_range_by_mmuidx_async_0(cpu, d); 824 } else { 825 /* Otherwise allocate a structure, freed by the worker. 
*/ 826 TLBFlushRangeData *p = g_memdup(&d, sizeof(d)); 827 async_run_on_cpu(cpu, tlb_flush_range_by_mmuidx_async_1, 828 RUN_ON_CPU_HOST_PTR(p)); 829 } 830 } 831 832 void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, vaddr addr, 833 uint16_t idxmap, unsigned bits) 834 { 835 tlb_flush_range_by_mmuidx(cpu, addr, TARGET_PAGE_SIZE, idxmap, bits); 836 } 837 838 void tlb_flush_range_by_mmuidx_all_cpus(CPUState *src_cpu, 839 vaddr addr, vaddr len, 840 uint16_t idxmap, unsigned bits) 841 { 842 TLBFlushRangeData d; 843 CPUState *dst_cpu; 844 845 /* 846 * If all bits are significant, and len is small, 847 * this devolves to tlb_flush_page. 848 */ 849 if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) { 850 tlb_flush_page_by_mmuidx_all_cpus(src_cpu, addr, idxmap); 851 return; 852 } 853 /* If no page bits are significant, this devolves to tlb_flush. */ 854 if (bits < TARGET_PAGE_BITS) { 855 tlb_flush_by_mmuidx_all_cpus(src_cpu, idxmap); 856 return; 857 } 858 859 /* This should already be page aligned */ 860 d.addr = addr & TARGET_PAGE_MASK; 861 d.len = len; 862 d.idxmap = idxmap; 863 d.bits = bits; 864 865 /* Allocate a separate data block for each destination cpu. */ 866 CPU_FOREACH(dst_cpu) { 867 if (dst_cpu != src_cpu) { 868 TLBFlushRangeData *p = g_memdup(&d, sizeof(d)); 869 async_run_on_cpu(dst_cpu, 870 tlb_flush_range_by_mmuidx_async_1, 871 RUN_ON_CPU_HOST_PTR(p)); 872 } 873 } 874 875 tlb_flush_range_by_mmuidx_async_0(src_cpu, d); 876 } 877 878 void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu, 879 vaddr addr, uint16_t idxmap, 880 unsigned bits) 881 { 882 tlb_flush_range_by_mmuidx_all_cpus(src_cpu, addr, TARGET_PAGE_SIZE, 883 idxmap, bits); 884 } 885 886 void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu, 887 vaddr addr, 888 vaddr len, 889 uint16_t idxmap, 890 unsigned bits) 891 { 892 TLBFlushRangeData d, *p; 893 CPUState *dst_cpu; 894 895 /* 896 * If all bits are significant, and len is small, 897 * this devolves to tlb_flush_page. 898 */ 899 if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) { 900 tlb_flush_page_by_mmuidx_all_cpus_synced(src_cpu, addr, idxmap); 901 return; 902 } 903 /* If no page bits are significant, this devolves to tlb_flush. */ 904 if (bits < TARGET_PAGE_BITS) { 905 tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, idxmap); 906 return; 907 } 908 909 /* This should already be page aligned */ 910 d.addr = addr & TARGET_PAGE_MASK; 911 d.len = len; 912 d.idxmap = idxmap; 913 d.bits = bits; 914 915 /* Allocate a separate data block for each destination cpu. 
*/ 916 CPU_FOREACH(dst_cpu) { 917 if (dst_cpu != src_cpu) { 918 p = g_memdup(&d, sizeof(d)); 919 async_run_on_cpu(dst_cpu, tlb_flush_range_by_mmuidx_async_1, 920 RUN_ON_CPU_HOST_PTR(p)); 921 } 922 } 923 924 p = g_memdup(&d, sizeof(d)); 925 async_safe_run_on_cpu(src_cpu, tlb_flush_range_by_mmuidx_async_1, 926 RUN_ON_CPU_HOST_PTR(p)); 927 } 928 929 void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu, 930 vaddr addr, 931 uint16_t idxmap, 932 unsigned bits) 933 { 934 tlb_flush_range_by_mmuidx_all_cpus_synced(src_cpu, addr, TARGET_PAGE_SIZE, 935 idxmap, bits); 936 } 937 938 /* update the TLBs so that writes to code in the virtual page 'addr' 939 can be detected */ 940 void tlb_protect_code(ram_addr_t ram_addr) 941 { 942 cpu_physical_memory_test_and_clear_dirty(ram_addr & TARGET_PAGE_MASK, 943 TARGET_PAGE_SIZE, 944 DIRTY_MEMORY_CODE); 945 } 946 947 /* update the TLB so that writes in physical page 'phys_addr' are no longer 948 tested for self modifying code */ 949 void tlb_unprotect_code(ram_addr_t ram_addr) 950 { 951 cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE); 952 } 953 954 955 /* 956 * Dirty write flag handling 957 * 958 * When the TCG code writes to a location it looks up the address in 959 * the TLB and uses that data to compute the final address. If any of 960 * the lower bits of the address are set then the slow path is forced. 961 * There are a number of reasons to do this but for normal RAM the 962 * most usual is detecting writes to code regions which may invalidate 963 * generated code. 964 * 965 * Other vCPUs might be reading their TLBs during guest execution, so we update 966 * te->addr_write with qatomic_set. We don't need to worry about this for 967 * oversized guests as MTTCG is disabled for them. 968 * 969 * Called with tlb_c.lock held. 970 */ 971 static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry, 972 uintptr_t start, uintptr_t length) 973 { 974 uintptr_t addr = tlb_entry->addr_write; 975 976 if ((addr & (TLB_INVALID_MASK | TLB_MMIO | 977 TLB_DISCARD_WRITE | TLB_NOTDIRTY)) == 0) { 978 addr &= TARGET_PAGE_MASK; 979 addr += tlb_entry->addend; 980 if ((addr - start) < length) { 981 #if TARGET_LONG_BITS == 32 982 uint32_t *ptr_write = (uint32_t *)&tlb_entry->addr_write; 983 ptr_write += HOST_BIG_ENDIAN; 984 qatomic_set(ptr_write, *ptr_write | TLB_NOTDIRTY); 985 #elif TCG_OVERSIZED_GUEST 986 tlb_entry->addr_write |= TLB_NOTDIRTY; 987 #else 988 qatomic_set(&tlb_entry->addr_write, 989 tlb_entry->addr_write | TLB_NOTDIRTY); 990 #endif 991 } 992 } 993 } 994 995 /* 996 * Called with tlb_c.lock held. 997 * Called only from the vCPU context, i.e. the TLB's owner thread. 998 */ 999 static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s) 1000 { 1001 *d = *s; 1002 } 1003 1004 /* This is a cross vCPU call (i.e. another vCPU resetting the flags of 1005 * the target vCPU). 1006 * We must take tlb_c.lock to avoid racing with another vCPU update. The only 1007 * thing actually updated is the target TLB entry ->addr_write flags. 
1008 */ 1009 void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length) 1010 { 1011 int mmu_idx; 1012 1013 qemu_spin_lock(&cpu->neg.tlb.c.lock); 1014 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { 1015 unsigned int i; 1016 unsigned int n = tlb_n_entries(&cpu->neg.tlb.f[mmu_idx]); 1017 1018 for (i = 0; i < n; i++) { 1019 tlb_reset_dirty_range_locked(&cpu->neg.tlb.f[mmu_idx].table[i], 1020 start1, length); 1021 } 1022 1023 for (i = 0; i < CPU_VTLB_SIZE; i++) { 1024 tlb_reset_dirty_range_locked(&cpu->neg.tlb.d[mmu_idx].vtable[i], 1025 start1, length); 1026 } 1027 } 1028 qemu_spin_unlock(&cpu->neg.tlb.c.lock); 1029 } 1030 1031 /* Called with tlb_c.lock held */ 1032 static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry, 1033 vaddr addr) 1034 { 1035 if (tlb_entry->addr_write == (addr | TLB_NOTDIRTY)) { 1036 tlb_entry->addr_write = addr; 1037 } 1038 } 1039 1040 /* update the TLB corresponding to virtual page vaddr 1041 so that it is no longer dirty */ 1042 void tlb_set_dirty(CPUState *cpu, vaddr addr) 1043 { 1044 int mmu_idx; 1045 1046 assert_cpu_is_self(cpu); 1047 1048 addr &= TARGET_PAGE_MASK; 1049 qemu_spin_lock(&cpu->neg.tlb.c.lock); 1050 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { 1051 tlb_set_dirty1_locked(tlb_entry(cpu, mmu_idx, addr), addr); 1052 } 1053 1054 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { 1055 int k; 1056 for (k = 0; k < CPU_VTLB_SIZE; k++) { 1057 tlb_set_dirty1_locked(&cpu->neg.tlb.d[mmu_idx].vtable[k], addr); 1058 } 1059 } 1060 qemu_spin_unlock(&cpu->neg.tlb.c.lock); 1061 } 1062 1063 /* Our TLB does not support large pages, so remember the area covered by 1064 large pages and trigger a full TLB flush if these are invalidated. */ 1065 static void tlb_add_large_page(CPUState *cpu, int mmu_idx, 1066 vaddr addr, uint64_t size) 1067 { 1068 vaddr lp_addr = cpu->neg.tlb.d[mmu_idx].large_page_addr; 1069 vaddr lp_mask = ~(size - 1); 1070 1071 if (lp_addr == (vaddr)-1) { 1072 /* No previous large page. */ 1073 lp_addr = addr; 1074 } else { 1075 /* Extend the existing region to include the new page. 1076 This is a compromise between unnecessary flushes and 1077 the cost of maintaining a full variable size TLB. */ 1078 lp_mask &= cpu->neg.tlb.d[mmu_idx].large_page_mask; 1079 while (((lp_addr ^ addr) & lp_mask) != 0) { 1080 lp_mask <<= 1; 1081 } 1082 } 1083 cpu->neg.tlb.d[mmu_idx].large_page_addr = lp_addr & lp_mask; 1084 cpu->neg.tlb.d[mmu_idx].large_page_mask = lp_mask; 1085 } 1086 1087 static inline void tlb_set_compare(CPUTLBEntryFull *full, CPUTLBEntry *ent, 1088 vaddr address, int flags, 1089 MMUAccessType access_type, bool enable) 1090 { 1091 if (enable) { 1092 address |= flags & TLB_FLAGS_MASK; 1093 flags &= TLB_SLOW_FLAGS_MASK; 1094 if (flags) { 1095 address |= TLB_FORCE_SLOW; 1096 } 1097 } else { 1098 address = -1; 1099 flags = 0; 1100 } 1101 ent->addr_idx[access_type] = address; 1102 full->slow_flags[access_type] = flags; 1103 } 1104 1105 /* 1106 * Add a new TLB entry. At most one entry for a given virtual address 1107 * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the 1108 * supplied size is only used by tlb_flush_page. 1109 * 1110 * Called from TCG-generated code, which is under an RCU read-side 1111 * critical section. 
1112 */ 1113 void tlb_set_page_full(CPUState *cpu, int mmu_idx, 1114 vaddr addr, CPUTLBEntryFull *full) 1115 { 1116 CPUTLB *tlb = &cpu->neg.tlb; 1117 CPUTLBDesc *desc = &tlb->d[mmu_idx]; 1118 MemoryRegionSection *section; 1119 unsigned int index, read_flags, write_flags; 1120 uintptr_t addend; 1121 CPUTLBEntry *te, tn; 1122 hwaddr iotlb, xlat, sz, paddr_page; 1123 vaddr addr_page; 1124 int asidx, wp_flags, prot; 1125 bool is_ram, is_romd; 1126 1127 assert_cpu_is_self(cpu); 1128 1129 if (full->lg_page_size <= TARGET_PAGE_BITS) { 1130 sz = TARGET_PAGE_SIZE; 1131 } else { 1132 sz = (hwaddr)1 << full->lg_page_size; 1133 tlb_add_large_page(cpu, mmu_idx, addr, sz); 1134 } 1135 addr_page = addr & TARGET_PAGE_MASK; 1136 paddr_page = full->phys_addr & TARGET_PAGE_MASK; 1137 1138 prot = full->prot; 1139 asidx = cpu_asidx_from_attrs(cpu, full->attrs); 1140 section = address_space_translate_for_iotlb(cpu, asidx, paddr_page, 1141 &xlat, &sz, full->attrs, &prot); 1142 assert(sz >= TARGET_PAGE_SIZE); 1143 1144 tlb_debug("vaddr=%016" VADDR_PRIx " paddr=0x" HWADDR_FMT_plx 1145 " prot=%x idx=%d\n", 1146 addr, full->phys_addr, prot, mmu_idx); 1147 1148 read_flags = 0; 1149 if (full->lg_page_size < TARGET_PAGE_BITS) { 1150 /* Repeat the MMU check and TLB fill on every access. */ 1151 read_flags |= TLB_INVALID_MASK; 1152 } 1153 if (full->attrs.byte_swap) { 1154 read_flags |= TLB_BSWAP; 1155 } 1156 1157 is_ram = memory_region_is_ram(section->mr); 1158 is_romd = memory_region_is_romd(section->mr); 1159 1160 if (is_ram || is_romd) { 1161 /* RAM and ROMD both have associated host memory. */ 1162 addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat; 1163 } else { 1164 /* I/O does not; force the host address to NULL. */ 1165 addend = 0; 1166 } 1167 1168 write_flags = read_flags; 1169 if (is_ram) { 1170 iotlb = memory_region_get_ram_addr(section->mr) + xlat; 1171 assert(!(iotlb & ~TARGET_PAGE_MASK)); 1172 /* 1173 * Computing is_clean is expensive; avoid all that unless 1174 * the page is actually writable. 1175 */ 1176 if (prot & PAGE_WRITE) { 1177 if (section->readonly) { 1178 write_flags |= TLB_DISCARD_WRITE; 1179 } else if (cpu_physical_memory_is_clean(iotlb)) { 1180 write_flags |= TLB_NOTDIRTY; 1181 } 1182 } 1183 } else { 1184 /* I/O or ROMD */ 1185 iotlb = memory_region_section_get_iotlb(cpu, section) + xlat; 1186 /* 1187 * Writes to romd devices must go through MMIO to enable write. 1188 * Reads to romd devices go through the ram_ptr found above, 1189 * but of course reads to I/O must go through MMIO. 1190 */ 1191 write_flags |= TLB_MMIO; 1192 if (!is_romd) { 1193 read_flags = write_flags; 1194 } 1195 } 1196 1197 wp_flags = cpu_watchpoint_address_matches(cpu, addr_page, 1198 TARGET_PAGE_SIZE); 1199 1200 index = tlb_index(cpu, mmu_idx, addr_page); 1201 te = tlb_entry(cpu, mmu_idx, addr_page); 1202 1203 /* 1204 * Hold the TLB lock for the rest of the function. We could acquire/release 1205 * the lock several times in the function, but it is faster to amortize the 1206 * acquisition cost by acquiring it just once. Note that this leads to 1207 * a longer critical section, but this is not a concern since the TLB lock 1208 * is unlikely to be contended. 1209 */ 1210 qemu_spin_lock(&tlb->c.lock); 1211 1212 /* Note that the tlb is no longer clean. */ 1213 tlb->c.dirty |= 1 << mmu_idx; 1214 1215 /* Make sure there's no cached translation for the new page. 
*/ 1216 tlb_flush_vtlb_page_locked(cpu, mmu_idx, addr_page); 1217 1218 /* 1219 * Only evict the old entry to the victim tlb if it's for a 1220 * different page; otherwise just overwrite the stale data. 1221 */ 1222 if (!tlb_hit_page_anyprot(te, addr_page) && !tlb_entry_is_empty(te)) { 1223 unsigned vidx = desc->vindex++ % CPU_VTLB_SIZE; 1224 CPUTLBEntry *tv = &desc->vtable[vidx]; 1225 1226 /* Evict the old entry into the victim tlb. */ 1227 copy_tlb_helper_locked(tv, te); 1228 desc->vfulltlb[vidx] = desc->fulltlb[index]; 1229 tlb_n_used_entries_dec(cpu, mmu_idx); 1230 } 1231 1232 /* refill the tlb */ 1233 /* 1234 * When memory region is ram, iotlb contains a TARGET_PAGE_BITS 1235 * aligned ram_addr_t of the page base of the target RAM. 1236 * Otherwise, iotlb contains 1237 * - a physical section number in the lower TARGET_PAGE_BITS 1238 * - the offset within section->mr of the page base (I/O, ROMD) with the 1239 * TARGET_PAGE_BITS masked off. 1240 * We subtract addr_page (which is page aligned and thus won't 1241 * disturb the low bits) to give an offset which can be added to the 1242 * (non-page-aligned) vaddr of the eventual memory access to get 1243 * the MemoryRegion offset for the access. Note that the vaddr we 1244 * subtract here is that of the page base, and not the same as the 1245 * vaddr we add back in io_prepare()/get_page_addr_code(). 1246 */ 1247 desc->fulltlb[index] = *full; 1248 full = &desc->fulltlb[index]; 1249 full->xlat_section = iotlb - addr_page; 1250 full->phys_addr = paddr_page; 1251 1252 /* Now calculate the new entry */ 1253 tn.addend = addend - addr_page; 1254 1255 tlb_set_compare(full, &tn, addr_page, read_flags, 1256 MMU_INST_FETCH, prot & PAGE_EXEC); 1257 1258 if (wp_flags & BP_MEM_READ) { 1259 read_flags |= TLB_WATCHPOINT; 1260 } 1261 tlb_set_compare(full, &tn, addr_page, read_flags, 1262 MMU_DATA_LOAD, prot & PAGE_READ); 1263 1264 if (prot & PAGE_WRITE_INV) { 1265 write_flags |= TLB_INVALID_MASK; 1266 } 1267 if (wp_flags & BP_MEM_WRITE) { 1268 write_flags |= TLB_WATCHPOINT; 1269 } 1270 tlb_set_compare(full, &tn, addr_page, write_flags, 1271 MMU_DATA_STORE, prot & PAGE_WRITE); 1272 1273 copy_tlb_helper_locked(te, &tn); 1274 tlb_n_used_entries_inc(cpu, mmu_idx); 1275 qemu_spin_unlock(&tlb->c.lock); 1276 } 1277 1278 void tlb_set_page_with_attrs(CPUState *cpu, vaddr addr, 1279 hwaddr paddr, MemTxAttrs attrs, int prot, 1280 int mmu_idx, uint64_t size) 1281 { 1282 CPUTLBEntryFull full = { 1283 .phys_addr = paddr, 1284 .attrs = attrs, 1285 .prot = prot, 1286 .lg_page_size = ctz64(size) 1287 }; 1288 1289 assert(is_power_of_2(size)); 1290 tlb_set_page_full(cpu, mmu_idx, addr, &full); 1291 } 1292 1293 void tlb_set_page(CPUState *cpu, vaddr addr, 1294 hwaddr paddr, int prot, 1295 int mmu_idx, uint64_t size) 1296 { 1297 tlb_set_page_with_attrs(cpu, addr, paddr, MEMTXATTRS_UNSPECIFIED, 1298 prot, mmu_idx, size); 1299 } 1300 1301 /* 1302 * Note: tlb_fill() can trigger a resize of the TLB. This means that all of the 1303 * caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must 1304 * be discarded and looked up again (e.g. via tlb_entry()). 1305 */ 1306 static void tlb_fill(CPUState *cpu, vaddr addr, int size, 1307 MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) 1308 { 1309 bool ok; 1310 1311 /* 1312 * This is not a probe, so only valid return is success; failure 1313 * should result in exception + longjmp to the cpu loop. 
1314 */ 1315 ok = cpu->cc->tcg_ops->tlb_fill(cpu, addr, size, 1316 access_type, mmu_idx, false, retaddr); 1317 assert(ok); 1318 } 1319 1320 static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr, 1321 MMUAccessType access_type, 1322 int mmu_idx, uintptr_t retaddr) 1323 { 1324 cpu->cc->tcg_ops->do_unaligned_access(cpu, addr, access_type, 1325 mmu_idx, retaddr); 1326 } 1327 1328 static MemoryRegionSection * 1329 io_prepare(hwaddr *out_offset, CPUState *cpu, hwaddr xlat, 1330 MemTxAttrs attrs, vaddr addr, uintptr_t retaddr) 1331 { 1332 MemoryRegionSection *section; 1333 hwaddr mr_offset; 1334 1335 section = iotlb_to_section(cpu, xlat, attrs); 1336 mr_offset = (xlat & TARGET_PAGE_MASK) + addr; 1337 cpu->mem_io_pc = retaddr; 1338 if (!cpu->neg.can_do_io) { 1339 cpu_io_recompile(cpu, retaddr); 1340 } 1341 1342 *out_offset = mr_offset; 1343 return section; 1344 } 1345 1346 static void io_failed(CPUState *cpu, CPUTLBEntryFull *full, vaddr addr, 1347 unsigned size, MMUAccessType access_type, int mmu_idx, 1348 MemTxResult response, uintptr_t retaddr) 1349 { 1350 if (!cpu->ignore_memory_transaction_failures 1351 && cpu->cc->tcg_ops->do_transaction_failed) { 1352 hwaddr physaddr = full->phys_addr | (addr & ~TARGET_PAGE_MASK); 1353 1354 cpu->cc->tcg_ops->do_transaction_failed(cpu, physaddr, addr, size, 1355 access_type, mmu_idx, 1356 full->attrs, response, retaddr); 1357 } 1358 } 1359 1360 /* Return true if ADDR is present in the victim tlb, and has been copied 1361 back to the main tlb. */ 1362 static bool victim_tlb_hit(CPUState *cpu, size_t mmu_idx, size_t index, 1363 MMUAccessType access_type, vaddr page) 1364 { 1365 size_t vidx; 1366 1367 assert_cpu_is_self(cpu); 1368 for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) { 1369 CPUTLBEntry *vtlb = &cpu->neg.tlb.d[mmu_idx].vtable[vidx]; 1370 uint64_t cmp = tlb_read_idx(vtlb, access_type); 1371 1372 if (cmp == page) { 1373 /* Found entry in victim tlb, swap tlb and iotlb. */ 1374 CPUTLBEntry tmptlb, *tlb = &cpu->neg.tlb.f[mmu_idx].table[index]; 1375 1376 qemu_spin_lock(&cpu->neg.tlb.c.lock); 1377 copy_tlb_helper_locked(&tmptlb, tlb); 1378 copy_tlb_helper_locked(tlb, vtlb); 1379 copy_tlb_helper_locked(vtlb, &tmptlb); 1380 qemu_spin_unlock(&cpu->neg.tlb.c.lock); 1381 1382 CPUTLBEntryFull *f1 = &cpu->neg.tlb.d[mmu_idx].fulltlb[index]; 1383 CPUTLBEntryFull *f2 = &cpu->neg.tlb.d[mmu_idx].vfulltlb[vidx]; 1384 CPUTLBEntryFull tmpf; 1385 tmpf = *f1; *f1 = *f2; *f2 = tmpf; 1386 return true; 1387 } 1388 } 1389 return false; 1390 } 1391 1392 static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size, 1393 CPUTLBEntryFull *full, uintptr_t retaddr) 1394 { 1395 ram_addr_t ram_addr = mem_vaddr + full->xlat_section; 1396 1397 trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size); 1398 1399 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) { 1400 tb_invalidate_phys_range_fast(ram_addr, size, retaddr); 1401 } 1402 1403 /* 1404 * Set both VGA and migration bits for simplicity and to remove 1405 * the notdirty callback faster. 1406 */ 1407 cpu_physical_memory_set_dirty_range(ram_addr, size, DIRTY_CLIENTS_NOCODE); 1408 1409 /* We remove the notdirty callback only if the code has been flushed. 
*/ 1410 if (!cpu_physical_memory_is_clean(ram_addr)) { 1411 trace_memory_notdirty_set_dirty(mem_vaddr); 1412 tlb_set_dirty(cpu, mem_vaddr); 1413 } 1414 } 1415 1416 static int probe_access_internal(CPUState *cpu, vaddr addr, 1417 int fault_size, MMUAccessType access_type, 1418 int mmu_idx, bool nonfault, 1419 void **phost, CPUTLBEntryFull **pfull, 1420 uintptr_t retaddr, bool check_mem_cbs) 1421 { 1422 uintptr_t index = tlb_index(cpu, mmu_idx, addr); 1423 CPUTLBEntry *entry = tlb_entry(cpu, mmu_idx, addr); 1424 uint64_t tlb_addr = tlb_read_idx(entry, access_type); 1425 vaddr page_addr = addr & TARGET_PAGE_MASK; 1426 int flags = TLB_FLAGS_MASK & ~TLB_FORCE_SLOW; 1427 bool force_mmio = check_mem_cbs && cpu_plugin_mem_cbs_enabled(cpu); 1428 CPUTLBEntryFull *full; 1429 1430 if (!tlb_hit_page(tlb_addr, page_addr)) { 1431 if (!victim_tlb_hit(cpu, mmu_idx, index, access_type, page_addr)) { 1432 if (!cpu->cc->tcg_ops->tlb_fill(cpu, addr, fault_size, access_type, 1433 mmu_idx, nonfault, retaddr)) { 1434 /* Non-faulting page table read failed. */ 1435 *phost = NULL; 1436 *pfull = NULL; 1437 return TLB_INVALID_MASK; 1438 } 1439 1440 /* TLB resize via tlb_fill may have moved the entry. */ 1441 index = tlb_index(cpu, mmu_idx, addr); 1442 entry = tlb_entry(cpu, mmu_idx, addr); 1443 1444 /* 1445 * With PAGE_WRITE_INV, we set TLB_INVALID_MASK immediately, 1446 * to force the next access through tlb_fill. We've just 1447 * called tlb_fill, so we know that this entry *is* valid. 1448 */ 1449 flags &= ~TLB_INVALID_MASK; 1450 } 1451 tlb_addr = tlb_read_idx(entry, access_type); 1452 } 1453 flags &= tlb_addr; 1454 1455 *pfull = full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index]; 1456 flags |= full->slow_flags[access_type]; 1457 1458 /* Fold all "mmio-like" bits into TLB_MMIO. This is not RAM. */ 1459 if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY)) 1460 || 1461 (access_type != MMU_INST_FETCH && force_mmio)) { 1462 *phost = NULL; 1463 return TLB_MMIO; 1464 } 1465 1466 /* Everything else is RAM. */ 1467 *phost = (void *)((uintptr_t)addr + entry->addend); 1468 return flags; 1469 } 1470 1471 int probe_access_full(CPUArchState *env, vaddr addr, int size, 1472 MMUAccessType access_type, int mmu_idx, 1473 bool nonfault, void **phost, CPUTLBEntryFull **pfull, 1474 uintptr_t retaddr) 1475 { 1476 int flags = probe_access_internal(env_cpu(env), addr, size, access_type, 1477 mmu_idx, nonfault, phost, pfull, retaddr, 1478 true); 1479 1480 /* Handle clean RAM pages. */ 1481 if (unlikely(flags & TLB_NOTDIRTY)) { 1482 int dirtysize = size == 0 ? 1 : size; 1483 notdirty_write(env_cpu(env), addr, dirtysize, *pfull, retaddr); 1484 flags &= ~TLB_NOTDIRTY; 1485 } 1486 1487 return flags; 1488 } 1489 1490 int probe_access_full_mmu(CPUArchState *env, vaddr addr, int size, 1491 MMUAccessType access_type, int mmu_idx, 1492 void **phost, CPUTLBEntryFull **pfull) 1493 { 1494 void *discard_phost; 1495 CPUTLBEntryFull *discard_tlb; 1496 1497 /* privately handle users that don't need full results */ 1498 phost = phost ? phost : &discard_phost; 1499 pfull = pfull ? pfull : &discard_tlb; 1500 1501 int flags = probe_access_internal(env_cpu(env), addr, size, access_type, 1502 mmu_idx, true, phost, pfull, 0, false); 1503 1504 /* Handle clean RAM pages. */ 1505 if (unlikely(flags & TLB_NOTDIRTY)) { 1506 int dirtysize = size == 0 ? 
1 : size; 1507 notdirty_write(env_cpu(env), addr, dirtysize, *pfull, 0); 1508 flags &= ~TLB_NOTDIRTY; 1509 } 1510 1511 return flags; 1512 } 1513 1514 int probe_access_flags(CPUArchState *env, vaddr addr, int size, 1515 MMUAccessType access_type, int mmu_idx, 1516 bool nonfault, void **phost, uintptr_t retaddr) 1517 { 1518 CPUTLBEntryFull *full; 1519 int flags; 1520 1521 g_assert(-(addr | TARGET_PAGE_MASK) >= size); 1522 1523 flags = probe_access_internal(env_cpu(env), addr, size, access_type, 1524 mmu_idx, nonfault, phost, &full, retaddr, 1525 true); 1526 1527 /* Handle clean RAM pages. */ 1528 if (unlikely(flags & TLB_NOTDIRTY)) { 1529 int dirtysize = size == 0 ? 1 : size; 1530 notdirty_write(env_cpu(env), addr, dirtysize, full, retaddr); 1531 flags &= ~TLB_NOTDIRTY; 1532 } 1533 1534 return flags; 1535 } 1536 1537 void *probe_access(CPUArchState *env, vaddr addr, int size, 1538 MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) 1539 { 1540 CPUTLBEntryFull *full; 1541 void *host; 1542 int flags; 1543 1544 g_assert(-(addr | TARGET_PAGE_MASK) >= size); 1545 1546 flags = probe_access_internal(env_cpu(env), addr, size, access_type, 1547 mmu_idx, false, &host, &full, retaddr, 1548 true); 1549 1550 /* Per the interface, size == 0 merely faults the access. */ 1551 if (size == 0) { 1552 return NULL; 1553 } 1554 1555 if (unlikely(flags & (TLB_NOTDIRTY | TLB_WATCHPOINT))) { 1556 /* Handle watchpoints. */ 1557 if (flags & TLB_WATCHPOINT) { 1558 int wp_access = (access_type == MMU_DATA_STORE 1559 ? BP_MEM_WRITE : BP_MEM_READ); 1560 cpu_check_watchpoint(env_cpu(env), addr, size, 1561 full->attrs, wp_access, retaddr); 1562 } 1563 1564 /* Handle clean RAM pages. */ 1565 if (flags & TLB_NOTDIRTY) { 1566 notdirty_write(env_cpu(env), addr, size, full, retaddr); 1567 } 1568 } 1569 1570 return host; 1571 } 1572 1573 void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr, 1574 MMUAccessType access_type, int mmu_idx) 1575 { 1576 CPUTLBEntryFull *full; 1577 void *host; 1578 int flags; 1579 1580 flags = probe_access_internal(env_cpu(env), addr, 0, access_type, 1581 mmu_idx, true, &host, &full, 0, false); 1582 1583 /* No combination of flags are expected by the caller. */ 1584 return flags ? NULL : host; 1585 } 1586 1587 /* 1588 * Return a ram_addr_t for the virtual address for execution. 1589 * 1590 * Return -1 if we can't translate and execute from an entire page 1591 * of RAM. This will force us to execute by loading and translating 1592 * one insn at a time, without caching. 1593 * 1594 * NOTE: This function will trigger an exception if the page is 1595 * not executable. 1596 */ 1597 tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr, 1598 void **hostp) 1599 { 1600 CPUTLBEntryFull *full; 1601 void *p; 1602 1603 (void)probe_access_internal(env_cpu(env), addr, 1, MMU_INST_FETCH, 1604 cpu_mmu_index(env, true), false, 1605 &p, &full, 0, false); 1606 if (p == NULL) { 1607 return -1; 1608 } 1609 1610 if (full->lg_page_size < TARGET_PAGE_BITS) { 1611 return -1; 1612 } 1613 1614 if (hostp) { 1615 *hostp = p; 1616 } 1617 return qemu_ram_addr_from_host_nofail(p); 1618 } 1619 1620 /* Load/store with atomicity primitives. */ 1621 #include "ldst_atomicity.c.inc" 1622 1623 #ifdef CONFIG_PLUGIN 1624 /* 1625 * Perform a TLB lookup and populate the qemu_plugin_hwaddr structure. 1626 * This should be a hot path as we will have just looked this path up 1627 * in the softmmu lookup code (or helper). We don't handle re-fills or 1628 * checking the victim table. This is purely informational. 
1629 * 1630 * The one corner case is i/o write, which can cause changes to the 1631 * address space. Those changes, and the corresponding tlb flush, 1632 * should be delayed until the next TB, so even then this ought not fail. 1633 * But check, Just in Case. 1634 */ 1635 bool tlb_plugin_lookup(CPUState *cpu, vaddr addr, int mmu_idx, 1636 bool is_store, struct qemu_plugin_hwaddr *data) 1637 { 1638 CPUTLBEntry *tlbe = tlb_entry(cpu, mmu_idx, addr); 1639 uintptr_t index = tlb_index(cpu, mmu_idx, addr); 1640 MMUAccessType access_type = is_store ? MMU_DATA_STORE : MMU_DATA_LOAD; 1641 uint64_t tlb_addr = tlb_read_idx(tlbe, access_type); 1642 CPUTLBEntryFull *full; 1643 1644 if (unlikely(!tlb_hit(tlb_addr, addr))) { 1645 return false; 1646 } 1647 1648 full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index]; 1649 data->phys_addr = full->phys_addr | (addr & ~TARGET_PAGE_MASK); 1650 1651 /* We must have an iotlb entry for MMIO */ 1652 if (tlb_addr & TLB_MMIO) { 1653 MemoryRegionSection *section = 1654 iotlb_to_section(cpu, full->xlat_section & ~TARGET_PAGE_MASK, 1655 full->attrs); 1656 data->is_io = true; 1657 data->mr = section->mr; 1658 } else { 1659 data->is_io = false; 1660 data->mr = NULL; 1661 } 1662 return true; 1663 } 1664 #endif 1665 1666 /* 1667 * Probe for a load/store operation. 1668 * Return the host address and into @flags. 1669 */ 1670 1671 typedef struct MMULookupPageData { 1672 CPUTLBEntryFull *full; 1673 void *haddr; 1674 vaddr addr; 1675 int flags; 1676 int size; 1677 } MMULookupPageData; 1678 1679 typedef struct MMULookupLocals { 1680 MMULookupPageData page[2]; 1681 MemOp memop; 1682 int mmu_idx; 1683 } MMULookupLocals; 1684 1685 /** 1686 * mmu_lookup1: translate one page 1687 * @cpu: generic cpu state 1688 * @data: lookup parameters 1689 * @mmu_idx: virtual address context 1690 * @access_type: load/store/code 1691 * @ra: return address into tcg generated code, or 0 1692 * 1693 * Resolve the translation for the one page at @data.addr, filling in 1694 * the rest of @data with the results. If the translation fails, 1695 * tlb_fill will longjmp out. Return true if the softmmu tlb for 1696 * @mmu_idx may have resized. 1697 */ 1698 static bool mmu_lookup1(CPUState *cpu, MMULookupPageData *data, 1699 int mmu_idx, MMUAccessType access_type, uintptr_t ra) 1700 { 1701 vaddr addr = data->addr; 1702 uintptr_t index = tlb_index(cpu, mmu_idx, addr); 1703 CPUTLBEntry *entry = tlb_entry(cpu, mmu_idx, addr); 1704 uint64_t tlb_addr = tlb_read_idx(entry, access_type); 1705 bool maybe_resized = false; 1706 CPUTLBEntryFull *full; 1707 int flags; 1708 1709 /* If the TLB entry is for a different page, reload and try again. */ 1710 if (!tlb_hit(tlb_addr, addr)) { 1711 if (!victim_tlb_hit(cpu, mmu_idx, index, access_type, 1712 addr & TARGET_PAGE_MASK)) { 1713 tlb_fill(cpu, addr, data->size, access_type, mmu_idx, ra); 1714 maybe_resized = true; 1715 index = tlb_index(cpu, mmu_idx, addr); 1716 entry = tlb_entry(cpu, mmu_idx, addr); 1717 } 1718 tlb_addr = tlb_read_idx(entry, access_type) & ~TLB_INVALID_MASK; 1719 } 1720 1721 full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index]; 1722 flags = tlb_addr & (TLB_FLAGS_MASK & ~TLB_FORCE_SLOW); 1723 flags |= full->slow_flags[access_type]; 1724 1725 data->full = full; 1726 data->flags = flags; 1727 /* Compute haddr speculatively; depending on flags it might be invalid. 
*/ 1728 data->haddr = (void *)((uintptr_t)addr + entry->addend); 1729 1730 return maybe_resized; 1731 } 1732 1733 /** 1734 * mmu_watch_or_dirty 1735 * @cpu: generic cpu state 1736 * @data: lookup parameters 1737 * @access_type: load/store/code 1738 * @ra: return address into tcg generated code, or 0 1739 * 1740 * Trigger watchpoints for @data.addr:@data.size; 1741 * record writes to protected clean pages. 1742 */ 1743 static void mmu_watch_or_dirty(CPUState *cpu, MMULookupPageData *data, 1744 MMUAccessType access_type, uintptr_t ra) 1745 { 1746 CPUTLBEntryFull *full = data->full; 1747 vaddr addr = data->addr; 1748 int flags = data->flags; 1749 int size = data->size; 1750 1751 /* On watchpoint hit, this will longjmp out. */ 1752 if (flags & TLB_WATCHPOINT) { 1753 int wp = access_type == MMU_DATA_STORE ? BP_MEM_WRITE : BP_MEM_READ; 1754 cpu_check_watchpoint(cpu, addr, size, full->attrs, wp, ra); 1755 flags &= ~TLB_WATCHPOINT; 1756 } 1757 1758 /* Note that notdirty is only set for writes. */ 1759 if (flags & TLB_NOTDIRTY) { 1760 notdirty_write(cpu, addr, size, full, ra); 1761 flags &= ~TLB_NOTDIRTY; 1762 } 1763 data->flags = flags; 1764 } 1765 1766 /** 1767 * mmu_lookup: translate page(s) 1768 * @cpu: generic cpu state 1769 * @addr: virtual address 1770 * @oi: combined mmu_idx and MemOp 1771 * @ra: return address into tcg generated code, or 0 1772 * @access_type: load/store/code 1773 * @l: output result 1774 * 1775 * Resolve the translation for the page(s) beginning at @addr, for MemOp.size 1776 * bytes. Return true if the lookup crosses a page boundary. 1777 */ 1778 static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi, 1779 uintptr_t ra, MMUAccessType type, MMULookupLocals *l) 1780 { 1781 unsigned a_bits; 1782 bool crosspage; 1783 int flags; 1784 1785 l->memop = get_memop(oi); 1786 l->mmu_idx = get_mmuidx(oi); 1787 1788 tcg_debug_assert(l->mmu_idx < NB_MMU_MODES); 1789 1790 /* Handle CPU specific unaligned behaviour */ 1791 a_bits = get_alignment_bits(l->memop); 1792 if (addr & ((1 << a_bits) - 1)) { 1793 cpu_unaligned_access(cpu, addr, type, l->mmu_idx, ra); 1794 } 1795 1796 l->page[0].addr = addr; 1797 l->page[0].size = memop_size(l->memop); 1798 l->page[1].addr = (addr + l->page[0].size - 1) & TARGET_PAGE_MASK; 1799 l->page[1].size = 0; 1800 crosspage = (addr ^ l->page[1].addr) & TARGET_PAGE_MASK; 1801 1802 if (likely(!crosspage)) { 1803 mmu_lookup1(cpu, &l->page[0], l->mmu_idx, type, ra); 1804 1805 flags = l->page[0].flags; 1806 if (unlikely(flags & (TLB_WATCHPOINT | TLB_NOTDIRTY))) { 1807 mmu_watch_or_dirty(cpu, &l->page[0], type, ra); 1808 } 1809 if (unlikely(flags & TLB_BSWAP)) { 1810 l->memop ^= MO_BSWAP; 1811 } 1812 } else { 1813 /* Finish compute of page crossing. */ 1814 int size0 = l->page[1].addr - addr; 1815 l->page[1].size = l->page[0].size - size0; 1816 l->page[0].size = size0; 1817 1818 /* 1819 * Lookup both pages, recognizing exceptions from either. If the 1820 * second lookup potentially resized, refresh first CPUTLBEntryFull. 
1821 */ 1822 mmu_lookup1(cpu, &l->page[0], l->mmu_idx, type, ra); 1823 if (mmu_lookup1(cpu, &l->page[1], l->mmu_idx, type, ra)) { 1824 uintptr_t index = tlb_index(cpu, l->mmu_idx, addr); 1825 l->page[0].full = &cpu->neg.tlb.d[l->mmu_idx].fulltlb[index]; 1826 } 1827 1828 flags = l->page[0].flags | l->page[1].flags; 1829 if (unlikely(flags & (TLB_WATCHPOINT | TLB_NOTDIRTY))) { 1830 mmu_watch_or_dirty(cpu, &l->page[0], type, ra); 1831 mmu_watch_or_dirty(cpu, &l->page[1], type, ra); 1832 } 1833 1834 /* 1835 * Since target/sparc is the only user of TLB_BSWAP, and all 1836 * Sparc accesses are aligned, any treatment across two pages 1837 * would be arbitrary. Refuse it until there's a use. 1838 */ 1839 tcg_debug_assert((flags & TLB_BSWAP) == 0); 1840 } 1841 1842 return crosspage; 1843 } 1844 1845 /* 1846 * Probe for an atomic operation. Do not allow unaligned operations, 1847 * or io operations to proceed. Return the host address. 1848 */ 1849 static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi, 1850 int size, uintptr_t retaddr) 1851 { 1852 uintptr_t mmu_idx = get_mmuidx(oi); 1853 MemOp mop = get_memop(oi); 1854 int a_bits = get_alignment_bits(mop); 1855 uintptr_t index; 1856 CPUTLBEntry *tlbe; 1857 vaddr tlb_addr; 1858 void *hostaddr; 1859 CPUTLBEntryFull *full; 1860 1861 tcg_debug_assert(mmu_idx < NB_MMU_MODES); 1862 1863 /* Adjust the given return address. */ 1864 retaddr -= GETPC_ADJ; 1865 1866 /* Enforce guest required alignment. */ 1867 if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) { 1868 /* ??? Maybe indicate atomic op to cpu_unaligned_access */ 1869 cpu_unaligned_access(cpu, addr, MMU_DATA_STORE, 1870 mmu_idx, retaddr); 1871 } 1872 1873 /* Enforce qemu required alignment. */ 1874 if (unlikely(addr & (size - 1))) { 1875 /* We get here if guest alignment was not requested, 1876 or was not enforced by cpu_unaligned_access above. 1877 We might widen the access and emulate, but for now 1878 mark an exception and exit the cpu loop. */ 1879 goto stop_the_world; 1880 } 1881 1882 index = tlb_index(cpu, mmu_idx, addr); 1883 tlbe = tlb_entry(cpu, mmu_idx, addr); 1884 1885 /* Check TLB entry and enforce page permissions. */ 1886 tlb_addr = tlb_addr_write(tlbe); 1887 if (!tlb_hit(tlb_addr, addr)) { 1888 if (!victim_tlb_hit(cpu, mmu_idx, index, MMU_DATA_STORE, 1889 addr & TARGET_PAGE_MASK)) { 1890 tlb_fill(cpu, addr, size, 1891 MMU_DATA_STORE, mmu_idx, retaddr); 1892 index = tlb_index(cpu, mmu_idx, addr); 1893 tlbe = tlb_entry(cpu, mmu_idx, addr); 1894 } 1895 tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK; 1896 } 1897 1898 /* 1899 * Let the guest notice RMW on a write-only page. 1900 * We have just verified that the page is writable. 1901 * Subpage lookups may have left TLB_INVALID_MASK set, 1902 * but addr_read will only be -1 if PAGE_READ was unset. 1903 */ 1904 if (unlikely(tlbe->addr_read == -1)) { 1905 tlb_fill(cpu, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr); 1906 /* 1907 * Since we don't support reads and writes to different 1908 * addresses, and we do have the proper page loaded for 1909 * write, this shouldn't ever return. But just in case, 1910 * handle via stop-the-world. 1911 */ 1912 goto stop_the_world; 1913 } 1914 /* Collect tlb flags for read. */ 1915 tlb_addr |= tlbe->addr_read; 1916 1917 /* Notice an IO access or a needs-MMU-lookup access */ 1918 if (unlikely(tlb_addr & (TLB_MMIO | TLB_DISCARD_WRITE))) { 1919 /* There's really nothing that can be done to 1920 support this apart from stop-the-world. 
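 * (stop_the_world, via cpu_loop_exit_atomic below, raises EXCP_ATOMIC so
 * that the access is retried in an exclusive section with the other
 * vCPUs stopped; the same escape hatch serves the alignment check
 * earlier in this function.  For example, a 4-byte atomic at
 * addr = 0x1002 fails addr & (size - 1) == 0 and takes this path even
 * if the guest architecture itself tolerates the misalignment.)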
*/ 1921 goto stop_the_world; 1922 } 1923 1924 hostaddr = (void *)((uintptr_t)addr + tlbe->addend); 1925 full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index]; 1926 1927 if (unlikely(tlb_addr & TLB_NOTDIRTY)) { 1928 notdirty_write(cpu, addr, size, full, retaddr); 1929 } 1930 1931 if (unlikely(tlb_addr & TLB_FORCE_SLOW)) { 1932 int wp_flags = 0; 1933 1934 if (full->slow_flags[MMU_DATA_STORE] & TLB_WATCHPOINT) { 1935 wp_flags |= BP_MEM_WRITE; 1936 } 1937 if (full->slow_flags[MMU_DATA_LOAD] & TLB_WATCHPOINT) { 1938 wp_flags |= BP_MEM_READ; 1939 } 1940 if (wp_flags) { 1941 cpu_check_watchpoint(cpu, addr, size, 1942 full->attrs, wp_flags, retaddr); 1943 } 1944 } 1945 1946 return hostaddr; 1947 1948 stop_the_world: 1949 cpu_loop_exit_atomic(cpu, retaddr); 1950 } 1951 1952 /* 1953 * Load Helpers 1954 * 1955 * We support two different access types. SOFTMMU_CODE_ACCESS is 1956 * specifically for reading instructions from system memory. It is 1957 * called by the translation loop and in some helpers where the code 1958 * is disassembled. It shouldn't be called directly by guest code. 1959 * 1960 * For the benefit of TCG generated code, we want to avoid the 1961 * complication of ABI-specific return type promotion and always 1962 * return a value extended to the register size of the host. This is 1963 * tcg_target_long, except in the case of a 32-bit host and 64-bit 1964 * data, and for that we always have uint64_t. 1965 * 1966 * We don't bother with this widened value for SOFTMMU_CODE_ACCESS. 1967 */ 1968 1969 /** 1970 * do_ld_mmio_beN: 1971 * @cpu: generic cpu state 1972 * @full: page parameters 1973 * @ret_be: accumulated data 1974 * @addr: virtual address 1975 * @size: number of bytes 1976 * @mmu_idx: virtual address context 1977 * @ra: return address into tcg generated code, or 0 1978 * Context: BQL held 1979 * 1980 * Load @size bytes from @addr, which is memory-mapped i/o. 1981 * The bytes are concatenated in big-endian order with @ret_be. 1982 */ 1983 static uint64_t int_ld_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full, 1984 uint64_t ret_be, vaddr addr, int size, 1985 int mmu_idx, MMUAccessType type, uintptr_t ra, 1986 MemoryRegion *mr, hwaddr mr_offset) 1987 { 1988 do { 1989 MemOp this_mop; 1990 unsigned this_size; 1991 uint64_t val; 1992 MemTxResult r; 1993 1994 /* Read aligned pieces up to 8 bytes. 
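 * The expression below picks the largest power-of-two chunk that is
 * naturally aligned at @addr and no larger than the remaining @size,
 * capped at 8 bytes: the lowest set bit of (size | addr | 8) gives the
 * chunk's MemOp size.  For example, addr = 0x...06, size = 6 yields
 * ctz32(6 | 6 | 8) = 1, a 2-byte read; the next iteration has
 * addr = 0x...08, size = 4, yielding ctz32(4 | 8 | 8) = 2, a 4-byte read.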
*/ 1995 this_mop = ctz32(size | (int)addr | 8); 1996 this_size = 1 << this_mop; 1997 this_mop |= MO_BE; 1998 1999 r = memory_region_dispatch_read(mr, mr_offset, &val, 2000 this_mop, full->attrs); 2001 if (unlikely(r != MEMTX_OK)) { 2002 io_failed(cpu, full, addr, this_size, type, mmu_idx, r, ra); 2003 } 2004 if (this_size == 8) { 2005 return val; 2006 } 2007 2008 ret_be = (ret_be << (this_size * 8)) | val; 2009 addr += this_size; 2010 mr_offset += this_size; 2011 size -= this_size; 2012 } while (size); 2013 2014 return ret_be; 2015 } 2016 2017 static uint64_t do_ld_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full, 2018 uint64_t ret_be, vaddr addr, int size, 2019 int mmu_idx, MMUAccessType type, uintptr_t ra) 2020 { 2021 MemoryRegionSection *section; 2022 MemoryRegion *mr; 2023 hwaddr mr_offset; 2024 MemTxAttrs attrs; 2025 uint64_t ret; 2026 2027 tcg_debug_assert(size > 0 && size <= 8); 2028 2029 attrs = full->attrs; 2030 section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra); 2031 mr = section->mr; 2032 2033 bql_lock(); 2034 ret = int_ld_mmio_beN(cpu, full, ret_be, addr, size, mmu_idx, 2035 type, ra, mr, mr_offset); 2036 bql_unlock(); 2037 2038 return ret; 2039 } 2040 2041 static Int128 do_ld16_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full, 2042 uint64_t ret_be, vaddr addr, int size, 2043 int mmu_idx, uintptr_t ra) 2044 { 2045 MemoryRegionSection *section; 2046 MemoryRegion *mr; 2047 hwaddr mr_offset; 2048 MemTxAttrs attrs; 2049 uint64_t a, b; 2050 2051 tcg_debug_assert(size > 8 && size <= 16); 2052 2053 attrs = full->attrs; 2054 section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra); 2055 mr = section->mr; 2056 2057 bql_lock(); 2058 a = int_ld_mmio_beN(cpu, full, ret_be, addr, size - 8, mmu_idx, 2059 MMU_DATA_LOAD, ra, mr, mr_offset); 2060 b = int_ld_mmio_beN(cpu, full, ret_be, addr + size - 8, 8, mmu_idx, 2061 MMU_DATA_LOAD, ra, mr, mr_offset + size - 8); 2062 bql_unlock(); 2063 2064 return int128_make128(b, a); 2065 } 2066 2067 /** 2068 * do_ld_bytes_beN 2069 * @p: translation parameters 2070 * @ret_be: accumulated data 2071 * 2072 * Load @p->size bytes from @p->haddr, which is RAM. 2073 * The bytes to concatenated in big-endian order with @ret_be. 2074 */ 2075 static uint64_t do_ld_bytes_beN(MMULookupPageData *p, uint64_t ret_be) 2076 { 2077 uint8_t *haddr = p->haddr; 2078 int i, size = p->size; 2079 2080 for (i = 0; i < size; i++) { 2081 ret_be = (ret_be << 8) | haddr[i]; 2082 } 2083 return ret_be; 2084 } 2085 2086 /** 2087 * do_ld_parts_beN 2088 * @p: translation parameters 2089 * @ret_be: accumulated data 2090 * 2091 * As do_ld_bytes_beN, but atomically on each aligned part. 2092 */ 2093 static uint64_t do_ld_parts_beN(MMULookupPageData *p, uint64_t ret_be) 2094 { 2095 void *haddr = p->haddr; 2096 int size = p->size; 2097 2098 do { 2099 uint64_t x; 2100 int n; 2101 2102 /* 2103 * Find minimum of alignment and size. 2104 * This is slightly stronger than required by MO_ATOM_SUBALIGN, which 2105 * would have only checked the low bits of addr|size once at the start, 2106 * but is just as easy. 
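 * For example, with haddr ending in ...2 and size = 6, (2 | 6) & 7 = 6
 * selects a 2-byte atomic load; the next iteration has haddr ...4 and
 * size = 4, so (4 | 4) & 7 = 4 selects a 4-byte atomic load, and the
 * whole piece has been read in naturally aligned, individually atomic
 * parts.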
2107 */ 2108 switch (((uintptr_t)haddr | size) & 7) { 2109 case 4: 2110 x = cpu_to_be32(load_atomic4(haddr)); 2111 ret_be = (ret_be << 32) | x; 2112 n = 4; 2113 break; 2114 case 2: 2115 case 6: 2116 x = cpu_to_be16(load_atomic2(haddr)); 2117 ret_be = (ret_be << 16) | x; 2118 n = 2; 2119 break; 2120 default: 2121 x = *(uint8_t *)haddr; 2122 ret_be = (ret_be << 8) | x; 2123 n = 1; 2124 break; 2125 case 0: 2126 g_assert_not_reached(); 2127 } 2128 haddr += n; 2129 size -= n; 2130 } while (size != 0); 2131 return ret_be; 2132 } 2133 2134 /** 2135 * do_ld_parts_be4 2136 * @p: translation parameters 2137 * @ret_be: accumulated data 2138 * 2139 * As do_ld_bytes_beN, but with one atomic load. 2140 * Four aligned bytes are guaranteed to cover the load. 2141 */ 2142 static uint64_t do_ld_whole_be4(MMULookupPageData *p, uint64_t ret_be) 2143 { 2144 int o = p->addr & 3; 2145 uint32_t x = load_atomic4(p->haddr - o); 2146 2147 x = cpu_to_be32(x); 2148 x <<= o * 8; 2149 x >>= (4 - p->size) * 8; 2150 return (ret_be << (p->size * 8)) | x; 2151 } 2152 2153 /** 2154 * do_ld_parts_be8 2155 * @p: translation parameters 2156 * @ret_be: accumulated data 2157 * 2158 * As do_ld_bytes_beN, but with one atomic load. 2159 * Eight aligned bytes are guaranteed to cover the load. 2160 */ 2161 static uint64_t do_ld_whole_be8(CPUState *cpu, uintptr_t ra, 2162 MMULookupPageData *p, uint64_t ret_be) 2163 { 2164 int o = p->addr & 7; 2165 uint64_t x = load_atomic8_or_exit(cpu, ra, p->haddr - o); 2166 2167 x = cpu_to_be64(x); 2168 x <<= o * 8; 2169 x >>= (8 - p->size) * 8; 2170 return (ret_be << (p->size * 8)) | x; 2171 } 2172 2173 /** 2174 * do_ld_parts_be16 2175 * @p: translation parameters 2176 * @ret_be: accumulated data 2177 * 2178 * As do_ld_bytes_beN, but with one atomic load. 2179 * 16 aligned bytes are guaranteed to cover the load. 2180 */ 2181 static Int128 do_ld_whole_be16(CPUState *cpu, uintptr_t ra, 2182 MMULookupPageData *p, uint64_t ret_be) 2183 { 2184 int o = p->addr & 15; 2185 Int128 x, y = load_atomic16_or_exit(cpu, ra, p->haddr - o); 2186 int size = p->size; 2187 2188 if (!HOST_BIG_ENDIAN) { 2189 y = bswap128(y); 2190 } 2191 y = int128_lshift(y, o * 8); 2192 y = int128_urshift(y, (16 - size) * 8); 2193 x = int128_make64(ret_be); 2194 x = int128_lshift(x, size * 8); 2195 return int128_or(x, y); 2196 } 2197 2198 /* 2199 * Wrapper for the above. 2200 */ 2201 static uint64_t do_ld_beN(CPUState *cpu, MMULookupPageData *p, 2202 uint64_t ret_be, int mmu_idx, MMUAccessType type, 2203 MemOp mop, uintptr_t ra) 2204 { 2205 MemOp atom; 2206 unsigned tmp, half_size; 2207 2208 if (unlikely(p->flags & TLB_MMIO)) { 2209 return do_ld_mmio_beN(cpu, p->full, ret_be, p->addr, p->size, 2210 mmu_idx, type, ra); 2211 } 2212 2213 /* 2214 * It is a given that we cross a page and therefore there is no 2215 * atomicity for the load as a whole, but subobjects may need attention. 2216 */ 2217 atom = mop & MO_ATOM_MASK; 2218 switch (atom) { 2219 case MO_ATOM_SUBALIGN: 2220 return do_ld_parts_beN(p, ret_be); 2221 2222 case MO_ATOM_IFALIGN_PAIR: 2223 case MO_ATOM_WITHIN16_PAIR: 2224 tmp = mop & MO_SIZE; 2225 tmp = tmp ? tmp - 1 : 0; 2226 half_size = 1 << tmp; 2227 if (atom == MO_ATOM_IFALIGN_PAIR 2228 ? 
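                /*
                 * half_size is half of the full access size.  One reading of
                 * the test around this comment: for MO_ATOM_IFALIGN_PAIR the
                 * page split must land exactly on the half boundary for a
                 * half to need an atomic load, while for MO_ATOM_WITHIN16_PAIR
                 * it is enough that one complete half lies within this page;
                 * that half is then loaded whole via do_ld_whole_be4/be8.
                 */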
p->size == half_size 2229 : p->size >= half_size) { 2230 if (!HAVE_al8_fast && p->size < 4) { 2231 return do_ld_whole_be4(p, ret_be); 2232 } else { 2233 return do_ld_whole_be8(cpu, ra, p, ret_be); 2234 } 2235 } 2236 /* fall through */ 2237 2238 case MO_ATOM_IFALIGN: 2239 case MO_ATOM_WITHIN16: 2240 case MO_ATOM_NONE: 2241 return do_ld_bytes_beN(p, ret_be); 2242 2243 default: 2244 g_assert_not_reached(); 2245 } 2246 } 2247 2248 /* 2249 * Wrapper for the above, for 8 < size < 16. 2250 */ 2251 static Int128 do_ld16_beN(CPUState *cpu, MMULookupPageData *p, 2252 uint64_t a, int mmu_idx, MemOp mop, uintptr_t ra) 2253 { 2254 int size = p->size; 2255 uint64_t b; 2256 MemOp atom; 2257 2258 if (unlikely(p->flags & TLB_MMIO)) { 2259 return do_ld16_mmio_beN(cpu, p->full, a, p->addr, size, mmu_idx, ra); 2260 } 2261 2262 /* 2263 * It is a given that we cross a page and therefore there is no 2264 * atomicity for the load as a whole, but subobjects may need attention. 2265 */ 2266 atom = mop & MO_ATOM_MASK; 2267 switch (atom) { 2268 case MO_ATOM_SUBALIGN: 2269 p->size = size - 8; 2270 a = do_ld_parts_beN(p, a); 2271 p->haddr += size - 8; 2272 p->size = 8; 2273 b = do_ld_parts_beN(p, 0); 2274 break; 2275 2276 case MO_ATOM_WITHIN16_PAIR: 2277 /* Since size > 8, this is the half that must be atomic. */ 2278 return do_ld_whole_be16(cpu, ra, p, a); 2279 2280 case MO_ATOM_IFALIGN_PAIR: 2281 /* 2282 * Since size > 8, both halves are misaligned, 2283 * and so neither is atomic. 2284 */ 2285 case MO_ATOM_IFALIGN: 2286 case MO_ATOM_WITHIN16: 2287 case MO_ATOM_NONE: 2288 p->size = size - 8; 2289 a = do_ld_bytes_beN(p, a); 2290 b = ldq_be_p(p->haddr + size - 8); 2291 break; 2292 2293 default: 2294 g_assert_not_reached(); 2295 } 2296 2297 return int128_make128(b, a); 2298 } 2299 2300 static uint8_t do_ld_1(CPUState *cpu, MMULookupPageData *p, int mmu_idx, 2301 MMUAccessType type, uintptr_t ra) 2302 { 2303 if (unlikely(p->flags & TLB_MMIO)) { 2304 return do_ld_mmio_beN(cpu, p->full, 0, p->addr, 1, mmu_idx, type, ra); 2305 } else { 2306 return *(uint8_t *)p->haddr; 2307 } 2308 } 2309 2310 static uint16_t do_ld_2(CPUState *cpu, MMULookupPageData *p, int mmu_idx, 2311 MMUAccessType type, MemOp memop, uintptr_t ra) 2312 { 2313 uint16_t ret; 2314 2315 if (unlikely(p->flags & TLB_MMIO)) { 2316 ret = do_ld_mmio_beN(cpu, p->full, 0, p->addr, 2, mmu_idx, type, ra); 2317 if ((memop & MO_BSWAP) == MO_LE) { 2318 ret = bswap16(ret); 2319 } 2320 } else { 2321 /* Perform the load host endian, then swap if necessary. */ 2322 ret = load_atom_2(cpu, ra, p->haddr, memop); 2323 if (memop & MO_BSWAP) { 2324 ret = bswap16(ret); 2325 } 2326 } 2327 return ret; 2328 } 2329 2330 static uint32_t do_ld_4(CPUState *cpu, MMULookupPageData *p, int mmu_idx, 2331 MMUAccessType type, MemOp memop, uintptr_t ra) 2332 { 2333 uint32_t ret; 2334 2335 if (unlikely(p->flags & TLB_MMIO)) { 2336 ret = do_ld_mmio_beN(cpu, p->full, 0, p->addr, 4, mmu_idx, type, ra); 2337 if ((memop & MO_BSWAP) == MO_LE) { 2338 ret = bswap32(ret); 2339 } 2340 } else { 2341 /* Perform the load host endian. 
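 * The endianness convention in do_ld_2/4/8: the MMIO path assembles a
 * big-endian value and byte-swaps it when the access is little-endian,
 * while the RAM path loads in host order and swaps when MO_BSWAP is set
 * (i.e. the access endianness differs from the host's).  For example, a
 * little-endian 4-byte guest load on a big-endian host takes exactly one
 * bswap32 on either path.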
*/ 2342 ret = load_atom_4(cpu, ra, p->haddr, memop); 2343 if (memop & MO_BSWAP) { 2344 ret = bswap32(ret); 2345 } 2346 } 2347 return ret; 2348 } 2349 2350 static uint64_t do_ld_8(CPUState *cpu, MMULookupPageData *p, int mmu_idx, 2351 MMUAccessType type, MemOp memop, uintptr_t ra) 2352 { 2353 uint64_t ret; 2354 2355 if (unlikely(p->flags & TLB_MMIO)) { 2356 ret = do_ld_mmio_beN(cpu, p->full, 0, p->addr, 8, mmu_idx, type, ra); 2357 if ((memop & MO_BSWAP) == MO_LE) { 2358 ret = bswap64(ret); 2359 } 2360 } else { 2361 /* Perform the load host endian. */ 2362 ret = load_atom_8(cpu, ra, p->haddr, memop); 2363 if (memop & MO_BSWAP) { 2364 ret = bswap64(ret); 2365 } 2366 } 2367 return ret; 2368 } 2369 2370 static uint8_t do_ld1_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi, 2371 uintptr_t ra, MMUAccessType access_type) 2372 { 2373 MMULookupLocals l; 2374 bool crosspage; 2375 2376 cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); 2377 crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l); 2378 tcg_debug_assert(!crosspage); 2379 2380 return do_ld_1(cpu, &l.page[0], l.mmu_idx, access_type, ra); 2381 } 2382 2383 static uint16_t do_ld2_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi, 2384 uintptr_t ra, MMUAccessType access_type) 2385 { 2386 MMULookupLocals l; 2387 bool crosspage; 2388 uint16_t ret; 2389 uint8_t a, b; 2390 2391 cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); 2392 crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l); 2393 if (likely(!crosspage)) { 2394 return do_ld_2(cpu, &l.page[0], l.mmu_idx, access_type, l.memop, ra); 2395 } 2396 2397 a = do_ld_1(cpu, &l.page[0], l.mmu_idx, access_type, ra); 2398 b = do_ld_1(cpu, &l.page[1], l.mmu_idx, access_type, ra); 2399 2400 if ((l.memop & MO_BSWAP) == MO_LE) { 2401 ret = a | (b << 8); 2402 } else { 2403 ret = b | (a << 8); 2404 } 2405 return ret; 2406 } 2407 2408 static uint32_t do_ld4_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi, 2409 uintptr_t ra, MMUAccessType access_type) 2410 { 2411 MMULookupLocals l; 2412 bool crosspage; 2413 uint32_t ret; 2414 2415 cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); 2416 crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l); 2417 if (likely(!crosspage)) { 2418 return do_ld_4(cpu, &l.page[0], l.mmu_idx, access_type, l.memop, ra); 2419 } 2420 2421 ret = do_ld_beN(cpu, &l.page[0], 0, l.mmu_idx, access_type, l.memop, ra); 2422 ret = do_ld_beN(cpu, &l.page[1], ret, l.mmu_idx, access_type, l.memop, ra); 2423 if ((l.memop & MO_BSWAP) == MO_LE) { 2424 ret = bswap32(ret); 2425 } 2426 return ret; 2427 } 2428 2429 static uint64_t do_ld8_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi, 2430 uintptr_t ra, MMUAccessType access_type) 2431 { 2432 MMULookupLocals l; 2433 bool crosspage; 2434 uint64_t ret; 2435 2436 cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); 2437 crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l); 2438 if (likely(!crosspage)) { 2439 return do_ld_8(cpu, &l.page[0], l.mmu_idx, access_type, l.memop, ra); 2440 } 2441 2442 ret = do_ld_beN(cpu, &l.page[0], 0, l.mmu_idx, access_type, l.memop, ra); 2443 ret = do_ld_beN(cpu, &l.page[1], ret, l.mmu_idx, access_type, l.memop, ra); 2444 if ((l.memop & MO_BSWAP) == MO_LE) { 2445 ret = bswap64(ret); 2446 } 2447 return ret; 2448 } 2449 2450 static Int128 do_ld16_mmu(CPUState *cpu, vaddr addr, 2451 MemOpIdx oi, uintptr_t ra) 2452 { 2453 MMULookupLocals l; 2454 bool crosspage; 2455 uint64_t a, b; 2456 Int128 ret; 2457 int first; 2458 2459 cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); 2460 crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_LOAD, &l); 2461 if (likely(!crosspage)) { 2462 if 
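        /*
         * Both single-page 16-byte paths below follow the same convention:
         * the MMIO path assembles a big-endian value under the BQL via
         * do_ld16_mmio_beN, the RAM path uses one host-order load_atom_16,
         * and the result is byte-swapped to the requested order.
         */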
(unlikely(l.page[0].flags & TLB_MMIO)) { 2463 ret = do_ld16_mmio_beN(cpu, l.page[0].full, 0, addr, 16, 2464 l.mmu_idx, ra); 2465 if ((l.memop & MO_BSWAP) == MO_LE) { 2466 ret = bswap128(ret); 2467 } 2468 } else { 2469 /* Perform the load host endian. */ 2470 ret = load_atom_16(cpu, ra, l.page[0].haddr, l.memop); 2471 if (l.memop & MO_BSWAP) { 2472 ret = bswap128(ret); 2473 } 2474 } 2475 return ret; 2476 } 2477 2478 first = l.page[0].size; 2479 if (first == 8) { 2480 MemOp mop8 = (l.memop & ~MO_SIZE) | MO_64; 2481 2482 a = do_ld_8(cpu, &l.page[0], l.mmu_idx, MMU_DATA_LOAD, mop8, ra); 2483 b = do_ld_8(cpu, &l.page[1], l.mmu_idx, MMU_DATA_LOAD, mop8, ra); 2484 if ((mop8 & MO_BSWAP) == MO_LE) { 2485 ret = int128_make128(a, b); 2486 } else { 2487 ret = int128_make128(b, a); 2488 } 2489 return ret; 2490 } 2491 2492 if (first < 8) { 2493 a = do_ld_beN(cpu, &l.page[0], 0, l.mmu_idx, 2494 MMU_DATA_LOAD, l.memop, ra); 2495 ret = do_ld16_beN(cpu, &l.page[1], a, l.mmu_idx, l.memop, ra); 2496 } else { 2497 ret = do_ld16_beN(cpu, &l.page[0], 0, l.mmu_idx, l.memop, ra); 2498 b = int128_getlo(ret); 2499 ret = int128_lshift(ret, l.page[1].size * 8); 2500 a = int128_gethi(ret); 2501 b = do_ld_beN(cpu, &l.page[1], b, l.mmu_idx, 2502 MMU_DATA_LOAD, l.memop, ra); 2503 ret = int128_make128(b, a); 2504 } 2505 if ((l.memop & MO_BSWAP) == MO_LE) { 2506 ret = bswap128(ret); 2507 } 2508 return ret; 2509 } 2510 2511 /* 2512 * Store Helpers 2513 */ 2514 2515 /** 2516 * do_st_mmio_leN: 2517 * @cpu: generic cpu state 2518 * @full: page parameters 2519 * @val_le: data to store 2520 * @addr: virtual address 2521 * @size: number of bytes 2522 * @mmu_idx: virtual address context 2523 * @ra: return address into tcg generated code, or 0 2524 * Context: BQL held 2525 * 2526 * Store @size bytes at @addr, which is memory-mapped i/o. 2527 * The bytes to store are extracted in little-endian order from @val_le; 2528 * return the bytes of @val_le beyond @p->size that have not been stored. 2529 */ 2530 static uint64_t int_st_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full, 2531 uint64_t val_le, vaddr addr, int size, 2532 int mmu_idx, uintptr_t ra, 2533 MemoryRegion *mr, hwaddr mr_offset) 2534 { 2535 do { 2536 MemOp this_mop; 2537 unsigned this_size; 2538 MemTxResult r; 2539 2540 /* Store aligned pieces up to 8 bytes. 
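 * This mirrors int_ld_mmio_beN above: the lowest set bit of
 * (size | addr | 8) selects the largest naturally aligned chunk, up to
 * 8 bytes.  Because @val_le is little-endian, the bytes just written are
 * shifted out with val_le >>= this_size * 8 before the next iteration,
 * and whatever has not been stored is returned to the caller.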
*/ 2541 this_mop = ctz32(size | (int)addr | 8); 2542 this_size = 1 << this_mop; 2543 this_mop |= MO_LE; 2544 2545 r = memory_region_dispatch_write(mr, mr_offset, val_le, 2546 this_mop, full->attrs); 2547 if (unlikely(r != MEMTX_OK)) { 2548 io_failed(cpu, full, addr, this_size, MMU_DATA_STORE, 2549 mmu_idx, r, ra); 2550 } 2551 if (this_size == 8) { 2552 return 0; 2553 } 2554 2555 val_le >>= this_size * 8; 2556 addr += this_size; 2557 mr_offset += this_size; 2558 size -= this_size; 2559 } while (size); 2560 2561 return val_le; 2562 } 2563 2564 static uint64_t do_st_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full, 2565 uint64_t val_le, vaddr addr, int size, 2566 int mmu_idx, uintptr_t ra) 2567 { 2568 MemoryRegionSection *section; 2569 hwaddr mr_offset; 2570 MemoryRegion *mr; 2571 MemTxAttrs attrs; 2572 uint64_t ret; 2573 2574 tcg_debug_assert(size > 0 && size <= 8); 2575 2576 attrs = full->attrs; 2577 section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra); 2578 mr = section->mr; 2579 2580 bql_lock(); 2581 ret = int_st_mmio_leN(cpu, full, val_le, addr, size, mmu_idx, 2582 ra, mr, mr_offset); 2583 bql_unlock(); 2584 2585 return ret; 2586 } 2587 2588 static uint64_t do_st16_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full, 2589 Int128 val_le, vaddr addr, int size, 2590 int mmu_idx, uintptr_t ra) 2591 { 2592 MemoryRegionSection *section; 2593 MemoryRegion *mr; 2594 hwaddr mr_offset; 2595 MemTxAttrs attrs; 2596 uint64_t ret; 2597 2598 tcg_debug_assert(size > 8 && size <= 16); 2599 2600 attrs = full->attrs; 2601 section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra); 2602 mr = section->mr; 2603 2604 bql_lock(); 2605 int_st_mmio_leN(cpu, full, int128_getlo(val_le), addr, 8, 2606 mmu_idx, ra, mr, mr_offset); 2607 ret = int_st_mmio_leN(cpu, full, int128_gethi(val_le), addr + 8, 2608 size - 8, mmu_idx, ra, mr, mr_offset + 8); 2609 bql_unlock(); 2610 2611 return ret; 2612 } 2613 2614 /* 2615 * Wrapper for the above. 2616 */ 2617 static uint64_t do_st_leN(CPUState *cpu, MMULookupPageData *p, 2618 uint64_t val_le, int mmu_idx, 2619 MemOp mop, uintptr_t ra) 2620 { 2621 MemOp atom; 2622 unsigned tmp, half_size; 2623 2624 if (unlikely(p->flags & TLB_MMIO)) { 2625 return do_st_mmio_leN(cpu, p->full, val_le, p->addr, 2626 p->size, mmu_idx, ra); 2627 } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) { 2628 return val_le >> (p->size * 8); 2629 } 2630 2631 /* 2632 * It is a given that we cross a page and therefore there is no atomicity 2633 * for the store as a whole, but subobjects may need attention. 2634 */ 2635 atom = mop & MO_ATOM_MASK; 2636 switch (atom) { 2637 case MO_ATOM_SUBALIGN: 2638 return store_parts_leN(p->haddr, p->size, val_le); 2639 2640 case MO_ATOM_IFALIGN_PAIR: 2641 case MO_ATOM_WITHIN16_PAIR: 2642 tmp = mop & MO_SIZE; 2643 tmp = tmp ? tmp - 1 : 0; 2644 half_size = 1 << tmp; 2645 if (atom == MO_ATOM_IFALIGN_PAIR 2646 ? p->size == half_size 2647 : p->size >= half_size) { 2648 if (!HAVE_al8_fast && p->size <= 4) { 2649 return store_whole_le4(p->haddr, p->size, val_le); 2650 } else if (HAVE_al8) { 2651 return store_whole_le8(p->haddr, p->size, val_le); 2652 } else { 2653 cpu_loop_exit_atomic(cpu, ra); 2654 } 2655 } 2656 /* fall through */ 2657 2658 case MO_ATOM_IFALIGN: 2659 case MO_ATOM_WITHIN16: 2660 case MO_ATOM_NONE: 2661 return store_bytes_leN(p->haddr, p->size, val_le); 2662 2663 default: 2664 g_assert_not_reached(); 2665 } 2666 } 2667 2668 /* 2669 * Wrapper for the above, for 8 < size < 16. 
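 * As with do_st_leN, the return value is the little-endian remainder of
 * @val_le that did not fit in this page, ready to be handed to the
 * second page.  For example, with size = 12 the low 8 bytes plus 4 of
 * the high bytes are stored here, and the last 4 bytes come back shifted
 * down for do_st_leN on the other page.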
2670 */ 2671 static uint64_t do_st16_leN(CPUState *cpu, MMULookupPageData *p, 2672 Int128 val_le, int mmu_idx, 2673 MemOp mop, uintptr_t ra) 2674 { 2675 int size = p->size; 2676 MemOp atom; 2677 2678 if (unlikely(p->flags & TLB_MMIO)) { 2679 return do_st16_mmio_leN(cpu, p->full, val_le, p->addr, 2680 size, mmu_idx, ra); 2681 } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) { 2682 return int128_gethi(val_le) >> ((size - 8) * 8); 2683 } 2684 2685 /* 2686 * It is a given that we cross a page and therefore there is no atomicity 2687 * for the store as a whole, but subobjects may need attention. 2688 */ 2689 atom = mop & MO_ATOM_MASK; 2690 switch (atom) { 2691 case MO_ATOM_SUBALIGN: 2692 store_parts_leN(p->haddr, 8, int128_getlo(val_le)); 2693 return store_parts_leN(p->haddr + 8, p->size - 8, 2694 int128_gethi(val_le)); 2695 2696 case MO_ATOM_WITHIN16_PAIR: 2697 /* Since size > 8, this is the half that must be atomic. */ 2698 if (!HAVE_CMPXCHG128) { 2699 cpu_loop_exit_atomic(cpu, ra); 2700 } 2701 return store_whole_le16(p->haddr, p->size, val_le); 2702 2703 case MO_ATOM_IFALIGN_PAIR: 2704 /* 2705 * Since size > 8, both halves are misaligned, 2706 * and so neither is atomic. 2707 */ 2708 case MO_ATOM_IFALIGN: 2709 case MO_ATOM_WITHIN16: 2710 case MO_ATOM_NONE: 2711 stq_le_p(p->haddr, int128_getlo(val_le)); 2712 return store_bytes_leN(p->haddr + 8, p->size - 8, 2713 int128_gethi(val_le)); 2714 2715 default: 2716 g_assert_not_reached(); 2717 } 2718 } 2719 2720 static void do_st_1(CPUState *cpu, MMULookupPageData *p, uint8_t val, 2721 int mmu_idx, uintptr_t ra) 2722 { 2723 if (unlikely(p->flags & TLB_MMIO)) { 2724 do_st_mmio_leN(cpu, p->full, val, p->addr, 1, mmu_idx, ra); 2725 } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) { 2726 /* nothing */ 2727 } else { 2728 *(uint8_t *)p->haddr = val; 2729 } 2730 } 2731 2732 static void do_st_2(CPUState *cpu, MMULookupPageData *p, uint16_t val, 2733 int mmu_idx, MemOp memop, uintptr_t ra) 2734 { 2735 if (unlikely(p->flags & TLB_MMIO)) { 2736 if ((memop & MO_BSWAP) != MO_LE) { 2737 val = bswap16(val); 2738 } 2739 do_st_mmio_leN(cpu, p->full, val, p->addr, 2, mmu_idx, ra); 2740 } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) { 2741 /* nothing */ 2742 } else { 2743 /* Swap to host endian if necessary, then store. */ 2744 if (memop & MO_BSWAP) { 2745 val = bswap16(val); 2746 } 2747 store_atom_2(cpu, ra, p->haddr, memop, val); 2748 } 2749 } 2750 2751 static void do_st_4(CPUState *cpu, MMULookupPageData *p, uint32_t val, 2752 int mmu_idx, MemOp memop, uintptr_t ra) 2753 { 2754 if (unlikely(p->flags & TLB_MMIO)) { 2755 if ((memop & MO_BSWAP) != MO_LE) { 2756 val = bswap32(val); 2757 } 2758 do_st_mmio_leN(cpu, p->full, val, p->addr, 4, mmu_idx, ra); 2759 } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) { 2760 /* nothing */ 2761 } else { 2762 /* Swap to host endian if necessary, then store. */ 2763 if (memop & MO_BSWAP) { 2764 val = bswap32(val); 2765 } 2766 store_atom_4(cpu, ra, p->haddr, memop, val); 2767 } 2768 } 2769 2770 static void do_st_8(CPUState *cpu, MMULookupPageData *p, uint64_t val, 2771 int mmu_idx, MemOp memop, uintptr_t ra) 2772 { 2773 if (unlikely(p->flags & TLB_MMIO)) { 2774 if ((memop & MO_BSWAP) != MO_LE) { 2775 val = bswap64(val); 2776 } 2777 do_st_mmio_leN(cpu, p->full, val, p->addr, 8, mmu_idx, ra); 2778 } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) { 2779 /* nothing */ 2780 } else { 2781 /* Swap to host endian if necessary, then store. 
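 * The store helpers mirror the load convention: @val arrives in the
 * guest's requested byte order, the MMIO path normalises it to
 * little-endian before do_st_mmio_leN, and the RAM path swaps to host
 * order so store_atom_* can write it directly.  For example, a big-endian
 * 8-byte guest store on a little-endian host is byte-swapped exactly
 * once on either path.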
*/ 2782 if (memop & MO_BSWAP) { 2783 val = bswap64(val); 2784 } 2785 store_atom_8(cpu, ra, p->haddr, memop, val); 2786 } 2787 } 2788 2789 static void do_st1_mmu(CPUState *cpu, vaddr addr, uint8_t val, 2790 MemOpIdx oi, uintptr_t ra) 2791 { 2792 MMULookupLocals l; 2793 bool crosspage; 2794 2795 cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST); 2796 crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l); 2797 tcg_debug_assert(!crosspage); 2798 2799 do_st_1(cpu, &l.page[0], val, l.mmu_idx, ra); 2800 } 2801 2802 static void do_st2_mmu(CPUState *cpu, vaddr addr, uint16_t val, 2803 MemOpIdx oi, uintptr_t ra) 2804 { 2805 MMULookupLocals l; 2806 bool crosspage; 2807 uint8_t a, b; 2808 2809 cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST); 2810 crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l); 2811 if (likely(!crosspage)) { 2812 do_st_2(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra); 2813 return; 2814 } 2815 2816 if ((l.memop & MO_BSWAP) == MO_LE) { 2817 a = val, b = val >> 8; 2818 } else { 2819 b = val, a = val >> 8; 2820 } 2821 do_st_1(cpu, &l.page[0], a, l.mmu_idx, ra); 2822 do_st_1(cpu, &l.page[1], b, l.mmu_idx, ra); 2823 } 2824 2825 static void do_st4_mmu(CPUState *cpu, vaddr addr, uint32_t val, 2826 MemOpIdx oi, uintptr_t ra) 2827 { 2828 MMULookupLocals l; 2829 bool crosspage; 2830 2831 cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST); 2832 crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l); 2833 if (likely(!crosspage)) { 2834 do_st_4(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra); 2835 return; 2836 } 2837 2838 /* Swap to little endian for simplicity, then store by bytes. */ 2839 if ((l.memop & MO_BSWAP) != MO_LE) { 2840 val = bswap32(val); 2841 } 2842 val = do_st_leN(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra); 2843 (void) do_st_leN(cpu, &l.page[1], val, l.mmu_idx, l.memop, ra); 2844 } 2845 2846 static void do_st8_mmu(CPUState *cpu, vaddr addr, uint64_t val, 2847 MemOpIdx oi, uintptr_t ra) 2848 { 2849 MMULookupLocals l; 2850 bool crosspage; 2851 2852 cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST); 2853 crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l); 2854 if (likely(!crosspage)) { 2855 do_st_8(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra); 2856 return; 2857 } 2858 2859 /* Swap to little endian for simplicity, then store by bytes. */ 2860 if ((l.memop & MO_BSWAP) != MO_LE) { 2861 val = bswap64(val); 2862 } 2863 val = do_st_leN(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra); 2864 (void) do_st_leN(cpu, &l.page[1], val, l.mmu_idx, l.memop, ra); 2865 } 2866 2867 static void do_st16_mmu(CPUState *cpu, vaddr addr, Int128 val, 2868 MemOpIdx oi, uintptr_t ra) 2869 { 2870 MMULookupLocals l; 2871 bool crosspage; 2872 uint64_t a, b; 2873 int first; 2874 2875 cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST); 2876 crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l); 2877 if (likely(!crosspage)) { 2878 if (unlikely(l.page[0].flags & TLB_MMIO)) { 2879 if ((l.memop & MO_BSWAP) != MO_LE) { 2880 val = bswap128(val); 2881 } 2882 do_st16_mmio_leN(cpu, l.page[0].full, val, addr, 16, l.mmu_idx, ra); 2883 } else if (unlikely(l.page[0].flags & TLB_DISCARD_WRITE)) { 2884 /* nothing */ 2885 } else { 2886 /* Swap to host endian if necessary, then store. 
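 * For the cross-page paths (above for 4/8 bytes, below for 16 bytes)
 * the value is instead normalised to little-endian and stored in pieces:
 * do_st_leN / do_st16_leN write what fits in the first page and return
 * the not-yet-stored bytes shifted down, which are then written to the
 * second page.  For example, an 8-byte store with 3 bytes on the first
 * page leaves val >> 24 to be stored on the second.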
*/ 2887 if (l.memop & MO_BSWAP) { 2888 val = bswap128(val); 2889 } 2890 store_atom_16(cpu, ra, l.page[0].haddr, l.memop, val); 2891 } 2892 return; 2893 } 2894 2895 first = l.page[0].size; 2896 if (first == 8) { 2897 MemOp mop8 = (l.memop & ~(MO_SIZE | MO_BSWAP)) | MO_64; 2898 2899 if (l.memop & MO_BSWAP) { 2900 val = bswap128(val); 2901 } 2902 if (HOST_BIG_ENDIAN) { 2903 b = int128_getlo(val), a = int128_gethi(val); 2904 } else { 2905 a = int128_getlo(val), b = int128_gethi(val); 2906 } 2907 do_st_8(cpu, &l.page[0], a, l.mmu_idx, mop8, ra); 2908 do_st_8(cpu, &l.page[1], b, l.mmu_idx, mop8, ra); 2909 return; 2910 } 2911 2912 if ((l.memop & MO_BSWAP) != MO_LE) { 2913 val = bswap128(val); 2914 } 2915 if (first < 8) { 2916 do_st_leN(cpu, &l.page[0], int128_getlo(val), l.mmu_idx, l.memop, ra); 2917 val = int128_urshift(val, first * 8); 2918 do_st16_leN(cpu, &l.page[1], val, l.mmu_idx, l.memop, ra); 2919 } else { 2920 b = do_st16_leN(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra); 2921 do_st_leN(cpu, &l.page[1], b, l.mmu_idx, l.memop, ra); 2922 } 2923 } 2924 2925 #include "ldst_common.c.inc" 2926 2927 /* 2928 * First set of functions passes in OI and RETADDR. 2929 * This makes them callable from other helpers. 2930 */ 2931 2932 #define ATOMIC_NAME(X) \ 2933 glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu) 2934 2935 #define ATOMIC_MMU_CLEANUP 2936 2937 #include "atomic_common.c.inc" 2938 2939 #define DATA_SIZE 1 2940 #include "atomic_template.h" 2941 2942 #define DATA_SIZE 2 2943 #include "atomic_template.h" 2944 2945 #define DATA_SIZE 4 2946 #include "atomic_template.h" 2947 2948 #ifdef CONFIG_ATOMIC64 2949 #define DATA_SIZE 8 2950 #include "atomic_template.h" 2951 #endif 2952 2953 #if defined(CONFIG_ATOMIC128) || HAVE_CMPXCHG128 2954 #define DATA_SIZE 16 2955 #include "atomic_template.h" 2956 #endif 2957 2958 /* Code access functions. */ 2959 2960 uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr) 2961 { 2962 MemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(env, true)); 2963 return do_ld1_mmu(env_cpu(env), addr, oi, 0, MMU_INST_FETCH); 2964 } 2965 2966 uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr) 2967 { 2968 MemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(env, true)); 2969 return do_ld2_mmu(env_cpu(env), addr, oi, 0, MMU_INST_FETCH); 2970 } 2971 2972 uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr) 2973 { 2974 MemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, true)); 2975 return do_ld4_mmu(env_cpu(env), addr, oi, 0, MMU_INST_FETCH); 2976 } 2977 2978 uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr) 2979 { 2980 MemOpIdx oi = make_memop_idx(MO_TEUQ, cpu_mmu_index(env, true)); 2981 return do_ld8_mmu(env_cpu(env), addr, oi, 0, MMU_INST_FETCH); 2982 } 2983 2984 uint8_t cpu_ldb_code_mmu(CPUArchState *env, abi_ptr addr, 2985 MemOpIdx oi, uintptr_t retaddr) 2986 { 2987 return do_ld1_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH); 2988 } 2989 2990 uint16_t cpu_ldw_code_mmu(CPUArchState *env, abi_ptr addr, 2991 MemOpIdx oi, uintptr_t retaddr) 2992 { 2993 return do_ld2_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH); 2994 } 2995 2996 uint32_t cpu_ldl_code_mmu(CPUArchState *env, abi_ptr addr, 2997 MemOpIdx oi, uintptr_t retaddr) 2998 { 2999 return do_ld4_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH); 3000 } 3001 3002 uint64_t cpu_ldq_code_mmu(CPUArchState *env, abi_ptr addr, 3003 MemOpIdx oi, uintptr_t retaddr) 3004 { 3005 return do_ld8_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH); 3006 } 3007
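/*
 * Usage sketch (illustrative only, not part of this file): a target's
 * translator or helper typically fetches instruction words through the
 * code access functions above, which build a MemOpIdx for the current
 * instruction-fetch MMU index and share the do_ldN_mmu paths with data
 * loads, e.g. with env and pc being the target's CPU state and fetch
 * address:
 *
 *     uint32_t insn = cpu_ldl_code(env, pc);
 *     uint32_t next = cpu_lduw_code(env, pc + 4);
 *
 * A TLB miss during such a fetch is resolved by tlb_fill with
 * MMU_INST_FETCH, so a failing fetch raises the target's fault through
 * the normal exception path.
 */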