/*
 * RAM allocation and memory access
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "exec/page-vary.h"
#include "qapi/error.h"

#include "qemu/cutils.h"
#include "qemu/cacheflush.h"
#include "qemu/hbitmap.h"
#include "qemu/madvise.h"
#include "qemu/lockable.h"

#ifdef CONFIG_TCG
#include "hw/core/tcg-cpu-ops.h"
#endif /* CONFIG_TCG */

#include "exec/exec-all.h"
#include "exec/page-protection.h"
#include "exec/target_page.h"
#include "hw/qdev-core.h"
#include "hw/qdev-properties.h"
#include "hw/boards.h"
#include "sysemu/xen.h"
#include "sysemu/kvm.h"
#include "sysemu/tcg.h"
#include "sysemu/qtest.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qemu/qemu-print.h"
#include "qemu/log.h"
#include "qemu/memalign.h"
#include "exec/memory.h"
#include "exec/ioport.h"
#include "sysemu/dma.h"
#include "sysemu/hostmem.h"
#include "sysemu/hw_accel.h"
#include "sysemu/xen-mapcache.h"
#include "trace/trace-root.h"

#ifdef CONFIG_FALLOCATE_PUNCH_HOLE
#include <linux/falloc.h>
#endif

#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "exec/translate-all.h"
#include "sysemu/replay.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"

#include "qemu/pmem.h"

#include "migration/vmstate.h"

#include "qemu/range.h"
#ifndef _WIN32
#include "qemu/mmap-alloc.h"
#endif

#include "monitor/monitor.h"

#ifdef CONFIG_LIBDAXCTL
#include <daxctl/libdaxctl.h>
#endif

//#define DEBUG_SUBPAGE

/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

static MemoryRegion io_mem_unassigned;

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits to skip to the next level (in units of L2_SIZE).
     * 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables. */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    MemoryRegionSection *mru_section;
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    FlatView *fv;
    hwaddr base;
    uint16_t sub_section[];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_log_global_after_sync(MemoryListener *listener);
static void tcg_commit(MemoryListener *listener);

/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
typedef struct CPUAddressSpace {
    CPUState *cpu;
    AddressSpace *as;
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
} CPUAddressSpace;

struct DirtyBitmapSnapshot {
    ram_addr_t start;
    ram_addr_t end;
    unsigned long dirty[];
};

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    static unsigned alloc_hint = 16;
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(alloc_hint, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
        alloc_hint = map->nodes_nb_alloc;
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}

static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, uint64_t *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, uint64_t nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
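/*
 * A worked example of the tree geometry (illustrative numbers, not part
 * of the original source): assuming the common TARGET_PAGE_BITS == 12,
 * P_L2_LEVELS == ((64 - 12 - 1) / 9) + 1 == 6, so a full walk consumes
 * up to six 9-bit index groups above the page offset.  Setting a single
 * page at 0x1234567000 yields page index 0x1234567, and the leaf slot
 * chosen at level 0 is (0x1234567 & (P_L2_SIZE - 1)) == 0x167.
 */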
/* Compact a non-leaf page entry.  Simply detect that the entry has a
 * single child, and update our entry so we can skip it and go directly
 * to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (P_L2_LEVELS >= (1 << 6) &&
        lp->skip + p[valid_ptr].skip >= (1 << 6)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

void address_space_dispatch_compact(AddressSpaceDispatch *d)
{
    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes);
    }
}

static inline bool section_covers_addr(const MemoryRegionSection *section,
                                       hwaddr addr)
{
    /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
     * the section must cover the entire address space.
     */
    return int128_gethi(section->size) ||
           range_covers_byte(section->offset_within_address_space,
                             int128_getlo(section->size), addr);
}
static MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr addr)
{
    PhysPageEntry lp = d->phys_map, *p;
    Node *nodes = d->map.nodes;
    MemoryRegionSection *sections = d->map.sections;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (section_covers_addr(&sections[lp.ptr], addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section = qatomic_read(&d->mru_section);
    subpage_t *subpage;

    if (!section || section == &d->map.sections[PHYS_SECTION_UNASSIGNED] ||
        !section_covers_addr(section, addr)) {
        section = phys_page_find(d, addr);
        qatomic_set(&d->mru_section, section);
    }
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}
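/*
 * Clamping example (numbers assumed for illustration, not from the
 * original source): for a RAM section of size 0x10000 and an access
 * landing at offset 0xff00 within it, diff == 0x100, so a requested
 * *plen of 0x1000 is clamped down to 0x100 and the access never strays
 * past the end of the section.
 */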
/**
 * address_space_translate_iommu - translate an address through an IOMMU
 * memory region and then through the target address space.
 *
 * @iommu_mr: the IOMMU memory region that we start the translation from
 * @addr: the address to be translated through the MMU
 * @xlat: the translated address offset within the destination memory region.
 *        It cannot be %NULL.
 * @plen_out: valid read/write length of the translated address.  It
 *            cannot be %NULL.
 * @page_mask_out: page mask for the translated address.  This
 *                 should only be meaningful for IOMMU translated
 *                 addresses, since the IOMMU may map huge pages whose
 *                 size the mask reflects.  It can be %NULL if we don't
 *                 care about it.
 * @is_write: whether the translation operation is for write
 * @is_mmio: whether this can be MMIO, set true if it can
 * @target_as: the address space targeted by the IOMMU
 * @attrs: transaction attributes
 *
 * This function is called from RCU critical section.  It is the common
 * part of flatview_do_translate and address_space_translate_cached.
 */
static MemoryRegionSection address_space_translate_iommu(IOMMUMemoryRegion *iommu_mr,
                                                         hwaddr *xlat,
                                                         hwaddr *plen_out,
                                                         hwaddr *page_mask_out,
                                                         bool is_write,
                                                         bool is_mmio,
                                                         AddressSpace **target_as,
                                                         MemTxAttrs attrs)
{
    MemoryRegionSection *section;
    hwaddr page_mask = (hwaddr)-1;

    do {
        hwaddr addr = *xlat;
        IOMMUMemoryRegionClass *imrc = memory_region_get_iommu_class_nocheck(iommu_mr);
        int iommu_idx = 0;
        IOMMUTLBEntry iotlb;

        if (imrc->attrs_to_index) {
            iommu_idx = imrc->attrs_to_index(iommu_mr, attrs);
        }

        iotlb = imrc->translate(iommu_mr, addr, is_write ?
                                IOMMU_WO : IOMMU_RO, iommu_idx);

        if (!(iotlb.perm & (1 << is_write))) {
            goto unassigned;
        }

        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        page_mask &= iotlb.addr_mask;
        *plen_out = MIN(*plen_out, (addr | iotlb.addr_mask) - addr + 1);
        *target_as = iotlb.target_as;

        section = address_space_translate_internal(
                address_space_to_dispatch(iotlb.target_as), addr, xlat,
                plen_out, is_mmio);

        iommu_mr = memory_region_get_iommu(section->mr);
    } while (unlikely(iommu_mr));

    if (page_mask_out) {
        *page_mask_out = page_mask;
    }
    return *section;

unassigned:
    return (MemoryRegionSection) { .mr = &io_mem_unassigned };
}
/**
 * flatview_do_translate - translate an address in FlatView
 *
 * @fv: the flat view that we want to translate on
 * @addr: the address to be translated in above address space
 * @xlat: the translated address offset within memory region.  It
 *        cannot be %NULL.
 * @plen_out: valid read/write length of the translated address.  It
 *            can be %NULL when we don't care about it.
 * @page_mask_out: page mask for the translated address.  This
 *                 should only be meaningful for IOMMU translated
 *                 addresses, since the IOMMU may map huge pages whose
 *                 size the mask reflects.  It can be %NULL if we don't
 *                 care about it.
 * @is_write: whether the translation operation is for write
 * @is_mmio: whether this can be MMIO, set true if it can
 * @target_as: the address space targeted by the IOMMU
 * @attrs: memory transaction attributes
 *
 * This function is called from RCU critical section
 */
static MemoryRegionSection flatview_do_translate(FlatView *fv,
                                                 hwaddr addr,
                                                 hwaddr *xlat,
                                                 hwaddr *plen_out,
                                                 hwaddr *page_mask_out,
                                                 bool is_write,
                                                 bool is_mmio,
                                                 AddressSpace **target_as,
                                                 MemTxAttrs attrs)
{
    MemoryRegionSection *section;
    IOMMUMemoryRegion *iommu_mr;
    hwaddr plen = (hwaddr)(-1);

    if (!plen_out) {
        plen_out = &plen;
    }

    section = address_space_translate_internal(
            flatview_to_dispatch(fv), addr, xlat,
            plen_out, is_mmio);

    iommu_mr = memory_region_get_iommu(section->mr);
    if (unlikely(iommu_mr)) {
        return address_space_translate_iommu(iommu_mr, xlat,
                                             plen_out, page_mask_out,
                                             is_write, is_mmio,
                                             target_as, attrs);
    }
    if (page_mask_out) {
        /* Not behind an IOMMU, use default page size. */
        *page_mask_out = ~TARGET_PAGE_MASK;
    }

    return *section;
}

/* Called from RCU critical section */
IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
                                            bool is_write, MemTxAttrs attrs)
{
    MemoryRegionSection section;
    hwaddr xlat, page_mask;

    /*
     * This can never be MMIO, and we don't really care about plen,
     * but about the page mask.
     */
    section = flatview_do_translate(address_space_to_flatview(as), addr, &xlat,
                                    NULL, &page_mask, is_write, false, &as,
                                    attrs);

    /* Illegal translation */
    if (section.mr == &io_mem_unassigned) {
        goto iotlb_fail;
    }

    /* Convert memory region offset into address space offset */
    xlat += section.offset_within_address_space -
            section.offset_within_region;

    return (IOMMUTLBEntry) {
        .target_as = as,
        .iova = addr & ~page_mask,
        .translated_addr = xlat & ~page_mask,
        .addr_mask = page_mask,
        /* IOTLBs are for DMAs, and DMA is only allowed on RAM. */
        .perm = IOMMU_RW,
    };

iotlb_fail:
    return (IOMMUTLBEntry) {0};
}

/* Called from RCU critical section */
MemoryRegion *flatview_translate(FlatView *fv, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool is_write,
                                 MemTxAttrs attrs)
{
    MemoryRegion *mr;
    MemoryRegionSection section;
    AddressSpace *as = NULL;

    /* This can be MMIO, so set up the MMIO bit. */
    section = flatview_do_translate(fv, addr, xlat, plen, NULL,
                                    is_write, true, &as, attrs);
    mr = section.mr;

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    return mr;
}
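/*
 * Caller sketch (hypothetical, for illustration only; the real consumers
 * of this API are device-IOTLB users such as vhost).  On failure the
 * returned entry is all-zero, so .perm == IOMMU_NONE:
 *
 *     IOMMUTLBEntry entry;
 *
 *     RCU_READ_LOCK_GUARD();
 *     entry = address_space_get_iotlb_entry(as, iova, true,
 *                                           MEMTXATTRS_UNSPECIFIED);
 *     if (entry.perm == IOMMU_NONE) {
 *         return -EFAULT;
 *     }
 */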
603 */ 604 } 605 606 static void tcg_register_iommu_notifier(CPUState *cpu, 607 IOMMUMemoryRegion *iommu_mr, 608 int iommu_idx) 609 { 610 /* Make sure this CPU has an IOMMU notifier registered for this 611 * IOMMU/IOMMU index combination, so that we can flush its TLB 612 * when the IOMMU tells us the mappings we've cached have changed. 613 */ 614 MemoryRegion *mr = MEMORY_REGION(iommu_mr); 615 TCGIOMMUNotifier *notifier = NULL; 616 int i; 617 618 for (i = 0; i < cpu->iommu_notifiers->len; i++) { 619 notifier = g_array_index(cpu->iommu_notifiers, TCGIOMMUNotifier *, i); 620 if (notifier->mr == mr && notifier->iommu_idx == iommu_idx) { 621 break; 622 } 623 } 624 if (i == cpu->iommu_notifiers->len) { 625 /* Not found, add a new entry at the end of the array */ 626 cpu->iommu_notifiers = g_array_set_size(cpu->iommu_notifiers, i + 1); 627 notifier = g_new0(TCGIOMMUNotifier, 1); 628 g_array_index(cpu->iommu_notifiers, TCGIOMMUNotifier *, i) = notifier; 629 630 notifier->mr = mr; 631 notifier->iommu_idx = iommu_idx; 632 notifier->cpu = cpu; 633 /* Rather than trying to register interest in the specific part 634 * of the iommu's address space that we've accessed and then 635 * expand it later as subsequent accesses touch more of it, we 636 * just register interest in the whole thing, on the assumption 637 * that iommu reconfiguration will be rare. 638 */ 639 iommu_notifier_init(¬ifier->n, 640 tcg_iommu_unmap_notify, 641 IOMMU_NOTIFIER_UNMAP, 642 0, 643 HWADDR_MAX, 644 iommu_idx); 645 memory_region_register_iommu_notifier(notifier->mr, ¬ifier->n, 646 &error_fatal); 647 } 648 649 if (!notifier->active) { 650 notifier->active = true; 651 } 652 } 653 654 void tcg_iommu_free_notifier_list(CPUState *cpu) 655 { 656 /* Destroy the CPU's notifier list */ 657 int i; 658 TCGIOMMUNotifier *notifier; 659 660 for (i = 0; i < cpu->iommu_notifiers->len; i++) { 661 notifier = g_array_index(cpu->iommu_notifiers, TCGIOMMUNotifier *, i); 662 memory_region_unregister_iommu_notifier(notifier->mr, ¬ifier->n); 663 g_free(notifier); 664 } 665 g_array_free(cpu->iommu_notifiers, true); 666 } 667 668 void tcg_iommu_init_notifier_list(CPUState *cpu) 669 { 670 cpu->iommu_notifiers = g_array_new(false, true, sizeof(TCGIOMMUNotifier *)); 671 } 672 673 /* Called from RCU critical section */ 674 MemoryRegionSection * 675 address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr orig_addr, 676 hwaddr *xlat, hwaddr *plen, 677 MemTxAttrs attrs, int *prot) 678 { 679 MemoryRegionSection *section; 680 IOMMUMemoryRegion *iommu_mr; 681 IOMMUMemoryRegionClass *imrc; 682 IOMMUTLBEntry iotlb; 683 int iommu_idx; 684 hwaddr addr = orig_addr; 685 AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch; 686 687 for (;;) { 688 section = address_space_translate_internal(d, addr, &addr, plen, false); 689 690 iommu_mr = memory_region_get_iommu(section->mr); 691 if (!iommu_mr) { 692 break; 693 } 694 695 imrc = memory_region_get_iommu_class_nocheck(iommu_mr); 696 697 iommu_idx = imrc->attrs_to_index(iommu_mr, attrs); 698 tcg_register_iommu_notifier(cpu, iommu_mr, iommu_idx); 699 /* We need all the permissions, so pass IOMMU_NONE so the IOMMU 700 * doesn't short-cut its translation table walk. 701 */ 702 iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, iommu_idx); 703 addr = ((iotlb.translated_addr & ~iotlb.addr_mask) 704 | (addr & iotlb.addr_mask)); 705 /* Update the caller's prot bits to remove permissions the IOMMU 706 * is giving us a failure response for. If we get down to no 707 * permissions left at all we can give up now. 
/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr orig_addr,
                                  hwaddr *xlat, hwaddr *plen,
                                  MemTxAttrs attrs, int *prot)
{
    MemoryRegionSection *section;
    IOMMUMemoryRegion *iommu_mr;
    IOMMUMemoryRegionClass *imrc;
    IOMMUTLBEntry iotlb;
    int iommu_idx;
    hwaddr addr = orig_addr;
    AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;

    for (;;) {
        section = address_space_translate_internal(d, addr, &addr, plen, false);

        iommu_mr = memory_region_get_iommu(section->mr);
        if (!iommu_mr) {
            break;
        }

        imrc = memory_region_get_iommu_class_nocheck(iommu_mr);

        iommu_idx = imrc->attrs_to_index(iommu_mr, attrs);
        tcg_register_iommu_notifier(cpu, iommu_mr, iommu_idx);
        /* We need all the permissions, so pass IOMMU_NONE so the IOMMU
         * doesn't short-cut its translation table walk.
         */
        iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, iommu_idx);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        /* Update the caller's prot bits to remove permissions the IOMMU
         * is giving us a failure response for.  If we get down to no
         * permissions left at all we can give up now.
         */
        if (!(iotlb.perm & IOMMU_RO)) {
            *prot &= ~(PAGE_READ | PAGE_EXEC);
        }
        if (!(iotlb.perm & IOMMU_WO)) {
            *prot &= ~PAGE_WRITE;
        }

        if (!*prot) {
            goto translate_fail;
        }

        d = flatview_to_dispatch(address_space_to_flatview(iotlb.target_as));
    }

    assert(!memory_region_is_iommu(section->mr));
    *xlat = addr;
    return section;

translate_fail:
    /*
     * We should be given a page-aligned address -- certainly
     * tlb_set_page_with_attrs() does so.  The page offset of xlat
     * is used to index sections[], and PHYS_SECTION_UNASSIGNED = 0.
     * The page portion of xlat will be logged by memory_region_access_valid()
     * when this memory access is rejected, so use the original untranslated
     * physical address.
     */
    assert((orig_addr & ~TARGET_PAGE_MASK) == 0);
    *xlat = orig_addr;
    return &d->map.sections[PHYS_SECTION_UNASSIGNED];
}

void cpu_address_space_init(CPUState *cpu, int asidx,
                            const char *prefix, MemoryRegion *mr)
{
    CPUAddressSpace *newas;
    AddressSpace *as = g_new0(AddressSpace, 1);
    char *as_name;

    assert(mr);
    as_name = g_strdup_printf("%s-%d", prefix, cpu->cpu_index);
    address_space_init(as, mr, as_name);
    g_free(as_name);

    /* Target code should have set num_ases before calling us */
    assert(asidx < cpu->num_ases);

    if (asidx == 0) {
        /* address space 0 gets the convenience alias */
        cpu->as = as;
    }

    /* KVM cannot currently support multiple address spaces. */
    assert(asidx == 0 || !kvm_enabled());

    if (!cpu->cpu_ases) {
        cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
    }

    newas = &cpu->cpu_ases[asidx];
    newas->cpu = cpu;
    newas->as = as;
    if (tcg_enabled()) {
        newas->tcg_as_listener.log_global_after_sync = tcg_log_global_after_sync;
        newas->tcg_as_listener.commit = tcg_commit;
        newas->tcg_as_listener.name = "tcg";
        memory_listener_register(&newas->tcg_as_listener, as);
    }
}

AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
{
    /* Return the AddressSpace corresponding to the specified index */
    return cpu->cpu_ases[asidx].as;
}
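/*
 * Usage sketch (hedged -- the exact wiring varies by target, and the
 * region names here are assumptions): a CPU with a secure and a
 * non-secure view sets num_ases before realize and then registers each
 * address space:
 *
 *     cpu->num_ases = 2;
 *     cpu_address_space_init(cpu, 0, "cpu-memory", nonsecure_mr);
 *     cpu_address_space_init(cpu, 1, "cpu-secure-memory", secure_mr);
 */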
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = qatomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        return block;
    }
    RAMBLOCK_FOREACH(block) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the BQL.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *                                        xxx removed from list
     *                  rcu_read_lock()
     *                  read mru_block
     *                                        mru_block = NULL;
     *                                        call_rcu(reclaim_ramblock, xxx);
     *                  rcu_read_unlock()
     *
     * qatomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}

void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    CPUState *cpu;
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    assert(tcg_enabled());
    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    RCU_READ_LOCK_GUARD();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    CPU_FOREACH(cpu) {
        tlb_reset_dirty(cpu, start1, length);
    }
}

/* Note: start and end must be within the same ram block. */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page, start_page;
    bool dirty = false;
    RAMBlock *ramblock;
    uint64_t mr_offset, mr_size;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    start_page = start >> TARGET_PAGE_BITS;
    page = start_page;

    WITH_RCU_READ_LOCK_GUARD() {
        blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]);
        ramblock = qemu_get_ram_block(start);
        /* Range sanity check on the ramblock */
        assert(start >= ramblock->offset &&
               start + length <= ramblock->offset + ramblock->used_length);

        while (page < end) {
            unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
            unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
            unsigned long num = MIN(end - page,
                                    DIRTY_MEMORY_BLOCK_SIZE - offset);

            dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
                                                  offset, num);
            page += num;
        }

        mr_offset = (ram_addr_t)(start_page << TARGET_PAGE_BITS) - ramblock->offset;
        mr_size = (end - start_page) << TARGET_PAGE_BITS;
        memory_region_clear_dirty_bitmap(ramblock->mr, mr_offset, mr_size);
    }

    if (dirty) {
        cpu_physical_memory_dirty_bits_cleared(start, length);
    }

    return dirty;
}
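/*
 * Indexing example (constants assumed for illustration; the block size
 * is DIRTY_MEMORY_BLOCK_SIZE pages, 2^21 in current builds): page number
 * 0x250000 maps to blocks->blocks[1] at bit offset 0x50000, and a range
 * that crosses the 0x200000 boundary is split into one
 * bitmap_test_and_clear_atomic() call per block.
 */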
DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty
    (MemoryRegion *mr, hwaddr offset, hwaddr length, unsigned client)
{
    DirtyMemoryBlocks *blocks;
    ram_addr_t start = memory_region_get_ram_addr(mr) + offset;
    unsigned long align = 1UL << (TARGET_PAGE_BITS + BITS_PER_LEVEL);
    ram_addr_t first = QEMU_ALIGN_DOWN(start, align);
    ram_addr_t last = QEMU_ALIGN_UP(start + length, align);
    DirtyBitmapSnapshot *snap;
    unsigned long page, end, dest;

    snap = g_malloc0(sizeof(*snap) +
                     ((last - first) >> (TARGET_PAGE_BITS + 3)));
    snap->start = first;
    snap->end = last;

    page = first >> TARGET_PAGE_BITS;
    end = last >> TARGET_PAGE_BITS;
    dest = 0;

    WITH_RCU_READ_LOCK_GUARD() {
        blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]);

        while (page < end) {
            unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
            unsigned long ofs = page % DIRTY_MEMORY_BLOCK_SIZE;
            unsigned long num = MIN(end - page,
                                    DIRTY_MEMORY_BLOCK_SIZE - ofs);

            assert(QEMU_IS_ALIGNED(ofs, (1 << BITS_PER_LEVEL)));
            assert(QEMU_IS_ALIGNED(num, (1 << BITS_PER_LEVEL)));
            ofs >>= BITS_PER_LEVEL;

            bitmap_copy_and_clear_atomic(snap->dirty + dest,
                                         blocks->blocks[idx] + ofs,
                                         num);
            page += num;
            dest += num >> BITS_PER_LEVEL;
        }
    }

    cpu_physical_memory_dirty_bits_cleared(start, length);

    memory_region_clear_dirty_bitmap(mr, offset, length);

    return snap;
}

bool cpu_physical_memory_snapshot_get_dirty(DirtyBitmapSnapshot *snap,
                                            ram_addr_t start,
                                            ram_addr_t length)
{
    unsigned long page, end;

    assert(start >= snap->start);
    assert(start + length <= snap->end);

    end = TARGET_PAGE_ALIGN(start + length - snap->start) >> TARGET_PAGE_BITS;
    page = (start - snap->start) >> TARGET_PAGE_BITS;

    while (page < end) {
        if (test_bit(page, snap->dirty)) {
            return true;
        }
        page++;
    }
    return false;
}

/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section)
{
    AddressSpaceDispatch *d = flatview_to_dispatch(section->fv);
    return section - d->map.sections;
}

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(FlatView *fv, hwaddr base);

static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    bool have_sub_page = mr->subpage;

    memory_region_unref(mr);

    if (have_sub_page) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

static void register_subpage(FlatView *fv, MemoryRegionSection *section)
{
    AddressSpaceDispatch *d = flatview_to_dispatch(fv);
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d, base);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(fv, base);
        subsection.fv = fv;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}
static void register_multipage(FlatView *fv,
                               MemoryRegionSection *section)
{
    AddressSpaceDispatch *d = flatview_to_dispatch(fv);
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

/*
 * The range in *section* may look like this:
 *
 *      |s|PPPPPPP|s|
 *
 * where s stands for subpage and P for page.
 */
void flatview_add_to_dispatch(FlatView *fv, MemoryRegionSection *section)
{
    MemoryRegionSection remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    /* register first subpage */
    if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(remain.offset_within_address_space)
                        - remain.offset_within_address_space;

        MemoryRegionSection now = remain;
        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(fv, &now);
        if (int128_eq(remain.size, now.size)) {
            return;
        }
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
    }

    /* register whole pages */
    if (int128_ge(remain.size, page_size)) {
        MemoryRegionSection now = remain;
        now.size = int128_and(now.size, int128_neg(page_size));
        register_multipage(fv, &now);
        if (int128_eq(remain.size, now.size)) {
            return;
        }
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
    }

    /* register last subpage */
    register_subpage(fv, &remain);
}
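/*
 * Worked example (addresses assumed for illustration, with 4 KiB target
 * pages): a section covering [0x1800, 0x5400) is dispatched in three
 * pieces -- a head subpage [0x1800, 0x2000), three whole pages
 * [0x2000, 0x5000) via register_multipage(), and a tail subpage
 * [0x5000, 0x5400) via register_subpage().
 */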
"ro" : "rw"); 1142 1143 g_free(psize); 1144 } 1145 1146 return buf; 1147 } 1148 1149 static int find_min_backend_pagesize(Object *obj, void *opaque) 1150 { 1151 long *hpsize_min = opaque; 1152 1153 if (object_dynamic_cast(obj, TYPE_MEMORY_BACKEND)) { 1154 HostMemoryBackend *backend = MEMORY_BACKEND(obj); 1155 long hpsize = host_memory_backend_pagesize(backend); 1156 1157 if (host_memory_backend_is_mapped(backend) && (hpsize < *hpsize_min)) { 1158 *hpsize_min = hpsize; 1159 } 1160 } 1161 1162 return 0; 1163 } 1164 1165 static int find_max_backend_pagesize(Object *obj, void *opaque) 1166 { 1167 long *hpsize_max = opaque; 1168 1169 if (object_dynamic_cast(obj, TYPE_MEMORY_BACKEND)) { 1170 HostMemoryBackend *backend = MEMORY_BACKEND(obj); 1171 long hpsize = host_memory_backend_pagesize(backend); 1172 1173 if (host_memory_backend_is_mapped(backend) && (hpsize > *hpsize_max)) { 1174 *hpsize_max = hpsize; 1175 } 1176 } 1177 1178 return 0; 1179 } 1180 1181 /* 1182 * TODO: We assume right now that all mapped host memory backends are 1183 * used as RAM, however some might be used for different purposes. 1184 */ 1185 long qemu_minrampagesize(void) 1186 { 1187 long hpsize = LONG_MAX; 1188 Object *memdev_root = object_resolve_path("/objects", NULL); 1189 1190 object_child_foreach(memdev_root, find_min_backend_pagesize, &hpsize); 1191 return hpsize; 1192 } 1193 1194 long qemu_maxrampagesize(void) 1195 { 1196 long pagesize = 0; 1197 Object *memdev_root = object_resolve_path("/objects", NULL); 1198 1199 object_child_foreach(memdev_root, find_max_backend_pagesize, &pagesize); 1200 return pagesize; 1201 } 1202 1203 #ifdef CONFIG_POSIX 1204 static int64_t get_file_size(int fd) 1205 { 1206 int64_t size; 1207 #if defined(__linux__) 1208 struct stat st; 1209 1210 if (fstat(fd, &st) < 0) { 1211 return -errno; 1212 } 1213 1214 /* Special handling for devdax character devices */ 1215 if (S_ISCHR(st.st_mode)) { 1216 g_autofree char *subsystem_path = NULL; 1217 g_autofree char *subsystem = NULL; 1218 1219 subsystem_path = g_strdup_printf("/sys/dev/char/%d:%d/subsystem", 1220 major(st.st_rdev), minor(st.st_rdev)); 1221 subsystem = g_file_read_link(subsystem_path, NULL); 1222 1223 if (subsystem && g_str_has_suffix(subsystem, "/dax")) { 1224 g_autofree char *size_path = NULL; 1225 g_autofree char *size_str = NULL; 1226 1227 size_path = g_strdup_printf("/sys/dev/char/%d:%d/size", 1228 major(st.st_rdev), minor(st.st_rdev)); 1229 1230 if (g_file_get_contents(size_path, &size_str, NULL, NULL)) { 1231 return g_ascii_strtoll(size_str, NULL, 0); 1232 } 1233 } 1234 } 1235 #endif /* defined(__linux__) */ 1236 1237 /* st.st_size may be zero for special files yet lseek(2) works */ 1238 size = lseek(fd, 0, SEEK_END); 1239 if (size < 0) { 1240 return -errno; 1241 } 1242 return size; 1243 } 1244 1245 static int64_t get_file_align(int fd) 1246 { 1247 int64_t align = -1; 1248 #if defined(__linux__) && defined(CONFIG_LIBDAXCTL) 1249 struct stat st; 1250 1251 if (fstat(fd, &st) < 0) { 1252 return -errno; 1253 } 1254 1255 /* Special handling for devdax character devices */ 1256 if (S_ISCHR(st.st_mode)) { 1257 g_autofree char *path = NULL; 1258 g_autofree char *rpath = NULL; 1259 struct daxctl_ctx *ctx; 1260 struct daxctl_region *region; 1261 int rc = 0; 1262 1263 path = g_strdup_printf("/sys/dev/char/%d:%d", 1264 major(st.st_rdev), minor(st.st_rdev)); 1265 rpath = realpath(path, NULL); 1266 if (!rpath) { 1267 return -errno; 1268 } 1269 1270 rc = daxctl_new(&ctx); 1271 if (rc) { 1272 return -1; 1273 } 1274 1275 daxctl_region_foreach(ctx, 
static int file_ram_open(const char *path,
                         const char *region_name,
                         bool readonly,
                         bool *created)
{
    char *filename;
    char *sanitized_name;
    char *c;
    int fd = -1;

    *created = false;
    for (;;) {
        fd = open(path, readonly ? O_RDONLY : O_RDWR);
        if (fd >= 0) {
            /*
             * open(O_RDONLY) won't fail with EISDIR.  Check manually if we
             * opened a directory and fail similarly to how we fail ENOENT
             * in readonly mode.  Note that mkstemp() would imply O_RDWR.
             */
            if (readonly) {
                struct stat file_stat;

                if (fstat(fd, &file_stat)) {
                    close(fd);
                    if (errno == EINTR) {
                        continue;
                    }
                    return -errno;
                } else if (S_ISDIR(file_stat.st_mode)) {
                    close(fd);
                    return -EISDIR;
                }
            }
            /* @path names an existing file, use it */
            break;
        }
        if (errno == ENOENT) {
            if (readonly) {
                /* Refuse to create new, readonly files. */
                return -ENOENT;
            }
            /* @path names a file that doesn't exist, create it */
            fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644);
            if (fd >= 0) {
                *created = true;
                break;
            }
        } else if (errno == EISDIR) {
            /* @path names a directory, create a file there */
            /* Make name safe to use with mkstemp by replacing '/' with '_'. */
            sanitized_name = g_strdup(region_name);
            for (c = sanitized_name; *c != '\0'; c++) {
                if (*c == '/') {
                    *c = '_';
                }
            }

            filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                                       sanitized_name);
            g_free(sanitized_name);

            fd = mkstemp(filename);
            if (fd >= 0) {
                unlink(filename);
                g_free(filename);
                break;
            }
            g_free(filename);
        }
        if (errno != EEXIST && errno != EINTR) {
            return -errno;
        }
        /*
         * Try again on EINTR and EEXIST.  The latter happens when
         * something else creates the file between our two open().
         */
    }

    return fd;
}
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            int fd,
                            bool truncate,
                            off_t offset,
                            Error **errp)
{
    uint32_t qemu_map_flags;
    void *area;

    block->page_size = qemu_fd_getpagesize(fd);
    if (block->mr->align % block->page_size) {
        error_setg(errp, "alignment 0x%" PRIx64
                   " must be a multiple of page size 0x%zx",
                   block->mr->align, block->page_size);
        return NULL;
    } else if (block->mr->align && !is_power_of_2(block->mr->align)) {
        error_setg(errp, "alignment 0x%" PRIx64
                   " must be a power of two", block->mr->align);
        return NULL;
    } else if (offset % block->page_size) {
        error_setg(errp, "offset 0x%" PRIx64
                   " must be a multiple of page size 0x%zx",
                   offset, block->page_size);
        return NULL;
    }
    block->mr->align = MAX(block->page_size, block->mr->align);
#if defined(__s390x__)
    if (kvm_enabled()) {
        block->mr->align = MAX(block->mr->align, QEMU_VMALLOC_ALIGN);
    }
#endif

    if (memory < block->page_size) {
        error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
                   "or larger than page size 0x%zx",
                   memory, block->page_size);
        return NULL;
    }

    memory = ROUND_UP(memory, block->page_size);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     *
     * Do not truncate the non-empty backend file to avoid corrupting
     * the existing data in the file.  Disabling shrinking is not
     * enough.  For example, the current vNVDIMM implementation stores
     * the guest NVDIMM labels at the end of the backend file.  If the
     * backend file is later extended, QEMU will not be able to find
     * those labels.  Therefore, extending the non-empty backend file
     * is disabled as well.
     */
    if (truncate && ftruncate(fd, offset + memory)) {
        perror("ftruncate");
    }

    qemu_map_flags = (block->flags & RAM_READONLY) ? QEMU_MAP_READONLY : 0;
    qemu_map_flags |= (block->flags & RAM_SHARED) ? QEMU_MAP_SHARED : 0;
    qemu_map_flags |= (block->flags & RAM_PMEM) ? QEMU_MAP_SYNC : 0;
    qemu_map_flags |= (block->flags & RAM_NORESERVE) ? QEMU_MAP_NORESERVE : 0;
    area = qemu_ram_mmap(fd, memory, block->mr->align, qemu_map_flags, offset);
    if (area == MAP_FAILED) {
        error_setg_errno(errp, errno,
                         "unable to map backing store for guest RAM");
        return NULL;
    }

    block->fd = fd;
    block->fd_offset = offset;
    return area;
}
#endif
/* Allocate space within the ram_addr_t space that governs the
 * dirty bitmaps.
 * Called with the ramlist lock held.
 */
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out the same offset multiple times */

    if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
        return 0;
    }

    RAMBLOCK_FOREACH(block) {
        ram_addr_t candidate, next = RAM_ADDR_MAX;

        /* Align blocks to start on a 'long' in the bitmap
         * which makes the bitmap sync'ing take the fast path.
         */
        candidate = block->offset + block->max_length;
        candidate = ROUND_UP(candidate, BITS_PER_LONG << TARGET_PAGE_BITS);

        /* Search for the closest following block
         * and find the gap.
         */
        RAMBLOCK_FOREACH(next_block) {
            if (next_block->offset >= candidate) {
                next = MIN(next, next_block->offset);
            }
        }

        /* If it fits, remember our place and remember the size
         * of the gap, but keep going so that we might find a smaller
         * gap to fill, thus avoiding fragmentation.
         */
        if (next - candidate >= size && next - candidate < mingap) {
            offset = candidate;
            mingap = next - candidate;
        }

        trace_find_ram_offset_loop(size, candidate, offset, next, mingap);
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    trace_find_ram_offset(size, offset);

    return offset;
}

static unsigned long last_ram_page(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    RCU_READ_LOCK_GUARD();
    RAMBLOCK_FOREACH(block) {
        last = MAX(last, block->offset + block->max_length);
    }
    return last >> TARGET_PAGE_BITS;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    if (!machine_dump_guest_core(current_machine)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                    "but dump_guest_core=off specified\n");
        }
    }
}

const char *qemu_ram_get_idstr(RAMBlock *rb)
{
    return rb->idstr;
}

void *qemu_ram_get_host_addr(RAMBlock *rb)
{
    return rb->host;
}

ram_addr_t qemu_ram_get_offset(RAMBlock *rb)
{
    return rb->offset;
}

ram_addr_t qemu_ram_get_used_length(RAMBlock *rb)
{
    return rb->used_length;
}

ram_addr_t qemu_ram_get_max_length(RAMBlock *rb)
{
    return rb->max_length;
}

bool qemu_ram_is_shared(RAMBlock *rb)
{
    return rb->flags & RAM_SHARED;
}

bool qemu_ram_is_noreserve(RAMBlock *rb)
{
    return rb->flags & RAM_NORESERVE;
}

/* Note: Only set at the start of postcopy */
bool qemu_ram_is_uf_zeroable(RAMBlock *rb)
{
    return rb->flags & RAM_UF_ZEROPAGE;
}

void qemu_ram_set_uf_zeroable(RAMBlock *rb)
{
    rb->flags |= RAM_UF_ZEROPAGE;
}

bool qemu_ram_is_migratable(RAMBlock *rb)
{
    return rb->flags & RAM_MIGRATABLE;
}

void qemu_ram_set_migratable(RAMBlock *rb)
{
    rb->flags |= RAM_MIGRATABLE;
}

void qemu_ram_unset_migratable(RAMBlock *rb)
{
    rb->flags &= ~RAM_MIGRATABLE;
}

bool qemu_ram_is_named_file(RAMBlock *rb)
{
    return rb->flags & RAM_NAMED_FILE;
}

int qemu_ram_get_fd(RAMBlock *rb)
{
    return rb->fd;
}
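/*
 * Accessor usage sketch (a hypothetical caller, in the spirit of the
 * migration code; none of this appears in the original file):
 *
 *     if (qemu_ram_is_migratable(rb) && !qemu_ram_is_shared(rb)) {
 *         size_t pages = qemu_ram_get_used_length(rb) /
 *                        qemu_ram_pagesize(rb);
 *         ...
 *     }
 */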
/* Called with the BQL held. */
void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev)
{
    RAMBlock *block;

    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    RCU_READ_LOCK_GUARD();
    RAMBLOCK_FOREACH(block) {
        if (block != new_block &&
            !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
}

/* Called with the BQL held. */
void qemu_ram_unset_idstr(RAMBlock *block)
{
    /* FIXME: arch_init.c assumes that this is not called throughout
     * migration.  Ignore the problem since hot-unplug during migration
     * does not work anyway.
     */
    if (block) {
        memset(block->idstr, 0, sizeof(block->idstr));
    }
}

size_t qemu_ram_pagesize(RAMBlock *rb)
{
    return rb->page_size;
}

/* Returns the largest size of page in use */
size_t qemu_ram_pagesize_largest(void)
{
    RAMBlock *block;
    size_t largest = 0;

    RAMBLOCK_FOREACH(block) {
        largest = MAX(largest, qemu_ram_pagesize(block));
    }

    return largest;
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    if (!machine_mem_merge(current_machine)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

/*
 * Resizing RAM while migrating can result in the migration being canceled.
 * Care has to be taken if the guest might have already detected the memory.
 *
 * As the memory core doesn't know how memory is accessed, it is up to the
 * resize callback to update device state and/or add assertions to detect
 * misuse, if necessary.
 */
int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp)
{
    const ram_addr_t oldsize = block->used_length;
    const ram_addr_t unaligned_size = newsize;

    assert(block);

    newsize = TARGET_PAGE_ALIGN(newsize);
    newsize = REAL_HOST_PAGE_ALIGN(newsize);

    if (block->used_length == newsize) {
        /*
         * We don't have to resize the ram block (which only knows aligned
         * sizes), however, we have to notify if the unaligned size changed.
         */
        if (unaligned_size != memory_region_size(block->mr)) {
            memory_region_set_size(block->mr, unaligned_size);
            if (block->resized) {
                block->resized(block->idstr, unaligned_size, block->host);
            }
        }
        return 0;
    }

    if (!(block->flags & RAM_RESIZEABLE)) {
        error_setg_errno(errp, EINVAL,
                         "Size mismatch: %s: 0x" RAM_ADDR_FMT
                         " != 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->used_length);
        return -EINVAL;
    }

    if (block->max_length < newsize) {
        error_setg_errno(errp, EINVAL,
                         "Size too large: %s: 0x" RAM_ADDR_FMT
                         " > 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->max_length);
        return -EINVAL;
    }

    /* Notify before modifying the ram block and touching the bitmaps. */
    if (block->host) {
        ram_block_notify_resize(block->host, oldsize, newsize);
    }

    cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
    block->used_length = newsize;
    cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
                                        DIRTY_CLIENTS_ALL);
    memory_region_set_size(block->mr, unaligned_size);
    if (block->resized) {
        block->resized(block->idstr, unaligned_size, block->host);
    }
    return 0;
}
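/*
 * Usage sketch for the resize path (hedged; real callers add their own
 * bookkeeping around it):
 *
 *     Error *err = NULL;
 *
 *     if (qemu_ram_resize(block, new_bytes, &err) < 0) {
 *         error_report_err(err);
 *     }
 *
 * Blocks created without RAM_RESIZEABLE, or asked to grow past their
 * max_length, fail with -EINVAL as seen above.
 */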
/*
 * Trigger sync on the given ram block for range [start, start + length]
 * with the backing store if one is available.
 * Otherwise no-op.
 * Note: this is supposed to be a synchronous op.
 */
void qemu_ram_msync(RAMBlock *block, ram_addr_t start, ram_addr_t length)
{
    /* The requested range should fit within the block range */
    g_assert((start + length) <= block->used_length);

#ifdef CONFIG_LIBPMEM
    /* The lack of support for pmem should not block the sync */
    if (ramblock_is_pmem(block)) {
        void *addr = ramblock_ptr(block, start);
        pmem_persist(addr, length);
        return;
    }
#endif
    if (block->fd >= 0) {
        /*
         * In case there is no support for PMEM, or the memory has not been
         * specified as persistent (or is not persistent memory at all),
         * fall back to msync().  Less optimal, but it still achieves the
         * same goal.
         */
        void *addr = ramblock_ptr(block, start);
        if (qemu_msync(addr, length, block->fd)) {
            warn_report("%s: failed to sync memory range: start: "
                        RAM_ADDR_FMT " length: " RAM_ADDR_FMT,
                        __func__, start, length);
        }
    }
}

/* Called with ram_list.mutex held */
static void dirty_memory_extend(ram_addr_t old_ram_size,
                                ram_addr_t new_ram_size)
{
    ram_addr_t old_num_blocks = DIV_ROUND_UP(old_ram_size,
                                             DIRTY_MEMORY_BLOCK_SIZE);
    ram_addr_t new_num_blocks = DIV_ROUND_UP(new_ram_size,
                                             DIRTY_MEMORY_BLOCK_SIZE);
    int i;

    /* Only need to extend if block count increased */
    if (new_num_blocks <= old_num_blocks) {
        return;
    }

    for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
        DirtyMemoryBlocks *old_blocks;
        DirtyMemoryBlocks *new_blocks;
        int j;

        old_blocks = qatomic_rcu_read(&ram_list.dirty_memory[i]);
        new_blocks = g_malloc(sizeof(*new_blocks) +
                              sizeof(new_blocks->blocks[0]) * new_num_blocks);

        if (old_num_blocks) {
            memcpy(new_blocks->blocks, old_blocks->blocks,
                   old_num_blocks * sizeof(old_blocks->blocks[0]));
        }

        for (j = old_num_blocks; j < new_num_blocks; j++) {
            new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE);
        }

        qatomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);

        if (old_blocks) {
            g_free_rcu(old_blocks, rcu);
        }
    }
}

static void ram_block_add(RAMBlock *new_block, Error **errp)
{
    const bool noreserve = qemu_ram_is_noreserve(new_block);
    const bool shared = qemu_ram_is_shared(new_block);
    RAMBlock *block;
    RAMBlock *last_block = NULL;
    bool free_on_error = false;
    ram_addr_t old_ram_size, new_ram_size;
    Error *err = NULL;

    old_ram_size = last_ram_page();

    qemu_mutex_lock_ramlist();
    new_block->offset = find_ram_offset(new_block->max_length);

    if (!new_block->host) {
        if (xen_enabled()) {
            xen_ram_alloc(new_block->offset, new_block->max_length,
                          new_block->mr, &err);
            if (err) {
                error_propagate(errp, err);
                qemu_mutex_unlock_ramlist();
                return;
            }
        } else {
            new_block->host = qemu_anon_ram_alloc(new_block->max_length,
                                                  &new_block->mr->align,
                                                  shared, noreserve);
            if (!new_block->host) {
                error_setg_errno(errp, errno,
                                 "cannot set up guest memory '%s'",
                                 memory_region_name(new_block->mr));
                qemu_mutex_unlock_ramlist();
                return;
            }
            memory_try_enable_merging(new_block->host, new_block->max_length);
            free_on_error = true;
        }
    }

    if (new_block->flags & RAM_GUEST_MEMFD) {
        assert(kvm_enabled());
        assert(new_block->guest_memfd < 0);

        if (ram_block_discard_require(true) < 0) {
            error_setg_errno(errp, errno,
                             "cannot set up private guest memory: discard currently blocked");
            error_append_hint(errp, "Are you using assigned devices?\n");
            goto out_free;
        }

        new_block->guest_memfd = kvm_create_guest_memfd(new_block->max_length,
                                                        0, errp);
        if (new_block->guest_memfd < 0) {
            qemu_mutex_unlock_ramlist();
            goto out_free;
        }
    }

    new_ram_size = MAX(old_ram_size,
                       (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
    if (new_ram_size > old_ram_size) {
        dirty_memory_extend(old_ram_size, new_ram_size);
    }
    /* Keep the list sorted from biggest to smallest block.  Unlike QTAILQ,
     * QLIST (which has an RCU-friendly variant) does not have insertion at
     * tail, so save the last element in last_block.
     */
    RAMBLOCK_FOREACH(block) {
        last_block = block;
        if (block->max_length < new_block->max_length) {
            break;
        }
    }
    if (block) {
        QLIST_INSERT_BEFORE_RCU(block, new_block, next);
    } else if (last_block) {
        QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
    } else { /* list is empty */
        QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    /* Write list before version */
    smp_wmb();
    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    cpu_physical_memory_set_dirty_range(new_block->offset,
                                        new_block->used_length,
                                        DIRTY_CLIENTS_ALL);

    if (new_block->host) {
        qemu_ram_setup_dump(new_block->host, new_block->max_length);
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
        /*
         * MADV_DONTFORK is also needed by KVM in the absence of a synchronous
         * MMU.  Configure it unless the machine is a qtest server, in which
         * case KVM is not used and it may be forked (e.g. for fuzzing
         * purposes).
         */
        if (!qtest_enabled()) {
            qemu_madvise(new_block->host, new_block->max_length,
                         QEMU_MADV_DONTFORK);
        }
        ram_block_notify_add(new_block->host, new_block->used_length,
                             new_block->max_length);
    }
    return;

out_free:
    if (free_on_error) {
        qemu_anon_ram_free(new_block->host, new_block->max_length);
        new_block->host = NULL;
    }
}

#ifdef CONFIG_POSIX
RAMBlock *qemu_ram_alloc_from_fd(ram_addr_t size, MemoryRegion *mr,
                                 uint32_t ram_flags, int fd, off_t offset,
                                 Error **errp)
{
    RAMBlock *new_block;
    Error *local_err = NULL;
    int64_t file_size, file_align;

    /* Only these ram flags are supported for now. */
*/ 1933 assert((ram_flags & ~(RAM_SHARED | RAM_PMEM | RAM_NORESERVE | 1934 RAM_PROTECTED | RAM_NAMED_FILE | RAM_READONLY | 1935 RAM_READONLY_FD | RAM_GUEST_MEMFD)) == 0); 1936 1937 if (xen_enabled()) { 1938 error_setg(errp, "-mem-path not supported with Xen"); 1939 return NULL; 1940 } 1941 1942 if (kvm_enabled() && !kvm_has_sync_mmu()) { 1943 error_setg(errp, 1944 "host lacks kvm mmu notifiers, -mem-path unsupported"); 1945 return NULL; 1946 } 1947 1948 size = TARGET_PAGE_ALIGN(size); 1949 size = REAL_HOST_PAGE_ALIGN(size); 1950 1951 file_size = get_file_size(fd); 1952 if (file_size > offset && file_size < (offset + size)) { 1953 error_setg(errp, "backing store size 0x%" PRIx64 1954 " does not match 'size' option 0x" RAM_ADDR_FMT, 1955 file_size, size); 1956 return NULL; 1957 } 1958 1959 file_align = get_file_align(fd); 1960 if (file_align > 0 && file_align > mr->align) { 1961 error_setg(errp, "backing store align 0x%" PRIx64 1962 " is larger than 'align' option 0x%" PRIx64, 1963 file_align, mr->align); 1964 return NULL; 1965 } 1966 1967 new_block = g_malloc0(sizeof(*new_block)); 1968 new_block->mr = mr; 1969 new_block->used_length = size; 1970 new_block->max_length = size; 1971 new_block->flags = ram_flags; 1972 new_block->guest_memfd = -1; 1973 new_block->host = file_ram_alloc(new_block, size, fd, !file_size, offset, 1974 errp); 1975 if (!new_block->host) { 1976 g_free(new_block); 1977 return NULL; 1978 } 1979 1980 ram_block_add(new_block, &local_err); 1981 if (local_err) { 1982 g_free(new_block); 1983 error_propagate(errp, local_err); 1984 return NULL; 1985 } 1986 return new_block; 1987 1988 } 1989 1990 1991 RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr, 1992 uint32_t ram_flags, const char *mem_path, 1993 off_t offset, Error **errp) 1994 { 1995 int fd; 1996 bool created; 1997 RAMBlock *block; 1998 1999 fd = file_ram_open(mem_path, memory_region_name(mr), 2000 !!(ram_flags & RAM_READONLY_FD), &created); 2001 if (fd < 0) { 2002 error_setg_errno(errp, -fd, "can't open backing store %s for guest RAM", 2003 mem_path); 2004 if (!(ram_flags & RAM_READONLY_FD) && !(ram_flags & RAM_SHARED) && 2005 fd == -EACCES) { 2006 /* 2007 * If we can open the file R/O (note: will never create a new file) 2008 * and we are dealing with a private mapping, there are still ways 2009 * to consume such files and get RAM instead of ROM. 
2010 */ 2011 fd = file_ram_open(mem_path, memory_region_name(mr), true, 2012 &created); 2013 if (fd < 0) { 2014 return NULL; 2015 } 2016 assert(!created); 2017 close(fd); 2018 error_append_hint(errp, "Consider opening the backing store" 2019 " read-only but still creating writable RAM using" 2020 " '-object memory-backend-file,readonly=on,rom=off...'" 2021 " (see \"VM templating\" documentation)\n"); 2022 } 2023 return NULL; 2024 } 2025 2026 block = qemu_ram_alloc_from_fd(size, mr, ram_flags, fd, offset, errp); 2027 if (!block) { 2028 if (created) { 2029 unlink(mem_path); 2030 } 2031 close(fd); 2032 return NULL; 2033 } 2034 2035 return block; 2036 } 2037 #endif 2038 2039 static 2040 RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size, 2041 void (*resized)(const char*, 2042 uint64_t length, 2043 void *host), 2044 void *host, uint32_t ram_flags, 2045 MemoryRegion *mr, Error **errp) 2046 { 2047 RAMBlock *new_block; 2048 Error *local_err = NULL; 2049 int align; 2050 2051 assert((ram_flags & ~(RAM_SHARED | RAM_RESIZEABLE | RAM_PREALLOC | 2052 RAM_NORESERVE | RAM_GUEST_MEMFD)) == 0); 2053 assert(!host ^ (ram_flags & RAM_PREALLOC)); 2054 2055 align = qemu_real_host_page_size(); 2056 align = MAX(align, TARGET_PAGE_SIZE); 2057 size = ROUND_UP(size, align); 2058 max_size = ROUND_UP(max_size, align); 2059 2060 new_block = g_malloc0(sizeof(*new_block)); 2061 new_block->mr = mr; 2062 new_block->resized = resized; 2063 new_block->used_length = size; 2064 new_block->max_length = max_size; 2065 assert(max_size >= size); 2066 new_block->fd = -1; 2067 new_block->guest_memfd = -1; 2068 new_block->page_size = qemu_real_host_page_size(); 2069 new_block->host = host; 2070 new_block->flags = ram_flags; 2071 ram_block_add(new_block, &local_err); 2072 if (local_err) { 2073 g_free(new_block); 2074 error_propagate(errp, local_err); 2075 return NULL; 2076 } 2077 return new_block; 2078 } 2079 2080 RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host, 2081 MemoryRegion *mr, Error **errp) 2082 { 2083 return qemu_ram_alloc_internal(size, size, NULL, host, RAM_PREALLOC, mr, 2084 errp); 2085 } 2086 2087 RAMBlock *qemu_ram_alloc(ram_addr_t size, uint32_t ram_flags, 2088 MemoryRegion *mr, Error **errp) 2089 { 2090 assert((ram_flags & ~(RAM_SHARED | RAM_NORESERVE | RAM_GUEST_MEMFD)) == 0); 2091 return qemu_ram_alloc_internal(size, size, NULL, NULL, ram_flags, mr, errp); 2092 } 2093 2094 RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz, 2095 void (*resized)(const char*, 2096 uint64_t length, 2097 void *host), 2098 MemoryRegion *mr, Error **errp) 2099 { 2100 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, 2101 RAM_RESIZEABLE, mr, errp); 2102 } 2103 2104 static void reclaim_ramblock(RAMBlock *block) 2105 { 2106 if (block->flags & RAM_PREALLOC) { 2107 ; 2108 } else if (xen_enabled()) { 2109 xen_invalidate_map_cache_entry(block->host); 2110 #ifndef _WIN32 2111 } else if (block->fd >= 0) { 2112 qemu_ram_munmap(block->fd, block->host, block->max_length); 2113 close(block->fd); 2114 #endif 2115 } else { 2116 qemu_anon_ram_free(block->host, block->max_length); 2117 } 2118 2119 if (block->guest_memfd >= 0) { 2120 close(block->guest_memfd); 2121 ram_block_discard_require(false); 2122 } 2123 2124 g_free(block); 2125 } 2126 2127 void qemu_ram_free(RAMBlock *block) 2128 { 2129 if (!block) { 2130 return; 2131 } 2132 2133 if (block->host) { 2134 ram_block_notify_remove(block->host, block->used_length, 2135 block->max_length); 2136 } 2137 2138 qemu_mutex_lock_ramlist(); 2139 
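/* Unlink the block under the ramlist lock; RCU readers still traversing the list keep a consistent view, and the memory is only reclaimed after a grace period via call_rcu() below. */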
QLIST_REMOVE_RCU(block, next); 2140 ram_list.mru_block = NULL; 2141 /* Write list before version */ 2142 smp_wmb(); 2143 ram_list.version++; 2144 call_rcu(block, reclaim_ramblock, rcu); 2145 qemu_mutex_unlock_ramlist(); 2146 } 2147 2148 #ifndef _WIN32 2149 void qemu_ram_remap(ram_addr_t addr, ram_addr_t length) 2150 { 2151 RAMBlock *block; 2152 ram_addr_t offset; 2153 int flags; 2154 void *area, *vaddr; 2155 int prot; 2156 2157 RAMBLOCK_FOREACH(block) { 2158 offset = addr - block->offset; 2159 if (offset < block->max_length) { 2160 vaddr = ramblock_ptr(block, offset); 2161 if (block->flags & RAM_PREALLOC) { 2162 ; 2163 } else if (xen_enabled()) { 2164 abort(); 2165 } else { 2166 flags = MAP_FIXED; 2167 flags |= block->flags & RAM_SHARED ? 2168 MAP_SHARED : MAP_PRIVATE; 2169 flags |= block->flags & RAM_NORESERVE ? MAP_NORESERVE : 0; 2170 prot = PROT_READ; 2171 prot |= block->flags & RAM_READONLY ? 0 : PROT_WRITE; 2172 if (block->fd >= 0) { 2173 area = mmap(vaddr, length, prot, flags, block->fd, 2174 offset + block->fd_offset); 2175 } else { 2176 flags |= MAP_ANONYMOUS; 2177 area = mmap(vaddr, length, prot, flags, -1, 0); 2178 } 2179 if (area != vaddr) { 2180 error_report("Could not remap addr: " 2181 RAM_ADDR_FMT "@" RAM_ADDR_FMT "", 2182 length, addr); 2183 exit(1); 2184 } 2185 memory_try_enable_merging(vaddr, length); 2186 qemu_ram_setup_dump(vaddr, length); 2187 } 2188 } 2189 } 2190 } 2191 #endif /* !_WIN32 */ 2192 2193 /* 2194 * Return a host pointer to the guest's RAM. 2195 * For Xen, foreign mappings get created if they don't already exist. 2196 * 2197 * @block: block for the RAM to lookup (optional and may be NULL). 2198 * @addr: address within the memory region. 2199 * @size: pointer to requested size (optional and may be NULL). 2200 * size may get modified and return a value smaller than 2201 * what was requested. 2202 * @lock: whether to lock the mapping in xen-mapcache until invalidated. 2203 * @is_write: hint whether to map RW or RO in the xen-mapcache. 2204 * (optional and may always be set to true). 2205 * 2206 * Called within RCU critical section. 2207 */ 2208 static void *qemu_ram_ptr_length(RAMBlock *block, ram_addr_t addr, 2209 hwaddr *size, bool lock, 2210 bool is_write) 2211 { 2212 hwaddr len = 0; 2213 2214 if (size && *size == 0) { 2215 return NULL; 2216 } 2217 2218 if (block == NULL) { 2219 block = qemu_get_ram_block(addr); 2220 addr -= block->offset; 2221 } 2222 if (size) { 2223 *size = MIN(*size, block->max_length - addr); 2224 len = *size; 2225 } 2226 2227 if (xen_enabled() && block->host == NULL) { 2228 /* We need to check if the requested address is in the RAM 2229 * because we don't want to map the entire memory in QEMU. 2230 * In that case just map the requested area. 2231 */ 2232 if (block->offset == 0) { 2233 return xen_map_cache(block->mr, addr, len, lock, lock, 2234 is_write); 2235 } 2236 2237 block->host = xen_map_cache(block->mr, block->offset, 2238 block->max_length, 1, 2239 lock, is_write); 2240 } 2241 2242 return ramblock_ptr(block, addr); 2243 } 2244 2245 /* 2246 * Return a host pointer to ram allocated with qemu_ram_alloc. 2247 * This should not be used for general purpose DMA. Use address_space_map 2248 * or address_space_rw instead. For local memory (e.g. video ram) that the 2249 * device owns, use memory_region_get_ram_ptr. 2250 * 2251 * Called within RCU critical section. 
2252 */ 2253 void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr) 2254 { 2255 return qemu_ram_ptr_length(ram_block, addr, NULL, false, true); 2256 } 2257 2258 /* Return the offset of a host pointer within a ramblock */ 2259 ram_addr_t qemu_ram_block_host_offset(RAMBlock *rb, void *host) 2260 { 2261 ram_addr_t res = (uint8_t *)host - (uint8_t *)rb->host; 2262 assert((uintptr_t)host >= (uintptr_t)rb->host); 2263 assert(res < rb->max_length); 2264 2265 return res; 2266 } 2267 2268 RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset, 2269 ram_addr_t *offset) 2270 { 2271 RAMBlock *block; 2272 uint8_t *host = ptr; 2273 2274 if (xen_enabled()) { 2275 ram_addr_t ram_addr; 2276 RCU_READ_LOCK_GUARD(); 2277 ram_addr = xen_ram_addr_from_mapcache(ptr); 2278 block = qemu_get_ram_block(ram_addr); 2279 if (block) { 2280 *offset = ram_addr - block->offset; 2281 } 2282 return block; 2283 } 2284 2285 RCU_READ_LOCK_GUARD(); 2286 block = qatomic_rcu_read(&ram_list.mru_block); 2287 if (block && block->host && host - block->host < block->max_length) { 2288 goto found; 2289 } 2290 2291 RAMBLOCK_FOREACH(block) { 2292 /* This case appears when the block is not mapped. */ 2293 if (block->host == NULL) { 2294 continue; 2295 } 2296 if (host - block->host < block->max_length) { 2297 goto found; 2298 } 2299 } 2300 2301 return NULL; 2302 2303 found: 2304 *offset = (host - block->host); 2305 if (round_offset) { 2306 *offset &= TARGET_PAGE_MASK; 2307 } 2308 return block; 2309 } 2310 2311 /* 2312 * Finds the named RAMBlock 2313 * 2314 * name: The name of the RAMBlock to find 2315 * 2316 * Returns: RAMBlock (or NULL if not found) 2317 */ 2318 RAMBlock *qemu_ram_block_by_name(const char *name) 2319 { 2320 RAMBlock *block; 2321 2322 RAMBLOCK_FOREACH(block) { 2323 if (!strcmp(name, block->idstr)) { 2324 return block; 2325 } 2326 } 2327 2328 return NULL; 2329 } 2330 2331 /* 2332 * Some of the system routines need to translate from a host pointer 2333 * (typically a TLB entry) back to a ram offset. 
2334 */ 2335 ram_addr_t qemu_ram_addr_from_host(void *ptr) 2336 { 2337 RAMBlock *block; 2338 ram_addr_t offset; 2339 2340 block = qemu_ram_block_from_host(ptr, false, &offset); 2341 if (!block) { 2342 return RAM_ADDR_INVALID; 2343 } 2344 2345 return block->offset + offset; 2346 } 2347 2348 ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr) 2349 { 2350 ram_addr_t ram_addr; 2351 2352 ram_addr = qemu_ram_addr_from_host(ptr); 2353 if (ram_addr == RAM_ADDR_INVALID) { 2354 error_report("Bad ram pointer %p", ptr); 2355 abort(); 2356 } 2357 return ram_addr; 2358 } 2359 2360 static MemTxResult flatview_read(FlatView *fv, hwaddr addr, 2361 MemTxAttrs attrs, void *buf, hwaddr len); 2362 static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs, 2363 const void *buf, hwaddr len); 2364 static bool flatview_access_valid(FlatView *fv, hwaddr addr, hwaddr len, 2365 bool is_write, MemTxAttrs attrs); 2366 2367 static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data, 2368 unsigned len, MemTxAttrs attrs) 2369 { 2370 subpage_t *subpage = opaque; 2371 uint8_t buf[8]; 2372 MemTxResult res; 2373 2374 #if defined(DEBUG_SUBPAGE) 2375 printf("%s: subpage %p len %u addr " HWADDR_FMT_plx "\n", __func__, 2376 subpage, len, addr); 2377 #endif 2378 res = flatview_read(subpage->fv, addr + subpage->base, attrs, buf, len); 2379 if (res) { 2380 return res; 2381 } 2382 *data = ldn_p(buf, len); 2383 return MEMTX_OK; 2384 } 2385 2386 static MemTxResult subpage_write(void *opaque, hwaddr addr, 2387 uint64_t value, unsigned len, MemTxAttrs attrs) 2388 { 2389 subpage_t *subpage = opaque; 2390 uint8_t buf[8]; 2391 2392 #if defined(DEBUG_SUBPAGE) 2393 printf("%s: subpage %p len %u addr " HWADDR_FMT_plx 2394 " value %"PRIx64"\n", 2395 __func__, subpage, len, addr, value); 2396 #endif 2397 stn_p(buf, len, value); 2398 return flatview_write(subpage->fv, addr + subpage->base, attrs, buf, len); 2399 } 2400 2401 static bool subpage_accepts(void *opaque, hwaddr addr, 2402 unsigned len, bool is_write, 2403 MemTxAttrs attrs) 2404 { 2405 subpage_t *subpage = opaque; 2406 #if defined(DEBUG_SUBPAGE) 2407 printf("%s: subpage %p %c len %u addr " HWADDR_FMT_plx "\n", 2408 __func__, subpage, is_write ? 
'w' : 'r', len, addr); 2409 #endif 2410 2411 return flatview_access_valid(subpage->fv, addr + subpage->base, 2412 len, is_write, attrs); 2413 } 2414 2415 static const MemoryRegionOps subpage_ops = { 2416 .read_with_attrs = subpage_read, 2417 .write_with_attrs = subpage_write, 2418 .impl.min_access_size = 1, 2419 .impl.max_access_size = 8, 2420 .valid.min_access_size = 1, 2421 .valid.max_access_size = 8, 2422 .valid.accepts = subpage_accepts, 2423 .endianness = DEVICE_NATIVE_ENDIAN, 2424 }; 2425 2426 static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end, 2427 uint16_t section) 2428 { 2429 int idx, eidx; 2430 2431 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE) 2432 return -1; 2433 idx = SUBPAGE_IDX(start); 2434 eidx = SUBPAGE_IDX(end); 2435 #if defined(DEBUG_SUBPAGE) 2436 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n", 2437 __func__, mmio, start, end, idx, eidx, section); 2438 #endif 2439 for (; idx <= eidx; idx++) { 2440 mmio->sub_section[idx] = section; 2441 } 2442 2443 return 0; 2444 } 2445 2446 static subpage_t *subpage_init(FlatView *fv, hwaddr base) 2447 { 2448 subpage_t *mmio; 2449 2450 /* mmio->sub_section is set to PHYS_SECTION_UNASSIGNED with g_malloc0 */ 2451 mmio = g_malloc0(sizeof(subpage_t) + TARGET_PAGE_SIZE * sizeof(uint16_t)); 2452 mmio->fv = fv; 2453 mmio->base = base; 2454 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio, 2455 NULL, TARGET_PAGE_SIZE); 2456 mmio->iomem.subpage = true; 2457 #if defined(DEBUG_SUBPAGE) 2458 printf("%s: %p base " HWADDR_FMT_plx " len %08x\n", __func__, 2459 mmio, base, TARGET_PAGE_SIZE); 2460 #endif 2461 2462 return mmio; 2463 } 2464 2465 static uint16_t dummy_section(PhysPageMap *map, FlatView *fv, MemoryRegion *mr) 2466 { 2467 assert(fv); 2468 MemoryRegionSection section = { 2469 .fv = fv, 2470 .mr = mr, 2471 .offset_within_address_space = 0, 2472 .offset_within_region = 0, 2473 .size = int128_2_64(), 2474 }; 2475 2476 return phys_section_add(map, §ion); 2477 } 2478 2479 MemoryRegionSection *iotlb_to_section(CPUState *cpu, 2480 hwaddr index, MemTxAttrs attrs) 2481 { 2482 int asidx = cpu_asidx_from_attrs(cpu, attrs); 2483 CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx]; 2484 AddressSpaceDispatch *d = cpuas->memory_dispatch; 2485 int section_index = index & ~TARGET_PAGE_MASK; 2486 MemoryRegionSection *ret; 2487 2488 assert(section_index < d->map.sections_nb); 2489 ret = d->map.sections + section_index; 2490 assert(ret->mr); 2491 assert(ret->mr->ops); 2492 2493 return ret; 2494 } 2495 2496 static void io_mem_init(void) 2497 { 2498 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL, 2499 NULL, UINT64_MAX); 2500 } 2501 2502 AddressSpaceDispatch *address_space_dispatch_new(FlatView *fv) 2503 { 2504 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1); 2505 uint16_t n; 2506 2507 n = dummy_section(&d->map, fv, &io_mem_unassigned); 2508 assert(n == PHYS_SECTION_UNASSIGNED); 2509 2510 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 }; 2511 2512 return d; 2513 } 2514 2515 void address_space_dispatch_free(AddressSpaceDispatch *d) 2516 { 2517 phys_sections_free(&d->map); 2518 g_free(d); 2519 } 2520 2521 static void do_nothing(CPUState *cpu, run_on_cpu_data d) 2522 { 2523 } 2524 2525 static void tcg_log_global_after_sync(MemoryListener *listener) 2526 { 2527 CPUAddressSpace *cpuas; 2528 2529 /* Wait for the CPU to end the current TB. 
This avoids the following 2530 * incorrect race: 2531 * 2532 * vCPU migration 2533 * ---------------------- ------------------------- 2534 * TLB check -> slow path 2535 * notdirty_mem_write 2536 * write to RAM 2537 * mark dirty 2538 * clear dirty flag 2539 * TLB check -> fast path 2540 * read memory 2541 * write to RAM 2542 * 2543 * by pushing the migration thread's memory read after the vCPU thread has 2544 * written the memory. 2545 */ 2546 if (replay_mode == REPLAY_MODE_NONE) { 2547 /* 2548 * VGA can make calls to this function while updating the screen. 2549 * In record/replay mode this causes a deadlock, because 2550 * run_on_cpu waits for the rr mutex. Therefore no races are possible 2551 * in this case, and there is no need to call run_on_cpu when 2552 * record/replay is enabled. 2553 */ 2554 cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener); 2555 run_on_cpu(cpuas->cpu, do_nothing, RUN_ON_CPU_NULL); 2556 } 2557 } 2558 2559 static void tcg_commit_cpu(CPUState *cpu, run_on_cpu_data data) 2560 { 2561 CPUAddressSpace *cpuas = data.host_ptr; 2562 2563 cpuas->memory_dispatch = address_space_to_dispatch(cpuas->as); 2564 tlb_flush(cpu); 2565 } 2566 2567 static void tcg_commit(MemoryListener *listener) 2568 { 2569 CPUAddressSpace *cpuas; 2570 CPUState *cpu; 2571 2572 assert(tcg_enabled()); 2573 /* since each CPU stores ram addresses in its TLB cache, we must 2574 reset the modified entries */ 2575 cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener); 2576 cpu = cpuas->cpu; 2577 2578 /* 2579 * Defer changes to as->memory_dispatch until the cpu is quiescent. 2580 * Otherwise we race between (1) other cpu threads and (2) ongoing 2581 * i/o for the current cpu thread, with data cached by mmu_lookup(). 2582 * 2583 * In addition, queueing the work function will kick the cpu back to 2584 * the main loop, which will end the RCU critical section and reclaim 2585 * the memory data structures. 2586 * 2587 * That said, the listener is also called during realize, before 2588 * all of the tcg machinery for run-on is initialized: thus halt_cond. 2589 */ 2590 if (cpu->halt_cond) { 2591 async_run_on_cpu(cpu, tcg_commit_cpu, RUN_ON_CPU_HOST_PTR(cpuas)); 2592 } else { 2593 tcg_commit_cpu(cpu, RUN_ON_CPU_HOST_PTR(cpuas)); 2594 } 2595 } 2596 2597 static void memory_map_init(void) 2598 { 2599 system_memory = g_malloc(sizeof(*system_memory)); 2600 2601 memory_region_init(system_memory, NULL, "system", UINT64_MAX); 2602 address_space_init(&address_space_memory, system_memory, "memory"); 2603 2604 system_io = g_malloc(sizeof(*system_io)); 2605 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io", 2606 65536); 2607 address_space_init(&address_space_io, system_io, "I/O"); 2608 } 2609 2610 MemoryRegion *get_system_memory(void) 2611 { 2612 return system_memory; 2613 } 2614 2615 MemoryRegion *get_system_io(void) 2616 { 2617 return system_io; 2618 } 2619 2620 static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr, 2621 hwaddr length) 2622 { 2623 uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr); 2624 addr += memory_region_get_ram_addr(mr); 2625 2626 /* No early return if dirty_log_mask is or becomes 0, because 2627 * cpu_physical_memory_set_dirty_range will still call 2628 * xen_modified_memory. 
2629 */ 2630 if (dirty_log_mask) { 2631 dirty_log_mask = 2632 cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask); 2633 } 2634 if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) { 2635 assert(tcg_enabled()); 2636 tb_invalidate_phys_range(addr, addr + length - 1); 2637 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE); 2638 } 2639 cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask); 2640 } 2641 2642 void memory_region_flush_rom_device(MemoryRegion *mr, hwaddr addr, hwaddr size) 2643 { 2644 /* 2645 * In principle this function would work on other memory region types too, 2646 * but the ROM device use case is the only one where this operation is 2647 * necessary. Other memory regions should use the 2648 * address_space_read/write() APIs. 2649 */ 2650 assert(memory_region_is_romd(mr)); 2651 2652 invalidate_and_set_dirty(mr, addr, size); 2653 } 2654 2655 int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr) 2656 { 2657 unsigned access_size_max = mr->ops->valid.max_access_size; 2658 2659 /* Regions are assumed to support 1-4 byte accesses unless 2660 otherwise specified. */ 2661 if (access_size_max == 0) { 2662 access_size_max = 4; 2663 } 2664 2665 /* Bound the maximum access by the alignment of the address. */ 2666 if (!mr->ops->impl.unaligned) { 2667 unsigned align_size_max = addr & -addr; 2668 if (align_size_max != 0 && align_size_max < access_size_max) { 2669 access_size_max = align_size_max; 2670 } 2671 } 2672 2673 /* Don't attempt accesses larger than the maximum. */ 2674 if (l > access_size_max) { 2675 l = access_size_max; 2676 } 2677 l = pow2floor(l); 2678 2679 return l; 2680 } 2681 2682 bool prepare_mmio_access(MemoryRegion *mr) 2683 { 2684 bool release_lock = false; 2685 2686 if (!bql_locked()) { 2687 bql_lock(); 2688 release_lock = true; 2689 } 2690 if (mr->flush_coalesced_mmio) { 2691 qemu_flush_coalesced_mmio_buffer(); 2692 } 2693 2694 return release_lock; 2695 } 2696 2697 /** 2698 * flatview_access_allowed 2699 * @mr: #MemoryRegion to be accessed 2700 * @attrs: memory transaction attributes 2701 * @addr: address within that memory region 2702 * @len: the number of bytes to access 2703 * 2704 * Check if a memory transaction is allowed. 2705 * 2706 * Returns: true if transaction is allowed, false if denied. 2707 */ 2708 static bool flatview_access_allowed(MemoryRegion *mr, MemTxAttrs attrs, 2709 hwaddr addr, hwaddr len) 2710 { 2711 if (likely(!attrs.memory)) { 2712 return true; 2713 } 2714 if (memory_region_is_ram(mr)) { 2715 return true; 2716 } 2717 qemu_log_mask(LOG_GUEST_ERROR, 2718 "Invalid access to non-RAM device at " 2719 "addr 0x%" HWADDR_PRIX ", size %" HWADDR_PRIu ", " 2720 "region '%s'\n", addr, len, memory_region_name(mr)); 2721 return false; 2722 } 2723 2724 static MemTxResult flatview_write_continue_step(MemTxAttrs attrs, 2725 const uint8_t *buf, 2726 hwaddr len, hwaddr mr_addr, 2727 hwaddr *l, MemoryRegion *mr) 2728 { 2729 if (!flatview_access_allowed(mr, attrs, mr_addr, *l)) { 2730 return MEMTX_ACCESS_ERROR; 2731 } 2732 2733 if (!memory_access_is_direct(mr, true)) { 2734 uint64_t val; 2735 MemTxResult result; 2736 bool release_lock = prepare_mmio_access(mr); 2737 2738 *l = memory_access_size(mr, *l, mr_addr); 2739 /* 2740 * XXX: could force current_cpu to NULL to avoid 2741 * potential bugs 2742 */ 2743 2744 /* 2745 * Assure Coverity (and ourselves) that we are not going to OVERRUN 2746 * the buffer by following ldn_he_p(). 
2747 */ 2748 #ifdef QEMU_STATIC_ANALYSIS 2749 assert((*l == 1 && len >= 1) || 2750 (*l == 2 && len >= 2) || 2751 (*l == 4 && len >= 4) || 2752 (*l == 8 && len >= 8)); 2753 #endif 2754 val = ldn_he_p(buf, *l); 2755 result = memory_region_dispatch_write(mr, mr_addr, val, 2756 size_memop(*l), attrs); 2757 if (release_lock) { 2758 bql_unlock(); 2759 } 2760 2761 return result; 2762 } else { 2763 /* RAM case */ 2764 uint8_t *ram_ptr = qemu_ram_ptr_length(mr->ram_block, mr_addr, l, 2765 false, true); 2766 2767 memmove(ram_ptr, buf, *l); 2768 invalidate_and_set_dirty(mr, mr_addr, *l); 2769 2770 return MEMTX_OK; 2771 } 2772 } 2773 2774 /* Called within RCU critical section. */ 2775 static MemTxResult flatview_write_continue(FlatView *fv, hwaddr addr, 2776 MemTxAttrs attrs, 2777 const void *ptr, 2778 hwaddr len, hwaddr mr_addr, 2779 hwaddr l, MemoryRegion *mr) 2780 { 2781 MemTxResult result = MEMTX_OK; 2782 const uint8_t *buf = ptr; 2783 2784 for (;;) { 2785 result |= flatview_write_continue_step(attrs, buf, len, mr_addr, &l, 2786 mr); 2787 2788 len -= l; 2789 buf += l; 2790 addr += l; 2791 2792 if (!len) { 2793 break; 2794 } 2795 2796 l = len; 2797 mr = flatview_translate(fv, addr, &mr_addr, &l, true, attrs); 2798 } 2799 2800 return result; 2801 } 2802 2803 /* Called from RCU critical section. */ 2804 static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs, 2805 const void *buf, hwaddr len) 2806 { 2807 hwaddr l; 2808 hwaddr mr_addr; 2809 MemoryRegion *mr; 2810 2811 l = len; 2812 mr = flatview_translate(fv, addr, &mr_addr, &l, true, attrs); 2813 if (!flatview_access_allowed(mr, attrs, addr, len)) { 2814 return MEMTX_ACCESS_ERROR; 2815 } 2816 return flatview_write_continue(fv, addr, attrs, buf, len, 2817 mr_addr, l, mr); 2818 } 2819 2820 static MemTxResult flatview_read_continue_step(MemTxAttrs attrs, uint8_t *buf, 2821 hwaddr len, hwaddr mr_addr, 2822 hwaddr *l, 2823 MemoryRegion *mr) 2824 { 2825 if (!flatview_access_allowed(mr, attrs, mr_addr, *l)) { 2826 return MEMTX_ACCESS_ERROR; 2827 } 2828 2829 if (!memory_access_is_direct(mr, false)) { 2830 /* I/O case */ 2831 uint64_t val; 2832 MemTxResult result; 2833 bool release_lock = prepare_mmio_access(mr); 2834 2835 *l = memory_access_size(mr, *l, mr_addr); 2836 result = memory_region_dispatch_read(mr, mr_addr, &val, size_memop(*l), 2837 attrs); 2838 2839 /* 2840 * Assure Coverity (and ourselves) that we are not going to OVERRUN 2841 * the buffer by following stn_he_p(). 2842 */ 2843 #ifdef QEMU_STATIC_ANALYSIS 2844 assert((*l == 1 && len >= 1) || 2845 (*l == 2 && len >= 2) || 2846 (*l == 4 && len >= 4) || 2847 (*l == 8 && len >= 8)); 2848 #endif 2849 stn_he_p(buf, *l, val); 2850 2851 if (release_lock) { 2852 bql_unlock(); 2853 } 2854 return result; 2855 } else { 2856 /* RAM case */ 2857 uint8_t *ram_ptr = qemu_ram_ptr_length(mr->ram_block, mr_addr, l, 2858 false, false); 2859 2860 memcpy(buf, ram_ptr, *l); 2861 2862 return MEMTX_OK; 2863 } 2864 } 2865 2866 /* Called within RCU critical section. 
*/ 2867 MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr, 2868 MemTxAttrs attrs, void *ptr, 2869 hwaddr len, hwaddr mr_addr, hwaddr l, 2870 MemoryRegion *mr) 2871 { 2872 MemTxResult result = MEMTX_OK; 2873 uint8_t *buf = ptr; 2874 2875 fuzz_dma_read_cb(addr, len, mr); 2876 for (;;) { 2877 result |= flatview_read_continue_step(attrs, buf, len, mr_addr, &l, mr); 2878 2879 len -= l; 2880 buf += l; 2881 addr += l; 2882 2883 if (!len) { 2884 break; 2885 } 2886 2887 l = len; 2888 mr = flatview_translate(fv, addr, &mr_addr, &l, false, attrs); 2889 } 2890 2891 return result; 2892 } 2893 2894 /* Called from RCU critical section. */ 2895 static MemTxResult flatview_read(FlatView *fv, hwaddr addr, 2896 MemTxAttrs attrs, void *buf, hwaddr len) 2897 { 2898 hwaddr l; 2899 hwaddr mr_addr; 2900 MemoryRegion *mr; 2901 2902 l = len; 2903 mr = flatview_translate(fv, addr, &mr_addr, &l, false, attrs); 2904 if (!flatview_access_allowed(mr, attrs, addr, len)) { 2905 return MEMTX_ACCESS_ERROR; 2906 } 2907 return flatview_read_continue(fv, addr, attrs, buf, len, 2908 mr_addr, l, mr); 2909 } 2910 2911 MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr, 2912 MemTxAttrs attrs, void *buf, hwaddr len) 2913 { 2914 MemTxResult result = MEMTX_OK; 2915 FlatView *fv; 2916 2917 if (len > 0) { 2918 RCU_READ_LOCK_GUARD(); 2919 fv = address_space_to_flatview(as); 2920 result = flatview_read(fv, addr, attrs, buf, len); 2921 } 2922 2923 return result; 2924 } 2925 2926 MemTxResult address_space_write(AddressSpace *as, hwaddr addr, 2927 MemTxAttrs attrs, 2928 const void *buf, hwaddr len) 2929 { 2930 MemTxResult result = MEMTX_OK; 2931 FlatView *fv; 2932 2933 if (len > 0) { 2934 RCU_READ_LOCK_GUARD(); 2935 fv = address_space_to_flatview(as); 2936 result = flatview_write(fv, addr, attrs, buf, len); 2937 } 2938 2939 return result; 2940 } 2941 2942 MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs, 2943 void *buf, hwaddr len, bool is_write) 2944 { 2945 if (is_write) { 2946 return address_space_write(as, addr, attrs, buf, len); 2947 } else { 2948 return address_space_read_full(as, addr, attrs, buf, len); 2949 } 2950 } 2951 2952 MemTxResult address_space_set(AddressSpace *as, hwaddr addr, 2953 uint8_t c, hwaddr len, MemTxAttrs attrs) 2954 { 2955 #define FILLBUF_SIZE 512 2956 uint8_t fillbuf[FILLBUF_SIZE]; 2957 int l; 2958 MemTxResult error = MEMTX_OK; 2959 2960 memset(fillbuf, c, FILLBUF_SIZE); 2961 while (len > 0) { 2962 l = len < FILLBUF_SIZE ? 
len : FILLBUF_SIZE; 2963 error |= address_space_write(as, addr, attrs, fillbuf, l); 2964 len -= l; 2965 addr += l; 2966 } 2967 2968 return error; 2969 } 2970 2971 void cpu_physical_memory_rw(hwaddr addr, void *buf, 2972 hwaddr len, bool is_write) 2973 { 2974 address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED, 2975 buf, len, is_write); 2976 } 2977 2978 enum write_rom_type { 2979 WRITE_DATA, 2980 FLUSH_CACHE, 2981 }; 2982 2983 static inline MemTxResult address_space_write_rom_internal(AddressSpace *as, 2984 hwaddr addr, 2985 MemTxAttrs attrs, 2986 const void *ptr, 2987 hwaddr len, 2988 enum write_rom_type type) 2989 { 2990 hwaddr l; 2991 uint8_t *ram_ptr; 2992 hwaddr addr1; 2993 MemoryRegion *mr; 2994 const uint8_t *buf = ptr; 2995 2996 RCU_READ_LOCK_GUARD(); 2997 while (len > 0) { 2998 l = len; 2999 mr = address_space_translate(as, addr, &addr1, &l, true, attrs); 3000 3001 if (!(memory_region_is_ram(mr) || 3002 memory_region_is_romd(mr))) { 3003 l = memory_access_size(mr, l, addr1); 3004 } else { 3005 /* ROM/RAM case */ 3006 ram_ptr = qemu_map_ram_ptr(mr->ram_block, addr1); 3007 switch (type) { 3008 case WRITE_DATA: 3009 memcpy(ram_ptr, buf, l); 3010 invalidate_and_set_dirty(mr, addr1, l); 3011 break; 3012 case FLUSH_CACHE: 3013 flush_idcache_range((uintptr_t)ram_ptr, (uintptr_t)ram_ptr, l); 3014 break; 3015 } 3016 } 3017 len -= l; 3018 buf += l; 3019 addr += l; 3020 } 3021 return MEMTX_OK; 3022 } 3023 3024 /* used for ROM loading : can write in RAM and ROM */ 3025 MemTxResult address_space_write_rom(AddressSpace *as, hwaddr addr, 3026 MemTxAttrs attrs, 3027 const void *buf, hwaddr len) 3028 { 3029 return address_space_write_rom_internal(as, addr, attrs, 3030 buf, len, WRITE_DATA); 3031 } 3032 3033 void cpu_flush_icache_range(hwaddr start, hwaddr len) 3034 { 3035 /* 3036 * This function should do the same thing as an icache flush that was 3037 * triggered from within the guest. For TCG we are always cache coherent, 3038 * so there is no need to flush anything. For KVM / Xen we need to flush 3039 * the host's instruction cache at least. 3040 */ 3041 if (tcg_enabled()) { 3042 return; 3043 } 3044 3045 address_space_write_rom_internal(&address_space_memory, 3046 start, MEMTXATTRS_UNSPECIFIED, 3047 NULL, len, FLUSH_CACHE); 3048 } 3049 3050 static void 3051 address_space_unregister_map_client_do(AddressSpaceMapClient *client) 3052 { 3053 QLIST_REMOVE(client, link); 3054 g_free(client); 3055 } 3056 3057 static void address_space_notify_map_clients_locked(AddressSpace *as) 3058 { 3059 AddressSpaceMapClient *client; 3060 3061 while (!QLIST_EMPTY(&as->map_client_list)) { 3062 client = QLIST_FIRST(&as->map_client_list); 3063 qemu_bh_schedule(client->bh); 3064 address_space_unregister_map_client_do(client); 3065 } 3066 } 3067 3068 void address_space_register_map_client(AddressSpace *as, QEMUBH *bh) 3069 { 3070 AddressSpaceMapClient *client = g_malloc(sizeof(*client)); 3071 3072 QEMU_LOCK_GUARD(&as->map_client_list_lock); 3073 client->bh = bh; 3074 QLIST_INSERT_HEAD(&as->map_client_list, client, link); 3075 /* Write map_client_list before reading in_use. */ 3076 smp_mb(); 3077 if (!qatomic_read(&as->bounce.in_use)) { 3078 address_space_notify_map_clients_locked(as); 3079 } 3080 } 3081 3082 void cpu_exec_init_all(void) 3083 { 3084 qemu_mutex_init(&ram_list.mutex); 3085 /* The data structures we set up here depend on knowing the page size, 3086 * so no more changes can be made after this point. 
3087 * In an ideal world, nothing we did before we had finished the 3088 * machine setup would care about the target page size, and we could 3089 * do this much later, rather than requiring board models to state 3090 * up front what their requirements are. 3091 */ 3092 finalize_target_page_bits(); 3093 io_mem_init(); 3094 memory_map_init(); 3095 } 3096 3097 void address_space_unregister_map_client(AddressSpace *as, QEMUBH *bh) 3098 { 3099 AddressSpaceMapClient *client; 3100 3101 QEMU_LOCK_GUARD(&as->map_client_list_lock); 3102 QLIST_FOREACH(client, &as->map_client_list, link) { 3103 if (client->bh == bh) { 3104 address_space_unregister_map_client_do(client); 3105 break; 3106 } 3107 } 3108 } 3109 3110 static void address_space_notify_map_clients(AddressSpace *as) 3111 { 3112 QEMU_LOCK_GUARD(&as->map_client_list_lock); 3113 address_space_notify_map_clients_locked(as); 3114 } 3115 3116 static bool flatview_access_valid(FlatView *fv, hwaddr addr, hwaddr len, 3117 bool is_write, MemTxAttrs attrs) 3118 { 3119 MemoryRegion *mr; 3120 hwaddr l, xlat; 3121 3122 while (len > 0) { 3123 l = len; 3124 mr = flatview_translate(fv, addr, &xlat, &l, is_write, attrs); 3125 if (!memory_access_is_direct(mr, is_write)) { 3126 l = memory_access_size(mr, l, addr); 3127 if (!memory_region_access_valid(mr, xlat, l, is_write, attrs)) { 3128 return false; 3129 } 3130 } 3131 3132 len -= l; 3133 addr += l; 3134 } 3135 return true; 3136 } 3137 3138 bool address_space_access_valid(AddressSpace *as, hwaddr addr, 3139 hwaddr len, bool is_write, 3140 MemTxAttrs attrs) 3141 { 3142 FlatView *fv; 3143 3144 RCU_READ_LOCK_GUARD(); 3145 fv = address_space_to_flatview(as); 3146 return flatview_access_valid(fv, addr, len, is_write, attrs); 3147 } 3148 3149 static hwaddr 3150 flatview_extend_translation(FlatView *fv, hwaddr addr, 3151 hwaddr target_len, 3152 MemoryRegion *mr, hwaddr base, hwaddr len, 3153 bool is_write, MemTxAttrs attrs) 3154 { 3155 hwaddr done = 0; 3156 hwaddr xlat; 3157 MemoryRegion *this_mr; 3158 3159 for (;;) { 3160 target_len -= len; 3161 addr += len; 3162 done += len; 3163 if (target_len == 0) { 3164 return done; 3165 } 3166 3167 len = target_len; 3168 this_mr = flatview_translate(fv, addr, &xlat, 3169 &len, is_write, attrs); 3170 if (this_mr != mr || xlat != base + done) { 3171 return done; 3172 } 3173 } 3174 } 3175 3176 /* Map a physical memory region into a host virtual address. 3177 * May map a subset of the requested range, given by and returned in *plen. 3178 * May return NULL if resources needed to perform the mapping are exhausted. 3179 * Use only for reads OR writes - not for read-modify-write operations. 3180 * Use address_space_register_map_client() to know when retrying the map 3181 * operation is likely to succeed. 
3182 */ 3183 void *address_space_map(AddressSpace *as, 3184 hwaddr addr, 3185 hwaddr *plen, 3186 bool is_write, 3187 MemTxAttrs attrs) 3188 { 3189 hwaddr len = *plen; 3190 hwaddr l, xlat; 3191 MemoryRegion *mr; 3192 FlatView *fv; 3193 3194 if (len == 0) { 3195 return NULL; 3196 } 3197 3198 l = len; 3199 RCU_READ_LOCK_GUARD(); 3200 fv = address_space_to_flatview(as); 3201 mr = flatview_translate(fv, addr, &xlat, &l, is_write, attrs); 3202 3203 if (!memory_access_is_direct(mr, is_write)) { 3204 if (qatomic_xchg(&as->bounce.in_use, true)) { 3205 *plen = 0; 3206 return NULL; 3207 } 3208 /* Avoid unbounded allocations */ 3209 l = MIN(l, TARGET_PAGE_SIZE); 3210 as->bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l); 3211 as->bounce.addr = addr; 3212 as->bounce.len = l; 3213 3214 memory_region_ref(mr); 3215 as->bounce.mr = mr; 3216 if (!is_write) { 3217 flatview_read(fv, addr, MEMTXATTRS_UNSPECIFIED, 3218 as->bounce.buffer, l); 3219 } 3220 3221 *plen = l; 3222 return as->bounce.buffer; 3223 } 3224 3225 3226 memory_region_ref(mr); 3227 *plen = flatview_extend_translation(fv, addr, len, mr, xlat, 3228 l, is_write, attrs); 3229 fuzz_dma_read_cb(addr, *plen, mr); 3230 return qemu_ram_ptr_length(mr->ram_block, xlat, plen, true, is_write); 3231 } 3232 3233 /* Unmaps a memory region previously mapped by address_space_map(). 3234 * Will also mark the memory as dirty if is_write is true. access_len gives 3235 * the amount of memory that was actually read or written by the caller. 3236 */ 3237 void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len, 3238 bool is_write, hwaddr access_len) 3239 { 3240 if (buffer != as->bounce.buffer) { 3241 MemoryRegion *mr; 3242 ram_addr_t addr1; 3243 3244 mr = memory_region_from_host(buffer, &addr1); 3245 assert(mr != NULL); 3246 if (is_write) { 3247 invalidate_and_set_dirty(mr, addr1, access_len); 3248 } 3249 if (xen_enabled()) { 3250 xen_invalidate_map_cache_entry(buffer); 3251 } 3252 memory_region_unref(mr); 3253 return; 3254 } 3255 if (is_write) { 3256 address_space_write(as, as->bounce.addr, MEMTXATTRS_UNSPECIFIED, 3257 as->bounce.buffer, access_len); 3258 } 3259 qemu_vfree(as->bounce.buffer); 3260 as->bounce.buffer = NULL; 3261 memory_region_unref(as->bounce.mr); 3262 /* Clear in_use before reading map_client_list. */ 3263 qatomic_set_mb(&as->bounce.in_use, false); 3264 address_space_notify_map_clients(as); 3265 } 3266 3267 void *cpu_physical_memory_map(hwaddr addr, 3268 hwaddr *plen, 3269 bool is_write) 3270 { 3271 return address_space_map(&address_space_memory, addr, plen, is_write, 3272 MEMTXATTRS_UNSPECIFIED); 3273 } 3274 3275 void cpu_physical_memory_unmap(void *buffer, hwaddr len, 3276 bool is_write, hwaddr access_len) 3277 { 3278 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len); 3279 } 3280 3281 #define ARG1_DECL AddressSpace *as 3282 #define ARG1 as 3283 #define SUFFIX 3284 #define TRANSLATE(...) address_space_translate(as, __VA_ARGS__) 3285 #define RCU_READ_LOCK(...) rcu_read_lock() 3286 #define RCU_READ_UNLOCK(...) 
rcu_read_unlock() 3287 #include "memory_ldst.c.inc" 3288 3289 int64_t address_space_cache_init(MemoryRegionCache *cache, 3290 AddressSpace *as, 3291 hwaddr addr, 3292 hwaddr len, 3293 bool is_write) 3294 { 3295 AddressSpaceDispatch *d; 3296 hwaddr l; 3297 MemoryRegion *mr; 3298 Int128 diff; 3299 3300 assert(len > 0); 3301 3302 l = len; 3303 cache->fv = address_space_get_flatview(as); 3304 d = flatview_to_dispatch(cache->fv); 3305 cache->mrs = *address_space_translate_internal(d, addr, &cache->xlat, &l, true); 3306 3307 /* 3308 * cache->xlat is now relative to cache->mrs.mr, not to the section itself. 3309 * Take that into account to compute how many bytes are there between 3310 * cache->xlat and the end of the section. 3311 */ 3312 diff = int128_sub(cache->mrs.size, 3313 int128_make64(cache->xlat - cache->mrs.offset_within_region)); 3314 l = int128_get64(int128_min(diff, int128_make64(l))); 3315 3316 mr = cache->mrs.mr; 3317 memory_region_ref(mr); 3318 if (memory_access_is_direct(mr, is_write)) { 3319 /* We don't care about the memory attributes here as we're only 3320 * doing this if we found actual RAM, which behaves the same 3321 * regardless of attributes; so UNSPECIFIED is fine. 3322 */ 3323 l = flatview_extend_translation(cache->fv, addr, len, mr, 3324 cache->xlat, l, is_write, 3325 MEMTXATTRS_UNSPECIFIED); 3326 cache->ptr = qemu_ram_ptr_length(mr->ram_block, cache->xlat, &l, true, 3327 is_write); 3328 } else { 3329 cache->ptr = NULL; 3330 } 3331 3332 cache->len = l; 3333 cache->is_write = is_write; 3334 return l; 3335 } 3336 3337 void address_space_cache_invalidate(MemoryRegionCache *cache, 3338 hwaddr addr, 3339 hwaddr access_len) 3340 { 3341 assert(cache->is_write); 3342 if (likely(cache->ptr)) { 3343 invalidate_and_set_dirty(cache->mrs.mr, addr + cache->xlat, access_len); 3344 } 3345 } 3346 3347 void address_space_cache_destroy(MemoryRegionCache *cache) 3348 { 3349 if (!cache->mrs.mr) { 3350 return; 3351 } 3352 3353 if (xen_enabled()) { 3354 xen_invalidate_map_cache_entry(cache->ptr); 3355 } 3356 memory_region_unref(cache->mrs.mr); 3357 flatview_unref(cache->fv); 3358 cache->mrs.mr = NULL; 3359 cache->fv = NULL; 3360 } 3361 3362 /* Called from RCU critical section. This function has the same 3363 * semantics as address_space_translate, but it only works on a 3364 * predefined range of a MemoryRegion that was mapped with 3365 * address_space_cache_init. 3366 */ 3367 static inline MemoryRegion *address_space_translate_cached( 3368 MemoryRegionCache *cache, hwaddr addr, hwaddr *xlat, 3369 hwaddr *plen, bool is_write, MemTxAttrs attrs) 3370 { 3371 MemoryRegionSection section; 3372 MemoryRegion *mr; 3373 IOMMUMemoryRegion *iommu_mr; 3374 AddressSpace *target_as; 3375 3376 assert(!cache->ptr); 3377 *xlat = addr + cache->xlat; 3378 3379 mr = cache->mrs.mr; 3380 iommu_mr = memory_region_get_iommu(mr); 3381 if (!iommu_mr) { 3382 /* MMIO region. */ 3383 return mr; 3384 } 3385 3386 section = address_space_translate_iommu(iommu_mr, xlat, plen, 3387 NULL, is_write, true, 3388 &target_as, attrs); 3389 return section.mr; 3390 } 3391 3392 /* Called within RCU critical section. 
*/ 3393 static MemTxResult address_space_write_continue_cached(MemTxAttrs attrs, 3394 const void *ptr, 3395 hwaddr len, 3396 hwaddr mr_addr, 3397 hwaddr l, 3398 MemoryRegion *mr) 3399 { 3400 MemTxResult result = MEMTX_OK; 3401 const uint8_t *buf = ptr; 3402 3403 for (;;) { 3404 result |= flatview_write_continue_step(attrs, buf, len, mr_addr, &l, 3405 mr); 3406 3407 len -= l; 3408 buf += l; 3409 mr_addr += l; 3410 3411 if (!len) { 3412 break; 3413 } 3414 3415 l = len; 3416 } 3417 3418 return result; 3419 } 3420 3421 /* Called within RCU critical section. */ 3422 static MemTxResult address_space_read_continue_cached(MemTxAttrs attrs, 3423 void *ptr, hwaddr len, 3424 hwaddr mr_addr, hwaddr l, 3425 MemoryRegion *mr) 3426 { 3427 MemTxResult result = MEMTX_OK; 3428 uint8_t *buf = ptr; 3429 3430 for (;;) { 3431 result |= flatview_read_continue_step(attrs, buf, len, mr_addr, &l, mr); 3432 len -= l; 3433 buf += l; 3434 mr_addr += l; 3435 3436 if (!len) { 3437 break; 3438 } 3439 l = len; 3440 } 3441 3442 return result; 3443 } 3444 3445 /* Called from RCU critical section. address_space_read_cached uses this 3446 * out of line function when the target is an MMIO or IOMMU region. 3447 */ 3448 MemTxResult 3449 address_space_read_cached_slow(MemoryRegionCache *cache, hwaddr addr, 3450 void *buf, hwaddr len) 3451 { 3452 hwaddr mr_addr, l; 3453 MemoryRegion *mr; 3454 3455 l = len; 3456 mr = address_space_translate_cached(cache, addr, &mr_addr, &l, false, 3457 MEMTXATTRS_UNSPECIFIED); 3458 return address_space_read_continue_cached(MEMTXATTRS_UNSPECIFIED, 3459 buf, len, mr_addr, l, mr); 3460 } 3461 3462 /* Called from RCU critical section. address_space_write_cached uses this 3463 * out of line function when the target is an MMIO or IOMMU region. 3464 */ 3465 MemTxResult 3466 address_space_write_cached_slow(MemoryRegionCache *cache, hwaddr addr, 3467 const void *buf, hwaddr len) 3468 { 3469 hwaddr mr_addr, l; 3470 MemoryRegion *mr; 3471 3472 l = len; 3473 mr = address_space_translate_cached(cache, addr, &mr_addr, &l, true, 3474 MEMTXATTRS_UNSPECIFIED); 3475 return address_space_write_continue_cached(MEMTXATTRS_UNSPECIFIED, 3476 buf, len, mr_addr, l, mr); 3477 } 3478 3479 #define ARG1_DECL MemoryRegionCache *cache 3480 #define ARG1 cache 3481 #define SUFFIX _cached_slow 3482 #define TRANSLATE(...) 
address_space_translate_cached(cache, __VA_ARGS__) 3483 #define RCU_READ_LOCK() ((void)0) 3484 #define RCU_READ_UNLOCK() ((void)0) 3485 #include "memory_ldst.c.inc" 3486 3487 /* virtual memory access for debug (includes writing to ROM) */ 3488 int cpu_memory_rw_debug(CPUState *cpu, vaddr addr, 3489 void *ptr, size_t len, bool is_write) 3490 { 3491 hwaddr phys_addr; 3492 vaddr l, page; 3493 uint8_t *buf = ptr; 3494 3495 cpu_synchronize_state(cpu); 3496 while (len > 0) { 3497 int asidx; 3498 MemTxAttrs attrs; 3499 MemTxResult res; 3500 3501 page = addr & TARGET_PAGE_MASK; 3502 phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs); 3503 asidx = cpu_asidx_from_attrs(cpu, attrs); 3504 /* if no physical page mapped, return an error */ 3505 if (phys_addr == -1) 3506 return -1; 3507 l = (page + TARGET_PAGE_SIZE) - addr; 3508 if (l > len) 3509 l = len; 3510 phys_addr += (addr & ~TARGET_PAGE_MASK); 3511 if (is_write) { 3512 res = address_space_write_rom(cpu->cpu_ases[asidx].as, phys_addr, 3513 attrs, buf, l); 3514 } else { 3515 res = address_space_read(cpu->cpu_ases[asidx].as, phys_addr, 3516 attrs, buf, l); 3517 } 3518 if (res != MEMTX_OK) { 3519 return -1; 3520 } 3521 len -= l; 3522 buf += l; 3523 addr += l; 3524 } 3525 return 0; 3526 } 3527 3528 bool cpu_physical_memory_is_io(hwaddr phys_addr) 3529 { 3530 MemoryRegion *mr; 3531 hwaddr l = 1; 3532 3533 RCU_READ_LOCK_GUARD(); 3534 mr = address_space_translate(&address_space_memory, 3535 phys_addr, &phys_addr, &l, false, 3536 MEMTXATTRS_UNSPECIFIED); 3537 3538 return !(memory_region_is_ram(mr) || memory_region_is_romd(mr)); 3539 } 3540 3541 int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque) 3542 { 3543 RAMBlock *block; 3544 int ret = 0; 3545 3546 RCU_READ_LOCK_GUARD(); 3547 RAMBLOCK_FOREACH(block) { 3548 ret = func(block, opaque); 3549 if (ret) { 3550 break; 3551 } 3552 } 3553 return ret; 3554 } 3555 3556 /* 3557 * Unmap pages of memory from start to start+length such that 3558 * they a) read as 0, b) trigger whatever fault mechanism 3559 * the OS provides for postcopy. 3560 * The pages must be unmapped by the end of the function. 3561 * Returns: 0 on success, non-0 on failure 3562 * 3563 */ 3564 int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length) 3565 { 3566 int ret = -1; 3567 3568 uint8_t *host_startaddr = rb->host + start; 3569 3570 if (!QEMU_PTR_IS_ALIGNED(host_startaddr, rb->page_size)) { 3571 error_report("%s: Unaligned start address: %p", 3572 __func__, host_startaddr); 3573 goto err; 3574 } 3575 3576 if ((start + length) <= rb->max_length) { 3577 bool need_madvise, need_fallocate; 3578 if (!QEMU_IS_ALIGNED(length, rb->page_size)) { 3579 error_report("%s: Unaligned length: %zx", __func__, length); 3580 goto err; 3581 } 3582 3583 errno = ENOTSUP; /* If we are missing MADVISE etc */ 3584 3585 /* The logic here is messy; 3586 * madvise DONTNEED fails for hugepages 3587 * fallocate works on hugepages and shmem 3588 * shared anonymous memory requires madvise REMOVE 3589 */ 3590 need_madvise = (rb->page_size == qemu_real_host_page_size()); 3591 need_fallocate = rb->fd != -1; 3592 if (need_fallocate) { 3593 /* For a file, this causes the area of the file to be zero'd 3594 * if read, and for hugetlbfs also causes it to be unmapped 3595 * so a userfault will trigger. 3596 */ 3597 #ifdef CONFIG_FALLOCATE_PUNCH_HOLE 3598 /* 3599 * fallocate() will fail with readonly files. Let's print a 3600 * proper error message. 
3601 */ 3602 if (rb->flags & RAM_READONLY_FD) { 3603 error_report("%s: Discarding RAM with readonly files is not" 3604 " supported", __func__); 3605 goto err; 3606 3607 } 3608 /* 3609 * We'll discard data from the actual file, even though we only 3610 * have a MAP_PRIVATE mapping, possibly messing with other 3611 * MAP_PRIVATE/MAP_SHARED mappings. There is no easy way to 3612 * change that behavior without violating the promised 3613 * semantics of ram_block_discard_range(). 3614 * 3615 * Only warn, because it works as long as nobody else uses that 3616 * file. 3617 */ 3618 if (!qemu_ram_is_shared(rb)) { 3619 warn_report_once("%s: Discarding RAM" 3620 " in private file mappings is possibly" 3621 " dangerous, because it will modify the" 3622 " underlying file and will affect other" 3623 " users of the file", __func__); 3624 } 3625 3626 ret = fallocate(rb->fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 3627 start, length); 3628 if (ret) { 3629 ret = -errno; 3630 error_report("%s: Failed to fallocate %s:%" PRIx64 " +%zx (%d)", 3631 __func__, rb->idstr, start, length, ret); 3632 goto err; 3633 } 3634 #else 3635 ret = -ENOSYS; 3636 error_report("%s: fallocate not available/file " 3637 "%s:%" PRIx64 " +%zx (%d)", 3638 __func__, rb->idstr, start, length, ret); 3639 goto err; 3640 #endif 3641 } 3642 if (need_madvise) { 3643 /* For normal RAM this causes it to be unmapped, 3644 * for shared memory it causes the local mapping to disappear 3645 * and to fall back on the file contents (which we just 3646 * fallocate'd away). 3647 */ 3648 #if defined(CONFIG_MADVISE) 3649 if (qemu_ram_is_shared(rb) && rb->fd < 0) { 3650 ret = madvise(host_startaddr, length, QEMU_MADV_REMOVE); 3651 } else { 3652 ret = madvise(host_startaddr, length, QEMU_MADV_DONTNEED); 3653 } 3654 if (ret) { 3655 ret = -errno; 3656 error_report("%s: Failed to discard range " 3657 "%s:%" PRIx64 " +%zx (%d)", 3658 __func__, rb->idstr, start, length, ret); 3659 goto err; 3660 } 3661 #else 3662 ret = -ENOSYS; 3663 error_report("%s: MADVISE not available %s:%" PRIx64 " +%zx (%d)", 3664 __func__, rb->idstr, start, length, ret); 3665 goto err; 3666 #endif 3667 } 3668 trace_ram_block_discard_range(rb->idstr, host_startaddr, length, 3669 need_madvise, need_fallocate, ret); 3670 } else { 3671 error_report("%s: Overrun block '%s' (%" PRIu64 "/%zx/" RAM_ADDR_FMT")", 3672 __func__, rb->idstr, start, length, rb->max_length); 3673 } 3674 3675 err: 3676 return ret; 3677 } 3678 3679 int ram_block_discard_guest_memfd_range(RAMBlock *rb, uint64_t start, 3680 size_t length) 3681 { 3682 int ret = -1; 3683 3684 #ifdef CONFIG_FALLOCATE_PUNCH_HOLE 3685 ret = fallocate(rb->guest_memfd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 3686 start, length); 3687 3688 if (ret) { 3689 ret = -errno; 3690 error_report("%s: Failed to fallocate %s:%" PRIx64 " +%zx (%d)", 3691 __func__, rb->idstr, start, length, ret); 3692 } 3693 #else 3694 ret = -ENOSYS; 3695 error_report("%s: fallocate not available %s:%" PRIx64 " +%zx (%d)", 3696 __func__, rb->idstr, start, length, ret); 3697 #endif 3698 3699 return ret; 3700 } 3701 3702 bool ramblock_is_pmem(RAMBlock *rb) 3703 { 3704 return rb->flags & RAM_PMEM; 3705 } 3706 3707 static void mtree_print_phys_entries(int start, int end, int skip, int ptr) 3708 { 3709 if (start == end - 1) { 3710 qemu_printf("\t%3d ", start); 3711 } else { 3712 qemu_printf("\t%3d..%-3d ", start, end - 1); 3713 } 3714 qemu_printf(" skip=%d ", skip); 3715 if (ptr == PHYS_MAP_NODE_NIL) { 3716 qemu_printf(" ptr=NIL"); 3717 } else if (!skip) { 3718 qemu_printf(" 
ptr=#%d", ptr); 3719 } else { 3720 qemu_printf(" ptr=[%d]", ptr); 3721 } 3722 qemu_printf("\n"); 3723 } 3724 3725 #define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \ 3726 int128_sub((size), int128_one())) : 0) 3727 3728 void mtree_print_dispatch(AddressSpaceDispatch *d, MemoryRegion *root) 3729 { 3730 int i; 3731 3732 qemu_printf(" Dispatch\n"); 3733 qemu_printf(" Physical sections\n"); 3734 3735 for (i = 0; i < d->map.sections_nb; ++i) { 3736 MemoryRegionSection *s = d->map.sections + i; 3737 const char *names[] = { " [unassigned]", " [not dirty]", 3738 " [ROM]", " [watch]" }; 3739 3740 qemu_printf(" #%d @" HWADDR_FMT_plx ".." HWADDR_FMT_plx 3741 " %s%s%s%s%s", 3742 i, 3743 s->offset_within_address_space, 3744 s->offset_within_address_space + MR_SIZE(s->size), 3745 s->mr->name ? s->mr->name : "(noname)", 3746 i < ARRAY_SIZE(names) ? names[i] : "", 3747 s->mr == root ? " [ROOT]" : "", 3748 s == d->mru_section ? " [MRU]" : "", 3749 s->mr->is_iommu ? " [iommu]" : ""); 3750 3751 if (s->mr->alias) { 3752 qemu_printf(" alias=%s", s->mr->alias->name ? 3753 s->mr->alias->name : "noname"); 3754 } 3755 qemu_printf("\n"); 3756 } 3757 3758 qemu_printf(" Nodes (%d bits per level, %d levels) ptr=[%d] skip=%d\n", 3759 P_L2_BITS, P_L2_LEVELS, d->phys_map.ptr, d->phys_map.skip); 3760 for (i = 0; i < d->map.nodes_nb; ++i) { 3761 int j, jprev; 3762 PhysPageEntry prev; 3763 Node *n = d->map.nodes + i; 3764 3765 qemu_printf(" [%d]\n", i); 3766 3767 for (j = 0, jprev = 0, prev = *n[0]; j < ARRAY_SIZE(*n); ++j) { 3768 PhysPageEntry *pe = *n + j; 3769 3770 if (pe->ptr == prev.ptr && pe->skip == prev.skip) { 3771 continue; 3772 } 3773 3774 mtree_print_phys_entries(jprev, j, prev.skip, prev.ptr); 3775 3776 jprev = j; 3777 prev = *pe; 3778 } 3779 3780 if (jprev != ARRAY_SIZE(*n)) { 3781 mtree_print_phys_entries(jprev, j, prev.skip, prev.ptr); 3782 } 3783 } 3784 } 3785 3786 /* Require any discards to work. */ 3787 static unsigned int ram_block_discard_required_cnt; 3788 /* Require only coordinated discards to work. */ 3789 static unsigned int ram_block_coordinated_discard_required_cnt; 3790 /* Disable any discards. */ 3791 static unsigned int ram_block_discard_disabled_cnt; 3792 /* Disable only uncoordinated discards. 
*/ 3793 static unsigned int ram_block_uncoordinated_discard_disabled_cnt; 3794 static QemuMutex ram_block_discard_disable_mutex; 3795 3796 static void ram_block_discard_disable_mutex_lock(void) 3797 { 3798 static gsize initialized; 3799 3800 if (g_once_init_enter(&initialized)) { 3801 qemu_mutex_init(&ram_block_discard_disable_mutex); 3802 g_once_init_leave(&initialized, 1); 3803 } 3804 qemu_mutex_lock(&ram_block_discard_disable_mutex); 3805 } 3806 3807 static void ram_block_discard_disable_mutex_unlock(void) 3808 { 3809 qemu_mutex_unlock(&ram_block_discard_disable_mutex); 3810 } 3811 3812 int ram_block_discard_disable(bool state) 3813 { 3814 int ret = 0; 3815 3816 ram_block_discard_disable_mutex_lock(); 3817 if (!state) { 3818 ram_block_discard_disabled_cnt--; 3819 } else if (ram_block_discard_required_cnt || 3820 ram_block_coordinated_discard_required_cnt) { 3821 ret = -EBUSY; 3822 } else { 3823 ram_block_discard_disabled_cnt++; 3824 } 3825 ram_block_discard_disable_mutex_unlock(); 3826 return ret; 3827 } 3828 3829 int ram_block_uncoordinated_discard_disable(bool state) 3830 { 3831 int ret = 0; 3832 3833 ram_block_discard_disable_mutex_lock(); 3834 if (!state) { 3835 ram_block_uncoordinated_discard_disabled_cnt--; 3836 } else if (ram_block_discard_required_cnt) { 3837 ret = -EBUSY; 3838 } else { 3839 ram_block_uncoordinated_discard_disabled_cnt++; 3840 } 3841 ram_block_discard_disable_mutex_unlock(); 3842 return ret; 3843 } 3844 3845 int ram_block_discard_require(bool state) 3846 { 3847 int ret = 0; 3848 3849 ram_block_discard_disable_mutex_lock(); 3850 if (!state) { 3851 ram_block_discard_required_cnt--; 3852 } else if (ram_block_discard_disabled_cnt || 3853 ram_block_uncoordinated_discard_disabled_cnt) { 3854 ret = -EBUSY; 3855 } else { 3856 ram_block_discard_required_cnt++; 3857 } 3858 ram_block_discard_disable_mutex_unlock(); 3859 return ret; 3860 } 3861 3862 int ram_block_coordinated_discard_require(bool state) 3863 { 3864 int ret = 0; 3865 3866 ram_block_discard_disable_mutex_lock(); 3867 if (!state) { 3868 ram_block_coordinated_discard_required_cnt--; 3869 } else if (ram_block_discard_disabled_cnt) { 3870 ret = -EBUSY; 3871 } else { 3872 ram_block_coordinated_discard_required_cnt++; 3873 } 3874 ram_block_discard_disable_mutex_unlock(); 3875 return ret; 3876 } 3877 3878 bool ram_block_discard_is_disabled(void) 3879 { 3880 return qatomic_read(&ram_block_discard_disabled_cnt) || 3881 qatomic_read(&ram_block_uncoordinated_discard_disabled_cnt); 3882 } 3883 3884 bool ram_block_discard_is_required(void) 3885 { 3886 return qatomic_read(&ram_block_discard_required_cnt) || 3887 qatomic_read(&ram_block_coordinated_discard_required_cnt); 3888 } 3889
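/*
 * Illustrative sketch, not part of the original file: the intended
 * map/use/unmap pattern for address_space_map(). The helper name
 * example_dma_to_guest() is hypothetical. The mapping may come back
 * shorter than requested (or NULL while the bounce buffer is in use),
 * so callers loop, and they must pass the actually accessed length to
 * address_space_unmap() so the right range is marked dirty.
 */
static inline void example_dma_to_guest(AddressSpace *as, hwaddr addr,
                                        const uint8_t *data, hwaddr len)
{
    while (len) {
        hwaddr plen = len;
        void *host = address_space_map(as, addr, &plen, true,
                                       MEMTXATTRS_UNSPECIFIED);

        if (!host) {
            /* Out of resources; a real caller would register a map
             * client with address_space_register_map_client() and retry. */
            return;
        }
        memcpy(host, data, plen);                  /* use the mapping */
        address_space_unmap(as, host, plen, true, plen);
        addr += plen;
        data += plen;
        len -= plen;
    }
}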
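/*
 * Illustrative sketch, not part of the original file: pairing a direct
 * write to a file- or pmem-backed block with qemu_ram_msync() so the
 * update reaches the backing store. The block name "pc.ram" and the
 * helper name example_persist_range() are hypothetical.
 */
static inline void example_persist_range(ram_addr_t offset, ram_addr_t len)
{
    RCU_READ_LOCK_GUARD();
    RAMBlock *rb = qemu_ram_block_by_name("pc.ram");

    if (rb && offset + len <= rb->used_length) {
        void *host = qemu_map_ram_ptr(rb, offset);

        memset(host, 0, len);            /* ... modify guest RAM directly ... */
        qemu_ram_msync(rb, offset, len); /* then sync just that range */
    }
}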
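/*
 * Illustrative sketch, not part of the original file: how the
 * require/disable reference counts above pair up over a device's
 * lifetime. The helper name example_plug_pinning_device() is
 * hypothetical. A user that pins guest pages disables discards up
 * front; if another user already requires discards to work, plugging
 * fails with -EBUSY instead of corrupting memory later.
 */
static inline int example_plug_pinning_device(Error **errp)
{
    if (ram_block_discard_disable(true)) {
        error_setg(errp, "RAM discarding is required by another user");
        return -EBUSY;
    }
    /* ... pin guest memory and run ... */

    /* On unplug, drop the reference taken above: */
    ram_block_discard_disable(false);
    return 0;
}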