1 /* 2 * RAM allocation and memory access 3 * 4 * Copyright (c) 2003 Fabrice Bellard 5 * 6 * This library is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU Lesser General Public 8 * License as published by the Free Software Foundation; either 9 * version 2.1 of the License, or (at your option) any later version. 10 * 11 * This library is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 * Lesser General Public License for more details. 15 * 16 * You should have received a copy of the GNU Lesser General Public 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 20 #include "qemu/osdep.h" 21 #include "exec/page-vary.h" 22 #include "qapi/error.h" 23 24 #include "qemu/cutils.h" 25 #include "qemu/cacheflush.h" 26 #include "qemu/hbitmap.h" 27 #include "qemu/madvise.h" 28 #include "qemu/lockable.h" 29 30 #ifdef CONFIG_TCG 31 #include "accel/tcg/cpu-ops.h" 32 #include "accel/tcg/iommu.h" 33 #endif /* CONFIG_TCG */ 34 35 #include "exec/exec-all.h" 36 #include "exec/cputlb.h" 37 #include "exec/page-protection.h" 38 #include "exec/target_page.h" 39 #include "exec/translation-block.h" 40 #include "hw/qdev-core.h" 41 #include "hw/qdev-properties.h" 42 #include "hw/boards.h" 43 #include "system/xen.h" 44 #include "system/kvm.h" 45 #include "system/tcg.h" 46 #include "system/qtest.h" 47 #include "qemu/timer.h" 48 #include "qemu/config-file.h" 49 #include "qemu/error-report.h" 50 #include "qemu/qemu-print.h" 51 #include "qemu/log.h" 52 #include "qemu/memalign.h" 53 #include "qemu/memfd.h" 54 #include "system/memory.h" 55 #include "system/ioport.h" 56 #include "system/dma.h" 57 #include "system/hostmem.h" 58 #include "system/hw_accel.h" 59 #include "system/xen-mapcache.h" 60 #include "trace.h" 61 62 #ifdef CONFIG_FALLOCATE_PUNCH_HOLE 63 #include <linux/falloc.h> 64 #endif 65 66 #include "qemu/rcu_queue.h" 67 #include "qemu/main-loop.h" 68 #include "system/replay.h" 69 70 #include "system/ram_addr.h" 71 72 #include "qemu/pmem.h" 73 74 #include "qapi/qapi-types-migration.h" 75 #include "migration/blocker.h" 76 #include "migration/cpr.h" 77 #include "migration/options.h" 78 #include "migration/vmstate.h" 79 80 #include "qemu/range.h" 81 #ifndef _WIN32 82 #include "qemu/mmap-alloc.h" 83 #endif 84 85 #include "monitor/monitor.h" 86 87 #ifdef CONFIG_LIBDAXCTL 88 #include <daxctl/libdaxctl.h> 89 #endif 90 91 #include "memory-internal.h" 92 93 //#define DEBUG_SUBPAGE 94 95 /* ram_list is read under rcu_read_lock()/rcu_read_unlock(). Writes 96 * are protected by the ramlist lock. 97 */ 98 RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) }; 99 100 static MemoryRegion *system_memory; 101 static MemoryRegion *system_io; 102 103 AddressSpace address_space_io; 104 AddressSpace address_space_memory; 105 106 static MemoryRegion io_mem_unassigned; 107 108 typedef struct PhysPageEntry PhysPageEntry; 109 110 struct PhysPageEntry { 111 /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */ 112 uint32_t skip : 6; 113 /* index into phys_sections (!skip) or phys_map_nodes (skip) */ 114 uint32_t ptr : 26; 115 }; 116 117 #define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6) 118 119 /* Size of the L2 (and L3, etc) page tables. */ 120 #define ADDR_SPACE_BITS 64 121 122 #define P_L2_BITS 9 123 #define P_L2_SIZE (1 << P_L2_BITS) 124 125 #define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1) 126 127 typedef PhysPageEntry Node[P_L2_SIZE]; 128 129 typedef struct PhysPageMap { 130 struct rcu_head rcu; 131 132 unsigned sections_nb; 133 unsigned sections_nb_alloc; 134 unsigned nodes_nb; 135 unsigned nodes_nb_alloc; 136 Node *nodes; 137 MemoryRegionSection *sections; 138 } PhysPageMap; 139 140 struct AddressSpaceDispatch { 141 MemoryRegionSection *mru_section; 142 /* This is a multi-level map on the physical address space. 143 * The bottom level has pointers to MemoryRegionSections. 144 */ 145 PhysPageEntry phys_map; 146 PhysPageMap map; 147 }; 148 149 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK) 150 typedef struct subpage_t { 151 MemoryRegion iomem; 152 FlatView *fv; 153 hwaddr base; 154 uint16_t sub_section[]; 155 } subpage_t; 156 157 #define PHYS_SECTION_UNASSIGNED 0 158 159 static void io_mem_init(void); 160 static void memory_map_init(void); 161 static void tcg_log_global_after_sync(MemoryListener *listener); 162 static void tcg_commit(MemoryListener *listener); 163 static bool ram_is_cpr_compatible(RAMBlock *rb); 164 165 /** 166 * CPUAddressSpace: all the information a CPU needs about an AddressSpace 167 * @cpu: the CPU whose AddressSpace this is 168 * @as: the AddressSpace itself 169 * @memory_dispatch: its dispatch pointer (cached, RCU protected) 170 * @tcg_as_listener: listener for tracking changes to the AddressSpace 171 */ 172 typedef struct CPUAddressSpace { 173 CPUState *cpu; 174 AddressSpace *as; 175 struct AddressSpaceDispatch *memory_dispatch; 176 MemoryListener tcg_as_listener; 177 } CPUAddressSpace; 178 179 struct DirtyBitmapSnapshot { 180 ram_addr_t start; 181 ram_addr_t end; 182 unsigned long dirty[]; 183 }; 184 185 static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes) 186 { 187 static unsigned alloc_hint = 16; 188 if (map->nodes_nb + nodes > map->nodes_nb_alloc) { 189 map->nodes_nb_alloc = MAX(alloc_hint, map->nodes_nb + nodes); 190 map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc); 191 alloc_hint = map->nodes_nb_alloc; 192 } 193 } 194 195 static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf) 196 { 197 unsigned i; 198 uint32_t ret; 199 PhysPageEntry e; 200 PhysPageEntry *p; 201 202 ret = map->nodes_nb++; 203 p = map->nodes[ret]; 204 assert(ret != PHYS_MAP_NODE_NIL); 205 assert(ret != map->nodes_nb_alloc); 206 207 e.skip = leaf ? 0 : 1; 208 e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL; 209 for (i = 0; i < P_L2_SIZE; ++i) { 210 memcpy(&p[i], &e, sizeof(e)); 211 } 212 return ret; 213 } 214 215 static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp, 216 hwaddr *index, uint64_t *nb, uint16_t leaf, 217 int level) 218 { 219 PhysPageEntry *p; 220 hwaddr step = (hwaddr)1 << (level * P_L2_BITS); 221 222 if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) { 223 lp->ptr = phys_map_node_alloc(map, level == 0); 224 } 225 p = map->nodes[lp->ptr]; 226 lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)]; 227 228 while (*nb && lp < &p[P_L2_SIZE]) { 229 if ((*index & (step - 1)) == 0 && *nb >= step) { 230 lp->skip = 0; 231 lp->ptr = leaf; 232 *index += step; 233 *nb -= step; 234 } else { 235 phys_page_set_level(map, lp, index, nb, leaf, level - 1); 236 } 237 ++lp; 238 } 239 } 240 241 static void phys_page_set(AddressSpaceDispatch *d, 242 hwaddr index, uint64_t nb, 243 uint16_t leaf) 244 { 245 /* Wildly overreserve - it doesn't matter much. */ 246 phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS); 247 248 phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1); 249 } 250 251 /* Compact a non leaf page entry. Simply detect that the entry has a single child, 252 * and update our entry so we can skip it and go directly to the destination. 253 */ 254 static void phys_page_compact(PhysPageEntry *lp, Node *nodes) 255 { 256 unsigned valid_ptr = P_L2_SIZE; 257 int valid = 0; 258 PhysPageEntry *p; 259 int i; 260 261 if (lp->ptr == PHYS_MAP_NODE_NIL) { 262 return; 263 } 264 265 p = nodes[lp->ptr]; 266 for (i = 0; i < P_L2_SIZE; i++) { 267 if (p[i].ptr == PHYS_MAP_NODE_NIL) { 268 continue; 269 } 270 271 valid_ptr = i; 272 valid++; 273 if (p[i].skip) { 274 phys_page_compact(&p[i], nodes); 275 } 276 } 277 278 /* We can only compress if there's only one child. */ 279 if (valid != 1) { 280 return; 281 } 282 283 assert(valid_ptr < P_L2_SIZE); 284 285 /* Don't compress if it won't fit in the # of bits we have. */ 286 if (P_L2_LEVELS >= (1 << 6) && 287 lp->skip + p[valid_ptr].skip >= (1 << 6)) { 288 return; 289 } 290 291 lp->ptr = p[valid_ptr].ptr; 292 if (!p[valid_ptr].skip) { 293 /* If our only child is a leaf, make this a leaf. */ 294 /* By design, we should have made this node a leaf to begin with so we 295 * should never reach here. 296 * But since it's so simple to handle this, let's do it just in case we 297 * change this rule. 298 */ 299 lp->skip = 0; 300 } else { 301 lp->skip += p[valid_ptr].skip; 302 } 303 } 304 305 void address_space_dispatch_compact(AddressSpaceDispatch *d) 306 { 307 if (d->phys_map.skip) { 308 phys_page_compact(&d->phys_map, d->map.nodes); 309 } 310 } 311 312 static inline bool section_covers_addr(const MemoryRegionSection *section, 313 hwaddr addr) 314 { 315 /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means 316 * the section must cover the entire address space. 317 */ 318 return int128_gethi(section->size) || 319 range_covers_byte(section->offset_within_address_space, 320 int128_getlo(section->size), addr); 321 } 322 323 static MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr addr) 324 { 325 PhysPageEntry lp = d->phys_map, *p; 326 Node *nodes = d->map.nodes; 327 MemoryRegionSection *sections = d->map.sections; 328 hwaddr index = addr >> TARGET_PAGE_BITS; 329 int i; 330 331 for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) { 332 if (lp.ptr == PHYS_MAP_NODE_NIL) { 333 return §ions[PHYS_SECTION_UNASSIGNED]; 334 } 335 p = nodes[lp.ptr]; 336 lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)]; 337 } 338 339 if (section_covers_addr(§ions[lp.ptr], addr)) { 340 return §ions[lp.ptr]; 341 } else { 342 return §ions[PHYS_SECTION_UNASSIGNED]; 343 } 344 } 345 346 /* Called from RCU critical section */ 347 static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d, 348 hwaddr addr, 349 bool resolve_subpage) 350 { 351 MemoryRegionSection *section = qatomic_read(&d->mru_section); 352 subpage_t *subpage; 353 354 if (!section || section == &d->map.sections[PHYS_SECTION_UNASSIGNED] || 355 !section_covers_addr(section, addr)) { 356 section = phys_page_find(d, addr); 357 qatomic_set(&d->mru_section, section); 358 } 359 if (resolve_subpage && section->mr->subpage) { 360 subpage = container_of(section->mr, subpage_t, iomem); 361 section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]]; 362 } 363 return section; 364 } 365 366 /* Called from RCU critical section */ 367 static MemoryRegionSection * 368 address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat, 369 hwaddr *plen, bool resolve_subpage) 370 { 371 MemoryRegionSection *section; 372 MemoryRegion *mr; 373 Int128 diff; 374 375 section = address_space_lookup_region(d, addr, resolve_subpage); 376 /* Compute offset within MemoryRegionSection */ 377 addr -= section->offset_within_address_space; 378 379 /* Compute offset within MemoryRegion */ 380 *xlat = addr + section->offset_within_region; 381 382 mr = section->mr; 383 384 /* MMIO registers can be expected to perform full-width accesses based only 385 * on their address, without considering adjacent registers that could 386 * decode to completely different MemoryRegions. When such registers 387 * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO 388 * regions overlap wildly. For this reason we cannot clamp the accesses 389 * here. 390 * 391 * If the length is small (as is the case for address_space_ldl/stl), 392 * everything works fine. If the incoming length is large, however, 393 * the caller really has to do the clamping through memory_access_size. 394 */ 395 if (memory_region_is_ram(mr)) { 396 diff = int128_sub(section->size, int128_make64(addr)); 397 *plen = int128_get64(int128_min(diff, int128_make64(*plen))); 398 } 399 return section; 400 } 401 402 /** 403 * address_space_translate_iommu - translate an address through an IOMMU 404 * memory region and then through the target address space. 405 * 406 * @iommu_mr: the IOMMU memory region that we start the translation from 407 * @addr: the address to be translated through the MMU 408 * @xlat: the translated address offset within the destination memory region. 409 * It cannot be %NULL. 410 * @plen_out: valid read/write length of the translated address. It 411 * cannot be %NULL. 412 * @page_mask_out: page mask for the translated address. This 413 * should only be meaningful for IOMMU translated 414 * addresses, since there may be huge pages that this bit 415 * would tell. It can be %NULL if we don't care about it. 416 * @is_write: whether the translation operation is for write 417 * @is_mmio: whether this can be MMIO, set true if it can 418 * @target_as: the address space targeted by the IOMMU 419 * @attrs: transaction attributes 420 * 421 * This function is called from RCU critical section. It is the common 422 * part of flatview_do_translate and address_space_translate_cached. 423 */ 424 static MemoryRegionSection address_space_translate_iommu(IOMMUMemoryRegion *iommu_mr, 425 hwaddr *xlat, 426 hwaddr *plen_out, 427 hwaddr *page_mask_out, 428 bool is_write, 429 bool is_mmio, 430 AddressSpace **target_as, 431 MemTxAttrs attrs) 432 { 433 MemoryRegionSection *section; 434 hwaddr page_mask = (hwaddr)-1; 435 436 do { 437 hwaddr addr = *xlat; 438 IOMMUMemoryRegionClass *imrc = memory_region_get_iommu_class_nocheck(iommu_mr); 439 int iommu_idx = 0; 440 IOMMUTLBEntry iotlb; 441 442 if (imrc->attrs_to_index) { 443 iommu_idx = imrc->attrs_to_index(iommu_mr, attrs); 444 } 445 446 iotlb = imrc->translate(iommu_mr, addr, is_write ? 447 IOMMU_WO : IOMMU_RO, iommu_idx); 448 449 if (!(iotlb.perm & (1 << is_write))) { 450 goto unassigned; 451 } 452 453 addr = ((iotlb.translated_addr & ~iotlb.addr_mask) 454 | (addr & iotlb.addr_mask)); 455 page_mask &= iotlb.addr_mask; 456 *plen_out = MIN(*plen_out, (addr | iotlb.addr_mask) - addr + 1); 457 *target_as = iotlb.target_as; 458 459 section = address_space_translate_internal( 460 address_space_to_dispatch(iotlb.target_as), addr, xlat, 461 plen_out, is_mmio); 462 463 iommu_mr = memory_region_get_iommu(section->mr); 464 } while (unlikely(iommu_mr)); 465 466 if (page_mask_out) { 467 *page_mask_out = page_mask; 468 } 469 return *section; 470 471 unassigned: 472 return (MemoryRegionSection) { .mr = &io_mem_unassigned }; 473 } 474 475 /** 476 * flatview_do_translate - translate an address in FlatView 477 * 478 * @fv: the flat view that we want to translate on 479 * @addr: the address to be translated in above address space 480 * @xlat: the translated address offset within memory region. It 481 * cannot be @NULL. 482 * @plen_out: valid read/write length of the translated address. It 483 * can be @NULL when we don't care about it. 484 * @page_mask_out: page mask for the translated address. This 485 * should only be meaningful for IOMMU translated 486 * addresses, since there may be huge pages that this bit 487 * would tell. It can be @NULL if we don't care about it. 488 * @is_write: whether the translation operation is for write 489 * @is_mmio: whether this can be MMIO, set true if it can 490 * @target_as: the address space targeted by the IOMMU 491 * @attrs: memory transaction attributes 492 * 493 * This function is called from RCU critical section 494 */ 495 static MemoryRegionSection flatview_do_translate(FlatView *fv, 496 hwaddr addr, 497 hwaddr *xlat, 498 hwaddr *plen_out, 499 hwaddr *page_mask_out, 500 bool is_write, 501 bool is_mmio, 502 AddressSpace **target_as, 503 MemTxAttrs attrs) 504 { 505 MemoryRegionSection *section; 506 IOMMUMemoryRegion *iommu_mr; 507 hwaddr plen = (hwaddr)(-1); 508 509 if (!plen_out) { 510 plen_out = &plen; 511 } 512 513 section = address_space_translate_internal( 514 flatview_to_dispatch(fv), addr, xlat, 515 plen_out, is_mmio); 516 517 iommu_mr = memory_region_get_iommu(section->mr); 518 if (unlikely(iommu_mr)) { 519 return address_space_translate_iommu(iommu_mr, xlat, 520 plen_out, page_mask_out, 521 is_write, is_mmio, 522 target_as, attrs); 523 } 524 if (page_mask_out) { 525 /* Not behind an IOMMU, use default page size. */ 526 *page_mask_out = ~TARGET_PAGE_MASK; 527 } 528 529 return *section; 530 } 531 532 /* Called from RCU critical section */ 533 IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr, 534 bool is_write, MemTxAttrs attrs) 535 { 536 MemoryRegionSection section; 537 hwaddr xlat, page_mask; 538 539 /* 540 * This can never be MMIO, and we don't really care about plen, 541 * but page mask. 542 */ 543 section = flatview_do_translate(address_space_to_flatview(as), addr, &xlat, 544 NULL, &page_mask, is_write, false, &as, 545 attrs); 546 547 /* Illegal translation */ 548 if (section.mr == &io_mem_unassigned) { 549 goto iotlb_fail; 550 } 551 552 /* Convert memory region offset into address space offset */ 553 xlat += section.offset_within_address_space - 554 section.offset_within_region; 555 556 return (IOMMUTLBEntry) { 557 .target_as = as, 558 .iova = addr & ~page_mask, 559 .translated_addr = xlat & ~page_mask, 560 .addr_mask = page_mask, 561 /* IOTLBs are for DMAs, and DMA only allows on RAMs. */ 562 .perm = IOMMU_RW, 563 }; 564 565 iotlb_fail: 566 return (IOMMUTLBEntry) {0}; 567 } 568 569 /* Called from RCU critical section */ 570 MemoryRegion *flatview_translate(FlatView *fv, hwaddr addr, hwaddr *xlat, 571 hwaddr *plen, bool is_write, 572 MemTxAttrs attrs) 573 { 574 MemoryRegion *mr; 575 MemoryRegionSection section; 576 AddressSpace *as = NULL; 577 578 /* This can be MMIO, so setup MMIO bit. */ 579 section = flatview_do_translate(fv, addr, xlat, plen, NULL, 580 is_write, true, &as, attrs); 581 mr = section.mr; 582 583 if (xen_enabled() && memory_access_is_direct(mr, is_write, attrs)) { 584 hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr; 585 *plen = MIN(page, *plen); 586 } 587 588 return mr; 589 } 590 591 #ifdef CONFIG_TCG 592 593 typedef struct TCGIOMMUNotifier { 594 IOMMUNotifier n; 595 MemoryRegion *mr; 596 CPUState *cpu; 597 int iommu_idx; 598 bool active; 599 } TCGIOMMUNotifier; 600 601 static void tcg_iommu_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb) 602 { 603 TCGIOMMUNotifier *notifier = container_of(n, TCGIOMMUNotifier, n); 604 605 if (!notifier->active) { 606 return; 607 } 608 tlb_flush(notifier->cpu); 609 notifier->active = false; 610 /* We leave the notifier struct on the list to avoid reallocating it later. 611 * Generally the number of IOMMUs a CPU deals with will be small. 612 * In any case we can't unregister the iommu notifier from a notify 613 * callback. 614 */ 615 } 616 617 static void tcg_register_iommu_notifier(CPUState *cpu, 618 IOMMUMemoryRegion *iommu_mr, 619 int iommu_idx) 620 { 621 /* Make sure this CPU has an IOMMU notifier registered for this 622 * IOMMU/IOMMU index combination, so that we can flush its TLB 623 * when the IOMMU tells us the mappings we've cached have changed. 624 */ 625 MemoryRegion *mr = MEMORY_REGION(iommu_mr); 626 TCGIOMMUNotifier *notifier = NULL; 627 int i; 628 629 for (i = 0; i < cpu->iommu_notifiers->len; i++) { 630 notifier = g_array_index(cpu->iommu_notifiers, TCGIOMMUNotifier *, i); 631 if (notifier->mr == mr && notifier->iommu_idx == iommu_idx) { 632 break; 633 } 634 } 635 if (i == cpu->iommu_notifiers->len) { 636 /* Not found, add a new entry at the end of the array */ 637 cpu->iommu_notifiers = g_array_set_size(cpu->iommu_notifiers, i + 1); 638 notifier = g_new0(TCGIOMMUNotifier, 1); 639 g_array_index(cpu->iommu_notifiers, TCGIOMMUNotifier *, i) = notifier; 640 641 notifier->mr = mr; 642 notifier->iommu_idx = iommu_idx; 643 notifier->cpu = cpu; 644 /* Rather than trying to register interest in the specific part 645 * of the iommu's address space that we've accessed and then 646 * expand it later as subsequent accesses touch more of it, we 647 * just register interest in the whole thing, on the assumption 648 * that iommu reconfiguration will be rare. 649 */ 650 iommu_notifier_init(¬ifier->n, 651 tcg_iommu_unmap_notify, 652 IOMMU_NOTIFIER_UNMAP, 653 0, 654 HWADDR_MAX, 655 iommu_idx); 656 memory_region_register_iommu_notifier(notifier->mr, ¬ifier->n, 657 &error_fatal); 658 } 659 660 if (!notifier->active) { 661 notifier->active = true; 662 } 663 } 664 665 void tcg_iommu_free_notifier_list(CPUState *cpu) 666 { 667 /* Destroy the CPU's notifier list */ 668 int i; 669 TCGIOMMUNotifier *notifier; 670 671 for (i = 0; i < cpu->iommu_notifiers->len; i++) { 672 notifier = g_array_index(cpu->iommu_notifiers, TCGIOMMUNotifier *, i); 673 memory_region_unregister_iommu_notifier(notifier->mr, ¬ifier->n); 674 g_free(notifier); 675 } 676 g_array_free(cpu->iommu_notifiers, true); 677 } 678 679 void tcg_iommu_init_notifier_list(CPUState *cpu) 680 { 681 cpu->iommu_notifiers = g_array_new(false, true, sizeof(TCGIOMMUNotifier *)); 682 } 683 684 /* Called from RCU critical section */ 685 MemoryRegionSection * 686 address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr orig_addr, 687 hwaddr *xlat, hwaddr *plen, 688 MemTxAttrs attrs, int *prot) 689 { 690 MemoryRegionSection *section; 691 IOMMUMemoryRegion *iommu_mr; 692 IOMMUMemoryRegionClass *imrc; 693 IOMMUTLBEntry iotlb; 694 int iommu_idx; 695 hwaddr addr = orig_addr; 696 AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch; 697 698 for (;;) { 699 section = address_space_translate_internal(d, addr, &addr, plen, false); 700 701 iommu_mr = memory_region_get_iommu(section->mr); 702 if (!iommu_mr) { 703 break; 704 } 705 706 imrc = memory_region_get_iommu_class_nocheck(iommu_mr); 707 708 iommu_idx = imrc->attrs_to_index(iommu_mr, attrs); 709 tcg_register_iommu_notifier(cpu, iommu_mr, iommu_idx); 710 /* We need all the permissions, so pass IOMMU_NONE so the IOMMU 711 * doesn't short-cut its translation table walk. 712 */ 713 iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, iommu_idx); 714 addr = ((iotlb.translated_addr & ~iotlb.addr_mask) 715 | (addr & iotlb.addr_mask)); 716 /* Update the caller's prot bits to remove permissions the IOMMU 717 * is giving us a failure response for. If we get down to no 718 * permissions left at all we can give up now. 719 */ 720 if (!(iotlb.perm & IOMMU_RO)) { 721 *prot &= ~(PAGE_READ | PAGE_EXEC); 722 } 723 if (!(iotlb.perm & IOMMU_WO)) { 724 *prot &= ~PAGE_WRITE; 725 } 726 727 if (!*prot) { 728 goto translate_fail; 729 } 730 731 d = flatview_to_dispatch(address_space_to_flatview(iotlb.target_as)); 732 } 733 734 assert(!memory_region_is_iommu(section->mr)); 735 *xlat = addr; 736 return section; 737 738 translate_fail: 739 /* 740 * We should be given a page-aligned address -- certainly 741 * tlb_set_page_with_attrs() does so. The page offset of xlat 742 * is used to index sections[], and PHYS_SECTION_UNASSIGNED = 0. 743 * The page portion of xlat will be logged by memory_region_access_valid() 744 * when this memory access is rejected, so use the original untranslated 745 * physical address. 746 */ 747 assert((orig_addr & ~TARGET_PAGE_MASK) == 0); 748 *xlat = orig_addr; 749 return &d->map.sections[PHYS_SECTION_UNASSIGNED]; 750 } 751 752 MemoryRegionSection *iotlb_to_section(CPUState *cpu, 753 hwaddr index, MemTxAttrs attrs) 754 { 755 int asidx = cpu_asidx_from_attrs(cpu, attrs); 756 CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx]; 757 AddressSpaceDispatch *d = cpuas->memory_dispatch; 758 int section_index = index & ~TARGET_PAGE_MASK; 759 MemoryRegionSection *ret; 760 761 assert(section_index < d->map.sections_nb); 762 ret = d->map.sections + section_index; 763 assert(ret->mr); 764 assert(ret->mr->ops); 765 766 return ret; 767 } 768 769 /* Called from RCU critical section */ 770 hwaddr memory_region_section_get_iotlb(CPUState *cpu, 771 MemoryRegionSection *section) 772 { 773 AddressSpaceDispatch *d = flatview_to_dispatch(section->fv); 774 return section - d->map.sections; 775 } 776 777 #endif /* CONFIG_TCG */ 778 779 void cpu_address_space_init(CPUState *cpu, int asidx, 780 const char *prefix, MemoryRegion *mr) 781 { 782 CPUAddressSpace *newas; 783 AddressSpace *as = g_new0(AddressSpace, 1); 784 char *as_name; 785 786 assert(mr); 787 as_name = g_strdup_printf("%s-%d", prefix, cpu->cpu_index); 788 address_space_init(as, mr, as_name); 789 g_free(as_name); 790 791 /* Target code should have set num_ases before calling us */ 792 assert(asidx < cpu->num_ases); 793 794 if (asidx == 0) { 795 /* address space 0 gets the convenience alias */ 796 cpu->as = as; 797 } 798 799 /* KVM cannot currently support multiple address spaces. */ 800 assert(asidx == 0 || !kvm_enabled()); 801 802 if (!cpu->cpu_ases) { 803 cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases); 804 cpu->cpu_ases_count = cpu->num_ases; 805 } 806 807 newas = &cpu->cpu_ases[asidx]; 808 newas->cpu = cpu; 809 newas->as = as; 810 if (tcg_enabled()) { 811 newas->tcg_as_listener.log_global_after_sync = tcg_log_global_after_sync; 812 newas->tcg_as_listener.commit = tcg_commit; 813 newas->tcg_as_listener.name = "tcg"; 814 memory_listener_register(&newas->tcg_as_listener, as); 815 } 816 } 817 818 void cpu_address_space_destroy(CPUState *cpu, int asidx) 819 { 820 CPUAddressSpace *cpuas; 821 822 assert(cpu->cpu_ases); 823 assert(asidx >= 0 && asidx < cpu->num_ases); 824 /* KVM cannot currently support multiple address spaces. */ 825 assert(asidx == 0 || !kvm_enabled()); 826 827 cpuas = &cpu->cpu_ases[asidx]; 828 if (tcg_enabled()) { 829 memory_listener_unregister(&cpuas->tcg_as_listener); 830 } 831 832 address_space_destroy(cpuas->as); 833 g_free_rcu(cpuas->as, rcu); 834 835 if (asidx == 0) { 836 /* reset the convenience alias for address space 0 */ 837 cpu->as = NULL; 838 } 839 840 if (--cpu->cpu_ases_count == 0) { 841 g_free(cpu->cpu_ases); 842 cpu->cpu_ases = NULL; 843 } 844 } 845 846 AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx) 847 { 848 /* Return the AddressSpace corresponding to the specified index */ 849 return cpu->cpu_ases[asidx].as; 850 } 851 852 /* Called from RCU critical section */ 853 static RAMBlock *qemu_get_ram_block(ram_addr_t addr) 854 { 855 RAMBlock *block; 856 857 block = qatomic_rcu_read(&ram_list.mru_block); 858 if (block && addr - block->offset < block->max_length) { 859 return block; 860 } 861 RAMBLOCK_FOREACH(block) { 862 if (addr - block->offset < block->max_length) { 863 goto found; 864 } 865 } 866 867 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr); 868 abort(); 869 870 found: 871 /* It is safe to write mru_block outside the BQL. This 872 * is what happens: 873 * 874 * mru_block = xxx 875 * rcu_read_unlock() 876 * xxx removed from list 877 * rcu_read_lock() 878 * read mru_block 879 * mru_block = NULL; 880 * call_rcu(reclaim_ramblock, xxx); 881 * rcu_read_unlock() 882 * 883 * qatomic_rcu_set is not needed here. The block was already published 884 * when it was placed into the list. Here we're just making an extra 885 * copy of the pointer. 886 */ 887 ram_list.mru_block = block; 888 return block; 889 } 890 891 void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length) 892 { 893 CPUState *cpu; 894 ram_addr_t start1; 895 RAMBlock *block; 896 ram_addr_t end; 897 898 assert(tcg_enabled()); 899 end = TARGET_PAGE_ALIGN(start + length); 900 start &= TARGET_PAGE_MASK; 901 902 RCU_READ_LOCK_GUARD(); 903 block = qemu_get_ram_block(start); 904 assert(block == qemu_get_ram_block(end - 1)); 905 start1 = (uintptr_t)ramblock_ptr(block, start - block->offset); 906 CPU_FOREACH(cpu) { 907 tlb_reset_dirty(cpu, start1, length); 908 } 909 } 910 911 /* Note: start and end must be within the same ram block. */ 912 bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start, 913 ram_addr_t length, 914 unsigned client) 915 { 916 DirtyMemoryBlocks *blocks; 917 unsigned long end, page, start_page; 918 bool dirty = false; 919 RAMBlock *ramblock; 920 uint64_t mr_offset, mr_size; 921 922 if (length == 0) { 923 return false; 924 } 925 926 end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS; 927 start_page = start >> TARGET_PAGE_BITS; 928 page = start_page; 929 930 WITH_RCU_READ_LOCK_GUARD() { 931 blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]); 932 ramblock = qemu_get_ram_block(start); 933 /* Range sanity check on the ramblock */ 934 assert(start >= ramblock->offset && 935 start + length <= ramblock->offset + ramblock->used_length); 936 937 while (page < end) { 938 unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE; 939 unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE; 940 unsigned long num = MIN(end - page, 941 DIRTY_MEMORY_BLOCK_SIZE - offset); 942 943 dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx], 944 offset, num); 945 page += num; 946 } 947 948 mr_offset = (ram_addr_t)(start_page << TARGET_PAGE_BITS) - ramblock->offset; 949 mr_size = (end - start_page) << TARGET_PAGE_BITS; 950 memory_region_clear_dirty_bitmap(ramblock->mr, mr_offset, mr_size); 951 } 952 953 if (dirty) { 954 cpu_physical_memory_dirty_bits_cleared(start, length); 955 } 956 957 return dirty; 958 } 959 960 DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty 961 (MemoryRegion *mr, hwaddr offset, hwaddr length, unsigned client) 962 { 963 DirtyMemoryBlocks *blocks; 964 ram_addr_t start, first, last; 965 unsigned long align = 1UL << (TARGET_PAGE_BITS + BITS_PER_LEVEL); 966 DirtyBitmapSnapshot *snap; 967 unsigned long page, end, dest; 968 969 start = memory_region_get_ram_addr(mr); 970 /* We know we're only called for RAM MemoryRegions */ 971 assert(start != RAM_ADDR_INVALID); 972 start += offset; 973 974 first = QEMU_ALIGN_DOWN(start, align); 975 last = QEMU_ALIGN_UP(start + length, align); 976 977 snap = g_malloc0(sizeof(*snap) + 978 ((last - first) >> (TARGET_PAGE_BITS + 3))); 979 snap->start = first; 980 snap->end = last; 981 982 page = first >> TARGET_PAGE_BITS; 983 end = last >> TARGET_PAGE_BITS; 984 dest = 0; 985 986 WITH_RCU_READ_LOCK_GUARD() { 987 blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]); 988 989 while (page < end) { 990 unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE; 991 unsigned long ofs = page % DIRTY_MEMORY_BLOCK_SIZE; 992 unsigned long num = MIN(end - page, 993 DIRTY_MEMORY_BLOCK_SIZE - ofs); 994 995 assert(QEMU_IS_ALIGNED(ofs, (1 << BITS_PER_LEVEL))); 996 assert(QEMU_IS_ALIGNED(num, (1 << BITS_PER_LEVEL))); 997 ofs >>= BITS_PER_LEVEL; 998 999 bitmap_copy_and_clear_atomic(snap->dirty + dest, 1000 blocks->blocks[idx] + ofs, 1001 num); 1002 page += num; 1003 dest += num >> BITS_PER_LEVEL; 1004 } 1005 } 1006 1007 cpu_physical_memory_dirty_bits_cleared(start, length); 1008 1009 memory_region_clear_dirty_bitmap(mr, offset, length); 1010 1011 return snap; 1012 } 1013 1014 bool cpu_physical_memory_snapshot_get_dirty(DirtyBitmapSnapshot *snap, 1015 ram_addr_t start, 1016 ram_addr_t length) 1017 { 1018 unsigned long page, end; 1019 1020 assert(start >= snap->start); 1021 assert(start + length <= snap->end); 1022 1023 end = TARGET_PAGE_ALIGN(start + length - snap->start) >> TARGET_PAGE_BITS; 1024 page = (start - snap->start) >> TARGET_PAGE_BITS; 1025 1026 while (page < end) { 1027 if (test_bit(page, snap->dirty)) { 1028 return true; 1029 } 1030 page++; 1031 } 1032 return false; 1033 } 1034 1035 static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end, 1036 uint16_t section); 1037 static subpage_t *subpage_init(FlatView *fv, hwaddr base); 1038 1039 static uint16_t phys_section_add(PhysPageMap *map, 1040 MemoryRegionSection *section) 1041 { 1042 /* The physical section number is ORed with a page-aligned 1043 * pointer to produce the iotlb entries. Thus it should 1044 * never overflow into the page-aligned value. 1045 */ 1046 assert(map->sections_nb < TARGET_PAGE_SIZE); 1047 1048 if (map->sections_nb == map->sections_nb_alloc) { 1049 map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16); 1050 map->sections = g_renew(MemoryRegionSection, map->sections, 1051 map->sections_nb_alloc); 1052 } 1053 map->sections[map->sections_nb] = *section; 1054 memory_region_ref(section->mr); 1055 return map->sections_nb++; 1056 } 1057 1058 static void phys_section_destroy(MemoryRegion *mr) 1059 { 1060 bool have_sub_page = mr->subpage; 1061 1062 memory_region_unref(mr); 1063 1064 if (have_sub_page) { 1065 subpage_t *subpage = container_of(mr, subpage_t, iomem); 1066 object_unref(OBJECT(&subpage->iomem)); 1067 g_free(subpage); 1068 } 1069 } 1070 1071 static void phys_sections_free(PhysPageMap *map) 1072 { 1073 while (map->sections_nb > 0) { 1074 MemoryRegionSection *section = &map->sections[--map->sections_nb]; 1075 phys_section_destroy(section->mr); 1076 } 1077 g_free(map->sections); 1078 g_free(map->nodes); 1079 } 1080 1081 static void register_subpage(FlatView *fv, MemoryRegionSection *section) 1082 { 1083 AddressSpaceDispatch *d = flatview_to_dispatch(fv); 1084 subpage_t *subpage; 1085 hwaddr base = section->offset_within_address_space 1086 & TARGET_PAGE_MASK; 1087 MemoryRegionSection *existing = phys_page_find(d, base); 1088 MemoryRegionSection subsection = { 1089 .offset_within_address_space = base, 1090 .size = int128_make64(TARGET_PAGE_SIZE), 1091 }; 1092 hwaddr start, end; 1093 1094 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned); 1095 1096 if (!(existing->mr->subpage)) { 1097 subpage = subpage_init(fv, base); 1098 subsection.fv = fv; 1099 subsection.mr = &subpage->iomem; 1100 phys_page_set(d, base >> TARGET_PAGE_BITS, 1, 1101 phys_section_add(&d->map, &subsection)); 1102 } else { 1103 subpage = container_of(existing->mr, subpage_t, iomem); 1104 } 1105 start = section->offset_within_address_space & ~TARGET_PAGE_MASK; 1106 end = start + int128_get64(section->size) - 1; 1107 subpage_register(subpage, start, end, 1108 phys_section_add(&d->map, section)); 1109 } 1110 1111 1112 static void register_multipage(FlatView *fv, 1113 MemoryRegionSection *section) 1114 { 1115 AddressSpaceDispatch *d = flatview_to_dispatch(fv); 1116 hwaddr start_addr = section->offset_within_address_space; 1117 uint16_t section_index = phys_section_add(&d->map, section); 1118 uint64_t num_pages = int128_get64(int128_rshift(section->size, 1119 TARGET_PAGE_BITS)); 1120 1121 assert(num_pages); 1122 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index); 1123 } 1124 1125 /* 1126 * The range in *section* may look like this: 1127 * 1128 * |s|PPPPPPP|s| 1129 * 1130 * where s stands for subpage and P for page. 1131 */ 1132 void flatview_add_to_dispatch(FlatView *fv, MemoryRegionSection *section) 1133 { 1134 MemoryRegionSection remain = *section; 1135 Int128 page_size = int128_make64(TARGET_PAGE_SIZE); 1136 1137 /* register first subpage */ 1138 if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) { 1139 uint64_t left = TARGET_PAGE_ALIGN(remain.offset_within_address_space) 1140 - remain.offset_within_address_space; 1141 1142 MemoryRegionSection now = remain; 1143 now.size = int128_min(int128_make64(left), now.size); 1144 register_subpage(fv, &now); 1145 if (int128_eq(remain.size, now.size)) { 1146 return; 1147 } 1148 remain.size = int128_sub(remain.size, now.size); 1149 remain.offset_within_address_space += int128_get64(now.size); 1150 remain.offset_within_region += int128_get64(now.size); 1151 } 1152 1153 /* register whole pages */ 1154 if (int128_ge(remain.size, page_size)) { 1155 MemoryRegionSection now = remain; 1156 now.size = int128_and(now.size, int128_neg(page_size)); 1157 register_multipage(fv, &now); 1158 if (int128_eq(remain.size, now.size)) { 1159 return; 1160 } 1161 remain.size = int128_sub(remain.size, now.size); 1162 remain.offset_within_address_space += int128_get64(now.size); 1163 remain.offset_within_region += int128_get64(now.size); 1164 } 1165 1166 /* register last subpage */ 1167 register_subpage(fv, &remain); 1168 } 1169 1170 void qemu_flush_coalesced_mmio_buffer(void) 1171 { 1172 if (kvm_enabled()) 1173 kvm_flush_coalesced_mmio_buffer(); 1174 } 1175 1176 void qemu_mutex_lock_ramlist(void) 1177 { 1178 qemu_mutex_lock(&ram_list.mutex); 1179 } 1180 1181 void qemu_mutex_unlock_ramlist(void) 1182 { 1183 qemu_mutex_unlock(&ram_list.mutex); 1184 } 1185 1186 GString *ram_block_format(void) 1187 { 1188 RAMBlock *block; 1189 char *psize; 1190 GString *buf = g_string_new(""); 1191 1192 RCU_READ_LOCK_GUARD(); 1193 g_string_append_printf(buf, "%24s %8s %18s %18s %18s %18s %3s\n", 1194 "Block Name", "PSize", "Offset", "Used", "Total", 1195 "HVA", "RO"); 1196 1197 RAMBLOCK_FOREACH(block) { 1198 psize = size_to_str(block->page_size); 1199 g_string_append_printf(buf, "%24s %8s 0x%016" PRIx64 " 0x%016" PRIx64 1200 " 0x%016" PRIx64 " 0x%016" PRIx64 " %3s\n", 1201 block->idstr, psize, 1202 (uint64_t)block->offset, 1203 (uint64_t)block->used_length, 1204 (uint64_t)block->max_length, 1205 (uint64_t)(uintptr_t)block->host, 1206 block->mr->readonly ? "ro" : "rw"); 1207 1208 g_free(psize); 1209 } 1210 1211 return buf; 1212 } 1213 1214 static int find_min_backend_pagesize(Object *obj, void *opaque) 1215 { 1216 long *hpsize_min = opaque; 1217 1218 if (object_dynamic_cast(obj, TYPE_MEMORY_BACKEND)) { 1219 HostMemoryBackend *backend = MEMORY_BACKEND(obj); 1220 long hpsize = host_memory_backend_pagesize(backend); 1221 1222 if (host_memory_backend_is_mapped(backend) && (hpsize < *hpsize_min)) { 1223 *hpsize_min = hpsize; 1224 } 1225 } 1226 1227 return 0; 1228 } 1229 1230 static int find_max_backend_pagesize(Object *obj, void *opaque) 1231 { 1232 long *hpsize_max = opaque; 1233 1234 if (object_dynamic_cast(obj, TYPE_MEMORY_BACKEND)) { 1235 HostMemoryBackend *backend = MEMORY_BACKEND(obj); 1236 long hpsize = host_memory_backend_pagesize(backend); 1237 1238 if (host_memory_backend_is_mapped(backend) && (hpsize > *hpsize_max)) { 1239 *hpsize_max = hpsize; 1240 } 1241 } 1242 1243 return 0; 1244 } 1245 1246 /* 1247 * TODO: We assume right now that all mapped host memory backends are 1248 * used as RAM, however some might be used for different purposes. 1249 */ 1250 long qemu_minrampagesize(void) 1251 { 1252 long hpsize = LONG_MAX; 1253 Object *memdev_root = object_resolve_path("/objects", NULL); 1254 1255 object_child_foreach(memdev_root, find_min_backend_pagesize, &hpsize); 1256 return hpsize; 1257 } 1258 1259 long qemu_maxrampagesize(void) 1260 { 1261 long pagesize = 0; 1262 Object *memdev_root = object_resolve_path("/objects", NULL); 1263 1264 object_child_foreach(memdev_root, find_max_backend_pagesize, &pagesize); 1265 return pagesize; 1266 } 1267 1268 #ifdef CONFIG_POSIX 1269 static int64_t get_file_size(int fd) 1270 { 1271 int64_t size; 1272 #if defined(__linux__) 1273 struct stat st; 1274 1275 if (fstat(fd, &st) < 0) { 1276 return -errno; 1277 } 1278 1279 /* Special handling for devdax character devices */ 1280 if (S_ISCHR(st.st_mode)) { 1281 g_autofree char *subsystem_path = NULL; 1282 g_autofree char *subsystem = NULL; 1283 1284 subsystem_path = g_strdup_printf("/sys/dev/char/%d:%d/subsystem", 1285 major(st.st_rdev), minor(st.st_rdev)); 1286 subsystem = g_file_read_link(subsystem_path, NULL); 1287 1288 if (subsystem && g_str_has_suffix(subsystem, "/dax")) { 1289 g_autofree char *size_path = NULL; 1290 g_autofree char *size_str = NULL; 1291 1292 size_path = g_strdup_printf("/sys/dev/char/%d:%d/size", 1293 major(st.st_rdev), minor(st.st_rdev)); 1294 1295 if (g_file_get_contents(size_path, &size_str, NULL, NULL)) { 1296 return g_ascii_strtoll(size_str, NULL, 0); 1297 } 1298 } 1299 } 1300 #endif /* defined(__linux__) */ 1301 1302 /* st.st_size may be zero for special files yet lseek(2) works */ 1303 size = lseek(fd, 0, SEEK_END); 1304 if (size < 0) { 1305 return -errno; 1306 } 1307 return size; 1308 } 1309 1310 static int64_t get_file_align(int fd) 1311 { 1312 int64_t align = -1; 1313 #if defined(__linux__) && defined(CONFIG_LIBDAXCTL) 1314 struct stat st; 1315 1316 if (fstat(fd, &st) < 0) { 1317 return -errno; 1318 } 1319 1320 /* Special handling for devdax character devices */ 1321 if (S_ISCHR(st.st_mode)) { 1322 g_autofree char *path = NULL; 1323 g_autofree char *rpath = NULL; 1324 struct daxctl_ctx *ctx; 1325 struct daxctl_region *region; 1326 int rc = 0; 1327 1328 path = g_strdup_printf("/sys/dev/char/%d:%d", 1329 major(st.st_rdev), minor(st.st_rdev)); 1330 rpath = realpath(path, NULL); 1331 if (!rpath) { 1332 return -errno; 1333 } 1334 1335 rc = daxctl_new(&ctx); 1336 if (rc) { 1337 return -1; 1338 } 1339 1340 daxctl_region_foreach(ctx, region) { 1341 if (strstr(rpath, daxctl_region_get_path(region))) { 1342 align = daxctl_region_get_align(region); 1343 break; 1344 } 1345 } 1346 daxctl_unref(ctx); 1347 } 1348 #endif /* defined(__linux__) && defined(CONFIG_LIBDAXCTL) */ 1349 1350 return align; 1351 } 1352 1353 static int file_ram_open(const char *path, 1354 const char *region_name, 1355 bool readonly, 1356 bool *created) 1357 { 1358 char *filename; 1359 char *sanitized_name; 1360 char *c; 1361 int fd = -1; 1362 1363 *created = false; 1364 for (;;) { 1365 fd = open(path, readonly ? O_RDONLY : O_RDWR); 1366 if (fd >= 0) { 1367 /* 1368 * open(O_RDONLY) won't fail with EISDIR. Check manually if we 1369 * opened a directory and fail similarly to how we fail ENOENT 1370 * in readonly mode. Note that mkstemp() would imply O_RDWR. 1371 */ 1372 if (readonly) { 1373 struct stat file_stat; 1374 1375 if (fstat(fd, &file_stat)) { 1376 close(fd); 1377 if (errno == EINTR) { 1378 continue; 1379 } 1380 return -errno; 1381 } else if (S_ISDIR(file_stat.st_mode)) { 1382 close(fd); 1383 return -EISDIR; 1384 } 1385 } 1386 /* @path names an existing file, use it */ 1387 break; 1388 } 1389 if (errno == ENOENT) { 1390 if (readonly) { 1391 /* Refuse to create new, readonly files. */ 1392 return -ENOENT; 1393 } 1394 /* @path names a file that doesn't exist, create it */ 1395 fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644); 1396 if (fd >= 0) { 1397 *created = true; 1398 break; 1399 } 1400 } else if (errno == EISDIR) { 1401 /* @path names a directory, create a file there */ 1402 /* Make name safe to use with mkstemp by replacing '/' with '_'. */ 1403 sanitized_name = g_strdup(region_name); 1404 for (c = sanitized_name; *c != '\0'; c++) { 1405 if (*c == '/') { 1406 *c = '_'; 1407 } 1408 } 1409 1410 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path, 1411 sanitized_name); 1412 g_free(sanitized_name); 1413 1414 fd = mkstemp(filename); 1415 if (fd >= 0) { 1416 unlink(filename); 1417 g_free(filename); 1418 break; 1419 } 1420 g_free(filename); 1421 } 1422 if (errno != EEXIST && errno != EINTR) { 1423 return -errno; 1424 } 1425 /* 1426 * Try again on EINTR and EEXIST. The latter happens when 1427 * something else creates the file between our two open(). 1428 */ 1429 } 1430 1431 return fd; 1432 } 1433 1434 static void *file_ram_alloc(RAMBlock *block, 1435 ram_addr_t memory, 1436 int fd, 1437 bool truncate, 1438 off_t offset, 1439 Error **errp) 1440 { 1441 uint32_t qemu_map_flags; 1442 void *area; 1443 1444 block->page_size = qemu_fd_getpagesize(fd); 1445 if (block->mr->align % block->page_size) { 1446 error_setg(errp, "alignment 0x%" PRIx64 1447 " must be multiples of page size 0x%zx", 1448 block->mr->align, block->page_size); 1449 return NULL; 1450 } else if (block->mr->align && !is_power_of_2(block->mr->align)) { 1451 error_setg(errp, "alignment 0x%" PRIx64 1452 " must be a power of two", block->mr->align); 1453 return NULL; 1454 } else if (offset % block->page_size) { 1455 error_setg(errp, "offset 0x%" PRIx64 1456 " must be multiples of page size 0x%zx", 1457 offset, block->page_size); 1458 return NULL; 1459 } 1460 block->mr->align = MAX(block->page_size, block->mr->align); 1461 #if defined(__s390x__) 1462 if (kvm_enabled()) { 1463 block->mr->align = MAX(block->mr->align, QEMU_VMALLOC_ALIGN); 1464 } 1465 #endif 1466 1467 if (memory < block->page_size) { 1468 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to " 1469 "or larger than page size 0x%zx", 1470 memory, block->page_size); 1471 return NULL; 1472 } 1473 1474 memory = ROUND_UP(memory, block->page_size); 1475 1476 /* 1477 * ftruncate is not supported by hugetlbfs in older 1478 * hosts, so don't bother bailing out on errors. 1479 * If anything goes wrong with it under other filesystems, 1480 * mmap will fail. 1481 * 1482 * Do not truncate the non-empty backend file to avoid corrupting 1483 * the existing data in the file. Disabling shrinking is not 1484 * enough. For example, the current vNVDIMM implementation stores 1485 * the guest NVDIMM labels at the end of the backend file. If the 1486 * backend file is later extended, QEMU will not be able to find 1487 * those labels. Therefore, extending the non-empty backend file 1488 * is disabled as well. 1489 */ 1490 if (truncate && ftruncate(fd, offset + memory)) { 1491 perror("ftruncate"); 1492 } 1493 1494 qemu_map_flags = (block->flags & RAM_READONLY) ? QEMU_MAP_READONLY : 0; 1495 qemu_map_flags |= (block->flags & RAM_SHARED) ? QEMU_MAP_SHARED : 0; 1496 qemu_map_flags |= (block->flags & RAM_PMEM) ? QEMU_MAP_SYNC : 0; 1497 qemu_map_flags |= (block->flags & RAM_NORESERVE) ? QEMU_MAP_NORESERVE : 0; 1498 area = qemu_ram_mmap(fd, memory, block->mr->align, qemu_map_flags, offset); 1499 if (area == MAP_FAILED) { 1500 error_setg_errno(errp, errno, 1501 "unable to map backing store for guest RAM"); 1502 return NULL; 1503 } 1504 1505 block->fd = fd; 1506 block->fd_offset = offset; 1507 return area; 1508 } 1509 #endif 1510 1511 /* Allocate space within the ram_addr_t space that governs the 1512 * dirty bitmaps. 1513 * Called with the ramlist lock held. 1514 */ 1515 static ram_addr_t find_ram_offset(ram_addr_t size) 1516 { 1517 RAMBlock *block, *next_block; 1518 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX; 1519 1520 assert(size != 0); /* it would hand out same offset multiple times */ 1521 1522 if (QLIST_EMPTY_RCU(&ram_list.blocks)) { 1523 return 0; 1524 } 1525 1526 RAMBLOCK_FOREACH(block) { 1527 ram_addr_t candidate, next = RAM_ADDR_MAX; 1528 1529 /* Align blocks to start on a 'long' in the bitmap 1530 * which makes the bitmap sync'ing take the fast path. 1531 */ 1532 candidate = block->offset + block->max_length; 1533 candidate = ROUND_UP(candidate, BITS_PER_LONG << TARGET_PAGE_BITS); 1534 1535 /* Search for the closest following block 1536 * and find the gap. 1537 */ 1538 RAMBLOCK_FOREACH(next_block) { 1539 if (next_block->offset >= candidate) { 1540 next = MIN(next, next_block->offset); 1541 } 1542 } 1543 1544 /* If it fits remember our place and remember the size 1545 * of gap, but keep going so that we might find a smaller 1546 * gap to fill so avoiding fragmentation. 1547 */ 1548 if (next - candidate >= size && next - candidate < mingap) { 1549 offset = candidate; 1550 mingap = next - candidate; 1551 } 1552 1553 trace_find_ram_offset_loop(size, candidate, offset, next, mingap); 1554 } 1555 1556 if (offset == RAM_ADDR_MAX) { 1557 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n", 1558 (uint64_t)size); 1559 abort(); 1560 } 1561 1562 trace_find_ram_offset(size, offset); 1563 1564 return offset; 1565 } 1566 1567 static void qemu_ram_setup_dump(void *addr, ram_addr_t size) 1568 { 1569 int ret; 1570 1571 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */ 1572 if (!machine_dump_guest_core(current_machine)) { 1573 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP); 1574 if (ret) { 1575 perror("qemu_madvise"); 1576 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, " 1577 "but dump-guest-core=off specified\n"); 1578 } 1579 } 1580 } 1581 1582 const char *qemu_ram_get_idstr(RAMBlock *rb) 1583 { 1584 return rb->idstr; 1585 } 1586 1587 void *qemu_ram_get_host_addr(RAMBlock *rb) 1588 { 1589 return rb->host; 1590 } 1591 1592 ram_addr_t qemu_ram_get_offset(RAMBlock *rb) 1593 { 1594 return rb->offset; 1595 } 1596 1597 ram_addr_t qemu_ram_get_used_length(RAMBlock *rb) 1598 { 1599 return rb->used_length; 1600 } 1601 1602 ram_addr_t qemu_ram_get_max_length(RAMBlock *rb) 1603 { 1604 return rb->max_length; 1605 } 1606 1607 bool qemu_ram_is_shared(RAMBlock *rb) 1608 { 1609 return rb->flags & RAM_SHARED; 1610 } 1611 1612 bool qemu_ram_is_noreserve(RAMBlock *rb) 1613 { 1614 return rb->flags & RAM_NORESERVE; 1615 } 1616 1617 /* Note: Only set at the start of postcopy */ 1618 bool qemu_ram_is_uf_zeroable(RAMBlock *rb) 1619 { 1620 return rb->flags & RAM_UF_ZEROPAGE; 1621 } 1622 1623 void qemu_ram_set_uf_zeroable(RAMBlock *rb) 1624 { 1625 rb->flags |= RAM_UF_ZEROPAGE; 1626 } 1627 1628 bool qemu_ram_is_migratable(RAMBlock *rb) 1629 { 1630 return rb->flags & RAM_MIGRATABLE; 1631 } 1632 1633 void qemu_ram_set_migratable(RAMBlock *rb) 1634 { 1635 rb->flags |= RAM_MIGRATABLE; 1636 } 1637 1638 void qemu_ram_unset_migratable(RAMBlock *rb) 1639 { 1640 rb->flags &= ~RAM_MIGRATABLE; 1641 } 1642 1643 bool qemu_ram_is_named_file(RAMBlock *rb) 1644 { 1645 return rb->flags & RAM_NAMED_FILE; 1646 } 1647 1648 int qemu_ram_get_fd(RAMBlock *rb) 1649 { 1650 return rb->fd; 1651 } 1652 1653 /* Called with the BQL held. */ 1654 void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev) 1655 { 1656 RAMBlock *block; 1657 1658 assert(new_block); 1659 assert(!new_block->idstr[0]); 1660 1661 if (dev) { 1662 char *id = qdev_get_dev_path(dev); 1663 if (id) { 1664 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id); 1665 g_free(id); 1666 } 1667 } 1668 pstrcat(new_block->idstr, sizeof(new_block->idstr), name); 1669 1670 RCU_READ_LOCK_GUARD(); 1671 RAMBLOCK_FOREACH(block) { 1672 if (block != new_block && 1673 !strcmp(block->idstr, new_block->idstr)) { 1674 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n", 1675 new_block->idstr); 1676 abort(); 1677 } 1678 } 1679 } 1680 1681 /* Called with the BQL held. */ 1682 void qemu_ram_unset_idstr(RAMBlock *block) 1683 { 1684 /* FIXME: arch_init.c assumes that this is not called throughout 1685 * migration. Ignore the problem since hot-unplug during migration 1686 * does not work anyway. 1687 */ 1688 if (block) { 1689 memset(block->idstr, 0, sizeof(block->idstr)); 1690 } 1691 } 1692 1693 static char *cpr_name(MemoryRegion *mr) 1694 { 1695 const char *mr_name = memory_region_name(mr); 1696 g_autofree char *id = mr->dev ? qdev_get_dev_path(mr->dev) : NULL; 1697 1698 if (id) { 1699 return g_strdup_printf("%s/%s", id, mr_name); 1700 } else { 1701 return g_strdup(mr_name); 1702 } 1703 } 1704 1705 size_t qemu_ram_pagesize(RAMBlock *rb) 1706 { 1707 return rb->page_size; 1708 } 1709 1710 /* Returns the largest size of page in use */ 1711 size_t qemu_ram_pagesize_largest(void) 1712 { 1713 RAMBlock *block; 1714 size_t largest = 0; 1715 1716 RAMBLOCK_FOREACH(block) { 1717 largest = MAX(largest, qemu_ram_pagesize(block)); 1718 } 1719 1720 return largest; 1721 } 1722 1723 static int memory_try_enable_merging(void *addr, size_t len) 1724 { 1725 if (!machine_mem_merge(current_machine)) { 1726 /* disabled by the user */ 1727 return 0; 1728 } 1729 1730 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE); 1731 } 1732 1733 /* 1734 * Resizing RAM while migrating can result in the migration being canceled. 1735 * Care has to be taken if the guest might have already detected the memory. 1736 * 1737 * As memory core doesn't know how is memory accessed, it is up to 1738 * resize callback to update device state and/or add assertions to detect 1739 * misuse, if necessary. 1740 */ 1741 int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp) 1742 { 1743 const ram_addr_t oldsize = block->used_length; 1744 const ram_addr_t unaligned_size = newsize; 1745 1746 assert(block); 1747 1748 newsize = TARGET_PAGE_ALIGN(newsize); 1749 newsize = REAL_HOST_PAGE_ALIGN(newsize); 1750 1751 if (block->used_length == newsize) { 1752 /* 1753 * We don't have to resize the ram block (which only knows aligned 1754 * sizes), however, we have to notify if the unaligned size changed. 1755 */ 1756 if (unaligned_size != memory_region_size(block->mr)) { 1757 memory_region_set_size(block->mr, unaligned_size); 1758 if (block->resized) { 1759 block->resized(block->idstr, unaligned_size, block->host); 1760 } 1761 } 1762 return 0; 1763 } 1764 1765 if (!(block->flags & RAM_RESIZEABLE)) { 1766 error_setg_errno(errp, EINVAL, 1767 "Size mismatch: %s: 0x" RAM_ADDR_FMT 1768 " != 0x" RAM_ADDR_FMT, block->idstr, 1769 newsize, block->used_length); 1770 return -EINVAL; 1771 } 1772 1773 if (block->max_length < newsize) { 1774 error_setg_errno(errp, EINVAL, 1775 "Size too large: %s: 0x" RAM_ADDR_FMT 1776 " > 0x" RAM_ADDR_FMT, block->idstr, 1777 newsize, block->max_length); 1778 return -EINVAL; 1779 } 1780 1781 /* Notify before modifying the ram block and touching the bitmaps. */ 1782 if (block->host) { 1783 ram_block_notify_resize(block->host, oldsize, newsize); 1784 } 1785 1786 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length); 1787 block->used_length = newsize; 1788 cpu_physical_memory_set_dirty_range(block->offset, block->used_length, 1789 DIRTY_CLIENTS_ALL); 1790 memory_region_set_size(block->mr, unaligned_size); 1791 if (block->resized) { 1792 block->resized(block->idstr, unaligned_size, block->host); 1793 } 1794 return 0; 1795 } 1796 1797 /* 1798 * Trigger sync on the given ram block for range [start, start + length] 1799 * with the backing store if one is available. 1800 * Otherwise no-op. 1801 * @Note: this is supposed to be a synchronous op. 1802 */ 1803 void qemu_ram_msync(RAMBlock *block, ram_addr_t start, ram_addr_t length) 1804 { 1805 /* The requested range should fit in within the block range */ 1806 g_assert((start + length) <= block->used_length); 1807 1808 #ifdef CONFIG_LIBPMEM 1809 /* The lack of support for pmem should not block the sync */ 1810 if (ramblock_is_pmem(block)) { 1811 void *addr = ramblock_ptr(block, start); 1812 pmem_persist(addr, length); 1813 return; 1814 } 1815 #endif 1816 if (block->fd >= 0) { 1817 /** 1818 * Case there is no support for PMEM or the memory has not been 1819 * specified as persistent (or is not one) - use the msync. 1820 * Less optimal but still achieves the same goal 1821 */ 1822 void *addr = ramblock_ptr(block, start); 1823 if (qemu_msync(addr, length, block->fd)) { 1824 warn_report("%s: failed to sync memory range: start: " 1825 RAM_ADDR_FMT " length: " RAM_ADDR_FMT, 1826 __func__, start, length); 1827 } 1828 } 1829 } 1830 1831 /* Called with ram_list.mutex held */ 1832 static void dirty_memory_extend(ram_addr_t new_ram_size) 1833 { 1834 unsigned int old_num_blocks = ram_list.num_dirty_blocks; 1835 unsigned int new_num_blocks = DIV_ROUND_UP(new_ram_size, 1836 DIRTY_MEMORY_BLOCK_SIZE); 1837 int i; 1838 1839 /* Only need to extend if block count increased */ 1840 if (new_num_blocks <= old_num_blocks) { 1841 return; 1842 } 1843 1844 for (i = 0; i < DIRTY_MEMORY_NUM; i++) { 1845 DirtyMemoryBlocks *old_blocks; 1846 DirtyMemoryBlocks *new_blocks; 1847 int j; 1848 1849 old_blocks = qatomic_rcu_read(&ram_list.dirty_memory[i]); 1850 new_blocks = g_malloc(sizeof(*new_blocks) + 1851 sizeof(new_blocks->blocks[0]) * new_num_blocks); 1852 1853 if (old_num_blocks) { 1854 memcpy(new_blocks->blocks, old_blocks->blocks, 1855 old_num_blocks * sizeof(old_blocks->blocks[0])); 1856 } 1857 1858 for (j = old_num_blocks; j < new_num_blocks; j++) { 1859 new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE); 1860 } 1861 1862 qatomic_rcu_set(&ram_list.dirty_memory[i], new_blocks); 1863 1864 if (old_blocks) { 1865 g_free_rcu(old_blocks, rcu); 1866 } 1867 } 1868 1869 ram_list.num_dirty_blocks = new_num_blocks; 1870 } 1871 1872 static void ram_block_add(RAMBlock *new_block, Error **errp) 1873 { 1874 const bool noreserve = qemu_ram_is_noreserve(new_block); 1875 const bool shared = qemu_ram_is_shared(new_block); 1876 RAMBlock *block; 1877 RAMBlock *last_block = NULL; 1878 bool free_on_error = false; 1879 ram_addr_t ram_size; 1880 Error *err = NULL; 1881 1882 qemu_mutex_lock_ramlist(); 1883 new_block->offset = find_ram_offset(new_block->max_length); 1884 1885 if (!new_block->host) { 1886 if (xen_enabled()) { 1887 xen_ram_alloc(new_block->offset, new_block->max_length, 1888 new_block->mr, &err); 1889 if (err) { 1890 error_propagate(errp, err); 1891 qemu_mutex_unlock_ramlist(); 1892 return; 1893 } 1894 } else { 1895 new_block->host = qemu_anon_ram_alloc(new_block->max_length, 1896 &new_block->mr->align, 1897 shared, noreserve); 1898 if (!new_block->host) { 1899 error_setg_errno(errp, errno, 1900 "cannot set up guest memory '%s'", 1901 memory_region_name(new_block->mr)); 1902 qemu_mutex_unlock_ramlist(); 1903 return; 1904 } 1905 memory_try_enable_merging(new_block->host, new_block->max_length); 1906 free_on_error = true; 1907 } 1908 } 1909 1910 if (new_block->flags & RAM_GUEST_MEMFD) { 1911 int ret; 1912 1913 if (!kvm_enabled()) { 1914 error_setg(errp, "cannot set up private guest memory for %s: KVM required", 1915 object_get_typename(OBJECT(current_machine->cgs))); 1916 goto out_free; 1917 } 1918 assert(new_block->guest_memfd < 0); 1919 1920 ret = ram_block_discard_require(true); 1921 if (ret < 0) { 1922 error_setg_errno(errp, -ret, 1923 "cannot set up private guest memory: discard currently blocked"); 1924 error_append_hint(errp, "Are you using assigned devices?\n"); 1925 goto out_free; 1926 } 1927 1928 new_block->guest_memfd = kvm_create_guest_memfd(new_block->max_length, 1929 0, errp); 1930 if (new_block->guest_memfd < 0) { 1931 qemu_mutex_unlock_ramlist(); 1932 goto out_free; 1933 } 1934 1935 /* 1936 * Add a specific guest_memfd blocker if a generic one would not be 1937 * added by ram_block_add_cpr_blocker. 1938 */ 1939 if (ram_is_cpr_compatible(new_block)) { 1940 error_setg(&new_block->cpr_blocker, 1941 "Memory region %s uses guest_memfd, " 1942 "which is not supported with CPR.", 1943 memory_region_name(new_block->mr)); 1944 migrate_add_blocker_modes(&new_block->cpr_blocker, errp, 1945 MIG_MODE_CPR_TRANSFER, -1); 1946 } 1947 } 1948 1949 ram_size = (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS; 1950 dirty_memory_extend(ram_size); 1951 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ, 1952 * QLIST (which has an RCU-friendly variant) does not have insertion at 1953 * tail, so save the last element in last_block. 1954 */ 1955 RAMBLOCK_FOREACH(block) { 1956 last_block = block; 1957 if (block->max_length < new_block->max_length) { 1958 break; 1959 } 1960 } 1961 if (block) { 1962 QLIST_INSERT_BEFORE_RCU(block, new_block, next); 1963 } else if (last_block) { 1964 QLIST_INSERT_AFTER_RCU(last_block, new_block, next); 1965 } else { /* list is empty */ 1966 QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next); 1967 } 1968 ram_list.mru_block = NULL; 1969 1970 /* Write list before version */ 1971 smp_wmb(); 1972 ram_list.version++; 1973 qemu_mutex_unlock_ramlist(); 1974 1975 cpu_physical_memory_set_dirty_range(new_block->offset, 1976 new_block->used_length, 1977 DIRTY_CLIENTS_ALL); 1978 1979 if (new_block->host) { 1980 qemu_ram_setup_dump(new_block->host, new_block->max_length); 1981 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE); 1982 /* 1983 * MADV_DONTFORK is also needed by KVM in absence of synchronous MMU 1984 * Configure it unless the machine is a qtest server, in which case 1985 * KVM is not used and it may be forked (eg for fuzzing purposes). 1986 */ 1987 if (!qtest_enabled()) { 1988 qemu_madvise(new_block->host, new_block->max_length, 1989 QEMU_MADV_DONTFORK); 1990 } 1991 ram_block_notify_add(new_block->host, new_block->used_length, 1992 new_block->max_length); 1993 } 1994 return; 1995 1996 out_free: 1997 if (free_on_error) { 1998 qemu_anon_ram_free(new_block->host, new_block->max_length); 1999 new_block->host = NULL; 2000 } 2001 } 2002 2003 #ifdef CONFIG_POSIX 2004 RAMBlock *qemu_ram_alloc_from_fd(ram_addr_t size, ram_addr_t max_size, 2005 qemu_ram_resize_cb resized, MemoryRegion *mr, 2006 uint32_t ram_flags, int fd, off_t offset, 2007 bool grow, 2008 Error **errp) 2009 { 2010 ERRP_GUARD(); 2011 RAMBlock *new_block; 2012 Error *local_err = NULL; 2013 int64_t file_size, file_align, share_flags; 2014 2015 share_flags = ram_flags & (RAM_PRIVATE | RAM_SHARED); 2016 assert(share_flags != (RAM_SHARED | RAM_PRIVATE)); 2017 ram_flags &= ~RAM_PRIVATE; 2018 2019 /* Just support these ram flags by now. */ 2020 assert((ram_flags & ~(RAM_SHARED | RAM_PMEM | RAM_NORESERVE | 2021 RAM_PROTECTED | RAM_NAMED_FILE | RAM_READONLY | 2022 RAM_READONLY_FD | RAM_GUEST_MEMFD | 2023 RAM_RESIZEABLE)) == 0); 2024 assert(max_size >= size); 2025 2026 if (xen_enabled()) { 2027 error_setg(errp, "-mem-path not supported with Xen"); 2028 return NULL; 2029 } 2030 2031 if (kvm_enabled() && !kvm_has_sync_mmu()) { 2032 error_setg(errp, 2033 "host lacks kvm mmu notifiers, -mem-path unsupported"); 2034 return NULL; 2035 } 2036 2037 size = TARGET_PAGE_ALIGN(size); 2038 size = REAL_HOST_PAGE_ALIGN(size); 2039 max_size = TARGET_PAGE_ALIGN(max_size); 2040 max_size = REAL_HOST_PAGE_ALIGN(max_size); 2041 2042 file_size = get_file_size(fd); 2043 if (file_size && file_size < offset + max_size && !grow) { 2044 error_setg(errp, "%s backing store size 0x%" PRIx64 2045 " is too small for 'size' option 0x" RAM_ADDR_FMT 2046 " plus 'offset' option 0x%" PRIx64, 2047 memory_region_name(mr), file_size, max_size, 2048 (uint64_t)offset); 2049 return NULL; 2050 } 2051 2052 file_align = get_file_align(fd); 2053 if (file_align > 0 && file_align > mr->align) { 2054 error_setg(errp, "backing store align 0x%" PRIx64 2055 " is larger than 'align' option 0x%" PRIx64, 2056 file_align, mr->align); 2057 return NULL; 2058 } 2059 2060 new_block = g_malloc0(sizeof(*new_block)); 2061 new_block->mr = mr; 2062 new_block->used_length = size; 2063 new_block->max_length = max_size; 2064 new_block->resized = resized; 2065 new_block->flags = ram_flags; 2066 new_block->guest_memfd = -1; 2067 new_block->host = file_ram_alloc(new_block, max_size, fd, 2068 file_size < offset + max_size, 2069 offset, errp); 2070 if (!new_block->host) { 2071 g_free(new_block); 2072 return NULL; 2073 } 2074 2075 ram_block_add(new_block, &local_err); 2076 if (local_err) { 2077 g_free(new_block); 2078 error_propagate(errp, local_err); 2079 return NULL; 2080 } 2081 return new_block; 2082 2083 } 2084 2085 2086 RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr, 2087 uint32_t ram_flags, const char *mem_path, 2088 off_t offset, Error **errp) 2089 { 2090 int fd; 2091 bool created; 2092 RAMBlock *block; 2093 2094 fd = file_ram_open(mem_path, memory_region_name(mr), 2095 !!(ram_flags & RAM_READONLY_FD), &created); 2096 if (fd < 0) { 2097 error_setg_errno(errp, -fd, "can't open backing store %s for guest RAM", 2098 mem_path); 2099 if (!(ram_flags & RAM_READONLY_FD) && !(ram_flags & RAM_SHARED) && 2100 fd == -EACCES) { 2101 /* 2102 * If we can open the file R/O (note: will never create a new file) 2103 * and we are dealing with a private mapping, there are still ways 2104 * to consume such files and get RAM instead of ROM. 2105 */ 2106 fd = file_ram_open(mem_path, memory_region_name(mr), true, 2107 &created); 2108 if (fd < 0) { 2109 return NULL; 2110 } 2111 assert(!created); 2112 close(fd); 2113 error_append_hint(errp, "Consider opening the backing store" 2114 " read-only but still creating writable RAM using" 2115 " '-object memory-backend-file,readonly=on,rom=off...'" 2116 " (see \"VM templating\" documentation)\n"); 2117 } 2118 return NULL; 2119 } 2120 2121 block = qemu_ram_alloc_from_fd(size, size, NULL, mr, ram_flags, fd, offset, 2122 false, errp); 2123 if (!block) { 2124 if (created) { 2125 unlink(mem_path); 2126 } 2127 close(fd); 2128 return NULL; 2129 } 2130 2131 return block; 2132 } 2133 #endif 2134 2135 #ifdef CONFIG_POSIX 2136 /* 2137 * Create MAP_SHARED RAMBlocks by mmap'ing a file descriptor, so it can be 2138 * shared with another process if CPR is being used. Use memfd if available 2139 * because it has no size limits, else use POSIX shm. 2140 */ 2141 static int qemu_ram_get_shared_fd(const char *name, bool *reused, Error **errp) 2142 { 2143 int fd = cpr_find_fd(name, 0); 2144 2145 if (fd >= 0) { 2146 *reused = true; 2147 return fd; 2148 } 2149 2150 if (qemu_memfd_check(0)) { 2151 fd = qemu_memfd_create(name, 0, 0, 0, 0, errp); 2152 } else { 2153 fd = qemu_shm_alloc(0, errp); 2154 } 2155 2156 if (fd >= 0) { 2157 cpr_save_fd(name, 0, fd); 2158 } 2159 *reused = false; 2160 return fd; 2161 } 2162 #endif 2163 2164 static 2165 RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size, 2166 qemu_ram_resize_cb resized, 2167 void *host, uint32_t ram_flags, 2168 MemoryRegion *mr, Error **errp) 2169 { 2170 RAMBlock *new_block; 2171 Error *local_err = NULL; 2172 int align, share_flags; 2173 2174 share_flags = ram_flags & (RAM_PRIVATE | RAM_SHARED); 2175 assert(share_flags != (RAM_SHARED | RAM_PRIVATE)); 2176 ram_flags &= ~RAM_PRIVATE; 2177 2178 assert((ram_flags & ~(RAM_SHARED | RAM_RESIZEABLE | RAM_PREALLOC | 2179 RAM_NORESERVE | RAM_GUEST_MEMFD)) == 0); 2180 assert(!host ^ (ram_flags & RAM_PREALLOC)); 2181 assert(max_size >= size); 2182 2183 #ifdef CONFIG_POSIX /* ignore RAM_SHARED for Windows */ 2184 if (!host) { 2185 if (!share_flags && current_machine->aux_ram_share) { 2186 ram_flags |= RAM_SHARED; 2187 } 2188 if (ram_flags & RAM_SHARED) { 2189 bool reused; 2190 g_autofree char *name = cpr_name(mr); 2191 int fd = qemu_ram_get_shared_fd(name, &reused, errp); 2192 2193 if (fd < 0) { 2194 return NULL; 2195 } 2196 2197 /* Use same alignment as qemu_anon_ram_alloc */ 2198 mr->align = QEMU_VMALLOC_ALIGN; 2199 2200 /* 2201 * This can fail if the shm mount size is too small, or alloc from 2202 * fd is not supported, but previous QEMU versions that called 2203 * qemu_anon_ram_alloc for anonymous shared memory could have 2204 * succeeded. Quietly fail and fall back. 2205 * 2206 * After cpr-transfer, new QEMU could create a memory region 2207 * with a larger max size than old, so pass reused to grow the 2208 * region if necessary. The extra space will be usable after a 2209 * guest reset. 2210 */ 2211 new_block = qemu_ram_alloc_from_fd(size, max_size, resized, mr, 2212 ram_flags, fd, 0, reused, NULL); 2213 if (new_block) { 2214 trace_qemu_ram_alloc_shared(name, new_block->used_length, 2215 new_block->max_length, fd, 2216 new_block->host); 2217 return new_block; 2218 } 2219 2220 cpr_delete_fd(name, 0); 2221 close(fd); 2222 /* fall back to anon allocation */ 2223 } 2224 } 2225 #endif 2226 2227 align = qemu_real_host_page_size(); 2228 align = MAX(align, TARGET_PAGE_SIZE); 2229 size = ROUND_UP(size, align); 2230 max_size = ROUND_UP(max_size, align); 2231 2232 new_block = g_malloc0(sizeof(*new_block)); 2233 new_block->mr = mr; 2234 new_block->resized = resized; 2235 new_block->used_length = size; 2236 new_block->max_length = max_size; 2237 new_block->fd = -1; 2238 new_block->guest_memfd = -1; 2239 new_block->page_size = qemu_real_host_page_size(); 2240 new_block->host = host; 2241 new_block->flags = ram_flags; 2242 ram_block_add(new_block, &local_err); 2243 if (local_err) { 2244 g_free(new_block); 2245 error_propagate(errp, local_err); 2246 return NULL; 2247 } 2248 return new_block; 2249 } 2250 2251 RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host, 2252 MemoryRegion *mr, Error **errp) 2253 { 2254 return qemu_ram_alloc_internal(size, size, NULL, host, RAM_PREALLOC, mr, 2255 errp); 2256 } 2257 2258 RAMBlock *qemu_ram_alloc(ram_addr_t size, uint32_t ram_flags, 2259 MemoryRegion *mr, Error **errp) 2260 { 2261 assert((ram_flags & ~(RAM_SHARED | RAM_NORESERVE | RAM_GUEST_MEMFD | 2262 RAM_PRIVATE)) == 0); 2263 return qemu_ram_alloc_internal(size, size, NULL, NULL, ram_flags, mr, errp); 2264 } 2265 2266 RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz, 2267 qemu_ram_resize_cb resized, 2268 MemoryRegion *mr, Error **errp) 2269 { 2270 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, 2271 RAM_RESIZEABLE, mr, errp); 2272 } 2273 2274 static void reclaim_ramblock(RAMBlock *block) 2275 { 2276 if (block->flags & RAM_PREALLOC) { 2277 ; 2278 } else if (xen_enabled()) { 2279 xen_invalidate_map_cache_entry(block->host); 2280 #ifndef _WIN32 2281 } else if (block->fd >= 0) { 2282 qemu_ram_munmap(block->fd, block->host, block->max_length); 2283 close(block->fd); 2284 #endif 2285 } else { 2286 qemu_anon_ram_free(block->host, block->max_length); 2287 } 2288 2289 if (block->guest_memfd >= 0) { 2290 close(block->guest_memfd); 2291 ram_block_discard_require(false); 2292 } 2293 2294 g_free(block); 2295 } 2296 2297 void qemu_ram_free(RAMBlock *block) 2298 { 2299 g_autofree char *name = NULL; 2300 2301 if (!block) { 2302 return; 2303 } 2304 2305 if (block->host) { 2306 ram_block_notify_remove(block->host, block->used_length, 2307 block->max_length); 2308 } 2309 2310 qemu_mutex_lock_ramlist(); 2311 name = cpr_name(block->mr); 2312 cpr_delete_fd(name, 0); 2313 QLIST_REMOVE_RCU(block, next); 2314 ram_list.mru_block = NULL; 2315 /* Write list before version */ 2316 smp_wmb(); 2317 ram_list.version++; 2318 call_rcu(block, reclaim_ramblock, rcu); 2319 qemu_mutex_unlock_ramlist(); 2320 } 2321 2322 #ifndef _WIN32 2323 /* Simply remap the given VM memory location from start to start+length */ 2324 static int qemu_ram_remap_mmap(RAMBlock *block, uint64_t start, size_t length) 2325 { 2326 int flags, prot; 2327 void *area; 2328 void *host_startaddr = block->host + start; 2329 2330 assert(block->fd < 0); 2331 flags = MAP_FIXED | MAP_ANONYMOUS; 2332 flags |= block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE; 2333 flags |= block->flags & RAM_NORESERVE ? MAP_NORESERVE : 0; 2334 prot = PROT_READ; 2335 prot |= block->flags & RAM_READONLY ? 0 : PROT_WRITE; 2336 area = mmap(host_startaddr, length, prot, flags, -1, 0); 2337 return area != host_startaddr ? -errno : 0; 2338 } 2339 2340 /* 2341 * qemu_ram_remap - remap a single RAM page 2342 * 2343 * @addr: address in ram_addr_t address space. 2344 * 2345 * This function will try remapping a single page of guest RAM identified by 2346 * @addr, essentially discarding memory to recover from previously poisoned 2347 * memory (MCE). The page size depends on the RAMBlock (i.e., hugetlb). @addr 2348 * does not have to point at the start of the page. 2349 * 2350 * This function is only to be used during system resets; it will kill the 2351 * VM if remapping failed. 2352 */ 2353 void qemu_ram_remap(ram_addr_t addr) 2354 { 2355 RAMBlock *block; 2356 uint64_t offset; 2357 void *vaddr; 2358 size_t page_size; 2359 2360 RAMBLOCK_FOREACH(block) { 2361 offset = addr - block->offset; 2362 if (offset < block->max_length) { 2363 /* Respect the pagesize of our RAMBlock */ 2364 page_size = qemu_ram_pagesize(block); 2365 offset = QEMU_ALIGN_DOWN(offset, page_size); 2366 2367 vaddr = ramblock_ptr(block, offset); 2368 if (block->flags & RAM_PREALLOC) { 2369 ; 2370 } else if (xen_enabled()) { 2371 abort(); 2372 } else { 2373 if (ram_block_discard_range(block, offset, page_size) != 0) { 2374 /* 2375 * Fall back to using mmap() only for anonymous mapping, 2376 * as if a backing file is associated we may not be able 2377 * to recover the memory in all cases. 2378 * So don't take the risk of using only mmap and fail now. 2379 */ 2380 if (block->fd >= 0) { 2381 error_report("Could not remap RAM %s:%" PRIx64 "+%" 2382 PRIx64 " +%zx", block->idstr, offset, 2383 block->fd_offset, page_size); 2384 exit(1); 2385 } 2386 if (qemu_ram_remap_mmap(block, offset, page_size) != 0) { 2387 error_report("Could not remap RAM %s:%" PRIx64 " +%zx", 2388 block->idstr, offset, page_size); 2389 exit(1); 2390 } 2391 } 2392 memory_try_enable_merging(vaddr, page_size); 2393 qemu_ram_setup_dump(vaddr, page_size); 2394 } 2395 2396 break; 2397 } 2398 } 2399 } 2400 #endif /* !_WIN32 */ 2401 2402 /* 2403 * Return a host pointer to guest's ram. 2404 * For Xen, foreign mappings get created if they don't already exist. 2405 * 2406 * @block: block for the RAM to lookup (optional and may be NULL). 2407 * @addr: address within the memory region. 2408 * @size: pointer to requested size (optional and may be NULL). 2409 * size may get modified and return a value smaller than 2410 * what was requested. 2411 * @lock: wether to lock the mapping in xen-mapcache until invalidated. 2412 * @is_write: hint wether to map RW or RO in the xen-mapcache. 2413 * (optional and may always be set to true). 2414 * 2415 * Called within RCU critical section. 2416 */ 2417 static void *qemu_ram_ptr_length(RAMBlock *block, ram_addr_t addr, 2418 hwaddr *size, bool lock, 2419 bool is_write) 2420 { 2421 hwaddr len = 0; 2422 2423 if (size && *size == 0) { 2424 return NULL; 2425 } 2426 2427 if (block == NULL) { 2428 block = qemu_get_ram_block(addr); 2429 addr -= block->offset; 2430 } 2431 if (size) { 2432 *size = MIN(*size, block->max_length - addr); 2433 len = *size; 2434 } 2435 2436 if (xen_enabled() && block->host == NULL) { 2437 /* We need to check if the requested address is in the RAM 2438 * because we don't want to map the entire memory in QEMU. 2439 * In that case just map the requested area. 2440 */ 2441 if (xen_mr_is_memory(block->mr)) { 2442 return xen_map_cache(block->mr, block->offset + addr, 2443 len, block->offset, 2444 lock, lock, is_write); 2445 } 2446 2447 block->host = xen_map_cache(block->mr, block->offset, 2448 block->max_length, 2449 block->offset, 2450 1, lock, is_write); 2451 } 2452 2453 return ramblock_ptr(block, addr); 2454 } 2455 2456 /* 2457 * Return a host pointer to ram allocated with qemu_ram_alloc. 2458 * This should not be used for general purpose DMA. Use address_space_map 2459 * or address_space_rw instead. For local memory (e.g. video ram) that the 2460 * device owns, use memory_region_get_ram_ptr. 2461 * 2462 * Called within RCU critical section. 2463 */ 2464 void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr) 2465 { 2466 return qemu_ram_ptr_length(ram_block, addr, NULL, false, true); 2467 } 2468 2469 /* Return the offset of a hostpointer within a ramblock */ 2470 ram_addr_t qemu_ram_block_host_offset(RAMBlock *rb, void *host) 2471 { 2472 ram_addr_t res = (uint8_t *)host - (uint8_t *)rb->host; 2473 assert((uintptr_t)host >= (uintptr_t)rb->host); 2474 assert(res < rb->max_length); 2475 2476 return res; 2477 } 2478 2479 RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset, 2480 ram_addr_t *offset) 2481 { 2482 RAMBlock *block; 2483 uint8_t *host = ptr; 2484 2485 if (xen_enabled()) { 2486 ram_addr_t ram_addr; 2487 RCU_READ_LOCK_GUARD(); 2488 ram_addr = xen_ram_addr_from_mapcache(ptr); 2489 if (ram_addr == RAM_ADDR_INVALID) { 2490 return NULL; 2491 } 2492 2493 block = qemu_get_ram_block(ram_addr); 2494 if (block) { 2495 *offset = ram_addr - block->offset; 2496 } 2497 return block; 2498 } 2499 2500 RCU_READ_LOCK_GUARD(); 2501 block = qatomic_rcu_read(&ram_list.mru_block); 2502 if (block && block->host && host - block->host < block->max_length) { 2503 goto found; 2504 } 2505 2506 RAMBLOCK_FOREACH(block) { 2507 /* This case append when the block is not mapped. */ 2508 if (block->host == NULL) { 2509 continue; 2510 } 2511 if (host - block->host < block->max_length) { 2512 goto found; 2513 } 2514 } 2515 2516 return NULL; 2517 2518 found: 2519 *offset = (host - block->host); 2520 if (round_offset) { 2521 *offset &= TARGET_PAGE_MASK; 2522 } 2523 return block; 2524 } 2525 2526 /* 2527 * Finds the named RAMBlock 2528 * 2529 * name: The name of RAMBlock to find 2530 * 2531 * Returns: RAMBlock (or NULL if not found) 2532 */ 2533 RAMBlock *qemu_ram_block_by_name(const char *name) 2534 { 2535 RAMBlock *block; 2536 2537 RAMBLOCK_FOREACH(block) { 2538 if (!strcmp(name, block->idstr)) { 2539 return block; 2540 } 2541 } 2542 2543 return NULL; 2544 } 2545 2546 /* 2547 * Some of the system routines need to translate from a host pointer 2548 * (typically a TLB entry) back to a ram offset. 2549 */ 2550 ram_addr_t qemu_ram_addr_from_host(void *ptr) 2551 { 2552 RAMBlock *block; 2553 ram_addr_t offset; 2554 2555 block = qemu_ram_block_from_host(ptr, false, &offset); 2556 if (!block) { 2557 return RAM_ADDR_INVALID; 2558 } 2559 2560 return block->offset + offset; 2561 } 2562 2563 ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr) 2564 { 2565 ram_addr_t ram_addr; 2566 2567 ram_addr = qemu_ram_addr_from_host(ptr); 2568 if (ram_addr == RAM_ADDR_INVALID) { 2569 error_report("Bad ram pointer %p", ptr); 2570 abort(); 2571 } 2572 return ram_addr; 2573 } 2574 2575 static MemTxResult flatview_read(FlatView *fv, hwaddr addr, 2576 MemTxAttrs attrs, void *buf, hwaddr len); 2577 static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs, 2578 const void *buf, hwaddr len); 2579 static bool flatview_access_valid(FlatView *fv, hwaddr addr, hwaddr len, 2580 bool is_write, MemTxAttrs attrs); 2581 2582 static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data, 2583 unsigned len, MemTxAttrs attrs) 2584 { 2585 subpage_t *subpage = opaque; 2586 uint8_t buf[8]; 2587 MemTxResult res; 2588 2589 #if defined(DEBUG_SUBPAGE) 2590 printf("%s: subpage %p len %u addr " HWADDR_FMT_plx "\n", __func__, 2591 subpage, len, addr); 2592 #endif 2593 res = flatview_read(subpage->fv, addr + subpage->base, attrs, buf, len); 2594 if (res) { 2595 return res; 2596 } 2597 *data = ldn_p(buf, len); 2598 return MEMTX_OK; 2599 } 2600 2601 static MemTxResult subpage_write(void *opaque, hwaddr addr, 2602 uint64_t value, unsigned len, MemTxAttrs attrs) 2603 { 2604 subpage_t *subpage = opaque; 2605 uint8_t buf[8]; 2606 2607 #if defined(DEBUG_SUBPAGE) 2608 printf("%s: subpage %p len %u addr " HWADDR_FMT_plx 2609 " value %"PRIx64"\n", 2610 __func__, subpage, len, addr, value); 2611 #endif 2612 stn_p(buf, len, value); 2613 return flatview_write(subpage->fv, addr + subpage->base, attrs, buf, len); 2614 } 2615 2616 static bool subpage_accepts(void *opaque, hwaddr addr, 2617 unsigned len, bool is_write, 2618 MemTxAttrs attrs) 2619 { 2620 subpage_t *subpage = opaque; 2621 #if defined(DEBUG_SUBPAGE) 2622 printf("%s: subpage %p %c len %u addr " HWADDR_FMT_plx "\n", 2623 __func__, subpage, is_write ? 'w' : 'r', len, addr); 2624 #endif 2625 2626 return flatview_access_valid(subpage->fv, addr + subpage->base, 2627 len, is_write, attrs); 2628 } 2629 2630 static const MemoryRegionOps subpage_ops = { 2631 .read_with_attrs = subpage_read, 2632 .write_with_attrs = subpage_write, 2633 .impl.min_access_size = 1, 2634 .impl.max_access_size = 8, 2635 .valid.min_access_size = 1, 2636 .valid.max_access_size = 8, 2637 .valid.accepts = subpage_accepts, 2638 .endianness = DEVICE_NATIVE_ENDIAN, 2639 }; 2640 2641 static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end, 2642 uint16_t section) 2643 { 2644 int idx, eidx; 2645 2646 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE) 2647 return -1; 2648 idx = SUBPAGE_IDX(start); 2649 eidx = SUBPAGE_IDX(end); 2650 #if defined(DEBUG_SUBPAGE) 2651 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n", 2652 __func__, mmio, start, end, idx, eidx, section); 2653 #endif 2654 for (; idx <= eidx; idx++) { 2655 mmio->sub_section[idx] = section; 2656 } 2657 2658 return 0; 2659 } 2660 2661 static subpage_t *subpage_init(FlatView *fv, hwaddr base) 2662 { 2663 subpage_t *mmio; 2664 2665 /* mmio->sub_section is set to PHYS_SECTION_UNASSIGNED with g_malloc0 */ 2666 mmio = g_malloc0(sizeof(subpage_t) + TARGET_PAGE_SIZE * sizeof(uint16_t)); 2667 mmio->fv = fv; 2668 mmio->base = base; 2669 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio, 2670 NULL, TARGET_PAGE_SIZE); 2671 mmio->iomem.subpage = true; 2672 #if defined(DEBUG_SUBPAGE) 2673 printf("%s: %p base " HWADDR_FMT_plx " len %08x\n", __func__, 2674 mmio, base, TARGET_PAGE_SIZE); 2675 #endif 2676 2677 return mmio; 2678 } 2679 2680 static uint16_t dummy_section(PhysPageMap *map, FlatView *fv, MemoryRegion *mr) 2681 { 2682 assert(fv); 2683 MemoryRegionSection section = { 2684 .fv = fv, 2685 .mr = mr, 2686 .offset_within_address_space = 0, 2687 .offset_within_region = 0, 2688 .size = int128_2_64(), 2689 }; 2690 2691 return phys_section_add(map, §ion); 2692 } 2693 2694 static void io_mem_init(void) 2695 { 2696 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL, 2697 NULL, UINT64_MAX); 2698 } 2699 2700 AddressSpaceDispatch *address_space_dispatch_new(FlatView *fv) 2701 { 2702 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1); 2703 uint16_t n; 2704 2705 n = dummy_section(&d->map, fv, &io_mem_unassigned); 2706 assert(n == PHYS_SECTION_UNASSIGNED); 2707 2708 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 }; 2709 2710 return d; 2711 } 2712 2713 void address_space_dispatch_free(AddressSpaceDispatch *d) 2714 { 2715 phys_sections_free(&d->map); 2716 g_free(d); 2717 } 2718 2719 static void do_nothing(CPUState *cpu, run_on_cpu_data d) 2720 { 2721 } 2722 2723 static void tcg_log_global_after_sync(MemoryListener *listener) 2724 { 2725 CPUAddressSpace *cpuas; 2726 2727 /* Wait for the CPU to end the current TB. This avoids the following 2728 * incorrect race: 2729 * 2730 * vCPU migration 2731 * ---------------------- ------------------------- 2732 * TLB check -> slow path 2733 * notdirty_mem_write 2734 * write to RAM 2735 * mark dirty 2736 * clear dirty flag 2737 * TLB check -> fast path 2738 * read memory 2739 * write to RAM 2740 * 2741 * by pushing the migration thread's memory read after the vCPU thread has 2742 * written the memory. 2743 */ 2744 if (replay_mode == REPLAY_MODE_NONE) { 2745 /* 2746 * VGA can make calls to this function while updating the screen. 2747 * In record/replay mode this causes a deadlock, because 2748 * run_on_cpu waits for rr mutex. Therefore no races are possible 2749 * in this case and no need for making run_on_cpu when 2750 * record/replay is enabled. 2751 */ 2752 cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener); 2753 run_on_cpu(cpuas->cpu, do_nothing, RUN_ON_CPU_NULL); 2754 } 2755 } 2756 2757 static void tcg_commit_cpu(CPUState *cpu, run_on_cpu_data data) 2758 { 2759 CPUAddressSpace *cpuas = data.host_ptr; 2760 2761 cpuas->memory_dispatch = address_space_to_dispatch(cpuas->as); 2762 tlb_flush(cpu); 2763 } 2764 2765 static void tcg_commit(MemoryListener *listener) 2766 { 2767 CPUAddressSpace *cpuas; 2768 CPUState *cpu; 2769 2770 assert(tcg_enabled()); 2771 /* since each CPU stores ram addresses in its TLB cache, we must 2772 reset the modified entries */ 2773 cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener); 2774 cpu = cpuas->cpu; 2775 2776 /* 2777 * Defer changes to as->memory_dispatch until the cpu is quiescent. 2778 * Otherwise we race between (1) other cpu threads and (2) ongoing 2779 * i/o for the current cpu thread, with data cached by mmu_lookup(). 2780 * 2781 * In addition, queueing the work function will kick the cpu back to 2782 * the main loop, which will end the RCU critical section and reclaim 2783 * the memory data structures. 2784 * 2785 * That said, the listener is also called during realize, before 2786 * all of the tcg machinery for run-on is initialized: thus halt_cond. 2787 */ 2788 if (cpu->halt_cond) { 2789 async_run_on_cpu(cpu, tcg_commit_cpu, RUN_ON_CPU_HOST_PTR(cpuas)); 2790 } else { 2791 tcg_commit_cpu(cpu, RUN_ON_CPU_HOST_PTR(cpuas)); 2792 } 2793 } 2794 2795 static void memory_map_init(void) 2796 { 2797 system_memory = g_malloc(sizeof(*system_memory)); 2798 2799 memory_region_init(system_memory, NULL, "system", UINT64_MAX); 2800 address_space_init(&address_space_memory, system_memory, "memory"); 2801 2802 system_io = g_malloc(sizeof(*system_io)); 2803 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io", 2804 65536); 2805 address_space_init(&address_space_io, system_io, "I/O"); 2806 } 2807 2808 MemoryRegion *get_system_memory(void) 2809 { 2810 return system_memory; 2811 } 2812 2813 MemoryRegion *get_system_io(void) 2814 { 2815 return system_io; 2816 } 2817 2818 static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr, 2819 hwaddr length) 2820 { 2821 uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr); 2822 ram_addr_t ramaddr = memory_region_get_ram_addr(mr); 2823 2824 /* We know we're only called for RAM MemoryRegions */ 2825 assert(ramaddr != RAM_ADDR_INVALID); 2826 addr += ramaddr; 2827 2828 /* No early return if dirty_log_mask is or becomes 0, because 2829 * cpu_physical_memory_set_dirty_range will still call 2830 * xen_modified_memory. 2831 */ 2832 if (dirty_log_mask) { 2833 dirty_log_mask = 2834 cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask); 2835 } 2836 if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) { 2837 assert(tcg_enabled()); 2838 tb_invalidate_phys_range(NULL, addr, addr + length - 1); 2839 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE); 2840 } 2841 cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask); 2842 } 2843 2844 void memory_region_flush_rom_device(MemoryRegion *mr, hwaddr addr, hwaddr size) 2845 { 2846 /* 2847 * In principle this function would work on other memory region types too, 2848 * but the ROM device use case is the only one where this operation is 2849 * necessary. Other memory regions should use the 2850 * address_space_read/write() APIs. 2851 */ 2852 assert(memory_region_is_romd(mr)); 2853 2854 invalidate_and_set_dirty(mr, addr, size); 2855 } 2856 2857 int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr) 2858 { 2859 unsigned access_size_max = mr->ops->valid.max_access_size; 2860 2861 /* Regions are assumed to support 1-4 byte accesses unless 2862 otherwise specified. */ 2863 if (access_size_max == 0) { 2864 access_size_max = 4; 2865 } 2866 2867 /* Bound the maximum access by the alignment of the address. */ 2868 if (!mr->ops->impl.unaligned) { 2869 unsigned align_size_max = addr & -addr; 2870 if (align_size_max != 0 && align_size_max < access_size_max) { 2871 access_size_max = align_size_max; 2872 } 2873 } 2874 2875 /* Don't attempt accesses larger than the maximum. */ 2876 if (l > access_size_max) { 2877 l = access_size_max; 2878 } 2879 l = pow2floor(l); 2880 2881 return l; 2882 } 2883 2884 bool prepare_mmio_access(MemoryRegion *mr) 2885 { 2886 bool release_lock = false; 2887 2888 if (!bql_locked()) { 2889 bql_lock(); 2890 release_lock = true; 2891 } 2892 if (mr->flush_coalesced_mmio) { 2893 qemu_flush_coalesced_mmio_buffer(); 2894 } 2895 2896 return release_lock; 2897 } 2898 2899 /** 2900 * flatview_access_allowed 2901 * @mr: #MemoryRegion to be accessed 2902 * @attrs: memory transaction attributes 2903 * @addr: address within that memory region 2904 * @len: the number of bytes to access 2905 * 2906 * Check if a memory transaction is allowed. 2907 * 2908 * Returns: true if transaction is allowed, false if denied. 2909 */ 2910 static bool flatview_access_allowed(MemoryRegion *mr, MemTxAttrs attrs, 2911 hwaddr addr, hwaddr len) 2912 { 2913 if (likely(!attrs.memory)) { 2914 return true; 2915 } 2916 if (memory_region_is_ram(mr)) { 2917 return true; 2918 } 2919 qemu_log_mask(LOG_INVALID_MEM, 2920 "Invalid access to non-RAM device at " 2921 "addr 0x%" HWADDR_PRIX ", size %" HWADDR_PRIu ", " 2922 "region '%s'\n", addr, len, memory_region_name(mr)); 2923 return false; 2924 } 2925 2926 static MemTxResult flatview_write_continue_step(MemTxAttrs attrs, 2927 const uint8_t *buf, 2928 hwaddr len, hwaddr mr_addr, 2929 hwaddr *l, MemoryRegion *mr) 2930 { 2931 if (!flatview_access_allowed(mr, attrs, mr_addr, *l)) { 2932 return MEMTX_ACCESS_ERROR; 2933 } 2934 2935 if (!memory_access_is_direct(mr, true, attrs)) { 2936 uint64_t val; 2937 MemTxResult result; 2938 bool release_lock = prepare_mmio_access(mr); 2939 2940 *l = memory_access_size(mr, *l, mr_addr); 2941 /* 2942 * XXX: could force current_cpu to NULL to avoid 2943 * potential bugs 2944 */ 2945 2946 /* 2947 * Assure Coverity (and ourselves) that we are not going to OVERRUN 2948 * the buffer by following ldn_he_p(). 2949 */ 2950 #ifdef QEMU_STATIC_ANALYSIS 2951 assert((*l == 1 && len >= 1) || 2952 (*l == 2 && len >= 2) || 2953 (*l == 4 && len >= 4) || 2954 (*l == 8 && len >= 8)); 2955 #endif 2956 val = ldn_he_p(buf, *l); 2957 result = memory_region_dispatch_write(mr, mr_addr, val, 2958 size_memop(*l), attrs); 2959 if (release_lock) { 2960 bql_unlock(); 2961 } 2962 2963 return result; 2964 } else { 2965 /* RAM case */ 2966 uint8_t *ram_ptr = qemu_ram_ptr_length(mr->ram_block, mr_addr, l, 2967 false, true); 2968 2969 memmove(ram_ptr, buf, *l); 2970 invalidate_and_set_dirty(mr, mr_addr, *l); 2971 2972 return MEMTX_OK; 2973 } 2974 } 2975 2976 /* Called within RCU critical section. */ 2977 static MemTxResult flatview_write_continue(FlatView *fv, hwaddr addr, 2978 MemTxAttrs attrs, 2979 const void *ptr, 2980 hwaddr len, hwaddr mr_addr, 2981 hwaddr l, MemoryRegion *mr) 2982 { 2983 MemTxResult result = MEMTX_OK; 2984 const uint8_t *buf = ptr; 2985 2986 for (;;) { 2987 result |= flatview_write_continue_step(attrs, buf, len, mr_addr, &l, 2988 mr); 2989 2990 len -= l; 2991 buf += l; 2992 addr += l; 2993 2994 if (!len) { 2995 break; 2996 } 2997 2998 l = len; 2999 mr = flatview_translate(fv, addr, &mr_addr, &l, true, attrs); 3000 } 3001 3002 return result; 3003 } 3004 3005 /* Called from RCU critical section. */ 3006 static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs, 3007 const void *buf, hwaddr len) 3008 { 3009 hwaddr l; 3010 hwaddr mr_addr; 3011 MemoryRegion *mr; 3012 3013 l = len; 3014 mr = flatview_translate(fv, addr, &mr_addr, &l, true, attrs); 3015 if (!flatview_access_allowed(mr, attrs, addr, len)) { 3016 return MEMTX_ACCESS_ERROR; 3017 } 3018 return flatview_write_continue(fv, addr, attrs, buf, len, 3019 mr_addr, l, mr); 3020 } 3021 3022 static MemTxResult flatview_read_continue_step(MemTxAttrs attrs, uint8_t *buf, 3023 hwaddr len, hwaddr mr_addr, 3024 hwaddr *l, 3025 MemoryRegion *mr) 3026 { 3027 if (!flatview_access_allowed(mr, attrs, mr_addr, *l)) { 3028 return MEMTX_ACCESS_ERROR; 3029 } 3030 3031 if (!memory_access_is_direct(mr, false, attrs)) { 3032 /* I/O case */ 3033 uint64_t val; 3034 MemTxResult result; 3035 bool release_lock = prepare_mmio_access(mr); 3036 3037 *l = memory_access_size(mr, *l, mr_addr); 3038 result = memory_region_dispatch_read(mr, mr_addr, &val, size_memop(*l), 3039 attrs); 3040 3041 /* 3042 * Assure Coverity (and ourselves) that we are not going to OVERRUN 3043 * the buffer by following stn_he_p(). 3044 */ 3045 #ifdef QEMU_STATIC_ANALYSIS 3046 assert((*l == 1 && len >= 1) || 3047 (*l == 2 && len >= 2) || 3048 (*l == 4 && len >= 4) || 3049 (*l == 8 && len >= 8)); 3050 #endif 3051 stn_he_p(buf, *l, val); 3052 3053 if (release_lock) { 3054 bql_unlock(); 3055 } 3056 return result; 3057 } else { 3058 /* RAM case */ 3059 uint8_t *ram_ptr = qemu_ram_ptr_length(mr->ram_block, mr_addr, l, 3060 false, false); 3061 3062 memcpy(buf, ram_ptr, *l); 3063 3064 return MEMTX_OK; 3065 } 3066 } 3067 3068 /* Called within RCU critical section. */ 3069 MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr, 3070 MemTxAttrs attrs, void *ptr, 3071 hwaddr len, hwaddr mr_addr, hwaddr l, 3072 MemoryRegion *mr) 3073 { 3074 MemTxResult result = MEMTX_OK; 3075 uint8_t *buf = ptr; 3076 3077 fuzz_dma_read_cb(addr, len, mr); 3078 for (;;) { 3079 result |= flatview_read_continue_step(attrs, buf, len, mr_addr, &l, mr); 3080 3081 len -= l; 3082 buf += l; 3083 addr += l; 3084 3085 if (!len) { 3086 break; 3087 } 3088 3089 l = len; 3090 mr = flatview_translate(fv, addr, &mr_addr, &l, false, attrs); 3091 } 3092 3093 return result; 3094 } 3095 3096 /* Called from RCU critical section. */ 3097 static MemTxResult flatview_read(FlatView *fv, hwaddr addr, 3098 MemTxAttrs attrs, void *buf, hwaddr len) 3099 { 3100 hwaddr l; 3101 hwaddr mr_addr; 3102 MemoryRegion *mr; 3103 3104 l = len; 3105 mr = flatview_translate(fv, addr, &mr_addr, &l, false, attrs); 3106 if (!flatview_access_allowed(mr, attrs, addr, len)) { 3107 return MEMTX_ACCESS_ERROR; 3108 } 3109 return flatview_read_continue(fv, addr, attrs, buf, len, 3110 mr_addr, l, mr); 3111 } 3112 3113 MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr, 3114 MemTxAttrs attrs, void *buf, hwaddr len) 3115 { 3116 MemTxResult result = MEMTX_OK; 3117 FlatView *fv; 3118 3119 if (len > 0) { 3120 RCU_READ_LOCK_GUARD(); 3121 fv = address_space_to_flatview(as); 3122 result = flatview_read(fv, addr, attrs, buf, len); 3123 } 3124 3125 return result; 3126 } 3127 3128 MemTxResult address_space_write(AddressSpace *as, hwaddr addr, 3129 MemTxAttrs attrs, 3130 const void *buf, hwaddr len) 3131 { 3132 MemTxResult result = MEMTX_OK; 3133 FlatView *fv; 3134 3135 if (len > 0) { 3136 RCU_READ_LOCK_GUARD(); 3137 fv = address_space_to_flatview(as); 3138 result = flatview_write(fv, addr, attrs, buf, len); 3139 } 3140 3141 return result; 3142 } 3143 3144 MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs, 3145 void *buf, hwaddr len, bool is_write) 3146 { 3147 if (is_write) { 3148 return address_space_write(as, addr, attrs, buf, len); 3149 } else { 3150 return address_space_read_full(as, addr, attrs, buf, len); 3151 } 3152 } 3153 3154 MemTxResult address_space_set(AddressSpace *as, hwaddr addr, 3155 uint8_t c, hwaddr len, MemTxAttrs attrs) 3156 { 3157 #define FILLBUF_SIZE 512 3158 uint8_t fillbuf[FILLBUF_SIZE]; 3159 int l; 3160 MemTxResult error = MEMTX_OK; 3161 3162 memset(fillbuf, c, FILLBUF_SIZE); 3163 while (len > 0) { 3164 l = len < FILLBUF_SIZE ? len : FILLBUF_SIZE; 3165 error |= address_space_write(as, addr, attrs, fillbuf, l); 3166 len -= l; 3167 addr += l; 3168 } 3169 3170 return error; 3171 } 3172 3173 void cpu_physical_memory_rw(hwaddr addr, void *buf, 3174 hwaddr len, bool is_write) 3175 { 3176 address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED, 3177 buf, len, is_write); 3178 } 3179 3180 enum write_rom_type { 3181 WRITE_DATA, 3182 FLUSH_CACHE, 3183 }; 3184 3185 static inline MemTxResult address_space_write_rom_internal(AddressSpace *as, 3186 hwaddr addr, 3187 MemTxAttrs attrs, 3188 const void *ptr, 3189 hwaddr len, 3190 enum write_rom_type type) 3191 { 3192 hwaddr l; 3193 uint8_t *ram_ptr; 3194 hwaddr addr1; 3195 MemoryRegion *mr; 3196 const uint8_t *buf = ptr; 3197 3198 RCU_READ_LOCK_GUARD(); 3199 while (len > 0) { 3200 l = len; 3201 mr = address_space_translate(as, addr, &addr1, &l, true, attrs); 3202 3203 if (!memory_region_supports_direct_access(mr)) { 3204 l = memory_access_size(mr, l, addr1); 3205 } else { 3206 /* ROM/RAM case */ 3207 ram_ptr = qemu_map_ram_ptr(mr->ram_block, addr1); 3208 switch (type) { 3209 case WRITE_DATA: 3210 memcpy(ram_ptr, buf, l); 3211 invalidate_and_set_dirty(mr, addr1, l); 3212 break; 3213 case FLUSH_CACHE: 3214 flush_idcache_range((uintptr_t)ram_ptr, (uintptr_t)ram_ptr, l); 3215 break; 3216 } 3217 } 3218 len -= l; 3219 buf += l; 3220 addr += l; 3221 } 3222 return MEMTX_OK; 3223 } 3224 3225 /* used for ROM loading : can write in RAM and ROM */ 3226 MemTxResult address_space_write_rom(AddressSpace *as, hwaddr addr, 3227 MemTxAttrs attrs, 3228 const void *buf, hwaddr len) 3229 { 3230 return address_space_write_rom_internal(as, addr, attrs, 3231 buf, len, WRITE_DATA); 3232 } 3233 3234 void cpu_flush_icache_range(hwaddr start, hwaddr len) 3235 { 3236 /* 3237 * This function should do the same thing as an icache flush that was 3238 * triggered from within the guest. For TCG we are always cache coherent, 3239 * so there is no need to flush anything. For KVM / Xen we need to flush 3240 * the host's instruction cache at least. 3241 */ 3242 if (tcg_enabled()) { 3243 return; 3244 } 3245 3246 address_space_write_rom_internal(&address_space_memory, 3247 start, MEMTXATTRS_UNSPECIFIED, 3248 NULL, len, FLUSH_CACHE); 3249 } 3250 3251 /* 3252 * A magic value stored in the first 8 bytes of the bounce buffer struct. Used 3253 * to detect illegal pointers passed to address_space_unmap. 3254 */ 3255 #define BOUNCE_BUFFER_MAGIC 0xb4017ceb4ffe12ed 3256 3257 typedef struct { 3258 uint64_t magic; 3259 MemoryRegion *mr; 3260 hwaddr addr; 3261 size_t len; 3262 uint8_t buffer[]; 3263 } BounceBuffer; 3264 3265 static void 3266 address_space_unregister_map_client_do(AddressSpaceMapClient *client) 3267 { 3268 QLIST_REMOVE(client, link); 3269 g_free(client); 3270 } 3271 3272 static void address_space_notify_map_clients_locked(AddressSpace *as) 3273 { 3274 AddressSpaceMapClient *client; 3275 3276 while (!QLIST_EMPTY(&as->map_client_list)) { 3277 client = QLIST_FIRST(&as->map_client_list); 3278 qemu_bh_schedule(client->bh); 3279 address_space_unregister_map_client_do(client); 3280 } 3281 } 3282 3283 void address_space_register_map_client(AddressSpace *as, QEMUBH *bh) 3284 { 3285 AddressSpaceMapClient *client = g_malloc(sizeof(*client)); 3286 3287 QEMU_LOCK_GUARD(&as->map_client_list_lock); 3288 client->bh = bh; 3289 QLIST_INSERT_HEAD(&as->map_client_list, client, link); 3290 /* Write map_client_list before reading bounce_buffer_size. */ 3291 smp_mb(); 3292 if (qatomic_read(&as->bounce_buffer_size) < as->max_bounce_buffer_size) { 3293 address_space_notify_map_clients_locked(as); 3294 } 3295 } 3296 3297 void cpu_exec_init_all(void) 3298 { 3299 qemu_mutex_init(&ram_list.mutex); 3300 /* The data structures we set up here depend on knowing the page size, 3301 * so no more changes can be made after this point. 3302 * In an ideal world, nothing we did before we had finished the 3303 * machine setup would care about the target page size, and we could 3304 * do this much later, rather than requiring board models to state 3305 * up front what their requirements are. 3306 */ 3307 finalize_target_page_bits(); 3308 io_mem_init(); 3309 memory_map_init(); 3310 } 3311 3312 void address_space_unregister_map_client(AddressSpace *as, QEMUBH *bh) 3313 { 3314 AddressSpaceMapClient *client; 3315 3316 QEMU_LOCK_GUARD(&as->map_client_list_lock); 3317 QLIST_FOREACH(client, &as->map_client_list, link) { 3318 if (client->bh == bh) { 3319 address_space_unregister_map_client_do(client); 3320 break; 3321 } 3322 } 3323 } 3324 3325 static void address_space_notify_map_clients(AddressSpace *as) 3326 { 3327 QEMU_LOCK_GUARD(&as->map_client_list_lock); 3328 address_space_notify_map_clients_locked(as); 3329 } 3330 3331 static bool flatview_access_valid(FlatView *fv, hwaddr addr, hwaddr len, 3332 bool is_write, MemTxAttrs attrs) 3333 { 3334 MemoryRegion *mr; 3335 hwaddr l, xlat; 3336 3337 while (len > 0) { 3338 l = len; 3339 mr = flatview_translate(fv, addr, &xlat, &l, is_write, attrs); 3340 if (!memory_access_is_direct(mr, is_write, attrs)) { 3341 l = memory_access_size(mr, l, addr); 3342 if (!memory_region_access_valid(mr, xlat, l, is_write, attrs)) { 3343 return false; 3344 } 3345 } 3346 3347 len -= l; 3348 addr += l; 3349 } 3350 return true; 3351 } 3352 3353 bool address_space_access_valid(AddressSpace *as, hwaddr addr, 3354 hwaddr len, bool is_write, 3355 MemTxAttrs attrs) 3356 { 3357 FlatView *fv; 3358 3359 RCU_READ_LOCK_GUARD(); 3360 fv = address_space_to_flatview(as); 3361 return flatview_access_valid(fv, addr, len, is_write, attrs); 3362 } 3363 3364 static hwaddr 3365 flatview_extend_translation(FlatView *fv, hwaddr addr, 3366 hwaddr target_len, 3367 MemoryRegion *mr, hwaddr base, hwaddr len, 3368 bool is_write, MemTxAttrs attrs) 3369 { 3370 hwaddr done = 0; 3371 hwaddr xlat; 3372 MemoryRegion *this_mr; 3373 3374 for (;;) { 3375 target_len -= len; 3376 addr += len; 3377 done += len; 3378 if (target_len == 0) { 3379 return done; 3380 } 3381 3382 len = target_len; 3383 this_mr = flatview_translate(fv, addr, &xlat, 3384 &len, is_write, attrs); 3385 if (this_mr != mr || xlat != base + done) { 3386 return done; 3387 } 3388 } 3389 } 3390 3391 /* Map a physical memory region into a host virtual address. 3392 * May map a subset of the requested range, given by and returned in *plen. 3393 * May return NULL if resources needed to perform the mapping are exhausted. 3394 * Use only for reads OR writes - not for read-modify-write operations. 3395 * Use address_space_register_map_client() to know when retrying the map 3396 * operation is likely to succeed. 3397 */ 3398 void *address_space_map(AddressSpace *as, 3399 hwaddr addr, 3400 hwaddr *plen, 3401 bool is_write, 3402 MemTxAttrs attrs) 3403 { 3404 hwaddr len = *plen; 3405 hwaddr l, xlat; 3406 MemoryRegion *mr; 3407 FlatView *fv; 3408 3409 trace_address_space_map(as, addr, len, is_write, *(uint32_t *) &attrs); 3410 3411 if (len == 0) { 3412 return NULL; 3413 } 3414 3415 l = len; 3416 RCU_READ_LOCK_GUARD(); 3417 fv = address_space_to_flatview(as); 3418 mr = flatview_translate(fv, addr, &xlat, &l, is_write, attrs); 3419 3420 if (!memory_access_is_direct(mr, is_write, attrs)) { 3421 size_t used = qatomic_read(&as->bounce_buffer_size); 3422 for (;;) { 3423 hwaddr alloc = MIN(as->max_bounce_buffer_size - used, l); 3424 size_t new_size = used + alloc; 3425 size_t actual = 3426 qatomic_cmpxchg(&as->bounce_buffer_size, used, new_size); 3427 if (actual == used) { 3428 l = alloc; 3429 break; 3430 } 3431 used = actual; 3432 } 3433 3434 if (l == 0) { 3435 *plen = 0; 3436 return NULL; 3437 } 3438 3439 BounceBuffer *bounce = g_malloc0(l + sizeof(BounceBuffer)); 3440 bounce->magic = BOUNCE_BUFFER_MAGIC; 3441 memory_region_ref(mr); 3442 bounce->mr = mr; 3443 bounce->addr = addr; 3444 bounce->len = l; 3445 3446 if (!is_write) { 3447 flatview_read(fv, addr, attrs, 3448 bounce->buffer, l); 3449 } 3450 3451 *plen = l; 3452 return bounce->buffer; 3453 } 3454 3455 memory_region_ref(mr); 3456 *plen = flatview_extend_translation(fv, addr, len, mr, xlat, 3457 l, is_write, attrs); 3458 fuzz_dma_read_cb(addr, *plen, mr); 3459 return qemu_ram_ptr_length(mr->ram_block, xlat, plen, true, is_write); 3460 } 3461 3462 /* Unmaps a memory region previously mapped by address_space_map(). 3463 * Will also mark the memory as dirty if is_write is true. access_len gives 3464 * the amount of memory that was actually read or written by the caller. 3465 */ 3466 void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len, 3467 bool is_write, hwaddr access_len) 3468 { 3469 MemoryRegion *mr; 3470 ram_addr_t addr1; 3471 3472 mr = memory_region_from_host(buffer, &addr1); 3473 if (mr != NULL) { 3474 if (is_write) { 3475 invalidate_and_set_dirty(mr, addr1, access_len); 3476 } 3477 if (xen_enabled()) { 3478 xen_invalidate_map_cache_entry(buffer); 3479 } 3480 memory_region_unref(mr); 3481 return; 3482 } 3483 3484 3485 BounceBuffer *bounce = container_of(buffer, BounceBuffer, buffer); 3486 assert(bounce->magic == BOUNCE_BUFFER_MAGIC); 3487 3488 if (is_write) { 3489 address_space_write(as, bounce->addr, MEMTXATTRS_UNSPECIFIED, 3490 bounce->buffer, access_len); 3491 } 3492 3493 qatomic_sub(&as->bounce_buffer_size, bounce->len); 3494 bounce->magic = ~BOUNCE_BUFFER_MAGIC; 3495 memory_region_unref(bounce->mr); 3496 g_free(bounce); 3497 /* Write bounce_buffer_size before reading map_client_list. */ 3498 smp_mb(); 3499 address_space_notify_map_clients(as); 3500 } 3501 3502 void *cpu_physical_memory_map(hwaddr addr, 3503 hwaddr *plen, 3504 bool is_write) 3505 { 3506 return address_space_map(&address_space_memory, addr, plen, is_write, 3507 MEMTXATTRS_UNSPECIFIED); 3508 } 3509 3510 void cpu_physical_memory_unmap(void *buffer, hwaddr len, 3511 bool is_write, hwaddr access_len) 3512 { 3513 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len); 3514 } 3515 3516 #define ARG1_DECL AddressSpace *as 3517 #define ARG1 as 3518 #define SUFFIX 3519 #define TRANSLATE(...) address_space_translate(as, __VA_ARGS__) 3520 #define RCU_READ_LOCK(...) rcu_read_lock() 3521 #define RCU_READ_UNLOCK(...) rcu_read_unlock() 3522 #include "memory_ldst.c.inc" 3523 3524 int64_t address_space_cache_init(MemoryRegionCache *cache, 3525 AddressSpace *as, 3526 hwaddr addr, 3527 hwaddr len, 3528 bool is_write) 3529 { 3530 AddressSpaceDispatch *d; 3531 hwaddr l; 3532 MemoryRegion *mr; 3533 Int128 diff; 3534 3535 assert(len > 0); 3536 3537 l = len; 3538 cache->fv = address_space_get_flatview(as); 3539 d = flatview_to_dispatch(cache->fv); 3540 cache->mrs = *address_space_translate_internal(d, addr, &cache->xlat, &l, true); 3541 3542 /* 3543 * cache->xlat is now relative to cache->mrs.mr, not to the section itself. 3544 * Take that into account to compute how many bytes are there between 3545 * cache->xlat and the end of the section. 3546 */ 3547 diff = int128_sub(cache->mrs.size, 3548 int128_make64(cache->xlat - cache->mrs.offset_within_region)); 3549 l = int128_get64(int128_min(diff, int128_make64(l))); 3550 3551 mr = cache->mrs.mr; 3552 memory_region_ref(mr); 3553 if (memory_access_is_direct(mr, is_write, MEMTXATTRS_UNSPECIFIED)) { 3554 /* We don't care about the memory attributes here as we're only 3555 * doing this if we found actual RAM, which behaves the same 3556 * regardless of attributes; so UNSPECIFIED is fine. 3557 */ 3558 l = flatview_extend_translation(cache->fv, addr, len, mr, 3559 cache->xlat, l, is_write, 3560 MEMTXATTRS_UNSPECIFIED); 3561 cache->ptr = qemu_ram_ptr_length(mr->ram_block, cache->xlat, &l, true, 3562 is_write); 3563 } else { 3564 cache->ptr = NULL; 3565 } 3566 3567 cache->len = l; 3568 cache->is_write = is_write; 3569 return l; 3570 } 3571 3572 void address_space_cache_invalidate(MemoryRegionCache *cache, 3573 hwaddr addr, 3574 hwaddr access_len) 3575 { 3576 assert(cache->is_write); 3577 if (likely(cache->ptr)) { 3578 invalidate_and_set_dirty(cache->mrs.mr, addr + cache->xlat, access_len); 3579 } 3580 } 3581 3582 void address_space_cache_destroy(MemoryRegionCache *cache) 3583 { 3584 if (!cache->mrs.mr) { 3585 return; 3586 } 3587 3588 if (xen_enabled()) { 3589 xen_invalidate_map_cache_entry(cache->ptr); 3590 } 3591 memory_region_unref(cache->mrs.mr); 3592 flatview_unref(cache->fv); 3593 cache->mrs.mr = NULL; 3594 cache->fv = NULL; 3595 } 3596 3597 /* Called from RCU critical section. This function has the same 3598 * semantics as address_space_translate, but it only works on a 3599 * predefined range of a MemoryRegion that was mapped with 3600 * address_space_cache_init. 3601 */ 3602 static inline MemoryRegion *address_space_translate_cached( 3603 MemoryRegionCache *cache, hwaddr addr, hwaddr *xlat, 3604 hwaddr *plen, bool is_write, MemTxAttrs attrs) 3605 { 3606 MemoryRegionSection section; 3607 MemoryRegion *mr; 3608 IOMMUMemoryRegion *iommu_mr; 3609 AddressSpace *target_as; 3610 3611 assert(!cache->ptr); 3612 *xlat = addr + cache->xlat; 3613 3614 mr = cache->mrs.mr; 3615 iommu_mr = memory_region_get_iommu(mr); 3616 if (!iommu_mr) { 3617 /* MMIO region. */ 3618 return mr; 3619 } 3620 3621 section = address_space_translate_iommu(iommu_mr, xlat, plen, 3622 NULL, is_write, true, 3623 &target_as, attrs); 3624 return section.mr; 3625 } 3626 3627 /* Called within RCU critical section. */ 3628 static MemTxResult address_space_write_continue_cached(MemTxAttrs attrs, 3629 const void *ptr, 3630 hwaddr len, 3631 hwaddr mr_addr, 3632 hwaddr l, 3633 MemoryRegion *mr) 3634 { 3635 MemTxResult result = MEMTX_OK; 3636 const uint8_t *buf = ptr; 3637 3638 for (;;) { 3639 result |= flatview_write_continue_step(attrs, buf, len, mr_addr, &l, 3640 mr); 3641 3642 len -= l; 3643 buf += l; 3644 mr_addr += l; 3645 3646 if (!len) { 3647 break; 3648 } 3649 3650 l = len; 3651 } 3652 3653 return result; 3654 } 3655 3656 /* Called within RCU critical section. */ 3657 static MemTxResult address_space_read_continue_cached(MemTxAttrs attrs, 3658 void *ptr, hwaddr len, 3659 hwaddr mr_addr, hwaddr l, 3660 MemoryRegion *mr) 3661 { 3662 MemTxResult result = MEMTX_OK; 3663 uint8_t *buf = ptr; 3664 3665 for (;;) { 3666 result |= flatview_read_continue_step(attrs, buf, len, mr_addr, &l, mr); 3667 len -= l; 3668 buf += l; 3669 mr_addr += l; 3670 3671 if (!len) { 3672 break; 3673 } 3674 l = len; 3675 } 3676 3677 return result; 3678 } 3679 3680 /* Called from RCU critical section. address_space_read_cached uses this 3681 * out of line function when the target is an MMIO or IOMMU region. 3682 */ 3683 MemTxResult 3684 address_space_read_cached_slow(MemoryRegionCache *cache, hwaddr addr, 3685 void *buf, hwaddr len) 3686 { 3687 hwaddr mr_addr, l; 3688 MemoryRegion *mr; 3689 3690 l = len; 3691 mr = address_space_translate_cached(cache, addr, &mr_addr, &l, false, 3692 MEMTXATTRS_UNSPECIFIED); 3693 return address_space_read_continue_cached(MEMTXATTRS_UNSPECIFIED, 3694 buf, len, mr_addr, l, mr); 3695 } 3696 3697 /* Called from RCU critical section. address_space_write_cached uses this 3698 * out of line function when the target is an MMIO or IOMMU region. 3699 */ 3700 MemTxResult 3701 address_space_write_cached_slow(MemoryRegionCache *cache, hwaddr addr, 3702 const void *buf, hwaddr len) 3703 { 3704 hwaddr mr_addr, l; 3705 MemoryRegion *mr; 3706 3707 l = len; 3708 mr = address_space_translate_cached(cache, addr, &mr_addr, &l, true, 3709 MEMTXATTRS_UNSPECIFIED); 3710 return address_space_write_continue_cached(MEMTXATTRS_UNSPECIFIED, 3711 buf, len, mr_addr, l, mr); 3712 } 3713 3714 #define ARG1_DECL MemoryRegionCache *cache 3715 #define ARG1 cache 3716 #define SUFFIX _cached_slow 3717 #define TRANSLATE(...) address_space_translate_cached(cache, __VA_ARGS__) 3718 #define RCU_READ_LOCK() ((void)0) 3719 #define RCU_READ_UNLOCK() ((void)0) 3720 #include "memory_ldst.c.inc" 3721 3722 /* virtual memory access for debug (includes writing to ROM) */ 3723 int cpu_memory_rw_debug(CPUState *cpu, vaddr addr, 3724 void *ptr, size_t len, bool is_write) 3725 { 3726 hwaddr phys_addr; 3727 vaddr l, page; 3728 uint8_t *buf = ptr; 3729 3730 cpu_synchronize_state(cpu); 3731 while (len > 0) { 3732 int asidx; 3733 MemTxAttrs attrs; 3734 MemTxResult res; 3735 3736 page = addr & TARGET_PAGE_MASK; 3737 phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs); 3738 asidx = cpu_asidx_from_attrs(cpu, attrs); 3739 /* if no physical page mapped, return an error */ 3740 if (phys_addr == -1) 3741 return -1; 3742 l = (page + TARGET_PAGE_SIZE) - addr; 3743 if (l > len) 3744 l = len; 3745 phys_addr += (addr & ~TARGET_PAGE_MASK); 3746 res = address_space_rw(cpu->cpu_ases[asidx].as, phys_addr, attrs, buf, 3747 l, is_write); 3748 if (res != MEMTX_OK) { 3749 return -1; 3750 } 3751 len -= l; 3752 buf += l; 3753 addr += l; 3754 } 3755 return 0; 3756 } 3757 3758 bool cpu_physical_memory_is_io(hwaddr phys_addr) 3759 { 3760 MemoryRegion*mr; 3761 hwaddr l = 1; 3762 3763 RCU_READ_LOCK_GUARD(); 3764 mr = address_space_translate(&address_space_memory, 3765 phys_addr, &phys_addr, &l, false, 3766 MEMTXATTRS_UNSPECIFIED); 3767 3768 return !(memory_region_is_ram(mr) || memory_region_is_romd(mr)); 3769 } 3770 3771 int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque) 3772 { 3773 RAMBlock *block; 3774 int ret = 0; 3775 3776 RCU_READ_LOCK_GUARD(); 3777 RAMBLOCK_FOREACH(block) { 3778 ret = func(block, opaque); 3779 if (ret) { 3780 break; 3781 } 3782 } 3783 return ret; 3784 } 3785 3786 /* 3787 * Unmap pages of memory from start to start+length such that 3788 * they a) read as 0, b) Trigger whatever fault mechanism 3789 * the OS provides for postcopy. 3790 * The pages must be unmapped by the end of the function. 3791 * Returns: 0 on success, none-0 on failure 3792 * 3793 */ 3794 int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length) 3795 { 3796 int ret = -1; 3797 3798 uint8_t *host_startaddr = rb->host + start; 3799 3800 if (!QEMU_PTR_IS_ALIGNED(host_startaddr, rb->page_size)) { 3801 error_report("%s: Unaligned start address: %p", 3802 __func__, host_startaddr); 3803 goto err; 3804 } 3805 3806 if ((start + length) <= rb->max_length) { 3807 bool need_madvise, need_fallocate; 3808 if (!QEMU_IS_ALIGNED(length, rb->page_size)) { 3809 error_report("%s: Unaligned length: %zx", __func__, length); 3810 goto err; 3811 } 3812 3813 errno = ENOTSUP; /* If we are missing MADVISE etc */ 3814 3815 /* The logic here is messy; 3816 * madvise DONTNEED fails for hugepages 3817 * fallocate works on hugepages and shmem 3818 * shared anonymous memory requires madvise REMOVE 3819 */ 3820 need_madvise = (rb->page_size == qemu_real_host_page_size()); 3821 need_fallocate = rb->fd != -1; 3822 if (need_fallocate) { 3823 /* For a file, this causes the area of the file to be zero'd 3824 * if read, and for hugetlbfs also causes it to be unmapped 3825 * so a userfault will trigger. 3826 */ 3827 #ifdef CONFIG_FALLOCATE_PUNCH_HOLE 3828 /* 3829 * fallocate() will fail with readonly files. Let's print a 3830 * proper error message. 3831 */ 3832 if (rb->flags & RAM_READONLY_FD) { 3833 error_report("%s: Discarding RAM with readonly files is not" 3834 " supported", __func__); 3835 goto err; 3836 3837 } 3838 /* 3839 * We'll discard data from the actual file, even though we only 3840 * have a MAP_PRIVATE mapping, possibly messing with other 3841 * MAP_PRIVATE/MAP_SHARED mappings. There is no easy way to 3842 * change that behavior whithout violating the promised 3843 * semantics of ram_block_discard_range(). 3844 * 3845 * Only warn, because it works as long as nobody else uses that 3846 * file. 3847 */ 3848 if (!qemu_ram_is_shared(rb)) { 3849 warn_report_once("%s: Discarding RAM" 3850 " in private file mappings is possibly" 3851 " dangerous, because it will modify the" 3852 " underlying file and will affect other" 3853 " users of the file", __func__); 3854 } 3855 3856 ret = fallocate(rb->fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 3857 start + rb->fd_offset, length); 3858 if (ret) { 3859 ret = -errno; 3860 error_report("%s: Failed to fallocate %s:%" PRIx64 "+%" PRIx64 3861 " +%zx (%d)", __func__, rb->idstr, start, 3862 rb->fd_offset, length, ret); 3863 goto err; 3864 } 3865 #else 3866 ret = -ENOSYS; 3867 error_report("%s: fallocate not available/file" 3868 "%s:%" PRIx64 "+%" PRIx64 " +%zx (%d)", __func__, 3869 rb->idstr, start, rb->fd_offset, length, ret); 3870 goto err; 3871 #endif 3872 } 3873 if (need_madvise) { 3874 /* For normal RAM this causes it to be unmapped, 3875 * for shared memory it causes the local mapping to disappear 3876 * and to fall back on the file contents (which we just 3877 * fallocate'd away). 3878 */ 3879 #if defined(CONFIG_MADVISE) 3880 if (qemu_ram_is_shared(rb) && rb->fd < 0) { 3881 ret = madvise(host_startaddr, length, QEMU_MADV_REMOVE); 3882 } else { 3883 ret = madvise(host_startaddr, length, QEMU_MADV_DONTNEED); 3884 } 3885 if (ret) { 3886 ret = -errno; 3887 error_report("%s: Failed to discard range " 3888 "%s:%" PRIx64 " +%zx (%d)", 3889 __func__, rb->idstr, start, length, ret); 3890 goto err; 3891 } 3892 #else 3893 ret = -ENOSYS; 3894 error_report("%s: MADVISE not available %s:%" PRIx64 " +%zx (%d)", 3895 __func__, rb->idstr, start, length, ret); 3896 goto err; 3897 #endif 3898 } 3899 trace_ram_block_discard_range(rb->idstr, host_startaddr, length, 3900 need_madvise, need_fallocate, ret); 3901 } else { 3902 error_report("%s: Overrun block '%s' (%" PRIu64 "/%zx/" RAM_ADDR_FMT")", 3903 __func__, rb->idstr, start, length, rb->max_length); 3904 } 3905 3906 err: 3907 return ret; 3908 } 3909 3910 int ram_block_discard_guest_memfd_range(RAMBlock *rb, uint64_t start, 3911 size_t length) 3912 { 3913 int ret = -1; 3914 3915 #ifdef CONFIG_FALLOCATE_PUNCH_HOLE 3916 /* ignore fd_offset with guest_memfd */ 3917 ret = fallocate(rb->guest_memfd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 3918 start, length); 3919 3920 if (ret) { 3921 ret = -errno; 3922 error_report("%s: Failed to fallocate %s:%" PRIx64 " +%zx (%d)", 3923 __func__, rb->idstr, start, length, ret); 3924 } 3925 #else 3926 ret = -ENOSYS; 3927 error_report("%s: fallocate not available %s:%" PRIx64 " +%zx (%d)", 3928 __func__, rb->idstr, start, length, ret); 3929 #endif 3930 3931 return ret; 3932 } 3933 3934 bool ramblock_is_pmem(RAMBlock *rb) 3935 { 3936 return rb->flags & RAM_PMEM; 3937 } 3938 3939 static void mtree_print_phys_entries(int start, int end, int skip, int ptr) 3940 { 3941 if (start == end - 1) { 3942 qemu_printf("\t%3d ", start); 3943 } else { 3944 qemu_printf("\t%3d..%-3d ", start, end - 1); 3945 } 3946 qemu_printf(" skip=%d ", skip); 3947 if (ptr == PHYS_MAP_NODE_NIL) { 3948 qemu_printf(" ptr=NIL"); 3949 } else if (!skip) { 3950 qemu_printf(" ptr=#%d", ptr); 3951 } else { 3952 qemu_printf(" ptr=[%d]", ptr); 3953 } 3954 qemu_printf("\n"); 3955 } 3956 3957 #define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \ 3958 int128_sub((size), int128_one())) : 0) 3959 3960 void mtree_print_dispatch(AddressSpaceDispatch *d, MemoryRegion *root) 3961 { 3962 int i; 3963 3964 qemu_printf(" Dispatch\n"); 3965 qemu_printf(" Physical sections\n"); 3966 3967 for (i = 0; i < d->map.sections_nb; ++i) { 3968 MemoryRegionSection *s = d->map.sections + i; 3969 const char *names[] = { " [unassigned]", " [not dirty]", 3970 " [ROM]", " [watch]" }; 3971 3972 qemu_printf(" #%d @" HWADDR_FMT_plx ".." HWADDR_FMT_plx 3973 " %s%s%s%s%s", 3974 i, 3975 s->offset_within_address_space, 3976 s->offset_within_address_space + MR_SIZE(s->size), 3977 s->mr->name ? s->mr->name : "(noname)", 3978 i < ARRAY_SIZE(names) ? names[i] : "", 3979 s->mr == root ? " [ROOT]" : "", 3980 s == d->mru_section ? " [MRU]" : "", 3981 s->mr->is_iommu ? " [iommu]" : ""); 3982 3983 if (s->mr->alias) { 3984 qemu_printf(" alias=%s", s->mr->alias->name ? 3985 s->mr->alias->name : "noname"); 3986 } 3987 qemu_printf("\n"); 3988 } 3989 3990 qemu_printf(" Nodes (%d bits per level, %d levels) ptr=[%d] skip=%d\n", 3991 P_L2_BITS, P_L2_LEVELS, d->phys_map.ptr, d->phys_map.skip); 3992 for (i = 0; i < d->map.nodes_nb; ++i) { 3993 int j, jprev; 3994 PhysPageEntry prev; 3995 Node *n = d->map.nodes + i; 3996 3997 qemu_printf(" [%d]\n", i); 3998 3999 for (j = 0, jprev = 0, prev = *n[0]; j < ARRAY_SIZE(*n); ++j) { 4000 PhysPageEntry *pe = *n + j; 4001 4002 if (pe->ptr == prev.ptr && pe->skip == prev.skip) { 4003 continue; 4004 } 4005 4006 mtree_print_phys_entries(jprev, j, prev.skip, prev.ptr); 4007 4008 jprev = j; 4009 prev = *pe; 4010 } 4011 4012 if (jprev != ARRAY_SIZE(*n)) { 4013 mtree_print_phys_entries(jprev, j, prev.skip, prev.ptr); 4014 } 4015 } 4016 } 4017 4018 /* Require any discards to work. */ 4019 static unsigned int ram_block_discard_required_cnt; 4020 /* Require only coordinated discards to work. */ 4021 static unsigned int ram_block_coordinated_discard_required_cnt; 4022 /* Disable any discards. */ 4023 static unsigned int ram_block_discard_disabled_cnt; 4024 /* Disable only uncoordinated discards. */ 4025 static unsigned int ram_block_uncoordinated_discard_disabled_cnt; 4026 static QemuMutex ram_block_discard_disable_mutex; 4027 4028 static void ram_block_discard_disable_mutex_lock(void) 4029 { 4030 static gsize initialized; 4031 4032 if (g_once_init_enter(&initialized)) { 4033 qemu_mutex_init(&ram_block_discard_disable_mutex); 4034 g_once_init_leave(&initialized, 1); 4035 } 4036 qemu_mutex_lock(&ram_block_discard_disable_mutex); 4037 } 4038 4039 static void ram_block_discard_disable_mutex_unlock(void) 4040 { 4041 qemu_mutex_unlock(&ram_block_discard_disable_mutex); 4042 } 4043 4044 int ram_block_discard_disable(bool state) 4045 { 4046 int ret = 0; 4047 4048 ram_block_discard_disable_mutex_lock(); 4049 if (!state) { 4050 ram_block_discard_disabled_cnt--; 4051 } else if (ram_block_discard_required_cnt || 4052 ram_block_coordinated_discard_required_cnt) { 4053 ret = -EBUSY; 4054 } else { 4055 ram_block_discard_disabled_cnt++; 4056 } 4057 ram_block_discard_disable_mutex_unlock(); 4058 return ret; 4059 } 4060 4061 int ram_block_uncoordinated_discard_disable(bool state) 4062 { 4063 int ret = 0; 4064 4065 ram_block_discard_disable_mutex_lock(); 4066 if (!state) { 4067 ram_block_uncoordinated_discard_disabled_cnt--; 4068 } else if (ram_block_discard_required_cnt) { 4069 ret = -EBUSY; 4070 } else { 4071 ram_block_uncoordinated_discard_disabled_cnt++; 4072 } 4073 ram_block_discard_disable_mutex_unlock(); 4074 return ret; 4075 } 4076 4077 int ram_block_discard_require(bool state) 4078 { 4079 int ret = 0; 4080 4081 ram_block_discard_disable_mutex_lock(); 4082 if (!state) { 4083 ram_block_discard_required_cnt--; 4084 } else if (ram_block_discard_disabled_cnt || 4085 ram_block_uncoordinated_discard_disabled_cnt) { 4086 ret = -EBUSY; 4087 } else { 4088 ram_block_discard_required_cnt++; 4089 } 4090 ram_block_discard_disable_mutex_unlock(); 4091 return ret; 4092 } 4093 4094 int ram_block_coordinated_discard_require(bool state) 4095 { 4096 int ret = 0; 4097 4098 ram_block_discard_disable_mutex_lock(); 4099 if (!state) { 4100 ram_block_coordinated_discard_required_cnt--; 4101 } else if (ram_block_discard_disabled_cnt) { 4102 ret = -EBUSY; 4103 } else { 4104 ram_block_coordinated_discard_required_cnt++; 4105 } 4106 ram_block_discard_disable_mutex_unlock(); 4107 return ret; 4108 } 4109 4110 bool ram_block_discard_is_disabled(void) 4111 { 4112 return qatomic_read(&ram_block_discard_disabled_cnt) || 4113 qatomic_read(&ram_block_uncoordinated_discard_disabled_cnt); 4114 } 4115 4116 bool ram_block_discard_is_required(void) 4117 { 4118 return qatomic_read(&ram_block_discard_required_cnt) || 4119 qatomic_read(&ram_block_coordinated_discard_required_cnt); 4120 } 4121 4122 /* 4123 * Return true if ram is compatible with CPR. Do not exclude rom, 4124 * because the rom file could change in new QEMU. 4125 */ 4126 static bool ram_is_cpr_compatible(RAMBlock *rb) 4127 { 4128 MemoryRegion *mr = rb->mr; 4129 4130 if (!mr || !memory_region_is_ram(mr)) { 4131 return true; 4132 } 4133 4134 /* Ram device is remapped in new QEMU */ 4135 if (memory_region_is_ram_device(mr)) { 4136 return true; 4137 } 4138 4139 /* 4140 * A file descriptor is passed to new QEMU and remapped, or its backing 4141 * file is reopened and mapped. It must be shared to avoid COW. 4142 */ 4143 if (rb->fd >= 0 && qemu_ram_is_shared(rb)) { 4144 return true; 4145 } 4146 4147 return false; 4148 } 4149 4150 /* 4151 * Add a blocker for each volatile ram block. This function should only be 4152 * called after we know that the block is migratable. Non-migratable blocks 4153 * are either re-created in new QEMU, or are handled specially, or are covered 4154 * by a device-level CPR blocker. 4155 */ 4156 void ram_block_add_cpr_blocker(RAMBlock *rb, Error **errp) 4157 { 4158 assert(qemu_ram_is_migratable(rb)); 4159 4160 if (ram_is_cpr_compatible(rb)) { 4161 return; 4162 } 4163 4164 error_setg(&rb->cpr_blocker, 4165 "Memory region %s is not compatible with CPR. share=on is " 4166 "required for memory-backend objects, and aux-ram-share=on is " 4167 "required.", memory_region_name(rb->mr)); 4168 migrate_add_blocker_modes(&rb->cpr_blocker, errp, MIG_MODE_CPR_TRANSFER, 4169 -1); 4170 } 4171 4172 void ram_block_del_cpr_blocker(RAMBlock *rb) 4173 { 4174 migrate_del_blocker(&rb->cpr_blocker); 4175 } 4176