/*
 * RAM allocation and memory access
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "exec/page-vary.h"
#include "qapi/error.h"

#include "qemu/cutils.h"
#include "qemu/cacheflush.h"
#include "qemu/hbitmap.h"
#include "qemu/madvise.h"
#include "qemu/lockable.h"

#ifdef CONFIG_TCG
#include "accel/tcg/cpu-ops.h"
#include "accel/tcg/iommu.h"
#endif /* CONFIG_TCG */

#include "exec/cputlb.h"
#include "exec/page-protection.h"
#include "exec/target_page.h"
#include "exec/translation-block.h"
#include "hw/qdev-core.h"
#include "hw/qdev-properties.h"
#include "hw/boards.h"
#include "system/xen.h"
#include "system/kvm.h"
#include "system/tcg.h"
#include "system/qtest.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qemu/qemu-print.h"
#include "qemu/log.h"
#include "qemu/memalign.h"
#include "qemu/memfd.h"
#include "system/memory.h"
#include "system/ioport.h"
#include "system/dma.h"
#include "system/hostmem.h"
#include "system/hw_accel.h"
#include "system/xen-mapcache.h"
#include "trace.h"

#ifdef CONFIG_FALLOCATE_PUNCH_HOLE
#include <linux/falloc.h>
#endif

#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "system/replay.h"

#include "system/ram_addr.h"

#include "qemu/pmem.h"

#include "qapi/qapi-types-migration.h"
#include "migration/blocker.h"
#include "migration/cpr.h"
#include "migration/options.h"
#include "migration/vmstate.h"

#include "qemu/range.h"
#ifndef _WIN32
#include "qemu/mmap-alloc.h"
#endif

#include "monitor/monitor.h"

#ifdef CONFIG_LIBDAXCTL
#include <daxctl/libdaxctl.h>
#endif

#include "memory-internal.h"

//#define DEBUG_SUBPAGE

/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

static MemoryRegion io_mem_unassigned;

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits to skip to the next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)
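
/*
 * Packing note (illustrative, spelling out the arithmetic above): skip and
 * ptr share one 32-bit word, so a node index is limited to 26 bits.
 * PHYS_MAP_NODE_NIL is the all-ones 26-bit value:
 *
 *     ((uint32_t)~0) >> 6 == 0x03ffffff == 2^26 - 1
 *
 * i.e. the single ptr value reserved to mean "no node allocated yet".
 */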

/* Size of the L2 (and L3, etc) page tables. */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    MemoryRegionSection *mru_section;
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    FlatView *fv;
    hwaddr base;
    uint16_t sub_section[];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_log_global_after_sync(MemoryListener *listener);
static void tcg_commit(MemoryListener *listener);
static bool ram_is_cpr_compatible(RAMBlock *rb);

/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
typedef struct CPUAddressSpace {
    CPUState *cpu;
    AddressSpace *as;
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
} CPUAddressSpace;

struct DirtyBitmapSnapshot {
    ram_addr_t start;
    ram_addr_t end;
    unsigned long dirty[];
};

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    static unsigned alloc_hint = 16;
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(alloc_hint, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
        alloc_hint = map->nodes_nb_alloc;
    }
}
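
/*
 * Worked example for the P_L2_LEVELS arithmetic above (TARGET_PAGE_BITS is
 * target-dependent; 12, i.e. 4 KiB pages, is assumed here for illustration):
 *
 *     P_L2_LEVELS = ((64 - 12 - 1) / 9) + 1 = (51 / 9) + 1 = 5 + 1 = 6
 *
 * so the dispatch radix tree is at most six levels deep, each level indexed
 * by P_L2_BITS = 9 bits of the page number.
 */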

static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}

static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, uint64_t *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, uint64_t nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

/* Compact a non-leaf page entry. Simply detect that the entry has a single
 * child, and update our entry so we can skip it and go directly to the
 * destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (P_L2_LEVELS >= (1 << 6) &&
        lp->skip + p[valid_ptr].skip >= (1 << 6)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

void address_space_dispatch_compact(AddressSpaceDispatch *d)
{
    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes);
    }
}
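
/*
 * Illustration of the compaction above (values chosen for exposition): if
 * an inner entry with skip == 1 has a single child that itself skips two
 * levels, the entry is rewritten to point straight at the grandchild with
 * skip == 3, so a lookup consumes 3 * P_L2_BITS index bits in one step
 * instead of walking three one-level nodes.
 */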

static inline bool section_covers_addr(const MemoryRegionSection *section,
                                       hwaddr addr)
{
    /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
     * the section must cover the entire address space.
     */
    return int128_gethi(section->size) ||
           range_covers_byte(section->offset_within_address_space,
                             int128_getlo(section->size), addr);
}

static MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr addr)
{
    PhysPageEntry lp = d->phys_map, *p;
    Node *nodes = d->map.nodes;
    MemoryRegionSection *sections = d->map.sections;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (section_covers_addr(&sections[lp.ptr], addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section = qatomic_read(&d->mru_section);
    subpage_t *subpage;

    if (!section || section == &d->map.sections[PHYS_SECTION_UNASSIGNED] ||
        !section_covers_addr(section, addr)) {
        section = phys_page_find(d, addr);
        qatomic_set(&d->mru_section, section);
    }
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}
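
/*
 * A worked example of the RAM clamping above (numbers invented for
 * illustration): for a RAM section of size 0x10000 and an access at offset
 * 0xff00 within it with *plen == 0x1000, diff is 0x10000 - 0xff00 = 0x100,
 * so *plen is clamped from 0x1000 down to 0x100 and the access never
 * crosses the end of the section.
 */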

/**
 * address_space_translate_iommu - translate an address through an IOMMU
 * memory region and then through the target address space.
 *
 * @iommu_mr: the IOMMU memory region that we start the translation from
 * @addr: the address to be translated through the MMU
 * @xlat: the translated address offset within the destination memory region.
 *        It cannot be %NULL.
 * @plen_out: valid read/write length of the translated address.  It
 *            cannot be %NULL.
 * @page_mask_out: page mask for the translated address.  This is only
 *                 meaningful for IOMMU translated addresses, since there
 *                 may be huge pages whose size this mask reflects.  It can
 *                 be %NULL if we don't care about it.
 * @is_write: whether the translation operation is for write
 * @is_mmio: whether this can be MMIO, set true if it can
 * @target_as: the address space targeted by the IOMMU
 * @attrs: transaction attributes
 *
 * This function is called from an RCU critical section.  It is the common
 * part of flatview_do_translate and address_space_translate_cached.
 */
static MemoryRegionSection address_space_translate_iommu(IOMMUMemoryRegion *iommu_mr,
                                                         hwaddr *xlat,
                                                         hwaddr *plen_out,
                                                         hwaddr *page_mask_out,
                                                         bool is_write,
                                                         bool is_mmio,
                                                         AddressSpace **target_as,
                                                         MemTxAttrs attrs)
{
    MemoryRegionSection *section;
    hwaddr page_mask = (hwaddr)-1;

    do {
        hwaddr addr = *xlat;
        IOMMUMemoryRegionClass *imrc = memory_region_get_iommu_class_nocheck(iommu_mr);
        int iommu_idx = 0;
        IOMMUTLBEntry iotlb;

        if (imrc->attrs_to_index) {
            iommu_idx = imrc->attrs_to_index(iommu_mr, attrs);
        }

        iotlb = imrc->translate(iommu_mr, addr, is_write ?
                                IOMMU_WO : IOMMU_RO, iommu_idx);

        if (!(iotlb.perm & (1 << is_write))) {
            goto unassigned;
        }

        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        page_mask &= iotlb.addr_mask;
        *plen_out = MIN(*plen_out, (addr | iotlb.addr_mask) - addr + 1);
        *target_as = iotlb.target_as;

        section = address_space_translate_internal(
                address_space_to_dispatch(iotlb.target_as), addr, xlat,
                plen_out, is_mmio);

        iommu_mr = memory_region_get_iommu(section->mr);
    } while (unlikely(iommu_mr));

    if (page_mask_out) {
        *page_mask_out = page_mask;
    }
    return *section;

unassigned:
    return (MemoryRegionSection) { .mr = &io_mem_unassigned };
}
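
/*
 * Note on the permission test above: IOMMU_RO is bit 0 and IOMMU_WO is
 * bit 1 of IOMMUTLBEntry.perm, so "iotlb.perm & (1 << is_write)" checks
 * the read bit for a read (is_write == 0) and the write bit for a write
 * (is_write == 1).
 */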

/**
 * flatview_do_translate - translate an address in FlatView
 *
 * @fv: the flat view that we want to translate on
 * @addr: the address to be translated in above address space
 * @xlat: the translated address offset within memory region.  It
 *        cannot be %NULL.
 * @plen_out: valid read/write length of the translated address.  It
 *            can be %NULL when we don't care about it.
 * @page_mask_out: page mask for the translated address.  This is only
 *                 meaningful for IOMMU translated addresses, since there
 *                 may be huge pages whose size this mask reflects.  It can
 *                 be %NULL if we don't care about it.
 * @is_write: whether the translation operation is for write
 * @is_mmio: whether this can be MMIO, set true if it can
 * @target_as: the address space targeted by the IOMMU
 * @attrs: memory transaction attributes
 *
 * This function is called from an RCU critical section.
 */
static MemoryRegionSection flatview_do_translate(FlatView *fv,
                                                 hwaddr addr,
                                                 hwaddr *xlat,
                                                 hwaddr *plen_out,
                                                 hwaddr *page_mask_out,
                                                 bool is_write,
                                                 bool is_mmio,
                                                 AddressSpace **target_as,
                                                 MemTxAttrs attrs)
{
    MemoryRegionSection *section;
    IOMMUMemoryRegion *iommu_mr;
    hwaddr plen = (hwaddr)(-1);

    if (!plen_out) {
        plen_out = &plen;
    }

    section = address_space_translate_internal(
            flatview_to_dispatch(fv), addr, xlat,
            plen_out, is_mmio);

    iommu_mr = memory_region_get_iommu(section->mr);
    if (unlikely(iommu_mr)) {
        return address_space_translate_iommu(iommu_mr, xlat,
                                             plen_out, page_mask_out,
                                             is_write, is_mmio,
                                             target_as, attrs);
    }
    if (page_mask_out) {
        /* Not behind an IOMMU, use default page size. */
        *page_mask_out = ~TARGET_PAGE_MASK;
    }

    return *section;
}

/* Called from RCU critical section */
IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
                                            bool is_write, MemTxAttrs attrs)
{
    MemoryRegionSection section;
    hwaddr xlat, page_mask;

    /*
     * This can never be MMIO, and we don't really care about plen,
     * only the page mask.
     */
    section = flatview_do_translate(address_space_to_flatview(as), addr, &xlat,
                                    NULL, &page_mask, is_write, false, &as,
                                    attrs);

    /* Illegal translation */
    if (section.mr == &io_mem_unassigned) {
        goto iotlb_fail;
    }

    /* Convert memory region offset into address space offset */
    xlat += section.offset_within_address_space -
            section.offset_within_region;

    return (IOMMUTLBEntry) {
        .target_as = as,
        .iova = addr & ~page_mask,
        .translated_addr = xlat & ~page_mask,
        .addr_mask = page_mask,
        /* IOTLBs are for DMAs, and DMA is only allowed on RAM. */
        .perm = IOMMU_RW,
    };

iotlb_fail:
    return (IOMMUTLBEntry) {0};
}

/* Called from RCU critical section */
MemoryRegion *flatview_translate(FlatView *fv, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool is_write,
                                 MemTxAttrs attrs)
{
    MemoryRegion *mr;
    MemoryRegionSection section;
    AddressSpace *as = NULL;

    /* This can be MMIO, so setup MMIO bit. */
    section = flatview_do_translate(fv, addr, xlat, plen, NULL,
                                    is_write, true, &as, attrs);
    mr = section.mr;

    if (xen_enabled() && memory_access_is_direct(mr, is_write, attrs)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    return mr;
}
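
/*
 * Worked example for address_space_get_iotlb_entry() above (numbers
 * invented): with a 2 MiB IOMMU mapping, page_mask == 0x1fffff, so for
 * addr == 0x12345678 the returned entry has .iova == 0x12200000 and
 * .addr_mask == 0x1fffff, i.e. iova and translated_addr are aligned down
 * to the start of the huge page that the mask describes.
 */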

#ifdef CONFIG_TCG

typedef struct TCGIOMMUNotifier {
    IOMMUNotifier n;
    MemoryRegion *mr;
    CPUState *cpu;
    int iommu_idx;
    bool active;
} TCGIOMMUNotifier;

static void tcg_iommu_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
{
    TCGIOMMUNotifier *notifier = container_of(n, TCGIOMMUNotifier, n);

    if (!notifier->active) {
        return;
    }
    tlb_flush(notifier->cpu);
    notifier->active = false;
    /* We leave the notifier struct on the list to avoid reallocating it later.
     * Generally the number of IOMMUs a CPU deals with will be small.
     * In any case we can't unregister the iommu notifier from a notify
     * callback.
     */
}

static void tcg_register_iommu_notifier(CPUState *cpu,
                                        IOMMUMemoryRegion *iommu_mr,
                                        int iommu_idx)
{
    /* Make sure this CPU has an IOMMU notifier registered for this
     * IOMMU/IOMMU index combination, so that we can flush its TLB
     * when the IOMMU tells us the mappings we've cached have changed.
     */
    MemoryRegion *mr = MEMORY_REGION(iommu_mr);
    TCGIOMMUNotifier *notifier = NULL;
    int i;

    for (i = 0; i < cpu->iommu_notifiers->len; i++) {
        notifier = g_array_index(cpu->iommu_notifiers, TCGIOMMUNotifier *, i);
        if (notifier->mr == mr && notifier->iommu_idx == iommu_idx) {
            break;
        }
    }
    if (i == cpu->iommu_notifiers->len) {
        /* Not found, add a new entry at the end of the array */
        cpu->iommu_notifiers = g_array_set_size(cpu->iommu_notifiers, i + 1);
        notifier = g_new0(TCGIOMMUNotifier, 1);
        g_array_index(cpu->iommu_notifiers, TCGIOMMUNotifier *, i) = notifier;

        notifier->mr = mr;
        notifier->iommu_idx = iommu_idx;
        notifier->cpu = cpu;
        /* Rather than trying to register interest in the specific part
         * of the iommu's address space that we've accessed and then
         * expand it later as subsequent accesses touch more of it, we
         * just register interest in the whole thing, on the assumption
         * that iommu reconfiguration will be rare.
         */
        iommu_notifier_init(&notifier->n,
                            tcg_iommu_unmap_notify,
                            IOMMU_NOTIFIER_UNMAP,
                            0,
                            HWADDR_MAX,
                            iommu_idx);
        memory_region_register_iommu_notifier(notifier->mr, &notifier->n,
                                              &error_fatal);
    }

    if (!notifier->active) {
        notifier->active = true;
    }
}

void tcg_iommu_free_notifier_list(CPUState *cpu)
{
    /* Destroy the CPU's notifier list */
    int i;
    TCGIOMMUNotifier *notifier;

    for (i = 0; i < cpu->iommu_notifiers->len; i++) {
        notifier = g_array_index(cpu->iommu_notifiers, TCGIOMMUNotifier *, i);
        memory_region_unregister_iommu_notifier(notifier->mr, &notifier->n);
        g_free(notifier);
    }
    g_array_free(cpu->iommu_notifiers, true);
}

void tcg_iommu_init_notifier_list(CPUState *cpu)
{
    cpu->iommu_notifiers = g_array_new(false, true, sizeof(TCGIOMMUNotifier *));
}

/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr orig_addr,
                                  hwaddr *xlat, hwaddr *plen,
                                  MemTxAttrs attrs, int *prot)
{
    MemoryRegionSection *section;
    IOMMUMemoryRegion *iommu_mr;
    IOMMUMemoryRegionClass *imrc;
    IOMMUTLBEntry iotlb;
    int iommu_idx;
    hwaddr addr = orig_addr;
    AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;

    for (;;) {
        section = address_space_translate_internal(d, addr, &addr, plen, false);

        iommu_mr = memory_region_get_iommu(section->mr);
        if (!iommu_mr) {
            break;
        }

        imrc = memory_region_get_iommu_class_nocheck(iommu_mr);

        iommu_idx = imrc->attrs_to_index(iommu_mr, attrs);
        tcg_register_iommu_notifier(cpu, iommu_mr, iommu_idx);
        /* We need all the permissions, so pass IOMMU_NONE so the IOMMU
         * doesn't short-cut its translation table walk.
         */
        iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, iommu_idx);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        /* Update the caller's prot bits to remove permissions the IOMMU
         * is giving us a failure response for.  If we get down to no
         * permissions left at all we can give up now.
         */
        if (!(iotlb.perm & IOMMU_RO)) {
            *prot &= ~(PAGE_READ | PAGE_EXEC);
        }
        if (!(iotlb.perm & IOMMU_WO)) {
            *prot &= ~PAGE_WRITE;
        }

        if (!*prot) {
            goto translate_fail;
        }

        d = flatview_to_dispatch(address_space_to_flatview(iotlb.target_as));
    }

    assert(!memory_region_is_iommu(section->mr));
    *xlat = addr;
    return section;

translate_fail:
    /*
     * We should be given a page-aligned address -- certainly
     * tlb_set_page_with_attrs() does so.  The page offset of xlat
     * is used to index sections[], and PHYS_SECTION_UNASSIGNED = 0.
     * The page portion of xlat will be logged by memory_region_access_valid()
     * when this memory access is rejected, so use the original untranslated
     * physical address.
     */
    assert((orig_addr & ~TARGET_PAGE_MASK) == 0);
    *xlat = orig_addr;
    return &d->map.sections[PHYS_SECTION_UNASSIGNED];
}

MemoryRegionSection *iotlb_to_section(CPUState *cpu,
                                      hwaddr index, MemTxAttrs attrs)
{
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
    AddressSpaceDispatch *d = cpuas->memory_dispatch;
    int section_index = index & ~TARGET_PAGE_MASK;
    MemoryRegionSection *ret;

    assert(section_index < d->map.sections_nb);
    ret = d->map.sections + section_index;
    assert(ret->mr);
    assert(ret->mr->ops);

    return ret;
}

/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section)
{
    AddressSpaceDispatch *d = flatview_to_dispatch(section->fv);
    return section - d->map.sections;
}

#endif /* CONFIG_TCG */

void cpu_address_space_init(CPUState *cpu, int asidx,
                            const char *prefix, MemoryRegion *mr)
{
    CPUAddressSpace *newas;
    AddressSpace *as = g_new0(AddressSpace, 1);
    char *as_name;

    assert(mr);
    as_name = g_strdup_printf("%s-%d", prefix, cpu->cpu_index);
    address_space_init(as, mr, as_name);
    g_free(as_name);

    /* Target code should have set num_ases before calling us */
    assert(asidx < cpu->num_ases);

    if (asidx == 0) {
        /* address space 0 gets the convenience alias */
        cpu->as = as;
    }

    /* KVM cannot currently support multiple address spaces. */
    assert(asidx == 0 || !kvm_enabled());

    if (!cpu->cpu_ases) {
        cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
        cpu->cpu_ases_count = cpu->num_ases;
    }

    newas = &cpu->cpu_ases[asidx];
    newas->cpu = cpu;
    newas->as = as;
    if (tcg_enabled()) {
        newas->tcg_as_listener.log_global_after_sync = tcg_log_global_after_sync;
        newas->tcg_as_listener.commit = tcg_commit;
        newas->tcg_as_listener.name = "tcg";
        memory_listener_register(&newas->tcg_as_listener, as);
    }
}
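
/*
 * A minimal usage sketch (hypothetical target code, not part of this file):
 * a CPU with a secure and a non-secure memory view would set num_ases = 2
 * before realize and then call, with its own region names and MRs:
 *
 *     cpu_address_space_init(cs, 0, "cpu-memory", machine_memory_mr);
 *     cpu_address_space_init(cs, 1, "cpu-secure-memory", secure_mr);
 *
 * Index 0 additionally becomes the cpu->as convenience alias per the code
 * above; with KVM enabled only index 0 is permitted.
 */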

void cpu_address_space_destroy(CPUState *cpu, int asidx)
{
    CPUAddressSpace *cpuas;

    assert(cpu->cpu_ases);
    assert(asidx >= 0 && asidx < cpu->num_ases);
    /* KVM cannot currently support multiple address spaces. */
    assert(asidx == 0 || !kvm_enabled());

    cpuas = &cpu->cpu_ases[asidx];
    if (tcg_enabled()) {
        memory_listener_unregister(&cpuas->tcg_as_listener);
    }

    address_space_destroy(cpuas->as);
    g_free_rcu(cpuas->as, rcu);

    if (asidx == 0) {
        /* reset the convenience alias for address space 0 */
        cpu->as = NULL;
    }

    if (--cpu->cpu_ases_count == 0) {
        g_free(cpu->cpu_ases);
        cpu->cpu_ases = NULL;
    }
}

AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
{
    /* Return the AddressSpace corresponding to the specified index */
    return cpu->cpu_ases[asidx].as;
}

/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = qatomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        return block;
    }
    RAMBLOCK_FOREACH(block) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the BQL.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *                                        xxx removed from list
     *                  rcu_read_lock()
     *                  read mru_block
     *                                        mru_block = NULL;
     *                                        call_rcu(reclaim_ramblock, xxx);
     *                  rcu_read_unlock()
     *
     * qatomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}

void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    CPUState *cpu;
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    assert(tcg_enabled());
    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    RCU_READ_LOCK_GUARD();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    CPU_FOREACH(cpu) {
        tlb_reset_dirty(cpu, start1, length);
    }
}
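
/*
 * Note on the dirty bitmap layout used below: the global dirty memory
 * bitmaps are split into DIRTY_MEMORY_BLOCK_SIZE-page chunks, so a page
 * number maps to chunk "page / DIRTY_MEMORY_BLOCK_SIZE" at bit offset
 * "page % DIRTY_MEMORY_BLOCK_SIZE"; walks over a range therefore proceed
 * chunk by chunk, clamping each step with
 * MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset).
 */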

/* Note: start and end must be within the same ram block. */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page, start_page;
    bool dirty = false;
    RAMBlock *ramblock;
    uint64_t mr_offset, mr_size;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    start_page = start >> TARGET_PAGE_BITS;
    page = start_page;

    WITH_RCU_READ_LOCK_GUARD() {
        blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]);
        ramblock = qemu_get_ram_block(start);
        /* Range sanity check on the ramblock */
        assert(start >= ramblock->offset &&
               start + length <= ramblock->offset + ramblock->used_length);

        while (page < end) {
            unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
            unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
            unsigned long num = MIN(end - page,
                                    DIRTY_MEMORY_BLOCK_SIZE - offset);

            dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
                                                  offset, num);
            page += num;
        }

        mr_offset = (ram_addr_t)(start_page << TARGET_PAGE_BITS) - ramblock->offset;
        mr_size = (end - start_page) << TARGET_PAGE_BITS;
        memory_region_clear_dirty_bitmap(ramblock->mr, mr_offset, mr_size);
    }

    if (dirty) {
        cpu_physical_memory_dirty_bits_cleared(start, length);
    }

    return dirty;
}

DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty
    (MemoryRegion *mr, hwaddr offset, hwaddr length, unsigned client)
{
    DirtyMemoryBlocks *blocks;
    ram_addr_t start, first, last;
    unsigned long align = 1UL << (TARGET_PAGE_BITS + BITS_PER_LEVEL);
    DirtyBitmapSnapshot *snap;
    unsigned long page, end, dest;

    start = memory_region_get_ram_addr(mr);
    /* We know we're only called for RAM MemoryRegions */
    assert(start != RAM_ADDR_INVALID);
    start += offset;

    first = QEMU_ALIGN_DOWN(start, align);
    last = QEMU_ALIGN_UP(start + length, align);

    snap = g_malloc0(sizeof(*snap) +
                     ((last - first) >> (TARGET_PAGE_BITS + 3)));
    snap->start = first;
    snap->end = last;

    page = first >> TARGET_PAGE_BITS;
    end = last >> TARGET_PAGE_BITS;
    dest = 0;

    WITH_RCU_READ_LOCK_GUARD() {
        blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]);

        while (page < end) {
            unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
            unsigned long ofs = page % DIRTY_MEMORY_BLOCK_SIZE;
            unsigned long num = MIN(end - page,
                                    DIRTY_MEMORY_BLOCK_SIZE - ofs);

            assert(QEMU_IS_ALIGNED(ofs, (1 << BITS_PER_LEVEL)));
            assert(QEMU_IS_ALIGNED(num, (1 << BITS_PER_LEVEL)));
            ofs >>= BITS_PER_LEVEL;

            bitmap_copy_and_clear_atomic(snap->dirty + dest,
                                         blocks->blocks[idx] + ofs,
                                         num);
            page += num;
            dest += num >> BITS_PER_LEVEL;
        }
    }

    cpu_physical_memory_dirty_bits_cleared(start, length);

    memory_region_clear_dirty_bitmap(mr, offset, length);

    return snap;
}
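
/*
 * Typical use of the snapshot taken above together with the query below
 * (a sketch; display code follows this pattern via the MemoryRegion
 * wrappers memory_region_snapshot_and_clear_dirty() and
 * memory_region_snapshot_get_dirty()):
 *
 *     DirtyBitmapSnapshot *snap;
 *     snap = memory_region_snapshot_and_clear_dirty(mr, 0, size,
 *                                                   DIRTY_MEMORY_VGA);
 *     for (each scanline range [addr, addr + len)) {
 *         if (memory_region_snapshot_get_dirty(mr, snap, addr, len)) {
 *             ... redraw that scanline ...
 *         }
 *     }
 *     g_free(snap);
 */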

bool cpu_physical_memory_snapshot_get_dirty(DirtyBitmapSnapshot *snap,
                                            ram_addr_t start,
                                            ram_addr_t length)
{
    unsigned long page, end;

    assert(start >= snap->start);
    assert(start + length <= snap->end);

    end = TARGET_PAGE_ALIGN(start + length - snap->start) >> TARGET_PAGE_BITS;
    page = (start - snap->start) >> TARGET_PAGE_BITS;

    while (page < end) {
        if (test_bit(page, snap->dirty)) {
            return true;
        }
        page++;
    }
    return false;
}

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(FlatView *fv, hwaddr base);

static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    bool have_sub_page = mr->subpage;

    memory_region_unref(mr);

    if (have_sub_page) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

static void register_subpage(FlatView *fv, MemoryRegionSection *section)
{
    AddressSpaceDispatch *d = flatview_to_dispatch(fv);
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d, base);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(fv, base);
        subsection.fv = fv;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}


static void register_multipage(FlatView *fv,
                               MemoryRegionSection *section)
{
    AddressSpaceDispatch *d = flatview_to_dispatch(fv);
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}
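
/*
 * Aside on the assert in phys_section_add() above: iotlb_to_section()
 * recovers the section with "index & ~TARGET_PAGE_MASK", i.e. the section
 * number lives in the sub-page bits of the iotlb value while the page
 * portion carries the address.  That only works while sections_nb stays
 * below TARGET_PAGE_SIZE, which is exactly what the assert enforces.
 */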

/*
 * The range in *section* may look like this:
 *
 *      |s|PPPPPPP|s|
 *
 * where s stands for subpage and P for page.
 */
void flatview_add_to_dispatch(FlatView *fv, MemoryRegionSection *section)
{
    MemoryRegionSection remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    /* register first subpage */
    if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(remain.offset_within_address_space)
                        - remain.offset_within_address_space;

        MemoryRegionSection now = remain;
        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(fv, &now);
        if (int128_eq(remain.size, now.size)) {
            return;
        }
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
    }

    /* register whole pages */
    if (int128_ge(remain.size, page_size)) {
        MemoryRegionSection now = remain;
        now.size = int128_and(now.size, int128_neg(page_size));
        register_multipage(fv, &now);
        if (int128_eq(remain.size, now.size)) {
            return;
        }
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
    }

    /* register last subpage */
    register_subpage(fv, &remain);
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled()) {
        kvm_flush_coalesced_mmio_buffer();
    }
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

GString *ram_block_format(void)
{
    RAMBlock *block;
    char *psize;
    GString *buf = g_string_new("");

    RCU_READ_LOCK_GUARD();
    g_string_append_printf(buf, "%24s %8s %18s %18s %18s %18s %3s\n",
                           "Block Name", "PSize", "Offset", "Used", "Total",
                           "HVA", "RO");

    RAMBLOCK_FOREACH(block) {
        psize = size_to_str(block->page_size);
        g_string_append_printf(buf, "%24s %8s 0x%016" PRIx64 " 0x%016" PRIx64
                               " 0x%016" PRIx64 " 0x%016" PRIx64 " %3s\n",
                               block->idstr, psize,
                               (uint64_t)block->offset,
                               (uint64_t)block->used_length,
                               (uint64_t)block->max_length,
                               (uint64_t)(uintptr_t)block->host,
                               block->mr->readonly ? "ro" : "rw");

        g_free(psize);
    }

    return buf;
}

static int find_min_backend_pagesize(Object *obj, void *opaque)
{
    long *hpsize_min = opaque;

    if (object_dynamic_cast(obj, TYPE_MEMORY_BACKEND)) {
        HostMemoryBackend *backend = MEMORY_BACKEND(obj);
        long hpsize = host_memory_backend_pagesize(backend);

        if (host_memory_backend_is_mapped(backend) && (hpsize < *hpsize_min)) {
            *hpsize_min = hpsize;
        }
    }

    return 0;
}

static int find_max_backend_pagesize(Object *obj, void *opaque)
{
    long *hpsize_max = opaque;

    if (object_dynamic_cast(obj, TYPE_MEMORY_BACKEND)) {
        HostMemoryBackend *backend = MEMORY_BACKEND(obj);
        long hpsize = host_memory_backend_pagesize(backend);

        if (host_memory_backend_is_mapped(backend) && (hpsize > *hpsize_max)) {
            *hpsize_max = hpsize;
        }
    }

    return 0;
}

/*
 * TODO: We assume right now that all mapped host memory backends are
 * used as RAM, however some might be used for different purposes.
 */
long qemu_minrampagesize(void)
{
    long hpsize = LONG_MAX;
    Object *memdev_root = object_resolve_path("/objects", NULL);

    object_child_foreach(memdev_root, find_min_backend_pagesize, &hpsize);
    return hpsize;
}

long qemu_maxrampagesize(void)
{
    long pagesize = 0;
    Object *memdev_root = object_resolve_path("/objects", NULL);

    object_child_foreach(memdev_root, find_max_backend_pagesize, &pagesize);
    return pagesize;
}

#if defined(CONFIG_POSIX) && !defined(EMSCRIPTEN)
static int64_t get_file_size(int fd)
{
    int64_t size;
#if defined(__linux__)
    struct stat st;

    if (fstat(fd, &st) < 0) {
        return -errno;
    }

    /* Special handling for devdax character devices */
    if (S_ISCHR(st.st_mode)) {
        g_autofree char *subsystem_path = NULL;
        g_autofree char *subsystem = NULL;

        subsystem_path = g_strdup_printf("/sys/dev/char/%d:%d/subsystem",
                                         major(st.st_rdev), minor(st.st_rdev));
        subsystem = g_file_read_link(subsystem_path, NULL);

        if (subsystem && g_str_has_suffix(subsystem, "/dax")) {
            g_autofree char *size_path = NULL;
            g_autofree char *size_str = NULL;

            size_path = g_strdup_printf("/sys/dev/char/%d:%d/size",
                                        major(st.st_rdev), minor(st.st_rdev));

            if (g_file_get_contents(size_path, &size_str, NULL, NULL)) {
                return g_ascii_strtoll(size_str, NULL, 0);
            }
        }
    }
#endif /* defined(__linux__) */

    /* st.st_size may be zero for special files yet lseek(2) works */
    size = lseek(fd, 0, SEEK_END);
    if (size < 0) {
        return -errno;
    }
    return size;
}

static int64_t get_file_align(int fd)
{
    int64_t align = -1;
#if defined(__linux__) && defined(CONFIG_LIBDAXCTL)
    struct stat st;

    if (fstat(fd, &st) < 0) {
        return -errno;
    }

    /* Special handling for devdax character devices */
    if (S_ISCHR(st.st_mode)) {
        g_autofree char *path = NULL;
        g_autofree char *rpath = NULL;
        struct daxctl_ctx *ctx;
        struct daxctl_region *region;
        int rc = 0;

        path = g_strdup_printf("/sys/dev/char/%d:%d",
                               major(st.st_rdev), minor(st.st_rdev));
        rpath = realpath(path, NULL);
        if (!rpath) {
            return -errno;
        }

        rc = daxctl_new(&ctx);
        if (rc) {
            return -1;
        }

        daxctl_region_foreach(ctx, region) {
            if (strstr(rpath, daxctl_region_get_path(region))) {
                align = daxctl_region_get_align(region);
                break;
            }
        }
        daxctl_unref(ctx);
    }
#endif /* defined(__linux__) && defined(CONFIG_LIBDAXCTL) */

    return align;
}

static int file_ram_open(const char *path,
                         const char *region_name,
                         bool readonly,
                         bool *created)
{
    char *filename;
    char *sanitized_name;
    char *c;
    int fd = -1;

    *created = false;
    for (;;) {
        fd = open(path, readonly ? O_RDONLY : O_RDWR);
        if (fd >= 0) {
            /*
             * open(O_RDONLY) won't fail with EISDIR.  Check manually if we
             * opened a directory and fail similarly to how we fail ENOENT
             * in readonly mode.  Note that mkstemp() would imply O_RDWR.
             */
            if (readonly) {
                struct stat file_stat;

                if (fstat(fd, &file_stat)) {
                    close(fd);
                    if (errno == EINTR) {
                        continue;
                    }
                    return -errno;
                } else if (S_ISDIR(file_stat.st_mode)) {
                    close(fd);
                    return -EISDIR;
                }
            }
            /* @path names an existing file, use it */
            break;
        }
        if (errno == ENOENT) {
            if (readonly) {
                /* Refuse to create new, readonly files. */
                return -ENOENT;
            }
            /* @path names a file that doesn't exist, create it */
            fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644);
            if (fd >= 0) {
                *created = true;
                break;
            }
        } else if (errno == EISDIR) {
            /* @path names a directory, create a file there */
            /* Make name safe to use with mkstemp by replacing '/' with '_'. */
            sanitized_name = g_strdup(region_name);
            for (c = sanitized_name; *c != '\0'; c++) {
                if (*c == '/') {
                    *c = '_';
                }
            }

            filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                                       sanitized_name);
            g_free(sanitized_name);

            fd = mkstemp(filename);
            if (fd >= 0) {
                unlink(filename);
                g_free(filename);
                break;
            }
            g_free(filename);
        }
        if (errno != EEXIST && errno != EINTR) {
            return -errno;
        }
        /*
         * Try again on EINTR and EEXIST.  The latter happens when
         * something else creates the file between our two open().
         */
    }

    return fd;
}
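
/*
 * Design note on file_ram_open() above: when @path is a directory, the
 * backing file is created with mkstemp() and then immediately unlink()ed,
 * so the open fd keeps the (now anonymous) file alive and no stale
 * "qemu_back_mem.*" entries survive a crash.  The retry loop only repeats
 * on EINTR and on the EEXIST race documented at its end.
 */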

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            int fd,
                            bool truncate,
                            off_t offset,
                            Error **errp)
{
    uint32_t qemu_map_flags;
    void *area;

    block->page_size = qemu_fd_getpagesize(fd);
    if (block->mr->align % block->page_size) {
        error_setg(errp, "alignment 0x%" PRIx64
                   " must be a multiple of page size 0x%zx",
                   block->mr->align, block->page_size);
        return NULL;
    } else if (block->mr->align && !is_power_of_2(block->mr->align)) {
        error_setg(errp, "alignment 0x%" PRIx64
                   " must be a power of two", block->mr->align);
        return NULL;
    } else if (offset % block->page_size) {
        error_setg(errp, "offset 0x%" PRIx64
                   " must be a multiple of page size 0x%zx",
                   offset, block->page_size);
        return NULL;
    }
    block->mr->align = MAX(block->page_size, block->mr->align);
#if defined(__s390x__)
    if (kvm_enabled()) {
        block->mr->align = MAX(block->mr->align, QEMU_VMALLOC_ALIGN);
    }
#endif

    if (memory < block->page_size) {
        error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
                   "or larger than page size 0x%zx",
                   memory, block->page_size);
        return NULL;
    }

    memory = ROUND_UP(memory, block->page_size);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     *
     * Do not truncate the non-empty backend file to avoid corrupting
     * the existing data in the file.  Disabling shrinking is not
     * enough.  For example, the current vNVDIMM implementation stores
     * the guest NVDIMM labels at the end of the backend file.  If the
     * backend file is later extended, QEMU will not be able to find
     * those labels.  Therefore, extending the non-empty backend file
     * is disabled as well.
     */
    if (truncate && ftruncate(fd, offset + memory)) {
        perror("ftruncate");
    }

    qemu_map_flags = (block->flags & RAM_READONLY) ? QEMU_MAP_READONLY : 0;
    qemu_map_flags |= (block->flags & RAM_SHARED) ? QEMU_MAP_SHARED : 0;
    qemu_map_flags |= (block->flags & RAM_PMEM) ? QEMU_MAP_SYNC : 0;
    qemu_map_flags |= (block->flags & RAM_NORESERVE) ? QEMU_MAP_NORESERVE : 0;
    area = qemu_ram_mmap(fd, memory, block->mr->align, qemu_map_flags, offset);
    if (area == MAP_FAILED) {
        error_setg_errno(errp, errno,
                         "unable to map backing store for guest RAM");
        return NULL;
    }

    block->fd = fd;
    block->fd_offset = offset;
    return area;
}
#endif

/* Allocate space within the ram_addr_t space that governs the
 * dirty bitmaps.
 * Called with the ramlist lock held.
 */
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out the same offset multiple times */

    if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
        return 0;
    }

    RAMBLOCK_FOREACH(block) {
        ram_addr_t candidate, next = RAM_ADDR_MAX;

        /* Align blocks to start on a 'long' in the bitmap
         * which makes the bitmap sync'ing take the fast path.
         */
        candidate = block->offset + block->max_length;
        candidate = ROUND_UP(candidate, BITS_PER_LONG << TARGET_PAGE_BITS);

        /* Search for the closest following block
         * and find the gap.
         */
        RAMBLOCK_FOREACH(next_block) {
            if (next_block->offset >= candidate) {
                next = MIN(next, next_block->offset);
            }
        }

        /* If it fits, remember our place and remember the size
         * of the gap, but keep going so that we might find a smaller
         * gap to fill, thus avoiding fragmentation.
         */
        if (next - candidate >= size && next - candidate < mingap) {
            offset = candidate;
            mingap = next - candidate;
        }

        trace_find_ram_offset_loop(size, candidate, offset, next, mingap);
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    trace_find_ram_offset(size, offset);

    return offset;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    if (!machine_dump_guest_core(current_machine)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                            "but dump-guest-core=off specified\n");
        }
    }
}

const char *qemu_ram_get_idstr(RAMBlock *rb)
{
    return rb->idstr;
}

void *qemu_ram_get_host_addr(RAMBlock *rb)
{
    return rb->host;
}

ram_addr_t qemu_ram_get_offset(RAMBlock *rb)
{
    return rb->offset;
}

ram_addr_t qemu_ram_get_used_length(RAMBlock *rb)
{
    return rb->used_length;
}

ram_addr_t qemu_ram_get_max_length(RAMBlock *rb)
{
    return rb->max_length;
}

bool qemu_ram_is_shared(RAMBlock *rb)
{
    return rb->flags & RAM_SHARED;
}

bool qemu_ram_is_noreserve(RAMBlock *rb)
{
    return rb->flags & RAM_NORESERVE;
}

/* Note: Only set at the start of postcopy */
bool qemu_ram_is_uf_zeroable(RAMBlock *rb)
{
    return rb->flags & RAM_UF_ZEROPAGE;
}

void qemu_ram_set_uf_zeroable(RAMBlock *rb)
{
    rb->flags |= RAM_UF_ZEROPAGE;
}

bool qemu_ram_is_migratable(RAMBlock *rb)
{
    return rb->flags & RAM_MIGRATABLE;
}

void qemu_ram_set_migratable(RAMBlock *rb)
{
    rb->flags |= RAM_MIGRATABLE;
}

void qemu_ram_unset_migratable(RAMBlock *rb)
{
    rb->flags &= ~RAM_MIGRATABLE;
}

bool qemu_ram_is_named_file(RAMBlock *rb)
{
    return rb->flags & RAM_NAMED_FILE;
}

int qemu_ram_get_fd(RAMBlock *rb)
{
    return rb->fd;
}

/* Called with the BQL held. */
void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev)
{
    RAMBlock *block;

    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    RCU_READ_LOCK_GUARD();
    RAMBLOCK_FOREACH(block) {
        if (block != new_block &&
            !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
}
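
/*
 * Illustration of the idstr format built above (device path invented):
 * a block named "mem0" owned by a device at /machine/peripheral/dimm0
 * gets the idstr "/machine/peripheral/dimm0/mem0", while a block with no
 * owning device keeps the bare name "mem0".  Migration relies on this
 * string to match RAM blocks between source and destination, hence the
 * duplicate check and abort.
 */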

/* Called with the BQL held. */
void qemu_ram_unset_idstr(RAMBlock *block)
{
    /* FIXME: arch_init.c assumes that this is not called throughout
     * migration.  Ignore the problem since hot-unplug during migration
     * does not work anyway.
     */
    if (block) {
        memset(block->idstr, 0, sizeof(block->idstr));
    }
}

static char *cpr_name(MemoryRegion *mr)
{
    const char *mr_name = memory_region_name(mr);
    g_autofree char *id = mr->dev ? qdev_get_dev_path(mr->dev) : NULL;

    if (id) {
        return g_strdup_printf("%s/%s", id, mr_name);
    } else {
        return g_strdup(mr_name);
    }
}

size_t qemu_ram_pagesize(RAMBlock *rb)
{
    return rb->page_size;
}

/* Returns the largest size of page in use */
size_t qemu_ram_pagesize_largest(void)
{
    RAMBlock *block;
    size_t largest = 0;

    RAMBLOCK_FOREACH(block) {
        largest = MAX(largest, qemu_ram_pagesize(block));
    }

    return largest;
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    if (!machine_mem_merge(current_machine)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

/*
 * Resizing RAM while migrating can result in the migration being canceled.
 * Care has to be taken if the guest might have already detected the memory.
 *
 * As the memory core doesn't know how the memory is accessed, it is up to
 * the resize callback to update device state and/or add assertions to detect
 * misuse, if necessary.
 */
int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp)
{
    const ram_addr_t oldsize = block->used_length;
    const ram_addr_t unaligned_size = newsize;

    assert(block);

    newsize = TARGET_PAGE_ALIGN(newsize);
    newsize = REAL_HOST_PAGE_ALIGN(newsize);

    if (block->used_length == newsize) {
        /*
         * We don't have to resize the ram block (which only knows aligned
         * sizes), however, we have to notify if the unaligned size changed.
         */
        if (unaligned_size != memory_region_size(block->mr)) {
            memory_region_set_size(block->mr, unaligned_size);
            if (block->resized) {
                block->resized(block->idstr, unaligned_size, block->host);
            }
        }
        return 0;
    }

    if (!(block->flags & RAM_RESIZEABLE)) {
        error_setg_errno(errp, EINVAL,
                         "Size mismatch: %s: 0x" RAM_ADDR_FMT
                         " != 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->used_length);
        return -EINVAL;
    }

    if (block->max_length < newsize) {
        error_setg_errno(errp, EINVAL,
                         "Size too large: %s: 0x" RAM_ADDR_FMT
                         " > 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->max_length);
        return -EINVAL;
    }

    /* Notify before modifying the ram block and touching the bitmaps. */
    if (block->host) {
        ram_block_notify_resize(block->host, oldsize, newsize);
    }

    cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
    block->used_length = newsize;
    cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
                                        DIRTY_CLIENTS_ALL);
    memory_region_set_size(block->mr, unaligned_size);
    if (block->resized) {
        block->resized(block->idstr, unaligned_size, block->host);
    }
    return 0;
}
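
/*
 * A short usage sketch for qemu_ram_resize() above (error handling
 * elided): the block must have been created RAM_RESIZEABLE with a
 * sufficient max_length; on success used_length becomes the page-aligned
 * new size and its dirty bits are re-set, while the block's ram_addr_t
 * offset and max_length never move, so no other block is disturbed.
 *
 *     ret = qemu_ram_resize(block, new_bytes, &err);
 */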

/*
 * Trigger sync on the given ram block for range [start, start + length]
 * with the backing store if one is available.
 * Otherwise no-op.
 * @Note: this is supposed to be a synchronous op.
 */
void qemu_ram_msync(RAMBlock *block, ram_addr_t start, ram_addr_t length)
{
    /* The requested range should fit within the block range */
    g_assert((start + length) <= block->used_length);

#ifdef CONFIG_LIBPMEM
    /* The lack of support for pmem should not block the sync */
    if (ramblock_is_pmem(block)) {
        void *addr = ramblock_ptr(block, start);
        pmem_persist(addr, length);
        return;
    }
#endif
    if (block->fd >= 0) {
        /*
         * In case there is no support for PMEM or the memory has not been
         * specified as persistent (or is not persistent), fall back to
         * msync.  Less optimal but still achieves the same goal.
         */
        void *addr = ramblock_ptr(block, start);
        if (qemu_msync(addr, length, block->fd)) {
            warn_report("%s: failed to sync memory range: start: "
                        RAM_ADDR_FMT " length: " RAM_ADDR_FMT,
                        __func__, start, length);
        }
    }
}

/* Called with ram_list.mutex held */
static void dirty_memory_extend(ram_addr_t new_ram_size)
{
    unsigned int old_num_blocks = ram_list.num_dirty_blocks;
    unsigned int new_num_blocks = DIV_ROUND_UP(new_ram_size,
                                               DIRTY_MEMORY_BLOCK_SIZE);
    int i;

    /* Only need to extend if block count increased */
    if (new_num_blocks <= old_num_blocks) {
        return;
    }

    for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
        DirtyMemoryBlocks *old_blocks;
        DirtyMemoryBlocks *new_blocks;
        int j;

        old_blocks = qatomic_rcu_read(&ram_list.dirty_memory[i]);
        new_blocks = g_malloc(sizeof(*new_blocks) +
                              sizeof(new_blocks->blocks[0]) * new_num_blocks);

        if (old_num_blocks) {
            memcpy(new_blocks->blocks, old_blocks->blocks,
                   old_num_blocks * sizeof(old_blocks->blocks[0]));
        }

        for (j = old_num_blocks; j < new_num_blocks; j++) {
            new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE);
        }

        qatomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);

        if (old_blocks) {
            g_free_rcu(old_blocks, rcu);
        }
    }

    ram_list.num_dirty_blocks = new_num_blocks;
}
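
/*
 * Design note on dirty_memory_extend() above: only the top-level array of
 * block pointers is copied and republished with qatomic_rcu_set(); the
 * bitmap chunks themselves are shared between the old and new arrays, so
 * concurrent RCU readers keep seeing valid chunks while the old array is
 * reclaimed via g_free_rcu() after a grace period.
 */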

static void ram_block_add(RAMBlock *new_block, Error **errp)
{
    const bool noreserve = qemu_ram_is_noreserve(new_block);
    const bool shared = qemu_ram_is_shared(new_block);
    RAMBlock *block;
    RAMBlock *last_block = NULL;
    bool free_on_error = false;
    ram_addr_t ram_size;
    Error *err = NULL;

    qemu_mutex_lock_ramlist();
    new_block->offset = find_ram_offset(new_block->max_length);

    if (!new_block->host) {
        if (xen_enabled()) {
            xen_ram_alloc(new_block->offset, new_block->max_length,
                          new_block->mr, &err);
            if (err) {
                error_propagate(errp, err);
                qemu_mutex_unlock_ramlist();
                return;
            }
        } else {
            new_block->host = qemu_anon_ram_alloc(new_block->max_length,
                                                  &new_block->mr->align,
                                                  shared, noreserve);
            if (!new_block->host) {
                error_setg_errno(errp, errno,
                                 "cannot set up guest memory '%s'",
                                 memory_region_name(new_block->mr));
                qemu_mutex_unlock_ramlist();
                return;
            }
            memory_try_enable_merging(new_block->host, new_block->max_length);
            free_on_error = true;
        }
    }

    if (new_block->flags & RAM_GUEST_MEMFD) {
        int ret;

        if (!kvm_enabled()) {
            error_setg(errp, "cannot set up private guest memory for %s: KVM required",
                       object_get_typename(OBJECT(current_machine->cgs)));
            goto out_free;
        }
        assert(new_block->guest_memfd < 0);

        ret = ram_block_coordinated_discard_require(true);
        if (ret < 0) {
            error_setg_errno(errp, -ret,
                             "cannot set up private guest memory: discard currently blocked");
            error_append_hint(errp, "Are you using assigned devices?\n");
            goto out_free;
        }

        new_block->guest_memfd = kvm_create_guest_memfd(new_block->max_length,
                                                        0, errp);
        if (new_block->guest_memfd < 0) {
            qemu_mutex_unlock_ramlist();
            goto out_free;
        }

        /*
         * The attribute bitmap of the RamBlockAttributes defaults to
         * discarded, which mimics the behavior of kvm_set_phys_mem() when it
         * calls kvm_set_memory_attributes_private().  This leads to a brief
         * period of inconsistency between the creation of the RAMBlock and its
         * mapping into the physical address space.  However, this is not
         * problematic, as no users rely on the attribute status to perform
         * any actions during this interval.
         */
        new_block->attributes = ram_block_attributes_create(new_block);
        if (!new_block->attributes) {
            error_setg(errp, "Failed to create ram block attribute");
            close(new_block->guest_memfd);
            ram_block_coordinated_discard_require(false);
            qemu_mutex_unlock_ramlist();
            goto out_free;
        }

        /*
         * Add a specific guest_memfd blocker if a generic one would not be
         * added by ram_block_add_cpr_blocker.
         */
        if (ram_is_cpr_compatible(new_block)) {
            error_setg(&new_block->cpr_blocker,
                       "Memory region %s uses guest_memfd, "
                       "which is not supported with CPR.",
                       memory_region_name(new_block->mr));
            migrate_add_blocker_modes(&new_block->cpr_blocker, errp,
                                      MIG_MODE_CPR_TRANSFER, -1);
        }
    }

    ram_size = (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS;
    dirty_memory_extend(ram_size);
    /* Keep the list sorted from biggest to smallest block.  Unlike QTAILQ,
     * QLIST (which has an RCU-friendly variant) does not have insertion at
     * tail, so save the last element in last_block.
     */
    RAMBLOCK_FOREACH(block) {
        last_block = block;
        if (block->max_length < new_block->max_length) {
            break;
        }
    }
    if (block) {
        QLIST_INSERT_BEFORE_RCU(block, new_block, next);
    } else if (last_block) {
        QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
    } else { /* list is empty */
        QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    /* Write list before version */
    smp_wmb();
    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    cpu_physical_memory_set_dirty_range(new_block->offset,
                                        new_block->used_length,
                                        DIRTY_CLIENTS_ALL);

    if (new_block->host) {
        qemu_ram_setup_dump(new_block->host, new_block->max_length);
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
        /*
         * MADV_DONTFORK is also needed by KVM in the absence of a synchronous
         * MMU.  Configure it unless the machine is a qtest server, in which
         * case KVM is not used and it may be forked (eg for fuzzing purposes).
         */
        if (!qtest_enabled()) {
            qemu_madvise(new_block->host, new_block->max_length,
                         QEMU_MADV_DONTFORK);
        }
        ram_block_notify_add(new_block->host, new_block->used_length,
                             new_block->max_length);
    }
    return;

out_free:
    if (free_on_error) {
        qemu_anon_ram_free(new_block->host, new_block->max_length);
        new_block->host = NULL;
    }
}
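
/*
 * Note on the insertion order above: keeping ram_list.blocks sorted from
 * biggest to smallest means the lookup in qemu_get_ram_block() tends to
 * hit large blocks (e.g. main guest RAM) early, and the RCU-safe QLIST
 * insertion plus the smp_wmb()/version bump lets readers observe a
 * consistent list without taking the ramlist mutex.
 */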
2003          */
2004         if (!qtest_enabled()) {
2005             qemu_madvise(new_block->host, new_block->max_length,
2006                          QEMU_MADV_DONTFORK);
2007         }
2008         ram_block_notify_add(new_block->host, new_block->used_length,
2009                              new_block->max_length);
2010     }
2011     return;
2012 
2013 out_free:
2014     if (free_on_error) {
2015         qemu_anon_ram_free(new_block->host, new_block->max_length);
2016         new_block->host = NULL;
2017     }
2018 }
2019 
2020 #if defined(CONFIG_POSIX) && !defined(EMSCRIPTEN)
2021 RAMBlock *qemu_ram_alloc_from_fd(ram_addr_t size, ram_addr_t max_size,
2022                                  qemu_ram_resize_cb resized, MemoryRegion *mr,
2023                                  uint32_t ram_flags, int fd, off_t offset,
2024                                  bool grow,
2025                                  Error **errp)
2026 {
2027     ERRP_GUARD();
2028     RAMBlock *new_block;
2029     Error *local_err = NULL;
2030     int64_t file_size, file_align, share_flags;
2031 
2032     share_flags = ram_flags & (RAM_PRIVATE | RAM_SHARED);
2033     assert(share_flags != (RAM_SHARED | RAM_PRIVATE));
2034     ram_flags &= ~RAM_PRIVATE;
2035 
2036     /* Only the following ram flags are supported for now. */
2037     assert((ram_flags & ~(RAM_SHARED | RAM_PMEM | RAM_NORESERVE |
2038                           RAM_PROTECTED | RAM_NAMED_FILE | RAM_READONLY |
2039                           RAM_READONLY_FD | RAM_GUEST_MEMFD |
2040                           RAM_RESIZEABLE)) == 0);
2041     assert(max_size >= size);
2042 
2043     if (xen_enabled()) {
2044         error_setg(errp, "-mem-path not supported with Xen");
2045         return NULL;
2046     }
2047 
2048     if (kvm_enabled() && !kvm_has_sync_mmu()) {
2049         error_setg(errp,
2050                    "host lacks kvm mmu notifiers, -mem-path unsupported");
2051         return NULL;
2052     }
2053 
2054     size = TARGET_PAGE_ALIGN(size);
2055     size = REAL_HOST_PAGE_ALIGN(size);
2056     max_size = TARGET_PAGE_ALIGN(max_size);
2057     max_size = REAL_HOST_PAGE_ALIGN(max_size);
2058 
2059     file_size = get_file_size(fd);
2060     if (file_size && file_size < offset + max_size && !grow) {
2061         error_setg(errp, "%s backing store size 0x%" PRIx64
2062                    " is too small for 'size' option 0x" RAM_ADDR_FMT
2063                    " plus 'offset' option 0x%" PRIx64,
2064                    memory_region_name(mr), file_size, max_size,
2065                    (uint64_t)offset);
2066         return NULL;
2067     }
2068 
2069     file_align = get_file_align(fd);
2070     if (file_align > 0 && file_align > mr->align) {
2071         error_setg(errp, "backing store align 0x%" PRIx64
2072                    " is larger than 'align' option 0x%" PRIx64,
2073                    file_align, mr->align);
2074         return NULL;
2075     }
2076 
2077     new_block = g_malloc0(sizeof(*new_block));
2078     new_block->mr = mr;
2079     new_block->used_length = size;
2080     new_block->max_length = max_size;
2081     new_block->resized = resized;
2082     new_block->flags = ram_flags;
2083     new_block->guest_memfd = -1;
2084     new_block->host = file_ram_alloc(new_block, max_size, fd,
2085                                      file_size < offset + max_size,
2086                                      offset, errp);
2087     if (!new_block->host) {
2088         g_free(new_block);
2089         return NULL;
2090     }
2091 
2092     ram_block_add(new_block, &local_err);
2093     if (local_err) {
2094         g_free(new_block);
2095         error_propagate(errp, local_err);
2096         return NULL;
2097     }
2098     return new_block;
2099 
2100 }
2101 
2102 
2103 RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
2104                                    uint32_t ram_flags, const char *mem_path,
2105                                    off_t offset, Error **errp)
2106 {
2107     int fd;
2108     bool created;
2109     RAMBlock *block;
2110 
2111     fd = file_ram_open(mem_path, memory_region_name(mr),
2112                        !!(ram_flags & RAM_READONLY_FD), &created);
2113     if (fd < 0) {
2114         error_setg_errno(errp, -fd, "can't open backing store %s for guest RAM",
2115                          mem_path);
2116         if (!(ram_flags & RAM_READONLY_FD) && !(ram_flags & RAM_SHARED) &&
2117             fd == -EACCES) {
2118             /*
2119              * If we can open the file R/O (note: will never create a new file)
2120              * and we are dealing with a private mapping, there are still ways
2121              * to consume such files and get RAM instead of ROM.
2122              */
2123             fd = file_ram_open(mem_path, memory_region_name(mr), true,
2124                                &created);
2125             if (fd < 0) {
2126                 return NULL;
2127             }
2128             assert(!created);
2129             close(fd);
2130             error_append_hint(errp, "Consider opening the backing store"
2131                               " read-only but still creating writable RAM using"
2132                               " '-object memory-backend-file,readonly=on,rom=off...'"
2133                               " (see \"VM templating\" documentation)\n");
2134         }
2135         return NULL;
2136     }
2137 
2138     block = qemu_ram_alloc_from_fd(size, size, NULL, mr, ram_flags, fd, offset,
2139                                    false, errp);
2140     if (!block) {
2141         if (created) {
2142             unlink(mem_path);
2143         }
2144         close(fd);
2145         return NULL;
2146     }
2147 
2148     return block;
2149 }
2150 #endif
2151 
2152 #ifdef CONFIG_POSIX
2153 /*
2154  * Create MAP_SHARED RAMBlocks by mmap'ing a file descriptor, so they can be
2155  * shared with another process if CPR is being used. Use memfd if available
2156  * because it has no size limits, else use POSIX shm.
2157  */
2158 static int qemu_ram_get_shared_fd(const char *name, bool *reused, Error **errp)
2159 {
2160     int fd = cpr_find_fd(name, 0);
2161 
2162     if (fd >= 0) {
2163         *reused = true;
2164         return fd;
2165     }
2166 
2167     if (qemu_memfd_check(0)) {
2168         fd = qemu_memfd_create(name, 0, 0, 0, 0, errp);
2169     } else {
2170         fd = qemu_shm_alloc(0, errp);
2171     }
2172 
2173     if (fd >= 0) {
2174         cpr_save_fd(name, 0, fd);
2175     }
2176     *reused = false;
2177     return fd;
2178 }
2179 #endif
2180 
2181 static
2182 RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
2183                                   qemu_ram_resize_cb resized,
2184                                   void *host, uint32_t ram_flags,
2185                                   MemoryRegion *mr, Error **errp)
2186 {
2187     RAMBlock *new_block;
2188     Error *local_err = NULL;
2189     int align, share_flags;
2190 
2191     share_flags = ram_flags & (RAM_PRIVATE | RAM_SHARED);
2192     assert(share_flags != (RAM_SHARED | RAM_PRIVATE));
2193     ram_flags &= ~RAM_PRIVATE;
2194 
2195     assert((ram_flags & ~(RAM_SHARED | RAM_RESIZEABLE | RAM_PREALLOC |
2196                           RAM_NORESERVE | RAM_GUEST_MEMFD)) == 0);
2197     assert(!host ^ (ram_flags & RAM_PREALLOC));
2198     assert(max_size >= size);
2199 
2200     /* ignore RAM_SHARED for Windows and emscripten */
2201 #if defined(CONFIG_POSIX) && !defined(EMSCRIPTEN)
2202     if (!host) {
2203         if (!share_flags && current_machine->aux_ram_share) {
2204             ram_flags |= RAM_SHARED;
2205         }
2206         if (ram_flags & RAM_SHARED) {
2207             bool reused;
2208             g_autofree char *name = cpr_name(mr);
2209             int fd = qemu_ram_get_shared_fd(name, &reused, errp);
2210 
2211             if (fd < 0) {
2212                 return NULL;
2213             }
2214 
2215             /* Use same alignment as qemu_anon_ram_alloc */
2216             mr->align = QEMU_VMALLOC_ALIGN;
2217 
2218             /*
2219              * This can fail if the shm mount size is too small, or alloc from
2220              * fd is not supported, but previous QEMU versions that called
2221              * qemu_anon_ram_alloc for anonymous shared memory could have
2222              * succeeded. Quietly fail and fall back.
2223              *
2224              * After cpr-transfer, new QEMU could create a memory region
2225              * with a larger max size than old, so pass reused to grow the
2226              * region if necessary. The extra space will be usable after a
2227              * guest reset.
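             *
             * The errp argument is NULL in the call below precisely so that
             * a failure here stays quiet and the anonymous-memory fallback
             * further down can run.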
2228 */ 2229 new_block = qemu_ram_alloc_from_fd(size, max_size, resized, mr, 2230 ram_flags, fd, 0, reused, NULL); 2231 if (new_block) { 2232 trace_qemu_ram_alloc_shared(name, new_block->used_length, 2233 new_block->max_length, fd, 2234 new_block->host); 2235 return new_block; 2236 } 2237 2238 cpr_delete_fd(name, 0); 2239 close(fd); 2240 /* fall back to anon allocation */ 2241 } 2242 } 2243 #endif 2244 2245 align = qemu_real_host_page_size(); 2246 align = MAX(align, TARGET_PAGE_SIZE); 2247 size = ROUND_UP(size, align); 2248 max_size = ROUND_UP(max_size, align); 2249 2250 new_block = g_malloc0(sizeof(*new_block)); 2251 new_block->mr = mr; 2252 new_block->resized = resized; 2253 new_block->used_length = size; 2254 new_block->max_length = max_size; 2255 new_block->fd = -1; 2256 new_block->guest_memfd = -1; 2257 new_block->page_size = qemu_real_host_page_size(); 2258 new_block->host = host; 2259 new_block->flags = ram_flags; 2260 ram_block_add(new_block, &local_err); 2261 if (local_err) { 2262 g_free(new_block); 2263 error_propagate(errp, local_err); 2264 return NULL; 2265 } 2266 return new_block; 2267 } 2268 2269 RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host, 2270 MemoryRegion *mr, Error **errp) 2271 { 2272 return qemu_ram_alloc_internal(size, size, NULL, host, RAM_PREALLOC, mr, 2273 errp); 2274 } 2275 2276 RAMBlock *qemu_ram_alloc(ram_addr_t size, uint32_t ram_flags, 2277 MemoryRegion *mr, Error **errp) 2278 { 2279 assert((ram_flags & ~(RAM_SHARED | RAM_NORESERVE | RAM_GUEST_MEMFD | 2280 RAM_PRIVATE)) == 0); 2281 return qemu_ram_alloc_internal(size, size, NULL, NULL, ram_flags, mr, errp); 2282 } 2283 2284 RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz, 2285 qemu_ram_resize_cb resized, 2286 MemoryRegion *mr, Error **errp) 2287 { 2288 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, 2289 RAM_RESIZEABLE, mr, errp); 2290 } 2291 2292 static void reclaim_ramblock(RAMBlock *block) 2293 { 2294 if (block->flags & RAM_PREALLOC) { 2295 ; 2296 } else if (xen_enabled()) { 2297 xen_invalidate_map_cache_entry(block->host); 2298 #if !defined(_WIN32) && !defined(EMSCRIPTEN) 2299 } else if (block->fd >= 0) { 2300 qemu_ram_munmap(block->fd, block->host, block->max_length); 2301 close(block->fd); 2302 #endif 2303 } else { 2304 qemu_anon_ram_free(block->host, block->max_length); 2305 } 2306 2307 if (block->guest_memfd >= 0) { 2308 ram_block_attributes_destroy(block->attributes); 2309 close(block->guest_memfd); 2310 ram_block_coordinated_discard_require(false); 2311 } 2312 2313 g_free(block); 2314 } 2315 2316 void qemu_ram_free(RAMBlock *block) 2317 { 2318 g_autofree char *name = NULL; 2319 2320 if (!block) { 2321 return; 2322 } 2323 2324 if (block->host) { 2325 ram_block_notify_remove(block->host, block->used_length, 2326 block->max_length); 2327 } 2328 2329 qemu_mutex_lock_ramlist(); 2330 name = cpr_name(block->mr); 2331 cpr_delete_fd(name, 0); 2332 QLIST_REMOVE_RCU(block, next); 2333 ram_list.mru_block = NULL; 2334 /* Write list before version */ 2335 smp_wmb(); 2336 ram_list.version++; 2337 call_rcu(block, reclaim_ramblock, rcu); 2338 qemu_mutex_unlock_ramlist(); 2339 } 2340 2341 #ifndef _WIN32 2342 /* Simply remap the given VM memory location from start to start+length */ 2343 static int qemu_ram_remap_mmap(RAMBlock *block, uint64_t start, size_t length) 2344 { 2345 int flags, prot; 2346 void *area; 2347 void *host_startaddr = block->host + start; 2348 2349 assert(block->fd < 0); 2350 flags = MAP_FIXED | MAP_ANONYMOUS; 2351 flags |= block->flags & RAM_SHARED ? 
MAP_SHARED : MAP_PRIVATE;
2352     flags |= block->flags & RAM_NORESERVE ? MAP_NORESERVE : 0;
2353     prot = PROT_READ;
2354     prot |= block->flags & RAM_READONLY ? 0 : PROT_WRITE;
2355     area = mmap(host_startaddr, length, prot, flags, -1, 0);
2356     return area != host_startaddr ? -errno : 0;
2357 }
2358 
2359 /*
2360  * qemu_ram_remap - remap a single RAM page
2361  *
2362  * @addr: address in ram_addr_t address space.
2363  *
2364  * This function will try remapping a single page of guest RAM identified by
2365  * @addr, essentially discarding memory to recover from previously poisoned
2366  * memory (MCE). The page size depends on the RAMBlock (i.e., hugetlb). @addr
2367  * does not have to point at the start of the page.
2368  *
2369  * This function is only to be used during system resets; it will kill the
2370  * VM if remapping failed.
2371  */
2372 void qemu_ram_remap(ram_addr_t addr)
2373 {
2374     RAMBlock *block;
2375     uint64_t offset;
2376     void *vaddr;
2377     size_t page_size;
2378 
2379     RAMBLOCK_FOREACH(block) {
2380         offset = addr - block->offset;
2381         if (offset < block->max_length) {
2382             /* Respect the pagesize of our RAMBlock */
2383             page_size = qemu_ram_pagesize(block);
2384             offset = QEMU_ALIGN_DOWN(offset, page_size);
2385 
2386             vaddr = ramblock_ptr(block, offset);
2387             if (block->flags & RAM_PREALLOC) {
2388                 ;
2389             } else if (xen_enabled()) {
2390                 abort();
2391             } else {
2392                 if (ram_block_discard_range(block, offset, page_size) != 0) {
2393                     /*
2394                      * Fall back to using mmap() only for anonymous mapping,
2395                      * as if a backing file is associated we may not be able
2396                      * to recover the memory in all cases.
2397                      * So don't take the risk of using only mmap and fail now.
2398                      */
2399                     if (block->fd >= 0) {
2400                         error_report("Could not remap RAM %s:%" PRIx64 "+%"
2401                                      PRIx64 " +%zx", block->idstr, offset,
2402                                      block->fd_offset, page_size);
2403                         exit(1);
2404                     }
2405                     if (qemu_ram_remap_mmap(block, offset, page_size) != 0) {
2406                         error_report("Could not remap RAM %s:%" PRIx64 " +%zx",
2407                                      block->idstr, offset, page_size);
2408                         exit(1);
2409                     }
2410                 }
2411                 memory_try_enable_merging(vaddr, page_size);
2412                 qemu_ram_setup_dump(vaddr, page_size);
2413             }
2414 
2415             break;
2416         }
2417     }
2418 }
2419 #endif /* !_WIN32 */
2420 
2421 /*
2422  * Return a host pointer to guest RAM.
2423  * For Xen, foreign mappings get created if they don't already exist.
2424  *
2425  * @block: block for the RAM to lookup (optional and may be NULL).
2426  * @addr: address within the memory region.
2427  * @size: pointer to requested size (optional and may be NULL).
2428  *        size may be modified to return a value smaller than
2429  *        what was requested.
2430  * @lock: whether to lock the mapping in xen-mapcache until invalidated.
2431  * @is_write: hint whether to map RW or RO in the xen-mapcache.
2432  *            (optional and may always be set to true).
2433  *
2434  * Called within RCU critical section.
2435  */
2436 static void *qemu_ram_ptr_length(RAMBlock *block, ram_addr_t addr,
2437                                  hwaddr *size, bool lock,
2438                                  bool is_write)
2439 {
2440     hwaddr len = 0;
2441 
2442     if (size && *size == 0) {
2443         return NULL;
2444     }
2445 
2446     if (block == NULL) {
2447         block = qemu_get_ram_block(addr);
2448         addr -= block->offset;
2449     }
2450     if (size) {
2451         *size = MIN(*size, block->max_length - addr);
2452         len = *size;
2453     }
2454 
2455     if (xen_enabled() && block->host == NULL) {
2456         /* We need to check if the requested address is in the main RAM
2457          * because we don't want to map the entire memory in QEMU.
2458          * In that case just map the requested area.
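         * For any other block, the whole block is mapped once below and
         * the resulting pointer is cached in block->host.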
2459          */
2460         if (xen_mr_is_memory(block->mr)) {
2461             return xen_map_cache(block->mr, block->offset + addr,
2462                                  len, block->offset,
2463                                  lock, lock, is_write);
2464         }
2465 
2466         block->host = xen_map_cache(block->mr, block->offset,
2467                                     block->max_length,
2468                                     block->offset,
2469                                     1, lock, is_write);
2470     }
2471 
2472     return ramblock_ptr(block, addr);
2473 }
2474 
2475 /*
2476  * Return a host pointer to ram allocated with qemu_ram_alloc.
2477  * This should not be used for general purpose DMA. Use address_space_map
2478  * or address_space_rw instead. For local memory (e.g. video ram) that the
2479  * device owns, use memory_region_get_ram_ptr.
2480  *
2481  * Called within RCU critical section.
2482  */
2483 void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
2484 {
2485     return qemu_ram_ptr_length(ram_block, addr, NULL, false, true);
2486 }
2487 
2488 /* Return the offset of a host pointer within a RAMBlock */
2489 ram_addr_t qemu_ram_block_host_offset(RAMBlock *rb, void *host)
2490 {
2491     ram_addr_t res = (uint8_t *)host - (uint8_t *)rb->host;
2492     assert((uintptr_t)host >= (uintptr_t)rb->host);
2493     assert(res < rb->max_length);
2494 
2495     return res;
2496 }
2497 
2498 RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
2499                                    ram_addr_t *offset)
2500 {
2501     RAMBlock *block;
2502     uint8_t *host = ptr;
2503 
2504     if (xen_enabled()) {
2505         ram_addr_t ram_addr;
2506         RCU_READ_LOCK_GUARD();
2507         ram_addr = xen_ram_addr_from_mapcache(ptr);
2508         if (ram_addr == RAM_ADDR_INVALID) {
2509             return NULL;
2510         }
2511 
2512         block = qemu_get_ram_block(ram_addr);
2513         if (block) {
2514             *offset = ram_addr - block->offset;
2515         }
2516         return block;
2517     }
2518 
2519     RCU_READ_LOCK_GUARD();
2520     block = qatomic_rcu_read(&ram_list.mru_block);
2521     if (block && block->host && host - block->host < block->max_length) {
2522         goto found;
2523     }
2524 
2525     RAMBLOCK_FOREACH(block) {
2526         /* This case happens when the block is not mapped. */
2527         if (block->host == NULL) {
2528             continue;
2529         }
2530         if (host - block->host < block->max_length) {
2531             goto found;
2532         }
2533     }
2534 
2535     return NULL;
2536 
2537 found:
2538     *offset = (host - block->host);
2539     if (round_offset) {
2540         *offset &= TARGET_PAGE_MASK;
2541     }
2542     return block;
2543 }
2544 
2545 /*
2546  * Finds the named RAMBlock
2547  *
2548  * name: The name of the RAMBlock to find
2549  *
2550  * Returns: RAMBlock (or NULL if not found)
2551  */
2552 RAMBlock *qemu_ram_block_by_name(const char *name)
2553 {
2554     RAMBlock *block;
2555 
2556     RAMBLOCK_FOREACH(block) {
2557         if (!strcmp(name, block->idstr)) {
2558             return block;
2559         }
2560     }
2561 
2562     return NULL;
2563 }
2564 
2565 /*
2566  * Some of the system routines need to translate from a host pointer
2567  * (typically a TLB entry) back to a ram offset.
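 *
 * A minimal usage sketch (illustrative; "host_ptr" stands for any pointer
 * previously obtained from guest RAM, e.g. via qemu_map_ram_ptr()):
 *
 *     ram_addr_t ra = qemu_ram_addr_from_host(host_ptr);
 *     if (ra == RAM_ADDR_INVALID) {
 *         // host_ptr does not point into any RAMBlock
 *     }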
2568 */ 2569 ram_addr_t qemu_ram_addr_from_host(void *ptr) 2570 { 2571 RAMBlock *block; 2572 ram_addr_t offset; 2573 2574 block = qemu_ram_block_from_host(ptr, false, &offset); 2575 if (!block) { 2576 return RAM_ADDR_INVALID; 2577 } 2578 2579 return block->offset + offset; 2580 } 2581 2582 ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr) 2583 { 2584 ram_addr_t ram_addr; 2585 2586 ram_addr = qemu_ram_addr_from_host(ptr); 2587 if (ram_addr == RAM_ADDR_INVALID) { 2588 error_report("Bad ram pointer %p", ptr); 2589 abort(); 2590 } 2591 return ram_addr; 2592 } 2593 2594 static MemTxResult flatview_read(FlatView *fv, hwaddr addr, 2595 MemTxAttrs attrs, void *buf, hwaddr len); 2596 static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs, 2597 const void *buf, hwaddr len); 2598 static bool flatview_access_valid(FlatView *fv, hwaddr addr, hwaddr len, 2599 bool is_write, MemTxAttrs attrs); 2600 2601 static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data, 2602 unsigned len, MemTxAttrs attrs) 2603 { 2604 subpage_t *subpage = opaque; 2605 uint8_t buf[8]; 2606 MemTxResult res; 2607 2608 #if defined(DEBUG_SUBPAGE) 2609 printf("%s: subpage %p len %u addr " HWADDR_FMT_plx "\n", __func__, 2610 subpage, len, addr); 2611 #endif 2612 res = flatview_read(subpage->fv, addr + subpage->base, attrs, buf, len); 2613 if (res) { 2614 return res; 2615 } 2616 *data = ldn_p(buf, len); 2617 return MEMTX_OK; 2618 } 2619 2620 static MemTxResult subpage_write(void *opaque, hwaddr addr, 2621 uint64_t value, unsigned len, MemTxAttrs attrs) 2622 { 2623 subpage_t *subpage = opaque; 2624 uint8_t buf[8]; 2625 2626 #if defined(DEBUG_SUBPAGE) 2627 printf("%s: subpage %p len %u addr " HWADDR_FMT_plx 2628 " value %"PRIx64"\n", 2629 __func__, subpage, len, addr, value); 2630 #endif 2631 stn_p(buf, len, value); 2632 return flatview_write(subpage->fv, addr + subpage->base, attrs, buf, len); 2633 } 2634 2635 static bool subpage_accepts(void *opaque, hwaddr addr, 2636 unsigned len, bool is_write, 2637 MemTxAttrs attrs) 2638 { 2639 subpage_t *subpage = opaque; 2640 #if defined(DEBUG_SUBPAGE) 2641 printf("%s: subpage %p %c len %u addr " HWADDR_FMT_plx "\n", 2642 __func__, subpage, is_write ? 
'w' : 'r', len, addr);
2643 #endif
2644 
2645     return flatview_access_valid(subpage->fv, addr + subpage->base,
2646                                  len, is_write, attrs);
2647 }
2648 
2649 static const MemoryRegionOps subpage_ops = {
2650     .read_with_attrs = subpage_read,
2651     .write_with_attrs = subpage_write,
2652     .impl.min_access_size = 1,
2653     .impl.max_access_size = 8,
2654     .valid.min_access_size = 1,
2655     .valid.max_access_size = 8,
2656     .valid.accepts = subpage_accepts,
2657     .endianness = DEVICE_NATIVE_ENDIAN,
2658 };
2659 
2660 static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
2661                             uint16_t section)
2662 {
2663     int idx, eidx;
2664 
2665     if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2666         return -1;
2667     idx = SUBPAGE_IDX(start);
2668     eidx = SUBPAGE_IDX(end);
2669 #if defined(DEBUG_SUBPAGE)
2670     printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2671            __func__, mmio, start, end, idx, eidx, section);
2672 #endif
2673     for (; idx <= eidx; idx++) {
2674         mmio->sub_section[idx] = section;
2675     }
2676 
2677     return 0;
2678 }
2679 
2680 static subpage_t *subpage_init(FlatView *fv, hwaddr base)
2681 {
2682     subpage_t *mmio;
2683 
2684     /* mmio->sub_section is set to PHYS_SECTION_UNASSIGNED with g_malloc0 */
2685     mmio = g_malloc0(sizeof(subpage_t) + TARGET_PAGE_SIZE * sizeof(uint16_t));
2686     mmio->fv = fv;
2687     mmio->base = base;
2688     memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
2689                           NULL, TARGET_PAGE_SIZE);
2690     mmio->iomem.subpage = true;
2691 #if defined(DEBUG_SUBPAGE)
2692     printf("%s: %p base " HWADDR_FMT_plx " len %08x\n", __func__,
2693            mmio, base, TARGET_PAGE_SIZE);
2694 #endif
2695 
2696     return mmio;
2697 }
2698 
2699 static uint16_t dummy_section(PhysPageMap *map, FlatView *fv, MemoryRegion *mr)
2700 {
2701     assert(fv);
2702     MemoryRegionSection section = {
2703         .fv = fv,
2704         .mr = mr,
2705         .offset_within_address_space = 0,
2706         .offset_within_region = 0,
2707         .size = int128_2_64(),
2708     };
2709 
2710     return phys_section_add(map, &section);
2711 }
2712 
2713 static void io_mem_init(void)
2714 {
2715     memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
2716                           NULL, UINT64_MAX);
2717 }
2718 
2719 AddressSpaceDispatch *address_space_dispatch_new(FlatView *fv)
2720 {
2721     AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
2722     uint16_t n;
2723 
2724     n = dummy_section(&d->map, fv, &io_mem_unassigned);
2725     assert(n == PHYS_SECTION_UNASSIGNED);
2726 
2727     d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
2728 
2729     return d;
2730 }
2731 
2732 void address_space_dispatch_free(AddressSpaceDispatch *d)
2733 {
2734     phys_sections_free(&d->map);
2735     g_free(d);
2736 }
2737 
2738 static void do_nothing(CPUState *cpu, run_on_cpu_data d)
2739 {
2740 }
2741 
2742 static void tcg_log_global_after_sync(MemoryListener *listener)
2743 {
2744     CPUAddressSpace *cpuas;
2745 
2746     /* Wait for the CPU to end the current TB. This avoids the following
2747      * incorrect race:
2748      *
2749      *      vCPU                         migration
2750      *      ----------------------       -------------------------
2751      *      TLB check -> slow path
2752      *        notdirty_mem_write
2753      *          write to RAM
2754      *          mark dirty
2755      *                                   clear dirty flag
2756      *      TLB check -> fast path
2757      *                                   read memory
2758      *        write to RAM
2759      *
2760      * by pushing the migration thread's memory read after the vCPU thread has
2761      * written the memory.
2762      */
2763     if (replay_mode == REPLAY_MODE_NONE) {
2764         /*
2765          * VGA can make calls to this function while updating the screen.
2766          * In record/replay mode this causes a deadlock, because
2767          * run_on_cpu waits for the rr mutex. Therefore no races are possible
2768          * in this case, and there is no need to call run_on_cpu when
2769          * record/replay is enabled.
2770          */
2771         cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
2772         run_on_cpu(cpuas->cpu, do_nothing, RUN_ON_CPU_NULL);
2773     }
2774 }
2775 
2776 static void tcg_commit_cpu(CPUState *cpu, run_on_cpu_data data)
2777 {
2778     CPUAddressSpace *cpuas = data.host_ptr;
2779 
2780     cpuas->memory_dispatch = address_space_to_dispatch(cpuas->as);
2781     tlb_flush(cpu);
2782 }
2783 
2784 static void tcg_commit(MemoryListener *listener)
2785 {
2786     CPUAddressSpace *cpuas;
2787     CPUState *cpu;
2788 
2789     assert(tcg_enabled());
2790     /* Since each CPU stores ram addresses in its TLB cache, we must
2791        reset the modified entries */
2792     cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
2793     cpu = cpuas->cpu;
2794 
2795     /*
2796      * Defer changes to as->memory_dispatch until the cpu is quiescent.
2797      * Otherwise we race between (1) other cpu threads and (2) ongoing
2798      * i/o for the current cpu thread, with data cached by mmu_lookup().
2799      *
2800      * In addition, queueing the work function will kick the cpu back to
2801      * the main loop, which will end the RCU critical section and reclaim
2802      * the memory data structures.
2803      *
2804      * That said, the listener is also called during realize, before
2805      * all of the tcg machinery for run-on is initialized: hence the halt_cond check below.
2806      */
2807     if (cpu->halt_cond) {
2808         async_run_on_cpu(cpu, tcg_commit_cpu, RUN_ON_CPU_HOST_PTR(cpuas));
2809     } else {
2810         tcg_commit_cpu(cpu, RUN_ON_CPU_HOST_PTR(cpuas));
2811     }
2812 }
2813 
2814 static void memory_map_init(void)
2815 {
2816     system_memory = g_malloc(sizeof(*system_memory));
2817 
2818     memory_region_init(system_memory, NULL, "system", UINT64_MAX);
2819     address_space_init(&address_space_memory, system_memory, "memory");
2820 
2821     system_io = g_malloc(sizeof(*system_io));
2822     memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2823                           65536);
2824     address_space_init(&address_space_io, system_io, "I/O");
2825 }
2826 
2827 MemoryRegion *get_system_memory(void)
2828 {
2829     return system_memory;
2830 }
2831 
2832 MemoryRegion *get_system_io(void)
2833 {
2834     return system_io;
2835 }
2836 
2837 static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
2838                                      hwaddr length)
2839 {
2840     uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
2841     ram_addr_t ramaddr = memory_region_get_ram_addr(mr);
2842 
2843     /* We know we're only called for RAM MemoryRegions */
2844     assert(ramaddr != RAM_ADDR_INVALID);
2845     addr += ramaddr;
2846 
2847     /* No early return if dirty_log_mask is or becomes 0, because
2848      * cpu_physical_memory_set_dirty_range will still call
2849      * xen_modified_memory.
2850      */
2851     if (dirty_log_mask) {
2852         dirty_log_mask =
2853             cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
2854     }
2855     if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
2856         assert(tcg_enabled());
2857         tb_invalidate_phys_range(NULL, addr, addr + length - 1);
2858         dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
2859     }
2860     cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
2861 }
2862 
2863 void memory_region_flush_rom_device(MemoryRegion *mr, hwaddr addr, hwaddr size)
2864 {
2865     /*
2866      * In principle this function would work on other memory region types too,
2867      * but the ROM device use case is the only one where this operation is
2868      * necessary.
Other memory regions should use the 2869 * address_space_read/write() APIs. 2870 */ 2871 assert(memory_region_is_romd(mr)); 2872 2873 invalidate_and_set_dirty(mr, addr, size); 2874 } 2875 2876 int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr) 2877 { 2878 unsigned access_size_max = mr->ops->valid.max_access_size; 2879 2880 /* Regions are assumed to support 1-4 byte accesses unless 2881 otherwise specified. */ 2882 if (access_size_max == 0) { 2883 access_size_max = 4; 2884 } 2885 2886 /* Bound the maximum access by the alignment of the address. */ 2887 if (!mr->ops->impl.unaligned) { 2888 unsigned align_size_max = addr & -addr; 2889 if (align_size_max != 0 && align_size_max < access_size_max) { 2890 access_size_max = align_size_max; 2891 } 2892 } 2893 2894 /* Don't attempt accesses larger than the maximum. */ 2895 if (l > access_size_max) { 2896 l = access_size_max; 2897 } 2898 l = pow2floor(l); 2899 2900 return l; 2901 } 2902 2903 bool prepare_mmio_access(MemoryRegion *mr) 2904 { 2905 bool release_lock = false; 2906 2907 if (!bql_locked()) { 2908 bql_lock(); 2909 release_lock = true; 2910 } 2911 if (mr->flush_coalesced_mmio) { 2912 qemu_flush_coalesced_mmio_buffer(); 2913 } 2914 2915 return release_lock; 2916 } 2917 2918 /** 2919 * flatview_access_allowed 2920 * @mr: #MemoryRegion to be accessed 2921 * @attrs: memory transaction attributes 2922 * @addr: address within that memory region 2923 * @len: the number of bytes to access 2924 * 2925 * Check if a memory transaction is allowed. 2926 * 2927 * Returns: true if transaction is allowed, false if denied. 2928 */ 2929 static bool flatview_access_allowed(MemoryRegion *mr, MemTxAttrs attrs, 2930 hwaddr addr, hwaddr len) 2931 { 2932 if (likely(!attrs.memory)) { 2933 return true; 2934 } 2935 if (memory_region_is_ram(mr)) { 2936 return true; 2937 } 2938 qemu_log_mask(LOG_INVALID_MEM, 2939 "Invalid access to non-RAM device at " 2940 "addr 0x%" HWADDR_PRIX ", size %" HWADDR_PRIu ", " 2941 "region '%s'\n", addr, len, memory_region_name(mr)); 2942 return false; 2943 } 2944 2945 static MemTxResult flatview_write_continue_step(MemTxAttrs attrs, 2946 const uint8_t *buf, 2947 hwaddr len, hwaddr mr_addr, 2948 hwaddr *l, MemoryRegion *mr) 2949 { 2950 if (!flatview_access_allowed(mr, attrs, mr_addr, *l)) { 2951 return MEMTX_ACCESS_ERROR; 2952 } 2953 2954 if (!memory_access_is_direct(mr, true, attrs)) { 2955 uint64_t val; 2956 MemTxResult result; 2957 bool release_lock = prepare_mmio_access(mr); 2958 2959 *l = memory_access_size(mr, *l, mr_addr); 2960 /* 2961 * XXX: could force current_cpu to NULL to avoid 2962 * potential bugs 2963 */ 2964 2965 /* 2966 * Assure Coverity (and ourselves) that we are not going to OVERRUN 2967 * the buffer by following ldn_he_p(). 2968 */ 2969 #ifdef QEMU_STATIC_ANALYSIS 2970 assert((*l == 1 && len >= 1) || 2971 (*l == 2 && len >= 2) || 2972 (*l == 4 && len >= 4) || 2973 (*l == 8 && len >= 8)); 2974 #endif 2975 val = ldn_he_p(buf, *l); 2976 result = memory_region_dispatch_write(mr, mr_addr, val, 2977 size_memop(*l), attrs); 2978 if (release_lock) { 2979 bql_unlock(); 2980 } 2981 2982 return result; 2983 } else { 2984 /* RAM case */ 2985 uint8_t *ram_ptr = qemu_ram_ptr_length(mr->ram_block, mr_addr, l, 2986 false, true); 2987 2988 memmove(ram_ptr, buf, *l); 2989 invalidate_and_set_dirty(mr, mr_addr, *l); 2990 2991 return MEMTX_OK; 2992 } 2993 } 2994 2995 /* Called within RCU critical section. 
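 * flatview_write_continue() takes the first chunk's translation (mr, mr_addr,
 * l) from the caller, then loops: write one chunk, advance buf/addr/len, and
 * re-translate for the next chunk until len is exhausted.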
*/ 2996 static MemTxResult flatview_write_continue(FlatView *fv, hwaddr addr, 2997 MemTxAttrs attrs, 2998 const void *ptr, 2999 hwaddr len, hwaddr mr_addr, 3000 hwaddr l, MemoryRegion *mr) 3001 { 3002 MemTxResult result = MEMTX_OK; 3003 const uint8_t *buf = ptr; 3004 3005 for (;;) { 3006 result |= flatview_write_continue_step(attrs, buf, len, mr_addr, &l, 3007 mr); 3008 3009 len -= l; 3010 buf += l; 3011 addr += l; 3012 3013 if (!len) { 3014 break; 3015 } 3016 3017 l = len; 3018 mr = flatview_translate(fv, addr, &mr_addr, &l, true, attrs); 3019 } 3020 3021 return result; 3022 } 3023 3024 /* Called from RCU critical section. */ 3025 static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs, 3026 const void *buf, hwaddr len) 3027 { 3028 hwaddr l; 3029 hwaddr mr_addr; 3030 MemoryRegion *mr; 3031 3032 l = len; 3033 mr = flatview_translate(fv, addr, &mr_addr, &l, true, attrs); 3034 if (!flatview_access_allowed(mr, attrs, addr, len)) { 3035 return MEMTX_ACCESS_ERROR; 3036 } 3037 return flatview_write_continue(fv, addr, attrs, buf, len, 3038 mr_addr, l, mr); 3039 } 3040 3041 static MemTxResult flatview_read_continue_step(MemTxAttrs attrs, uint8_t *buf, 3042 hwaddr len, hwaddr mr_addr, 3043 hwaddr *l, 3044 MemoryRegion *mr) 3045 { 3046 if (!flatview_access_allowed(mr, attrs, mr_addr, *l)) { 3047 return MEMTX_ACCESS_ERROR; 3048 } 3049 3050 if (!memory_access_is_direct(mr, false, attrs)) { 3051 /* I/O case */ 3052 uint64_t val; 3053 MemTxResult result; 3054 bool release_lock = prepare_mmio_access(mr); 3055 3056 *l = memory_access_size(mr, *l, mr_addr); 3057 result = memory_region_dispatch_read(mr, mr_addr, &val, size_memop(*l), 3058 attrs); 3059 3060 /* 3061 * Assure Coverity (and ourselves) that we are not going to OVERRUN 3062 * the buffer by following stn_he_p(). 3063 */ 3064 #ifdef QEMU_STATIC_ANALYSIS 3065 assert((*l == 1 && len >= 1) || 3066 (*l == 2 && len >= 2) || 3067 (*l == 4 && len >= 4) || 3068 (*l == 8 && len >= 8)); 3069 #endif 3070 stn_he_p(buf, *l, val); 3071 3072 if (release_lock) { 3073 bql_unlock(); 3074 } 3075 return result; 3076 } else { 3077 /* RAM case */ 3078 uint8_t *ram_ptr = qemu_ram_ptr_length(mr->ram_block, mr_addr, l, 3079 false, false); 3080 3081 memcpy(buf, ram_ptr, *l); 3082 3083 return MEMTX_OK; 3084 } 3085 } 3086 3087 /* Called within RCU critical section. */ 3088 MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr, 3089 MemTxAttrs attrs, void *ptr, 3090 hwaddr len, hwaddr mr_addr, hwaddr l, 3091 MemoryRegion *mr) 3092 { 3093 MemTxResult result = MEMTX_OK; 3094 uint8_t *buf = ptr; 3095 3096 fuzz_dma_read_cb(addr, len, mr); 3097 for (;;) { 3098 result |= flatview_read_continue_step(attrs, buf, len, mr_addr, &l, mr); 3099 3100 len -= l; 3101 buf += l; 3102 addr += l; 3103 3104 if (!len) { 3105 break; 3106 } 3107 3108 l = len; 3109 mr = flatview_translate(fv, addr, &mr_addr, &l, false, attrs); 3110 } 3111 3112 return result; 3113 } 3114 3115 /* Called from RCU critical section. 
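 * flatview_read() performs the initial translation, validates the access,
 * and delegates the chunked copy to flatview_read_continue() above.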
*/ 3116 static MemTxResult flatview_read(FlatView *fv, hwaddr addr, 3117 MemTxAttrs attrs, void *buf, hwaddr len) 3118 { 3119 hwaddr l; 3120 hwaddr mr_addr; 3121 MemoryRegion *mr; 3122 3123 l = len; 3124 mr = flatview_translate(fv, addr, &mr_addr, &l, false, attrs); 3125 if (!flatview_access_allowed(mr, attrs, addr, len)) { 3126 return MEMTX_ACCESS_ERROR; 3127 } 3128 return flatview_read_continue(fv, addr, attrs, buf, len, 3129 mr_addr, l, mr); 3130 } 3131 3132 MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr, 3133 MemTxAttrs attrs, void *buf, hwaddr len) 3134 { 3135 MemTxResult result = MEMTX_OK; 3136 FlatView *fv; 3137 3138 if (len > 0) { 3139 RCU_READ_LOCK_GUARD(); 3140 fv = address_space_to_flatview(as); 3141 result = flatview_read(fv, addr, attrs, buf, len); 3142 } 3143 3144 return result; 3145 } 3146 3147 MemTxResult address_space_write(AddressSpace *as, hwaddr addr, 3148 MemTxAttrs attrs, 3149 const void *buf, hwaddr len) 3150 { 3151 MemTxResult result = MEMTX_OK; 3152 FlatView *fv; 3153 3154 if (len > 0) { 3155 RCU_READ_LOCK_GUARD(); 3156 fv = address_space_to_flatview(as); 3157 result = flatview_write(fv, addr, attrs, buf, len); 3158 } 3159 3160 return result; 3161 } 3162 3163 MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs, 3164 void *buf, hwaddr len, bool is_write) 3165 { 3166 if (is_write) { 3167 return address_space_write(as, addr, attrs, buf, len); 3168 } else { 3169 return address_space_read_full(as, addr, attrs, buf, len); 3170 } 3171 } 3172 3173 MemTxResult address_space_set(AddressSpace *as, hwaddr addr, 3174 uint8_t c, hwaddr len, MemTxAttrs attrs) 3175 { 3176 #define FILLBUF_SIZE 512 3177 uint8_t fillbuf[FILLBUF_SIZE]; 3178 int l; 3179 MemTxResult error = MEMTX_OK; 3180 3181 memset(fillbuf, c, FILLBUF_SIZE); 3182 while (len > 0) { 3183 l = len < FILLBUF_SIZE ? 
len : FILLBUF_SIZE;
3184         error |= address_space_write(as, addr, attrs, fillbuf, l);
3185         len -= l;
3186         addr += l;
3187     }
3188 
3189     return error;
3190 }
3191 
3192 void cpu_physical_memory_rw(hwaddr addr, void *buf,
3193                             hwaddr len, bool is_write)
3194 {
3195     address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
3196                      buf, len, is_write);
3197 }
3198 
3199 enum write_rom_type {
3200     WRITE_DATA,
3201     FLUSH_CACHE,
3202 };
3203 
3204 static inline MemTxResult address_space_write_rom_internal(AddressSpace *as,
3205                                                            hwaddr addr,
3206                                                            MemTxAttrs attrs,
3207                                                            const void *ptr,
3208                                                            hwaddr len,
3209                                                            enum write_rom_type type)
3210 {
3211     hwaddr l;
3212     uint8_t *ram_ptr;
3213     hwaddr addr1;
3214     MemoryRegion *mr;
3215     const uint8_t *buf = ptr;
3216 
3217     RCU_READ_LOCK_GUARD();
3218     while (len > 0) {
3219         l = len;
3220         mr = address_space_translate(as, addr, &addr1, &l, true, attrs);
3221 
3222         if (!memory_region_supports_direct_access(mr)) {
3223             l = memory_access_size(mr, l, addr1);
3224         } else {
3225             /* ROM/RAM case */
3226             ram_ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
3227             switch (type) {
3228             case WRITE_DATA:
3229                 memcpy(ram_ptr, buf, l);
3230                 invalidate_and_set_dirty(mr, addr1, l);
3231                 break;
3232             case FLUSH_CACHE:
3233                 flush_idcache_range((uintptr_t)ram_ptr, (uintptr_t)ram_ptr, l);
3234                 break;
3235             }
3236         }
3237         len -= l;
3238         buf += l;
3239         addr += l;
3240     }
3241     return MEMTX_OK;
3242 }
3243 
3244 /* used for ROM loading: can write to RAM and ROM */
3245 MemTxResult address_space_write_rom(AddressSpace *as, hwaddr addr,
3246                                     MemTxAttrs attrs,
3247                                     const void *buf, hwaddr len)
3248 {
3249     return address_space_write_rom_internal(as, addr, attrs,
3250                                             buf, len, WRITE_DATA);
3251 }
3252 
3253 void cpu_flush_icache_range(hwaddr start, hwaddr len)
3254 {
3255     /*
3256      * This function should do the same thing as an icache flush that was
3257      * triggered from within the guest. For TCG we are always cache coherent,
3258      * so there is no need to flush anything. For KVM / Xen we need to flush
3259      * the host's instruction cache at least.
3260      */
3261     if (tcg_enabled()) {
3262         return;
3263     }
3264 
3265     address_space_write_rom_internal(&address_space_memory,
3266                                      start, MEMTXATTRS_UNSPECIFIED,
3267                                      NULL, len, FLUSH_CACHE);
3268 }
3269 
3270 /*
3271  * A magic value stored in the first 8 bytes of the bounce buffer struct. Used
3272  * to detect illegal pointers passed to address_space_unmap.
3273  */
3274 #define BOUNCE_BUFFER_MAGIC 0xb4017ceb4ffe12ed
3275 
3276 typedef struct {
3277     uint64_t magic;
3278     MemoryRegion *mr;
3279     hwaddr addr;
3280     size_t len;
3281     uint8_t buffer[];
3282 } BounceBuffer;
3283 
3284 static void
3285 address_space_unregister_map_client_do(AddressSpaceMapClient *client)
3286 {
3287     QLIST_REMOVE(client, link);
3288     g_free(client);
3289 }
3290 
3291 static void address_space_notify_map_clients_locked(AddressSpace *as)
3292 {
3293     AddressSpaceMapClient *client;
3294 
3295     while (!QLIST_EMPTY(&as->map_client_list)) {
3296         client = QLIST_FIRST(&as->map_client_list);
3297         qemu_bh_schedule(client->bh);
3298         address_space_unregister_map_client_do(client);
3299     }
3300 }
3301 
3302 void address_space_register_map_client(AddressSpace *as, QEMUBH *bh)
3303 {
3304     AddressSpaceMapClient *client = g_malloc(sizeof(*client));
3305 
3306     QEMU_LOCK_GUARD(&as->map_client_list_lock);
3307     client->bh = bh;
3308     QLIST_INSERT_HEAD(&as->map_client_list, client, link);
3309     /* Write map_client_list before reading bounce_buffer_size.
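     * This pairs with the smp_mb() in address_space_unmap(), which writes
     * bounce_buffer_size before reading map_client_list.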
*/ 3310 smp_mb(); 3311 if (qatomic_read(&as->bounce_buffer_size) < as->max_bounce_buffer_size) { 3312 address_space_notify_map_clients_locked(as); 3313 } 3314 } 3315 3316 void cpu_exec_init_all(void) 3317 { 3318 qemu_mutex_init(&ram_list.mutex); 3319 /* The data structures we set up here depend on knowing the page size, 3320 * so no more changes can be made after this point. 3321 * In an ideal world, nothing we did before we had finished the 3322 * machine setup would care about the target page size, and we could 3323 * do this much later, rather than requiring board models to state 3324 * up front what their requirements are. 3325 */ 3326 finalize_target_page_bits(); 3327 io_mem_init(); 3328 memory_map_init(); 3329 } 3330 3331 void address_space_unregister_map_client(AddressSpace *as, QEMUBH *bh) 3332 { 3333 AddressSpaceMapClient *client; 3334 3335 QEMU_LOCK_GUARD(&as->map_client_list_lock); 3336 QLIST_FOREACH(client, &as->map_client_list, link) { 3337 if (client->bh == bh) { 3338 address_space_unregister_map_client_do(client); 3339 break; 3340 } 3341 } 3342 } 3343 3344 static void address_space_notify_map_clients(AddressSpace *as) 3345 { 3346 QEMU_LOCK_GUARD(&as->map_client_list_lock); 3347 address_space_notify_map_clients_locked(as); 3348 } 3349 3350 static bool flatview_access_valid(FlatView *fv, hwaddr addr, hwaddr len, 3351 bool is_write, MemTxAttrs attrs) 3352 { 3353 MemoryRegion *mr; 3354 hwaddr l, xlat; 3355 3356 while (len > 0) { 3357 l = len; 3358 mr = flatview_translate(fv, addr, &xlat, &l, is_write, attrs); 3359 if (!memory_access_is_direct(mr, is_write, attrs)) { 3360 l = memory_access_size(mr, l, addr); 3361 if (!memory_region_access_valid(mr, xlat, l, is_write, attrs)) { 3362 return false; 3363 } 3364 } 3365 3366 len -= l; 3367 addr += l; 3368 } 3369 return true; 3370 } 3371 3372 bool address_space_access_valid(AddressSpace *as, hwaddr addr, 3373 hwaddr len, bool is_write, 3374 MemTxAttrs attrs) 3375 { 3376 FlatView *fv; 3377 3378 RCU_READ_LOCK_GUARD(); 3379 fv = address_space_to_flatview(as); 3380 return flatview_access_valid(fv, addr, len, is_write, attrs); 3381 } 3382 3383 static hwaddr 3384 flatview_extend_translation(FlatView *fv, hwaddr addr, 3385 hwaddr target_len, 3386 MemoryRegion *mr, hwaddr base, hwaddr len, 3387 bool is_write, MemTxAttrs attrs) 3388 { 3389 hwaddr done = 0; 3390 hwaddr xlat; 3391 MemoryRegion *this_mr; 3392 3393 for (;;) { 3394 target_len -= len; 3395 addr += len; 3396 done += len; 3397 if (target_len == 0) { 3398 return done; 3399 } 3400 3401 len = target_len; 3402 this_mr = flatview_translate(fv, addr, &xlat, 3403 &len, is_write, attrs); 3404 if (this_mr != mr || xlat != base + done) { 3405 return done; 3406 } 3407 } 3408 } 3409 3410 /* Map a physical memory region into a host virtual address. 3411 * May map a subset of the requested range, given by and returned in *plen. 3412 * May return NULL if resources needed to perform the mapping are exhausted. 3413 * Use only for reads OR writes - not for read-modify-write operations. 3414 * Use address_space_register_map_client() to know when retrying the map 3415 * operation is likely to succeed. 
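 *
 * A usage sketch (illustrative; error handling elided, and the caller must
 * cope with *plen coming back smaller than requested):
 *
 *     hwaddr plen = len;
 *     void *p = address_space_map(as, addr, &plen, true,
 *                                 MEMTXATTRS_UNSPECIFIED);
 *     if (p) {
 *         // ... fill up to plen bytes at p ...
 *         address_space_unmap(as, p, plen, true, plen);
 *     }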
3416 */ 3417 void *address_space_map(AddressSpace *as, 3418 hwaddr addr, 3419 hwaddr *plen, 3420 bool is_write, 3421 MemTxAttrs attrs) 3422 { 3423 hwaddr len = *plen; 3424 hwaddr l, xlat; 3425 MemoryRegion *mr; 3426 FlatView *fv; 3427 3428 trace_address_space_map(as, addr, len, is_write, *(uint32_t *) &attrs); 3429 3430 if (len == 0) { 3431 return NULL; 3432 } 3433 3434 l = len; 3435 RCU_READ_LOCK_GUARD(); 3436 fv = address_space_to_flatview(as); 3437 mr = flatview_translate(fv, addr, &xlat, &l, is_write, attrs); 3438 3439 if (!memory_access_is_direct(mr, is_write, attrs)) { 3440 size_t used = qatomic_read(&as->bounce_buffer_size); 3441 for (;;) { 3442 hwaddr alloc = MIN(as->max_bounce_buffer_size - used, l); 3443 size_t new_size = used + alloc; 3444 size_t actual = 3445 qatomic_cmpxchg(&as->bounce_buffer_size, used, new_size); 3446 if (actual == used) { 3447 l = alloc; 3448 break; 3449 } 3450 used = actual; 3451 } 3452 3453 if (l == 0) { 3454 *plen = 0; 3455 return NULL; 3456 } 3457 3458 BounceBuffer *bounce = g_malloc0(l + sizeof(BounceBuffer)); 3459 bounce->magic = BOUNCE_BUFFER_MAGIC; 3460 memory_region_ref(mr); 3461 bounce->mr = mr; 3462 bounce->addr = addr; 3463 bounce->len = l; 3464 3465 if (!is_write) { 3466 flatview_read(fv, addr, attrs, 3467 bounce->buffer, l); 3468 } 3469 3470 *plen = l; 3471 return bounce->buffer; 3472 } 3473 3474 memory_region_ref(mr); 3475 *plen = flatview_extend_translation(fv, addr, len, mr, xlat, 3476 l, is_write, attrs); 3477 fuzz_dma_read_cb(addr, *plen, mr); 3478 return qemu_ram_ptr_length(mr->ram_block, xlat, plen, true, is_write); 3479 } 3480 3481 /* Unmaps a memory region previously mapped by address_space_map(). 3482 * Will also mark the memory as dirty if is_write is true. access_len gives 3483 * the amount of memory that was actually read or written by the caller. 3484 */ 3485 void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len, 3486 bool is_write, hwaddr access_len) 3487 { 3488 MemoryRegion *mr; 3489 ram_addr_t addr1; 3490 3491 mr = memory_region_from_host(buffer, &addr1); 3492 if (mr != NULL) { 3493 if (is_write) { 3494 invalidate_and_set_dirty(mr, addr1, access_len); 3495 } 3496 if (xen_enabled()) { 3497 xen_invalidate_map_cache_entry(buffer); 3498 } 3499 memory_region_unref(mr); 3500 return; 3501 } 3502 3503 3504 BounceBuffer *bounce = container_of(buffer, BounceBuffer, buffer); 3505 assert(bounce->magic == BOUNCE_BUFFER_MAGIC); 3506 3507 if (is_write) { 3508 address_space_write(as, bounce->addr, MEMTXATTRS_UNSPECIFIED, 3509 bounce->buffer, access_len); 3510 } 3511 3512 qatomic_sub(&as->bounce_buffer_size, bounce->len); 3513 bounce->magic = ~BOUNCE_BUFFER_MAGIC; 3514 memory_region_unref(bounce->mr); 3515 g_free(bounce); 3516 /* Write bounce_buffer_size before reading map_client_list. */ 3517 smp_mb(); 3518 address_space_notify_map_clients(as); 3519 } 3520 3521 void *cpu_physical_memory_map(hwaddr addr, 3522 hwaddr *plen, 3523 bool is_write) 3524 { 3525 return address_space_map(&address_space_memory, addr, plen, is_write, 3526 MEMTXATTRS_UNSPECIFIED); 3527 } 3528 3529 void cpu_physical_memory_unmap(void *buffer, hwaddr len, 3530 bool is_write, hwaddr access_len) 3531 { 3532 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len); 3533 } 3534 3535 #define ARG1_DECL AddressSpace *as 3536 #define ARG1 as 3537 #define SUFFIX 3538 #define TRANSLATE(...) address_space_translate(as, __VA_ARGS__) 3539 #define RCU_READ_LOCK(...) rcu_read_lock() 3540 #define RCU_READ_UNLOCK(...) 
rcu_read_unlock() 3541 #include "memory_ldst.c.inc" 3542 3543 int64_t address_space_cache_init(MemoryRegionCache *cache, 3544 AddressSpace *as, 3545 hwaddr addr, 3546 hwaddr len, 3547 bool is_write) 3548 { 3549 AddressSpaceDispatch *d; 3550 hwaddr l; 3551 MemoryRegion *mr; 3552 Int128 diff; 3553 3554 assert(len > 0); 3555 3556 l = len; 3557 cache->fv = address_space_get_flatview(as); 3558 d = flatview_to_dispatch(cache->fv); 3559 cache->mrs = *address_space_translate_internal(d, addr, &cache->xlat, &l, true); 3560 3561 /* 3562 * cache->xlat is now relative to cache->mrs.mr, not to the section itself. 3563 * Take that into account to compute how many bytes are there between 3564 * cache->xlat and the end of the section. 3565 */ 3566 diff = int128_sub(cache->mrs.size, 3567 int128_make64(cache->xlat - cache->mrs.offset_within_region)); 3568 l = int128_get64(int128_min(diff, int128_make64(l))); 3569 3570 mr = cache->mrs.mr; 3571 memory_region_ref(mr); 3572 if (memory_access_is_direct(mr, is_write, MEMTXATTRS_UNSPECIFIED)) { 3573 /* We don't care about the memory attributes here as we're only 3574 * doing this if we found actual RAM, which behaves the same 3575 * regardless of attributes; so UNSPECIFIED is fine. 3576 */ 3577 l = flatview_extend_translation(cache->fv, addr, len, mr, 3578 cache->xlat, l, is_write, 3579 MEMTXATTRS_UNSPECIFIED); 3580 cache->ptr = qemu_ram_ptr_length(mr->ram_block, cache->xlat, &l, true, 3581 is_write); 3582 } else { 3583 cache->ptr = NULL; 3584 } 3585 3586 cache->len = l; 3587 cache->is_write = is_write; 3588 return l; 3589 } 3590 3591 void address_space_cache_invalidate(MemoryRegionCache *cache, 3592 hwaddr addr, 3593 hwaddr access_len) 3594 { 3595 assert(cache->is_write); 3596 if (likely(cache->ptr)) { 3597 invalidate_and_set_dirty(cache->mrs.mr, addr + cache->xlat, access_len); 3598 } 3599 } 3600 3601 void address_space_cache_destroy(MemoryRegionCache *cache) 3602 { 3603 if (!cache->mrs.mr) { 3604 return; 3605 } 3606 3607 if (xen_enabled()) { 3608 xen_invalidate_map_cache_entry(cache->ptr); 3609 } 3610 memory_region_unref(cache->mrs.mr); 3611 flatview_unref(cache->fv); 3612 cache->mrs.mr = NULL; 3613 cache->fv = NULL; 3614 } 3615 3616 /* Called from RCU critical section. This function has the same 3617 * semantics as address_space_translate, but it only works on a 3618 * predefined range of a MemoryRegion that was mapped with 3619 * address_space_cache_init. 3620 */ 3621 static inline MemoryRegion *address_space_translate_cached( 3622 MemoryRegionCache *cache, hwaddr addr, hwaddr *xlat, 3623 hwaddr *plen, bool is_write, MemTxAttrs attrs) 3624 { 3625 MemoryRegionSection section; 3626 MemoryRegion *mr; 3627 IOMMUMemoryRegion *iommu_mr; 3628 AddressSpace *target_as; 3629 3630 assert(!cache->ptr); 3631 *xlat = addr + cache->xlat; 3632 3633 mr = cache->mrs.mr; 3634 iommu_mr = memory_region_get_iommu(mr); 3635 if (!iommu_mr) { 3636 /* MMIO region. */ 3637 return mr; 3638 } 3639 3640 section = address_space_translate_iommu(iommu_mr, xlat, plen, 3641 NULL, is_write, true, 3642 &target_as, attrs); 3643 return section.mr; 3644 } 3645 3646 /* Called within RCU critical section. 
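 * Unlike flatview_write_continue(), the loop below never re-translates:
 * address_space_translate_cached() has already resolved a contiguous range,
 * so only the buffer pointer and the MR offset advance between chunks.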
*/ 3647 static MemTxResult address_space_write_continue_cached(MemTxAttrs attrs, 3648 const void *ptr, 3649 hwaddr len, 3650 hwaddr mr_addr, 3651 hwaddr l, 3652 MemoryRegion *mr) 3653 { 3654 MemTxResult result = MEMTX_OK; 3655 const uint8_t *buf = ptr; 3656 3657 for (;;) { 3658 result |= flatview_write_continue_step(attrs, buf, len, mr_addr, &l, 3659 mr); 3660 3661 len -= l; 3662 buf += l; 3663 mr_addr += l; 3664 3665 if (!len) { 3666 break; 3667 } 3668 3669 l = len; 3670 } 3671 3672 return result; 3673 } 3674 3675 /* Called within RCU critical section. */ 3676 static MemTxResult address_space_read_continue_cached(MemTxAttrs attrs, 3677 void *ptr, hwaddr len, 3678 hwaddr mr_addr, hwaddr l, 3679 MemoryRegion *mr) 3680 { 3681 MemTxResult result = MEMTX_OK; 3682 uint8_t *buf = ptr; 3683 3684 for (;;) { 3685 result |= flatview_read_continue_step(attrs, buf, len, mr_addr, &l, mr); 3686 len -= l; 3687 buf += l; 3688 mr_addr += l; 3689 3690 if (!len) { 3691 break; 3692 } 3693 l = len; 3694 } 3695 3696 return result; 3697 } 3698 3699 /* Called from RCU critical section. address_space_read_cached uses this 3700 * out of line function when the target is an MMIO or IOMMU region. 3701 */ 3702 MemTxResult 3703 address_space_read_cached_slow(MemoryRegionCache *cache, hwaddr addr, 3704 void *buf, hwaddr len) 3705 { 3706 hwaddr mr_addr, l; 3707 MemoryRegion *mr; 3708 3709 l = len; 3710 mr = address_space_translate_cached(cache, addr, &mr_addr, &l, false, 3711 MEMTXATTRS_UNSPECIFIED); 3712 return address_space_read_continue_cached(MEMTXATTRS_UNSPECIFIED, 3713 buf, len, mr_addr, l, mr); 3714 } 3715 3716 /* Called from RCU critical section. address_space_write_cached uses this 3717 * out of line function when the target is an MMIO or IOMMU region. 3718 */ 3719 MemTxResult 3720 address_space_write_cached_slow(MemoryRegionCache *cache, hwaddr addr, 3721 const void *buf, hwaddr len) 3722 { 3723 hwaddr mr_addr, l; 3724 MemoryRegion *mr; 3725 3726 l = len; 3727 mr = address_space_translate_cached(cache, addr, &mr_addr, &l, true, 3728 MEMTXATTRS_UNSPECIFIED); 3729 return address_space_write_continue_cached(MEMTXATTRS_UNSPECIFIED, 3730 buf, len, mr_addr, l, mr); 3731 } 3732 3733 #define ARG1_DECL MemoryRegionCache *cache 3734 #define ARG1 cache 3735 #define SUFFIX _cached_slow 3736 #define TRANSLATE(...) 
address_space_translate_cached(cache, __VA_ARGS__)
3737 #define RCU_READ_LOCK() ((void)0)
3738 #define RCU_READ_UNLOCK() ((void)0)
3739 #include "memory_ldst.c.inc"
3740 
3741 /* virtual memory access for debug (includes writing to ROM) */
3742 int cpu_memory_rw_debug(CPUState *cpu, vaddr addr,
3743                         void *ptr, size_t len, bool is_write)
3744 {
3745     hwaddr phys_addr;
3746     vaddr l, page;
3747     uint8_t *buf = ptr;
3748 
3749     cpu_synchronize_state(cpu);
3750     while (len > 0) {
3751         int asidx;
3752         MemTxAttrs attrs;
3753         MemTxResult res;
3754 
3755         page = addr & TARGET_PAGE_MASK;
3756         phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
3757         asidx = cpu_asidx_from_attrs(cpu, attrs);
3758         /* if no physical page mapped, return an error */
3759         if (phys_addr == -1)
3760             return -1;
3761         l = (page + TARGET_PAGE_SIZE) - addr;
3762         if (l > len)
3763             l = len;
3764         phys_addr += (addr & ~TARGET_PAGE_MASK);
3765         res = address_space_rw(cpu->cpu_ases[asidx].as, phys_addr, attrs, buf,
3766                                l, is_write);
3767         if (res != MEMTX_OK) {
3768             return -1;
3769         }
3770         len -= l;
3771         buf += l;
3772         addr += l;
3773     }
3774     return 0;
3775 }
3776 
3777 bool cpu_physical_memory_is_io(hwaddr phys_addr)
3778 {
3779     MemoryRegion *mr;
3780     hwaddr l = 1;
3781 
3782     RCU_READ_LOCK_GUARD();
3783     mr = address_space_translate(&address_space_memory,
3784                                  phys_addr, &phys_addr, &l, false,
3785                                  MEMTXATTRS_UNSPECIFIED);
3786 
3787     return !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
3788 }
3789 
3790 int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
3791 {
3792     RAMBlock *block;
3793     int ret = 0;
3794 
3795     RCU_READ_LOCK_GUARD();
3796     RAMBLOCK_FOREACH(block) {
3797         ret = func(block, opaque);
3798         if (ret) {
3799             break;
3800         }
3801     }
3802     return ret;
3803 }
3804 
3805 /*
3806  * Unmap pages of memory from start to start+length such that
3807  * they a) read as 0, b) trigger whatever fault mechanism
3808  * the OS provides for postcopy.
3809  * The pages must be unmapped by the end of the function.
3810  * Returns: 0 on success, non-0 on failure.
3811  *
3812  */
3813 int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length)
3814 {
3815     int ret = -1;
3816 
3817     uint8_t *host_startaddr = rb->host + start;
3818 
3819     if (!QEMU_PTR_IS_ALIGNED(host_startaddr, rb->page_size)) {
3820         error_report("%s: Unaligned start address: %p",
3821                      __func__, host_startaddr);
3822         goto err;
3823     }
3824 
3825     if ((start + length) <= rb->max_length) {
3826         bool need_madvise, need_fallocate;
3827         if (!QEMU_IS_ALIGNED(length, rb->page_size)) {
3828             error_report("%s: Unaligned length: %zx", __func__, length);
3829             goto err;
3830         }
3831 
3832         errno = ENOTSUP; /* If we are missing MADVISE etc */
3833 
3834         /* The logic here is messy:
3835          *    madvise DONTNEED fails for hugepages;
3836          *    fallocate works on hugepages and shmem;
3837          *    shared anonymous memory requires madvise REMOVE.
3838          */
3839         need_madvise = (rb->page_size == qemu_real_host_page_size());
3840         need_fallocate = rb->fd != -1;
3841         if (need_fallocate) {
3842             /* For a file, this causes the area of the file to be zero'd
3843              * if read, and for hugetlbfs also causes it to be unmapped
3844              * so a userfault will trigger.
3845              */
3846 #ifdef CONFIG_FALLOCATE_PUNCH_HOLE
3847             /*
3848              * fallocate() will fail with readonly files. Let's print a
3849              * proper error message.
3850              */
3851             if (rb->flags & RAM_READONLY_FD) {
3852                 error_report("%s: Discarding RAM with readonly files is not"
3853                              " supported", __func__);
3854                 goto err;
3855 
3856             }
3857             /*
3858              * We'll discard data from the actual file, even though we only
3859              * have a MAP_PRIVATE mapping, possibly messing with other
3860              * MAP_PRIVATE/MAP_SHARED mappings. There is no easy way to
3861              * change that behavior without violating the promised
3862              * semantics of ram_block_discard_range().
3863              *
3864              * Only warn, because it works as long as nobody else uses that
3865              * file.
3866              */
3867             if (!qemu_ram_is_shared(rb)) {
3868                 warn_report_once("%s: Discarding RAM"
3869                                  " in private file mappings is possibly"
3870                                  " dangerous, because it will modify the"
3871                                  " underlying file and will affect other"
3872                                  " users of the file", __func__);
3873             }
3874 
3875             ret = fallocate(rb->fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
3876                             start + rb->fd_offset, length);
3877             if (ret) {
3878                 ret = -errno;
3879                 error_report("%s: Failed to fallocate %s:%" PRIx64 "+%" PRIx64
3880                              " +%zx (%d)", __func__, rb->idstr, start,
3881                              rb->fd_offset, length, ret);
3882                 goto err;
3883             }
3884 #else
3885             ret = -ENOSYS;
3886             error_report("%s: fallocate not available/file "
3887                          "%s:%" PRIx64 "+%" PRIx64 " +%zx (%d)", __func__,
3888                          rb->idstr, start, rb->fd_offset, length, ret);
3889             goto err;
3890 #endif
3891         }
3892         if (need_madvise) {
3893             /* For normal RAM this causes it to be unmapped,
3894              * for shared memory it causes the local mapping to disappear
3895              * and to fall back on the file contents (which we just
3896              * fallocate'd away).
3897              */
3898 #if defined(CONFIG_MADVISE)
3899             if (qemu_ram_is_shared(rb) && rb->fd < 0) {
3900                 ret = madvise(host_startaddr, length, QEMU_MADV_REMOVE);
3901             } else {
3902                 ret = madvise(host_startaddr, length, QEMU_MADV_DONTNEED);
3903             }
3904             if (ret) {
3905                 ret = -errno;
3906                 error_report("%s: Failed to discard range "
3907                              "%s:%" PRIx64 " +%zx (%d)",
3908                              __func__, rb->idstr, start, length, ret);
3909                 goto err;
3910             }
3911 #else
3912             ret = -ENOSYS;
3913             error_report("%s: MADVISE not available %s:%" PRIx64 " +%zx (%d)",
3914                          __func__, rb->idstr, start, length, ret);
3915             goto err;
3916 #endif
3917         }
3918         trace_ram_block_discard_range(rb->idstr, host_startaddr, length,
3919                                       need_madvise, need_fallocate, ret);
3920     } else {
3921         error_report("%s: Overrun block '%s' (%" PRIu64 "/%zx/" RAM_ADDR_FMT")",
3922                      __func__, rb->idstr, start, length, rb->max_length);
3923     }
3924 
3925 err:
3926     return ret;
3927 }
3928 
3929 int ram_block_discard_guest_memfd_range(RAMBlock *rb, uint64_t start,
3930                                         size_t length)
3931 {
3932     int ret = -1;
3933 
3934 #ifdef CONFIG_FALLOCATE_PUNCH_HOLE
3935     /* ignore fd_offset with guest_memfd */
3936     ret = fallocate(rb->guest_memfd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
3937                     start, length);
3938 
3939     if (ret) {
3940         ret = -errno;
3941         error_report("%s: Failed to fallocate %s:%" PRIx64 " +%zx (%d)",
3942                      __func__, rb->idstr, start, length, ret);
3943     }
3944 #else
3945     ret = -ENOSYS;
3946     error_report("%s: fallocate not available %s:%" PRIx64 " +%zx (%d)",
3947                  __func__, rb->idstr, start, length, ret);
3948 #endif
3949 
3950     return ret;
3951 }
3952 
3953 bool ramblock_is_pmem(RAMBlock *rb)
3954 {
3955     return rb->flags & RAM_PMEM;
3956 }
3957 
3958 static void mtree_print_phys_entries(int start, int end, int skip, int ptr)
3959 {
3960     if (start == end - 1) {
3961         qemu_printf("\t%3d ", start);
3962     } else {
3963         qemu_printf("\t%3d..%-3d ", start, end - 1);
3964     }
3965     qemu_printf(" skip=%d 
", skip); 3966 if (ptr == PHYS_MAP_NODE_NIL) { 3967 qemu_printf(" ptr=NIL"); 3968 } else if (!skip) { 3969 qemu_printf(" ptr=#%d", ptr); 3970 } else { 3971 qemu_printf(" ptr=[%d]", ptr); 3972 } 3973 qemu_printf("\n"); 3974 } 3975 3976 #define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \ 3977 int128_sub((size), int128_one())) : 0) 3978 3979 void mtree_print_dispatch(AddressSpaceDispatch *d, MemoryRegion *root) 3980 { 3981 int i; 3982 3983 qemu_printf(" Dispatch\n"); 3984 qemu_printf(" Physical sections\n"); 3985 3986 for (i = 0; i < d->map.sections_nb; ++i) { 3987 MemoryRegionSection *s = d->map.sections + i; 3988 const char *names[] = { " [unassigned]", " [not dirty]", 3989 " [ROM]", " [watch]" }; 3990 3991 qemu_printf(" #%d @" HWADDR_FMT_plx ".." HWADDR_FMT_plx 3992 " %s%s%s%s%s", 3993 i, 3994 s->offset_within_address_space, 3995 s->offset_within_address_space + MR_SIZE(s->size), 3996 s->mr->name ? s->mr->name : "(noname)", 3997 i < ARRAY_SIZE(names) ? names[i] : "", 3998 s->mr == root ? " [ROOT]" : "", 3999 s == d->mru_section ? " [MRU]" : "", 4000 s->mr->is_iommu ? " [iommu]" : ""); 4001 4002 if (s->mr->alias) { 4003 qemu_printf(" alias=%s", s->mr->alias->name ? 4004 s->mr->alias->name : "noname"); 4005 } 4006 qemu_printf("\n"); 4007 } 4008 4009 qemu_printf(" Nodes (%d bits per level, %d levels) ptr=[%d] skip=%d\n", 4010 P_L2_BITS, P_L2_LEVELS, d->phys_map.ptr, d->phys_map.skip); 4011 for (i = 0; i < d->map.nodes_nb; ++i) { 4012 int j, jprev; 4013 PhysPageEntry prev; 4014 Node *n = d->map.nodes + i; 4015 4016 qemu_printf(" [%d]\n", i); 4017 4018 for (j = 0, jprev = 0, prev = *n[0]; j < ARRAY_SIZE(*n); ++j) { 4019 PhysPageEntry *pe = *n + j; 4020 4021 if (pe->ptr == prev.ptr && pe->skip == prev.skip) { 4022 continue; 4023 } 4024 4025 mtree_print_phys_entries(jprev, j, prev.skip, prev.ptr); 4026 4027 jprev = j; 4028 prev = *pe; 4029 } 4030 4031 if (jprev != ARRAY_SIZE(*n)) { 4032 mtree_print_phys_entries(jprev, j, prev.skip, prev.ptr); 4033 } 4034 } 4035 } 4036 4037 /* Require any discards to work. */ 4038 static unsigned int ram_block_discard_required_cnt; 4039 /* Require only coordinated discards to work. */ 4040 static unsigned int ram_block_coordinated_discard_required_cnt; 4041 /* Disable any discards. */ 4042 static unsigned int ram_block_discard_disabled_cnt; 4043 /* Disable only uncoordinated discards. 

/* Require any discards to work. */
static unsigned int ram_block_discard_required_cnt;
/* Require only coordinated discards to work. */
static unsigned int ram_block_coordinated_discard_required_cnt;
/* Disable any discards. */
static unsigned int ram_block_discard_disabled_cnt;
/* Disable only uncoordinated discards. */
static unsigned int ram_block_uncoordinated_discard_disabled_cnt;
static QemuMutex ram_block_discard_disable_mutex;

static void ram_block_discard_disable_mutex_lock(void)
{
    static gsize initialized;

    if (g_once_init_enter(&initialized)) {
        qemu_mutex_init(&ram_block_discard_disable_mutex);
        g_once_init_leave(&initialized, 1);
    }
    qemu_mutex_lock(&ram_block_discard_disable_mutex);
}

static void ram_block_discard_disable_mutex_unlock(void)
{
    qemu_mutex_unlock(&ram_block_discard_disable_mutex);
}

int ram_block_discard_disable(bool state)
{
    int ret = 0;

    ram_block_discard_disable_mutex_lock();
    if (!state) {
        ram_block_discard_disabled_cnt--;
    } else if (ram_block_discard_required_cnt ||
               ram_block_coordinated_discard_required_cnt) {
        ret = -EBUSY;
    } else {
        ram_block_discard_disabled_cnt++;
    }
    ram_block_discard_disable_mutex_unlock();
    return ret;
}

int ram_block_uncoordinated_discard_disable(bool state)
{
    int ret = 0;

    ram_block_discard_disable_mutex_lock();
    if (!state) {
        ram_block_uncoordinated_discard_disabled_cnt--;
    } else if (ram_block_discard_required_cnt) {
        ret = -EBUSY;
    } else {
        ram_block_uncoordinated_discard_disabled_cnt++;
    }
    ram_block_discard_disable_mutex_unlock();
    return ret;
}

int ram_block_discard_require(bool state)
{
    int ret = 0;

    ram_block_discard_disable_mutex_lock();
    if (!state) {
        ram_block_discard_required_cnt--;
    } else if (ram_block_discard_disabled_cnt ||
               ram_block_uncoordinated_discard_disabled_cnt) {
        ret = -EBUSY;
    } else {
        ram_block_discard_required_cnt++;
    }
    ram_block_discard_disable_mutex_unlock();
    return ret;
}

int ram_block_coordinated_discard_require(bool state)
{
    int ret = 0;

    ram_block_discard_disable_mutex_lock();
    if (!state) {
        ram_block_coordinated_discard_required_cnt--;
    } else if (ram_block_discard_disabled_cnt) {
        ret = -EBUSY;
    } else {
        ram_block_coordinated_discard_required_cnt++;
    }
    ram_block_discard_disable_mutex_unlock();
    return ret;
}

bool ram_block_discard_is_disabled(void)
{
    return qatomic_read(&ram_block_discard_disabled_cnt) ||
           qatomic_read(&ram_block_uncoordinated_discard_disabled_cnt);
}

bool ram_block_discard_is_required(void)
{
    return qatomic_read(&ram_block_discard_required_cnt) ||
           qatomic_read(&ram_block_coordinated_discard_required_cnt);
}
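
/*
 * Illustrative sketch (not built, not part of QEMU): how a consumer that
 * relies on discards working would use the reference counters above. The
 * example_device_*() names are hypothetical; only the ram_block_*() calls
 * are real. Each successful require(true) must be balanced by a
 * require(false), and it fails with -EBUSY while any conflicting
 * ram_block_discard_disable(true) reference is held.
 */
#if 0
static int example_device_realize(Error **errp)
{
    if (ram_block_coordinated_discard_require(true)) {
        error_setg(errp, "discarding of RAM is disabled");
        return -EBUSY;
    }
    return 0;
}

static void example_device_unrealize(void)
{
    /* Drop our reference so other users may disable discards again. */
    ram_block_coordinated_discard_require(false);
}
#endif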

/*
 * Return true if ram is compatible with CPR. Do not exclude rom,
 * because the rom file could change in new QEMU.
 */
static bool ram_is_cpr_compatible(RAMBlock *rb)
{
    MemoryRegion *mr = rb->mr;

    if (!mr || !memory_region_is_ram(mr)) {
        return true;
    }

    /* Ram device is remapped in new QEMU */
    if (memory_region_is_ram_device(mr)) {
        return true;
    }

    /*
     * A file descriptor is passed to new QEMU and remapped, or its backing
     * file is reopened and mapped. It must be shared to avoid COW.
     */
    if (rb->fd >= 0 && qemu_ram_is_shared(rb)) {
        return true;
    }

    return false;
}

/*
 * Add a blocker for each volatile ram block. This function should only be
 * called after we know that the block is migratable. Non-migratable blocks
 * are either re-created in new QEMU, or are handled specially, or are covered
 * by a device-level CPR blocker.
 */
void ram_block_add_cpr_blocker(RAMBlock *rb, Error **errp)
{
    assert(qemu_ram_is_migratable(rb));

    if (ram_is_cpr_compatible(rb)) {
        return;
    }

    error_setg(&rb->cpr_blocker,
               "Memory region %s is not compatible with CPR. share=on is "
               "required for memory-backend objects, and aux-ram-share=on is "
               "required.", memory_region_name(rb->mr));
    migrate_add_blocker_modes(&rb->cpr_blocker, errp, MIG_MODE_CPR_TRANSFER,
                              -1);
}

void ram_block_del_cpr_blocker(RAMBlock *rb)
{
    migrate_del_blocker(&rb->cpr_blocker);
}
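
/*
 * Illustrative sketch (not built, not part of QEMU): the expected pairing of
 * the CPR blocker helpers over a RAM block's lifetime. The
 * example_ram_block_*() call sites are hypothetical; in the real code the
 * blocker is added once a block is known to be migratable and removed when
 * the block is freed. A private (share=off) file-backed block fails
 * ram_is_cpr_compatible() and thus blocks MIG_MODE_CPR_TRANSFER.
 */
#if 0
static void example_ram_block_add(RAMBlock *rb, Error **errp)
{
    if (qemu_ram_is_migratable(rb)) {
        ram_block_add_cpr_blocker(rb, errp);   /* may set a CPR blocker */
    }
}

static void example_ram_block_cleanup(RAMBlock *rb)
{
    ram_block_del_cpr_blocker(rb);             /* no-op if none was set */
}
#endif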