/*
 * RAM allocation and memory access
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "exec/page-vary.h"
#include "qapi/error.h"

#include "qemu/cutils.h"
#include "qemu/cacheflush.h"
#include "qemu/hbitmap.h"
#include "qemu/madvise.h"
#include "qemu/lockable.h"

#ifdef CONFIG_TCG
#include "hw/core/tcg-cpu-ops.h"
#endif /* CONFIG_TCG */

#include "exec/exec-all.h"
#include "exec/page-protection.h"
#include "exec/target_page.h"
#include "hw/qdev-core.h"
#include "hw/qdev-properties.h"
#include "hw/boards.h"
#include "sysemu/xen.h"
#include "sysemu/kvm.h"
#include "sysemu/tcg.h"
#include "sysemu/qtest.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qemu/qemu-print.h"
#include "qemu/log.h"
#include "qemu/memalign.h"
#include "exec/memory.h"
#include "exec/ioport.h"
#include "sysemu/dma.h"
#include "sysemu/hostmem.h"
#include "sysemu/hw_accel.h"
#include "sysemu/xen-mapcache.h"
#include "trace.h"

#ifdef CONFIG_FALLOCATE_PUNCH_HOLE
#include <linux/falloc.h>
#endif

#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "exec/translate-all.h"
#include "sysemu/replay.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"

#include "qemu/pmem.h"

#include "migration/vmstate.h"

#include "qemu/range.h"
#ifndef _WIN32
#include "qemu/mmap-alloc.h"
#endif

#include "monitor/monitor.h"

#ifdef CONFIG_LIBDAXCTL
#include <daxctl/libdaxctl.h>
#endif

//#define DEBUG_SUBPAGE

/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

static MemoryRegion io_mem_unassigned;

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)
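/*
 * With 6 skip bits and 26 ptr bits, an entry can reference up to
 * 2^26 - 1 nodes or sections; PHYS_MAP_NODE_NIL evaluates to
 * ((uint32_t)~0) >> 6 == 0x3ffffff, reserving the all-ones ptr value
 * as the "no node" marker.
 */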
/* Size of the L2 (and L3, etc) page tables. */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];
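/*
 * Worked example, assuming a 4 KiB target page (TARGET_PAGE_BITS == 12):
 * P_L2_LEVELS = ((64 - 12 - 1) / 9) + 1 = 6, i.e. six radix-tree levels
 * of 512 entries each (54 index bits of capacity) cover the 52-bit page
 * frame number.
 */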
typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    MemoryRegionSection *mru_section;
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    FlatView *fv;
    hwaddr base;
    uint16_t sub_section[];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_log_global_after_sync(MemoryListener *listener);
static void tcg_commit(MemoryListener *listener);

/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
typedef struct CPUAddressSpace {
    CPUState *cpu;
    AddressSpace *as;
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
} CPUAddressSpace;

struct DirtyBitmapSnapshot {
    ram_addr_t start;
    ram_addr_t end;
    unsigned long dirty[];
};

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    static unsigned alloc_hint = 16;
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(alloc_hint, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
        alloc_hint = map->nodes_nb_alloc;
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}

static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, uint64_t *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, uint64_t nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (P_L2_LEVELS >= (1 << 6) &&
        lp->skip + p[valid_ptr].skip >= (1 << 6)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

void address_space_dispatch_compact(AddressSpaceDispatch *d)
{
    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes);
    }
}
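/*
 * Compaction illustration: a single page registered in an otherwise
 * empty 64-bit dispatch leaves a chain of single-child interior nodes.
 * phys_page_compact() folds each child's skip into its parent entry, so
 * the root entry ends up pointing straight at the bottom node and
 * phys_page_find() below resolves the address in one loop iteration.
 */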
static inline bool section_covers_addr(const MemoryRegionSection *section,
                                       hwaddr addr)
{
    /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
     * the section must cover the entire address space.
     */
    return int128_gethi(section->size) ||
           range_covers_byte(section->offset_within_address_space,
                             int128_getlo(section->size), addr);
}

static MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr addr)
{
    PhysPageEntry lp = d->phys_map, *p;
    Node *nodes = d->map.nodes;
    MemoryRegionSection *sections = d->map.sections;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (section_covers_addr(&sections[lp.ptr], addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section = qatomic_read(&d->mru_section);
    subpage_t *subpage;

    if (!section || section == &d->map.sections[PHYS_SECTION_UNASSIGNED] ||
        !section_covers_addr(section, addr)) {
        section = phys_page_find(d, addr);
        qatomic_set(&d->mru_section, section);
    }
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions. When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly. For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine. If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}
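/*
 * Clamping example: for a RAM section covering [0x1000, 0x3000) in the
 * address space, a lookup at addr 0x1800 with *plen == 0x10000 computes
 * an offset of 0x800 into the section, diff == 0x2000 - 0x800 == 0x1800,
 * and *plen is clamped to 0x1800 so the access stays inside the section.
 */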
/**
 * address_space_translate_iommu - translate an address through an IOMMU
 * memory region and then through the target address space.
 *
 * @iommu_mr: the IOMMU memory region that we start the translation from
 * @addr: the address to be translated through the MMU
 * @xlat: the translated address offset within the destination memory region.
 *        It cannot be %NULL.
 * @plen_out: valid read/write length of the translated address. It
 *            cannot be %NULL.
 * @page_mask_out: page mask for the translated address. This
 *                 should only be meaningful for IOMMU translated
 *                 addresses, since there may be huge pages that this bit
 *                 would tell. It can be %NULL if we don't care about it.
 * @is_write: whether the translation operation is for write
 * @is_mmio: whether this can be MMIO, set true if it can
 * @target_as: the address space targeted by the IOMMU
 * @attrs: transaction attributes
 *
 * This function is called from RCU critical section.  It is the common
 * part of flatview_do_translate and address_space_translate_cached.
 */
static MemoryRegionSection address_space_translate_iommu(IOMMUMemoryRegion *iommu_mr,
                                                         hwaddr *xlat,
                                                         hwaddr *plen_out,
                                                         hwaddr *page_mask_out,
                                                         bool is_write,
                                                         bool is_mmio,
                                                         AddressSpace **target_as,
                                                         MemTxAttrs attrs)
{
    MemoryRegionSection *section;
    hwaddr page_mask = (hwaddr)-1;

    do {
        hwaddr addr = *xlat;
        IOMMUMemoryRegionClass *imrc = memory_region_get_iommu_class_nocheck(iommu_mr);
        int iommu_idx = 0;
        IOMMUTLBEntry iotlb;

        if (imrc->attrs_to_index) {
            iommu_idx = imrc->attrs_to_index(iommu_mr, attrs);
        }

        iotlb = imrc->translate(iommu_mr, addr, is_write ?
                                IOMMU_WO : IOMMU_RO, iommu_idx);

        if (!(iotlb.perm & (1 << is_write))) {
            goto unassigned;
        }

        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        page_mask &= iotlb.addr_mask;
        *plen_out = MIN(*plen_out, (addr | iotlb.addr_mask) - addr + 1);
        *target_as = iotlb.target_as;

        section = address_space_translate_internal(
                address_space_to_dispatch(iotlb.target_as), addr, xlat,
                plen_out, is_mmio);

        iommu_mr = memory_region_get_iommu(section->mr);
    } while (unlikely(iommu_mr));

    if (page_mask_out) {
        *page_mask_out = page_mask;
    }
    return *section;

unassigned:
    return (MemoryRegionSection) { .mr = &io_mem_unassigned };
}
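/*
 * Note that page_mask is the AND of every stage's addr_mask, so the
 * smallest mapping wins: translating through a 2 MiB IOMMU mapping
 * (addr_mask 0x1fffff) followed by a 4 KiB one (addr_mask 0xfff) leaves
 * page_mask == 0xfff.  *plen_out is likewise clamped at each stage so it
 * never extends past the current mapping.
 */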
/**
 * flatview_do_translate - translate an address in FlatView
 *
 * @fv: the flat view that we want to translate on
 * @addr: the address to be translated in above address space
 * @xlat: the translated address offset within memory region. It
 *        cannot be @NULL.
 * @plen_out: valid read/write length of the translated address. It
 *            can be @NULL when we don't care about it.
 * @page_mask_out: page mask for the translated address. This
 *                 should only be meaningful for IOMMU translated
 *                 addresses, since there may be huge pages that this bit
 *                 would tell. It can be @NULL if we don't care about it.
 * @is_write: whether the translation operation is for write
 * @is_mmio: whether this can be MMIO, set true if it can
 * @target_as: the address space targeted by the IOMMU
 * @attrs: memory transaction attributes
 *
 * This function is called from RCU critical section
 */
static MemoryRegionSection flatview_do_translate(FlatView *fv,
                                                 hwaddr addr,
                                                 hwaddr *xlat,
                                                 hwaddr *plen_out,
                                                 hwaddr *page_mask_out,
                                                 bool is_write,
                                                 bool is_mmio,
                                                 AddressSpace **target_as,
                                                 MemTxAttrs attrs)
{
    MemoryRegionSection *section;
    IOMMUMemoryRegion *iommu_mr;
    hwaddr plen = (hwaddr)(-1);

    if (!plen_out) {
        plen_out = &plen;
    }

    section = address_space_translate_internal(
            flatview_to_dispatch(fv), addr, xlat,
            plen_out, is_mmio);

    iommu_mr = memory_region_get_iommu(section->mr);
    if (unlikely(iommu_mr)) {
        return address_space_translate_iommu(iommu_mr, xlat,
                                             plen_out, page_mask_out,
                                             is_write, is_mmio,
                                             target_as, attrs);
    }
    if (page_mask_out) {
        /* Not behind an IOMMU, use default page size. */
        *page_mask_out = ~TARGET_PAGE_MASK;
    }

    return *section;
}

/* Called from RCU critical section */
IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
                                            bool is_write, MemTxAttrs attrs)
{
    MemoryRegionSection section;
    hwaddr xlat, page_mask;

    /*
     * This can never be MMIO, and we don't really care about plen,
     * but page mask.
     */
    section = flatview_do_translate(address_space_to_flatview(as), addr, &xlat,
                                    NULL, &page_mask, is_write, false, &as,
                                    attrs);

    /* Illegal translation */
    if (section.mr == &io_mem_unassigned) {
        goto iotlb_fail;
    }

    /* Convert memory region offset into address space offset */
    xlat += section.offset_within_address_space -
        section.offset_within_region;

    return (IOMMUTLBEntry) {
        .target_as = as,
        .iova = addr & ~page_mask,
        .translated_addr = xlat & ~page_mask,
        .addr_mask = page_mask,
        /* IOTLBs are for DMAs, and DMA only allows on RAMs. */
        .perm = IOMMU_RW,
    };

iotlb_fail:
    return (IOMMUTLBEntry) {0};
}
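/*
 * For example, when addr falls inside a 2 MiB IOMMU mapping, page_mask
 * is 0x1fffff and the returned entry describes the whole naturally
 * aligned 2 MiB range: iova and translated_addr are masked down to the
 * mapping boundary and addr_mask tells the caller how far it extends.
 */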
/* Called from RCU critical section */
MemoryRegion *flatview_translate(FlatView *fv, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool is_write,
                                 MemTxAttrs attrs)
{
    MemoryRegion *mr;
    MemoryRegionSection section;
    AddressSpace *as = NULL;

    /* This can be MMIO, so setup MMIO bit. */
    section = flatview_do_translate(fv, addr, xlat, plen, NULL,
                                    is_write, true, &as, attrs);
    mr = section.mr;

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    return mr;
}

typedef struct TCGIOMMUNotifier {
    IOMMUNotifier n;
    MemoryRegion *mr;
    CPUState *cpu;
    int iommu_idx;
    bool active;
} TCGIOMMUNotifier;

static void tcg_iommu_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
{
    TCGIOMMUNotifier *notifier = container_of(n, TCGIOMMUNotifier, n);

    if (!notifier->active) {
        return;
    }
    tlb_flush(notifier->cpu);
    notifier->active = false;
    /* We leave the notifier struct on the list to avoid reallocating it later.
     * Generally the number of IOMMUs a CPU deals with will be small.
     * In any case we can't unregister the iommu notifier from a notify
     * callback.
     */
}

static void tcg_register_iommu_notifier(CPUState *cpu,
                                        IOMMUMemoryRegion *iommu_mr,
                                        int iommu_idx)
{
    /* Make sure this CPU has an IOMMU notifier registered for this
     * IOMMU/IOMMU index combination, so that we can flush its TLB
     * when the IOMMU tells us the mappings we've cached have changed.
     */
    MemoryRegion *mr = MEMORY_REGION(iommu_mr);
    TCGIOMMUNotifier *notifier = NULL;
    int i;

    for (i = 0; i < cpu->iommu_notifiers->len; i++) {
        notifier = g_array_index(cpu->iommu_notifiers, TCGIOMMUNotifier *, i);
        if (notifier->mr == mr && notifier->iommu_idx == iommu_idx) {
            break;
        }
    }
    if (i == cpu->iommu_notifiers->len) {
        /* Not found, add a new entry at the end of the array */
        cpu->iommu_notifiers = g_array_set_size(cpu->iommu_notifiers, i + 1);
        notifier = g_new0(TCGIOMMUNotifier, 1);
        g_array_index(cpu->iommu_notifiers, TCGIOMMUNotifier *, i) = notifier;

        notifier->mr = mr;
        notifier->iommu_idx = iommu_idx;
        notifier->cpu = cpu;
        /* Rather than trying to register interest in the specific part
         * of the iommu's address space that we've accessed and then
         * expand it later as subsequent accesses touch more of it, we
         * just register interest in the whole thing, on the assumption
         * that iommu reconfiguration will be rare.
         */
        iommu_notifier_init(&notifier->n,
                            tcg_iommu_unmap_notify,
                            IOMMU_NOTIFIER_UNMAP,
                            0,
                            HWADDR_MAX,
                            iommu_idx);
        memory_region_register_iommu_notifier(notifier->mr, &notifier->n,
                                              &error_fatal);
    }

    if (!notifier->active) {
        notifier->active = true;
    }
}
void tcg_iommu_free_notifier_list(CPUState *cpu)
{
    /* Destroy the CPU's notifier list */
    int i;
    TCGIOMMUNotifier *notifier;

    for (i = 0; i < cpu->iommu_notifiers->len; i++) {
        notifier = g_array_index(cpu->iommu_notifiers, TCGIOMMUNotifier *, i);
        memory_region_unregister_iommu_notifier(notifier->mr, &notifier->n);
        g_free(notifier);
    }
    g_array_free(cpu->iommu_notifiers, true);
}

void tcg_iommu_init_notifier_list(CPUState *cpu)
{
    cpu->iommu_notifiers = g_array_new(false, true, sizeof(TCGIOMMUNotifier *));
}

/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr orig_addr,
                                  hwaddr *xlat, hwaddr *plen,
                                  MemTxAttrs attrs, int *prot)
{
    MemoryRegionSection *section;
    IOMMUMemoryRegion *iommu_mr;
    IOMMUMemoryRegionClass *imrc;
    IOMMUTLBEntry iotlb;
    int iommu_idx;
    hwaddr addr = orig_addr;
    AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;

    for (;;) {
        section = address_space_translate_internal(d, addr, &addr, plen, false);

        iommu_mr = memory_region_get_iommu(section->mr);
        if (!iommu_mr) {
            break;
        }

        imrc = memory_region_get_iommu_class_nocheck(iommu_mr);

        iommu_idx = imrc->attrs_to_index(iommu_mr, attrs);
        tcg_register_iommu_notifier(cpu, iommu_mr, iommu_idx);
        /* We need all the permissions, so pass IOMMU_NONE so the IOMMU
         * doesn't short-cut its translation table walk.
         */
        iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, iommu_idx);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        /* Update the caller's prot bits to remove permissions the IOMMU
         * is giving us a failure response for. If we get down to no
         * permissions left at all we can give up now.
         */
        if (!(iotlb.perm & IOMMU_RO)) {
            *prot &= ~(PAGE_READ | PAGE_EXEC);
        }
        if (!(iotlb.perm & IOMMU_WO)) {
            *prot &= ~PAGE_WRITE;
        }

        if (!*prot) {
            goto translate_fail;
        }

        d = flatview_to_dispatch(address_space_to_flatview(iotlb.target_as));
    }

    assert(!memory_region_is_iommu(section->mr));
    *xlat = addr;
    return section;

translate_fail:
    /*
     * We should be given a page-aligned address -- certainly
     * tlb_set_page_with_attrs() does so.  The page offset of xlat
     * is used to index sections[], and PHYS_SECTION_UNASSIGNED = 0.
     * The page portion of xlat will be logged by memory_region_access_valid()
     * when this memory access is rejected, so use the original untranslated
     * physical address.
     */
    assert((orig_addr & ~TARGET_PAGE_MASK) == 0);
    *xlat = orig_addr;
    return &d->map.sections[PHYS_SECTION_UNASSIGNED];
}
void cpu_address_space_init(CPUState *cpu, int asidx,
                            const char *prefix, MemoryRegion *mr)
{
    CPUAddressSpace *newas;
    AddressSpace *as = g_new0(AddressSpace, 1);
    char *as_name;

    assert(mr);
    as_name = g_strdup_printf("%s-%d", prefix, cpu->cpu_index);
    address_space_init(as, mr, as_name);
    g_free(as_name);

    /* Target code should have set num_ases before calling us */
    assert(asidx < cpu->num_ases);

    if (asidx == 0) {
        /* address space 0 gets the convenience alias */
        cpu->as = as;
    }

    /* KVM cannot currently support multiple address spaces. */
    assert(asidx == 0 || !kvm_enabled());

    if (!cpu->cpu_ases) {
        cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
        cpu->cpu_ases_count = cpu->num_ases;
    }

    newas = &cpu->cpu_ases[asidx];
    newas->cpu = cpu;
    newas->as = as;
    if (tcg_enabled()) {
        newas->tcg_as_listener.log_global_after_sync = tcg_log_global_after_sync;
        newas->tcg_as_listener.commit = tcg_commit;
        newas->tcg_as_listener.name = "tcg";
        memory_listener_register(&newas->tcg_as_listener, as);
    }
}
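/*
 * Illustrative usage only (the names below are an assumption, not taken
 * from this file): a target CPU that models two buses might do
 *
 *     cpu->num_ases = 2;
 *     cpu_address_space_init(cpu, 0, "cpu-memory", get_system_memory());
 *     cpu_address_space_init(cpu, 1, "cpu-secure-memory", secure_memory);
 *
 * num_ases must be set before the first call (see the assert above), and
 * only address space 0 is supported when KVM is enabled.
 */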
void cpu_address_space_destroy(CPUState *cpu, int asidx)
{
    CPUAddressSpace *cpuas;

    assert(cpu->cpu_ases);
    assert(asidx >= 0 && asidx < cpu->num_ases);
    /* KVM cannot currently support multiple address spaces. */
    assert(asidx == 0 || !kvm_enabled());

    cpuas = &cpu->cpu_ases[asidx];
    if (tcg_enabled()) {
        memory_listener_unregister(&cpuas->tcg_as_listener);
    }

    address_space_destroy(cpuas->as);
    g_free_rcu(cpuas->as, rcu);

    if (asidx == 0) {
        /* reset the convenience alias for address space 0 */
        cpu->as = NULL;
    }

    if (--cpu->cpu_ases_count == 0) {
        g_free(cpu->cpu_ases);
        cpu->cpu_ases = NULL;
    }
}

AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
{
    /* Return the AddressSpace corresponding to the specified index */
    return cpu->cpu_ases[asidx].as;
}

/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = qatomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        return block;
    }
    RAMBLOCK_FOREACH(block) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the BQL.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *                                        xxx removed from list
     *     rcu_read_lock()
     *     read mru_block
     *                                        mru_block = NULL;
     *                                        call_rcu(reclaim_ramblock, xxx);
     *     rcu_read_unlock()
     *
     * qatomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}

void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    CPUState *cpu;
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    assert(tcg_enabled());
    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    RCU_READ_LOCK_GUARD();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    CPU_FOREACH(cpu) {
        tlb_reset_dirty(cpu, start1, length);
    }
}

/* Note: start and end must be within the same ram block. */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page, start_page;
    bool dirty = false;
    RAMBlock *ramblock;
    uint64_t mr_offset, mr_size;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    start_page = start >> TARGET_PAGE_BITS;
    page = start_page;

    WITH_RCU_READ_LOCK_GUARD() {
        blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]);
        ramblock = qemu_get_ram_block(start);
        /* Range sanity check on the ramblock */
        assert(start >= ramblock->offset &&
               start + length <= ramblock->offset + ramblock->used_length);

        while (page < end) {
            unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
            unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
            unsigned long num = MIN(end - page,
                                    DIRTY_MEMORY_BLOCK_SIZE - offset);

            dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
                                                  offset, num);
            page += num;
        }

        mr_offset = (ram_addr_t)(start_page << TARGET_PAGE_BITS) - ramblock->offset;
        mr_size = (end - start_page) << TARGET_PAGE_BITS;
        memory_region_clear_dirty_bitmap(ramblock->mr, mr_offset, mr_size);
    }

    if (dirty) {
        cpu_physical_memory_dirty_bits_cleared(start, length);
    }

    return dirty;
}
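/*
 * The per-client dirty bitmap is sharded into DirtyMemoryBlocks of
 * DIRTY_MEMORY_BLOCK_SIZE pages, as the idx/offset arithmetic above
 * shows: page N lives in blocks[N / DIRTY_MEMORY_BLOCK_SIZE] at bit
 * N % DIRTY_MEMORY_BLOCK_SIZE.  Sharding lets dirty_memory_extend()
 * below grow the bitmap by republishing an array of block pointers
 * under RCU instead of copying every bit.
 */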
DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty
    (MemoryRegion *mr, hwaddr offset, hwaddr length, unsigned client)
{
    DirtyMemoryBlocks *blocks;
    ram_addr_t start, first, last;
    unsigned long align = 1UL << (TARGET_PAGE_BITS + BITS_PER_LEVEL);
    DirtyBitmapSnapshot *snap;
    unsigned long page, end, dest;

    start = memory_region_get_ram_addr(mr);
    /* We know we're only called for RAM MemoryRegions */
    assert(start != RAM_ADDR_INVALID);
    start += offset;

    first = QEMU_ALIGN_DOWN(start, align);
    last = QEMU_ALIGN_UP(start + length, align);

    snap = g_malloc0(sizeof(*snap) +
                     ((last - first) >> (TARGET_PAGE_BITS + 3)));
    snap->start = first;
    snap->end = last;

    page = first >> TARGET_PAGE_BITS;
    end = last >> TARGET_PAGE_BITS;
    dest = 0;

    WITH_RCU_READ_LOCK_GUARD() {
        blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]);

        while (page < end) {
            unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
            unsigned long ofs = page % DIRTY_MEMORY_BLOCK_SIZE;
            unsigned long num = MIN(end - page,
                                    DIRTY_MEMORY_BLOCK_SIZE - ofs);

            assert(QEMU_IS_ALIGNED(ofs, (1 << BITS_PER_LEVEL)));
            assert(QEMU_IS_ALIGNED(num, (1 << BITS_PER_LEVEL)));
            ofs >>= BITS_PER_LEVEL;

            bitmap_copy_and_clear_atomic(snap->dirty + dest,
                                         blocks->blocks[idx] + ofs,
                                         num);
            page += num;
            dest += num >> BITS_PER_LEVEL;
        }
    }

    cpu_physical_memory_dirty_bits_cleared(start, length);

    memory_region_clear_dirty_bitmap(mr, offset, length);

    return snap;
}

bool cpu_physical_memory_snapshot_get_dirty(DirtyBitmapSnapshot *snap,
                                            ram_addr_t start,
                                            ram_addr_t length)
{
    unsigned long page, end;

    assert(start >= snap->start);
    assert(start + length <= snap->end);

    end = TARGET_PAGE_ALIGN(start + length - snap->start) >> TARGET_PAGE_BITS;
    page = (start - snap->start) >> TARGET_PAGE_BITS;

    while (page < end) {
        if (test_bit(page, snap->dirty)) {
            return true;
        }
        page++;
    }
    return false;
}

/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section)
{
    AddressSpaceDispatch *d = flatview_to_dispatch(section->fv);
    return section - d->map.sections;
}
static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(FlatView *fv, hwaddr base);

static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    bool have_sub_page = mr->subpage;

    memory_region_unref(mr);

    if (have_sub_page) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

static void register_subpage(FlatView *fv, MemoryRegionSection *section)
{
    AddressSpaceDispatch *d = flatview_to_dispatch(fv);
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d, base);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(fv, base);
        subsection.fv = fv;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}

static void register_multipage(FlatView *fv,
                               MemoryRegionSection *section)
{
    AddressSpaceDispatch *d = flatview_to_dispatch(fv);
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}
/*
 * The range in *section* may look like this:
 *
 *      |s|PPPPPPP|s|
 *
 * where s stands for subpage and P for page.
 */
void flatview_add_to_dispatch(FlatView *fv, MemoryRegionSection *section)
{
    MemoryRegionSection remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    /* register first subpage */
    if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(remain.offset_within_address_space)
                        - remain.offset_within_address_space;

        MemoryRegionSection now = remain;
        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(fv, &now);
        if (int128_eq(remain.size, now.size)) {
            return;
        }
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
    }

    /* register whole pages */
    if (int128_ge(remain.size, page_size)) {
        MemoryRegionSection now = remain;
        now.size = int128_and(now.size, int128_neg(page_size));
        register_multipage(fv, &now);
        if (int128_eq(remain.size, now.size)) {
            return;
        }
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
    }

    /* register last subpage */
    register_subpage(fv, &remain);
}
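/*
 * Concretely, assuming 4 KiB target pages: a section covering
 * [0x1800, 0x5400) is dispatched as a leading subpage [0x1800, 0x2000),
 * whole pages [0x2000, 0x5000) via register_multipage(), and a trailing
 * subpage [0x5000, 0x5400).
 */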
"ro" : "rw"); 1177 1178 g_free(psize); 1179 } 1180 1181 return buf; 1182 } 1183 find_min_backend_pagesize(Object * obj,void * opaque)1184 static int find_min_backend_pagesize(Object *obj, void *opaque) 1185 { 1186 long *hpsize_min = opaque; 1187 1188 if (object_dynamic_cast(obj, TYPE_MEMORY_BACKEND)) { 1189 HostMemoryBackend *backend = MEMORY_BACKEND(obj); 1190 long hpsize = host_memory_backend_pagesize(backend); 1191 1192 if (host_memory_backend_is_mapped(backend) && (hpsize < *hpsize_min)) { 1193 *hpsize_min = hpsize; 1194 } 1195 } 1196 1197 return 0; 1198 } 1199 find_max_backend_pagesize(Object * obj,void * opaque)1200 static int find_max_backend_pagesize(Object *obj, void *opaque) 1201 { 1202 long *hpsize_max = opaque; 1203 1204 if (object_dynamic_cast(obj, TYPE_MEMORY_BACKEND)) { 1205 HostMemoryBackend *backend = MEMORY_BACKEND(obj); 1206 long hpsize = host_memory_backend_pagesize(backend); 1207 1208 if (host_memory_backend_is_mapped(backend) && (hpsize > *hpsize_max)) { 1209 *hpsize_max = hpsize; 1210 } 1211 } 1212 1213 return 0; 1214 } 1215 1216 /* 1217 * TODO: We assume right now that all mapped host memory backends are 1218 * used as RAM, however some might be used for different purposes. 1219 */ qemu_minrampagesize(void)1220 long qemu_minrampagesize(void) 1221 { 1222 long hpsize = LONG_MAX; 1223 Object *memdev_root = object_resolve_path("/objects", NULL); 1224 1225 object_child_foreach(memdev_root, find_min_backend_pagesize, &hpsize); 1226 return hpsize; 1227 } 1228 qemu_maxrampagesize(void)1229 long qemu_maxrampagesize(void) 1230 { 1231 long pagesize = 0; 1232 Object *memdev_root = object_resolve_path("/objects", NULL); 1233 1234 object_child_foreach(memdev_root, find_max_backend_pagesize, &pagesize); 1235 return pagesize; 1236 } 1237 1238 #ifdef CONFIG_POSIX get_file_size(int fd)1239 static int64_t get_file_size(int fd) 1240 { 1241 int64_t size; 1242 #if defined(__linux__) 1243 struct stat st; 1244 1245 if (fstat(fd, &st) < 0) { 1246 return -errno; 1247 } 1248 1249 /* Special handling for devdax character devices */ 1250 if (S_ISCHR(st.st_mode)) { 1251 g_autofree char *subsystem_path = NULL; 1252 g_autofree char *subsystem = NULL; 1253 1254 subsystem_path = g_strdup_printf("/sys/dev/char/%d:%d/subsystem", 1255 major(st.st_rdev), minor(st.st_rdev)); 1256 subsystem = g_file_read_link(subsystem_path, NULL); 1257 1258 if (subsystem && g_str_has_suffix(subsystem, "/dax")) { 1259 g_autofree char *size_path = NULL; 1260 g_autofree char *size_str = NULL; 1261 1262 size_path = g_strdup_printf("/sys/dev/char/%d:%d/size", 1263 major(st.st_rdev), minor(st.st_rdev)); 1264 1265 if (g_file_get_contents(size_path, &size_str, NULL, NULL)) { 1266 return g_ascii_strtoll(size_str, NULL, 0); 1267 } 1268 } 1269 } 1270 #endif /* defined(__linux__) */ 1271 1272 /* st.st_size may be zero for special files yet lseek(2) works */ 1273 size = lseek(fd, 0, SEEK_END); 1274 if (size < 0) { 1275 return -errno; 1276 } 1277 return size; 1278 } 1279 get_file_align(int fd)1280 static int64_t get_file_align(int fd) 1281 { 1282 int64_t align = -1; 1283 #if defined(__linux__) && defined(CONFIG_LIBDAXCTL) 1284 struct stat st; 1285 1286 if (fstat(fd, &st) < 0) { 1287 return -errno; 1288 } 1289 1290 /* Special handling for devdax character devices */ 1291 if (S_ISCHR(st.st_mode)) { 1292 g_autofree char *path = NULL; 1293 g_autofree char *rpath = NULL; 1294 struct daxctl_ctx *ctx; 1295 struct daxctl_region *region; 1296 int rc = 0; 1297 1298 path = g_strdup_printf("/sys/dev/char/%d:%d", 1299 major(st.st_rdev), 
#ifdef CONFIG_POSIX
static int64_t get_file_size(int fd)
{
    int64_t size;
#if defined(__linux__)
    struct stat st;

    if (fstat(fd, &st) < 0) {
        return -errno;
    }

    /* Special handling for devdax character devices */
    if (S_ISCHR(st.st_mode)) {
        g_autofree char *subsystem_path = NULL;
        g_autofree char *subsystem = NULL;

        subsystem_path = g_strdup_printf("/sys/dev/char/%d:%d/subsystem",
                                         major(st.st_rdev), minor(st.st_rdev));
        subsystem = g_file_read_link(subsystem_path, NULL);

        if (subsystem && g_str_has_suffix(subsystem, "/dax")) {
            g_autofree char *size_path = NULL;
            g_autofree char *size_str = NULL;

            size_path = g_strdup_printf("/sys/dev/char/%d:%d/size",
                                        major(st.st_rdev), minor(st.st_rdev));

            if (g_file_get_contents(size_path, &size_str, NULL, NULL)) {
                return g_ascii_strtoll(size_str, NULL, 0);
            }
        }
    }
#endif /* defined(__linux__) */

    /* st.st_size may be zero for special files yet lseek(2) works */
    size = lseek(fd, 0, SEEK_END);
    if (size < 0) {
        return -errno;
    }
    return size;
}

static int64_t get_file_align(int fd)
{
    int64_t align = -1;
#if defined(__linux__) && defined(CONFIG_LIBDAXCTL)
    struct stat st;

    if (fstat(fd, &st) < 0) {
        return -errno;
    }

    /* Special handling for devdax character devices */
    if (S_ISCHR(st.st_mode)) {
        g_autofree char *path = NULL;
        g_autofree char *rpath = NULL;
        struct daxctl_ctx *ctx;
        struct daxctl_region *region;
        int rc = 0;

        path = g_strdup_printf("/sys/dev/char/%d:%d",
                               major(st.st_rdev), minor(st.st_rdev));
        rpath = realpath(path, NULL);
        if (!rpath) {
            return -errno;
        }

        rc = daxctl_new(&ctx);
        if (rc) {
            return -1;
        }

        daxctl_region_foreach(ctx, region) {
            if (strstr(rpath, daxctl_region_get_path(region))) {
                align = daxctl_region_get_align(region);
                break;
            }
        }
        daxctl_unref(ctx);
    }
#endif /* defined(__linux__) && defined(CONFIG_LIBDAXCTL) */

    return align;
}

static int file_ram_open(const char *path,
                         const char *region_name,
                         bool readonly,
                         bool *created)
{
    char *filename;
    char *sanitized_name;
    char *c;
    int fd = -1;

    *created = false;
    for (;;) {
        fd = open(path, readonly ? O_RDONLY : O_RDWR);
        if (fd >= 0) {
            /*
             * open(O_RDONLY) won't fail with EISDIR. Check manually if we
             * opened a directory and fail similarly to how we fail ENOENT
             * in readonly mode. Note that mkstemp() would imply O_RDWR.
             */
            if (readonly) {
                struct stat file_stat;

                if (fstat(fd, &file_stat)) {
                    close(fd);
                    if (errno == EINTR) {
                        continue;
                    }
                    return -errno;
                } else if (S_ISDIR(file_stat.st_mode)) {
                    close(fd);
                    return -EISDIR;
                }
            }
            /* @path names an existing file, use it */
            break;
        }
        if (errno == ENOENT) {
            if (readonly) {
                /* Refuse to create new, readonly files. */
                return -ENOENT;
            }
            /* @path names a file that doesn't exist, create it */
            fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644);
            if (fd >= 0) {
                *created = true;
                break;
            }
        } else if (errno == EISDIR) {
            /* @path names a directory, create a file there */
            /* Make name safe to use with mkstemp by replacing '/' with '_'. */
            sanitized_name = g_strdup(region_name);
            for (c = sanitized_name; *c != '\0'; c++) {
                if (*c == '/') {
                    *c = '_';
                }
            }

            filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                                       sanitized_name);
            g_free(sanitized_name);

            fd = mkstemp(filename);
            if (fd >= 0) {
                unlink(filename);
                g_free(filename);
                break;
            }
            g_free(filename);
        }
        if (errno != EEXIST && errno != EINTR) {
            return -errno;
        }
        /*
         * Try again on EINTR and EEXIST. The latter happens when
         * something else creates the file between our two open().
         */
    }

    return fd;
}
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            int fd,
                            bool truncate,
                            off_t offset,
                            Error **errp)
{
    uint32_t qemu_map_flags;
    void *area;

    block->page_size = qemu_fd_getpagesize(fd);
    if (block->mr->align % block->page_size) {
        error_setg(errp, "alignment 0x%" PRIx64
                   " must be multiples of page size 0x%zx",
                   block->mr->align, block->page_size);
        return NULL;
    } else if (block->mr->align && !is_power_of_2(block->mr->align)) {
        error_setg(errp, "alignment 0x%" PRIx64
                   " must be a power of two", block->mr->align);
        return NULL;
    } else if (offset % block->page_size) {
        error_setg(errp, "offset 0x%" PRIx64
                   " must be multiples of page size 0x%zx",
                   offset, block->page_size);
        return NULL;
    }
    block->mr->align = MAX(block->page_size, block->mr->align);
#if defined(__s390x__)
    if (kvm_enabled()) {
        block->mr->align = MAX(block->mr->align, QEMU_VMALLOC_ALIGN);
    }
#endif

    if (memory < block->page_size) {
        error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
                   "or larger than page size 0x%zx",
                   memory, block->page_size);
        return NULL;
    }

    memory = ROUND_UP(memory, block->page_size);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     *
     * Do not truncate the non-empty backend file to avoid corrupting
     * the existing data in the file. Disabling shrinking is not
     * enough. For example, the current vNVDIMM implementation stores
     * the guest NVDIMM labels at the end of the backend file. If the
     * backend file is later extended, QEMU will not be able to find
     * those labels. Therefore, extending the non-empty backend file
     * is disabled as well.
     */
    if (truncate && ftruncate(fd, offset + memory)) {
        perror("ftruncate");
    }

    qemu_map_flags = (block->flags & RAM_READONLY) ? QEMU_MAP_READONLY : 0;
    qemu_map_flags |= (block->flags & RAM_SHARED) ? QEMU_MAP_SHARED : 0;
    qemu_map_flags |= (block->flags & RAM_PMEM) ? QEMU_MAP_SYNC : 0;
    qemu_map_flags |= (block->flags & RAM_NORESERVE) ? QEMU_MAP_NORESERVE : 0;
    area = qemu_ram_mmap(fd, memory, block->mr->align, qemu_map_flags, offset);
    if (area == MAP_FAILED) {
        error_setg_errno(errp, errno,
                         "unable to map backing store for guest RAM");
        return NULL;
    }

    block->fd = fd;
    block->fd_offset = offset;
    return area;
}
#endif
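/*
 * For example, with a hugetlbfs file backed by 2 MiB pages,
 * qemu_fd_getpagesize() reports 2 MiB, so mr->align and offset must be
 * 2 MiB multiples and the mapping size is rounded up to a 2 MiB
 * boundary before qemu_ram_mmap().
 */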
/* Allocate space within the ram_addr_t space that governs the
 * dirty bitmaps.
 * Called with the ramlist lock held.
 */
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
        return 0;
    }

    RAMBLOCK_FOREACH(block) {
        ram_addr_t candidate, next = RAM_ADDR_MAX;

        /* Align blocks to start on a 'long' in the bitmap
         * which makes the bitmap sync'ing take the fast path.
         */
        candidate = block->offset + block->max_length;
        candidate = ROUND_UP(candidate, BITS_PER_LONG << TARGET_PAGE_BITS);

        /* Search for the closest following block
         * and find the gap.
         */
        RAMBLOCK_FOREACH(next_block) {
            if (next_block->offset >= candidate) {
                next = MIN(next, next_block->offset);
            }
        }

        /* If it fits remember our place and remember the size
         * of gap, but keep going so that we might find a smaller
         * gap to fill so avoiding fragmentation.
         */
        if (next - candidate >= size && next - candidate < mingap) {
            offset = candidate;
            mingap = next - candidate;
        }

        trace_find_ram_offset_loop(size, candidate, offset, next, mingap);
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    trace_find_ram_offset(size, offset);

    return offset;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    if (!machine_dump_guest_core(current_machine)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                    "but dump-guest-core=off specified\n");
        }
    }
}

const char *qemu_ram_get_idstr(RAMBlock *rb)
{
    return rb->idstr;
}

void *qemu_ram_get_host_addr(RAMBlock *rb)
{
    return rb->host;
}

ram_addr_t qemu_ram_get_offset(RAMBlock *rb)
{
    return rb->offset;
}

ram_addr_t qemu_ram_get_used_length(RAMBlock *rb)
{
    return rb->used_length;
}

ram_addr_t qemu_ram_get_max_length(RAMBlock *rb)
{
    return rb->max_length;
}

bool qemu_ram_is_shared(RAMBlock *rb)
{
    return rb->flags & RAM_SHARED;
}

bool qemu_ram_is_noreserve(RAMBlock *rb)
{
    return rb->flags & RAM_NORESERVE;
}

/* Note: Only set at the start of postcopy */
bool qemu_ram_is_uf_zeroable(RAMBlock *rb)
{
    return rb->flags & RAM_UF_ZEROPAGE;
}

void qemu_ram_set_uf_zeroable(RAMBlock *rb)
{
    rb->flags |= RAM_UF_ZEROPAGE;
}

bool qemu_ram_is_migratable(RAMBlock *rb)
{
    return rb->flags & RAM_MIGRATABLE;
}

void qemu_ram_set_migratable(RAMBlock *rb)
{
    rb->flags |= RAM_MIGRATABLE;
}

void qemu_ram_unset_migratable(RAMBlock *rb)
{
    rb->flags &= ~RAM_MIGRATABLE;
}

bool qemu_ram_is_named_file(RAMBlock *rb)
{
    return rb->flags & RAM_NAMED_FILE;
}

int qemu_ram_get_fd(RAMBlock *rb)
{
    return rb->fd;
}
/* Called with the BQL held. */
void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev)
{
    RAMBlock *block;

    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    RCU_READ_LOCK_GUARD();
    RAMBLOCK_FOREACH(block) {
        if (block != new_block &&
            !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
}

/* Called with the BQL held. */
void qemu_ram_unset_idstr(RAMBlock *block)
{
    /* FIXME: arch_init.c assumes that this is not called throughout
     * migration.  Ignore the problem since hot-unplug during migration
     * does not work anyway.
     */
    if (block) {
        memset(block->idstr, 0, sizeof(block->idstr));
    }
}

size_t qemu_ram_pagesize(RAMBlock *rb)
{
    return rb->page_size;
}

/* Returns the largest size of page in use */
size_t qemu_ram_pagesize_largest(void)
{
    RAMBlock *block;
    size_t largest = 0;

    RAMBLOCK_FOREACH(block) {
        largest = MAX(largest, qemu_ram_pagesize(block));
    }

    return largest;
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    if (!machine_mem_merge(current_machine)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}
/*
 * Resizing RAM while migrating can result in the migration being canceled.
 * Care has to be taken if the guest might have already detected the memory.
 *
 * As the memory core doesn't know how the memory is accessed, it is up to
 * the resize callback to update device state and/or add assertions to detect
 * misuse, if necessary.
 */
int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp)
{
    const ram_addr_t oldsize = block->used_length;
    const ram_addr_t unaligned_size = newsize;

    assert(block);

    newsize = TARGET_PAGE_ALIGN(newsize);
    newsize = REAL_HOST_PAGE_ALIGN(newsize);

    if (block->used_length == newsize) {
        /*
         * We don't have to resize the ram block (which only knows aligned
         * sizes), however, we have to notify if the unaligned size changed.
         */
        if (unaligned_size != memory_region_size(block->mr)) {
            memory_region_set_size(block->mr, unaligned_size);
            if (block->resized) {
                block->resized(block->idstr, unaligned_size, block->host);
            }
        }
        return 0;
    }

    if (!(block->flags & RAM_RESIZEABLE)) {
        error_setg_errno(errp, EINVAL,
                         "Size mismatch: %s: 0x" RAM_ADDR_FMT
                         " != 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->used_length);
        return -EINVAL;
    }

    if (block->max_length < newsize) {
        error_setg_errno(errp, EINVAL,
                         "Size too large: %s: 0x" RAM_ADDR_FMT
                         " > 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->max_length);
        return -EINVAL;
    }

    /* Notify before modifying the ram block and touching the bitmaps. */
    if (block->host) {
        ram_block_notify_resize(block->host, oldsize, newsize);
    }

    cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
    block->used_length = newsize;
    cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
                                        DIRTY_CLIENTS_ALL);
    memory_region_set_size(block->mr, unaligned_size);
    if (block->resized) {
        block->resized(block->idstr, unaligned_size, block->host);
    }
    return 0;
}
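/*
 * Note the double alignment above: growing a RAM_RESIZEABLE block by,
 * say, one byte leaves used_length untouched whenever the target- and
 * host-page-aligned size is unchanged; only the MemoryRegion size and
 * the resized() callback observe the new unaligned value.
 */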
/*
 * Trigger sync on the given ram block for range [start, start + length]
 * with the backing store if one is available.
 * Otherwise this is a no-op.
 * @Note: this is supposed to be a synchronous op.
 */
void qemu_ram_msync(RAMBlock *block, ram_addr_t start, ram_addr_t length)
{
    /* The requested range should fit in within the block range */
    g_assert((start + length) <= block->used_length);

#ifdef CONFIG_LIBPMEM
    /* The lack of support for pmem should not block the sync */
    if (ramblock_is_pmem(block)) {
        void *addr = ramblock_ptr(block, start);
        pmem_persist(addr, length);
        return;
    }
#endif
    if (block->fd >= 0) {
        /*
         * In case there is no support for PMEM or the memory has not been
         * specified as persistent (or is not one) - use msync.
         * Less optimal but still achieves the same goal.
         */
        void *addr = ramblock_ptr(block, start);
        if (qemu_msync(addr, length, block->fd)) {
            warn_report("%s: failed to sync memory range: start: "
                        RAM_ADDR_FMT " length: " RAM_ADDR_FMT,
                        __func__, start, length);
        }
    }
}

/* Called with ram_list.mutex held */
static void dirty_memory_extend(ram_addr_t new_ram_size)
{
    unsigned int old_num_blocks = ram_list.num_dirty_blocks;
    unsigned int new_num_blocks = DIV_ROUND_UP(new_ram_size,
                                               DIRTY_MEMORY_BLOCK_SIZE);
    int i;

    /* Only need to extend if block count increased */
    if (new_num_blocks <= old_num_blocks) {
        return;
    }

    for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
        DirtyMemoryBlocks *old_blocks;
        DirtyMemoryBlocks *new_blocks;
        int j;

        old_blocks = qatomic_rcu_read(&ram_list.dirty_memory[i]);
        new_blocks = g_malloc(sizeof(*new_blocks) +
                              sizeof(new_blocks->blocks[0]) * new_num_blocks);

        if (old_num_blocks) {
            memcpy(new_blocks->blocks, old_blocks->blocks,
                   old_num_blocks * sizeof(old_blocks->blocks[0]));
        }

        for (j = old_num_blocks; j < new_num_blocks; j++) {
            new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE);
        }

        qatomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);

        if (old_blocks) {
            g_free_rcu(old_blocks, rcu);
        }
    }

    ram_list.num_dirty_blocks = new_num_blocks;
}
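/*
 * Extension walkthrough: only the pointer array grows.  Existing bitmap
 * blocks are carried over by the memcpy() of pointers, fresh blocks are
 * allocated for the added pages, and readers racing with the update keep
 * using the old DirtyMemoryBlocks until g_free_rcu() reclaims it after a
 * grace period.
 */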
Unlike QTAILQ,
1893 * QLIST (which has an RCU-friendly variant) does not have insertion at
1894 * tail, so save the last element in last_block.
1895 */
1896 RAMBLOCK_FOREACH(block) {
1897 last_block = block;
1898 if (block->max_length < new_block->max_length) {
1899 break;
1900 }
1901 }
1902 if (block) {
1903 QLIST_INSERT_BEFORE_RCU(block, new_block, next);
1904 } else if (last_block) {
1905 QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
1906 } else { /* list is empty */
1907 QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
1908 }
1909 ram_list.mru_block = NULL;
1910
1911 /* Write list before version */
1912 smp_wmb();
1913 ram_list.version++;
1914 qemu_mutex_unlock_ramlist();
1915
1916 cpu_physical_memory_set_dirty_range(new_block->offset,
1917 new_block->used_length,
1918 DIRTY_CLIENTS_ALL);
1919
1920 if (new_block->host) {
1921 qemu_ram_setup_dump(new_block->host, new_block->max_length);
1922 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
1923 /*
1924 * MADV_DONTFORK is also needed by KVM in the absence of a synchronous
1925 * MMU. Configure it unless the machine is a qtest server, in which case
1926 * KVM is not used and it may be forked (e.g. for fuzzing purposes).
1927 */
1928 if (!qtest_enabled()) {
1929 qemu_madvise(new_block->host, new_block->max_length,
1930 QEMU_MADV_DONTFORK);
1931 }
1932 ram_block_notify_add(new_block->host, new_block->used_length,
1933 new_block->max_length);
1934 }
1935 return;
1936
1937 out_free:
1938 if (free_on_error) {
1939 qemu_anon_ram_free(new_block->host, new_block->max_length);
1940 new_block->host = NULL;
1941 }
1942 }
1943
1944 #ifdef CONFIG_POSIX
1945 RAMBlock *qemu_ram_alloc_from_fd(ram_addr_t size, MemoryRegion *mr,
1946 uint32_t ram_flags, int fd, off_t offset,
1947 Error **errp)
1948 {
1949 RAMBlock *new_block;
1950 Error *local_err = NULL;
1951 int64_t file_size, file_align;
1952
1953 /* Only the following ram flags are supported for now.
*/ 1954 assert((ram_flags & ~(RAM_SHARED | RAM_PMEM | RAM_NORESERVE | 1955 RAM_PROTECTED | RAM_NAMED_FILE | RAM_READONLY | 1956 RAM_READONLY_FD | RAM_GUEST_MEMFD)) == 0); 1957 1958 if (xen_enabled()) { 1959 error_setg(errp, "-mem-path not supported with Xen"); 1960 return NULL; 1961 } 1962 1963 if (kvm_enabled() && !kvm_has_sync_mmu()) { 1964 error_setg(errp, 1965 "host lacks kvm mmu notifiers, -mem-path unsupported"); 1966 return NULL; 1967 } 1968 1969 size = TARGET_PAGE_ALIGN(size); 1970 size = REAL_HOST_PAGE_ALIGN(size); 1971 1972 file_size = get_file_size(fd); 1973 if (file_size && file_size < offset + size) { 1974 error_setg(errp, "%s backing store size 0x%" PRIx64 1975 " is too small for 'size' option 0x" RAM_ADDR_FMT 1976 " plus 'offset' option 0x%" PRIx64, 1977 memory_region_name(mr), file_size, size, 1978 (uint64_t)offset); 1979 return NULL; 1980 } 1981 1982 file_align = get_file_align(fd); 1983 if (file_align > 0 && file_align > mr->align) { 1984 error_setg(errp, "backing store align 0x%" PRIx64 1985 " is larger than 'align' option 0x%" PRIx64, 1986 file_align, mr->align); 1987 return NULL; 1988 } 1989 1990 new_block = g_malloc0(sizeof(*new_block)); 1991 new_block->mr = mr; 1992 new_block->used_length = size; 1993 new_block->max_length = size; 1994 new_block->flags = ram_flags; 1995 new_block->guest_memfd = -1; 1996 new_block->host = file_ram_alloc(new_block, size, fd, !file_size, offset, 1997 errp); 1998 if (!new_block->host) { 1999 g_free(new_block); 2000 return NULL; 2001 } 2002 2003 ram_block_add(new_block, &local_err); 2004 if (local_err) { 2005 g_free(new_block); 2006 error_propagate(errp, local_err); 2007 return NULL; 2008 } 2009 return new_block; 2010 2011 } 2012 2013 qemu_ram_alloc_from_file(ram_addr_t size,MemoryRegion * mr,uint32_t ram_flags,const char * mem_path,off_t offset,Error ** errp)2014 RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr, 2015 uint32_t ram_flags, const char *mem_path, 2016 off_t offset, Error **errp) 2017 { 2018 int fd; 2019 bool created; 2020 RAMBlock *block; 2021 2022 fd = file_ram_open(mem_path, memory_region_name(mr), 2023 !!(ram_flags & RAM_READONLY_FD), &created); 2024 if (fd < 0) { 2025 error_setg_errno(errp, -fd, "can't open backing store %s for guest RAM", 2026 mem_path); 2027 if (!(ram_flags & RAM_READONLY_FD) && !(ram_flags & RAM_SHARED) && 2028 fd == -EACCES) { 2029 /* 2030 * If we can open the file R/O (note: will never create a new file) 2031 * and we are dealing with a private mapping, there are still ways 2032 * to consume such files and get RAM instead of ROM. 
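 *
 * (This is the "VM templating" case referenced in the hint below: a
 * read-only template file backs a private, copy-on-write RAM mapping.)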
2033 */ 2034 fd = file_ram_open(mem_path, memory_region_name(mr), true, 2035 &created); 2036 if (fd < 0) { 2037 return NULL; 2038 } 2039 assert(!created); 2040 close(fd); 2041 error_append_hint(errp, "Consider opening the backing store" 2042 " read-only but still creating writable RAM using" 2043 " '-object memory-backend-file,readonly=on,rom=off...'" 2044 " (see \"VM templating\" documentation)\n"); 2045 } 2046 return NULL; 2047 } 2048 2049 block = qemu_ram_alloc_from_fd(size, mr, ram_flags, fd, offset, errp); 2050 if (!block) { 2051 if (created) { 2052 unlink(mem_path); 2053 } 2054 close(fd); 2055 return NULL; 2056 } 2057 2058 return block; 2059 } 2060 #endif 2061 2062 static qemu_ram_alloc_internal(ram_addr_t size,ram_addr_t max_size,void (* resized)(const char *,uint64_t length,void * host),void * host,uint32_t ram_flags,MemoryRegion * mr,Error ** errp)2063 RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size, 2064 void (*resized)(const char*, 2065 uint64_t length, 2066 void *host), 2067 void *host, uint32_t ram_flags, 2068 MemoryRegion *mr, Error **errp) 2069 { 2070 RAMBlock *new_block; 2071 Error *local_err = NULL; 2072 int align; 2073 2074 assert((ram_flags & ~(RAM_SHARED | RAM_RESIZEABLE | RAM_PREALLOC | 2075 RAM_NORESERVE | RAM_GUEST_MEMFD)) == 0); 2076 assert(!host ^ (ram_flags & RAM_PREALLOC)); 2077 2078 align = qemu_real_host_page_size(); 2079 align = MAX(align, TARGET_PAGE_SIZE); 2080 size = ROUND_UP(size, align); 2081 max_size = ROUND_UP(max_size, align); 2082 2083 new_block = g_malloc0(sizeof(*new_block)); 2084 new_block->mr = mr; 2085 new_block->resized = resized; 2086 new_block->used_length = size; 2087 new_block->max_length = max_size; 2088 assert(max_size >= size); 2089 new_block->fd = -1; 2090 new_block->guest_memfd = -1; 2091 new_block->page_size = qemu_real_host_page_size(); 2092 new_block->host = host; 2093 new_block->flags = ram_flags; 2094 ram_block_add(new_block, &local_err); 2095 if (local_err) { 2096 g_free(new_block); 2097 error_propagate(errp, local_err); 2098 return NULL; 2099 } 2100 return new_block; 2101 } 2102 qemu_ram_alloc_from_ptr(ram_addr_t size,void * host,MemoryRegion * mr,Error ** errp)2103 RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host, 2104 MemoryRegion *mr, Error **errp) 2105 { 2106 return qemu_ram_alloc_internal(size, size, NULL, host, RAM_PREALLOC, mr, 2107 errp); 2108 } 2109 qemu_ram_alloc(ram_addr_t size,uint32_t ram_flags,MemoryRegion * mr,Error ** errp)2110 RAMBlock *qemu_ram_alloc(ram_addr_t size, uint32_t ram_flags, 2111 MemoryRegion *mr, Error **errp) 2112 { 2113 assert((ram_flags & ~(RAM_SHARED | RAM_NORESERVE | RAM_GUEST_MEMFD)) == 0); 2114 return qemu_ram_alloc_internal(size, size, NULL, NULL, ram_flags, mr, errp); 2115 } 2116 qemu_ram_alloc_resizeable(ram_addr_t size,ram_addr_t maxsz,void (* resized)(const char *,uint64_t length,void * host),MemoryRegion * mr,Error ** errp)2117 RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz, 2118 void (*resized)(const char*, 2119 uint64_t length, 2120 void *host), 2121 MemoryRegion *mr, Error **errp) 2122 { 2123 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, 2124 RAM_RESIZEABLE, mr, errp); 2125 } 2126 reclaim_ramblock(RAMBlock * block)2127 static void reclaim_ramblock(RAMBlock *block) 2128 { 2129 if (block->flags & RAM_PREALLOC) { 2130 ; 2131 } else if (xen_enabled()) { 2132 xen_invalidate_map_cache_entry(block->host); 2133 #ifndef _WIN32 2134 } else if (block->fd >= 0) { 2135 qemu_ram_munmap(block->fd, block->host, 
block->max_length); 2136 close(block->fd); 2137 #endif 2138 } else { 2139 qemu_anon_ram_free(block->host, block->max_length); 2140 } 2141 2142 if (block->guest_memfd >= 0) { 2143 close(block->guest_memfd); 2144 ram_block_discard_require(false); 2145 } 2146 2147 g_free(block); 2148 } 2149 qemu_ram_free(RAMBlock * block)2150 void qemu_ram_free(RAMBlock *block) 2151 { 2152 if (!block) { 2153 return; 2154 } 2155 2156 if (block->host) { 2157 ram_block_notify_remove(block->host, block->used_length, 2158 block->max_length); 2159 } 2160 2161 qemu_mutex_lock_ramlist(); 2162 QLIST_REMOVE_RCU(block, next); 2163 ram_list.mru_block = NULL; 2164 /* Write list before version */ 2165 smp_wmb(); 2166 ram_list.version++; 2167 call_rcu(block, reclaim_ramblock, rcu); 2168 qemu_mutex_unlock_ramlist(); 2169 } 2170 2171 #ifndef _WIN32 qemu_ram_remap(ram_addr_t addr,ram_addr_t length)2172 void qemu_ram_remap(ram_addr_t addr, ram_addr_t length) 2173 { 2174 RAMBlock *block; 2175 ram_addr_t offset; 2176 int flags; 2177 void *area, *vaddr; 2178 int prot; 2179 2180 RAMBLOCK_FOREACH(block) { 2181 offset = addr - block->offset; 2182 if (offset < block->max_length) { 2183 vaddr = ramblock_ptr(block, offset); 2184 if (block->flags & RAM_PREALLOC) { 2185 ; 2186 } else if (xen_enabled()) { 2187 abort(); 2188 } else { 2189 flags = MAP_FIXED; 2190 flags |= block->flags & RAM_SHARED ? 2191 MAP_SHARED : MAP_PRIVATE; 2192 flags |= block->flags & RAM_NORESERVE ? MAP_NORESERVE : 0; 2193 prot = PROT_READ; 2194 prot |= block->flags & RAM_READONLY ? 0 : PROT_WRITE; 2195 if (block->fd >= 0) { 2196 area = mmap(vaddr, length, prot, flags, block->fd, 2197 offset + block->fd_offset); 2198 } else { 2199 flags |= MAP_ANONYMOUS; 2200 area = mmap(vaddr, length, prot, flags, -1, 0); 2201 } 2202 if (area != vaddr) { 2203 error_report("Could not remap addr: " 2204 RAM_ADDR_FMT "@" RAM_ADDR_FMT "", 2205 length, addr); 2206 exit(1); 2207 } 2208 memory_try_enable_merging(vaddr, length); 2209 qemu_ram_setup_dump(vaddr, length); 2210 } 2211 } 2212 } 2213 } 2214 #endif /* !_WIN32 */ 2215 2216 /* 2217 * Return a host pointer to guest's ram. 2218 * For Xen, foreign mappings get created if they don't already exist. 2219 * 2220 * @block: block for the RAM to lookup (optional and may be NULL). 2221 * @addr: address within the memory region. 2222 * @size: pointer to requested size (optional and may be NULL). 2223 * size may get modified and return a value smaller than 2224 * what was requested. 2225 * @lock: wether to lock the mapping in xen-mapcache until invalidated. 2226 * @is_write: hint wether to map RW or RO in the xen-mapcache. 2227 * (optional and may always be set to true). 2228 * 2229 * Called within RCU critical section. 2230 */ qemu_ram_ptr_length(RAMBlock * block,ram_addr_t addr,hwaddr * size,bool lock,bool is_write)2231 static void *qemu_ram_ptr_length(RAMBlock *block, ram_addr_t addr, 2232 hwaddr *size, bool lock, 2233 bool is_write) 2234 { 2235 hwaddr len = 0; 2236 2237 if (size && *size == 0) { 2238 return NULL; 2239 } 2240 2241 if (block == NULL) { 2242 block = qemu_get_ram_block(addr); 2243 addr -= block->offset; 2244 } 2245 if (size) { 2246 *size = MIN(*size, block->max_length - addr); 2247 len = *size; 2248 } 2249 2250 if (xen_enabled() && block->host == NULL) { 2251 /* We need to check if the requested address is in the RAM 2252 * because we don't want to map the entire memory in QEMU. 2253 * In that case just map the requested area. 
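 *
 * (As the two branches below show: for the main RAM region only the
 * requested slice is mapped, while any other block is mapped in its
 * entirety once and the mapping is cached in block->host.)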
2254 */ 2255 if (xen_mr_is_memory(block->mr)) { 2256 return xen_map_cache(block->mr, block->offset + addr, 2257 len, block->offset, 2258 lock, lock, is_write); 2259 } 2260 2261 block->host = xen_map_cache(block->mr, block->offset, 2262 block->max_length, 2263 block->offset, 2264 1, lock, is_write); 2265 } 2266 2267 return ramblock_ptr(block, addr); 2268 } 2269 2270 /* 2271 * Return a host pointer to ram allocated with qemu_ram_alloc. 2272 * This should not be used for general purpose DMA. Use address_space_map 2273 * or address_space_rw instead. For local memory (e.g. video ram) that the 2274 * device owns, use memory_region_get_ram_ptr. 2275 * 2276 * Called within RCU critical section. 2277 */ qemu_map_ram_ptr(RAMBlock * ram_block,ram_addr_t addr)2278 void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr) 2279 { 2280 return qemu_ram_ptr_length(ram_block, addr, NULL, false, true); 2281 } 2282 2283 /* Return the offset of a hostpointer within a ramblock */ qemu_ram_block_host_offset(RAMBlock * rb,void * host)2284 ram_addr_t qemu_ram_block_host_offset(RAMBlock *rb, void *host) 2285 { 2286 ram_addr_t res = (uint8_t *)host - (uint8_t *)rb->host; 2287 assert((uintptr_t)host >= (uintptr_t)rb->host); 2288 assert(res < rb->max_length); 2289 2290 return res; 2291 } 2292 qemu_ram_block_from_host(void * ptr,bool round_offset,ram_addr_t * offset)2293 RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset, 2294 ram_addr_t *offset) 2295 { 2296 RAMBlock *block; 2297 uint8_t *host = ptr; 2298 2299 if (xen_enabled()) { 2300 ram_addr_t ram_addr; 2301 RCU_READ_LOCK_GUARD(); 2302 ram_addr = xen_ram_addr_from_mapcache(ptr); 2303 if (ram_addr == RAM_ADDR_INVALID) { 2304 return NULL; 2305 } 2306 2307 block = qemu_get_ram_block(ram_addr); 2308 if (block) { 2309 *offset = ram_addr - block->offset; 2310 } 2311 return block; 2312 } 2313 2314 RCU_READ_LOCK_GUARD(); 2315 block = qatomic_rcu_read(&ram_list.mru_block); 2316 if (block && block->host && host - block->host < block->max_length) { 2317 goto found; 2318 } 2319 2320 RAMBLOCK_FOREACH(block) { 2321 /* This case append when the block is not mapped. */ 2322 if (block->host == NULL) { 2323 continue; 2324 } 2325 if (host - block->host < block->max_length) { 2326 goto found; 2327 } 2328 } 2329 2330 return NULL; 2331 2332 found: 2333 *offset = (host - block->host); 2334 if (round_offset) { 2335 *offset &= TARGET_PAGE_MASK; 2336 } 2337 return block; 2338 } 2339 2340 /* 2341 * Finds the named RAMBlock 2342 * 2343 * name: The name of RAMBlock to find 2344 * 2345 * Returns: RAMBlock (or NULL if not found) 2346 */ qemu_ram_block_by_name(const char * name)2347 RAMBlock *qemu_ram_block_by_name(const char *name) 2348 { 2349 RAMBlock *block; 2350 2351 RAMBLOCK_FOREACH(block) { 2352 if (!strcmp(name, block->idstr)) { 2353 return block; 2354 } 2355 } 2356 2357 return NULL; 2358 } 2359 2360 /* 2361 * Some of the system routines need to translate from a host pointer 2362 * (typically a TLB entry) back to a ram offset. 
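 *
 * Round-trip sketch (editorial illustration, assuming the caller holds
 * the RCU read lock and 'block' is any RAMBlock with a host mapping):
 *
 *     void *p = qemu_map_ram_ptr(block, 0);
 *     assert(qemu_ram_addr_from_host(p) == block->offset);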
2363 */ qemu_ram_addr_from_host(void * ptr)2364 ram_addr_t qemu_ram_addr_from_host(void *ptr) 2365 { 2366 RAMBlock *block; 2367 ram_addr_t offset; 2368 2369 block = qemu_ram_block_from_host(ptr, false, &offset); 2370 if (!block) { 2371 return RAM_ADDR_INVALID; 2372 } 2373 2374 return block->offset + offset; 2375 } 2376 qemu_ram_addr_from_host_nofail(void * ptr)2377 ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr) 2378 { 2379 ram_addr_t ram_addr; 2380 2381 ram_addr = qemu_ram_addr_from_host(ptr); 2382 if (ram_addr == RAM_ADDR_INVALID) { 2383 error_report("Bad ram pointer %p", ptr); 2384 abort(); 2385 } 2386 return ram_addr; 2387 } 2388 2389 static MemTxResult flatview_read(FlatView *fv, hwaddr addr, 2390 MemTxAttrs attrs, void *buf, hwaddr len); 2391 static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs, 2392 const void *buf, hwaddr len); 2393 static bool flatview_access_valid(FlatView *fv, hwaddr addr, hwaddr len, 2394 bool is_write, MemTxAttrs attrs); 2395 subpage_read(void * opaque,hwaddr addr,uint64_t * data,unsigned len,MemTxAttrs attrs)2396 static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data, 2397 unsigned len, MemTxAttrs attrs) 2398 { 2399 subpage_t *subpage = opaque; 2400 uint8_t buf[8]; 2401 MemTxResult res; 2402 2403 #if defined(DEBUG_SUBPAGE) 2404 printf("%s: subpage %p len %u addr " HWADDR_FMT_plx "\n", __func__, 2405 subpage, len, addr); 2406 #endif 2407 res = flatview_read(subpage->fv, addr + subpage->base, attrs, buf, len); 2408 if (res) { 2409 return res; 2410 } 2411 *data = ldn_p(buf, len); 2412 return MEMTX_OK; 2413 } 2414 subpage_write(void * opaque,hwaddr addr,uint64_t value,unsigned len,MemTxAttrs attrs)2415 static MemTxResult subpage_write(void *opaque, hwaddr addr, 2416 uint64_t value, unsigned len, MemTxAttrs attrs) 2417 { 2418 subpage_t *subpage = opaque; 2419 uint8_t buf[8]; 2420 2421 #if defined(DEBUG_SUBPAGE) 2422 printf("%s: subpage %p len %u addr " HWADDR_FMT_plx 2423 " value %"PRIx64"\n", 2424 __func__, subpage, len, addr, value); 2425 #endif 2426 stn_p(buf, len, value); 2427 return flatview_write(subpage->fv, addr + subpage->base, attrs, buf, len); 2428 } 2429 subpage_accepts(void * opaque,hwaddr addr,unsigned len,bool is_write,MemTxAttrs attrs)2430 static bool subpage_accepts(void *opaque, hwaddr addr, 2431 unsigned len, bool is_write, 2432 MemTxAttrs attrs) 2433 { 2434 subpage_t *subpage = opaque; 2435 #if defined(DEBUG_SUBPAGE) 2436 printf("%s: subpage %p %c len %u addr " HWADDR_FMT_plx "\n", 2437 __func__, subpage, is_write ? 
'w' : 'r', len, addr); 2438 #endif 2439 2440 return flatview_access_valid(subpage->fv, addr + subpage->base, 2441 len, is_write, attrs); 2442 } 2443 2444 static const MemoryRegionOps subpage_ops = { 2445 .read_with_attrs = subpage_read, 2446 .write_with_attrs = subpage_write, 2447 .impl.min_access_size = 1, 2448 .impl.max_access_size = 8, 2449 .valid.min_access_size = 1, 2450 .valid.max_access_size = 8, 2451 .valid.accepts = subpage_accepts, 2452 .endianness = DEVICE_NATIVE_ENDIAN, 2453 }; 2454 subpage_register(subpage_t * mmio,uint32_t start,uint32_t end,uint16_t section)2455 static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end, 2456 uint16_t section) 2457 { 2458 int idx, eidx; 2459 2460 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE) 2461 return -1; 2462 idx = SUBPAGE_IDX(start); 2463 eidx = SUBPAGE_IDX(end); 2464 #if defined(DEBUG_SUBPAGE) 2465 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n", 2466 __func__, mmio, start, end, idx, eidx, section); 2467 #endif 2468 for (; idx <= eidx; idx++) { 2469 mmio->sub_section[idx] = section; 2470 } 2471 2472 return 0; 2473 } 2474 subpage_init(FlatView * fv,hwaddr base)2475 static subpage_t *subpage_init(FlatView *fv, hwaddr base) 2476 { 2477 subpage_t *mmio; 2478 2479 /* mmio->sub_section is set to PHYS_SECTION_UNASSIGNED with g_malloc0 */ 2480 mmio = g_malloc0(sizeof(subpage_t) + TARGET_PAGE_SIZE * sizeof(uint16_t)); 2481 mmio->fv = fv; 2482 mmio->base = base; 2483 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio, 2484 NULL, TARGET_PAGE_SIZE); 2485 mmio->iomem.subpage = true; 2486 #if defined(DEBUG_SUBPAGE) 2487 printf("%s: %p base " HWADDR_FMT_plx " len %08x\n", __func__, 2488 mmio, base, TARGET_PAGE_SIZE); 2489 #endif 2490 2491 return mmio; 2492 } 2493 dummy_section(PhysPageMap * map,FlatView * fv,MemoryRegion * mr)2494 static uint16_t dummy_section(PhysPageMap *map, FlatView *fv, MemoryRegion *mr) 2495 { 2496 assert(fv); 2497 MemoryRegionSection section = { 2498 .fv = fv, 2499 .mr = mr, 2500 .offset_within_address_space = 0, 2501 .offset_within_region = 0, 2502 .size = int128_2_64(), 2503 }; 2504 2505 return phys_section_add(map, §ion); 2506 } 2507 iotlb_to_section(CPUState * cpu,hwaddr index,MemTxAttrs attrs)2508 MemoryRegionSection *iotlb_to_section(CPUState *cpu, 2509 hwaddr index, MemTxAttrs attrs) 2510 { 2511 int asidx = cpu_asidx_from_attrs(cpu, attrs); 2512 CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx]; 2513 AddressSpaceDispatch *d = cpuas->memory_dispatch; 2514 int section_index = index & ~TARGET_PAGE_MASK; 2515 MemoryRegionSection *ret; 2516 2517 assert(section_index < d->map.sections_nb); 2518 ret = d->map.sections + section_index; 2519 assert(ret->mr); 2520 assert(ret->mr->ops); 2521 2522 return ret; 2523 } 2524 io_mem_init(void)2525 static void io_mem_init(void) 2526 { 2527 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL, 2528 NULL, UINT64_MAX); 2529 } 2530 address_space_dispatch_new(FlatView * fv)2531 AddressSpaceDispatch *address_space_dispatch_new(FlatView *fv) 2532 { 2533 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1); 2534 uint16_t n; 2535 2536 n = dummy_section(&d->map, fv, &io_mem_unassigned); 2537 assert(n == PHYS_SECTION_UNASSIGNED); 2538 2539 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 }; 2540 2541 return d; 2542 } 2543 address_space_dispatch_free(AddressSpaceDispatch * d)2544 void address_space_dispatch_free(AddressSpaceDispatch *d) 2545 { 2546 phys_sections_free(&d->map); 2547 g_free(d); 2548 } 2549 
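/*
 * Editorial sketch, not part of the original file: each FlatView owns one
 * dispatch. It is created empty (phys_map is a NIL pointer and section 0
 * is PHYS_SECTION_UNASSIGNED), so that lookups resolve to
 * io_mem_unassigned until the memory listener machinery populates the map:
 *
 *     AddressSpaceDispatch *d = address_space_dispatch_new(fv);
 *     ...listener callbacks add MemoryRegionSections to d->map...
 *     address_space_dispatch_free(d);
 */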
do_nothing(CPUState * cpu,run_on_cpu_data d)2550 static void do_nothing(CPUState *cpu, run_on_cpu_data d) 2551 { 2552 } 2553 tcg_log_global_after_sync(MemoryListener * listener)2554 static void tcg_log_global_after_sync(MemoryListener *listener) 2555 { 2556 CPUAddressSpace *cpuas; 2557 2558 /* Wait for the CPU to end the current TB. This avoids the following 2559 * incorrect race: 2560 * 2561 * vCPU migration 2562 * ---------------------- ------------------------- 2563 * TLB check -> slow path 2564 * notdirty_mem_write 2565 * write to RAM 2566 * mark dirty 2567 * clear dirty flag 2568 * TLB check -> fast path 2569 * read memory 2570 * write to RAM 2571 * 2572 * by pushing the migration thread's memory read after the vCPU thread has 2573 * written the memory. 2574 */ 2575 if (replay_mode == REPLAY_MODE_NONE) { 2576 /* 2577 * VGA can make calls to this function while updating the screen. 2578 * In record/replay mode this causes a deadlock, because 2579 * run_on_cpu waits for rr mutex. Therefore no races are possible 2580 * in this case and no need for making run_on_cpu when 2581 * record/replay is enabled. 2582 */ 2583 cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener); 2584 run_on_cpu(cpuas->cpu, do_nothing, RUN_ON_CPU_NULL); 2585 } 2586 } 2587 tcg_commit_cpu(CPUState * cpu,run_on_cpu_data data)2588 static void tcg_commit_cpu(CPUState *cpu, run_on_cpu_data data) 2589 { 2590 CPUAddressSpace *cpuas = data.host_ptr; 2591 2592 cpuas->memory_dispatch = address_space_to_dispatch(cpuas->as); 2593 tlb_flush(cpu); 2594 } 2595 tcg_commit(MemoryListener * listener)2596 static void tcg_commit(MemoryListener *listener) 2597 { 2598 CPUAddressSpace *cpuas; 2599 CPUState *cpu; 2600 2601 assert(tcg_enabled()); 2602 /* since each CPU stores ram addresses in its TLB cache, we must 2603 reset the modified entries */ 2604 cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener); 2605 cpu = cpuas->cpu; 2606 2607 /* 2608 * Defer changes to as->memory_dispatch until the cpu is quiescent. 2609 * Otherwise we race between (1) other cpu threads and (2) ongoing 2610 * i/o for the current cpu thread, with data cached by mmu_lookup(). 2611 * 2612 * In addition, queueing the work function will kick the cpu back to 2613 * the main loop, which will end the RCU critical section and reclaim 2614 * the memory data structures. 2615 * 2616 * That said, the listener is also called during realize, before 2617 * all of the tcg machinery for run-on is initialized: thus halt_cond. 
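 *
 * (While halt_cond is still NULL no vCPU thread can be running, so in
 * that case it is safe to update the dispatch pointer synchronously.)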
2618 */ 2619 if (cpu->halt_cond) { 2620 async_run_on_cpu(cpu, tcg_commit_cpu, RUN_ON_CPU_HOST_PTR(cpuas)); 2621 } else { 2622 tcg_commit_cpu(cpu, RUN_ON_CPU_HOST_PTR(cpuas)); 2623 } 2624 } 2625 memory_map_init(void)2626 static void memory_map_init(void) 2627 { 2628 system_memory = g_malloc(sizeof(*system_memory)); 2629 2630 memory_region_init(system_memory, NULL, "system", UINT64_MAX); 2631 address_space_init(&address_space_memory, system_memory, "memory"); 2632 2633 system_io = g_malloc(sizeof(*system_io)); 2634 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io", 2635 65536); 2636 address_space_init(&address_space_io, system_io, "I/O"); 2637 } 2638 get_system_memory(void)2639 MemoryRegion *get_system_memory(void) 2640 { 2641 return system_memory; 2642 } 2643 get_system_io(void)2644 MemoryRegion *get_system_io(void) 2645 { 2646 return system_io; 2647 } 2648 invalidate_and_set_dirty(MemoryRegion * mr,hwaddr addr,hwaddr length)2649 static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr, 2650 hwaddr length) 2651 { 2652 uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr); 2653 ram_addr_t ramaddr = memory_region_get_ram_addr(mr); 2654 2655 /* We know we're only called for RAM MemoryRegions */ 2656 assert(ramaddr != RAM_ADDR_INVALID); 2657 addr += ramaddr; 2658 2659 /* No early return if dirty_log_mask is or becomes 0, because 2660 * cpu_physical_memory_set_dirty_range will still call 2661 * xen_modified_memory. 2662 */ 2663 if (dirty_log_mask) { 2664 dirty_log_mask = 2665 cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask); 2666 } 2667 if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) { 2668 assert(tcg_enabled()); 2669 tb_invalidate_phys_range(addr, addr + length - 1); 2670 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE); 2671 } 2672 cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask); 2673 } 2674 memory_region_flush_rom_device(MemoryRegion * mr,hwaddr addr,hwaddr size)2675 void memory_region_flush_rom_device(MemoryRegion *mr, hwaddr addr, hwaddr size) 2676 { 2677 /* 2678 * In principle this function would work on other memory region types too, 2679 * but the ROM device use case is the only one where this operation is 2680 * necessary. Other memory regions should use the 2681 * address_space_read/write() APIs. 2682 */ 2683 assert(memory_region_is_romd(mr)); 2684 2685 invalidate_and_set_dirty(mr, addr, size); 2686 } 2687 memory_access_size(MemoryRegion * mr,unsigned l,hwaddr addr)2688 int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr) 2689 { 2690 unsigned access_size_max = mr->ops->valid.max_access_size; 2691 2692 /* Regions are assumed to support 1-4 byte accesses unless 2693 otherwise specified. */ 2694 if (access_size_max == 0) { 2695 access_size_max = 4; 2696 } 2697 2698 /* Bound the maximum access by the alignment of the address. */ 2699 if (!mr->ops->impl.unaligned) { 2700 unsigned align_size_max = addr & -addr; 2701 if (align_size_max != 0 && align_size_max < access_size_max) { 2702 access_size_max = align_size_max; 2703 } 2704 } 2705 2706 /* Don't attempt accesses larger than the maximum. 
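 *
 * Worked example (editorial): for an 8-byte access at an address whose
 * lowest set bit is 2 (addr & -addr == 2), on a region with
 * valid.max_access_size == 4 and impl.unaligned clear, the alignment
 * check above caps access_size_max at 2, so l becomes 2 below and
 * pow2floor() leaves it unchanged.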
*/ 2707 if (l > access_size_max) { 2708 l = access_size_max; 2709 } 2710 l = pow2floor(l); 2711 2712 return l; 2713 } 2714 prepare_mmio_access(MemoryRegion * mr)2715 bool prepare_mmio_access(MemoryRegion *mr) 2716 { 2717 bool release_lock = false; 2718 2719 if (!bql_locked()) { 2720 bql_lock(); 2721 release_lock = true; 2722 } 2723 if (mr->flush_coalesced_mmio) { 2724 qemu_flush_coalesced_mmio_buffer(); 2725 } 2726 2727 return release_lock; 2728 } 2729 2730 /** 2731 * flatview_access_allowed 2732 * @mr: #MemoryRegion to be accessed 2733 * @attrs: memory transaction attributes 2734 * @addr: address within that memory region 2735 * @len: the number of bytes to access 2736 * 2737 * Check if a memory transaction is allowed. 2738 * 2739 * Returns: true if transaction is allowed, false if denied. 2740 */ flatview_access_allowed(MemoryRegion * mr,MemTxAttrs attrs,hwaddr addr,hwaddr len)2741 static bool flatview_access_allowed(MemoryRegion *mr, MemTxAttrs attrs, 2742 hwaddr addr, hwaddr len) 2743 { 2744 if (likely(!attrs.memory)) { 2745 return true; 2746 } 2747 if (memory_region_is_ram(mr)) { 2748 return true; 2749 } 2750 qemu_log_mask(LOG_GUEST_ERROR, 2751 "Invalid access to non-RAM device at " 2752 "addr 0x%" HWADDR_PRIX ", size %" HWADDR_PRIu ", " 2753 "region '%s'\n", addr, len, memory_region_name(mr)); 2754 return false; 2755 } 2756 flatview_write_continue_step(MemTxAttrs attrs,const uint8_t * buf,hwaddr len,hwaddr mr_addr,hwaddr * l,MemoryRegion * mr)2757 static MemTxResult flatview_write_continue_step(MemTxAttrs attrs, 2758 const uint8_t *buf, 2759 hwaddr len, hwaddr mr_addr, 2760 hwaddr *l, MemoryRegion *mr) 2761 { 2762 if (!flatview_access_allowed(mr, attrs, mr_addr, *l)) { 2763 return MEMTX_ACCESS_ERROR; 2764 } 2765 2766 if (!memory_access_is_direct(mr, true)) { 2767 uint64_t val; 2768 MemTxResult result; 2769 bool release_lock = prepare_mmio_access(mr); 2770 2771 *l = memory_access_size(mr, *l, mr_addr); 2772 /* 2773 * XXX: could force current_cpu to NULL to avoid 2774 * potential bugs 2775 */ 2776 2777 /* 2778 * Assure Coverity (and ourselves) that we are not going to OVERRUN 2779 * the buffer by following ldn_he_p(). 2780 */ 2781 #ifdef QEMU_STATIC_ANALYSIS 2782 assert((*l == 1 && len >= 1) || 2783 (*l == 2 && len >= 2) || 2784 (*l == 4 && len >= 4) || 2785 (*l == 8 && len >= 8)); 2786 #endif 2787 val = ldn_he_p(buf, *l); 2788 result = memory_region_dispatch_write(mr, mr_addr, val, 2789 size_memop(*l), attrs); 2790 if (release_lock) { 2791 bql_unlock(); 2792 } 2793 2794 return result; 2795 } else { 2796 /* RAM case */ 2797 uint8_t *ram_ptr = qemu_ram_ptr_length(mr->ram_block, mr_addr, l, 2798 false, true); 2799 2800 memmove(ram_ptr, buf, *l); 2801 invalidate_and_set_dirty(mr, mr_addr, *l); 2802 2803 return MEMTX_OK; 2804 } 2805 } 2806 2807 /* Called within RCU critical section. 
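 *
 * (The loop below retranslates after every chunk, so one logical write
 * may be split across several MemoryRegions; the MemTxResult values of
 * all chunks are OR-ed together.)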
*/ flatview_write_continue(FlatView * fv,hwaddr addr,MemTxAttrs attrs,const void * ptr,hwaddr len,hwaddr mr_addr,hwaddr l,MemoryRegion * mr)2808 static MemTxResult flatview_write_continue(FlatView *fv, hwaddr addr, 2809 MemTxAttrs attrs, 2810 const void *ptr, 2811 hwaddr len, hwaddr mr_addr, 2812 hwaddr l, MemoryRegion *mr) 2813 { 2814 MemTxResult result = MEMTX_OK; 2815 const uint8_t *buf = ptr; 2816 2817 for (;;) { 2818 result |= flatview_write_continue_step(attrs, buf, len, mr_addr, &l, 2819 mr); 2820 2821 len -= l; 2822 buf += l; 2823 addr += l; 2824 2825 if (!len) { 2826 break; 2827 } 2828 2829 l = len; 2830 mr = flatview_translate(fv, addr, &mr_addr, &l, true, attrs); 2831 } 2832 2833 return result; 2834 } 2835 2836 /* Called from RCU critical section. */ flatview_write(FlatView * fv,hwaddr addr,MemTxAttrs attrs,const void * buf,hwaddr len)2837 static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs, 2838 const void *buf, hwaddr len) 2839 { 2840 hwaddr l; 2841 hwaddr mr_addr; 2842 MemoryRegion *mr; 2843 2844 l = len; 2845 mr = flatview_translate(fv, addr, &mr_addr, &l, true, attrs); 2846 if (!flatview_access_allowed(mr, attrs, addr, len)) { 2847 return MEMTX_ACCESS_ERROR; 2848 } 2849 return flatview_write_continue(fv, addr, attrs, buf, len, 2850 mr_addr, l, mr); 2851 } 2852 flatview_read_continue_step(MemTxAttrs attrs,uint8_t * buf,hwaddr len,hwaddr mr_addr,hwaddr * l,MemoryRegion * mr)2853 static MemTxResult flatview_read_continue_step(MemTxAttrs attrs, uint8_t *buf, 2854 hwaddr len, hwaddr mr_addr, 2855 hwaddr *l, 2856 MemoryRegion *mr) 2857 { 2858 if (!flatview_access_allowed(mr, attrs, mr_addr, *l)) { 2859 return MEMTX_ACCESS_ERROR; 2860 } 2861 2862 if (!memory_access_is_direct(mr, false)) { 2863 /* I/O case */ 2864 uint64_t val; 2865 MemTxResult result; 2866 bool release_lock = prepare_mmio_access(mr); 2867 2868 *l = memory_access_size(mr, *l, mr_addr); 2869 result = memory_region_dispatch_read(mr, mr_addr, &val, size_memop(*l), 2870 attrs); 2871 2872 /* 2873 * Assure Coverity (and ourselves) that we are not going to OVERRUN 2874 * the buffer by following stn_he_p(). 2875 */ 2876 #ifdef QEMU_STATIC_ANALYSIS 2877 assert((*l == 1 && len >= 1) || 2878 (*l == 2 && len >= 2) || 2879 (*l == 4 && len >= 4) || 2880 (*l == 8 && len >= 8)); 2881 #endif 2882 stn_he_p(buf, *l, val); 2883 2884 if (release_lock) { 2885 bql_unlock(); 2886 } 2887 return result; 2888 } else { 2889 /* RAM case */ 2890 uint8_t *ram_ptr = qemu_ram_ptr_length(mr->ram_block, mr_addr, l, 2891 false, false); 2892 2893 memcpy(buf, ram_ptr, *l); 2894 2895 return MEMTX_OK; 2896 } 2897 } 2898 2899 /* Called within RCU critical section. */ flatview_read_continue(FlatView * fv,hwaddr addr,MemTxAttrs attrs,void * ptr,hwaddr len,hwaddr mr_addr,hwaddr l,MemoryRegion * mr)2900 MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr, 2901 MemTxAttrs attrs, void *ptr, 2902 hwaddr len, hwaddr mr_addr, hwaddr l, 2903 MemoryRegion *mr) 2904 { 2905 MemTxResult result = MEMTX_OK; 2906 uint8_t *buf = ptr; 2907 2908 fuzz_dma_read_cb(addr, len, mr); 2909 for (;;) { 2910 result |= flatview_read_continue_step(attrs, buf, len, mr_addr, &l, mr); 2911 2912 len -= l; 2913 buf += l; 2914 addr += l; 2915 2916 if (!len) { 2917 break; 2918 } 2919 2920 l = len; 2921 mr = flatview_translate(fv, addr, &mr_addr, &l, false, attrs); 2922 } 2923 2924 return result; 2925 } 2926 2927 /* Called from RCU critical section. 
*/ flatview_read(FlatView * fv,hwaddr addr,MemTxAttrs attrs,void * buf,hwaddr len)2928 static MemTxResult flatview_read(FlatView *fv, hwaddr addr, 2929 MemTxAttrs attrs, void *buf, hwaddr len) 2930 { 2931 hwaddr l; 2932 hwaddr mr_addr; 2933 MemoryRegion *mr; 2934 2935 l = len; 2936 mr = flatview_translate(fv, addr, &mr_addr, &l, false, attrs); 2937 if (!flatview_access_allowed(mr, attrs, addr, len)) { 2938 return MEMTX_ACCESS_ERROR; 2939 } 2940 return flatview_read_continue(fv, addr, attrs, buf, len, 2941 mr_addr, l, mr); 2942 } 2943 address_space_read_full(AddressSpace * as,hwaddr addr,MemTxAttrs attrs,void * buf,hwaddr len)2944 MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr, 2945 MemTxAttrs attrs, void *buf, hwaddr len) 2946 { 2947 MemTxResult result = MEMTX_OK; 2948 FlatView *fv; 2949 2950 if (len > 0) { 2951 RCU_READ_LOCK_GUARD(); 2952 fv = address_space_to_flatview(as); 2953 result = flatview_read(fv, addr, attrs, buf, len); 2954 } 2955 2956 return result; 2957 } 2958 address_space_write(AddressSpace * as,hwaddr addr,MemTxAttrs attrs,const void * buf,hwaddr len)2959 MemTxResult address_space_write(AddressSpace *as, hwaddr addr, 2960 MemTxAttrs attrs, 2961 const void *buf, hwaddr len) 2962 { 2963 MemTxResult result = MEMTX_OK; 2964 FlatView *fv; 2965 2966 if (len > 0) { 2967 RCU_READ_LOCK_GUARD(); 2968 fv = address_space_to_flatview(as); 2969 result = flatview_write(fv, addr, attrs, buf, len); 2970 } 2971 2972 return result; 2973 } 2974 address_space_rw(AddressSpace * as,hwaddr addr,MemTxAttrs attrs,void * buf,hwaddr len,bool is_write)2975 MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs, 2976 void *buf, hwaddr len, bool is_write) 2977 { 2978 if (is_write) { 2979 return address_space_write(as, addr, attrs, buf, len); 2980 } else { 2981 return address_space_read_full(as, addr, attrs, buf, len); 2982 } 2983 } 2984 address_space_set(AddressSpace * as,hwaddr addr,uint8_t c,hwaddr len,MemTxAttrs attrs)2985 MemTxResult address_space_set(AddressSpace *as, hwaddr addr, 2986 uint8_t c, hwaddr len, MemTxAttrs attrs) 2987 { 2988 #define FILLBUF_SIZE 512 2989 uint8_t fillbuf[FILLBUF_SIZE]; 2990 int l; 2991 MemTxResult error = MEMTX_OK; 2992 2993 memset(fillbuf, c, FILLBUF_SIZE); 2994 while (len > 0) { 2995 l = len < FILLBUF_SIZE ? 
len : FILLBUF_SIZE; 2996 error |= address_space_write(as, addr, attrs, fillbuf, l); 2997 len -= l; 2998 addr += l; 2999 } 3000 3001 return error; 3002 } 3003 cpu_physical_memory_rw(hwaddr addr,void * buf,hwaddr len,bool is_write)3004 void cpu_physical_memory_rw(hwaddr addr, void *buf, 3005 hwaddr len, bool is_write) 3006 { 3007 address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED, 3008 buf, len, is_write); 3009 } 3010 3011 enum write_rom_type { 3012 WRITE_DATA, 3013 FLUSH_CACHE, 3014 }; 3015 address_space_write_rom_internal(AddressSpace * as,hwaddr addr,MemTxAttrs attrs,const void * ptr,hwaddr len,enum write_rom_type type)3016 static inline MemTxResult address_space_write_rom_internal(AddressSpace *as, 3017 hwaddr addr, 3018 MemTxAttrs attrs, 3019 const void *ptr, 3020 hwaddr len, 3021 enum write_rom_type type) 3022 { 3023 hwaddr l; 3024 uint8_t *ram_ptr; 3025 hwaddr addr1; 3026 MemoryRegion *mr; 3027 const uint8_t *buf = ptr; 3028 3029 RCU_READ_LOCK_GUARD(); 3030 while (len > 0) { 3031 l = len; 3032 mr = address_space_translate(as, addr, &addr1, &l, true, attrs); 3033 3034 if (!(memory_region_is_ram(mr) || 3035 memory_region_is_romd(mr))) { 3036 l = memory_access_size(mr, l, addr1); 3037 } else { 3038 /* ROM/RAM case */ 3039 ram_ptr = qemu_map_ram_ptr(mr->ram_block, addr1); 3040 switch (type) { 3041 case WRITE_DATA: 3042 memcpy(ram_ptr, buf, l); 3043 invalidate_and_set_dirty(mr, addr1, l); 3044 break; 3045 case FLUSH_CACHE: 3046 flush_idcache_range((uintptr_t)ram_ptr, (uintptr_t)ram_ptr, l); 3047 break; 3048 } 3049 } 3050 len -= l; 3051 buf += l; 3052 addr += l; 3053 } 3054 return MEMTX_OK; 3055 } 3056 3057 /* used for ROM loading : can write in RAM and ROM */ address_space_write_rom(AddressSpace * as,hwaddr addr,MemTxAttrs attrs,const void * buf,hwaddr len)3058 MemTxResult address_space_write_rom(AddressSpace *as, hwaddr addr, 3059 MemTxAttrs attrs, 3060 const void *buf, hwaddr len) 3061 { 3062 return address_space_write_rom_internal(as, addr, attrs, 3063 buf, len, WRITE_DATA); 3064 } 3065 cpu_flush_icache_range(hwaddr start,hwaddr len)3066 void cpu_flush_icache_range(hwaddr start, hwaddr len) 3067 { 3068 /* 3069 * This function should do the same thing as an icache flush that was 3070 * triggered from within the guest. For TCG we are always cache coherent, 3071 * so there is no need to flush anything. For KVM / Xen we need to flush 3072 * the host's instruction cache at least. 3073 */ 3074 if (tcg_enabled()) { 3075 return; 3076 } 3077 3078 address_space_write_rom_internal(&address_space_memory, 3079 start, MEMTXATTRS_UNSPECIFIED, 3080 NULL, len, FLUSH_CACHE); 3081 } 3082 3083 /* 3084 * A magic value stored in the first 8 bytes of the bounce buffer struct. Used 3085 * to detect illegal pointers passed to address_space_unmap. 
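 *
 * (address_space_unmap() treats a pointer as a bounce buffer only after
 * memory_region_from_host() fails to recognize it, and then asserts on
 * this magic before writing back and freeing the buffer.)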
3086 */ 3087 #define BOUNCE_BUFFER_MAGIC 0xb4017ceb4ffe12ed 3088 3089 typedef struct { 3090 uint64_t magic; 3091 MemoryRegion *mr; 3092 hwaddr addr; 3093 size_t len; 3094 uint8_t buffer[]; 3095 } BounceBuffer; 3096 3097 static void address_space_unregister_map_client_do(AddressSpaceMapClient * client)3098 address_space_unregister_map_client_do(AddressSpaceMapClient *client) 3099 { 3100 QLIST_REMOVE(client, link); 3101 g_free(client); 3102 } 3103 address_space_notify_map_clients_locked(AddressSpace * as)3104 static void address_space_notify_map_clients_locked(AddressSpace *as) 3105 { 3106 AddressSpaceMapClient *client; 3107 3108 while (!QLIST_EMPTY(&as->map_client_list)) { 3109 client = QLIST_FIRST(&as->map_client_list); 3110 qemu_bh_schedule(client->bh); 3111 address_space_unregister_map_client_do(client); 3112 } 3113 } 3114 address_space_register_map_client(AddressSpace * as,QEMUBH * bh)3115 void address_space_register_map_client(AddressSpace *as, QEMUBH *bh) 3116 { 3117 AddressSpaceMapClient *client = g_malloc(sizeof(*client)); 3118 3119 QEMU_LOCK_GUARD(&as->map_client_list_lock); 3120 client->bh = bh; 3121 QLIST_INSERT_HEAD(&as->map_client_list, client, link); 3122 /* Write map_client_list before reading bounce_buffer_size. */ 3123 smp_mb(); 3124 if (qatomic_read(&as->bounce_buffer_size) < as->max_bounce_buffer_size) { 3125 address_space_notify_map_clients_locked(as); 3126 } 3127 } 3128 cpu_exec_init_all(void)3129 void cpu_exec_init_all(void) 3130 { 3131 qemu_mutex_init(&ram_list.mutex); 3132 /* The data structures we set up here depend on knowing the page size, 3133 * so no more changes can be made after this point. 3134 * In an ideal world, nothing we did before we had finished the 3135 * machine setup would care about the target page size, and we could 3136 * do this much later, rather than requiring board models to state 3137 * up front what their requirements are. 
3138 */ 3139 finalize_target_page_bits(); 3140 io_mem_init(); 3141 memory_map_init(); 3142 } 3143 address_space_unregister_map_client(AddressSpace * as,QEMUBH * bh)3144 void address_space_unregister_map_client(AddressSpace *as, QEMUBH *bh) 3145 { 3146 AddressSpaceMapClient *client; 3147 3148 QEMU_LOCK_GUARD(&as->map_client_list_lock); 3149 QLIST_FOREACH(client, &as->map_client_list, link) { 3150 if (client->bh == bh) { 3151 address_space_unregister_map_client_do(client); 3152 break; 3153 } 3154 } 3155 } 3156 address_space_notify_map_clients(AddressSpace * as)3157 static void address_space_notify_map_clients(AddressSpace *as) 3158 { 3159 QEMU_LOCK_GUARD(&as->map_client_list_lock); 3160 address_space_notify_map_clients_locked(as); 3161 } 3162 flatview_access_valid(FlatView * fv,hwaddr addr,hwaddr len,bool is_write,MemTxAttrs attrs)3163 static bool flatview_access_valid(FlatView *fv, hwaddr addr, hwaddr len, 3164 bool is_write, MemTxAttrs attrs) 3165 { 3166 MemoryRegion *mr; 3167 hwaddr l, xlat; 3168 3169 while (len > 0) { 3170 l = len; 3171 mr = flatview_translate(fv, addr, &xlat, &l, is_write, attrs); 3172 if (!memory_access_is_direct(mr, is_write)) { 3173 l = memory_access_size(mr, l, addr); 3174 if (!memory_region_access_valid(mr, xlat, l, is_write, attrs)) { 3175 return false; 3176 } 3177 } 3178 3179 len -= l; 3180 addr += l; 3181 } 3182 return true; 3183 } 3184 address_space_access_valid(AddressSpace * as,hwaddr addr,hwaddr len,bool is_write,MemTxAttrs attrs)3185 bool address_space_access_valid(AddressSpace *as, hwaddr addr, 3186 hwaddr len, bool is_write, 3187 MemTxAttrs attrs) 3188 { 3189 FlatView *fv; 3190 3191 RCU_READ_LOCK_GUARD(); 3192 fv = address_space_to_flatview(as); 3193 return flatview_access_valid(fv, addr, len, is_write, attrs); 3194 } 3195 3196 static hwaddr flatview_extend_translation(FlatView * fv,hwaddr addr,hwaddr target_len,MemoryRegion * mr,hwaddr base,hwaddr len,bool is_write,MemTxAttrs attrs)3197 flatview_extend_translation(FlatView *fv, hwaddr addr, 3198 hwaddr target_len, 3199 MemoryRegion *mr, hwaddr base, hwaddr len, 3200 bool is_write, MemTxAttrs attrs) 3201 { 3202 hwaddr done = 0; 3203 hwaddr xlat; 3204 MemoryRegion *this_mr; 3205 3206 for (;;) { 3207 target_len -= len; 3208 addr += len; 3209 done += len; 3210 if (target_len == 0) { 3211 return done; 3212 } 3213 3214 len = target_len; 3215 this_mr = flatview_translate(fv, addr, &xlat, 3216 &len, is_write, attrs); 3217 if (this_mr != mr || xlat != base + done) { 3218 return done; 3219 } 3220 } 3221 } 3222 3223 /* Map a physical memory region into a host virtual address. 3224 * May map a subset of the requested range, given by and returned in *plen. 3225 * May return NULL if resources needed to perform the mapping are exhausted. 3226 * Use only for reads OR writes - not for read-modify-write operations. 3227 * Use address_space_register_map_client() to know when retrying the map 3228 * operation is likely to succeed. 
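 *
 * Usage sketch (editorial illustration; 'as', 'addr' and 'len' stand for
 * the caller's address space, address and requested length):
 *
 *     hwaddr plen = len;
 *     void *p = address_space_map(as, addr, &plen, true,
 *                                 MEMTXATTRS_UNSPECIFIED);
 *     if (p) {
 *         ...fill at most plen bytes at p...
 *         address_space_unmap(as, p, plen, true, plen);
 *     }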
3229 */ address_space_map(AddressSpace * as,hwaddr addr,hwaddr * plen,bool is_write,MemTxAttrs attrs)3230 void *address_space_map(AddressSpace *as, 3231 hwaddr addr, 3232 hwaddr *plen, 3233 bool is_write, 3234 MemTxAttrs attrs) 3235 { 3236 hwaddr len = *plen; 3237 hwaddr l, xlat; 3238 MemoryRegion *mr; 3239 FlatView *fv; 3240 3241 trace_address_space_map(as, addr, len, is_write, *(uint32_t *) &attrs); 3242 3243 if (len == 0) { 3244 return NULL; 3245 } 3246 3247 l = len; 3248 RCU_READ_LOCK_GUARD(); 3249 fv = address_space_to_flatview(as); 3250 mr = flatview_translate(fv, addr, &xlat, &l, is_write, attrs); 3251 3252 if (!memory_access_is_direct(mr, is_write)) { 3253 size_t used = qatomic_read(&as->bounce_buffer_size); 3254 for (;;) { 3255 hwaddr alloc = MIN(as->max_bounce_buffer_size - used, l); 3256 size_t new_size = used + alloc; 3257 size_t actual = 3258 qatomic_cmpxchg(&as->bounce_buffer_size, used, new_size); 3259 if (actual == used) { 3260 l = alloc; 3261 break; 3262 } 3263 used = actual; 3264 } 3265 3266 if (l == 0) { 3267 *plen = 0; 3268 return NULL; 3269 } 3270 3271 BounceBuffer *bounce = g_malloc0(l + sizeof(BounceBuffer)); 3272 bounce->magic = BOUNCE_BUFFER_MAGIC; 3273 memory_region_ref(mr); 3274 bounce->mr = mr; 3275 bounce->addr = addr; 3276 bounce->len = l; 3277 3278 if (!is_write) { 3279 flatview_read(fv, addr, attrs, 3280 bounce->buffer, l); 3281 } 3282 3283 *plen = l; 3284 return bounce->buffer; 3285 } 3286 3287 memory_region_ref(mr); 3288 *plen = flatview_extend_translation(fv, addr, len, mr, xlat, 3289 l, is_write, attrs); 3290 fuzz_dma_read_cb(addr, *plen, mr); 3291 return qemu_ram_ptr_length(mr->ram_block, xlat, plen, true, is_write); 3292 } 3293 3294 /* Unmaps a memory region previously mapped by address_space_map(). 3295 * Will also mark the memory as dirty if is_write is true. access_len gives 3296 * the amount of memory that was actually read or written by the caller. 3297 */ address_space_unmap(AddressSpace * as,void * buffer,hwaddr len,bool is_write,hwaddr access_len)3298 void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len, 3299 bool is_write, hwaddr access_len) 3300 { 3301 MemoryRegion *mr; 3302 ram_addr_t addr1; 3303 3304 mr = memory_region_from_host(buffer, &addr1); 3305 if (mr != NULL) { 3306 if (is_write) { 3307 invalidate_and_set_dirty(mr, addr1, access_len); 3308 } 3309 if (xen_enabled()) { 3310 xen_invalidate_map_cache_entry(buffer); 3311 } 3312 memory_region_unref(mr); 3313 return; 3314 } 3315 3316 3317 BounceBuffer *bounce = container_of(buffer, BounceBuffer, buffer); 3318 assert(bounce->magic == BOUNCE_BUFFER_MAGIC); 3319 3320 if (is_write) { 3321 address_space_write(as, bounce->addr, MEMTXATTRS_UNSPECIFIED, 3322 bounce->buffer, access_len); 3323 } 3324 3325 qatomic_sub(&as->bounce_buffer_size, bounce->len); 3326 bounce->magic = ~BOUNCE_BUFFER_MAGIC; 3327 memory_region_unref(bounce->mr); 3328 g_free(bounce); 3329 /* Write bounce_buffer_size before reading map_client_list. 
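 *
 * (Pairs with the smp_mb() in address_space_register_map_client(), which
 * writes map_client_list before reading bounce_buffer_size.)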
*/ 3330 smp_mb(); 3331 address_space_notify_map_clients(as); 3332 } 3333 cpu_physical_memory_map(hwaddr addr,hwaddr * plen,bool is_write)3334 void *cpu_physical_memory_map(hwaddr addr, 3335 hwaddr *plen, 3336 bool is_write) 3337 { 3338 return address_space_map(&address_space_memory, addr, plen, is_write, 3339 MEMTXATTRS_UNSPECIFIED); 3340 } 3341 cpu_physical_memory_unmap(void * buffer,hwaddr len,bool is_write,hwaddr access_len)3342 void cpu_physical_memory_unmap(void *buffer, hwaddr len, 3343 bool is_write, hwaddr access_len) 3344 { 3345 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len); 3346 } 3347 3348 #define ARG1_DECL AddressSpace *as 3349 #define ARG1 as 3350 #define SUFFIX 3351 #define TRANSLATE(...) address_space_translate(as, __VA_ARGS__) 3352 #define RCU_READ_LOCK(...) rcu_read_lock() 3353 #define RCU_READ_UNLOCK(...) rcu_read_unlock() 3354 #include "memory_ldst.c.inc" 3355 address_space_cache_init(MemoryRegionCache * cache,AddressSpace * as,hwaddr addr,hwaddr len,bool is_write)3356 int64_t address_space_cache_init(MemoryRegionCache *cache, 3357 AddressSpace *as, 3358 hwaddr addr, 3359 hwaddr len, 3360 bool is_write) 3361 { 3362 AddressSpaceDispatch *d; 3363 hwaddr l; 3364 MemoryRegion *mr; 3365 Int128 diff; 3366 3367 assert(len > 0); 3368 3369 l = len; 3370 cache->fv = address_space_get_flatview(as); 3371 d = flatview_to_dispatch(cache->fv); 3372 cache->mrs = *address_space_translate_internal(d, addr, &cache->xlat, &l, true); 3373 3374 /* 3375 * cache->xlat is now relative to cache->mrs.mr, not to the section itself. 3376 * Take that into account to compute how many bytes are there between 3377 * cache->xlat and the end of the section. 3378 */ 3379 diff = int128_sub(cache->mrs.size, 3380 int128_make64(cache->xlat - cache->mrs.offset_within_region)); 3381 l = int128_get64(int128_min(diff, int128_make64(l))); 3382 3383 mr = cache->mrs.mr; 3384 memory_region_ref(mr); 3385 if (memory_access_is_direct(mr, is_write)) { 3386 /* We don't care about the memory attributes here as we're only 3387 * doing this if we found actual RAM, which behaves the same 3388 * regardless of attributes; so UNSPECIFIED is fine. 3389 */ 3390 l = flatview_extend_translation(cache->fv, addr, len, mr, 3391 cache->xlat, l, is_write, 3392 MEMTXATTRS_UNSPECIFIED); 3393 cache->ptr = qemu_ram_ptr_length(mr->ram_block, cache->xlat, &l, true, 3394 is_write); 3395 } else { 3396 cache->ptr = NULL; 3397 } 3398 3399 cache->len = l; 3400 cache->is_write = is_write; 3401 return l; 3402 } 3403 address_space_cache_invalidate(MemoryRegionCache * cache,hwaddr addr,hwaddr access_len)3404 void address_space_cache_invalidate(MemoryRegionCache *cache, 3405 hwaddr addr, 3406 hwaddr access_len) 3407 { 3408 assert(cache->is_write); 3409 if (likely(cache->ptr)) { 3410 invalidate_and_set_dirty(cache->mrs.mr, addr + cache->xlat, access_len); 3411 } 3412 } 3413 address_space_cache_destroy(MemoryRegionCache * cache)3414 void address_space_cache_destroy(MemoryRegionCache *cache) 3415 { 3416 if (!cache->mrs.mr) { 3417 return; 3418 } 3419 3420 if (xen_enabled()) { 3421 xen_invalidate_map_cache_entry(cache->ptr); 3422 } 3423 memory_region_unref(cache->mrs.mr); 3424 flatview_unref(cache->fv); 3425 cache->mrs.mr = NULL; 3426 cache->fv = NULL; 3427 } 3428 3429 /* Called from RCU critical section. This function has the same 3430 * semantics as address_space_translate, but it only works on a 3431 * predefined range of a MemoryRegion that was mapped with 3432 * address_space_cache_init. 
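 *
 * Cache usage sketch (editorial illustration; address_space_read_cached()
 * is the inline wrapper mentioned in the comments below, assumed to take
 * (cache, offset, buf, len)):
 *
 *     MemoryRegionCache cache;
 *     int64_t n = address_space_cache_init(&cache, as, addr, len, false);
 *     if (n == len) {
 *         uint8_t b;
 *         address_space_read_cached(&cache, 0, &b, 1);
 *     }
 *     address_space_cache_destroy(&cache);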
3433 */ address_space_translate_cached(MemoryRegionCache * cache,hwaddr addr,hwaddr * xlat,hwaddr * plen,bool is_write,MemTxAttrs attrs)3434 static inline MemoryRegion *address_space_translate_cached( 3435 MemoryRegionCache *cache, hwaddr addr, hwaddr *xlat, 3436 hwaddr *plen, bool is_write, MemTxAttrs attrs) 3437 { 3438 MemoryRegionSection section; 3439 MemoryRegion *mr; 3440 IOMMUMemoryRegion *iommu_mr; 3441 AddressSpace *target_as; 3442 3443 assert(!cache->ptr); 3444 *xlat = addr + cache->xlat; 3445 3446 mr = cache->mrs.mr; 3447 iommu_mr = memory_region_get_iommu(mr); 3448 if (!iommu_mr) { 3449 /* MMIO region. */ 3450 return mr; 3451 } 3452 3453 section = address_space_translate_iommu(iommu_mr, xlat, plen, 3454 NULL, is_write, true, 3455 &target_as, attrs); 3456 return section.mr; 3457 } 3458 3459 /* Called within RCU critical section. */ address_space_write_continue_cached(MemTxAttrs attrs,const void * ptr,hwaddr len,hwaddr mr_addr,hwaddr l,MemoryRegion * mr)3460 static MemTxResult address_space_write_continue_cached(MemTxAttrs attrs, 3461 const void *ptr, 3462 hwaddr len, 3463 hwaddr mr_addr, 3464 hwaddr l, 3465 MemoryRegion *mr) 3466 { 3467 MemTxResult result = MEMTX_OK; 3468 const uint8_t *buf = ptr; 3469 3470 for (;;) { 3471 result |= flatview_write_continue_step(attrs, buf, len, mr_addr, &l, 3472 mr); 3473 3474 len -= l; 3475 buf += l; 3476 mr_addr += l; 3477 3478 if (!len) { 3479 break; 3480 } 3481 3482 l = len; 3483 } 3484 3485 return result; 3486 } 3487 3488 /* Called within RCU critical section. */ address_space_read_continue_cached(MemTxAttrs attrs,void * ptr,hwaddr len,hwaddr mr_addr,hwaddr l,MemoryRegion * mr)3489 static MemTxResult address_space_read_continue_cached(MemTxAttrs attrs, 3490 void *ptr, hwaddr len, 3491 hwaddr mr_addr, hwaddr l, 3492 MemoryRegion *mr) 3493 { 3494 MemTxResult result = MEMTX_OK; 3495 uint8_t *buf = ptr; 3496 3497 for (;;) { 3498 result |= flatview_read_continue_step(attrs, buf, len, mr_addr, &l, mr); 3499 len -= l; 3500 buf += l; 3501 mr_addr += l; 3502 3503 if (!len) { 3504 break; 3505 } 3506 l = len; 3507 } 3508 3509 return result; 3510 } 3511 3512 /* Called from RCU critical section. address_space_read_cached uses this 3513 * out of line function when the target is an MMIO or IOMMU region. 3514 */ 3515 MemTxResult address_space_read_cached_slow(MemoryRegionCache * cache,hwaddr addr,void * buf,hwaddr len)3516 address_space_read_cached_slow(MemoryRegionCache *cache, hwaddr addr, 3517 void *buf, hwaddr len) 3518 { 3519 hwaddr mr_addr, l; 3520 MemoryRegion *mr; 3521 3522 l = len; 3523 mr = address_space_translate_cached(cache, addr, &mr_addr, &l, false, 3524 MEMTXATTRS_UNSPECIFIED); 3525 return address_space_read_continue_cached(MEMTXATTRS_UNSPECIFIED, 3526 buf, len, mr_addr, l, mr); 3527 } 3528 3529 /* Called from RCU critical section. address_space_write_cached uses this 3530 * out of line function when the target is an MMIO or IOMMU region. 
3531 */ 3532 MemTxResult address_space_write_cached_slow(MemoryRegionCache * cache,hwaddr addr,const void * buf,hwaddr len)3533 address_space_write_cached_slow(MemoryRegionCache *cache, hwaddr addr, 3534 const void *buf, hwaddr len) 3535 { 3536 hwaddr mr_addr, l; 3537 MemoryRegion *mr; 3538 3539 l = len; 3540 mr = address_space_translate_cached(cache, addr, &mr_addr, &l, true, 3541 MEMTXATTRS_UNSPECIFIED); 3542 return address_space_write_continue_cached(MEMTXATTRS_UNSPECIFIED, 3543 buf, len, mr_addr, l, mr); 3544 } 3545 3546 #define ARG1_DECL MemoryRegionCache *cache 3547 #define ARG1 cache 3548 #define SUFFIX _cached_slow 3549 #define TRANSLATE(...) address_space_translate_cached(cache, __VA_ARGS__) 3550 #define RCU_READ_LOCK() ((void)0) 3551 #define RCU_READ_UNLOCK() ((void)0) 3552 #include "memory_ldst.c.inc" 3553 3554 /* virtual memory access for debug (includes writing to ROM) */ cpu_memory_rw_debug(CPUState * cpu,vaddr addr,void * ptr,size_t len,bool is_write)3555 int cpu_memory_rw_debug(CPUState *cpu, vaddr addr, 3556 void *ptr, size_t len, bool is_write) 3557 { 3558 hwaddr phys_addr; 3559 vaddr l, page; 3560 uint8_t *buf = ptr; 3561 3562 cpu_synchronize_state(cpu); 3563 while (len > 0) { 3564 int asidx; 3565 MemTxAttrs attrs; 3566 MemTxResult res; 3567 3568 page = addr & TARGET_PAGE_MASK; 3569 phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs); 3570 asidx = cpu_asidx_from_attrs(cpu, attrs); 3571 /* if no physical page mapped, return an error */ 3572 if (phys_addr == -1) 3573 return -1; 3574 l = (page + TARGET_PAGE_SIZE) - addr; 3575 if (l > len) 3576 l = len; 3577 phys_addr += (addr & ~TARGET_PAGE_MASK); 3578 if (is_write) { 3579 res = address_space_write_rom(cpu->cpu_ases[asidx].as, phys_addr, 3580 attrs, buf, l); 3581 } else { 3582 res = address_space_read(cpu->cpu_ases[asidx].as, phys_addr, 3583 attrs, buf, l); 3584 } 3585 if (res != MEMTX_OK) { 3586 return -1; 3587 } 3588 len -= l; 3589 buf += l; 3590 addr += l; 3591 } 3592 return 0; 3593 } 3594 cpu_physical_memory_is_io(hwaddr phys_addr)3595 bool cpu_physical_memory_is_io(hwaddr phys_addr) 3596 { 3597 MemoryRegion*mr; 3598 hwaddr l = 1; 3599 3600 RCU_READ_LOCK_GUARD(); 3601 mr = address_space_translate(&address_space_memory, 3602 phys_addr, &phys_addr, &l, false, 3603 MEMTXATTRS_UNSPECIFIED); 3604 3605 return !(memory_region_is_ram(mr) || memory_region_is_romd(mr)); 3606 } 3607 qemu_ram_foreach_block(RAMBlockIterFunc func,void * opaque)3608 int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque) 3609 { 3610 RAMBlock *block; 3611 int ret = 0; 3612 3613 RCU_READ_LOCK_GUARD(); 3614 RAMBLOCK_FOREACH(block) { 3615 ret = func(block, opaque); 3616 if (ret) { 3617 break; 3618 } 3619 } 3620 return ret; 3621 } 3622 3623 /* 3624 * Unmap pages of memory from start to start+length such that 3625 * they a) read as 0, b) Trigger whatever fault mechanism 3626 * the OS provides for postcopy. 3627 * The pages must be unmapped by the end of the function. 
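 * Both start and length must be aligned to the block's page_size;
 * unaligned values make the function fail before discarding anything.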
3628 * Returns: 0 on success, non-0 on failure
3629 *
3630 */
3631 int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length)
3632 {
3633 int ret = -1;
3634
3635 uint8_t *host_startaddr = rb->host + start;
3636
3637 if (!QEMU_PTR_IS_ALIGNED(host_startaddr, rb->page_size)) {
3638 error_report("%s: Unaligned start address: %p",
3639 __func__, host_startaddr);
3640 goto err;
3641 }
3642
3643 if ((start + length) <= rb->max_length) {
3644 bool need_madvise, need_fallocate;
3645 if (!QEMU_IS_ALIGNED(length, rb->page_size)) {
3646 error_report("%s: Unaligned length: %zx", __func__, length);
3647 goto err;
3648 }
3649
3650 errno = ENOTSUP; /* If we are missing MADVISE etc */
3651
3652 /* The logic here is messy;
3653 * madvise DONTNEED fails for hugepages
3654 * fallocate works on hugepages and shmem
3655 * shared anonymous memory requires madvise REMOVE
3656 */
3657 need_madvise = (rb->page_size == qemu_real_host_page_size());
3658 need_fallocate = rb->fd != -1;
3659 if (need_fallocate) {
3660 /* For a file, this causes the area of the file to be zero'd
3661 * if read, and for hugetlbfs also causes it to be unmapped
3662 * so a userfault will trigger.
3663 */
3664 #ifdef CONFIG_FALLOCATE_PUNCH_HOLE
3665 /*
3666 * fallocate() will fail with readonly files. Let's print a
3667 * proper error message.
3668 */
3669 if (rb->flags & RAM_READONLY_FD) {
3670 error_report("%s: Discarding RAM with readonly files is not"
3671 " supported", __func__);
3672 goto err;
3673
3674 }
3675 /*
3676 * We'll discard data from the actual file, even though we only
3677 * have a MAP_PRIVATE mapping, possibly messing with other
3678 * MAP_PRIVATE/MAP_SHARED mappings. There is no easy way to
3679 * change that behavior without violating the promised
3680 * semantics of ram_block_discard_range().
3681 *
3682 * Only warn, because it works as long as nobody else uses that
3683 * file.
3684 */
3685 if (!qemu_ram_is_shared(rb)) {
3686 warn_report_once("%s: Discarding RAM"
3687 " in private file mappings is possibly"
3688 " dangerous, because it will modify the"
3689 " underlying file and will affect other"
3690 " users of the file", __func__);
3691 }
3692
3693 ret = fallocate(rb->fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
3694 start, length);
3695 if (ret) {
3696 ret = -errno;
3697 error_report("%s: Failed to fallocate %s:%" PRIx64 " +%zx (%d)",
3698 __func__, rb->idstr, start, length, ret);
3699 goto err;
3700 }
3701 #else
3702 ret = -ENOSYS;
3703 error_report("%s: fallocate not available/file"
3704 "%s:%" PRIx64 " +%zx (%d)",
3705 __func__, rb->idstr, start, length, ret);
3706 goto err;
3707 #endif
3708 }
3709 if (need_madvise) {
3710 /* For normal RAM this causes it to be unmapped,
3711 * for shared memory it causes the local mapping to disappear
3712 * and to fall back on the file contents (which we just
3713 * fallocate'd away).
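 *
 * (Shared anonymous memory has no file contents to fall back on, which
 * is why the rb->fd < 0 case below uses QEMU_MADV_REMOVE rather than
 * QEMU_MADV_DONTNEED.)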
int ram_block_discard_guest_memfd_range(RAMBlock *rb, uint64_t start,
                                        size_t length)
{
    int ret = -1;

#ifdef CONFIG_FALLOCATE_PUNCH_HOLE
    ret = fallocate(rb->guest_memfd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                    start, length);

    if (ret) {
        ret = -errno;
        error_report("%s: Failed to fallocate %s:%" PRIx64 " +%zx (%d)",
                     __func__, rb->idstr, start, length, ret);
    }
#else
    ret = -ENOSYS;
    error_report("%s: fallocate not available %s:%" PRIx64 " +%zx (%d)",
                 __func__, rb->idstr, start, length, ret);
#endif

    return ret;
}

bool ramblock_is_pmem(RAMBlock *rb)
{
    return rb->flags & RAM_PMEM;
}

static void mtree_print_phys_entries(int start, int end, int skip, int ptr)
{
    if (start == end - 1) {
        qemu_printf("\t%3d      ", start);
    } else {
        qemu_printf("\t%3d..%-3d ", start, end - 1);
    }
    qemu_printf(" skip=%d ", skip);
    if (ptr == PHYS_MAP_NODE_NIL) {
        qemu_printf(" ptr=NIL");
    } else if (!skip) {
        qemu_printf(" ptr=#%d", ptr);
    } else {
        qemu_printf(" ptr=[%d]", ptr);
    }
    qemu_printf("\n");
}

#define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \
                           int128_sub((size), int128_one())) : 0)
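
/*
 * Worked example (illustrative, not part of the original file):
 * MR_SIZE() turns an Int128 section size into the inclusive end
 * offset used by mtree_print_dispatch() below, so a 0x1000-byte
 * section starting at offset 0 prints as 0x0..0xfff, and a
 * zero-sized section collapses to start..start.
 */
static inline hwaddr example_section_end(hwaddr start, Int128 size)
{
    return start + MR_SIZE(size); /* e.g. 0 + (0x1000 - 1) = 0xfff */
}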
void mtree_print_dispatch(AddressSpaceDispatch *d, MemoryRegion *root)
{
    int i;

    qemu_printf("  Dispatch\n");
    qemu_printf("    Physical sections\n");

    for (i = 0; i < d->map.sections_nb; ++i) {
        MemoryRegionSection *s = d->map.sections + i;
        const char *names[] = { " [unassigned]", " [not dirty]",
                                " [ROM]", " [watch]" };

        qemu_printf("      #%d @" HWADDR_FMT_plx ".." HWADDR_FMT_plx
                    " %s%s%s%s%s",
                    i,
                    s->offset_within_address_space,
                    s->offset_within_address_space + MR_SIZE(s->size),
                    s->mr->name ? s->mr->name : "(noname)",
                    i < ARRAY_SIZE(names) ? names[i] : "",
                    s->mr == root ? " [ROOT]" : "",
                    s == d->mru_section ? " [MRU]" : "",
                    s->mr->is_iommu ? " [iommu]" : "");

        if (s->mr->alias) {
            qemu_printf(" alias=%s", s->mr->alias->name ?
                        s->mr->alias->name : "noname");
        }
        qemu_printf("\n");
    }

    qemu_printf("    Nodes (%d bits per level, %d levels) ptr=[%d] skip=%d\n",
                P_L2_BITS, P_L2_LEVELS, d->phys_map.ptr, d->phys_map.skip);
    for (i = 0; i < d->map.nodes_nb; ++i) {
        int j, jprev;
        PhysPageEntry prev;
        Node *n = d->map.nodes + i;

        qemu_printf("      [%d]\n", i);

        for (j = 0, jprev = 0, prev = *n[0]; j < ARRAY_SIZE(*n); ++j) {
            PhysPageEntry *pe = *n + j;

            if (pe->ptr == prev.ptr && pe->skip == prev.skip) {
                continue;
            }

            mtree_print_phys_entries(jprev, j, prev.skip, prev.ptr);

            jprev = j;
            prev = *pe;
        }

        if (jprev != ARRAY_SIZE(*n)) {
            mtree_print_phys_entries(jprev, j, prev.skip, prev.ptr);
        }
    }
}

/* Require any discards to work. */
static unsigned int ram_block_discard_required_cnt;
/* Require only coordinated discards to work. */
static unsigned int ram_block_coordinated_discard_required_cnt;
/* Disable any discards. */
static unsigned int ram_block_discard_disabled_cnt;
/* Disable only uncoordinated discards. */
static unsigned int ram_block_uncoordinated_discard_disabled_cnt;
static QemuMutex ram_block_discard_disable_mutex;

static void ram_block_discard_disable_mutex_lock(void)
{
    static gsize initialized;

    if (g_once_init_enter(&initialized)) {
        qemu_mutex_init(&ram_block_discard_disable_mutex);
        g_once_init_leave(&initialized, 1);
    }
    qemu_mutex_lock(&ram_block_discard_disable_mutex);
}

static void ram_block_discard_disable_mutex_unlock(void)
{
    qemu_mutex_unlock(&ram_block_discard_disable_mutex);
}

int ram_block_discard_disable(bool state)
{
    int ret = 0;

    ram_block_discard_disable_mutex_lock();
    if (!state) {
        ram_block_discard_disabled_cnt--;
    } else if (ram_block_discard_required_cnt ||
               ram_block_coordinated_discard_required_cnt) {
        ret = -EBUSY;
    } else {
        ram_block_discard_disabled_cnt++;
    }
    ram_block_discard_disable_mutex_unlock();
    return ret;
}

int ram_block_uncoordinated_discard_disable(bool state)
{
    int ret = 0;

    ram_block_discard_disable_mutex_lock();
    if (!state) {
        ram_block_uncoordinated_discard_disabled_cnt--;
    } else if (ram_block_discard_required_cnt) {
        ret = -EBUSY;
    } else {
        ram_block_uncoordinated_discard_disabled_cnt++;
    }
    ram_block_discard_disable_mutex_unlock();
    return ret;
}

int ram_block_discard_require(bool state)
{
    int ret = 0;

    ram_block_discard_disable_mutex_lock();
    if (!state) {
        ram_block_discard_required_cnt--;
    } else if (ram_block_discard_disabled_cnt ||
               ram_block_uncoordinated_discard_disabled_cnt) {
        ret = -EBUSY;
    } else {
        ram_block_discard_required_cnt++;
    }
    ram_block_discard_disable_mutex_unlock();
    return ret;
}
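
/*
 * Illustrative usage sketch (hypothetical user, not part of the
 * original file): a component that depends on discards working takes
 * a reference with ram_block_discard_require(true), handles -EBUSY
 * when another component already disabled discards, and drops the
 * reference with ram_block_discard_require(false) when done.
 */
static int example_discard_user_start(Error **errp)
{
    if (ram_block_discard_require(true)) {
        error_setg(errp, "discarding of RAM is disabled");
        return -EBUSY;
    }
    /*
     * ... discard-dependent setup here; on teardown, release with
     * ram_block_discard_require(false);
     */
    return 0;
}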
int ram_block_coordinated_discard_require(bool state)
{
    int ret = 0;

    ram_block_discard_disable_mutex_lock();
    if (!state) {
        ram_block_coordinated_discard_required_cnt--;
    } else if (ram_block_discard_disabled_cnt) {
        ret = -EBUSY;
    } else {
        ram_block_coordinated_discard_required_cnt++;
    }
    ram_block_discard_disable_mutex_unlock();
    return ret;
}

bool ram_block_discard_is_disabled(void)
{
    return qatomic_read(&ram_block_discard_disabled_cnt) ||
           qatomic_read(&ram_block_uncoordinated_discard_disabled_cnt);
}

bool ram_block_discard_is_required(void)
{
    return qatomic_read(&ram_block_discard_required_cnt) ||
           qatomic_read(&ram_block_coordinated_discard_required_cnt);
}
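
/*
 * Illustrative usage sketch (hypothetical caller, not part of the
 * original file): code that merely wants to know whether discarding
 * is currently permitted can consult the aggregate predicates above
 * instead of taking a reference itself.
 */
static bool example_may_discard_ram(void)
{
    return !ram_block_discard_is_disabled();
}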