Lines Matching +full:cache +full:- +full:block

21 #include "exec/page-vary.h"
31 #include "accel/tcg/cpu-ops.h"
36 #include "exec/page-protection.h"
38 #include "exec/translation-block.h"
39 #include "hw/qdev-core.h"
40 #include "hw/qdev-properties.h"
47 #include "qemu/config-file.h"
48 #include "qemu/error-report.h"
49 #include "qemu/qemu-print.h"
58 #include "system/xen-mapcache.h"
66 #include "qemu/main-loop.h"
73 #include "qapi/qapi-types-migration.h"
81 #include "qemu/mmap-alloc.h"
90 #include "memory-internal.h"
124 #define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
141 /* This is a multi-level map on the physical address space.
185 if (map->nodes_nb + nodes > map->nodes_nb_alloc) { in phys_map_node_reserve()
186 map->nodes_nb_alloc = MAX(alloc_hint, map->nodes_nb + nodes); in phys_map_node_reserve()
187 map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc); in phys_map_node_reserve()
188 alloc_hint = map->nodes_nb_alloc; in phys_map_node_reserve()
199 ret = map->nodes_nb++; in phys_map_node_alloc()
200 p = map->nodes[ret]; in phys_map_node_alloc()
202 assert(ret != map->nodes_nb_alloc); in phys_map_node_alloc()
219 if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) { in phys_page_set_level()
220 lp->ptr = phys_map_node_alloc(map, level == 0); in phys_page_set_level()
222 p = map->nodes[lp->ptr]; in phys_page_set_level()
223 lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)]; in phys_page_set_level()
226 if ((*index & (step - 1)) == 0 && *nb >= step) { in phys_page_set_level()
227 lp->skip = 0; in phys_page_set_level()
228 lp->ptr = leaf; in phys_page_set_level()
230 *nb -= step; in phys_page_set_level()
232 phys_page_set_level(map, lp, index, nb, leaf, level - 1); in phys_page_set_level()
242 /* Wildly overreserve - it doesn't matter much. */ in phys_page_set()
243 phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS); in phys_page_set()
245 phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1); in phys_page_set()
258 if (lp->ptr == PHYS_MAP_NODE_NIL) { in phys_page_compact()
262 p = nodes[lp->ptr]; in phys_page_compact()
284 lp->skip + p[valid_ptr].skip >= (1 << 6)) { in phys_page_compact()
288 lp->ptr = p[valid_ptr].ptr; in phys_page_compact()
296 lp->skip = 0; in phys_page_compact()
298 lp->skip += p[valid_ptr].skip; in phys_page_compact()
304 if (d->phys_map.skip) { in address_space_dispatch_compact()
305 phys_page_compact(&d->phys_map, d->map.nodes); in address_space_dispatch_compact()
315 return int128_gethi(section->size) || in section_covers_addr()
316 range_covers_byte(section->offset_within_address_space, in section_covers_addr()
317 int128_getlo(section->size), addr); in section_covers_addr()
322 PhysPageEntry lp = d->phys_map, *p; in phys_page_find()
323 Node *nodes = d->map.nodes; in phys_page_find()
324 MemoryRegionSection *sections = d->map.sections; in phys_page_find()
328 for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) { in phys_page_find()
333 lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)]; in phys_page_find()
348 MemoryRegionSection *section = qatomic_read(&d->mru_section); in address_space_lookup_region()
351 if (!section || section == &d->map.sections[PHYS_SECTION_UNASSIGNED] || in address_space_lookup_region()
354 qatomic_set(&d->mru_section, section); in address_space_lookup_region()
356 if (resolve_subpage && section->mr->subpage) { in address_space_lookup_region()
357 subpage = container_of(section->mr, subpage_t, iomem); in address_space_lookup_region()
358 section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]]; in address_space_lookup_region()
374 addr -= section->offset_within_address_space; in address_space_translate_internal()
377 *xlat = addr + section->offset_within_region; in address_space_translate_internal()
379 mr = section->mr; in address_space_translate_internal()
381 /* MMIO registers can be expected to perform full-width accesses based only in address_space_translate_internal()
393 diff = int128_sub(section->size, int128_make64(addr)); in address_space_translate_internal()
400 * address_space_translate_iommu - translate an address through an IOMMU
431 hwaddr page_mask = (hwaddr)-1; in address_space_translate_iommu()
439 if (imrc->attrs_to_index) { in address_space_translate_iommu()
440 iommu_idx = imrc->attrs_to_index(iommu_mr, attrs); in address_space_translate_iommu()
443 iotlb = imrc->translate(iommu_mr, addr, is_write ? in address_space_translate_iommu()
453 *plen_out = MIN(*plen_out, (addr | iotlb.addr_mask) - addr + 1); in address_space_translate_iommu()
460 iommu_mr = memory_region_get_iommu(section->mr); in address_space_translate_iommu()
473 * flatview_do_translate - translate an address in FlatView
504 hwaddr plen = (hwaddr)(-1); in flatview_do_translate()
514 iommu_mr = memory_region_get_iommu(section->mr); in flatview_do_translate()
550 xlat += section.offset_within_address_space - in address_space_get_iotlb_entry()
581 hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr; in flatview_translate()
602 if (!notifier->active) { in tcg_iommu_unmap_notify()
605 tlb_flush(notifier->cpu); in tcg_iommu_unmap_notify()
606 notifier->active = false; in tcg_iommu_unmap_notify()
626 for (i = 0; i < cpu->iommu_notifiers->len; i++) { in tcg_register_iommu_notifier()
627 notifier = g_array_index(cpu->iommu_notifiers, TCGIOMMUNotifier *, i); in tcg_register_iommu_notifier()
628 if (notifier->mr == mr && notifier->iommu_idx == iommu_idx) { in tcg_register_iommu_notifier()
632 if (i == cpu->iommu_notifiers->len) { in tcg_register_iommu_notifier()
634 cpu->iommu_notifiers = g_array_set_size(cpu->iommu_notifiers, i + 1); in tcg_register_iommu_notifier()
636 g_array_index(cpu->iommu_notifiers, TCGIOMMUNotifier *, i) = notifier; in tcg_register_iommu_notifier()
638 notifier->mr = mr; in tcg_register_iommu_notifier()
639 notifier->iommu_idx = iommu_idx; in tcg_register_iommu_notifier()
640 notifier->cpu = cpu; in tcg_register_iommu_notifier()
647 iommu_notifier_init(&notifier->n, in tcg_register_iommu_notifier()
653 memory_region_register_iommu_notifier(notifier->mr, &notifier->n, in tcg_register_iommu_notifier()
657 if (!notifier->active) { in tcg_register_iommu_notifier()
658 notifier->active = true; in tcg_register_iommu_notifier()
668 for (i = 0; i < cpu->iommu_notifiers->len; i++) { in tcg_iommu_free_notifier_list()
669 notifier = g_array_index(cpu->iommu_notifiers, TCGIOMMUNotifier *, i); in tcg_iommu_free_notifier_list()
670 memory_region_unregister_iommu_notifier(notifier->mr, &notifier->n); in tcg_iommu_free_notifier_list()
673 g_array_free(cpu->iommu_notifiers, true); in tcg_iommu_free_notifier_list()
678 cpu->iommu_notifiers = g_array_new(false, true, sizeof(TCGIOMMUNotifier *)); in tcg_iommu_init_notifier_list()
693 AddressSpaceDispatch *d = address_space_to_dispatch(cpu->cpu_ases[asidx].as); in address_space_translate_for_iotlb()
698 iommu_mr = memory_region_get_iommu(section->mr); in address_space_translate_for_iotlb()
705 iommu_idx = imrc->attrs_to_index(iommu_mr, attrs); in address_space_translate_for_iotlb()
708 * doesn't short-cut its translation table walk. in address_space_translate_for_iotlb()
710 iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, iommu_idx); in address_space_translate_for_iotlb()
731 assert(!memory_region_is_iommu(section->mr)); in address_space_translate_for_iotlb()
737 * We should be given a page-aligned address -- certainly in address_space_translate_for_iotlb()
746 return &d->map.sections[PHYS_SECTION_UNASSIGNED]; in address_space_translate_for_iotlb()
753 CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx]; in iotlb_to_section()
754 AddressSpaceDispatch *d = address_space_to_dispatch(cpuas->as); in iotlb_to_section()
758 assert(section_index < d->map.sections_nb); in iotlb_to_section()
759 ret = d->map.sections + section_index; in iotlb_to_section()
760 assert(ret->mr); in iotlb_to_section()
761 assert(ret->mr->ops); in iotlb_to_section()
770 AddressSpaceDispatch *d = flatview_to_dispatch(section->fv); in memory_region_section_get_iotlb()
771 return section - d->map.sections; in memory_region_section_get_iotlb()
784 as_name = g_strdup_printf("%s-%d", prefix, cpu->cpu_index); in cpu_address_space_init()
789 assert(asidx < cpu->num_ases); in cpu_address_space_init()
793 cpu->as = as; in cpu_address_space_init()
796 if (!cpu->cpu_ases) { in cpu_address_space_init()
797 cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases); in cpu_address_space_init()
800 newas = &cpu->cpu_ases[asidx]; in cpu_address_space_init()
801 newas->cpu = cpu; in cpu_address_space_init()
802 newas->as = as; in cpu_address_space_init()
804 newas->tcg_as_listener.log_global_after_sync = tcg_log_global_after_sync; in cpu_address_space_init()
805 newas->tcg_as_listener.commit = tcg_commit; in cpu_address_space_init()
806 newas->tcg_as_listener.name = "tcg"; in cpu_address_space_init()
807 memory_listener_register(&newas->tcg_as_listener, as); in cpu_address_space_init()
816 assert(cpu->cpu_ases); in cpu_destroy_address_spaces()
819 cpu->as = NULL; in cpu_destroy_address_spaces()
821 for (asidx = 0; asidx < cpu->num_ases; asidx++) { in cpu_destroy_address_spaces()
822 cpuas = &cpu->cpu_ases[asidx]; in cpu_destroy_address_spaces()
823 if (!cpuas->as) { in cpu_destroy_address_spaces()
828 memory_listener_unregister(&cpuas->tcg_as_listener); in cpu_destroy_address_spaces()
830 g_clear_pointer(&cpuas->as, address_space_destroy_free); in cpu_destroy_address_spaces()
833 g_clear_pointer(&cpu->cpu_ases, g_free); in cpu_destroy_address_spaces()
839 return cpu->cpu_ases[asidx].as; in cpu_get_address_space()
845 RAMBlock *block; in qemu_get_ram_block() local
847 block = qatomic_rcu_read(&ram_list.mru_block); in qemu_get_ram_block()
848 if (block && addr - block->offset < block->max_length) { in qemu_get_ram_block()
849 return block; in qemu_get_ram_block()
851 RAMBLOCK_FOREACH(block) { in qemu_get_ram_block()
852 if (addr - block->offset < block->max_length) { in qemu_get_ram_block()
873 * qatomic_rcu_set is not needed here. The block was already published in qemu_get_ram_block()
877 ram_list.mru_block = block; in qemu_get_ram_block()
878 return block; in qemu_get_ram_block()
885 RAMBlock *block; in tlb_reset_dirty_range_all() local
893 block = qemu_get_ram_block(start); in tlb_reset_dirty_range_all()
894 assert(block == qemu_get_ram_block(end - 1)); in tlb_reset_dirty_range_all()
895 start1 = (uintptr_t)ramblock_ptr(block, start - block->offset); in tlb_reset_dirty_range_all()
901 /* Note: start and end must be within the same ram block. */
924 assert(start >= ramblock->offset && in cpu_physical_memory_test_and_clear_dirty()
925 start + length <= ramblock->offset + ramblock->used_length); in cpu_physical_memory_test_and_clear_dirty()
930 unsigned long num = MIN(end - page, in cpu_physical_memory_test_and_clear_dirty()
931 DIRTY_MEMORY_BLOCK_SIZE - offset); in cpu_physical_memory_test_and_clear_dirty()
933 dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx], in cpu_physical_memory_test_and_clear_dirty()
938 mr_offset = (ram_addr_t)(start_page << TARGET_PAGE_BITS) - ramblock->offset; in cpu_physical_memory_test_and_clear_dirty()
939 mr_size = (end - start_page) << TARGET_PAGE_BITS; in cpu_physical_memory_test_and_clear_dirty()
940 memory_region_clear_dirty_bitmap(ramblock->mr, mr_offset, mr_size); in cpu_physical_memory_test_and_clear_dirty()
968 ((last - first) >> (TARGET_PAGE_BITS + 3))); in cpu_physical_memory_snapshot_and_clear_dirty()
969 snap->start = first; in cpu_physical_memory_snapshot_and_clear_dirty()
970 snap->end = last; in cpu_physical_memory_snapshot_and_clear_dirty()
982 unsigned long num = MIN(end - page, in cpu_physical_memory_snapshot_and_clear_dirty()
983 DIRTY_MEMORY_BLOCK_SIZE - ofs); in cpu_physical_memory_snapshot_and_clear_dirty()
989 bitmap_copy_and_clear_atomic(snap->dirty + dest, in cpu_physical_memory_snapshot_and_clear_dirty()
990 blocks->blocks[idx] + ofs, in cpu_physical_memory_snapshot_and_clear_dirty()
1010 assert(start >= snap->start); in cpu_physical_memory_snapshot_get_dirty()
1011 assert(start + length <= snap->end); in cpu_physical_memory_snapshot_get_dirty()
1013 end = TARGET_PAGE_ALIGN(start + length - snap->start) >> TARGET_PAGE_BITS; in cpu_physical_memory_snapshot_get_dirty()
1014 page = (start - snap->start) >> TARGET_PAGE_BITS; in cpu_physical_memory_snapshot_get_dirty()
1017 if (test_bit(page, snap->dirty)) { in cpu_physical_memory_snapshot_get_dirty()
1032 /* The physical section number is ORed with a page-aligned in phys_section_add()
1034 * never overflow into the page-aligned value. in phys_section_add()
1036 assert(map->sections_nb < TARGET_PAGE_SIZE); in phys_section_add()
1038 if (map->sections_nb == map->sections_nb_alloc) { in phys_section_add()
1039 map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16); in phys_section_add()
1040 map->sections = g_renew(MemoryRegionSection, map->sections, in phys_section_add()
1041 map->sections_nb_alloc); in phys_section_add()
1043 map->sections[map->sections_nb] = *section; in phys_section_add()
1044 memory_region_ref(section->mr); in phys_section_add()
1045 return map->sections_nb++; in phys_section_add()
1050 bool have_sub_page = mr->subpage; in phys_section_destroy()
1056 object_unref(OBJECT(&subpage->iomem)); in phys_section_destroy()
1063 while (map->sections_nb > 0) { in phys_sections_free()
1064 MemoryRegionSection *section = &map->sections[--map->sections_nb]; in phys_sections_free()
1065 phys_section_destroy(section->mr); in phys_sections_free()
1067 g_free(map->sections); in phys_sections_free()
1068 g_free(map->nodes); in phys_sections_free()
1075 hwaddr base = section->offset_within_address_space in register_subpage()
1084 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned); in register_subpage()
1086 if (!(existing->mr->subpage)) { in register_subpage()
1089 subsection.mr = &subpage->iomem; in register_subpage()
1091 phys_section_add(&d->map, &subsection)); in register_subpage()
1093 subpage = container_of(existing->mr, subpage_t, iomem); in register_subpage()
1095 start = section->offset_within_address_space & ~TARGET_PAGE_MASK; in register_subpage()
1096 end = start + int128_get64(section->size) - 1; in register_subpage()
1098 phys_section_add(&d->map, section)); in register_subpage()
1106 hwaddr start_addr = section->offset_within_address_space; in register_multipage()
1107 uint16_t section_index = phys_section_add(&d->map, section); in register_multipage()
1108 uint64_t num_pages = int128_get64(int128_rshift(section->size, in register_multipage()
1130 - remain.offset_within_address_space; in flatview_add_to_dispatch()
1178 RAMBlock *block; in ram_block_format() local
1184 "Block Name", "PSize", "Offset", "Used", "Total", in ram_block_format()
1187 RAMBLOCK_FOREACH(block) { in ram_block_format()
1188 psize = size_to_str(block->page_size); in ram_block_format()
1191 block->idstr, psize, in ram_block_format()
1192 (uint64_t)block->offset, in ram_block_format()
1193 (uint64_t)block->used_length, in ram_block_format()
1194 (uint64_t)block->max_length, in ram_block_format()
1195 (uint64_t)(uintptr_t)block->host, in ram_block_format()
1196 block->mr->readonly ? "ro" : "rw"); in ram_block_format()
1266 return -errno; in get_file_size()
1295 return -errno; in get_file_size()
1302 int64_t align = -1; in get_file_align()
1307 return -errno; in get_file_align()
1322 return -errno; in get_file_align()
1327 return -1; in get_file_align()
1351 int fd = -1; in file_ram_open()
1370 return -errno; in file_ram_open()
1373 return -EISDIR; in file_ram_open()
1382 return -ENOENT; in file_ram_open()
1413 return -errno; in file_ram_open()
1424 static void *file_ram_alloc(RAMBlock *block, in file_ram_alloc() argument
1434 block->page_size = qemu_fd_getpagesize(fd); in file_ram_alloc()
1435 if (block->mr->align % block->page_size) { in file_ram_alloc()
1438 block->mr->align, block->page_size); in file_ram_alloc()
1440 } else if (block->mr->align && !is_power_of_2(block->mr->align)) { in file_ram_alloc()
1442 " must be a power of two", block->mr->align); in file_ram_alloc()
1444 } else if (offset % block->page_size) { in file_ram_alloc()
1447 offset, block->page_size); in file_ram_alloc()
1450 block->mr->align = MAX(block->page_size, block->mr->align); in file_ram_alloc()
1453 block->mr->align = MAX(block->mr->align, QEMU_VMALLOC_ALIGN); in file_ram_alloc()
1457 if (memory < block->page_size) { in file_ram_alloc()
1460 memory, block->page_size); in file_ram_alloc()
1464 memory = ROUND_UP(memory, block->page_size); in file_ram_alloc()
1472 * Do not truncate the non-empty backend file to avoid corrupting in file_ram_alloc()
1477 * those labels. Therefore, extending the non-empty backend file in file_ram_alloc()
1484 qemu_map_flags = (block->flags & RAM_READONLY) ? QEMU_MAP_READONLY : 0; in file_ram_alloc()
1485 qemu_map_flags |= (block->flags & RAM_SHARED) ? QEMU_MAP_SHARED : 0; in file_ram_alloc()
1486 qemu_map_flags |= (block->flags & RAM_PMEM) ? QEMU_MAP_SYNC : 0; in file_ram_alloc()
1487 qemu_map_flags |= (block->flags & RAM_NORESERVE) ? QEMU_MAP_NORESERVE : 0; in file_ram_alloc()
1488 area = qemu_ram_mmap(fd, memory, block->mr->align, qemu_map_flags, offset); in file_ram_alloc()
1495 block->fd = fd; in file_ram_alloc()
1496 block->fd_offset = offset; in file_ram_alloc()
1507 RAMBlock *block, *next_block; in find_ram_offset() local
1516 RAMBLOCK_FOREACH(block) { in find_ram_offset()
1522 candidate = block->offset + block->max_length; in find_ram_offset()
1525 /* Search for the closest following block in find_ram_offset()
1529 if (next_block->offset >= candidate) { in find_ram_offset()
1530 next = MIN(next, next_block->offset); in find_ram_offset()
1538 if (next - candidate >= size && next - candidate < mingap) { in find_ram_offset()
1540 mingap = next - candidate; in find_ram_offset()
1567 "but dump-guest-core=off specified\n"); in qemu_ram_setup_dump()
1574 return rb->idstr; in qemu_ram_get_idstr()
1579 return rb->host; in qemu_ram_get_host_addr()
1584 return rb->offset; in qemu_ram_get_offset()
1589 return rb->fd_offset; in qemu_ram_get_fd_offset()
1594 return rb->used_length; in qemu_ram_get_used_length()
1599 return rb->max_length; in qemu_ram_get_max_length()
1604 return rb->flags & RAM_SHARED; in qemu_ram_is_shared()
1609 return rb->flags & RAM_NORESERVE; in qemu_ram_is_noreserve()
1615 return rb->flags & RAM_UF_ZEROPAGE; in qemu_ram_is_uf_zeroable()
1620 rb->flags |= RAM_UF_ZEROPAGE; in qemu_ram_set_uf_zeroable()
1625 return rb->flags & RAM_MIGRATABLE; in qemu_ram_is_migratable()
1630 rb->flags |= RAM_MIGRATABLE; in qemu_ram_set_migratable()
1635 rb->flags &= ~RAM_MIGRATABLE; in qemu_ram_unset_migratable()
1640 return rb->flags & RAM_NAMED_FILE; in qemu_ram_is_named_file()
1645 return rb->fd; in qemu_ram_get_fd()
1651 RAMBlock *block; in qemu_ram_set_idstr() local
1654 assert(!new_block->idstr[0]); in qemu_ram_set_idstr()
1659 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id); in qemu_ram_set_idstr()
1663 pstrcat(new_block->idstr, sizeof(new_block->idstr), name); in qemu_ram_set_idstr()
1666 RAMBLOCK_FOREACH(block) { in qemu_ram_set_idstr()
1667 if (block != new_block && in qemu_ram_set_idstr()
1668 !strcmp(block->idstr, new_block->idstr)) { in qemu_ram_set_idstr()
1670 new_block->idstr); in qemu_ram_set_idstr()
1677 void qemu_ram_unset_idstr(RAMBlock *block) in qemu_ram_unset_idstr() argument
1680 * migration. Ignore the problem since hot-unplug during migration in qemu_ram_unset_idstr()
1683 if (block) { in qemu_ram_unset_idstr()
1684 memset(block->idstr, 0, sizeof(block->idstr)); in qemu_ram_unset_idstr()
1691 g_autofree char *id = mr->dev ? qdev_get_dev_path(mr->dev) : NULL; in cpr_name()
1702 return rb->page_size; in qemu_ram_pagesize()
1708 RAMBlock *block; in qemu_ram_pagesize_largest() local
1711 RAMBLOCK_FOREACH(block) { in qemu_ram_pagesize_largest()
1712 largest = MAX(largest, qemu_ram_pagesize(block)); in qemu_ram_pagesize_largest()
1736 int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp) in qemu_ram_resize() argument
1738 const ram_addr_t oldsize = block->used_length; in qemu_ram_resize()
1741 assert(block); in qemu_ram_resize()
1746 if (block->used_length == newsize) { in qemu_ram_resize()
1748 * We don't have to resize the ram block (which only knows aligned in qemu_ram_resize()
1751 if (unaligned_size != memory_region_size(block->mr)) { in qemu_ram_resize()
1752 memory_region_set_size(block->mr, unaligned_size); in qemu_ram_resize()
1753 if (block->resized) { in qemu_ram_resize()
1754 block->resized(block->idstr, unaligned_size, block->host); in qemu_ram_resize()
1760 if (!(block->flags & RAM_RESIZEABLE)) { in qemu_ram_resize()
1763 " != 0x" RAM_ADDR_FMT, block->idstr, in qemu_ram_resize()
1764 newsize, block->used_length); in qemu_ram_resize()
1765 return -EINVAL; in qemu_ram_resize()
1768 if (block->max_length < newsize) { in qemu_ram_resize()
1771 " > 0x" RAM_ADDR_FMT, block->idstr, in qemu_ram_resize()
1772 newsize, block->max_length); in qemu_ram_resize()
1773 return -EINVAL; in qemu_ram_resize()
1776 /* Notify before modifying the ram block and touching the bitmaps. */ in qemu_ram_resize()
1777 if (block->host) { in qemu_ram_resize()
1778 ram_block_notify_resize(block->host, oldsize, newsize); in qemu_ram_resize()
1781 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length); in qemu_ram_resize()
1782 block->used_length = newsize; in qemu_ram_resize()
1783 cpu_physical_memory_set_dirty_range(block->offset, block->used_length, in qemu_ram_resize()
1785 memory_region_set_size(block->mr, unaligned_size); in qemu_ram_resize()
1786 if (block->resized) { in qemu_ram_resize()
1787 block->resized(block->idstr, unaligned_size, block->host); in qemu_ram_resize()
1793 * Trigger sync on the given ram block for range [start, start + length]
1795 * Otherwise no-op.
1798 void qemu_ram_msync(RAMBlock *block, ram_addr_t start, ram_addr_t length) in qemu_ram_msync() argument
1800 /* The requested range should fit within the block range */ in qemu_ram_msync()
1801 g_assert((start + length) <= block->used_length); in qemu_ram_msync()
1804 /* The lack of support for pmem should not block the sync */ in qemu_ram_msync()
1805 if (ramblock_is_pmem(block)) { in qemu_ram_msync()
1806 void *addr = ramblock_ptr(block, start); in qemu_ram_msync()
1811 if (block->fd >= 0) { in qemu_ram_msync()
1814 * specified as persistent (or is not one) - use the msync. in qemu_ram_msync()
1817 void *addr = ramblock_ptr(block, start); in qemu_ram_msync()
1818 if (qemu_msync(addr, length, block->fd)) { in qemu_ram_msync()
1834 /* Only need to extend if block count increased */ in dirty_memory_extend()
1846 sizeof(new_blocks->blocks[0]) * new_num_blocks); in dirty_memory_extend()
1849 memcpy(new_blocks->blocks, old_blocks->blocks, in dirty_memory_extend()
1850 old_num_blocks * sizeof(old_blocks->blocks[0])); in dirty_memory_extend()
1854 new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE); in dirty_memory_extend()
1871 RAMBlock *block; in ram_block_add() local
1878 new_block->offset = find_ram_offset(new_block->max_length); in ram_block_add()
1880 if (!new_block->host) { in ram_block_add()
1882 xen_ram_alloc(new_block->offset, new_block->max_length, in ram_block_add()
1883 new_block->mr, &err); in ram_block_add()
1890 new_block->host = qemu_anon_ram_alloc(new_block->max_length, in ram_block_add()
1891 &new_block->mr->align, in ram_block_add()
1893 if (!new_block->host) { in ram_block_add()
1896 memory_region_name(new_block->mr)); in ram_block_add()
1900 memory_try_enable_merging(new_block->host, new_block->max_length); in ram_block_add()
1905 if (new_block->flags & RAM_GUEST_MEMFD) { in ram_block_add()
1910 object_get_typename(OBJECT(current_machine->cgs))); in ram_block_add()
1913 assert(new_block->guest_memfd < 0); in ram_block_add()
1917 error_setg_errno(errp, -ret, in ram_block_add()
1923 new_block->guest_memfd = kvm_create_guest_memfd(new_block->max_length, in ram_block_add()
1925 if (new_block->guest_memfd < 0) { in ram_block_add()
1939 new_block->attributes = ram_block_attributes_create(new_block); in ram_block_add()
1940 if (!new_block->attributes) { in ram_block_add()
1941 error_setg(errp, "Failed to create ram block attribute"); in ram_block_add()
1942 close(new_block->guest_memfd); in ram_block_add()
1953 error_setg(&new_block->cpr_blocker, in ram_block_add()
1956 memory_region_name(new_block->mr)); in ram_block_add()
1957 migrate_add_blocker_modes(&new_block->cpr_blocker, errp, in ram_block_add()
1958 MIG_MODE_CPR_TRANSFER, -1); in ram_block_add()
1962 ram_size = (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS; in ram_block_add()
1964 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ, in ram_block_add()
1965 * QLIST (which has an RCU-friendly variant) does not have insertion at in ram_block_add()
1968 RAMBLOCK_FOREACH(block) { in ram_block_add()
1969 last_block = block; in ram_block_add()
1970 if (block->max_length < new_block->max_length) { in ram_block_add()
1974 if (block) { in ram_block_add()
1975 QLIST_INSERT_BEFORE_RCU(block, new_block, next); in ram_block_add()
1988 cpu_physical_memory_set_dirty_range(new_block->offset, in ram_block_add()
1989 new_block->used_length, in ram_block_add()
1992 if (new_block->host) { in ram_block_add()
1993 qemu_ram_setup_dump(new_block->host, new_block->max_length); in ram_block_add()
1994 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE); in ram_block_add()
2001 qemu_madvise(new_block->host, new_block->max_length, in ram_block_add()
2004 ram_block_notify_add(new_block->host, new_block->used_length, in ram_block_add()
2005 new_block->max_length); in ram_block_add()
2011 qemu_anon_ram_free(new_block->host, new_block->max_length); in ram_block_add()
2012 new_block->host = NULL; in ram_block_add()
2040 error_setg(errp, "-mem-path not supported with Xen"); in qemu_ram_alloc_from_fd()
2046 "host lacks kvm mmu notifiers, -mem-path unsupported"); in qemu_ram_alloc_from_fd()
2066 if (file_align > 0 && file_align > mr->align) { in qemu_ram_alloc_from_fd()
2069 file_align, mr->align); in qemu_ram_alloc_from_fd()
2074 new_block->mr = mr; in qemu_ram_alloc_from_fd()
2075 new_block->used_length = size; in qemu_ram_alloc_from_fd()
2076 new_block->max_length = max_size; in qemu_ram_alloc_from_fd()
2077 new_block->resized = resized; in qemu_ram_alloc_from_fd()
2078 new_block->flags = ram_flags; in qemu_ram_alloc_from_fd()
2079 new_block->guest_memfd = -1; in qemu_ram_alloc_from_fd()
2080 new_block->host = file_ram_alloc(new_block, max_size, fd, in qemu_ram_alloc_from_fd()
2083 if (!new_block->host) { in qemu_ram_alloc_from_fd()
2105 RAMBlock *block; in qemu_ram_alloc_from_file() local
2110 error_setg_errno(errp, -fd, "can't open backing store %s for guest RAM", in qemu_ram_alloc_from_file()
2113 fd == -EACCES) { in qemu_ram_alloc_from_file()
2127 " read-only but still creating writable RAM using" in qemu_ram_alloc_from_file()
2128 " '-object memory-backend-file,readonly=on,rom=off...'" in qemu_ram_alloc_from_file()
2134 block = qemu_ram_alloc_from_fd(size, size, NULL, mr, ram_flags, fd, offset, in qemu_ram_alloc_from_file()
2136 if (!block) { in qemu_ram_alloc_from_file()
2144 return block; in qemu_ram_alloc_from_file()
2199 if (!share_flags && current_machine->aux_ram_share) { in qemu_ram_alloc_internal()
2212 mr->align = QEMU_VMALLOC_ALIGN; in qemu_ram_alloc_internal()
2220 * After cpr-transfer, new QEMU could create a memory region in qemu_ram_alloc_internal()
2228 trace_qemu_ram_alloc_shared(name, new_block->used_length, in qemu_ram_alloc_internal()
2229 new_block->max_length, fd, in qemu_ram_alloc_internal()
2230 new_block->host); in qemu_ram_alloc_internal()
2247 new_block->mr = mr; in qemu_ram_alloc_internal()
2248 new_block->resized = resized; in qemu_ram_alloc_internal()
2249 new_block->used_length = size; in qemu_ram_alloc_internal()
2250 new_block->max_length = max_size; in qemu_ram_alloc_internal()
2251 new_block->fd = -1; in qemu_ram_alloc_internal()
2252 new_block->guest_memfd = -1; in qemu_ram_alloc_internal()
2253 new_block->page_size = qemu_real_host_page_size(); in qemu_ram_alloc_internal()
2254 new_block->host = host; in qemu_ram_alloc_internal()
2255 new_block->flags = ram_flags; in qemu_ram_alloc_internal()
2288 static void reclaim_ramblock(RAMBlock *block) in reclaim_ramblock() argument
2290 if (block->flags & RAM_PREALLOC) { in reclaim_ramblock()
2293 xen_invalidate_map_cache_entry(block->host); in reclaim_ramblock()
2295 } else if (block->fd >= 0) { in reclaim_ramblock()
2296 qemu_ram_munmap(block->fd, block->host, block->max_length); in reclaim_ramblock()
2297 close(block->fd); in reclaim_ramblock()
2300 qemu_anon_ram_free(block->host, block->max_length); in reclaim_ramblock()
2303 if (block->guest_memfd >= 0) { in reclaim_ramblock()
2304 ram_block_attributes_destroy(block->attributes); in reclaim_ramblock()
2305 close(block->guest_memfd); in reclaim_ramblock()
2309 g_free(block); in reclaim_ramblock()
2312 void qemu_ram_free(RAMBlock *block) in qemu_ram_free() argument
2316 if (!block) { in qemu_ram_free()
2320 if (block->host) { in qemu_ram_free()
2321 ram_block_notify_remove(block->host, block->used_length, in qemu_ram_free()
2322 block->max_length); in qemu_ram_free()
2326 name = cpr_name(block->mr); in qemu_ram_free()
2328 QLIST_REMOVE_RCU(block, next); in qemu_ram_free()
2333 call_rcu(block, reclaim_ramblock, rcu); in qemu_ram_free()
2339 static int qemu_ram_remap_mmap(RAMBlock *block, uint64_t start, size_t length) in qemu_ram_remap_mmap() argument
2343 void *host_startaddr = block->host + start; in qemu_ram_remap_mmap()
2345 assert(block->fd < 0); in qemu_ram_remap_mmap()
2347 flags |= block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE; in qemu_ram_remap_mmap()
2348 flags |= block->flags & RAM_NORESERVE ? MAP_NORESERVE : 0; in qemu_ram_remap_mmap()
2350 prot |= block->flags & RAM_READONLY ? 0 : PROT_WRITE; in qemu_ram_remap_mmap()
2351 area = mmap(host_startaddr, length, prot, flags, -1, 0); in qemu_ram_remap_mmap()
2352 return area != host_startaddr ? -errno : 0; in qemu_ram_remap_mmap()
2356 * qemu_ram_remap - remap a single RAM page
2370 RAMBlock *block; in qemu_ram_remap() local
2375 RAMBLOCK_FOREACH(block) { in qemu_ram_remap()
2376 offset = addr - block->offset; in qemu_ram_remap()
2377 if (offset < block->max_length) { in qemu_ram_remap()
2379 page_size = qemu_ram_pagesize(block); in qemu_ram_remap()
2382 vaddr = ramblock_ptr(block, offset); in qemu_ram_remap()
2383 if (block->flags & RAM_PREALLOC) { in qemu_ram_remap()
2388 if (ram_block_discard_range(block, offset, page_size) != 0) { in qemu_ram_remap()
2395 if (block->fd >= 0) { in qemu_ram_remap()
2397 PRIx64 " +%zx", block->idstr, offset, in qemu_ram_remap()
2398 block->fd_offset, page_size); in qemu_ram_remap()
2401 if (qemu_ram_remap_mmap(block, offset, page_size) != 0) { in qemu_ram_remap()
2403 block->idstr, offset, page_size); in qemu_ram_remap()
2421 * @block: block for the RAM to lookup (optional and may be NULL).
2426 * @lock: whether to lock the mapping in xen-mapcache until invalidated.
2427 * @is_write: hint whether to map RW or RO in the xen-mapcache.
2432 static void *qemu_ram_ptr_length(RAMBlock *block, ram_addr_t addr, in qemu_ram_ptr_length() argument
2442 if (block == NULL) { in qemu_ram_ptr_length()
2443 block = qemu_get_ram_block(addr); in qemu_ram_ptr_length()
2444 addr -= block->offset; in qemu_ram_ptr_length()
2447 *size = MIN(*size, block->max_length - addr); in qemu_ram_ptr_length()
2451 if (xen_enabled() && block->host == NULL) { in qemu_ram_ptr_length()
2456 if (xen_mr_is_memory(block->mr)) { in qemu_ram_ptr_length()
2457 return xen_map_cache(block->mr, block->offset + addr, in qemu_ram_ptr_length()
2458 len, block->offset, in qemu_ram_ptr_length()
2462 block->host = xen_map_cache(block->mr, block->offset, in qemu_ram_ptr_length()
2463 block->max_length, in qemu_ram_ptr_length()
2464 block->offset, in qemu_ram_ptr_length()
2468 return ramblock_ptr(block, addr); in qemu_ram_ptr_length()
2487 ram_addr_t res = (uint8_t *)host - (uint8_t *)rb->host; in qemu_ram_block_host_offset()
2488 assert((uintptr_t)host >= (uintptr_t)rb->host); in qemu_ram_block_host_offset()
2489 assert(res < rb->max_length); in qemu_ram_block_host_offset()
2497 RAMBlock *block; in qemu_ram_block_from_host() local
2508 block = qemu_get_ram_block(ram_addr); in qemu_ram_block_from_host()
2509 if (block) { in qemu_ram_block_from_host()
2510 *offset = ram_addr - block->offset; in qemu_ram_block_from_host()
2512 return block; in qemu_ram_block_from_host()
2516 block = qatomic_rcu_read(&ram_list.mru_block); in qemu_ram_block_from_host()
2517 if (block && block->host && host - block->host < block->max_length) { in qemu_ram_block_from_host()
2521 RAMBLOCK_FOREACH(block) { in qemu_ram_block_from_host()
2522 /* This case happens when the block is not mapped. */ in qemu_ram_block_from_host()
2523 if (block->host == NULL) { in qemu_ram_block_from_host()
2526 if (host - block->host < block->max_length) { in qemu_ram_block_from_host()
2534 *offset = (host - block->host); in qemu_ram_block_from_host()
2538 return block; in qemu_ram_block_from_host()
2550 RAMBlock *block; in qemu_ram_block_by_name() local
2552 RAMBLOCK_FOREACH(block) { in qemu_ram_block_by_name()
2553 if (!strcmp(name, block->idstr)) { in qemu_ram_block_by_name()
2554 return block; in qemu_ram_block_by_name()
2567 RAMBlock *block; in qemu_ram_addr_from_host() local
2570 block = qemu_ram_block_from_host(ptr, false, &offset); in qemu_ram_addr_from_host()
2571 if (!block) { in qemu_ram_addr_from_host()
2575 return block->offset + offset; in qemu_ram_addr_from_host()
2608 res = flatview_read(subpage->fv, addr + subpage->base, attrs, buf, len); in subpage_read()
2628 return flatview_write(subpage->fv, addr + subpage->base, attrs, buf, len); in subpage_write()
2641 return flatview_access_valid(subpage->fv, addr + subpage->base, in subpage_accepts()
2662 return -1; in subpage_register()
2670 mmio->sub_section[idx] = section; in subpage_register()
2680 /* mmio->sub_section is set to PHYS_SECTION_UNASSIGNED with g_malloc0 */ in subpage_init()
2682 mmio->fv = fv; in subpage_init()
2683 mmio->base = base; in subpage_init()
2684 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio, in subpage_init()
2686 mmio->iomem.subpage = true; in subpage_init()
2720 n = dummy_section(&d->map, fv, &io_mem_unassigned); in address_space_dispatch_new()
2723 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 }; in address_space_dispatch_new()
2730 phys_sections_free(&d->map); in address_space_dispatch_free()
2746 * ---------------------- ------------------------- in tcg_log_global_after_sync()
2747 * TLB check -> slow path in tcg_log_global_after_sync()
2752 * TLB check -> fast path in tcg_log_global_after_sync()
2768 run_on_cpu(cpuas->cpu, do_nothing, RUN_ON_CPU_NULL); in tcg_log_global_after_sync()
2783 /* since each CPU stores ram addresses in its TLB cache, we must in tcg_commit()
2786 cpu = cpuas->cpu; in tcg_commit()
2794 * all of the tcg machinery for run-on is initialized: thus halt_cond. in tcg_commit()
2796 if (cpu->halt_cond) { in tcg_commit()
2846 tb_invalidate_phys_range(NULL, addr, addr + length - 1); in invalidate_and_set_dirty()
2867 unsigned access_size_max = mr->ops->valid.max_access_size; in memory_access_size()
2869 /* Regions are assumed to support 1-4 byte accesses unless in memory_access_size()
2876 if (!mr->ops->impl.unaligned) { in memory_access_size()
2877 unsigned align_size_max = addr & -addr; in memory_access_size()
2900 if (mr->flush_coalesced_mmio) { in prepare_mmio_access()
2928 "Invalid access to non-RAM device at " in flatview_access_allowed()
2974 uint8_t *ram_ptr = qemu_ram_ptr_length(mr->ram_block, mr_addr, l, in flatview_write_continue_step()
2998 len -= l; in flatview_write_continue()
3067 uint8_t *ram_ptr = qemu_ram_ptr_length(mr->ram_block, mr_addr, l, in flatview_read_continue_step()
3089 len -= l; in flatview_read_continue()
3174 len -= l; in address_space_set()
3215 ram_ptr = qemu_map_ram_ptr(mr->ram_block, addr1); in address_space_write_rom_internal()
3226 len -= l; in address_space_write_rom_internal()
3246 * triggered from within the guest. For TCG we are always cache coherent, in cpu_flush_icache_range()
3248 * the host's instruction cache at least. in cpu_flush_icache_range()
3284 while (!QLIST_EMPTY(&as->map_client_list)) { in address_space_notify_map_clients_locked()
3285 client = QLIST_FIRST(&as->map_client_list); in address_space_notify_map_clients_locked()
3286 qemu_bh_schedule(client->bh); in address_space_notify_map_clients_locked()
3295 QEMU_LOCK_GUARD(&as->map_client_list_lock); in address_space_register_map_client()
3296 client->bh = bh; in address_space_register_map_client()
3297 QLIST_INSERT_HEAD(&as->map_client_list, client, link); in address_space_register_map_client()
3300 if (qatomic_read(&as->bounce_buffer_size) < as->max_bounce_buffer_size) { in address_space_register_map_client()
3324 QEMU_LOCK_GUARD(&as->map_client_list_lock); in address_space_unregister_map_client()
3325 QLIST_FOREACH(client, &as->map_client_list, link) { in address_space_unregister_map_client()
3326 if (client->bh == bh) { in address_space_unregister_map_client()
3335 QEMU_LOCK_GUARD(&as->map_client_list_lock); in address_space_notify_map_clients()
3355 len -= l; in flatview_access_valid()
3383 target_len -= len; in flatview_extend_translation()
3402 * Use only for reads OR writes - not for read-modify-write operations.
3429 size_t used = qatomic_read(&as->bounce_buffer_size); in address_space_map()
3431 hwaddr alloc = MIN(as->max_bounce_buffer_size - used, l); in address_space_map()
3434 qatomic_cmpxchg(&as->bounce_buffer_size, used, new_size); in address_space_map()
3448 bounce->magic = BOUNCE_BUFFER_MAGIC; in address_space_map()
3450 bounce->mr = mr; in address_space_map()
3451 bounce->addr = addr; in address_space_map()
3452 bounce->len = l; in address_space_map()
3456 bounce->buffer, l); in address_space_map()
3460 return bounce->buffer; in address_space_map()
3467 return qemu_ram_ptr_length(mr->ram_block, xlat, plen, true, is_write); in address_space_map()
3494 assert(bounce->magic == BOUNCE_BUFFER_MAGIC); in address_space_unmap()
3497 address_space_write(as, bounce->addr, MEMTXATTRS_UNSPECIFIED, in address_space_unmap()
3498 bounce->buffer, access_len); in address_space_unmap()
3501 qatomic_sub(&as->bounce_buffer_size, bounce->len); in address_space_unmap()
3502 bounce->magic = ~BOUNCE_BUFFER_MAGIC; in address_space_unmap()
3503 memory_region_unref(bounce->mr); in address_space_unmap()
3532 int64_t address_space_cache_init(MemoryRegionCache *cache, in address_space_cache_init() argument
3546 cache->fv = address_space_get_flatview(as); in address_space_cache_init()
3547 d = flatview_to_dispatch(cache->fv); in address_space_cache_init()
3548 cache->mrs = *address_space_translate_internal(d, addr, &cache->xlat, &l, true); in address_space_cache_init()
3551 * cache->xlat is now relative to cache->mrs.mr, not to the section itself. in address_space_cache_init()
3553 * cache->xlat and the end of the section. in address_space_cache_init()
3555 diff = int128_sub(cache->mrs.size, in address_space_cache_init()
3556 int128_make64(cache->xlat - cache->mrs.offset_within_region)); in address_space_cache_init()
3559 mr = cache->mrs.mr; in address_space_cache_init()
3566 l = flatview_extend_translation(cache->fv, addr, len, mr, in address_space_cache_init()
3567 cache->xlat, l, is_write, in address_space_cache_init()
3569 cache->ptr = qemu_ram_ptr_length(mr->ram_block, cache->xlat, &l, true, in address_space_cache_init()
3572 cache->ptr = NULL; in address_space_cache_init()
3575 cache->len = l; in address_space_cache_init()
3576 cache->is_write = is_write; in address_space_cache_init()
3580 void address_space_cache_invalidate(MemoryRegionCache *cache, in address_space_cache_invalidate() argument
3584 assert(cache->is_write); in address_space_cache_invalidate()
3585 if (likely(cache->ptr)) { in address_space_cache_invalidate()
3586 invalidate_and_set_dirty(cache->mrs.mr, addr + cache->xlat, access_len); in address_space_cache_invalidate()
3590 void address_space_cache_destroy(MemoryRegionCache *cache) in address_space_cache_destroy() argument
3592 if (!cache->mrs.mr) { in address_space_cache_destroy()
3597 xen_invalidate_map_cache_entry(cache->ptr); in address_space_cache_destroy()
3599 memory_region_unref(cache->mrs.mr); in address_space_cache_destroy()
3600 flatview_unref(cache->fv); in address_space_cache_destroy()
3601 cache->mrs.mr = NULL; in address_space_cache_destroy()
3602 cache->fv = NULL; in address_space_cache_destroy()
3611 MemoryRegionCache *cache, hwaddr addr, hwaddr *xlat, in address_space_translate_cached() argument
3619 assert(!cache->ptr); in address_space_translate_cached()
3620 *xlat = addr + cache->xlat; in address_space_translate_cached()
3622 mr = cache->mrs.mr; in address_space_translate_cached()
3650 len -= l; in address_space_write_continue_cached()
3675 len -= l; in address_space_read_continue_cached()
3692 address_space_read_cached_slow(MemoryRegionCache *cache, hwaddr addr, in address_space_read_cached_slow() argument
3699 mr = address_space_translate_cached(cache, addr, &mr_addr, &l, false, in address_space_read_cached_slow()
3709 address_space_write_cached_slow(MemoryRegionCache *cache, hwaddr addr, in address_space_write_cached_slow() argument
3716 mr = address_space_translate_cached(cache, addr, &mr_addr, &l, true, in address_space_write_cached_slow()
3722 #define ARG1_DECL MemoryRegionCache *cache
3723 #define ARG1 cache
3725 #define TRANSLATE(...) address_space_translate_cached(cache, __VA_ARGS__)
3748 if (phys_addr == -1) in cpu_memory_rw_debug()
3749 return -1; in cpu_memory_rw_debug()
3750 l = (page + TARGET_PAGE_SIZE) - addr; in cpu_memory_rw_debug()
3754 res = address_space_rw(cpu->cpu_ases[asidx].as, phys_addr, attrs, buf, in cpu_memory_rw_debug()
3757 return -1; in cpu_memory_rw_debug()
3759 len -= l; in cpu_memory_rw_debug()
3781 RAMBlock *block; in qemu_ram_foreach_block() local
3785 RAMBLOCK_FOREACH(block) { in qemu_ram_foreach_block()
3786 ret = func(block, opaque); in qemu_ram_foreach_block()
3799 * Returns: 0 on success, non-0 on failure
3804 int ret = -1; in ram_block_discard_range()
3806 uint8_t *host_startaddr = rb->host + start; in ram_block_discard_range()
3808 if (!QEMU_PTR_IS_ALIGNED(host_startaddr, rb->page_size)) { in ram_block_discard_range()
3814 if ((start + length) <= rb->max_length) { in ram_block_discard_range()
3816 if (!QEMU_IS_ALIGNED(length, rb->page_size)) { in ram_block_discard_range()
3828 need_madvise = (rb->page_size == qemu_real_host_page_size()); in ram_block_discard_range()
3829 need_fallocate = rb->fd != -1; in ram_block_discard_range()
3840 if (rb->flags & RAM_READONLY_FD) { in ram_block_discard_range()
3864 ret = fallocate(rb->fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, in ram_block_discard_range()
3865 start + rb->fd_offset, length); in ram_block_discard_range()
3867 ret = -errno; in ram_block_discard_range()
3869 " +%zx (%d)", __func__, rb->idstr, start, in ram_block_discard_range()
3870 rb->fd_offset, length, ret); in ram_block_discard_range()
3874 ret = -ENOSYS; in ram_block_discard_range()
3877 rb->idstr, start, rb->fd_offset, length, ret); in ram_block_discard_range()
3888 if (qemu_ram_is_shared(rb) && rb->fd < 0) { in ram_block_discard_range()
3894 ret = -errno; in ram_block_discard_range()
3897 __func__, rb->idstr, start, length, ret); in ram_block_discard_range()
3901 ret = -ENOSYS; in ram_block_discard_range()
3903 __func__, rb->idstr, start, length, ret); in ram_block_discard_range()
3907 trace_ram_block_discard_range(rb->idstr, host_startaddr, length, in ram_block_discard_range()
3910 error_report("%s: Overrun block '%s' (%" PRIu64 "/%zx/" RAM_ADDR_FMT")", in ram_block_discard_range()
3911 __func__, rb->idstr, start, length, rb->max_length); in ram_block_discard_range()
3921 int ret = -1; in ram_block_discard_guest_memfd_range()
3925 ret = fallocate(rb->guest_memfd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, in ram_block_discard_guest_memfd_range()
3929 ret = -errno; in ram_block_discard_guest_memfd_range()
3931 __func__, rb->idstr, start, length, ret); in ram_block_discard_guest_memfd_range()
3934 ret = -ENOSYS; in ram_block_discard_guest_memfd_range()
3936 __func__, rb->idstr, start, length, ret); in ram_block_discard_guest_memfd_range()
3944 return rb->flags & RAM_PMEM; in ramblock_is_pmem()
3949 if (start == end - 1) { in mtree_print_phys_entries()
3952 qemu_printf("\t%3d..%-3d ", start, end - 1); in mtree_print_phys_entries()
3975 for (i = 0; i < d->map.sections_nb; ++i) { in mtree_print_dispatch()
3976 MemoryRegionSection *s = d->map.sections + i; in mtree_print_dispatch()
3983 s->offset_within_address_space, in mtree_print_dispatch()
3984 s->offset_within_address_space + MR_SIZE(s->size), in mtree_print_dispatch()
3985 s->mr->name ? s->mr->name : "(noname)", in mtree_print_dispatch()
3987 s->mr == root ? " [ROOT]" : "", in mtree_print_dispatch()
3988 s == d->mru_section ? " [MRU]" : "", in mtree_print_dispatch()
3989 s->mr->is_iommu ? " [iommu]" : ""); in mtree_print_dispatch()
3991 if (s->mr->alias) { in mtree_print_dispatch()
3992 qemu_printf(" alias=%s", s->mr->alias->name ? in mtree_print_dispatch()
3993 s->mr->alias->name : "noname"); in mtree_print_dispatch()
3999 P_L2_BITS, P_L2_LEVELS, d->phys_map.ptr, d->phys_map.skip); in mtree_print_dispatch()
4000 for (i = 0; i < d->map.nodes_nb; ++i) { in mtree_print_dispatch()
4003 Node *n = d->map.nodes + i; in mtree_print_dispatch()
4010 if (pe->ptr == prev.ptr && pe->skip == prev.skip) { in mtree_print_dispatch()
4058 ram_block_discard_disabled_cnt--; in ram_block_discard_disable()
4061 ret = -EBUSY; in ram_block_discard_disable()
4075 ram_block_uncoordinated_discard_disabled_cnt--; in ram_block_uncoordinated_discard_disable()
4077 ret = -EBUSY; in ram_block_uncoordinated_discard_disable()
4091 ram_block_discard_required_cnt--; in ram_block_discard_require()
4094 ret = -EBUSY; in ram_block_discard_require()
4108 ram_block_coordinated_discard_required_cnt--; in ram_block_coordinated_discard_require()
4110 ret = -EBUSY; in ram_block_coordinated_discard_require()
4136 MemoryRegion *mr = rb->mr; in ram_is_cpr_compatible()
4151 if (rb->fd >= 0 && qemu_ram_is_shared(rb)) { in ram_is_cpr_compatible()
4159 * Add a blocker for each volatile ram block. This function should only be
4160 * called after we know that the block is migratable. Non-migratable blocks
4161 * are either re-created in new QEMU, or are handled specially, or are covered
4162 * by a device-level CPR blocker.
4172 error_setg(&rb->cpr_blocker, in ram_block_add_cpr_blocker()
4174 "required for memory-backend objects, and aux-ram-share=on is " in ram_block_add_cpr_blocker()
4175 "required.", memory_region_name(rb->mr)); in ram_block_add_cpr_blocker()
4176 migrate_add_blocker_modes(&rb->cpr_blocker, errp, MIG_MODE_CPR_TRANSFER, in ram_block_add_cpr_blocker()
4177 -1); in ram_block_add_cpr_blocker()
4182 migrate_del_blocker(&rb->cpr_blocker); in ram_block_del_cpr_blocker()