Lines Matching +full:cache +full:- +full:block +full:- +full:size

21 #include "exec/page-vary.h"
31 #include "accel/tcg/cpu-ops.h"
34 #include "exec/exec-all.h"
36 #include "exec/page-protection.h"
38 #include "exec/translation-block.h"
39 #include "hw/qdev-core.h"
40 #include "hw/qdev-properties.h"
47 #include "qemu/config-file.h"
48 #include "qemu/error-report.h"
49 #include "qemu/qemu-print.h"
58 #include "system/xen-mapcache.h"
66 #include "qemu/main-loop.h"
69 #include "exec/memory-internal.h"
74 #include "qapi/qapi-types-migration.h"
82 #include "qemu/mmap-alloc.h"
117 /* Size of the L2 (and L3, etc) page tables. */
123 #define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
140 /* This is a multi-level map on the physical address space.
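A minimal, self-contained sketch of the radix-index arithmetic behind the P_L2_* definitions above, i.e. the same expression "(index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)" that appears in phys_page_set_level() and phys_page_find() further down. The constant values and the SKETCH_* names are assumptions for illustration only, not QEMU identifiers: 9 bits per level, a 64-bit address space and 4 KiB target pages are typical but build-dependent.

#include <stdio.h>
#include <stdint.h>

/* Assumed values mirroring a common configuration; not taken from a real build. */
#define SKETCH_P_L2_BITS        9
#define SKETCH_P_L2_SIZE        (1 << SKETCH_P_L2_BITS)
#define SKETCH_TARGET_PAGE_BITS 12
#define SKETCH_ADDR_SPACE_BITS  64
#define SKETCH_P_L2_LEVELS \
    (((SKETCH_ADDR_SPACE_BITS - SKETCH_TARGET_PAGE_BITS - 1) / SKETCH_P_L2_BITS) + 1)

int main(void)
{
    uint64_t addr = 0x12345678000ULL;                   /* arbitrary guest-physical address */
    uint64_t index = addr >> SKETCH_TARGET_PAGE_BITS;   /* page index fed into the tree */

    /* Walk from the top level down, printing the slot selected at each level,
     * i.e. the field extracted by (index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1). */
    for (int level = SKETCH_P_L2_LEVELS - 1; level >= 0; level--) {
        unsigned slot = (index >> (level * SKETCH_P_L2_BITS)) & (SKETCH_P_L2_SIZE - 1);
        printf("level %d -> slot %u\n", level, slot);
    }
    return 0;
}

With these assumed values the tree has six levels of 9 bits each, enough to cover the 52-bit page-number space left after the 12-bit page offset.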
186 if (map->nodes_nb + nodes > map->nodes_nb_alloc) { in phys_map_node_reserve()
187 map->nodes_nb_alloc = MAX(alloc_hint, map->nodes_nb + nodes); in phys_map_node_reserve()
188 map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc); in phys_map_node_reserve()
189 alloc_hint = map->nodes_nb_alloc; in phys_map_node_reserve()
200 ret = map->nodes_nb++; in phys_map_node_alloc()
201 p = map->nodes[ret]; in phys_map_node_alloc()
203 assert(ret != map->nodes_nb_alloc); in phys_map_node_alloc()
220 if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) { in phys_page_set_level()
221 lp->ptr = phys_map_node_alloc(map, level == 0); in phys_page_set_level()
223 p = map->nodes[lp->ptr]; in phys_page_set_level()
224 lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)]; in phys_page_set_level()
227 if ((*index & (step - 1)) == 0 && *nb >= step) { in phys_page_set_level()
228 lp->skip = 0; in phys_page_set_level()
229 lp->ptr = leaf; in phys_page_set_level()
231 *nb -= step; in phys_page_set_level()
233 phys_page_set_level(map, lp, index, nb, leaf, level - 1); in phys_page_set_level()
243 /* Wildly overreserve - it doesn't matter much. */ in phys_page_set()
244 phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS); in phys_page_set()
246 phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1); in phys_page_set()
259 if (lp->ptr == PHYS_MAP_NODE_NIL) { in phys_page_compact()
263 p = nodes[lp->ptr]; in phys_page_compact()
285 lp->skip + p[valid_ptr].skip >= (1 << 6)) { in phys_page_compact()
289 lp->ptr = p[valid_ptr].ptr; in phys_page_compact()
297 lp->skip = 0; in phys_page_compact()
299 lp->skip += p[valid_ptr].skip; in phys_page_compact()
305 if (d->phys_map.skip) { in address_space_dispatch_compact()
306 phys_page_compact(&d->phys_map, d->map.nodes); in address_space_dispatch_compact()
313 /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means in section_covers_addr()
316 return int128_gethi(section->size) || in section_covers_addr()
317 range_covers_byte(section->offset_within_address_space, in section_covers_addr()
318 int128_getlo(section->size), addr); in section_covers_addr()
323 PhysPageEntry lp = d->phys_map, *p; in phys_page_find()
324 Node *nodes = d->map.nodes; in phys_page_find()
325 MemoryRegionSection *sections = d->map.sections; in phys_page_find()
329 for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) { in phys_page_find()
334 lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)]; in phys_page_find()
349 MemoryRegionSection *section = qatomic_read(&d->mru_section); in address_space_lookup_region()
352 if (!section || section == &d->map.sections[PHYS_SECTION_UNASSIGNED] || in address_space_lookup_region()
355 qatomic_set(&d->mru_section, section); in address_space_lookup_region()
357 if (resolve_subpage && section->mr->subpage) { in address_space_lookup_region()
358 subpage = container_of(section->mr, subpage_t, iomem); in address_space_lookup_region()
359 section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]]; in address_space_lookup_region()
375 addr -= section->offset_within_address_space; in address_space_translate_internal()
378 *xlat = addr + section->offset_within_region; in address_space_translate_internal()
380 mr = section->mr; in address_space_translate_internal()
382 /* MMIO registers can be expected to perform full-width accesses based only in address_space_translate_internal()
394 diff = int128_sub(section->size, int128_make64(addr)); in address_space_translate_internal()
401 * address_space_translate_iommu - translate an address through an IOMMU
432 hwaddr page_mask = (hwaddr)-1; in address_space_translate_iommu()
440 if (imrc->attrs_to_index) { in address_space_translate_iommu()
441 iommu_idx = imrc->attrs_to_index(iommu_mr, attrs); in address_space_translate_iommu()
444 iotlb = imrc->translate(iommu_mr, addr, is_write ? in address_space_translate_iommu()
454 *plen_out = MIN(*plen_out, (addr | iotlb.addr_mask) - addr + 1); in address_space_translate_iommu()
461 iommu_mr = memory_region_get_iommu(section->mr); in address_space_translate_iommu()
474 * flatview_do_translate - translate an address in FlatView
505 hwaddr plen = (hwaddr)(-1); in flatview_do_translate()
515 iommu_mr = memory_region_get_iommu(section->mr); in flatview_do_translate()
523 /* Not behind an IOMMU, use default page size. */ in flatview_do_translate()
551 xlat += section.offset_within_address_space - in address_space_get_iotlb_entry()
582 hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr; in flatview_translate()
601 if (!notifier->active) { in tcg_iommu_unmap_notify()
604 tlb_flush(notifier->cpu); in tcg_iommu_unmap_notify()
605 notifier->active = false; in tcg_iommu_unmap_notify()
625 for (i = 0; i < cpu->iommu_notifiers->len; i++) { in tcg_register_iommu_notifier()
626 notifier = g_array_index(cpu->iommu_notifiers, TCGIOMMUNotifier *, i); in tcg_register_iommu_notifier()
627 if (notifier->mr == mr && notifier->iommu_idx == iommu_idx) { in tcg_register_iommu_notifier()
631 if (i == cpu->iommu_notifiers->len) { in tcg_register_iommu_notifier()
633 cpu->iommu_notifiers = g_array_set_size(cpu->iommu_notifiers, i + 1); in tcg_register_iommu_notifier()
635 g_array_index(cpu->iommu_notifiers, TCGIOMMUNotifier *, i) = notifier; in tcg_register_iommu_notifier()
637 notifier->mr = mr; in tcg_register_iommu_notifier()
638 notifier->iommu_idx = iommu_idx; in tcg_register_iommu_notifier()
639 notifier->cpu = cpu; in tcg_register_iommu_notifier()
646 iommu_notifier_init(&notifier->n, in tcg_register_iommu_notifier()
652 memory_region_register_iommu_notifier(notifier->mr, &notifier->n, in tcg_register_iommu_notifier()
656 if (!notifier->active) { in tcg_register_iommu_notifier()
657 notifier->active = true; in tcg_register_iommu_notifier()
667 for (i = 0; i < cpu->iommu_notifiers->len; i++) { in tcg_iommu_free_notifier_list()
668 notifier = g_array_index(cpu->iommu_notifiers, TCGIOMMUNotifier *, i); in tcg_iommu_free_notifier_list()
669 memory_region_unregister_iommu_notifier(notifier->mr, &notifier->n); in tcg_iommu_free_notifier_list()
672 g_array_free(cpu->iommu_notifiers, true); in tcg_iommu_free_notifier_list()
677 cpu->iommu_notifiers = g_array_new(false, true, sizeof(TCGIOMMUNotifier *)); in tcg_iommu_init_notifier_list()
692 AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch; in address_space_translate_for_iotlb()
697 iommu_mr = memory_region_get_iommu(section->mr); in address_space_translate_for_iotlb()
704 iommu_idx = imrc->attrs_to_index(iommu_mr, attrs); in address_space_translate_for_iotlb()
707 * doesn't short-cut its translation table walk. in address_space_translate_for_iotlb()
709 iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, iommu_idx); in address_space_translate_for_iotlb()
730 assert(!memory_region_is_iommu(section->mr)); in address_space_translate_for_iotlb()
736 * We should be given a page-aligned address -- certainly in address_space_translate_for_iotlb()
745 return &d->map.sections[PHYS_SECTION_UNASSIGNED]; in address_space_translate_for_iotlb()
756 as_name = g_strdup_printf("%s-%d", prefix, cpu->cpu_index); in cpu_address_space_init()
761 assert(asidx < cpu->num_ases); in cpu_address_space_init()
765 cpu->as = as; in cpu_address_space_init()
771 if (!cpu->cpu_ases) { in cpu_address_space_init()
772 cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases); in cpu_address_space_init()
773 cpu->cpu_ases_count = cpu->num_ases; in cpu_address_space_init()
776 newas = &cpu->cpu_ases[asidx]; in cpu_address_space_init()
777 newas->cpu = cpu; in cpu_address_space_init()
778 newas->as = as; in cpu_address_space_init()
780 newas->tcg_as_listener.log_global_after_sync = tcg_log_global_after_sync; in cpu_address_space_init()
781 newas->tcg_as_listener.commit = tcg_commit; in cpu_address_space_init()
782 newas->tcg_as_listener.name = "tcg"; in cpu_address_space_init()
783 memory_listener_register(&newas->tcg_as_listener, as); in cpu_address_space_init()
791 assert(cpu->cpu_ases); in cpu_address_space_destroy()
792 assert(asidx >= 0 && asidx < cpu->num_ases); in cpu_address_space_destroy()
796 cpuas = &cpu->cpu_ases[asidx]; in cpu_address_space_destroy()
798 memory_listener_unregister(&cpuas->tcg_as_listener); in cpu_address_space_destroy()
801 address_space_destroy(cpuas->as); in cpu_address_space_destroy()
802 g_free_rcu(cpuas->as, rcu); in cpu_address_space_destroy()
806 cpu->as = NULL; in cpu_address_space_destroy()
809 if (--cpu->cpu_ases_count == 0) { in cpu_address_space_destroy()
810 g_free(cpu->cpu_ases); in cpu_address_space_destroy()
811 cpu->cpu_ases = NULL; in cpu_address_space_destroy()
818 return cpu->cpu_ases[asidx].as; in cpu_get_address_space()
824 RAMBlock *block; in qemu_get_ram_block() local
826 block = qatomic_rcu_read(&ram_list.mru_block); in qemu_get_ram_block()
827 if (block && addr - block->offset < block->max_length) { in qemu_get_ram_block()
828 return block; in qemu_get_ram_block()
830 RAMBLOCK_FOREACH(block) { in qemu_get_ram_block()
831 if (addr - block->offset < block->max_length) { in qemu_get_ram_block()
852 * qatomic_rcu_set is not needed here. The block was already published in qemu_get_ram_block()
856 ram_list.mru_block = block; in qemu_get_ram_block()
857 return block; in qemu_get_ram_block()
864 RAMBlock *block; in tlb_reset_dirty_range_all() local
872 block = qemu_get_ram_block(start); in tlb_reset_dirty_range_all()
873 assert(block == qemu_get_ram_block(end - 1)); in tlb_reset_dirty_range_all()
874 start1 = (uintptr_t)ramblock_ptr(block, start - block->offset); in tlb_reset_dirty_range_all()
880 /* Note: start and end must be within the same ram block. */
903 assert(start >= ramblock->offset && in cpu_physical_memory_test_and_clear_dirty()
904 start + length <= ramblock->offset + ramblock->used_length); in cpu_physical_memory_test_and_clear_dirty()
909 unsigned long num = MIN(end - page, in cpu_physical_memory_test_and_clear_dirty()
910 DIRTY_MEMORY_BLOCK_SIZE - offset); in cpu_physical_memory_test_and_clear_dirty()
912 dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx], in cpu_physical_memory_test_and_clear_dirty()
917 mr_offset = (ram_addr_t)(start_page << TARGET_PAGE_BITS) - ramblock->offset; in cpu_physical_memory_test_and_clear_dirty()
918 mr_size = (end - start_page) << TARGET_PAGE_BITS; in cpu_physical_memory_test_and_clear_dirty()
919 memory_region_clear_dirty_bitmap(ramblock->mr, mr_offset, mr_size); in cpu_physical_memory_test_and_clear_dirty()
947 ((last - first) >> (TARGET_PAGE_BITS + 3))); in cpu_physical_memory_snapshot_and_clear_dirty()
948 snap->start = first; in cpu_physical_memory_snapshot_and_clear_dirty()
949 snap->end = last; in cpu_physical_memory_snapshot_and_clear_dirty()
961 unsigned long num = MIN(end - page, in cpu_physical_memory_snapshot_and_clear_dirty()
962 DIRTY_MEMORY_BLOCK_SIZE - ofs); in cpu_physical_memory_snapshot_and_clear_dirty()
968 bitmap_copy_and_clear_atomic(snap->dirty + dest, in cpu_physical_memory_snapshot_and_clear_dirty()
969 blocks->blocks[idx] + ofs, in cpu_physical_memory_snapshot_and_clear_dirty()
989 assert(start >= snap->start); in cpu_physical_memory_snapshot_get_dirty()
990 assert(start + length <= snap->end); in cpu_physical_memory_snapshot_get_dirty()
992 end = TARGET_PAGE_ALIGN(start + length - snap->start) >> TARGET_PAGE_BITS; in cpu_physical_memory_snapshot_get_dirty()
993 page = (start - snap->start) >> TARGET_PAGE_BITS; in cpu_physical_memory_snapshot_get_dirty()
996 if (test_bit(page, snap->dirty)) { in cpu_physical_memory_snapshot_get_dirty()
1008 AddressSpaceDispatch *d = flatview_to_dispatch(section->fv); in memory_region_section_get_iotlb()
1009 return section - d->map.sections; in memory_region_section_get_iotlb()
1019 /* The physical section number is ORed with a page-aligned in phys_section_add()
1021 * never overflow into the page-aligned value. in phys_section_add()
1023 assert(map->sections_nb < TARGET_PAGE_SIZE); in phys_section_add()
1025 if (map->sections_nb == map->sections_nb_alloc) { in phys_section_add()
1026 map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16); in phys_section_add()
1027 map->sections = g_renew(MemoryRegionSection, map->sections, in phys_section_add()
1028 map->sections_nb_alloc); in phys_section_add()
1030 map->sections[map->sections_nb] = *section; in phys_section_add()
1031 memory_region_ref(section->mr); in phys_section_add()
1032 return map->sections_nb++; in phys_section_add()
1037 bool have_sub_page = mr->subpage; in phys_section_destroy()
1043 object_unref(OBJECT(&subpage->iomem)); in phys_section_destroy()
1050 while (map->sections_nb > 0) { in phys_sections_free()
1051 MemoryRegionSection *section = &map->sections[--map->sections_nb]; in phys_sections_free()
1052 phys_section_destroy(section->mr); in phys_sections_free()
1054 g_free(map->sections); in phys_sections_free()
1055 g_free(map->nodes); in phys_sections_free()
1062 hwaddr base = section->offset_within_address_space in register_subpage()
1067 .size = int128_make64(TARGET_PAGE_SIZE), in register_subpage()
1071 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned); in register_subpage()
1073 if (!(existing->mr->subpage)) { in register_subpage()
1076 subsection.mr = &subpage->iomem; in register_subpage()
1078 phys_section_add(&d->map, &subsection)); in register_subpage()
1080 subpage = container_of(existing->mr, subpage_t, iomem); in register_subpage()
1082 start = section->offset_within_address_space & ~TARGET_PAGE_MASK; in register_subpage()
1083 end = start + int128_get64(section->size) - 1; in register_subpage()
1085 phys_section_add(&d->map, section)); in register_subpage()
1093 hwaddr start_addr = section->offset_within_address_space; in register_multipage()
1094 uint16_t section_index = phys_section_add(&d->map, section); in register_multipage()
1095 uint64_t num_pages = int128_get64(int128_rshift(section->size, in register_multipage()
1117 - remain.offset_within_address_space; in flatview_add_to_dispatch()
1120 now.size = int128_min(int128_make64(left), now.size); in flatview_add_to_dispatch()
1122 if (int128_eq(remain.size, now.size)) { in flatview_add_to_dispatch()
1125 remain.size = int128_sub(remain.size, now.size); in flatview_add_to_dispatch()
1126 remain.offset_within_address_space += int128_get64(now.size); in flatview_add_to_dispatch()
1127 remain.offset_within_region += int128_get64(now.size); in flatview_add_to_dispatch()
1131 if (int128_ge(remain.size, page_size)) { in flatview_add_to_dispatch()
1133 now.size = int128_and(now.size, int128_neg(page_size)); in flatview_add_to_dispatch()
1135 if (int128_eq(remain.size, now.size)) { in flatview_add_to_dispatch()
1138 remain.size = int128_sub(remain.size, now.size); in flatview_add_to_dispatch()
1139 remain.offset_within_address_space += int128_get64(now.size); in flatview_add_to_dispatch()
1140 remain.offset_within_region += int128_get64(now.size); in flatview_add_to_dispatch()
1165 RAMBlock *block; in ram_block_format() local
1171 "Block Name", "PSize", "Offset", "Used", "Total", in ram_block_format()
1174 RAMBLOCK_FOREACH(block) { in ram_block_format()
1175 psize = size_to_str(block->page_size); in ram_block_format()
1178 block->idstr, psize, in ram_block_format()
1179 (uint64_t)block->offset, in ram_block_format()
1180 (uint64_t)block->used_length, in ram_block_format()
1181 (uint64_t)block->max_length, in ram_block_format()
1182 (uint64_t)(uintptr_t)block->host, in ram_block_format()
1183 block->mr->readonly ? "ro" : "rw"); in ram_block_format()
1248 int64_t size; in get_file_size() local
1253 return -errno; in get_file_size()
1269 size_path = g_strdup_printf("/sys/dev/char/%d:%d/size", in get_file_size()
1280 size = lseek(fd, 0, SEEK_END); in get_file_size()
1281 if (size < 0) { in get_file_size()
1282 return -errno; in get_file_size()
1284 return size; in get_file_size()
1289 int64_t align = -1; in get_file_align()
1294 return -errno; in get_file_align()
1309 return -errno; in get_file_align()
1314 return -1; in get_file_align()
1338 int fd = -1; in file_ram_open()
1357 return -errno; in file_ram_open()
1360 return -EISDIR; in file_ram_open()
1369 return -ENOENT; in file_ram_open()
1400 return -errno; in file_ram_open()
1411 static void *file_ram_alloc(RAMBlock *block, in file_ram_alloc() argument
1421 block->page_size = qemu_fd_getpagesize(fd); in file_ram_alloc()
1422 if (block->mr->align % block->page_size) { in file_ram_alloc()
1424 " must be multiples of page size 0x%zx", in file_ram_alloc()
1425 block->mr->align, block->page_size); in file_ram_alloc()
1427 } else if (block->mr->align && !is_power_of_2(block->mr->align)) { in file_ram_alloc()
1429 " must be a power of two", block->mr->align); in file_ram_alloc()
1431 } else if (offset % block->page_size) { in file_ram_alloc()
1433 " must be multiples of page size 0x%zx", in file_ram_alloc()
1434 offset, block->page_size); in file_ram_alloc()
1437 block->mr->align = MAX(block->page_size, block->mr->align); in file_ram_alloc()
1440 block->mr->align = MAX(block->mr->align, QEMU_VMALLOC_ALIGN); in file_ram_alloc()
1444 if (memory < block->page_size) { in file_ram_alloc()
1445 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to " in file_ram_alloc()
1446 "or larger than page size 0x%zx", in file_ram_alloc()
1447 memory, block->page_size); in file_ram_alloc()
1451 memory = ROUND_UP(memory, block->page_size); in file_ram_alloc()
1459 * Do not truncate the non-empty backend file to avoid corrupting in file_ram_alloc()
1464 * those labels. Therefore, extending the non-empty backend file in file_ram_alloc()
1471 qemu_map_flags = (block->flags & RAM_READONLY) ? QEMU_MAP_READONLY : 0; in file_ram_alloc()
1472 qemu_map_flags |= (block->flags & RAM_SHARED) ? QEMU_MAP_SHARED : 0; in file_ram_alloc()
1473 qemu_map_flags |= (block->flags & RAM_PMEM) ? QEMU_MAP_SYNC : 0; in file_ram_alloc()
1474 qemu_map_flags |= (block->flags & RAM_NORESERVE) ? QEMU_MAP_NORESERVE : 0; in file_ram_alloc()
1475 area = qemu_ram_mmap(fd, memory, block->mr->align, qemu_map_flags, offset); in file_ram_alloc()
1482 block->fd = fd; in file_ram_alloc()
1483 block->fd_offset = offset; in file_ram_alloc()
1492 static ram_addr_t find_ram_offset(ram_addr_t size) in find_ram_offset() argument
1494 RAMBlock *block, *next_block; in find_ram_offset() local
1497 assert(size != 0); /* it would hand out same offset multiple times */ in find_ram_offset()
1503 RAMBLOCK_FOREACH(block) { in find_ram_offset()
1509 candidate = block->offset + block->max_length; in find_ram_offset()
1512 /* Search for the closest following block in find_ram_offset()
1516 if (next_block->offset >= candidate) { in find_ram_offset()
1517 next = MIN(next, next_block->offset); in find_ram_offset()
1521 /* If it fits remember our place and remember the size in find_ram_offset()
1525 if (next - candidate >= size && next - candidate < mingap) { in find_ram_offset()
1527 mingap = next - candidate; in find_ram_offset()
1530 trace_find_ram_offset_loop(size, candidate, offset, next, mingap); in find_ram_offset()
1534 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n", in find_ram_offset()
1535 (uint64_t)size); in find_ram_offset()
1539 trace_find_ram_offset(size, offset); in find_ram_offset()
1544 static void qemu_ram_setup_dump(void *addr, ram_addr_t size) in qemu_ram_setup_dump() argument
1550 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP); in qemu_ram_setup_dump()
1554 "but dump-guest-core=off specified\n"); in qemu_ram_setup_dump()
1561 return rb->idstr; in qemu_ram_get_idstr()
1566 return rb->host; in qemu_ram_get_host_addr()
1571 return rb->offset; in qemu_ram_get_offset()
1576 return rb->used_length; in qemu_ram_get_used_length()
1581 return rb->max_length; in qemu_ram_get_max_length()
1586 return rb->flags & RAM_SHARED; in qemu_ram_is_shared()
1591 return rb->flags & RAM_NORESERVE; in qemu_ram_is_noreserve()
1597 return rb->flags & RAM_UF_ZEROPAGE; in qemu_ram_is_uf_zeroable()
1602 rb->flags |= RAM_UF_ZEROPAGE; in qemu_ram_set_uf_zeroable()
1607 return rb->flags & RAM_MIGRATABLE; in qemu_ram_is_migratable()
1612 rb->flags |= RAM_MIGRATABLE; in qemu_ram_set_migratable()
1617 rb->flags &= ~RAM_MIGRATABLE; in qemu_ram_unset_migratable()
1622 return rb->flags & RAM_NAMED_FILE; in qemu_ram_is_named_file()
1627 return rb->fd; in qemu_ram_get_fd()
1633 RAMBlock *block; in qemu_ram_set_idstr() local
1636 assert(!new_block->idstr[0]); in qemu_ram_set_idstr()
1641 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id); in qemu_ram_set_idstr()
1645 pstrcat(new_block->idstr, sizeof(new_block->idstr), name); in qemu_ram_set_idstr()
1648 RAMBLOCK_FOREACH(block) { in qemu_ram_set_idstr()
1649 if (block != new_block && in qemu_ram_set_idstr()
1650 !strcmp(block->idstr, new_block->idstr)) { in qemu_ram_set_idstr()
1652 new_block->idstr); in qemu_ram_set_idstr()
1659 void qemu_ram_unset_idstr(RAMBlock *block) in qemu_ram_unset_idstr() argument
1662 * migration. Ignore the problem since hot-unplug during migration in qemu_ram_unset_idstr()
1665 if (block) { in qemu_ram_unset_idstr()
1666 memset(block->idstr, 0, sizeof(block->idstr)); in qemu_ram_unset_idstr()
1673 g_autofree char *id = mr->dev ? qdev_get_dev_path(mr->dev) : NULL; in cpr_name()
1684 return rb->page_size; in qemu_ram_pagesize()
1687 /* Returns the largest size of page in use */
1690 RAMBlock *block; in qemu_ram_pagesize_largest() local
1693 RAMBLOCK_FOREACH(block) { in qemu_ram_pagesize_largest()
1694 largest = MAX(largest, qemu_ram_pagesize(block)); in qemu_ram_pagesize_largest()
1718 int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp) in qemu_ram_resize() argument
1720 const ram_addr_t oldsize = block->used_length; in qemu_ram_resize()
1723 assert(block); in qemu_ram_resize()
1728 if (block->used_length == newsize) { in qemu_ram_resize()
1730 * We don't have to resize the ram block (which only knows aligned in qemu_ram_resize()
1731 * sizes), however, we have to notify if the unaligned size changed. in qemu_ram_resize()
1733 if (unaligned_size != memory_region_size(block->mr)) { in qemu_ram_resize()
1734 memory_region_set_size(block->mr, unaligned_size); in qemu_ram_resize()
1735 if (block->resized) { in qemu_ram_resize()
1736 block->resized(block->idstr, unaligned_size, block->host); in qemu_ram_resize()
1742 if (!(block->flags & RAM_RESIZEABLE)) { in qemu_ram_resize()
1744 "Size mismatch: %s: 0x" RAM_ADDR_FMT in qemu_ram_resize()
1745 " != 0x" RAM_ADDR_FMT, block->idstr, in qemu_ram_resize()
1746 newsize, block->used_length); in qemu_ram_resize()
1747 return -EINVAL; in qemu_ram_resize()
1750 if (block->max_length < newsize) { in qemu_ram_resize()
1752 "Size too large: %s: 0x" RAM_ADDR_FMT in qemu_ram_resize()
1753 " > 0x" RAM_ADDR_FMT, block->idstr, in qemu_ram_resize()
1754 newsize, block->max_length); in qemu_ram_resize()
1755 return -EINVAL; in qemu_ram_resize()
1758 /* Notify before modifying the ram block and touching the bitmaps. */ in qemu_ram_resize()
1759 if (block->host) { in qemu_ram_resize()
1760 ram_block_notify_resize(block->host, oldsize, newsize); in qemu_ram_resize()
1763 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length); in qemu_ram_resize()
1764 block->used_length = newsize; in qemu_ram_resize()
1765 cpu_physical_memory_set_dirty_range(block->offset, block->used_length, in qemu_ram_resize()
1767 memory_region_set_size(block->mr, unaligned_size); in qemu_ram_resize()
1768 if (block->resized) { in qemu_ram_resize()
1769 block->resized(block->idstr, unaligned_size, block->host); in qemu_ram_resize()
1775 * Trigger sync on the given ram block for range [start, start + length]
1777 * Otherwise no-op.
1780 void qemu_ram_msync(RAMBlock *block, ram_addr_t start, ram_addr_t length) in qemu_ram_msync() argument
1782 /* The requested range should fit within the block range */ in qemu_ram_msync()
1783 g_assert((start + length) <= block->used_length); in qemu_ram_msync()
1786 /* The lack of support for pmem should not block the sync */ in qemu_ram_msync()
1787 if (ramblock_is_pmem(block)) { in qemu_ram_msync()
1788 void *addr = ramblock_ptr(block, start); in qemu_ram_msync()
1793 if (block->fd >= 0) { in qemu_ram_msync()
1796 * specified as persistent (or is not one) - use the msync. in qemu_ram_msync()
1799 void *addr = ramblock_ptr(block, start); in qemu_ram_msync()
1800 if (qemu_msync(addr, length, block->fd)) { in qemu_ram_msync()
1816 /* Only need to extend if block count increased */ in dirty_memory_extend()
1828 sizeof(new_blocks->blocks[0]) * new_num_blocks); in dirty_memory_extend()
1831 memcpy(new_blocks->blocks, old_blocks->blocks, in dirty_memory_extend()
1832 old_num_blocks * sizeof(old_blocks->blocks[0])); in dirty_memory_extend()
1836 new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE); in dirty_memory_extend()
1853 RAMBlock *block; in ram_block_add() local
1860 new_block->offset = find_ram_offset(new_block->max_length); in ram_block_add()
1862 if (!new_block->host) { in ram_block_add()
1864 xen_ram_alloc(new_block->offset, new_block->max_length, in ram_block_add()
1865 new_block->mr, &err); in ram_block_add()
1872 new_block->host = qemu_anon_ram_alloc(new_block->max_length, in ram_block_add()
1873 &new_block->mr->align, in ram_block_add()
1875 if (!new_block->host) { in ram_block_add()
1878 memory_region_name(new_block->mr)); in ram_block_add()
1882 memory_try_enable_merging(new_block->host, new_block->max_length); in ram_block_add()
1887 if (new_block->flags & RAM_GUEST_MEMFD) { in ram_block_add()
1892 object_get_typename(OBJECT(current_machine->cgs))); in ram_block_add()
1895 assert(new_block->guest_memfd < 0); in ram_block_add()
1899 error_setg_errno(errp, -ret, in ram_block_add()
1905 new_block->guest_memfd = kvm_create_guest_memfd(new_block->max_length, in ram_block_add()
1907 if (new_block->guest_memfd < 0) { in ram_block_add()
1917 error_setg(&new_block->cpr_blocker, in ram_block_add()
1920 memory_region_name(new_block->mr)); in ram_block_add()
1921 migrate_add_blocker_modes(&new_block->cpr_blocker, errp, in ram_block_add()
1922 MIG_MODE_CPR_TRANSFER, -1); in ram_block_add()
1926 ram_size = (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS; in ram_block_add()
1928 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ, in ram_block_add()
1929 * QLIST (which has an RCU-friendly variant) does not have insertion at in ram_block_add()
1932 RAMBLOCK_FOREACH(block) { in ram_block_add()
1933 last_block = block; in ram_block_add()
1934 if (block->max_length < new_block->max_length) { in ram_block_add()
1938 if (block) { in ram_block_add()
1939 QLIST_INSERT_BEFORE_RCU(block, new_block, next); in ram_block_add()
1952 cpu_physical_memory_set_dirty_range(new_block->offset, in ram_block_add()
1953 new_block->used_length, in ram_block_add()
1956 if (new_block->host) { in ram_block_add()
1957 qemu_ram_setup_dump(new_block->host, new_block->max_length); in ram_block_add()
1958 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE); in ram_block_add()
1965 qemu_madvise(new_block->host, new_block->max_length, in ram_block_add()
1968 ram_block_notify_add(new_block->host, new_block->used_length, in ram_block_add()
1969 new_block->max_length); in ram_block_add()
1975 qemu_anon_ram_free(new_block->host, new_block->max_length); in ram_block_add()
1976 new_block->host = NULL; in ram_block_add()
1981 RAMBlock *qemu_ram_alloc_from_fd(ram_addr_t size, ram_addr_t max_size, in qemu_ram_alloc_from_fd() argument
2001 assert(max_size >= size); in qemu_ram_alloc_from_fd()
2004 error_setg(errp, "-mem-path not supported with Xen"); in qemu_ram_alloc_from_fd()
2010 "host lacks kvm mmu notifiers, -mem-path unsupported"); in qemu_ram_alloc_from_fd()
2014 size = TARGET_PAGE_ALIGN(size); in qemu_ram_alloc_from_fd()
2015 size = REAL_HOST_PAGE_ALIGN(size); in qemu_ram_alloc_from_fd()
2021 error_setg(errp, "%s backing store size 0x%" PRIx64 in qemu_ram_alloc_from_fd()
2022 " is too small for 'size' option 0x" RAM_ADDR_FMT in qemu_ram_alloc_from_fd()
2030 if (file_align > 0 && file_align > mr->align) { in qemu_ram_alloc_from_fd()
2033 file_align, mr->align); in qemu_ram_alloc_from_fd()
2038 new_block->mr = mr; in qemu_ram_alloc_from_fd()
2039 new_block->used_length = size; in qemu_ram_alloc_from_fd()
2040 new_block->max_length = max_size; in qemu_ram_alloc_from_fd()
2041 new_block->resized = resized; in qemu_ram_alloc_from_fd()
2042 new_block->flags = ram_flags; in qemu_ram_alloc_from_fd()
2043 new_block->guest_memfd = -1; in qemu_ram_alloc_from_fd()
2044 new_block->host = file_ram_alloc(new_block, max_size, fd, in qemu_ram_alloc_from_fd()
2047 if (!new_block->host) { in qemu_ram_alloc_from_fd()
2063 RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr, in qemu_ram_alloc_from_file() argument
2069 RAMBlock *block; in qemu_ram_alloc_from_file() local
2074 error_setg_errno(errp, -fd, "can't open backing store %s for guest RAM", in qemu_ram_alloc_from_file()
2077 fd == -EACCES) { in qemu_ram_alloc_from_file()
2091 " read-only but still creating writable RAM using" in qemu_ram_alloc_from_file()
2092 " '-object memory-backend-file,readonly=on,rom=off...'" in qemu_ram_alloc_from_file()
2098 block = qemu_ram_alloc_from_fd(size, size, NULL, mr, ram_flags, fd, offset, in qemu_ram_alloc_from_file()
2100 if (!block) { in qemu_ram_alloc_from_file()
2108 return block; in qemu_ram_alloc_from_file()
2116 * because it has no size limits, else use POSIX shm.
2142 RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size, in qemu_ram_alloc_internal() argument
2158 assert(max_size >= size); in qemu_ram_alloc_internal()
2162 if (!share_flags && current_machine->aux_ram_share) { in qemu_ram_alloc_internal()
2175 mr->align = QEMU_VMALLOC_ALIGN; in qemu_ram_alloc_internal()
2178 * This can fail if the shm mount size is too small, or alloc from in qemu_ram_alloc_internal()
2183 * After cpr-transfer, new QEMU could create a memory region in qemu_ram_alloc_internal()
2184 * with a larger max size than old, so pass reused to grow the in qemu_ram_alloc_internal()
2188 new_block = qemu_ram_alloc_from_fd(size, max_size, resized, mr, in qemu_ram_alloc_internal()
2191 trace_qemu_ram_alloc_shared(name, new_block->used_length, in qemu_ram_alloc_internal()
2192 new_block->max_length, fd, in qemu_ram_alloc_internal()
2193 new_block->host); in qemu_ram_alloc_internal()
2206 size = ROUND_UP(size, align); in qemu_ram_alloc_internal()
2210 new_block->mr = mr; in qemu_ram_alloc_internal()
2211 new_block->resized = resized; in qemu_ram_alloc_internal()
2212 new_block->used_length = size; in qemu_ram_alloc_internal()
2213 new_block->max_length = max_size; in qemu_ram_alloc_internal()
2214 new_block->fd = -1; in qemu_ram_alloc_internal()
2215 new_block->guest_memfd = -1; in qemu_ram_alloc_internal()
2216 new_block->page_size = qemu_real_host_page_size(); in qemu_ram_alloc_internal()
2217 new_block->host = host; in qemu_ram_alloc_internal()
2218 new_block->flags = ram_flags; in qemu_ram_alloc_internal()
2228 RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host, in qemu_ram_alloc_from_ptr() argument
2231 return qemu_ram_alloc_internal(size, size, NULL, host, RAM_PREALLOC, mr, in qemu_ram_alloc_from_ptr()
2235 RAMBlock *qemu_ram_alloc(ram_addr_t size, uint32_t ram_flags, in qemu_ram_alloc() argument
2240 return qemu_ram_alloc_internal(size, size, NULL, NULL, ram_flags, mr, errp); in qemu_ram_alloc()
2243 RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz, in qemu_ram_alloc_resizeable() argument
2247 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, in qemu_ram_alloc_resizeable()
2251 static void reclaim_ramblock(RAMBlock *block) in reclaim_ramblock() argument
2253 if (block->flags & RAM_PREALLOC) { in reclaim_ramblock()
2256 xen_invalidate_map_cache_entry(block->host); in reclaim_ramblock()
2258 } else if (block->fd >= 0) { in reclaim_ramblock()
2259 qemu_ram_munmap(block->fd, block->host, block->max_length); in reclaim_ramblock()
2260 close(block->fd); in reclaim_ramblock()
2263 qemu_anon_ram_free(block->host, block->max_length); in reclaim_ramblock()
2266 if (block->guest_memfd >= 0) { in reclaim_ramblock()
2267 close(block->guest_memfd); in reclaim_ramblock()
2271 g_free(block); in reclaim_ramblock()
2274 void qemu_ram_free(RAMBlock *block) in qemu_ram_free() argument
2278 if (!block) { in qemu_ram_free()
2282 if (block->host) { in qemu_ram_free()
2283 ram_block_notify_remove(block->host, block->used_length, in qemu_ram_free()
2284 block->max_length); in qemu_ram_free()
2288 name = cpr_name(block->mr); in qemu_ram_free()
2290 QLIST_REMOVE_RCU(block, next); in qemu_ram_free()
2295 call_rcu(block, reclaim_ramblock, rcu); in qemu_ram_free()
2301 static int qemu_ram_remap_mmap(RAMBlock *block, uint64_t start, size_t length) in qemu_ram_remap_mmap() argument
2305 void *host_startaddr = block->host + start; in qemu_ram_remap_mmap()
2307 assert(block->fd < 0); in qemu_ram_remap_mmap()
2309 flags |= block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE; in qemu_ram_remap_mmap()
2310 flags |= block->flags & RAM_NORESERVE ? MAP_NORESERVE : 0; in qemu_ram_remap_mmap()
2312 prot |= block->flags & RAM_READONLY ? 0 : PROT_WRITE; in qemu_ram_remap_mmap()
2313 area = mmap(host_startaddr, length, prot, flags, -1, 0); in qemu_ram_remap_mmap()
2314 return area != host_startaddr ? -errno : 0; in qemu_ram_remap_mmap()
2318 * qemu_ram_remap - remap a single RAM page
2324 * memory (MCE). The page size depends on the RAMBlock (i.e., hugetlb). @addr
2332 RAMBlock *block; in qemu_ram_remap() local
2337 RAMBLOCK_FOREACH(block) { in qemu_ram_remap()
2338 offset = addr - block->offset; in qemu_ram_remap()
2339 if (offset < block->max_length) { in qemu_ram_remap()
2341 page_size = qemu_ram_pagesize(block); in qemu_ram_remap()
2344 vaddr = ramblock_ptr(block, offset); in qemu_ram_remap()
2345 if (block->flags & RAM_PREALLOC) { in qemu_ram_remap()
2350 if (ram_block_discard_range(block, offset, page_size) != 0) { in qemu_ram_remap()
2357 if (block->fd >= 0) { in qemu_ram_remap()
2359 PRIx64 " +%zx", block->idstr, offset, in qemu_ram_remap()
2360 block->fd_offset, page_size); in qemu_ram_remap()
2363 if (qemu_ram_remap_mmap(block, offset, page_size) != 0) { in qemu_ram_remap()
2365 block->idstr, offset, page_size); in qemu_ram_remap()
2383 * @block: block for the RAM to lookup (optional and may be NULL).
2385 * @size: pointer to requested size (optional and may be NULL).
2386 * size may get modified and return a value smaller than
2388 * @lock: whether to lock the mapping in xen-mapcache until invalidated.
2389 * @is_write: hint whether to map RW or RO in the xen-mapcache.
2394 static void *qemu_ram_ptr_length(RAMBlock *block, ram_addr_t addr, in qemu_ram_ptr_length() argument
2395 hwaddr *size, bool lock, in qemu_ram_ptr_length() argument
2400 if (size && *size == 0) { in qemu_ram_ptr_length()
2404 if (block == NULL) { in qemu_ram_ptr_length()
2405 block = qemu_get_ram_block(addr); in qemu_ram_ptr_length()
2406 addr -= block->offset; in qemu_ram_ptr_length()
2408 if (size) { in qemu_ram_ptr_length()
2409 *size = MIN(*size, block->max_length - addr); in qemu_ram_ptr_length()
2410 len = *size; in qemu_ram_ptr_length()
2413 if (xen_enabled() && block->host == NULL) { in qemu_ram_ptr_length()
2418 if (xen_mr_is_memory(block->mr)) { in qemu_ram_ptr_length()
2419 return xen_map_cache(block->mr, block->offset + addr, in qemu_ram_ptr_length()
2420 len, block->offset, in qemu_ram_ptr_length()
2424 block->host = xen_map_cache(block->mr, block->offset, in qemu_ram_ptr_length()
2425 block->max_length, in qemu_ram_ptr_length()
2426 block->offset, in qemu_ram_ptr_length()
2430 return ramblock_ptr(block, addr); in qemu_ram_ptr_length()
2449 ram_addr_t res = (uint8_t *)host - (uint8_t *)rb->host; in qemu_ram_block_host_offset()
2450 assert((uintptr_t)host >= (uintptr_t)rb->host); in qemu_ram_block_host_offset()
2451 assert(res < rb->max_length); in qemu_ram_block_host_offset()
2459 RAMBlock *block; in qemu_ram_block_from_host() local
2470 block = qemu_get_ram_block(ram_addr); in qemu_ram_block_from_host()
2471 if (block) { in qemu_ram_block_from_host()
2472 *offset = ram_addr - block->offset; in qemu_ram_block_from_host()
2474 return block; in qemu_ram_block_from_host()
2478 block = qatomic_rcu_read(&ram_list.mru_block); in qemu_ram_block_from_host()
2479 if (block && block->host && host - block->host < block->max_length) { in qemu_ram_block_from_host()
2483 RAMBLOCK_FOREACH(block) { in qemu_ram_block_from_host()
2484 /* This case happens when the block is not mapped. */ in qemu_ram_block_from_host()
2485 if (block->host == NULL) { in qemu_ram_block_from_host()
2488 if (host - block->host < block->max_length) { in qemu_ram_block_from_host()
2496 *offset = (host - block->host); in qemu_ram_block_from_host()
2500 return block; in qemu_ram_block_from_host()
2512 RAMBlock *block; in qemu_ram_block_by_name() local
2514 RAMBLOCK_FOREACH(block) { in qemu_ram_block_by_name()
2515 if (!strcmp(name, block->idstr)) { in qemu_ram_block_by_name()
2516 return block; in qemu_ram_block_by_name()
2529 RAMBlock *block; in qemu_ram_addr_from_host() local
2532 block = qemu_ram_block_from_host(ptr, false, &offset); in qemu_ram_addr_from_host()
2533 if (!block) { in qemu_ram_addr_from_host()
2537 return block->offset + offset; in qemu_ram_addr_from_host()
2570 res = flatview_read(subpage->fv, addr + subpage->base, attrs, buf, len); in subpage_read()
2590 return flatview_write(subpage->fv, addr + subpage->base, attrs, buf, len); in subpage_write()
2603 return flatview_access_valid(subpage->fv, addr + subpage->base, in subpage_accepts()
2624 return -1; in subpage_register()
2632 mmio->sub_section[idx] = section; in subpage_register()
2642 /* mmio->sub_section is set to PHYS_SECTION_UNASSIGNED with g_malloc0 */ in subpage_init()
2644 mmio->fv = fv; in subpage_init()
2645 mmio->base = base; in subpage_init()
2646 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio, in subpage_init()
2648 mmio->iomem.subpage = true; in subpage_init()
2665 .size = int128_2_64(), in dummy_section()
2675 CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx]; in iotlb_to_section()
2676 AddressSpaceDispatch *d = cpuas->memory_dispatch; in iotlb_to_section()
2680 assert(section_index < d->map.sections_nb); in iotlb_to_section()
2681 ret = d->map.sections + section_index; in iotlb_to_section()
2682 assert(ret->mr); in iotlb_to_section()
2683 assert(ret->mr->ops); in iotlb_to_section()
2699 n = dummy_section(&d->map, fv, &io_mem_unassigned); in address_space_dispatch_new()
2702 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 }; in address_space_dispatch_new()
2709 phys_sections_free(&d->map); in address_space_dispatch_free()
2725 * ---------------------- ------------------------- in tcg_log_global_after_sync()
2726 * TLB check -> slow path in tcg_log_global_after_sync()
2731 * TLB check -> fast path in tcg_log_global_after_sync()
2747 run_on_cpu(cpuas->cpu, do_nothing, RUN_ON_CPU_NULL); in tcg_log_global_after_sync()
2755 cpuas->memory_dispatch = address_space_to_dispatch(cpuas->as); in tcg_commit_cpu()
2765 /* since each CPU stores ram addresses in its TLB cache, we must in tcg_commit()
2768 cpu = cpuas->cpu; in tcg_commit()
2771 * Defer changes to as->memory_dispatch until the cpu is quiescent. in tcg_commit()
2780 * all of the tcg machinery for run-on is initialized: thus halt_cond. in tcg_commit()
2782 if (cpu->halt_cond) { in tcg_commit()
2832 tb_invalidate_phys_range(addr, addr + length - 1); in invalidate_and_set_dirty()
2838 void memory_region_flush_rom_device(MemoryRegion *mr, hwaddr addr, hwaddr size) in memory_region_flush_rom_device() argument
2848 invalidate_and_set_dirty(mr, addr, size); in memory_region_flush_rom_device()
2853 unsigned access_size_max = mr->ops->valid.max_access_size; in memory_access_size()
2855 /* Regions are assumed to support 1-4 byte accesses unless in memory_access_size()
2862 if (!mr->ops->impl.unaligned) { in memory_access_size()
2863 unsigned align_size_max = addr & -addr; in memory_access_size()
2886 if (mr->flush_coalesced_mmio) { in prepare_mmio_access()
2914 "Invalid access to non-RAM device at " in flatview_access_allowed()
2915 "addr 0x%" HWADDR_PRIX ", size %" HWADDR_PRIu ", " in flatview_access_allowed()
2960 uint8_t *ram_ptr = qemu_ram_ptr_length(mr->ram_block, mr_addr, l, in flatview_write_continue_step()
2984 len -= l; in flatview_write_continue()
3053 uint8_t *ram_ptr = qemu_ram_ptr_length(mr->ram_block, mr_addr, l, in flatview_read_continue_step()
3075 len -= l; in flatview_read_continue()
3160 len -= l; in address_space_set()
3201 ram_ptr = qemu_map_ram_ptr(mr->ram_block, addr1); in address_space_write_rom_internal()
3212 len -= l; in address_space_write_rom_internal()
3232 * triggered from within the guest. For TCG we are always cache coherent, in cpu_flush_icache_range()
3234 * the host's instruction cache at least. in cpu_flush_icache_range()
3270 while (!QLIST_EMPTY(&as->map_client_list)) { in address_space_notify_map_clients_locked()
3271 client = QLIST_FIRST(&as->map_client_list); in address_space_notify_map_clients_locked()
3272 qemu_bh_schedule(client->bh); in address_space_notify_map_clients_locked()
3281 QEMU_LOCK_GUARD(&as->map_client_list_lock); in address_space_register_map_client()
3282 client->bh = bh; in address_space_register_map_client()
3283 QLIST_INSERT_HEAD(&as->map_client_list, client, link); in address_space_register_map_client()
3286 if (qatomic_read(&as->bounce_buffer_size) < as->max_bounce_buffer_size) { in address_space_register_map_client()
3294 /* The data structures we set up here depend on knowing the page size, in cpu_exec_init_all()
3297 * machine setup would care about the target page size, and we could in cpu_exec_init_all()
3310 QEMU_LOCK_GUARD(&as->map_client_list_lock); in address_space_unregister_map_client()
3311 QLIST_FOREACH(client, &as->map_client_list, link) { in address_space_unregister_map_client()
3312 if (client->bh == bh) { in address_space_unregister_map_client()
3321 QEMU_LOCK_GUARD(&as->map_client_list_lock); in address_space_notify_map_clients()
3341 len -= l; in flatview_access_valid()
3369 target_len -= len; in flatview_extend_translation()
3388 * Use only for reads OR writes - not for read-modify-write operations.
3415 size_t used = qatomic_read(&as->bounce_buffer_size); in address_space_map()
3417 hwaddr alloc = MIN(as->max_bounce_buffer_size - used, l); in address_space_map()
3420 qatomic_cmpxchg(&as->bounce_buffer_size, used, new_size); in address_space_map()
3434 bounce->magic = BOUNCE_BUFFER_MAGIC; in address_space_map()
3436 bounce->mr = mr; in address_space_map()
3437 bounce->addr = addr; in address_space_map()
3438 bounce->len = l; in address_space_map()
3442 bounce->buffer, l); in address_space_map()
3446 return bounce->buffer; in address_space_map()
3453 return qemu_ram_ptr_length(mr->ram_block, xlat, plen, true, is_write); in address_space_map()
3480 assert(bounce->magic == BOUNCE_BUFFER_MAGIC); in address_space_unmap()
3483 address_space_write(as, bounce->addr, MEMTXATTRS_UNSPECIFIED, in address_space_unmap()
3484 bounce->buffer, access_len); in address_space_unmap()
3487 qatomic_sub(&as->bounce_buffer_size, bounce->len); in address_space_unmap()
3488 bounce->magic = ~BOUNCE_BUFFER_MAGIC; in address_space_unmap()
3489 memory_region_unref(bounce->mr); in address_space_unmap()
3518 int64_t address_space_cache_init(MemoryRegionCache *cache, in address_space_cache_init() argument
3532 cache->fv = address_space_get_flatview(as); in address_space_cache_init()
3533 d = flatview_to_dispatch(cache->fv); in address_space_cache_init()
3534 cache->mrs = *address_space_translate_internal(d, addr, &cache->xlat, &l, true); in address_space_cache_init()
3537 * cache->xlat is now relative to cache->mrs.mr, not to the section itself. in address_space_cache_init()
3539 * cache->xlat and the end of the section. in address_space_cache_init()
3541 diff = int128_sub(cache->mrs.size, in address_space_cache_init()
3542 int128_make64(cache->xlat - cache->mrs.offset_within_region)); in address_space_cache_init()
3545 mr = cache->mrs.mr; in address_space_cache_init()
3552 l = flatview_extend_translation(cache->fv, addr, len, mr, in address_space_cache_init()
3553 cache->xlat, l, is_write, in address_space_cache_init()
3555 cache->ptr = qemu_ram_ptr_length(mr->ram_block, cache->xlat, &l, true, in address_space_cache_init()
3558 cache->ptr = NULL; in address_space_cache_init()
3561 cache->len = l; in address_space_cache_init()
3562 cache->is_write = is_write; in address_space_cache_init()
3566 void address_space_cache_invalidate(MemoryRegionCache *cache, in address_space_cache_invalidate() argument
3570 assert(cache->is_write); in address_space_cache_invalidate()
3571 if (likely(cache->ptr)) { in address_space_cache_invalidate()
3572 invalidate_and_set_dirty(cache->mrs.mr, addr + cache->xlat, access_len); in address_space_cache_invalidate()
3576 void address_space_cache_destroy(MemoryRegionCache *cache) in address_space_cache_destroy() argument
3578 if (!cache->mrs.mr) { in address_space_cache_destroy()
3583 xen_invalidate_map_cache_entry(cache->ptr); in address_space_cache_destroy()
3585 memory_region_unref(cache->mrs.mr); in address_space_cache_destroy()
3586 flatview_unref(cache->fv); in address_space_cache_destroy()
3587 cache->mrs.mr = NULL; in address_space_cache_destroy()
3588 cache->fv = NULL; in address_space_cache_destroy()
3597 MemoryRegionCache *cache, hwaddr addr, hwaddr *xlat, in address_space_translate_cached() argument
3605 assert(!cache->ptr); in address_space_translate_cached()
3606 *xlat = addr + cache->xlat; in address_space_translate_cached()
3608 mr = cache->mrs.mr; in address_space_translate_cached()
3636 len -= l; in address_space_write_continue_cached()
3661 len -= l; in address_space_read_continue_cached()
3678 address_space_read_cached_slow(MemoryRegionCache *cache, hwaddr addr, in address_space_read_cached_slow() argument
3685 mr = address_space_translate_cached(cache, addr, &mr_addr, &l, false, in address_space_read_cached_slow()
3695 address_space_write_cached_slow(MemoryRegionCache *cache, hwaddr addr, in address_space_write_cached_slow() argument
3702 mr = address_space_translate_cached(cache, addr, &mr_addr, &l, true, in address_space_write_cached_slow()
3708 #define ARG1_DECL MemoryRegionCache *cache
3709 #define ARG1 cache
3711 #define TRANSLATE(...) address_space_translate_cached(cache, __VA_ARGS__)
3734 if (phys_addr == -1) in cpu_memory_rw_debug()
3735 return -1; in cpu_memory_rw_debug()
3736 l = (page + TARGET_PAGE_SIZE) - addr; in cpu_memory_rw_debug()
3740 res = address_space_rw(cpu->cpu_ases[asidx].as, phys_addr, attrs, buf, in cpu_memory_rw_debug()
3743 return -1; in cpu_memory_rw_debug()
3745 len -= l; in cpu_memory_rw_debug()
3767 RAMBlock *block; in qemu_ram_foreach_block() local
3771 RAMBLOCK_FOREACH(block) { in qemu_ram_foreach_block()
3772 ret = func(block, opaque); in qemu_ram_foreach_block()
3785 * Returns: 0 on success, non-zero on failure
3790 int ret = -1; in ram_block_discard_range()
3792 uint8_t *host_startaddr = rb->host + start; in ram_block_discard_range()
3794 if (!QEMU_PTR_IS_ALIGNED(host_startaddr, rb->page_size)) { in ram_block_discard_range()
3800 if ((start + length) <= rb->max_length) { in ram_block_discard_range()
3802 if (!QEMU_IS_ALIGNED(length, rb->page_size)) { in ram_block_discard_range()
3814 need_madvise = (rb->page_size == qemu_real_host_page_size()); in ram_block_discard_range()
3815 need_fallocate = rb->fd != -1; in ram_block_discard_range()
3826 if (rb->flags & RAM_READONLY_FD) { in ram_block_discard_range()
3850 ret = fallocate(rb->fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, in ram_block_discard_range()
3851 start + rb->fd_offset, length); in ram_block_discard_range()
3853 ret = -errno; in ram_block_discard_range()
3855 " +%zx (%d)", __func__, rb->idstr, start, in ram_block_discard_range()
3856 rb->fd_offset, length, ret); in ram_block_discard_range()
3860 ret = -ENOSYS; in ram_block_discard_range()
3863 rb->idstr, start, rb->fd_offset, length, ret); in ram_block_discard_range()
3874 if (qemu_ram_is_shared(rb) && rb->fd < 0) { in ram_block_discard_range()
3880 ret = -errno; in ram_block_discard_range()
3883 __func__, rb->idstr, start, length, ret); in ram_block_discard_range()
3887 ret = -ENOSYS; in ram_block_discard_range()
3889 __func__, rb->idstr, start, length, ret); in ram_block_discard_range()
3893 trace_ram_block_discard_range(rb->idstr, host_startaddr, length, in ram_block_discard_range()
3896 error_report("%s: Overrun block '%s' (%" PRIu64 "/%zx/" RAM_ADDR_FMT")", in ram_block_discard_range()
3897 __func__, rb->idstr, start, length, rb->max_length); in ram_block_discard_range()
3907 int ret = -1; in ram_block_discard_guest_memfd_range()
3911 ret = fallocate(rb->guest_memfd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, in ram_block_discard_guest_memfd_range()
3915 ret = -errno; in ram_block_discard_guest_memfd_range()
3917 __func__, rb->idstr, start, length, ret); in ram_block_discard_guest_memfd_range()
3920 ret = -ENOSYS; in ram_block_discard_guest_memfd_range()
3922 __func__, rb->idstr, start, length, ret); in ram_block_discard_guest_memfd_range()
3930 return rb->flags & RAM_PMEM; in ramblock_is_pmem()
3935 if (start == end - 1) { in mtree_print_phys_entries()
3938 qemu_printf("\t%3d..%-3d ", start, end - 1); in mtree_print_phys_entries()
3951 #define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \ argument
3952 int128_sub((size), int128_one())) : 0)
3961 for (i = 0; i < d->map.sections_nb; ++i) { in mtree_print_dispatch()
3962 MemoryRegionSection *s = d->map.sections + i; in mtree_print_dispatch()
3969 s->offset_within_address_space, in mtree_print_dispatch()
3970 s->offset_within_address_space + MR_SIZE(s->size), in mtree_print_dispatch()
3971 s->mr->name ? s->mr->name : "(noname)", in mtree_print_dispatch()
3973 s->mr == root ? " [ROOT]" : "", in mtree_print_dispatch()
3974 s == d->mru_section ? " [MRU]" : "", in mtree_print_dispatch()
3975 s->mr->is_iommu ? " [iommu]" : ""); in mtree_print_dispatch()
3977 if (s->mr->alias) { in mtree_print_dispatch()
3978 qemu_printf(" alias=%s", s->mr->alias->name ? in mtree_print_dispatch()
3979 s->mr->alias->name : "noname"); in mtree_print_dispatch()
3985 P_L2_BITS, P_L2_LEVELS, d->phys_map.ptr, d->phys_map.skip); in mtree_print_dispatch()
3986 for (i = 0; i < d->map.nodes_nb; ++i) { in mtree_print_dispatch()
3989 Node *n = d->map.nodes + i; in mtree_print_dispatch()
3996 if (pe->ptr == prev.ptr && pe->skip == prev.skip) { in mtree_print_dispatch()
4044 ram_block_discard_disabled_cnt--; in ram_block_discard_disable()
4047 ret = -EBUSY; in ram_block_discard_disable()
4061 ram_block_uncoordinated_discard_disabled_cnt--; in ram_block_uncoordinated_discard_disable()
4063 ret = -EBUSY; in ram_block_uncoordinated_discard_disable()
4077 ram_block_discard_required_cnt--; in ram_block_discard_require()
4080 ret = -EBUSY; in ram_block_discard_require()
4094 ram_block_coordinated_discard_required_cnt--; in ram_block_coordinated_discard_require()
4096 ret = -EBUSY; in ram_block_coordinated_discard_require()
4122 MemoryRegion *mr = rb->mr; in ram_is_cpr_compatible()
4137 if (rb->fd >= 0 && qemu_ram_is_shared(rb)) { in ram_is_cpr_compatible()
4145 * Add a blocker for each volatile ram block. This function should only be
4146 * called after we know that the block is migratable. Non-migratable blocks
4147 * are either re-created in new QEMU, or are handled specially, or are covered
4148 * by a device-level CPR blocker.
4158 error_setg(&rb->cpr_blocker, in ram_block_add_cpr_blocker()
4160 "required for memory-backend objects, and aux-ram-share=on is " in ram_block_add_cpr_blocker()
4161 "required.", memory_region_name(rb->mr)); in ram_block_add_cpr_blocker()
4162 migrate_add_blocker_modes(&rb->cpr_blocker, errp, MIG_MODE_CPR_TRANSFER, in ram_block_add_cpr_blocker()
4163 -1); in ram_block_add_cpr_blocker()
4168 migrate_del_blocker(&rb->cpr_blocker); in ram_block_del_cpr_blocker()