154857b08SSteve Sistare /*
254857b08SSteve Sistare * Copyright (c) 2021-2025 Oracle and/or its affiliates.
354857b08SSteve Sistare *
454857b08SSteve Sistare * SPDX-License-Identifier: GPL-2.0-or-later
554857b08SSteve Sistare */
654857b08SSteve Sistare
754857b08SSteve Sistare #include <sys/ioctl.h>
854857b08SSteve Sistare #include <linux/vfio.h>
954857b08SSteve Sistare #include "qemu/osdep.h"
1054857b08SSteve Sistare #include "hw/vfio/vfio-container.h"
11c29a65edSSteve Sistare #include "hw/vfio/vfio-device.h"
127e9f2141SSteve Sistare #include "hw/vfio/vfio-listener.h"
1354857b08SSteve Sistare #include "migration/blocker.h"
1454857b08SSteve Sistare #include "migration/cpr.h"
1554857b08SSteve Sistare #include "migration/migration.h"
1654857b08SSteve Sistare #include "migration/vmstate.h"
1754857b08SSteve Sistare #include "qapi/error.h"
187e9f2141SSteve Sistare #include "qemu/error-report.h"
1954857b08SSteve Sistare
vfio_dma_unmap_vaddr_all(VFIOContainer * container,Error ** errp)201faadd96SSteve Sistare static bool vfio_dma_unmap_vaddr_all(VFIOContainer *container, Error **errp)
211faadd96SSteve Sistare {
221faadd96SSteve Sistare struct vfio_iommu_type1_dma_unmap unmap = {
231faadd96SSteve Sistare .argsz = sizeof(unmap),
241faadd96SSteve Sistare .flags = VFIO_DMA_UNMAP_FLAG_VADDR | VFIO_DMA_UNMAP_FLAG_ALL,
251faadd96SSteve Sistare .iova = 0,
261faadd96SSteve Sistare .size = 0,
271faadd96SSteve Sistare };
281faadd96SSteve Sistare if (ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, &unmap)) {
291faadd96SSteve Sistare error_setg_errno(errp, errno, "vfio_dma_unmap_vaddr_all");
301faadd96SSteve Sistare return false;
311faadd96SSteve Sistare }
32eba1f657SSteve Sistare container->cpr.vaddr_unmapped = true;
331faadd96SSteve Sistare return true;
341faadd96SSteve Sistare }
351faadd96SSteve Sistare
367e9f2141SSteve Sistare /*
377e9f2141SSteve Sistare * Set the new @vaddr for any mappings registered during cpr load.
387e9f2141SSteve Sistare * The incoming state is cleared thereafter.
397e9f2141SSteve Sistare */
vfio_legacy_cpr_dma_map(const VFIOContainerBase * bcontainer,hwaddr iova,ram_addr_t size,void * vaddr,bool readonly,MemoryRegion * mr)407e9f2141SSteve Sistare static int vfio_legacy_cpr_dma_map(const VFIOContainerBase *bcontainer,
417e9f2141SSteve Sistare hwaddr iova, ram_addr_t size, void *vaddr,
427e9f2141SSteve Sistare bool readonly, MemoryRegion *mr)
437e9f2141SSteve Sistare {
447e9f2141SSteve Sistare const VFIOContainer *container = container_of(bcontainer, VFIOContainer,
457e9f2141SSteve Sistare bcontainer);
467e9f2141SSteve Sistare struct vfio_iommu_type1_dma_map map = {
477e9f2141SSteve Sistare .argsz = sizeof(map),
487e9f2141SSteve Sistare .flags = VFIO_DMA_MAP_FLAG_VADDR,
497e9f2141SSteve Sistare .vaddr = (__u64)(uintptr_t)vaddr,
507e9f2141SSteve Sistare .iova = iova,
517e9f2141SSteve Sistare .size = size,
527e9f2141SSteve Sistare };
537e9f2141SSteve Sistare
547e9f2141SSteve Sistare g_assert(cpr_is_incoming());
557e9f2141SSteve Sistare
567e9f2141SSteve Sistare if (ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map)) {
577e9f2141SSteve Sistare return -errno;
587e9f2141SSteve Sistare }
597e9f2141SSteve Sistare
607e9f2141SSteve Sistare return 0;
617e9f2141SSteve Sistare }
621faadd96SSteve Sistare
static void vfio_region_remap(MemoryListener *listener,
                              MemoryRegionSection *section)
{
    /* Re-add the section with cpr_remap (third arg) set, restoring vaddr. */
    VFIOContainer *container =
        container_of(listener, VFIOContainer, cpr.remap_listener);

    vfio_container_region_add(&container->bcontainer, section, true);
}
70eba1f657SSteve Sistare
vfio_cpr_supported(VFIOContainer * container,Error ** errp)7154857b08SSteve Sistare static bool vfio_cpr_supported(VFIOContainer *container, Error **errp)
7254857b08SSteve Sistare {
7354857b08SSteve Sistare if (!ioctl(container->fd, VFIO_CHECK_EXTENSION, VFIO_UPDATE_VADDR)) {
7454857b08SSteve Sistare error_setg(errp, "VFIO container does not support VFIO_UPDATE_VADDR");
7554857b08SSteve Sistare return false;
7654857b08SSteve Sistare
7754857b08SSteve Sistare } else if (!ioctl(container->fd, VFIO_CHECK_EXTENSION, VFIO_UNMAP_ALL)) {
7854857b08SSteve Sistare error_setg(errp, "VFIO container does not support VFIO_UNMAP_ALL");
7954857b08SSteve Sistare return false;
8054857b08SSteve Sistare
8154857b08SSteve Sistare } else {
8254857b08SSteve Sistare return true;
8354857b08SSteve Sistare }
8454857b08SSteve Sistare }
8554857b08SSteve Sistare
vfio_container_pre_save(void * opaque)861faadd96SSteve Sistare static int vfio_container_pre_save(void *opaque)
871faadd96SSteve Sistare {
881faadd96SSteve Sistare VFIOContainer *container = opaque;
891faadd96SSteve Sistare Error *local_err = NULL;
901faadd96SSteve Sistare
911faadd96SSteve Sistare if (!vfio_dma_unmap_vaddr_all(container, &local_err)) {
921faadd96SSteve Sistare error_report_err(local_err);
931faadd96SSteve Sistare return -1;
941faadd96SSteve Sistare }
951faadd96SSteve Sistare return 0;
961faadd96SSteve Sistare }
971faadd96SSteve Sistare
vfio_container_post_load(void * opaque,int version_id)987e9f2141SSteve Sistare static int vfio_container_post_load(void *opaque, int version_id)
997e9f2141SSteve Sistare {
1007e9f2141SSteve Sistare VFIOContainer *container = opaque;
1017e9f2141SSteve Sistare VFIOContainerBase *bcontainer = &container->bcontainer;
102924c3ccbSZhenzhong Duan VFIOIOMMUClass *vioc = VFIO_IOMMU_GET_CLASS(bcontainer);
103924c3ccbSZhenzhong Duan dma_map_fn saved_dma_map = vioc->dma_map;
1047e9f2141SSteve Sistare Error *local_err = NULL;
1057e9f2141SSteve Sistare
106924c3ccbSZhenzhong Duan /* During incoming CPR, divert calls to dma_map. */
107924c3ccbSZhenzhong Duan vioc->dma_map = vfio_legacy_cpr_dma_map;
108924c3ccbSZhenzhong Duan
1097e9f2141SSteve Sistare if (!vfio_listener_register(bcontainer, &local_err)) {
1107e9f2141SSteve Sistare error_report_err(local_err);
1117e9f2141SSteve Sistare return -1;
1127e9f2141SSteve Sistare }
1137e9f2141SSteve Sistare
1147e9f2141SSteve Sistare /* Restore original dma_map function */
115924c3ccbSZhenzhong Duan vioc->dma_map = saved_dma_map;
116924c3ccbSZhenzhong Duan
1177e9f2141SSteve Sistare return 0;
1187e9f2141SSteve Sistare }
1197e9f2141SSteve Sistare
12054857b08SSteve Sistare static const VMStateDescription vfio_container_vmstate = {
12154857b08SSteve Sistare .name = "vfio-container",
12254857b08SSteve Sistare .version_id = 0,
12354857b08SSteve Sistare .minimum_version_id = 0,
1247e9f2141SSteve Sistare .priority = MIG_PRI_LOW, /* Must happen after devices and groups */
1251faadd96SSteve Sistare .pre_save = vfio_container_pre_save,
1267e9f2141SSteve Sistare .post_load = vfio_container_post_load,
12754857b08SSteve Sistare .needed = cpr_incoming_needed,
12854857b08SSteve Sistare .fields = (VMStateField[]) {
12954857b08SSteve Sistare VMSTATE_END_OF_LIST()
13054857b08SSteve Sistare }
13154857b08SSteve Sistare };
13254857b08SSteve Sistare
vfio_cpr_fail_notifier(NotifierWithReturn * notifier,MigrationEvent * e,Error ** errp)133eba1f657SSteve Sistare static int vfio_cpr_fail_notifier(NotifierWithReturn *notifier,
134eba1f657SSteve Sistare MigrationEvent *e, Error **errp)
135eba1f657SSteve Sistare {
136eba1f657SSteve Sistare VFIOContainer *container =
137eba1f657SSteve Sistare container_of(notifier, VFIOContainer, cpr.transfer_notifier);
138eba1f657SSteve Sistare VFIOContainerBase *bcontainer = &container->bcontainer;
139eba1f657SSteve Sistare
140eba1f657SSteve Sistare if (e->type != MIG_EVENT_PRECOPY_FAILED) {
141eba1f657SSteve Sistare return 0;
142eba1f657SSteve Sistare }
143eba1f657SSteve Sistare
144eba1f657SSteve Sistare if (container->cpr.vaddr_unmapped) {
145eba1f657SSteve Sistare /*
146eba1f657SSteve Sistare * Force a call to vfio_region_remap for each mapped section by
147eba1f657SSteve Sistare * temporarily registering a listener, and temporarily diverting
148eba1f657SSteve Sistare * dma_map to vfio_legacy_cpr_dma_map. The latter restores vaddr.
149eba1f657SSteve Sistare */
150eba1f657SSteve Sistare
151eba1f657SSteve Sistare VFIOIOMMUClass *vioc = VFIO_IOMMU_GET_CLASS(bcontainer);
152924c3ccbSZhenzhong Duan dma_map_fn saved_dma_map = vioc->dma_map;
153eba1f657SSteve Sistare vioc->dma_map = vfio_legacy_cpr_dma_map;
154eba1f657SSteve Sistare
155eba1f657SSteve Sistare container->cpr.remap_listener = (MemoryListener) {
156eba1f657SSteve Sistare .name = "vfio cpr recover",
157eba1f657SSteve Sistare .region_add = vfio_region_remap
158eba1f657SSteve Sistare };
159eba1f657SSteve Sistare memory_listener_register(&container->cpr.remap_listener,
160eba1f657SSteve Sistare bcontainer->space->as);
161eba1f657SSteve Sistare memory_listener_unregister(&container->cpr.remap_listener);
162eba1f657SSteve Sistare container->cpr.vaddr_unmapped = false;
163924c3ccbSZhenzhong Duan vioc->dma_map = saved_dma_map;
164eba1f657SSteve Sistare }
165eba1f657SSteve Sistare return 0;
166eba1f657SSteve Sistare }
167eba1f657SSteve Sistare
/*
 * Register a legacy VFIO container for CPR.  Returns true on success;
 * on failure, *errp is set and the container is not CPR-capable.
 */
bool vfio_legacy_cpr_register_container(VFIOContainer *container, Error **errp)
{
    VFIOContainerBase *bcontainer = &container->bcontainer;
    Error **cpr_blocker = &container->cpr.blocker;

    /* Registered unconditionally, before the CPR support check. */
    migration_add_notifier_mode(&bcontainer->cpr_reboot_notifier,
                                vfio_cpr_reboot_notifier,
                                MIG_MODE_CPR_REBOOT);

    if (!vfio_cpr_supported(container, cpr_blocker)) {
        /* Kernel lacks the needed extensions: block cpr-transfer only. */
        return migrate_add_blocker_modes(cpr_blocker, errp,
                                         MIG_MODE_CPR_TRANSFER, -1) == 0;
    }

    vfio_cpr_add_kvm_notifier();

    /* Hooks only; no fields are transferred (see vfio_container_vmstate). */
    vmstate_register(NULL, -1, &vfio_container_vmstate, container);

    /* Restores vaddrs if cpr-transfer precopy fails after unmap. */
    migration_add_notifier_mode(&container->cpr.transfer_notifier,
                                vfio_cpr_fail_notifier,
                                MIG_MODE_CPR_TRANSFER);
    return true;
}
19154857b08SSteve Sistare
/* Undo everything vfio_legacy_cpr_register_container set up. */
void vfio_legacy_cpr_unregister_container(VFIOContainer *container)
{
    VFIOContainerBase *bcontainer = &container->bcontainer;

    migration_remove_notifier(&bcontainer->cpr_reboot_notifier);
    migrate_del_blocker(&container->cpr.blocker);
    vmstate_unregister(NULL, &vfio_container_vmstate, container);
    migration_remove_notifier(&container->cpr.transfer_notifier);
}
201eba1f657SSteve Sistare
202eba1f657SSteve Sistare /*
203eba1f657SSteve Sistare * In old QEMU, VFIO_DMA_UNMAP_FLAG_VADDR may fail on some mapping after
204eba1f657SSteve Sistare * succeeding for others, so the latter have lost their vaddr. Call this
205eba1f657SSteve Sistare * to restore vaddr for a section with a giommu.
206eba1f657SSteve Sistare *
207eba1f657SSteve Sistare * The giommu already exists. Find it and replay it, which calls
208eba1f657SSteve Sistare * vfio_legacy_cpr_dma_map further down the stack.
209eba1f657SSteve Sistare */
vfio_cpr_giommu_remap(VFIOContainerBase * bcontainer,MemoryRegionSection * section)210eba1f657SSteve Sistare void vfio_cpr_giommu_remap(VFIOContainerBase *bcontainer,
211eba1f657SSteve Sistare MemoryRegionSection *section)
212eba1f657SSteve Sistare {
213eba1f657SSteve Sistare VFIOGuestIOMMU *giommu = NULL;
214eba1f657SSteve Sistare hwaddr as_offset = section->offset_within_address_space;
215eba1f657SSteve Sistare hwaddr iommu_offset = as_offset - section->offset_within_region;
216eba1f657SSteve Sistare
217eba1f657SSteve Sistare QLIST_FOREACH(giommu, &bcontainer->giommu_list, giommu_next) {
218eba1f657SSteve Sistare if (giommu->iommu_mr == IOMMU_MEMORY_REGION(section->mr) &&
219eba1f657SSteve Sistare giommu->iommu_offset == iommu_offset) {
220eba1f657SSteve Sistare break;
221eba1f657SSteve Sistare }
222eba1f657SSteve Sistare }
223eba1f657SSteve Sistare g_assert(giommu);
224eba1f657SSteve Sistare memory_region_iommu_replay(giommu->iommu_mr, &giommu->n);
225eba1f657SSteve Sistare }
226eba1f657SSteve Sistare
227eba1f657SSteve Sistare /*
228eba1f657SSteve Sistare * In old QEMU, VFIO_DMA_UNMAP_FLAG_VADDR may fail on some mapping after
229eba1f657SSteve Sistare * succeeding for others, so the latter have lost their vaddr. Call this
230eba1f657SSteve Sistare * to restore vaddr for a section with a RamDiscardManager.
231eba1f657SSteve Sistare *
232eba1f657SSteve Sistare * The ram discard listener already exists. Call its populate function
233eba1f657SSteve Sistare * directly, which calls vfio_legacy_cpr_dma_map.
234eba1f657SSteve Sistare */
vfio_cpr_ram_discard_register_listener(VFIOContainerBase * bcontainer,MemoryRegionSection * section)235eba1f657SSteve Sistare bool vfio_cpr_ram_discard_register_listener(VFIOContainerBase *bcontainer,
236eba1f657SSteve Sistare MemoryRegionSection *section)
237eba1f657SSteve Sistare {
238eba1f657SSteve Sistare VFIORamDiscardListener *vrdl =
239eba1f657SSteve Sistare vfio_find_ram_discard_listener(bcontainer, section);
240eba1f657SSteve Sistare
241eba1f657SSteve Sistare g_assert(vrdl);
242eba1f657SSteve Sistare return vrdl->listener.notify_populate(&vrdl->listener, section) == 0;
24354857b08SSteve Sistare }
244c29a65edSSteve Sistare
vfio_cpr_group_get_device_fd(int d,const char * name)245c29a65edSSteve Sistare int vfio_cpr_group_get_device_fd(int d, const char *name)
246c29a65edSSteve Sistare {
247c29a65edSSteve Sistare const int id = 0;
248c29a65edSSteve Sistare int fd = cpr_find_fd(name, id);
249c29a65edSSteve Sistare
250c29a65edSSteve Sistare if (fd < 0) {
251c29a65edSSteve Sistare fd = ioctl(d, VFIO_GROUP_GET_DEVICE_FD, name);
252c29a65edSSteve Sistare if (fd >= 0) {
253c29a65edSSteve Sistare cpr_save_fd(name, id, fd);
254c29a65edSSteve Sistare }
255c29a65edSSteve Sistare }
256c29a65edSSteve Sistare return fd;
257c29a65edSSteve Sistare }
258c29a65edSSteve Sistare
/* True if both fds can be fstat'ed and live on the same st_dev. */
static bool same_device(int fd1, int fd2)
{
    struct stat st1, st2;

    if (fstat(fd1, &st1) != 0 || fstat(fd2, &st2) != 0) {
        return false;
    }
    return st1.st_dev == st2.st_dev;
}
265c29a65edSSteve Sistare
vfio_cpr_container_match(VFIOContainer * container,VFIOGroup * group,int fd)266c29a65edSSteve Sistare bool vfio_cpr_container_match(VFIOContainer *container, VFIOGroup *group,
267c29a65edSSteve Sistare int fd)
268c29a65edSSteve Sistare {
269c29a65edSSteve Sistare if (container->fd == fd) {
270c29a65edSSteve Sistare return true;
271c29a65edSSteve Sistare }
272c29a65edSSteve Sistare if (!same_device(container->fd, fd)) {
273c29a65edSSteve Sistare return false;
274c29a65edSSteve Sistare }
275c29a65edSSteve Sistare /*
276c29a65edSSteve Sistare * Same device, different fd. This occurs when the container fd is
277c29a65edSSteve Sistare * cpr_save'd multiple times, once for each groupid, so SCM_RIGHTS
278c29a65edSSteve Sistare * produces duplicates. De-dup it.
279c29a65edSSteve Sistare */
280c29a65edSSteve Sistare cpr_delete_fd("vfio_container_for_group", group->groupid);
281c29a65edSSteve Sistare close(fd);
282c29a65edSSteve Sistare cpr_save_fd("vfio_container_for_group", group->groupid, container->fd);
283c29a65edSSteve Sistare return true;
284c29a65edSSteve Sistare }
285