/*
 * DMA memory preregistration
 *
 * Authors:
 *  Alexey Kardashevskiy <aik@ozlabs.ru>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include <sys/ioctl.h>
#include <linux/vfio.h>

#include "hw/vfio/vfio-common.h"
#include "hw/hw.h"
#include "exec/ram_addr.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "trace.h"

static bool vfio_prereg_listener_skipped_section(MemoryRegionSection *section)
{
    if (memory_region_is_iommu(section->mr)) {
        hw_error("Cannot possibly preregister IOMMU memory");
    }

    return !memory_region_is_ram(section->mr) ||
           memory_region_is_ram_device(section->mr);
}

static void *vfio_prereg_gpa_to_vaddr(MemoryRegionSection *section, hwaddr gpa)
{
    return memory_region_get_ram_ptr(section->mr) +
        section->offset_within_region +
        (gpa - section->offset_within_address_space);
}

static void vfio_prereg_listener_region_add(MemoryListener *listener,
                                            MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(listener, VFIOContainer,
                                            prereg_listener);
    const hwaddr gpa = section->offset_within_address_space;
    hwaddr end;
    int ret;
    hwaddr page_mask = qemu_real_host_page_mask;
    struct vfio_iommu_spapr_register_memory reg = {
        .argsz = sizeof(reg),
        .flags = 0,
    };

    if (vfio_prereg_listener_skipped_section(section)) {
        trace_vfio_prereg_listener_region_add_skip(
                section->offset_within_address_space,
                section->offset_within_address_space +
                int128_get64(int128_sub(section->size, int128_one())));
        return;
    }

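    /*
     * Preregistration pins memory in host page size units, so sections
     * which are not aligned to the real host page size cannot be
     * registered and are reported and skipped.
     */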
    if (unlikely((section->offset_within_address_space & ~page_mask) ||
                 (section->offset_within_region & ~page_mask) ||
                 (int128_get64(section->size) & ~page_mask))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    end = section->offset_within_address_space + int128_get64(section->size);
    if (gpa >= end) {
        return;
    }

    memory_region_ref(section->mr);

    reg.vaddr = (uintptr_t) vfio_prereg_gpa_to_vaddr(section, gpa);
    reg.size = end - gpa;

    ret = ioctl(container->fd, VFIO_IOMMU_SPAPR_REGISTER_MEMORY, &reg);
    trace_vfio_prereg_register(reg.vaddr, reg.size, ret ? -errno : 0);
    if (ret) {
        /*
         * On the initfn path, store the first error in the container so we
         * can gracefully fail. At runtime, there's not much we can do other
         * than throw a hardware error.
         */
        if (!container->initialized) {
            if (!container->error) {
                error_setg_errno(&container->error, -ret,
                                 "Memory registering failed");
            }
        } else {
            hw_error("vfio: Memory registering failed, unable to continue");
        }
    }
}

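/*
 * Counterpart of vfio_prereg_listener_region_add(): unregisters the range
 * so the host kernel can unpin the backing pages.
 */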
static void vfio_prereg_listener_region_del(MemoryListener *listener,
                                            MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(listener, VFIOContainer,
                                            prereg_listener);
    const hwaddr gpa = section->offset_within_address_space;
    hwaddr end;
    int ret;
    hwaddr page_mask = qemu_real_host_page_mask;
    struct vfio_iommu_spapr_register_memory reg = {
        .argsz = sizeof(reg),
        .flags = 0,
    };

    if (vfio_prereg_listener_skipped_section(section)) {
        trace_vfio_prereg_listener_region_del_skip(
                section->offset_within_address_space,
                section->offset_within_address_space +
                int128_get64(int128_sub(section->size, int128_one())));
        return;
    }

    if (unlikely((section->offset_within_address_space & ~page_mask) ||
                 (section->offset_within_region & ~page_mask) ||
                 (int128_get64(section->size) & ~page_mask))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    end = section->offset_within_address_space + int128_get64(section->size);
    if (gpa >= end) {
        return;
    }

    reg.vaddr = (uintptr_t) vfio_prereg_gpa_to_vaddr(section, gpa);
    reg.size = end - gpa;

    ret = ioctl(container->fd, VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY, &reg);
    trace_vfio_prereg_unregister(reg.vaddr, reg.size, ret ? -errno : 0);
}

const MemoryListener vfio_prereg_listener = {
    .region_add = vfio_prereg_listener_region_add,
    .region_del = vfio_prereg_listener_region_del,
};

int vfio_spapr_create_window(VFIOContainer *container,
                             MemoryRegionSection *section,
                             hwaddr *pgsize)
{
    int ret = 0;
    IOMMUMemoryRegion *iommu_mr = IOMMU_MEMORY_REGION(section->mr);
    uint64_t pagesize = memory_region_iommu_get_min_page_size(iommu_mr), pgmask;
    unsigned entries, bits_total, bits_per_level, max_levels;
    struct vfio_iommu_spapr_tce_create create = { .argsz = sizeof(create) };
    long rampagesize = qemu_minrampagesize();

    /*
     * The host might not support the guest supported IOMMU page size,
     * so we will use smaller physical IOMMU pages to back them.
     */
    if (pagesize > rampagesize) {
        pagesize = rampagesize;
    }
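    /*
     * Pick the largest IOMMU page size supported by the container which
     * does not exceed the page size chosen above: mask off the larger
     * sizes from container->pgsizes and keep the highest remaining bit.
     * A zero result means the host supports no suitable page size.
     */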
    pgmask = container->pgsizes & (pagesize | (pagesize - 1));
    pagesize = pgmask ? (1ULL << (63 - clz64(pgmask))) : 0;
    if (!pagesize) {
        error_report("Host doesn't support page size 0x%"PRIx64
                     ", the supported mask is 0x%lx",
                     memory_region_iommu_get_min_page_size(iommu_mr),
                     container->pgsizes);
        return -EINVAL;
    }

    /*
     * FIXME: For VFIO iommu types which have KVM acceleration to
     * avoid bouncing all map/unmaps through qemu this way, this
     * would be the right place to wire that up (tell the KVM
     * device emulation the VFIO iommu handles to use).
     */
    create.window_size = int128_get64(section->size);
    create.page_shift = ctz64(pagesize);
    /*
     * The SPAPR host supports multilevel TCE tables. We try to guess the
     * optimal number of levels and, if this fails (for example due to
     * host memory fragmentation), we increase the number of levels.
     * The DMA address structure is:
     * rrrrrrrr rxxxxxxx xxxxxxxx xxxxxxxx  xxxxxxxx xxxxxxxx xxxxxxxx iiiiiiii
     * where:
     *   r = reserved (bits >= 55 are reserved in the existing hardware)
     *   i = IOMMU page offset (64K in this example)
     *   x = bits to index a TCE which can be split into equal chunks to
     *       index within a level.
     * The aim is to split "x" into the smallest possible number of levels.
     */
    entries = create.window_size >> create.page_shift;
    /* bits_total is the number of "x" needed */
    bits_total = ctz64(entries * sizeof(uint64_t));
    /*
     * bits_per_level is a safe guess of how much we can allocate per level:
     * 8 is the current minimum for CONFIG_FORCE_MAX_ZONEORDER and MAX_ORDER
     * is usually bigger than that.
     * Below we look at qemu_real_host_page_size as TCEs are allocated from
     * system pages.
     */
    bits_per_level = ctz64(qemu_real_host_page_size) + 8;
    create.levels = bits_total / bits_per_level;
    if (bits_total % bits_per_level) {
        ++create.levels;
    }
    max_levels = (64 - create.page_shift) / ctz64(qemu_real_host_page_size);
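    /*
     * For example (illustrative numbers only): a 2GiB window with 64KiB
     * IOMMU pages gives entries = 32768, i.e. a 256KiB table and
     * bits_total = 18; with a 64KiB host page size, bits_per_level is
     * 16 + 8 = 24, so a single level is tried first and max_levels is
     * (64 - 16) / 16 = 3.
     */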
    for ( ; create.levels <= max_levels; ++create.levels) {
        ret = ioctl(container->fd, VFIO_IOMMU_SPAPR_TCE_CREATE, &create);
        if (!ret) {
            break;
        }
    }
    if (ret) {
        error_report("Failed to create a window, ret = %d (%m)", ret);
        return -errno;
    }

    if (create.start_addr != section->offset_within_address_space) {
        vfio_spapr_remove_window(container, create.start_addr);

        error_report("Host doesn't support DMA window at %"HWADDR_PRIx", must be %"PRIx64,
                     section->offset_within_address_space,
                     (uint64_t)create.start_addr);
        return -EINVAL;
    }
    trace_vfio_spapr_create_window(create.page_shift,
                                   create.levels,
                                   create.window_size,
                                   create.start_addr);
    *pgsize = pagesize;

    return 0;
}

int vfio_spapr_remove_window(VFIOContainer *container,
                             hwaddr offset_within_address_space)
{
    struct vfio_iommu_spapr_tce_remove remove = {
        .argsz = sizeof(remove),
        .start_addr = offset_within_address_space,
    };
    int ret;

    ret = ioctl(container->fd, VFIO_IOMMU_SPAPR_TCE_REMOVE, &remove);
    if (ret) {
        error_report("Failed to remove window at %"PRIx64,
                     (uint64_t)remove.start_addr);
        return -errno;
    }

    trace_vfio_spapr_remove_window(offset_within_address_space);

    return 0;
}