/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 * VFIO API definition
 *
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef VFIO_H
#define VFIO_H

#include <linux/types.h>
#include <linux/ioctl.h>

#define VFIO_API_VERSION	0


/* Kernel & User level defines for VFIO IOCTLs. */

/* Extensions */

#define VFIO_TYPE1_IOMMU		1
#define VFIO_SPAPR_TCE_IOMMU		2
#define VFIO_TYPE1v2_IOMMU		3
/*
 * IOMMU enforces DMA cache coherence (ex. PCIe NoSnoop stripping).  This
 * capability is subject to change as groups are added or removed.
 */
#define VFIO_DMA_CC_IOMMU		4

/* Check if EEH is supported */
#define VFIO_EEH			5

/* Two-stage IOMMU */
#define VFIO_TYPE1_NESTING_IOMMU	6	/* Implies v2 */

#define VFIO_SPAPR_TCE_v2_IOMMU		7

/*
 * The No-IOMMU IOMMU offers no translation or isolation for devices and
 * supports no ioctls outside of VFIO_CHECK_EXTENSION.  Use of VFIO's No-IOMMU
 * code will taint the host kernel and should be used with extreme caution.
 */
#define VFIO_NOIOMMU_IOMMU		8

/*
 * The IOCTL interface is designed for extensibility by embedding the
 * structure length (argsz) and flags into structures passed between
 * kernel and userspace.  We therefore use the _IO() macro for these
 * defines to avoid implicitly embedding a size into the ioctl request.
 * As structure fields are added, argsz will increase to match and flag
 * bits will be defined to indicate additional fields with valid data.
 * It's *always* the caller's responsibility to indicate the size of
 * the structure passed by setting argsz appropriately.
 */

#define VFIO_TYPE	(';')
#define VFIO_BASE	100

/*
 * For extension of INFO ioctls, VFIO makes use of a capability chain
 * designed after PCI/e capabilities.  A flag bit indicates whether
 * this capability chain is supported and a field defined in the fixed
 * structure defines the offset of the first capability in the chain.
 * This field is only valid when the corresponding bit in the flags
 * bitmap is set.  This offset field is relative to the start of the
 * INFO buffer, as is the next field within each capability header.
 * The id within the header is a shared address space per INFO ioctl,
 * while the version field is specific to the capability id.  The
 * contents following the header are specific to the capability id.
 */
struct vfio_info_cap_header {
	__u16	id;		/* Identifies capability */
	__u16	version;	/* Version specific to the capability ID */
	__u32	next;		/* Offset of next capability */
};

/*
 * Callers of INFO ioctls passing insufficiently sized buffers will see
 * the capability chain flag bit set, a zero value for the first capability
 * offset (if available within the provided argsz), and argsz will be
 * updated to report the necessary buffer size.  For compatibility, the
 * INFO ioctl will not report error in this case, but the capability chain
 * will not be available.
 */

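/*
 * Illustrative sketch (not part of this header): walking a capability
 * chain returned by an INFO ioctl.  "info" and its type are caller
 * assumptions; only the flag bit and the cap_offset field come from the
 * respective INFO structure, and error handling is omitted.
 *
 *	struct vfio_info_cap_header *hdr;
 *	__u32 next = info->cap_offset;
 *
 *	while (next) {
 *		hdr = (struct vfio_info_cap_header *)((char *)info + next);
 *		// hdr->id selects the capability, hdr->version its layout
 *		next = hdr->next;	// 0 terminates the chain
 *	}
 */
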
/* -------- IOCTLs for VFIO file descriptor (/dev/vfio/vfio) -------- */

/**
 * VFIO_GET_API_VERSION - _IO(VFIO_TYPE, VFIO_BASE + 0)
 *
 * Report the version of the VFIO API.  This allows us to bump the entire
 * API version should we later need to add or change features in incompatible
 * ways.
 * Return: VFIO_API_VERSION
 * Availability: Always
 */
#define VFIO_GET_API_VERSION	_IO(VFIO_TYPE, VFIO_BASE + 0)

/**
 * VFIO_CHECK_EXTENSION - _IOW(VFIO_TYPE, VFIO_BASE + 1, __u32)
 *
 * Check whether an extension is supported.
 * Return: 0 if not supported, 1 (or some other positive integer) if supported.
 * Availability: Always
 */
#define VFIO_CHECK_EXTENSION	_IO(VFIO_TYPE, VFIO_BASE + 1)

/**
 * VFIO_SET_IOMMU - _IOW(VFIO_TYPE, VFIO_BASE + 2, __s32)
 *
 * Set the iommu to the given type.  The type must be supported by an
 * iommu driver as verified by calling CHECK_EXTENSION using the same
 * type.  A group must be set to this file descriptor before this
 * ioctl is available.  The IOMMU interfaces enabled by this call are
 * specific to the value set.
 * Return: 0 on success, -errno on failure
 * Availability: When VFIO group attached
 */
#define VFIO_SET_IOMMU		_IO(VFIO_TYPE, VFIO_BASE + 2)

/* -------- IOCTLs for GROUP file descriptors (/dev/vfio/$GROUP) -------- */

/**
 * VFIO_GROUP_GET_STATUS - _IOR(VFIO_TYPE, VFIO_BASE + 3,
 *						struct vfio_group_status)
 *
 * Retrieve information about the group.  Fills in provided
 * struct vfio_group_status.  Caller sets argsz.
 * Return: 0 on success, -errno on failure.
 * Availability: Always
 */
struct vfio_group_status {
	__u32	argsz;
	__u32	flags;
#define VFIO_GROUP_FLAGS_VIABLE		(1 << 0)
#define VFIO_GROUP_FLAGS_CONTAINER_SET	(1 << 1)
};
#define VFIO_GROUP_GET_STATUS	_IO(VFIO_TYPE, VFIO_BASE + 3)

/**
 * VFIO_GROUP_SET_CONTAINER - _IOW(VFIO_TYPE, VFIO_BASE + 4, __s32)
 *
 * Set the container for the VFIO group to the open VFIO file
 * descriptor provided.  Groups may only belong to a single
 * container.  Containers may, at their discretion, support multiple
 * groups.  Only when a container is set are all of the interfaces
 * of the VFIO file descriptor and the VFIO group file descriptor
 * available to the user.
 * Return: 0 on success, -errno on failure.
 * Availability: Always
 */
#define VFIO_GROUP_SET_CONTAINER	_IO(VFIO_TYPE, VFIO_BASE + 4)

/**
 * VFIO_GROUP_UNSET_CONTAINER - _IO(VFIO_TYPE, VFIO_BASE + 5)
 *
 * Remove the group from the attached container.  This is the
 * opposite of the SET_CONTAINER call and returns the group to
 * an initial state.  All device file descriptors must be released
 * prior to calling this interface.  When removing the last group
 * from a container, the IOMMU will be disabled and all state lost,
 * effectively also returning the VFIO file descriptor to an initial
 * state.
 * Return: 0 on success, -errno on failure.
 * Availability: When attached to container
 */
#define VFIO_GROUP_UNSET_CONTAINER	_IO(VFIO_TYPE, VFIO_BASE + 5)

/**
 * VFIO_GROUP_GET_DEVICE_FD - _IOW(VFIO_TYPE, VFIO_BASE + 6, char)
 *
 * Return a new file descriptor for the device object described by
 * the provided string.  The string should match a device listed in
 * the devices subdirectory of the IOMMU group sysfs entry.  The
 * group containing the device must already be added to this context.
 * Return: new file descriptor on success, -errno on failure.
 * Availability: When attached to container
 */
#define VFIO_GROUP_GET_DEVICE_FD	_IO(VFIO_TYPE, VFIO_BASE + 6)

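/*
 * Illustrative container/group/device bring-up sketch (not part of this
 * header).  The group number and PCI device name are hypothetical
 * placeholders taken from sysfs; <fcntl.h>, <sys/ioctl.h> and <assert.h>
 * are assumed and error handling is otherwise omitted.
 *
 *	int container = open("/dev/vfio/vfio", O_RDWR);
 *	int group, device;
 *	struct vfio_group_status status = { .argsz = sizeof(status) };
 *
 *	// both checks are expected to pass before proceeding
 *	assert(ioctl(container, VFIO_GET_API_VERSION) == VFIO_API_VERSION);
 *	assert(ioctl(container, VFIO_CHECK_EXTENSION, VFIO_TYPE1v2_IOMMU));
 *
 *	group = open("/dev/vfio/26", O_RDWR);		// hypothetical group
 *	ioctl(group, VFIO_GROUP_GET_STATUS, &status);
 *	assert(status.flags & VFIO_GROUP_FLAGS_VIABLE);	// all devices bound
 *
 *	ioctl(group, VFIO_GROUP_SET_CONTAINER, &container);
 *	ioctl(container, VFIO_SET_IOMMU, VFIO_TYPE1v2_IOMMU);
 *	device = ioctl(group, VFIO_GROUP_GET_DEVICE_FD, "0000:06:0d.0");
 */
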
/* --------------- IOCTLs for DEVICE file descriptors --------------- */

/**
 * VFIO_DEVICE_GET_INFO - _IOR(VFIO_TYPE, VFIO_BASE + 7,
 *						struct vfio_device_info)
 *
 * Retrieve information about the device.  Fills in provided
 * struct vfio_device_info.  Caller sets argsz.
 * Return: 0 on success, -errno on failure.
 */
struct vfio_device_info {
	__u32	argsz;
	__u32	flags;
#define VFIO_DEVICE_FLAGS_RESET		(1 << 0)	/* Device supports reset */
#define VFIO_DEVICE_FLAGS_PCI		(1 << 1)	/* vfio-pci device */
#define VFIO_DEVICE_FLAGS_PLATFORM	(1 << 2)	/* vfio-platform device */
#define VFIO_DEVICE_FLAGS_AMBA		(1 << 3)	/* vfio-amba device */
#define VFIO_DEVICE_FLAGS_CCW		(1 << 4)	/* vfio-ccw device */
#define VFIO_DEVICE_FLAGS_AP		(1 << 5)	/* vfio-ap device */
	__u32	num_regions;	/* Max region index + 1 */
	__u32	num_irqs;	/* Max IRQ index + 1 */
};
#define VFIO_DEVICE_GET_INFO	_IO(VFIO_TYPE, VFIO_BASE + 7)

/*
 * A vendor driver using the mediated device framework should provide a
 * device_api attribute in its supported type attribute groups.  The device
 * API string should be one of the following, corresponding to the device
 * flags in the vfio_device_info structure.
 */

#define VFIO_DEVICE_API_PCI_STRING		"vfio-pci"
#define VFIO_DEVICE_API_PLATFORM_STRING		"vfio-platform"
#define VFIO_DEVICE_API_AMBA_STRING		"vfio-amba"
#define VFIO_DEVICE_API_CCW_STRING		"vfio-ccw"
#define VFIO_DEVICE_API_AP_STRING		"vfio-ap"

/**
 * VFIO_DEVICE_GET_REGION_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 8,
 *				       struct vfio_region_info)
 *
 * Retrieve information about a device region.  Caller provides
 * struct vfio_region_info with index value set.  Caller sets argsz.
 * Implementation of region mapping is bus driver specific.  This is
 * intended to describe MMIO, I/O port, as well as bus specific
 * regions (ex. PCI config space).  Zero sized regions may be used
 * to describe unimplemented regions (ex. unimplemented PCI BARs).
 * Return: 0 on success, -errno on failure.
 */
struct vfio_region_info {
	__u32	argsz;
	__u32	flags;
#define VFIO_REGION_INFO_FLAG_READ	(1 << 0) /* Region supports read */
#define VFIO_REGION_INFO_FLAG_WRITE	(1 << 1) /* Region supports write */
#define VFIO_REGION_INFO_FLAG_MMAP	(1 << 2) /* Region supports mmap */
#define VFIO_REGION_INFO_FLAG_CAPS	(1 << 3) /* Info supports caps */
	__u32	index;		/* Region index */
	__u32	cap_offset;	/* Offset within info struct of first cap */
	__u64	size;		/* Region size (bytes) */
	__u64	offset;		/* Region offset from start of device fd */
};
#define VFIO_DEVICE_GET_REGION_INFO	_IO(VFIO_TYPE, VFIO_BASE + 8)

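/*
 * Illustrative sketch (not part of this header): querying a region and
 * re-issuing the ioctl with a larger buffer when the kernel grows argsz
 * to make room for capabilities.  "device" and "index" are assumed from
 * context, standard C allocation is used, and error handling is omitted.
 *
 *	struct vfio_region_info *info = calloc(1, sizeof(*info));
 *
 *	info->argsz = sizeof(*info);
 *	info->index = index;
 *	ioctl(device, VFIO_DEVICE_GET_REGION_INFO, info);
 *	if (info->argsz > sizeof(*info)) {	// caps did not fit
 *		__u32 argsz = info->argsz;
 *
 *		info = realloc(info, argsz);	// retry with larger buffer
 *		memset(info, 0, argsz);
 *		info->argsz = argsz;
 *		info->index = index;
 *		ioctl(device, VFIO_DEVICE_GET_REGION_INFO, info);
 *	}
 */
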
/*
 * The sparse mmap capability allows finer granularity of specifying areas
 * within a region with mmap support.  When specified, the user should only
 * mmap the offset ranges specified by the areas array.  mmaps outside of the
 * areas specified may fail (such as the range covering a PCI MSI-X table) or
 * may result in improper device behavior.
 *
 * The structures below define version 1 of this capability.
 */
#define VFIO_REGION_INFO_CAP_SPARSE_MMAP	1

struct vfio_region_sparse_mmap_area {
	__u64	offset;	/* Offset of mmap'able area within region */
	__u64	size;	/* Size of mmap'able area */
};

struct vfio_region_info_cap_sparse_mmap {
	struct vfio_info_cap_header header;
	__u32	nr_areas;
	__u32	reserved;
	struct vfio_region_sparse_mmap_area areas[];
};

/*
 * The device specific type capability allows regions unique to a specific
 * device or class of devices to be exposed.  This helps solve the problem for
 * vfio bus drivers of defining which region indexes correspond to which region
 * on the device, without needing to resort to static indexes, as done by
 * vfio-pci.  For instance, if we were to go back in time, we might remove
 * VFIO_PCI_VGA_REGION_INDEX and let vfio-pci simply define that all indexes
 * greater than or equal to VFIO_PCI_NUM_REGIONS are device specific and we'd
 * make a "VGA" device specific type to describe the VGA access space.  This
 * means that non-VGA devices wouldn't need to waste this index, and thus the
 * address space associated with it due to implementation of device file
 * descriptor offsets in vfio-pci.
 *
 * The current implementation is now part of the user ABI, so we can't use this
 * for VGA, but there are other upcoming use cases, such as opregions for Intel
 * IGD devices and framebuffers for vGPU devices.  We missed VGA, but we'll
 * use this for future additions.
 *
 * The structure below defines version 1 of this capability.
 */
#define VFIO_REGION_INFO_CAP_TYPE	2

struct vfio_region_info_cap_type {
	struct vfio_info_cap_header header;
	__u32 type;	/* global per bus driver */
	__u32 subtype;	/* type specific */
};

/*
 * List of region types, global per bus driver.
 * If you introduce a new type, please add it here.
 */

/* PCI region type containing a PCI vendor part */
#define VFIO_REGION_TYPE_PCI_VENDOR_TYPE	(1 << 31)
#define VFIO_REGION_TYPE_PCI_VENDOR_MASK	(0xffff)
#define VFIO_REGION_TYPE_GFX			(1)
#define VFIO_REGION_TYPE_CCW			(2)
#define VFIO_REGION_TYPE_MIGRATION		(3)

/* sub-types for VFIO_REGION_TYPE_PCI_* */

/* 8086 vendor PCI sub-types */
#define VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION	(1)
#define VFIO_REGION_SUBTYPE_INTEL_IGD_HOST_CFG	(2)
#define VFIO_REGION_SUBTYPE_INTEL_IGD_LPC_CFG	(3)

/* 10de vendor PCI sub-types */
/*
 * NVIDIA GPU NVlink2 RAM is coherent RAM mapped onto the host address space.
 */
#define VFIO_REGION_SUBTYPE_NVIDIA_NVLINK2_RAM	(1)

/* 1014 vendor PCI sub-types */
/*
 * IBM NPU NVlink2 ATSD (Address Translation Shootdown) register of NPU
 * to do TLB invalidation on a GPU.
 */
#define VFIO_REGION_SUBTYPE_IBM_NVLINK2_ATSD	(1)

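/*
 * Illustrative sketch (not part of this header): interpreting a type
 * capability found in the chain.  "hdr" is assumed to point at a
 * capability header whose id is VFIO_REGION_INFO_CAP_TYPE.
 *
 *	struct vfio_region_info_cap_type *cap =
 *		(struct vfio_region_info_cap_type *)hdr;
 *
 *	if (cap->type & VFIO_REGION_TYPE_PCI_VENDOR_TYPE) {
 *		// PCI vendor specific: low 16 bits carry the vendor ID
 *		__u16 vendor = cap->type & VFIO_REGION_TYPE_PCI_VENDOR_MASK;
 *		// e.g. vendor == 0x8086 with subtype
 *		// VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION
 *	} else if (cap->type == VFIO_REGION_TYPE_GFX &&
 *		   cap->subtype == VFIO_REGION_SUBTYPE_GFX_EDID) {
 *		// region follows the vfio_region_gfx_edid layout below
 *	}
 */
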
/* sub-types for VFIO_REGION_TYPE_GFX */
#define VFIO_REGION_SUBTYPE_GFX_EDID		(1)

/**
 * struct vfio_region_gfx_edid - EDID region layout.
 *
 * Set display link state and EDID blob.
 *
 * The EDID blob has monitor information such as brand, name, serial
 * number, physical size, supported video modes and more.
 *
 * This special region allows userspace (typically qemu) to set a virtual
 * EDID for the virtual monitor, which allows a flexible display
 * configuration.
 *
 * For the edid blob spec look here:
 *    https://en.wikipedia.org/wiki/Extended_Display_Identification_Data
 *
 * On Linux systems you can find the EDID blob in sysfs:
 *    /sys/class/drm/${card}/${connector}/edid
 *
 * You can use the edid-decode utility (comes with xorg-x11-utils) to
 * decode the EDID blob.
 *
 * @edid_offset: location of the edid blob, relative to the
 *               start of the region (readonly).
 * @edid_max_size: max size of the edid blob (readonly).
 * @edid_size: actual edid size (read/write).
 * @link_state: display link state (read/write).
 * VFIO_DEVICE_GFX_LINK_STATE_UP: Monitor is turned on.
 * VFIO_DEVICE_GFX_LINK_STATE_DOWN: Monitor is turned off.
 * @max_xres: max display width (0 == no limitation, readonly).
 * @max_yres: max display height (0 == no limitation, readonly).
 *
 * EDID update protocol:
 *   (1) set link-state to down.
 *   (2) update edid blob and size.
 *   (3) set link-state to up.
 */
struct vfio_region_gfx_edid {
	__u32 edid_offset;
	__u32 edid_max_size;
	__u32 edid_size;
	__u32 max_xres;
	__u32 max_yres;
	__u32 link_state;
#define VFIO_DEVICE_GFX_LINK_STATE_UP		1
#define VFIO_DEVICE_GFX_LINK_STATE_DOWN		2
};

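/*
 * Illustrative sketch of the EDID update protocol above (not part of this
 * header).  "region_offset" is assumed to be the device fd offset of the
 * EDID region, and "blob"/"blob_size" (a __u32) a caller-provided EDID;
 * the region is accessed here with pread()/pwrite(), <stddef.h> is
 * assumed for offsetof(), and error handling is omitted.
 *
 *	struct vfio_region_gfx_edid hdr;
 *	__u32 state = VFIO_DEVICE_GFX_LINK_STATE_DOWN;
 *	__u64 ls = region_offset +
 *		   offsetof(struct vfio_region_gfx_edid, link_state);
 *
 *	pread(device, &hdr, sizeof(hdr), region_offset);
 *	pwrite(device, &state, sizeof(state), ls);	// (1) link down
 *	pwrite(device, blob, blob_size,			// (2) blob + size
 *	       region_offset + hdr.edid_offset);
 *	pwrite(device, &blob_size, sizeof(blob_size),
 *	       region_offset + offsetof(struct vfio_region_gfx_edid, edid_size));
 *	state = VFIO_DEVICE_GFX_LINK_STATE_UP;
 *	pwrite(device, &state, sizeof(state), ls);	// (3) link up
 */
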
/* sub-types for VFIO_REGION_TYPE_CCW */
#define VFIO_REGION_SUBTYPE_CCW_ASYNC_CMD	(1)
#define VFIO_REGION_SUBTYPE_CCW_SCHIB		(2)
#define VFIO_REGION_SUBTYPE_CCW_CRW		(3)

/* sub-types for VFIO_REGION_TYPE_MIGRATION */
#define VFIO_REGION_SUBTYPE_MIGRATION		(1)

/*
 * The structure vfio_device_migration_info is placed at the 0th offset of
 * the VFIO_REGION_SUBTYPE_MIGRATION region to get and set VFIO device related
 * migration information.  Field accesses from this structure are only
 * supported at their native width and alignment.  Otherwise, the result is
 * undefined and vendor drivers should return an error.
 *
 * device_state: (read/write)
 *      - The user application writes to this field to inform the vendor driver
 *        about the device state to be transitioned to.
 *      - The vendor driver should take the necessary actions to change the
 *        device state.  After successful transition to a given state, the
 *        vendor driver should return success on write(device_state, state)
 *        system call.  If the device state transition fails, the vendor driver
 *        should return an appropriate -errno for the fault condition.
 *      - On the user application side, if the device state transition fails,
 *        that is, if write(device_state, state) returns an error, read
 *        device_state again to determine the current state of the device from
 *        the vendor driver.
 *      - The vendor driver should return the previous state of the device
 *        unless the vendor driver has encountered an internal error, in which
 *        case the vendor driver may report the device_state
 *        VFIO_DEVICE_STATE_ERROR.
 *      - The user application must use the device reset ioctl to recover the
 *        device from VFIO_DEVICE_STATE_ERROR state.  If the device is
 *        indicated to be in a valid device state by reading device_state, the
 *        user application may attempt to transition the device to any valid
 *        state reachable from the current state or terminate itself.
 *
 *      device_state consists of 3 bits:
 *      - If bit 0 is set, it indicates the _RUNNING state.  If bit 0 is clear,
 *        it indicates the _STOP state.  When the device state is changed to
 *        _STOP, the driver should stop the device before write() returns.
 *      - If bit 1 is set, it indicates the _SAVING state, which means that the
 *        driver should start gathering device state information that will be
 *        provided to the VFIO user application to save the device's state.
 *      - If bit 2 is set, it indicates the _RESUMING state, which means that
 *        the driver should prepare to resume the device.  Data provided
 *        through the migration region should be used to resume the device.
 *      Bits 3 - 31 are reserved for future use.  To preserve them, the user
 *      application should perform a read-modify-write operation on this
 *      field when modifying the specified bits.
 *
 *  +------- _RESUMING
 *  |+------ _SAVING
 *  ||+----- _RUNNING
 *  |||
 *  000b => Device Stopped, not saving or resuming
 *  001b => Device running, which is the default state
 *  010b => Stop the device & save the device state, stop-and-copy state
 *  011b => Device running and save the device state, pre-copy state
 *  100b => Device stopped and the device state is resuming
 *  101b => Invalid state
 *  110b => Error state
 *  111b => Invalid state
 *
 * State transitions:
 *
 *              _RESUMING  _RUNNING    Pre-copy    Stop-and-copy   _STOP
 *                (100b)     (001b)      (011b)       (010b)       (000b)
 * 0. Running or default state
 *                             |
 *
 * 1. Normal Shutdown (optional)
 *                             |------------------------------------->|
 *
 * 2. Save the state or suspend
 *                             |------------------------->|---------->|
 *
 * 3. Save the state during live migration
 *                             |----------->|------------>|---------->|
 *
 * 4. Resuming
 *                  |<---------|
 *
 * 5. Resumed
 *                  |--------->|
 *
 * 0. Default state of VFIO device is _RUNNING when the user application
 *    starts.
 * 1. During normal shutdown of the user application, the user application may
 *    optionally change the VFIO device state from _RUNNING to _STOP.  This
 *    transition is optional.  The vendor driver must support this transition
 *    but must not require it.
 * 2. When the user application saves state or suspends the application, the
 *    device state transitions from _RUNNING to stop-and-copy and then to
 *    _STOP.  On state transition from _RUNNING to stop-and-copy, the driver
 *    must stop the device, save the device state and send it to the
 *    application through the migration region.  The sequence to be followed
 *    for such a transition is given below.
 * 3. In live migration of a user application, the state transitions from
 *    _RUNNING to pre-copy, to stop-and-copy, and to _STOP.
 *    On state transition from _RUNNING to pre-copy, the driver should start
 *    gathering the device state while the application is still running and
 *    send the device state data to the application through the migration
 *    region.
 *    On state transition from pre-copy to stop-and-copy, the driver must stop
 *    the device, save the device state and send it to the user application
 *    through the migration region.
 *    Vendor drivers must support the pre-copy state even for implementations
 *    where no data is provided to the user before the stop-and-copy state.
 *    The user must not be required to consume all migration data before the
 *    device transitions to a new state, including the stop-and-copy state.
 *    The sequence to be followed for the above two transitions is given below.
 * 4. To start the resuming phase, the device state should be transitioned
 *    from the _RUNNING to the _RESUMING state.
 *    In the _RESUMING state, the driver should use the device state data
 *    received through the migration region to resume the device.
 * 5. After providing saved device data to the driver, the application should
 *    change the state from _RESUMING to _RUNNING.
 *
 * reserved:
 *      Reads on this field return zero and writes are ignored.
 *
 * pending_bytes: (read only)
 *      The number of pending bytes still to be migrated from the vendor
 *      driver.
 *
 * data_offset: (read only)
 *      The user application should read the data_offset field from the
 *      migration region.  The user application should read the device data
 *      from this offset within the migration region during the _SAVING state
 *      or write the device data during the _RESUMING state.  See below for
 *      details of the sequence to be followed.
 *
 * data_size: (read/write)
 *      The user application should read data_size to get the size in bytes of
 *      the data copied in the migration region during the _SAVING state and
 *      write the size in bytes of the data copied in the migration region
 *      during the _RESUMING state.
 *
 * The format of the migration region is as follows:
 *  ------------------------------------------------------------------
 *  |vfio_device_migration_info|    data section                      |
 *  |                          |    ///////////////////////////////   |
 *  ------------------------------------------------------------------
 *   ^                              ^
 *  offset 0-trapped part        data_offset
 *
 * The structure vfio_device_migration_info is always followed by the data
 * section in the region, so data_offset will always be nonzero.  The offset
 * from where the data is copied is decided by the kernel driver.  The data
 * section can be trapped, mmapped, or partitioned, depending on how the
 * kernel driver defines the data section.  The data section partition can be
 * defined as mapped by the sparse mmap capability.  If mmapped, data_offset
 * must be page aligned, whereas the initial section containing the
 * vfio_device_migration_info structure might not end at a page aligned
 * offset.  The user is not required to access the data through mmap
 * regardless of the capabilities of the region mmap.
 * The vendor driver should determine whether and how to partition the data
 * section.  The vendor driver should return data_offset accordingly.
 *
 * The sequence to be followed while in pre-copy state and stop-and-copy state
 * is as follows:
 * a. Read pending_bytes, indicating the start of a new iteration to get
 *    device data.  Repeated reads on pending_bytes at this stage should have
 *    no side effects.
 *    If pending_bytes == 0, the user application should not iterate to get
 *    data for that device.
 *    If pending_bytes > 0, perform the following steps.
 * b. Read data_offset, indicating that the vendor driver should make data
 *    available through the data section.  The vendor driver should return
 *    this read operation only after data is available from
 *    (region + data_offset) to (region + data_offset + data_size).
 * c. Read data_size, which is the amount of data in bytes available through
 *    the migration region.
 *    Reads on data_offset and data_size should return the offset and size of
 *    the current buffer if the user application reads data_offset and
 *    data_size more than once here.
 * d. Read data_size bytes of data from (region + data_offset) from the
 *    migration region.
 * e. Process the data.
 * f. Read pending_bytes, which indicates that the data from the previous
 *    iteration has been read.  If pending_bytes > 0, go to step b.
 *
 * The user application can transition from the _SAVING|_RUNNING
 * (pre-copy state) to the _SAVING (stop-and-copy) state regardless of the
 * number of pending bytes.  The user application should iterate in _SAVING
 * (stop-and-copy) until pending_bytes is 0.
 *
 * The sequence to be followed while in the _RESUMING device state is as
 * follows:
 * While data for this device is available, repeat the following steps:
 * a. Read data_offset from where the user application should write data.
 * b. Write migration data starting at the migration region + data_offset for
 *    the length determined by data_size from the migration source.
 * c. Write data_size, which indicates to the vendor driver that data is
 *    written in the migration region.  The vendor driver must return from
 *    this write operation only after consuming the data.  The vendor driver
 *    should apply the user-provided migration region data to the device
 *    resume state.
 *
 * If an error occurs during the above sequences, the vendor driver can return
 * an error code for the next read() or write() operation, which will
 * terminate the loop.  The user application should then take the next
 * necessary action, for example, failing migration or terminating the user
 * application.
 *
 * For the user application, data is opaque.  The user application should
 * write data in the same order as the data is received and the data should be
 * of the same transaction size as at the source.
 */

struct vfio_device_migration_info {
	__u32 device_state;		/* VFIO device state */
#define VFIO_DEVICE_STATE_STOP		(0)
#define VFIO_DEVICE_STATE_RUNNING	(1 << 0)
#define VFIO_DEVICE_STATE_SAVING	(1 << 1)
#define VFIO_DEVICE_STATE_RESUMING	(1 << 2)
#define VFIO_DEVICE_STATE_MASK		(VFIO_DEVICE_STATE_RUNNING | \
					 VFIO_DEVICE_STATE_SAVING |  \
					 VFIO_DEVICE_STATE_RESUMING)

#define VFIO_DEVICE_STATE_VALID(state) \
	(state & VFIO_DEVICE_STATE_RESUMING ? \
	(state & VFIO_DEVICE_STATE_MASK) == VFIO_DEVICE_STATE_RESUMING : 1)

#define VFIO_DEVICE_STATE_IS_ERROR(state) \
	((state & VFIO_DEVICE_STATE_MASK) == (VFIO_DEVICE_STATE_SAVING | \
					      VFIO_DEVICE_STATE_RESUMING))

#define VFIO_DEVICE_STATE_SET_ERROR(state) \
	((state & ~VFIO_DEVICE_STATE_MASK) | VFIO_DEVICE_STATE_SAVING | \
					     VFIO_DEVICE_STATE_RESUMING)

	__u32 reserved;
	__u64 pending_bytes;
	__u64 data_offset;
	__u64 data_size;
};

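/*
 * Illustrative sketch of one _SAVING pass (not part of this header).
 * "mig" is assumed to be the device fd offset of the migration region
 * found via VFIO_DEVICE_GET_REGION_INFO, and "buf" a caller-provided
 * buffer large enough for each chunk; the trapped header is accessed
 * with pread()/pwrite(), <stddef.h> supplies offsetof(), the mmap'able
 * data-section case and error handling are omitted.
 *
 *	// shorthand for a field offset within the migration region
 *	#define MIG(f) (mig + offsetof(struct vfio_device_migration_info, f))
 *	__u32 state;
 *	__u64 pending, off, size;
 *
 *	pread(device, &state, sizeof(state), MIG(device_state));
 *	state |= VFIO_DEVICE_STATE_SAVING;	// read-modify-write, pre-copy
 *	pwrite(device, &state, sizeof(state), MIG(device_state));
 *
 *	pread(device, &pending, sizeof(pending), MIG(pending_bytes));	// a
 *	while (pending) {
 *		pread(device, &off, sizeof(off), MIG(data_offset));	// b
 *		pread(device, &size, sizeof(size), MIG(data_size));	// c
 *		pread(device, buf, size, mig + off);			// d
 *		// e: forward buf to the migration stream
 *		pread(device, &pending, sizeof(pending), MIG(pending_bytes));
 *	}								// f
 */
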
/*
 * The MSIX mappable capability indicates that the MSIX data of a BAR can be
 * mmapped, which allows direct access to non-MSIX registers that happen to
 * be within the same system page.
 *
 * Even though the userspace gets direct access to the MSIX data, the existing
 * VFIO_DEVICE_SET_IRQS interface must still be used for MSIX configuration.
 */
#define VFIO_REGION_INFO_CAP_MSIX_MAPPABLE	3

/*
 * Capability with compressed real address (aka SSA - small system address)
 * where GPU RAM is mapped on a system bus.  Used by a GPU for DMA routing
 * and by the userspace to associate a NVLink bridge with a GPU.
 */
#define VFIO_REGION_INFO_CAP_NVLINK2_SSATGT	4

struct vfio_region_info_cap_nvlink2_ssatgt {
	struct vfio_info_cap_header header;
	__u64 tgt;
};

/*
 * Capability with an NVLink link speed.  The value is read by
 * the NVlink2 bridge driver from the bridge's "ibm,nvlink-speed"
 * property in the device tree.  The value is fixed in the hardware
 * and failing to provide the correct value results in the link
 * not working with no indication from the driver why.
 */
#define VFIO_REGION_INFO_CAP_NVLINK2_LNKSPD	5

struct vfio_region_info_cap_nvlink2_lnkspd {
	struct vfio_info_cap_header header;
	__u32 link_speed;
	__u32 __pad;
};

/**
 * VFIO_DEVICE_GET_IRQ_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 9,
 *				    struct vfio_irq_info)
 *
 * Retrieve information about a device IRQ.  Caller provides
 * struct vfio_irq_info with index value set.  Caller sets argsz.
 * Implementation of IRQ mapping is bus driver specific.  Indexes
 * using multiple IRQs are primarily intended to support MSI-like
 * interrupt blocks.  Zero count irq blocks may be used to describe
 * unimplemented interrupt types.
 *
 * The EVENTFD flag indicates the interrupt index supports eventfd based
 * signaling.
 *
 * The MASKABLE flag indicates the index supports MASK and UNMASK
 * actions described below.
 *
 * AUTOMASKED indicates that after signaling, the interrupt line is
 * automatically masked by VFIO and the user needs to unmask the line
 * to receive new interrupts.  This is primarily intended to distinguish
 * level triggered interrupts.
 *
 * The NORESIZE flag indicates that the interrupt lines within the index
 * are setup as a set and new subindexes cannot be enabled without first
 * disabling the entire index.  This is used for interrupts like PCI MSI
 * and MSI-X where the driver may only use a subset of the available
 * indexes, but VFIO needs to enable a specific number of vectors
 * upfront.  In the case of MSI-X, where the user can enable MSI-X and
 * then add and unmask vectors, it's up to userspace to make the decision
 * whether to allocate the maximum supported number of vectors or tear
 * down setup and incrementally increase the vectors as each is enabled.
 */
struct vfio_irq_info {
	__u32	argsz;
	__u32	flags;
#define VFIO_IRQ_INFO_EVENTFD		(1 << 0)
#define VFIO_IRQ_INFO_MASKABLE		(1 << 1)
#define VFIO_IRQ_INFO_AUTOMASKED	(1 << 2)
#define VFIO_IRQ_INFO_NORESIZE		(1 << 3)
	__u32	index;		/* IRQ index */
	__u32	count;		/* Number of IRQs within this index */
};
#define VFIO_DEVICE_GET_IRQ_INFO	_IO(VFIO_TYPE, VFIO_BASE + 9)

/**
 * VFIO_DEVICE_SET_IRQS - _IOW(VFIO_TYPE, VFIO_BASE + 10, struct vfio_irq_set)
 *
 * Set signaling, masking, and unmasking of interrupts.  Caller provides
 * struct vfio_irq_set with all fields set.  'start' and 'count' indicate
 * the range of subindexes being specified.
 *
 * The DATA flags specify the type of data provided.  If DATA_NONE, the
 * operation performs the specified action immediately on the specified
 * interrupt(s).  For example, to unmask AUTOMASKED interrupt [0,0]:
 * flags = (DATA_NONE|ACTION_UNMASK), index = 0, start = 0, count = 1.
 *
 * DATA_BOOL allows sparse support for the same on arrays of interrupts.
 * For example, to mask interrupts [0,1] and [0,3] (but not [0,2]):
 * flags = (DATA_BOOL|ACTION_MASK), index = 0, start = 1, count = 3,
 * data = {1,0,1}
 *
 * DATA_EVENTFD binds the specified ACTION to the provided __s32 eventfd.
 * A value of -1 can be used to either de-assign interrupts if already
 * assigned or skip un-assigned interrupts.  For example, to set an eventfd
 * to be triggered for interrupts [0,0] and [0,2]:
 * flags = (DATA_EVENTFD|ACTION_TRIGGER), index = 0, start = 0, count = 3,
 * data = {fd1, -1, fd2}
 * If index [0,1] is previously set, two count = 1 ioctl calls would be
 * required to set [0,0] and [0,2] without changing [0,1].
 *
 * Once a signaling mechanism is set, DATA_BOOL or DATA_NONE can be used
 * with ACTION_TRIGGER to perform kernel level interrupt loopback testing
 * from userspace (ie. simulate hardware triggering).
 *
 * Setting of an event triggering mechanism to userspace for ACTION_TRIGGER
 * enables the interrupt index for the device.  Individual subindex interrupts
 * can be disabled using the -1 value for DATA_EVENTFD or the index can be
 * disabled as a whole with: flags = (DATA_NONE|ACTION_TRIGGER), count = 0.
 *
 * Note that ACTION_[UN]MASK specify user->kernel signaling (irqfds) while
 * ACTION_TRIGGER specifies kernel->user signaling.
 */
struct vfio_irq_set {
	__u32	argsz;
	__u32	flags;
#define VFIO_IRQ_SET_DATA_NONE		(1 << 0) /* Data not present */
#define VFIO_IRQ_SET_DATA_BOOL		(1 << 1) /* Data is bool (u8) */
#define VFIO_IRQ_SET_DATA_EVENTFD	(1 << 2) /* Data is eventfd (s32) */
#define VFIO_IRQ_SET_ACTION_MASK	(1 << 3) /* Mask interrupt */
#define VFIO_IRQ_SET_ACTION_UNMASK	(1 << 4) /* Unmask interrupt */
#define VFIO_IRQ_SET_ACTION_TRIGGER	(1 << 5) /* Trigger interrupt */
	__u32	index;
	__u32	start;
	__u32	count;
	__u8	data[];
};
#define VFIO_DEVICE_SET_IRQS	_IO(VFIO_TYPE, VFIO_BASE + 10)

#define VFIO_IRQ_SET_DATA_TYPE_MASK	(VFIO_IRQ_SET_DATA_NONE | \
					 VFIO_IRQ_SET_DATA_BOOL | \
					 VFIO_IRQ_SET_DATA_EVENTFD)
#define VFIO_IRQ_SET_ACTION_TYPE_MASK	(VFIO_IRQ_SET_ACTION_MASK | \
					 VFIO_IRQ_SET_ACTION_UNMASK | \
					 VFIO_IRQ_SET_ACTION_TRIGGER)

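/*
 * Illustrative sketch (not part of this header): binding eventfds to two
 * MSI-X vectors.  Because of the variable-length data[] array, the
 * argument must be allocated with the eventfds appended.  <sys/eventfd.h>
 * and <stdlib.h> are assumed; error handling is omitted.
 *
 *	struct vfio_irq_set *set;
 *	size_t sz = sizeof(*set) + 2 * sizeof(__s32);
 *	__s32 *fds;
 *
 *	set = calloc(1, sz);
 *	set->argsz = sz;
 *	set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
 *	set->index = VFIO_PCI_MSIX_IRQ_INDEX;	// defined below for vfio-pci
 *	set->start = 0;
 *	set->count = 2;
 *	fds = (__s32 *)set->data;
 *	fds[0] = eventfd(0, EFD_CLOEXEC);
 *	fds[1] = eventfd(0, EFD_CLOEXEC);
 *	ioctl(device, VFIO_DEVICE_SET_IRQS, set);
 *	free(set);
 */
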
/**
 * VFIO_DEVICE_RESET - _IO(VFIO_TYPE, VFIO_BASE + 11)
 *
 * Reset a device.
 */
#define VFIO_DEVICE_RESET	_IO(VFIO_TYPE, VFIO_BASE + 11)

/*
 * The VFIO-PCI bus driver makes use of the following fixed region and
 * IRQ index mapping.  Unimplemented regions return a size of zero.
 * Unimplemented IRQ types return a count of zero.
 */

enum {
	VFIO_PCI_BAR0_REGION_INDEX,
	VFIO_PCI_BAR1_REGION_INDEX,
	VFIO_PCI_BAR2_REGION_INDEX,
	VFIO_PCI_BAR3_REGION_INDEX,
	VFIO_PCI_BAR4_REGION_INDEX,
	VFIO_PCI_BAR5_REGION_INDEX,
	VFIO_PCI_ROM_REGION_INDEX,
	VFIO_PCI_CONFIG_REGION_INDEX,
	/*
	 * Expose VGA regions defined for PCI base class 03, subclass 00.
	 * This includes I/O port ranges 0x3b0 to 0x3bb and 0x3c0 to 0x3df
	 * as well as the MMIO range 0xa0000 to 0xbffff.  Each implemented
	 * range is found at its identity mapped offset from the region
	 * offset, for example 0x3b0 is region_info.offset + 0x3b0.  Areas
	 * between described ranges are unimplemented.
	 */
	VFIO_PCI_VGA_REGION_INDEX,
	VFIO_PCI_NUM_REGIONS = 9 /* Fixed user ABI, region indexes >=9 use */
				 /* device specific cap to define content. */
};

enum {
	VFIO_PCI_INTX_IRQ_INDEX,
	VFIO_PCI_MSI_IRQ_INDEX,
	VFIO_PCI_MSIX_IRQ_INDEX,
	VFIO_PCI_ERR_IRQ_INDEX,
	VFIO_PCI_REQ_IRQ_INDEX,
	VFIO_PCI_NUM_IRQS
};

/*
 * The vfio-ccw bus driver makes use of the following fixed region and
 * IRQ index mapping.  Unimplemented regions return a size of zero.
 * Unimplemented IRQ types return a count of zero.
 */

enum {
	VFIO_CCW_CONFIG_REGION_INDEX,
	VFIO_CCW_NUM_REGIONS
};

enum {
	VFIO_CCW_IO_IRQ_INDEX,
	VFIO_CCW_CRW_IRQ_INDEX,
	VFIO_CCW_NUM_IRQS
};

/**
 * VFIO_DEVICE_GET_PCI_HOT_RESET_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 12,
 *					      struct vfio_pci_hot_reset_info)
 *
 * Return: 0 on success, -errno on failure:
 *	-ENOSPC = insufficient buffer, -ENODEV = unsupported for device.
 */
struct vfio_pci_dependent_device {
	__u32	group_id;
	__u16	segment;
	__u8	bus;
	__u8	devfn; /* Use PCI_SLOT/PCI_FUNC */
};

struct vfio_pci_hot_reset_info {
	__u32	argsz;
	__u32	flags;
	__u32	count;
	struct vfio_pci_dependent_device	devices[];
};

#define VFIO_DEVICE_GET_PCI_HOT_RESET_INFO	_IO(VFIO_TYPE, VFIO_BASE + 12)

/**
 * VFIO_DEVICE_PCI_HOT_RESET - _IOW(VFIO_TYPE, VFIO_BASE + 13,
 *				    struct vfio_pci_hot_reset)
 *
 * Return: 0 on success, -errno on failure.
 */
struct vfio_pci_hot_reset {
	__u32	argsz;
	__u32	flags;
	__u32	count;
	__s32	group_fds[];
};

#define VFIO_DEVICE_PCI_HOT_RESET	_IO(VFIO_TYPE, VFIO_BASE + 13)

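/*
 * Illustrative sketch (not part of this header): issuing a PCI hot reset
 * once the dependent groups are known.  "group_fds"/"nr_groups" are
 * assumed to hold open group file descriptors covering every group
 * reported by VFIO_DEVICE_GET_PCI_HOT_RESET_INFO; <string.h> and
 * <stdlib.h> are assumed, error handling is omitted.
 *
 *	struct vfio_pci_hot_reset *reset;
 *	size_t sz = sizeof(*reset) + nr_groups * sizeof(__s32);
 *
 *	reset = calloc(1, sz);
 *	reset->argsz = sz;
 *	reset->count = nr_groups;
 *	memcpy(reset->group_fds, group_fds, nr_groups * sizeof(__s32));
 *	ioctl(device, VFIO_DEVICE_PCI_HOT_RESET, reset);
 *	free(reset);
 */
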
/**
 * VFIO_DEVICE_QUERY_GFX_PLANE - _IOW(VFIO_TYPE, VFIO_BASE + 14,
 *				      struct vfio_device_gfx_plane_info)
 *
 * Set the drm_plane_type and flags, then retrieve the gfx plane info.
 *
 * flags supported:
 *   - VFIO_GFX_PLANE_TYPE_PROBE and VFIO_GFX_PLANE_TYPE_DMABUF are set
 *     to ask if the mdev supports dma-buf.  0 on support, -EINVAL on no
 *     support for dma-buf.
 *   - VFIO_GFX_PLANE_TYPE_PROBE and VFIO_GFX_PLANE_TYPE_REGION are set
 *     to ask if the mdev supports region.  0 on support, -EINVAL on no
 *     support for region.
 *   - VFIO_GFX_PLANE_TYPE_DMABUF or VFIO_GFX_PLANE_TYPE_REGION is set
 *     with each call to query the plane info.
 *   - Others are invalid and return -EINVAL.
 *
 * Note:
 *   1. Plane could be disabled by guest.  In that case, success will be
 *      returned with zero-initialized drm_format, size, width and height
 *      fields.
 *   2. x_hot/y_hot is set to 0xFFFFFFFF if no hotspot information is
 *      available.
 *
 * Return: 0 on success, -errno on other failure.
 */
struct vfio_device_gfx_plane_info {
	__u32 argsz;
	__u32 flags;
#define VFIO_GFX_PLANE_TYPE_PROBE	(1 << 0)
#define VFIO_GFX_PLANE_TYPE_DMABUF	(1 << 1)
#define VFIO_GFX_PLANE_TYPE_REGION	(1 << 2)
	/* in */
	__u32 drm_plane_type;	/* type of plane: DRM_PLANE_TYPE_* */
	/* out */
	__u32 drm_format;	/* drm format of plane */
	__u64 drm_format_mod;	/* tiled mode */
	__u32 width;		/* width of plane */
	__u32 height;		/* height of plane */
	__u32 stride;		/* stride of plane */
	__u32 size;		/* size of plane in bytes, align on page */
	__u32 x_pos;		/* horizontal position of cursor plane */
	__u32 y_pos;		/* vertical position of cursor plane */
	__u32 x_hot;		/* horizontal position of cursor hotspot */
	__u32 y_hot;		/* vertical position of cursor hotspot */
	union {
		__u32 region_index;	/* region index */
		__u32 dmabuf_id;	/* dma-buf id */
	};
};

#define VFIO_DEVICE_QUERY_GFX_PLANE	_IO(VFIO_TYPE, VFIO_BASE + 14)

/**
 * VFIO_DEVICE_GET_GFX_DMABUF - _IOW(VFIO_TYPE, VFIO_BASE + 15, __u32)
 *
 * Return a new dma-buf file descriptor for an exposed guest framebuffer
 * described by the provided dmabuf_id.  The dmabuf_id is returned from
 * VFIO_DEVICE_QUERY_GFX_PLANE as a token of the exposed guest framebuffer.
 */

#define VFIO_DEVICE_GET_GFX_DMABUF	_IO(VFIO_TYPE, VFIO_BASE + 15)

/**
 * VFIO_DEVICE_IOEVENTFD - _IOW(VFIO_TYPE, VFIO_BASE + 16,
 *				struct vfio_device_ioeventfd)
 *
 * Perform a write to the device at the specified device fd offset, with
 * the specified data and width when the provided eventfd is triggered.
 * vfio bus drivers may not support this for all regions, for all widths,
 * or at all.  vfio-pci currently only enables support for BAR regions,
 * excluding the MSI-X vector table.
 *
 * Return: 0 on success, -errno on failure.
 */
struct vfio_device_ioeventfd {
	__u32	argsz;
	__u32	flags;
#define VFIO_DEVICE_IOEVENTFD_8		(1 << 0) /* 1-byte write */
#define VFIO_DEVICE_IOEVENTFD_16	(1 << 1) /* 2-byte write */
#define VFIO_DEVICE_IOEVENTFD_32	(1 << 2) /* 4-byte write */
#define VFIO_DEVICE_IOEVENTFD_64	(1 << 3) /* 8-byte write */
#define VFIO_DEVICE_IOEVENTFD_SIZE_MASK	(0xf)
	__u64	offset;			/* device fd offset of write */
	__u64	data;			/* data to be written */
	__s32	fd;			/* -1 for de-assignment */
};

#define VFIO_DEVICE_IOEVENTFD		_IO(VFIO_TYPE, VFIO_BASE + 16)

/**
 * VFIO_DEVICE_FEATURE - _IOWR(VFIO_TYPE, VFIO_BASE + 17,
 *			       struct vfio_device_feature)
 *
 * Get, set, or probe feature data of the device.  The feature is selected
 * using the FEATURE_MASK portion of the flags field.  Support for a feature
 * can be probed by setting both the FEATURE_MASK and PROBE bits.  A probe
 * may optionally include the GET and/or SET bits to determine read vs write
 * access of the feature respectively.  Probing a feature will return success
 * if the feature is supported and all of the optionally indicated GET/SET
 * methods are supported.  The format of the data portion of the structure is
 * specific to the given feature.  The data portion is not required for
 * probing.  GET and SET are mutually exclusive, except for use with PROBE.
 *
 * Return: 0 on success, -errno on failure.
 */
struct vfio_device_feature {
	__u32	argsz;
	__u32	flags;
#define VFIO_DEVICE_FEATURE_MASK	(0xffff) /* 16-bit feature index */
#define VFIO_DEVICE_FEATURE_GET		(1 << 16) /* Get feature into data[] */
#define VFIO_DEVICE_FEATURE_SET		(1 << 17) /* Set feature from data[] */
#define VFIO_DEVICE_FEATURE_PROBE	(1 << 18) /* Probe feature support */
	__u8	data[];
};

#define VFIO_DEVICE_FEATURE		_IO(VFIO_TYPE, VFIO_BASE + 17)

/*
 * Provide support for setting a PCI VF Token, which is used as a shared
 * secret between PF and VF drivers.  This feature may only be set on a
 * PCI SR-IOV PF when SR-IOV is enabled on the PF and there are no existing
 * open VFs.  Data provided when setting this feature is a 16-byte array
 * (__u8 b[16]), representing a UUID.
 */
#define VFIO_DEVICE_FEATURE_PCI_VF_TOKEN	(0)

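/*
 * Illustrative sketch (not part of this header): setting the VF token on
 * a PF device fd.  "uuid" is assumed to be a caller-provided 16-byte
 * value; <stdlib.h> and <string.h> are assumed, error handling is
 * omitted.
 *
 *	struct vfio_device_feature *feature;
 *	size_t sz = sizeof(*feature) + 16;
 *
 *	feature = calloc(1, sz);
 *	feature->argsz = sz;
 *	feature->flags = VFIO_DEVICE_FEATURE_SET |
 *			 VFIO_DEVICE_FEATURE_PCI_VF_TOKEN;
 *	memcpy(feature->data, uuid, 16);	// shared PF/VF secret
 *	ioctl(device, VFIO_DEVICE_FEATURE, feature);
 *	free(feature);
 */
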
/* -------- API for Type1 VFIO IOMMU -------- */

/**
 * VFIO_IOMMU_GET_INFO - _IOR(VFIO_TYPE, VFIO_BASE + 12,
 *			      struct vfio_iommu_type1_info)
 *
 * Retrieve information about the IOMMU object.  Fills in provided
 * struct vfio_iommu_type1_info.  Caller sets argsz.
 *
 * XXX Should we do these by CHECK_EXTENSION too?
 */
struct vfio_iommu_type1_info {
	__u32	argsz;
	__u32	flags;
#define VFIO_IOMMU_INFO_PGSIZES	(1 << 0)	/* supported page sizes info */
#define VFIO_IOMMU_INFO_CAPS	(1 << 1)	/* Info supports caps */
	__u64	iova_pgsizes;	/* Bitmap of supported page sizes */
	__u32	cap_offset;	/* Offset within info struct of first cap */
};

/*
 * The IOVA capability allows reporting the valid IOVA range(s)
 * excluding any non-relaxable reserved regions exposed by
 * devices attached to the container.  Any DMA map attempt
 * outside the valid iova range will return an error.
 *
 * The structures below define version 1 of this capability.
 */
#define VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE	1

struct vfio_iova_range {
	__u64	start;
	__u64	end;
};

struct vfio_iommu_type1_info_cap_iova_range {
	struct	vfio_info_cap_header header;
	__u32	nr_iovas;
	__u32	reserved;
	struct	vfio_iova_range iova_ranges[];
};

/*
 * The migration capability allows reporting the supported features for
 * migration.
 *
 * The structures below define version 1 of this capability.
 *
 * The existence of this capability indicates that the IOMMU kernel driver
 * supports dirty page logging.
 *
 * pgsize_bitmap: Kernel driver returns bitmap of supported page sizes for
 * dirty page logging.
 * max_dirty_bitmap_size: Kernel driver returns maximum supported dirty bitmap
 * size in bytes that can be used by user applications when getting the dirty
 * bitmap.
 */
#define VFIO_IOMMU_TYPE1_INFO_CAP_MIGRATION	2

struct vfio_iommu_type1_info_cap_migration {
	struct	vfio_info_cap_header header;
	__u32	flags;
	__u64	pgsize_bitmap;
	__u64	max_dirty_bitmap_size;		/* in bytes */
};

#define VFIO_IOMMU_GET_INFO _IO(VFIO_TYPE, VFIO_BASE + 12)

/**
 * VFIO_IOMMU_MAP_DMA - _IOW(VFIO_TYPE, VFIO_BASE + 13,
 *			     struct vfio_iommu_type1_dma_map)
 *
 * Map process virtual addresses to IO virtual addresses using the
 * provided struct vfio_iommu_type1_dma_map.  Caller sets argsz.
 * READ &/ WRITE required.
 */
struct vfio_iommu_type1_dma_map {
	__u32	argsz;
	__u32	flags;
#define VFIO_DMA_MAP_FLAG_READ	(1 << 0)	/* readable from device */
#define VFIO_DMA_MAP_FLAG_WRITE	(1 << 1)	/* writable from device */
	__u64	vaddr;				/* Process virtual address */
	__u64	iova;				/* IO virtual address */
	__u64	size;				/* Size of mapping (bytes) */
};

#define VFIO_IOMMU_MAP_DMA _IO(VFIO_TYPE, VFIO_BASE + 13)

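/*
 * Illustrative sketch (not part of this header): mapping 1MB of anonymous
 * memory at IOVA 0 for device DMA.  <sys/mman.h> is assumed and error
 * handling is omitted.
 *
 *	void *mem = mmap(NULL, 1024 * 1024, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	struct vfio_iommu_type1_dma_map map = { .argsz = sizeof(map) };
 *
 *	map.vaddr = (__u64)(unsigned long)mem;
 *	map.iova = 0;			// device-visible address
 *	map.size = 1024 * 1024;
 *	map.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE;
 *	ioctl(container, VFIO_IOMMU_MAP_DMA, &map);
 */
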
struct vfio_bitmap {
	__u64	pgsize;	/* page size for bitmap in bytes */
	__u64	size;	/* in bytes */
	__u64	*data;	/* one bit per page */
};

/**
 * VFIO_IOMMU_UNMAP_DMA - _IOWR(VFIO_TYPE, VFIO_BASE + 14,
 *				struct vfio_iommu_type1_dma_unmap)
 *
 * Unmap IO virtual addresses using the provided struct
 * vfio_iommu_type1_dma_unmap.  Caller sets argsz.  The actual unmapped size
 * is returned in the size field.  No guarantee is made to the user that
 * arbitrary unmaps of iova or size different from those used in the original
 * mapping call will succeed.
 * VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP should be set to get the dirty bitmap
 * before unmapping IO virtual addresses.  When this flag is set, the user
 * must provide a struct vfio_bitmap in data[].  The user must provide
 * zero-allocated memory via vfio_bitmap.data and its size in the
 * vfio_bitmap.size field.  A bit in the bitmap represents one page, of the
 * user provided page size in the vfio_bitmap.pgsize field, consecutively
 * starting from the iova offset.  A set bit indicates that the page at that
 * offset from iova is dirty.  A bitmap of the pages in the range of the
 * unmapped size is returned in the user-provided vfio_bitmap.data.
 */
struct vfio_iommu_type1_dma_unmap {
	__u32	argsz;
	__u32	flags;
#define VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP (1 << 0)
	__u64	iova;				/* IO virtual address */
	__u64	size;				/* Size of mapping (bytes) */
	__u8	data[];
};

#define VFIO_IOMMU_UNMAP_DMA _IO(VFIO_TYPE, VFIO_BASE + 14)

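/*
 * Illustrative sketch (not part of this header): unmapping a 1MB range
 * while collecting its dirty bitmap, assuming a 4KB logging page size.
 * The struct vfio_bitmap travels inside the flexible data[] array;
 * <stdlib.h> is assumed and error handling is omitted.
 *
 *	struct vfio_iommu_type1_dma_unmap *unmap;
 *	struct vfio_bitmap *bitmap;
 *	size_t sz = sizeof(*unmap) + sizeof(*bitmap);
 *	__u64 npages = (1024 * 1024) / 4096;
 *
 *	unmap = calloc(1, sz);
 *	unmap->argsz = sz;
 *	unmap->flags = VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP;
 *	unmap->iova = 0;
 *	unmap->size = 1024 * 1024;
 *	bitmap = (struct vfio_bitmap *)unmap->data;
 *	bitmap->pgsize = 4096;
 *	bitmap->size = (npages + 7) / 8;	// one bit per page, in bytes
 *	bitmap->data = calloc(1, bitmap->size);	// zeroed, as required
 *	ioctl(container, VFIO_IOMMU_UNMAP_DMA, unmap);
 */
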
/*
 * IOCTLs to enable/disable IOMMU container usage.
 * No parameters are supported.
 */
#define VFIO_IOMMU_ENABLE	_IO(VFIO_TYPE, VFIO_BASE + 15)
#define VFIO_IOMMU_DISABLE	_IO(VFIO_TYPE, VFIO_BASE + 16)

/**
 * VFIO_IOMMU_DIRTY_PAGES - _IOWR(VFIO_TYPE, VFIO_BASE + 17,
 *				  struct vfio_iommu_type1_dirty_bitmap)
 * IOCTL is used for dirty pages logging.
 * Caller should set flag depending on which operation to perform, details as
 * below:
 *
 * Calling the IOCTL with the VFIO_IOMMU_DIRTY_PAGES_FLAG_START flag set
 * instructs the IOMMU driver to log pages that are dirtied or potentially
 * dirtied by the device; designed to be used when a migration is in progress.
 * Dirty pages are logged until logging is disabled by the user application by
 * calling the IOCTL with the VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP flag.
 *
 * Calling the IOCTL with the VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP flag set
 * instructs the IOMMU driver to stop logging dirtied pages.
 *
 * Calling the IOCTL with the VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP flag set
 * returns the dirty pages bitmap for the IOMMU container for a given IOVA
 * range.  The user must specify the IOVA range and the pgsize through the
 * structure vfio_iommu_type1_dirty_bitmap_get in the data[] portion.  This
 * interface supports getting a bitmap of the smallest supported pgsize only
 * and can be modified in the future to get a bitmap of any specified
 * supported pgsize.  The user must provide a zeroed memory area for the
 * bitmap memory and specify its size in bitmap.size.  One bit is used to
 * represent one page consecutively starting from the iova offset.  The user
 * should provide the page size in the bitmap.pgsize field.  A bit set in the
 * bitmap indicates that the page at that offset from iova is dirty.  The
 * caller must set argsz to a value including the size of structure
 * vfio_iommu_type1_dirty_bitmap_get, but excluding the size of the actual
 * bitmap.  If dirty pages logging is not enabled, an error will be returned.
 *
 * Only one of the flags _START, _STOP and _GET may be specified at a time.
 */
struct vfio_iommu_type1_dirty_bitmap {
	__u32	argsz;
	__u32	flags;
#define VFIO_IOMMU_DIRTY_PAGES_FLAG_START	(1 << 0)
#define VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP	(1 << 1)
#define VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP	(1 << 2)
	__u8	data[];
};

struct vfio_iommu_type1_dirty_bitmap_get {
	__u64	iova;	/* IO virtual address */
	__u64	size;	/* Size of iova range */
	struct	vfio_bitmap bitmap;
};

#define VFIO_IOMMU_DIRTY_PAGES	_IO(VFIO_TYPE, VFIO_BASE + 17)

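/*
 * Illustrative sketch (not part of this header): fetching the dirty bitmap
 * for a mapped 1MB range while logging is active, assuming a 4KB pgsize.
 * Note argsz covers the embedded vfio_iommu_type1_dirty_bitmap_get but not
 * the bitmap memory itself; <stdlib.h> is assumed and error handling is
 * omitted.
 *
 *	struct vfio_iommu_type1_dirty_bitmap *dirty;
 *	struct vfio_iommu_type1_dirty_bitmap_get *range;
 *	size_t sz = sizeof(*dirty) + sizeof(*range);
 *
 *	dirty = calloc(1, sz);
 *	dirty->argsz = sz;
 *	dirty->flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP;
 *	range = (struct vfio_iommu_type1_dirty_bitmap_get *)dirty->data;
 *	range->iova = 0;
 *	range->size = 1024 * 1024;
 *	range->bitmap.pgsize = 4096;
 *	range->bitmap.size = (range->size / 4096 + 7) / 8;
 *	range->bitmap.data = calloc(1, range->bitmap.size);	// zeroed
 *	ioctl(container, VFIO_IOMMU_DIRTY_PAGES, dirty);
 */
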
/* -------- Additional API for SPAPR TCE (Server POWERPC) IOMMU -------- */

/*
 * The SPAPR TCE DDW info struct provides the information about
 * the details of Dynamic DMA window capability.
 *
 * @pgsizes contains a page size bitmask, 4K/64K/16M are supported.
 * @max_dynamic_windows_supported tells the maximum number of windows
 * which the platform can create.
 * @levels tells the maximum number of levels in multi-level IOMMU tables;
 * this allows splitting a table into smaller chunks which reduces
 * the amount of physically contiguous memory required for the table.
 */
struct vfio_iommu_spapr_tce_ddw_info {
	__u64 pgsizes;			/* Bitmap of supported page sizes */
	__u32 max_dynamic_windows_supported;
	__u32 levels;
};

/*
 * The SPAPR TCE info struct provides the information about the PCI bus
 * address ranges available for DMA; these values are programmed into
 * the hardware so the guest has to know that information.
 *
 * The DMA 32 bit window start is an absolute PCI bus address.
 * The IOVA addresses passed via map/unmap ioctls are absolute PCI bus
 * addresses too so the window works as a filter rather than an offset
 * for IOVA addresses.
 *
 * Flags supported:
 * - VFIO_IOMMU_SPAPR_INFO_DDW: informs the userspace that dynamic DMA windows
 *   (DDW) support is present.  @ddw is only supported when DDW is present.
 */
struct vfio_iommu_spapr_tce_info {
	__u32 argsz;
	__u32 flags;
#define VFIO_IOMMU_SPAPR_INFO_DDW	(1 << 0)	/* DDW supported */
	__u32 dma32_window_start;	/* 32 bit window start (bytes) */
	__u32 dma32_window_size;	/* 32 bit window size (bytes) */
	struct vfio_iommu_spapr_tce_ddw_info ddw;
};

#define VFIO_IOMMU_SPAPR_TCE_GET_INFO	_IO(VFIO_TYPE, VFIO_BASE + 12)

/*
 * EEH PE operation struct provides ways to:
 * - enable/disable EEH functionality;
 * - unfreeze IO/DMA for frozen PE;
 * - read PE state;
 * - reset PE;
 * - configure PE;
 * - inject EEH error.
 */
struct vfio_eeh_pe_err {
	__u32 type;
	__u32 func;
	__u64 addr;
	__u64 mask;
};

struct vfio_eeh_pe_op {
	__u32 argsz;
	__u32 flags;
	__u32 op;
	union {
		struct vfio_eeh_pe_err err;
	};
};

#define VFIO_EEH_PE_DISABLE		0	/* Disable EEH functionality */
#define VFIO_EEH_PE_ENABLE		1	/* Enable EEH functionality  */
#define VFIO_EEH_PE_UNFREEZE_IO		2	/* Enable IO for frozen PE   */
#define VFIO_EEH_PE_UNFREEZE_DMA	3	/* Enable DMA for frozen PE  */
#define VFIO_EEH_PE_GET_STATE		4	/* PE state retrieval        */
#define  VFIO_EEH_PE_STATE_NORMAL	0	/* PE in functional state    */
#define  VFIO_EEH_PE_STATE_RESET	1	/* PE reset in progress      */
#define  VFIO_EEH_PE_STATE_STOPPED	2	/* Stopped DMA and IO        */
#define  VFIO_EEH_PE_STATE_STOPPED_DMA	4	/* Stopped DMA only          */
#define  VFIO_EEH_PE_STATE_UNAVAIL	5	/* State unavailable         */
#define VFIO_EEH_PE_RESET_DEACTIVATE	5	/* Deassert PE reset         */
#define VFIO_EEH_PE_RESET_HOT		6	/* Assert hot reset          */
#define VFIO_EEH_PE_RESET_FUNDAMENTAL	7	/* Assert fundamental reset  */
#define VFIO_EEH_PE_CONFIGURE		8	/* PE configuration          */
#define VFIO_EEH_PE_INJECT_ERR		9	/* Inject EEH error          */

#define VFIO_EEH_PE_OP			_IO(VFIO_TYPE, VFIO_BASE + 21)

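/*
 * Illustrative sketch (not part of this header): enabling EEH on a
 * container and reading the PE state back.  The assumption that the state
 * is returned as the ioctl return value follows the GET_STATE semantics
 * described above; error handling is omitted.
 *
 *	struct vfio_eeh_pe_op op = { .argsz = sizeof(op) };
 *	int state;
 *
 *	op.op = VFIO_EEH_PE_ENABLE;
 *	ioctl(container, VFIO_EEH_PE_OP, &op);
 *
 *	op.op = VFIO_EEH_PE_GET_STATE;
 *	state = ioctl(container, VFIO_EEH_PE_OP, &op);
 *	// state is expected to be one of VFIO_EEH_PE_STATE_*
 */
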
/**
 * VFIO_IOMMU_SPAPR_REGISTER_MEMORY - _IOW(VFIO_TYPE, VFIO_BASE + 17,
 *					   struct vfio_iommu_spapr_register_memory)
 *
 * Registers user space memory where DMA is allowed.  It pins
 * user pages and does the locked memory accounting so
 * subsequent VFIO_IOMMU_MAP_DMA/VFIO_IOMMU_UNMAP_DMA calls
 * get faster.
 */
struct vfio_iommu_spapr_register_memory {
	__u32	argsz;
	__u32	flags;
	__u64	vaddr;				/* Process virtual address */
	__u64	size;				/* Size of mapping (bytes) */
};
#define VFIO_IOMMU_SPAPR_REGISTER_MEMORY	_IO(VFIO_TYPE, VFIO_BASE + 17)

/**
 * VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY - _IOW(VFIO_TYPE, VFIO_BASE + 18,
 *					     struct vfio_iommu_spapr_register_memory)
 *
 * Unregisters user space memory registered with
 * VFIO_IOMMU_SPAPR_REGISTER_MEMORY.
 * Uses vfio_iommu_spapr_register_memory for parameters.
 */
#define VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY	_IO(VFIO_TYPE, VFIO_BASE + 18)

/**
 * VFIO_IOMMU_SPAPR_TCE_CREATE - _IOWR(VFIO_TYPE, VFIO_BASE + 19,
 *				       struct vfio_iommu_spapr_tce_create)
 *
 * Creates an additional TCE table and programs it (sets a new DMA window)
 * to every IOMMU group in the container.  It receives page shift, window
 * size and number of levels in the TCE table being created.
 *
 * It allocates and returns an offset on a PCI bus of the new DMA window.
 */
struct vfio_iommu_spapr_tce_create {
	__u32 argsz;
	__u32 flags;
	/* in */
	__u32 page_shift;
	__u32 __resv1;
	__u64 window_size;
	__u32 levels;
	__u32 __resv2;
	/* out */
	__u64 start_addr;
};
#define VFIO_IOMMU_SPAPR_TCE_CREATE	_IO(VFIO_TYPE, VFIO_BASE + 19)

/**
 * VFIO_IOMMU_SPAPR_TCE_REMOVE - _IOW(VFIO_TYPE, VFIO_BASE + 20,
 *				      struct vfio_iommu_spapr_tce_remove)
 *
 * Unprograms a TCE table from all groups in the container and destroys it.
 * It receives a PCI bus offset as a window id.
 */
struct vfio_iommu_spapr_tce_remove {
	__u32 argsz;
	__u32 flags;
	/* in */
	__u64 start_addr;
};
#define VFIO_IOMMU_SPAPR_TCE_REMOVE	_IO(VFIO_TYPE, VFIO_BASE + 20)

/* ***************************************************************** */

#endif /* VFIO_H */