/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 * VFIO API definition
 *
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef VFIO_H
#define VFIO_H

#include <linux/types.h>
#include <linux/ioctl.h>

#define VFIO_API_VERSION	0


/* Kernel & User level defines for VFIO IOCTLs. */

/* Extensions */

#define VFIO_TYPE1_IOMMU		1
#define VFIO_SPAPR_TCE_IOMMU		2
#define VFIO_TYPE1v2_IOMMU		3
/*
 * IOMMU enforces DMA cache coherence (ex. PCIe NoSnoop stripping).  This
 * capability is subject to change as groups are added or removed.
 */
#define VFIO_DMA_CC_IOMMU		4

/* Check if EEH is supported */
#define VFIO_EEH			5

/* Two-stage IOMMU */
#define VFIO_TYPE1_NESTING_IOMMU	6	/* Implies v2 */

#define VFIO_SPAPR_TCE_v2_IOMMU		7

/*
 * The No-IOMMU IOMMU offers no translation or isolation for devices and
 * supports no ioctls outside of VFIO_CHECK_EXTENSION.  Use of VFIO's No-IOMMU
 * code will taint the host kernel and should be used with extreme caution.
 */
#define VFIO_NOIOMMU_IOMMU		8

/* Supports VFIO_DMA_UNMAP_FLAG_ALL */
#define VFIO_UNMAP_ALL			9

/*
 * Supports the vaddr flag for DMA map and unmap.  Not supported for mediated
 * devices, so this capability is subject to change as groups are added or
 * removed.
 */
#define VFIO_UPDATE_VADDR		10

/*
 * The IOCTL interface is designed for extensibility by embedding the
 * structure length (argsz) and flags into structures passed between
 * kernel and userspace.  We therefore use the _IO() macro for these
 * defines to avoid implicitly embedding a size into the ioctl request.
 * As structure fields are added, argsz will increase to match and flag
 * bits will be defined to indicate additional fields with valid data.
 * It's *always* the caller's responsibility to indicate the size of
 * the structure passed by setting argsz appropriately.
 */

#define VFIO_TYPE	(';')
#define VFIO_BASE	100

/*
 * For extension of INFO ioctls, VFIO makes use of a capability chain
 * designed after PCI/e capabilities.  A flag bit indicates whether
 * this capability chain is supported and a field defined in the fixed
 * structure defines the offset of the first capability in the chain.
 * This field is only valid when the corresponding bit in the flags
 * bitmap is set.  This offset field is relative to the start of the
 * INFO buffer, as is the next field within each capability header.
 * The id within the header is a shared address space per INFO ioctl,
 * while the version field is specific to the capability id.  The
 * contents following the header are specific to the capability id.
 */
struct vfio_info_cap_header {
	__u16	id;		/* Identifies capability */
	__u16	version;	/* Version specific to the capability ID */
	__u32	next;		/* Offset of next capability */
};

/*
 * Callers of INFO ioctls passing insufficiently sized buffers will see
 * the capability chain flag bit set, a zero value for the first capability
 * offset (if available within the provided argsz), and argsz will be
 * updated to report the necessary buffer size.  For compatibility, the
 * INFO ioctl will not report error in this case, but the capability chain
 * will not be available.
 */
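
/*
 * Example (illustrative only, not part of the ABI): walking a capability
 * chain.  Assumes `info` points to a buffer of argsz bytes already filled
 * in by an INFO ioctl, the relevant CAPS flag is set, and `first_cap_offset`
 * is the offset field from the fixed structure:
 *
 *	struct vfio_info_cap_header *hdr;
 *	__u32 off = first_cap_offset;
 *
 *	while (off) {
 *		hdr = (struct vfio_info_cap_header *)((char *)info + off);
 *		// hdr->id selects the capability, hdr->version its layout
 *		off = hdr->next;	// an offset of 0 ends the chain
 *	}
 */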

/* -------- IOCTLs for VFIO file descriptor (/dev/vfio/vfio) -------- */

/**
 * VFIO_GET_API_VERSION - _IO(VFIO_TYPE, VFIO_BASE + 0)
 *
 * Report the version of the VFIO API.  This allows us to bump the entire
 * API version should we later need to add or change features in incompatible
 * ways.
 * Return: VFIO_API_VERSION
 * Availability: Always
 */
#define VFIO_GET_API_VERSION		_IO(VFIO_TYPE, VFIO_BASE + 0)

/**
 * VFIO_CHECK_EXTENSION - _IOW(VFIO_TYPE, VFIO_BASE + 1, __u32)
 *
 * Check whether an extension is supported.
 * Return: 0 if not supported, 1 (or some other positive integer) if supported.
 * Availability: Always
 */
#define VFIO_CHECK_EXTENSION		_IO(VFIO_TYPE, VFIO_BASE + 1)

/**
 * VFIO_SET_IOMMU - _IOW(VFIO_TYPE, VFIO_BASE + 2, __s32)
 *
 * Set the iommu to the given type.  The type must be supported by an
 * iommu driver as verified by calling CHECK_EXTENSION using the same
 * type.  A group must be set to this file descriptor before this
 * ioctl is available.  The IOMMU interfaces enabled by this call are
 * specific to the value set.
 * Return: 0 on success, -errno on failure
 * Availability: When VFIO group attached
 */
#define VFIO_SET_IOMMU			_IO(VFIO_TYPE, VFIO_BASE + 2)

/* -------- IOCTLs for GROUP file descriptors (/dev/vfio/$GROUP) -------- */

/**
 * VFIO_GROUP_GET_STATUS - _IOR(VFIO_TYPE, VFIO_BASE + 3,
 *						struct vfio_group_status)
 *
 * Retrieve information about the group.  Fills in provided
 * struct vfio_group_status.  Caller sets argsz.
 * Return: 0 on success, -errno on failure.
 * Availability: Always
 */
struct vfio_group_status {
	__u32	argsz;
	__u32	flags;
#define VFIO_GROUP_FLAGS_VIABLE		(1 << 0)
#define VFIO_GROUP_FLAGS_CONTAINER_SET	(1 << 1)
};
#define VFIO_GROUP_GET_STATUS		_IO(VFIO_TYPE, VFIO_BASE + 3)

/**
 * VFIO_GROUP_SET_CONTAINER - _IOW(VFIO_TYPE, VFIO_BASE + 4, __s32)
 *
 * Set the container for the VFIO group to the open VFIO file
 * descriptor provided.  Groups may only belong to a single
 * container.  Containers may, at their discretion, support multiple
 * groups.  Only when a container is set are all of the interfaces
 * of the VFIO file descriptor and the VFIO group file descriptor
 * available to the user.
 * Return: 0 on success, -errno on failure.
 * Availability: Always
 */
#define VFIO_GROUP_SET_CONTAINER	_IO(VFIO_TYPE, VFIO_BASE + 4)

/**
 * VFIO_GROUP_UNSET_CONTAINER - _IO(VFIO_TYPE, VFIO_BASE + 5)
 *
 * Remove the group from the attached container.  This is the
 * opposite of the SET_CONTAINER call and returns the group to
 * an initial state.  All device file descriptors must be released
 * prior to calling this interface.  When removing the last group
 * from a container, the IOMMU will be disabled and all state lost,
 * effectively also returning the VFIO file descriptor to an initial
 * state.
 * Return: 0 on success, -errno on failure.
 * Availability: When attached to container
 */
#define VFIO_GROUP_UNSET_CONTAINER	_IO(VFIO_TYPE, VFIO_BASE + 5)

/**
 * VFIO_GROUP_GET_DEVICE_FD - _IOW(VFIO_TYPE, VFIO_BASE + 6, char)
 *
 * Return a new file descriptor for the device object described by
 * the provided string.  The string should match a device listed in
 * the devices subdirectory of the IOMMU group sysfs entry.  The
 * group containing the device must already be added to this context.
 * Return: new file descriptor on success, -errno on failure.
 * Availability: When attached to container
 */
#define VFIO_GROUP_GET_DEVICE_FD	_IO(VFIO_TYPE, VFIO_BASE + 6)
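
/*
 * Example (illustrative sketch of the flow above; group number and device
 * name are placeholders, error handling omitted):
 *
 *	int container = open("/dev/vfio/vfio", O_RDWR);
 *
 *	if (ioctl(container, VFIO_GET_API_VERSION) != VFIO_API_VERSION)
 *		return -1;	// unknown API version
 *	if (!ioctl(container, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU))
 *		return -1;	// IOMMU model not supported
 *
 *	int group = open("/dev/vfio/26", O_RDWR);
 *	struct vfio_group_status status = { .argsz = sizeof(status) };
 *
 *	ioctl(group, VFIO_GROUP_GET_STATUS, &status);
 *	if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE))
 *		return -1;	// not all group devices bound to vfio
 *
 *	ioctl(group, VFIO_GROUP_SET_CONTAINER, &container);
 *	ioctl(container, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU);
 *
 *	int device = ioctl(group, VFIO_GROUP_GET_DEVICE_FD, "0000:06:0d.0");
 */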

/* --------------- IOCTLs for DEVICE file descriptors --------------- */

/**
 * VFIO_DEVICE_GET_INFO - _IOR(VFIO_TYPE, VFIO_BASE + 7,
 *						struct vfio_device_info)
 *
 * Retrieve information about the device.  Fills in provided
 * struct vfio_device_info.  Caller sets argsz.
 * Return: 0 on success, -errno on failure.
 */
struct vfio_device_info {
	__u32	argsz;
	__u32	flags;
#define VFIO_DEVICE_FLAGS_RESET	(1 << 0)	/* Device supports reset */
#define VFIO_DEVICE_FLAGS_PCI	(1 << 1)	/* vfio-pci device */
#define VFIO_DEVICE_FLAGS_PLATFORM (1 << 2)	/* vfio-platform device */
#define VFIO_DEVICE_FLAGS_AMBA  (1 << 3)	/* vfio-amba device */
#define VFIO_DEVICE_FLAGS_CCW	(1 << 4)	/* vfio-ccw device */
#define VFIO_DEVICE_FLAGS_AP	(1 << 5)	/* vfio-ap device */
#define VFIO_DEVICE_FLAGS_FSL_MC (1 << 6)	/* vfio-fsl-mc device */
#define VFIO_DEVICE_FLAGS_CAPS	(1 << 7)	/* Info supports caps */
#define VFIO_DEVICE_FLAGS_CDX	(1 << 8)	/* vfio-cdx device */
	__u32	num_regions;	/* Max region index + 1 */
	__u32	num_irqs;	/* Max IRQ index + 1 */
	__u32   cap_offset;	/* Offset within info struct of first cap */
	__u32   pad;
};
#define VFIO_DEVICE_GET_INFO		_IO(VFIO_TYPE, VFIO_BASE + 7)
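
/*
 * Example (illustrative): querying basic device info; `device` is a fd
 * returned by VFIO_GROUP_GET_DEVICE_FD:
 *
 *	struct vfio_device_info info = { .argsz = sizeof(info) };
 *
 *	ioctl(device, VFIO_DEVICE_GET_INFO, &info);
 *	// info.num_regions and info.num_irqs bound the index spaces used
 *	// by the REGION_INFO and IRQ_INFO ioctls below
 */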

/*
 * Vendor drivers using the Mediated device framework should provide a
 * device_api attribute in the supported type attribute groups.  The device
 * API string should be one of the following, corresponding to the device
 * flags in the vfio_device_info structure.
 */

#define VFIO_DEVICE_API_PCI_STRING		"vfio-pci"
#define VFIO_DEVICE_API_PLATFORM_STRING		"vfio-platform"
#define VFIO_DEVICE_API_AMBA_STRING		"vfio-amba"
#define VFIO_DEVICE_API_CCW_STRING		"vfio-ccw"
#define VFIO_DEVICE_API_AP_STRING		"vfio-ap"

/*
 * The following capabilities are unique to s390 zPCI devices.  Their contents
 * are further defined in vfio_zdev.h
 */
#define VFIO_DEVICE_INFO_CAP_ZPCI_BASE		1
#define VFIO_DEVICE_INFO_CAP_ZPCI_GROUP		2
#define VFIO_DEVICE_INFO_CAP_ZPCI_UTIL		3
#define VFIO_DEVICE_INFO_CAP_ZPCI_PFIP		4

/*
 * The following VFIO_DEVICE_INFO capability reports support for PCIe AtomicOp
 * completion to the root bus with supported widths provided via flags.
 */
#define VFIO_DEVICE_INFO_CAP_PCI_ATOMIC_COMP	5
struct vfio_device_info_cap_pci_atomic_comp {
	struct vfio_info_cap_header header;
	__u32 flags;
#define VFIO_PCI_ATOMIC_COMP32	(1 << 0)
#define VFIO_PCI_ATOMIC_COMP64	(1 << 1)
#define VFIO_PCI_ATOMIC_COMP128	(1 << 2)
	__u32 reserved;
};

/**
 * VFIO_DEVICE_GET_REGION_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 8,
 *				       struct vfio_region_info)
 *
 * Retrieve information about a device region.  Caller provides
 * struct vfio_region_info with index value set.  Caller sets argsz.
 * Implementation of region mapping is bus driver specific.  This is
 * intended to describe MMIO, I/O port, as well as bus specific
 * regions (ex. PCI config space).  Zero sized regions may be used
 * to describe unimplemented regions (ex. unimplemented PCI BARs).
 * Return: 0 on success, -errno on failure.
 */
struct vfio_region_info {
	__u32	argsz;
	__u32	flags;
#define VFIO_REGION_INFO_FLAG_READ	(1 << 0) /* Region supports read */
#define VFIO_REGION_INFO_FLAG_WRITE	(1 << 1) /* Region supports write */
#define VFIO_REGION_INFO_FLAG_MMAP	(1 << 2) /* Region supports mmap */
#define VFIO_REGION_INFO_FLAG_CAPS	(1 << 3) /* Info supports caps */
	__u32	index;		/* Region index */
	__u32	cap_offset;	/* Offset within info struct of first cap */
	__aligned_u64	size;	/* Region size (bytes) */
	__aligned_u64	offset;	/* Region offset from start of device fd */
};
#define VFIO_DEVICE_GET_REGION_INFO	_IO(VFIO_TYPE, VFIO_BASE + 8)
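
/*
 * Example (illustrative): the two-call pattern for INFO ioctls carrying a
 * capability chain.  If the fixed-size structure is too small the kernel
 * updates argsz and the call is repeated with a larger buffer:
 *
 *	struct vfio_region_info *ri = malloc(sizeof(*ri));
 *
 *	memset(ri, 0, sizeof(*ri));
 *	ri->argsz = sizeof(*ri);
 *	ri->index = index;
 *	ioctl(device, VFIO_DEVICE_GET_REGION_INFO, ri);
 *	if (ri->argsz > sizeof(*ri)) {
 *		__u32 argsz = ri->argsz;
 *
 *		ri = realloc(ri, argsz);
 *		memset(ri, 0, argsz);
 *		ri->argsz = argsz;
 *		ri->index = index;
 *		ioctl(device, VFIO_DEVICE_GET_REGION_INFO, ri);
 *		// if FLAG_CAPS is set, walk the chain from ri->cap_offset
 *	}
 */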

/*
 * The sparse mmap capability allows finer granularity of specifying areas
 * within a region with mmap support.  When specified, the user should only
 * mmap the offset ranges specified by the areas array.  mmaps outside of the
 * areas specified may fail (such as the range covering a PCI MSI-X table) or
 * may result in improper device behavior.
 *
 * The structures below define version 1 of this capability.
 */
#define VFIO_REGION_INFO_CAP_SPARSE_MMAP	1

struct vfio_region_sparse_mmap_area {
	__aligned_u64	offset;	/* Offset of mmap'able area within region */
	__aligned_u64	size;	/* Size of mmap'able area */
};

struct vfio_region_info_cap_sparse_mmap {
	struct vfio_info_cap_header header;
	__u32	nr_areas;
	__u32	reserved;
	struct vfio_region_sparse_mmap_area areas[];
};

/*
 * The device specific type capability allows regions unique to a specific
 * device or class of devices to be exposed.  This helps solve the problem for
 * vfio bus drivers of defining which region indexes correspond to which region
 * on the device, without needing to resort to static indexes, as done by
 * vfio-pci.  For instance, if we were to go back in time, we might remove
 * VFIO_PCI_VGA_REGION_INDEX and let vfio-pci simply define that all indexes
 * greater than or equal to VFIO_PCI_NUM_REGIONS are device specific and we'd
 * make a "VGA" device specific type to describe the VGA access space.  This
 * means that non-VGA devices wouldn't need to waste this index, and thus the
 * address space associated with it due to implementation of device file
 * descriptor offsets in vfio-pci.
 *
 * The current implementation is now part of the user ABI, so we can't use this
 * for VGA, but there are other upcoming use cases, such as opregions for Intel
 * IGD devices and framebuffers for vGPU devices.  We missed VGA, but we'll
 * use this for future additions.
 *
 * The structure below defines version 1 of this capability.
 */
#define VFIO_REGION_INFO_CAP_TYPE	2

struct vfio_region_info_cap_type {
	struct vfio_info_cap_header header;
	__u32 type;	/* global per bus driver */
	__u32 subtype;	/* type specific */
};

/*
 * List of region types, global per bus driver.
 * If you introduce a new type, please add it here.
 */

/* PCI region type containing a PCI vendor part */
#define VFIO_REGION_TYPE_PCI_VENDOR_TYPE	(1 << 31)
#define VFIO_REGION_TYPE_PCI_VENDOR_MASK	(0xffff)
#define VFIO_REGION_TYPE_GFX                    (1)
#define VFIO_REGION_TYPE_CCW			(2)
#define VFIO_REGION_TYPE_MIGRATION_DEPRECATED   (3)

/* sub-types for VFIO_REGION_TYPE_PCI_* */

/* 8086 vendor PCI sub-types */
#define VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION	(1)
#define VFIO_REGION_SUBTYPE_INTEL_IGD_HOST_CFG	(2)
#define VFIO_REGION_SUBTYPE_INTEL_IGD_LPC_CFG	(3)

/* 10de vendor PCI sub-types */
/*
 * NVIDIA GPU NVlink2 RAM is coherent RAM mapped onto the host address space.
 *
 * Deprecated, region no longer provided
 */
#define VFIO_REGION_SUBTYPE_NVIDIA_NVLINK2_RAM	(1)

/* 1014 vendor PCI sub-types */
/*
 * IBM NPU NVlink2 ATSD (Address Translation Shootdown) register of NPU
 * to do TLB invalidation on a GPU.
 *
 * Deprecated, region no longer provided
 */
#define VFIO_REGION_SUBTYPE_IBM_NVLINK2_ATSD	(1)

/* sub-types for VFIO_REGION_TYPE_GFX */
#define VFIO_REGION_SUBTYPE_GFX_EDID            (1)

/**
 * struct vfio_region_gfx_edid - EDID region layout.
 *
 * Set display link state and EDID blob.
 *
 * The EDID blob has monitor information such as brand, name, serial
 * number, physical size, supported video modes and more.
 *
 * This special region allows userspace (typically QEMU) to set a virtual
 * EDID for the virtual monitor, which allows a flexible display
 * configuration.
 *
 * For the EDID blob spec look here:
 *    https://en.wikipedia.org/wiki/Extended_Display_Identification_Data
 *
 * On Linux systems you can find the EDID blob in sysfs:
 *    /sys/class/drm/${card}/${connector}/edid
 *
 * You can use the edid-decode utility (comes with xorg-x11-utils) to
 * decode the EDID blob.
 *
 * @edid_offset: location of the edid blob, relative to the
 *               start of the region (readonly).
 * @edid_max_size: max size of the edid blob (readonly).
 * @edid_size: actual edid size (read/write).
 * @link_state: display link state (read/write).
 * VFIO_DEVICE_GFX_LINK_STATE_UP: Monitor is turned on.
 * VFIO_DEVICE_GFX_LINK_STATE_DOWN: Monitor is turned off.
 * @max_xres: max display width (0 == no limitation, readonly).
 * @max_yres: max display height (0 == no limitation, readonly).
 *
 * EDID update protocol:
 *   (1) set link-state to down.
 *   (2) update edid blob and size.
 *   (3) set link-state to up.
 */
struct vfio_region_gfx_edid {
	__u32 edid_offset;
	__u32 edid_max_size;
	__u32 edid_size;
	__u32 max_xres;
	__u32 max_yres;
	__u32 link_state;
#define VFIO_DEVICE_GFX_LINK_STATE_UP    1
#define VFIO_DEVICE_GFX_LINK_STATE_DOWN  2
};
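
/*
 * Example (illustrative): driving the EDID update protocol with pwrite(2)
 * at the EDID region's device fd offset.  `region_offset` comes from
 * VFIO_DEVICE_GET_REGION_INFO for this region; `blob` and `blob_size`
 * (a __u32) are a caller-supplied EDID:
 *
 *	struct vfio_region_gfx_edid hdr;
 *	__u32 state = VFIO_DEVICE_GFX_LINK_STATE_DOWN;
 *
 *	pread(device, &hdr, sizeof(hdr), region_offset);
 *	pwrite(device, &state, sizeof(state), region_offset +
 *	       offsetof(struct vfio_region_gfx_edid, link_state));
 *	pwrite(device, blob, blob_size, region_offset + hdr.edid_offset);
 *	pwrite(device, &blob_size, sizeof(blob_size), region_offset +
 *	       offsetof(struct vfio_region_gfx_edid, edid_size));
 *	state = VFIO_DEVICE_GFX_LINK_STATE_UP;
 *	pwrite(device, &state, sizeof(state), region_offset +
 *	       offsetof(struct vfio_region_gfx_edid, link_state));
 */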

/* sub-types for VFIO_REGION_TYPE_CCW */
#define VFIO_REGION_SUBTYPE_CCW_ASYNC_CMD	(1)
#define VFIO_REGION_SUBTYPE_CCW_SCHIB		(2)
#define VFIO_REGION_SUBTYPE_CCW_CRW		(3)

/* sub-types for VFIO_REGION_TYPE_MIGRATION */
#define VFIO_REGION_SUBTYPE_MIGRATION_DEPRECATED (1)

struct vfio_device_migration_info {
	__u32 device_state;         /* VFIO device state */
#define VFIO_DEVICE_STATE_V1_STOP      (0)
#define VFIO_DEVICE_STATE_V1_RUNNING   (1 << 0)
#define VFIO_DEVICE_STATE_V1_SAVING    (1 << 1)
#define VFIO_DEVICE_STATE_V1_RESUMING  (1 << 2)
#define VFIO_DEVICE_STATE_MASK      (VFIO_DEVICE_STATE_V1_RUNNING | \
				     VFIO_DEVICE_STATE_V1_SAVING |  \
				     VFIO_DEVICE_STATE_V1_RESUMING)

#define VFIO_DEVICE_STATE_VALID(state) \
	(state & VFIO_DEVICE_STATE_V1_RESUMING ? \
	(state & VFIO_DEVICE_STATE_MASK) == VFIO_DEVICE_STATE_V1_RESUMING : 1)

#define VFIO_DEVICE_STATE_IS_ERROR(state) \
	((state & VFIO_DEVICE_STATE_MASK) == (VFIO_DEVICE_STATE_V1_SAVING | \
					      VFIO_DEVICE_STATE_V1_RESUMING))

#define VFIO_DEVICE_STATE_SET_ERROR(state) \
	((state & ~VFIO_DEVICE_STATE_MASK) | VFIO_DEVICE_STATE_V1_SAVING | \
					     VFIO_DEVICE_STATE_V1_RESUMING)

	__u32 reserved;
	__aligned_u64 pending_bytes;
	__aligned_u64 data_offset;
	__aligned_u64 data_size;
};

/*
 * The MSIX mappable capability informs that MSIX data of a BAR can be mmapped
 * which allows direct access to non-MSIX registers which happen to be within
 * the same system page.
 *
 * Even though userspace gets direct access to the MSIX data, the existing
 * VFIO_DEVICE_SET_IRQS interface must still be used for MSIX configuration.
 */
#define VFIO_REGION_INFO_CAP_MSIX_MAPPABLE	3

/*
 * Capability with compressed real address (aka SSA - small system address)
 * where GPU RAM is mapped on a system bus.  Used by a GPU for DMA routing
 * and by userspace to associate an NVLink bridge with a GPU.
 *
 * Deprecated, capability no longer provided
 */
#define VFIO_REGION_INFO_CAP_NVLINK2_SSATGT	4

struct vfio_region_info_cap_nvlink2_ssatgt {
	struct vfio_info_cap_header header;
	__aligned_u64 tgt;
};

/*
 * Capability with an NVLink link speed.  The value is read by
 * the NVlink2 bridge driver from the bridge's "ibm,nvlink-speed"
 * property in the device tree.  The value is fixed in the hardware
 * and failing to provide the correct value results in the link
 * not working, with no indication from the driver as to why.
 *
 * Deprecated, capability no longer provided
 */
#define VFIO_REGION_INFO_CAP_NVLINK2_LNKSPD	5

struct vfio_region_info_cap_nvlink2_lnkspd {
	struct vfio_info_cap_header header;
	__u32 link_speed;
	__u32 __pad;
};

/**
 * VFIO_DEVICE_GET_IRQ_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 9,
 *				    struct vfio_irq_info)
 *
 * Retrieve information about a device IRQ.  Caller provides
 * struct vfio_irq_info with index value set.  Caller sets argsz.
 * Implementation of IRQ mapping is bus driver specific.  Indexes
 * using multiple IRQs are primarily intended to support MSI-like
 * interrupt blocks.  Zero count irq blocks may be used to describe
 * unimplemented interrupt types.
 *
 * The EVENTFD flag indicates the interrupt index supports eventfd based
 * signaling.
 *
 * The MASKABLE flag indicates the index supports MASK and UNMASK
 * actions described below.
 *
 * AUTOMASKED indicates that after signaling, the interrupt line is
 * automatically masked by VFIO and the user needs to unmask the line
 * to receive new interrupts.  This is primarily intended to distinguish
 * level triggered interrupts.
 *
 * The NORESIZE flag indicates that the interrupt lines within the index
 * are set up as a set and new subindexes cannot be enabled without first
 * disabling the entire index.  This is used for interrupts like PCI MSI
 * and MSI-X where the driver may only use a subset of the available
 * indexes, but VFIO needs to enable a specific number of vectors
 * upfront.  In the case of MSI-X, where the user can enable MSI-X and
 * then add and unmask vectors, it's up to userspace to make the decision
 * whether to allocate the maximum supported number of vectors or tear
 * down setup and incrementally increase the vectors as each is enabled.
 * Absence of the NORESIZE flag indicates that vectors can be enabled
 * and disabled dynamically without impacting other vectors within the
 * index.
 */
struct vfio_irq_info {
	__u32	argsz;
	__u32	flags;
#define VFIO_IRQ_INFO_EVENTFD		(1 << 0)
#define VFIO_IRQ_INFO_MASKABLE		(1 << 1)
#define VFIO_IRQ_INFO_AUTOMASKED	(1 << 2)
#define VFIO_IRQ_INFO_NORESIZE		(1 << 3)
	__u32	index;		/* IRQ index */
	__u32	count;		/* Number of IRQs within this index */
};
#define VFIO_DEVICE_GET_IRQ_INFO	_IO(VFIO_TYPE, VFIO_BASE + 9)
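
/*
 * Example (illustrative): querying the MSI-X index of a vfio-pci device
 * (VFIO_PCI_MSIX_IRQ_INDEX is defined further below):
 *
 *	struct vfio_irq_info irq = {
 *		.argsz = sizeof(irq),
 *		.index = VFIO_PCI_MSIX_IRQ_INDEX,
 *	};
 *
 *	ioctl(device, VFIO_DEVICE_GET_IRQ_INFO, &irq);
 *	// irq.count vectors are available; irq.flags describes
 *	// EVENTFD/MASKABLE/AUTOMASKED/NORESIZE support
 */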

/**
 * VFIO_DEVICE_SET_IRQS - _IOW(VFIO_TYPE, VFIO_BASE + 10, struct vfio_irq_set)
 *
 * Set signaling, masking, and unmasking of interrupts.  Caller provides
 * struct vfio_irq_set with all fields set.  'start' and 'count' indicate
 * the range of subindexes being specified.
 *
 * The DATA flags specify the type of data provided.  If DATA_NONE, the
 * operation performs the specified action immediately on the specified
 * interrupt(s).  For example, to unmask AUTOMASKED interrupt [0,0]:
 * flags = (DATA_NONE|ACTION_UNMASK), index = 0, start = 0, count = 1.
 *
 * DATA_BOOL allows sparse support for the same on arrays of interrupts.
 * For example, to mask interrupts [0,1] and [0,3] (but not [0,2]):
 * flags = (DATA_BOOL|ACTION_MASK), index = 0, start = 1, count = 3,
 * data = {1,0,1}
 *
 * DATA_EVENTFD binds the specified ACTION to the provided __s32 eventfd.
 * A value of -1 can be used to either de-assign interrupts if already
 * assigned or skip un-assigned interrupts.  For example, to set an eventfd
 * to be triggered for interrupts [0,0] and [0,2]:
 * flags = (DATA_EVENTFD|ACTION_TRIGGER), index = 0, start = 0, count = 3,
 * data = {fd1, -1, fd2}
 * If index [0,1] is previously set, two count = 1 ioctl calls would be
 * required to set [0,0] and [0,2] without changing [0,1].
 *
 * Once a signaling mechanism is set, DATA_BOOL or DATA_NONE can be used
 * with ACTION_TRIGGER to perform kernel level interrupt loopback testing
 * from userspace (ie. simulate hardware triggering).
 *
 * Setting of an event triggering mechanism to userspace for ACTION_TRIGGER
 * enables the interrupt index for the device.  Individual subindex interrupts
 * can be disabled using the -1 value for DATA_EVENTFD or the index can be
 * disabled as a whole with: flags = (DATA_NONE|ACTION_TRIGGER), count = 0.
 *
 * Note that ACTION_[UN]MASK specify user->kernel signaling (irqfds) while
 * ACTION_TRIGGER specifies kernel->user signaling.
 */
struct vfio_irq_set {
	__u32	argsz;
	__u32	flags;
#define VFIO_IRQ_SET_DATA_NONE		(1 << 0) /* Data not present */
#define VFIO_IRQ_SET_DATA_BOOL		(1 << 1) /* Data is bool (u8) */
#define VFIO_IRQ_SET_DATA_EVENTFD	(1 << 2) /* Data is eventfd (s32) */
#define VFIO_IRQ_SET_ACTION_MASK	(1 << 3) /* Mask interrupt */
#define VFIO_IRQ_SET_ACTION_UNMASK	(1 << 4) /* Unmask interrupt */
#define VFIO_IRQ_SET_ACTION_TRIGGER	(1 << 5) /* Trigger interrupt */
	__u32	index;
	__u32	start;
	__u32	count;
	__u8	data[];
};
#define VFIO_DEVICE_SET_IRQS		_IO(VFIO_TYPE, VFIO_BASE + 10)
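
/*
 * Example (illustrative): attaching an eventfd to trigger subindex 0 of an
 * index using the variable-length layout above; `efd` is from eventfd(2):
 *
 *	struct vfio_irq_set *set;
 *	size_t sz = sizeof(*set) + sizeof(__s32);
 *
 *	set = malloc(sz);
 *	set->argsz = sz;
 *	set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
 *	set->index = index;
 *	set->start = 0;
 *	set->count = 1;
 *	memcpy(set->data, &efd, sizeof(__s32));
 *	ioctl(device, VFIO_DEVICE_SET_IRQS, set);
 */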

#define VFIO_IRQ_SET_DATA_TYPE_MASK	(VFIO_IRQ_SET_DATA_NONE | \
					 VFIO_IRQ_SET_DATA_BOOL | \
					 VFIO_IRQ_SET_DATA_EVENTFD)
#define VFIO_IRQ_SET_ACTION_TYPE_MASK	(VFIO_IRQ_SET_ACTION_MASK | \
					 VFIO_IRQ_SET_ACTION_UNMASK | \
					 VFIO_IRQ_SET_ACTION_TRIGGER)
/**
 * VFIO_DEVICE_RESET - _IO(VFIO_TYPE, VFIO_BASE + 11)
 *
 * Reset a device.
 */
#define VFIO_DEVICE_RESET		_IO(VFIO_TYPE, VFIO_BASE + 11)

/*
 * The VFIO-PCI bus driver makes use of the following fixed region and
 * IRQ index mapping.  Unimplemented regions return a size of zero.
 * Unimplemented IRQ types return a count of zero.
 */

enum {
	VFIO_PCI_BAR0_REGION_INDEX,
	VFIO_PCI_BAR1_REGION_INDEX,
	VFIO_PCI_BAR2_REGION_INDEX,
	VFIO_PCI_BAR3_REGION_INDEX,
	VFIO_PCI_BAR4_REGION_INDEX,
	VFIO_PCI_BAR5_REGION_INDEX,
	VFIO_PCI_ROM_REGION_INDEX,
	VFIO_PCI_CONFIG_REGION_INDEX,
	/*
	 * Expose VGA regions defined for PCI base class 03, subclass 00.
	 * This includes I/O port ranges 0x3b0 to 0x3bb and 0x3c0 to 0x3df
	 * as well as the MMIO range 0xa0000 to 0xbffff.  Each implemented
	 * range is found at its identity-mapped offset from the region
	 * offset, for example 0x3b0 is region_info.offset + 0x3b0.  Areas
	 * between described ranges are unimplemented.
	 */
	VFIO_PCI_VGA_REGION_INDEX,
	VFIO_PCI_NUM_REGIONS = 9 /* Fixed user ABI, region indexes >=9 use */
				 /* device specific cap to define content. */
};

enum {
	VFIO_PCI_INTX_IRQ_INDEX,
	VFIO_PCI_MSI_IRQ_INDEX,
	VFIO_PCI_MSIX_IRQ_INDEX,
	VFIO_PCI_ERR_IRQ_INDEX,
	VFIO_PCI_REQ_IRQ_INDEX,
	VFIO_PCI_NUM_IRQS
};

/*
 * The vfio-ccw bus driver makes use of the following fixed region and
 * IRQ index mapping. Unimplemented regions return a size of zero.
 * Unimplemented IRQ types return a count of zero.
 */

enum {
	VFIO_CCW_CONFIG_REGION_INDEX,
	VFIO_CCW_NUM_REGIONS
};

enum {
	VFIO_CCW_IO_IRQ_INDEX,
	VFIO_CCW_CRW_IRQ_INDEX,
	VFIO_CCW_REQ_IRQ_INDEX,
	VFIO_CCW_NUM_IRQS
};

/*
 * The vfio-ap bus driver makes use of the following IRQ index mapping.
 * Unimplemented IRQ types return a count of zero.
 */
enum {
	VFIO_AP_REQ_IRQ_INDEX,
	VFIO_AP_NUM_IRQS
};

/**
 * VFIO_DEVICE_GET_PCI_HOT_RESET_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 12,
 *					      struct vfio_pci_hot_reset_info)
 *
 * This command is used to query the affected devices in the hot reset for
 * a given device.
 *
 * This command always reports the segment, bus, and devfn information for
 * each affected device, and selectively reports the group_id or devid
 * depending on how the calling device is opened.
 *
 *	- If the calling device is opened via the traditional group/container
 *	  API, group_id is reported.  The user should check whether it owns all
 *	  the affected devices and provide a set of group fds to prove the
 *	  ownership in the VFIO_DEVICE_PCI_HOT_RESET ioctl.
 *
 *	- If the calling device is opened as a cdev, devid is reported.
 *	  Flag VFIO_PCI_HOT_RESET_FLAG_DEV_ID is set to indicate this
 *	  data type.  All the affected devices should be represented in
 *	  the dev_set, ex. bound to a vfio driver, and also be owned by
 *	  this interface, which is determined by the following conditions:
 *	  1) Has a valid devid within the iommufd_ctx of the calling device.
 *	     Ownership cannot be determined across separate iommufd_ctx and
 *	     the cdev calling conventions do not support a proof-of-ownership
 *	     model as provided in the legacy group interface.  In this case
 *	     a valid devid with a value greater than zero is provided in the
 *	     return structure.
 *	  2) Does not have a valid devid within the iommufd_ctx of the calling
 *	     device, but belongs to the same IOMMU group as the calling device
 *	     or another opened device that has a valid devid within the
 *	     iommufd_ctx of the calling device.  This provides implicit ownership
 *	     for devices within the same DMA isolation context.  In this case
 *	     the devid value of VFIO_PCI_DEVID_OWNED is provided in the return
 *	     structure.
 *
 *	  A devid value of VFIO_PCI_DEVID_NOT_OWNED is provided in the return
 *	  structure for affected devices where the device is NOT represented in
 *	  the dev_set or ownership is not available.  Such devices prevent the
 *	  use of the VFIO_DEVICE_PCI_HOT_RESET ioctl outside of the
 *	  proof-of-ownership calling conventions (ie. via legacy group accessed
 *	  devices).  Flag VFIO_PCI_HOT_RESET_FLAG_DEV_ID_OWNED would be set
 *	  when all the affected devices are represented in the dev_set and also
 *	  owned by the user.  This flag is available only when
 *	  flag VFIO_PCI_HOT_RESET_FLAG_DEV_ID is set, otherwise reserved.
 *	  When set, the user can invoke VFIO_DEVICE_PCI_HOT_RESET with a zero
 *	  length fd array on the calling device as the ownership is validated
 *	  by iommufd_ctx.
 *
 * Return: 0 on success, -errno on failure:
 *	-enospc = insufficient buffer, -enodev = unsupported for device.
 */
struct vfio_pci_dependent_device {
	union {
		__u32   group_id;
		__u32	devid;
#define VFIO_PCI_DEVID_OWNED		0
#define VFIO_PCI_DEVID_NOT_OWNED	-1
	};
	__u16	segment;
	__u8	bus;
	__u8	devfn; /* Use PCI_SLOT/PCI_FUNC */
};

struct vfio_pci_hot_reset_info {
	__u32	argsz;
	__u32	flags;
#define VFIO_PCI_HOT_RESET_FLAG_DEV_ID		(1 << 0)
#define VFIO_PCI_HOT_RESET_FLAG_DEV_ID_OWNED	(1 << 1)
	__u32	count;
	struct vfio_pci_dependent_device	devices[];
};

#define VFIO_DEVICE_GET_PCI_HOT_RESET_INFO	_IO(VFIO_TYPE, VFIO_BASE + 12)
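
/*
 * Example (illustrative): fetching the affected-device list with the usual
 * two-call resize pattern (the first call fails when the buffer is too
 * small but still updates argsz):
 *
 *	struct vfio_pci_hot_reset_info *info;
 *	__u32 argsz = sizeof(*info);
 *
 *	info = calloc(1, argsz);
 *	info->argsz = argsz;
 *	ioctl(device, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info);
 *	argsz = info->argsz;
 *	info = realloc(info, argsz);
 *	info->argsz = argsz;
 *	ioctl(device, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info);
 *	// info->devices[0 .. info->count - 1] describe the reset scope
 */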

/**
 * VFIO_DEVICE_PCI_HOT_RESET - _IOW(VFIO_TYPE, VFIO_BASE + 13,
 *				    struct vfio_pci_hot_reset)
 *
 * A PCI hot reset results in either a bus or slot reset which may affect
 * other devices sharing the bus/slot.  The calling user must have
 * ownership of the full set of affected devices as determined by the
 * VFIO_DEVICE_GET_PCI_HOT_RESET_INFO ioctl.
 *
 * When called on a device file descriptor acquired through the vfio
 * group interface, the user is required to provide proof of ownership
 * of those affected devices via the group_fds array in struct
 * vfio_pci_hot_reset.
 *
 * When called on a direct cdev opened vfio device, the flags field of
 * struct vfio_pci_hot_reset_info reports the ownership status of the
 * affected devices and this ioctl must be called with an empty group_fds
 * array.  See above INFO ioctl definition for ownership requirements.
 *
 * Mixed usage of legacy groups and cdevs across the set of affected
 * devices is not supported.
 *
 * Return: 0 on success, -errno on failure.
 */
struct vfio_pci_hot_reset {
	__u32	argsz;
	__u32	flags;
	__u32	count;
	__s32	group_fds[];
};

#define VFIO_DEVICE_PCI_HOT_RESET	_IO(VFIO_TYPE, VFIO_BASE + 13)

/**
 * VFIO_DEVICE_QUERY_GFX_PLANE - _IOW(VFIO_TYPE, VFIO_BASE + 14,
 *                                    struct vfio_device_query_gfx_plane)
 *
 * Set the drm_plane_type and flags, then retrieve the gfx plane info.
 *
 * flags supported:
 * - VFIO_GFX_PLANE_TYPE_PROBE and VFIO_GFX_PLANE_TYPE_DMABUF are set
 *   to ask if the mdev supports dma-buf. 0 on support, -EINVAL on no
 *   support for dma-buf.
 * - VFIO_GFX_PLANE_TYPE_PROBE and VFIO_GFX_PLANE_TYPE_REGION are set
 *   to ask if the mdev supports region. 0 on support, -EINVAL on no
 *   support for region.
 * - VFIO_GFX_PLANE_TYPE_DMABUF or VFIO_GFX_PLANE_TYPE_REGION is set
 *   with each call to query the plane info.
 * - Others are invalid and return -EINVAL.
 *
 * Note:
 * 1. The plane could be disabled by the guest. In that case, success will
 *    be returned with zero-initialized drm_format, size, width and height
 *    fields.
 * 2. x_hot/y_hot is set to 0xFFFFFFFF if no hotspot information is
 *    available.
 *
 * Return: 0 on success, -errno on other failure.
 */
struct vfio_device_gfx_plane_info {
	__u32 argsz;
	__u32 flags;
#define VFIO_GFX_PLANE_TYPE_PROBE (1 << 0)
#define VFIO_GFX_PLANE_TYPE_DMABUF (1 << 1)
#define VFIO_GFX_PLANE_TYPE_REGION (1 << 2)
	/* in */
	__u32 drm_plane_type;	/* type of plane: DRM_PLANE_TYPE_* */
	/* out */
	__u32 drm_format;	/* drm format of plane */
	__aligned_u64 drm_format_mod;   /* tiled mode */
	__u32 width;	/* width of plane */
	__u32 height;	/* height of plane */
	__u32 stride;	/* stride of plane */
	__u32 size;	/* size of plane in bytes, page aligned */
	__u32 x_pos;	/* horizontal position of cursor plane */
	__u32 y_pos;	/* vertical position of cursor plane */
	__u32 x_hot;    /* horizontal position of cursor hotspot */
	__u32 y_hot;    /* vertical position of cursor hotspot */
	union {
		__u32 region_index;	/* region index */
		__u32 dmabuf_id;	/* dma-buf id */
	};
	__u32 reserved;
};

#define VFIO_DEVICE_QUERY_GFX_PLANE _IO(VFIO_TYPE, VFIO_BASE + 14)

/**
 * VFIO_DEVICE_GET_GFX_DMABUF - _IOW(VFIO_TYPE, VFIO_BASE + 15, __u32)
 *
 * Return a new dma-buf file descriptor for an exposed guest framebuffer
 * described by the provided dmabuf_id.  The dmabuf_id is returned from
 * VFIO_DEVICE_QUERY_GFX_PLANE as a token of the exposed guest framebuffer.
 */

#define VFIO_DEVICE_GET_GFX_DMABUF _IO(VFIO_TYPE, VFIO_BASE + 15)

/**
 * VFIO_DEVICE_IOEVENTFD - _IOW(VFIO_TYPE, VFIO_BASE + 16,
 *                              struct vfio_device_ioeventfd)
 *
 * Perform a write to the device at the specified device fd offset, with
 * the specified data and width when the provided eventfd is triggered.
 * vfio bus drivers may not support this for all regions, for all widths,
 * or at all.  vfio-pci currently only enables support for BAR regions,
 * excluding the MSI-X vector table.
 *
 * Return: 0 on success, -errno on failure.
 */
struct vfio_device_ioeventfd {
	__u32	argsz;
	__u32	flags;
#define VFIO_DEVICE_IOEVENTFD_8		(1 << 0) /* 1-byte write */
#define VFIO_DEVICE_IOEVENTFD_16	(1 << 1) /* 2-byte write */
#define VFIO_DEVICE_IOEVENTFD_32	(1 << 2) /* 4-byte write */
#define VFIO_DEVICE_IOEVENTFD_64	(1 << 3) /* 8-byte write */
#define VFIO_DEVICE_IOEVENTFD_SIZE_MASK	(0xf)
	__aligned_u64	offset;		/* device fd offset of write */
	__aligned_u64	data;		/* data to be written */
	__s32	fd;			/* -1 for de-assignment */
	__u32	reserved;
};

#define VFIO_DEVICE_IOEVENTFD		_IO(VFIO_TYPE, VFIO_BASE + 16)
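
/*
 * Example (illustrative): ask the kernel to perform a 4-byte write of 0x1
 * at `offset` whenever `efd` is signaled:
 *
 *	struct vfio_device_ioeventfd ie = {
 *		.argsz  = sizeof(ie),
 *		.flags  = VFIO_DEVICE_IOEVENTFD_32,
 *		.offset = offset,	// device fd offset within a BAR
 *		.data   = 0x1,
 *		.fd     = efd,		// -1 would de-assign
 *	};
 *
 *	ioctl(device, VFIO_DEVICE_IOEVENTFD, &ie);
 */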

/**
 * VFIO_DEVICE_FEATURE - _IOWR(VFIO_TYPE, VFIO_BASE + 17,
 *			       struct vfio_device_feature)
 *
 * Get, set, or probe feature data of the device.  The feature is selected
 * using the FEATURE_MASK portion of the flags field.  Support for a feature
 * can be probed by setting both the FEATURE_MASK and PROBE bits.  A probe
 * may optionally include the GET and/or SET bits to determine read vs write
 * access of the feature respectively.  Probing a feature will return success
 * if the feature is supported and all of the optionally indicated GET/SET
 * methods are supported.  The format of the data portion of the structure is
 * specific to the given feature.  The data portion is not required for
 * probing.  GET and SET are mutually exclusive, except for use with PROBE.
 *
 * Return 0 on success, -errno on failure.
 */
struct vfio_device_feature {
	__u32	argsz;
	__u32	flags;
#define VFIO_DEVICE_FEATURE_MASK	(0xffff) /* 16-bit feature index */
#define VFIO_DEVICE_FEATURE_GET		(1 << 16) /* Get feature into data[] */
#define VFIO_DEVICE_FEATURE_SET		(1 << 17) /* Set feature from data[] */
#define VFIO_DEVICE_FEATURE_PROBE	(1 << 18) /* Probe feature support */
	__u8	data[];
};

#define VFIO_DEVICE_FEATURE		_IO(VFIO_TYPE, VFIO_BASE + 17)
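
/*
 * Example (illustrative): probing whether the migration feature (defined
 * further below) is supported and readable; no data payload is needed for
 * a probe:
 *
 *	struct vfio_device_feature feat = {
 *		.argsz = sizeof(feat),
 *		.flags = VFIO_DEVICE_FEATURE_PROBE | VFIO_DEVICE_FEATURE_GET |
 *			 VFIO_DEVICE_FEATURE_MIGRATION,
 *	};
 *
 *	int supported = !ioctl(device, VFIO_DEVICE_FEATURE, &feat);
 */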

/*
 * VFIO_DEVICE_BIND_IOMMUFD - _IOR(VFIO_TYPE, VFIO_BASE + 18,
 *				   struct vfio_device_bind_iommufd)
 * @argsz:	 User filled size of this data.
 * @flags:	 Must be 0.
 * @iommufd:	 iommufd to bind.
 * @out_devid:	 The device id generated by this bind. devid is a handle for
 *		 this device/iommufd bond and can be used in IOMMUFD commands.
 *
 * Bind a vfio_device to the specified iommufd.
 *
 * User is restricted from accessing the device before the binding operation
 * is completed.  Only allowed on cdev fds.
 *
 * Unbind is automatically conducted when device fd is closed.
 *
 * Return: 0 on success, -errno on failure.
 */
struct vfio_device_bind_iommufd {
	__u32		argsz;
	__u32		flags;
	__s32		iommufd;
	__u32		out_devid;
};

#define VFIO_DEVICE_BIND_IOMMUFD	_IO(VFIO_TYPE, VFIO_BASE + 18)

/*
 * VFIO_DEVICE_ATTACH_IOMMUFD_PT - _IOW(VFIO_TYPE, VFIO_BASE + 19,
 *					struct vfio_device_attach_iommufd_pt)
 * @argsz:	User filled size of this data.
 * @flags:	Must be 0.
 * @pt_id:	Input the target id which can represent an ioas or a hwpt
 *		allocated via iommufd subsystem.
 *		Output the input ioas id or the attached hwpt id which could
 *		be the specified hwpt itself or a hwpt automatically created
 *		for the specified ioas by kernel during the attachment.
 *
 * Associate the device with an address space within the bound iommufd.
 * Undo by VFIO_DEVICE_DETACH_IOMMUFD_PT or device fd close.  This is only
 * allowed on cdev fds.
 *
 * If a vfio device is currently attached to a valid hw_pagetable, without doing
 * a VFIO_DEVICE_DETACH_IOMMUFD_PT, a second VFIO_DEVICE_ATTACH_IOMMUFD_PT ioctl
 * passing in another hw_pagetable (hwpt) id is allowed. This action, also known
 * as a hw_pagetable replacement, will replace the device's currently attached
 * hw_pagetable with a new hw_pagetable corresponding to the given pt_id.
 *
 * Return: 0 on success, -errno on failure.
 */
struct vfio_device_attach_iommufd_pt {
	__u32	argsz;
	__u32	flags;
	__u32	pt_id;
};

#define VFIO_DEVICE_ATTACH_IOMMUFD_PT		_IO(VFIO_TYPE, VFIO_BASE + 19)
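
/*
 * Example (illustrative sketch of the cdev flow): `iommufd` is an open
 * /dev/iommu fd and `ioas_id` an IOAS previously allocated through the
 * iommufd uAPI; the device path is a placeholder:
 *
 *	int device = open("/dev/vfio/devices/vfio0", O_RDWR);
 *	struct vfio_device_bind_iommufd bind = {
 *		.argsz = sizeof(bind),
 *		.iommufd = iommufd,
 *	};
 *	struct vfio_device_attach_iommufd_pt attach = {
 *		.argsz = sizeof(attach),
 *		.pt_id = ioas_id,
 *	};
 *
 *	ioctl(device, VFIO_DEVICE_BIND_IOMMUFD, &bind);
 *	// bind.out_devid now identifies the device to IOMMUFD commands
 *	ioctl(device, VFIO_DEVICE_ATTACH_IOMMUFD_PT, &attach);
 */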

/*
 * VFIO_DEVICE_DETACH_IOMMUFD_PT - _IOW(VFIO_TYPE, VFIO_BASE + 20,
 *					struct vfio_device_detach_iommufd_pt)
 * @argsz:	User filled size of this data.
 * @flags:	Must be 0.
 *
 * Remove the association of the device and its current associated address
 * space.  Afterwards, the device is in a DMA-blocking state.  This is only
 * allowed on cdev fds.
 *
 * Return: 0 on success, -errno on failure.
 */
struct vfio_device_detach_iommufd_pt {
	__u32	argsz;
	__u32	flags;
};

#define VFIO_DEVICE_DETACH_IOMMUFD_PT		_IO(VFIO_TYPE, VFIO_BASE + 20)

/*
 * Provide support for setting a PCI VF Token, which is used as a shared
 * secret between PF and VF drivers.  This feature may only be set on a
 * PCI SR-IOV PF when SR-IOV is enabled on the PF and there are no existing
 * open VFs.  Data provided when setting this feature is a 16-byte array
 * (__u8 b[16]), representing a UUID.
 */
#define VFIO_DEVICE_FEATURE_PCI_VF_TOKEN	(0)

/*
 * Indicates the device can support the migration API through
 * VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE. If this GET succeeds, the RUNNING and
 * ERROR states are always supported. Support for additional states is
 * indicated via the flags field; at least VFIO_MIGRATION_STOP_COPY must be
 * set.
 *
 * VFIO_MIGRATION_STOP_COPY means that STOP, STOP_COPY and
 * RESUMING are supported.
 *
 * VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_P2P means that RUNNING_P2P
 * is supported in addition to the STOP_COPY states.
 *
 * VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_PRE_COPY means that
 * PRE_COPY is supported in addition to the STOP_COPY states.
 *
 * VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_P2P | VFIO_MIGRATION_PRE_COPY
 * means that RUNNING_P2P, PRE_COPY and PRE_COPY_P2P are supported
 * in addition to the STOP_COPY states.
 *
 * Other combinations of flags have behavior to be defined in the future.
 */
struct vfio_device_feature_migration {
	__aligned_u64 flags;
#define VFIO_MIGRATION_STOP_COPY	(1 << 0)
#define VFIO_MIGRATION_P2P		(1 << 1)
#define VFIO_MIGRATION_PRE_COPY		(1 << 2)
};
#define VFIO_DEVICE_FEATURE_MIGRATION 1

/*
 * Upon VFIO_DEVICE_FEATURE_SET, execute a migration state change on the VFIO
 * device. The new state is supplied in device_state, see enum
 * vfio_device_mig_state for details
 *
 * The kernel migration driver must fully transition the device to the new state
 * value before the operation returns to the user.
 *
 * The kernel migration driver must not generate asynchronous device state
 * transitions outside of manipulation by the user or the VFIO_DEVICE_RESET
 * ioctl as described above.
 *
 * If this function fails then current device_state may be the original
 * operating state or some other state along the combination transition path.
 * The user can then decide if it should execute a VFIO_DEVICE_RESET, attempt
 * to return to the original state, or attempt to return to some other state
 * such as RUNNING or STOP.
 *
 * If the new_state starts a new data transfer session then the FD associated
 * with that session is returned in data_fd. The user is responsible to close
 * this FD when it is finished. The user must consider the migration data stream
 * carried over the FD to be opaque and must preserve the byte order of the
 * stream. The user is not required to preserve buffer segmentation when writing
 * the data stream during the RESUMING operation.
 *
 * Upon VFIO_DEVICE_FEATURE_GET, get the current migration state of the VFIO
 * device, data_fd will be -1.
 */
struct vfio_device_feature_mig_state {
	__u32 device_state; /* From enum vfio_device_mig_state */
	__s32 data_fd;
};
#define VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE 2
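
/*
 * Example (illustrative): requesting STOP_COPY.  The feature payload
 * follows the vfio_device_feature header in one buffer:
 *
 *	__u8 buf[sizeof(struct vfio_device_feature) +
 *		 sizeof(struct vfio_device_feature_mig_state)] = {};
 *	struct vfio_device_feature *feat = (void *)buf;
 *	struct vfio_device_feature_mig_state *mig = (void *)feat->data;
 *
 *	feat->argsz = sizeof(buf);
 *	feat->flags = VFIO_DEVICE_FEATURE_SET |
 *		      VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE;
 *	mig->device_state = VFIO_DEVICE_STATE_STOP_COPY;
 *	ioctl(device, VFIO_DEVICE_FEATURE, feat);
 *	// on success, read() from mig->data_fd until it returns 0
 */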

/*
 * The device migration Finite State Machine is described by the enum
 * vfio_device_mig_state. Some of the FSM arcs will create a migration data
 * transfer session by returning a FD, in this case the migration data will
 * flow over the FD using read() and write() as discussed below.
 *
 * There are 5 states to support VFIO_MIGRATION_STOP_COPY:
 *  RUNNING - The device is running normally
 *  STOP - The device does not change the internal or external state
 *  STOP_COPY - The device internal state can be read out
 *  RESUMING - The device is stopped and is loading a new internal state
 *  ERROR - The device has failed and must be reset
 *
 * And optional states to support VFIO_MIGRATION_P2P:
 *  RUNNING_P2P - RUNNING, except the device cannot do peer to peer DMA
 * And VFIO_MIGRATION_PRE_COPY:
 *  PRE_COPY - The device is running normally but tracking internal state
 *             changes
 * And VFIO_MIGRATION_P2P | VFIO_MIGRATION_PRE_COPY:
 *  PRE_COPY_P2P - PRE_COPY, except the device cannot do peer to peer DMA
 *
 * The FSM takes actions on the arcs between FSM states. The driver implements
 * the following behavior for the FSM arcs:
 *
 * RUNNING_P2P -> STOP
 * STOP_COPY -> STOP
 *   While in STOP the device must stop operating.  The device
 *   must not generate interrupts, DMA, or any other change to external state.
 *   It must not change its internal state. When stopped the device and kernel
 *   migration driver must accept and respond to interaction to support external
 *   subsystems in the STOP state, for example PCI MSI-X and PCI config space.
 *   Failure by the user to restrict device access while in STOP must not result
 *   in error conditions outside the user context (ex. host system faults).
 *
 *   The STOP_COPY arc will terminate a data transfer session.
 *
 * RESUMING -> STOP
 *   Leaving RESUMING terminates a data transfer session and indicates the
 *   device should complete processing of the data delivered by write(). The
 *   kernel migration driver should complete the incorporation of data written
 *   to the data transfer FD into the device internal state and perform
 *   final validity and consistency checking of the new device state. If the
 *   user provided data is found to be incomplete, inconsistent, or otherwise
 *   invalid, the migration driver must fail the SET_STATE ioctl and
 *   optionally go to the ERROR state as described below.
 *
 *   While in STOP the device has the same behavior as other STOP states
 *   described above.
 *
 *   To abort a RESUMING session the device must be reset.
 *
 * PRE_COPY -> RUNNING
 * RUNNING_P2P -> RUNNING
 *   While in RUNNING the device is fully operational, the device may generate
 *   interrupts, DMA, respond to MMIO, all vfio device regions are functional,
 *   and the device may advance its internal state.
 *
 *   The PRE_COPY arc will terminate a data transfer session.
 *
 * PRE_COPY_P2P -> RUNNING_P2P
 * RUNNING -> RUNNING_P2P
 * STOP -> RUNNING_P2P
 *   While in RUNNING_P2P the device is partially running in the P2P quiescent
 *   state defined below.
 *
 *   The PRE_COPY_P2P arc will terminate a data transfer session.
 *
 * RUNNING -> PRE_COPY
 * RUNNING_P2P -> PRE_COPY_P2P
 * STOP -> STOP_COPY
 *   PRE_COPY, PRE_COPY_P2P and STOP_COPY form the "saving group" of states
 *   which share a data transfer session. Moving between these states alters
 *   what is streamed in the session, but does not terminate or otherwise
 *   affect the associated fd.
 *
 *   These arcs begin the process of saving the device state and will return a
 *   new data_fd. The migration driver may perform actions such as enabling
 *   dirty logging of device state when entering PRE_COPY or PRE_COPY_P2P.
 *
 *   Each arc does not change the device operation, the device remains
 *   RUNNING, P2P quiesced or in STOP. The STOP_COPY state is described below
 *   in PRE_COPY_P2P -> STOP_COPY.
 *
 * PRE_COPY -> PRE_COPY_P2P
 *   Entering PRE_COPY_P2P continues all the behaviors of PRE_COPY above.
 *   However, while in the PRE_COPY_P2P state, the device is partially running
 *   in the P2P quiescent state defined below, like RUNNING_P2P.
 *
 * PRE_COPY_P2P -> PRE_COPY
 *   This arc allows returning the device to a full RUNNING behavior while
 *   continuing all the behaviors of PRE_COPY.
 *
 * PRE_COPY_P2P -> STOP_COPY
 *   While in the STOP_COPY state the device has the same behavior as STOP
 *   with the addition that the data transfer session continues to stream the
 *   migration state. End of stream on the FD indicates the entire device
 *   state has been transferred.
 *
 *   The user should take steps to restrict access to vfio device regions while
 *   the device is in STOP_COPY or risk corruption of the device migration data
 *   stream.
 *
 * STOP -> RESUMING
 *   Entering the RESUMING state starts a process of restoring the device state
 *   and will return a new data_fd. The data stream fed into the data_fd should
 *   be taken from the data transfer output of a single FD during saving from
 *   a compatible device. The migration driver may alter/reset the internal
 *   device state for this arc if required to prepare the device to receive the
 *   migration data.
 *
 * STOP_COPY -> PRE_COPY
 * STOP_COPY -> PRE_COPY_P2P
 *   These arcs are not permitted and return error if requested. Future
 *   revisions of this API may define behaviors for these arcs, in this case
 *   support will be discoverable by a new flag in
 *   VFIO_DEVICE_FEATURE_MIGRATION.
 *
 * any -> ERROR
 *   ERROR cannot be specified as a device state, however any transition request
 *   can be failed with an errno return and may then move the device_state into
 *   ERROR. In this case the device was unable to execute the requested arc and
 *   was also unable to restore the device to any valid device_state.
 *   To recover from ERROR VFIO_DEVICE_RESET must be used to return the
 *   device_state back to RUNNING.
 *
 * The optional peer to peer (P2P) quiescent state is intended to be a quiescent
 * state for the device for the purposes of managing multiple devices within a
 * user context where peer-to-peer DMA between devices may be active. The
 * RUNNING_P2P and PRE_COPY_P2P states must prevent the device from initiating
 * any new P2P DMA transactions. If the device can identify P2P transactions
 * then it can stop only P2P DMA, otherwise it must stop all DMA. The migration
 * driver must complete any such outstanding operations prior to completing the
 * FSM arc into a P2P state. For the purpose of this specification, unsupported
 * P2P states behave as though the device is fully running.  As with STOP or
 * STOP_COPY, the user must not touch the device while in a P2P state,
 * otherwise the state may be exited.
 *
 * The remaining possible transitions are interpreted as combinations of the
 * above FSM arcs. As there are multiple paths through the FSM arcs the path
 * should be selected based on the following rules:
 *   - Select the shortest path.
 *   - The path cannot have saving group states as interior arcs, only
 *     starting/end states.
 * Refer to vfio_mig_get_next_state() for the result of the algorithm.
 *
 * The automatic transit through the FSM arcs that make up the combination
 * transition is invisible to the user. When working with combination arcs the
 * user may see any step along the path in the device_state if SET_STATE
 * fails. When handling these types of errors users should anticipate future
 * revisions of this protocol using new states and those states becoming
 * visible in this case.
 *
 * The optional states cannot be used with SET_STATE if the device does not
 * support them. The user can discover if these states are supported by using
 * VFIO_DEVICE_FEATURE_MIGRATION. By using combination transitions the user can
 * avoid knowing about these optional states if the kernel driver supports them.
 *
 * Arcs touching PRE_COPY and PRE_COPY_P2P are removed if support for PRE_COPY
 * is not present.
 */
enum vfio_device_mig_state {
	VFIO_DEVICE_STATE_ERROR = 0,
	VFIO_DEVICE_STATE_STOP = 1,
	VFIO_DEVICE_STATE_RUNNING = 2,
	VFIO_DEVICE_STATE_STOP_COPY = 3,
	VFIO_DEVICE_STATE_RESUMING = 4,
	VFIO_DEVICE_STATE_RUNNING_P2P = 5,
	VFIO_DEVICE_STATE_PRE_COPY = 6,
	VFIO_DEVICE_STATE_PRE_COPY_P2P = 7,
};

/**
 * VFIO_MIG_GET_PRECOPY_INFO - _IO(VFIO_TYPE, VFIO_BASE + 21)
 *
 * This ioctl is used on the migration data FD in the precopy phase of the
 * migration data transfer. It returns an estimate of the current data sizes
 * remaining to be transferred. It allows the user to judge when it is
 * appropriate to leave PRE_COPY for STOP_COPY.
 *
 * This ioctl is valid only in PRE_COPY states and the kernel driver should
 * return -EINVAL from any other migration state.
 *
 * The vfio_precopy_info data structure returned by this ioctl provides
 * estimates of data available from the device during the PRE_COPY states.
 * This estimate is split into two categories, initial_bytes and
 * dirty_bytes.
 *
 * The initial_bytes field indicates the amount of initial precopy
 * data available from the device. This field should have a non-zero initial
 * value and decrease as migration data is read from the device.
 * It is recommended to leave PRE_COPY for STOP_COPY only after this field
 * reaches zero. Leaving PRE_COPY earlier might make things slower.
 *
 * The dirty_bytes field tracks device state changes relative to data
 * previously retrieved.  This field starts at zero and may increase as
 * the internal device state is modified or decrease as that modified
 * state is read from the device.
 *
 * Userspace may use the combination of these fields to estimate the
 * potential data size available during the PRE_COPY phases, as well as
 * trends relative to the rate the device is dirtying its internal
 * state, but these fields are not required to have any bearing relative
 * to the data size available during the STOP_COPY phase.
 *
 * Drivers have a lot of flexibility in when and what they transfer during the
 * PRE_COPY phase, and how they report this from VFIO_MIG_GET_PRECOPY_INFO.
 *
 * During pre-copy the migration data FD has a temporary "end of stream" that is
 * reached when both initial_bytes and dirty_bytes are zero. For instance, this
 * may indicate that the device is idle and not currently dirtying any internal
 * state. When read() is done on this temporary end of stream the kernel driver
 * should return ENOMSG from read(). Userspace can wait for more data (which may
 * never come) by using poll.
 *
 * Once in STOP_COPY the migration data FD has a permanent end of stream
 * signaled in the usual way by read() always returning 0 and poll always
 * returning readable. ENOMSG may not be returned in STOP_COPY.
 * Support for this ioctl is mandatory if a driver claims to support
 * VFIO_MIGRATION_PRE_COPY.
 *
 * Return: 0 on success, -1 and errno set on failure.
 */
struct vfio_precopy_info {
	__u32 argsz;
	__u32 flags;
	__aligned_u64 initial_bytes;
	__aligned_u64 dirty_bytes;
};

#define VFIO_MIG_GET_PRECOPY_INFO _IO(VFIO_TYPE, VFIO_BASE + 21)
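
/*
 * Example (illustrative): a simple PRE_COPY pump on the migration data FD
 * that drains until the temporary end of stream, then rechecks the
 * estimates:
 *
 *	struct vfio_precopy_info pc = { .argsz = sizeof(pc) };
 *	char buf[4096];
 *	ssize_t n;
 *
 *	for (;;) {
 *		n = read(data_fd, buf, sizeof(buf));
 *		if (n > 0)
 *			continue;	// forward buf to the target side
 *		if (n < 0 && errno == ENOMSG) {
 *			ioctl(data_fd, VFIO_MIG_GET_PRECOPY_INFO, &pc);
 *			if (!pc.initial_bytes)
 *				break;	// reasonable point for STOP_COPY
 *			continue;	// or poll() for more data
 *		}
 *		break;			// 0 or error: stream ended
 *	}
 */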

/*
 * Upon VFIO_DEVICE_FEATURE_SET, allow the device to be moved into a low power
 * state with the platform-based power management.  Device use of lower power
 * states depends on factors managed by the runtime power management core,
 * including system level support and coordinating support among dependent
 * devices.  Enabling device low power entry does not guarantee lower power
 * usage by the device, nor is a mechanism provided through this feature to
 * know the current power state of the device.  If any device access happens
 * (either from the host or through the vfio uAPI) when the device is in the
 * low power state, then the host will move the device out of the low power
 * state as necessary prior to the access.  Once the access is completed, the
 * device may re-enter the low power state.  For single shot low power support
 * with wake-up notification, see
 * VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY_WITH_WAKEUP below.  Access to mmap'd
 * device regions is disabled on LOW_POWER_ENTRY and may only be resumed after
 * calling LOW_POWER_EXIT.
 */
#define VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY 3

/*
 * This device feature has the same behavior as
 * VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY with the exception that the user
 * provides an eventfd for wake-up notification.  When the device moves out of
 * the low power state for the wake-up, the host will not allow the device to
 * re-enter a low power state without a subsequent user call to one of the low
 * power entry device feature IOCTLs.  Access to mmap'd device regions is
 * disabled on LOW_POWER_ENTRY_WITH_WAKEUP and may only be resumed after the
 * low power exit.  The low power exit can happen either through LOW_POWER_EXIT
 * or through any other access (where the wake-up notification has been
 * generated).  The access to mmap'd device regions will not trigger low power
 * exit.
 *
 * The notification through the provided eventfd will be generated only when
 * the device has entered and is resumed from a low power state after
 * calling this device feature IOCTL.  A device that has not entered low power
 * state, as managed through the runtime power management core, will not
 * generate a notification through the provided eventfd on access.  Calling the
 * LOW_POWER_EXIT feature is optional in the case where notification has been
 * signaled on the provided eventfd that a resume from low power has occurred.
 */
struct vfio_device_low_power_entry_with_wakeup {
	__s32 wakeup_eventfd;
	__u32 reserved;
};

#define VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY_WITH_WAKEUP 4
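
/*
 * Illustrative sketch (not part of the uAPI, kept under #if 0): arming low
 * power entry with a wake-up eventfd through the generic VFIO_DEVICE_FEATURE
 * ioctl defined earlier in this header.  The helper name, device_fd and
 * wakeup_fd are hypothetical.
 */
#if 0
#include <string.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <unistd.h>

static int enter_low_power(int device_fd, int *wakeup_fd)
{
	/* u64-aligned buffer: generic feature header + feature payload */
	__u64 buf[(sizeof(struct vfio_device_feature) +
		   sizeof(struct vfio_device_low_power_entry_with_wakeup) + 7) / 8];
	struct vfio_device_feature *feature = (struct vfio_device_feature *)buf;
	struct vfio_device_low_power_entry_with_wakeup *entry =
		(struct vfio_device_low_power_entry_with_wakeup *)feature->data;
	int evfd = eventfd(0, 0);

	if (evfd < 0)
		return -1;

	memset(buf, 0, sizeof(buf));
	feature->argsz = sizeof(buf);
	feature->flags = VFIO_DEVICE_FEATURE_SET |
			 VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY_WITH_WAKEUP;
	entry->wakeup_eventfd = evfd;

	if (ioctl(device_fd, VFIO_DEVICE_FEATURE, feature)) {
		close(evfd);
		return -1;
	}

	*wakeup_fd = evfd;	/* becomes readable on resume from low power */
	return 0;
}
#endif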

/*
 * Upon VFIO_DEVICE_FEATURE_SET, disallow use of device low power states as
 * previously enabled via VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY or
 * VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY_WITH_WAKEUP device features.
 * This device feature IOCTL may itself generate a wakeup eventfd notification
 * in the latter case if the device had previously entered a low power state.
 */
#define VFIO_DEVICE_FEATURE_LOW_POWER_EXIT 5

/*
 * Upon VFIO_DEVICE_FEATURE_SET start/stop device DMA logging.
 * VFIO_DEVICE_FEATURE_PROBE can be used to detect if the device supports
 * DMA logging.
 *
 * DMA logging allows a device to internally record what DMAs the device is
 * initiating and report them back to userspace. It is part of the VFIO
 * migration infrastructure that allows implementing dirty page tracking
 * during the pre-copy phase of live migration. Only DMA WRITEs are logged,
 * and this API is not connected to VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE.
 *
 * When DMA logging is started a range of IOVAs to monitor is provided and the
 * device can optimize its logging to cover only the IOVA range given. Each
 * DMA that the device initiates inside the range will be logged by the device
 * for later retrieval.
 *
 * page_size is an input that hints what tracking granularity the device
 * should try to achieve. If the device cannot do the hinted page size then
 * it is the driver's choice which supported page size to pick.
 * On output the device will return the page size it selected.
 *
 * ranges is a pointer to an array of
 * struct vfio_device_feature_dma_logging_range.
 *
 * The core kernel code guarantees to support a minimum of num_ranges that
 * fit into a single kernel page. User space can try higher values but should
 * give up if the above can't be achieved due to driver limitations.
 *
 * A single call to start device DMA logging can be issued and a matching stop
 * should follow at the end. Another start is not allowed in the meantime.
 */
struct vfio_device_feature_dma_logging_control {
	__aligned_u64 page_size;
	__u32 num_ranges;
	__u32 __reserved;
	__aligned_u64 ranges;
};

struct vfio_device_feature_dma_logging_range {
	__aligned_u64 iova;
	__aligned_u64 length;
};

#define VFIO_DEVICE_FEATURE_DMA_LOGGING_START 6
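
/*
 * Illustrative sketch (not part of the uAPI, kept under #if 0): starting
 * dirty tracking over a single IOVA range.  The helper name, device_fd and
 * the 4KiB page size hint are hypothetical.
 */
#if 0
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>

static int dma_logging_start(int device_fd, __u64 iova, __u64 length)
{
	/* u64-aligned buffer: generic feature header + logging control */
	__u64 buf[(sizeof(struct vfio_device_feature) +
		   sizeof(struct vfio_device_feature_dma_logging_control) + 7) / 8];
	struct vfio_device_feature *feature = (struct vfio_device_feature *)buf;
	struct vfio_device_feature_dma_logging_control *control =
		(struct vfio_device_feature_dma_logging_control *)feature->data;
	struct vfio_device_feature_dma_logging_range range = {
		.iova = iova,
		.length = length,
	};

	memset(buf, 0, sizeof(buf));
	feature->argsz = sizeof(buf);
	feature->flags = VFIO_DEVICE_FEATURE_SET |
			 VFIO_DEVICE_FEATURE_DMA_LOGGING_START;
	control->page_size = 4096;	/* hint only; driver may pick another */
	control->num_ranges = 1;
	control->ranges = (__u64)(uintptr_t)&range;

	return ioctl(device_fd, VFIO_DEVICE_FEATURE, feature);
}
#endif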

/*
 * Upon VFIO_DEVICE_FEATURE_SET stop device DMA logging that was started
 * by VFIO_DEVICE_FEATURE_DMA_LOGGING_START.
 */
#define VFIO_DEVICE_FEATURE_DMA_LOGGING_STOP 7

/*
 * Upon VFIO_DEVICE_FEATURE_GET read back and clear the device DMA log.
 *
 * Query the device's DMA log for written pages within the given IOVA range.
 * During querying the log is cleared for the IOVA range.
 *
 * bitmap is a pointer to an array of u64s that will hold the output bitmap
 * with 1 bit reporting a page_size unit of IOVA. The mapping of IOVA to bits
 * is given by:
 *  bitmap[((addr - iova) / page_size) / 64] &
 *         (1ULL << (((addr - iova) / page_size) % 64))
 *
 * The input page_size can be any power of two value and does not have to
 * match the value given to VFIO_DEVICE_FEATURE_DMA_LOGGING_START. The driver
 * will format its internal logging to match the reporting page size, possibly
 * by replicating bits if the internal page size is lower than requested.
 *
 * The LOGGING_REPORT will only set bits in the bitmap and never clear or
 * perform any initialization of the user-provided bitmap.
 *
 * If any error is returned userspace should assume that the dirty log is
 * corrupted. Error recovery is to consider all memory dirty and try to
 * restart the dirty tracking, or to abort/restart the whole migration.
 *
 * If DMA logging is not enabled, an error will be returned.
 */
struct vfio_device_feature_dma_logging_report {
	__aligned_u64 iova;
	__aligned_u64 length;
	__aligned_u64 page_size;
	__aligned_u64 bitmap;
};

#define VFIO_DEVICE_FEATURE_DMA_LOGGING_REPORT 8
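
/*
 * Illustrative sketch (not part of the uAPI, kept under #if 0): fetching and
 * clearing the dirty log for one range.  The helper name and device_fd are
 * hypothetical; the caller supplies a zeroed bitmap sized as one bit per
 * page_size unit, packed into u64s, i.e.
 * nwords = (length / page_size + 63) / 64.
 */
#if 0
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>

static int dma_logging_report(int device_fd, __u64 iova, __u64 length,
			      __u64 page_size, __u64 *bitmap)
{
	/* u64-aligned buffer: generic feature header + report payload */
	__u64 buf[(sizeof(struct vfio_device_feature) +
		   sizeof(struct vfio_device_feature_dma_logging_report) + 7) / 8];
	struct vfio_device_feature *feature = (struct vfio_device_feature *)buf;
	struct vfio_device_feature_dma_logging_report *report =
		(struct vfio_device_feature_dma_logging_report *)feature->data;

	memset(buf, 0, sizeof(buf));
	feature->argsz = sizeof(buf);
	feature->flags = VFIO_DEVICE_FEATURE_GET |
			 VFIO_DEVICE_FEATURE_DMA_LOGGING_REPORT;
	report->iova = iova;
	report->length = length;
	report->page_size = page_size;
	report->bitmap = (__u64)(uintptr_t)bitmap;

	return ioctl(device_fd, VFIO_DEVICE_FEATURE, feature);
}
#endif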

/*
 * Upon VFIO_DEVICE_FEATURE_GET read back the estimated data length that will
 * be required to complete STOP_COPY.
 *
 * Note: Can be called in any device state.
 */
struct vfio_device_feature_mig_data_size {
	__aligned_u64 stop_copy_length;
};

#define VFIO_DEVICE_FEATURE_MIG_DATA_SIZE 9
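
/*
 * Illustrative sketch (not part of the uAPI, kept under #if 0): querying the
 * estimated STOP_COPY data size.  The helper name and device_fd are
 * hypothetical.
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>

static int mig_data_size(int device_fd, __u64 *stop_copy_length)
{
	/* u64-aligned buffer: generic feature header + data size payload */
	__u64 buf[(sizeof(struct vfio_device_feature) +
		   sizeof(struct vfio_device_feature_mig_data_size) + 7) / 8];
	struct vfio_device_feature *feature = (struct vfio_device_feature *)buf;
	struct vfio_device_feature_mig_data_size *size =
		(struct vfio_device_feature_mig_data_size *)feature->data;

	memset(buf, 0, sizeof(buf));
	feature->argsz = sizeof(buf);
	feature->flags = VFIO_DEVICE_FEATURE_GET |
			 VFIO_DEVICE_FEATURE_MIG_DATA_SIZE;

	if (ioctl(device_fd, VFIO_DEVICE_FEATURE, feature))
		return -1;

	*stop_copy_length = size->stop_copy_length;	/* filled by GET */
	return 0;
}
#endif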

/**
 * Upon VFIO_DEVICE_FEATURE_SET, set or clear the BUS mastering for the device
 * based on the operation specified in the op flag.
 *
 * The functionality is incorporated for devices that need bus master control,
 * but whose in-band device interface lacks the support. Consequently, it is
 * not applicable to PCI devices, as bus master control for PCI devices is
 * managed in-band through the configuration space. At present, this feature
 * is supported only for CDX devices.
 * Configuring the device's BUS MASTER setting as CLEAR blocks all incoming
 * DMA requests from the device, while configuring it as SET (enable) grants
 * the device the capability to perform DMA to the host memory.
 */
struct vfio_device_feature_bus_master {
	__u32 op;
#define		VFIO_DEVICE_FEATURE_CLEAR_MASTER	0	/* Clear Bus Master */
#define		VFIO_DEVICE_FEATURE_SET_MASTER		1	/* Set Bus Master */
};
#define VFIO_DEVICE_FEATURE_BUS_MASTER 10
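
/*
 * Illustrative sketch (not part of the uAPI, kept under #if 0): enabling bus
 * mastering, assuming the feature is driven through the generic
 * VFIO_DEVICE_FEATURE ioctl like the other features above.  The helper name
 * and device_fd are hypothetical.
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>

static int set_bus_master(int device_fd)
{
	/* u64-aligned buffer: generic feature header + bus master payload */
	__u64 buf[(sizeof(struct vfio_device_feature) +
		   sizeof(struct vfio_device_feature_bus_master) + 7) / 8];
	struct vfio_device_feature *feature = (struct vfio_device_feature *)buf;
	struct vfio_device_feature_bus_master *bm =
		(struct vfio_device_feature_bus_master *)feature->data;

	memset(buf, 0, sizeof(buf));
	feature->argsz = sizeof(buf);
	feature->flags = VFIO_DEVICE_FEATURE_SET | VFIO_DEVICE_FEATURE_BUS_MASTER;
	bm->op = VFIO_DEVICE_FEATURE_SET_MASTER;	/* allow device DMA */

	return ioctl(device_fd, VFIO_DEVICE_FEATURE, feature);
}
#endif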

/* -------- API for Type1 VFIO IOMMU -------- */

/**
 * VFIO_IOMMU_GET_INFO - _IOR(VFIO_TYPE, VFIO_BASE + 12, struct vfio_iommu_type1_info)
 *
 * Retrieve information about the IOMMU object. Fills in provided
 * struct vfio_iommu_type1_info. Caller sets argsz.
 *
 * XXX Should we do these by CHECK_EXTENSION too?
 */
struct vfio_iommu_type1_info {
	__u32	argsz;
	__u32	flags;
#define VFIO_IOMMU_INFO_PGSIZES (1 << 0)	/* supported page sizes info */
#define VFIO_IOMMU_INFO_CAPS	(1 << 1)	/* Info supports caps */
	__aligned_u64	iova_pgsizes;		/* Bitmap of supported page sizes */
	__u32   cap_offset;	/* Offset within info struct of first cap */
	__u32   pad;
};

/*
 * The IOVA capability allows reporting the valid IOVA range(s)
 * excluding any non-relaxable reserved regions exposed by
 * devices attached to the container. Any DMA map attempt
 * outside the valid iova range will fail with an error.
 *
 * The structures below define version 1 of this capability.
 */
#define VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE  1

struct vfio_iova_range {
	__u64	start;
	__u64	end;
};

struct vfio_iommu_type1_info_cap_iova_range {
	struct	vfio_info_cap_header header;
	__u32	nr_iovas;
	__u32	reserved;
	struct	vfio_iova_range iova_ranges[];
};

/*
 * The migration capability allows reporting supported migration features.
 *
 * The structures below define version 1 of this capability.
 *
 * The existence of this capability indicates that the IOMMU kernel driver
 * supports dirty page logging.
 *
 * pgsize_bitmap: Kernel driver returns bitmap of supported page sizes for
 * dirty page logging.
 * max_dirty_bitmap_size: Kernel driver returns maximum supported dirty bitmap
 * size in bytes that can be used by user applications when getting the dirty
 * bitmap.
 */
#define VFIO_IOMMU_TYPE1_INFO_CAP_MIGRATION  2

struct vfio_iommu_type1_info_cap_migration {
	struct	vfio_info_cap_header header;
	__u32	flags;
	__u64	pgsize_bitmap;
	__u64	max_dirty_bitmap_size;		/* in bytes */
};

/*
 * The DMA available capability allows reporting the number of simultaneously
 * outstanding DMA mappings currently allowed.
 *
 * The structure below defines version 1 of this capability.
 *
 * avail: specifies the current number of outstanding DMA mappings allowed.
 */
#define VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL 3

struct vfio_iommu_type1_info_dma_avail {
	struct	vfio_info_cap_header header;
	__u32	avail;
};

#define VFIO_IOMMU_GET_INFO _IO(VFIO_TYPE, VFIO_BASE + 12)
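
/*
 * Illustrative sketch (not part of the uAPI, kept under #if 0): the usual
 * two-call argsz pattern for VFIO_IOMMU_GET_INFO, followed by a walk of the
 * capability chain looking for the IOVA range capability.  The helper name
 * and container_fd are hypothetical.
 */
#if 0
#include <stdlib.h>
#include <sys/ioctl.h>

static struct vfio_iommu_type1_info *iommu_get_info(int container_fd)
{
	struct vfio_iommu_type1_info hdr = { .argsz = sizeof(hdr) };
	struct vfio_iommu_type1_info *info;

	/* First call discovers the full size, including capabilities... */
	if (ioctl(container_fd, VFIO_IOMMU_GET_INFO, &hdr))
		return NULL;

	info = calloc(1, hdr.argsz);
	if (!info)
		return NULL;
	info->argsz = hdr.argsz;

	/* ...second call fills in the capability chain as well. */
	if (ioctl(container_fd, VFIO_IOMMU_GET_INFO, info)) {
		free(info);
		return NULL;
	}

	if ((info->flags & VFIO_IOMMU_INFO_CAPS) && info->cap_offset) {
		__u32 off = info->cap_offset;

		while (off) {	/* offsets are relative to the info buffer */
			struct vfio_info_cap_header *cap =
				(void *)((char *)info + off);

			if (cap->id == VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE) {
				/* struct vfio_iommu_type1_info_cap_iova_range
				 * starts at cap */
			}
			off = cap->next;
		}
	}
	return info;
}
#endif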

/**
 * VFIO_IOMMU_MAP_DMA - _IOW(VFIO_TYPE, VFIO_BASE + 13, struct vfio_iommu_type1_dma_map)
 *
 * Map process virtual addresses to IO virtual addresses using the
 * provided struct vfio_iommu_type1_dma_map. Caller sets argsz. READ and/or
 * WRITE required.
 *
 * If flags & VFIO_DMA_MAP_FLAG_VADDR, update the base vaddr for iova. The vaddr
 * must have previously been invalidated with VFIO_DMA_UNMAP_FLAG_VADDR.  To
 * maintain memory consistency within the user application, the updated vaddr
 * must address the same memory object as originally mapped.  Failure to do so
 * will result in user memory corruption and/or device misbehavior.  iova and
 * size must match those in the original MAP_DMA call.  Protection is not
 * changed, and the READ & WRITE flags must be 0.
 */
struct vfio_iommu_type1_dma_map {
	__u32	argsz;
	__u32	flags;
#define VFIO_DMA_MAP_FLAG_READ (1 << 0)		/* readable from device */
#define VFIO_DMA_MAP_FLAG_WRITE (1 << 1)	/* writable from device */
#define VFIO_DMA_MAP_FLAG_VADDR (1 << 2)
	__u64	vaddr;				/* Process virtual address */
	__u64	iova;				/* IO virtual address */
	__u64	size;				/* Size of mapping (bytes) */
};

#define VFIO_IOMMU_MAP_DMA _IO(VFIO_TYPE, VFIO_BASE + 13)
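
/*
 * Illustrative sketch (not part of the uAPI, kept under #if 0): mapping a
 * read/write DMA window.  The helper name and container_fd are hypothetical.
 */
#if 0
#include <stdint.h>
#include <sys/ioctl.h>

static int dma_map(int container_fd, void *vaddr, __u64 iova, __u64 size)
{
	struct vfio_iommu_type1_dma_map map = {
		.argsz = sizeof(map),
		.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
		.vaddr = (__u64)(uintptr_t)vaddr,
		.iova = iova,
		.size = size,
	};

	return ioctl(container_fd, VFIO_IOMMU_MAP_DMA, &map);
}
#endif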

struct vfio_bitmap {
	__u64        pgsize;	/* page size for bitmap in bytes */
	__u64        size;	/* in bytes */
	__u64 *data;	/* one bit per page */
};

/**
 * VFIO_IOMMU_UNMAP_DMA - _IOWR(VFIO_TYPE, VFIO_BASE + 14,
 *				struct vfio_iommu_type1_dma_unmap)
 *
 * Unmap IO virtual addresses using the provided struct
 * vfio_iommu_type1_dma_unmap.  Caller sets argsz.  The actual unmapped size
 * is returned in the size field.  No guarantee is made to the user that
 * arbitrary unmaps of iova or size different from those used in the original
 * mapping call will succeed.
 *
 * VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP should be set to get the dirty bitmap
 * before unmapping IO virtual addresses. When this flag is set, the user must
 * provide a struct vfio_bitmap in data[]. The user must provide zeroed memory
 * via vfio_bitmap.data and its size in the vfio_bitmap.size field.
 * A bit in the bitmap represents one page, of user provided page size in
 * vfio_bitmap.pgsize field, consecutively starting from iova offset. A set
 * bit indicates that the page at that offset from iova is dirty. A bitmap of
 * the pages in the range of unmapped size is returned in the user-provided
 * vfio_bitmap.data.
 *
 * If flags & VFIO_DMA_UNMAP_FLAG_ALL, unmap all addresses.  iova and size
 * must be 0.  This cannot be combined with the get-dirty-bitmap flag.
 *
 * If flags & VFIO_DMA_UNMAP_FLAG_VADDR, do not unmap, but invalidate host
 * virtual addresses in the iova range.  DMA to already-mapped pages continues.
 * Groups may not be added to the container while any addresses are invalid.
 * This cannot be combined with the get-dirty-bitmap flag.
 */
struct vfio_iommu_type1_dma_unmap {
	__u32	argsz;
	__u32	flags;
#define VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP (1 << 0)
#define VFIO_DMA_UNMAP_FLAG_ALL		     (1 << 1)
#define VFIO_DMA_UNMAP_FLAG_VADDR	     (1 << 2)
	__u64	iova;				/* IO virtual address */
	__u64	size;				/* Size of mapping (bytes) */
	__u8    data[];
};

#define VFIO_IOMMU_UNMAP_DMA _IO(VFIO_TYPE, VFIO_BASE + 14)
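
/*
 * Illustrative sketch (not part of the uAPI, kept under #if 0): unmapping a
 * range while collecting its dirty bitmap.  The helper name, container_fd
 * and the caller-zeroed bitmap are hypothetical; bitmap_size is the byte
 * size of the bitmap for the chosen pgsize, i.e.
 * ((size / pgsize + 63) / 64) * 8.
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>

static int dma_unmap_get_dirty(int container_fd, __u64 iova, __u64 size,
			       __u64 pgsize, __u64 *bitmap, __u64 bitmap_size)
{
	/* u64-aligned buffer: unmap struct + struct vfio_bitmap in data[] */
	__u64 buf[(sizeof(struct vfio_iommu_type1_dma_unmap) +
		   sizeof(struct vfio_bitmap) + 7) / 8];
	struct vfio_iommu_type1_dma_unmap *unmap =
		(struct vfio_iommu_type1_dma_unmap *)buf;
	struct vfio_bitmap *vbmp = (struct vfio_bitmap *)unmap->data;

	memset(buf, 0, sizeof(buf));
	unmap->argsz = sizeof(buf);
	unmap->flags = VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP;
	unmap->iova = iova;
	unmap->size = size;
	vbmp->pgsize = pgsize;
	vbmp->size = bitmap_size;
	vbmp->data = bitmap;

	return ioctl(container_fd, VFIO_IOMMU_UNMAP_DMA, unmap);
}
#endif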

/*
 * IOCTLs to enable/disable IOMMU container usage.
 * No parameters are supported.
 */
#define VFIO_IOMMU_ENABLE	_IO(VFIO_TYPE, VFIO_BASE + 15)
#define VFIO_IOMMU_DISABLE	_IO(VFIO_TYPE, VFIO_BASE + 16)

/**
 * VFIO_IOMMU_DIRTY_PAGES - _IOWR(VFIO_TYPE, VFIO_BASE + 17,
 *                                     struct vfio_iommu_type1_dirty_bitmap)
 * This IOCTL is used for dirty page logging.
 * The caller should set a flag depending on which operation to perform,
 * detailed as below:
 *
 * Calling the IOCTL with VFIO_IOMMU_DIRTY_PAGES_FLAG_START flag set, instructs
 * the IOMMU driver to log pages that are dirtied or potentially dirtied by
 * the device; designed to be used when a migration is in progress. Dirty pages
 * are logged until logging is disabled by the user application by calling the
 * IOCTL with the VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP flag.
 *
 * Calling the IOCTL with VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP flag set, instructs
 * the IOMMU driver to stop logging dirtied pages.
 *
 * Calling the IOCTL with VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP flag set
 * returns the dirty pages bitmap for the IOMMU container for a given IOVA
 * range. The user must specify the IOVA range and the pgsize through the
 * structure vfio_iommu_type1_dirty_bitmap_get in the data[] portion. This
 * interface supports getting a bitmap of the smallest supported pgsize only
 * and can be modified in future to get a bitmap of any specified supported
 * pgsize. The user must provide a zeroed memory area for the bitmap memory
 * and specify its size in bitmap.size. One bit is used to represent one page
 * consecutively starting from iova offset. The user should provide the page
 * size in the bitmap.pgsize field. A bit set in the bitmap indicates that the
 * page at that offset from iova is dirty. The caller must set argsz to a value
 * including the size of structure vfio_iommu_type1_dirty_bitmap_get, but
 * excluding the size of the actual bitmap. If dirty pages logging is not
 * enabled, an error will be returned.
 *
 * Only one of the flags _START, _STOP and _GET may be specified at a time.
 */
struct vfio_iommu_type1_dirty_bitmap {
	__u32        argsz;
	__u32        flags;
#define VFIO_IOMMU_DIRTY_PAGES_FLAG_START	(1 << 0)
#define VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP	(1 << 1)
#define VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP	(1 << 2)
	__u8         data[];
};

struct vfio_iommu_type1_dirty_bitmap_get {
	__u64              iova;	/* IO virtual address */
	__u64              size;	/* Size of iova range */
	struct vfio_bitmap bitmap;
};

#define VFIO_IOMMU_DIRTY_PAGES             _IO(VFIO_TYPE, VFIO_BASE + 17)
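
/*
 * Illustrative sketch (not part of the uAPI, kept under #if 0): starting
 * container-level dirty logging and performing one bitmap query.  The helper
 * names, container_fd and the caller-zeroed bitmap are hypothetical.
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>

static int dirty_log_start(int container_fd)
{
	struct vfio_iommu_type1_dirty_bitmap dirty = {
		.argsz = sizeof(dirty),
		.flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_START,
	};

	return ioctl(container_fd, VFIO_IOMMU_DIRTY_PAGES, &dirty);
}

static int dirty_log_get(int container_fd, __u64 iova, __u64 size,
			 __u64 pgsize, __u64 *bitmap, __u64 bitmap_size)
{
	/* u64-aligned buffer: header + _get struct carried in data[] */
	__u64 buf[(sizeof(struct vfio_iommu_type1_dirty_bitmap) +
		   sizeof(struct vfio_iommu_type1_dirty_bitmap_get) + 7) / 8];
	struct vfio_iommu_type1_dirty_bitmap *dirty =
		(struct vfio_iommu_type1_dirty_bitmap *)buf;
	struct vfio_iommu_type1_dirty_bitmap_get *range =
		(struct vfio_iommu_type1_dirty_bitmap_get *)dirty->data;

	memset(buf, 0, sizeof(buf));
	/* argsz covers the _get struct but not the bitmap memory itself */
	dirty->argsz = sizeof(buf);
	dirty->flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP;
	range->iova = iova;
	range->size = size;
	range->bitmap.pgsize = pgsize;
	range->bitmap.size = bitmap_size;
	range->bitmap.data = bitmap;

	return ioctl(container_fd, VFIO_IOMMU_DIRTY_PAGES, dirty);
}
#endif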

/* -------- Additional API for SPAPR TCE (Server POWERPC) IOMMU -------- */

/*
 * The SPAPR TCE DDW info struct provides information about the
 * Dynamic DMA window capability.
 *
 * @pgsizes contains a page size bitmask; 4K/64K/16M are supported.
 * @max_dynamic_windows_supported tells the maximum number of windows
 * which the platform can create.
 * @levels tells the maximum number of levels in multi-level IOMMU tables;
 * this allows splitting a table into smaller chunks which reduces
 * the amount of physically contiguous memory required for the table.
 */
struct vfio_iommu_spapr_tce_ddw_info {
	__u64 pgsizes;			/* Bitmap of supported page sizes */
	__u32 max_dynamic_windows_supported;
	__u32 levels;
};

/*
 * The SPAPR TCE info struct provides information about the PCI bus
 * address ranges available for DMA; these values are programmed into
 * the hardware so the guest has to know that information.
 *
 * The DMA 32 bit window start is an absolute PCI bus address.
 * The IOVA addresses passed via map/unmap ioctls are absolute PCI bus
 * addresses too, so the window works as a filter rather than an offset
 * for IOVA addresses.
 *
 * Flags supported:
 * - VFIO_IOMMU_SPAPR_INFO_DDW: informs the userspace that dynamic DMA windows
 *   (DDW) support is present. @ddw is only supported when DDW is present.
 */
struct vfio_iommu_spapr_tce_info {
	__u32 argsz;
	__u32 flags;
#define VFIO_IOMMU_SPAPR_INFO_DDW	(1 << 0)	/* DDW supported */
	__u32 dma32_window_start;	/* 32 bit window start (bytes) */
	__u32 dma32_window_size;	/* 32 bit window size (bytes) */
	struct vfio_iommu_spapr_tce_ddw_info ddw;
};

#define VFIO_IOMMU_SPAPR_TCE_GET_INFO	_IO(VFIO_TYPE, VFIO_BASE + 12)

/*
 * The EEH PE operation struct provides ways to:
 * - enable/disable EEH functionality;
 * - unfreeze IO/DMA for frozen PE;
 * - read PE state;
 * - reset PE;
 * - configure PE;
 * - inject EEH error.
 */
struct vfio_eeh_pe_err {
	__u32 type;
	__u32 func;
	__u64 addr;
	__u64 mask;
};

struct vfio_eeh_pe_op {
	__u32 argsz;
	__u32 flags;
	__u32 op;
	union {
		struct vfio_eeh_pe_err err;
	};
};

#define VFIO_EEH_PE_DISABLE		0	/* Disable EEH functionality */
#define VFIO_EEH_PE_ENABLE		1	/* Enable EEH functionality  */
#define VFIO_EEH_PE_UNFREEZE_IO		2	/* Enable IO for frozen PE   */
#define VFIO_EEH_PE_UNFREEZE_DMA	3	/* Enable DMA for frozen PE  */
#define VFIO_EEH_PE_GET_STATE		4	/* PE state retrieval        */
#define  VFIO_EEH_PE_STATE_NORMAL	0	/* PE in functional state    */
#define  VFIO_EEH_PE_STATE_RESET	1	/* PE reset in progress      */
#define  VFIO_EEH_PE_STATE_STOPPED	2	/* Stopped DMA and IO        */
#define  VFIO_EEH_PE_STATE_STOPPED_DMA	4	/* Stopped DMA only          */
#define  VFIO_EEH_PE_STATE_UNAVAIL	5	/* State unavailable         */
#define VFIO_EEH_PE_RESET_DEACTIVATE	5	/* Deassert PE reset         */
#define VFIO_EEH_PE_RESET_HOT		6	/* Assert hot reset          */
#define VFIO_EEH_PE_RESET_FUNDAMENTAL	7	/* Assert fundamental reset  */
#define VFIO_EEH_PE_CONFIGURE		8	/* PE configuration          */
#define VFIO_EEH_PE_INJECT_ERR		9	/* Inject EEH error          */

#define VFIO_EEH_PE_OP			_IO(VFIO_TYPE, VFIO_BASE + 21)
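
/*
 * Illustrative sketch (not part of the uAPI, kept under #if 0): enabling EEH
 * and reading the PE state.  The helper name is hypothetical, and it assumes,
 * as in QEMU's usage, that the op is issued on the container FD and that
 * GET_STATE returns the state as the ioctl return value.
 */
#if 0
#include <sys/ioctl.h>

static int eeh_enable_and_get_state(int container_fd)
{
	struct vfio_eeh_pe_op op = {
		.argsz = sizeof(op),
		.op = VFIO_EEH_PE_ENABLE,
	};

	if (ioctl(container_fd, VFIO_EEH_PE_OP, &op))
		return -1;

	op.op = VFIO_EEH_PE_GET_STATE;
	/* e.g. VFIO_EEH_PE_STATE_NORMAL on a healthy PE */
	return ioctl(container_fd, VFIO_EEH_PE_OP, &op);
}
#endif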

/**
 * VFIO_IOMMU_SPAPR_REGISTER_MEMORY - _IOW(VFIO_TYPE, VFIO_BASE + 17, struct vfio_iommu_spapr_register_memory)
 *
 * Registers user space memory where DMA is allowed. It pins
 * user pages and does the locked memory accounting so
 * subsequent VFIO_IOMMU_MAP_DMA/VFIO_IOMMU_UNMAP_DMA calls
 * get faster.
 */
struct vfio_iommu_spapr_register_memory {
	__u32	argsz;
	__u32	flags;
	__u64	vaddr;				/* Process virtual address */
	__u64	size;				/* Size of mapping (bytes) */
};
#define VFIO_IOMMU_SPAPR_REGISTER_MEMORY	_IO(VFIO_TYPE, VFIO_BASE + 17)
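
/*
 * Illustrative sketch (not part of the uAPI, kept under #if 0):
 * preregistering a memory block for faster map/unmap.  The helper name and
 * container_fd are hypothetical.
 */
#if 0
#include <stdint.h>
#include <sys/ioctl.h>

static int spapr_register_memory(int container_fd, void *vaddr, __u64 size)
{
	struct vfio_iommu_spapr_register_memory reg = {
		.argsz = sizeof(reg),
		.vaddr = (__u64)(uintptr_t)vaddr,
		.size = size,
	};

	return ioctl(container_fd, VFIO_IOMMU_SPAPR_REGISTER_MEMORY, &reg);
}
#endif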

/**
 * VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY - _IOW(VFIO_TYPE, VFIO_BASE + 18, struct vfio_iommu_spapr_register_memory)
 *
 * Unregisters user space memory registered with
 * VFIO_IOMMU_SPAPR_REGISTER_MEMORY.
 * Uses vfio_iommu_spapr_register_memory for parameters.
 */
#define VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY	_IO(VFIO_TYPE, VFIO_BASE + 18)

/**
 * VFIO_IOMMU_SPAPR_TCE_CREATE - _IOWR(VFIO_TYPE, VFIO_BASE + 19, struct vfio_iommu_spapr_tce_create)
 *
 * Creates an additional TCE table and programs it (sets a new DMA window)
 * to every IOMMU group in the container. It receives the page shift, window
 * size and number of levels in the TCE table being created.
 *
 * It allocates and returns the offset on the PCI bus of the new DMA window.
 */
struct vfio_iommu_spapr_tce_create {
	__u32 argsz;
	__u32 flags;
	/* in */
	__u32 page_shift;
	__u32 __resv1;
	__u64 window_size;
	__u32 levels;
	__u32 __resv2;
	/* out */
	__u64 start_addr;
};
#define VFIO_IOMMU_SPAPR_TCE_CREATE	_IO(VFIO_TYPE, VFIO_BASE + 19)
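
/*
 * Illustrative sketch (not part of the uAPI, kept under #if 0): creating a
 * DDW window.  The helper name, container_fd and the parameter choices are
 * hypothetical; page_shift, window_size and levels must respect the limits
 * reported in struct vfio_iommu_spapr_tce_ddw_info.
 */
#if 0
#include <sys/ioctl.h>

static int spapr_create_window(int container_fd, __u32 page_shift,
			       __u64 window_size, __u32 levels,
			       __u64 *start_addr)
{
	struct vfio_iommu_spapr_tce_create create = {
		.argsz = sizeof(create),
		.page_shift = page_shift,	/* e.g. 16 for 64K pages */
		.window_size = window_size,
		.levels = levels,
	};

	if (ioctl(container_fd, VFIO_IOMMU_SPAPR_TCE_CREATE, &create))
		return -1;

	*start_addr = create.start_addr;	/* PCI bus offset of the window */
	return 0;
}
#endif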

/**
 * VFIO_IOMMU_SPAPR_TCE_REMOVE - _IOW(VFIO_TYPE, VFIO_BASE + 20, struct vfio_iommu_spapr_tce_remove)
 *
 * Unprograms a TCE table from all groups in the container and destroys it.
 * It receives a PCI bus offset as a window id.
 */
struct vfio_iommu_spapr_tce_remove {
	__u32 argsz;
	__u32 flags;
	/* in */
	__u64 start_addr;
};
#define VFIO_IOMMU_SPAPR_TCE_REMOVE	_IO(VFIO_TYPE, VFIO_BASE + 20)

/* ***************************************************************** */

#endif /* VFIO_H */