/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES.
 */
#ifndef _IOMMUFD_H
#define _IOMMUFD_H

#include <linux/types.h>
#include <linux/ioctl.h>

#define IOMMUFD_TYPE (';')

/**
 * DOC: General ioctl format
 *
 * The ioctl interface follows a general format to allow for extensibility. Each
 * ioctl is passed a structure pointer as the argument, with the size of the
 * structure in the first u32. The kernel checks that any structure space
 * beyond what it understands is 0. This allows userspace to use the backward
 * compatible portion while consistently using the newer, larger structures.
 *
 * ioctls use a standard meaning for common errnos:
 *
 *  - ENOTTY: The IOCTL number itself is not supported at all
 *  - E2BIG: The IOCTL number is supported, but the provided structure has
 *    a non-zero value in a part the kernel does not understand.
 *  - EOPNOTSUPP: The IOCTL number is supported, and the structure is
 *    understood, however a known field has a value the kernel does not
 *    understand or support.
 *  - EINVAL: Everything about the IOCTL was understood, but a field is not
 *    correct.
 *  - ENOENT: An ID or IOVA provided does not exist.
 *  - ENOMEM: Out of memory.
 *  - EOVERFLOW: Mathematics overflowed.
 *
 * Specific ioctls may return additional errnos beyond these.
 */
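
/*
 * Example (editorial sketch, not part of the UAPI): the ioctls below are
 * issued against a file descriptor for the iommufd character device,
 * conventionally /dev/iommu, assuming the usual userspace headers
 * (<fcntl.h>, <sys/ioctl.h>, <err.h>). Every command structure starts
 * with its size so the kernel can apply the compatibility rules above::
 *
 *	int fd = open("/dev/iommu", O_RDWR);
 *	if (fd < 0)
 *		err(1, "cannot open /dev/iommu");
 */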
enum {
	IOMMUFD_CMD_BASE = 0x80,
	IOMMUFD_CMD_DESTROY = IOMMUFD_CMD_BASE,
	IOMMUFD_CMD_IOAS_ALLOC,
	IOMMUFD_CMD_IOAS_ALLOW_IOVAS,
	IOMMUFD_CMD_IOAS_COPY,
	IOMMUFD_CMD_IOAS_IOVA_RANGES,
	IOMMUFD_CMD_IOAS_MAP,
	IOMMUFD_CMD_IOAS_UNMAP,
	IOMMUFD_CMD_OPTION,
	IOMMUFD_CMD_VFIO_IOAS,
	IOMMUFD_CMD_HWPT_ALLOC,
	IOMMUFD_CMD_GET_HW_INFO,
	IOMMUFD_CMD_HWPT_SET_DIRTY_TRACKING,
	IOMMUFD_CMD_HWPT_GET_DIRTY_BITMAP,
};

/**
 * struct iommu_destroy - ioctl(IOMMU_DESTROY)
 * @size: sizeof(struct iommu_destroy)
 * @id: iommufd object ID to destroy. Can be any destroyable object type.
 *
 * Destroy any object held within iommufd.
 */
struct iommu_destroy {
	__u32 size;
	__u32 id;
};
#define IOMMU_DESTROY _IO(IOMMUFD_TYPE, IOMMUFD_CMD_DESTROY)
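
/*
 * Example (editorial sketch): destroying an object by ID, assuming `fd`
 * is an iommufd descriptor and `id` came from an earlier allocation::
 *
 *	struct iommu_destroy destroy = {
 *		.size = sizeof(destroy),
 *		.id = id,
 *	};
 *	if (ioctl(fd, IOMMU_DESTROY, &destroy))
 *		err(1, "IOMMU_DESTROY");
 */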

/**
 * struct iommu_ioas_alloc - ioctl(IOMMU_IOAS_ALLOC)
 * @size: sizeof(struct iommu_ioas_alloc)
 * @flags: Must be 0
 * @out_ioas_id: Output IOAS ID for the allocated object
 *
 * Allocate an IO Address Space (IOAS) which holds an IO Virtual Address (IOVA)
 * to memory mapping.
 */
struct iommu_ioas_alloc {
	__u32 size;
	__u32 flags;
	__u32 out_ioas_id;
};
#define IOMMU_IOAS_ALLOC _IO(IOMMUFD_TYPE, IOMMUFD_CMD_IOAS_ALLOC)
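
/*
 * Example (editorial sketch): allocating an IOAS and reading back its ID,
 * assuming `fd` is an iommufd descriptor::
 *
 *	struct iommu_ioas_alloc alloc = {
 *		.size = sizeof(alloc),
 *		.flags = 0,
 *	};
 *	if (ioctl(fd, IOMMU_IOAS_ALLOC, &alloc))
 *		err(1, "IOMMU_IOAS_ALLOC");
 *	__u32 ioas_id = alloc.out_ioas_id;
 */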

/**
 * struct iommu_iova_range - An interval in IOVA space
 * @start: First IOVA
 * @last: Inclusive last IOVA
 *
 * An interval in IOVA space, used by IOMMU_IOAS_IOVA_RANGES and
 * IOMMU_IOAS_ALLOW_IOVAS.
 */
struct iommu_iova_range {
	__aligned_u64 start;
	__aligned_u64 last;
};

/**
 * struct iommu_ioas_iova_ranges - ioctl(IOMMU_IOAS_IOVA_RANGES)
 * @size: sizeof(struct iommu_ioas_iova_ranges)
 * @ioas_id: IOAS ID to read ranges from
 * @num_iovas: Input/Output total number of ranges in the IOAS
 * @__reserved: Must be 0
 * @allowed_iovas: Pointer to the output array of struct iommu_iova_range
 * @out_iova_alignment: Minimum alignment required for mapping IOVA
 *
 * Query an IOAS for ranges of allowed IOVAs. Mapping IOVA outside these ranges
 * is not allowed. num_iovas will be set to the total number of ranges and
 * allowed_iovas[] will be filled in as space permits.
 *
 * The allowed ranges are dependent on the HW path the DMA operation takes, and
 * can change during the lifetime of the IOAS. A fresh empty IOAS will have a
 * full range, and each attached device will narrow the ranges based on that
 * device's HW restrictions. Detaching a device can widen the ranges. Userspace
 * should query ranges after every attach/detach to know what IOVAs are valid
 * for mapping.
 *
 * On input num_iovas is the length of the allowed_iovas array. On output it is
 * the total number of ranges filled in. The ioctl will return -EMSGSIZE and set
 * num_iovas to the required value if num_iovas is too small. In this case the
 * caller should allocate a larger output array and re-issue the ioctl.
 *
 * out_iova_alignment returns the minimum IOVA alignment that can be given
 * to IOMMU_IOAS_MAP/COPY. IOVAs must satisfy::
 *
 *   starting_iova % out_iova_alignment == 0
 *   (starting_iova + length) % out_iova_alignment == 0
 *
 * out_iova_alignment can be 1, indicating any IOVA is allowed. It cannot
 * be higher than the system PAGE_SIZE.
 */
struct iommu_ioas_iova_ranges {
	__u32 size;
	__u32 ioas_id;
	__u32 num_iovas;
	__u32 __reserved;
	__aligned_u64 allowed_iovas;
	__aligned_u64 out_iova_alignment;
};
#define IOMMU_IOAS_IOVA_RANGES _IO(IOMMUFD_TYPE, IOMMUFD_CMD_IOAS_IOVA_RANGES)
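
/*
 * Example (editorial sketch): the two-call pattern described above,
 * assuming `fd` and `ioas_id` from the earlier sketches. The first call
 * sizes the array; on EMSGSIZE the caller allocates and re-issues::
 *
 *	struct iommu_ioas_iova_ranges ranges = {
 *		.size = sizeof(ranges),
 *		.ioas_id = ioas_id,
 *		.num_iovas = 0,
 *	};
 *	if (ioctl(fd, IOMMU_IOAS_IOVA_RANGES, &ranges) && errno != EMSGSIZE)
 *		err(1, "IOMMU_IOAS_IOVA_RANGES");
 *	struct iommu_iova_range *iovas =
 *		calloc(ranges.num_iovas, sizeof(*iovas));
 *	ranges.allowed_iovas = (uintptr_t)iovas;
 *	if (ioctl(fd, IOMMU_IOAS_IOVA_RANGES, &ranges))
 *		err(1, "IOMMU_IOAS_IOVA_RANGES");
 */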

/**
 * struct iommu_ioas_allow_iovas - ioctl(IOMMU_IOAS_ALLOW_IOVAS)
 * @size: sizeof(struct iommu_ioas_allow_iovas)
 * @ioas_id: IOAS ID to allow IOVAs from
 * @num_iovas: Input/Output total number of ranges in the IOAS
 * @__reserved: Must be 0
 * @allowed_iovas: Pointer to array of struct iommu_iova_range
 *
 * Ensure a range of IOVAs is always available for allocation. If this call
 * succeeds then IOMMU_IOAS_IOVA_RANGES will never return a list of IOVA ranges
 * that is narrower than the ranges provided here. This call will fail if
 * IOMMU_IOAS_IOVA_RANGES is currently narrower than the given ranges.
 *
 * When an IOAS is first created the IOVA_RANGES will be maximally sized, and as
 * devices are attached the ranges will narrow based on the device restrictions.
 * When an allowed range is specified any narrowing will be refused, i.e. device
 * attachment can fail if the device requires limiting within the allowed range.
 *
 * Automatic IOVA allocation is also impacted by this call. MAP will only
 * allocate within the allowed IOVAs if they are present.
 *
 * This call replaces the entire allowed list with the given list.
 */
struct iommu_ioas_allow_iovas {
	__u32 size;
	__u32 ioas_id;
	__u32 num_iovas;
	__u32 __reserved;
	__aligned_u64 allowed_iovas;
};
#define IOMMU_IOAS_ALLOW_IOVAS _IO(IOMMUFD_TYPE, IOMMUFD_CMD_IOAS_ALLOW_IOVAS)
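
/*
 * Example (editorial sketch): reserving one IOVA range so that later
 * device attachments cannot narrow it away; `fd` and `ioas_id` as above,
 * with an arbitrary illustrative range::
 *
 *	struct iommu_iova_range keep = {
 *		.start = 0x100000,
 *		.last = 0x1fffff,
 *	};
 *	struct iommu_ioas_allow_iovas allow = {
 *		.size = sizeof(allow),
 *		.ioas_id = ioas_id,
 *		.num_iovas = 1,
 *		.allowed_iovas = (uintptr_t)&keep,
 *	};
 *	if (ioctl(fd, IOMMU_IOAS_ALLOW_IOVAS, &allow))
 *		err(1, "IOMMU_IOAS_ALLOW_IOVAS");
 */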

/**
 * enum iommufd_ioas_map_flags - Flags for map and copy
 * @IOMMU_IOAS_MAP_FIXED_IOVA: If clear the kernel will compute an appropriate
 *                             IOVA to place the mapping at
 * @IOMMU_IOAS_MAP_WRITEABLE: DMA is allowed to write to this mapping
 * @IOMMU_IOAS_MAP_READABLE: DMA is allowed to read from this mapping
 */
enum iommufd_ioas_map_flags {
	IOMMU_IOAS_MAP_FIXED_IOVA = 1 << 0,
	IOMMU_IOAS_MAP_WRITEABLE = 1 << 1,
	IOMMU_IOAS_MAP_READABLE = 1 << 2,
};

/**
 * struct iommu_ioas_map - ioctl(IOMMU_IOAS_MAP)
 * @size: sizeof(struct iommu_ioas_map)
 * @flags: Combination of enum iommufd_ioas_map_flags
 * @ioas_id: IOAS ID to change the mapping of
 * @__reserved: Must be 0
 * @user_va: Userspace pointer to start mapping from
 * @length: Number of bytes to map
 * @iova: IOVA the mapping was placed at. If IOMMU_IOAS_MAP_FIXED_IOVA is set
 *        then this must be provided as input.
 *
 * Set an IOVA mapping from a user pointer. If FIXED_IOVA is specified then the
 * mapping will be established at iova, otherwise a suitable location based on
 * the reserved and allowed lists will be automatically selected and returned in
 * iova.
 *
 * If IOMMU_IOAS_MAP_FIXED_IOVA is specified then the iova range must currently
 * be unused; an existing IOVA cannot be replaced.
 */
struct iommu_ioas_map {
	__u32 size;
	__u32 flags;
	__u32 ioas_id;
	__u32 __reserved;
	__aligned_u64 user_va;
	__aligned_u64 length;
	__aligned_u64 iova;
};
#define IOMMU_IOAS_MAP _IO(IOMMUFD_TYPE, IOMMUFD_CMD_IOAS_MAP)
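
/*
 * Example (editorial sketch): mapping an anonymous buffer of an assumed
 * size `len` and letting the kernel pick the IOVA (FIXED_IOVA left clear)::
 *
 *	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	struct iommu_ioas_map map = {
 *		.size = sizeof(map),
 *		.flags = IOMMU_IOAS_MAP_READABLE | IOMMU_IOAS_MAP_WRITEABLE,
 *		.ioas_id = ioas_id,
 *		.user_va = (uintptr_t)buf,
 *		.length = len,
 *	};
 *	if (ioctl(fd, IOMMU_IOAS_MAP, &map))
 *		err(1, "IOMMU_IOAS_MAP");
 *
 * On success map.iova holds the IOVA the kernel selected.
 */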

/**
 * struct iommu_ioas_copy - ioctl(IOMMU_IOAS_COPY)
 * @size: sizeof(struct iommu_ioas_copy)
 * @flags: Combination of enum iommufd_ioas_map_flags
 * @dst_ioas_id: IOAS ID to change the mapping of
 * @src_ioas_id: IOAS ID to copy from
 * @length: Number of bytes to copy and map
 * @dst_iova: IOVA the mapping was placed at. If IOMMU_IOAS_MAP_FIXED_IOVA is
 *            set then this must be provided as input.
 * @src_iova: IOVA to start the copy
 *
 * Copy an already existing mapping from src_ioas_id and establish it in
 * dst_ioas_id. The src iova/length must exactly match a range used with
 * IOMMU_IOAS_MAP.
 *
 * This may be used to efficiently clone a subset of an IOAS to another, or as a
 * kind of 'cache' to speed up mapping. Copy has an efficiency advantage over
 * establishing equivalent new mappings, as internal resources are shared, and
 * the kernel will pin the user memory only once.
 */
struct iommu_ioas_copy {
	__u32 size;
	__u32 flags;
	__u32 dst_ioas_id;
	__u32 src_ioas_id;
	__aligned_u64 length;
	__aligned_u64 dst_iova;
	__aligned_u64 src_iova;
};
#define IOMMU_IOAS_COPY _IO(IOMMUFD_TYPE, IOMMUFD_CMD_IOAS_COPY)
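
/*
 * Example (editorial sketch): cloning the mapping made in the previous
 * sketch into a second IOAS, reusing the already-pinned memory. `map` is
 * the struct from the IOMMU_IOAS_MAP sketch and `dst_ioas_id` is assumed
 * to come from another IOMMU_IOAS_ALLOC::
 *
 *	struct iommu_ioas_copy copy = {
 *		.size = sizeof(copy),
 *		.flags = IOMMU_IOAS_MAP_READABLE | IOMMU_IOAS_MAP_WRITEABLE,
 *		.dst_ioas_id = dst_ioas_id,
 *		.src_ioas_id = ioas_id,
 *		.length = map.length,
 *		.src_iova = map.iova,
 *	};
 *	if (ioctl(fd, IOMMU_IOAS_COPY, &copy))
 *		err(1, "IOMMU_IOAS_COPY");
 */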

/**
 * struct iommu_ioas_unmap - ioctl(IOMMU_IOAS_UNMAP)
 * @size: sizeof(struct iommu_ioas_unmap)
 * @ioas_id: IOAS ID to change the mapping of
 * @iova: IOVA to start the unmapping at
 * @length: Number of bytes to unmap; on output, the number of bytes unmapped
 *
 * Unmap an IOVA range. The iova/length must be a superset of a previously
 * mapped range used with IOMMU_IOAS_MAP or IOMMU_IOAS_COPY. Splitting or
 * truncating ranges is not allowed. Passing iova = 0 and length = U64_MAX
 * will unmap everything.
 */
struct iommu_ioas_unmap {
	__u32 size;
	__u32 ioas_id;
	__aligned_u64 iova;
	__aligned_u64 length;
};
#define IOMMU_IOAS_UNMAP _IO(IOMMUFD_TYPE, IOMMUFD_CMD_IOAS_UNMAP)
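
/*
 * Example (editorial sketch): unmapping everything in an IOAS using the
 * 0..U64_MAX convention described above (UINT64_MAX from <stdint.h>)::
 *
 *	struct iommu_ioas_unmap unmap = {
 *		.size = sizeof(unmap),
 *		.ioas_id = ioas_id,
 *		.iova = 0,
 *		.length = UINT64_MAX,
 *	};
 *	if (ioctl(fd, IOMMU_IOAS_UNMAP, &unmap))
 *		err(1, "IOMMU_IOAS_UNMAP");
 *
 * On return unmap.length holds the number of bytes actually unmapped.
 */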

/**
 * enum iommufd_option - Option IDs for ioctl(IOMMU_OPTION)
 * @IOMMU_OPTION_RLIMIT_MODE:
 *    Change how RLIMIT_MEMLOCK accounting works. The caller must have privilege
 *    to invoke this. Value 0 (default) is user based accounting, 1 uses process
 *    based accounting. Global option, object_id must be 0
 * @IOMMU_OPTION_HUGE_PAGES:
 *    Value 1 (default) allows contiguous pages to be combined when generating
 *    iommu mappings. Value 0 disables combining, everything is mapped to
 *    PAGE_SIZE. This can be useful for benchmarking. This is a per-IOAS
 *    option, the object_id must be the IOAS ID.
 */
enum iommufd_option {
	IOMMU_OPTION_RLIMIT_MODE = 0,
	IOMMU_OPTION_HUGE_PAGES = 1,
};

/**
 * enum iommufd_option_ops - Operations for ioctl(IOMMU_OPTION)
 * @IOMMU_OPTION_OP_SET: Set the option's value
 * @IOMMU_OPTION_OP_GET: Get the option's value
 */
enum iommufd_option_ops {
	IOMMU_OPTION_OP_SET = 0,
	IOMMU_OPTION_OP_GET = 1,
};

/**
 * struct iommu_option - iommu option multiplexer
 * @size: sizeof(struct iommu_option)
 * @option_id: One of enum iommufd_option
 * @op: One of enum iommufd_option_ops
 * @__reserved: Must be 0
 * @object_id: ID of the object if required
 * @val64: Option value to set or value returned on get
 *
 * Change a simple option value. This multiplexer allows controlling options
 * on objects. IOMMU_OPTION_OP_SET stores a new option value and
 * IOMMU_OPTION_OP_GET returns the current value.
 */
struct iommu_option {
	__u32 size;
	__u32 option_id;
	__u16 op;
	__u16 __reserved;
	__u32 object_id;
	__aligned_u64 val64;
};
#define IOMMU_OPTION _IO(IOMMUFD_TYPE, IOMMUFD_CMD_OPTION)
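
/*
 * Example (editorial sketch): disabling huge page combining on one IOAS,
 * i.e. a per-IOAS IOMMU_OPTION_HUGE_PAGES set to 0::
 *
 *	struct iommu_option opt = {
 *		.size = sizeof(opt),
 *		.option_id = IOMMU_OPTION_HUGE_PAGES,
 *		.op = IOMMU_OPTION_OP_SET,
 *		.object_id = ioas_id,
 *		.val64 = 0,
 *	};
 *	if (ioctl(fd, IOMMU_OPTION, &opt))
 *		err(1, "IOMMU_OPTION");
 */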

/**
 * enum iommufd_vfio_ioas_op - IOMMU_VFIO_IOAS_* ioctls
 * @IOMMU_VFIO_IOAS_GET: Get the current compatibility IOAS
 * @IOMMU_VFIO_IOAS_SET: Change the current compatibility IOAS
 * @IOMMU_VFIO_IOAS_CLEAR: Disable VFIO compatibility
 */
enum iommufd_vfio_ioas_op {
	IOMMU_VFIO_IOAS_GET = 0,
	IOMMU_VFIO_IOAS_SET = 1,
	IOMMU_VFIO_IOAS_CLEAR = 2,
};

/**
 * struct iommu_vfio_ioas - ioctl(IOMMU_VFIO_IOAS)
 * @size: sizeof(struct iommu_vfio_ioas)
 * @ioas_id: For IOMMU_VFIO_IOAS_SET the input IOAS ID to set
 *           For IOMMU_VFIO_IOAS_GET will output the IOAS ID
 * @op: One of enum iommufd_vfio_ioas_op
 * @__reserved: Must be 0
 *
 * The VFIO compatibility support uses a single IOAS because VFIO APIs do not
 * support the ID field. Set or get the IOAS that VFIO compatibility will use.
 * When VFIO_GROUP_SET_CONTAINER is used on an iommufd it will get the
 * compatibility IOAS, either by taking what is already set, or auto creating
 * one. From then on VFIO will continue to use that IOAS and is not affected by
 * this ioctl. SET or CLEAR does not destroy any auto-created IOAS.
 */
struct iommu_vfio_ioas {
	__u32 size;
	__u32 ioas_id;
	__u16 op;
	__u16 __reserved;
};
#define IOMMU_VFIO_IOAS _IO(IOMMUFD_TYPE, IOMMUFD_CMD_VFIO_IOAS)
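
/*
 * Example (editorial sketch): reading the current VFIO compatibility IOAS,
 * which may have been auto-created by VFIO_GROUP_SET_CONTAINER::
 *
 *	struct iommu_vfio_ioas vfio = {
 *		.size = sizeof(vfio),
 *		.op = IOMMU_VFIO_IOAS_GET,
 *	};
 *	if (ioctl(fd, IOMMU_VFIO_IOAS, &vfio))
 *		err(1, "IOMMU_VFIO_IOAS");
 *	__u32 compat_ioas_id = vfio.ioas_id;
 */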

/**
 * enum iommufd_hwpt_alloc_flags - Flags for HWPT allocation
 * @IOMMU_HWPT_ALLOC_NEST_PARENT: If set, allocate a HWPT that can serve as
 *                                the parent HWPT in a nesting configuration.
 * @IOMMU_HWPT_ALLOC_DIRTY_TRACKING: Dirty tracking support for device IOMMU is
 *                                   enforced on device attachment
 */
enum iommufd_hwpt_alloc_flags {
	IOMMU_HWPT_ALLOC_NEST_PARENT = 1 << 0,
	IOMMU_HWPT_ALLOC_DIRTY_TRACKING = 1 << 1,
};

/**
 * enum iommu_hwpt_vtd_s1_flags - Intel VT-d stage-1 page table
 *                                entry attributes
 * @IOMMU_VTD_S1_SRE: Supervisor request
 * @IOMMU_VTD_S1_EAFE: Extended access enable
 * @IOMMU_VTD_S1_WPE: Write protect enable
 */
enum iommu_hwpt_vtd_s1_flags {
	IOMMU_VTD_S1_SRE = 1 << 0,
	IOMMU_VTD_S1_EAFE = 1 << 1,
	IOMMU_VTD_S1_WPE = 1 << 2,
};

/**
 * struct iommu_hwpt_vtd_s1 - Intel VT-d stage-1 page table
 *                            info (IOMMU_HWPT_DATA_VTD_S1)
 * @flags: Combination of enum iommu_hwpt_vtd_s1_flags
 * @pgtbl_addr: The base address of the stage-1 page table
 * @addr_width: The address width of the stage-1 page table
 * @__reserved: Must be 0
 */
struct iommu_hwpt_vtd_s1 {
	__aligned_u64 flags;
	__aligned_u64 pgtbl_addr;
	__u32 addr_width;
	__u32 __reserved;
};

/**
 * enum iommu_hwpt_data_type - IOMMU HWPT Data Type
 * @IOMMU_HWPT_DATA_NONE: no data
 * @IOMMU_HWPT_DATA_VTD_S1: Intel VT-d stage-1 page table
 */
enum iommu_hwpt_data_type {
	IOMMU_HWPT_DATA_NONE,
	IOMMU_HWPT_DATA_VTD_S1,
};

/**
 * struct iommu_hwpt_alloc - ioctl(IOMMU_HWPT_ALLOC)
 * @size: sizeof(struct iommu_hwpt_alloc)
 * @flags: Combination of enum iommufd_hwpt_alloc_flags
 * @dev_id: The device to allocate this HWPT for
 * @pt_id: The IOAS or HWPT to connect this HWPT to
 * @out_hwpt_id: The ID of the new HWPT
 * @__reserved: Must be 0
 * @data_type: One of enum iommu_hwpt_data_type
 * @data_len: Length of the type specific data
 * @data_uptr: User pointer to the type specific data
 *
 * Explicitly allocate a hardware page table object. This is the same object
 * type that is returned by iommufd_device_attach() and represents the
 * underlying iommu driver's iommu_domain kernel object.
 *
 * A kernel-managed HWPT will be created with the mappings from the given
 * IOAS via the @pt_id. The @data_type for this allocation must be set to
 * IOMMU_HWPT_DATA_NONE. The HWPT can be allocated as a parent HWPT for a
 * nesting configuration by passing IOMMU_HWPT_ALLOC_NEST_PARENT via @flags.
 *
 * A user-managed nested HWPT will be created from a given parent HWPT via
 * @pt_id; the parent HWPT must itself have been allocated previously via
 * the same ioctl from a given IOAS. In this case, the @data_type
 * must be set to a pre-defined type corresponding to an I/O page table
 * type supported by the underlying IOMMU hardware.
 *
 * If the @data_type is set to IOMMU_HWPT_DATA_NONE, @data_len and
 * @data_uptr should be zero. Otherwise, both @data_len and @data_uptr
 * must be given.
 */
struct iommu_hwpt_alloc {
	__u32 size;
	__u32 flags;
	__u32 dev_id;
	__u32 pt_id;
	__u32 out_hwpt_id;
	__u32 __reserved;
	__u32 data_type;
	__u32 data_len;
	__aligned_u64 data_uptr;
};
#define IOMMU_HWPT_ALLOC _IO(IOMMUFD_TYPE, IOMMUFD_CMD_HWPT_ALLOC)
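
/*
 * Example (editorial sketch): allocating a kernel-managed HWPT for a bound
 * device, assuming `dev_id` came from binding the device to the iommufd
 * (e.g. through VFIO's device cdev interface)::
 *
 *	struct iommu_hwpt_alloc hwpt = {
 *		.size = sizeof(hwpt),
 *		.flags = 0,
 *		.dev_id = dev_id,
 *		.pt_id = ioas_id,
 *		.data_type = IOMMU_HWPT_DATA_NONE,
 *	};
 *	if (ioctl(fd, IOMMU_HWPT_ALLOC, &hwpt))
 *		err(1, "IOMMU_HWPT_ALLOC");
 *	__u32 hwpt_id = hwpt.out_hwpt_id;
 */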

/**
 * enum iommu_hw_info_vtd_flags - Flags for VT-d hw_info
 * @IOMMU_HW_INFO_VTD_ERRATA_772415_SPR17: If set, disallow read-only mappings
 *                                         on a nested_parent domain.
 *                                         https://www.intel.com/content/www/us/en/content-details/772415/content-details.html
 */
enum iommu_hw_info_vtd_flags {
	IOMMU_HW_INFO_VTD_ERRATA_772415_SPR17 = 1 << 0,
};

/**
 * struct iommu_hw_info_vtd - Intel VT-d hardware information
 *
 * @flags: Combination of enum iommu_hw_info_vtd_flags
 * @__reserved: Must be 0
 *
 * @cap_reg: Value of the Intel VT-d capability register defined in VT-d spec
 *           section 11.4.2 Capability Register.
 * @ecap_reg: Value of the Intel VT-d extended capability register defined in
 *            VT-d spec section 11.4.3 Extended Capability Register.
 *
 * Userspace needs to understand the Intel VT-d specification to decode the
 * register values.
 */
struct iommu_hw_info_vtd {
	__u32 flags;
	__u32 __reserved;
	__aligned_u64 cap_reg;
	__aligned_u64 ecap_reg;
};

/**
 * enum iommu_hw_info_type - IOMMU Hardware Info Types
 * @IOMMU_HW_INFO_TYPE_NONE: Used by the drivers that do not report hardware
 *                           info
 * @IOMMU_HW_INFO_TYPE_INTEL_VTD: Intel VT-d iommu info type
 */
enum iommu_hw_info_type {
	IOMMU_HW_INFO_TYPE_NONE,
	IOMMU_HW_INFO_TYPE_INTEL_VTD,
};

/**
 * enum iommufd_hw_capabilities - Generic iommu capability info
 * @IOMMU_HW_CAP_DIRTY_TRACKING: IOMMU hardware support for dirty tracking
 *                               If available, it means the following APIs
 *                               are supported:
 *
 *                                   IOMMU_HWPT_GET_DIRTY_BITMAP
 *                                   IOMMU_HWPT_SET_DIRTY_TRACKING
 */
enum iommufd_hw_capabilities {
	IOMMU_HW_CAP_DIRTY_TRACKING = 1 << 0,
};

/**
 * struct iommu_hw_info - ioctl(IOMMU_GET_HW_INFO)
 * @size: sizeof(struct iommu_hw_info)
 * @flags: Must be 0
 * @dev_id: The device bound to the iommufd
 * @data_len: Input the length of a user buffer in bytes. Output the length of
 *            data that the kernel supports
 * @data_uptr: User pointer to a user-space buffer used by the kernel to fill
 *             the iommu type specific hardware information data
 * @out_data_type: Output the iommu hardware info type as defined in the enum
 *                 iommu_hw_info_type.
 * @out_capabilities: Output the generic iommu capability info as defined in
 *                    the enum iommufd_hw_capabilities.
 * @__reserved: Must be 0
 *
 * Query iommu type specific hardware information from the iommu behind a
 * given device that has been bound to iommufd. This hardware info data will
 * be used to sync capabilities between the virtual iommu and the physical
 * iommu, e.g. a nested translation setup needs to check the hardware info, so
 * a guest stage-1 page table can be compatible with the physical iommu.
 *
 * To capture iommu type specific hardware information data, @data_uptr and
 * its length @data_len must be provided. Trailing bytes will be zeroed if the
 * user buffer is larger than the data that the kernel has. Otherwise, the
 * kernel only fills the buffer up to the given length in @data_len. If the
 * ioctl succeeds, @data_len will be updated to the length that the kernel
 * actually supports, and @out_data_type will be filled to decode the data
 * placed in the buffer pointed to by @data_uptr. An input @data_len of zero
 * is allowed.
 */
struct iommu_hw_info {
	__u32 size;
	__u32 flags;
	__u32 dev_id;
	__u32 data_len;
	__aligned_u64 data_uptr;
	__u32 out_data_type;
	__u32 __reserved;
	__aligned_u64 out_capabilities;
};
#define IOMMU_GET_HW_INFO _IO(IOMMUFD_TYPE, IOMMUFD_CMD_GET_HW_INFO)
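
/*
 * Example (editorial sketch): querying hardware info for a bound device;
 * the buffer layout is only known after checking out_data_type::
 *
 *	struct iommu_hw_info_vtd vtd;
 *	struct iommu_hw_info info = {
 *		.size = sizeof(info),
 *		.dev_id = dev_id,
 *		.data_len = sizeof(vtd),
 *		.data_uptr = (uintptr_t)&vtd,
 *	};
 *	if (ioctl(fd, IOMMU_GET_HW_INFO, &info))
 *		err(1, "IOMMU_GET_HW_INFO");
 *	if (info.out_data_type == IOMMU_HW_INFO_TYPE_INTEL_VTD)
 *		printf("cap %llx ecap %llx\n",
 *		       (unsigned long long)vtd.cap_reg,
 *		       (unsigned long long)vtd.ecap_reg);
 */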

/**
 * enum iommufd_hwpt_set_dirty_tracking_flags - Flags for steering dirty
 *                                              tracking
 * @IOMMU_HWPT_DIRTY_TRACKING_ENABLE: Enable dirty tracking
 */
enum iommufd_hwpt_set_dirty_tracking_flags {
	IOMMU_HWPT_DIRTY_TRACKING_ENABLE = 1,
};

/**
 * struct iommu_hwpt_set_dirty_tracking - ioctl(IOMMU_HWPT_SET_DIRTY_TRACKING)
 * @size: sizeof(struct iommu_hwpt_set_dirty_tracking)
 * @flags: Combination of enum iommufd_hwpt_set_dirty_tracking_flags
 * @hwpt_id: HW pagetable ID that represents the IOMMU domain
 * @__reserved: Must be 0
 *
 * Toggle dirty tracking on an HW pagetable.
 */
struct iommu_hwpt_set_dirty_tracking {
	__u32 size;
	__u32 flags;
	__u32 hwpt_id;
	__u32 __reserved;
};
#define IOMMU_HWPT_SET_DIRTY_TRACKING _IO(IOMMUFD_TYPE, \
					  IOMMUFD_CMD_HWPT_SET_DIRTY_TRACKING)
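
/*
 * Example (editorial sketch): enabling dirty tracking on a HWPT that was
 * allocated with IOMMU_HWPT_ALLOC_DIRTY_TRACKING::
 *
 *	struct iommu_hwpt_set_dirty_tracking set = {
 *		.size = sizeof(set),
 *		.flags = IOMMU_HWPT_DIRTY_TRACKING_ENABLE,
 *		.hwpt_id = hwpt_id,
 *	};
 *	if (ioctl(fd, IOMMU_HWPT_SET_DIRTY_TRACKING, &set))
 *		err(1, "IOMMU_HWPT_SET_DIRTY_TRACKING");
 */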

/**
 * enum iommufd_hwpt_get_dirty_bitmap_flags - Flags for getting dirty bits
 * @IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR: Just read the PTEs without clearing
 *                                        any dirty bits metadata. This flag
 *                                        can be passed when the next
 *                                        operation on the same IOVA range is
 *                                        expected to be an unmap.
 */
enum iommufd_hwpt_get_dirty_bitmap_flags {
	IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR = 1,
};

/**
 * struct iommu_hwpt_get_dirty_bitmap - ioctl(IOMMU_HWPT_GET_DIRTY_BITMAP)
 * @size: sizeof(struct iommu_hwpt_get_dirty_bitmap)
 * @hwpt_id: HW pagetable ID that represents the IOMMU domain
 * @flags: Combination of enum iommufd_hwpt_get_dirty_bitmap_flags
 * @__reserved: Must be 0
 * @iova: base IOVA of the bitmap first bit
 * @length: IOVA range size
 * @page_size: page size granularity of each bit in the bitmap
 * @data: bitmap where to set the dirty bits. Each bit in the bitmap
 *        represents one page_size-sized unit of IOVA, starting at @iova.
 *
 * Checking whether a given IOVA is dirty:
 *
 *  data[(iova / page_size) / 64] & (1ULL << ((iova / page_size) % 64))
 *
 * Walk the IOMMU pagetables for a given IOVA range to return a bitmap
 * with the dirty IOVAs. In doing so it will also by default clear any
 * dirty bit metadata set in the IOPTE.
 */
struct iommu_hwpt_get_dirty_bitmap {
	__u32 size;
	__u32 hwpt_id;
	__u32 flags;
	__u32 __reserved;
	__aligned_u64 iova;
	__aligned_u64 length;
	__aligned_u64 page_size;
	__aligned_u64 data;
};
#define IOMMU_HWPT_GET_DIRTY_BITMAP _IO(IOMMUFD_TYPE, \
					IOMMUFD_CMD_HWPT_GET_DIRTY_BITMAP)
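
/*
 * Example (editorial sketch): reading the dirty bitmap for the first 1GiB
 * of IOVA space (base 0) at 4KiB granularity, then testing one assumed
 * `iova` with the formula documented above::
 *
 *	__u64 page_size = 4096, len = 1ULL << 30;
 *	__u64 *bitmap = calloc(len / page_size / 64, sizeof(__u64));
 *	struct iommu_hwpt_get_dirty_bitmap get = {
 *		.size = sizeof(get),
 *		.hwpt_id = hwpt_id,
 *		.iova = 0,
 *		.length = len,
 *		.page_size = page_size,
 *		.data = (uintptr_t)bitmap,
 *	};
 *	if (ioctl(fd, IOMMU_HWPT_GET_DIRTY_BITMAP, &get))
 *		err(1, "IOMMU_HWPT_GET_DIRTY_BITMAP");
 *	bool dirty = bitmap[(iova / page_size) / 64] &
 *		     (1ULL << ((iova / page_size) % 64));
 */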

#endif