xref: /openbmc/linux/include/linux/vfio.h (revision 2ee432d7)
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3  * VFIO API definition
4  *
5  * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
6  *     Author: Alex Williamson <alex.williamson@redhat.com>
7  */
8 #ifndef VFIO_H
9 #define VFIO_H
10 
11 
12 #include <linux/iommu.h>
13 #include <linux/mm.h>
14 #include <linux/workqueue.h>
15 #include <linux/poll.h>
16 #include <linux/cdev.h>
17 #include <uapi/linux/vfio.h>
18 #include <linux/iova_bitmap.h>
19 
20 struct kvm;
21 struct iommufd_ctx;
22 struct iommufd_device;
23 struct iommufd_access;
24 
25 /*
26  * VFIO devices can be placed in a set, this allows all devices to share this
27  * structure and the VFIO core will provide a lock that is held around
28  * open_device()/close_device() for all devices in the set.
29  */
30 struct vfio_device_set {
31 	void *set_id;
32 	struct mutex lock;
33 	struct list_head device_list;
34 	unsigned int device_count;
35 };
36 
37 struct vfio_device {
38 	struct device *dev;
39 	const struct vfio_device_ops *ops;
40 	/*
41 	 * mig_ops/log_ops is a static property of the vfio_device which must
42 	 * be set prior to registering the vfio_device.
43 	 */
44 	const struct vfio_migration_ops *mig_ops;
45 	const struct vfio_log_ops *log_ops;
46 #if IS_ENABLED(CONFIG_VFIO_GROUP)
47 	struct vfio_group *group;
48 	struct list_head group_next;
49 	struct list_head iommu_entry;
50 #endif
51 	struct vfio_device_set *dev_set;
52 	struct list_head dev_set_list;
53 	unsigned int migration_flags;
54 	struct kvm *kvm;
55 
56 	/* Members below here are private, not for driver use */
57 	unsigned int index;
58 	struct device device;	/* device.kref covers object life circle */
59 #if IS_ENABLED(CONFIG_VFIO_DEVICE_CDEV)
60 	struct cdev cdev;
61 #endif
62 	refcount_t refcount;	/* user count on registered device*/
63 	unsigned int open_count;
64 	struct completion comp;
65 	struct iommufd_access *iommufd_access;
66 	void (*put_kvm)(struct kvm *kvm);
67 #if IS_ENABLED(CONFIG_IOMMUFD)
68 	struct iommufd_device *iommufd_device;
69 	u8 iommufd_attached:1;
70 #endif
71 	u8 cdev_opened:1;
72 };
73 
74 /**
75  * struct vfio_device_ops - VFIO bus driver device callbacks
76  *
77  * @name: Name of the device driver.
78  * @init: initialize private fields in device structure
79  * @release: Reclaim private fields in device structure
80  * @bind_iommufd: Called when binding the device to an iommufd
81  * @unbind_iommufd: Opposite of bind_iommufd
82  * @attach_ioas: Called when attaching device to an IOAS/HWPT managed by the
83  *		 bound iommufd. Undo in unbind_iommufd if @detach_ioas is not
84  *		 called.
85  * @detach_ioas: Opposite of attach_ioas
86  * @open_device: Called when the first file descriptor is opened for this device
87  * @close_device: Opposite of open_device
88  * @read: Perform read(2) on device file descriptor
89  * @write: Perform write(2) on device file descriptor
90  * @ioctl: Perform ioctl(2) on device file descriptor, supporting VFIO_DEVICE_*
91  *         operations documented below
92  * @mmap: Perform mmap(2) on a region of the device file descriptor
93  * @request: Request for the bus driver to release the device
94  * @match: Optional device name match callback (return: 0 for no-match, >0 for
95  *         match, -errno for abort (ex. match with insufficient or incorrect
96  *         additional args)
97  * @dma_unmap: Called when userspace unmaps IOVA from the container
98  *             this device is attached to.
99  * @device_feature: Optional, fill in the VFIO_DEVICE_FEATURE ioctl
100  */
101 struct vfio_device_ops {
102 	char	*name;
103 	int	(*init)(struct vfio_device *vdev);
104 	void	(*release)(struct vfio_device *vdev);
105 	int	(*bind_iommufd)(struct vfio_device *vdev,
106 				struct iommufd_ctx *ictx, u32 *out_device_id);
107 	void	(*unbind_iommufd)(struct vfio_device *vdev);
108 	int	(*attach_ioas)(struct vfio_device *vdev, u32 *pt_id);
109 	void	(*detach_ioas)(struct vfio_device *vdev);
110 	int	(*open_device)(struct vfio_device *vdev);
111 	void	(*close_device)(struct vfio_device *vdev);
112 	ssize_t	(*read)(struct vfio_device *vdev, char __user *buf,
113 			size_t count, loff_t *ppos);
114 	ssize_t	(*write)(struct vfio_device *vdev, const char __user *buf,
115 			 size_t count, loff_t *size);
116 	long	(*ioctl)(struct vfio_device *vdev, unsigned int cmd,
117 			 unsigned long arg);
118 	int	(*mmap)(struct vfio_device *vdev, struct vm_area_struct *vma);
119 	void	(*request)(struct vfio_device *vdev, unsigned int count);
120 	int	(*match)(struct vfio_device *vdev, char *buf);
121 	void	(*dma_unmap)(struct vfio_device *vdev, u64 iova, u64 length);
122 	int	(*device_feature)(struct vfio_device *device, u32 flags,
123 				  void __user *arg, size_t argsz);
124 };
125 
126 #if IS_ENABLED(CONFIG_IOMMUFD)
127 struct iommufd_ctx *vfio_iommufd_device_ictx(struct vfio_device *vdev);
128 int vfio_iommufd_get_dev_id(struct vfio_device *vdev, struct iommufd_ctx *ictx);
129 int vfio_iommufd_physical_bind(struct vfio_device *vdev,
130 			       struct iommufd_ctx *ictx, u32 *out_device_id);
131 void vfio_iommufd_physical_unbind(struct vfio_device *vdev);
132 int vfio_iommufd_physical_attach_ioas(struct vfio_device *vdev, u32 *pt_id);
133 void vfio_iommufd_physical_detach_ioas(struct vfio_device *vdev);
134 int vfio_iommufd_emulated_bind(struct vfio_device *vdev,
135 			       struct iommufd_ctx *ictx, u32 *out_device_id);
136 void vfio_iommufd_emulated_unbind(struct vfio_device *vdev);
137 int vfio_iommufd_emulated_attach_ioas(struct vfio_device *vdev, u32 *pt_id);
138 void vfio_iommufd_emulated_detach_ioas(struct vfio_device *vdev);
139 #else
140 static inline struct iommufd_ctx *
vfio_iommufd_device_ictx(struct vfio_device * vdev)141 vfio_iommufd_device_ictx(struct vfio_device *vdev)
142 {
143 	return NULL;
144 }
145 
146 static inline int
vfio_iommufd_get_dev_id(struct vfio_device * vdev,struct iommufd_ctx * ictx)147 vfio_iommufd_get_dev_id(struct vfio_device *vdev, struct iommufd_ctx *ictx)
148 {
149 	return VFIO_PCI_DEVID_NOT_OWNED;
150 }
151 
152 #define vfio_iommufd_physical_bind                                      \
153 	((int (*)(struct vfio_device *vdev, struct iommufd_ctx *ictx,   \
154 		  u32 *out_device_id)) NULL)
155 #define vfio_iommufd_physical_unbind \
156 	((void (*)(struct vfio_device *vdev)) NULL)
157 #define vfio_iommufd_physical_attach_ioas \
158 	((int (*)(struct vfio_device *vdev, u32 *pt_id)) NULL)
159 #define vfio_iommufd_physical_detach_ioas \
160 	((void (*)(struct vfio_device *vdev)) NULL)
161 #define vfio_iommufd_emulated_bind                                      \
162 	((int (*)(struct vfio_device *vdev, struct iommufd_ctx *ictx,   \
163 		  u32 *out_device_id)) NULL)
164 #define vfio_iommufd_emulated_unbind \
165 	((void (*)(struct vfio_device *vdev)) NULL)
166 #define vfio_iommufd_emulated_attach_ioas \
167 	((int (*)(struct vfio_device *vdev, u32 *pt_id)) NULL)
168 #define vfio_iommufd_emulated_detach_ioas \
169 	((void (*)(struct vfio_device *vdev)) NULL)
170 #endif
171 
vfio_device_cdev_opened(struct vfio_device * device)172 static inline bool vfio_device_cdev_opened(struct vfio_device *device)
173 {
174 	return device->cdev_opened;
175 }
176 
177 /**
178  * struct vfio_migration_ops - VFIO bus device driver migration callbacks
179  *
180  * @migration_set_state: Optional callback to change the migration state for
181  *         devices that support migration. It's mandatory for
182  *         VFIO_DEVICE_FEATURE_MIGRATION migration support.
183  *         The returned FD is used for data transfer according to the FSM
184  *         definition. The driver is responsible to ensure that FD reaches end
185  *         of stream or error whenever the migration FSM leaves a data transfer
186  *         state or before close_device() returns.
187  * @migration_get_state: Optional callback to get the migration state for
188  *         devices that support migration. It's mandatory for
189  *         VFIO_DEVICE_FEATURE_MIGRATION migration support.
190  * @migration_get_data_size: Optional callback to get the estimated data
191  *          length that will be required to complete stop copy. It's mandatory for
192  *          VFIO_DEVICE_FEATURE_MIGRATION migration support.
193  */
194 struct vfio_migration_ops {
195 	struct file *(*migration_set_state)(
196 		struct vfio_device *device,
197 		enum vfio_device_mig_state new_state);
198 	int (*migration_get_state)(struct vfio_device *device,
199 				   enum vfio_device_mig_state *curr_state);
200 	int (*migration_get_data_size)(struct vfio_device *device,
201 				       unsigned long *stop_copy_length);
202 };
203 
204 /**
205  * struct vfio_log_ops - VFIO bus device driver logging callbacks
206  *
207  * @log_start: Optional callback to ask the device start DMA logging.
208  * @log_stop: Optional callback to ask the device stop DMA logging.
209  * @log_read_and_clear: Optional callback to ask the device read
210  *         and clear the dirty DMAs in some given range.
211  *
212  * The vfio core implementation of the DEVICE_FEATURE_DMA_LOGGING_ set
213  * of features does not track logging state relative to the device,
214  * therefore the device implementation of vfio_log_ops must handle
215  * arbitrary user requests. This includes rejecting subsequent calls
216  * to log_start without an intervening log_stop, as well as graceful
217  * handling of log_stop and log_read_and_clear from invalid states.
218  */
219 struct vfio_log_ops {
220 	int (*log_start)(struct vfio_device *device,
221 		struct rb_root_cached *ranges, u32 nnodes, u64 *page_size);
222 	int (*log_stop)(struct vfio_device *device);
223 	int (*log_read_and_clear)(struct vfio_device *device,
224 		unsigned long iova, unsigned long length,
225 		struct iova_bitmap *dirty);
226 };
227 
228 /**
229  * vfio_check_feature - Validate user input for the VFIO_DEVICE_FEATURE ioctl
230  * @flags: Arg from the device_feature op
231  * @argsz: Arg from the device_feature op
232  * @supported_ops: Combination of VFIO_DEVICE_FEATURE_GET and SET the driver
233  *                 supports
234  * @minsz: Minimum data size the driver accepts
235  *
236  * For use in a driver's device_feature op. Checks that the inputs to the
237  * VFIO_DEVICE_FEATURE ioctl are correct for the driver's feature. Returns 1 if
238  * the driver should execute the get or set, otherwise the relevant
239  * value should be returned.
240  */
vfio_check_feature(u32 flags,size_t argsz,u32 supported_ops,size_t minsz)241 static inline int vfio_check_feature(u32 flags, size_t argsz, u32 supported_ops,
242 				    size_t minsz)
243 {
244 	if ((flags & (VFIO_DEVICE_FEATURE_GET | VFIO_DEVICE_FEATURE_SET)) &
245 	    ~supported_ops)
246 		return -EINVAL;
247 	if (flags & VFIO_DEVICE_FEATURE_PROBE)
248 		return 0;
249 	/* Without PROBE one of GET or SET must be requested */
250 	if (!(flags & (VFIO_DEVICE_FEATURE_GET | VFIO_DEVICE_FEATURE_SET)))
251 		return -EINVAL;
252 	if (argsz < minsz)
253 		return -EINVAL;
254 	return 1;
255 }
256 
257 struct vfio_device *_vfio_alloc_device(size_t size, struct device *dev,
258 				       const struct vfio_device_ops *ops);
259 #define vfio_alloc_device(dev_struct, member, dev, ops)				\
260 	container_of(_vfio_alloc_device(sizeof(struct dev_struct) +		\
261 					BUILD_BUG_ON_ZERO(offsetof(		\
262 						struct dev_struct, member)),	\
263 					dev, ops),				\
264 		     struct dev_struct, member)
265 
vfio_put_device(struct vfio_device * device)266 static inline void vfio_put_device(struct vfio_device *device)
267 {
268 	put_device(&device->device);
269 }
270 
271 int vfio_register_group_dev(struct vfio_device *device);
272 int vfio_register_emulated_iommu_dev(struct vfio_device *device);
273 void vfio_unregister_group_dev(struct vfio_device *device);
274 
275 int vfio_assign_device_set(struct vfio_device *device, void *set_id);
276 unsigned int vfio_device_set_open_count(struct vfio_device_set *dev_set);
277 struct vfio_device *
278 vfio_find_device_in_devset(struct vfio_device_set *dev_set,
279 			   struct device *dev);
280 
281 int vfio_mig_get_next_state(struct vfio_device *device,
282 			    enum vfio_device_mig_state cur_fsm,
283 			    enum vfio_device_mig_state new_fsm,
284 			    enum vfio_device_mig_state *next_fsm);
285 
286 void vfio_combine_iova_ranges(struct rb_root_cached *root, u32 cur_nodes,
287 			      u32 req_nodes);
288 
289 /*
290  * External user API
291  */
292 struct iommu_group *vfio_file_iommu_group(struct file *file);
293 
294 #if IS_ENABLED(CONFIG_VFIO_GROUP)
295 bool vfio_file_is_group(struct file *file);
296 bool vfio_file_has_dev(struct file *file, struct vfio_device *device);
297 #else
vfio_file_is_group(struct file * file)298 static inline bool vfio_file_is_group(struct file *file)
299 {
300 	return false;
301 }
302 
vfio_file_has_dev(struct file * file,struct vfio_device * device)303 static inline bool vfio_file_has_dev(struct file *file, struct vfio_device *device)
304 {
305 	return false;
306 }
307 #endif
308 bool vfio_file_is_valid(struct file *file);
309 bool vfio_file_enforced_coherent(struct file *file);
310 void vfio_file_set_kvm(struct file *file, struct kvm *kvm);
311 
312 #define VFIO_PIN_PAGES_MAX_ENTRIES	(PAGE_SIZE/sizeof(unsigned long))
313 
314 int vfio_pin_pages(struct vfio_device *device, dma_addr_t iova,
315 		   int npage, int prot, struct page **pages);
316 void vfio_unpin_pages(struct vfio_device *device, dma_addr_t iova, int npage);
317 int vfio_dma_rw(struct vfio_device *device, dma_addr_t iova,
318 		void *data, size_t len, bool write);
319 
320 /*
321  * Sub-module helpers
322  */
323 struct vfio_info_cap {
324 	struct vfio_info_cap_header *buf;
325 	size_t size;
326 };
327 struct vfio_info_cap_header *vfio_info_cap_add(struct vfio_info_cap *caps,
328 					       size_t size, u16 id,
329 					       u16 version);
330 void vfio_info_cap_shift(struct vfio_info_cap *caps, size_t offset);
331 
332 int vfio_info_add_capability(struct vfio_info_cap *caps,
333 			     struct vfio_info_cap_header *cap, size_t size);
334 
335 int vfio_set_irqs_validate_and_prepare(struct vfio_irq_set *hdr,
336 				       int num_irqs, int max_irq_type,
337 				       size_t *data_size);
338 
339 /*
340  * IRQfd - generic
341  */
342 struct virqfd {
343 	void			*opaque;
344 	struct eventfd_ctx	*eventfd;
345 	int			(*handler)(void *, void *);
346 	void			(*thread)(void *, void *);
347 	void			*data;
348 	struct work_struct	inject;
349 	wait_queue_entry_t		wait;
350 	poll_table		pt;
351 	struct work_struct	shutdown;
352 	struct work_struct	flush_inject;
353 	struct virqfd		**pvirqfd;
354 };
355 
356 int vfio_virqfd_enable(void *opaque, int (*handler)(void *, void *),
357 		       void (*thread)(void *, void *), void *data,
358 		       struct virqfd **pvirqfd, int fd);
359 void vfio_virqfd_disable(struct virqfd **pvirqfd);
360 void vfio_virqfd_flush_thread(struct virqfd **pvirqfd);
361 
362 #endif /* VFIO_H */
363