/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 */
#ifndef __VFIO_VFIO_H__
#define __VFIO_VFIO_H__

#include <linux/file.h>
#include <linux/device.h>
#include <linux/cdev.h>
#include <linux/module.h>
#include <linux/vfio.h>

struct iommufd_ctx;
struct iommu_group;
struct vfio_container;

void vfio_device_put_registration(struct vfio_device *device);
bool vfio_device_try_get_registration(struct vfio_device *device);
int vfio_device_open(struct vfio_device *device,
                     struct iommufd_ctx *iommufd, struct kvm *kvm);
void vfio_device_close(struct vfio_device *device,
                       struct iommufd_ctx *iommufd);

extern const struct file_operations vfio_device_fops;

enum vfio_group_type {
        /*
         * Physical device with IOMMU backing.
         */
        VFIO_IOMMU,

        /*
         * Virtual device without IOMMU backing. The VFIO core fakes up an
         * iommu_group as the iommu_group sysfs interface is part of the
         * userspace ABI. The user of these devices must not be able to
         * directly trigger unmediated DMA.
         */
        VFIO_EMULATED_IOMMU,

        /*
         * Physical device without IOMMU backing. The VFIO core fakes up an
         * iommu_group as the iommu_group sysfs interface is part of the
         * userspace ABI. Users can trigger unmediated DMA by the device;
         * usage is highly dangerous, requires an explicit opt-in and will
         * taint the kernel.
         */
        VFIO_NO_IOMMU,
};
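/*
 * Illustrative sketch (not part of this header; my_probe() and
 * dev_to_my_vdev() are hypothetical): the group type is selected by which
 * registration helper from <linux/vfio.h> a driver calls. A physical
 * device behind a real IOMMU registers with vfio_register_group_dev() and
 * gets a VFIO_IOMMU group; a mediated/virtual device registers with
 * vfio_register_emulated_iommu_dev() and gets a VFIO_EMULATED_IOMMU group,
 * taking on the obligation to mediate all DMA itself:
 *
 *	static int my_probe(struct device *dev)
 *	{
 *		struct vfio_device *vdev = dev_to_my_vdev(dev);
 *
 *		// Physical, IOMMU-backed device -> VFIO_IOMMU group:
 *		return vfio_register_group_dev(vdev);
 *
 *		// An emulated device would instead call
 *		// vfio_register_emulated_iommu_dev(vdev).
 *	}
 */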
struct vfio_group {
        struct device                   dev;
        struct cdev                     cdev;
        /*
         * When drivers is non-zero a driver is attached to the struct device
         * that provided the iommu_group and thus the iommu_group is a valid
         * pointer. When drivers is 0 the driver is being detached. Once
         * drivers reaches 0 the iommu_group is invalid.
         */
        refcount_t                      drivers;
        unsigned int                    container_users;
        struct iommu_group              *iommu_group;
        struct vfio_container           *container;
        struct list_head                device_list;
        struct mutex                    device_lock;
        struct list_head                vfio_next;
#if IS_ENABLED(CONFIG_VFIO_CONTAINER)
        struct list_head                container_next;
#endif
        enum vfio_group_type            type;
        struct mutex                    group_lock;
        struct kvm                      *kvm;
        struct file                     *opened_file;
        struct blocking_notifier_head   notifier;
        struct iommufd_ctx              *iommufd;
};

int vfio_device_set_group(struct vfio_device *device,
                          enum vfio_group_type type);
void vfio_device_remove_group(struct vfio_device *device);
void vfio_device_group_register(struct vfio_device *device);
void vfio_device_group_unregister(struct vfio_device *device);
int vfio_device_group_use_iommu(struct vfio_device *device);
void vfio_device_group_unuse_iommu(struct vfio_device *device);
void vfio_device_group_close(struct vfio_device *device);
bool vfio_device_has_container(struct vfio_device *device);
int __init vfio_group_init(void);
void vfio_group_cleanup(void);

static inline bool vfio_device_is_noiommu(struct vfio_device *vdev)
{
        return IS_ENABLED(CONFIG_VFIO_NOIOMMU) &&
               vdev->group->type == VFIO_NO_IOMMU;
}

#if IS_ENABLED(CONFIG_VFIO_CONTAINER)
/* events for the backend driver notify callback */
enum vfio_iommu_notify_type {
        VFIO_IOMMU_CONTAINER_CLOSE = 0,
};

/**
 * struct vfio_iommu_driver_ops - VFIO IOMMU driver callbacks
 */
struct vfio_iommu_driver_ops {
        char            *name;
        struct module   *owner;
        void            *(*open)(unsigned long arg);
        void            (*release)(void *iommu_data);
        long            (*ioctl)(void *iommu_data, unsigned int cmd,
                                 unsigned long arg);
        int             (*attach_group)(void *iommu_data,
                                        struct iommu_group *group,
                                        enum vfio_group_type);
        void            (*detach_group)(void *iommu_data,
                                        struct iommu_group *group);
        int             (*pin_pages)(void *iommu_data,
                                     struct iommu_group *group,
                                     dma_addr_t user_iova,
                                     int npage, int prot,
                                     struct page **pages);
        void            (*unpin_pages)(void *iommu_data,
                                       dma_addr_t user_iova, int npage);
        void            (*register_device)(void *iommu_data,
                                           struct vfio_device *vdev);
        void            (*unregister_device)(void *iommu_data,
                                             struct vfio_device *vdev);
        int             (*dma_rw)(void *iommu_data, dma_addr_t user_iova,
                                  void *data, size_t count, bool write);
        struct iommu_domain *(*group_iommu_domain)(void *iommu_data,
                                                   struct iommu_group *group);
        void            (*notify)(void *iommu_data,
                                  enum vfio_iommu_notify_type event);
};

struct vfio_iommu_driver {
        const struct vfio_iommu_driver_ops      *ops;
        struct list_head                        vfio_next;
};

int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops);
void vfio_unregister_iommu_driver(const struct vfio_iommu_driver_ops *ops);

struct vfio_container *vfio_container_from_file(struct file *filep);
int vfio_group_use_container(struct vfio_group *group);
void vfio_group_unuse_container(struct vfio_group *group);
int vfio_container_attach_group(struct vfio_container *container,
                                struct vfio_group *group);
void vfio_group_detach_container(struct vfio_group *group);
void vfio_device_container_register(struct vfio_device *device);
void vfio_device_container_unregister(struct vfio_device *device);
int vfio_device_container_pin_pages(struct vfio_device *device,
                                    dma_addr_t iova, int npage,
                                    int prot, struct page **pages);
void vfio_device_container_unpin_pages(struct vfio_device *device,
                                       dma_addr_t iova, int npage);
int vfio_device_container_dma_rw(struct vfio_device *device,
                                 dma_addr_t iova, void *data,
                                 size_t len, bool write);
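/*
 * Illustrative sketch (not part of this header; my_copy_from_guest() is
 * hypothetical): drivers do not call the container wrappers above directly.
 * They use the public helpers from <linux/vfio.h>, such as vfio_dma_rw()
 * and vfio_pin_pages(), which route to the vfio_device_container_*()
 * wrappers when the device is container-backed and from there to the
 * registered vfio_iommu_driver_ops callbacks:
 *
 *	static int my_copy_from_guest(struct vfio_device *vdev,
 *				      dma_addr_t iova, void *buf, size_t len)
 *	{
 *		// For a container-backed device this lands in
 *		// vfio_device_container_dma_rw() and then in the backend's
 *		// vfio_iommu_driver_ops->dma_rw callback.
 *		return vfio_dma_rw(vdev, iova, buf, len, false);
 *	}
 */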
int __init vfio_container_init(void);
void vfio_container_cleanup(void);
#else
static inline struct vfio_container *
vfio_container_from_file(struct file *filep)
{
        return NULL;
}

static inline int vfio_group_use_container(struct vfio_group *group)
{
        return -EOPNOTSUPP;
}

static inline void vfio_group_unuse_container(struct vfio_group *group)
{
}

static inline int vfio_container_attach_group(struct vfio_container *container,
                                              struct vfio_group *group)
{
        return -EOPNOTSUPP;
}

static inline void vfio_group_detach_container(struct vfio_group *group)
{
}

static inline void vfio_device_container_register(struct vfio_device *device)
{
}

static inline void vfio_device_container_unregister(struct vfio_device *device)
{
}

static inline int vfio_device_container_pin_pages(struct vfio_device *device,
                                                  dma_addr_t iova, int npage,
                                                  int prot, struct page **pages)
{
        return -EOPNOTSUPP;
}

static inline void vfio_device_container_unpin_pages(struct vfio_device *device,
                                                     dma_addr_t iova, int npage)
{
}

static inline int vfio_device_container_dma_rw(struct vfio_device *device,
                                               dma_addr_t iova, void *data,
                                               size_t len, bool write)
{
        return -EOPNOTSUPP;
}

static inline int vfio_container_init(void)
{
        return 0;
}
static inline void vfio_container_cleanup(void)
{
}
#endif

#if IS_ENABLED(CONFIG_IOMMUFD)
int vfio_iommufd_bind(struct vfio_device *device, struct iommufd_ctx *ictx);
void vfio_iommufd_unbind(struct vfio_device *device);
#else
static inline int vfio_iommufd_bind(struct vfio_device *device,
                                    struct iommufd_ctx *ictx)
{
        return -EOPNOTSUPP;
}

static inline void vfio_iommufd_unbind(struct vfio_device *device)
{
}
#endif

#if IS_ENABLED(CONFIG_VFIO_VIRQFD)
int __init vfio_virqfd_init(void);
void vfio_virqfd_exit(void);
#else
static inline int __init vfio_virqfd_init(void)
{
        return 0;
}
static inline void vfio_virqfd_exit(void)
{
}
#endif

#ifdef CONFIG_VFIO_NOIOMMU
extern bool vfio_noiommu __read_mostly;
#else
enum { vfio_noiommu = false };
#endif

#endif /* __VFIO_VFIO_H__ */
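/*
 * Illustrative note on the vfio_noiommu fallback above: defining the
 * disabled value as an enumerator rather than a macro keeps vfio_noiommu
 * a genuine compile-time constant that behaves like an ordinary C symbol,
 * so callers can write plain conditionals and let the compiler discard the
 * dead branch when CONFIG_VFIO_NOIOMMU is off, e.g.:
 *
 *	if (vfio_noiommu)
 *		dev_warn(dev, "device is running without IOMMU protection\n");
 */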