// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES
 */
#include <linux/vfio.h>
#include <linux/iommufd.h>

#include "vfio.h"

MODULE_IMPORT_NS(IOMMUFD);
MODULE_IMPORT_NS(IOMMUFD_VFIO);

int vfio_iommufd_bind(struct vfio_device *vdev, struct iommufd_ctx *ictx)
{
	u32 ioas_id;
	u32 device_id;
	int ret;

	lockdep_assert_held(&vdev->dev_set->lock);

	if (vfio_device_is_noiommu(vdev)) {
		if (!capable(CAP_SYS_RAWIO))
			return -EPERM;

		/*
		 * Require no compat ioas to be assigned to proceed. The basic
		 * statement is that the user cannot have done something that
		 * implies they expected translation to exist.
		 */
		if (!iommufd_vfio_compat_ioas_get_id(ictx, &ioas_id))
			return -EPERM;
		return 0;
	}

	if (WARN_ON(!vdev->ops->bind_iommufd))
		return -ENODEV;

	ret = vdev->ops->bind_iommufd(vdev, ictx, &device_id);
	if (ret)
		return ret;

	ret = iommufd_vfio_compat_ioas_get_id(ictx, &ioas_id);
	if (ret)
		goto err_unbind;
	ret = vdev->ops->attach_ioas(vdev, &ioas_id);
	if (ret)
		goto err_unbind;

	/*
	 * The legacy path has no way to return the device id or the selected
	 * pt_id.
	 */
	return 0;

err_unbind:
	if (vdev->ops->unbind_iommufd)
		vdev->ops->unbind_iommufd(vdev);
	return ret;
}

void vfio_iommufd_unbind(struct vfio_device *vdev)
{
	lockdep_assert_held(&vdev->dev_set->lock);

	if (vfio_device_is_noiommu(vdev))
		return;

	if (vdev->ops->unbind_iommufd)
		vdev->ops->unbind_iommufd(vdev);
}

/*
 * The physical standard ops mean that the iommufd_device is bound to the
 * physical device vdev->dev that was provided to vfio_init_group_dev().
 * Drivers using this ops set should call vfio_register_group_dev().
 */
int vfio_iommufd_physical_bind(struct vfio_device *vdev,
			       struct iommufd_ctx *ictx, u32 *out_device_id)
{
	struct iommufd_device *idev;

	idev = iommufd_device_bind(ictx, vdev->dev, out_device_id);
	if (IS_ERR(idev))
		return PTR_ERR(idev);
	vdev->iommufd_device = idev;
	return 0;
}
EXPORT_SYMBOL_GPL(vfio_iommufd_physical_bind);

void vfio_iommufd_physical_unbind(struct vfio_device *vdev)
{
	lockdep_assert_held(&vdev->dev_set->lock);

	if (vdev->iommufd_attached) {
		iommufd_device_detach(vdev->iommufd_device);
		vdev->iommufd_attached = false;
	}
	iommufd_device_unbind(vdev->iommufd_device);
	vdev->iommufd_device = NULL;
}
EXPORT_SYMBOL_GPL(vfio_iommufd_physical_unbind);

int vfio_iommufd_physical_attach_ioas(struct vfio_device *vdev, u32 *pt_id)
{
	int rc;

	rc = iommufd_device_attach(vdev->iommufd_device, pt_id);
	if (rc)
		return rc;
	vdev->iommufd_attached = true;
	return 0;
}
EXPORT_SYMBOL_GPL(vfio_iommufd_physical_attach_ioas);
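/*
 * Illustrative sketch only: a driver backed by a physical device would
 * typically wire the vfio_iommufd_physical_* helpers above straight into its
 * vfio_device_ops and then register through vfio_register_group_dev(). The
 * "my_*" names and the open/close callbacks below are hypothetical
 * placeholders, not part of this file.
 *
 *	static const struct vfio_device_ops my_phys_ops = {
 *		.name		= "my-phys-driver",
 *		.open_device	= my_open_device,
 *		.close_device	= my_close_device,
 *		.bind_iommufd	= vfio_iommufd_physical_bind,
 *		.unbind_iommufd	= vfio_iommufd_physical_unbind,
 *		.attach_ioas	= vfio_iommufd_physical_attach_ioas,
 *	};
 */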
/*
 * The emulated standard ops mean that vfio_device is going to use the
 * "mdev path" and will call vfio_pin_pages()/vfio_dma_rw(). Drivers using this
 * ops set should call vfio_register_emulated_iommu_dev(). Drivers that do
 * not call vfio_pin_pages()/vfio_dma_rw() have no need to provide dma_unmap.
 */

static void vfio_emulated_unmap(void *data, unsigned long iova,
				unsigned long length)
{
	struct vfio_device *vdev = data;

	if (vdev->ops->dma_unmap)
		vdev->ops->dma_unmap(vdev, iova, length);
}

static const struct iommufd_access_ops vfio_user_ops = {
	.needs_pin_pages = 1,
	.unmap = vfio_emulated_unmap,
};

int vfio_iommufd_emulated_bind(struct vfio_device *vdev,
			       struct iommufd_ctx *ictx, u32 *out_device_id)
{
	struct iommufd_access *user;

	lockdep_assert_held(&vdev->dev_set->lock);

	user = iommufd_access_create(ictx, &vfio_user_ops, vdev, out_device_id);
	if (IS_ERR(user))
		return PTR_ERR(user);
	vdev->iommufd_access = user;
	return 0;
}
EXPORT_SYMBOL_GPL(vfio_iommufd_emulated_bind);

void vfio_iommufd_emulated_unbind(struct vfio_device *vdev)
{
	lockdep_assert_held(&vdev->dev_set->lock);

	if (vdev->iommufd_access) {
		iommufd_access_destroy(vdev->iommufd_access);
		vdev->iommufd_attached = false;
		vdev->iommufd_access = NULL;
	}
}
EXPORT_SYMBOL_GPL(vfio_iommufd_emulated_unbind);

int vfio_iommufd_emulated_attach_ioas(struct vfio_device *vdev, u32 *pt_id)
{
	int rc;

	lockdep_assert_held(&vdev->dev_set->lock);

	if (vdev->iommufd_attached)
		return -EBUSY;
	rc = iommufd_access_attach(vdev->iommufd_access, *pt_id);
	if (rc)
		return rc;
	vdev->iommufd_attached = true;
	return 0;
}
EXPORT_SYMBOL_GPL(vfio_iommufd_emulated_attach_ioas);
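/*
 * Illustrative sketch only: an emulated (mdev-style) driver would wire the
 * vfio_iommufd_emulated_* helpers into its vfio_device_ops, supply a
 * dma_unmap callback if it uses vfio_pin_pages()/vfio_dma_rw(), and register
 * through vfio_register_emulated_iommu_dev(). The "my_*" names below are
 * hypothetical placeholders, not part of this file.
 *
 *	static const struct vfio_device_ops my_mdev_ops = {
 *		.name		= "my-mdev-driver",
 *		.open_device	= my_open_device,
 *		.close_device	= my_close_device,
 *		.dma_unmap	= my_dma_unmap,
 *		.bind_iommufd	= vfio_iommufd_emulated_bind,
 *		.unbind_iommufd	= vfio_iommufd_emulated_unbind,
 *		.attach_ioas	= vfio_iommufd_emulated_attach_ioas,
 *	};
 */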