// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
 */

#include <linux/vfio.h>
#include <linux/cdx/cdx_bus.h>

#include "private.h"

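/*
 * open_device: build a per-device table describing each CDX resource.
 * A region is only advertised as MMAP-capable when both its start
 * address and its size are page aligned, so a mapping cannot expose
 * anything outside the region.
 */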
static int vfio_cdx_open_device(struct vfio_device *core_vdev)
{
	struct vfio_cdx_device *vdev =
		container_of(core_vdev, struct vfio_cdx_device, vdev);
	struct cdx_device *cdx_dev = to_cdx_device(core_vdev->dev);
	int count = cdx_dev->res_count;
	int i;

	vdev->regions = kcalloc(count, sizeof(struct vfio_cdx_region),
				GFP_KERNEL_ACCOUNT);
	if (!vdev->regions)
		return -ENOMEM;

	for (i = 0; i < count; i++) {
		struct resource *res = &cdx_dev->res[i];

		vdev->regions[i].addr = res->start;
		vdev->regions[i].size = resource_size(res);
		vdev->regions[i].type = res->flags;
		/*
		 * Only regions addressed with PAGE granularity may be
		 * MMAP'ed securely.
		 */
		if (!(vdev->regions[i].addr & ~PAGE_MASK) &&
		    !(vdev->regions[i].size & ~PAGE_MASK))
			vdev->regions[i].flags |=
					VFIO_REGION_INFO_FLAG_MMAP;
		vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_READ;
		if (!(cdx_dev->res[i].flags & IORESOURCE_READONLY))
			vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_WRITE;
	}

	return 0;
}

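/*
 * close_device: free the region table and reset the device so it is
 * left in a clean state when userspace drops it.
 */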
static void vfio_cdx_close_device(struct vfio_device *core_vdev)
{
	struct vfio_cdx_device *vdev =
		container_of(core_vdev, struct vfio_cdx_device, vdev);

	kfree(vdev->regions);
	cdx_dev_reset(core_vdev->dev);
}

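/* VFIO_DEVICE_GET_INFO: report device flags, region count and IRQ count */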
static int vfio_cdx_ioctl_get_info(struct vfio_cdx_device *vdev,
				   struct vfio_device_info __user *arg)
{
	unsigned long minsz = offsetofend(struct vfio_device_info, num_irqs);
	struct cdx_device *cdx_dev = to_cdx_device(vdev->vdev.dev);
	struct vfio_device_info info;

	if (copy_from_user(&info, arg, minsz))
		return -EFAULT;

	if (info.argsz < minsz)
		return -EINVAL;

	info.flags = VFIO_DEVICE_FLAGS_CDX;
	info.flags |= VFIO_DEVICE_FLAGS_RESET;

	info.num_regions = cdx_dev->res_count;
	info.num_irqs = 0;

	return copy_to_user(arg, &info, minsz) ? -EFAULT : 0;
}

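/*
 * VFIO_DEVICE_GET_REGION_INFO: report the size and access flags of one
 * region together with the per-index mmap offset computed by
 * vfio_cdx_index_to_offset() (defined in private.h).
 */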
static int vfio_cdx_ioctl_get_region_info(struct vfio_cdx_device *vdev,
					  struct vfio_region_info __user *arg)
{
	unsigned long minsz = offsetofend(struct vfio_region_info, offset);
	struct cdx_device *cdx_dev = to_cdx_device(vdev->vdev.dev);
	struct vfio_region_info info;

	if (copy_from_user(&info, arg, minsz))
		return -EFAULT;

	if (info.argsz < minsz)
		return -EINVAL;

	if (info.index >= cdx_dev->res_count)
		return -EINVAL;

	/* map offset to the physical address */
	info.offset = vfio_cdx_index_to_offset(info.index);
	info.size = vdev->regions[info.index].size;
	info.flags = vdev->regions[info.index].flags;

	return copy_to_user(arg, &info, minsz) ? -EFAULT : 0;
}

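/* Top-level dispatcher for the VFIO device ioctls handled by this driver */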
static long vfio_cdx_ioctl(struct vfio_device *core_vdev,
			   unsigned int cmd, unsigned long arg)
{
	struct vfio_cdx_device *vdev =
		container_of(core_vdev, struct vfio_cdx_device, vdev);
	void __user *uarg = (void __user *)arg;

	switch (cmd) {
	case VFIO_DEVICE_GET_INFO:
		return vfio_cdx_ioctl_get_info(vdev, uarg);
	case VFIO_DEVICE_GET_REGION_INFO:
		return vfio_cdx_ioctl_get_region_info(vdev, uarg);
	case VFIO_DEVICE_RESET:
		return cdx_dev_reset(core_vdev->dev);
	default:
		return -ENOTTY;
	}
}

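/*
 * Map one MMIO region into userspace: recover the page offset within
 * the region from vm_pgoff, bounds-check it against the region size,
 * then remap the physical range with device page protection.
 */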
static int vfio_cdx_mmap_mmio(struct vfio_cdx_region region,
			      struct vm_area_struct *vma)
{
	u64 size = vma->vm_end - vma->vm_start;
	u64 pgoff, base;

	pgoff = vma->vm_pgoff &
		((1U << (VFIO_CDX_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
	base = pgoff << PAGE_SHIFT;

	if (base + size > region.size)
		return -EINVAL;

	vma->vm_pgoff = (region.addr >> PAGE_SHIFT) + pgoff;
	vma->vm_page_prot = pgprot_device(vma->vm_page_prot);

	return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
				  size, vma->vm_page_prot);
}

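/*
 * mmap: the region index is encoded in the upper bits of the mmap
 * offset (vm_pgoff >> (VFIO_CDX_OFFSET_SHIFT - PAGE_SHIFT)); reject
 * mappings whose access mode exceeds the region's READ/WRITE flags.
 */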
static int vfio_cdx_mmap(struct vfio_device *core_vdev,
			 struct vm_area_struct *vma)
{
	struct vfio_cdx_device *vdev =
		container_of(core_vdev, struct vfio_cdx_device, vdev);
	struct cdx_device *cdx_dev = to_cdx_device(core_vdev->dev);
	unsigned int index;

	index = vma->vm_pgoff >> (VFIO_CDX_OFFSET_SHIFT - PAGE_SHIFT);

	if (index >= cdx_dev->res_count)
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_MMAP))
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_READ) &&
	    (vma->vm_flags & VM_READ))
		return -EPERM;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_WRITE) &&
	    (vma->vm_flags & VM_WRITE))
		return -EPERM;

	return vfio_cdx_mmap_mmio(vdev->regions[index], vma);
}

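/* VFIO callbacks; IOMMUFD binding uses the generic physical-device helpers */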
static const struct vfio_device_ops vfio_cdx_ops = {
	.name		= "vfio-cdx",
	.open_device	= vfio_cdx_open_device,
	.close_device	= vfio_cdx_close_device,
	.ioctl		= vfio_cdx_ioctl,
	.mmap		= vfio_cdx_mmap,
	.bind_iommufd	= vfio_iommufd_physical_bind,
	.unbind_iommufd	= vfio_iommufd_physical_unbind,
	.attach_ioas	= vfio_iommufd_physical_attach_ioas,
};

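/*
 * probe: allocate the vfio_cdx_device wrapper for this CDX device and
 * register it with the VFIO core.
 */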
static int vfio_cdx_probe(struct cdx_device *cdx_dev)
{
	struct vfio_cdx_device *vdev;
	struct device *dev = &cdx_dev->dev;
	int ret;

	vdev = vfio_alloc_device(vfio_cdx_device, vdev, dev,
				 &vfio_cdx_ops);
	if (IS_ERR(vdev))
		return PTR_ERR(vdev);

	ret = vfio_register_group_dev(&vdev->vdev);
	if (ret)
		goto out_uninit;

	dev_set_drvdata(dev, vdev);
	return 0;

out_uninit:
	vfio_put_device(&vdev->vdev);
	return ret;
}

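/* remove: unregister from the VFIO core and drop the device reference */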
static int vfio_cdx_remove(struct cdx_device *cdx_dev)
{
	struct device *dev = &cdx_dev->dev;
	struct vfio_cdx_device *vdev = dev_get_drvdata(dev);

	vfio_unregister_group_dev(&vdev->vdev);
	vfio_put_device(&vdev->vdev);

	return 0;
}

static const struct cdx_device_id vfio_cdx_table[] = {
	{ CDX_DEVICE_DRIVER_OVERRIDE(CDX_ANY_ID, CDX_ANY_ID,
				     CDX_ID_F_VFIO_DRIVER_OVERRIDE) }, /* match all by default */
	{}
};

MODULE_DEVICE_TABLE(cdx, vfio_cdx_table);

static struct cdx_driver vfio_cdx_driver = {
	.probe		= vfio_cdx_probe,
	.remove		= vfio_cdx_remove,
	.match_id_table	= vfio_cdx_table,
	.driver	= {
		.name	= "vfio-cdx",
	},
	.driver_managed_dma = true,
};

module_driver(vfio_cdx_driver, cdx_driver_register, cdx_driver_unregister);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("VFIO for CDX devices - User Level meta-driver");